metadata (dict) | text (string, 60 to 3.49M characters)
---|---|
{
"source": "18894142401/ribosome",
"score": 3
} |
#### File: 18894142401/ribosome/infer.py
```python
import numpy as np
import torch
from PIL import Image
from argparse import ArgumentParser
import base64
from model import *
DEFAULT_OUTPUT = 'output.jpg'
DEFAULT_MODEL = 'model.pt'
def get_opts():
parser = ArgumentParser()
parser.add_argument('hash', help='Base64-encoded hash', type=str)
parser.add_argument('--model', help='Model checkpoint', type=str, default=DEFAULT_MODEL)
parser.add_argument('--output', help='Output filename', type=str, default=DEFAULT_OUTPUT)
return parser.parse_args()
def main():
opts = get_opts()
# load model
model = Model()
# for a single image, faster to do this on CPU
model.load_state_dict(torch.load(opts.model, map_location=torch.device('cpu')))
model.eval()
hash_tensor = torch.tensor(np.array(list(base64.b64decode(opts.hash)), dtype=np.uint8))
with torch.no_grad():
# batch size 1
inverted = model(hash_tensor.unsqueeze(0))[0]
# convert from CHW to HWC and to uint8
inverted = np.clip(inverted.permute(1, 2, 0).numpy(), 0, 255).astype(np.uint8)
Image.fromarray(inverted).save(opts.output)
if __name__ == '__main__':
main()
```
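infer.py takes its positional `hash` argument as a base64 string and decodes it back into raw bytes before feeding the model. A minimal sketch of preparing that argument from raw hash bytes (the 16-byte value below is a placeholder, not a real perceptual hash):
```python
import base64

# Placeholder bytes standing in for a real perceptual hash.
raw_hash = bytes(range(16))
encoded = base64.b64encode(raw_hash).decode("ascii")
print(encoded)  # pass this string as the positional `hash` argument of infer.py
```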
#### File: 18894142401/ribosome/train.py
```python
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
from tqdm import tqdm
from argparse import ArgumentParser
from dataset import *
from model import *
DEFAULT_EPOCHS = 10
DEFAULT_OUTPUT = 'model'
DEFAULT_BATCH_SIZE = 256
def get_opts():
parser = ArgumentParser()
parser.add_argument('--data-dir', help='Directory containing train/validation images', type=str, default='.')
parser.add_argument('--train-data', help='Training data, CSV of pairs of (path, base64-encoded hash)', type=str, required=True)
parser.add_argument('--val-data', help='Validation data', type=str, required=False)
parser.add_argument('--epochs', help='Training epochs', type=int, default=DEFAULT_EPOCHS)
parser.add_argument('--output', help='Name of model output (without extension)', type=str, default=DEFAULT_OUTPUT)
parser.add_argument('--checkpoint-iter', help='Checkpoint frequency', type=int, default=-1)
parser.add_argument('--batch-size', help='Batch size', type=int, default=DEFAULT_BATCH_SIZE)
parser.add_argument('--verbose', help='Print intermediate statistics', action='store_true')
return parser.parse_args()
def main():
opts = get_opts()
use_cuda = check_cuda()
# init model
model = Model()
if use_cuda:
model = model.cuda()
criterion = nn.MSELoss()
optimizer = optim.Adam(model.parameters())
# init dataset
train_data = HashToImage(opts.train_data, opts.data_dir)
train_dataloader = DataLoader(train_data, batch_size=opts.batch_size, shuffle=True, pin_memory=True)
val_data = HashToImage(opts.val_data, opts.data_dir) if opts.val_data else None
val_dataloader = DataLoader(val_data, batch_size=opts.batch_size, shuffle=True, pin_memory=True) if val_data else None
# train
with tqdm(range(opts.epochs), unit='epoch', total=opts.epochs) as tepochs:
for epoch in tepochs:
train_loss = 0
for data in tqdm(train_dataloader, unit='batch', total=len(train_dataloader), leave=False):
x, y = data
y = y.type(torch.float32)
optimizer.zero_grad()
if use_cuda:
x = x.cuda()
y = y.cuda()
y_ = model(x)
loss = criterion(y_, y)
loss.backward()
optimizer.step()
train_loss += loss.item()  # accumulate a plain float so autograd graphs are not kept around
train_loss = train_loss / len(train_dataloader)
# save checkpoint
if opts.checkpoint_iter > 0 and epoch % opts.checkpoint_iter == 0:
torch.save(model.state_dict(), '{}-epoch{:d}.pt'.format(opts.output, epoch))
# stats
if opts.verbose:
tepochs.clear()
if val_dataloader:
val_loss = compute_val_loss(model, criterion, val_dataloader, use_cuda)
print('Epoch {}, train loss: {:.1f}, val loss: {:.1f}'.format(epoch, train_loss, val_loss))
else:
print('Epoch {}, train loss: {:.1f}'.format(epoch, train_loss))
else:
if val_dataloader:
val_loss = compute_val_loss(model, criterion, val_dataloader, use_cuda)
tepochs.set_postfix(train_loss=train_loss, val_loss=val_loss)
else:
tepochs.set_postfix(train_loss=train_loss)
# save final model
torch.save(model.state_dict(), '{}.pt'.format(opts.output))
def compute_val_loss(model, criterion, val_dataloader, use_cuda):
loss = 0
model.eval()
with torch.no_grad():
for data in val_dataloader:
x, y = data
if use_cuda:
x = x.cuda()
y = y.cuda()
y_ = model(x)
loss += criterion(y_, y).item()
model.train()
return loss / len(val_dataloader)
def check_cuda():
return torch.cuda.is_available() and torch.cuda.device_count() > 0
if __name__ == '__main__':
main()
``` |
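train.py relies on `HashToImage` from dataset.py, which is not included above. Based on how it is used (a CSV of (path, base64-encoded hash) pairs, batches of `(hash_tensor, image_tensor)` compared with MSE loss), a rough sketch of the interface it appears to assume could look like the following; the CSV layout and image handling here are assumptions, not the repository's actual implementation:
```python
import base64
import csv
import os

import numpy as np
import torch
from PIL import Image
from torch.utils.data import Dataset


class HashToImageSketch(Dataset):
    """Hypothetical stand-in for the HashToImage dataset that train.py imports from dataset.py."""

    def __init__(self, csv_path, data_dir):
        with open(csv_path, newline="") as f:
            # assumed CSV layout: one "path,base64hash" pair per row
            self.rows = list(csv.reader(f))
        self.data_dir = data_dir

    def __len__(self):
        return len(self.rows)

    def __getitem__(self, idx):
        path, b64_hash = self.rows[idx]
        hash_tensor = torch.tensor(
            np.array(list(base64.b64decode(b64_hash)), dtype=np.uint8)
        )
        image = np.asarray(Image.open(os.path.join(self.data_dir, path)).convert("RGB"))
        # CHW layout, matching the model output that infer.py permutes back to HWC
        image_tensor = torch.tensor(image).permute(2, 0, 1)
        return hash_tensor, image_tensor
```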
{
"source": "189569400/ClickHouse",
"score": 3
} |
#### File: dbms/scripts/linear-counting-threshold.py
```python
import sys
import argparse
import tempfile
import random
import subprocess
import bisect
from copy import deepcopy
# Pseudo-random generator of unique numbers.
# http://preshing.com/20121224/how-to-generate-a-sequence-of-unique-random-integers/
class UniqueRandomGenerator:
prime = 4294967291
def __init__(self, seed_base, seed_offset):
self.index = self.permutePQR(self.permutePQR(seed_base) + 0x682f0161)
self.intermediate_offset = self.permutePQR(self.permutePQR(seed_offset) + 0x46790905)
def next(self):
val = self.permutePQR((self.permutePQR(self.index) + self.intermediate_offset) ^ 0x5bf03635)
self.index = self.index + 1
return val
def permutePQR(self, x):
if x >= self.prime:
return x
else:
residue = (x * x) % self.prime
if x <= self.prime/2:
return residue
else:
return self.prime - residue
# Create a table containing unique values.
def generate_data_source(host, port, http_port, min_cardinality, max_cardinality, count):
chunk_size = round((max_cardinality - (min_cardinality + 1)) / float(count))
used_values = 0
cur_count = 0
next_size = 0
sup = 32768
n1 = random.randrange(0, sup)
n2 = random.randrange(0, sup)
urng = UniqueRandomGenerator(n1, n2)
is_first = True
with tempfile.TemporaryDirectory() as tmp_dir:
filename = tmp_dir + '/table.txt'
with open(filename, 'w+b') as file_handle:
while cur_count < count:
if is_first == True:
is_first = False
if min_cardinality != 0:
next_size = min_cardinality + 1
else:
next_size = chunk_size
else:
next_size += chunk_size
while used_values < next_size:
h = urng.next()
used_values = used_values + 1
out = str(h) + "\t" + str(cur_count) + "\n";
file_handle.write(bytes(out, 'UTF-8'));
cur_count = cur_count + 1
query = "DROP TABLE IF EXISTS data_source"
subprocess.check_output(["clickhouse-client", "--host", host, "--port", str(port), "--query", query])
query = "CREATE TABLE data_source(UserID UInt64, KeyID UInt64) ENGINE=TinyLog"
subprocess.check_output(["clickhouse-client", "--host", host, "--port", str(port), "--query", query])
cat = subprocess.Popen(("cat", filename), stdout=subprocess.PIPE)
subprocess.check_output(("POST", "http://{0}:{1}/?query=INSERT INTO data_source FORMAT TabSeparated".format(host, http_port)), stdin=cat.stdout)
cat.wait()
def perform_query(host, port):
query = "SELECT runningAccumulate(uniqExactState(UserID)) AS exact, "
query += "runningAccumulate(uniqCombinedRawState(UserID)) AS raw, "
query += "runningAccumulate(uniqCombinedLinearCountingState(UserID)) AS linear_counting, "
query += "runningAccumulate(uniqCombinedBiasCorrectedState(UserID)) AS bias_corrected "
query += "FROM data_source GROUP BY KeyID"
return subprocess.check_output(["clickhouse-client", "--host", host, "--port", port, "--query", query])
def parse_clickhouse_response(response):
parsed = []
lines = response.decode().split("\n")
for cur_line in lines:
rows = cur_line.split("\t")
if len(rows) == 4:
parsed.append([float(rows[0]), float(rows[1]), float(rows[2]), float(rows[3])])
return parsed
def accumulate_data(accumulated_data, data):
if not accumulated_data:
accumulated_data = deepcopy(data)
else:
for row1, row2 in zip(accumulated_data, data):
row1[1] += row2[1];
row1[2] += row2[2];
row1[3] += row2[3];
return accumulated_data
def dump_graphs(data, count):
with open("raw_graph.txt", "w+b") as fh1, open("linear_counting_graph.txt", "w+b") as fh2, open("bias_corrected_graph.txt", "w+b") as fh3:
expected_tab = []
bias_tab = []
for row in data:
exact = row[0]
raw = row[1] / count;
linear_counting = row[2] / count;
bias_corrected = row[3] / count;
outstr = "{0}\t{1}\n".format(exact, abs(raw - exact) / exact)
fh1.write(bytes(outstr, 'UTF-8'))
outstr = "{0}\t{1}\n".format(exact, abs(linear_counting - exact) / exact)
fh2.write(bytes(outstr, 'UTF-8'))
outstr = "{0}\t{1}\n".format(exact, abs(bias_corrected - exact) / exact)
fh3.write(bytes(outstr, 'UTF-8'))
def start():
parser = argparse.ArgumentParser(description = "Generate graphs that help to determine the linear counting threshold.")
parser.add_argument("-x", "--host", default="localhost", help="clickhouse host name");
parser.add_argument("-p", "--port", type=int, default=9000, help="clickhouse client TCP port");
parser.add_argument("-t", "--http_port", type=int, default=8123, help="clickhouse HTTP port");
parser.add_argument("-i", "--iterations", type=int, default=5000, help="number of iterations");
parser.add_argument("-m", "--min_cardinality", type=int, default=16384, help="minimal cardinality");
parser.add_argument("-M", "--max_cardinality", type=int, default=655360, help="maximal cardinality");
args = parser.parse_args()
accumulated_data = []
for i in range(0, args.iterations):
print(i + 1)
sys.stdout.flush()
generate_data_source(args.host, str(args.port), str(args.http_port), args.min_cardinality, args.max_cardinality, 1000)
response = perform_query(args.host, str(args.port))
data = parse_clickhouse_response(response)
accumulated_data = accumulate_data(accumulated_data, data)
dump_graphs(accumulated_data, args.iterations)
if __name__ == "__main__": start()
``` |
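The generator above follows the linked Preshing article: for a prime p with p ≡ 3 (mod 4), mapping x to x² mod p (or p minus that residue for the upper half) permutes the values below p. A tiny standalone check of that property with a small prime, independent of the ClickHouse script:
```python
# Verify the quadratic-residue permutation on a small prime p with p % 4 == 3.
def permute(x, prime=11):
    if x >= prime:
        return x
    residue = (x * x) % prime
    return residue if x <= prime / 2 else prime - residue

values = [permute(x) for x in range(11)]
assert sorted(values) == list(range(11))  # every input maps to a distinct output
print(values)
```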
{
"source": "189569400/ParlAI",
"score": 2
} |
#### File: projects/style_gen/modules.py
```python
import random
import numpy as np
import torch
from torch import nn as nn
from parlai.agents.transformer.modules import (
TransformerDecoder,
TransformerGeneratorModel,
_normalize,
)
from parlai.core.torch_agent import History
from parlai.utils.misc import AttrDict, warn_once
STYLE_SEP_TOKEN = ' STYLE '
class StyleHistoryMixin(History):
"""
Methods for adding style to history.
"""
def __init__(self, opt, **kwargs):
super().__init__(opt, **kwargs)
self.use_style_frac = opt['use_style_frac']
self.style = None
def reset(self):
super().reset()
self.style = None
def update_history(self, obs, *args, **kwargs):
super().update_history(obs, *args, **kwargs)
use_style_rand = random.random()
if use_style_rand < self.use_style_frac:
# Use the style
self.style = obs.get('personality')
# This key name is dependent on Image-Chat and will change for other tasks.
# If obs does not contain 'personality' (i.e. at the end of an epoch during
# validation), there will be no style
if self.style == '':
self.style = None
else:
self.style = None
def get_history_str(self):
history_str = super().get_history_str()
if history_str is not None and self.style is not None:
history_str += STYLE_SEP_TOKEN + self.style
return history_str
def get_history_vec(self):
history = super().get_history_vec()
if history is not None and self.style is not None:
style = STYLE_SEP_TOKEN + self.style
style_tok = self.parse(style)
history += style_tok
return history
class StyleAgentMixin:
"""
Methods for agents that return style from their histories.
"""
@classmethod
def add_cmdline_args(cls, argparser):
"""
Add command-line arguments specifically for this agent.
Does not add arguments from its superclass because it's a mixin.
"""
agent = argparser.add_argument_group('StyleAgentMixin arguments')
agent.add_argument(
'--use-style-frac',
type=float,
default=1.0,
help='What fraction of the time to use the style label',
)
return agent
class StyleHistory(StyleHistoryMixin, History):
"""
Modify history to save the style.
"""
class ClassifierOnGeneratorModel(TransformerGeneratorModel):
"""
TransformerGeneratorModel with a classifier head on top of the decoder.
Useful for performing classification with a pretrained generator model.
"""
@classmethod
def build_decoder(
cls,
opt,
dictionary,
embedding=None,
padding_idx=None,
n_positions=1024,
n_segments=0,
):
"""
Return TransformerDecoderWithEmbeds instead of TransformerDecoder.
"""
n_layers = (
opt['n_decoder_layers']
if opt.get('n_decoder_layers', -1) > 0
else opt['n_layers']
)
return TransformerDecoderWithEmbeds(
n_heads=opt['n_heads'],
n_layers=n_layers,
embedding_size=opt['embedding_size'],
ffn_size=opt['ffn_size'],
vocabulary_size=len(dictionary),
embedding=embedding,
dropout=opt['dropout'],
attention_dropout=opt['attention_dropout'],
relu_dropout=opt['relu_dropout'],
padding_idx=padding_idx,
learn_positional_embeddings=opt['learn_positional_embeddings'],
embeddings_scale=opt['embeddings_scale'],
n_positions=n_positions,
activation=opt['activation'],
variant=opt['variant'],
n_segments=n_segments,
)
def __init__(self, opt, dictionary, num_classes: int, personality_as_label: bool):
super().__init__(opt, dictionary)
self.classifier_head = nn.Linear(opt['embedding_size'], num_classes)
self.personality_as_label = personality_as_label
def forward(self, *xs):
"""
Get output class logits from the model.
:param xs:
- list of inputs to the encoder/decoder. Elements:
- text_vec: (LongTensor[bsz, text seqlen])
- label_vec: (LongTensor[bsz, label seqlen])
(Only used if not self.personality_as_label)
:return:
- the model's predicted per-class scores.
(FloatTensor[bsz, len(class_list)])
"""
if self.personality_as_label:
# All tokens go into the encoder and classification is learned from that.
# This is useful in the standard case where we have a fixed utterance that
# doesn't need to be generated, and we can just stick it all in the encoder
# to be classified.
assert len(xs) == 1
# Only one input allowed
bsz = xs[0].size(0)
encoder_states = self.encoder(*xs)
inputs = self.START.detach().expand(bsz, 1)
# Generate most likely class given start token as input
latent, _ = self.decoder(inputs, encoder_states)
# latent: [bsz, seqlen, emb_dim]
scores = self.classifier_head(latent.squeeze(dim=1))
else:
# Tokens are split between the encoder and decoder and classification is
# learned from both. This is useful when we want to classify a partially
# generated utterance, along with its context in the encoder.
text_vec, label_vec = xs
encoder_states = self.encoder(text_vec)
latent, _ = self.decoder(label_vec, encoder_states)
# latent: [bsz, seqlen, emb_dim]
scores = self.classifier_head(latent.mean(dim=1))
return scores
class BatchWithPersonalities(AttrDict):
"""
Adds a 'personalities' field to the batch in the case where personality information
is not encoded in any other field.
"""
def __init__(self, personalities=None, **kwargs):
super().__init__(personalities=personalities, **kwargs)
class TransformerDecoderWithEmbeds(TransformerDecoder):
def forward(self, input, encoder_state, embedded_input=None, incr_state=None):
"""
Forward pass with the ability to pass in token-embedded inputs.
"""
# TODO: perhaps reduce the amount of code duplicated from TransformerDecoder.
# This would require modularizing several snippets of code inside
# TransformerDecoder methods.
encoder_output, encoder_mask = encoder_state
if input is not None:
seq_len = input.size(1)
positions = input.new(seq_len).long()
else:
seq_len = embedded_input.size(1)
positions = embedded_input.new(seq_len).long()
positions = torch.arange(seq_len, out=positions).unsqueeze(0)
if incr_state is not None:
# We're doing incremental decoding, so select only the most recent position
if input is not None:
input = input[:, -1:]
if embedded_input is not None:
embedded_input = embedded_input[:, -1:, :]
if positions is not None:
positions = positions[:, -1:]
else:
incr_state = {}
if embedded_input is not None:
tensor = embedded_input # No need to copy because we only reassign below
else:
tensor = self.embeddings(input)
if self.embeddings_scale:
tensor = tensor * np.sqrt(self.dim)
if self.variant == 'xlm':
tensor = _normalize(tensor, self.norm_embeddings)
if positions.max().item() > self.n_positions:
warn_once(
'You are inputting a sequence of {x} length, but only have '
'--n-positions {y}. Set --truncate or increase --n-positions'.format(
x=positions.max().item(), y=self.n_positions
)
)
tensor = tensor + self.position_embeddings(positions).expand_as(tensor)
tensor = self.dropout(tensor) # --dropout
new_incr_state = {}
if getattr(self.layers, 'is_model_parallel', False):
tensor, new_incr_state = self._apply_model_parallel(
tensor, encoder_output, encoder_mask, incr_state
)
else:
for idx, layer in enumerate(self.layers):
tensor, new_incr_state[idx] = layer(
x=tensor,
encoder_output=encoder_output,
encoder_mask=encoder_mask,
incr_state=incr_state.get(idx),
)
if self.variant == 'prelayernorm':
tensor = _normalize(tensor, self.norm_embeddings)
return tensor, new_incr_state
```
#### File: nightly/gpu/test_style_gen.py
```python
import unittest
import parlai.utils.testing as testing_utils
from parlai.core.opt import Opt
class TestClassifierOnGenerator(unittest.TestCase):
"""
Test classifier on generator.
"""
@testing_utils.retry()
def test_simple(self):
valid, test = testing_utils.train_model(
Opt(
dict(
task='integration_tests:classifier',
model='projects.style_gen.classifier:ClassifierAgent',
classes=['one', 'zero'],
optimizer='adamax',
truncate=8,
learningrate=7e-3,
batchsize=32,
num_epochs=5,
n_layers=1,
n_heads=1,
ffn_size=32,
embedding_size=32,
)
)
)
self.assertEqual(valid['accuracy'], 1.0)
self.assertEqual(test['accuracy'], 1.0)
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "1895-art/stock-price-predict",
"score": 3
} |
#### File: 1895-art/stock-price-predict/get_feature.py
```python
import getopt, sys, os
import csv
import pandas as pd
import locale
from locale import atof
locale.setlocale(locale.LC_NUMERIC, '')
def main():
try:
opts, args = getopt.getopt(sys.argv[1:], "ho:v:f:", ["help", "output=", "filepath="])
except getopt.GetoptError as err:
usage()
sys.exit(2)
output = None
verbose = False
filepath = os.getcwd()
for o, a in opts:
if o == "-v":
verbose = True
elif o in ("-h", "--help"):
usage()
sys.exit()
elif o in ("-o", "--output"):
output = a
elif o in ("-f", "--filepath"):
filepath = a
else:
assert False, "unhandled option"
return filepath
def usage():
print ("=======================\n"\
"please input filepath\n"\
"ex: python get_feature.py -f ./data/20180427 \n"\
"=======================")
def get_feature_data(filepath, encode=None, **argv):
"""
input:
filepath
encode
argv:
Code,Date,CB,Open,High,Low,Close,Volumn
True or False
"""
# keep only the column names whose flag is True
params = [i for i, t in argv.items() if t]
# abs filepath
filepath = os.path.abspath(filepath)
get_date = os.path.basename(filepath)
tetfp_file = os.path.join(filepath, "tetfp.csv")
save_process_path = os.path.join(os.path.abspath("./data/" + get_date + "_process"))
with open(tetfp_file, encoding=encode) as file:
rows = csv.reader(file, delimiter=",")
data = []
for row in rows:
new_index = []
for index in row:
if index:
index = index.strip()
new_index.append(index)
data.append(new_index)
df = pd.DataFrame(data=data[1:], columns=change_columns(*data[0]))
df = df.dropna()
df["Volumn"] = pd.to_numeric(df["Volumn"].replace('\.','', regex=True)
.replace(',','', regex=True)
.astype(int))
types = set(df.loc[:,"Code"])
if not os.path.exists(save_process_path):
os.mkdir(save_process_path)
for t in types:
str_t = str(int(t))
t_types = df.loc[df['Code'] == t][params]
t_types.to_csv(os.path.join(save_process_path, get_date + "_" + str_t + ".csv"), index=False)
def change_columns(*header):
"""
replace header to English
"""
column_dict = {
"代碼":"Code",
"日期":"Date",
"中文簡稱":"CB",
"開盤價(元)":"Open",
"最高價(元)":"High",
"最低價(元)":"Low",
"收盤價(元)":"Close",
"成交張數(張)": "Volumn"
}
return [column_dict[h] for h in header]
if __name__ == "__main__":
"""
choose data output column
"""
choose = {
"Code":True,
"Date":True,
"CB": False,
"Open": True,
"High": True,
"Low": True,
"Close": True,
"Volumn": True
}
filepath = main()
get_feature_data(filepath, "big5", **choose)
``` |
{
"source": "18970738669/opencv_demo",
"score": 3
} |
#### File: opencv_test1/demo/histogram_demo.py
```python
import cv2
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
import pytesseract
def find_waves(threshold, histogram):
up_point = -1 # rising point (start of a wave)
is_peak = False
if histogram[0] > threshold:
up_point = 0
is_peak = True
wave_peaks = []
for i, x in enumerate(histogram):
if is_peak and x < threshold:
if i - up_point > 2:
is_peak = False
wave_peaks.append((up_point, i))
elif not is_peak and x >= threshold:
is_peak = True
up_point = i
if is_peak and up_point != -1 and i - up_point > 4:
wave_peaks.append((up_point, i))
return wave_peaks
color = "blue"
card_img = cv2.imread("/home/python/Desktop/opencv_test/pic/card_img.jpg", 1)
gray_img = cv2.cvtColor(card_img, cv2.COLOR_BGR2GRAY)
ret, gray_img = cv2.threshold(gray_img, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
# print("gray_img{}".format(gray_img))
x_histogram = np.sum(gray_img, axis=1)
x_min = np.min(x_histogram)
# print(x_histogram, x_min)
x_average = np.sum(x_histogram) / x_histogram.shape[0]
x_threshold = (x_min + x_average) / 2
wave_peaks = find_waves(x_threshold, x_histogram)
print("wavex:{}".format(wave_peaks))
if len(wave_peaks) == 0:
print("peak less 0:")
# In the horizontal direction, assume the widest peak is the plate region
wave = max(wave_peaks, key=lambda x: x[1] - x[0])
gary_img = gray_img[wave[0]:wave[1]]
row_num, col_num = gray_img.shape[:2]
# Drop 1 pixel from the top and bottom edges of the plate so the white border does not skew the threshold
gray_img = gray_img[1:row_num - 1]
y_histogram = np.sum(gray_img, axis=0)
# print(y_histogram)
y_min = np.min(y_histogram)
y_average = np.sum(y_histogram) / y_histogram.shape[0]
y_threshold = (y_min + y_average) / 5 # U and 0 need a smaller threshold, otherwise they get split in half
wave_peaks = find_waves(y_threshold, y_histogram)
print("wavey:{}".format(wave_peaks))
# for wave in wave_peaks:
# cv2.line(card_img, pt1=(wave[0], 5), pt2=(wave[1], 5), color=(0, 0, 255), thickness=2)
# A plate should have more than 6 characters
if len(wave_peaks) <= 6:
print("peak less 1:", len(wave_peaks))
wave = max(wave_peaks, key=lambda x: x[1] - x[0])
max_wave_dis = wave[1] - wave[0]
# Check whether this is the left edge of the plate
if wave_peaks[0][1] - wave_peaks[0][0] < max_wave_dis / 3 and wave_peaks[0][0] == 0:
wave_peaks.pop(0)
# plt.imshow(x_gary_img), plt.title("x_gary_img")
# plt.show()
# cv2.imshow("x_gary_img", gary_img)
# cv2.imshow("img", gray_img)
# cv2.waitKey(0)
# cv2.destroyAllWindows()
```
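A quick synthetic check of `find_waves` above: two clearly separated runs above the threshold should come back as two (start, end) index pairs. This assumes the function is pasted into the same session; the numbers are made up for illustration.
```python
# Synthetic histogram with two peaks above the threshold of 2.
histogram = [0, 0, 5, 6, 7, 5, 0, 0, 0, 4, 5, 6, 7, 8, 0]
print(find_waves(2, histogram))  # expected: [(2, 6), (9, 14)]
```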
#### File: opencv_test1/demo/pinghuachuli.py
```python
import cv2
import numpy as np
def qu_zao(img):
kernel = np.ones((5, 5), np.float32) / 25
res = cv2.filter2D(img, -1, kernel)
# res2 = cv2.GaussianBlur(img, (5, 5), 0)
# cv2.imshow('res', res)
# cv2.imshow('res2', res2)
# cv2.waitKey()
return res
img = cv2.imread('/home/python/Desktop/opencv_test/pic/car4.jpg', 0)
kernel = np.ones((5, 5), np.float32) / 25
res = cv2.filter2D(img, -1, kernel)
res1 = cv2.blur(img, (5, 5))
res2 = cv2.GaussianBlur(img, (5, 5), 0)
res3 = cv2.medianBlur(img, 5)
res4 = cv2.bilateralFilter(img, 9, 75, 75)
cv2.imshow('pinghualvbo', res)
cv2.imshow('junzhilvbo', res1)
cv2.imshow('gaosilvbo', res2)
cv2.imshow('zhongzhilvbo', res3)
cv2.imshow('shuangbianlvbo', res4)
cv2.imshow('img', img)
cv2.waitKey()
```
#### File: opencv_demo/opencv_test1/dingwei.py
```python
import cv2
import numpy as np
SZ = 20
def point_limit(point):
if point[0] < 0:
point[0] = 0
if point[1] < 0:
point[1] = 0
def accurate_place(card_img_hsv, limit1, limit2, color):
row_num, col_num = card_img_hsv.shape[:2]
xl = col_num
xr = 0
yh = 0
yl = row_num
row_num_limit = 21
col_num_limit = col_num * 0.8 if color != "green" else col_num * 0.5 # green plates have a color gradient
for i in range(row_num):
count = 0
for j in range(col_num):
H = card_img_hsv.item(i, j, 0)
S = card_img_hsv.item(i, j, 1)
V = card_img_hsv.item(i, j, 2)
if limit1 < H <= limit2 and 34 < S and 46 < V:
count += 1
if count > col_num_limit:
if yl > i:
yl = i
if yh < i:
yh = i
for j in range(col_num):
count = 0
for i in range(row_num):
H = card_img_hsv.item(i, j, 0)
S = card_img_hsv.item(i, j, 1)
V = card_img_hsv.item(i, j, 2)
if limit1 < H <= limit2 and 34 < S and 46 < V:
count += 1
if count > row_num - row_num_limit:
if xl > j:
xl = j
if xr < j:
xr = j
return xl, xr, yh, yl
def dingwei(car_pic):
MAX_WIDTH = 2000
# Read the image first
img = cv2.imread(car_pic, 1)
# Get the image height and width
pic_hight, pic_width = img.shape[0:2]
print(pic_hight, pic_width)
# Resize the image if it is too wide
if pic_width > MAX_WIDTH:
resize_rate = MAX_WIDTH / pic_width
img = cv2.resize(img, (MAX_WIDTH, int(pic_hight * resize_rate)), interpolation=cv2.INTER_AREA)
# Kernel for the morphological opening
# img = cv2.resize(img, (600, 450), interpolation=cv2.INTER_AREA)
oldimg = img
kernel = np.ones((20, 20), np.uint8)
# Denoise with a Gaussian blur
img = cv2.GaussianBlur(img, (3, 3), 0)
# Denoise with a morphological opening
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
opening = cv2.morphologyEx(img, cv2.MORPH_OPEN, kernel)
# Increase the number of white pixels to locate the plate more precisely
img_opening = cv2.addWeighted(img, 1, opening, -1, 0)
# Threshold the image into a binary image
ret, img_thresh = cv2.threshold(img_opening, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
# ret, img_thresh = cv2.threshold(img_opening, 0, 255, cv2.THRESH_OTSU)
# Find the image edges
img_edge = cv2.Canny(img_thresh, 100, 200)
# Use opening and closing so the edges merge into connected regions
kernel = np.ones((4, 22), np.uint8)
# kernel = np.ones((5, 12), np.uint8)
# kernel = np.ones((4, 85), np.uint8)
img_edge1 = cv2.morphologyEx(img_edge, cv2.MORPH_CLOSE, kernel)
img_edge2 = cv2.morphologyEx(img_edge1, cv2.MORPH_OPEN, kernel)
# Find rectangular contours among the edges (the plate is inside one of them)
image, contours, hierarchy = cv2.findContours(img_edge2, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
contours = [cnt for cnt in contours if cv2.contourArea(cnt) > 1900]
cv2.drawContours(img, contours, 0, (0, 0, 255), 3)
# print(len(contours))
# cv2.drawContours(img, contours, 1, (0, 0, 255), 3)
# cv2.drawContours(img, contours, 2, (0, 0, 255), 3)
# rect = cv2.minAreaRect(contours[0])
# print("alpha=%d" % rect[2])
# box = cv2.boxPoints(rect)
# box = np.int0(box)
car_contours = []
# oldimg = cv2.drawContours(oldimg, [box], 0, (0, 0, 255), 2)
# cv2.imshow("oldimg", oldimg)opencv_test1/dingwei.py:100
# cv2.imshow("edge4", oldimg)
# cv2.imshow("opening", opening)
# cv2.imshow("img_edge1", img_edge1)
# cv2.imshow("img_thresh", img_thresh)
# cv2.imshow("img_edge2.jpg", img_edge2)
# cv2.imshow("img.jpg", img)
# cv2.waitKey(0)
for cnt in contours:
rect = cv2.minAreaRect(cnt) # returns a tuple ((center of the min-area rect), (width, height), rotation angle) -----> ((x, y), (w, h), θ)
area_width, area_height = rect[1]
if area_width < area_height:
area_width, area_height = area_height, area_width
wh_ratio = area_width / area_height
# print(wh_ratio)
# Keep only rectangles whose aspect ratio is between 2 and 6 (the typical plate ratio); discard the rest
if wh_ratio > 2 and wh_ratio < 6:
car_contours.append(rect)
box = cv2.boxPoints(rect)
box = np.int0(box)
# print(rect)
print(len(car_contours))
# print("cnt={}".format(cnt))
card_imgs=[]
for rect in car_contours:
if rect[2] > -1 and rect[2] < 1:
angle = 1
else:
angle = rect[2]
rect = (rect[0], (rect[1][0] + 5, rect[1][1] + 5), angle) # enlarge the rect slightly so the plate edge is not cut off
box = cv2.boxPoints(rect)
# print("alpha={}".format(rect[2]))
heigth_point = right_point = [0, 0]
# left_point = low_point = rect[1]
left_point = low_point = [pic_width, pic_hight]
# print("pic_width:{}, pic_hight{}".format(pic_width, pic_hight))
point_set = []
for point in box:
# print(point)
# print(left_point)
if left_point[0] > point[0]:
left_point = point
if low_point[1] > point[1]:
low_point = point
if heigth_point[1] < point[1]:
heigth_point = point
if right_point[0] < point[0]:
right_point = point
if left_point[1] <= right_point[1]: # positive angle
new_right_point = [right_point[0], heigth_point[1]]
pts2 = np.float32([left_point, heigth_point, new_right_point]) # only the character height needs to change
pts1 = np.float32([left_point, heigth_point, right_point])
M = cv2.getAffineTransform(pts1, pts2)
dst = cv2.warpAffine(oldimg, M, (pic_width, pic_hight))
point_limit(new_right_point)
point_limit(heigth_point)
point_limit(left_point)
card_img = dst[int(left_point[1]):int(heigth_point[1]), int(left_point[0]):int(new_right_point[0])]
if card_img.shape[:2][0] <= 10 or card_img.shape[:2][1] <= 10:
print("hight or width too low")
continue
card_imgs.append(card_img)
# cv2.imshow("card", card_img)
# cv2.waitKey(0)
elif left_point[1] > right_point[1]: # negative angle
new_left_point = [left_point[0], heigth_point[1]]
pts2 = np.float32([new_left_point, heigth_point, right_point]) # only the character height needs to change
pts1 = np.float32([left_point, heigth_point, right_point])
M = cv2.getAffineTransform(pts1, pts2)
dst = cv2.warpAffine(oldimg, M, (pic_width, pic_hight))
point_limit(right_point)
point_limit(heigth_point)
point_limit(new_left_point)
card_img = dst[int(right_point[1]):int(heigth_point[1]), int(new_left_point[0]):int(right_point[0])]
# card_img1 = oldimg[int(right_point[1]):int(heigth_point[1]), int(new_left_point[0]):int(right_point[0])]
if card_img.shape[:2][0] <= 10 or card_img.shape[:2][1] <= 10:
print("hight or width too low")
continue
card_imgs.append(card_img)
# cv2.imshow("card_img1", card_img)
# cv2.waitKey(0)
# Use color to reject rectangles that are not plates; only blue, green and yellow plates are recognized for now
colors = []
for card_index, card_img in enumerate(card_imgs):
green = yello = blue = black = white = 0
card_img_hsv = cv2.cvtColor(card_img, cv2.COLOR_BGR2HSV)
if card_img_hsv is None:
continue
row_num, col_num = card_img_hsv.shape[:2]
# print("row_num:{}, col_num:{}".format(row_num, col_num))
card_img_count = row_num * col_num
# Determine the plate color
for i in range(row_num):
for j in range(col_num):
H = card_img_hsv.item(i, j, 0)
S = card_img_hsv.item(i, j, 1)
V = card_img_hsv.item(i, j, 2)
if 11 < H <= 34 and S > 34:
yello += 1
elif 35 < H <= 99 and S > 34:
green += 1
elif 99 < H <= 124 and S > 34:
blue += 1
if 0 < H < 180 and 0 < S < 255 and 0 < V < 46:
black += 1
elif 0 < H < 180 and 0 < S < 43 and 221 < V < 225:
white += 1
color = "no"
# Use the color pixel counts to reject contours
limit1 = limit2 = 0
if yello * 2 >= card_img_count:
color = "yello"
limit1 = 11
limit2 = 34
elif green * 2 >= card_img_count:
color = "green"
limit1 = 35
limit2 = 99
elif blue * 3 >= card_img_count:
color = "blue"
limit1 = 100
limit2 = 124
elif black + white >= card_img_count * 0.7: # TODO
color = "bw"
print(color)
colors.append(color)
print(blue, green, yello, black, white, card_img_count)
if limit1 == 0:
continue
if color == "yello":
continue
# cv2.imshow("color", card_img)
# cv2.imshow("img", img)
# cv2.waitKey(0)
xl, xr, yh, yl = accurate_place(card_img_hsv, limit1, limit2, color)
print("xl:{}, xr:{}, yh:{}, yl:{}".format(xl, xr, yh, yl))
# print("row_num:{}, col_num:{}".format(row_num, col_num))
if yl == yh and xl == xr:
continue
need_accurate = False
if yl >= yh:
yl = 0
yh = row_num
need_accurate = True
if xl >= xr:
xl = 0
xr = col_num
need_accurate = True
if abs(yh-yl) < row_num*0.7:
yl = 0
yh = row_num
card_imgs[card_index] = card_img[yl:yh, xl:xr] if color != "green" or yl < (yh - yl) // 4 else card_img[
yl - (
yh - yl) // 4:yh,
xl:xr]
print(xl, xr, yh, yl)
if need_accurate:
card_img = card_imgs[card_index]
card_img_hsv = cv2.cvtColor(card_img, cv2.COLOR_BGR2HSV)
xl, xr, yh, yl = accurate_place(card_img_hsv, limit1, limit2, color)
if yl == yh and xl == xr:
continue
if yl >= yh:
yl = 0
yh = row_num
if xl >= xr:
xl = 0
xr = col_num
card_imgs[card_index] = card_img[yl:yh, xl:xr] if color != "green" or yl < (yh - yl) // 4 else card_img[
yl - (
yh - yl) // 4:yh,
xl:xr]
print(xl, xr, yh, yl)
# print(len(card_imgs))
if limit1 != 0:
cv2.imshow("card_img", card_imgs[card_index])
print(xl, xr, yh, yl)
card1 = cv2.resize(card_imgs[card_index], (720, 180))
cv2.imwrite("card_img_104_{}.jpg".format(card_index), card1)
print(limit1)
# cv2.imshow("oldimg{}".format(card_index), oldimg)
else:
print("未捕捉到车牌")
cv2.waitKey(0)
# print('len(contours)', len(contours))
# cv2.imshow("kaiyunsuan", opening)
# cv2.imshow("yuantu", img)
# cv2.imshow("img_opening", img_opening)
# cv2.imshow("img_thresh", img_thresh)
# cv2.imshow("img_edge2", img_edge2)
# cv2.imshow("img", img)
# cv2.imshow("box", box)
# cv2.imwrite("img_edge2.jpg", img_edge2)
# cv2.waitKey(0)
# cv2.destroyAllWindows()
if __name__ == '__main__':
car_pic = "/home/python/Desktop/opencv_test/opencv_demo/pic/car21jinK88888.jpg"
dingwei(car_pic)
```
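The comment in `dingwei` summarizes the `cv2.minAreaRect` return format as ((x, y), (w, h), θ). A small standalone illustration with made-up points (the coordinates are arbitrary):
```python
import cv2
import numpy as np

# Four arbitrary points roughly forming a tilted rectangle.
pts = np.array([[10, 10], [110, 20], [100, 60], [0, 50]], dtype=np.float32)
rect = cv2.minAreaRect(pts)            # ((center_x, center_y), (w, h), angle)
box = cv2.boxPoints(rect).astype(int)  # the four corner points
print(rect)
print(box)
```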
#### File: opencv_demo/opencv_test1/svm_demo_chi.py
```python
import datetime
import cv2
import numpy as np
from numpy.linalg import norm
import os
SZ = 20 # width/height of the training images
def deskew(img):
m = cv2.moments(img)
if abs(m['mu02']) < 1e-2:
return img.copy()
skew = m['mu11'] / m['mu02']
M = np.float32([[1, skew, -0.5 * SZ * skew], [0, 1, 0]])
img = cv2.warpAffine(img, M, (SZ, SZ), flags=cv2.WARP_INVERSE_MAP | cv2.INTER_LINEAR)
return img
class StatModel(object):
def load(self, fn):
self.model = self.model.load(fn)
def save(self, fn):
self.model.save(fn)
# Machine learning with OpenCV's SVM
class SVM(StatModel):
def __init__(self, C=1, gamma=0.5):
self.model = cv2.ml.SVM_create() # create the SVM model
# set its properties
self.model.setGamma(gamma)
self.model.setC(C)
self.model.setKernel(cv2.ml.SVM_RBF) # radial basis function (RBF) kernel, a good default choice, gamma > 0
self.model.setType(cv2.ml.SVM_C_SVC)
# train the SVM
def train(self, samples, responses): # SVM training function
self.model.train(samples, cv2.ml.ROW_SAMPLE, responses)
# character recognition
def predict(self, samples):
r = self.model.predict(samples)
return r[1].ravel()
# adapted from the OpenCV samples, used for SVM training
def hog(digits):
samples = []
'''
step 1: compute the Sobel derivatives of the image in the X and Y directions.
step 2: compute the gradient angle and gradient magnitude of every pixel.
step 3: quantize the gradient angle into an integer between 0 and 16.
step 4: split the image into 4 sub-squares and, for each one, compute a 16-bin histogram
of gradient angles weighted by gradient magnitude.
Each sub-square therefore yields a vector of 16 values, and the 4 vectors of the
4 sub-squares together form the image's feature vector (64 values).
This is the feature vector used for training.
'''
for img in digits:
# plt.subplot(221)
# plt.imshow(img,'gray')
# step 1: compute the Sobel derivatives of the image in the X and Y directions
gx = cv2.Sobel(img, cv2.CV_32F, 1, 0)
gy = cv2.Sobel(img, cv2.CV_32F, 0, 1)
mag, ang = cv2.cartToPolar(gx, gy) # step 2: convert Cartesian (x, y) gradients to polar -> magnitude, angle
bin_n = 16
bin = np.int32(bin_n * ang / (2 * np.pi)) # step 3: quantize bin values into (0...16); 2*pi is 360 degrees
# step4. Divide to 4 sub-squares
bin_cells = bin[:10, :10], bin[10:, :10], bin[:10, 10:], bin[10:, 10:]
mag_cells = mag[:10, :10], mag[10:, :10], mag[:10, 10:], mag[10:, 10:]
# zip() takes iterables and packs their corresponding elements into tuples, returning a list of those tuples.
# e.g. a = [1,2,3]; b = [4,5,6]; zipped = zip(a,b) gives [(1, 4), (2, 5), (3, 6)]
hists = [np.bincount(b.ravel(), m.ravel(), bin_n) for b, m in zip(bin_cells, mag_cells)]
hist = np.hstack(hists) # hist is a 64 bit vector
# plt.subplot(223)
# plt.plot(hist)
# transform to Hellinger kernel
eps = 1e-7
hist /= hist.sum() + eps
hist = np.sqrt(hist)
hist /= norm(hist) + eps
# plt.subplot(224)
# plt.plot(hist)
# plt.show()
samples.append(hist)
return np.float32(samples)
def train_svm():
chinesemodel = SVM(C=1, gamma=0.5)
if os.path.exists("svmchi.dat"):
chinesemodel.load("svmchi.dat")
else:
chars_train = []
chars_label = []
``` |
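The docstring of `hog` above describes a 4-cell, 16-bin histogram of oriented gradients, i.e. a 64-value feature vector per image. A quick sanity check on one random 20x20 image, assuming `hog` and `SZ` from the file above are in scope:
```python
import numpy as np

rng = np.random.default_rng(0)
img = rng.integers(0, 256, size=(SZ, SZ), dtype=np.uint8)  # fake 20x20 "character"
features = hog([img])
print(features.shape)  # expected: (1, 64)
```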
{
"source": "18AdrianoH/SimSiam",
"score": 3
} |
#### File: SimSiam/augmentations/__init__.py
```python
from .simsiam_aug import SimSiamTransform
from .eval_aug import Transform_single
from .byol_aug import BYOL_transform
from .simclr_aug import SimCLRTransform
from .adriano_aug import MNIST_Transform
def get_aug(name='simsiam', image_size=224, train=True, train_classifier=None):
if train==True:
if name == 'simsiam_mnist':
augmentation = MNIST_Transform(sol_prob=0.5, sol_mode="rand", single=False)
elif name == 'simsiam':
augmentation = SimSiamTransform(image_size)
elif name == 'byol':
augmentation = BYOL_transform(image_size)
elif name == 'simclr':
augmentation = SimCLRTransform(image_size)
else:
raise NotImplementedError("Got name {}".format(name))
elif train==False:
if train_classifier is None:
raise Exception
# I'm starting to think they really didn't want anyone building on their code
if name == 'simsiam_mnist':
augmentation = MNIST_Transform(sol_prob=0.5, sol_mode="rand", single=True)
else:
augmentation = Transform_single(image_size, train=train_classifier)
else:
raise Exception
return augmentation
``` |
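A hypothetical usage of `get_aug` above, assuming the SimSiam package and its augmentation modules are importable (the exact import path is an assumption):
```python
from augmentations import get_aug

train_aug = get_aug(name="simsiam", image_size=224, train=True)
eval_aug = get_aug(name="simsiam", image_size=224, train=False, train_classifier=False)
```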
{
"source": "18alantom/fitloop",
"score": 3
} |
#### File: fitloop/defaults/configure_optimizer.py
```python
import numpy as np
from copy import deepcopy
from fitloop.helpers.constants import Module, Optimizer, Optional, Union, List
from fitloop.helpers.utils import unf_last_n, get_layers, get_lrs
def configure_optimizer(state,
lr:Optional[Union[List[float], slice, float, np.ndarray]]=None,
unlock:Optional[Union[bool, int]]=None):
"""
Can be used to set variable lrs and unlock and lock layers for training.
Note: Works properly only when initial optimizer has a single param_group,
and for single optimizers and lr_schedulers.
----
PARAMETERS:
state : The FitLoop object whose optimizer is to be configured, must
pass this if calling externally.
lr : If `lr` is a `slice` then spread the learning rates exponentially
over all the unlocked layers of the neural network.
unlock : If `unlock` is `True` unlock all the layers
else if `unlock` is `int`, unlock the last [`unlock`] layers.
"""
model = state.model
optimizer = state.optimizer
# Create a copy of the param_groups without params
_param_groups = optimizer.param_groups
param_groups = []
for param_group in _param_groups:
default = deepcopy({**param_group})
del default['params']
param_groups.append(default)
del _param_groups
if lr is None and unlock is None:
"""
lr is None : configure_optimizer called internally.
no changes to lr for all layers.
"""
# Get all parametrized layers
optimizer.param_groups.clear()
layers = get_layers(model)
for i,layer in enumerate(layers):
if len(param_groups) < (i + 1):
defaults = param_groups[-1]
else:
defaults = param_groups[i]
pg = {
'params': layer.parameters(),
**defaults
}
optimizer.add_param_group(pg)
else:
"""
This block will be reached only through an external call.
"""
# Unlock the last n layers only.
if unlock is not None:
if unlock is True:
unf_last_n(model)
else:
unf_last_n(model, n=unlock)
# Set learning rate for the last n unlocked layers.
if lr is not None:
# Get learning rates.
layers = [*get_layers(model)]
l = len(layers)
if isinstance(lr, slice):
lr = get_lrs(lr, count=len([*get_layers(model, True)]))
if isinstance(lr, (list,np.ndarray)):
diff = len(lr) - l
if diff < 0:
lrs = [*([0]*-diff),*lr]
elif diff > 0:
lrs = lr[diff:]
else:
lrs = lr
else:
lrs = [lr] * l
optimizer.param_groups.clear()
# Set rates to all the layers.
for i,(lr, layer) in enumerate(zip(lrs, layers)):
if len(param_groups) < (i + 1):
defaults = param_groups[-1]
else:
defaults = param_groups[i]
pg = {
"params":layer.parameters(),
**defaults
}
pg['lr'] = lr
pg['initial_lr'] = lr
optimizer.add_param_group(pg)
if state.lr_scheduler is not None:
state.lr_scheduler.optimizer = optimizer
```
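The docstring above says a `slice` learning rate is spread exponentially over the unlocked layers. fitloop's own `get_lrs` is not shown here, but the idea can be illustrated standalone with a geometric spacing (this is an illustration of the concept, not fitloop's implementation):
```python
import numpy as np

def spread_lrs(lr_slice: slice, count: int) -> np.ndarray:
    """Exponentially spaced learning rates from lr_slice.start to lr_slice.stop."""
    return np.geomspace(lr_slice.start, lr_slice.stop, num=count)

print(spread_lrs(slice(1e-5, 1e-3), count=4))
# -> [1.00e-05 4.64e-05 2.15e-04 1.00e-03]
```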
#### File: fitloop/helpers/state.py
```python
import math
import torch
from .constants import STAGES
from .constants import Any, Tensor, DataLoader
from .constants import Tuple, Dict, List
from .constants import Optional, Union
class LoopState:
"""
Maintains train/valid/test loop state for a single run of
a certain number of epochs; it is not used to preserve state
between runs.
"""
_stages = STAGES
_batch_step, _epoch_start, _epoch_end = _stages
def __init__(self, phase:str, floop:object, no_cast:bool,
no_float:bool, is_train:bool, is_test:bool,
dl:DataLoader
):
"""
phase : phase name 'train', 'valid' or 'test'
floop : the calling FitLoop object
"""
self.__batch = ()
self.__floop = floop
self._no_cast = no_cast
self._no_float = no_float
self.phase = phase
self.batch_num = 0
self.epoch_num = 0
self.metrics = {s:{} for s in self._stages}
self.is_train = is_train
self.is_test = is_test
# For easy access
bs = dl.batch_size
dr = dl.drop_last
sz = len(dl.dataset)
bt = sz / bs
# Gives dataset size and batch count
self.size = sz
self.batches = math.floor(bt) if dr else math.ceil(bt)
self.batch_size = 0
def __getattr__(self, name:str) -> Any:
# To get attributes from the FitLoop object
# for use in the stage functions.
return getattr(self.__floop, name)
def __getitem__(self, metric_name:str):
# To get the metrics stored in the batch step stage
metric_value = self.metrics[self._batch_step][metric_name]
try:
return torch.tensor(metric_value).float()
except:
return metric_value
"""
Getter and setter for the current batch
"""
@property
def batch(self) -> Tuple[Tensor,...]:
if self._no_cast:
return self.__batch
return (
d.to(device=self.device,dtype=self.dtype)
if d.is_floating_point()
else d.to(device=self.device,dtype=torch.long)
for d in self.__batch
)
@batch.setter
def batch(self, current_batch:Tuple[Tensor,...]) -> None:
self.__batch = current_batch
"""
Functions to append rdict values to self.metrics
"""
def _append(self, rdict:Dict[str, float], stage:str) -> None:
# Append metrics to the specific stage.
for key in rdict:
if key not in self.metrics[stage]:
self.metrics[stage][key] = []
self.metrics[stage][key].append(rdict[key])
def _append_batch_step(self, rdict:Dict[str, float]) -> None:
# Called after batch step rdict is returned
self._append(rdict, self._batch_step)
def _append_epoch_start(self, rdict:Dict[str, float]) -> None:
# Called before epoch start
self._append(rdict, self._epoch_start)
def _append_epoch_end(self, rdict:Dict[str, float]) -> None:
# Called after epoch end step rdict is returned
self._append(rdict, self._epoch_end)
"""
Functions to clear rdict values from self.metrics
"""
def _clear(self, stage:str) -> None:
# Clear the batch metrics at the end of the batch.
for mlist in self.metrics[stage]:
self.metrics[stage][mlist].clear()
def _clear_batch_step(self) -> None:
# Called before epoch start
self._clear(self._batch_step)
def _clear_epoch_start(self) -> None:
# Called ??
self._clear(self._epoch_start)
def _clear_epoch_end(self) -> None:
# Called after loop end
self._clear(self._epoch_end)
"""
State updates before epoch start and batch step stages
"""
def _pre_epoch_start_update(self, epoch_num:int) -> None:
self._clear_batch_step()
self.batch_num = 0
self.epoch_num = epoch_num
def _pre_batch_step_update(self, current_batch):
self.batch_size = current_batch[0].size(0)
self.batch_num += 1
self.batch = current_batch
"""
Functions to get various metrics at different stages
"""
def _get_epoch_metric(self, criteria:str) -> float:
# Last added metric that is to be used as a model
# selection criteria
metric = self.metrics[self._epoch_end][criteria][-1]
if self._no_float:
return metric
else:
try:
return float(metric)
except:
return metric
def _get_epoch_metrics(self,
display_metrics:Optional[Union[str,List[str]]]=None
) -> Dict[str,float]:
# Return the last saved epoch metrics
if isinstance(display_metrics, str):
return {display_metrics:self._get_epoch_metric(display_metrics)}
elif isinstance(display_metrics, list):
return {
metric:self._get_epoch_metric(metric)
for metric in display_metrics
}
else:
return {
metric: self._get_epoch_metric(metric)
for metric in self.metrics[self._epoch_end]
}
``` |
{
"source": "18alantom/flask_test_movr",
"score": 2
} |
#### File: flask_test_movr/server/app.py
```python
import os
# import json
from flask import Flask, request, jsonify
from dotenv import load_dotenv
from .db import DBConnectionHandler
from .helpers import parse_report, strify, get_id
load_dotenv()
app = Flask(__name__)
db = DBConnectionHandler(
user=os.getenv("DB_USER"),
password=os.getenv("DB_PASS"),
host=os.getenv("DB_HOST"),
database=os.getenv("DB_NAME")
)
# Used for getting data
data_responses = {
"products": (lambda: db.select("product")),
"locations": (lambda: db.select("location")),
"movements": (lambda: strify(db.movement())),
"report": (lambda: parse_report(db.report()))
}
@app.after_request
def allow_cors_if_dev(res):
if os.getenv("FLASK_ENV") == "development":
res.headers["Access-Control-Allow-Origin"] = os.getenv("DEV_ORIGIN")
res.headers["Access-Control-Allow-Methods"] = "GET, POST, PUT, DELETE, PATCH, OPTIONS"
res.headers["Access-Control-Allow-Headers"] = "Origin, X-Requested-With, Content-Type, Accept"
return res
# @app.route("/test", methods=["GET"])
# def test_get():
# return "Hello, World!"
# @app.route("/test", methods=["POST"])
# def test_post():
# json_data = request.get_json()
# if json_data is None:
# res = {"got": "no json data"}
# else:
# json_data = json.loads(json_data)
# res = {"got": json_data}
# return jsonify(res)
@app.route("/data", methods=["POST"])
def get_data():
# For receiving row data.
data_names = request.get_json()
response = {name: data_responses[name]() if name in data_responses else [False, "invalid"]
for name in data_names}
return jsonify(response)
@app.route("/data", methods=["PUT"])
def put_row():
# Route used only for inserting new products and locations.
json_data = request.get_json()
suc, msg = db.insert(json_data["table"], vals=[
get_id(), json_data["value"]])
return jsonify([suc, msg])
@app.route("/data", methods=["PATCH"])
def update_row():
# Route used only for changing product and location names.
json_data = request.get_json()
suc, msg = db.update(json_data["table"],
json_data["value"], json_data["id"])
return jsonify([suc, msg])
@app.route("/data", methods=["DELETE"])
def del_row():
# Route used only for deleting product and location rows.
json_data = request.get_json()
suc, msg = db.delete(json_data["table"], json_data["id"])
return jsonify([suc, msg])
@app.route("/move", methods=["PUT"])
def move_products():
json_data = request.get_json()
suc, msg = db.move(product_id=json_data["product_id"], from_location=json_data["from"],
to_location=json_data["to"], qty=int(json_data["quantity"]))
return jsonify([suc, msg])
``` |
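A hypothetical client call against the `/data` POST route above, which takes a JSON list of data names and returns a dict keyed by those names. The host and port are assumptions, since the app's run configuration is not shown:
```python
import requests

# Ask the server for two of the row collections handled by data_responses.
resp = requests.post("http://localhost:5000/data", json=["products", "locations"])
print(resp.json())
```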
{
"source": "18alantom/ideal-fiesta",
"score": 3
} |
#### File: doctype/journal_entry/journal_entry.py.74943a06baad3cbd3ecd065ba638cc37.py
```python
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class JournalEntry(Document):
def validate(self):
self.validate_journal_items()
def on_submit(self):
self.add_ledger_entries() # General Ledger Entry
def on_cancel(self):
if self.docstatus == 0:
return
self.remove_ledger_entries()  # posts the reversing GL entries defined below
def validate_journal_items(self):
self.validate_accounting_item_count()
self.validate_credit_debit_values()
self.validate_credit_debit_difference()
def validate_accounting_item_count(self):
item_count = len(self.journal_items)
if item_count != 2:
frappe.throw(
"There should be 2 accounting entries, \n"
"This is double entry accounting, "
f"not {item_count} entry accounting."
)
def validate_credit_debit_values(self):
for item in self.journal_items:
is_valid = (item.credit_in_account == 0 and item.debit_in_account > 0) or (
item.credit_in_account > 0 and item.debit_in_account == 0
)
if not is_valid:
frappe.throw("Either credit or debit of an entry should be 0.")
def validate_credit_debit_difference(self):
credit = 0
debit = 0
for item in self.journal_items:
credit += item.credit_in_account
debit += item.debit_in_account
diff = credit - debit
if abs(diff) > 0:
frappe.throw("Credit, Debit difference should be 0.")
def get_credit_item(self):
return self.get_credit_or_debit_item("credit_in_account")
def get_debit_item(self):
return self.get_credit_or_debit_item("debit_in_account")
def get_credit_or_debit_item(self, account_type):
for item in self.journal_items:
if getattr(item, account_type) > 0:
return item
def get_ledger_item(self, account, against_account, credit, debit, is_for_cancel):
return frappe.get_doc(
doctype="GL Entry",
posting_date=self.posting_date,
account=account,
against_account=against_account,
credit=credit,
debit=debit,
voucher_type=f"{'Cancel' if is_for_cancel else ''}Journal Entry",
company_name=self.company,
voucher_number=self.name,
)
def remove_ledger_entries(self):
# Journal Entries
credit = self.get_credit_item()
debit = self.get_debit_item()
# Ledger Entries
credit_item = self.get_ledger_item(
credit.account,
debit.account,
credit.debit_in_account,
credit.credit_in_account,
is_for_cancel=True,
)
debit_item = self.get_ledger_item(
debit.account,
credit.account,
debit.debit_in_account,
debit.credit_in_account,
is_for_cancel=True,
)
insert_ledger_entries(credit_item, debit_item)
def add_ledger_entries(self):
# Journal Entries
credit = self.get_credit_item()
debit = self.get_debit_item()
# Ledger Entries
credit_item = self.get_ledger_item(
credit.account,
debit.account,
credit.credit_in_account,
credit.debit_in_account,
)
debit_item = self.get_ledger_item(
debit.account,
credit.account,
debit.credit_in_account,
debit.debit_in_account,
)
insert_ledger_entries(credit_item, debit_item)
def insert_ledger_entries(credit_item, debit_item):
# Insert Ledger Items
for gl_entry in [credit_item, debit_item]:
gl_entry.docstatus = 1
gl_entry.insert(ignore_permissions=True, ignore_if_duplicate=True)
```
#### File: ideal-fiesta/accounting/test_helpers.py
```python
import frappe
from copy import deepcopy
def get_doc_names(module):
temp = frappe.db.get_all("DocType", filters=dict(module=module))
return [d["name"] for d in temp]
def delete_all_docs(name):
try:
records = frappe.db.get_all(name)
for record in records:
frappe.db.delete(name, dict(name=record["name"]))
except frappe.db.TableMissingError:
pass
def get_autonamed_items(items):
items = deepcopy(items)
for item_entry in items:
item_entry.update({"item": get_item_autoname(item_entry)})
return items
def check_if_doc_exists(doctype, doc_dict):
return len(frappe.db.get_all(doctype, filters=doc_dict)) > 0
def get_item_autoname(item_entry):
record = frappe.db.get_all("Item", filters={"item_name": item_entry["item"]})
return record[0]["name"]
```
#### File: accounting/www/home.py
```python
import frappe
def get_context(context):
context.company = frappe.get_list(
"Company", fields=["company_name", "description"]
)[0]
``` |
{
"source": "18alantom/the_genius_batch",
"score": 3
} |
#### File: 18alantom/the_genius_batch/fixer.py
```python
import re
import numpy as np
MAX_TOTAL_MARKS_DICT = {
'2C00255': 600,
'2C00455': 600,
'2C00345': 600,
'3A00145': 600,
'3L00115': 400,
}
NENG_NUM_COLS = [
('1_100', '2_100', '3_100', '4_100', 'total_marks'),
('total_marks', 'ac', 'acg', 'gpa'),
('ac', 'acg', 'gpa'),
]
NENG_KIDX = [
('3L00115'),
('2C00255','2C00455','2C00345','3A00145'),
('4E00143','2M00733','1P00137'),
]
def get_fixed_and_auged_df(dcourse):
"""
Function that fixes the numeric values and
maybe adds a total and percentage column.
"""
if check_is_eng(dcourse):
return get_fixed_and_auged_df_eng(dcourse)
else:
return get_fixed_and_auged_df_non_eng(dcourse)
def check_is_eng(dcourse):
return dcourse.meta['degree'] == 'B.E'
def get_fixed_and_auged_df_eng(dcourse):
df = dcourse.df.copy()
num_cols, mark_cols, max_tot = get_cols_and_tot(dcourse)
df = clean_numerics(df, num_cols)
if 'total_marks' in df.columns:
df = fix_tot_col(df)
df = df.fillna(0)
if 'total_marks' not in df.columns:
df = append_tot_col(df, mark_cols)
df = append_percent_col(df, max_tot)
return df
def get_fixed_and_auged_df_non_eng(dcourse):
df = dcourse.df.copy()
cols, max_tot = get_cols_and_tot_neng(dcourse.meta['code'])
df = clean_numerics(df, cols)
df = df.fillna(0)
if max_tot is not None:
append_percent_col(df, max_tot)
return df
def get_cols_and_tot_neng(code):
idx = [i for i, c in enumerate(NENG_KIDX) if code in c][0]
if code in MAX_TOTAL_MARKS_DICT:
total = MAX_TOTAL_MARKS_DICT[code]
else:
total = None
return NENG_NUM_COLS[idx], total
def get_cols_and_tot(dcourse):
if hasattr(dcourse, 'subj_break_down'):
sb = dcourse.subj_break_down
return (
get_all_num_cols_from_sb(sb),
*get_mark_cols_and_max_tot_from_sb(sb)
)
else:
raise NotImplementedError("yo wtf?!")
def clean_numerics(df, num_cols):
df = df.apply(fix_numeric_values, axis=1, args=(num_cols, ))
return df
def append_tot_col(df, marks_cols):
marks = df[marks_cols]
df['total_marks'] = marks.sum(axis=1)
return df
def append_percent_col(df, max_tot):
df['percent'] = df['total_marks'] / max_tot
return df
def fix_tot_col(df):
return df.apply(get_fixed_tm, axis=1)
def get_fixed_tm(row):
tm = row['total_marks']
if "NA" in tm:
tm = np.nan
elif "/" in tm:
tm = tm.split("/")[0]
tm = tm.split("@")[0]
tm = float(tm)
else:
tm = float(tm)
row['total_marks'] = tm
return row
def fix_numeric_values(row, num_cols):
for c in num_cols:
row[c] = convert_numeric(row[c])
return row
ex_num = lambda v: re.findall(r"(\d+(?:\.\d+)?)\w?", v)
def convert_numeric(v):
if v in ['--', '---', 'A', 'AA', 'ABS', 'EX']:
return np.nan
elif isinstance(v, str):
return float(ex_num(v)[0])
else:
return float(v)
def get_mark_tups_from_sb(sb):
return [(k,v) for k, i in sb.items() for v in i[0] if re.findall(r"^\d+/\d+$", v)]
def get_all_num_cols_from_sb(sb):
return [f"{k}_{v}" for k, i in sb.items() for v in i[0] if not re.findall(r"^G$", v)] + ['ac', 'acg', 'gpa']
def get_mark_cols_from_mt(mt):
return [f"{k}_{v}" for k,v in mt]
def get_max_tot_from_mt(mt):
return sum([int(v.split("/")[0]) for _, v in mt])
def get_mark_cols_and_max_tot_from_sb(sb):
# From subject break down
mt = get_mark_tups_from_sb(sb)
mc = get_mark_cols_from_mt(mt)
mx = get_max_tot_from_mt(mt)
return mc, mx
``` |
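A quick check of the number-extraction regex behind `ex_num`/`convert_numeric` above, run standalone (the sample strings are made up):
```python
import re

ex_num = lambda v: re.findall(r"(\d+(?:\.\d+)?)\w?", v)
print(ex_num("72/100"))  # ['72', '100'] -> convert_numeric keeps the first match
print(ex_num("7.5F"))    # ['7.5']
```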
{
"source": "18argon/download-gitignore",
"score": 4
} |
#### File: download-gitignore/download_gitignore/github_api.py
```python
import requests
import urllib.request
from requests.exceptions import HTTPError
import click
class github_api:
def get_list_of_gitignore_files(self):
response = requests.get("https://api.github.com/repos/github/gitignore/contents").json()
list_of_gitignore_files = [obj for obj in response
if obj["type"] == "file" and ".gitignore" in obj["name"]]
return list_of_gitignore_files
def download_gitignore_file(self, language):
click.echo("Trying to download .gitignore file...\n")
try:
list_of_gitignore_files = self.get_list_of_gitignore_files()
# get a list with all (the only) files that are named <language>.gitignore in the repo
language_gitignore_object = [obj for obj in list_of_gitignore_files
if obj["name"].lower() == language.lower() + ".gitignore"]
# if this list is not empty, download the file
if len(language_gitignore_object) != 0:
language_gitignore_object = language_gitignore_object[0]
url = language_gitignore_object["download_url"]
click.echo("Downloading " + language_gitignore_object["name"] + " from " + url)
urllib.request.urlretrieve(url, ".gitignore") # download file
click.echo("Download successful")
# else, the user typed the language wrong
else:
click.echo(".gitignore not found for " + language + ". Use download-gitignore list to list all the available files.")
except HTTPError as http_err:
click.echo("HTTP error occurred: " + str(http_err))
except Exception as err:
click.echo("Other error occurred: " + str(err))
click.echo("\nYou may want to check your internet connection.")
``` |
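A hypothetical direct use of the class above (requires network access to the GitHub API):
```python
api = github_api()  # assumes the class above is importable or in scope
names = [f["name"] for f in api.get_list_of_gitignore_files()]
print(names[:5])  # e.g. the first few *.gitignore templates in github/gitignore
```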
{
"source": "18ariana/accreditation_system",
"score": 3
} |
#### File: accreditation_system/utils/notify.py
```python
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from .config import *
def send_email(receivers, token):
sender = CONFIG['NOTIFY_EMAIL']
connection = smtplib.SMTP_SSL(CONFIG['NOTIFY_SMTP_HOST'], CONFIG['NOTIFY_SMTP_PORT'])
connection.login(sender, CONFIG['NOTIFY_PASSWORD'])
for receiver in receivers:
msg = MIMEMultipart()
msg['From'] = sender
msg['To'] = receiver['user_email']
msg['Subject'] = 'Анкета от АОРПО'
msg_body = 'Здравствуйте! \n Пройдите, пожалуйста, анкету для АОРПО: {}:{}/quiz/{}/{}'.format(CONFIG['HOSTNAME'], \
CONFIG['PORT'], token, receiver['user_id'])
msg.attach(MIMEText(msg_body, 'plain'))
connection.send_message(msg)
connection.quit()
```
#### File: accreditation_system/utils/parse_xml.py
```python
from bs4 import BeautifulSoup
import io
def get_content_by_tag(page, tag):
result = page.findAll(tag)
return result
def get_text_by_tag(page, tag):
result = page.findAll(tag)
result = [res.get_text().encode('utf-8', 'ignore') for res in result] \
if len(result) > 1 else result[0].get_text().encode('utf-8', 'ignore')
return result
def get_generalized_work_functions(page):
tag = 'generalizedworkfunction'
content = get_content_by_tag(page, tag)
result = [
{
'codeOTF': get_text_by_tag(content[i], 'codeotf'),
'nameOTF': get_text_by_tag(content[i], 'nameotf'),
'registrationNumber': get_text_by_tag(page, 'registrationnumber'),
'levelOfQualification': get_text_by_tag(content[i], 'levelofqualification'),
'possibleJobTitle': get_text_by_tag(content[i], 'possiblejobtitle'),
'particularWorkFunctions': get_particular_work_function(content[i])
}
for i in range(len(content))
]
return result
def get_particular_work_function(page):
tag = 'particularworkfunction'
content = get_content_by_tag(page, tag)
result = [
{
'codeTF': get_text_by_tag(content[i], 'codetf'),
'nameTF': get_text_by_tag(content[i], 'nametf'),
'laborActions': get_text_by_tag(content[i], 'laboraction'),
'requiredSkills': get_text_by_tag(content[i], 'requiredskill'),
'necessaryKnowledges': get_text_by_tag(content[i], 'necessaryknowledge'),
# 'otherCharacteristics': get_text_by_tag(content[i], 'othercharacteristic')
}
for i in range(len(content))
]
return result
def parse_xml(filename):
xml = io.open(filename, mode="r", encoding="utf-8")
xml = xml.read()
page = BeautifulSoup(xml, 'lxml')
generalized_work_functions = get_generalized_work_functions(page)
return generalized_work_functions
``` |
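A tiny standalone check of `get_text_by_tag` above on an inline XML snippet (assumes the helper is in scope and lxml is installed):
```python
from bs4 import BeautifulSoup

page = BeautifulSoup("<root><codeotf>A</codeotf><codeotf>B</codeotf></root>", "lxml")
print(get_text_by_tag(page, "codeotf"))  # [b'A', b'B'] -- encoded because len > 1
```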
{
"source": "18ariana/analytics_backend",
"score": 2
} |
#### File: workprogramsapp/expertise/models.py
```python
from django.db import models
from analytics_project import settings
from datetime import datetime
class UserExpertise(models.Model):
STUFF_STATUS_CHOICES = [
('AU', 'Автор РПД'),
('EX', 'Эксперт'),
]
    USER_EXPERTISE_STATUS_CHOICES = [
("AP", "Одобрить"),
("RE", "Отправить на доработку")
]
expert = models.ForeignKey(settings.AUTH_USER_MODEL, verbose_name='Эксперт', on_delete=models.CASCADE,
related_name='expertse_in_rpd')
expertise = models.ForeignKey('Expertise', verbose_name='Экспертиза', on_delete=models.CASCADE,
related_name='expertse_users_in_rpd')
stuff_status = models.CharField(choices=STUFF_STATUS_CHOICES, max_length=1024, verbose_name="Роль эксперта",
default='EX')
    user_expertise_status = models.CharField(choices=USER_EXPERTISE_STATUS_CHOICES, max_length=1024, blank=True,
null=True, verbose_name="статус экспертизы пользователя")
expert_result = models.CharField(verbose_name="Результаты экспертизы", max_length=50000, blank=True, null=True)
class Expertise(models.Model):
STATUS_CHOICES = [
('WK', 'В работе'),
('EX', 'На экспертизе'),
('AC', 'Одобрено'),
('AR', 'Архив')
]
work_program = models.ForeignKey('WorkProgram', related_name='expertise_with_rpd', on_delete=models.CASCADE)
expertise_status = models.CharField(choices=STATUS_CHOICES, max_length=1024, verbose_name="Статус экспертизы",
default='EX')
experts = models.ManyToManyField(settings.AUTH_USER_MODEL, verbose_name='Эксперты', through=UserExpertise,
related_name='experts_in_expertise')
approval_date = models.DateTimeField(editable=True, auto_now_add=True, blank=True, null=True)
date_of_last_change = models.DateTimeField(editable=True, auto_now=True, blank=True, null=True)
#
#
# def __init__(self, *args, **kwargs):
# super(Expertise, self).__init__(*args, **kwargs)
# self.old_expertise_status = self.expertise_status
#
#
# def create(self, *args, **kwargs):
# if self.expertise_status and self.old_expertise_status != self.expertise_status:
# self.date_of_last_change = datetime.now()
# super(Expertise, self).save(*args, **kwargs)
def __str__(self):
return str(self.pk)
class ExpertiseComments(models.Model):
BLOCK_CHOICES = [
('MA', 'Главное'),
('PR', 'Пререквизиты'),
('SE', 'Разделы'),
('TH', 'Темы'),
('SO', 'Источники'),
        ('EV', 'Оценочные средства'),
('RE', 'Результаты обучения'),
('CO', 'Контрольные средства'),
]
comment_block = models.CharField(choices=BLOCK_CHOICES, max_length=1024, verbose_name="Блок комментария")
user_expertise = models.ForeignKey('UserExpertise', on_delete=models.CASCADE)
comment_text = models.CharField(max_length=50000, verbose_name="Комментарий")
comment_date = models.DateTimeField(auto_now_add=True, blank=True, verbose_name='Дата комментария')
```
#### File: workprogramsapp/expertise/serializers.py
```python
from rest_framework import serializers
from dataprocessing.serializers import userProfileSerializer
# from workprogramsapp.educational_program.serializers import EducationalProgramSerializer
from workprogramsapp.expertise.models import UserExpertise, Expertise, ExpertiseComments
from workprogramsapp.serializers import WorkProgramShortForExperiseSerializer
class UserExpertiseSerializer(serializers.ModelSerializer):
class Meta:
model = UserExpertise
fields = "__all__"
def to_representation(self, value):
self.fields['expert'] = userProfileSerializer(many=False)
self.fields['expertise'] = ExpertiseSerializer(many=False, read_only=True)
return super().to_representation(value)
class UserExpertiseForExpertiseSerializer(serializers.ModelSerializer):
class Meta:
model = UserExpertise
fields = ['id','expert', 'stuff_status', 'user_expertise_status', 'expert_result']
def to_representation(self, value):
self.fields['expert'] = userProfileSerializer(many=False)
return super().to_representation(value)
class ExpertiseSerializer(serializers.ModelSerializer):
"""
    Automatically adds the creating user as the leader of the expertise review
"""
user_status_in_expertise = serializers.SerializerMethodField()
def get_user_status_in_expertise(self, instance):
request = self.context.get("request")
user_statuses = \
{
"expertise_master": False,
"expertise_member": bool(UserExpertise.objects.filter(
expert=request.user, expertise_id=instance.id,
stuff_status="EX")),
"structural_leader": bool(Expertise.objects.filter(
pk=instance.id,
work_program__structural_unit__user_in_structural_unit__user=request.user,
work_program__structural_unit__user_in_structural_unit__status__in=["leader", "deputy"]).distinct())
}
for group in request.user.groups.all():
if group.name == "expertise_master":
user_statuses["expertise_master"] = True
return user_statuses
def create(self, validated_data):
is_exp_exist = Expertise.objects.filter(work_program=validated_data['work_program'])
if is_exp_exist:
print("такая экспертиза уже существует")
return is_exp_exist[0]
exp = Expertise.objects.create(**validated_data)
request = self.context.get('request')
UserExpertise.objects.create(expertise=exp, expert=request.user, stuff_status="AU") # ???
return exp
def to_representation(self, value):
self.fields['work_program'] = WorkProgramShortForExperiseSerializer(many=False, read_only=True)
self.fields['experts'] = userProfileSerializer(many=True, read_only=True)
self.fields['expertse_users_in_rpd'] = UserExpertiseForExpertiseSerializer(many=True, read_only=True)
return super().to_representation(value)
class Meta:
model = Expertise
fields = "__all__"
# class ExpertiseWithUsersStatusSerializer(serializers.ModelSerializer):
# """
# Автоматически добавляет пользователя-создателя как лидера экспертизы
# """
# work_program = WorkProgramShortForExperiseSerializer(many=False, read_only=True)
# expertse_users_in_rpd = UserExpertiseForExpertiseSerializer(many=True, read_only=True)
#
#
# class Meta:
# model = Expertise
# fields = ['work_program', 'expertse_users_in_rpdd']
class CommentSerializer(serializers.ModelSerializer):
def to_representation(self, value):
self.fields['user_expertise'] = OnlyUserExpertiseSerializer(many=False, read_only=True)
return super().to_representation(value)
class Meta:
model = ExpertiseComments
fields = "__all__"
class OnlyUserExpertiseSerializer(serializers.ModelSerializer):
expert = userProfileSerializer(many=False)
class Meta:
model = UserExpertise
fields = ['expert']
```
#### File: application/workprogramsapp/visualization.py
```python
from sklearn.metrics.pairwise import cosine_similarity
from dataprocessing.models import Items, Relation
from sklearn.feature_extraction.text import TfidfVectorizer
from itertools import combinations
import networkx as nx
from django.db.models import Q
from rest_framework import generics, status
from .models import *
from .serializers import *
from django.conf import settings
from .permissions import *
import numpy as np
from rest_framework.response import Response
import matplotlib.pyplot as plt
from matplotlib.patches import Patch
from matplotlib.lines import Line2D
""""Удалены старые views с использованием джанго рендеринга"""
"""Блок реализации API"""
# class WorkProgramsListApi(APIView):
# # """
# # Список рабочих программ для апи.
# # """
# # def get(self, request, format=None):
# # WorkPrograms = WorkProgram.objects.all()
# # serializer = WorkProgramSerializer(WorkPrograms, many=True)
# # return Response(serializer.data)
class WorkProgramSameProgramView(generics.ListAPIView):
"""
    List of identical work programs
"""
queryset = WorkProgram.objects.all()
serializer_class = WorkProgramSerializerByName
permission_classes = [IsRpdDeveloperOrReadOnly]
def graph_directory_path(self, GRAPH_URL, id):
return '{}/workprogram/{}.png'.format(GRAPH_URL, id)
def get(self, request, pk):
num_workprogram = self.request.data.get('num_workprogram')
queryset = self.queryset.get(id=pk).outcomes.all()
outcomes = [val.id for val in queryset]
pre = self.queryset.get(id=pk).prerequisites.all()
prer = [val.id for val in pre]
queryset_without_pk = self.queryset.exclude(pk=pk)
outcomes += prer
same_programs = queryset_without_pk.filter(outcomes__in=outcomes)
results = {}
for i in same_programs:
program_prereq = [val.id for val in i.prerequisites.all()]
program_outcomes = [val.id for val in i.outcomes.all()]
coef1 = len(list(set(prer) & set(program_prereq)))
coef2 = len(list(set(outcomes) & set(program_outcomes)))
result = round((coef1 + coef2)/ (len(prer)+ len(outcomes)), 2)
results[i.id] = result
sorted_by_value = sorted(results.items(), key=lambda kv: kv[1], reverse=True)[:num_workprogram]
plt.figure(figsize=(25, 20))
plt.axis("off")
G = nx.Graph()
pos = {}
p = 0
out = 0
prog = 1.9
for el in sorted_by_value:
cur_program = self.queryset.get(id=el[0])
G.add_node(cur_program.title)
prereqisites = [val.name for val in cur_program.prerequisites.all()]
outcomes = [val.name for val in cur_program.outcomes.all()]
if len(prereqisites) > len(outcomes):
pos[cur_program.title] = (-0.01, prog)
elif len(prereqisites) < len(outcomes):
pos[cur_program.title] = (0.01, prog)
else:
pos[cur_program.title] = (0, prog)
prog -=0.5
for p1 in prereqisites:
pos[p1] = (-0.02, p)
G.add_node(p1)
p += 0.1
G.add_edge(p1, cur_program.title, color='y')
for o in outcomes:
pos[o] = (0.02, out)
G.add_node(o)
out +=0.1
G.add_edge(o, cur_program.title, color='g')
wp_title = self.queryset.get(id=pk).title
pos[wp_title] = (0, 2)
G.add_node(wp_title)
colors = []
for node in G:
if node == wp_title:
colors.append('darkorange')
elif pos[node][0] == 0.02:
colors.append('lightblue')
elif pos[node][0] == -0.02:
colors.append('y')
else:
colors.append('green')
edge_colors = [G[u][v]['color'] for u, v in G.edges()]
nx.draw_networkx(G, pos=pos, with_labels=True, node_size=250, font_size=12,node_color=colors, edge_color=edge_colors)
handles = [Line2D([0], [0], markerfacecolor='lightblue', marker='o', label='Результат', markersize=15, color='w'),
Line2D([0], [0], markerfacecolor='darkorange', marker='o', label='Выбранная РПД', markersize=15, color='w'),
Line2D([0], [0], markerfacecolor='g', marker='o', label='Схожая РПД', markersize=15, color='w'),
Line2D([0], [0], markerfacecolor='y', marker='o', label='Пререквизит', markersize=15, color='w'),
Line2D([0], [0], lw=3, label='Связь: РПД-Результат', color='g'),
Line2D([0], [0], lw=3, label='Связь: РПД-Переквизит', color='y')]
plt.legend(handles=handles, loc='upper left', fontsize='x-large')
plt.savefig(self.graph_directory_path(settings.GRAPH_ROOT, pk))
programs = {
"coordinates": pos,
"graph": f'{settings.BACKEND_URL}{settings.GRAPH_URL}/workprogram/{ pk }.png'
}
return Response(data=programs, status=status.HTTP_200_OK)
class WorkProgramItemsRelationView(generics.ListAPIView):
queryset = WorkProgram.objects.all()
permission_classes = [IsRpdDeveloperOrReadOnly]
def graph_directory_path(self, GRAPH_URL, id):
return '{}/item/{}.png'.format(GRAPH_URL, id)
def merge_nodes(self, G, n1, n2, pos, relation_dict_items):
# Get all predecessors and successors of two nodes
# Create the new node with combined name
name = str(n1) + '|' + str(n2)
G.add_node(name)
# coordinates
coor1 = pos[n1]
coor2 = pos[n2]
# Remove old nodes
G.remove_nodes_from([n1, n2])
pos[name] = (coor1[0], (coor1[1]+coor2[1]-0.05)/2)
        del_list = []  # keys that need to be removed
        new_dict = {}  # dictionary holding the renamed nodes
for key in relation_dict_items.keys():
if (n1 in key) & (n2 in key):
del_list.append(key)
else:
if n1 in key:
value = relation_dict_items[key]
del_list.append(key)
value_list = list(key)
index = value_list.index(n1)
value_list[index] = name
value_list = tuple(value_list)
new_dict[value_list] = value
elif n2 in key:
value = relation_dict_items[key]
del_list.append(key)
value_list = list(key)
index = value_list.index(n2)
value_list[index] = name
value_list = tuple(value_list)
new_dict[value_list] = value
for d in del_list:
del relation_dict_items[d]
relation_dict_items.update(new_dict)
def get(self, request, pk):
queryset = self.queryset.get(id=pk)
items = [val.name for val in queryset.outcomes.all()]
items_prerequisites = [val.name for val in queryset.prerequisites.all()]
all = items+items_prerequisites
plt.figure(figsize=(30, 10))
plt.axis('off')
G = nx.Graph()
workprogram_title = queryset.title
pos = {}
k = 0
z = 0
for i in items:
pos[i] = (0.01, k)
G.add_node(i)
k+=0.1
G.add_node(workprogram_title)
for i in items_prerequisites:
pos[i] = (-0.01, z)
G.add_node(i)
z+=0.1
num = k - 0.3
pos[workprogram_title] = (0, num)
nx.set_node_attributes(G, pos, 'coord')
relation_dict_items = {}
#relation_dict_items[('Администрирование реляционных БД', 'NoSQL системы хранения данных')] = '5'
#relation_dict_items[('Методы построения', 'Администрирование реляционных БД')] = '4'
#relation_dict_items[('Реляционные СУБД', 'Администрирование базы данных')] = '5'
#relation_dict_items[('Линейная регрессия', '\ufeffОсновы программирования')] = '4'
comb_all = list(x for x in combinations(all,2))
for elem in comb_all:
item1 = Items.objects.get(name=elem[0]).id
item2 = Items.objects.get(name=elem[1]).id
item_list = [item1, item2]
relation_item = Relation.objects.filter(Q(item1__in=item_list)&Q(item2__in=item_list))
if relation_item:
relation_dict_items[elem] = relation_item.first().relation
        # relation types of interest for display
        # 2 - part of the same section
        # 4 - has a prerequisite
        # 5 - synonyms / identical
        #wanted_edges = [ '2', '4', '5']
value5 = 0
for key, value in relation_dict_items.items():
if value == '5':
value5 += 1
for i in range(value5):
for key, value in relation_dict_items.items():
if value == '5':
self.merge_nodes(G, key[0], key[1], pos, relation_dict_items)
break
for key, value in relation_dict_items.items():
if value == '4':
coor0 = pos[key[0]]
coor1 = pos[key[1]]
if abs(coor1[1] - coor0[1])> 0.1:
if (key[0] not in items_prerequisites) & (key[1] not in items_prerequisites):
new_val = coor1[0] + 0.005
coor_y = coor1[1]
coor1 = (new_val, coor_y)
pos[key[1]] = coor1
elif (key[0] not in items) & (key[1] not in items):
new_val = coor1[0] - 0.005
coor_y = coor1[1]
coor1 = (new_val, coor_y)
pos[key[1]] = coor1
G.add_edge(key[0], key[1], color='b')
for key, value in relation_dict_items.items():
if value == '2':
if(key[0] in items) & (key[1] in items):
coor0 = pos[key[0]]
coor1 = pos[key[1]]
new_x2 = coor1[0] + 0.005
new_y2 = coor0[1] - 0.05
new_coor = (new_x2, new_y2)
pos[key[1]] = new_coor
G.add_edge(key[0], key[1], color='green')
elif (key[0] in items_prerequisites) & (key[1] in items_prerequisites):
coor0 = pos[key[0]]
coor1 = pos[key[1]]
new_x2 = coor1[0] - 0.005
new_y2 = coor0[1] - 0.05
new_coor = (new_x2, new_y2)
pos[key[1]] = new_coor
G.add_edge(key[0], key[1], color='green')
else:
if (key[0] in items) & (key[1] in items_prerequisites):
coor0 = pos[key[0]] # in items
coor1 = pos[key[1]] # in prerequisites
if (key[0] in items) & (key[1] in items_prerequisites):
new_x1 = coor0[0] - 0.0025
new_x2 = coor1[0] + 0.0025
elif (key[0] in items_prerequisites) & (key[1] in items):
new_x1 = coor0[0] + 0.0025
new_x2 = coor1[0] - 0.0025
new_coor1 = (new_x1, coor0[1])
new_coor2 = (new_x2, coor0[1] - 0.1)
pos[key[0]] = new_coor1
pos[key[1]] = new_coor2
G.add_edge(key[0], key[1], color='green')
colour_map = []
for node in G:
if node in items:
colour_map.append('y')
elif node == workprogram_title:
colour_map.append('green')
elif '|' in node:
colour_map.append('darkorange')
else:
colour_map.append('lightgrey')
plt.text(
0.1,
0.99,
"Пререквизиты",
horizontalalignment="center",
transform=plt.gca().transAxes,
weight="bold",
size=13
)
plt.text(
0.7,
0.99,
"Результаты",
horizontalalignment="center",
transform=plt.gca().transAxes,
weight="bold",
size=13
)
if len(items)> 8:
value = 1000
else:
value = 1500
edges = G.edges()
colors = [G[u][v]['color'] for u, v in edges]
nx.draw_networkx(G, pos=pos, with_labels=True, font_size=9, font_weight='bold', node_color=colour_map, node_shape='o', node_size=value, edge_color=colors)
plt.axis('off')
legend_elements = [Line2D([0], [0], markerfacecolor='y', marker='o', label='Результат', markersize=15, color='w'),
Line2D([0], [0], markerfacecolor='g', marker='o', label='РПД', markersize=15, color='w'),
Line2D([0], [0], marker='o', markerfacecolor='lightgrey', label='Пререквизит', markersize=15, color='w'),
Line2D([0], [0], marker='o', markerfacecolor='darkorange', label='Связь - Тождество', markersize=15, color='w'),
Line2D([0], [0], lw=3, label='Связь - Имеет преревизит', color='g'),
Line2D([0], [0], lw=3, label='Связь - Являются частью одного раздела', color='b')]
plt.legend(handles=legend_elements, loc='upper right', fontsize='x-large')
plt.savefig(self.graph_directory_path(settings.GRAPH_ROOT, pk))
final = {
"coordinates": pos,
"graph": f'{settings.BACKEND_URL}{settings.GRAPH_URL}/item/{ pk }.png'
}
return Response(data=final, status=status.HTTP_200_OK)
class WorkProgramProfessionView(generics.ListAPIView):
queryset = WorkProgram.objects.all()
permission_classes = [IsRpdDeveloperOrReadOnly]
def clean_title(self, queryset, title_array):
to_fix = {
"e":"е",
"c":"с",
"a":"а",
"o":"о",
"y":"у",
"p":"р",
"H":"Н",
"K":"К",
"B":"В",
"M":"М"
}
for val in queryset:
if '\ufeff' in val.name:
title_array[0] += ' ' + val.name.split('\ufeff')[1]
else:
title_array[0] += ' ' + val.name
for i in val.name:
if i in to_fix.keys():
val.name = val.name.replace(i, to_fix[i])
return title_array
def get_len_list(self, size):
if size == 1:
return 1
else:
result = size + self.get_len_list(size-1)
return result
def graph_directory_path(self, GRAPH_URL, id):
return '{}/profession/{}.png'.format(GRAPH_URL, id)
def get(self, request, pk):
num_professions = self.request.data.get('num_professions')
queryset = self.queryset.get(id=pk)
outcomes = [val.id for val in queryset.outcomes.all()]
title_workprogram = ['']
title_workprogram = self.clean_title(queryset.outcomes.all(), title_workprogram)
title_workprogram = self.clean_title(queryset.prerequisites.all(), title_workprogram)
title_for_graph = [queryset.title]
for val in queryset.prerequisites.all():
outcomes.append(val.id)
if len(title_workprogram[0]) > 1:
tfidf_vectorizer = TfidfVectorizer()
title_matrix_workprogram = tfidf_vectorizer.fit_transform(title_workprogram).toarray()
            # professions that share the same learning entities
professions_with_items = Profession.objects.filter(skills__in=outcomes)
            # cosine similarity
all_info = {}
similarity_wp_prof = {}
similarity_prof = {}
if len(professions_with_items):
for i in range(len(professions_with_items.values())):
title_profession = ['']
id = professions_with_items.values()[i]['id']
items_of_profession = Profession.objects.get(id=id).skills.all()
                    # list of the profession's learning entities
title_profession = self.clean_title(items_of_profession, title_profession)
title_matrix_profession = tfidf_vectorizer.transform(title_profession).toarray()
                    # compute the cosine similarity
                    # store the value in the dictionary
similarity_wp_prof[id] = round(cosine_similarity(title_matrix_workprogram, title_matrix_profession)[0][0], 4)
similarity_prof[id] = title_profession
similarity_wp_prof = sorted(similarity_wp_prof.items(), key=lambda kv: kv[1], reverse=True)[:num_professions]
value = len(similarity_wp_prof) + 1
professions = []
prof_id = []
el1 = []
for el in similarity_wp_prof:
cur_prof = Profession.objects.get(id=el[0])
professions.append(
{"id":cur_prof.id,
"profession": cur_prof.title,
"cosine_similarity":el[1]}
)
prof_id.append(cur_prof.id)
el1.append(1-el[1])
title_for_graph.append(cur_prof.title)
for key in similarity_prof.keys():
if key not in prof_id:
similarity_prof = {k: similarity_prof[k] for k in similarity_prof.keys() - {key}}
comb = list(x for x in combinations(similarity_prof.keys(),2))
prof_cosine = {}
for el in comb:
prof1 = Profession.objects.get(id=el[0]).title
prof2 = Profession.objects.get(id=el[1]).title
title1 = tfidf_vectorizer.fit_transform(similarity_prof[el[0]]).toarray()
title2 = tfidf_vectorizer.transform(similarity_prof[el[1]]).toarray()
prof_cosine[prof1+' - '+prof2] = round(cosine_similarity(title1, title2)[0][0], 4)
prof_cosine = sorted(prof_cosine.items(), key=lambda kv: kv[1], reverse=True)
distance =[]
el2 = []
for el in prof_cosine:
distance.append({
"professions":el[0],
"cosine_distance":el[1]
})
el2.append(1-el[1])
len_list = self.get_len_list(value-1)
el = el1+el2
graph_matrix = np.zeros((value,value))
for i in range(value):
for j in range(value):
if i == j:
graph_matrix[i][j] = 0
elif j == 0:
graph_matrix[i][j] = el[i-1]
k = 0
count = 0
for elem in range(len_list+1):
if elem == 0:
el2.insert(elem,0)
k+=2
elif elem == count + k:
el2.insert(elem,0)
k+=1
count = i
indices = np.tril_indices(value-1)
graph_matrix1 = np.zeros((value-1, value-1))
graph_matrix1[indices] = el2
c = np.zeros((value-1,1)) # новый столбец
graph_matrix1 = np.column_stack([c, graph_matrix1])
r = np.zeros((1, value)) # новая строка
graph_matrix1 = np.row_stack([r, graph_matrix1])
graph_matrix += graph_matrix1
graph_matrix = np.maximum( graph_matrix, graph_matrix.transpose())
# all_info["WorkProgram"] = queryset.title
# all_info["Close_professions"] = professions
#all_info["Professions_distance"] = distance
            # render the graph
dt = [('len', float)]
plt.figure(figsize=(40, 10))
graph_matrix = graph_matrix.view(dt)
G = nx.from_numpy_matrix(graph_matrix)
G = nx.relabel_nodes(G, dict(zip(range(len(G.nodes())), title_for_graph)))
colors = []
for node in G:
if node == queryset.title:
colors.append('darkorange')
else:
colors.append('green')
nx.draw(G, with_labels = True, node_size=2000, node_color=colors, edge_color='white', font_size=15)
handles = [Line2D([0], [0], markerfacecolor='darkorange', marker='o', label='РПД', markersize=20, color='w'),
Line2D([0], [0], markerfacecolor='green', marker='o', label='Профессия', markersize=20, color='w')]
plt.legend(handles=handles, loc='upper right', fontsize='x-large')
plt.savefig(self.graph_directory_path(settings.GRAPH_ROOT, pk))
all_info['graph_title'] = title_for_graph
all_info['matrix'] = graph_matrix
all_info['graph'] = f'{settings.BACKEND_URL}{settings.GRAPH_URL}/profession/{ pk }.png'
return Response(data=all_info, status=status.HTTP_200_OK)
``` |
{
"source": "18C1054-S-K/robot3_ros",
"score": 2
} |
#### File: robot3_18c1054/scripts/ball_initial_to_arm_target.py
```python
import math
import numpy as np
import rospy
from std_msgs.msg import Float32MultiArray, Float32
from robot3_18c1054.srv import GetHandState, GetHandStateResponse
class BallToTargetNode():
GRAVITY = 9.8
def __init__(self):
self.pub_arm = rospy.Publisher('target_arm_state', Float32MultiArray, queue_size=10)
self.pub_hand = rospy.Publisher('hand_close_reserve', Float32, queue_size=10)
self.sub = rospy.Subscriber('ball_initial_state', Float32MultiArray, self.update_target)
def update_target(self, msg):
time_till_highest = msg.data[4] / 9.8
if time_till_highest < 0.0:
time_till_highest = 0.0
ball_highest_pos = [0.0]*3
for i in range(3):
ball_highest_pos[i] = msg.data[i] + time_till_highest*msg.data[i+3]
ball_highest_pos[1] -= (9.8/2.0)*(time_till_highest**2)
ball_highest_att = [[0,0,0], [0,0,0], [0,0,0]]
ball_highest_att[1][1] = 1.0
temp = msg.data[3]**2 + msg.data[5]**2
temp = math.sqrt(temp)
if not temp == 0.0:
ball_highest_att[0][2] = msg.data[3] / temp
ball_highest_att[2][2] = msg.data[5] / temp
else:
ball_highest_att[2][2] = 1.0
ball_highest_att[0][0] = ball_highest_att[2][2]
ball_highest_att[2][0] = -ball_highest_att[0][2]
        # publish
state = [0.0]*24
for i in range(3):
state[i] = ball_highest_pos[i]
for j in range(3):
state[i*3+j+6] = ball_highest_att[i][j]
close_time = msg.data[6] #time when shoot
close_time += time_till_highest
time_pub = Float32(close_time)
self.pub_hand.publish(time_pub)
state_pub = Float32MultiArray(data=state)
self.pub_arm.publish(state_pub)
'''
def update_target_2(self, msg):
rospy.wait_for_service('get_hand_state')
try:
            # search for the point on the ball's trajectory nearest to the hand
get_hand_state = rospy.ServiceProxy('get_hand_state', GetHandState)
resp = get_hand_state()
print('current_hand_state : ',resp.hand_state)
rel_state = [(msg.data[i] - resp.hand_state[i]) for i in range(6)]
print('current relative state : ',rel_state)
poly_coefs = [0.0]*4
poly_coefs[0] = 1.0
poly_coefs[1] = -3.0*rel_state[4]/self.GRAVITY
poly_coefs[2] = -rel_state[1]/(2.0*self.GRAVITY)
for i in range(3):
poly_coefs[2] += (rel_state[i+3]/self.GRAVITY)**2
poly_coefs[3] += rel_state[i]*rel_state[i+3]
poly_coefs[2] *= 4.0
poly_coefs[3] *= 2.0
poly_coefs[3] /= self.GRAVITY**2
poly_coefs[3] -= 1.0
print('oh yeah : ', poly_coefs)
poly_roots = np.roots(poly_coefs)
print('catch time candidate : ',poly_roots)
            # calculate the point's time
time_catch = 0.0
for i in range(3):
if poly_roots[i].imag == 0.0 and poly_roots[i].real > time_catch:
time_catch = poly_roots[i].real
            # calculate the point's position
pos = [0.0]*3
for i in range(3):
pos[i] = msg.data[i] + time_catch*msg.data[i+3]
pos[1] -= (self.GRAVITY/2.0)*(time_catch**2)
            # calculate the point's attitude
att_y = np.zeros(3)
att_y[1] = 1.0
att_z = np.zeros(3)
for i in range(3):
att_z[i] = msg.data[i+3]
att_z[1] -= time_catch*msg.data[4]
temp = np.linalg.norm(att_z)
if temp == 0.0:
att_z[0]=0.0
att_z[1]=0.0
att_z[2]=1.0
else:
att_z = att_z / temp
att_x = np.cross(att_y,att_z)
            # att_z is never vertical, so we don't need to worry about att_x = 0
att_x = att_x / np.linalg.norm(att_x)
att_y = np.cross(att_z,att_x)
            # publish
state = [0.0]*24
for i in range(3):
state[i] = pos[i]
state[i*3 + 0 + 6] = att_x[i]
state[i*3 + 1 + 6] = att_y[i]
state[i*3 + 2 + 6] = att_z[i]
close_time = msg.data[6] #time when shoot
close_time += time_catch
print('--target--')
print('time : ',time_catch)
print('pos : ',pos)
print('att :')
print(att_x)
print(att_y)
print(att_z)
time_pub = Float32(close_time)
self.pub_hand.publish(time_pub)
state_pub = Float32MultiArray(data=state)
self.pub_arm.publish(state_pub)
except rospy.ServiceException:
update_target_1(msg)
'''
def main():
rospy.init_node('ball_initial_to_arm_target', anonymous=True)
node = BallToTargetNode()
rospy.spin()
if __name__ == '__main__':
main()
```
#### File: robot3_18c1054/scripts/visualizer.py
```python
import rospy
import numpy as np
import math
import time
from sensor_msgs.msg import JointState
from std_msgs.msg import Float32, Float32MultiArray, Bool, Header
from robot3_18c1054.msg import HandClose
from robot3_18c1054.srv import GetHandState, GetHandStateResponse, GetInitTime, GetInitTimeResponse
class VisualizerNode():
GRAVITY = 9.8
JOINT_NUM = 6
DELTA_TIME = 0.05
arm_ang = [0.0]*6
is_hand_close = False
hand_state = [0.0]*6
hand_ang_close = 0.0
hand_ang_open = 0.8
is_shooted = False
shoot_timestamp = time.time()
is_ball_flying = False
shooter_state = [0.0, -1.0, 0.0, 0.0]
ball_state = [0.0]*6
ball_initial_state = [0.0]*6
init_time = 0.0
def __init__(self):
#get init_time
rospy.wait_for_service('get_init_time')
try:
get_init_time = rospy.ServiceProxy('get_init_time', GetInitTime)
resp = get_init_time()
self.init_time = resp.year
self.init_time = self.init_time*12 + resp.month
self.init_time = self.init_time*30 + resp.day
self.init_time = self.init_time*24 + resp.hour
self.init_time = self.init_time*60 + resp.minute
self.init_time = self.init_time*60 + resp.second
self.init_time += resp.lower
except rospy.ServiceException:
print('service err in visualizer')
self.init_time = time.time()
self.pub_viz = rospy.Publisher('joint_states', JointState, queue_size=10)
self.sub_arm = rospy.Subscriber('arm_ang_angv', Float32MultiArray, self.update_arm)
self.sub_hand = rospy.Subscriber('hand_close', HandClose, self.update_hand)
self.sub_shooter = rospy.Subscriber('shooter_state', Float32MultiArray, self.update_shooter)
self.sub_shoot_1 = rospy.Subscriber('ball_initial_state', Float32MultiArray, self.shoot)
self.timer = rospy.Timer(rospy.Duration(self.DELTA_TIME), self.redisp)
def update_arm(self, msg):
for i in range(self.JOINT_NUM):
self.arm_ang[i] = msg.data[i]
def update_hand(self, msg):
self.is_hand_close = msg.close
if self.is_hand_close:
count = (msg.time_stamp + self.init_time) - self.shoot_timestamp
for i in range(3):
self.ball_state[i+3] = self.ball_initial_state[i+3]
self.ball_state[i] = self.ball_initial_state[i]
self.ball_state[i] += count * self.ball_initial_state[i+3]
self.ball_state[4] -= self.GRAVITY * count
self.ball_state[1] -= (self.GRAVITY/2.0)*(count**2)
self.check_catch()
# self.is_ball_flying = False
SAFE_DIST_X = 0.02
SAFE_DIST_Y = 0.05
SAFE_DIST_Z = 0.05
SAFE_COS_ANG_Z = 0.75
SAFE_COS_ANG_Y = 0.5
def check_catch(self):
rospy.wait_for_service('get_hand_state')
try:
get_hand_state = rospy.ServiceProxy('get_hand_state', GetHandState)
resp = get_hand_state()
hand_pos = np.zeros((3,1))
hand_pos_v = np.zeros((3,1))
hand_att = np.zeros((3,3))
for i in range(3):
hand_pos[i,0]=resp.hand_state[i]
hand_pos_v[i,0]=resp.hand_state[i+3]
for j in range(3):
hand_att[i,j]=resp.hand_state[i*3+j+6]
ball_pos = np.zeros((3,1))
ball_pos_v = np.zeros((3,1))
for i in range(3):
ball_pos[i,0] = self.ball_state[i]
ball_pos_v[i,0] = self.ball_state[i+3]
print('--------------------------')
rel_p = ball_pos - hand_pos
rel_p = hand_att.T @ rel_p
print("ball's relative position from hand :")
print(rel_p)
if abs(rel_p[0,0]) < self.SAFE_DIST_X and abs(rel_p[1,0]) < self.SAFE_DIST_Y and abs(rel_p[2,0]) < self.SAFE_DIST_Z:
rel_v = ball_pos_v - hand_pos_v
rel_v = hand_att.T @ rel_v
v_norm = np.linalg.norm(rel_v)
if (rel_v[2,0]>v_norm*self.SAFE_COS_ANG_Z or rel_v[2,0]<v_norm*self.SAFE_COS_ANG_Z) and rel_v[1,0]<v_norm*self.SAFE_COS_ANG_Y:
self.is_ball_flying = False
if self.is_ball_flying and self.is_shooted:
print('ball catch : miss')
elif self.is_shooted:
print('ball catch : success')
print('--------------------------')
except rospy.ServiceException: pass
def update_shooter(self, msg):
for i in range(4):
self.shooter_state[i] = msg.data[i]
def shoot(self, msg):
self.shoot_timestamp = msg.data[6] + self.init_time
self.is_shooted = True
self.is_ball_flying = True
for i in range(6):
self.ball_initial_state[i]=msg.data[i]
def redisp(self, event):
if not rospy.is_shutdown():
j_s = JointState()
j_s.header = Header()
j_s.header.stamp = rospy.Time.now()
j_s.name = ['hand_1','hand_2','shooter_x','shooter_z','shooter_roll','shooter_pitch','ball_x','ball_y','ball_z', 'joint_1','joint_2','joint_3','joint_4','joint_5','joint_6']
arr = [0.0]*(9+self.JOINT_NUM)
#arm
for i in range(self.JOINT_NUM):
arr[i+9] = self.arm_ang[i]
#hand
if self.is_hand_close:
arr[0]=self.hand_ang_close
arr[1]=self.hand_ang_close
else:
arr[0]=self.hand_ang_open
arr[1]=self.hand_ang_open
#shooter
for i in range(4):
arr[i+2]=self.shooter_state[i]
#ball
if self.is_shooted and self.is_ball_flying:
count = time.time() - self.shoot_timestamp
for i in range(3):
self.ball_state[i+3] = self.ball_initial_state[i+3]
self.ball_state[i] = self.ball_initial_state[i]
self.ball_state[i] += count * self.ball_initial_state[i+3]
self.ball_state[4] -= self.GRAVITY * count
self.ball_state[1] -= (self.GRAVITY/2.0)*(count**2)
elif not self.is_shooted:
self.ball_state[0]=self.shooter_state[0]
self.ball_state[1]=0.0
self.ball_state[2]=self.shooter_state[1]
for i in range(3):
arr[i+6]=self.ball_state[i]
#publish
j_s.position = arr
try:
self.pub_viz.publish(j_s)
except rospy.ROSException: pass
def main():
rospy.init_node('visualizer', anonymous=True)
node = VisualizerNode()
rospy.spin()
if __name__ == '__main__':
main()
``` |
{
"source": "18F/10x-MLaaS",
"score": 3
} |
#### File: 10x-MLaaS/HSM/api.py
```python
from flask import Flask, jsonify, make_response
from flask_httpauth import HTTPBasicAuth
from utils import config, db, db_utils
from werkzeug.security import check_password_hash
# initialization
app = Flask(__name__)
app.config['SECRET_KEY'] = config.APP_SECRET_KEY
auth = HTTPBasicAuth()
@auth.verify_password
def verify_password(username, password):
if username in config.users:
return check_password_hash(config.users.get(username), password)
return False
@auth.error_handler
def unauthorized():
return make_response(jsonify({'error': 'Unauthorized access'}), 401)
@app.errorhandler(404)
def not_found(error):
return make_response(jsonify({'error': 'Not found'}), 404)
@app.route('/')
@auth.login_required
def index():
return jsonify({'title': 'Predict->Validate->Train',
'username': auth.username()
})
@app.route('/predict', methods=['GET'])
@auth.login_required
def predict():
return jsonify({'task': 'predict',
'username': auth.username()
})
@app.route('/validate') # , methods=['POST'])
@auth.login_required
def validate():
return jsonify({'task': 'validate',
'username': auth.username(),
}), 200
@app.route('/train') # , methods=['POST'])
@auth.login_required
def train():
return jsonify({'task': 'train',
'username': auth.username()
}), 200
if __name__ == "__main__":
db_utils.create_postgres_db()
db.dal.connect()
session = db.dal.Session()
port = int(config.APP_PORT)
app.run(host='0.0.0.0', port=port)
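    # Example request against the running service (illustrative; assumes a
    # username/password pair present in config.users and the port configured
    # via config.APP_PORT):
    #     curl -u username:password http://localhost:8080/predict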
```
#### File: 10x-MLaaS/HSM/load_data.py
```python
import json
from argparse import ArgumentParser
import pandas as pd
from utils import db, db_utils
from utils.db import Data, SupportData
filter_feature = 'Comments Concatenated'
validation = 'Validation'
def main(file):
db_utils.create_postgres_db()
db.dal.connect()
session = db.dal.Session()
df = pd.read_excel(file)
data_columns = [filter_feature, validation]
data = df[data_columns]
support_data = json.loads(df[df.columns.difference(data_columns)].to_json(orient='records'))
for i in range(len(data)):
data_row = data.iloc[i]
support_data_row = support_data[i]
data_obj = Data(filter_feature=str(data_row[filter_feature]), validation=int(data_row[validation]))
session.add(data_obj)
session.flush()
support_data_obj = SupportData(support_data=support_data_row)
data_obj.support_data = support_data_obj
support_data_obj.data = data_obj
support_data_obj.data_id = support_data_obj.data.id
session.add(support_data_obj)
session.commit()
print(f'Loaded {len(data)} records of data and support_data.')
if __name__ == '__main__':
program_desc = '''This application will get the spreadsheet and pull out essential data to fill out
the database. It will populate the database in the `data` table. It also put all
other data in the database as well in support_data table.'''
parser = ArgumentParser(description=program_desc)
parser.add_argument("file", help="specify path to file")
args = parser.parse_args()
main(file=args.file)
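    # Example invocation (illustrative file name):
    #     python load_data.py data/survey_responses.xlsx
    # The spreadsheet must contain 'Comments Concatenated' and 'Validation' columns.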
```
#### File: HSM/utils/db.py
```python
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, JSON, Integer, String, ForeignKey
from sqlalchemy.orm import relationship, sessionmaker
from utils.config import SQLALCHEMY_URI
Base = declarative_base()
class DataAccessLayer:
def __init__(self):
self.engine = None
self.conn_string = SQLALCHEMY_URI
def connect(self):
self.engine = create_engine(self.conn_string, echo=True)
Base.metadata.create_all(self.engine)
self.Session = sessionmaker(bind=self.engine)
dal = DataAccessLayer()
class Data(Base):
__tablename__ = 'data'
id = Column(Integer, primary_key=True, index=True)
filter_feature = Column(String(10000), nullable=True)
validation = Column(Integer)
support_data = relationship("SupportData", uselist=False, back_populates="data")
class SupportData(Base):
__tablename__ = 'support_data'
id = Column(Integer, primary_key=True, index=True)
support_data = Column(JSON)
data_id = Column(Integer, ForeignKey('data.id'), nullable=False)
data = relationship("Data", back_populates="support_data")
```
#### File: HSM/utils/validate.py
```python
import pandas as pd
class Validate():
def __init__(self, results_path):
self.results_path = results_path
def get_validations(self):
'''
Returns a mapping of responseIds to user-validated spam/ham codes
'''
validated_df = pd.read_excel(self.results_path)
validated_id_pred_map = dict(zip(validated_df['ResponseID'],
validated_df['SPAM']))
return validated_id_pred_map
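# Example usage sketch (hypothetical; the results path is an assumption):
#     validator = Validate('results/validated_predictions.xlsx')
#     id_to_label = validator.get_validations()
# The spreadsheet is expected to contain 'ResponseID' and 'SPAM' columns.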
``` |
{
"source": "18F/2015-foia",
"score": 2
} |
#### File: documents/tasks/utils.py
```python
import datetime
import urllib.parse
import os, os.path, errno, sys, traceback, subprocess
import re, html.entities
import json
import logging
from pytz import timezone
from bs4 import BeautifulSoup
import requests
from requests.exceptions import RequestException
# scraper should be instantiated at class-load time, so that it can rate limit appropriately
import scrapelib
scraper = scrapelib.Scraper(requests_per_minute=30, retry_attempts=3)
scraper.user_agent = "18F (https://18f.gsa.gov, https://github.com/18f/foia)"
# assumed time zone used by format_datetime below when localizing datetimes
eastern_time_zone = timezone("US/Eastern")
# serialize and pretty print json
def json_for(object):
return json.dumps(object, sort_keys=True, indent=2, default=format_datetime)
def format_datetime(obj):
if isinstance(obj, datetime.datetime):
return eastern_time_zone.localize(obj.replace(microsecond=0)).isoformat()
elif isinstance(obj, datetime.date):
return obj.isoformat()
elif isinstance(obj, str):
return obj
else:
return None
# mkdir -p, then write content
def write(content, destination, binary=False):
mkdir_p(os.path.dirname(destination))
if binary:
mode = "bw"
else:
mode = "w"
f = open(destination, mode)
f.write(content)
f.close()
# mkdir -p in python, from:
# http://stackoverflow.com/questions/600268/mkdir-p-functionality-in-python
def mkdir_p(path):
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST:
pass
else:
raise
# read options from the command line
# e.g. ./state.py --since=2012-03-04 --debug
# => {"since": "2012-03-04", "debug": True}
def options():
options = {}
for arg in sys.argv[1:]:
if arg.startswith("--"):
if "=" in arg:
key, value = arg.split('=')
else:
key, value = arg, "True"
key = key.split("--")[1]
if value.lower() == 'true': value = True
elif value.lower() == 'false': value = False
options[key.lower()] = value
return options
# used mainly in debugging, quick download a URL and parse it
def quick_parse(url):
body = download(url)
doc = BeautifulSoup(body)
return doc
# get content-type and content-disposition from server
def content_headers(url):
'''
e.g. https://foiaonline.regulations.gov/foia/action/getContent;jsessionid=D793C614B6F66FF414F03448F64B7AF6?objectId=o9T34dVQIKi7v1Dv3L1K_HXAo4KBMt2C
Content-Disposition: attachment;filename="EPA Pavillion - list of withheld docs.final.12-03-2012.xlsx"
Content-Type: application/vnd.openxmlformats-officedocument.spreadsheetml.sheet;charset=ISO-8859-1
'''
try:
response = requests.head(url)
except RequestException:
return None
headers = response.headers
# try to do some smart parsing of headers here, but also leave
# the originals
if headers.get("Content-Disposition"):
filename = headers["Content-Disposition"].split(";")[-1]
extension = os.path.splitext(filename.replace("\"", '').replace("'", ''))[1]
headers["extension"] = extension.replace(".", "")
if headers.get("Content-Type"):
headers["type"] = headers["Content-Type"].split(";")[0]
return headers
# download the data at url
def download(url, destination=None, options=None):
options = {} if not options else options
cache = options.get('cache', True) # default to caching
binary = options.get('binary', False) # default to assuming text
# check cache first
if destination and cache and os.path.exists(destination):
logging.info("## Cached: (%s, %s)" % (destination, url))
# if a binary file is cached, we're done
if binary:
return True
# otherwise, decode it for return
with open(destination, 'r', encoding='utf-8') as f:
body = f.read()
# otherwise, download from the web
else:
logging.info("## Downloading: %s" % url)
if binary:
if destination:
logging.info("## \tto: %s" % destination)
else:
raise Exception("A destination path is required for downloading a binary file")
try:
mkdir_p(os.path.dirname(destination))
scraper.urlretrieve(url, destination)
except scrapelib.HTTPError as e:
# intentionally print instead of using logging,
# so that all 404s get printed at the end of the log
print("Error downloading %s:\n\n%s" % (url, format_exception(e)))
return None
else: # text
try:
if destination: logging.info("## \tto: %s" % destination)
response = scraper.urlopen(url)
except scrapelib.HTTPError as e:
# intentionally print instead of using logging,
# so that all 404s get printed at the end of the log
print("Error downloading %s:\n\n%s" % (url, format_exception(e)))
return None
body = response
if not isinstance(body, str): raise ValueError("Content not decoded.")
# don't allow 0-byte files
if (not body) or (not body.strip()):
return None
# save content to disk
if destination:
write(body, destination, binary=binary)
# don't return binary content
if binary:
return True
else:
# whether from disk or web, unescape HTML entities
return unescape(body)
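# Example calls (illustrative only; URLs and paths are assumptions):
#     html_body = download("https://example.gov/foia/log.html", "data/log.html")
#     saved = download("https://example.gov/foia/log.pdf", "data/log.pdf",
#                      options={"binary": True})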
# taken from http://effbot.org/zone/re-sub.htm#unescape-html
def unescape(text):
def remove_unicode_control(str):
remove_re = re.compile('[\x00-\x08\x0B-\x0C\x0E-\x1F\x7F]')
return remove_re.sub('', str)
def fixup(m):
text = m.group(0)
if text[:2] == "&#":
# character reference
try:
if text[:3] == "&#x":
return chr(int(text[3:-1], 16))
else:
return chr(int(text[2:-1]))
except ValueError:
pass
else:
# named entity
try:
text = chr(html.entities.name2codepoint[text[1:-1]])
except KeyError:
pass
return text # leave as is
    text = re.sub(r"&#?\w+;", fixup, text)
text = remove_unicode_control(text)
return text
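# For example (illustrative): unescape("&#65;mp&amp;ersand") returns "Amp&ersand",
# since numeric character references and named entities are both decoded.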
# uses pdftotext to get text out of PDFs, returns the /data-relative path
def text_from_pdf(pdf_path):
try:
subprocess.Popen(["pdftotext", "-v"], shell=False, stdout=subprocess.DEVNULL, stderr=subprocess.STDOUT).communicate()
except FileNotFoundError:
logging.warn("Install pdftotext to extract text! The pdftotext executable must be in a directory that is in your PATH environment variable.")
return None
real_pdf_path = os.path.abspath(os.path.expandvars(pdf_path))
text_path = "%s.txt" % os.path.splitext(pdf_path)[0]
real_text_path = os.path.abspath(os.path.expandvars(text_path))
try:
subprocess.check_call(["pdftotext", "-layout", real_pdf_path, real_text_path], shell=False)
except subprocess.CalledProcessError as exc:
logging.warn("Error extracting text to %s:\n\n%s" % (text_path, format_exception(exc)))
return None
if os.path.exists(real_text_path):
return text_path
else:
logging.warn("Text not extracted to %s" % text_path)
return None
def format_exception(exception):
exc_type, exc_value, exc_traceback = sys.exc_info()
return "\n".join(traceback.format_exception(exc_type, exc_value, exc_traceback))
def data_dir():
params = options()
if params.get("data"):
return params.get("data")
else:
return "data"
def configure_logging(options=None):
options = {} if not options else options
if options.get('debug', False):
log_level = "debug"
else:
log_level = options.get("log", "warn")
if log_level not in ["debug", "info", "warn", "error"]:
print("Invalid log level (specify: debug, info, warn, error).")
sys.exit(1)
logging.basicConfig(format='%(message)s', level=log_level.upper())
configure_logging(options())
``` |
{
"source": "18F/acqstackdb",
"score": 2
} |
#### File: acqstackdb/acquisitions/admin.py
```python
from django.contrib import admin
from ordered_model.admin import OrderedModelAdmin, OrderedTabularInline
# Register your models here.
from .models import Acquisition, Agency, Subagency, ContractingOffice, \
ContractingOfficer, COR, Evaluator, Release, Vendor, \
Role, Actor, Step, Track, Stage, StepTrackThroughModel
@admin.register(Agency, Subagency, ContractingOffice, ContractingOfficer, COR,
Evaluator, Release, Vendor, Role, Actor, Track)
class AdminAdmin(admin.ModelAdmin):
pass
class AcquisitionAdmin(admin.ModelAdmin):
filter_horizontal = ('roles',)
class StepTrackThroughModelInline(OrderedTabularInline):
model = StepTrackThroughModel
fields = ('track', 'order', 'wip_limit', 'move_up_down_links',)
readonly_fields = ('order', 'move_up_down_links',)
extra = 1
ordering = ('order',)
class StepAdmin(OrderedModelAdmin):
list_display = ('actor', 'stage',)
inlines = (StepTrackThroughModelInline,)
def get_urls(self):
urls = super(StepAdmin, self).get_urls()
for inline in self.inlines:
if hasattr(inline, 'get_urls'):
urls = inline.get_urls(self) + urls
return urls
class StageAdmin(OrderedModelAdmin):
list_display = ('name', 'move_up_down_links')
admin.site.register(Stage, StageAdmin)
admin.site.register(Acquisition, AcquisitionAdmin)
admin.site.register(Step, StepAdmin)
```
#### File: acqstackdb/acquisitions/forms.py
```python
import floppyforms.__future__ as forms
from acquisitions import models
class AcquisitionForm(forms.ModelForm):
class Meta:
model = models.Acquisition
# fields = ['subagency', 'track', 'task', 'step']
exclude = []
class TrackForm(forms.ModelForm):
class Meta:
model = models.Track
fields = ['name']
class StageForm(forms.ModelForm):
class Meta:
model = models.Stage
fields = ['name']
class HiddenStageForm(StageForm):
name = forms.CharField(widget=forms.HiddenInput())
class StepForm(forms.ModelForm):
track = forms.ModelMultipleChoiceField(queryset=models.Track.objects.all())
class Meta:
model = models.Step
exclude = []
    def save(self, commit=True):
        # step = super(StepForm, self).save()
        form_data = self.cleaned_data
        step = models.Step.objects.create(
            stage=form_data["stage"],
            actor=form_data["actor"]
        )
        for track in form_data["track"]:
            models.StepTrackThroughModel.objects.create(
                step=step,
                track=track
            )
        return step
class AgencyForm(forms.ModelForm):
class Meta:
model = models.Agency
fields = ['name']
class SubagencyForm(forms.ModelForm):
class Meta:
model = models.Subagency
fields = ['agency', 'name']
class ActorForm(forms.ModelForm):
class Meta:
model = models.Actor
fields = ['name']
```
#### File: management/commands/add_teammate.py
```python
from django.core.management.base import BaseCommand, CommandError
from django.contrib.auth.models import User
class Command(BaseCommand):
help = 'Add teammate'
def add_arguments(self, parser):
parser.add_argument('username', nargs='+', type=str)
def handle(self, *args, **options):
for username in options['username']:
try:
user = User.objects.get(username=username)
user.is_staff = True
user.is_superuser = True
print('Making %s a superuser!' % username)
user.save()
except User.DoesNotExist:
print("%s does not exist" % username)
```
#### File: management/commands/seed_database.py
```python
from django.core.management.base import BaseCommand, CommandError
from acquisitions import factories
class Command(BaseCommand):
def handle(self, *args, **options):
factories.ActorFactory.create_batch(5)
factories.AgencyFactory.create_batch(5)
factories.SubagencyFactory.create_batch(10)
factories.ContractingOfficeFactory.create_batch(2)
factories.ContractingOfficerFactory.create_batch(10)
factories.CORFactory.create_batch(10)
factories.TrackFactory.create_batch(3)
factories.StageFactory.create_batch(6)
factories.StepFactory.create_batch(10)
factories.StepTrackThroughFactory.create_batch(30)
factories.AcquisitionFactory.create_batch(50)
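# Example invocation, which fills the database with fake records for local development:
#     python manage.py seed_database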
```
#### File: acqstackdb/acquisitions/models.py
```python
from django.db import models
from django.core.validators import RegexValidator, ValidationError
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth.models import User
from smart_selects.db_fields import ChainedForeignKey, ChainedManyToManyField
from ordered_model.models import OrderedModel
# Create your models here.
class Agency(models.Model):
name = models.CharField(max_length=100, blank=False)
abbreviation = models.CharField(max_length=10, null=True, blank=True)
department = models.CharField(max_length=100, null=True, blank=True)
omb_agency_code = models.IntegerField(null=True, blank=True)
omb_bureau_code = models.IntegerField(null=True, blank=True)
treasury_agency_code = models.IntegerField(null=True, blank=True)
cgac_agency_code = models.IntegerField(null=True, blank=True)
def __str__(self):
return self.name
class Meta:
verbose_name_plural = "Agencies"
ordering = ('name',)
class Subagency(models.Model):
name = models.CharField(max_length=100, blank=False)
abbreviation = models.CharField(max_length=10, null=True, blank=True)
agency = models.ForeignKey(Agency)
def __str__(self):
return "%s - %s" % (self.agency, self.name)
class Meta:
ordering = ('name',)
verbose_name_plural = "Subagencies"
class ContractingOffice(models.Model):
name = models.CharField(max_length=100)
def __str__(self):
return self.name
class Meta:
verbose_name = "Contracting Office"
verbose_name_plural = "Contracting Offices"
class ContractingOfficer(models.Model):
name = models.CharField(max_length=100)
contracting_office = models.ForeignKey(ContractingOffice)
def __str__(self):
return "%s - %s" % (self.name, self.contracting_office)
class Meta:
ordering = ('name',)
verbose_name = "Contracting Officer"
verbose_name_plural = "Contracting Officers"
class COR(models.Model):
name = models.CharField(max_length=100)
def __str__(self):
return self.name
class Meta:
ordering = ('name',)
verbose_name = "Contracting Officer Representative"
verbose_name_plural = "Contracting Officer Representatives"
# Is the acquisition internal or external?
class Track(models.Model):
name = models.CharField(max_length=50)
def __str__(self):
return "%s" % (self.name)
class Stage(OrderedModel):
name = models.CharField(max_length=50)
wip_limit = models.IntegerField(default=0, verbose_name="WIP Limit")
def __str__(self):
return "%s" % (self.name)
class Meta(OrderedModel.Meta):
pass
class Actor(models.Model):
name = models.CharField(max_length=200, blank=False)
def __str__(self):
return "%s" % (self.name)
class Step(models.Model):
actor = models.ForeignKey(
Actor,
blank=False
)
track = models.ManyToManyField(
Track,
blank=False,
through="StepTrackThroughModel"
)
stage = models.ForeignKey(
Stage,
blank=False
)
def __str__(self):
return "%s - %s" % (self.stage, self.actor,)
class Meta:
ordering = ('steptrackthroughmodel__order',)
class StepTrackThroughModel(OrderedModel):
track = models.ForeignKey(Track)
step = models.ForeignKey(Step)
wip_limit = models.IntegerField(default=0, verbose_name="WIP Limit")
order_with_respect_to = 'track'
class Meta(OrderedModel.Meta):
unique_together = ('track', 'step')
ordering = ('track', 'order')
class Vendor(models.Model):
name = models.CharField(max_length=200, blank=False)
email = models.EmailField(blank=False)
duns = models.CharField(max_length=9, blank=False, validators=[
        RegexValidator(regex=r'^\d{9}$', message="DUNS number must be 9 digits")
])
def __str__(self):
return self.name
class Role(models.Model):
description = models.CharField(max_length=100, choices=(
('P', 'Product Lead'),
('A', 'Acquisition Lead'),
('T', 'Technical Lead')
), null=True, blank=True)
teammate = models.ForeignKey(User, blank=True, null=True)
def __str__(self):
return "%s - %s" % (self.get_description_display(), self.teammate)
class Acquisition(models.Model):
SET_ASIDE_CHOICES = (
("AbilityOne", "AbilityOne"),
("HUBZone Small Business", "HUBZone Small Business"),
("Multiple Small Business Categories",
"Multiple Small Business Categories"),
("Other Than Small", "Other Than Small"),
("Service Disabled Veteran-owned Small Business",
"Service Disabled Veteran-owned Small Business"),
("Small Business", "Small Business"),
("Small Disadvantaged Business (includes Section 8a)",
"Small Disadvantaged Business (includes Section 8a)"),
("To Be Determined-BPA", "To Be Determined-BPA"),
("To Be Determined-IDIQ", "To Be Determined-IDIQ"),
("Veteran-Owned Small Business", "Veteran-Owned Small Business"),
("Woman-Owned Small Business", "Woman-Owned Small Business"),
)
CONTRACT_TYPE_CHOICES = (
("Cost No Fee", "Cost No Fee"),
("Cost Plus Award Fee", "Cost Plus Award Fee"),
("Cost Plus Fixed Fee", "Cost Plus Fixed Fee"),
("Cost Plus Incentive Fee", "Cost Plus Incentive Fee"),
("Cost Sharing", "Cost Sharing"),
("Fixed Price Award Fee", "Fixed Price Award Fee"),
("Fixed Price Incentive", "Fixed Price Incentive"),
("Fixed Price Labor Hours", "Fixed Price Labor Hours"),
("Fixed Price Level of Effort", "Fixed Price Level of Effort"),
("Fixed Price Time and Materials", "Fixed Price Time and Materials"),
("Fixed Price with Economic Price Adjustment",
"Fixed Price with Economic Price Adjustment"),
("Fixed Price", "Fixed Price"),
("Interagency Agreement", "Interagency Agreement"),
("Labor Hours and Time and Materials",
"Labor Hours and Time and Materials"),
("Labor Hours", "Labor Hours"),
("Order Dependent", "Order Dependent"),
("Time and Materials", "Time and Materials"),
)
COMPETITION_STRATEGY_CHOICES = (
("A/E Procedures", "A/E Procedures"),
("Competed under SAP", "Competed under SAP"),
("Competitive Delivery Order Fair Opportunity Provided",
"Competitive Delivery Order Fair Opportunity Provided"),
("Competitive Schedule Buy", "Competitive Schedule Buy"),
("Fair Opportunity", "Fair Opportunity"),
("Follow On to Competed Action (FAR 6.302-1)",
"Follow On to Competed Action (FAR 6.302-1)"),
("Follow On to Competed Action", "Follow On to Competed Action"),
("Full and Open after exclusion of sources (competitive small business \
set-asides, competitive 8a)",
"Full and Open after exclusion of sources (competitive small \
business set-asides, competitive 8a)"),
("Full and Open Competition Unrestricted",
"Full and Open Competition Unrestricted"),
("Full and Open Competition", "Full and Open Competition"),
("Limited Sources FSS Order", "Limited Sources FSS Order"),
("Limited Sources", "Limited Sources"),
("Non-Competitive Delivery Order", "Non-Competitive Delivery Order"),
("Not Available for Competition (e.g., 8a sole source, HUBZone & \
SDVOSB sole source, Ability One, all > SAT)",
"Not Available for Competition (e.g., 8a sole source, HUBZone & \
SDVOSB sole source, Ability One, all > SAT)"),
("Not Competed (e.g., sole source, urgency, etc., all > SAT)",
"Not Competed (e.g., sole source, urgency, etc., all > SAT)"),
("Not Competed under SAP (e.g., Urgent, Sole source, Logical \
Follow-On, 8a, HUBZone & SDVOSB sole source, all < SAT)",
"Not Competed under SAP (e.g., Urgent, Sole source, Logical \
Follow-On, 8a, HUBZone & SDVOSB sole source, all < SAT)"),
("Partial Small Business Set-Aside",
"Partial Small Business Set-Aside"),
("Set-Aside", "Set-Aside"),
("Sole Source", "Sole Source"),
)
PROCUREMENT_METHOD_CHOICES = (
("Ability One", "Ability One"),
("Basic Ordering Agreement", "Basic Ordering Agreement"),
("Blanket Purchase Agreement-BPA", "Blanket Purchase Agreement-BPA"),
("BPA Call", "BPA Call"),
("Call Order under GSA Schedules BPA",
"Call Order under GSA Schedules BPA"),
("Commercial Item Contract", "Commercial Item Contract"),
("Contract modification", "Contract modification"),
("Contract", "Contract"),
("Definitive Contract other than IDV",
"Definitive Contract other than IDV"),
("Definitive Contract", "Definitive Contract"),
("Government-wide Agency Contract-GWAC",
"Government-wide Agency Contract-GWAC"),
("GSA Schedule Contract", "GSA Schedule Contract"),
("GSA Schedule", "GSA Schedule"),
("GSA Schedules Program BPA", "GSA Schedules Program BPA"),
("Indefinite Delivery Indefinite Quantity-IDIQ",
"Indefinite Delivery Indefinite Quantity-IDIQ"),
("Indefinite Delivery Vehicle (IDV)",
"Indefinite Delivery Vehicle (IDV)"),
("Indefinite Delivery Vehicle Base Contract",
"Indefinite Delivery Vehicle Base Contract"),
("Multi-Agency Contract", "Multi-Agency Contract"),
("Negotiated", "Negotiated"),
("Order under GSA Federal Supply Schedules Program",
"Order under GSA Federal Supply Schedules Program"),
("Order under GSA Schedules Program BPA",
"Order under GSA Schedules Program BPA"),
("Order under GSA Schedules Program",
"Order under GSA Schedules Program"),
("Order under IDV", "Order under IDV"),
("Purchase Order", "Purchase Order"),
("Sealed Bid", "Sealed Bid"),
)
subagency = models.ForeignKey(Subagency)
task = models.CharField(max_length=100, blank=False)
description = models.TextField(max_length=500, null=True, blank=True)
track = models.ForeignKey(
Track,
blank=False,
related_name="%(class)s_track"
)
step = ChainedForeignKey(
Step,
chained_field="track",
chained_model_field="track",
blank=False
)
dollars = models.DecimalField(decimal_places=2, max_digits=14, null=True,
blank=True)
period_of_performance = models.DateField(null=True, blank=True)
product_owner = models.CharField(max_length=50, null=True, blank=True)
roles = models.ManyToManyField(Role, blank=True)
contracting_officer = models.ForeignKey(ContractingOfficer, null=True,
blank=True)
contracting_officer_representative = models.ForeignKey(COR, null=True,
blank=True)
contracting_office = models.ForeignKey(ContractingOffice, null=True,
blank=True)
vendor = models.ForeignKey(Vendor, null=True, blank=True)
rfq_id = models.IntegerField(null=True, blank=True, verbose_name="RFQ ID")
naics = models.IntegerField(
null=True,
blank=True,
verbose_name="NAICS Code"
)
set_aside_status = models.CharField(max_length=100, null=True, blank=True,
choices=SET_ASIDE_CHOICES)
amount_of_competition = models.IntegerField(null=True, blank=True)
contract_type = models.CharField(max_length=100, null=True, blank=True,
choices=CONTRACT_TYPE_CHOICES)
competition_strategy = models.CharField(
max_length=100,
null=True,
blank=True,
choices=COMPETITION_STRATEGY_CHOICES)
procurement_method = models.CharField(
max_length=100,
null=True,
blank=True,
choices=PROCUREMENT_METHOD_CHOICES)
award_date = models.DateField(null=True, blank=True)
delivery_date = models.DateField(null=True, blank=True)
def clean(self):
print(self.step.track.all())
print(self.track)
if self.track not in self.step.track.all():
raise ValidationError(_('Tracks are not equal.'))
def __str__(self):
return "%s (%s)" % (self.task, self.subagency)
class Evaluator(models.Model):
name = models.CharField(max_length=100)
acquisition = models.ManyToManyField(Acquisition)
def __str__(self):
return self.name
class Meta:
ordering = ('name',)
class Release(models.Model):
acquisition = models.ForeignKey(Acquisition)
def __str__(self):
return str(self.id)
class Meta:
ordering = ('id',)
```
#### File: acquisitions/providers/fake_agency.py
```python
import random
from faker.providers import BaseProvider
class AgencyProvider(BaseProvider):
agency_parts = (
(
"Department of", "Office of", "Bureau of",
),
(
"the Interior", "Administrating", "Hats", "Management", "Labor",
"Finance", "Departments", "Flying"
)
)
big_agency_start = (
"Department of", "Office of", "Bureau of",
)
big_agency_end = (
"Administration", "Agency",
)
medium_agency_end = (
"Division", "Section",
)
small_agency_end = (
"Region", "Office", "Room",
)
extra_parts = (
"Synergy", "Failure", "High-Profile Success", "First Aid", "Gravy",
"Sandwiches", "Wine", "Budget", "Style"
)
def agency(self, size="large"):
result = []
for part in self.agency_parts:
result.append(self.random_element(part))
if random.randint(0, 100) > 70:
result.append("and")
result.append(self.random_element(self.extra_parts))
return " ".join(result)
```
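`faker` providers like the one above are registered on a `Faker` instance before use. A minimal usage sketch (the import path is inferred from the file header and should be treated as an assumption):
```python
from faker import Faker
from acquisitions.providers.fake_agency import AgencyProvider  # path assumed from the file header

fake = Faker()
fake.add_provider(AgencyProvider)  # Faker instantiates BaseProvider subclasses itself
print(fake.agency())               # e.g. "Department of Labor" or "Bureau of Flying"
```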
#### File: acquisitions/tests/test_add_teammate.py
```python
from django.core.management import call_command
from django.test import TestCase
from django.contrib.auth.models import User
class AddTeammateTest(TestCase):
def setUp(self):
self.user = User.objects.create_user(username='test_user', email='',
password='')
self.assertFalse(self.user.is_superuser, 'User *is not* an admin \
before the command.')
def test_superuser_command_output(self):
call_command('add_teammate', 'test_user')
self.user = User.objects.get(username='test_user')
self.assertTrue(self.user.is_superuser, 'User *is* an admin \
after the command.')
``` |
{
"source": "18F/acquisitions.18f.gov",
"score": 2
} |
#### File: nda/tests/test_buy_nda.py
```python
import pytest
from django.contrib.auth.models import User, Group
from projects.factories import BuyFactory
from acquisitions.factories import UserFactory
class TestBuyNDA:
@pytest.fixture
@pytest.mark.django_db
def user(self):
user = UserFactory()
group = Group.objects.create(name='NDA Signed')
user.groups.add(group)
return user
@pytest.fixture
@pytest.mark.django_db
def buy(self):
buy = BuyFactory()
return buy
@pytest.mark.django_db
def test_signing_nda(self, user, buy):
buy.technical_evaluation_panel.add(user)
assert buy.all_nda_signed() is False
buy.nda_signed.add(user)
assert buy.all_nda_signed() is True
```
#### File: acquisitions.18f.gov/nda/views.py
```python
import os
from django.shortcuts import render
from django.shortcuts import redirect
from django.http import Http404
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import Group
from nda.forms import NDAForm
# Create your views here.
@login_required
def sign_nda(request):
try:
group = Group.objects.get(name='NDA Signed')
except Group.DoesNotExist:
print('NDA Signed group not yet created')
raise Http404
try:
request.user.groups.get(id=group.id)
return render(request, 'nda/already-signed.html')
except Group.DoesNotExist:
pass
nda_form = NDAForm(request.POST or None)
if nda_form.is_valid():
request.user.groups.add(group)
return render(request, 'nda/success.html')
return render(request, 'nda/nda.html', {
'nda_form': nda_form
})
```
#### File: acquisitions.18f.gov/news/models.py
```python
from datetime import datetime
from dateutil.tz import tzlocal
from django.db import models
from django.core.exceptions import ValidationError
from django.contrib.auth.models import User
from django.urls import reverse
# Create your models here.
class Post(models.Model):
title = models.CharField(
max_length=100,
blank=False,
null=False,
)
slug = models.SlugField(
max_length=50,
blank=True,
null=True,
)
authors = models.ManyToManyField(
User,
blank=False,
)
content = models.TextField(
blank=False,
null=False,
)
publication_date = models.DateTimeField(
blank=True,
null=True,
)
draft = models.BooleanField(
default=True,
)
def __str__(self):
return "{0} | {1}".format(self.title, self.publication_date)
def get_absolute_url(self):
return reverse('news:post', args=[self.slug])
def clean(self):
if self.draft and self.publication_date is not None:
raise ValidationError({
'publication_date': 'Drafts may not have a publication date'
})
if not self.draft and self.publication_date is None:
raise ValidationError({
'publication_date': 'Please set a publication date'
})
class Meta:
pass
```
#### File: acquisitions.18f.gov/news/views.py
```python
from datetime import datetime
from dateutil.tz import tzlocal
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.shortcuts import render
from django.shortcuts import get_object_or_404
from news.models import Post
# Create your views here.
def posts(request):
post_list = Post.objects.filter(
draft=False,
publication_date__lte=datetime.now(tzlocal())
).order_by('publication_date')
# Pagination: https://docs.djangoproject.com/en/1.10/topics/pagination/
paginator = Paginator(post_list, 10)
page = request.GET.get('page')
try:
posts = paginator.page(page)
except PageNotAnInteger:
# If page is not an integer, deliver first page.
posts = paginator.page(1)
except EmptyPage:
# If page is out of range (e.g. 9999), deliver last page of results.
posts = paginator.page(paginator.num_pages)
return render(request, "news/posts.html", {"posts": posts})
def post(request, slug):
post = get_object_or_404(
Post,
slug=slug,
draft=False,
publication_date__lte=datetime.now(tzlocal()),
)
return render(request, "news/post.html", {'post': post})
```
#### File: management/commands/create_projects.py
```python
from django.core.management.base import BaseCommand, CommandError
from projects.factories import ProjectFactory
class Command(BaseCommand):
help = 'Create a team'
def handle(self, *args, **options):
ProjectFactory.create_batch(5)
```
#### File: projects/tests/test_iaa_model.py
```python
import pytest
from datetime import date, timedelta
from projects.factories import (
IAAFactory,
)
@pytest.mark.django_db
def test_active():
iaa = IAAFactory()
iaa.performance_begins = date.today() - timedelta(days=1)
iaa.performance_ends = date.today() + timedelta(days=1)
assert iaa.active()
@pytest.mark.django_db
def test_inactive():
iaa = IAAFactory()
iaa.performance_begins = date.today() - timedelta(days=10)
iaa.performance_ends = date.today() - timedelta(days=5)
assert not iaa.active()
```
#### File: acquisitions.18f.gov/web/views.py
```python
from datetime import datetime, tzinfo
from dateutil.tz import tzlocal
from django.shortcuts import render
from django.shortcuts import redirect
from django.contrib.auth.decorators import login_required
from rest_framework.authtoken.models import Token
from rest_framework.response import Response
from rest_framework.reverse import reverse
from rest_framework.decorators import api_view
from news.models import Post
# Create your views here.
def index(request):
posts = Post.objects.filter(
draft=False,
publication_date__lte=datetime.now(tzlocal())
).order_by('publication_date')[:5]
return render(request, 'web/index.html', {'posts': posts})
def guides(request):
return render(request, 'web/guides.html')
@login_required
def profile(request):
return render(request, 'web/profile.html')
@login_required
def refresh_token(request):
# TODO: Updating in place seems better, but couldn't get that to work.
# Commented lines below are what I tried.
token = Token.objects.get_or_create(user=request.user)[0]
# token.key = token.generate_key()
# token.save(update_fields=['key'])
token.delete()
Token.objects.create(user=request.user)
return redirect("/profile/")
``` |
{
"source": "18F/analytics-proxy",
"score": 2
} |
#### File: 18F/analytics-proxy/util_functions.py
```python
import pickle
from oauth2client.client import SignedJwtAssertionCredentials
from httplib2 import Http
from apiclient.discovery import build
from flask import make_response, request, current_app
from datetime import timedelta
from functools import update_wrapper
def initialize_service(config):
""" Initalizes google analytics service """
client_email = config['CLIENT_EMAIL']
with open(config['GA_P12_KEY'], 'r') as f:
private_key = f.read()
credentials = SignedJwtAssertionCredentials(
client_email, private_key,
'https://www.googleapis.com/auth/analytics.readonly')
http_auth = credentials.authorize(Http())
return build('analytics', 'v3', http=http_auth)
def call_api(query, service):
""" calls api and returns result """
result = service.data().ga().get(**query).execute()
return result
def prepare_data(result):
""" Prepares data to return """
# slice off the "ga:" prefix (str.strip would treat "ga:" as a set of characters to trim)
header = [col['name'][3:] if col['name'].startswith('ga:') else col['name'] for col in result['columnHeaders']]
data = []
for row in result.get('rows'):
data.append(dict(zip(header, row)))
return {'data': data}
def load_reports(redis_client):
""" Loads reports into redis """
from reports import report_dict
for item in report_dict:
redis_client.set(item['report_name'], pickle.dumps(item))
def crossdomain(origin=None, methods=None, headers=None,
max_age=21600, attach_to_all=True,
automatic_options=True):
if methods is not None:
methods = ', '.join(sorted(x.upper() for x in methods))
if headers is not None and not isinstance(headers, basestring):
headers = ', '.join(x.upper() for x in headers)
if not isinstance(origin, basestring):
origin = ', '.join(origin)
if isinstance(max_age, timedelta):
max_age = max_age.total_seconds()
def get_methods():
if methods is not None:
return methods
options_resp = current_app.make_default_options_response()
return options_resp.headers['allow']
def decorator(f):
def wrapped_function(*args, **kwargs):
if automatic_options and request.method == 'OPTIONS':
resp = current_app.make_default_options_response()
else:
resp = make_response(f(*args, **kwargs))
if not attach_to_all and request.method != 'OPTIONS':
return resp
h = resp.headers
h['Access-Control-Allow-Origin'] = origin
h['Access-Control-Allow-Methods'] = get_methods()
h['Access-Control-Max-Age'] = str(max_age)
if headers is not None:
h['Access-Control-Allow-Headers'] = headers
return resp
f.provide_automatic_options = False
return update_wrapper(wrapped_function, f)
return decorator
``` |
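Since `crossdomain` is a view decorator, it sits between the route registration and the view function. A hypothetical sketch (the app, route, and payload below are illustrative, not part of the original project):
```python
from flask import Flask, jsonify

app = Flask(__name__)

@app.route('/reports/<name>')
@crossdomain(origin='*', methods=['GET'])
def report(name):
    # CORS headers are added to this response by the decorator
    return jsonify({'report': name})
```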
{
"source": "18F/aocourt-api",
"score": 2
} |
#### File: api_v1/endpoints/login.py
```python
from datetime import timedelta
from typing import Any
from sqlalchemy.orm import Session
from fastapi import APIRouter, HTTPException, Depends
from fastapi.security import OAuth2PasswordRequestForm
from app.entities import Token
from app import data
from app.core.config import settings
from app.core import security
from app.data.database import get_db
router = APIRouter()
@router.post("/login/access-token", response_model=Token, summary="Get an access token")
def login_access_token(
db: Session = Depends(get_db), form_data: OAuth2PasswordRequestForm = Depends()
) -> Any:
'''
Exchange name and password credentials for a JWT.
'''
user = data.user.authenticate(
db, email=form_data.username, password=form_data.password
)
if not user:
raise HTTPException(status_code=400, detail="Incorrect email or password")
elif not user.is_active:
raise HTTPException(status_code=400, detail="Inactive user")
access_token_expires = timedelta(minutes=settings.ACCESS_TOKEN_EXPIRE_MINUTES)
return {
"access_token": security.create_access_token(
user.id, expires_delta=access_token_expires
),
"token_type": "bearer",
}
```
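A hypothetical client-side sketch of the token exchange; the host, the path prefix (which comes from `settings.API_V1_STR`), and the credentials are placeholders:
```python
import requests

resp = requests.post(
    "http://localhost:8000/api/v1/login/access-token",  # prefix assumed; see settings.API_V1_STR
    data={"username": "[email protected]", "password": "not-a-real-password"},  # form fields, per OAuth2PasswordRequestForm
)
resp.raise_for_status()
token = resp.json()["access_token"]
auth_headers = {"Authorization": "Bearer " + token}
```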
#### File: api/graph_ql/routes_public.py
```python
from fastapi import APIRouter, Depends
from starlette.requests import Request
from sqlalchemy.orm import Session
from ariadne.asgi import GraphQL
from app.data.database import get_db
from app.entities import PublicUser
from . import schema
graphql_app = GraphQL(schema, debug=False)
graphQL_router_public = APIRouter()
@graphQL_router_public.get("/")
async def graphiql(request: Request):
return await graphql_app.render_playground(request=request)
@graphQL_router_public.post("/")
async def graphql_post(request: Request, db: Session = Depends(get_db)):
request.state.db = db
request.state.user = PublicUser()
return await graphql_app.graphql_http_server(request=request)
```
#### File: data/case/case.py
```python
import datetime
from sqlalchemy import Boolean, Column, Integer, String, Table, ForeignKey
from sqlalchemy.orm import relationship
from sqlalchemy.sql.sqltypes import DateTime
from ..database import mapper_registry
from app.entities.case import Case, DocketEntry, DistrictCase, AppellateCase
cases_table = Table(
'cases',
mapper_registry.metadata,
Column('id', Integer, primary_key=True, index=True),
Column('title', String, nullable=False),
Column('date_filed', DateTime),
Column('sealed', Boolean, default=False),
Column('type', String),
Column('court', String),
Column('status', String, nullable=True),
Column('original_case_id', Integer),
Column('reviewed', Boolean, default=False),
Column('remanded', Boolean, default=False),
Column('created_at', DateTime, default=datetime.datetime.utcnow),
Column(
'updated_on',
DateTime,
default=datetime.datetime.utcnow,
onupdate=datetime.datetime.utcnow
)
)
docket_entry_table = Table(
"docket_entries",
mapper_registry.metadata,
Column('id', Integer, nullable=False, primary_key=True),
Column('case_id', Integer, ForeignKey('cases.id'), nullable=False),
Column('sequence_no', Integer, nullable=False),
Column('court', String),
Column('text', String, nullable=False),
Column('date_filed', DateTime),
Column('entry_type', String, nullable=False),
Column('sealed', Boolean, default=False),
Column('created_at', DateTime, default=datetime.datetime.utcnow),
Column(
'updated_on',
DateTime,
default=datetime.datetime.utcnow,
onupdate=datetime.datetime.utcnow
)
)
def run_mappers():
mapper_registry.map_imperatively(DocketEntry, docket_entry_table)
mapper_registry.map_imperatively(
Case,
cases_table,
polymorphic_on=cases_table.c.type,
polymorphic_identity="case",
properties={
'docket_entries': relationship(
DocketEntry,
order_by="asc(DocketEntry.sequence_no)"
)
}
)
mapper_registry.map_imperatively(
DistrictCase,
inherits=Case,
polymorphic_identity="district"
)
mapper_registry.map_imperatively(
AppellateCase,
inherits=Case,
polymorphic_identity="appellate"
)
```
#### File: app/data/init_db.py
```python
from sqlalchemy.orm import Session
from sqlalchemy import create_engine
from app.data import user, role
from app.core.config import settings
from app.entities import User
from .database import mapper_registry, SessionLocal
from app.core.security import get_password_hash
def init_db(db: Session) -> None:
'''This is a stub for inserting initial data that may be needed for the application,
such as initial users. At the moment this is just a couple roles and
an admin user with these roles assigned.
Structural changes to the database should happen in migrations, not here.
In fact, if it turns out data like roles is needed for the application, we
may opt to put this in migrations as well.
'''
initial = user.get_by_email(db, email=settings.INITIAL_ADMIN_USER)
if not initial:
admin_role = role.get_by_name(db, rolename='admin')
clerk_role = role.get_by_name(db, rolename='clerk')
hashed_password = get_password_hash(settings.INITIAL_ADMIN_PASSWORD)
roles = [r for r in (admin_role, clerk_role) if r]
user_in = User(
email=settings.INITIAL_ADMIN_USER,
hashed_password=hashed_password,
roles=roles,
full_name="Initial Admin",
username="admin"
)
user.add(db, user_in)
db.commit()
def create_tables():
'''Set up tables for the tests'''
engine = create_engine(settings.DATABASE_URL)
mapper_registry.metadata.create_all(engine)
if __name__ == "__main__":
create_tables()
db = SessionLocal()
init_db(db)
```
#### File: app/entities/court.py
```python
from typing import Optional, TypeVar, Type, List
from dataclasses import dataclass
from app.core.enums import CourtType
from app.core.courts import courts
T = TypeVar('T', bound='Court')
@dataclass(frozen=True)
class Court():
'''
Info on Federal Courts
'''
id: str
type: CourtType
short_name: str
full_name: str
parent: Optional[str] = None
@classmethod
def from_id(cls: Type[T], court_id: str) -> T:
court_data = courts[court_id]
return cls(**court_data, id=court_id)
def parent_court(self: T) -> Optional[T]:
if self.parent:
# build a Court instance rather than returning the raw dict from the courts mapping
return self.__class__.from_id(self.parent)
def lower_courts(self: T) -> List[T]:
print(courts)
return [self.__class__.from_id(id) for id, c in courts.items() if c['parent'] == self.id]
```
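A short usage sketch of the dataclass above; the court id and the module path are assumptions about what `app.core.courts.courts` and the package layout contain:
```python
from app.entities.court import Court  # module path assumed from the file header

district = Court.from_id("nysd")                 # build a frozen Court from the static mapping
appeals = district.parent_court()                # walk up to the parent court, if any
children = appeals.lower_courts() if appeals else []
```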
#### File: tests/integration/test_auth.py
```python
import pytest
from fastapi.testclient import TestClient
from sqlalchemy.orm import Session
from app.core.config import settings
def test_get_access_token_valid_user(client: TestClient, db_session: Session, default_user) -> None:
'''Correct credentials should return HTTP 200 and an access token'''
login_data = {
"username": settings.INITIAL_ADMIN_USER,
"password": settings.INITIAL_<PASSWORD>_PASSWORD,
}
r = client.post(f"{settings.API_V1_STR}/login/access-token", data=login_data)
tokens = r.json()
assert r.status_code == 200
assert "access_token" in tokens
assert tokens["access_token"]
@pytest.mark.parametrize('login_data', [
{"username": settings.INITIAL_ADMIN_USER, "password": "<PASSWORD>"},
{"username": '<EMAIL>', "password": settings.INITIAL_ADMIN_PASSWORD},
{"username": '<EMAIL>', "password": '<PASSWORD>'},
])
def test_get_access_token_invalid_user(login_data, client: TestClient, db_session: Session, default_user) -> None:
'''Incorrect credentials should return HTTP 400 and no token'''
r = client.post(f"{settings.API_V1_STR}/login/access-token", data=login_data)
tokens = r.json()
assert r.status_code == 400
assert "access_token" not in tokens
@pytest.mark.parametrize('login_data', [
{"username": '', "password": '<PASSWORD>'},
{"username": '', "password": ''},
{"username": '<EMAIL>', "password": ''},
{"username": None, "password": None},
])
def test_get_access_token_bad_input(login_data, client: TestClient, db_session: Session, default_user) -> None:
'''Missing credentials should return HTTP 422'''
r = client.post(f"{settings.API_V1_STR}/login/access-token", data=login_data)
tokens = r.json()
assert r.status_code == 422
assert "access_token" not in tokens
```
#### File: tests/unit/conftest.py
```python
import pytest
from datetime import datetime
from app.core.enums import CaseStatus
from app.entities import DistrictCase, RecordOnAppeal
@pytest.fixture()
def simple_case():
case = DistrictCase(
title="Godzilla v. Mothra",
date_filed=datetime.now(),
status=CaseStatus.new,
sealed=True,
court="tnmd",
docket_entries=[]
)
case.id = 123
return case
@pytest.fixture
def simple_roa():
roa = RecordOnAppeal(
court='nysd',
title='Predator v. Alien',
original_case_id=123,
date_filed=datetime.now(),
docket_entries=[],
receiving_court=None,
status=CaseStatus.new
)
roa.id = 456
return roa
``` |
{
"source": "18F/calc",
"score": 2
} |
#### File: data_capture/tests/test_masconsolidated.py
```python
import json
import html as html_module
from decimal import Decimal
from .common import path, uploaded_xlsx_file, FakeWorkbook, FakeSheet
from .test_models import ModelTestCase
from ..schedules import mas_consolidated as mas, registry
from django.test import TestCase, override_settings
from django.core.exceptions import ValidationError
MAS = '{}.MASConsolidatedPriceList'.format(mas.__name__)
file_name = 'Price_Proposal_Template_SERVICES_AND_TRAINING_FINAL.xlsx'
MAS_XLSX_PATH = path('static', 'data_capture', file_name)
# TODO: These tests should be DRY'd out, since they are nearly identical to test_s70.
# Or, really, the shared methods should be generalized and those should be
# tested.
class GleaningTests(TestCase):
def create_fake_book(self):
return FakeWorkbook(sheets=[
FakeSheet(mas.DEFAULT_SHEET_NAME, mas.EXAMPLE_SHEET_ROWS)])
def test_rows_are_returned(self):
rows = mas.glean_labor_categories_from_file(
uploaded_xlsx_file(MAS_XLSX_PATH))
self.assertEqual(rows, [{
'sin': '874-1',
'labor_category': 'Principal Consultant',
'education_level': 'Bachelors',
'min_years_experience': '10',
'unit_of_issue': 'Hour',
'price_including_iff': '200.0',
'keywords': 'Process improvement, finance, senior project manager',
'certifications': 'PMP'
}])
def test_text_formatted_prices_are_gleaned(self):
book = self.create_fake_book()
book._sheets[0]._cells[1][13] = '$ 200.00 '
rows = mas.glean_labor_categories_from_book(book)
self.assertEqual(rows[0]['price_including_iff'], '200.00')
def test_min_education_is_gleaned_from_text(self):
book = self.create_fake_book()
book._sheets[0]._cells[1][2] = 'GED or high school'
rows = mas.glean_labor_categories_from_book(book)
self.assertEqual(rows[0]['education_level'], 'Bachelors')
def test_unit_of_issue_is_gleaned_to_hour(self):
book = self.create_fake_book()
book._sheets[0]._cells[1][7] = 'Hourly'
rows = mas.glean_labor_categories_from_book(book)
self.assertEqual(rows[0]['unit_of_issue'], 'Hour')
def test_validation_error_raised_when_sheet_not_present(self):
with self.assertRaisesRegexp(
ValidationError,
r'There is no sheet in the workbook called "foo"'
):
mas.glean_labor_categories_from_file(
uploaded_xlsx_file(MAS_XLSX_PATH),
sheet_name='foo'
)
@override_settings(DATA_CAPTURE_SCHEDULES=[MAS])
class MASConsolidatedPriceListTests(ModelTestCase):
DEFAULT_SCHEDULE = MAS
def test_valid_rows_are_populated(self):
p = mas.MASConsolidatedPriceList.load_from_upload(
uploaded_xlsx_file(MAS_XLSX_PATH))
self.assertEqual(len(p.valid_rows), 1)
try:
self.assertEqual(len(p.invalid_rows), 1)
except Exception:
print("invalid row found")
self.assertEqual(p.valid_rows[0].cleaned_data, {
'education_level': 'Bachelors',
'labor_category': 'Principal Consultant',
'min_years_experience': 10,
'price_including_iff': Decimal('200.00'),
'sin': '874-1',
'unit_of_issue': 'Hour',
'keywords': 'Process improvement, finance, senior project manager',
'certifications': 'PMP'
})
def test_education_level_is_validated(self):
p = mas.MASConsolidatedPriceList(rows=[{'education_level': 'Bachelors'}])
if 'education_level' in p.invalid_rows[0].errors.keys():
self.assertRegexpMatches(
p.invalid_rows[0].errors['education_level'][0],
r'This field must contain one of the following values'
)
def test_price_including_iff_is_validated(self):
p = mas.MASConsolidatedPriceList(rows=[{'price_including_iff': '1.10'}])
self.assertRegexpMatches(
p.invalid_rows[0].errors['price_including_iff'][0],
r'Price must be at least'
)
def test_min_years_experience_is_validated(self):
p = mas.MASConsolidatedPriceList(rows=[{'min_years_experience': ''}])
self.assertEqual(p.invalid_rows[0].errors['min_years_experience'],
['This field is required.'])
def test_unit_of_issue_is_validated(self):
p = mas.MASConsolidatedPriceList(rows=[{'unit_of_issue': ''}])
if 'unit_of_issue' in p.invalid_rows[0].errors.keys():
self.assertEqual(p.invalid_rows[0].errors['unit_of_issue'],
['This field is required.'])
p = mas.MASConsolidatedPriceList(rows=[{'unit_of_issue': 'Day'}])
self.assertEqual(p.invalid_rows[0].errors['unit_of_issue'],
['Value must be "Hour" or "Hourly"'])
def test_unit_of_issue_can_be_hour_or_hourly(self):
p = mas.MASConsolidatedPriceList(rows=[{'unit_of_issue': 'Hour'}])
self.assertNotIn('unit_of_issue', p.invalid_rows[0])
p = mas.MASConsolidatedPriceList(rows=[{'unit_of_issue': 'hourly'}])
self.assertNotIn('unit_of_issue', p.invalid_rows[0])
def test_add_to_price_list_works(self):
s = mas.MASConsolidatedPriceList.load_from_upload(
uploaded_xlsx_file(MAS_XLSX_PATH))
p = self.create_price_list()
p.save()
s.add_to_price_list(p)
row = p.rows.all()[0]
self.assertEqual(row.labor_category, 'Principal Consultant')
self.assertEqual(row.education_level, 'BA')
self.assertEqual(row.min_years_experience, 10)
self.assertEqual(row.base_year_rate, Decimal('200.00'))
self.assertEqual(row.sin, '874-1')
self.assertEqual(row.keywords, 'Process improvement, finance, senior project manager')
self.assertEqual(row.certifications, 'PMP')
row.full_clean()
def test_serialize_and_deserialize_work(self):
s = mas.MASConsolidatedPriceList.load_from_upload(
uploaded_xlsx_file(MAS_XLSX_PATH))
saved = json.dumps(registry.serialize(s))
restored = registry.deserialize(json.loads(saved))
self.assertTrue(isinstance(restored, mas.MASConsolidatedPriceList))
self.assertEqual(s.rows, restored.rows)
def test_to_table_works(self):
s = mas.MASConsolidatedPriceList.load_from_upload(
uploaded_xlsx_file(MAS_XLSX_PATH))
table_html = s.to_table()
self.assertIsNotNone(table_html)
self.assertTrue(isinstance(table_html, str))
def test_to_error_table_works(self):
s = mas.MASConsolidatedPriceList.load_from_upload(
uploaded_xlsx_file(MAS_XLSX_PATH))
table_html = s.to_error_table()
self.assertIsNotNone(table_html)
self.assertTrue(isinstance(table_html, str))
def test_render_upload_example_works(self):
html = mas.MASConsolidatedPriceList.render_upload_example()
for row in mas.EXAMPLE_SHEET_ROWS:
for col in row:
self.assertIn(html_module.escape(col), html)
``` |
{
"source": "18F/census-similarity",
"score": 3
} |
#### File: census-similarity/census_similarity/metrics.py
```python
import distance
from scipy import spatial
def cosine(left, right):
elements = set(left) | set(right)
elements = list(sorted(elements))
left = [int(el in left) for el in elements]
right = [int(el in right) for el in elements]
return spatial.distance.cosine(left, right)
def levenshtein(left, right):
return distance.levenshtein(left, right, normalized=True)
def jaccard(left, right):
return distance.jaccard(left, right)
``` |
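A rough illustration of how the three distance functions above behave on small inputs; the import path is assumed from the file header and the values in the comments are approximate:
```python
from census_similarity.metrics import cosine, jaccard, levenshtein  # path assumed

left, right = ["census", "tract", "block"], ["census", "block", "group"]
print(cosine(left, right))             # binary set vectors -> cosine distance ~= 0.33
print(jaccard(left, right))            # 1 - |intersection| / |union| = 0.5
print(levenshtein("tract", "track"))   # normalized edit distance = 0.2
```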
{
"source": "18F/cf-limit-check",
"score": 2
} |
#### File: 18F/cf-limit-check/check.py
```python
import os
import json
import sys
import requests
import marshmallow as ma
from webargs import fields
from awslimitchecker.checker import AwsLimitChecker
# todo (mxplusb): remove this in favour of something more...reasonable.
class Config(ma.Schema):
region = fields.Str(load_from='AWS_DEFAULT_REGION', required=True)
access_key_id = fields.Str(load_from='AWS_ACCESS_KEY_ID', required=False)
secret_access_key = fields.Str(load_from='AWS_SECRET_ACCESS_KEY', required=False)
services = fields.DelimitedList(fields.Str, load_from='SERVICES', required=True)
use_ta = fields.Bool(load_from='USE_TA', missing=True)
slack_url = fields.Str(load_from='SLACK_URL', required=True)
slack_username = fields.Str(load_from='SLACK_USERNAME', missing='limit-check')
slack_channel = fields.Str(load_from='SLACK_CHANNEL', required=True)
slack_icon = fields.Str(load_from='SLACK_ICON', required=True)
limit_overrides = fields.Str(load_from='LIMIT_OVERRIDES')
def check(config):
region = config["region"]
if "gov" in region: # servicequota is not yet available in govcloud.
checker = AwsLimitChecker(region=config["region"], skip_quotas=True)
else:
checker = AwsLimitChecker(region=config["region"])
overrides = json.loads(config["limit_overrides"])
# the EC2 limit overrides only apply to GovCloud, so drop them for commercial regions.
if "gov" not in region:
del overrides["EC2"]
checker.set_limit_overrides(override_dict=overrides)
warnings, errors = [], []
result = checker.check_thresholds(service=config['services'], use_ta=config['use_ta'])
w, e = process_result(result)
warnings.extend(w)
errors.extend(e)
attachments = errors + warnings
if attachments:
try:
requests.post(
config['slack_url'],
json={
'username': config['slack_username'],
'channel': config['slack_channel'],
'icon_url': config['slack_icon'],
'text': 'AWS Quota report:',
'attachments': attachments,
},
).raise_for_status()
except Exception as e:
print(e)
sys.exit(e)
for attachment in attachments:
print(
f'{attachment["title"]} - Current: {attachment["fields"][0]["value"]} Quota: {attachment["fields"][1]["value"]}')
def make_attachment(color, service, limit_name, usage, limit):
return {
"color": color,
"title": "{service}: {limit_name}".format(service=service, limit_name=limit_name),
"fields": [
{
"title": "Current Usage:",
"value": usage,
"short": True,
},
{
"title": "Quota Limit:",
"value": limit,
"short": True,
},
],
}
def process_result(result):
warnings, errors = [], []
for service, svc_limits in result.items():
for limit_name, limit in svc_limits.items():
for warn in limit.get_warnings():
warnings.append(make_attachment('warning', service, limit_name, str(warn), str(limit.get_limit())))
for crit in limit.get_criticals():
errors.append(make_attachment('danger', service, limit_name, str(crit), str(limit.get_limit())))
return warnings, errors
if __name__ == "__main__":
config = Config(strict=True).load(os.environ).data
check(config)
``` |
{
"source": "18F/cg-deploy-kibosh",
"score": 2
} |
#### File: kibosh/test/test_bind.py
```python
import uuid
import mysql.connector
import requests
from test_broker_base import TestBrokerBase
class TestBindUnbind(TestBrokerBase):
@classmethod
def setUpClass(self):
super().setUpClass()
self.binding_id = uuid.uuid4()
def call_bind(self):
path = "/v2/service_instances/{}/service_bindings/{}".format(self.instance_id, self.binding_id)
return self.call_broker(path, {
"service_id": self.service_id,
"plan_id": self.plan_id,
}, requests.put)
def test_bind_response_credentials(self):
json_body = self.call_bind()
self.assertIn("credentials", json_body)
def test_bind_template(self):
json_body = self.call_bind()
credentials = json_body["credentials"]
self.assertIn("hostname", credentials)
self.assertIn("jdbcUrl", credentials)
self.assertIn("name", credentials)
self.assertIn("password", credentials)
self.assertEqual(credentials["port"], 3306)
self.assertIn("uri", credentials)
self.assertEqual(credentials["username"], "root")
def test_connection(self):
json_body = self.call_bind()
credentials = json_body["credentials"]
user = credentials["username"]
password = credentials["password"]
host = credentials["hostname"]
port = credentials["port"]
cnx = mysql.connector.connect(
user=user, password=password, host=host, port=port
)
cnx.close()
def test_unbind_response(self):
path = "/v2/service_instances/{}/service_bindings/{}?service_id={}&plan_id={}".format(
self.instance_id, self.binding_id, self.service_id, self.plan_id,
)
delete_body = self.call_broker(path, {}, requests.delete)
self.assertEqual({}, delete_body)
``` |
{
"source": "18F/cg-deploy-shibboleth",
"score": 2
} |
#### File: cg-deploy-shibboleth/SATS/test_shibboleth.py
```python
import os
import random
import string
import time
from bs4 import BeautifulSoup
import pytest
from .uaaclient import UAAClient
from .integration_test import IntegrationTestClient, get_csrf_for_form
@pytest.fixture
def config():
config = {}
urls = {}
urls["uaa"] = os.environ["UAA_URL"]
urls["extras"] = os.environ["EXTRAS_URL"]
urls["idp"] = os.environ["IDP_URL"]
for url in urls:
if not urls[url][0:4] == "http":
urls[url] = "https://" + urls[url]
config["urls"] = urls
config["idp_name"] = os.environ["IDP_NAME"]
config["uaa_client"] = os.environ["UAA_USER"]
config["uaa_secret"] = os.environ["UAA_SECRET"]
return config
@pytest.fixture
def uaa(config):
uaac = UAAClient(config["urls"]["uaa"], None, verify_tls=True)
token = uaac._get_client_token(config["uaa_client"], config["uaa_secret"])
uaac.token = token
return uaac
@pytest.fixture
def user(uaa, config):
user = {}
user["name"] = (
"noreply+" + "".join(random.choices(string.ascii_lowercase, k=8)) + "@cloud.gov"
)
user["password"] = "".join(
random.choices(
string.ascii_lowercase + string.ascii_uppercase + string.digits, k=20
)
)
r = uaa.create_user(
user["name"],
"unimportant",
"alsounimportant",
user["name"],
password=user["password"],
origin="cloud.gov",
)
uaa.set_temporary_password(
config["uaa_client"], config["uaa_secret"], user["name"], user["password"]
)
yield user
uaa.delete_user(r["id"])
@pytest.fixture
def unauthenticated(config):
itc = IntegrationTestClient(
config["urls"]["extras"],
config["urls"]["idp"],
config["urls"]["uaa"],
config["idp_name"],
)
return itc
@pytest.fixture
def authenticated(unauthenticated, user):
token, changed = unauthenticated.log_in(user["name"], user["password"])
if changed:
user["token"] = token
return unauthenticated
def get_csrf(page_text) -> str:
page = BeautifulSoup(page_text, features="html.parser")
csrf = page.find(attrs={"name": "_csrf_token"}).attrs["value"]
return csrf
@pytest.mark.parametrize("page", ["/invite", "/change-password", "/first-login"])
def test_unauthenticated_pages_redirect(unauthenticated, page, config):
r = unauthenticated.get_page(page)
assert r.status_code == 200
assert r.url == config["urls"]["uaa"] + "/login"
def test_login_no_totp(unauthenticated, config, user):
# this is the happiest-path test
# log in to get/set our totp
token, changed = unauthenticated.log_in(user["name"], user["password"])
assert changed
# log out, so log in will work
unauthenticated.log_out()
# log in again to make sure we have the right totp
_, changed = unauthenticated.log_in(user["name"], user["password"], token)
assert not changed
def test_no_login_with_bad_password(unauthenticated, config, user):
response = unauthenticated.uaa_pick_idp()
assert response.status_code == 200
soup = BeautifulSoup(response.text, features="html.parser")
form = soup.find("form")
next_url = form.attrs["action"]
csrf = get_csrf_for_form(form)
response = unauthenticated.idp_start_log_in(next_url, csrf)
assert response.status_code == 200
soup = BeautifulSoup(response.text, features="html.parser")
form = soup.find("form")
next_url = form.attrs["action"]
csrf = get_csrf_for_form(form)
response = unauthenticated.idp_username_password_login(next_url, user["name"], "<PASSWORD>", csrf)
assert response.status_code == 200
assert "invalid username or password" in response.text.lower()
def test_no_login_with_bad_totp(unauthenticated, config, user):
# log in so we have a user with a good totp seed
token, changed = unauthenticated.log_in(user["name"], user["password"])
assert changed
# log out, so log in will work
unauthenticated.log_out()
response = unauthenticated.uaa_pick_idp()
assert response.status_code == 200
soup = BeautifulSoup(response.text, features="html.parser")
form = soup.find("form")
next_url = form.attrs["action"]
csrf = get_csrf_for_form(form)
response = unauthenticated.idp_start_log_in(next_url, csrf)
assert response.status_code == 200
soup = BeautifulSoup(response.text, features="html.parser")
form = soup.find("form")
next_url = form.attrs["action"]
csrf = get_csrf_for_form(form)
response = unauthenticated.idp_username_password_login(next_url, user["name"], user["password"], csrf)
assert response.status_code == 200
soup = BeautifulSoup(response.text, features="html.parser")
form = soup.find("form")
# the action will look like <url>?execution=e1s5
first_totp_step = int(form.attrs["action"][-1])
_, _, response = unauthenticated.idp_totp_login(response.text, totp_seed="asdf")
soup = BeautifulSoup(response.text, features="html.parser")
form = soup.find("form")
second_totp_step = int(form.attrs["action"][-1])
# the last digit of the execution value increments every time you retry the token
# this is kind of a silly test, but the token input UI doesn't change if you put in a bad token
# so this hopefully tests that we actually posted, failed the comparison, and got a response back
assert first_totp_step + 1 == second_totp_step
assert "token code from your authentication app" in response.text
``` |
{
"source": "18F/cg-django-uaa",
"score": 2
} |
#### File: 18F/cg-django-uaa/test.py
```python
import os
import sys
import distutils.cmd
from setuptools import setup, find_packages
import subprocess
from uaa_client import VERSION
class SimpleCommand(distutils.cmd.Command):
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
class ManualTestCommand(SimpleCommand):
description = "Run example app in a Docker container for manual testing."
SDIST_PATH = os.path.join("dist", "cg-django-uaa-{}.tar.gz".format(VERSION))
def run(self):
if not os.path.exists(self.SDIST_PATH):
print("Please run 'python setup.py sdist' first.")
sys.exit(1)
import django
django_version = django.get_version()
tag_name = "cg-django-uaa"
subprocess.check_call(
[
"docker",
"build",
"--build-arg",
"version={}".format(VERSION),
"--build-arg",
"django_version={}".format(django_version),
"-t",
tag_name,
".",
]
)
subprocess.check_call(
["docker", "run", "-it", "-p", "8000:8000", "--rm", tag_name]
)
class DevDocsCommand(SimpleCommand):
description = "Run development server for documentation"
def run(self):
subprocess.check_call(
["sphinx-autobuild", ".", "_build_html", "--port", "8001"], cwd="docs"
)
setup(cmdclass={"devdocs": DevDocsCommand, "manualtest": ManualTestCommand})
``` |
{
"source": "18F/cg-elb-log-ingestor",
"score": 3
} |
#### File: cg-elb-log-ingestor/elb_log_ingestor/api_endpoint.py
```python
import json
from http.server import BaseHTTPRequestHandler
class ApiEndpoint(BaseHTTPRequestHandler):
"""
Responds to web requests for health and stats checks. Set parser_stats, shipper_stats, shipper, and fetcher before using!
"""
parser_stats = None
shipper_stats = None
shipper = None
fetcher = None
def do_GET(self) -> None:
"""
Handle an HTTP GET
"""
self.protocol_version = 'HTTP/1.1'
if self.path == "/stats":
self.send_stats()
elif self.path == "/health":
self.send_health()
else:
# every branch above (send_error included) finishes its own response,
# so there is no trailing end_headers() here
self.send_error(404)
def send_stats(self) -> None:
"""
Send statistics as JSON
"""
parser = self.parser_stats.summary
shipper = self.shipper_stats.summary
parser["last_new_file_time"] = str(parser["last_new_file_time"])
shipper["last_document_indexed_at"] = str(shipper["last_document_indexed_at"])
stats = dict(parser=parser, shipper=shipper)
stats['queues'] = dict()
stats['queues']['shipper'] = dict(description='Records waiting to be sent to Elasticsearch', length=self.shipper.record_queue.qsize())
stats['queues']['files'] = dict(description='Files waiting to be processed', length=self.fetcher.to_do.qsize())
response = bytes(json.dumps(stats), 'utf-8')
self.send_response(200)
self.send_header("Content-type", "application/json")
self.send_header("Content-length", str(len(response)))
self.end_headers()
self.wfile.write(response)
def send_health(self) -> None:
"""
Send health information, with a 500 if the service is unhealthy
"""
response = dict()
response["elasticsearch_connected"] = self.shipper.healthy
response["s3_connected"] = self.fetcher.healthy
if response["elasticsearch_connected"] and response["s3_connected"]:
response["status"] = "UP"
response = bytes(json.dumps(response), 'utf-8')
self.send_response(200)
self.send_header("Content-Type", "application/json")
self.send_header("Content-Length", str(len(response)))
self.end_headers()
else:
response["status"] = "DOWN"
response = bytes(json.dumps(response), 'utf-8')
# emit the 500 by hand so the JSON body and Content-Length stay consistent
self.send_response(500)
self.send_header("Content-Type", "application/json")
self.send_header("Content-Length", str(len(response)))
self.end_headers()
self.wfile.write(response)
```
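A rough wiring sketch for the handler above; the import path is taken from the file header, and the stand-in objects only mimic the attributes the handler reads (`summary`, `healthy`, `record_queue`, `to_do`):
```python
import queue
from http.server import HTTPServer
from types import SimpleNamespace
from elb_log_ingestor.api_endpoint import ApiEndpoint  # path assumed

# stand-ins purely for illustration; the real parser/shipper/fetcher live elsewhere in the package
ApiEndpoint.parser_stats = SimpleNamespace(summary={"last_new_file_time": None})
ApiEndpoint.shipper_stats = SimpleNamespace(summary={"last_document_indexed_at": None})
ApiEndpoint.shipper = SimpleNamespace(healthy=True, record_queue=queue.Queue())
ApiEndpoint.fetcher = SimpleNamespace(healthy=True, to_do=queue.Queue())

HTTPServer(("0.0.0.0", 8080), ApiEndpoint).serve_forever()
```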
#### File: cg-elb-log-ingestor/elb_log_ingestor/elb_log_parse.py
```python
import datetime
import hashlib
import logging
import re
import typing
from pathlib import Path
import queue
from .stats import ParserStats
logger = logging.Logger(__name__)
def timestamp_to_timestamp(timestamp: str) -> str:
"""
Convert timestamp from what we want to what Elasticsearch wants
"""
dt = datetime.datetime.strptime(timestamp, "%Y-%m-%dT%H:%M:%S.%f%z")
dt = datetime.datetime.utcfromtimestamp(dt.timestamp())
return dt.isoformat(timespec="milliseconds") + "Z"
# source: https://docs.aws.amazon.com/athena/latest/ug/application-load-balancer-logs.html
ALB_LOG_LINE_REGEX = re.compile(
r"""
(?P<type>[^ ]*)
\ (?P<time>[^ ]*) # leading backslash escapes the leading space
\ (?P<elb>[^ ]*)
\ (?P<client_ip>[^ ]*):(?P<client_port>[0-9]*)
\ (?P<target_ip>[^ ]*)[:-](?P<target_port>[0-9]*)
\ (?P<request_processing_time>[-.0-9]*)
\ (?P<target_processing_time>[-.0-9]*)
\ (?P<response_processing_time>[-.0-9]*)
\ (?P<elb_status_code>[-0-9]*)
\ (?P<target_status_code>[-0-9]*)
\ (?P<received_bytes>[-0-9]*)
\ (?P<sent_bytes>[-0-9]*)
\ "(?P<request_verb>[^ ]*)
\ (?P<request_url>[^ ]*)
\ (?P<request_proto>-|[^ ]*)\ ?"
\ "(?P<user_agent>[^"]*)"
\ (?P<ssl_cipher>[A-Z0-9-]+)
\ (?P<ssl_protocol>[A-Za-z0-9.-]*)
\ (?P<target_group_arn>[^ ]*)
\ "(?P<trace_id>[^"]*)"
\ "(?P<domain_name>[^"]*)"
\ "(?P<chosen_cert_arn>[^"]*)"
\ (?P<matched_rule_priority>[-.0-9]*)
\ (?P<request_creation_time>[^ ]*)
\ "(?P<actions_executed>[^"]*)"
\ "(?P<redirect_url>[^"]*)
"(?P<lambda_error_reason>$|\ "[^ ]*") # probably never used
(?P<new_field>.*) # probably never used
""",
re.VERBOSE,
)
ELB_LOG_LINE_REGEX = re.compile(
r"""
(?P<time>[^ ]*)
\ (?P<elb>[^ ]*)
\ (?P<client_ip>[^ ]*):(?P<client_port>[0-9]*)
\ (?P<target_ip>[^ ]*)[:-](?P<target_port>[0-9]*)
\ (?P<request_processing_time>[-.0-9]*)
\ (?P<target_processing_time>[-.0-9]*)
\ (?P<response_processing_time>[-.0-9]*)
\ (?P<elb_status_code>|[-0-9]*)
\ (?P<target_status_code>-|[-0-9]*)
\ (?P<received_bytes>[-0-9]*)
\ (?P<sent_bytes>[-0-9]*)
\ "(?P<request_verb>[^ ]*)
\ (?P<request_url>[^ ]*)
\ (?P<request_proto>-|[^ ]*)\ ?"
\ "(?P<user_agent>[^"]*)"
\ (?P<ssl_cipher>[A-Z0-9-]+)
\ (?P<ssl_protocol>[A-Za-z0-9.-]*)
""",
re.VERBOSE,
)
# map of key name to desired type constructor
ALB_LOGS_FIELD_TYPES = {
"type": str,
"time": timestamp_to_timestamp,
"elb": str,
"client_ip": str,
"client_port": int,
"target_ip": str,
"target_port": int,
"request_processing_time": float,
"target_processing_time": float,
"response_processing_time": float,
"elb_status_code": int,
"target_status_code": int,
"received_bytes": int,
"sent_bytes": int,
"request_verb": str,
"request_url": str,
"request_proto": str,
"user_agent": str,
"ssl_cipher": str,
"ssl_protocol": str,
"target_group_arn": str,
"trace_id": str,
"domain_name": str,
"chosen_cert_arn": str,
"matched_rule_priority": str,
"request_creation_time": timestamp_to_timestamp,
"actions_executed": str,
"redirect_url": str,
"lambda_error_reason": str,
"new_field": str,
}
ALB = "alb"
ELB = "elb"
class LogParser:
"""
Parses a/elb log files into dictionaries of a/elb events
"""
def __init__(
self,
file_in_queue: queue.Queue,
file_out_queue: queue.Queue,
record_out_queue: queue.Queue,
stats: ParserStats,
) -> None:
# where we get files to process
self.file_in_queue = file_in_queue
# where we notifiy when files are done processing
self.file_out_queue = file_out_queue
# where we send records
self.outbox = record_out_queue
# where we publish stats
self.stats = stats
def run(self) -> None:
"""
Actually do the work:
- pull log files off the queue
- parse them
- put log events on the out queue
- put log filenames on the done queue
"""
while True:
lines = None
name = None
try:
name, lines = self.file_in_queue.get()
except queue.Empty:
pass
if name is not None:
self.stats.new_file_time()
self.parse_alb_logs(name, lines)
self.file_out_queue.put(name)
self.stats.increment_files_processed()
def parse_alb_logs(self, name, lines: typing.List[str]) -> None:
"""
Parse log lines and push their messages to the queue
"""
for line in lines:
line = line.strip()
log_type = ALB
match = ALB_LOG_LINE_REGEX.match(line)
if match is None:
log_type = ELB
match = ELB_LOG_LINE_REGEX.match(line)
if match is None:
self.stats.increment_lines_errored()
logger.error("failed to match: '%s'", line)
# skip this unparseable line rather than abandoning the rest of the file
continue
try:
match = coerce_match_types(match)
except ValueError as e:
logger.error("failed to coerce match: %s with %s", match, e)
if log_type is ALB:
match = format_alb_match(match)
else:
match = format_elb_match(match)
match = remove_empty_fields(match)
match = add_metadata(match, line, name)
if match is not None:
id_ = generate_id(match)
self.outbox.put((id_, match,))
self.stats.increment_lines_processed()
else:
self.stats.increment_lines_errored()
logger.error("match None after processing: '%s'", line)
def format_alb_match(match: typing.Dict) -> typing.Dict:
"""
Turn a match dict from an ELB log into a record appropriate for Elasticsearch
"""
new_match = {
# request_verb, _url, and _proto will be empty strings in some cases. Replace the empty strings with -
"@message": f"{match['request_verb'] or '-'} {match['request_url'] or '-'} {match['request_proto'] or '-'}",
"@timestamp": match["time"],
"@alb": {
"matched_rule_priority": match["matched_rule_priority"],
"actions_executed": match["actions_executed"],
"target_group_arn": match["target_group_arn"],
"domain_name": match["domain_name"],
"alb": {"id": match["elb"], "status_code": match["elb_status_code"]},
"received_bytes": match["received_bytes"],
"chosen_cert_arn": match["chosen_cert_arn"],
"client": {"ip": match["client_ip"], "port": match["client_port"]},
"response": {"processing_time": match["response_processing_time"]},
"redirect_url": match["redirect_url"],
"sent_bytes": match["sent_bytes"],
"trace_id": match["trace_id"],
"target": {
"port": match["target_port"],
"processing_time": match["target_processing_time"],
"status_code": match["target_status_code"],
"ip": match["target_ip"],
},
"type": match["type"],
"request": {
"verb": match["request_verb"],
"url": match["request_url"],
"protocol": match["request_proto"],
"processing_time": match["request_processing_time"],
"creation_time": match["request_creation_time"],
},
"user_agent": match["user_agent"],
},
}
return new_match
def format_elb_match(match: typing.Dict) -> typing.Dict:
"""
Turn a match dict from an ALB log into a record appropriate for Elasticsearch
"""
new_match = {
# request_verb, _url, and _proto will be empty strings in some cases. Replace the empty strings with -
"@message": f"{match['request_verb'] or '-'} {match['request_url'] or '-'} {match['request_proto'] or '-'}",
"@elb": {
"response": {"processing_time": match["response_processing_time"]},
"elb": {"id": match["elb"], "status_code": match["elb_status_code"]},
"ssl": {"cipher": match["ssl_cipher"], "protocol": match["ssl_protocol"]},
"sent_bytes": match["sent_bytes"],
"target": {
"port": match["target_port"],
"processing_time": match["target_processing_time"],
"status_code": match["target_status_code"],
"ip": match["target_ip"],
},
"received_bytes": match["received_bytes"],
"request": {
"user_agent": match["user_agent"],
"url": match["request_url"],
"processing_time": match["request_processing_time"],
"verb": match["request_verb"],
"protocol": match["request_proto"],
},
"client": {"ip": match["client_ip"], "port": match["client_port"]},
},
"@timestamp": match["time"],
}
return new_match
def add_metadata(record: typing.Dict, line: str, filename: str) -> typing.Dict:
"""
Add common metadata to match _in place_
line: the line the match was based on
filename: the name of the logfile the log was found in
"""
extra_metadata = {
"@input": "s3",
"@shipper.name": "elb_log_ingestor",
"@version": "1",
"@raw": line,
"@level": "INFO",
"tags": [],
"path": filename,
}
return {**record, **extra_metadata}
def coerce_match_types(match: re.Match) -> typing.Dict:
"""Convert an A/ELB log match into a dict with appropriate datatypes"""
d = match.groupdict()
for field, converter in ALB_LOGS_FIELD_TYPES.items():
if field in d:
# '-' is used to represent None-ish values
if d[field] == "-" or d[field] == "":
d[field] = None
if d[field] is not None:
d[field] = converter(d[field])
return d
def remove_empty_fields(d: typing.Dict) -> typing.Dict:
"""
Recursively remove empty collections and Nones from dict
"""
if d is None:
return None
# stash keys because we can't change a dict while iterating
# using keys()
keys = list(d.keys())
for k in keys:
v = d[k]
if isinstance(v, dict):
remove_empty_fields(v)
if v is None or v == {} or v == []:
d.pop(k)
return d
def generate_id(entry: typing.Dict) -> str:
"""
Generate a fairly unique key for a log entry
"""
if "@alb" in entry:
# ALBs already have one
key = entry["@alb"]["trace_id"]
else:
# for ELBs, take the elb id, client socket, timestamp, and size of the client request
key = ":".join(
[
entry["@elb"]["elb"]["id"],
entry["@elb"]["client"]["ip"],
str(entry["@elb"]["client"]["port"]),
entry["@timestamp"],
str(entry["@elb"]["received_bytes"]),
]
)
key = bytes(key, "utf-8")
# take a shasum of the key, mostly so people don't try to attach meaning to it
return hashlib.sha256(key).hexdigest()
``` |
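A tiny illustration of the ELB-style key built by `generate_id`; the field values are made up and the import path is assumed from the file header:
```python
from elb_log_ingestor.elb_log_parse import generate_id  # path assumed

entry = {
    "@timestamp": "2019-01-01T00:00:00.000Z",
    "@elb": {
        "elb": {"id": "my-elb"},
        "client": {"ip": "203.0.113.10", "port": 54321},
        "received_bytes": 123,
    },
}
# sha256 of "my-elb:203.0.113.10:54321:2019-01-01T00:00:00.000Z:123"
print(generate_id(entry))
```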
{
"source": "18F/cg-quotas-db",
"score": 3
} |
#### File: 18F/cg-quotas-db/api.py
```python
import io
import csv
import os
from sqlalchemy import func
from models import QuotaData, Quota
from quotas import db
class QuotaDataResource(QuotaData):
def details(self):
""" Displays QuotaData in dict format """
return {
'quota_guid': self.quota,
'date_collected': str(self.date_collected),
'memory_limit': self.memory_limit,
'total_routes': self.total_routes,
'total_services': self.total_services,
}
@classmethod
def aggregate(cls, quota_guid, start_date=None, end_date=None):
""" Counts the number of days a specific memory setting has
been active """
q = db.session.query(cls.memory_limit, func.count(cls.date_collected))
if start_date and end_date:
q = q.filter(cls.date_collected.between(start_date, end_date))
q = q.filter_by(quota=quota_guid).group_by(cls.memory_limit)
return q.all()
class QuotaResource(Quota):
def foreign_key_preparer(self, model, start_date=None, end_date=None):
""" Prepares data from foreign keys """
data = model.query.filter_by(quota=self.guid)
if start_date and end_date:
data = data.filter(
model.date_collected.between(start_date, end_date))
data = data.order_by(model.date_collected).all()
return [item.details() for item in data]
@staticmethod
def get_mem_cost(data):
""" Calculate the cost of services currently contains a
hard-coded cost for the short-term """
if data:
mb_cost = os.getenv('MB_COST_PER_DAY', 0.0033)
return sum([mem[0] * mem[1] * mb_cost for mem in data])
return 0
@staticmethod
def prepare_memory_data(data):
""" Given a memory array converts it to a more expressive dict
[[1875, 14]] -> [{'size': 1875, 'days': 14}] """
return [{'size': memory[0], 'days': memory[1]} for memory in data]
@staticmethod
def prepare_csv_row(row):
""" Prepares one quota to be exported in a csv row """
return [
row.get('name'),
row.get('guid'),
str(row.get('cost')),
str(row.get('created_at')),
]
def details(self):
""" Displays Quota in dict format """
return {
'guid': self.guid,
'name': self.name,
'created_at': str(self.created_at),
'updated_at': str(self.updated_at)
}
def data_details(self, start_date=None, end_date=None):
""" Displays Quota in dict format with data details """
memory_data = self.foreign_key_preparer(
model=QuotaDataResource, start_date=start_date, end_date=end_date)
return {
'guid': self.guid,
'name': self.name,
'created_at': str(self.created_at),
'updated_at': str(self.updated_at),
'memory': memory_data,
}
def data_aggregates(self, start_date=None, end_date=None):
""" Displays Quota in dict format with data details """
memory_data = QuotaDataResource.aggregate(
quota_guid=self.guid, start_date=start_date, end_date=end_date)
return {
'guid': self.guid,
'name': self.name,
'created_at': str(self.created_at),
'updated_at': str(self.updated_at),
'memory': self.prepare_memory_data(memory_data),
'cost': self.get_mem_cost(memory_data)
}
# Resources
@classmethod
def list_one_details(cls, guid, start_date=None, end_date=None):
""" List one quota along with all data on memory usage and services """
quota = cls.query.filter_by(guid=guid).first()
if quota:
return quota.data_details(
start_date=start_date, end_date=end_date)
@classmethod
def list_one_aggregate(cls, guid, start_date=None, end_date=None):
""" List one quota and aggregation of service and memory usage
by date """
quota = cls.query.filter_by(guid=guid).first()
if quota:
return quota.data_aggregates(
start_date=start_date, end_date=end_date)
@classmethod
def list_all(cls, start_date=None, end_date=None):
""" Lists all of the Quota data (This endpoint will be
refactored later) """
quotas = cls.query.order_by(cls.guid).all()
return [
quota.data_aggregates(start_date=start_date, end_date=end_date)
for quota in quotas
]
@classmethod
def generate_cvs(cls, start_date=None, end_date=None):
""" Return a csv version of the data starting with the header row """
output = io.StringIO()
writer = csv.writer(output)
writer.writerow([
'quota_name', 'quota_guid', 'quota_cost', 'quota_created_date'
])
for row in cls.list_all(start_date=start_date, end_date=end_date):
writer.writerow(cls.prepare_csv_row(row))
return output.getvalue()
```
#### File: 18F/cg-quotas-db/manage.py
```python
import os
from subprocess import call
from flask.ext.script import Manager
from flask.ext.migrate import Migrate, MigrateCommand
from quotas import app, db
from scripts import load_data
app.config.from_object(os.environ['APP_SETTINGS'])
manager = Manager(app)
# Migration Commands
migrate = Migrate(app, db)
manager.add_command('db', MigrateCommand)
@manager.command
def update_database():
"Updates database with quotas"
load_data()
@manager.command
def tests():
""" Run tests """
test_command = "nosetests --cover-package=cloudfoundry "
test_command += "--cover-package=models --cover-package=quotas "
test_command += "--cover-package=scripts --with-coverage"
call([test_command], shell=True)
@manager.command
def build():
""" Calls out to npm and ensures that the front end is built """
build_command = "npm install && npm run build"
call([build_command], shell=True)
if __name__ == '__main__':
manager.run()
```
#### File: migrations/versions/d869ae3c6_.py
```python
revision = 'd869ae3c6'
down_revision = None
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('quota',
sa.Column('guid', sa.String(), nullable=False),
sa.Column('name', sa.String(), nullable=True),
sa.Column('url', sa.String(), nullable=True),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.PrimaryKeyConstraint('guid')
)
op.create_table('data',
sa.Column('quota', sa.String(), nullable=False),
sa.Column('date_collected', sa.Date(), nullable=False),
sa.Column('memory_limit', sa.Integer(), nullable=True),
sa.Column('total_routes', sa.Integer(), nullable=True),
sa.Column('total_services', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['quota'], ['quota.guid'], ),
sa.PrimaryKeyConstraint('quota', 'date_collected', name='quota_guid_date')
)
op.create_table('service',
sa.Column('quota', sa.String(), nullable=False),
sa.Column('guid', sa.String(), nullable=False),
sa.Column('date_collected', sa.Date(), nullable=False),
sa.Column('name', sa.String(), nullable=True),
sa.ForeignKeyConstraint(['quota'], ['quota.guid'], ),
sa.PrimaryKeyConstraint('quota', 'guid', 'date_collected', name='quota_serviceguid_date')
)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_table('service')
op.drop_table('data')
op.drop_table('quota')
### end Alembic commands ###
```
#### File: 18F/cg-quotas-db/tests.py
```python
from unittest import mock
from flask.ext.testing import TestCase
import base64
import copy
import datetime
import requests
import types
import unittest
# App imports
from cloudfoundry import CloudFoundry
from quotas import app, db
from models import Quota, QuotaData
from api import QuotaResource, QuotaDataResource
import scripts
# Auth testings
from config import Config
from werkzeug.test import Client
from werkzeug.datastructures import Headers
# Flip app settings to testing
app.config.from_object('config.TestingConfig')
mock_quota = {
'metadata': {
'created_at': '2015-01-01T01:01:01Z',
'guid': 'test_quota',
'total_routes': 5,
'updated_at': '2015-01-01T01:01:01Z',
'url': '/v2/quota_definitions/test'
},
'entity': {
'name': 'test_quota_name',
'memory_limit': 1875,
'total_routes': 5,
'total_services': 2
}
}
mock_quota_2 = copy.deepcopy(mock_quota)
mock_quota_2['metadata']['guid'] = 'test_quota_2'
mock_quotas_data = {
'next_url': None,
'resources': [mock_quota, mock_quota_2]
}
mock_org_data = {
'next_url': None,
'resources': [
{
"entity": {
"quota_definition_url": "/v2/quota_definitions/f7963421",
"spaces_url": "/v2/organizations/f190f9a3/spaces",
}
},
{
"entity": {
"quota_definition_url": "/v2/quota_definitions/guid_2",
"spaces_url": "/v2/organizations/org_1/spaces",
}
}
]
}
mock_space_summary = {
'services': [
{'service_plan': {
'name': 'instance_1',
'service': {
'guid': 'guid_1',
'label': 'plan_label_1',
'provider': 'core'
},
}},
{'service_plan': {
'name': 'instance_1',
'service': {
'guid': 'guid_2',
'label': 'plan_label_2',
'provider': 'core'
},
}},
]
}
mock_token_data = {'access_token': '<PASSWORD>', 'expires_in': 0}
class MockReq:
""" Returns a mock token in json form """
def __init__(self, data):
self.data = data
def json(self):
return self.data
def mock_token(func):
""" Patches post request and return a mock token """
def test_mock_token(*args, **kwargs):
with mock.patch.object(
requests, 'post', return_value=MockReq(data=mock_token_data)):
return func(*args, **kwargs)
return test_mock_token
def mock_quotas_request(func):
""" Patches get request and return mock quota definitions """
def test_mock_get(*args, **kwargs):
with mock.patch.object(
requests, 'get',
return_value=MockReq(data=mock_quotas_data)):
return func(*args, **kwargs)
return test_mock_get
def mock_orgs_request(func):
""" Patches get request and return mock quota definitions """
def test_mock_get(*args, **kwargs):
with mock.patch.object(
requests, 'get',
return_value=MockReq(data=mock_org_data)):
return func(*args, **kwargs)
return test_mock_get
class CloudFoundryTest(unittest.TestCase):
""" Test CloudFoundry client """
@mock_token
def setUp(self):
self.cf = CloudFoundry(
uaa_url='login.test.com',
api_url='api.test.com',
username='<EMAIL>',
password='******')
@mock_token
def test_init(self):
""" Test that CloudFoundry object initializes properly """
self.assertEqual(self.cf.api_url, 'api.test.com')
self.assertEqual(self.cf.username, 'mock<EMAIL>')
self.assertEqual(self.cf.password, '******')
self.assertEqual(self.cf.token['access_token'], '<PASSWORD>')
self.assertEqual(self.cf.token['expires_in'], 0)
@mock_token
def test_prepare_token(self):
""" Test that token is prepared properly to make api call """
token = self.cf.prepare_token()
self.assertEqual(token, '<PASSWORD>')
@mock_token
def test_token_renewed(self):
""" Check that token is renewed """
old_token_time = self.cf.token['time_stamp']
self.cf.prepare_token()
new_token_time = self.cf.token['time_stamp']
self.assertNotEqual(old_token_time, new_token_time)
@mock_token
@mock_quotas_request
def test_make_request(self):
""" Check that calling api works properly """
get_req = self.cf.make_request('http://api.test.com')
self.assertEqual(len(get_req.json()['resources']), 2)
@mock_token
@mock_quotas_request
def test_get_quotas(self):
""" Test that quotas are obtained properly """
quotas = list(self.cf.get_quotas())
self.assertEqual(len(quotas), 2)
@mock_token
@mock_quotas_request
def test_yield_request(self):
""" Test that yield_request produces a generator that iterates through
pages """
quotas = self.cf.yield_request('v2/quota_definitions/quota_guid')
self.assertTrue(isinstance(quotas, types.GeneratorType))
self.assertEqual(len(list(quotas)[0]['resources']), 2)
@mock_token
@mock_orgs_request
def test_get_orgs(self):
""" Test that function produces a generator that iterates through
orgs """
orgs = list(self.cf.get_orgs())
self.assertEqual(len(orgs[0]['resources']), 2)
class QuotaModelsTest(TestCase):
""" Test Database """
def create_app(self):
app.config['LIVESERVER_PORT'] = 8943
return app
def setUp(self):
db.create_all()
def tearDown(self):
db.session.remove()
db.drop_all()
def test_create_quota(self):
""" Check that quota is created properly """
# Create quota
new_quota = Quota(guid='test_guid', name='test_name', url='test_url')
new_quota.created_at = datetime.date(2014, 1, 1)
new_quota.updated_at = datetime.date(2015, 1, 1)
db.session.add(new_quota)
db.session.commit()
# Find quota in database
quota = Quota.query.filter_by(guid='test_guid').first()
self.assertEqual(quota.name, 'test_name')
self.assertEqual(quota.url, 'test_url')
self.assertEqual(quota.created_at, datetime.datetime(2014, 1, 1))
self.assertEqual(quota.updated_at, datetime.datetime(2015, 1, 1))
def test_primary_key_constraint(self):
""" Test that only one instance of a quota can be created """
# Adding two instances of the same Quota with same dates
new_quota = Quota(guid='test_guid', name='test_name', url='test_url')
db.session.add(new_quota)
new_quota = Quota(guid='test_guid', name='test_name', url='test_url')
db.session.merge(new_quota)
db.session.commit()
# Getting data from quota
quotas = Quota.query.filter_by(guid='test_guid').all()
self.assertEqual(len(quotas), 1)
class DatabaseForeignKeyTest(TestCase):
""" Test Database """
def create_app(self):
app.config['LIVESERVER_PORT'] = 8943
return app
def setUp(self):
db.create_all()
self.quota = Quota(guid='guid', name='test_name', url='test_url')
db.session.add(self.quota)
db.session.commit()
def tearDown(self):
db.session.remove()
db.drop_all()
def test_quota_data(self):
""" Check that quota data can be added """
# Adding QuotaData
quota_data = QuotaData(self.quota, datetime.date(2014, 1, 1))
quota_data.memory_limit = 1
quota_data.total_routes = 2
quota_data.total_services = 3
self.quota.data.append(quota_data)
db.session.commit()
# Retrieve QuotaData
quota = Quota.query.filter_by(guid='guid').first()
self.assertEqual(quota.name, 'test_name')
self.assertEqual(len(list(quota.data)), 1)
self.assertEqual(quota.data[0].memory_limit, 1)
self.assertEqual(quota.data[0].total_routes, 2)
self.assertEqual(quota.data[0].total_services, 3)
self.assertEqual(quota.data[0].date_collected.year, 2014)
def test_primary_key_constraints_for_quotadata(self):
""" Check that the PrimaryKeyConstraints work for QuotaData """
failed = False
quota_data = QuotaData(self.quota)
quota_data_2 = QuotaData(self.quota)
self.quota.data.append(quota_data)
self.quota.data.append(quota_data_2)
try:
db.session.commit()
except:
failed = True
self.assertTrue(failed)
def test_quota_data_one_to_many(self):
""" Check that the relationship between Quota and QuotaData is
one to many """
# Creating Quota and 2 instances QuotaData with diff. dates
quota_data = QuotaData(self.quota)
quota_data.date_collected = datetime.date(2015, 1, 1)
quota_data_2 = QuotaData(self.quota)
self.quota.data.append(quota_data)
self.quota.data.append(quota_data_2)
db.session.commit()
# Retrieve QuotaData
quota = Quota.query.filter_by(guid='guid').first()
self.assertEqual(len(list(quota.data)), 2)
class APITest(TestCase):
""" Test API """
def create_app(self):
app.config['LIVESERVER_PORT'] = 8943
return app
def setUp(self):
db.create_all()
quota = Quota(guid='test_guid', name='test_name', url='test_url')
db.session.add(quota)
quota2 = QuotaResource(
guid='test_guid_2', name='test_name_2', url='test_url_2')
db.session.add(quota2)
db.session.commit()
quota_data = QuotaData(quota, datetime.date(2013, 1, 1))
quota_data.memory_limit = 2000
quota.data.append(quota_data)
quota_data = QuotaData(quota, datetime.date(2014, 1, 1))
quota_data.memory_limit = 1000
quota.data.append(quota_data)
quota_data = QuotaData(quota, datetime.date(2015, 1, 1))
quota_data.memory_limit = 1000
quota.data.append(quota_data)
db.session.commit()
db.session.commit()
def tearDown(self):
db.session.remove()
db.drop_all()
def test_details(self):
""" Check that the details function returns dict of the quota """
quota = QuotaResource.query.filter_by(guid='test_guid').first()
quota_dict = quota.details()
self.assertEqual(
sorted(list(quota_dict.keys())),
['created_at', 'guid', 'name', 'updated_at'])
def test_list_one_details(self):
""" Check that list one function returns dict of one quota """
one_quota = QuotaResource.list_one_details(guid='test_guid')
self.assertEqual(one_quota['guid'], 'test_guid')
self.assertEqual(one_quota['name'], 'test_name')
def test_list_one_aggregate(self):
""" Check that the aggregator functionp produces all data include
cost """
one_quota = QuotaResource.list_one_aggregate(guid='test_guid')
self.assertEqual(one_quota['guid'], 'test_guid')
self.assertEqual(one_quota['cost'], 13.2)
def test_list_all(self):
""" Check that list all function returns dict of multiple quotas """
quotas = QuotaResource.list_all()
self.assertEqual(len(quotas), 2)
self.assertEqual(quotas[0]['guid'], 'test_guid')
self.assertEqual(quotas[1]['guid'], 'test_guid_2')
def test_get_mem_single_cost(self):
""" Check that the cost function works with multiple days
with single mem limit """
sample_data = [[1875, 14]]
cost = QuotaResource.get_mem_cost(sample_data)
self.assertEqual(cost, 86.625)
def test_get_mem_cost_multiple_mem_types(self):
""" Check that the cost function works with multiple days
with multiple memory limits """
sample_data = [[1875, 14], [2000, 15]]
cost = QuotaResource.get_mem_cost(sample_data)
self.assertEqual(cost, 185.625)
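# Editor's note on the expected values above: both figures are consistent with
# a flat rate of 0.0033 per MB-day, which appears to be the constant used by
# QuotaResource.get_mem_cost:
#   1875 * 14 * 0.0033 = 86.625
#   86.625 + (2000 * 15 * 0.0033) = 86.625 + 99.0 = 185.625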
def test_prepare_memory_data(self):
""" Check that memory data is prepared into more descriptive format """
sample_data = [[1875, 14], [2000, 15]]
memory_data = QuotaResource.prepare_memory_data(sample_data)
self.assertEqual([
{'size': 1875, 'days': 14}, {'size': 2000, 'days': 15}
], memory_data)
def test_prepare_csv_row(self):
""" Check that function returns one row of prepared csv data """
sample_row = {
'name': 'test',
'guid': 'id2',
'cost': 4,
'created_at': datetime.datetime(2014, 4, 4)
}
row = QuotaResource.prepare_csv_row(sample_row)
self.assertEqual(['test', 'id2', '4', '2014-04-04 00:00:00'], row)
def test_generate_cvs(self):
""" Check that function returns a csv generator """
csv = QuotaResource.generate_cvs().split('\r\n')
self.assertEqual(
'quota_name,quota_guid,quota_cost,quota_created_date',
csv[0])
self.assertEqual('test_name,test_guid,13.2,None', csv[1])
self.assertEqual('test_name_2,test_guid_2,0,None', csv[2])
def test_quota_list_one_with_data_details(self):
""" Check that list one returns a list of data details within the
designated time period """
# Check that correct quota data is returned by date strings
one_quota = QuotaResource.list_one_details(
guid='test_guid', start_date='2013-12-31', end_date='2014-07-02')
self.assertEqual(len(one_quota['memory']), 1)
# Check that correct quota data is returned by datetime.dates
one_quota = QuotaResource.list_one_details(
guid='test_guid',
start_date=datetime.date(2013, 12, 31),
end_date=datetime.date(2014, 1, 2))
self.assertEqual(len(one_quota['memory']), 1)
def test_quotadata_details(self):
""" Check that details function returns dict for a specific
quotadata object """
data = QuotaDataResource.query.filter_by(quota='test_guid').first()
self.assertTrue('memory_limit' in data.details().keys())
def test_quotadata_aggregate(self):
""" Check that the aggregate function return the number of days a
Quota has been active
"""
# Aggregate
data = QuotaDataResource.aggregate(quota_guid='test_guid')
# Data looks like this [(1000, 2), (2000, 1)]
# Addition test allows the test to work with postgres and sqlite
self.assertEqual(data[0][1] + data[1][1], 3)
# Aggregate with dates
data = QuotaDataResource.aggregate(
quota_guid='test_guid',
start_date='2013-01-01',
end_date='2014-07-01')
# Data looks like this [(1000, 1), (2000, 1)]
# Addition test allows the test to work with postgres and sqlite
self.assertEqual(data[0][1] + data[1][1], 2)
def test_foreign_key_preparer(self):
""" Verify that function prepares a details list for a given
foreign key """
quota = QuotaResource.query.filter_by(guid='test_guid').first()
# Check function with no date range
data = quota.foreign_key_preparer(QuotaDataResource)
self.assertEqual(len(data), 3)
# Check function with date range
data = quota.foreign_key_preparer(
QuotaDataResource, start_date='2013-12-31', end_date='2014-1-2')
self.assertEqual(len(data), 1)
# Set header for auth
valid_header = Headers()
auth = '{0}:{1}'.format(Config.USERNAME, Config.PASSWORD).encode('ascii')
valid_header.add('Authorization', b'Basic ' + base64.b64encode(auth))
class QuotaAppTest(TestCase):
""" Test Database """
def create_app(self):
app.config['LIVESERVER_PORT'] = 8943
return app
@classmethod
def setUpClass(cls):
db.create_all()
quota_1 = Quota(guid='guid', name='test_name', url='test_url')
db.session.add(quota_1)
quota_2 = Quota(guid='guid_2', name='test_name_2', url='test_url_2')
db.session.add(quota_2)
quota_data = QuotaData(quota_1)
quota_data.date_collected = datetime.date(2014, 1, 1)
quota_data.memory_limit = 1000
quota_data_2 = QuotaData(quota_1)
quota_data_2.memory_limit = 1000
quota_1.data.append(quota_data)
quota_1.data.append(quota_data_2)
db.session.commit()
@classmethod
def tearDownClass(cls):
db.session.remove()
db.drop_all()
def test_main_page_locked(self):
""" Check if main page is locked """
response = self.client.get("/")
self.assert_401(response)
self.assertTrue('WWW-Authenticate' in response.headers)
self.assertTrue('Basic' in response.headers['WWW-Authenticate'])
def test_api_page_locked(self):
""" Check if api endpoints are locked """
response = self.client.get("/api/quotas/")
self.assert_401(response)
response = self.client.get("/api/quotas/guid/")
self.assert_401(response)
def test_admin_page_rejects_bad_password(self):
""" Check that incorrect password won't allow access """
h = Headers()
auth = '{0}:foo'.format(Config.USERNAME).encode('ascii')
h.add('Authorization', b'Basic ' + base64.b64encode(auth))
rv = Client.open(self.client, path='/', headers=h)
self.assert_401(rv)
def test_admin_page_rejects_bad_username(self):
""" Check that incorrect username won't allow access """
h = Headers()
auth = 'foo:{0}'.format(Config.PASSWORD).encode('ascii')
h.add('Authorization', b'Basic ' + base64.b64encode(auth))
rv = Client.open(self.client, path='/', headers=h)
self.assert_401(rv)
def test_admin_page_allows_valid_login(self):
""" Check that correct username and password will allow access """
h = Headers()
auth = '{0}:{1}'.format(
Config.USERNAME, Config.PASSWORD).encode('ascii')
h.add('Authorization', b'Basic ' + base64.b64encode(auth))
rv = Client.open(self.client, path='/', headers=h)
self.assert_200(rv)
def test_main_page(self):
""" Test the main page """
response = Client.open(self.client, path='/', headers=valid_header)
self.assertEqual(response.status_code, 200)
def test_api_quotas_page(self):
""" Test the quota list page """
response = Client.open(
self.client, path="/api/quotas/", headers=valid_header)
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.json['Quotas']), 2)
def test_api_quota_detail_page(self):
""" Test the quota details page """
response = Client.open(
self.client, path="/api/quotas/guid/", headers=valid_header)
self.assertEqual(response.status_code, 200)
# Check if quota was rendered
self.assertTrue('guid' in response.json.keys())
# Check if quota data was rendered
self.assertEqual(len(response.json['memory']), 1)
def test_api_quota_detail_page_no_data(self):
""" Test the quota details page when there is no data """
response = Client.open(
self.client, path="/api/quota/wrongguid/", headers=valid_header)
self.assertEqual(response.status_code, 404)
def test_api_quota_detail_dates(self):
""" Test the quota details date range page functions """
response = Client.open(
self.client,
path="/api/quotas/guid/?since=2013-12-31&until=2014-1-1",
headers=valid_header)
self.assertEqual(response.status_code, 200)
# Check if quota data was rendered within date range
self.assertEqual(len(response.json['memory']), 1)
def test_api_quota_detail_page_one_date(self):
""" Test the quota details page with only the since parameter """
response = Client.open(
self.client,
path="/api/quotas/guid/?since=2013-12-31",
headers=valid_header)
self.assertEqual(response.status_code, 200)
# Check if quota was rendered
self.assertTrue('guid' in response.json.keys())
# Check if quota data was rendered
self.assertEqual(len(response.json['memory']), 1)
def test_api_quota_detail_dates_no_data(self):
""" Test the quota details page when there are date but no data """
response = Client.open(
self.client,
path="/api/quota/wrongguid/2013-12-31/2014-1-1/",
headers=valid_header)
self.assertEqual(response.status_code, 404)
def test_api_quotas_list_page(self):
""" Test the quotas list page """
response = Client.open(
self.client, path="/api/quotas/", headers=valid_header)
self.assertEqual(response.status_code, 200)
data = response.json['Quotas']
# Check if all quotas present
self.assertEqual(len(data), 2)
# Check if quota data contains data details
self.assertEqual(len(data[0]['memory']), 1)
def test_api_quotas_list_dates(self):
""" Test the quotas list page with dates """
response = Client.open(
self.client,
path="/api/quotas/?since=2012-12-31&until=2013-1-1",
headers=valid_header)
self.assertEqual(response.status_code, 200)
data = response.json['Quotas']
# Check if all quotas present
self.assertEqual(len(data), 2)
# Check if quota data contains memory data only when between the given dates
self.assertEqual(len(data[0]['memory']), 0)
def test_api_quotas_list_page_one_date(self):
""" Test the quotas list page when only since date is given """
response = Client.open(
self.client,
path="/api/quotas/?since=2012-12-31",
headers=valid_header)
self.assertEqual(response.status_code, 200)
data = response.json['Quotas']
# Check if all quotas present
self.assertEqual(len(data), 2)
# Check if quota data contains data details
self.assertEqual(len(data[0]['memory']), 1)
class LoadingTest(TestCase):
""" Test Database """
def create_app(self):
app.config['LIVESERVER_PORT'] = 8943
return app
def setUp(self):
db.create_all()
def tearDown(self):
db.session.remove()
db.drop_all()
def test_get_datetime(self):
""" Test that date string coverted to date """
date = '2015-01-01T01:01:01Z'
new_data = scripts.get_datetime(date)
self.assertEqual(new_data, datetime.date(2015, 1, 1))
def test_update_quota(self):
""" Test that function inserts quota into database """
scripts.update_quota(mock_quota)
quota = Quota.query.filter_by(guid='test_quota').first()
self.assertEqual(quota.name, 'test_quota_name')
def test_name_change(self):
""" Test that function changes name but keeps guid in case of a name
change """
scripts.update_quota(mock_quota)
quota = Quota.query.filter_by(guid='test_quota').first()
self.assertEqual(quota.name, 'test_quota_name')
self.assertEqual(quota.data[0].memory_limit, 1875)
mock_quota_name = copy.deepcopy(mock_quota)
mock_quota_name['entity']['name'] = "new_name"
scripts.update_quota(mock_quota_name)
quota = Quota.query.filter_by(guid='test_quota').first()
self.assertEqual(quota.name, 'new_name')
self.assertEqual(quota.data[0].memory_limit, 1875)
def test_update_quota_data(self):
""" Test that function inserts quota data into database """
# Add quota
quota = Quota(guid='guid', name='test_name', url='test_url')
db.session.add(quota)
# Add quota data
scripts.update_quota_data(
quota_model=quota, entity_data=mock_quota['entity'])
db.session.commit()
# Check if data was added
quota = Quota.query.filter_by(guid='guid').first()
self.assertEqual(quota.data[0].memory_limit, 1875)
def test_get_or_create_create(self):
""" Test that get_or_create function creates a new object """
quota = Quota.query.filter_by(guid='test_guid').all()
self.assertEqual(len(quota), 0)
create_quota, created = scripts.get_or_create(
Quota, guid='test_guid', name='test_name', url='test_url')
self.assertTrue(created)
found = Quota.query.filter_by(guid='test_guid').all()
self.assertEqual(len(found), 1)
def test_get_or_create_get(self):
""" Test that get_or_create function gets an old object """
# Create and add a quota
quota = Quota(guid='test_guid', name='test_name', url='test_url')
db.session.add(quota)
db.session.commit()
# Try to get the same quota
ret_quota, created = scripts.get_or_create(
Quota, guid='test_guid', name='test_name', url='test_url')
self.assertEqual(ret_quota.guid, 'test_guid')
self.assertFalse(created)
# Check if there are duplicates
found = Quota.query.filter_by(guid='test_guid').all()
self.assertEqual(len(found), 1)
if __name__ == "__main__":
unittest.main()
``` |
{
"source": "18F/cg-scripts",
"score": 2
} |
#### File: 18F/cg-scripts/cf-audit.py
```python
description = """
Retrieve Cloud Foundry events from CF by time and user.
Useful for auditing following suspected credential leaks or unauthorized access.
Events retrieved will be all events visible to the user running the script,
regardless of what org/space they're logged in to.
cf cli does most of the lifting here. This script basically just implements paging
and user filtering for your convenience.
Events are returned as JSON to stdout.
"""
import argparse
import json
import logging
import subprocess
import urllib
from urllib.parse import urlparse
def main():
    args = get_args()
    queries = []
    if args.after:
        queries.append(f'created_ats[gt]={args.after}')
    if args.before:
        queries.append(f'created_ats[lt]={args.before}')
    initial_request = f'/v3/audit_events?{"&".join(queries)}'
    logging.info('getting %s', initial_request)
    cf_out = subprocess.check_output(['cf', 'curl', initial_request], universal_newlines=True)
    cf_out = json.loads(cf_out)
    if args.user:
        events = [event for event in cf_out['resources'] if event['actor']['name'] == args.user]
    else:
        events = cf_out['resources']
    # Follow pagination links until none remain; guard against a missing
    # "next" link when all results fit on the first page.
    raw_next_url = cf_out['pagination']['next']
    while raw_next_url is not None:
        next_url_split = urlparse(raw_next_url['href'])
        next_url = next_url_split.path + '?' + next_url_split.query
        logging.info('getting %s', next_url)
        cf_out = subprocess.check_output(['cf', 'curl', next_url], universal_newlines=True)
        cf_out = json.loads(cf_out)
        if args.user:
            resources = [event for event in cf_out['resources'] if event['actor']['name'] == args.user]
        else:
            resources = cf_out['resources']
        events.extend(resources)
        raw_next_url = cf_out['pagination']['next']
    print(json.dumps(events))
def get_args():
logging.basicConfig(level=logging.INFO)
parser = argparse.ArgumentParser(
description=description
)
parser.add_argument('--after', help="find events after this timestamp (timestamp UTC format YYYY-MM-DDThh:mm:ssZ)")
parser.add_argument('--before', help="find events before this timestamp (timestamp UTC format YYYY-MM-DDThh:mm:ssZ)")
parser.add_argument('--user', help='find events for this user')
return parser.parse_args()
if __name__ == '__main__':
main()
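# Example invocation (illustrative; the timestamps and username are placeholders,
# and an authenticated `cf` CLI session is assumed):
#   python cf-audit.py --after 2021-01-01T00:00:00Z --before 2021-01-02T00:00:00Z \
#       --user audited.user@example.gov > events.json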
```
#### File: 18F/cg-scripts/update-cloudfront-distributions.py
```python
import argparse
import json
import datetime
import time
import boto3
RESTRICTIONS = {
"GeoRestriction": {
"RestrictionType": "whitelist",
"Quantity": 43,
"Items": [
"SI",
"UM",
"NL",
"MT",
"VI",
"FR",
"ES",
"RO",
"AQ",
"AU",
"AS",
"SE",
"FI",
"BG",
"DE",
"PR",
"SK",
"EE",
"LV",
"LU",
"KR",
"GU",
"LT",
"GB",
"GR",
"NZ",
"AT",
"CX",
"MX",
"JP",
"HU",
"MH",
"IE",
"PL",
"CA",
"PT",
"BE",
"DK",
"CZ",
"IT",
"CH",
"US",
"HR"
]
}
}
UPDATED_TAG = {"Key": "GeoRestriction", "Value": "Default"}
def main():
args = get_args()
client = boto3.client("cloudfront")
distros = get_all_distros(client)
if args.filters:
distros = [distro for distro in distros if distro['id'] in args.filters]
for distro in distros:
distro['config']['Restrictions'] = RESTRICTIONS
if args.update:
client.update_distribution(DistributionConfig=distro['config'], Id=distro['id'], IfMatch=distro['etag'])
client.tag_resource(Resource=distro['arn'], Tags=dict(Items=[UPDATED_TAG]))
time.sleep(3)
def get_all_distros(client):
distroresponse = client.list_distributions()
distros = []
for item in distroresponse['DistributionList']['Items']:
cfg = client.get_distribution_config(Id=item['Id'])
distros.append(dict(id=item['Id'], etag=cfg['ETag'], arn=item['ARN'], config=cfg['DistributionConfig']))
while distroresponse['DistributionList'].get("NextMarker"):
distroresponse = client.list_distributions(Marker=distroresponse['DistributionList']['NextMarker'])
for item in distroresponse['DistributionList']['Items']:
cfg = client.get_distribution_config(Id=item['Id'])
distros.append(dict(id=item['Id'], etag=cfg['ETag'], arn=item['ARN'], config=cfg['DistributionConfig']))
return distros
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument("--filters", type=str, nargs="+")
parser.add_argument("--update", action="store_true", help="actually update CloudFront")
return parser.parse_args()
if __name__ == "__main__":
main()
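# Example invocation (illustrative; the distribution ID is a placeholder, and
# boto3 is assumed to have CloudFront credentials available):
#   python update-cloudfront-distributions.py --filters E1A2B3C4D5E6F7 --update
# Omitting --update performs a dry run that only builds the new configs.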
``` |
{
"source": "18F/cg-uaa-extras",
"score": 2
} |
#### File: uaaextras/clients/cf.py
```python
import logging
from cloudfoundry_client.client import CloudFoundryClient
from cloudfoundry_client.errors import InvalidStatusCode
logger = logging.getLogger(__name__)
class CFClient(object):
"""
A minimal client for CF
"""
def __init__(self, target_endpoint, token):
self.target_endpoint = target_endpoint
self.token = token
def _get_cf_client(self):
client = CloudFoundryClient(self.target_endpoint)
client._access_token = self.token
return client
def is_org_manager(self, client, user_id):
for role in client.v3.roles.list(user_guids=user_id):
if role['type'] == 'organization_manager':
return True
return False
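# Minimal usage sketch (the endpoint, token, and GUID below are placeholders):
#   cf = CFClient('https://api.example.cloud.gov', token='<token>')
#   client = cf._get_cf_client()
#   cf.is_org_manager(client, 'some-user-guid')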
```
#### File: uaaextras/clients/totp.py
```python
from sqlalchemy.sql import text
class TOTPClient:
def __init__(self, db_engine) -> None:
self.db_engine = db_engine
def unset_totp_seed(self, user: str):
"""
remove existing totp seed for a user
"""
with self.db_engine.connect() as conn:
delete = text("DELETE FROM totp_seed WHERE username = :username")
conn.execute(delete, username=user)
def get_user_totp_seed(self, user: str) -> str:
"""
get a user's totp seed
"""
with self.db_engine.connect() as conn:
select = text("SELECT seed FROM totp_seed WHERE username = :username")
result = conn.execute(select, username=user).fetchall()
if len(result) == 1:
return result[0][0]
else:
return None
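# Minimal usage sketch (the connection string and username are placeholders):
#   from sqlalchemy import create_engine
#   totp = TOTPClient(create_engine('postgresql:///uaa_extras'))
#   seed = totp.get_user_totp_seed('someone@example.gov')
#   totp.unset_totp_seed('someone@example.gov')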
```
#### File: cg-uaa-extras/uaaextras/validators.py
```python
def email_valid_for_domains(address, domains):
"""
check if the email address's domain appears to be a hostname within the provided domains
"""
address, address_domain = address.split('@')
return any(host_valid_for_domain(address_domain, domain) for domain in domains)
def host_valid_for_domain(host, domain):
valid_parts = domain.split('.')
host_parts = host.split('.')
overlap = host_parts[-1*len(valid_parts):]
return overlap == valid_parts
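# Illustrative behaviour of the suffix match above (editor's examples):
#   host_valid_for_domain('mail.gsa.gov', 'gsa.gov')  -> True   (subdomain)
#   host_valid_for_domain('gsa.gov', 'gsa.gov')       -> True   (exact match)
#   host_valid_for_domain('notgsa.gov', 'gsa.gov')    -> False  (labels must match exactly)
#   email_valid_for_domains('someone@mail.gsa.gov', ['gsa.gov'])  -> True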
``` |
{
"source": "18F/ckanext-datajson",
"score": 2
} |
#### File: ckanext/datajson/parse_datajson.py
```python
import re
def parse_datajson_entry(datajson, package, defaults):
package["title"] = datajson.get("title", defaults.get("Title"))
package["notes"] = datajson.get("description", defaults.get("Notes"))
package["tags"] = [ { "name": t } for t in
datajson.get("keyword", defaults.get("Tags", "")).split(",") if t.strip() != ""]
package["groups"] = [ { "name": g } for g in
defaults.get("Groups", [])] # the complexity of permissions makes this useless, CKAN seems to ignore
package["organization"] = datajson.get("organization", defaults.get("Organization"))
extra(package, "Group Name", defaults.get("Group Name")) # i.e. dataset grouping string
extra(package, "Date Updated", datajson.get("modified"))
extra(package, "Agency", defaults.get("Agency")) # i.e. federal department
package["publisher"] = datajson.get("publisher", defaults.get("Author")) # i.e. agency within HHS
extra(package, "author_id", defaults.get("author_id")) # i.e. URI for agency
extra(package, "Agency Program URL", defaults.get("Agency Program URL")) # i.e. URL for agency program
extra(package, "Contact Person", datajson.get("person")) # not in HHS schema
extra(package, "Contact Email", datajson.get("mbox")) # not in HHS schema
# "identifier" is handled by the harvester
extra(package, "Access Level", datajson.get("accessLevel")) # not in HHS schema
extra(package, "Data Dictionary", datajson.get("dataDictionary", defaults.get("Data Dictionary")))
# accessURL is redundant with resources
# webService is redundant with resources
extra(package, "Format", datajson.get("format")) # not in HHS schema
extra(package, "License Agreement", datajson.get("license"))
#extra(package, "License Agreement Required", ...)
extra(package, "Geographic Scope", datajson.get("spatial"))
extra(package, "Temporal", datajson.get("temporal")) # HHS uses Coverage Period (FY) Start/End
extra(package, "Date Released", datajson.get("issued"))
#extra(package, "Collection Frequency", ...)
extra(package, "Publish Frequency", datajson.get("accrualPeriodicity")) # not in HHS schema
extra(package, "Language", datajson.get("language")) # not in HHS schema
extra(package, "Granularity", datajson.get("granularity")) # not in HHS schema
extra(package, "Data Quality Met", datajson.get("dataQuality")) # not in HHS schema
#extra(package, "Unit of Analysis", ...)
#extra(package, "Collection Instrument", ...)
extra(package, "Subject Area 1", datajson.get("theme", defaults.get("Subject Area 1")))
extra(package, "Subject Area 2", defaults.get("Subject Area 2"))
extra(package, "Subject Area 2", defaults.get("Subject Area 3"))
extra(package, "Technical Documentation", datajson.get("references"))
extra(package, "Size", datajson.get("size")) # not in HHS schema
package["url"] = datajson.get("landingPage", datajson.get("webService", datajson.get("accessURL")))
extra(package, "Feed", datajson.get("feed")) # not in HHS schema
extra(package, "System Of Records", datajson.get("systemOfRecords")) # not in HHS schema
package["resources"] = [ ]
for d in datajson.get("distribution", []):
for k in ("accessURL", "webService"):
if d.get(k, "").strip() != "":
r = {
"url": d[k],
"format": normalize_format(d.get("format", "Query Tool" if k == "webService" else "Unknown")),
}
extra(r, "Language", d.get("language"))
extra(r, "Size", d.get("size"))
# work-around for Socrata-style formats array
try:
r["format"] = normalize_format(d["formats"][0]["label"])
except:
pass
r["name"] = r["format"]
package["resources"].append(r)
def extra(package, key, value):
if not value: return
package.setdefault("extras", []).append({ "key": key, "value": value })
def normalize_format(format):
# Format should be a file extension. But sometimes Socrata outputs a MIME type.
format = format.lower()
m = re.match(r"((application|text)/(\S+))(; charset=.*)?", format)
if m:
if m.group(1) == "text/plain": return "Text"
if m.group(1) == "application/zip": return "ZIP"
if m.group(1) == "application/vnd.ms-excel": return "XLS"
if m.group(1) == "application/x-msaccess": return "Access"
return "Other"
if format == "text": return "Text"
return format.upper() # hope it's one of our formats; fall back to uppercasing it
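# Illustrative behaviour of normalize_format (editor's examples):
#   normalize_format('text/plain')       -> 'Text'
#   normalize_format('application/zip')  -> 'ZIP'
#   normalize_format('application/json') -> 'Other'  (unrecognised MIME type)
#   normalize_format('csv')              -> 'CSV'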
``` |
{
"source": "18F/cloud-marketplace-prototype",
"score": 2
} |
#### File: cloud-marketplace-prototype/marketplace/converters.py
```python
from .models import Product
class ProductConverter:
regex = '[A-Za-z0-9_\-]+'
def to_python(self, value):
product = Product.objects.filter(slug=value).first()
if product:
return product
raise ValueError(f'No product slug matching "{value}"')
def to_url(self, value):
return value.slug
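# Registration sketch (assumes Django 2.0+ path converters; the URLconf layout is illustrative):
#   from django.urls import path, register_converter
#   from marketplace import converters, views
#   register_converter(converters.ProductConverter, 'product')
#   urlpatterns = [path('products/<product:product>/', views.product_detail)]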
```
#### File: management/commands/seeddb.py
```python
import random
from django.core.management.base import BaseCommand, CommandError
from django.core.management import call_command
from django.contrib.auth.models import User
from marketplace.models import Product, LicenseRequest
from marketplace.tests import factories
PRODUCTS = [
factories.FavroFactory.build(),
factories.MuralFactory.build(),
factories.TrelloFactory.build(),
factories.ZoomFactory.build(),
]
def create_admin_user(stdout, email='<EMAIL>'):
user = User.objects.filter(email=email).first()
if user:
stdout.write(f'Ensuring {email} is a superuser.')
if not user:
stdout.write(f'Creating superuser {email}.')
user = factories.UserFactory.build(
username=email,
email=email,
)
user.is_staff = True
user.is_superuser = True
user.save()
def create_products(products, stdout):
created_products = []
for product in products:
existing = Product.objects.filter(slug=product.slug).first()
if existing:
stdout.write(f'{product.name} already exists.')
else:
product.save()
existing = product
stdout.write(f'Created {product.name}.')
created_products.append(existing)
return created_products
def create_license_types(products, stdout, count=3):
lts = []
for product in products:
lts.extend(factories.LicenseTypeFactory.create_batch(
count,
product=product,
))
for lt in lts:
stdout.write(f'Created license type "{lt}".')
return lts
def create_teams(stdout, count=3, team_size=5):
teams = factories.TeamFactory.create_batch(count)
users = []
for team in teams:
stdout.write(f'Created team "{team}".')
members = factories.UserFactory.create_batch(team_size)
users.extend(members)
for member in members:
member.marketplace.team = team
member.marketplace.save()
stdout.write(f'Added "{member.email}" to team "{team}".')
return (teams, users)
def create_purchases(license_types, teams, stdout, license_count=2):
purchases = []
for lt in license_types:
team = random.choice(teams)
p = factories.PurchaseFactory.create(
license_count=license_count,
license_type=lt,
team=team,
)
product = lt.product
if not product.teams_approved_for.filter(pk=team.pk).exists():
stdout.write(f'Approving "{product}" for use by team "{team}".')
product.teams_approved_for.add(team)
stdout.write(
f'Created a purchase of {p.license_count} licenses of '
f'"{p.license_type}" for team "{p.team}".'
)
purchases.append(p)
return purchases
def create_license_requests(users, purchases, stdout, min_waitlisted=1):
waitlisted = 0
while waitlisted < min_waitlisted:
for user in users:
team = user.marketplace.team
assert team is not None
purchase = team.purchases.all().order_by('?').first()
status = random.choice([
LicenseRequest.REQUESTED,
LicenseRequest.GRANTED,
LicenseRequest.DENIED,
LicenseRequest.RELINQUISHED,
])
lt = purchase.license_type
if (status == LicenseRequest.GRANTED and
lt.get_stats_for_team(team).available == 0):
status = LicenseRequest.WAITLISTED
waitlisted += 1
req = factories.LicenseRequestFactory.create(
user=user,
license_type=lt,
status=status,
)
stdout.write(
f'Created a license request on behalf of {user.email} '
f'for "{lt}" with status "{status}".'
)
class Command(BaseCommand):
help = 'Seeds the marketplace with initial development data'
def add_arguments(self, parser):
super().add_arguments(parser)
parser.add_argument(
'--reset', action='store_true', dest='full_reset',
default=False,
help='Destroy everything currently in the database first.',
)
def handle(self, *args, **options):
if options.get('full_reset'):
call_command('reset_db')
call_command('migrate')
products = create_products(PRODUCTS, self.stdout)
lts = create_license_types(products, self.stdout)
teams, users = create_teams(self.stdout)
purchases = create_purchases(lts, teams, self.stdout)
create_license_requests(users, purchases, self.stdout)
create_admin_user(self.stdout)
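# Typical invocation during development (assumes the project's standard manage.py):
#   python manage.py seeddb            # idempotently seeds products, teams, and licenses
#   python manage.py seeddb --reset    # drops and recreates the database first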
```
#### File: marketplace/tests/factories.py
```python
import datetime
import factory
import random
from factory.django import DjangoModelFactory as Factory
from django.contrib.auth.models import User
from django.utils.text import slugify
from django.utils import timezone
from marketplace import models
class UserFactory(Factory):
class Meta:
model = User
first_name = factory.Faker('first_name')
last_name = factory.Faker('last_name')
email = factory.LazyAttribute(
lambda a: f"{a.first_name.lower()}.{a.last_name.lower()}<EMAIL>"
)
username = factory.LazyAttribute(
lambda a: a.email
)
is_active = True
job = factory.Faker('job')
def simple_job():
j = job.generate({})
# Some jobs are like "Nurse, adult"; we don't want the part
# after the comma.
return j.split(',')[0]
class TeamFactory(Factory):
class Meta:
model = models.Team
name = factory.LazyAttribute(
lambda a: f"GSA {simple_job()}s"
)
class BaseProductFactory(Factory):
class Meta:
model = models.Product
abstract = True
@factory.post_generation
def teams_approved_for(self, create, extracted, **kwargs):
if not create:
return
if extracted:
for team in extracted:
self.teams_approved_for.add(team)
@factory.post_generation
def licenses(self, create, extracted, **kwargs):
if not create:
return
if extracted:
for license in extracted:
self.license_types.add(license)
class FavroFactory(BaseProductFactory):
slug = 'favro'
name = 'Favro'
category = "Project management"
description = """
Favro is a planning and collaboration app which enables
developers, designers, and clients to all stay on the same
page and track progress.
"""
class MuralFactory(BaseProductFactory):
slug = 'mural'
name = 'Mural'
category = "Live collaboration"
description = """
A web-based solution for small teams that need a virtual
workspace in which they can brainstorm, plan and collaborate.
"""
class TrelloFactory(BaseProductFactory):
slug = 'trello'
name = 'Trello'
category = "Project management"
description = """
This is a card-based tool for managing projects and tasks.
It uses customizable Kanban-style boards, and offers
"power ups" such as GitHub integration.
"""
class ZoomFactory(BaseProductFactory):
slug = 'zoom'
name = 'Zoom'
category = "Video conferencing"
description = """
This is a video conferencing tool with features such as chat,
screen sharing, and session recording. No account is required
to participate in conferences.
"""
class ProductFactory(BaseProductFactory):
name = factory.Faker('company')
slug = factory.LazyAttribute(
lambda a: slugify(a.name)
)
category = factory.Faker('catch_phrase')
description = factory.Faker('paragraph')
def license_type_name():
NOUNS = [
'Free',
'Personal',
'Economy',
'Pro',
'Professional',
'Enterprise',
'Premium',
'Ultimate',
]
MODIFIERS = [
'Basically ',
'Simply ',
'Ultra-',
'Super-',
'Ludicrously ',
'Ridiculously ',
'Ostentatiously ',
'Alarmingly ',
]
return random.choice(MODIFIERS) + random.choice(NOUNS)
class LicenseTypeFactory(Factory):
class Meta:
model = models.LicenseType
name = factory.LazyFunction(license_type_name)
product = factory.SubFactory(ProductFactory)
class PurchaseFactory(Factory):
class Meta:
model = models.Purchase
license_type = factory.SubFactory(LicenseTypeFactory)
license_count = 50
start_date = factory.LazyFunction(timezone.now)
end_date = factory.LazyAttribute(
lambda a: a.start_date + datetime.timedelta(days=365),
)
team = factory.SubFactory(TeamFactory)
class LicenseRequestFactory(Factory):
class Meta:
model = models.LicenseRequest
user = factory.SubFactory(UserFactory)
license_type = factory.SubFactory(LicenseTypeFactory)
status = models.LicenseRequest.REQUESTED
is_self_reported = False
```
#### File: marketplace/tests/test_management_commands.py
```python
import re
from io import StringIO
import pytest
from django.core.management import call_command
@pytest.mark.django_db
def test_seeddb_works():
out = StringIO()
call_command('seeddb', stdout=out)
output = out.getvalue()
assert 'Created Favro.' in output
assert 'object at 0x' not in output
assert 'Created license type' in output
assert 'Created team' in output
assert re.search(r'Approving ".*" for use by team ".*"', output)
assert 'Created a purchase of' in output
assert re.search(r'Added ".*" to team ".*"', output)
assert 'Created a license request' in output
assert 'with status "granted"' in output
assert 'with status "waitlisted"' in output
assert 'Creating superuser' in output
out = StringIO()
call_command('seeddb', stdout=out)
output = out.getvalue()
assert 'Favro already exists.' in output
assert 'Ensuring <EMAIL> is a superuser' in output
```
#### File: marketplace/tests/test_models.py
```python
import pytest
from django.core.exceptions import ValidationError
from marketplace import models
from .factories import *
def test_team_str():
team = TeamFactory.build()
assert team.name == str(team)
def test_product_str():
trello = TrelloFactory.build()
assert trello.name == str(trello)
def test_license_type_str():
trello = TrelloFactory.build()
lt = LicenseTypeFactory.build(name='Enterprise', product=trello)
assert str(lt) == 'Trello - Enterprise'
@pytest.mark.django_db
def test_marketplace_info_str():
user = UserFactory.create(email='<EMAIL>')
assert str(user.marketplace) == 'Marketplace info for <EMAIL>'
@pytest.mark.django_db
def test_license_type_factory_works():
lt = LicenseTypeFactory.create()
assert lt.name
assert lt.product.name
assert list(lt.product.license_types.all()) == [lt]
@pytest.mark.django_db
def test_purchase_factory_works():
purchase = PurchaseFactory.create()
@pytest.mark.django_db
def test_license_request_factory_works():
req = LicenseRequestFactory.create()
@pytest.mark.django_db
def test_is_approved_for_user_works():
user = UserFactory.create()
team = TeamFactory.create()
product = ProductFactory.create(teams_approved_for=[team])
assert product.is_approved_for_user(user) is False
user.marketplace.team = team
assert product.is_approved_for_user(user) is True
@pytest.mark.django_db
def test_granted_license_requests_must_have_user_with_team_set():
req = LicenseRequestFactory.create()
req.clean()
req.status = req.GRANTED
with pytest.raises(ValidationError,
match=r'Users of granted licenses must have a team.'):
req.clean()
@pytest.mark.django_db
def test_get_license_stats_for_team_works():
lt = LicenseTypeFactory.create()
team = TeamFactory.create()
stats = lt.get_stats_for_team(team)
assert stats.purchased == 0
assert stats.used == 0
assert stats.available == 0
purchase = PurchaseFactory.create(
license_type=lt,
license_count=5,
team=team,
)
stats = lt.get_stats_for_team(team)
assert stats.purchased == 5
assert stats.used == 0
assert stats.available == 5
user = UserFactory.create()
user.marketplace.team = team
user.marketplace.save()
req = LicenseRequestFactory.create(
license_type=lt,
user=user,
status=models.LicenseRequest.GRANTED,
)
stats = lt.get_stats_for_team(team)
assert stats.purchased == 5
assert stats.used == 1
assert stats.available == 4
@pytest.mark.django_db
def test_get_license_stats_for_team_by_product_works():
lt1 = LicenseTypeFactory.create()
lt2 = LicenseTypeFactory.create(product=lt1.product)
p1 = PurchaseFactory.create(license_type=lt1, license_count=5)
p2 = PurchaseFactory.create(license_type=lt2, team=p1.team,
license_count=30)
stats = lt1.product.get_stats_for_team(p1.team)
assert stats.purchased == 35
```
#### File: cloud-marketplace-prototype/marketplace/views.py
```python
from django.shortcuts import render
import django.contrib.auth
def home(request):
return render(request, 'marketplace/home.html')
def logout(request):
django.contrib.auth.logout(request)
return render(request, 'marketplace/logged_out.html')
def product_detail(request, product):
return render(request, 'marketplace/product_detail.html', {
'product': product,
})
def usage(request):
return render(request, 'marketplace/usage.html')
```
#### File: cloud-marketplace-prototype/project/decorators.py
```python
import logging
from django.contrib.auth import REDIRECT_FIELD_NAME, decorators
from django.core.exceptions import PermissionDenied
logger = logging.getLogger('cmp')
def staff_login_required(function=None,
redirect_field_name=REDIRECT_FIELD_NAME,
login_url=None):
'''
Decorator to check that the user accessing the decorated view has their
is_staff flag set to True.
It will first redirect to login_url or the default login url if the user is
not authenticated. If the user is authenticated but is not staff, then
a PermissionDenied exception will be raised.
'''
# Based off code from the Django project
# License: https://github.com/django/django/blob/c1aec0feda73ede09503192a66f973598aef901d/LICENSE # NOQA
# Code reference: https://github.com/django/django/blob/c1aec0feda73ede09503192a66f973598aef901d/django/contrib/auth/decorators.py#L40 # NOQA
def check_if_staff(user):
if not user.is_authenticated:
# returning False will cause the user_passes_test decorator
# to redirect to the login flow
logger.info(f'Unauthenticated user has attempted to access '
f'is_staff view')
return False
if user.is_staff:
# then all good
logger.info(f'User with id {user.id} ({user.email}) has passed '
f'is_staff check')
return True
# otherwise the user is authenticated but isn't staff, so
# they do not have the correct permissions and should be directed
# to the 403 page
logger.info(f'User with id {user.id} ({user.email}) is '
f'authenticated but has not passed is_staff check')
raise PermissionDenied
actual_decorator = decorators.user_passes_test(
check_if_staff,
login_url=login_url,
redirect_field_name=redirect_field_name
)
if function:
return actual_decorator(function)
return actual_decorator
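# Usage sketch (the view name is illustrative):
#   @staff_login_required
#   def admin_dashboard(request):
#       ...
# Unauthenticated users are redirected to the login flow; authenticated
# non-staff users raise PermissionDenied (rendered as a 403).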
``` |
{
"source": "18F/crime-data-api",
"score": 2
} |
#### File: crime_data/common/newmodels.py
```python
from copy import deepcopy
import logging
from psycopg2 import ProgrammingError
from sqlalchemy.dialects.postgresql import JSONB
from sqlalchemy.orm import backref, relationship
from sqlalchemy.sql.elements import BinaryExpression
from sqlalchemy import func, UniqueConstraint
from sqlalchemy.sql import sqltypes
from flask_restful import abort
from crime_data.common import models
from crime_data.common.base import ExplorerOffenseMapping
from crime_data.common.models import RefAgency, RefState, RefCounty
from crime_data.extensions import db
from sqlalchemy import or_,and_
# fixme. ugh!
MAX_AGENCY_YEAR = 2016
class FilterableModel:
@classmethod
def column_is_string(cls, col_name):
col = getattr(cls.__table__.c, col_name)
return isinstance(col.type, sqltypes.String)
@classmethod
def filtered(cls, filters, args=None):
args = args or []
qry = cls.query
# This could be generalized to other places in the future
if 'fields' in args:
fields = args['fields'].split(',')
qry = qry.with_entities(*fields).select_from(cls)
for filter in filters:
if isinstance(filter, BinaryExpression):
qry = qry.filter(filter)
else:
(col_name, comparitor, values) = filter
col = getattr(cls, col_name)
if cls.column_is_string(col_name):
col = func.lower(col)
operation = getattr(col, comparitor)
qry = qry.filter(or_(operation(v) for v in values)).order_by(
col)
if 'by' in args:
for col_name in args['by'].split(','):
col = getattr(cls, col_name)
qry = qry.order_by(col)
return qry
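# Illustrative call (editor's sketch; filter tuples are
# (column_name, comparator_method_name, values) and the values are made up):
#   AgencyParticipation.filtered(
#       [('state_abbr', '__eq__', ['va', 'dc'])],
#       args={'by': 'year'},
#   )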
class AgencyParticipation(db.Model, FilterableModel):
"""Represents agency participation for a single month."""
__tablename__ = 'agency_participation'
year = db.Column(db.SmallInteger, nullable=False, primary_key=True)
state_name = db.Column(db.String)
state_abbr = db.Column(db.String)
agency_id = db.Column(db.Integer, nullable=False, primary_key=True)
agency_ori = db.Column(db.String)
agency_name = db.Column(db.String)
agency_population = db.Column(db.BigInteger)
population_group_code = db.Column(db.String)
population_group = db.Column(db.String)
reported = db.Column(db.SmallInteger, nullable=False)
months_reported = db.Column(db.SmallInteger, nullable=False)
nibrs_reported = db.Column(db.SmallInteger, nullable=False)
nibrs_months_reported = db.Column(db.SmallInteger, nullable=False)
covered = db.Column(db.SmallInteger)
participated = db.Column(db.SmallInteger)
nibrs_participated = db.Column(db.SmallInteger)
class ArsonSummary(db.Model):
__tablename__ = 'arson_summary'
arson_summary_id = db.Column(db.Integer, nullable=False, primary_key=True)
grouping_bitmap = db.Column(db.Integer)
year = db.Column(db.SmallInteger)
state_id = db.Column(db.Integer)
state_abbr = db.Column(db.Text)
agency_id = db.Column(db.Integer)
ori = db.Column(db.Text)
subcategory_code = db.Column(db.Text)
subcategory_name = db.Column(db.Text)
reported = db.Column(db.Integer)
unfounded = db.Column(db.Integer)
actual = db.Column(db.Integer)
cleared = db.Column(db.Integer)
juvenile_cleared = db.Column(db.Integer)
uninhabited = db.Column(db.Integer)
est_damage_value = db.Column(db.Integer)
class ParticipationRate(db.Model):
__tablename__ = 'participation_rates'
participation_id = db.Column(db.Integer, nullable=False, primary_key=True)
year = db.Column(db.SmallInteger, nullable=False)
state_id = db.Column(db.Integer,
db.ForeignKey(RefState.state_id,
deferrable=True,
initially='DEFERRED'),
nullable=True)
county_id = db.Column(db.Integer,
db.ForeignKey(RefCounty.county_id,
deferrable=True,
initially='DEFERRED'),
nullable=True)
state_name = db.Column(db.String)
county_name = db.Column(db.String)
total_agencies = db.Column(db.Integer)
participating_agencies = db.Column(db.Integer)
participation_rate = db.Column(db.Float)
nibrs_participating_agencies = db.Column(db.Integer)
nibrs_participation_rate = db.Column(db.Float)
covered_agencies = db.Column(db.Integer)
covered_rate = db.Column(db.Float)
total_population = db.Column(db.BigInteger)
participating_population = db.Column(db.BigInteger)
nibrs_participating_population = db.Column(db.BigInteger)
class CreatableModel:
@classmethod
def create(cls):
"""Creates database table for the model, unless it already exists."""
try:
cls.__table__.create(db.session.bind)
except ProgrammingError:
pass
class NibrsIncidentRepresentation(db.Model, CreatableModel):
__tablename__ = 'nibrs_incident_representation'
incident_representation_id = db.Column(db.BigInteger, primary_key=True)
incident_id = db.Column(db.BigInteger,
db.ForeignKey(models.NibrsIncident.incident_id))
representation = db.Column(JSONB)
incident = db.relationship(models.NibrsIncident,
uselist=False,
backref=backref('representation',
uselist=False))
@classmethod
def regenerate(cls):
"""Generates or replaces cached representations for all records."""
for incident in models.NibrsIncident.query:
if not incident.representation:
incident.representation = cls(incident=incident)
incident.representation.generate()
models.NibrsIncident.query.session.commit()
@classmethod
def fill(cls, batch_size=None):
"""Generates cached representations for records that lack them.
Using a `batch_size` helps for large operations that may fail."""
finished = False
batch_no = 0
while not finished:
finished = True
qry = models.NibrsIncident.query.filter(
models.NibrsIncident.representation == None).limit(batch_size)
for incident in qry:
finished = False # until the query comes back empty
incident.representation = cls(incident=incident)
incident.representation.generate()
models.NibrsIncident.query.session.commit()
logging.warning(
"Batch #{batch_no} of #{batch_size} complete".format(
batch_no=batch_no,
batch_size=batch_size))
batch_no += 1
def generate(self):
"""Generates and caches output for a single NibrsIncident."""
from crime_data.common import marshmallow_schemas
_schema = marshmallow_schemas.NibrsIncidentSchema()
self.representation = _schema.dump(self.incident).data
class RetaEstimated(db.Model):
"""
Estimated data loaded from a CSV data file created from published data
tables from the _Crime in the United States_ reports.
"""
__tablename__ = 'reta_estimated'
__table_args__ = (
UniqueConstraint('year', 'state_id'), )
estimate_id = db.Column(db.Integer, primary_key=True)
year = db.Column(db.SmallInteger)
state_id = db.Column(db.SmallInteger,
db.ForeignKey(RefState.state_id,
deferrable=True,
initially='DEFERRED'),
nullable=True)
state_abbr = db.Column(db.String(2))
population = db.Column(db.BigInteger)
violent_crime = db.Column(db.BigInteger)
homicide = db.Column(db.BigInteger)
rape_legacy = db.Column(db.BigInteger)
rape_revised = db.Column(db.BigInteger)
robbery = db.Column(db.BigInteger)
aggravated_assault = db.Column(db.BigInteger)
property_crime = db.Column(db.BigInteger)
burglary = db.Column(db.BigInteger)
larceny = db.Column(db.BigInteger)
motor_vehicle_theft = db.Column(db.BigInteger)
caveats = db.Column(db.Text)
state = relationship(RefState)
class ArrestsNational(db.Model):
"""Estimated data about national arrest totals"""
__tablename__ = 'asr_national'
id = db.Column(db.BigInteger, autoincrement=True, primary_key=True)
year = db.Column(db.SmallInteger)
population = db.Column(db.BigInteger)
total_arrests = db.Column(db.BigInteger)
homicide = db.Column(db.BigInteger)
rape = db.Column(db.BigInteger)
robbery = db.Column(db.BigInteger)
aggravated_assault = db.Column(db.BigInteger)
burglary = db.Column(db.BigInteger)
larceny = db.Column(db.BigInteger)
motor_vehicle_theft = db.Column(db.BigInteger)
arson = db.Column(db.BigInteger)
violent_crime = db.Column(db.BigInteger)
property_crime = db.Column(db.BigInteger)
other_assault = db.Column(db.BigInteger)
forgery = db.Column(db.BigInteger)
fraud = db.Column(db.BigInteger)
embezzlement = db.Column(db.BigInteger)
stolen_property = db.Column(db.BigInteger)
vandalism = db.Column(db.BigInteger)
weapons = db.Column(db.BigInteger)
prostitution = db.Column(db.BigInteger)
other_sex_offenses = db.Column(db.BigInteger)
drug_abuse = db.Column(db.BigInteger)
gambling = db.Column(db.BigInteger)
against_family = db.Column(db.BigInteger)
dui = db.Column(db.BigInteger)
liquor_laws = db.Column(db.BigInteger)
drunkenness = db.Column(db.BigInteger)
disorderly_conduct = db.Column(db.BigInteger)
vagrancy = db.Column(db.BigInteger)
other = db.Column(db.BigInteger)
suspicion = db.Column(db.BigInteger)
curfew_loitering = db.Column(db.BigInteger)
class AgencySums(db.Model):
__tablename__ = 'agency_sums_view'
id = db.Column(db.BigInteger, autoincrement=True, primary_key=True)
year = db.Column(db.SmallInteger)
agency_id = db.Column(db.BigInteger)
state_postal_abbr = db.Column(db.Text)
ori = db.Column(db.Text)
pub_agency_name = db.Column(db.Text)
offense_id = db.Column(db.BigInteger) # reta_offense_subcat
offense_subcat_id = db.Column(db.BigInteger)
offense_code = db.Column(db.Text) # reta_offense
offense_subcat_code = db.Column(db.Text)
offense_subcat_name = db.Column(db.Text)
offense_name = db.Column(db.Text)
reported = db.Column(db.BigInteger)
unfounded = db.Column(db.BigInteger)
actual = db.Column(db.BigInteger)
cleared = db.Column(db.BigInteger)
juvenile_cleared = db.Column(db.BigInteger)
def get(self, state = None, agency = None, year = None, county = None, explorer_offense = None):
"""Get Agency Sums given a state/year/county/agency ori, etc."""
query = AgencySums.query
if state:
query = query.filter(func.lower(AgencySums.state_postal_abbr) == state.lower())
if county:
subq = (db.session.query(models.RefAgencyCounty.agency_id)
.select_from(models.RefAgencyCounty)
.join(models.RefCounty, and_(models.RefAgencyCounty.county_id == models.RefCounty.county_id))
.filter(models.RefCounty.county_fips_code == county)
)
if year:
subq = subq.filter(models.RefAgencyCounty.data_year == year)
query = query.filter(AgencySums.agency_id.in_(subq.subquery()))
if agency:
query = query.filter(AgencySums.ori == agency)
if year:
query = query.filter(AgencySums.year == year)
if explorer_offense:
offense = ExplorerOffenseMapping(explorer_offense).reta_offense_code
query = query.filter(AgencySums.offense_code == offense)
query = query.filter(AgencySums.year <= MAX_AGENCY_YEAR)
query = query.filter(AgencyParticipation.year <= MAX_AGENCY_YEAR)
query = query.join(AgencyParticipation, and_(AgencyParticipation.agency_id == AgencySums.agency_id, AgencyParticipation.year == AgencySums.year)).filter(AgencyParticipation.months_reported == 12)
query = query.order_by(AgencySums.year.desc()) # Agency reported 12 Months.
return query
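# Illustrative query (editor's sketch; the values are made up and
# explorer_offense must be a key recognised by ExplorerOffenseMapping):
#   AgencySums().get(state='VA', year=2016, explorer_offense='burglary').all()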
class AgencyOffenseCounts(db.Model):
__tablename__ = 'agency_offenses_view'
id = db.Column(db.BigInteger, autoincrement=True, primary_key=True)
year = db.Column(db.SmallInteger)
agency_id = db.Column(db.BigInteger)
state_postal_abbr = db.Column(db.Text)
ori = db.Column(db.Text)
pub_agency_name = db.Column(db.Text)
offense_id = db.Column(db.BigInteger) # reta_offense_subcat
offense_code = db.Column(db.Text) # reta_offense
offense_name = db.Column(db.Text)
reported = db.Column(db.BigInteger)
unfounded = db.Column(db.BigInteger)
actual = db.Column(db.BigInteger)
cleared = db.Column(db.BigInteger)
juvenile_cleared = db.Column(db.BigInteger)
def get(self, state = None, agency = None, year = None, county = None, explorer_offense = None):
"""Get Agency Sums given a state/year/county/agency ori, etc."""
query = AgencyOffenseCounts.query
if state:
query = query.filter(func.lower(AgencyOffenseCounts.state_postal_abbr) == state.lower())
if county:
subq = (db.session.query(models.RefAgencyCounty.agency_id)
.select_from(models.RefAgencyCounty)
.join(models.RefCounty, and_(models.RefAgencyCounty.county_id == models.RefCounty.county_id))
.filter(models.RefCounty.county_fips_code == county)
)
if year:
subq = subq.filter(models.RefAgencyCounty.data_year == year)
query = query.filter(AgencyOffenseCounts.agency_id.in_(subq.subquery()))
if agency:
query = query.filter(AgencyOffenseCounts.ori == agency)
if year:
query = query.filter(AgencyOffenseCounts.year == year)
if explorer_offense:
offense = ExplorerOffenseMapping(explorer_offense).reta_offense_code
query = query.filter(AgencyOffenseCounts.offense_code == offense)
query = query.join(AgencyParticipation,
and_(AgencyParticipation.agency_id == AgencyOffenseCounts.agency_id,
AgencyParticipation.year == AgencyOffenseCounts.year)).filter(AgencyParticipation.months_reported == 12)
query = query.order_by(AgencyOffenseCounts.year.desc()) # Agency reported 12 Months.
query = query.filter(AgencyOffenseCounts.year <= MAX_AGENCY_YEAR)
return query
class AgencyClassificationCounts(db.Model):
__tablename__ = 'agency_classification_view'
id = db.Column(db.BigInteger, autoincrement=True, primary_key=True)
year = db.Column(db.SmallInteger)
agency_id = db.Column(db.BigInteger)
state_postal_abbr = db.Column(db.Text)
ori = db.Column(db.Text)
pub_agency_name = db.Column(db.Text)
classification = db.Column(db.Text)
reported = db.Column(db.BigInteger)
unfounded = db.Column(db.BigInteger)
actual = db.Column(db.BigInteger)
cleared = db.Column(db.BigInteger)
juvenile_cleared = db.Column(db.BigInteger)
def get(self, state = None, agency = None, year = None, county = None, classification = None):
"""Get Agency Sums given a state/year/county/agency ori, etc."""
query = AgencyClassificationCounts.query
if state:
query = query.filter(func.lower(AgencyClassificationCounts.state_postal_abbr) == state.lower())
if county:
subq = (db.session.query(models.RefAgencyCounty.agency_id)
.select_from(models.RefAgencyCounty)
.join(models.RefCounty, and_(models.RefAgencyCounty.county_id == models.RefCounty.county_id))
.filter(models.RefCounty.county_fips_code == county)
)
if year:
subq = subq.filter(models.RefAgencyCounty.data_year == year)
query = query.filter(AgencyClassificationCounts.agency_id.in_(subq.subquery()))
if agency:
query = query.filter(AgencyClassificationCounts.ori == agency)
if year:
query = query.filter(AgencyClassificationCounts.year == year)
if classification:
query = query.filter(func.lower(AgencyClassificationCounts.classification) == func.lower(classification))
query = query.join(AgencyParticipation,
and_(AgencyParticipation.agency_id == AgencyClassificationCounts.agency_id,
AgencyParticipation.year == AgencyClassificationCounts.year)).filter(AgencyParticipation.months_reported == 12)
query = query.order_by(AgencyClassificationCounts.year.desc()) # Agency reported 12 Months.
query = query.filter(AgencyClassificationCounts.year <= MAX_AGENCY_YEAR)
return query
class CdeAgency(db.Model, FilterableModel):
"""A class for the denormalized cde_agencies table"""
__tablename__ = 'cde_agencies'
__table_args__ = (UniqueConstraint('agency_id'), )
agency_id = db.Column(db.BigInteger, primary_key=True)
ori = db.Column(db.String(9))
legacy_ori = db.Column(db.String(9))
agency_name = db.Column(db.Text)
short_name = db.Column(db.Text)
agency_type_id = db.Column(db.String(1))
agency_type_name = db.Column(db.String(100))
# FIXME: can add associations when we need them
tribe_id = db.Column(db.BigInteger)
campus_id = db.Column(db.BigInteger)
city_id = db.Column(db.BigInteger)
city_name = db.Column(db.Text)
state_id = db.Column(db.SmallInteger)
state_abbr = db.Column(db.String(2))
primary_county_id = db.Column(db.BigInteger)
primary_county = db.Column(db.Text)
primary_county_fips = db.Column(db.String(5))
agency_status = db.Column(db.String(1))
submitting_agency_id = db.Column(db.BigInteger)
submitting_sai = db.Column(db.String(9))
submitting_name = db.Column(db.Text)
submitting_state_abbr = db.Column(db.String(2))
start_year = db.Column(db.SmallInteger)
dormant_year = db.Column(db.SmallInteger)
revised_rape_start = db.Column(db.SmallInteger)
current_nibrs_start_year = db.Column(db.SmallInteger)
current_year = db.Column(db.SmallInteger)
population = db.Column(db.BigInteger)
population_group_code = db.Column(db.String(2))
population_group_desc = db.Column(db.Text)
population_source_flag = db.Column(db.String(1))
suburban_area_flag = db.Column(db.String(1))
core_city_flag = db.Column(db.String(1))
months_reported = db.Column(db.SmallInteger)
nibrs_months_reported = db.Column(db.SmallInteger)
past_10_years_reported = db.Column(db.SmallInteger)
covered_by_id = db.Column(db.BigInteger)
covered_by_ori = db.Column(db.String(9))
covered_by_name = db.Column(db.Text)
staffing_year = db.Column(db.SmallInteger)
total_officers = db.Column(db.Integer)
total_civilians = db.Column(db.Integer)
icpsr_zip = db.Column(db.String(5))
icpsr_lat = db.Column(db.Float)
icpsr_lng = db.Column(db.Float)
class HtAgency(db.Model, FilterableModel):
"""Represents human trafficking counts reported by a single agency in a given year"""
    __tablename__ = 'ht_agency'
id = db.Column(db.Integer, primary_key=True)
year = db.Column(db.SmallInteger)
ori = db.Column(db.Text)
agency_id = db.Column(db.BigInteger)
agency_name = db.Column(db.Text)
population = db.Column(db.BigInteger)
state_id = db.Column(db.Integer)
state_abbr = db.Column(db.Text)
months_reported = db.Column(db.SmallInteger)
sex_acts = db.Column(db.Integer)
sex_acts_cleared = db.Column(db.Integer)
sex_acts_juvenile_cleared = db.Column(db.Integer)
servitude = db.Column(db.Integer)
servitude_cleared = db.Column(db.Integer)
servitude_juvenile_cleared = db.Column(db.Integer)
class HtSummary(db.Model):
"""Collects rollups of multiple HtAgency reports. You can use this
table to get counts of human trafficking for a given agency, a
    specific state, or the nation as a whole, over a single year or all
    years. Note that counts from US Territories are not available in this
    table (the FBI says they should not be included)."""
    __tablename__ = 'ht_summary'
ht_summary_id = db.Column(db.Integer, primary_key=True)
grouping_bitmap = db.Column(db.Integer)
year = db.Column(db.SmallInteger)
ori = db.Column(db.Text)
agency_id = db.Column(db.BigInteger)
agency_name = db.Column(db.Text)
agencies = db.Column(db.Integer)
population = db.Column(db.BigInteger)
state_id = db.Column(db.Integer)
state_abbr = db.Column(db.Text)
months_reported = db.Column(db.SmallInteger)
sex_acts = db.Column(db.Integer)
sex_acts_cleared = db.Column(db.Integer)
sex_acts_juvenile_cleared = db.Column(db.Integer)
servitude = db.Column(db.Integer)
servitude_cleared = db.Column(db.Integer)
servitude_juvenile_cleared = db.Column(db.Integer)
@classmethod
def grouped_by_state(cls, year=None, state_abbr=None):
query = HtSummary.query
query = query.filter(HtSummary.state_id != None)
query = query.filter(HtSummary.agency_id == None)
if year is not None:
query = query.filter(HtSummary.year == year)
if state_abbr is not None:
query = query.filter(HtSummary.state_abbr == state_abbr)
query = query.order_by(HtSummary.year, HtSummary.state_abbr)
query = query.filter(HtSummary.year <= MAX_AGENCY_YEAR)
return query
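# Hedged usage sketch (not part of the original module): the ``get`` builders
# on the count models and ``HtSummary.grouped_by_state`` all return lazy
# SQLAlchemy queries, so callers can keep filtering, count, or paginate before
# any rows are fetched. The argument values below are illustrative only.
def _example_agency_queries():
    state_rollups = HtSummary.grouped_by_state(year=2015, state_abbr='TX').all()
    va_offenses = AgencyOffenseCounts().get(state='VA', year=2014).all()
    return state_rollups, va_offenses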
```
#### File: crime_data/resources/estimates.py
```python
from webargs.flaskparser import use_args
from crime_data.common import cdemodels, marshmallow_schemas, models, lookupmodels
from crime_data.common.newmodels import RetaEstimated
from crime_data.common.models import RefState
from crime_data.common.base import CdeResource, tuning_page, ExplorerOffenseMapping
from crime_data.extensions import DEFAULT_MAX_AGE
from flask.ext.cachecontrol import cache
from sqlalchemy import func
class EstimatesState(CdeResource):
"""Return the estimates for a state"""
schema = marshmallow_schemas.EstimateSchema(many=True)
fast_count = False
@use_args(marshmallow_schemas.ArgumentsSchema)
@cache(max_age=DEFAULT_MAX_AGE, public=True)
@tuning_page
def get(self, args, state_id):
self.verify_api_key(args)
estimates = RetaEstimated.query.filter(func.lower(RetaEstimated.state_abbr) == func.lower(state_id)).order_by(RetaEstimated.year)
return self.with_metadata(estimates, args)
class EstimatesRegion(CdeResource):
"""Return the estimates for a region"""
schema = marshmallow_schemas.EstimateSchema(many=True)
fast_count = False
@use_args(marshmallow_schemas.ArgumentsSchema)
@cache(max_age=DEFAULT_MAX_AGE, public=True)
@tuning_page
def get(self, args, region_name):
self.verify_api_key(args)
region = lookupmodels.RegionLK.getByName(region_name=region_name).first()
states = lookupmodels.StateLK.get(region_code=region.region_code).all()
        id_arr = [state.state_id for state in states]
estimates = RetaEstimated.query.filter(RetaEstimated.state_id.in_(id_arr)).order_by(RetaEstimated.year)
return self.with_metadata(estimates, args)
class EstimatesNational(CdeResource):
"""Return the estimates for nationwide"""
schema = marshmallow_schemas.NationalEstimateSchema(many=True)
fast_count = False
@use_args(marshmallow_schemas.ArgumentsSchema)
@cache(max_age=DEFAULT_MAX_AGE, public=True)
@tuning_page
def get(self, args):
self.verify_api_key(args)
estimates = RetaEstimated.query.filter(RetaEstimated.state_abbr == None).order_by(RetaEstimated.year)
return self.with_metadata(estimates, args)
```
#### File: crime_data/resources/incidents.py
```python
from webargs.flaskparser import use_args
from itertools import filterfalse
from crime_data.common import cdemodels, marshmallow_schemas, models, newmodels
from crime_data.common.base import CdeResource, tuning_page, ExplorerOffenseMapping
from crime_data.extensions import DEFAULT_MAX_AGE
from flask.ext.cachecontrol import cache
from flask import jsonify
def _is_string(col):
col0 = list(col.base_columns)[0]
return issubclass(col0.type.python_type, str)
class AgenciesSumsState(CdeResource):
    """
    Agency Suboffense Sums by (year, agency) - Only agencies reporting all 12 months.
    """
schema = marshmallow_schemas.AgencySumsSchema(many=True)
fast_count = True
@use_args(marshmallow_schemas.OffenseCountViewArgs)
@cache(max_age=DEFAULT_MAX_AGE, public=True)
@tuning_page
def get(self, args, state_abbr = None, agency_ori = None):
self.verify_api_key(args)
model = newmodels.AgencySums()
year = args.get('year', None)
explorer_offense = args.get('explorer_offense', None)
agency_sums = model.get(state = state_abbr, agency = agency_ori, year = year, explorer_offense = explorer_offense)
filename = 'agency_sums_state'
return self.render_response(agency_sums, args, csv_filename=filename)
class AgenciesSumsCounty(CdeResource):
    """
    Agency Suboffense Sums by (year, agency) - Only agencies reporting all 12 months.
    """
schema = marshmallow_schemas.AgencySumsSchema(many=True)
fast_count = True
@use_args(marshmallow_schemas.OffenseCountViewArgsYear)
@cache(max_age=DEFAULT_MAX_AGE, public=True)
@tuning_page
def get(self, args, state_abbr = None, county_fips_code = None, agency_ori = None):
        """
        Year is a required field at the moment.
        """
self.verify_api_key(args)
model = newmodels.AgencySums()
year = args.get('year', None)
explorer_offense = args.get('explorer_offense', None)
agency_sums = model.get(agency = agency_ori, year = year, county = county_fips_code, state=state_abbr, explorer_offense=explorer_offense)
filename = 'agency_sums_county'
return self.render_response(agency_sums, args, csv_filename=filename)
class AgenciesOffensesCount(CdeResource):
    """
    Agency Offense counts by year.
    """
schema = marshmallow_schemas.AgencyOffensesSchema(many=True)
fast_count = True
@use_args(marshmallow_schemas.OffenseCountViewArgs)
@cache(max_age=DEFAULT_MAX_AGE, public=True)
@tuning_page
def get(self, args, state_abbr = None, agency_ori = None):
self.verify_api_key(args)
year = args.get('year', None)
explorer_offense = args.get('explorer_offense', None)
agency_sums = None
        # 'violent' and 'property' are classification rollups, so use the
        # classification view rather than a single offense code.
if explorer_offense == 'violent' or explorer_offense == 'property':
agency_sums = newmodels.AgencyClassificationCounts().get(state = state_abbr, agency = agency_ori, year = year, classification = explorer_offense)
else:
agency_sums = newmodels.AgencyOffenseCounts().get(state = state_abbr, agency = agency_ori, year = year, explorer_offense = explorer_offense)
filename = 'agency_offenses_state'
return self.render_response(agency_sums, args, csv_filename=filename)
class AgenciesOffensesCountyCount(CdeResource):
    """
    Agency Offense counts by year.
    """
schema = marshmallow_schemas.AgencyOffensesSchema(many=True)
fast_count = True
@use_args(marshmallow_schemas.OffenseCountViewArgsYear)
@cache(max_age=DEFAULT_MAX_AGE, public=True)
@tuning_page
def get(self, args, state_abbr = None, county_fips_code = None, agency_ori = None):
        """
        Year is a required field at the moment.
        """
self.verify_api_key(args)
model = newmodels.AgencyOffenseCounts()
year = args.get('year', None)
explorer_offense = args.get('explorer_offense', None)
agency_sums = model.get(agency = agency_ori, year = year, county = county_fips_code, state=state_abbr, explorer_offense=explorer_offense)
filename = 'agency_sums_county'
return self.render_response(agency_sums, args, csv_filename=filename)
``` |
{
"source": "18F/data-act-pilot",
"score": 3
} |
#### File: data-act-pilot/processors/compare_sam_addresses.py
```python
import csv
import time
import usaddress
from processors.get_sam_data import SamData
def get_address(record):
results = {}
results['address1'] = record['docvendor.address1']
results['address2'] = record['docvendor.address2']
results['address3'] = record['docvendor.address3']
results['city'] = record['docvendor.city']
results['name'] = record['docvendor.name']
results['state'] = record['docvendor.state']
results['zip'] = record['docvendor.zip']
return results
def format_sam_address(address):
result = ' '.join(filter(None, [address.get('Line1'),
address.get('Line2'),
address.get('City'),
address.get('stateorProvince'),
'-'.join(filter(None,
[address.get('Zip'),
address.get('Zip4')]))
]))
return result.lower()
def format_sba_address(address):
result = ' '.join(filter(None, [address.get('address1'),
address.get('address2'),
address.get('address3'),
address.get('city'),
address.get('state'),
address.get('zip')]))
return result.lower()
def compare_addresses(address1, address2):
try:
a1 = usaddress.tag(address1)
a2 = usaddress.tag(address2)
shared = dict(set(a1[0].items()) & set(a2[0].items()))
return shared
    except usaddress.RepeatedLabelError:
pass
def check_match(shared):
fields = ['AddressNumber',
'StreetName',
'PlaceName',
'StateName']
if set(fields).issubset(shared.keys()):
return True
return False
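# Illustrative sketch (not part of the original script): compare_addresses()
# returns the usaddress tags two address strings have in common, and
# check_match() passes only when the street number, street name, city, and
# state all agree. The addresses below are made up.
def _example_address_match():
    shared = compare_addresses('1600 Main St Springfield VA 22150',
                               '1600 Main Street Springfield VA 22150-1234')
    return bool(shared) and check_match(shared)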
def run():
f = csv.DictReader(open('data/data_act_test.csv'))
r = {row['docvendor.duns']: {'sba': get_address(row)} for row in f}
for duns in r.keys():
print "Fetching data for {} from SAM Api".format(duns)
time.sleep(2)
data = SamData(duns)
mailing = data.get_field('mailingAddress')
sam_address = data.get_field('samAddress')
r[duns]['mailing'] = mailing
r[duns]['sam'] = sam_address
address_strings = {k: {'sam': format_sam_address(r[k]['sam']),
'sba': format_sba_address(r[k]['sba']),
'mailing': format_sam_address(r[k]['mailing'])}
for k in r.keys()}
results = {}
for duns in address_strings:
mail = compare_addresses(address_strings[duns]['sba'],
address_strings[duns]['mailing'])
sam = compare_addresses(address_strings[duns]['sba'],
address_strings[duns]['sam'])
results[duns] = {}
if mail:
results[duns]['mail'] = {'match': mail,
'pass': str(check_match(mail))}
else:
results[duns]['mail'] = 'Can\'t parse address for {}'.format(duns)
if sam:
results[duns]['sam'] = {'match': sam,
'pass': str(check_match(sam))}
else:
results[duns]['sam'] = 'Can\'t parse address for {}'.format(duns)
print results
run()
```
#### File: data-act-pilot/processors/validator.py
```python
import schema.data_act_schema_pb2
from google.protobuf import text_format
from protobuf_to_dict import protobuf_to_dict
import codecs
import argparse
import itertools
REQUIRED_FIELDS = [
'award.awardingAgency.officeCode',
'award.awardees.businessName',
'award.awardees.businessAddress.street1',
'award.awardees.businessAddress.city',
'award.awardees.businessAddress.state',
'award.awardees.businessAddress.postalCode'
]
LENGTHS = [
('award.awardingAgency.officeCode', 6),
('award.fundingAgency.officeCode', 6)]
NUMERIC_FIELDS = ['transaction.programActivity']
POSSIBLE_VALUES = [
('transaction.objectClass', ['4110', '4101']),
('award.awardees.businessType', ['00', '01','02','03','04','05', '06', '11','12','20','21','22','23','25'])
]
REPEATED_FIELDS = [
'award.awardees',
'award.placesOfPerformance',
'award.awardees.highlyCompensatedOfficers',
'transaction.outlays',
]
def get_values(field, record):
result = []
try:
value = reduce(dict.__getitem__, field.split('.'), record)
if isinstance(value, list):
result = value
else:
result.append(value)
except KeyError:
result.append('')
except TypeError:
for repeated_field in REPEATED_FIELDS:
if field.startswith(repeated_field) and repeated_field != field:
records = get_values(repeated_field, record)
for r in records:
new_field = field.replace(repeated_field + '.', '')
result.append(get_values(new_field, r))
if all([isinstance(l, list) for l in result]):
result = list(itertools.chain.from_iterable(result))
return result
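# Illustrative example (not part of the original file): for repeated protobuf
# fields listed in REPEATED_FIELDS, get_values() returns one value per repeated
# entry, flattened into a single list. The record below is a hand-built
# stand-in for a parsed Action message.
def _example_get_values():
    record = {'award': {'awardees': [{'businessName': 'Acme Co'},
                                     {'businessName': 'Beta LLC'}]}}
    return get_values('award.awardees.businessName', record)
    # -> ['Acme Co', 'Beta LLC']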
def check_required_fields(record):
result = []
for field in REQUIRED_FIELDS:
for value in get_values(field, record):
if not value:
result.append("Required field {0} missing".format(field))
return result
def check_lengths(record):
result = []
for field, length in LENGTHS:
for value in get_values(field, record):
if value:
if len(value) != length:
result.append("Value of {0} must be exactly "
"{1} characters".format(field, length))
return result
def check_numeric_fields(record):
result = []
for field in NUMERIC_FIELDS:
for value in get_values(field, record):
if value:
try:
float(value)
except ValueError:
result.append("Value of {0} must be numeric".format(field))
return result
def check_enums(record):
result = []
for field, enum in POSSIBLE_VALUES:
for value in get_values(field, record):
if value not in enum:
result.append("Value of {0} must be one of {1}".format(field, enum))
return result
if __name__ == '__main__':
parser = argparse.ArgumentParser(description=(
'Schema Act Data Validation. Takes text format protobuf messages '
'and applies validation rules.'))
parser.add_argument('-i', dest='infile',
help=('A file of double newline delimited text format '
'protobufs.'))
args = parser.parse_args()
infile = args.infile
with codecs.open(infile, 'r', 'utf-8') as f:
pbs = f.read()
pbs = pbs.split('\n\n')
records = []
for pb in pbs:
if pb != '\n':
record = schema.data_act_schema_pb2.Action()
text_format.Merge(pb, record)
record = protobuf_to_dict(record)
records.append(record)
record_count = 0
error_count = 0
errors = {}
for record in records:
record_count += 1
results = []
results += check_required_fields(record)
results += check_lengths(record)
results += check_numeric_fields(record)
results += check_enums(record)
if results:
error_count += len(results)
errors[get_values('award.awardNumber', record)[0]] = results
print('\n')
print('Parsed {0} records and found {1} errors.'.format(record_count, error_count))
print('\n')
for rec, errs in errors.iteritems():
if errs:
print('In Award Number {0}, the following errors were found:'.format(rec))
for err in errs:
print(err)
```
#### File: processors/xbrl/xbrl_translate.py
```python
import argparse
from bs4 import UnicodeDammit
import codecs
import csv
import os
from unidecode import unidecode
import award as award_schema
import finassist
import gen
import ussgl
import xbrli
def fix_encoding(text):
""" Some files contain unknown encoding. This cleans that up."""
return unidecode(UnicodeDammit(text).unicode_markup)
def write_int(value=''):
'''Helper function to write an XBRL integer'''
if not value:
value = 42424242424242424242424242
return xbrli.integerItemType(value, unitRef='ID1', contextRef='ID2')
def write_string(value=''):
'''Helper function to write an XBRL string'''
if not value:
value = ' '
return xbrli.stringItemType(fix_encoding(value), contextRef='ID2')
def write_amount(value=''):
'''Helper function to write an XBRL amount'''
if not value:
value = '42424242'
return gen.amountItemType(value, unitRef='ID1', contextRef='ID2')
def get_or_undefined(d, k):
'''Helper function for setting undefined enum values
Accepts:
d: a dict
k: a potential key in d
Returns:
d[k] or "undefined"'''
result = 'undefined'
try:
if d[k]:
result = d[k]
except:
pass
return result
def create_agency(identifier, name, office_name, office_id):
'''Creates an agency as defined in the gen XBRL schema'''
agency = gen.agency()
office = gen.agencyOffice()
office.officeIdentifier = write_int(office_id)
office.officeName = write_string(office_name)
agency.agencyIdentifier = write_int(identifier)
agency.agencyName = write_string(name)
agency.agencyOffice.append(office)
return agency
def create_date(year, month, date):
'''Helper function to create XBRL dates'''
if not year:
year = '1981'
if not month:
month = '10'
if not date:
date = '03'
if len(month) == 1:
month = '0' + month
if len(date) == 1:
date = '0' + date
return xbrli.dateItemType('-'.join([year, month, date]), contextRef='ID1')
def create_tas_element(data):
'''Creates a TAS element as defined in the gen XBRL schema
Accepts:
data: a dict loaded from one of the four csvs
Returns:
A gen.TreasuryAccountSymbol element'''
tas = gen.treasuryAccountSymbol()
tas.agency = create_agency(data.get('AgencyIdentifier'), '', '', '')
tas.allocationTransferAgencyIdentifier.append(write_int(data.get('AllocationTransferAgencyIdentifier')))
tas.mainAccountNumber = write_int(data.get('MainAccountCode'))
tas.beginningPeriodOfAvailability.append(write_string(data.get('BeginningPeriodOfAvailability')))
tas.endingPeriodOfAvailability.append(write_string(data.get('EndingPeriodOfAvailability')))
tas.availabilityTypeCode = gen.availableTypeCodeItemType(data.get('AvailabilityTypeCode'), contextRef='ID2')
return tas
def create_tas(data):
'''Create TAS, with X for unknown elements
Accepts:
data: a dict loaded from on of the four csvs
Returns:
tas: a string version of the TAS'''
tas = ''
elements = ['AgencyIdentifier',
'AllocationTransferAgencyIdentifier',
'MainAccountCode',
'SubAccountSymbol',
'BeginningPeriodOfAvailability',
'EndingPeriodOfAvailability',
'AvailabilityTypeCode']
for element in elements:
if data.get(element):
tas += data.get(element) + '-'
else:
tas += 'X-'
return tas[:-1]
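# Illustrative example (not part of the original file): any TAS component
# missing from the row is replaced with 'X', so a record carrying only an
# agency identifier and a main account code yields a seven-part symbol.
def _example_create_tas():
    return create_tas({'AgencyIdentifier': '073', 'MainAccountCode': '0400'})
    # -> '073-X-0400-X-X-X-X'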
def index_by_tas(data):
'''Helper function to create a dict from a list of dicts, indexed by tas'''
indexed = {}
for d in data:
key = create_tas(d)
if key in indexed:
indexed[key].append(d)
else:
indexed[key] = [d]
return indexed
def fix_state(state):
'''Helper function to fix state codes'''
state_codes = {'alabama': 'AL',
'alaska': 'AK',
'arizona': 'AZ',
'arkansas': 'AR',
'california': 'CA',
'colorado': 'CO',
'connecticut': 'CT',
'delaware': 'DE',
'florida': 'FL',
'georgia': 'GA',
'hawaii': 'HI',
'idaho': 'ID',
'illinois': 'IL',
'indiana': 'IN',
'iowa': 'IA',
'kansas': 'KS',
'kentucky': 'KY',
'louisiana': 'LA',
'maine': 'ME',
'maryland': 'MD',
'massachusetts': 'MA',
'michigan': 'MI',
'minnesota': 'MN',
'mississippi': 'MS',
'missouri': 'MO',
'montana': 'MT',
'nebraska': 'NE',
'nevada': 'NV',
'new hampshire': 'NH',
'new jersey': 'NJ',
'new mexico': 'NM',
'new york': 'NY',
'north carolina': 'NC',
'north dakota': 'ND',
'ohio': 'OH',
'oklahoma': 'OK',
'oregon': 'OR',
'pennsylvania': 'PA',
'rhode island': 'RI',
'south carolina': 'SC',
'south dakota': 'SD',
'tennessee': 'TN',
'texas': 'TX',
'utah': 'UT',
'vermont': 'VT',
'virginia': 'VA',
'washington': 'WA',
'west virginia': 'WV',
'wisconsin': 'WI',
'wyoming': 'WY',
'american samoa': 'AS',
'district of columbia': 'DC',
'federated states of micronesia': 'FM',
'guam': 'GU',
'marshall islands': 'MH',
'northern mariana islands': 'MP',
'palau': 'PW',
'puerto rico': 'PR',
'u.s. virgin islands': 'VI',
'us armed forces europe': 'AE',
'us armed forces americas': 'AA',
'us armed forces pacific': 'AP'}
if state not in state_codes.values():
state = state_codes.get(state.lower(), 'undefined')
return state
def create_officer(first, middle, last, amount):
'''Creates a highlyCompensatedOfficer element'''
officer = award_schema.highlyCompensatedOfficer()
officer.highlyCompensatedOfficerFirstName = write_string(first)
officer.highlyCompensatedOfficerMiddleInitial = write_string(middle)
officer.highlyCompensatedOfficerLastName = write_string(last)
officer.highlyCompensatedOfficerCompensation = write_amount(amount)
return officer
def create_award(data):
'''Creates an award element'''
award = finassist.award()
award.awardDescription = write_string(data.get('AwardDescription'))
award.awardID = write_string(data.get('FainAwardNumber'))
award.parentAwardID = write_string('TODO')
award.modificationAmendmentNumber = write_string(data.get('AwardModAmendmentNumber'))
award.recordType = award_schema.recordType(get_or_undefined(data, 'RecordType'), contextRef='ID2')
award.typeOfAction = award_schema.typeOfAction(get_or_undefined(data, 'TypeOfAction'), contextRef='ID2')
# TODO: Figure out type of transaction
award.typeOfTransactionCode = award_schema.typeOfTransactionCode('undefined', contextRef='ID2')
award.awardeeInformation = finassist.awardeeInformation()
award.awardeeInformation.businessType = award_schema.businessType(get_or_undefined(data, 'BusinessType'), contextRef='ID2')
award.awardeeInformation.awardeeLegalBusinessName = write_string(data.get('RecipientLegalEntityName'))
award.awardeeInformation.ultimateParentUniqueIdentifier = write_int(data.get('RecipientUltimateParent1Id'))
award.awardeeInformation.awardeeUniqueIdentifier = write_int(data.get('RecipientDunsNumber'))
award.awardeeInformation.awardeeUniqueIdentifierSupplemental = write_string()
award.awardeeInformation.ultimateParentLegalBusinessName = write_string(data.get('RecipientUltimateParentLegalEntityName'))
award.awardeeInformation.awardeeAddress = gen.address()
award.awardeeInformation.awardeeAddress.streetAddress = gen.streetAddress()
award.awardeeInformation.awardeeAddress.streetAddress.streetAddressLine.append(write_string(data.get('RecipientLegalEntityAddressStreet1')))
award.awardeeInformation.awardeeAddress.city = write_string(data.get('RecipientLegalEntityCityName'))
award.awardeeInformation.awardeeAddress.county = write_string()
award.awardeeInformation.awardeeAddress.state = gen.state(fix_state(get_or_undefined(data, 'RecipientLegalEntityStateCode')), contextRef='ID2')
award.awardeeInformation.awardeeAddress.postalCode = write_string(data.get('RecipientLegalEntityPostalCode'))
award.awardeeInformation.awardeeAddress.zipCodePlus4 = write_string(data.get('RecipientLegalEntityZip+4'))
award.awardeeInformation.awardeeAddress.countryName = write_string(data.get('RecipientLegalEntityCountryName'))
award.awardeeInformation.awardeeAddress.countryCode = write_string(data.get('RecipientLegalEntityCountryCode'))
#award.awardeeInformation.awardeeAddress.congressionalDistrict = gen.congressionalDistrict(get_or_undefined(data, 'RecipientLegalEntityCongressionalDistrict'), contextRef='ID2')
award.primaryPlaceOfPerformance = gen.address()
award.primaryPlaceOfPerformance.streetAddress = gen.streetAddress()
award.primaryPlaceOfPerformance.streetAddress.streetAddressLine.append(write_string())
award.primaryPlaceOfPerformance.city = write_string(data.get('PlaceOfPerfCity'))
award.primaryPlaceOfPerformance.county = write_string(data.get('PlaceOfPerfCounty'))
award.primaryPlaceOfPerformance.state = gen.state(fix_state(get_or_undefined(data, 'PlaceOfPerfState')), contextRef='ID2')
award.primaryPlaceOfPerformance.postalCode = write_string()
award.primaryPlaceOfPerformance.zipCodePlus4 = write_string(data.get('PlaceOfPerfZip+4'))
award.primaryPlaceOfPerformance.countryName = write_string(data.get('PlaceOfPerfCountryName'))
award.primaryPlaceOfPerformance.countryCode = write_string()
#award.primaryPlaceOfPerformance.congressionalDistrict = gen.congressionalDistrict(get_or_undefined(data, 'PlaceOfPerfCongressionalDistrict'), contextRef='ID2')
award.periodOfPerformance = finassist.periodOfPerformance()
award.periodOfPerformance.periodOfPerformanceActionDate = create_date(data.get('ActionDateYear'), data.get('ActionDateMonth'), data.get('ActionDateDay'))
award.periodOfPerformance.periodOfPerformanceStartDate = create_date(data.get('PeriodOfPerfStartYear'), data.get('PeriodOfPerfStartMonth'), data.get('PeriodOfPerfStartDay'))
    award.periodOfPerformance.periodOfPerformanceCurrentEndDate = create_date(data.get('PeriodOfPerfCurrentEndYear'), data.get('PeriodOfPerfCurrentEndMonth'), data.get('PeriodOfPerfCurrentEndDay'))
award.periodOfPerformance.periodOfPerformancePotentialEndDate = create_date(data.get('PeriodOfPerfPotentialEndYear'), data.get('PeriodOfPerfPotentialEndMonth'), data.get('PeriodOfPerfPotentialEndDay'))
award.awardingAgency.append(create_agency(data.get('AwardingAgencyCode'), data.get('AwardingAgencyName'), data.get('AwardingOfficeName'),data.get('AwardingOfficeCode')))
award.fundingAgency.append(create_agency(data.get('FundingAgencyCode'), data.get('FundingAgencyName'), data.get('FundingOfficeName'), data.get('FundingOfficeCode')))
award.awardingSubTierAgency.append(create_agency(data.get('AwardingSubTierAgencyCode'), data.get('AwardingSubTierAgencyName'), '', ''))
award.fundingSubTierAgency.append(create_agency(data.get('FundingSubTierAgencyCode'), data.get('FundingSubTierAgencyName'), '', ''))
award.highlyCompensatedOfficer.append(create_officer(data.get('HighCompOfficer1FirstName'), data.get('HighCompOfficer1MiddleInitial'), data.get('HighCompOfficer1LastName'), data.get('HighCompOfficer1Amount')))
award.highlyCompensatedOfficer.append(create_officer(data.get('HighCompOfficer2FirstName'), data.get('HighCompOfficer2MiddleInitial'), data.get('HighCompOfficer2LastName'), data.get('HighCompOfficer2Amount')))
award.highlyCompensatedOfficer.append(create_officer(data.get('HighCompOfficer3FirstName'), data.get('HighCompOfficer3MiddleInitial'), data.get('HighCompOfficer3LastName'), data.get('HighCompOfficer3Amount')))
award.highlyCompensatedOfficer.append(create_officer(data.get('HighCompOfficer4FirstName'), data.get('HighCompOfficer4MiddleInitial'), data.get('HighCompOfficer4LastName'), data.get('HighCompOfficer4Amount')))
award.highlyCompensatedOfficer.append(create_officer(data.get('HighCompOfficer5FirstName'), data.get('HighCompOfficer5MiddleInitial'), data.get('HighCompOfficer5LastName'), data.get('HighCompOfficer5Amount')))
award.catalogOfFederalDomesticAssistanceProgram = finassist.catalogOfFederalDomesticAssistanceProgram()
award.catalogOfFederalDomesticAssistanceProgram.catalogOfFederalDomesticAssistanceTitle = write_string(data.get('CFDA_Description'))
award.catalogOfFederalDomesticAssistanceProgram.catalogOfFederalDomesticAssistanceNumber = write_string(data.get('CFDA_Code'))
award.awardAmounts = finassist.awardAmounts()
award.awardAmounts.federalFundingAmount = write_amount()
award.awardAmounts.totalFundingAmount = write_amount(data.get('CurrentTotalFundingObligationAmount'))
award.awardAmounts.nonFederalFundingAmount = write_amount(data.get('NonFederalFundingAmount'))
return award
def create_USSGL_header(indexed_af, approp):
'''Creates a USSGLEntryHeader element'''
tas = create_tas(approp)
header = ussgl.USSGLentryHeader()
header.treasuryAccountSymbol = create_tas_element(approp)
# TODO: Wrong level of aggregation?
header.obligatedAmount = write_amount()
header.unobligatedAmount = write_amount(approp.get('UnobligatedAmount'))
header.budgetAuthorityAppropriated = write_amount(approp.get('BudgetAuthorityAppropriatedAmount'))
header.otherBudgetaryResources = write_amount(approp.get('OtherBudgetaryResourcesAmount'))
# TODO: Wrong level of aggregation?
header.outlays = write_amount()
# TODO: Necessary?
header.appropriationAccount = write_string()
for af in indexed_af[tas]:
header.entryDetail.append(create_entry_detail(af))
return header
def create_entry_detail(single_af):
'''Creates an entryDetail element'''
entry = ussgl.entryDetail()
entry.account = ussgl.account()
entry.account.accountNumber = write_string(single_af.get('MainAccountCode'))
entry.account.objectClass = write_string(single_af.get('ObjectClass'))
entry.account.awardID = write_string(single_af.get('FainAwardNumber'))
# TODO: Are these still needed?
entry.account.accountDescription = write_string('')
entry.amount = write_amount()
entry.debitCreditCode = gen.debitCreditCodeItemType('undefined', contextRef='ID2')
entry.beginningEndIndicator = ussgl.beginningEndIndicatorItemType('E', contextRef='ID2')
# TODO: Wrong level of aggregation?
entry.programActivity = write_string('')
return entry
def create_accounting(approps, af):
'''Creates the accountingEntries element which will contain all
financial level data'''
result = ussgl.accountingEntries()
result.fiscalYear = write_string('2014')
result.period = write_string('FY')
indexed_af = index_by_tas(af)
for approp in approps:
result.USSGLentryHeader.append(create_USSGL_header(indexed_af, approp))
return result
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Converts four csv files into XBRL')
parser.add_argument('appropriation', help='Appropriation CSV')
parser.add_argument('object_class', help='Object Class CSV')
parser.add_argument('award_financial', help='Award Financial CSV')
parser.add_argument('award', help='Award CSV')
parser.add_argument('output_dir', help='Output directory')
args = parser.parse_args()
with codecs.open(args.award, 'U') as f:
awards = [row for row in csv.DictReader(f)]
with codecs.open(args.appropriation, 'U') as f:
appropriation = [row for row in csv.DictReader(f)]
with codecs.open(args.object_class, 'U') as f:
        ocpa = [row for row in csv.DictReader(f)]
with codecs.open(args.award_financial, 'U') as f:
award_financial = [row for row in csv.DictReader(f)]
output_dir = args.output_dir
    if output_dir[-1] != '/':
output_dir += '/'
try:
os.mkdir(output_dir)
except:
pass
awards_xml = {award['FainAwardNumber']: create_award(award).toDOM().toprettyxml() for award in awards}
accounting_xml = create_accounting(appropriation, award_financial).toDOM().toprettyxml()
with codecs.open(output_dir + 'accounting.xml', 'w', 'utf-8') as f:
f.write(accounting_xml)
for key in awards_xml:
with codecs.open(output_dir + key + '.xml', 'w', 'utf-8') as f:
f.write(awards_xml[key])
``` |
{
"source": "18F/data-federation-ingest",
"score": 3
} |
#### File: data-federation-ingest/data_ingest/models.py
```python
import os.path
from urllib.parse import urlencode
from django.contrib.auth import get_user_model
from django.contrib.postgres.fields import JSONField
from django.db import models
User = get_user_model()
class Upload(models.Model):
"""
An abstract model intended to be subclassed by the project
to further define the Upload object.
Tracks state and history of the upload
and who has modified it at each step.
Also can resolve duplicate file issues,
if `unique_metadata_fields` is defined in the project.
"""
class Meta:
abstract = True
STATUS_CHOICES = (
('LOADING', 'Loading'),
('PENDING', 'Pending'),
('STAGED', 'Staged'),
('INSERTED', 'Inserted'),
('DELETED', 'Deleted'),
)
submitter = models.ForeignKey(User, on_delete=models.CASCADE)
created_at = models.DateTimeField(auto_now_add=True)
file_metadata = JSONField(null=True)
file = models.FileField()
raw = models.BinaryField(null=True)
validation_results = JSONField(null=True)
status = models.CharField(
max_length=10,
choices=STATUS_CHOICES,
default='LOADING',
)
updated_at = models.DateTimeField(auto_now=True)
status_changed_by = models.ForeignKey(User, related_name="+", null=True, on_delete=models.CASCADE)
status_changed_at = models.DateTimeField(null=True)
replaces = models.ForeignKey('self', null=True, related_name='replaced_by', on_delete=models.CASCADE)
unique_metadata_fields = []
def duplicate_of(self):
"""
We are assuming there won't be *multiple* duplicates.
This is far less efficient than using a database unique index,
but we want to leave file_metadata very flexibly defined.
"""
if self.unique_metadata_fields:
duplicates = self.__class__.objects
for field in self.unique_metadata_fields:
duplicates = duplicates.filter(
**{'file_metadata__' + field: self.file_metadata[field]})
# Silently delete abandoned in-process duplicates
duplicates.filter(status='LOADING').exclude(id=self.id).delete()
return duplicates.exclude(status='DELETED').exclude(
id=self.id).first()
return None
@property
def file_type(self):
(root, ext) = os.path.splitext(self.file.name)
return ext.lower()[1:]
def file_metadata_as_params(self):
if self.file_metadata:
return urlencode(self.file_metadata)
else:
return ""
def descriptive_fields(self):
return self.file_metadata or {'file_name': self.file.name}
class DefaultUpload(Upload):
"""
A simple subclass of Upload to provide a default implementation
"""
pass
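# Hypothetical project-level subclass (not part of this package) showing how
# ``unique_metadata_fields`` drives ``duplicate_of``: two uploads whose
# ``file_metadata`` share the same agency and year values are treated as
# duplicates, so the newer upload can replace the older one. The metadata
# field names below are assumptions.
#
#   class AgencyUpload(Upload):
#       unique_metadata_fields = ['agency', 'year']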
```
#### File: data-federation-ingest/data_ingest/parsers.py
```python
from rest_framework import parsers
from .ingest_settings import UPLOAD_SETTINGS
class CsvParser(parsers.BaseParser):
"""
CSV parser.
"""
media_type = 'text/csv'
def parse(self, stream, media_type=None, parser_context=None):
"""
Given a streamed CSV, return a dict of parameters for tabulator.Stream
"""
return {
'source': stream.read(),
'format': 'csv',
**UPLOAD_SETTINGS['STREAM_ARGS']
}
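# Hedged usage note (not part of the original module): the dict returned by
# ``parse`` is intended to be splatted into a tabulator Stream, roughly:
#
#   from tabulator import Stream
#   params = CsvParser().parse(uploaded_file)
#   with Stream(**params) as stream:
#       rows = stream.read()
#
# The extra keys merged in from UPLOAD_SETTINGS['STREAM_ARGS'] (e.g. header
# handling) depend entirely on the project's configuration.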
``` |
{
"source": "18F/datagen",
"score": 3
} |
#### File: datagen/datagen/datagen.py
```python
from __future__ import absolute_import
import argparse
import re
import csv
import sys
if sys.version_info.major == 2:
range = xrange
from . import method_dispatch
from . import types
re_parse_type = re.compile(r'(.*)\[(.*)\]')
re_parse_file = re.compile(r'([a-zA-Z_0-9]+)\s+([a-zA-Z0-9_]+\[?.*\]?)')
def parse_method(fstr):
match = re_parse_type.match(fstr)
if match is not None:
method_name, method_arg_str = match.groups()
else:
method_name = fstr.strip()
method_arg_str = None
method, arg_handler = method_dispatch[method_name.lower()]
if arg_handler is None:
return method, None
else:
return method, arg_handler(method_arg_str)
def read_schema_file(path):
names = []
fieldtypes = []
f = open(path, 'r')
    for n, line in enumerate(f, 1):
line = line.strip()
if line.startswith('#'):
continue
match = re_parse_file.match(line)
if match is None:
if line == '':
continue
else:
raise Exception('Invalid field declaration at line #%s: `%s`' % (n, line))
name, method = match.groups()
names.append(name)
fieldtypes.append(parse_method(method))
f.close()
return names, fieldtypes
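# Illustrative schema file (not shipped with the project): each non-comment,
# non-blank line is "<column_name> <type>[optional,args]" and is parsed by
# re_parse_file/parse_method above. The type names shown are assumptions;
# the real ones come from `method_dispatch`.
#
#   # lines starting with '#' are ignored
#   first_name  name
#   age         integer[18,65]
#   state       state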
def main():
parser = argparse.ArgumentParser(description='Generate dummy data')
parser.add_argument('-d', '--delimiter', required=False, help='Delimter to use. Default is |')
parser.add_argument('--with-header', required=False, action='store_true', help='Write column headers as first row')
parser.add_argument('-n', '--num-rows', required=True, help='Number of rows to write')
parser.add_argument('-s', '--schema', required=True, help='Schema file to load')
parser.add_argument('output', nargs='?', help='Path to write to (STDOUT if not specified')
args = parser.parse_args()
if args.output is None:
output = sys.stdout
else:
output = open(args.output, 'w')
delimiter = '|'
if args.delimiter:
delimiter = args.delimiter
names, fieldtypes = read_schema_file(args.schema)
writer = csv.writer(output, delimiter=delimiter)
num_rows = int(args.num_rows)
if args.with_header:
writer.writerow(names)
for n in range(num_rows):
writer.writerow([method(argument) for method, argument in fieldtypes])
``` |
{
"source": "18F/django-admin-interface",
"score": 2
} |
#### File: django-admin-interface/tests/tests.py
```python
from django.conf import settings
from django.test import TestCase
from django.test.client import RequestFactory
from django.template import Context, Template
import random
import shutil
from admin_interface.models import Theme
class AdminInterfaceTestCase(TestCase):
def setUp(self):
self.request_factory = RequestFactory()
pass
def tearDown(self):
shutil.rmtree(settings.MEDIA_ROOT, ignore_errors=True)
pass
def __render_template(self, string, context):
return Template(string).render(context)
def __test_active_theme(self):
theme = Theme.get_active_theme()
print( theme )
self.assertTrue(theme != None)
self.assertTrue(theme.active)
self.assertEqual(Theme.objects.filter( active = True ).count(), 1);
def test_default_theme_created_if_no_themes(self):
Theme.objects.all().delete()
self.__test_active_theme()
def test_default_theme_created_if_all_themes_deleted(self):
Theme.objects.all().delete()
self.__test_active_theme()
def test_default_theme_activated_on_save_if_no_active_themes(self):
Theme.objects.all().delete()
theme = Theme.get_active_theme()
theme.active = False
theme.save()
self.__test_active_theme()
def test_default_theme_activated_after_update_if_no_active_themes(self):
Theme.objects.all().delete()
Theme.objects.all().update( active = False )
self.__test_active_theme()
def test_default_theme_activated_after_update_if_multiple_active_themes(self):
Theme.objects.all().delete()
theme_1 = Theme.objects.create( name = 'Custom 1', active = True )
theme_2 = Theme.objects.create( name = 'Custom 2', active = True )
theme_3 = Theme.objects.create( name = 'Custom 3', active = True )
Theme.objects.update( active = False )
Theme.objects.update( active = True )
self.__test_active_theme()
def test_default_theme_activated_on_active_theme_deleted(self):
Theme.objects.all().delete()
theme_1 = Theme.objects.create( name = 'Custom 1', active = True )
theme_2 = Theme.objects.create( name = 'Custom 2', active = True )
theme_3 = Theme.objects.create( name = 'Custom 3', active = True )
Theme.objects.filter( pk = Theme.get_active_theme().pk ).delete()
self.__test_active_theme()
def test_last_theme_activated_on_multiple_themes_created(self):
Theme.objects.all().delete()
theme_1 = Theme.objects.create( name = 'Custom 1', active = True )
theme_2 = Theme.objects.create( name = 'Custom 2', active = True )
theme_3 = Theme.objects.create( name = 'Custom 3', active = True )
self.assertEqual( Theme.get_active_theme().pk, theme_3.pk )
self.__test_active_theme()
def test_last_theme_activated_on_multiple_themes_activated(self):
Theme.objects.all().delete()
theme_1 = Theme.objects.create( name = 'Custom 1', active = True )
theme_2 = Theme.objects.create( name = 'Custom 2', active = True )
theme_3 = Theme.objects.create( name = 'Custom 3', active = True )
theme_4 = Theme.objects.create( name = 'Custom 4', active = True )
theme_5 = Theme.objects.create( name = 'Custom 5', active = True )
themes = [ theme_1, theme_2, theme_3, theme_4, theme_5 ]
for i in range(5):
random.shuffle(themes)
for theme in themes:
theme.set_active()
self.assertEqual( Theme.get_active_theme().pk, theme.pk )
self.__test_active_theme()
def test_templatetags(self):
Theme.objects.all().delete()
context = Context({})
rendered = self.__render_template('{% load admin_interface_tags %}{% get_admin_interface_theme as theme %}{{ theme.name }}', context)
self.assertEqual(rendered, 'Django')
def test_templatetags_with_request(self):
Theme.objects.all().delete()
context = Context({
'request': self.request_factory.get('/')
})
rendered = self.__render_template('{% load admin_interface_tags %}{% get_admin_interface_theme as theme %}{{ theme.name }}', context)
self.assertEqual(rendered, 'Django')
def test_repr(self):
theme = Theme.get_active_theme()
self.assertEqual( "{0}".format(theme), 'Django' )
``` |
{
"source": "18F/django-email-pal",
"score": 2
} |
#### File: emailpal/tests/test_version.py
```python
from .. import VERSION
def test_version_is_a_string():
assert type(VERSION) is str
```
#### File: emailpal/tests/test_views.py
```python
import pytest
from django.conf.urls import include, url
from django.test import Client, override_settings
from .util import all_template_engines
from .test_sendable_email import MY_SENDABLE_EMAIL
urlpatterns = [
url(r'^examples/', include('emailpal.urls')),
]
@pytest.fixture
def client():
with override_settings(SENDABLE_EMAILS=[MY_SENDABLE_EMAIL],
ROOT_URLCONF=__name__):
yield Client()
@pytest.mark.parametrize('template_engine', all_template_engines())
def test_index_works(client, template_engine):
with template_engine.enable():
response = client.get('/examples/')
assert response.status_code == 200
assert 'MySendableEmail' in response.content.decode('utf-8')
def test_invalid_example_raises_404(client):
response = client.get('/examples/blarg.html')
assert response.status_code == 404
def test_valid_html_example_works(client):
response = client.get('/examples/{}.html'.format(MY_SENDABLE_EMAIL))
assert response.status_code == 200
assert 'I am HTML' in response.content.decode('utf-8')
def test_valid_plaintext_example_works(client):
response = client.get('/examples/{}.txt'.format(MY_SENDABLE_EMAIL))
assert response.status_code == 200
assert 'I am plaintext' in response.content.decode('utf-8')
```
#### File: django-email-pal/emailpal/views.py
```python
from django.http import Http404, HttpResponse
from django.urls import reverse
from django.template.response import TemplateResponse
from .apps import get_sendable_emails
def example_view(request, name, is_html_email):
sendable_email = get_sendable_emails().get(name)
if sendable_email is None:
raise Http404("No such email")
email = sendable_email()
if is_html_email:
content_type = 'text/html'
content = email.render_body_as_html(email.example_ctx)
else:
content_type = 'text/plain'
content = email.render_body_as_plaintext(email.example_ctx)
return HttpResponse(content, content_type)
def example_index(request):
examples = []
for name, sendable_email in get_sendable_emails().items():
examples.append({
'name': name,
'description': sendable_email.__doc__,
'html_url': reverse('emailpal:example_view_html',
kwargs={'name': name}),
'txt_url': reverse('emailpal:example_view_txt',
kwargs={'name': name}),
})
return TemplateResponse(request, 'emailpal/index.html', {
'examples': examples,
})
```
#### File: example/tests/test_rendering_parity.py
```python
from django.test import TestCase
from emailpal.tests.util import all_template_engines
from ..emails import MySendableEmail
class RenderingParityTests(TestCase):
def test_email_renders_the_same_in_all_engines(self):
e = MySendableEmail()
engine_output = {}
for engine in all_template_engines():
out = {} # type: dict
engine_output[engine] = out
with engine.enable():
out['txt'] = e.render_body_as_plaintext(e.example_ctx)
out['html'] = e.render_body_as_html(e.example_ctx)
self.maxDiff = 5000
self.assertEqual(
engine_output['django']['txt'],
engine_output['jinja2']['txt'],
)
self.assertHTMLEqual(
engine_output['django']['html'],
engine_output['jinja2']['html'],
)
``` |
{
"source": "18F/django-pg-fts",
"score": 3
} |
#### File: django-pg-fts/pg_fts/utils.py
```python
from django.utils.translation import ugettext_lazy as _


class TranslationDictionary(object):
    """
    Maps language codes to PostgreSQL text-search dictionary names,
    falling back to the 'simple' dictionary for unknown languages.
    """
def __init__(self, dictionaries=None, default=None):
self.dictionaries = dictionaries or {
'pt': ('portuguese', _('Portuguese')),
'en': ('english', _('English')),
'es': ('spanish', _('Spanish')),
'de': ('german', _('German')),
'da': ('danish', _('Danish')),
'nl': ('dutch', _('Dutch')),
'fi': ('finnish', _('Finnish')),
'fr': ('french', _('French')),
'hu': ('hungarian', _('Hungarian')),
'it': ('italian', _('Italian')),
'nn': ('norwegian', _('Norwegian')),
'ro': ('romanian', _('Romanian')),
'ru': ('russian', _('Russian')),
'sv': ('swedish', _('Swedish')),
'tr': ('turkish', _('Turkish')),
}
self.default = default or ('simple', _('Simple'))
def get_dictionary_tuple(self, language):
return self.dictionaries.get(language.split('-')[0], self.default)
def get_dictionary_pg(self, language):
return self.get_dictionary_tuple(language)[0]
def get_dictionaries(self, languages=None):
if languages:
            return tuple(self.get_dictionary_tuple(l) for l in languages)
return self.dictionaries.values()
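# Illustrative usage (not part of the original module): language codes are
# matched on their base tag, and unknown languages fall back to the 'simple'
# PostgreSQL dictionary.
def _example_translation_dictionary():
    td = TranslationDictionary()
    assert td.get_dictionary_pg('pt-br') == 'portuguese'
    assert td.get_dictionary_pg('en-us') == 'english'
    assert td.get_dictionary_pg('xx') == 'simple'
    return td.get_dictionary_tuple('de')  # ('german', _('German'))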
```
#### File: django-pg-fts/testapp/models.py
```python
from __future__ import unicode_literals
from django.utils.encoding import python_2_unicode_compatible
from pg_fts.fields import TSVectorField
from django.db import models
@python_2_unicode_compatible
class TSQueryModel(models.Model):
title = models.CharField(max_length=50)
body = models.TextField()
sometext = models.CharField(max_length=50, null=True, blank=True)
tsvector = TSVectorField(('title', 'body'))
def __str__(self):
return self.title
@python_2_unicode_compatible
class TSMultidicModel(models.Model):
title = models.CharField(max_length=50)
body = models.TextField()
sometext = models.CharField(max_length=50, null=True, blank=True)
dictionary = models.CharField(
max_length=15,
choices=(('english', 'english'), ('portuguese', 'portuguese')),
default='english'
)
tsvector = TSVectorField((('title', 'A'), 'body'),
dictionary='dictionary')
def __str__(self):
return self.title
class Related(models.Model):
single = models.ForeignKey(TSQueryModel, blank=True, null=True)
multiple = models.ForeignKey(TSMultidicModel, blank=True, null=True)
```
#### File: testapp/tests/test_annotation.py
```python
from __future__ import unicode_literals
from django.test import TestCase
from testapp.models import TSQueryModel, Related, TSMultidicModel
from pg_fts.ranks import (FTSRank, FTSRankDictionay, FTSRankCd,
FTSRankCdDictionary)
from django.core import exceptions
__all__ = ('AnnotateTestCase', 'FTSRankDictionayTestCase')
class AnnotateTestCase(TestCase):
def setUp(self):
a = TSQueryModel.objects.create(
title='para for os the mesmo same malucos crazy',
body="""para for os the mesmo same malucos crazy que that tomorow
salvão save o the planeta planet"""
)
b = TSQueryModel.objects.create(
title='malucos crazy como like eu me',
body="""para for os the mesmo same malucos crazy que that tomorow
salvão save o the planeta planet"""
)
Related.objects.create(single=a)
Related.objects.create(single=b)
def test_ts_rank_search(self):
q = TSQueryModel.objects.annotate(
rank=FTSRank(tsvector__search='para mesmo')
)
self.assertIn('''WHERE ("testapp_tsquerymodel"."tsvector" @@ to_tsquery('english', para & mesmo))''',
str(q.query))
self.assertIn('''ts_rank("testapp_tsquerymodel"."tsvector", to_tsquery('english', para & mesmo)) AS "rank"''',
str(q.query))
self.assertEqual(
q.order_by('-rank')[0].title, 'para for os the mesmo same malucos crazy')
self.assertEqual(
q.order_by('rank')[0].title, 'malucos crazy como like eu me')
def test_ts_rank_isearch(self):
q = TSQueryModel.objects.annotate(
rank=FTSRank(tsvector__isearch='para mesmo'))
self.assertIn('''WHERE ("testapp_tsquerymodel"."tsvector" @@ to_tsquery('english', para | mesmo))''',
str(q.query))
self.assertIn('''ts_rank("testapp_tsquerymodel"."tsvector", to_tsquery('english', para | mesmo)) AS "rank"''',
str(q.query))
self.assertEqual(
q.order_by('-rank')[0].title, 'para for os the mesmo same malucos crazy')
self.assertEqual(
q.order_by('rank')[0].title, 'malucos crazy como like eu me')
def test_ts_rank_tsquery(self):
q = TSQueryModel.objects.annotate(
rank=FTSRank(tsvector__tsquery='para & mesmo'))
self.assertIn('''WHERE ("testapp_tsquerymodel"."tsvector" @@ to_tsquery('english', para & mesmo))''',
str(q.query))
self.assertIn('''ts_rank("testapp_tsquerymodel"."tsvector", to_tsquery('english', para & mesmo)) AS "rank"''',
str(q.query))
self.assertEqual(
q.order_by('-rank')[0].title, 'para for os the mesmo same malucos crazy')
self.assertEqual(
q.order_by('rank')[0].title, 'malucos crazy como like eu me')
def test_ts_rank_search_related(self):
q = Related.objects.annotate(
rank=FTSRank(single__tsvector__search='para mesmo')
)
self.assertEqual(len(q), 2)
self.assertIn('''WHERE ("testapp_tsquerymodel"."tsvector" @@ to_tsquery('english', para & mesmo))''',
str(q.query))
self.assertIn('''ts_rank("testapp_tsquerymodel"."tsvector", to_tsquery('english', para & mesmo)) AS "rank"''',
str(q.query))
self.assertEqual(
q.order_by('-rank')[0].single.title, 'para for os the mesmo same malucos crazy')
self.assertEqual(
q.order_by('rank')[0].single.title, 'malucos crazy como like eu me')
def test_rank_dictionay_group_by_related(self):
qn = Related.objects.annotate(
rank=FTSRank(single__tsvector__search='para mesmo'))
self.assertIn('"testapp_tsquerymodel"."tsvector"',
str(qn.query).split('GROUP BY')[-1])
def test_normalization(self):
qs = TSQueryModel.objects.annotate(
rank=FTSRank(tsvector__tsquery='para & mesmo', normalization=[32, 8]))
self.assertIn('''ts_rank("testapp_tsquerymodel"."tsvector", to_tsquery('english', para & mesmo), 32|8) AS "rank"''',
str(qs.query))
self.assertEqual(len(qs), 2)
def test_weights(self):
qs = TSQueryModel.objects.annotate(
rank=FTSRank(
tsvector__tsquery='para & mesmo',
normalization=(32, 8),
weights=(0.1, 0.2, 0.4, 1.0)
)
)
self.assertIn('''ts_rank('{0.1, 0.2, 0.4, 1.0}', "testapp_tsquerymodel"."tsvector", to_tsquery('english', para & mesmo), 32|8) AS "rank"''',
str(qs.query))
self.assertEqual(len(qs), 2)
def test_rank_assertions(self):
with self.assertRaises(AssertionError):
qs = TSQueryModel.objects.annotate(
rank=FTSRank(
tsvector__tsquery='para & mesmo',
normalization=(2, 8),
weights=(0.1, 'asd', 0.4, 1.0)
)
)
list(qs)
with self.assertRaises(AssertionError):
qs = TSQueryModel.objects.annotate(
rank=FTSRank(
tsvector__tsquery='para & mesmo',
normalization=(3, 8),
weights=(0.1, 2, 0.4, 1.0)
)
)
list(qs)
# need to find a way to catch FieldError raised by
# django.db.models.sql.query in add fields
#
def test_transform_dictionary_exception(self):
with self.assertRaises(exceptions.FieldError) as msg:
TSQueryModel.objects.annotate(
rank=FTSRank(tsvector__nodict='malucos')),
self.assertEqual(
str(msg.exception),
"The 'nodict' isn't valid Lookup for FTSRank")
with self.assertRaises(exceptions.FieldError) as msg:
TSQueryModel.objects.annotate(
rank=FTSRank(tsvector='malucos')),
self.assertEqual(
str(msg.exception),
"The 'tsvector' isn't valid Lookup for FTSRank")
def test_ts_rank_cd_search(self):
q = TSQueryModel.objects.annotate(
rank=FTSRankCd(tsvector__search='para mesmo')
)
self.assertIn('''WHERE ("testapp_tsquerymodel"."tsvector" @@ to_tsquery('english', para & mesmo))''',
str(q.query))
self.assertIn('''ts_rank_cd("testapp_tsquerymodel"."tsvector", to_tsquery('english', para & mesmo)) AS "rank"''',
str(q.query))
self.assertEqual(
q.order_by('-rank')[0].title, 'para for os the mesmo same malucos crazy')
self.assertEqual(
q.order_by('rank')[0].title, 'malucos crazy como like eu me')
class FTSRankDictionayTestCase(TestCase):
'''tests for FTSRankDictionayTestCase'''
def setUp(self):
title = 'para for os the mesmo same malucos crazy'
body = """para for os the mesmo same malucos crazy que that tomorow
salvão save o the planeta planet"""
pt = TSMultidicModel.objects.create(
title=title,
body=body,
dictionary='english'
)
en = TSMultidicModel.objects.create(
title=title,
body=body,
dictionary='portuguese'
)
Related.objects.create(
multiple=pt
)
Related.objects.create(
multiple=en
)
def test_rank_dictionay_transform_search(self):
# `para``os` are stopwords in portuguese
qn_base_pt = TSMultidicModel.objects.filter(dictionary='portuguese')
qn_base_en = TSMultidicModel.objects.filter(dictionary='english')
pt = qn_base_pt.annotate(
rank=FTSRankDictionay(tsvector__portuguese__tsquery='para & os'))
self.assertIn(
'''("testapp_tsmultidicmodel"."tsvector" @@ to_tsquery('portuguese', para & os))''',
str(pt.query))
self.assertIn(
'''ts_rank("testapp_tsmultidicmodel"."tsvector", to_tsquery('portuguese', para & os)) AS "rank"''',
str(pt.query))
en = qn_base_pt.annotate(
rank=FTSRankDictionay(tsvector__english__tsquery='para & os'))
self.assertIn(
'''("testapp_tsmultidicmodel"."tsvector" @@ to_tsquery('english', para & os))''',
str(en.query))
self.assertIn(
'''ts_rank("testapp_tsmultidicmodel"."tsvector", to_tsquery('english', para & os)) AS "rank"''',
str(en.query))
qn_base_pt.annotate(
rank=FTSRankDictionay(tsvector__portuguese__tsquery='para & os')
)
def test_rank_dictionay_related_multidict(self):
qn_base_pt = Related.objects.filter(multiple__dictionary='portuguese')
qn_base_en = Related.objects.filter(multiple__dictionary='english')
qn_pt = qn_base_pt.annotate(rank=FTSRankDictionay(
multiple__tsvector__portuguese__tsquery='para & os'))
qn_en = qn_base_en.annotate(rank=FTSRankDictionay(
multiple__tsvector__english__tsquery='para & os'))
self.assertIn('''"testapp_tsmultidicmodel"."tsvector" @@ to_tsquery('english', para & os))''',
str(qn_en.query))
self.assertIn('''ts_rank("testapp_tsmultidicmodel"."tsvector", to_tsquery('english', para & os)) AS "rank"''',
str(qn_en.query))
self.assertIn('''"testapp_tsmultidicmodel"."tsvector" @@ to_tsquery('portuguese', para & os))''',
str(qn_pt.query))
self.assertIn('''ts_rank("testapp_tsmultidicmodel"."tsvector", to_tsquery('portuguese', para & os)) AS "rank"''',
str(qn_pt.query))
self.assertEqual(len(qn_en), 1)
self.assertEqual(len(qn_pt), 0)
def test_rank_dictionay_group_by_related(self):
qn_base_pt = Related.objects.filter(multiple__dictionary='portuguese')
qn_pt = qn_base_pt.annotate(rank=FTSRankDictionay(
multiple__tsvector__portuguese__tsquery='para & os'))
self.assertIn('"testapp_tsmultidicmodel"."tsvector"',
str(qn_pt.query).split('GROUP BY')[-1])
def test_rank_cd_dictionary(self):
qn_base_pt = Related.objects.filter(multiple__dictionary='portuguese')
qn_base_en = Related.objects.filter(multiple__dictionary='english')
qn_pt = qn_base_pt.annotate(rank=FTSRankCdDictionary(
multiple__tsvector__portuguese__tsquery='para & os'))
qn_en = qn_base_en.annotate(rank=FTSRankCdDictionary(
multiple__tsvector__english__tsquery='para & os'))
self.assertIn('''"testapp_tsmultidicmodel"."tsvector" @@ to_tsquery('english', para & os))''',
str(qn_en.query))
self.assertIn('''ts_rank_cd("testapp_tsmultidicmodel"."tsvector", to_tsquery('english', para & os)) AS "rank"''',
str(qn_en.query))
self.assertIn('''"testapp_tsmultidicmodel"."tsvector" @@ to_tsquery('portuguese', para & os))''',
str(qn_pt.query))
self.assertIn('''ts_rank_cd("testapp_tsmultidicmodel"."tsvector", to_tsquery('portuguese', para & os)) AS "rank"''',
str(qn_pt.query))
def test_transform_dictionary_exception(self):
with self.assertRaises(exceptions.FieldError) as msg:
TSMultidicModel.objects.annotate(
rank=FTSRankDictionay(tsvector__nodict='malucos')),
self.assertEqual(
str(msg.exception),
"The 'nodict' isn't valid Lookup for FTSRankDictionay")
def test_transform_exception(self):
with self.assertRaises(exceptions.FieldError) as msg:
list(TSMultidicModel.objects.annotate(
rank=FTSRankDictionay(tsvector__portuguese='malucos')))
self.assertEqual(
str(msg.exception),
"The 'portuguese' isn't valid Lookup for FTSRankDictionay")
``` |
{
"source": "18F/django-pgjson",
"score": 2
} |
#### File: django-pgjson/django_pgjson/lookups.py
```python
from django.utils.functional import cached_property
from django.utils import six
from django.db.models import Transform, Lookup, CharField
class KeyTransform(Transform):
output_field = CharField()
def __init__(self, key, base_field, *args, **kwargs):
super(KeyTransform, self).__init__(*args, **kwargs)
try:
self.key = int(key)
except ValueError:
self.key = key
self.base_field = base_field
def as_sql(self, qn, connection):
lhs, params = qn.compile(self.lhs)
if isinstance(self.key, int):
return "(%s->>%s)" % (lhs, self.key), params
return "(%s->>'%s')" % (lhs, self.key), params
@cached_property
def output_type(self):
return self.base_field
class KeyTransformFactory(object):
def __init__(self, key, base_field):
self.key = key
self.base_field = base_field
def __call__(self, *args, **kwargs):
return KeyTransform(self.key, self.base_field, *args, **kwargs)
class ExactLookup(Lookup):
lookup_name = 'exact'
def as_sql(self, qn, connection):
lhs, lhs_params = self.process_lhs(qn, connection)
rhs, rhs_params = self.process_rhs(qn, connection)
if len(rhs_params) == 1 and hasattr(rhs_params[0], "adapted"):
adapted = rhs_params[0].adapted
if isinstance(adapted, six.string_types):
rhs_params[0] = adapted
params = lhs_params + rhs_params
return '%s = %s' % (lhs, rhs), params
class ArrayLengthLookup(Lookup):
lookup_name = 'array_length'
def as_sql(self, qn, connection):
lhs, lhs_params = self.process_lhs(qn, connection)
rhs, rhs_params = self.process_rhs(qn, connection)
params = lhs_params + rhs_params
return 'json_array_length(%s) = %s' % (lhs, rhs), params
class JsonBArrayLengthLookup(Lookup):
lookup_name = 'array_length'
def as_sql(self, qn, connection):
lhs, lhs_params = self.process_lhs(qn, connection)
rhs, rhs_params = self.process_rhs(qn, connection)
params = lhs_params + rhs_params
return 'jsonb_array_length(%s) = %s' % (lhs, rhs), params
class JsonBContainsLookup(Lookup):
"""
jsonb-specific containment lookup that can be used as follows::
        YourModel.objects.filter(data__jcontains={"author": "<NAME>"})
This will be translated into the following SQL::
select * from yourmodel where data @> '{"author": "<NAME>"}'::jsonb
You can also do interesting queries like::
        MyMovie.objects.filter(data__jcontains={"tags": ["sad", "romantic"]})
Such queries can be accelerated by GiN indices on the jsonb field in
question.
:author: <NAME> <<EMAIL>>
"""
# ideally we would call this 'contains'. However, in Django 'contains'
# lookups are explicitly handled by LIKE queries, and the
# Field.get_db_prep_lookup will then prepare your data for a DB LIKE query
# breaking our jsonb containment query. -- cpb
lookup_name = 'jcontains'
def as_sql(self, qn, connection):
lhs, lhs_params = self.process_lhs(qn, connection)
rhs, rhs_params = self.process_rhs(qn, connection)
params = lhs_params + rhs_params
return "{0} @> {1}::jsonb".format(lhs, rhs), params
class JsonBHasLookup(Lookup):
""" JsonB specific lookup for the has (?) operator """
lookup_name = 'jhas'
def as_sql(self, qn, connection):
lhs, lhs_params = self.process_lhs(qn, connection)
rhs, rhs_params = self.process_rhs(qn, connection)
params = lhs_params + rhs_params
return "{0} ? {1}".format(lhs, rhs), params
class JsonBHasAnyLookup(Lookup):
""" JsonB specific lookup for the has any (?|) operator """
lookup_name = 'jhas_any'
def as_sql(self, qn, connection):
lhs, lhs_params = self.process_lhs(qn, connection)
rhs, rhs_params = self.process_rhs(qn, connection)
params = lhs_params + rhs_params
return "{0} ?| {1}".format(lhs, rhs), params
class JsonBHasAllLookup(Lookup):
""" JsonB specific lookup for the has all (?&) operator """
lookup_name = 'jhas_all'
def as_sql(self, qn, connection):
lhs, lhs_params = self.process_lhs(qn, connection)
rhs, rhs_params = self.process_rhs(qn, connection)
params = lhs_params + rhs_params
return "{0} ?& {1}".format(lhs, rhs), params
``` |
{
"source": "18F/django-uswds-forms",
"score": 2
} |
#### File: app/examples/_startexample_template.py
```python
from django.shortcuts import render
from django import forms
import uswds_forms
class MyForm(uswds_forms.UswdsForm):
president = forms.ChoiceField(
label="Example form field",
widget=uswds_forms.UswdsRadioSelect,
choices=(
('a', 'Choice A'),
('b', 'Choice B'),
)
)
def view(request):
return render(request, 'examples/_startexample_template.html', {
'form': MyForm() if request.method == 'GET' else MyForm(request.POST)
})
```
#### File: management/commands/startexample.py
```python
import re
from pathlib import Path
from argparse import ArgumentParser
from django.core.management.base import BaseCommand, CommandError
from app.example import Example, APP_DIR
SLUG_RE = re.compile('^[A-Za-z0-9_]+$')
SLUG_HELP = "Alphanumerics and underscores only"
EXAMPLE_NAMES_PATH = "app.views.EXAMPLE_NAMES"
EXAMPLE_TESTS_PATH = APP_DIR / 'tests' / 'test_examples.py'
TEMPLATE_NAME = '_startexample_template'
def untemplatize(f: Path, example: Example):
return f.read_text().replace(TEMPLATE_NAME, example.basename)
class Command(BaseCommand):
help = 'Creates a new example for the gallery.'
def add_arguments(self, parser: ArgumentParser):
parser.add_argument(
'example_slug',
help="Slug for example. {}.".format(SLUG_HELP)
)
parser.add_argument(
'--undo',
action='store_true',
help='Undo an earlier invocation of this command.'
)
def undo_copy(self, src: Path, dest: Path, example: Example) -> None:
relpath = dest.relative_to(Path.cwd())
if not dest.exists():
self.stdout.write("Hmm, {} does not exist.".format(relpath))
elif dest.read_text() != untemplatize(src, example):
self.stdout.write("{} has changed, not deleting.".format(relpath))
else:
dest.unlink()
self.stdout.write("Deleted {}.".format(relpath))
def copy(self, src: Path, dest: Path, example: Example) -> None:
relpath = dest.relative_to(Path.cwd())
if dest.exists():
self.stdout.write("{} exists, not overwriting.".format(relpath))
else:
dest.write_text(untemplatize(src, example))
self.stdout.write("Created {}.".format(relpath))
def handle(self, example_slug: str, undo: bool, **kwargs):
if not SLUG_RE.match(example_slug):
raise CommandError('Invalid slug! {}.'.format(SLUG_HELP))
template = Example(TEMPLATE_NAME)
ex = Example(example_slug)
if undo:
self.undo_copy(template.template_path, ex.template_path, ex)
self.undo_copy(template.jinja2_path, ex.jinja2_path, ex)
self.undo_copy(template.python_path, ex.python_path, ex)
else:
self.copy(template.template_path, ex.template_path, ex)
self.copy(template.jinja2_path, ex.jinja2_path, ex)
self.copy(template.python_path, ex.python_path, ex)
self.stdout.write("\nDone! Now edit the above files.")
self.stdout.write("Then, add '{}' to {}.".format(
example_slug,
EXAMPLE_NAMES_PATH,
))
self.stdout.write(
"You may also want to write tests for "
"the example in {}.".format(
EXAMPLE_TESTS_PATH.relative_to(Path.cwd())
)
)
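# Hedged usage sketch: based on the arguments defined above, the command would
# be invoked roughly like this (the example slug is a placeholder):
#
#     python manage.py startexample my_example
#     python manage.py startexample my_example --undo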
```
#### File: example/app/render_source.py
```python
import ast
from pathlib import Path
from django.utils.safestring import SafeString
try:
from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.lexers.templates import HtmlDjangoLexer
from pygments.formatters import HtmlFormatter
except ImportError:
highlight = None
def render_source(contents: str, filetype: str):
if highlight is None:
return contents
else:
formatter = HtmlFormatter(noclasses=True, style='trac')
if filetype == 'html+django':
lexer = HtmlDjangoLexer()
elif filetype == 'python':
lexer = PythonLexer()
else:
raise ValueError('unknown filetype: {}'.format(filetype))
return SafeString(highlight(contents, lexer, formatter))
def clean_python_source(source: str):
'''
Remove the leading docstring from the given source code.
'''
mod = ast.parse(source)
first_non_docstring = mod.body[1]
return '\n'.join(source.splitlines()[first_non_docstring.lineno - 1:])
def clean_template_source(source: str):
'''
Remove any un-indented {% include %} tags in the given template source.
'''
return '\n'.join(
line for line in source.splitlines()
if not line.startswith(r'{% include')
)
def render_template_source(f: Path):
return render_source(clean_template_source(f.read_text()), 'html+django')
def render_python_source(f: Path):
return render_source(clean_python_source(f.read_text()), 'python')
```
#### File: app/tests/test_render_source.py
```python
from django.test import TestCase
from ..render_source import clean_python_source, clean_template_source
class RenderSourceTests(TestCase):
def test_clean_python_source_works(self):
self.assertEqual(
clean_python_source("'''boop\n'''\nhello()\nthere()\n"),
"hello()\nthere()"
)
def test_clean_template_source_works(self):
self.assertEqual(
clean_template_source(r"{% include 'foo' %}" + "\nblah\n"),
'blah'
)
```
#### File: uswds_forms/tests/test_date.py
```python
from datetime import date
from django.test import SimpleTestCase
from django.core.exceptions import ValidationError
from uswds_forms.date import UswdsDateWidget, UswdsDateField
class DateWidgetTests(SimpleTestCase):
def test_get_field_names_works(self):
names = UswdsDateWidget.get_field_names('boop')
self.assertEqual(names.year, 'boop_0')
self.assertEqual(names.month, 'boop_1')
self.assertEqual(names.day, 'boop_2')
def test_render_assigns_ids_and_labels(self):
widget = UswdsDateWidget()
content = widget.render('boop', None, {'id': 'blarg'})
self.assertRegexpMatches(content, 'id="blarg_0"')
self.assertRegexpMatches(content, 'id="blarg_1"')
self.assertRegexpMatches(content, 'id="blarg_2"')
self.assertRegexpMatches(content, 'for="blarg_0"')
self.assertRegexpMatches(content, 'for="blarg_1"')
self.assertRegexpMatches(content, 'for="blarg_2"')
def test_render_assigns_names(self):
widget = UswdsDateWidget()
content = widget.render('boop', None, {'id': 'blarg'})
self.assertRegexpMatches(content, 'name="boop_0"')
self.assertRegexpMatches(content, 'name="boop_1"')
self.assertRegexpMatches(content, 'name="boop_2"')
def test_render_assigns_hint_id_and_aria_describedby(self):
widget = UswdsDateWidget()
content = widget.render('boop', None, {'id': 'foo'})
self.assertRegexpMatches(content, 'id="foo_hint"')
self.assertRegexpMatches(content, 'aria-describedby="foo_hint"')
def test_render_takes_value_as_list(self):
widget = UswdsDateWidget()
content = widget.render('boop', [2006, 7, 29], {'id': 'foo'})
self.assertRegexpMatches(content, 'value="2006"')
self.assertRegexpMatches(content, 'value="7"')
self.assertRegexpMatches(content, 'value="29"')
def test_render_takes_value_as_date(self):
widget = UswdsDateWidget()
content = widget.render('boop', date(2005, 6, 28), {'id': 'foo'})
self.assertRegexpMatches(content, 'value="2005"')
self.assertRegexpMatches(content, 'value="6"')
self.assertRegexpMatches(content, 'value="28"')
def test_render_does_not_raise_exception_on_empty_lists(self):
widget = UswdsDateWidget()
content = widget.render('boop', [], {'id': 'foo'})
# The <input>s should not have any 'value' attribute whatsoever.
self.assertNotRegexpMatches(content, 'value')
def test_decompress_works_with_dates(self):
widget = UswdsDateWidget()
self.assertEqual(widget.decompress(date(2005, 6, 28)), [2005, 6, 28])
def test_decompress_works_with_none(self):
widget = UswdsDateWidget()
self.assertEqual(widget.decompress(None), [None, None, None])
class DateFieldTests(SimpleTestCase):
def test_compress_returns_date_for_valid_dates(self):
field = UswdsDateField()
self.assertEqual(field.compress([2005, 6, 28]), date(2005, 6, 28))
def test_compress_raises_validation_errors_for_invalid_dates(self):
field = UswdsDateField()
with self.assertRaisesRegexp(
ValidationError,
'Invalid date: day is out of range for month.'
):
field.compress([2001, 2, 31])
def test_compress_returns_none_when_data_list_is_falsy(self):
field = UswdsDateField()
self.assertEqual(field.compress(None), None)
self.assertEqual(field.compress([]), None)
```
#### File: uswds_forms/tests/test_form.py
```python
from django import forms
from uswds_forms.form import UswdsForm
def test_as_fieldsets_works():
class MyForm(UswdsForm):
my_field = forms.CharField(label="my field", help_text="my help")
form = MyForm()
html = form.as_fieldsets()
assert '<input' in html
assert 'my field' in html
assert 'my help' in html
``` |
{
"source": "18F/djorm-ext-pgfulltext",
"score": 2
} |
#### File: djorm-ext-pgfulltext/djorm_pgfulltext/utils.py
```python
import psycopg2
from django.db import connection
from django.utils.encoding import force_text
def adapt(text):
connection.ensure_connection()
a = psycopg2.extensions.adapt(force_text(text))
c = connection.connection
# This is a workaround for https://github.com/18F/calc/issues/1498.
if hasattr(c, '__wrapped__'):
c = getattr(c, '__wrapped__')
a.prepare(c)
return a
``` |
{
"source": "18F/dodsbir-scrape",
"score": 3
} |
#### File: dodsbir-scrape/lib/scrape.py
```python
from datetime import datetime
import json, re, sys, time
from bs4 import BeautifulSoup
import requests
from dodsbir.topic import Topic, TopicEncoder
URL_RESULTS_FORM = "http://dodsbir.net/Topics/BasicTopicsResultsForm.asp"
URL_TOPIC_LIST = "http://dodsbir.net/Topics/Default.asp"
URL_TOPIC_BASE = "http://www.dodsbir.net/sitis/display_topic.asp?Bookmark"
URL_TOPIC_QUICK = "http://www.dodsbir.net/sitis/quick_scan.asp"
URL_SOLICITATION_SCHED = "http://www.acq.osd.mil/osbp/sbir/sb/schedule.shtml"
class Scraper:
"""base class for DOD SBIR Scraping"""
def __init__(self, topic_list_url=URL_TOPIC_LIST):
self.solicitation = {}
self.solicitation_schedule = [] # this will contain nested dicts
self.topic_list_url = topic_list_url
self.topic_ids = {}
self.topics = []
def get_solicitation(self, soup):
"""extract solicitation information from page at URL_TOPIC_LIST"""
sol_header = soup.find(text=re.compile("Current Solicitation"))
s = {}
# maybe change solicitation_id to pull from
# http://www.dodsbir.net/sitis/quick_scan.asp -- seems to be more accurate
sol_id_raw = sol_header.parent.parent.next_sibling\
.next_sibling.contents[1].string
try:
s["solicitation_id"] = re.search(r'\d{4}\.[1-2A-B]', sol_id_raw).group()
except Exception as e:
s["solicitation_id"] = sol_id_raw
s["pre_release_date"] = self._parse_date(sol_header.parent.parent.next_sibling.next_sibling.contents[3].string)
s["proposals_begin_date"] = self._parse_date(sol_header.parent.parent.next_sibling.next_sibling.contents[5].string)
s["proposals_end_date"] = self._parse_date(sol_header.parent.parent.next_sibling.next_sibling.contents[7].contents[0].string)
agencies = sol_header.parent.parent.next_sibling.next_sibling.contents[9].string.split(',')
s["participating_components"] = [ x.strip() for x in agencies ]
return s
def stage_current_solicitation(self):
"""gets information for current solicitation and grabs list of topics
for the current solicitation"""
resp = requests.get(self.topic_list_url)
resp.connection.close() # fixes warning in Python 3.4 about unclosed socket
soup = BeautifulSoup(resp.text)
self.solicitation = self.get_solicitation(soup)
self.topic_ids = self.get_topic_list(soup)
return True
def get_topic_list(self, soup):
"""go to topic_list_url and extract list of topics"""
self.topic_ids = {}
options = soup.find_all('select')[0].find_all('option')
for option in options:
if option.string.strip() != '':
self.topic_ids[option.string.strip()] = option['value']
return self.topic_ids
def html_to_topic(self, html, topic_id):
"""extract topic information from html"""
soup = BeautifulSoup(html)
meta_rows = soup.findAll('table')[1].contents
rows = soup.findAll('table')[2].contents
topic = Topic()
topic.program = meta_rows[1].findAll('td')[1].contents[0].string
topic.topic_number = meta_rows[2].findAll('td')[1].contents[0].string
topic.title = meta_rows[3].findAll('td')[1].contents[0].string
topic.areas = [ x.strip() for x in meta_rows[4].findAll('td')[1]\
.contents[0].string.split(',') ]
topic.url = "%s=%s" % (URL_TOPIC_BASE, topic_id)
acq_header = soup.find(text=re.compile("Acquisition Program:"))
if acq_header is not None:
acq_field = acq_header.parent.parent.parent.next_sibling\
.contents[0].string
if acq_field is not None:
topic.acquisition_program = acq_field.strip()
obj_header = soup.find(text=re.compile("Objective:"))
topic.objective = obj_header.parent.parent.parent.next_sibling\
.contents[0].string.strip()
desc_header = soup.find(text=re.compile("Description:"))
topic.description = desc_header.parent.parent.parent.next_sibling\
.contents[0].string.strip()
topic.phases = [p.strip() for p in soup\
.find_all(text=re.compile("^PHASE"))]
ref_header = soup.find(text=re.compile("References:"))
topic.references = [reference.strip() \
for reference in ref_header.parent.parent.parent.next_sibling\
.contents[0].find_all(text=re.compile("\n\d."))]
kw_header = soup.find(text=re.compile("^Keywords:"))
try:
topic.keywords = [keyword.strip() for keyword in kw_header.parent\
.parent.parent.next_sibling.contents[0].string.strip()\
.rstrip('.').split(',')]
except:
topic.keywords = []
return topic
def get_topic(self, topic_number):
"""given a topic number, fetch topic page from dodsbir.net"""
if not self.solicitation:
self.stage_current_solicitation()
topic_id = self.topic_ids[topic_number]
data = {"selTopic":topic_id, "WhereFrom":"basicTopicNo"}
resp = requests.post(URL_RESULTS_FORM, data=data)
resp.connection.close() # fixes py3 warning
topic = self.html_to_topic(resp.text, topic_id)
topic.solicitation_id = "DoD {} {}".format(topic.program, self.solicitation['solicitation_id'])
topic.pre_release_date = self.solicitation['pre_release_date']
topic.proposals_begin_date = self.solicitation['proposals_begin_date']
topic.proposals_end_date = self.solicitation['proposals_end_date']
topic.participating_components = self.solicitation['participating_components']
return topic
def get_all_topics(self, max=None):
"""loop through each topic id in topic_ids and scrape topic from
dodsbir.net"""
i = 0
tot = len(self.topic_ids)
for key, value in self.topic_ids.items():
i = i + 1
self.topic_list = []
topic = self.get_topic(key)
self.topics.append(topic)
sys.stdout.write("Completed %d of %d \r" % (i, tot))
sys.stdout.flush()
if i == max:
break
time.sleep(1)
return self.topics
def __json__(self):
"""json representation of all topics scraped"""
j = "[%s]" % (",".join([json.dumps(t.__dict__, cls=TopicEncoder) for t in self.topics]))
return j
def save_as_json(self, path="alltopics.json"):
"""save json representation of all topics to filesystem"""
with open(path, "w") as outfile:
outfile.write(self.__json__())
return self.__json__()
def _parse_date(self, string):
return datetime.strptime(string, "%B %d, %Y")
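# Hedged usage sketch, using only the methods defined above (the topic number
# is the one exercised in the tests below):
#
#     scraper = Scraper()
#     scraper.stage_current_solicitation()        # solicitation info + topic ids
#     topic = scraper.get_topic("SB151-004")      # scrape a single topic
#     scraper.get_all_topics(max=10)              # or scrape the first 10 topics
#     scraper.save_as_json("alltopics.json")      # dump everything scraped so far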
```
#### File: dodsbir-scrape/lib/tests.py
```python
import unittest
from urllib import request
from scrape import Scraper, URL_TOPIC_LIST
class ServerTests(unittest.TestCase):
"""tests to ensure pages at dodsbir.net can be reached and lists and topics
can be retrieved"""
def setUp(self):
self.s = Scraper()
def testServerResponse(self):
self.assertTrue(request.urlopen(URL_TOPIC_LIST))
def testStageSolicitation(self):
self.assertFalse(self.s.topic_ids)
self.s.stage_current_solicitation()
self.assertTrue(self.s.topic_ids)
class TopicTests(unittest.TestCase):
"""tests to ensure Topics are successfully scraped and stored in the proper
format"""
def setUp(self):
self.s = Scraper()
def testTopicRetrieved(self):
self.assertFalse(hasattr(self, "topic"))
topic = self.s.get_topic("SB151-004")
self.assertTrue(topic)
def testTopicSolicitationID(self):
topic = self.s.get_topic("SB151-004")
self.assertEqual(topic.solicitation_id, "DoD SBIR 2015.1")
def testDatesConvertedToPythonObject(self):
topic = self.s.get_topic("SB151-004")
self.assertTrue(topic.pre_release_date.month)
self.assertTrue(topic.proposals_begin_date.month)
self.assertTrue(topic.proposals_end_date.month)
def main():
unittest.main()
if __name__ == '__main__':
main()
``` |
{
"source": "18F/dsnap_registration_service",
"score": 2
} |
#### File: dsnap_registration_service/dsnap_registration/serializers.py
```python
from django.utils import timezone
from jsonschema import Draft7Validator
from rest_framework import serializers
from .models import Registration
REGISTRATION_SCHEMA = {
"$schema": "http://json-schema.org/draft-07/schema#",
"definitions": {
"nonzero_money": {
"anyOf": [
{"type": "number", "minimum": 0},
{"type": "null"}
]
},
"address": {
"type": "object",
"properties": {
"street1": {"type": "string"},
"street2": {"type": "string"},
"city": {"type": "string"},
"state": {"type": "string"},
"zipcode": {"type": "string"}
}
},
},
"type": "object",
"properties": {
"disaster_id": {"type": "number", "minimum": 0},
"preferred_language": {"enum": ["en", "es"]},
"money_on_hand": {"$ref": "#/definitions/nonzero_money"},
"phone": {
"anyOf": [
{"type": "string", "pattern": r"^\d{10}$"},
{"type": "null"},
]
},
"email": {"type": "string"},
"residential_address": {"$ref": "#/definitions/address"},
"mailing_address": {"$ref": "#/definitions/address"},
"county": {"type": "string"},
"state_id": {"type": "string"},
"has_inaccessible_liquid_resources": {"type": "boolean"},
"has_lost_or_inaccessible_income": {"type": "boolean"},
"purchased_or_plans_to_purchase_food": {"type": "boolean"},
"disaster_expenses": {
"type": "object",
"properties": {
"food_loss": {
"$ref": "#/definitions/nonzero_money"},
"home_or_business_repairs": {
"$ref": "#/definitions/nonzero_money"},
"temporary_shelter_expenses": {
"$ref": "#/definitions/nonzero_money"},
"evacuation_expenses": {
"$ref": "#/definitions/nonzero_money"},
"other": {
"$ref": "#/definitions/nonzero_money"},
},
"additionalProperties": False
},
"household": {
"type": "array",
"items": {
"type": "object",
"properties": {
"first_name": {"type": "string"},
"middle_name": {"type": "string"},
"last_name": {"type": "string"},
"dob": {"type": "string"},
"sex": {"enum": ["male", "female", ""]},
"ssn": {
"anyOf": [
{"type": "string", "pattern": r"^\d{9}$"},
{"type": "null"},
]
},
"race": {
"enum": [
"American Indian or Alaskan Native",
"Asian",
"Black or African American",
"Native Hawaiian or Other Pacific Islander",
"White",
""
]
},
"ethnicity": {
"enum": [
"Hispanic or Latino",
"Not Hispanic or Latino",
""
]
},
"has_food_assistance": {"type": "boolean"},
"income": {
"type": "object",
"properties": {
"self_employed": {
"$ref": "#/definitions/nonzero_money"},
"unemployment":
{"$ref": "#/definitions/nonzero_money"},
"cash_assistance": {
"$ref": "#/definitions/nonzero_money"},
"disability": {
"$ref": "#/definitions/nonzero_money"},
"social_security": {
"$ref": "#/definitions/nonzero_money"},
"veterans_benefits": {
"$ref": "#/definitions/nonzero_money"},
"alimony": {
"$ref": "#/definitions/nonzero_money"},
"child_support": {
"$ref": "#/definitions/nonzero_money"},
"other_sources": {
"$ref": "#/definitions/nonzero_money"}
},
"additionalProperties": False
},
"jobs": {
"type": "array",
"items": {
"type": "object",
"properties": {
"employer_name": {"type": "string"},
"pay": {"$ref": "#/definitions/nonzero_money"},
"is_dsnap_agency": {"type": "boolean"}
},
"additionalProperties": False
}
}
},
"additionalProperties": False
}
},
"ebt_card_number": {
"anyOf": [
{"type": "string", "pattern": r"^\d*$"},
{"type": "null"},
]
},
},
"required": [
"disaster_id",
],
"additionalProperties": False
}
REGISTRATION_STATUS_SCHEMA = {
"$schema": "http://json-schema.org/draft-07/schema#",
"type": "object",
"properties": {
"rules_service_approved": {"type": "boolean"},
"user_approved": {"type": "boolean"},
},
"required": [
"rules_service_approved",
"user_approved",
],
"additionalProperties": False
}
class RegistrationSerializer(serializers.ModelSerializer):
approved_by = serializers.ReadOnlyField(source='approved_by.username')
class Meta:
model = Registration
fields = '__all__'
def create(self, validated_data):
"""
Set original_data (which is set to be not editable) to the latest_data
on creation
"""
# Force null on original creation
validated_data['latest_data']['ebt_card_number'] = None
return Registration.objects.create(
original_data=validated_data['latest_data'], **validated_data)
def to_internal_value(self, data):
"""
        Eliminate the need for POST and other submissions to nest the actual
        data under a "latest_data" key by wrapping the incoming data on
        deserialization.
"""
new_data = {
"latest_data": data
}
return super().to_internal_value(new_data)
def validate(self, data):
errors = [e.message for e in
Draft7Validator(REGISTRATION_SCHEMA).iter_errors(data['latest_data'])]
if errors:
raise serializers.ValidationError(f"Validation failed: {errors}")
return data
class RegistrationStatusSerializer(serializers.ModelSerializer):
class Meta:
model = Registration
fields = '__all__'
def update(self, instance, validated_data):
instance.rules_service_approved = validated_data['rules_service_approved']
instance.user_approved = validated_data['user_approved']
instance.approved_by = validated_data['approved_by']
instance.approved_at = timezone.now()
instance.save()
return instance
def validate(self, data):
errors = [e.message for e in
Draft7Validator(REGISTRATION_STATUS_SCHEMA).iter_errors(data)]
if errors:
raise serializers.ValidationError(f"Validation failed: {errors}")
return data
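# Hedged usage sketch (field values are placeholders; per REGISTRATION_SCHEMA
# above, "disaster_id" is the only required property):
#
#     serializer = RegistrationSerializer(
#         data={"disaster_id": 34, "preferred_language": "en"})
#     serializer.is_valid(raise_exception=True)   # runs the JSON Schema check
#     registration = serializer.save()            # copies latest_data to original_data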
```
#### File: dsnap_registration_service/tests/test_api.py
```python
import base64
import copy
import pytest
from django.contrib.auth import get_user_model
from rest_framework import status
TEST_USERNAME = "admin"
TEST_PASSWORD = "<PASSWORD>"
TEST_AUTHORIZATION = "Basic {}".format(str(base64.b64encode(
f"{TEST_USERNAME}:{TEST_PASSWORD}".encode()), "utf-8"))
GOOD_PAYLOAD = {
"disaster_id": 34,
"preferred_language": "en",
"phone": "2165555555",
"email": "<EMAIL>",
"state_id": "ABC9876",
"money_on_hand": 100,
"residential_address": {
"street1": "250 Oakland Way",
"street2": "",
"city": "Oakland",
"state": "CA",
"zipcode": "94612"
},
"mailing_address": {
"street1": "365 Campus Rd",
"street2": "",
"city": "Cleveland",
"state": "OH",
"zipcode": "44121"
},
"county": "Alameda",
"household": [
{
"first_name": "John",
"last_name": "Doe",
"ssn": "123456789",
"sex": "male",
"race": "",
"ethnicity": "Hispanic or Latino"
},
{
"first_name": "Jane",
"last_name": "Doe",
"ssn": "223456789",
"sex": "female",
"race": "White",
"ethnicity": "Not Hispanic or Latino"
},
],
"ebt_card_number": None
}
def test_missing_required_field(client):
payload = copy.deepcopy(GOOD_PAYLOAD)
del payload["disaster_id"]
response = client.post('/registrations', data=payload,
content_type="application/json")
assert response.status_code == status.HTTP_400_BAD_REQUEST
assert response.json() == {
"Invalid request":
["""Validation failed: ["'disaster_id' is a required property"]"""]
}
def test_extra_field(client):
payload = copy.deepcopy(GOOD_PAYLOAD)
payload["EXTRA_FIELD"] = "Some extra value"
response = client.post('/registrations', data=payload,
content_type="application/json")
assert response.status_code == status.HTTP_400_BAD_REQUEST
assert response.json() == {
"Invalid request":
['Validation failed: '
'["Additional properties are not allowed '
'(\'EXTRA_FIELD\' was unexpected)"]']
}
@pytest.mark.django_db
def test_authentication(client):
payload = copy.deepcopy(GOOD_PAYLOAD)
response = client.post('/registrations', data=payload,
content_type="application/json")
assert response.status_code == status.HTTP_201_CREATED
result = response.json()
registration_id = result["id"]
response = client.get(f'/registrations/{registration_id}',
content_type="application/json")
assert response.status_code in (
status.HTTP_401_UNAUTHORIZED, status.HTTP_403_FORBIDDEN)
payload["preferred_language"] = "es"
response = client.put(f'/registrations/{registration_id}', data=payload,
content_type="application/json")
assert response.status_code in (
status.HTTP_401_UNAUTHORIZED, status.HTTP_403_FORBIDDEN)
response = client.delete(f'/registrations/{registration_id}',
content_type="application/json")
assert response.status_code in (
status.HTTP_401_UNAUTHORIZED, status.HTTP_403_FORBIDDEN)
response = client.put(f'/registrations/{registration_id}/status',
content_type="application/json")
assert response.status_code in (
status.HTTP_401_UNAUTHORIZED, status.HTTP_403_FORBIDDEN)
@pytest.mark.django_db
def test_lifecycle(authenticated_client):
payload = copy.deepcopy(GOOD_PAYLOAD)
response = authenticated_client.post('/registrations', data=payload,
content_type="application/json")
assert response.status_code == status.HTTP_201_CREATED
result = response.json()
assert "id" in result
assert "created_at" in result
assert "modified_at" in result
registration_id = result["id"]
assert result["original_data"] == result["latest_data"]
response = authenticated_client.get(f'/registrations/{registration_id}',
content_type="application/json",
HTTP_AUTHORIZATION=TEST_AUTHORIZATION)
assert response.status_code == status.HTTP_200_OK
result = response.json()
assert result["original_data"] == result["latest_data"]
payload["preferred_language"] = "es"
response = authenticated_client.put(f'/registrations/{registration_id}',
data=payload,
content_type="application/json",
HTTP_AUTHORIZATION=TEST_AUTHORIZATION)
result = response.json()
assert result["original_data"]["preferred_language"] == "en"
assert result["latest_data"]["preferred_language"] == "es"
response = authenticated_client.delete(
f'/registrations/{registration_id}', content_type="application/json",
HTTP_AUTHORIZATION=TEST_AUTHORIZATION
)
assert response.status_code == status.HTTP_204_NO_CONTENT
response = authenticated_client.get(f'/registrations/{registration_id}',
content_type="application/json",
HTTP_AUTHORIZATION=TEST_AUTHORIZATION)
assert response.status_code == status.HTTP_404_NOT_FOUND
@pytest.mark.django_db
def test_status(authenticated_client):
payload = copy.deepcopy(GOOD_PAYLOAD)
status_payload = {
"rules_service_approved": True,
"user_approved": True
}
response = authenticated_client.post('/registrations', data=payload,
content_type="application/json",
HTTP_AUTHORIZATION=TEST_AUTHORIZATION)
assert response.status_code == status.HTTP_201_CREATED
result = response.json()
registration_id = result["id"]
response = authenticated_client.put(
f'/registrations/{registration_id}/status',
content_type="application/json", data=status_payload,
HTTP_AUTHORIZATION=TEST_AUTHORIZATION
)
assert response.status_code == status.HTTP_200_OK
result = response.json()
assert result["rules_service_approved"] == \
status_payload["rules_service_approved"]
assert result["user_approved"] == status_payload["user_approved"]
# Strangely, the result of the PUT has the `approved_by` set to the
# userid and not the username. A GET is needed for the username
response = authenticated_client.get(f'/registrations/{registration_id}',
HTTP_AUTHORIZATION=TEST_AUTHORIZATION)
result = response.json()
assert result["approved_by"] == "admin"
@pytest.mark.django_db
def test_ebt_accepted_on_put_but_null_on_post(authenticated_client):
payload = copy.deepcopy(GOOD_PAYLOAD)
payload['ebt_card_number'] = '123456789'
response = authenticated_client.post(
'/registrations', data=payload, content_type="application/json",
HTTP_AUTHORIZATION=TEST_AUTHORIZATION
)
result = response.json()
assert result['original_data'] == result['latest_data']
assert result['original_data']['ebt_card_number'] is None
registration_id = result["id"]
response = authenticated_client.put(
f'/registrations/{registration_id}', data=payload,
content_type="application/json", HTTP_AUTHORIZATION=TEST_AUTHORIZATION
)
result = response.json()
assert result['original_data']['ebt_card_number'] is None
assert result['latest_data']['ebt_card_number'] ==\
payload['ebt_card_number']
@pytest.mark.django_db
def test_search_by_registrant_ssn(authenticated_client, payload1):
search_url = '/registrations?registrant_ssn={}'.format(
payload1['household'][0]['ssn'])
response = authenticated_client.get(search_url,
HTTP_AUTHORIZATION=TEST_AUTHORIZATION)
assert response.status_code == status.HTTP_200_OK
result = response.json()
assert len(result) == 1
assert result[0]["original_data"] == payload1
@pytest.mark.django_db
def test_search_by_state_id(authenticated_client, payload1):
search_url = '/registrations?state_id={}'.format(payload1['state_id'])
response = authenticated_client.get(search_url,
HTTP_AUTHORIZATION=TEST_AUTHORIZATION)
assert response.status_code == status.HTTP_200_OK
result = response.json()
assert len(result) == 1
assert result[0]["original_data"] == payload1
@pytest.mark.django_db
def test_search_by_non_registrant_ssn(authenticated_client, payload1):
search_url = '/registrations?registrant_ssn={}'.format(
payload1['household'][1]['ssn'])
response = authenticated_client.get(search_url,
HTTP_AUTHORIZATION=TEST_AUTHORIZATION)
assert response.status_code == status.HTTP_200_OK
result = response.json()
assert len(result) == 0
@pytest.mark.django_db
def test_search_by_registrant_last_name(
authenticated_client, payload1, payload2):
search_url = '/registrations?registrant_last_name={}'.format("doe")
response = authenticated_client.get(search_url,
HTTP_AUTHORIZATION=TEST_AUTHORIZATION)
assert response.status_code == status.HTTP_200_OK
result = response.json()
assert len(result) == 2
@pytest.mark.django_db
def test_search_by_registrant_last_name_and_registrant_ssn(
authenticated_client, payload1, payload2):
search_url = '/registrations?registrant_last_name={}&state_id={}'.format(
"Doe", payload1["state_id"])
response = authenticated_client.get(
search_url, HTTP_AUTHORIZATION=TEST_AUTHORIZATION)
assert response.status_code == status.HTTP_200_OK
result = response.json()
assert len(result) == 1
@pytest.fixture
def payload1(client):
payload1 = copy.deepcopy(GOOD_PAYLOAD)
client.post('/registrations', data=payload1,
content_type="application/json")
return payload1
@pytest.fixture
def payload2(client):
payload2 = copy.deepcopy(GOOD_PAYLOAD)
payload2["state_id"] = "ZZ987654321"
payload2["household"][0]["ssn"] = "987654321"
del payload2["household"][1]
client.post('/registrations', data=payload2,
content_type="application/json")
return payload2
@pytest.fixture
def authenticated_client(client):
get_user_model().objects.create_superuser(
username=TEST_USERNAME,
password=<PASSWORD>,
email="<EMAIL>")
return client
``` |
{
"source": "18F/ekip-api",
"score": 2
} |
#### File: ekip/nationalparks/api.py
```python
from restless.dj import DjangoResource
from restless.preparers import FieldsPreparer
from .models import FederalSite, FieldTripSite
class FederalSiteResource(DjangoResource):
""" The API endpoint for FederalSites. """
preparer = FieldsPreparer(fields={
'name': 'name',
'city': 'city'
})
def list(self, state=None, everykid=None):
if self.request and 'state' in self.request.GET:
state = self.request.GET.get('state')
if self.request and 'everykid' in self.request.GET:
# Only return those sites that issue the Every Kid in a Park pass
everykid = True
query = FederalSite.objects.all().order_by('name')
if state:
            query = query.filter(state=state)
if everykid:
query = query.filter(access_pass=True, active_participant=True)
return query
class FieldTripResource(DjangoResource):
""" The API endpoint for FieldTripSites. """
preparer = FieldsPreparer(fields={
'name': 'name',
'city': 'city',
'website': 'website',
'zipcode': 'zipcode',
'advance_reservation': 'advance_reservation',
'larger_groups': 'larger_groups'
})
def list(self, state=None):
if self.request and 'state' in self.request.GET:
state = self.request.GET.get('state', None)
if state:
return FieldTripSite.objects.filter(state=state).order_by('name')
return FieldTripSite.objects.all().order_by('name')
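# Hedged wiring sketch: restless's DjangoResource conventionally exposes a
# .urls() helper; the URL prefixes below are assumptions, not taken from this
# project's urlconf:
#
#     from django.conf.urls import include, url
#
#     urlpatterns = [
#         url(r'^federal-sites/', include(FederalSiteResource.urls())),
#         url(r'^field-trips/', include(FieldTripResource.urls())),
#     ]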
``` |
{
"source": "18F/emoji_search",
"score": 3
} |
#### File: 18F/emoji_search/emoji_search.py
```python
import click
from pprint import pprint
import re
import requests
import sys
import time
try:
from api_token import API_TOKEN
except ImportError:
print('Please store API_TOKEN in api_token.py')
sys.exit(1)
MESSAGE_ENDPOINT = 'https://slack.com/api/search.messages'
USER_ENDPOINT = 'https://slack.com/api/users.info'
CHANNEL_ENDPOINT = 'https://slack.com/api/channels.info'
def lookup_username(userid):
"""Looks up username from Slack API based on userid
Expects:
userid: a valid userid of form <@XXX>
Returns:
string of username in form @YYYYY
"""
userid = re.sub('[<@|>]', '', userid)
payload = {'token': API_TOKEN, 'user': userid}
results = requests.get(USER_ENDPOINT, params=payload).json()
if results['ok']:
return '@' + results['user']['name']
return 'Unknown User'
def lookup_channel(channelid):
"""Looks up channel name from Slack API based on channelid
Expects:
channelid: a valid userid of form <#XXX>
Returns:
string of username in form #YYYYY
"""
channelid = re.sub('[<#|>]', '', channelid)
payload = {'token': API_TOKEN, 'channel': channelid}
results = requests.get(CHANNEL_ENDPOINT, params=payload).json()
if results['ok']:
return '#' + results['channel']['name']
return 'Unknown Channel'
def filter_by_date(messages, start, end):
try:
start = time.mktime(time.strptime(start, '%m-%d-%Y'))
end = time.mktime(time.strptime(end, '%m-%d-%Y'))
except:
raise ValueError('Invalid date')
return [m for m in messages if end > float(m['ts']) > start]
def format_text(text):
"""Replaces all userids and channelids with plaintext names
Expects:
A string with 0 or more userids and channelids
Returns:
The same text with those ids replaced
"""
text = re.sub('<@\w*>', lambda x: lookup_username(x.group()), text)
text = re.sub('<#\w*>', lambda x: lookup_channel(x.group()), text)
return text
@click.command()
@click.option('--emoji', help=('Text name of the emoji to search, without '
'surrounding :s'))
@click.option('--outfile', help=('Outputfile to write results to.'))
@click.option('--startdate', help=('Date to begin search. Must be in '
'MM-DD-YYYY format.'), default='01-01-1999')
@click.option('--enddate', help=('Date to end search. Must be in '
'MM-DD-YYYY format.'), default='12-12-2222')
def get_messages(emoji, outfile, startdate, enddate):
if not emoji:
raise ValueError('Please supply an emoji name to search by')
print('Querying Slack api')
matches = []
paging = {'page': 0, 'pages': 99999}
while paging['page'] < paging['pages']:
payload = {'token': API_TOKEN,
'query': 'has::' + emoji + ':',
'page': paging['page'] + 1}
response = requests.get('https://slack.com/api/search.messages',
params=payload).json()
if not response['ok']:
raise Exception('Query Failed!')
matches += response['messages']['matches']
paging = response['messages']['paging']
print('Fetched results page {} of {}'.format(paging['page'],
paging['pages']))
print('Filtering by date')
matches = filter_by_date(matches, startdate, enddate)
print('Formatting results')
results = [{'ts': time.ctime(float(m['ts'])),
'username': m['username'],
'permalink': m['permalink'],
'text': format_text(m['text'])}
for m in matches]
if outfile:
print('Writing results to file')
with open(outfile, 'w') as f:
pprint(results, stream=f)
else:
pprint(results)
if __name__ == '__main__':
get_messages()
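# Hedged usage sketch (emoji name, dates, and output file are placeholders):
#
#     python emoji_search.py --emoji tada --startdate 01-01-2016 \
#         --enddate 12-31-2016 --outfile tada_results.txt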
``` |
{
"source": "18F/EPA-emanifest",
"score": 2
} |
#### File: emanifest/api/views.py
```python
from django.core import serializers
from django.http import JsonResponse
from django.forms.models import model_to_dict
import json
from api.models import Manifest, Transporter
def manifest(request, manifest_id=None):
manifest = Manifest.objects.get(id=manifest_id)
return JsonResponse(manifest.as_json())
``` |
{
"source": "18F/FAC-Distiller-",
"score": 2
} |
#### File: audit_search/templatetags/agency_name.py
```python
from django.template import Library, Node
from distiller.data import constants
register = Library()
@register.filter
def agency_name(agency_prefix: str):
return ', '.join(constants.AGENCIES_BY_PREFIX[agency_prefix])
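# Hedged usage sketch: in a template, after `{% load agency_name %}`, the
# filter could be applied to a two-digit agency prefix; the variable name
# below is an assumption:
#
#     {{ audit.agency_prefix|agency_name }}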
```
#### File: distiller/data/jobs.py
```python
import random
import sys
import time
import pytz
from apscheduler.schedulers.background import BackgroundScheduler
from django.conf import settings
from django_apscheduler.jobstores import (
DjangoJobStore, register_events, register_job
)
from .etls import load_dumps
scheduler = BackgroundScheduler()
scheduler.add_jobstore(DjangoJobStore(), 'default')
@register_job(
scheduler,
'cron',
hour=0,
timezone=pytz.timezone('US/Eastern'),
replace_existing=True
)
def download_and_update_tables():
for table in load_dumps.FAC_TABLES_NAMES:
sys.stdout.write(f'Downloading table "{table}"...\n')
sys.stdout.flush()
load_dumps.download_table(
table,
target_dir=settings.LOAD_TABLE_ROOT
)
for table in load_dumps.FAC_TABLES_NAMES:
sys.stdout.write(f'Loading FAC table "{table}"...\n')
sys.stdout.flush()
load_dumps.update_table(
table,
source_dir=settings.LOAD_TABLE_ROOT,
)
register_events(scheduler)
scheduler.start()
```
#### File: management/commands/extract_pdfs.py
```python
import sys
from django.conf import settings
from django.core.management.base import BaseCommand
from ...etls import extract_pdf
class Command(BaseCommand):
help = "Extract PDF text from audits where applicable"
def add_arguments(self, parser):
parser.add_argument(
"--all", action="store_true", help="Extract all PDFs",
)
parser.add_argument("pdf_ids", nargs="*", type=int)
def handle(self, *args, **options):
nlp = extract_pdf.setup()
pdf_ids = options["pdf_ids"]
if options["all"]:
pdf_ids = extract_pdf.get_all_pdfs()
sys.stdout.write("Extracting all PDFs ...\n")
sys.stdout.flush()
for pdf_id in pdf_ids:
sys.stdout.write(f'Extracting PDF id "{pdf_id}"...\n')
sys.stdout.flush()
extract_pdf.process_audit_pdf(nlp, pdf_id)
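# Hedged usage sketch (the PDF ids are placeholders):
#
#     python manage.py extract_pdfs --all
#     python manage.py extract_pdfs 101 102 103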
```
#### File: distiller/fac_scraper/models.py
```python
import os
from compositefk.fields import CompositeForeignKey
from django.conf import settings
from django.db import models
from distiller.data.models import Audit
class FacDocument(models.Model):
class Meta:
ordering = ('dbkey', 'audit_year')
indexes = [
models.Index(fields=('dbkey', 'audit_year')),
]
version = models.IntegerField()
audit_year = models.DecimalField(
max_digits=4,
decimal_places=0,
help_text='Audit Year and DBKEY (database key) combined make up the primary key.'
)
dbkey = models.CharField(
max_length=6,
help_text='Audit Year and DBKEY (database key) combined make up the primary key.'
)
file_type = models.CharField(
max_length=8,
choices=(
('form', 'form'),
('audit', 'audit'),
)
)
file_name = models.CharField(max_length=32) # <dbkey><audit-year><version>.<pdf | xlsx>
#
# These fields are available in the Scrapy-crawled results, but not
# available if we refresh this table via an S3 ListBucket operation.
# To support ListBucket refreshes, omit these fields.
#
# report_id = models.CharField(max_length=8)
# ein = models.CharField(
# max_length=9,
# help_text='Employer Identification Number'
# )
# fy_end_date = models.DateField()
# fac_accepted_date = models.DateField()
# date_received = models.DateField()
# Map to General/Audit
audit = CompositeForeignKey(
Audit,
on_delete=models.DO_NOTHING,
to_fields={
'audit_year': 'audit_year',
'dbkey': 'dbkey'
},
related_name='documents'
)
def __str__(self):
return self.file_name
def get_absolute_url(self):
return os.path.join(settings.FAC_DOWNLOAD_ROOT, self.file_name)
``` |
{
"source": "18F/fec-shipper",
"score": 3
} |
#### File: 18F/fec-shipper/regulations.py
```python
import requests
import json
import sys
import os
from datetime import datetime
# local, feature, dev, stage or prod
env = sys.argv[1] if len(sys.argv) > 1 else 'local'
if env == 'local' or env == 'dev' or env == 'feature':
list_regs_url = 'https://fec-stage-eregs.18f.gov/api/regulation'
else:
list_regs_url = 'https://fec-%s-eregs.18f.gov/api/regulation' % env
reg_versions = requests.get(list_regs_url).json()['versions']
print(reg_versions)
regs = {}
for reg in reg_versions:
if '2016-annual' in reg['version']:
regs[reg['regulation']] = reg
for reg in reg_versions:
if reg['regulation'] not in regs:
by_date = datetime.strptime(reg['by_date'], "%Y-%m-%d")
most_recent = datetime.strptime(regs[reg['by_date']], "%Y-%m-%d") \
if reg['by_date'] in regs else datetime(1900, 1, 1)
if by_date > most_recent:
regs[reg['regulation']] = reg
print(reg)
print('total regs: %d' % len(regs))
reg_versions = regs.values()
annual_count = 0
for reg in reg_versions:
if '2016-annual' in reg['version']:
annual_count += 1
print('annual count: %d' % annual_count)
def get_sections(reg):
sections = {}
for node in reg['children'][0]['children']:
sections[tuple(node['label'])] = {'text': get_text(node),
'title': node['title']}
return sections
def get_text(node):
text = ''
if "text" in node:
text = node["text"]
for child in node["children"]:
text += ' ' + get_text(child)
return text
def get_regs():
for reg in reg_versions:
if env == 'local' or env == 'dev' or env == 'feature':
url = 'https://fec-stage-eregs.18f.gov/api/regulation/%s/%s' \
% (reg['regulation'], reg['version'])
else:
url = 'https://fec-%s-eregs.18f.gov/api/regulation/%s/%s' \
% (env, reg['regulation'], reg['version'])
regulation = requests.get(url).json()
sections = get_sections(regulation)
docs = []
for section_label in sections:
doc_id = '%s_%s_%s' % (section_label[0], section_label[1],
reg['version'])
section_formatted = '%s-%s' % (section_label[0], section_label[1])
reg_url = '/regulations/{0}/{1}#{0}'.format(section_formatted,
reg['version'])
no = '%s.%s' % (section_label[0], section_label[1])
name = sections[section_label]['title'].split(no)[1].strip()
doc = {"doc_id": doc_id, "name": name,
"text": sections[section_label]['text'], 'url': reg_url,
"no": no}
docs.append(doc)
yield docs
for docs in get_regs():
if env == 'local':
url = 'http://localhost:5000/v1/load/legal/'
if env == 'dev' or env == 'feature':
url = 'https://fec-%s-api.18f.gov/v1/load/legal/' % env
if env == 'stage':
url = 'https://fec-stage-api.18f.gov/v1/load/legal/'
if env == 'prod':
url = 'https://api.open.fec.gov/v1/load/legal/?api_key=%s' \
% os.environ['FEC_API_KEY']
data = {'doc_type': 'regulations', 'docs': docs,
'api_key': os.environ['FEC_API_KEY']}
headers = {'Content-Type': 'application/json'}
r = requests.post(url, data=json.dumps(data), headers=headers)
result = r.json()
print(result)
if not result['success']:
print(result)
print(reg_versions)
print('done.')
``` |
{
"source": "18F/federalist-garden-build-py",
"score": 3
} |
#### File: federalist-garden-build-py/publishing/SiteObject.py
```python
import binascii
import gzip
import hashlib
import mimetypes
from os import path
from datetime import datetime
mimetypes.init() # must initialize mimetypes
def remove_prefix(text, prefix):
'''Returns a copy of text with the given prefix removed'''
if text.startswith(prefix):
return text[len(prefix):]
return text
class SiteObject():
'''
An abstract class for an individual object that can be uploaded to S3
'''
def __init__(self, filename, md5, site_prefix='', dir_prefix=''):
self.filename = filename
self.md5 = md5
self.dir_prefix = dir_prefix
self.site_prefix = site_prefix
@property
def s3_key(self):
'''The object's key in the S3 bucket'''
filename = self.filename
if self.dir_prefix:
filename = remove_prefix(filename,
path.join(self.dir_prefix, ''))
return f'{self.site_prefix}/{filename}'
def upload_to_s3(self, bucket, s3_client):
'''Upload this object to S3'''
raise NotImplementedError # should be implemented in child classes
def delete_from_s3(self, bucket, s3_client):
'''Delete this object from S3'''
s3_client.delete_object(
Bucket=bucket,
Key=self.s3_key,
)
class SiteFile(SiteObject):
'''A file produced during a site build'''
GZIP_EXTENSIONS = ['html', 'css', 'js', 'json', 'svg']
def __init__(self, filename, dir_prefix, site_prefix, cache_control):
super().__init__(filename=filename,
md5=None,
dir_prefix=dir_prefix,
site_prefix=site_prefix)
self._compress()
self.md5 = self.generate_md5()
self.cache_control = cache_control
@property
def is_compressible(self):
'''Whether the file should be compressed'''
_, file_extension = path.splitext(self.filename)
# file_extension has a preceding '.' character, so use substring
return file_extension[1:].lower() in self.GZIP_EXTENSIONS
@property
def content_encoding(self):
'''"gzip" if the file is compressible, otherwise None'''
if self.is_compressible:
return 'gzip'
return None
@property
def content_type(self):
'''The best-guess mimetype of the file'''
content_type, _ = mimetypes.guess_type(self.filename)
return content_type
@property
def is_compressed(self):
'''Checks to see if the file is already compressed'''
with open(self.filename, 'rb') as test_f:
# '1f8b' is the magic flag that gzipped files start with
return binascii.hexlify(test_f.read(2)) == b'1f8b'
def generate_md5(self):
'''Generates an md5 hash of the file contents'''
hash_md5 = hashlib.md5() # nosec
with open(self.filename, "rb") as file:
for chunk in iter(lambda: file.read(4096), b""):
hash_md5.update(chunk)
return hash_md5.hexdigest()
def _compress(self):
'''GZips the file in-situ'''
if not self.is_compressible:
# shouldn't be compressed, so return
return
if self.is_compressed:
# already compressed, so return
return
# otherwise, gzip the file in place
with open(self.filename, 'rb') as f_in:
contents = f_in.read()
# Spoof the modification time so that MD5 hashes match next time
spoofed_mtime = datetime(2014, 3, 19).timestamp() # March 19, 2014
# Compress the contents and save over the original file
with gzip.GzipFile(self.filename, mode='wb',
mtime=spoofed_mtime) as gz_file:
gz_file.write(contents)
def upload_to_s3(self, bucket, s3_client):
extra_args = {
"CacheControl": self.cache_control,
"ServerSideEncryption": "AES256",
}
if self.content_encoding:
extra_args["ContentEncoding"] = self.content_encoding
if self.content_type:
extra_args["ContentType"] = self.content_type
s3_client.upload_file(
Filename=self.filename,
Bucket=bucket,
Key=self.s3_key,
# For allowed ExtraArgs, see
# https://boto3.readthedocs.io/en/latest/reference/customizations/s3.html#boto3.s3.transfer.S3Transfer.ALLOWED_UPLOAD_ARGS
ExtraArgs=extra_args,
)
class SiteRedirect(SiteObject):
'''
A redirect, typically from `/path/to/page => /path/to/page/`
'''
def __init__(self, filename, dir_prefix, site_prefix, base_url):
super().__init__(filename=filename,
dir_prefix=dir_prefix,
md5=None, # update after super().__init()__
site_prefix=site_prefix)
self.base_url = base_url
self.md5 = hashlib.md5(self.destination.encode()).hexdigest() # nosec
@property
def destination(self):
'''The destination of the redirect object'''
filename = self.filename
if self.dir_prefix:
if filename == self.dir_prefix:
return f'{self.base_url}/'
filename = remove_prefix(filename,
path.join(self.dir_prefix, ''))
return f'{self.base_url}/{filename}/'
@property
def s3_key(self):
filename = self.filename
if self.dir_prefix:
if filename == self.dir_prefix:
# then this is 'root' site redirect object
# (ie, the main index.html file)
return self.site_prefix
filename = remove_prefix(filename,
path.join(self.dir_prefix, ''))
return f'{self.site_prefix}/{filename}'
def upload_to_s3(self, bucket, s3_client):
'''Uploads the redirect object to S3'''
s3_client.put_object(
Body=self.destination,
Bucket=bucket,
Key=self.s3_key,
ServerSideEncryption='AES256',
WebsiteRedirectLocation=self.destination,
)
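# Hedged usage sketch (bucket name, prefixes, and paths are placeholders):
#
#     import boto3
#
#     s3_client = boto3.client('s3')
#     site_file = SiteFile(filename='_site/index.html', dir_prefix='_site',
#                          site_prefix='preview/owner/repo/branch',
#                          cache_control='max-age=60')
#     site_file.upload_to_s3('my-bucket', s3_client)
#
#     redirect = SiteRedirect(filename='_site/about', dir_prefix='_site',
#                             site_prefix='preview/owner/repo/branch',
#                             base_url='/preview/owner/repo/branch')
#     redirect.upload_to_s3('my-bucket', s3_client)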
```
#### File: federalist-garden-build-py/tasks/clone.py
```python
import os
from invoke import task, call
from log_utils import get_logger
from .common import (REPO_BASE_URL, CLONE_DIR_PATH,
SITE_BUILD_DIR_PATH, clean)
LOGGER = get_logger('CLONE')
def clone_url(owner, repository, access_token=''):
'''
Creates a URL to a remote git repository.
If `access_token` is specified, it will be included in the authentication
section of the returned URL.
>>> clone_url('owner', 'repo')
'https://github.com/owner/repo.git'
>>> clone_url('owner2', 'repo2', 'secret-token')
'https://[email protected]/owner2/repo2.git'
'''
repo_url = f'{REPO_BASE_URL}/{owner}/{repository}.git'
if access_token:
repo_url = f'{access_token}@{repo_url}'
return f'https://{repo_url}'
def _clone_repo(ctx, owner, repository, branch):
'''
Clones the GitHub repository specified by owner and repository
into CLONE_DIR_PATH.
Expects GITHUB_TOKEN to be in the environment.
'''
LOGGER.info(f'Cloning {owner}/{repository}/{branch} to {CLONE_DIR_PATH}')
github_token = os.environ['GITHUB_TOKEN']
ctx.run(
f'git clone -b {branch} --single-branch '
f'{clone_url(owner, repository, github_token)} '
f'{CLONE_DIR_PATH}'
)
# 'Exported' clone-repo task
clone_repo = task(
pre=[call(clean, which=CLONE_DIR_PATH)],
post=[
# Remove _site if it exists
call(clean, which=SITE_BUILD_DIR_PATH),
],
help={
"owner": "Owner of the repository to clone",
"repository": "Name of the repository to clone",
},
name='clone-repo'
)(_clone_repo)
@task
def push_repo_remote(ctx, owner, repository, branch,
remote_name='destination'):
'''
Pushes the git repo in CLONE_DIR_PATH to a new remote destination.
Expects GITHUB_TOKEN to be in the environment.
'''
LOGGER.info(f'Pushing cloned repository to {owner}/{repository}/{branch}')
github_token = os.environ['GITHUB_TOKEN']
with ctx.cd(CLONE_DIR_PATH):
ctx.run(f'git remote add {remote_name} '
f'{clone_url(owner, repository, github_token)}')
ctx.run(f'git push {remote_name} {branch}')
```
#### File: federalist-garden-build-py/test/test_build.py
```python
import requests_mock
from contextlib import ExitStack
from invoke import MockContext, Result
from tasks import (setup_node, run_federalist_script, setup_ruby,
build_jekyll, download_hugo, build_hugo, build_static)
from tasks.build import node_context
# TODO: use pyfakefs to setup PACKAGE_JSON_PATH, NVM_SH_PATH, etc
# so we can test those code paths
class TestSetupNode():
def test_it_is_callable(self):
ctx = MockContext(run=Result(''))
setup_node(ctx)
class TestNodeContext():
def test_default_node_context(self):
ctx = MockContext()
context_stack = node_context(ctx)
assert type(context_stack) == ExitStack
assert len(context_stack._exit_callbacks) == 1
def test_node_context_accepts_more_contexts(self):
ctx = MockContext()
context_stack = node_context(ctx, ctx.cd('boop'))
assert type(context_stack) == ExitStack
assert len(context_stack._exit_callbacks) == 2
class TestRunFederalistScript():
def test_it_is_callable(self):
ctx = MockContext()
run_federalist_script(ctx, branch='branch', owner='owner',
repository='repo', site_prefix='site/prefix',
base_url='/site/prefix')
class TestSetupRuby():
def test_it_is_callable(self):
ctx = MockContext(run=Result('ruby -v result'))
setup_ruby(ctx)
class TestBuildJekyll():
def test_it_is_callable(self):
ctx = MockContext(run=[
Result('gem install jekyll result'),
Result('jekyll version result'),
Result('jekyll build result'),
])
build_jekyll(ctx, branch='branch', owner='owner',
repository='repo', site_prefix='site/prefix',
config='boop: beep', base_url='/site/prefix')
class TestDownloadHugo():
def test_it_is_callable(self):
ctx = MockContext(run=[
Result('tar result'),
Result('chmod result'),
])
with requests_mock.Mocker() as m:
m.get(
'https://github.com/gohugoio/hugo/releases/download'
'/v0.23/hugo_0.23_Linux-64bit.tar.gz')
download_hugo(ctx)
def test_it_accepts_other_versions(self):
ctx = MockContext(run=[
Result('tar result'),
Result('chmod result'),
])
with requests_mock.Mocker() as m:
m.get(
'https://github.com/gohugoio/hugo/releases/download'
'/v0.25/hugo_0.25_Linux-64bit.tar.gz')
download_hugo(ctx, version='0.25')
class TestBuildHugo():
def test_it_is_callable(self):
ctx = MockContext(run=[
Result('tar result'),
Result('chmod result'),
Result('hugo version result'),
Result('hugo build result'),
])
with requests_mock.Mocker() as m:
m.get(
'https://github.com/gohugoio/hugo/releases/download'
'/v0.23/hugo_0.23_Linux-64bit.tar.gz')
build_hugo(ctx, branch='branch', owner='owner',
repository='repo', site_prefix='site/prefix',
base_url='/site/prefix', hugo_version='0.23')
class TestBuildstatic():
def test_it_is_callable(self):
ctx = MockContext()
build_static(ctx)
```
#### File: federalist-garden-build-py/test/test_clone.py
```python
import os
from invoke import MockContext, Result
from tasks import clone_repo, push_repo_remote
class TestCloneRepo():
def test_it_is_callable(self):
os.environ['GITHUB_TOKEN'] = 'fake_token'
ctx = MockContext(run=Result('git clone result'))
clone_repo(ctx, owner='owner', repository='repo', branch='master')
class TestPushRepoRemote():
def test_it_is_callable(self):
os.environ['GITHUB_TOKEN'] = 'fake_token'
ctx = MockContext(run=[
Result('git remote add result'),
Result('git push result')
]
)
push_repo_remote(ctx, owner='owner', repository='repo',
branch='branch', remote_name='boop')
``` |
{
"source": "18F/forest-service-prototype",
"score": 2
} |
#### File: forestserviceprototype/specialuseform/views.py
```python
from django.contrib.auth.decorators import login_required
from django.contrib.sites.shortcuts import get_current_site
from django.shortcuts import render, redirect, get_object_or_404
from django.http import HttpResponse
from .forms import NonCommercialUsePermitForm
from .models import NonCommercialUsePermit
from django.forms.models import model_to_dict
from django.core.mail import send_mail, mail_admins
import logging
import json
# Create your views here.
def home(request):
return render(request, "specialuseform/home.html")
def submit(request, id=None,
template_name='specialuseform/submit.html'):
if id:
permit = get_object_or_404(NonCommercialUsePermit, id=id)
submit_button_text = 'Save Changes'
else:
permit = NonCommercialUsePermit()
submit_button_text = 'Submit Your Application'
form = NonCommercialUsePermitForm(request.POST or None, instance=permit)
if request.POST and form.is_valid():
# Save the data to the database
form.save()
# Send the user a confirmation message
if form.instance.applicant_email:
recipient = [form.instance.applicant_email]
else:
recipient = []
send_mail(
subject='Application Submitted',
message='Your application for {0} has been received, and is #{1}. '
'To view its status or make changes, please visit {2}/submitted/{1}'.format(
form.instance.event_name,
form.instance.id,
get_current_site(request)
),
from_email='<EMAIL>',
recipient_list=recipient,
fail_silently=False
)
# Send the admins an email about the new permit
# TODO: The recipient(s) will vary based on the specific forest, but we
# don't have access to that information until this integrates with SUDS
send_mail(
subject='New application',
            message='There\'s a new application for {0}. To see more, and '
'approve or reject it, please visit {2}/submitted/{1}'.format(
form.instance.event_name,
form.instance.id,
get_current_site(request)
),
from_email='<EMAIL>',
recipient_list=[],
fail_silently=False
)
# Save was successful, so redirect to another page
return redirect('/submitted/'+str(form.instance.id)+'?new=true')
return render(request, template_name, {
'form': form, 'submit_text': submit_button_text
})
def submitted_permit(request, id):
check_status = False if request.GET.get('new') else True
permit = get_object_or_404(NonCommercialUsePermit.objects.filter(id=id))
permit_dict = NonCommercialUsePermitForm(data=model_to_dict(permit))
return render(request,
"specialuseform/submitted_permit.html",
{'permit': permit,
'permit_dict': permit_dict,
'check_status': check_status
}
)
def change_application_status(request, id, status):
decision_explanation = request.POST.get('deny_reason')
permit = get_object_or_404(NonCommercialUsePermit.objects.filter(id=id))
permit.decision_explanation = decision_explanation
permit.status = status
permit.save()
# Send an email notification with the status update
send_mail(
subject='Application Status Changed',
message='The status for your application for {0} has been updated to {1}. \
For more information, please visit {2}/submitted/{3}'.format(
permit.event_name,
permit.status,
get_current_site(request),
permit.id
),
from_email='<EMAIL>',
recipient_list=[permit.applicant_email],
fail_silently=False
)
json_response = json.dumps(
{"status": permit.status, "reason": permit.decision_explanation}
)
return HttpResponse(json_response, content_type="application/json")
def cancel(request, id):
permit = get_object_or_404(NonCommercialUsePermit.objects.filter(id=id))
permit.status = 'user_cancelled'
permit.save()
return render(request, 'specialuseform/cancel_permit.html',
{'permit': permit})
def print_permit(request, id):
permit = get_object_or_404(NonCommercialUsePermit.objects.filter(id=id))
return render(request, 'specialuseform/print_permit.html',
{'permit': permit})
# @todo: Fix redirect so users don't get stuck in admin screen. See #7.
@login_required
def applications(request):
permits = NonCommercialUsePermit.objects.all().order_by('-permit_status', 'start_date', 'created')
return render(request, "specialuseform/applications.html", {
'permits': permits
})
``` |
{
"source": "18F/fpds-getter",
"score": 3
} |
#### File: 18F/fpds-getter/fpds.py
```python
import requests
import xmltodict
import json
from urllib.parse import urlparse, parse_qs
class FPDS:
def get_data_from_fpds(self, start_date, end_date):
"""
        The powerhouse of the FPDS class. This iterates through the FPDS pages and yields a dict of entries for each page in the date range.
"""
d = self._get_data_from_url(start_date, end_date, offset=0)
yield d
# Here, we should call the generator to cycle through the range
pages = range(int(parse_qs(urlparse(d["feed"]["link"][2]["@href"]).query)["start"][0]), int(parse_qs(urlparse(d["feed"]["link"][1]["@href"]).query)["start"][0]), 10)
for page in pages:
# I *think* this is where we can take advantage of parallel processing...
d = self._get_data_from_url(start_date, end_date, page)
# This is where the XML for the page comes in... at this point, it's possible to do things like save data to file or do additional processing.
yield d
def _get_data_from_url(self, start_date, end_date, offset):
"""
Private method to get a page from FPDS.
Returns a dict from the FPDS XML
"""
baseurl = "https://www.fpds.gov/ezsearch/FEEDS/ATOM?FEEDNAME=PUBLIC"
params = "LAST_MOD_DATE:[%s,%s]" % (start_date, end_date)
r = requests.get(baseurl, params={"q": params, "start": offset})
# Convert the XML into a JSON object
d = xmltodict.parse(r.text)
return d
if __name__ == '__main__':
d = FPDS().get_data_from_fpds(start_date="2016/01/01", end_date="2016/01/01")
with open('results.json', 'w') as f:
        # json can't serialize a generator; materialize the pages first
        f.write(json.dumps(list(d)))
``` |
{
"source": "18F/frstack",
"score": 2
} |
#### File: ansible/library/realm.py
```python
import urllib
import requests
import threading
import os
import json
import amlib
class AMConnection:
def __init__(self, url, admin, password):
self.url = url
authheaders = {"Content-type": "application/json",
"X-OpenAM-Username": admin ,
"X-OpenAM-Password": password}
authurl = url + "/json/authenticate"
r = requests.post(authurl, data="{}", headers=authheaders)
t = json.loads(r.text)
id = t['tokenId']
r.close()
self.id = id
def create_realm(ctx,realmpath):
body = '{ "realm": "' + realmpath + '"}'
url = ctx.url + "/json/realms?_action=create"
headers = {"Content-type": "application/json",
"iplanetDirectoryPro": ctx.id }
response = requests.post(url, data=body, headers=headers)
    print(response.text)
response.close()
def main():
base_url = "http://openam.example.com:28080/openam"
ctx = AMConnection(base_url,"amadmin","password")
create_realm(ctx,"myrealm/foo2")
if __name__ == '__main__':
    main()
``` |
{
"source": "18F/fuzzycsv",
"score": 3
} |
#### File: fuzzycsv/tests/test_reheader.py
```python
import csv
import re
from io import StringIO
import pytest
from reheader import reheadered
_raw_txt_1 = u"""name,email,zip,
<NAME>,<EMAIL>,45309,
<NAME>,<EMAIL>,12345-1234,
<NAME>,<EMAIL>,21401,EAFP
<NAME>,<EMAIL>,,
"""
_raw_txt_2 = u"""zipcode, Name, e-mail, profession
02139, <NAME>, <EMAIL>, programmer
19803-0000, <NAME>, <EMAIL>, chemist
48198, <NAME>, <EMAIL>"""
def _next(iter):
try:
return iter.__next__()
except AttributeError:
return iter.next()
def _data(src=_raw_txt_1, reader=csv.DictReader, with_headers=True):
for (row_num, row) in enumerate(reader(StringIO(src))):
if with_headers or (row_num > 0):
yield row
class TestReheaderedExistence(object):
@classmethod
def setup_class(cls):
pass
def test_reheadered_exists(self):
reheadered
def test_reheadered_accepts_basic_args(self):
reheadered([{}, ], [])
@classmethod
def teardown_class(cls):
pass
class TestReheaderedFuzzyMatch(object):
def test_perfect_column_name_match(self):
for row in reheadered(_data(), ['name', 'email', 'zip']):
assert 'name' in row
assert 'email' in row
assert 'zip' in row
def test_perfect_column_name_match_list_of_lists(self):
data = _data(reader=csv.reader, with_headers=True)
for row in reheadered(data, ['name', 'email', 'zip']):
assert 'name' in row
assert 'email' in row
assert 'zip' in row
def test_list_of_lists_no_data(self):
infile = StringIO(_raw_txt_1.splitlines()[0])
data = csv.reader(infile)
with pytest.raises(StopIteration):
_next(reheadered(data, ['name', 'email', 'zip']))
def test_list_of_lists_whitespace_before_headers(self):
src = "\n\n\n\n" + _raw_txt_1
data = _data(src=src, reader=csv.reader, with_headers=True)
for row in reheadered(data, ['name', 'email', 'zip']):
assert 'name' in row
assert 'email' in row
assert 'zip' in row
def test_whitespace_safe_in_expected(self):
for row in reheadered(_data(), [' name', 'email', ' zip']):
assert 'name' in row
assert 'email' in row
assert 'zip' in row
def test_whitespace_safe_in_data(self):
for row in reheadered(
_data(_raw_txt_2), ['zipcode', 'Name', 'e-mail'],
minimum_score=100):
assert 'zipcode' in row
assert 'Name' in row
assert 'e-mail' in row
def test_fuzzy_column_name_match(self):
headers = ['Name', 'mail', 'zipcode']
for row in reheadered(_data(), headers):
assert 'Name' in row
assert 'name' not in row
assert 'mail' in row
assert row['mail']
assert 'email' not in row
assert 'zipcode' in row
assert 'zip' not in row
def test_fuzzy_column_name_match_list_of_lists(self):
data = _data(reader=csv.reader, with_headers=True)
headers = ['Name', 'mail', 'zipcode']
for row in reheadered(data, headers):
assert 'Name' in row
assert 'name' not in row
assert 'mail' in row
assert row['mail']
assert 'email' not in row
assert 'zipcode' in row
assert 'zip' not in row
def test_fuzzy_column_name_match_failure(self):
headers = ['Name', 'mail', 'thy one true zip code']
with pytest.raises(KeyError):
_next(reheadered(_data(), headers))
def test_optional_column_marker_tolerated(self):
headers = ['Name', '?:mail', 'zip']
for row in reheadered(_data(), headers):
assert 'mail' in row
assert '?:mail' not in row
def test_optional_column_marker_honored(self):
headers = ['Name', 'mail', 'zip', '?:nationality']
for row in reheadered(_data(), headers):
assert 'mail' in row
assert 'nationality' not in row
def test_custom_optional_marker(self):
headers = ['Name', 'mail', 'zip', 'OPTIONAL~nationality']
for row in reheadered(_data(), headers, optional_prefix='OPTIONAL~'):
assert 'mail' in row
assert 'nationality' not in row
class TestReheaderedRegexMatch(object):
def setup_method(self, method):
self.data = _data(reader=csv.reader)
def test_dict_of_headers_accepted(self):
headers = {'name': r'(\w+\s+)+', 'email': '\w+@\w+\.\w+'}
data = _data()
_next(reheadered(data, headers))
def test_list_of_lists_accepted(self):
headers = {'name': r'(\w+\s+)+', 'email': '\w+@\w+\.\w+'}
data = _data(reader=csv.reader, with_headers=True)
_next(reheadered(data, headers))
def test_regexes_preferred_to_fuzzy_match(self):
headers = {'columnA': '\w+@\w+\.\w+', 'columnB': '\d+'}
for row in reheadered(_data(), headers):
assert 'columnA' in row
assert '@' in row['columnA']
assert 'columnB' in row
if row['columnB']:
assert re.search('\d+', row['columnB'])
def test_compiled_regexes(self):
columnAregex = re.compile(r"""\w+ # name
@\w+ # email provider
\.\w+ # domain""", re.VERBOSE)
headers = {'columnA': columnAregex, 'columnB': re.compile(r'\d+')}
for row in reheadered(_data(), headers):
assert 'columnA' in row
assert '@' in row['columnA']
assert 'columnB' in row
if row['columnB']:
assert re.search(r'\d+', row['columnB'])
def test_mix_regexes_with_column_name_matches(self):
headers = {'columnA': '\w+@\w+\.\w+', 'zip': None}
for row in reheadered(_data(), headers):
assert 'columnA' in row
assert '@' in row['columnA']
assert 'zip' in row
if row['zip']:
assert re.search('\d+', row['zip'])
def test_optional_in_regex(self):
headers = {'zip': '\w+@\w+\.\w+', '?:email': '\d+'}
for row in reheadered(_data(), headers):
assert 'zip' in row
assert '@' in row['zip']
assert 'email' in row
if row['email']:
assert re.search('\d+', row['email'])
class TestOptionalArgs(object):
def test_keep_extra_false(self):
for row in reheadered(_data(), ['name', 'email'], keep_extra=False):
assert 'name' in row
assert 'email' in row
assert 'zip' not in row
assert '' not in row
def test_keep_extra(self):
for row in reheadered(_data(), ['name', 'email'], keep_extra=True):
assert 'name' in row
assert 'email' in row
assert 'zip' in row
def test_keep_extra_with_fuzzy_match(self):
for row in reheadered(_data(), ['Name', 'e-mail'], keep_extra=True):
assert 'Name' in row
assert 'e-mail' in row
assert 'zip' in row
def test_low_minimum_score(self):
headers = ['Name', 'mail', 'zip_code']
for row in reheadered(_data(), headers, minimum_score=50):
assert 'zip_code' in row
def test_high_minimum_score(self):
headers = ['Name', 'mail', 'zip']
with pytest.raises(KeyError):
_next(reheadered(_data(), headers, minimum_score=90))
def test_prefer_fuzzy(self):
headers = {'columnA': '\w+@\w+\.\w+', 'name': '\d+'}
for row in reheadered(_data(), headers, prefer_fuzzy=True):
assert 'columnA' in row
assert '@' in row['columnA']
assert 'name' in row
if row['name']:
assert not re.search('\d+', row['name'])
def test_header_absent_and_no_regexes(self):
infile = StringIO('\n'.join(_raw_txt_1.splitlines()[1:]))
data = csv.reader(infile)
headers = ['name', 'email', 'zip']
with pytest.raises(KeyError):
_next(reheadered(data, headers, header_present=False))
def test_header_absent_regexes_present(self):
infile = StringIO('\n'.join(_raw_txt_1.splitlines()[1:]))
data = csv.reader(infile)
headers = {'name': r'(\w+\s+)+', 'email': '\w+@\w+\.\w+'}
row = _next(reheadered(data, headers, header_present=False))
assert row['name'] == '<NAME>'
assert row['email'] == '<EMAIL>'
def test_header_present_regexes_present(self):
infile = StringIO('\n'.join(_raw_txt_1.splitlines()[1:]))
data = csv.reader(infile)
headers = {'name': r'(\w+\s+)+', 'email': '\w+@\w+\.\w+'}
row = _next(reheadered(data, headers, header_present=True))
assert row['name'] == '<NAME>'
assert row['email'] == '<EMAIL>'
def test_header_absent_guessed(self):
infile = StringIO('\n'.join(_raw_txt_1.splitlines()[1:]))
data = csv.reader(infile)
headers = {'name': r'(\w+\s+)+', 'email': '\w+@\w+\.\w+'}
row = _next(reheadered(data, headers))
assert row['name'] == '<NAME>'
assert row['email'] == '<EMAIL>'
def test_header_present_guessed(self):
infile = StringIO(_raw_txt_1)
data = csv.reader(infile)
headers = {'name': r'(\w+\s+)+', 'email': '\w+@\w+\.\w+'}
row = _next(reheadered(data, headers))
assert row['name'] == '<NAME>'
assert row['email'] == '<EMAIL>'
# form of data changes halfway through
# sparse data - use regex when lines are blank
# varying number of columns
# non-string input
``` |
{
"source": "18F/hmac_authentication_py",
"score": 2
} |
#### File: hmac_authentication_py/hmac_authentication/hmacauth.py
```python
import enum
import hmac
import base64
import hashlib
import collections
from werkzeug.wrappers import Request
from werkzeug.exceptions import abort, HTTPException
AuthenticationResult = collections.namedtuple(
'AuthenticationResult',
['result_code', 'header_signature', 'computed_signature'],
)
class AuthenticationResultCodes(enum.Enum):
'''Defines the result codes used in AuthenticationResult.'''
# The incoming result did not have a signature header
NO_SIGNATURE = 1
# The signature header was not parseable
INVALID_FORMAT = 2
# The signature header specified an unsupported algorithm
UNSUPPORTED_ALGORITHM = 3
# The signature from the request header matched the locally-computed
# signature
MATCH = 4
# The signature from the request header did not match the locally-computed
# signature
MISMATCH = 5
def header_name_to_wsgi(header_name):
wsgi_name = header_name.upper().replace('-', '_')
if wsgi_name not in ['CONTENT_TYPE', 'CONTENT_LENGTH']:
wsgi_name = 'HTTP_' + wsgi_name
return wsgi_name
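# A quick illustration of the mapping above (example values only):
#   'Content-Type' -> 'CONTENT_TYPE'
#   'X-Signature'  -> 'HTTP_X_SIGNATURE'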
def _compare_signatures(header, computed):
if hmac.compare_digest(header.encode('utf8'), computed.encode('utf8')):
return AuthenticationResultCodes.MATCH
return AuthenticationResultCodes.MISMATCH
def get_uri(environ):
uri = environ.get('SCRIPT_NAME', '') + environ.get('PATH_INFO', '/')
if environ.get('QUERY_STRING'):
uri = '{}?{}'.format(uri, environ['QUERY_STRING'])
return uri
class HmacAuth(object):
'''HmacAuth signs outbound requests and authenticates inbound requests.
Note that the method parameters called "req" or "request" correspond to
the WSGI "environ" interface define in PEP 333:
https://www.python.org/dev/peps/pep-0333/
'''
def __init__(self, digest, secret_key, signature_header, headers):
self._digest = digest
self._secret_key = secret_key
self._signature_header = header_name_to_wsgi(signature_header)
self._headers = [header_name_to_wsgi(h) for h in headers]
    # Note that multiply-defined headers should always be rewritten as
# a single header:
# http://stackoverflow.com/questions/1801124/how-does-wsgi-handle-multiple-request-headers-with-the-same-name
def _signed_headers(self, environ):
return [str(environ.get(h, '')) for h in self._headers]
def string_to_sign(self, environ):
'''Produces the string that will be prefixed to the request body and
used to generate the signature.
'''
components = [environ['REQUEST_METHOD']]
components.extend(self._signed_headers(environ))
components.append(get_uri(environ))
return '\n'.join(components) + '\n'
# NOTE(mbland): I'm not sure the outbound WSGI HTTP request interface is
# symmetrical to the inbound "environ" interface. Must go deeper.
def sign_request(self, environ):
'''Adds a signature header to the request.'''
environ[self._signature_header] = self.request_signature(environ)
def request_signature(self, environ):
'''Generates a signature for the request.'''
return self._request_signature(environ, self._digest)
def _request_signature(self, environ, digest):
h = hmac.new(
self._secret_key.encode('utf8'),
self.string_to_sign(environ).encode('utf8'),
digest,
)
request = Request(environ)
if 'wsgi.input' in environ:
h.update(request.get_data())
return digest().name + ' ' + base64.b64encode(h.digest()).decode('utf8')
def signature_from_header(self, environ):
'''Retrieves the signature included in the request header.'''
return environ.get(self._signature_header)
def authenticate_request(self, environ):
'''Authenticates the request by comparing HMAC signatures.
Returns the result code, the signature from the header, and the
locally-computed signature as a AuthenticationResult.
'''
header = self.signature_from_header(environ)
if header is None:
return AuthenticationResult(
AuthenticationResultCodes.NO_SIGNATURE, None, None)
components = header.split(' ')
if len(components) != 2:
return AuthenticationResult(
AuthenticationResultCodes.INVALID_FORMAT, header, None)
digest_name = components[0]
try:
digest = getattr(hashlib, digest_name)
except AttributeError:
return AuthenticationResult(
AuthenticationResultCodes.UNSUPPORTED_ALGORITHM, header, None)
computed = self._request_signature(environ, digest)
return AuthenticationResult(
_compare_signatures(header, computed), header, computed)
class HmacMiddleware(object):
'''WSGI middleware for authenticating incoming HTTP requests via HmacAuth.
Borrowed from http://stackoverflow.com/a/29265847/1222326.
'''
def __init__(self, app, hmac_auth):
self.app = app
self.hmac_auth = hmac_auth
def __call__(self, environ, start_response):
result = self.hmac_auth.authenticate_request(environ)
if result.result_code == AuthenticationResultCodes.MATCH:
return self.app(environ, start_response)
try:
abort(401)
except HTTPException as error:
return error(environ, start_response)
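# Illustrative usage sketch (not part of the original module; the key, header
# names and `app` below are assumptions for the example only):
#   import hashlib
#   auth = HmacAuth(hashlib.sha256, 'shared-secret', 'X-Signature',
#                   ['Content-Length', 'Content-Type', 'Date'])
#   app = HmacMiddleware(app, auth)
# Requests whose 'X-Signature' header matches the locally computed HMAC are
# passed through to the wrapped app; all others receive a 401 response.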
``` |
{
"source": "18F/identity-give-load-testin",
"score": 2
} |
#### File: identity-give-load-testin/locustfiles/loadtest.py
```python
import os
from locust import HttpUser, task, constant_pacing, tag
HTTP_FLOW_PATH = f"/v1/company/wdK3fH48XuoXzvZyeNJEYFA9i8K72BZg/flows/IU1iDIvviIth5jiYmNvgsS43Kg29RxyB/start"
SK_API_KEY = os.getenv("SK_API_KEY")
class SKLoadTestUser(HttpUser):
"""Load test SK"""
wait_time = constant_pacing(1)
@task(1)
@tag("basic_load")
def test_flow(self):
"""Invoke basic sk test flow"""
self.client.post(HTTP_FLOW_PATH, headers={"x-sk-api-key": SK_API_KEY})
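# Example run (sketch; the host URL is an assumption for illustration and
# SK_API_KEY must be exported in the environment):
#   locust -f locustfiles/loadtest.py --host https://sk.example.gov
# With constant_pacing(1), each simulated user fires the flow roughly once
# per second.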
```
#### File: identity-give-load-testin/locustfiles/usps.py
```python
import os
import random
import uuid
import csv
import logging
import json
from locust import HttpUser, task, tag
FLOW_PATH = f"/v1/company/wdK3fH48XuoXzvZyeNJEYFA9i8K72BZg/flows/YYAzKlqe8gwmwyLfaM2e7jkuvikvzpDi/start"
SK_API_KEY = os.getenv("SK_API_KEY")
CSV_FILE = os.getenv("CSV_FILE")
class SKUSPSTestUser(HttpUser):
with open(CSV_FILE) as csvfile:
reader = csv.reader(csvfile)
data = list(reader)
@tag("usps", "usps_ok")
@task(1)
def usps_av_valid(self):
"""Invoke basic usps av flow"""
index = random.randint(1, len(self.data) - 1)
rdata = {
"uid": str(uuid.uuid4()),
"first_name": self.data[index][0],
"last_name": self.data[index][2],
"middle_name": self.data[index][1],
"suffix": self.data[index][3],
"delivery_address": self.data[index][4],
"address_city_state_zip": self.data[index][5],
}
with self.client.post(
FLOW_PATH,
headers={"x-sk-api-key": SK_API_KEY},
json=rdata,
catch_response=True,
) as response:
try:
if response.json()["uid"] != rdata["uid"] or (
float(response.json()["confidence_indicator"])
!= float(self.data[index][6])
):
response.failure("response values do not match expected")
logging.info(
f"response values do not match expected: {response.json()}"
)
except Exception as e:
response.failure("error in response")
logging.info(f"error: {e}")
logging.info(f"in response: {response.status_code} {response.text}")
@tag("usps", "error", "usps_not_found")
@task(1)
def usps_av_not_found(self):
"""Invoke basic usps http flow with a person that does not exist"""
index = random.randint(1, len(self.data) - 1)
rdata = {
"uid": str(uuid.uuid4()),
"first_name": self.data[index][0],
"last_name": self.data[index][2],
"middle_name": self.data[index][1],
"suffix": self.data[index][3],
"delivery_address": self.data[index][4],
"address_city_state_zip": self.data[index][5],
}
with self.client.post(
FLOW_PATH,
headers={"x-sk-api-key": SK_API_KEY},
json=rdata,
catch_response=True,
) as response:
try:
if (
response.json()["uid"] != rdata["uid"]
or response.json()["confidence_indicator"] is not None
):
response.failure("response values do not match expected")
logging.info(f"{response.json()}")
except Exception as e:
response.failure("error in response")
logging.info(f"error: {e}")
logging.info(
f"error in response: {response.status_code} {response.text}"
)
@tag("usps", "error", "usps_missing_param")
@task(1)
def usps_av_missing_parameter(self):
"""Invoke basic sk http flow"""
index = random.randint(1, len(self.data) - 1)
rdata = {
"uid": str(uuid.uuid4()),
"first_name": self.data[index][0],
"last_name": self.data[index][2],
"middle_name": self.data[index][1],
"suffix": self.data[index][3],
"delivery_address": self.data[index][4],
"address_city_state_zip": self.data[index][5],
}
# Induce missing entry error
blank = random.choice(
["first_name", "last_name", "delivery_address", "address_city_state_zip"]
)
rdata[blank] = ""
with self.client.post(
FLOW_PATH,
headers={"x-sk-api-key": SK_API_KEY},
json=rdata,
catch_response=True,
) as response:
try:
message = response.json()["message"].split("-", 1)
org_resp = json.loads(message[1])
if (
int(message[0]) != 400
or org_resp["uid"] != rdata["uid"]
or not org_resp["error"].startswith("Mandatory field(s) missing")
):
response.failure("response values do not match expected")
logging.info(f"{response.json()}")
else:
response.success()
except Exception as e:
response.failure("error in response")
logging.info(f"error: {e}")
logging.info(
f"error in response: {response.status_code} {response.text}"
)
``` |
{
"source": "18F/identity-loadtes",
"score": 2
} |
#### File: identity-loadtes/load_testing/ial2_sign_in.locustfile.py
```python
from locust import HttpUser, TaskSet, task, between
from common_flows import flow_ial2_proofing, flow_sign_in, flow_helper
import logging
class IAL2SignInLoad(TaskSet):
# Preload drivers license data
license_front = flow_helper.load_fixture("mock-front.jpeg")
license_back = flow_helper.load_fixture("mock-back.jpeg")
def on_start(self):
logging.info(
"*** Starting Sign-In and IAL2 proof load tests with "
+ flow_helper.get_env("NUM_USERS")
+ " users ***"
)
def on_stop(self):
logging.info("*** Ending IAL2 Sign-In load tests ***")
""" @task(<weight>) : value=3 executes 3x as often as value=1 """
""" Things inside task are synchronous. Tasks are async """
@task(1)
def sign_in_and_proof_load_test(self):
# Sign in flow
flow_sign_in.do_sign_in(self)
# Get /account page
flow_helper.do_request(self, "get", "/account", "/account", "")
# IAL2 Proofing flow
flow_ial2_proofing.do_ial2_proofing(self)
# Get the /account page now
flow_helper.do_request(self, "get", "/account", "/account", "")
# Now log out
flow_helper.do_request(self, "get", "/logout", "/", "")
class WebsiteUser(HttpUser):
tasks = [IAL2SignInLoad]
wait_time = between(5, 9)
```
#### File: identity-loadtes/load_testing/sp_ial2_sign_in.locustfile.py
```python
from locust import HttpUser, TaskSet, task, between
from common_flows import flow_sp_ial2_sign_in, flow_helper
class SP_IAL2_SignInLoad(TaskSet):
# Preload drivers license data
license_front = flow_helper.load_fixture("mock-front.jpeg")
license_back = flow_helper.load_fixture("mock-back.jpeg")
@task(1)
def sp_sign_in_load_test(self):
flow_sp_ial2_sign_in.ial2_sign_in(self)
class WebsiteUser(HttpUser):
tasks = [SP_IAL2_SignInLoad]
wait_time = between(5, 9)
```
#### File: identity-loadtes/tests/test_flow_helpers.py
```python
import pytest
import os
import re
import test_helper
# Import load_testing files
# :/ this kind of import only works when you run `pytest` from the root
# of the project.
import sys
sys.path.append("./load_testing")
from common_flows.flow_helper import (
authenticity_token,
choose_cred,
confirm_link,
desktop_agent_headers,
export_cookies,
get_env,
import_cookies,
load_fixture,
otp_code,
querystring_value,
random_cred,
random_phone,
resp_to_dom,
sp_signin_link,
sp_signout_link,
url_without_querystring,
use_previous_visitor,
)
"""
*** Unit test simple flow helpers
"""
def test_querystring_value():
url = "http://one.two?three=four&five=six"
assert querystring_value(url, "three") == "four"
assert querystring_value(url, "five") == "six"
def test_url_without_querystring():
assert (
url_without_querystring("http://one.two?three=four&five=six")
== "http://one.two"
)
assert url_without_querystring("http://one.two") == "http://one.two"
def test_random_cred():
cred = random_cred(1, {})
assert cred["number"] == 0
assert cred["email"] == "<EMAIL>"
assert cred["password"] == "<PASSWORD>"
def test_choose_cred():
choices = [777, 424242, 90210]
cred = choose_cred(choices)
number = cred["number"]
assert number in choices
assert cred["email"] == "<EMAIL>(number)
assert cred["password"] == "<PASSWORD>"
def test_use_previous_visitor():
# Under threshold should always be false
assert use_previous_visitor(0, 1, 0) is False
# Over threshold with a 100% limit should always be true
assert use_previous_visitor(1, 0, 100) is True
# Nondeterministic test with 75% target +/- 10% and 1000 samples
trues = 0
for i in range(1000):
if use_previous_visitor(1, 0, 75):
trues = trues + 1
assert (
trues >= 650 and trues <= 850
), "use_previous_visitor with target of 75% +/- 10 was out of spec"
def test_random_phone():
for i in range(5):
assert re.match(r"202555\d{4}", random_phone())
def test_desktop_agent_headers():
agent = desktop_agent_headers()
assert "Firefox" in agent["User-Agent"]
def test_get_env():
os.environ["TESTKEY"] = "testvalue"
assert get_env("TESTKEY") == "testvalue"
with pytest.raises(Exception):
get_env("UNSETKEY")
def test_resp_to_dom():
resp = test_helper.mock_response("doc_auth_verify.html")
assert resp_to_dom(resp)
def test_authentication_token():
    resp = test_helper.mock_response("doc_auth_verify.html")
    assert (
        authenticity_token(resp)
        == "<KEY>"
    )
    assert (
        authenticity_token(resp, 0)
        == "<KEY>"
    )
    assert (
        authenticity_token(resp, 1)
        == "<KEY>"
    )
    assert (
        authenticity_token(resp, 2)
        == "<KEY>"
    )
    with pytest.raises(Exception):
        authenticity_token("a response without a token in it")
def test_otp_code():
resp = test_helper.mock_response("two_factor_sms.html")
assert otp_code(resp) == "543662"
with pytest.raises(Exception):
otp_code("a response without a code in it")
def test_confirm_link():
resp = test_helper.mock_response("verify_email.html")
assert "/sign_up/email/confirm?confirmation_token=" in confirm_link(resp)
with pytest.raises(Exception):
confirm_link("a response without a token in it")
def test_sp_signin_link():
resp = test_helper.mock_response("sp_without_session.html")
assert "openid_connect/authorize?" in sp_signin_link(resp)
with pytest.raises(Exception):
sp_signin_link("a response without a signin link in it")
def test_sp_signout_link():
resp = test_helper.mock_response("sp_with_session.html")
assert "openid_connect/logout?" in sp_signout_link(resp)
with pytest.raises(Exception):
sp_signout_link("A response without a sign-out link")
def test_load_file():
orig = open("README.md", "rb").read()
assert load_fixture("README.md", ".") == orig
with pytest.raises(RuntimeError):
load_fixture("NotReallyThere")
def test_export_import_cookies():
# Late load requests to avoid monkeypatch warning:
# https://github.com/gevent/gevent/issues/1016
from requests import Session
domain = "oh.yea"
r = Session()
# Cookie that should be exported
r.cookies.set("remember_device", "Sure", domain=domain)
r.cookies.set("user_opted_remember_device_preference", "Yep", domain=domain)
# Cookies that should not be exported
r.cookies.set("remember_device", "Wrong_Domain", domain="other.place")
r.cookies.set("wrong_domain_and_name", "me", domain="sumthing")
r.cookies.set("wrong_name", "me", domain=domain)
## Export tests
e = export_cookies(domain, r.cookies)
assert len(e) == 2, "Wrong number of cookies exported"
assert set([i.name for i in e]) == set(
["remember_device", "user_opted_remember_device_preference"]
)
assert e[0].domain == domain
e2 = export_cookies(domain, r.cookies, savelist=["wrong_name"])
assert len(e2) == 1
assert e2[0].name == "wrong_name"
assert export_cookies("foo.bar", r.cookies) == []
r.cookies.clear()
assert len(export_cookies(domain, r.cookies)) == 0
## Import tests
assert (
r.cookies.get("remember_device", domain=domain) is None
), "Cookies did not clear"
import_cookies(r, e)
assert r.cookies.get("remember_device", domain=domain) == "Sure"
assert r.cookies.get("user_opted_remember_device_preference") == "Yep"
assert r.cookies.get("remember_device", domain="other_place") is None
```
#### File: identity-loadtes/tests/test_helper.py
```python
from unittest.mock import MagicMock
def mock_response(fixture_name):
"""
Accepts the name of a file in the fixtures directory
Returns a mocked response object
"""
f = open("tests/fixtures/" + fixture_name, "r")
fixture_content = f.read()
response = MagicMock()
response.content = fixture_content
return response
``` |
{
"source": "18F/identity-loadtest",
"score": 2
} |
#### File: load_testing/common_flows/flow_sp_ial2_sign_in_async.py
```python
from faker import Faker
from .flow_helper import (
authenticity_token,
do_request,
get_env,
idv_phone_form_value,
otp_code,
personal_key,
querystring_value,
random_cred,
sp_signout_link,
url_without_querystring,
)
from urllib.parse import urlparse
import logging
import time
"""
*** SP IAL2 Sign In Flow ***
"""
def ial2_sign_in_async(context):
"""
Requires following attributes on context:
* license_front - Image data for front of driver's license
* license_back - Image data for back of driver's license
"""
sp_root_url = get_env("SP_HOST")
context.client.cookies.clear()
# GET the SP root, which should contain a login link, give it a friendly
# name for output
resp = do_request(
context,
"get",
sp_root_url,
sp_root_url,
'',
{},
{},
sp_root_url
)
sp_signin_endpoint = sp_root_url + '/auth/request?aal=&ial=2'
# submit signin form
resp = do_request(
context,
"get",
sp_signin_endpoint,
'',
'',
{},
{},
sp_signin_endpoint
)
auth_token = authenticity_token(resp)
# TODO add debugging around this statement to further investigate
# https://github.com/18F/identity-loadtest/issues/25
request_id = querystring_value(resp.url, "request_id")
# This should match the number of users that were created for the DB with
# the rake task
num_users = get_env("NUM_USERS")
# Choose a random user
credentials = random_cred(num_users, None)
# POST username and password
resp = do_request(
context,
"post",
"/",
"/login/two_factor/sms",
'',
{
"user[email]": credentials["email"],
"user[password]": credentials["password"],
"user[request_id]": request_id,
"authenticity_token": auth_token,
}
)
auth_token = authenticity_token(resp)
code = otp_code(resp)
idp_domain = urlparse(resp.url).netloc
logging.debug('/login/two_factor/sms')
# Post to unauthenticated redirect
resp = do_request(
context,
"post",
"/login/two_factor/sms",
"/verify/doc_auth/welcome",
'',
{
"code": code,
"authenticity_token": auth_token,
},
)
auth_token = authenticity_token(resp)
logging.debug('/verify/doc_auth/welcome')
# Post consent to Welcome
resp = do_request(
context,
"put",
"/verify/doc_auth/welcome",
"/verify/doc_auth/agreement",
'',
{"authenticity_token": auth_token, },
)
auth_token = authenticity_token(resp)
logging.debug('/verify/doc_auth/agreement')
# Post consent to Welcome
resp = do_request(
context,
"put",
"/verify/doc_auth/agreement",
"/verify/doc_auth/upload",
'',
{"ial2_consent_given": "1", "authenticity_token": auth_token, },
)
auth_token = authenticity_token(resp)
logging.debug('/verify/doc_auth/upload?type=desktop')
# Choose Desktop flow
resp = do_request(
context,
"put",
"/verify/doc_auth/upload?type=desktop",
"/verify/doc_auth/document_capture",
'',
{"authenticity_token": auth_token, },
)
auth_token = authenticity_token(resp)
files = {"doc_auth[front_image]": context.license_front,
"doc_auth[back_image]": context.license_back}
logging.debug('/verify/doc_auth/document_capture')
# Post the license images
resp = do_request(
context,
"put",
"/verify/doc_auth/document_capture",
"/verify/doc_auth/ssn",
'',
{"authenticity_token": auth_token, },
files
)
auth_token = authenticity_token(resp)
ssn = '900-12-3456'
logging.debug('/verify/doc_auth/ssn')
resp = do_request(
context,
"put",
"/verify/doc_auth/ssn",
"/verify/doc_auth/verify",
'',
{"authenticity_token": auth_token, "doc_auth[ssn]": ssn, },
)
# There are three auth tokens in the response text, get the second
auth_token = authenticity_token(resp, 1)
logging.debug('/verify/doc_auth/verify')
# Verify
expected_text = 'This might take up to a minute. We’ll load the next step '\
'automatically when it’s done.'
resp = do_request(
context,
"put",
"/verify/doc_auth/verify",
'/verify/doc_auth/verify_wait',
expected_text,
{"authenticity_token": auth_token, },)
while resp.url == 'https://idp.pt.identitysandbox.gov/verify/doc_auth/verify_wait':
time.sleep(3)
logging.debug(
f"SLEEPING IN /verify_wait WHILE LOOP with #{credentials['email']}")
resp = do_request(
context,
"get",
"/verify/doc_auth/verify_wait",
'',
'',
{},
)
if resp.url == 'https://idp.pt.identitysandbox.gov/verify/doc_auth/verify_wait':
logging.debug(
f"STILL IN /verify_wait WHILE LOOP with #{credentials['email']}")
else:
auth_token = authenticity_token(resp)
logging.debug("/verify/phone")
# Enter Phone
resp = do_request(
context,
"put",
"/verify/phone",
'/verify/phone',
'This might take up to a minute',
{"authenticity_token": auth_token,
"idv_phone_form[phone]": idv_phone_form_value(resp), },
)
wait_text = 'This might take up to a minute. We’ll load the next step '\
'automatically when it’s done.'
while wait_text in resp.text:
time.sleep(3)
logging.debug(
f"SLEEPING IN /verify/phone WHILE LOOP with {credentials['email']}")
resp = do_request(
context,
"get",
"/verify/phone",
'',
'',
{},
)
if resp.url == 'https://idp.pt.identitysandbox.gov/verify/phone':
logging.debug(
f"STILL IN /verify/phone WHILE LOOP with {credentials['email']}")
else:
auth_token = authenticity_token(resp)
logging.debug('/verify/review')
# Re-enter password
resp = do_request(
context,
"put",
"/verify/review",
"/verify/confirmations",
'',
{"authenticity_token": auth_token,
"user[password]": "<PASSWORD>", },
)
auth_token = authenticity_token(resp)
logging.debug('/verify/confirmations')
# Confirmations
resp = do_request(
context,
"post",
"/verify/confirmations",
"/sign_up/completed",
'',
{
"authenticity_token": auth_token,
"personal_key": personal_key(resp)
},
)
auth_token = authenticity_token(resp)
logging.debug('/sign_up/completed')
# Sign Up Completed
resp = do_request(
context,
"post",
"/sign_up/completed",
None,
'',
{"authenticity_token": auth_token,
"commit": "Agree and continue"},
)
ial2_sig = "ACR: http://idmanagement.gov/ns/assurance/ial/2"
# Does it include the IAL2 text signature?
if resp.text.find(ial2_sig) == -1:
logging.error('this does not appear to be an IAL2 auth')
logout_link = sp_signout_link(resp)
logging.debug('SP /logout')
resp = do_request(
context,
"get",
logout_link,
sp_root_url,
'',
{},
{},
url_without_querystring(logout_link),
)
# Does it include the logged out text signature?
if resp.text.find('You have been logged out') == -1:
print("ERROR: user has not been logged out")
```
#### File: load_testing/common_flows/flow_sp_ial2_sign_in.py
```python
from faker import Faker
from .flow_helper import (
authenticity_token,
do_request,
get_env,
otp_code,
personal_key,
querystring_value,
random_cred,
random_phone,
sp_signout_link,
url_without_querystring,
)
from urllib.parse import urlparse
import os
import sys
"""
*** SP IAL2 Sign In Flow ***
"""
def ial2_sign_in(context):
"""
Requires following attributes on context:
* license_front - Image data for front of driver's license
* license_back - Image data for back of driver's license
"""
sp_root_url = get_env("SP_HOST")
context.client.cookies.clear()
# GET the SP root, which should contain a login link, give it a friendly
# name for output
resp = do_request(
context,
"get",
sp_root_url,
sp_root_url,
'',
{},
{},
sp_root_url
)
sp_signin_endpoint = sp_root_url + '/auth/request?aal=&ial=2'
# submit signin form
resp = do_request(
context,
"get",
sp_signin_endpoint,
'',
'',
{},
{},
sp_signin_endpoint
)
auth_token = authenticity_token(resp)
# TODO add debugging around this statement to further investigate
# https://github.com/18F/identity-loadtest/issues/25
request_id = querystring_value(resp.url, "request_id")
# This should match the number of users that were created for the DB with
# the rake task
num_users = get_env("NUM_USERS")
# Choose a random user
credentials = random_cred(num_users, None)
# POST username and password
resp = do_request(
context,
"post",
"/",
"/login/two_factor/sms",
'',
{
"user[email]": credentials["email"],
"user[password]": credentials["password"],
"user[request_id]": request_id,
"authenticity_token": auth_token,
}
)
auth_token = authenticity_token(resp)
code = otp_code(resp)
idp_domain = urlparse(resp.url).netloc
if os.getenv("DEBUG"):
print("DEBUG: /login/two_factor/sms")
# Post to unauthenticated redirect
resp = do_request(
context,
"post",
"/login/two_factor/sms",
"/verify/doc_auth/welcome",
'',
{
"code": code,
"authenticity_token": auth_token,
},
)
auth_token = authenticity_token(resp)
if os.getenv("DEBUG"):
print("DEBUG: /verify/doc_auth/welcome")
# Post consent to Welcome
resp = do_request(
context,
"put",
"/verify/doc_auth/welcome",
"/verify/doc_auth/agreement",
'',
{"authenticity_token": auth_token, },
)
auth_token = authenticity_token(resp)
if os.getenv("DEBUG"):
print("DEBUG: /verify/doc_auth/agreement")
# Post consent to Welcome
resp = do_request(
context,
"put",
"/verify/doc_auth/agreement",
"/verify/doc_auth/upload",
'',
{"ial2_consent_given": "1", "authenticity_token": auth_token, },
)
auth_token = authenticity_token(resp)
if os.getenv("DEBUG"):
print("DEBUG: /verify/doc_auth/upload?type=desktop")
# Choose Desktop flow
resp = do_request(
context,
"put",
"/verify/doc_auth/upload?type=desktop",
"/verify/doc_auth/document_capture",
'',
{"authenticity_token": auth_token, },
)
auth_token = authenticity_token(resp)
files = {"doc_auth[front_image]": context.license_front,
"doc_auth[back_image]": context.license_back}
if os.getenv("DEBUG"):
print("DEBUG: /verify/doc_auth/document_capture")
# Post the license images
resp = do_request(
context,
"put",
"/verify/doc_auth/document_capture",
"/verify/doc_auth/ssn",
'',
{"authenticity_token": auth_token, },
files
)
auth_token = authenticity_token(resp)
ssn = '900-12-3456'
if os.getenv("DEBUG"):
print("DEBUG: /verify/doc_auth/ssn")
resp = do_request(
context,
"put",
"/verify/doc_auth/ssn",
"/verify/doc_auth/verify",
'',
{"authenticity_token": auth_token, "doc_auth[ssn]": ssn, },
)
# There are three auth tokens on the response, get the second
auth_token = authenticity_token(resp, 1)
if os.getenv("DEBUG"):
print("DEBUG: /verify/doc_auth/verify")
# Verify
resp = do_request(
context,
"put",
"/verify/doc_auth/verify",
"/verify/phone",
'',
{"authenticity_token": auth_token, },
)
auth_token = authenticity_token(resp)
if os.getenv("DEBUG"):
print("DEBUG: /verify/phone")
# Enter Phone
resp = do_request(
context,
"put",
"/verify/phone",
"/verify/otp_delivery_method",
'',
{"authenticity_token": auth_token,
"idv_phone_form[phone]": random_phone(), },
)
auth_token = authenticity_token(resp)
if os.getenv("DEBUG"):
print("DEBUG: /verify/otp_delivery_method")
# Select SMS Delivery
resp = do_request(
context,
"put",
"/verify/otp_delivery_method",
"/verify/phone_confirmation",
'',
{"authenticity_token": auth_token, "otp_delivery_preference": "sms", },
)
auth_token = authenticity_token(resp)
code = otp_code(resp)
if os.getenv("DEBUG"):
print("DEBUG: /verify/phone_confirmation")
# Verify SMS Delivery
resp = do_request(
context,
"put",
"/verify/phone_confirmation",
"/verify/review",
'',
{"authenticity_token": auth_token, "code": code, },
)
auth_token = authenticity_token(resp)
if os.getenv("DEBUG"):
print("DEBUG: /verify/review")
# Re-enter password
resp = do_request(
context,
"put",
"/verify/review",
"/verify/confirmations",
'',
{"authenticity_token": auth_token,
"user[password]": "<PASSWORD>", },
)
auth_token = authenticity_token(resp)
if os.getenv("DEBUG"):
print("DEBUG: /verify/confirmations")
# Confirmations
resp = do_request(
context,
"post",
"/verify/confirmations",
"/sign_up/completed",
'',
{
"authenticity_token": auth_token,
"personal_key": personal_key(resp)
},
)
auth_token = authenticity_token(resp)
if os.getenv("DEBUG"):
print("DEBUG: /sign_up/completed")
# Sign Up Completed
resp = do_request(
context,
"post",
"/sign_up/completed",
None,
'',
{"authenticity_token": auth_token,
"commit": "Agree and continue"},
)
ial2_sig = "ACR: http://idmanagement.gov/ns/assurance/ial/2"
# Does it include the IAL2 text signature?
if resp.text.find(ial2_sig) == -1:
print("ERROR: this does not appear to be an IAL2 auth")
logout_link = sp_signout_link(resp)
if os.getenv("DEBUG"):
print("DEBUG: /sign_up/completed")
resp = do_request(
context,
"get",
logout_link,
sp_root_url,
'',
{},
{},
url_without_querystring(logout_link),
)
# Does it include the logged out text signature?
if resp.text.find('You have been logged out') == -1:
print("ERROR: user has not been logged out")
``` |
{
"source": "18F/markov_bot",
"score": 3
} |
#### File: markov_bot/first_attempt/gen_text.py
```python
import sys
import json
import random
from string import punctuation
def contains(test, chars):
for c in chars:
if c in test:
return True
return False
if __name__ == "__main__":
my_data = "corpus_data.json"
print "Loading text :", my_data
with open(my_data, "r") as file_name:
everything = json.loads( file_name.read() )
# e.g. "BY AGENCY" => "CIO"
key_phrase = random.choice( everything['corpus'].keys() )
utterances = key_phrase.split(" ")
print "\nStarting with:", key_phrase
choices = everything['corpus'][key_phrase]
while choices:
next_word = random.choice( choices )
if next_word:
utterances.append(next_word)
if not next_word:
choices = None
elif len(utterances) > 500: # just want some termination
choices = None
elif contains(utterances[-1], "!.?_"): # typical punctuation may be stripped out in earlier proces
utterances[-2] += utterances[-1]
utterances.pop()
choices = None
else:
key_phrase = " ".join( utterances[-2:] )
choices = everything['corpus'][key_phrase]
print "\t>", " ".join( utterances )
```
#### File: 18F/markov_bot/popular_phrases.py
```python
import sys
import json
import random
from os import listdir
from os.path import isfile, join, exists
from collections import Counter
from nltk import word_tokenize
from nltk.tokenize import sent_tokenize
# Other alternatives are nltk.tokenize.punkt
# sent_detector = nltk.data.load('tokenizers/punkt/english.pickle')
# sent_detector.tokenize(text.strip(), realign_boundaries=False)))
import textract
class TextBucket(object):
""" General Class for working with data structure prototyped from the MarkovBot """
MAX_CHAIN_LEN = 1
my_data_file = "sent_parse.json"
my_data_dir = "sent_files/"
stop_punctuation = "._!?"
    phrases = None # A pointer of convenience for info stored in everything['phrases']
everything = None
def _check_defaults(self):
""" Helper Function to ensure internal dictionaries are setup correctly """
try:
self.everything['input']
except:
self.everything = {}
self.everything['input'] = {}
try:
self.phrases = self.everything['phrases']
except:
self.everything['phrases'] = {}
self.phrases = self.everything['phrases']
def __init__(self, *args, **kwargs):
""" Setup initial class, and ideally load data from pre-built file """
super(TextBucket, self).__init__(*args, **kwargs)
try:
self.load_data()
except:
self._check_defaults()
def load_data(self):
""" Loads data (hopefully) from baseline file """
print "Loading Data:", self.my_data_file
with open(self.my_data_file, "r") as file_name:
self.everything = json.loads( file_name.read() )
## still have to check here vs. __init__() in case file is corrupt
self._check_defaults()
def save_data(self):
""" Saves data (hopefully) to baseline file """
print "Saving Data:", self.my_data_file
with open(self.my_data_file, "w") as file_name:
json.dump( { 'input': self.everything['input'], 'phrases': self.phrases } , file_name)
def add_text(self, my_dir = None):
""" Adds all text / files from raw_file directory """
if not my_dir:
my_dir = self.my_data_dir
## may not have to check here but seems pragmatic
self._check_defaults()
print "Extracting text from:", my_dir
file_list = [f for f in listdir(my_dir) if isfile(join(my_dir, f))]
for f in file_list: # Will overwrite text for any existing files
print "\tProcessing file:", f
txt = textract.process( join(my_dir, f), encoding="utf-8" )
txt = txt.replace("\xa0", " ")
txt = txt.decode('ascii', errors="ignore")
txt = txt.encode("ascii") #, errors="ignore")
self.everything['input'][f] = txt
def _make_chains(self, words, chain_len):
""" Helper function to return chain pairs """
if len(words) <= chain_len: # <= because we always want 1 stop "word" i.e. None at minimum
words.extend( [None] * (1 + chain_len - len(words)) )
for i in range( len(words) - chain_len ):
yield words[i:i + chain_len + 1]
def make_phrases(self, start = 1, end = None):
if not end: end = start + 1
for chain_len in range(start, end): # +1 because of the way range works
self.phrases[chain_len] = []
for f in self.everything['input']:
for line in sent_tokenize( self.everything['input'][f] ):
words = word_tokenize(line)
for chain in self._make_chains(words, chain_len):
try:
# print "ERROR.0:", chain
chain = chain[:-1] # drop last item in chain as it's "value" for markov
chain = [c for c in chain if c is not None] # quick clean as None is breaking join
except:
print "ERROR.1:", chain
# sys.exit(-1)
# print chain_len, " => ", chain
try:
self.phrases[chain_len].append(" ".join(chain) )
except:
print "ERROR.2:", chain
sys.exit(-1)
return Counter( self.phrases[chain_len] )
def count_phrases(self, num):
end = num + 1
#start = num
start = 1
self.everything['phrases'] = {}
self.phrases = self.everything['phrases']
for i in range(start, end):
# try:
self.phrases[i] = self.make_phrases(i)
self.MAX_CHAIN_LEN = i
# except:
# print "FAILED"
# pass
if __name__ == "__main__":
bot = TextBucket()
if not exists( bot.my_data_file ):
bot.add_text()
bot.save_data()
for k in bot.everything['input'].keys():
print "\t", k, "=> #", len(bot.everything['input'][k])
print
try:
num = int(sys.argv[1])
except:
num = 10
bot.count_phrases(num)
# print "Max Chain:", bot.MAX_CHAIN_LEN
for i in bot.phrases:
# for i in range( bot.MAX_CHAIN_LEN, 1, -1):
common_num = 1
while common_num:
common = bot.phrases[i].most_common(common_num)
if len(common[-1][0]) >= i:
print "\t", i, "=>", common[-1]
common_num = None
else:
common_num += 1
if common_num > i*2: # Random Stop Gap to prevent runaway
print "No Common Found!"
common_num = None
``` |
{
"source": "18F/medicare-appeals-prototyping",
"score": 3
} |
#### File: medicare-appeals-prototyping/etl/tasks.py
```python
import os
import psycopg2
import timeit
from invoke import task
DATABASE_URL = os.environ['DATABASE_URL']
conn = psycopg2.connect(DATABASE_URL)
cur = conn.cursor()
start_time = timeit.default_timer()
def elapsed():
return round(timeit.default_timer() - start_time, 2)
@task
def commit(c):
conn.commit()
c.run('echo "Committing database changes"')
@task
def close(c):
cur.close()
conn.close()
c.run(f'echo "Total Time elapsed: {elapsed()}seconds"')
c.run('echo "Closing database connection"')
@task
def dbsetup(c):
sql = open('./sql/db_setup.sql', 'r')
cur.execute(sql.read())
sql.close()
c.run(f'echo "Database setup"')
@task()
def createclaims(c):
sql = open('./sql/tmp_claims.sql', 'r')
cur.execute(sql.read())
sql.close()
c.run(f'echo "Created tmp_claims - {elapsed()}seconds"')
@task()
def createmappings(c):
sql = open('./sql/field_mapping.sql', 'r')
cur.execute(sql.read())
sql.close()
c.run(f'echo "Created field mappings - {elapsed()}seconds"')
@task()
def createappeals(c):
sql = open('./sql/tmp_appeals.sql', 'r')
cur.execute(sql.read())
sql.close()
c.run(f'echo "Created tmp_appeals {elapsed()}seconds"')
@task()
def createprocedures(c):
sql = open('./sql/tmp_procedure_codes.sql', 'r')
cur.execute(sql.read())
sql.close()
c.run(f'echo "Created tmp_procedure_codes.sql {elapsed()}seconds"')
@task()
def createclaimstoappeals(c):
sql = open('./sql/tmp_claims_to_appeals.sql', 'r')
cur.execute(sql.read())
sql.close()
c.run(f'echo "Created tmp_claims_to_appeals {elapsed()}seconds"')
@task()
def sanitize(c):
sql = open('./sql/sanitize.sql', 'r')
cur.execute(sql.read())
sql.close()
c.run(f'echo "Sanitized data - {elapsed()}seconds"')
@task()
def migratedata(c):
sql = open('./sql/migrate.sql', 'r')
cur.execute(sql.read())
sql.close()
c.run(f'echo "Migrated sample data into model - {elapsed()}seconds"')
@task
def cleanup(c):
sql = open('./sql/cleanup.sql', 'r')
cur.execute(sql.read())
sql.close()
c.run(f'echo "Database temp files cleaned - {elapsed()}seconds"')
@task(
pre=[
dbsetup,
createclaims,
createappeals,
createprocedures,
createmappings,
sanitize,
createclaimstoappeals,
migratedata,
cleanup
],
post=[
commit,
close
]
)
def createdata(c):
c.run(f'echo "Data created!"')
``` |
{
"source": "18F/NRC-MAP",
"score": 3
} |
#### File: tests/generator/vogtle_data_generator.py
```python
import logging
import sys
from argparse import ArgumentParser, ArgumentError
from datetime import date, timedelta
from faker import Faker
from common.faker_providers import ITAAC
__version__ = "0.1.0"
logging.basicConfig(
level=logging.INFO,
format='%(filename)s: %(asctime)s: %(levelname)s: %(message)s'
)
ARG_PARSER = ArgumentParser()
ARG_PARSER.add_argument('-d',
dest='directory',
type=str,
help='Set directory for synthetic data files.'
' e.g. `./data/`')
ARG_PARSER.add_argument('--version',
action='version',
version="%(prog)s v" + __version__)
class VogtleDataGenerator(object):
"""
This class holds all generator functions
for Vogtle Dashboard Synthetic data
"""
fake = None
directory = None
def __init__(self, directory=None):
if not directory:
logging.error("No directory provided, exiting..")
sys.exit(1)
self.fake = Faker()
self.fake.add_provider(ITAAC)
if directory:
self.directory = directory
def generate_default(self):
"""
Generate a predetermined set of synthetic data
"""
self.generate_inspections(800)
self.generate_news_feed(100)
self.generate_public_meetings(100)
self.generate_calendar(2019, 2021)
def generate_inspections(self, rows):
"""
Generate synthetic data for Inspections
"""
header = "id|itaac_status|icn_status|effort_required|facility|" \
"targeted_flag|target_amt\n"
with open('{}inspections.csv'
.format(self.directory), 'w') as output_file:
output_file.write(header)
for itaac_id in range(rows):
itaac_status = self.fake.format('itaac_status')
icn_status = self.fake.format('icn_status')
effort_required = self.fake.format('effort_required')
facility = self.fake.format('facility')
targeted_flag = self.fake.format('true_false_flag')
target_amt = self.fake.format('target_amt')
output_file.write("%s|%s|%s|%s|%s|%s|%s\n" %
(itaac_id,
itaac_status,
icn_status,
effort_required,
facility,
targeted_flag,
target_amt))
def generate_news_feed(self, rows):
"""
Generate synthetic data for News Feed
"""
header = "id|title|text|datetime|source_url\n"
with open('{}news_feed.csv'.format(self.directory), 'w') \
as output_file:
output_file.write(header)
for feed_id in range(rows):
title = self.fake.format('sentence',
nb_words=5,
variable_nb_words=False,
ext_word_list=None)
text = self.fake.format('sentence',
nb_words=12,
variable_nb_words=False,
ext_word_list=None)
datetime = self.fake.format('date_time_this_year',
before_now=True,
after_now=True,
tzinfo=None)
source_url = "http://www.{}.com/{}".format(
self.fake.format('word'), self.fake.format('word'))
output_file.write("%s|%s|%s|%s|%s\n" %
(feed_id,
title,
text,
datetime,
source_url))
def generate_public_meetings(self, rows):
"""
Generate synthetic data for Public Meetings
"""
header = "id|purpose|date|time|location|contact\n"
with open('{}public_meetings.csv'.format(self.directory), 'w') \
as output_file:
output_file.write(header)
for meeting_id in range(rows):
purpose = self.fake.format('sentence',
nb_words=10,
variable_nb_words=True,
ext_word_list=None)
meeting_date = str(self.fake.format('date_time_this_year',
before_now=True,
after_now=True,
tzinfo=None))[:10]
time = self.fake.format('time', pattern='%H:%M')
location = self.fake.format('address').replace("\n", " ")
contact = "{} : {}".format(
self.fake.format('name'),
self.fake.format('phone_number'))
output_file.write("%s|%s|%s|%s|%s|%s\n" %
(meeting_id,
purpose,
meeting_date,
time,
location,
contact))
def generate_calendar(self, start_year, end_year):
"""
Generate Calendar
"""
header = "id|date\n"
sdate = date(start_year, 1, 1) # start date
edate = date(end_year, 12, 31) # end date
delta = edate - sdate # as timedelta
with open('{}calendar.csv'.format(self.directory), 'w') \
as output_file:
output_file.write(header)
for i in range(delta.days + 1):
day = sdate + timedelta(days=i)
output_file.write("%s|%s\n" % (i, day))
if __name__ == '__main__':
logging.info("Generating synthetic data...")
if not sys.argv[1:]:
ARG_PARSER.print_help()
ARG_PARSER.exit()
OPTIONS = {}
try:
OPTIONS = ARG_PARSER.parse_args()
except ArgumentError as err:
ARG_PARSER.print_help()
ARG_PARSER.exit()
VOGTLE_GENERATOR = VogtleDataGenerator(directory=OPTIONS.directory)
VOGTLE_GENERATOR.generate_default()
logging.info("Synthetic data generation complete")
``` |
{
"source": "18F/NRM-Grants-Agreements",
"score": 4
} |
#### File: NRM-Grants-Agreements/import/fix_csv_newlines.py
```python
from argparse import ArgumentParser
def fix_newlines(filename):
"""Fix the newlines in the given file."""
with open(filename, encoding="ISO-8859-1") as csvfile:
# go through the csvfile finding lines with odd numbers of quotation
# marks. When we hit one, collect up the field contents until we find another
# one
in_multiline = False
multilines = []
for line in csvfile:
balanced = (line.count('"') % 2 == 0)
if in_multiline:
multilines.append(line.rstrip()) # take off the newline here
if balanced:
# this is not the last line, just keep going
pass
else:
# this is the last line of the section, emit everything
print(" ".join(l.rstrip() for l in multilines))
                    multilines = []
                    in_multiline = False
else: # not in a multi-line section
                if balanced:
# line has balanced quotes, just emit it
print(line, end='') # line already has a newline at the end
else:
# line has unbalanced quotes so it starts a
# multi-line section
multilines.append(line.rstrip())
                    in_multiline = True
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("input_filename")
args = parser.parse_args()
fix_newlines(args.input_filename)
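# Illustrative behaviour with hypothetical rows: an input record whose quoted
# field was split across physical lines,
#   123,"a note that
#   continues here",456
# is emitted as a single logical row:
#   123,"a note that continues here",456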
```
#### File: nrm_django/grants/forms.py
```python
import datetime
import uuid
from django import forms
from django.core.exceptions import ValidationError
from .models import Category, Grant
from contacts.models import AccomplishmentInstrument, Contact
instance_id = "10602"
class MinGrantForm(forms.ModelForm):
"""
Defines the minimum viable Grant/proposal creation form.
"""
# Require all fields in minimal grant creation form.
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
for key in self.Meta.fields:
self.fields[key].required = True
self.fields[
"proposed_start_date"
].help_text = """
The date the project is expected to start as negotiated in the agreement.
"""
class Meta:
model = Grant
fields = [
"proj_title",
"proj_desc",
"applicant_name",
"app_submit_date",
"app_received_date",
"proposed_start_date",
"proposed_end_date",
"progrm_responsibility_type",
"master_fed_id",
"state_eo_date",
"state_eo_code",
"application_type",
"app_submission_type",
"proj_rwu",
"journal_ind",
"proj_science_cd",
"research_type",
"applicant_est_fund",
]
def save(self, commit=True):
"""
On initial save we generate several values:
* A new CN/PK
* `created_in_instance`
* `modified_in_instance`
Note that the instance ID is currently hard-coded.
It may have no value beyond mimicking the legacy system,
but if it is needed we will still need to learn how to capture it.
"""
instance = super().save(commit=False)
# first, we're gonna have to create our cn/pk
# * First character: Year
# * Second character: Region
# * Trailing characters: The instance created in
# * Middle characters: some unique ID
# TO-DO: figure out a better way to know the correct region.
# Maybe from contacts table?
# Assuming we end up keeping instance_ids as a useful thing we'll have to learn how to
# capture them correctly rather than just using a hard-coded value as we are here.
year_char = datetime.date.today().strftime("%y")[1:]
short_uuid = str(uuid.uuid4())[:8]
instance.cn = "%s%s%s0%s" % (year_char, "6", short_uuid, instance_id)
# stubbing out foo_in_instance rather than making defaults
# since we don't know yet how to populate them correctly or what the values mean.
# eventually we'll have some sort of check or logic here.
instance.created_in_instance = instance.modified_in_instance = instance_id
if commit:
instance.save()
return instance
def clean_state_eo_code(self):
"""
Ensures that if State EO Code is Yes, then a date must be entered.
"""
eo_code = self.cleaned_data["state_eo_code"]
if eo_code and eo_code.lower() == "y":
if not self.cleaned_data["state_eo_date"]:
raise ValidationError(
"If this agreement is subject to state EO, you must enter an EO date."
)
return eo_code
class GrantUpdateForm(forms.ModelForm):
"""
On update, we'll make all fields editable.
"""
# First we need to sort out project category.
    # We're using ModelChoiceField, but it may make more sense to use a ChoiceField.
    # The actual queryset gets set down below in `__init__`.
# TO-DO: Ideally we would migrate to Categories being just a list of cats,
# with Grant having an FK to that table, so we could avoid these sorts of shenanigans.
cat_set = Category.objects.order_by("category_desc").distinct("category_desc")
project_category = forms.ModelChoiceField(
label="Project category",
queryset=cat_set,
required=False,
help_text="If 'International Activities' is 'Yes', you must choose a category.",
)
org_select = forms.ModelChoiceField(
label="Organization",
queryset=Contact.objects.filter(
cn__in=AccomplishmentInstrument.objects.values("managing_contact")
).distinct(),
)
class Meta:
model = Grant
fields = "__all__"
def __init__(self, *args, **kwargs):
super(GrantUpdateForm, self).__init__(*args, **kwargs)
# If we're modifying an existing instance we'll need to manually
# set the initial value for org, because we don't have an FK on Grant
if self.instance:
self.fields["org_select"].initial = self.instance.org
# We'll also need to set the initial project_category.
# Because we used distinct() to toss out duplicate Cats, we have to find one
# that matches whatever one was set on the current Grant so it looks right.
if self.instance.cn:
try:
this_cat = Category.objects.get(grant=self.instance)
                    # if we dropped this cat because of the distinct() on cat_set, we need to add it back in:
if this_cat not in self.cat_set:
# there is a category in cat_set with this_cat's same
# category_desc. Swap that one out for this one.
cat_ids = [cat.cn for cat in self.cat_set]
replace_me = (
Category.objects.filter(
cn__in=cat_ids, category_desc=this_cat.category_desc
)
.first()
.cn
)
cat_ids[cat_ids.index(replace_me)] = this_cat.cn
# reconstruct the queryset
self.cat_set = Category.objects.filter(cn__in=cat_ids)
self.fields["project_category"].initial = this_cat.cn
except Category.DoesNotExist:
pass
def clean_project_category(self):
"""
        Ensures at least one category is set if international_act_ind is true
"""
cleaned_data = super().clean()
intl_act = cleaned_data.get("international_act_ind")
if intl_act and intl_act.lower().startswith("y"):
if not cleaned_data.get("project_category"):
raise ValidationError(
"International agreements must have a program category."
)
return cleaned_data
def save(self, commit=True):
instance = super().save(commit=False)
# if some undetermined sequence of events happens, we will need to update `status`
# if that happens, we would need to update status_date, too.
if "wppp_status" in self.changed_data:
instance.wppp_status_date = datetime.datetime.now()
# stubbing out foo_in_instance rather than making defaults
# since we don't know yet how to populate them correctly or what the values mean.
# eventually we'll have some sort of check or logic here.
instance.created_in_instance = instance.modified_in_instance = instance_id
if commit:
instance.save()
# org is actually a property of the accomplishment instrument.
            # When it changes, we need to change the value over there.
# To-Do: in the future, this should probably be migrated into grant itself
# since it's a 1:1 anyway.
if instance.cn and "org_select" in self.changed_data:
# first, is there an existing ai? Note that the 1:1 relationship is untrustworthy.
# Because we don't trust it and we're using first(), we won't use get_or_create() either.
ai = AccomplishmentInstrument.objects.filter(grant=instance).first()
if not ai:
ai = AccomplishmentInstrument(grant_id=instance.cn)
ai.managing_contact = Contact.objects.get(
cn=self.data.get("org_select")
)
# ai.save() TO-DO: Uncomment this when we've fixed AIs in #222
# We need to create a new category if one does not already exist for this grant.
            # It might make sense to make this a signal or some other post_save() mechanism,
# since it's dependent on the grant being in the DB.
# TO-DO: when we migrate to a proper FK, remove all of this
if "project_category" in self.changed_data:
# temp_cat is the category the user selected from the
# limited set of cats defined in cat_set above.
# We want the raw value, so we're not using cleaned_data
temp_cat = Category.objects.get(cn=self.data.get("project_category"))
Category.objects.get_or_create(
cn=str(uuid.uuid4().hex),
grant=instance,
category_cd=temp_cat.category_cd,
category_desc=temp_cat.category_desc,
)
return instance
```
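For reference, a minimal sketch of the CN that `MinGrantForm.save()` assembles above; the date and UUID fragment are made up, and the trailing value is the module's hard-coded `instance_id`:

```python
import datetime
import uuid

instance_id = "10602"                                  # hard-coded in the module above
year_char = datetime.date.today().strftime("%y")[1:]   # e.g. "1" for 2021
short_uuid = str(uuid.uuid4())[:8]                     # e.g. "a1b2c3d4"
cn = "%s%s%s0%s" % (year_char, "6", short_uuid, instance_id)
# -> something like "16a1b2c3d4010602": year digit, region "6",
#    an 8-character UUID fragment, a literal "0", then the instance id
```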
#### File: tests/models/test_note.py
```python
from django.test import TestCase
from grants.models import Note
class NoteTestCase(TestCase):
def test_creation(self):
subject = Note(
cn=1,
)
self.assertTrue(subject)
``` |
{
"source": "18F/omb-eregs",
"score": 2
} |
#### File: document/json_importer/importer.py
```python
from typing import Optional
from django.db import transaction
from document.models import DocNode
from document.tree import JSONAwareCursor, PrimitiveDict
from reqs.models import Policy
from .annotations import derive_annotations, get_content_text
def convert_node(node: PrimitiveDict, policy: Optional[Policy]=None,
parent: Optional[JSONAwareCursor]=None) -> JSONAwareCursor:
kwargs = node.copy()
children = kwargs.pop('children')
content = kwargs.pop('content')
kwargs['text'] = get_content_text(content)
if parent is None:
cursor = JSONAwareCursor.new_tree(policy=policy, **kwargs)
else:
cursor = parent.add_child(**kwargs)
for child in children:
convert_node(child, parent=cursor)
cursor.json_content = content
return cursor
@transaction.atomic
def import_json_doc(policy: Policy, doc: PrimitiveDict) -> JSONAwareCursor:
"""Imports a document from a JSON blob. It is assumed that the
blob has been validated and normalized by a Django REST API
serializer."""
DocNode.objects.filter(policy=policy).delete()
root = convert_node(doc, policy=policy)
root.nested_set_renumber()
annotations_by_cls = derive_annotations(root)
for cls, annotations in annotations_by_cls.items():
cls.objects.bulk_create(annotations)
return root
```
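A minimal sketch of the JSON shape `convert_node` expects, inferred from the keys it pops above and from the factories in the tests further down; field names beyond `children`, `content`, `node_type`, and `__text__` are assumptions:

```python
doc = {
    "node_type": "policy",
    "content": [],
    "children": [
        {
            "node_type": "para",
            "content": [{"content_type": "__text__", "text": "Hello there"}],
            "children": [],
        },
    ],
}
# root = import_json_doc(policy, doc)  # would wipe the policy's old DocNodes,
#                                      # rebuild the tree, and bulk-create annotations
```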
#### File: api/document/parsers.py
```python
from typing import List
from lxml import etree
from rest_framework.exceptions import ParseError
from rest_framework.parsers import BaseParser
from document.tree import PrimitiveDict
def text(text_content: str) -> PrimitiveDict:
return {
'content_type': '__text__',
'text': text_content
}
def convert_content(xml_el: etree.Element,
sourcelines=True) -> List[PrimitiveDict]:
data = []
if xml_el.text:
data.append(text(xml_el.text))
for child in xml_el:
child_data = {
**child.attrib,
'content_type': child.tag,
'inlines': convert_content(child, sourcelines),
}
if sourcelines:
child_data['_sourceline'] = child.sourceline
data.append(child_data)
if child.tail:
data.append(text(child.tail))
return data
def ensure_at_most_one_child_has_tag(xml_el: etree.Element, tag: str):
if len([child for child in xml_el if child.tag == tag]) > 1:
raise ParseError(f'<{xml_el.tag}> contains multiple '
f'<{tag}> elements')
def convert_node(xml_el: etree.Element,
sourcelines=True) -> PrimitiveDict:
data = {
'node_type': xml_el.tag,
'children': [],
'content': [],
}
if sourcelines:
data['_sourceline'] = xml_el.sourceline
ensure_at_most_one_child_has_tag(xml_el, 'content')
ensure_at_most_one_child_has_tag(xml_el, 'num')
if 'emblem' in xml_el.attrib:
data['type_emblem'] = xml_el.attrib['emblem']
for child in xml_el:
if child.tag == 'content':
data['content'] = convert_content(child, sourcelines)
elif child.tag == 'num':
data['marker'] = child.text
else:
data['children'].append(convert_node(child, sourcelines))
return {**xml_el.attrib, **data}
class AkomaNtosoParser(BaseParser):
media_type = 'application/akn+xml'
def parse(self, stream, media_type=None, parser_context=None):
try:
root = etree.fromstring(stream.read().decode('utf-8'))
except etree.XMLSyntaxError as e:
raise ParseError(f'XML syntax error - {e}')
return convert_node(root)
```
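An illustrative call (not a test from the repo) showing roughly what `convert_node` produces for a small fragment, following the `num`/`content` handling above:

```python
from lxml import etree

# assumes convert_node from the parser module above
xml = b'<para><num>a.</num><content>Hello <cite>there</cite></content></para>'
node = convert_node(etree.fromstring(xml), sourcelines=False)
# node is roughly:
# {'node_type': 'para', 'marker': 'a.', 'children': [],
#  'content': [{'content_type': '__text__', 'text': 'Hello '},
#              {'content_type': 'cite',
#               'inlines': [{'content_type': '__text__', 'text': 'there'}]}]}
```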
#### File: document/tests/doctest_test.py
```python
import doctest
from importlib import import_module
import pytest
@pytest.mark.parametrize('module_name', [
'document.tree',
])
def test_doctests(module_name):
_, test_count = doctest.testmod(
import_module(module_name),
report=True,
verbose=True,
raise_on_error=True,
optionflags=doctest.NORMALIZE_WHITESPACE,
)
assert test_count > 0
```
#### File: document/tests/factories.py
```python
from typing import List
from document.tree import PrimitiveDict
def text(value: str) -> PrimitiveDict:
return {
"content_type": "__text__",
"text": value
}
def external_link(href: str, inlines: List[PrimitiveDict]) -> PrimitiveDict:
return {
"content_type": "external_link",
"href": href,
"inlines": inlines,
}
def para(content: List[PrimitiveDict],
children: List[PrimitiveDict]=None) -> PrimitiveDict:
return {
"node_type": "para",
"content": content,
"children": children or [],
}
def footnote_citation(inlines: List[PrimitiveDict]) -> PrimitiveDict:
return {
"content_type": 'footnote_citation',
"inlines": inlines,
}
def footnote(marker: int, content: List[PrimitiveDict],
children: List[PrimitiveDict]=None) -> PrimitiveDict:
return {
"node_type": "footnote",
"marker": str(marker),
"type_emblem": str(marker),
"content": content,
"children": children or [],
}
def cite(inlines: List[PrimitiveDict]) -> PrimitiveDict:
return {
"content_type": "cite",
"inlines": inlines,
}
```
#### File: tests/json_importer/annotations_test.py
```python
from unittest.mock import Mock
import pytest
from document.json_importer.annotations import derive_annotations
from document.json_importer.importer import convert_node
from document.models import Cite, ExternalLink, FootnoteCitation
from .. import factories as f
def test_derive_annotations_works_on_children():
para = convert_node(f.para(content=[f.text('hi')], children=[
f.para(content=[f.external_link('http://one.org', [
f.text('blah')
])])
]))
annos = derive_annotations(para)
assert len(annos) == 1
links = annos[ExternalLink]
assert len(links) == 1
def test_derive_annotations_works_with_nested_content():
para = convert_node(f.para(content=[
f.external_link('http://one.org', [
f.text('foo'),
f.external_link('http://two.org', [f.text('bar')]),
f.text('baz'),
f.external_link('http://three.org', [f.text('quux')]),
])
]))
annos = derive_annotations(para)
assert len(annos) == 1
links = annos[ExternalLink]
assert len(links) == 3
one = links[0]
assert para.text[one.start:one.end] == 'foobarbazquux'
two = links[1]
assert para.text[two.start:two.end] == 'bar'
three = links[2]
assert para.text[three.start:three.end] == 'quux'
def test_derive_annotations_works_with_external_link():
annos = derive_annotations(convert_node(f.para(content=[
f.text('Hello '),
f.external_link('http://example.org/', [f.text('there')])
])))
assert len(annos) == 1
assert len(annos[ExternalLink]) == 1
link = annos[ExternalLink][0]
assert link.href == 'http://example.org/'
assert link.start == len('Hello ')
assert link.end == link.start + len('there')
def test_derive_annotations_works_with_footnote_citation():
para = convert_node(f.para(
content=[f.footnote_citation([f.text('3')])],
children=[f.footnote(3, content=[f.text('Hi I am a footnote')])]
))
annos = derive_annotations(para)
assert len(annos) == 1
assert len(annos[FootnoteCitation]) == 1
cit = annos[FootnoteCitation][0]
assert cit.start == 0
assert cit.end == 1
assert cit.footnote_node.text == 'Hi I am a footnote'
def test_derive_annotations_raises_err_on_invalid_footnote_citation():
para = convert_node(f.para(
content=[f.footnote_citation([f.text('3')])],
children=[f.footnote(4, content=[f.text('Hi I am a footnote')])]
))
with pytest.raises(ValueError,
match="unable to find footnote for citation 3"):
derive_annotations(para)
def test_derive_annotations_raises_err_on_invalid_annotation():
para = Mock(json_content=[{'content_type': 'blarg'}])
with pytest.raises(ValueError,
match="no annotator found for blarg"):
derive_annotations(para)
def test_derive_annotations_works_with_cite():
annos = derive_annotations(convert_node(f.para(content=[
f.text('Hello '),
f.cite([f.text('Federal STEM Education 5-Year Strategic Plan')])
])))
assert len(annos) == 1
assert len(annos[Cite]) == 1
cite = annos[Cite][0]
assert cite.start == len('Hello ')
assert cite.end == cite.start + len(
'Federal STEM Education 5-Year Strategic Plan')
```
#### File: api/document/views.py
```python
from django.contrib.auth.decorators import login_required
from django.shortcuts import get_object_or_404, render
from django.urls import reverse
from rest_framework import status
from rest_framework.generics import GenericAPIView
from rest_framework.parsers import JSONParser
from rest_framework.renderers import BrowsableAPIRenderer, JSONRenderer
from rest_framework.response import Response
from document.models import DocNode
from document.parsers import AkomaNtosoParser
from document.renderers import AkomaNtosoRenderer, BrowsableAkomaNtosoRenderer
from document.serializers.doc_cursor import DocCursorSerializer
from document.tree import DocCursor
from reqs.views.policies import policy_or_404
class TreeView(GenericAPIView):
serializer_class = DocCursorSerializer
renderer_classes = (JSONRenderer, BrowsableAPIRenderer,
AkomaNtosoRenderer, BrowsableAkomaNtosoRenderer)
parser_classes = (JSONParser, AkomaNtosoParser)
queryset = DocNode.objects.none() # Used to determine permissions
def get_object(self, prefetch_related=True):
only_published = not self.request.user.is_authenticated
policy = policy_or_404(self.kwargs['policy_id'], only_published)
# we'll pass this policy down when we serialize
self.policy = policy
query_args = {'policy_id': policy.pk}
if self.kwargs.get('identifier'):
query_args['identifier'] = self.kwargs['identifier']
else:
query_args['depth'] = 0
queryset = DocNode.objects
if prefetch_related:
queryset = queryset.prefetch_annotations()
root_doc = get_object_or_404(queryset, **query_args)
root = DocCursor.load_from_model(root_doc, subtree=False)
if prefetch_related:
root.add_models(root_doc.descendants().prefetch_annotations())
self.check_object_permissions(self.request, root)
return root
def get_serializer_context(self):
return {
'policy': getattr(self, 'policy', None),
}
def get(self, request, *args, **kwargs):
instance = self.get_object(prefetch_related=True)
serializer = self.get_serializer(instance)
return Response(serializer.data)
def put(self, request, *args, **kwargs):
if self.kwargs.get('identifier'):
return Response({
'detail': 'Identifiers are unsupported on PUT requests.',
}, status=status.HTTP_400_BAD_REQUEST)
# We don't care about prefetching related data because we're
# about to delete all of it anyways.
instance = self.get_object(prefetch_related=False)
serializer = self.get_serializer(instance, data=request.data)
serializer.is_valid(raise_exception=True)
serializer.save()
return Response(status=status.HTTP_204_NO_CONTENT)
def render_editor(request, policy_id, filename, title):
# Verify that the policy is valid; 404 when not. We don't actually load
# the document content as they'll be retrieved from the API
policy_or_404(policy_id, only_published=False)
return render(request, filename, {
'document_url': reverse('document', kwargs={'policy_id': policy_id}),
'title': title,
})
@login_required
def editor(request, policy_id):
return render_editor(request, policy_id, 'document/editor.html',
'Document Editor')
@login_required
def editor_akn(request, policy_id):
return render_editor(request, policy_id, 'document/editor_akn.html',
'Akoma Ntoso Editor')
```
#### File: api/ereqs_admin/forms.py
```python
from django.contrib.auth.forms import UsernameField
from django.contrib.auth.models import User
from django.forms import ModelForm
class UserCreationForm(ModelForm):
"""A replacement for user creation form which does not contain a
password. Largely lifted from Django"""
class Meta:
model = User
fields = ("username",)
field_classes = {'username': UsernameField}
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if self._meta.model.USERNAME_FIELD in self.fields:
self.fields[self._meta.model.USERNAME_FIELD].widget.attrs.update(
{'autofocus': True})
def save(self, commit=True):
user = super().save(commit=False)
user.is_staff = True
user.set_unusable_password()
if commit:
user.save()
return user
class UserChangeForm(ModelForm):
"""A replacement for user change which does not contain a password.
Largely lifted from Django"""
class Meta:
model = User
exclude = ['is_staff', 'password']
field_classes = {'username': UsernameField}
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
f = self.fields.get('user_permissions')
if f is not None:
f.queryset = f.queryset.select_related('content_type')
```
#### File: ereqs_admin/templatetags/nav_menu.py
```python
from functools import lru_cache
from typing import NamedTuple, Optional, Tuple
from django import template
from django.template import RequestContext
from django.urls import reverse
register = template.Library()
# Recursive types aren't fully supported by mypy
# https://github.com/python/mypy/issues/731
class MenuItem(NamedTuple): # type: ignore
title: str
active: bool
url: Optional[str]
children: Tuple['MenuItem', ...]
@classmethod
def new(cls, title, active=False, url=None, children=None):
return cls(title, active, url, children or ())
def update_active(self, active_path):
"""Figure out which part of the menu is active. Parents will be active
if their children are."""
children = tuple(child.update_active(active_path)
for child in self.children)
child_active = any(child.active for child in children)
active = self.url == active_path or child_active
return self._replace(active=active, children=children)
def append_child(self, child):
return self._replace(children=self.children + (child,))
@lru_cache()
def get_menu_archetype():
# Note that we need to create this lazily because we're not
# guaranteed to have 'admin' in our URLconf namespace at the
# time this is called. See e.g.:
#
# https://github.com/18F/omb-eregs/issues/863
return MenuItem.new('', children=(
MenuItem.new('Policies', children=(
MenuItem.new('View all policies',
url=reverse('admin:reqs_policy_changelist')),
MenuItem.new('Add new', url=reverse('admin:reqs_policy_add')),
)),
MenuItem.new('Users', url=reverse('admin:auth_user_changelist')),
# Settings depends on the user id, so is added later
))
@register.inclusion_tag('ereqs_admin/nav_menu.html', takes_context=True)
def nav_menu(context: RequestContext):
active_path = context['request'].path
user = context['user']
if user.is_authenticated:
menu_root = get_menu_archetype().append_child(MenuItem.new(
'Settings', url=reverse('admin:auth_user_change', args=(user.pk,))
))
menu_root = menu_root.update_active(active_path)
else:
menu_root = MenuItem.new('')
return {'menu': menu_root.children}
```
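A small illustration (made-up URLs, using the `MenuItem` class above) of how `update_active` bubbles a child's match up to its parents:

```python
menu = MenuItem.new('', children=(
    MenuItem.new('Policies', children=(
        MenuItem.new('View all policies', url='/admin/reqs/policy/'),
    )),
    MenuItem.new('Users', url='/admin/auth/user/'),
))
menu = menu.update_active('/admin/reqs/policy/')
assert menu.children[0].active              # parent marked active because...
assert menu.children[0].children[0].active  # ...its child matched the path
assert not menu.children[1].active
```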
#### File: ereqs_admin/tests/make_revisions_tests.py
```python
import pytest
from django.core.management import call_command
from model_mommy import mommy
from reversion.models import Version
from reqs.models import AgencyGroup, Policy
@pytest.mark.django_db
def test_empty_database():
call_command('make_revisions')
assert Version.objects.count() == 0
@pytest.mark.django_db
def test_specify_model():
mommy.make(AgencyGroup, _quantity=3)
mommy.make(Policy, _quantity=4)
call_command('make_revisions', 'reqs.AgencyGroup')
assert Version.objects.count() == 3
assert Version.objects.get_for_model(AgencyGroup).count() == 3
assert Version.objects.get_for_model(Policy).count() == 0
@pytest.mark.django_db
def test_call_twice():
mommy.make(AgencyGroup, _quantity=3)
mommy.make(Policy, _quantity=4)
call_command('make_revisions')
assert Version.objects.count() == 3 + 4
call_command('make_revisions')
assert Version.objects.count() == 2 * (3 + 4)
```
#### File: ereqs_admin/tests/max_login_tests.py
```python
from unittest.mock import Mock, call
import pytest
from django.contrib.auth.models import User
from model_mommy import mommy
from ereqs_admin import max_backend
def mock_verify_ticket(monkeypatch, exists_in_max=True, first='First',
last='Last', email='<EMAIL>'):
if exists_in_max:
attributes = {
'First-Name': first,
'Last-Name': last,
'Email-Address': email,
}
max_username = 'max-username'
else:
attributes = {}
max_username = None
monkeypatch.setattr(max_backend, 'get_cas_client', Mock())
verify_ticket = max_backend.get_cas_client.return_value.verify_ticket
verify_ticket.return_value = (max_username, attributes, Mock())
return verify_ticket
@pytest.mark.django_db
def test_authenticate_success(monkeypatch):
"""Verify that a user is configured correctly"""
user = mommy.make(User, is_staff=False, username='<EMAIL>')
verify_ticket = mock_verify_ticket(
monkeypatch, first='Freddie', last='Mercury', email='<EMAIL>')
backend = max_backend.MAXBackend()
backend.authenticate(Mock(), 'some-ticket', 'service-url')
assert max_backend.get_cas_client.call_args == call(
service_url='service-url')
assert verify_ticket.call_args == call('some-ticket')
user = User.objects.get(pk=user.pk)
assert user.first_name == 'Freddie'
assert user.last_name == 'Mercury'
assert user.email == '<EMAIL>'
assert user.is_staff
@pytest.mark.django_db
def test_authenticate_failure_unknown_user(monkeypatch):
"""If MAX knows about the user but Django does not, we should fail"""
assert User.objects.count() == 0
mock_verify_ticket(monkeypatch)
backend = max_backend.MAXBackend()
result = backend.authenticate(Mock(), 'some-ticket', 'service-url')
assert result is None
assert User.objects.count() == 0
@pytest.mark.django_db
def test_authenticate_ticket_failure(monkeypatch):
"""If MAX isn't aware of the user, we shouldn't explode (nor create a new
user)"""
assert User.objects.count() == 0
mock_verify_ticket(monkeypatch, exists_in_max=False)
backend = max_backend.MAXBackend()
result = backend.authenticate(Mock(), 'some-ticket', 'service-url')
assert result is None
assert User.objects.count() == 0
@pytest.mark.django_db
def test_authenticate_logging_bad_ticket(monkeypatch):
monkeypatch.setattr(max_backend, 'logger', Mock())
mock_verify_ticket(monkeypatch, exists_in_max=False)
backend = max_backend.MAXBackend()
assert backend.authenticate(Mock(), 'some-ticket', 'service-url') is None
assert max_backend.logger.warning.called
warning_message = max_backend.logger.warning.call_args[0][0]
assert 'lookup was unsuccessful' in warning_message
@pytest.mark.django_db
def test_authenticate_logging_bad_email(monkeypatch):
monkeypatch.setattr(max_backend, 'logger', Mock())
mock_verify_ticket(monkeypatch, email='<EMAIL>')
assert User.objects.count() == 0
backend = max_backend.MAXBackend()
assert backend.authenticate(Mock(), 'some-ticket', 'service-url') is None
assert max_backend.logger.warning.called
warning_message = max_backend.logger.warning.call_args[0][0]
assert 'referred to a user not in our system' in warning_message
assert '<EMAIL>' in max_backend.logger.warning.call_args[0]
@pytest.mark.django_db
def test_authenticate_logging_success(monkeypatch):
mommy.make(User, username='<EMAIL>')
monkeypatch.setattr(max_backend, 'logger', Mock())
mock_verify_ticket(monkeypatch, email='<EMAIL>')
backend = max_backend.MAXBackend()
backend.authenticate(Mock(), 'some-ticket', 'service-url')
assert max_backend.logger.info.called
assert 'success' in max_backend.logger.info.call_args[0][0]
assert '<EMAIL>' in max_backend.logger.info.call_args[0]
```
#### File: ereqs_admin/tests/revision_creation_tests.py
```python
from unittest.mock import Mock, call
import pytest
from ereqs_admin import revision_creation
def test_reversion_models_active_only(monkeypatch):
monkeypatch.setattr(revision_creation, 'apps', Mock())
monkeypatch.setattr(revision_creation, 'reversion', Mock())
revision_creation.reversion.is_registered.return_value = True
def mock_get_model(app_label, model_name):
if app_label == 'valid':
return Mock(model_name=model_name)
else:
raise LookupError("bad data")
revision_creation.apps.get_model = mock_get_model
results = revision_creation.reversion_models([
('valid', 'first'), ('invalid', 'second'), ('valid', 'third')])
assert [r.model_name for r in results] == ['first', 'third']
def test_reversion_models_must_be_registered(monkeypatch):
monkeypatch.setattr(revision_creation, 'apps', Mock())
monkeypatch.setattr(revision_creation, 'reversion', Mock())
registered1, unregistered, registered2 = Mock(), Mock(), Mock()
revision_creation.apps.get_model.side_effect = [
registered1, unregistered, registered2]
def mock_is_registered(model):
return model in (registered1, registered2)
revision_creation.reversion.is_registered = mock_is_registered
model_pairs = [('a', 'b'), ('c', 'd'), ('e', 'f')]
assert list(revision_creation.reversion_models(model_pairs)) == [
registered1, registered2]
@pytest.mark.django_db
def test_create_versions_after_migration_no_plan(monkeypatch):
monkeypatch.setattr(revision_creation, 'create_revisions_for', Mock())
revision_creation.create_revisions_for.side_effect = ValueError
revision_creation.create_versions_after_migration()
revision_creation.create_versions_after_migration(plan=[])
@pytest.mark.django_db
def test_create_versions_after_migration(monkeypatch):
monkeypatch.setattr(revision_creation, 'reversion_models', Mock())
monkeypatch.setattr(revision_creation, 'create_revisions_for', Mock())
model1, model2, model3 = Mock(), Mock(), Mock()
revision_creation.reversion_models.return_value = [model1, model2, model3]
revision_creation.create_versions_after_migration(plan=[
(Mock(spec_set=[]), False), # no REVISED_MODELS
(Mock(REVISED_MODELS=[('a', 'b'), ('c', 'd')]), False),
(Mock(REVISED_MODELS=[('c', 'd'), ('e', 'f')]), False),
(Mock(REVISED_MODELS=[('g', 'h')]), True), # rollback
])
assert revision_creation.reversion_models.call_args == call({
('a', 'b'), ('c', 'd'), ('e', 'f')})
assert revision_creation.create_revisions_for.call_args_list == [
call(model1), call(model2), call(model3)]
```
#### File: management/commands/pdf_fonts.py
```python
from argparse import FileType
from django.core.management.base import BaseCommand
import ombpdf.fontsize
import ombpdf.util
class Command(BaseCommand):
help = 'Show font usage statistics for a PDF.' # NOQA
def add_arguments(self, parser):
parser.add_argument('filename', type=FileType('rb'))
def handle(self, *args, **options):
ombpdf.fontsize.main(ombpdf.util.get_ltpages(options['filename']))
```
#### File: ombpdf/tests/test_doctests.py
```python
import doctest
from importlib import import_module
import pytest
@pytest.mark.parametrize('module_name', [
'ombpdf.fontsize',
'ombpdf.eqnamedtuple',
'ombpdf.paragraphs',
'ombpdf.rawlayout',
'ombpdf.semtree',
'ombpdf.tests.bbox',
])
def test_doctests(module_name):
_, test_count = doctest.testmod(
import_module(module_name),
report=True,
verbose=True,
raise_on_error=True,
optionflags=doctest.NORMALIZE_WHITESPACE,
)
assert test_count > 0
```
#### File: ombpdf/tests/test_document.py
```python
from unittest.mock import Mock
from ombpdf import document
def test_textline_repr_works(m_16_19_doc):
assert repr(m_16_19_doc.pages[0][4]) == \
'<OMBTextLine with text "August 1, 2016 ">'
def test_textline_is_blank_works(m_16_19_doc):
assert m_16_19_doc.pages[0][0].is_blank()
assert not m_16_19_doc.pages[0][4].is_blank()
def test_page_numbers_work(m_16_19_doc):
assert m_16_19_doc.pages[0].number == 1
assert m_16_19_doc.pages[1].number == 2
def test_useful_lines_are_not_culled(m_11_29_doc):
lastpage_text = '\n'.join([str(line) for line in m_11_29_doc.pages[-1]])
assert 'opportunities to reduce duplication' in lastpage_text
def test_calc_left_edge(monkeypatch):
monkeypatch.setattr(document, 'logger', Mock())
lines = [
Mock(left_edge=10),
Mock(left_edge=20),
Mock(left_edge=10),
Mock(left_edge=20),
Mock(left_edge=30),
Mock(left_edge=10),
]
assert document.calc_left_edge(lines) == 10
assert not document.logger.warning.called
def test_no_significant_left_edge(monkeypatch):
monkeypatch.setattr(document, 'logger', Mock())
lines = [Mock(left_edge=i*10) for i in range(1, 20)]
assert document.calc_left_edge(lines) == 10
assert document.logger.warning.called
def test_no_left_edge(monkeypatch):
monkeypatch.setattr(document, 'logger', Mock())
assert document.calc_left_edge([]) == 0
assert document.logger.warning.called
def test_image_pdf(monkeypatch):
monkeypatch.setattr(document, 'logger', Mock())
mock_page = []
doc = document.OMBDocument([mock_page, mock_page, mock_page])
assert doc.paragraph_fontsize == 0
assert document.logger.warning.called
```
#### File: ombpdf/tests/test_paragraphs.py
```python
import pytest
from ombpdf import paragraphs
from . import bbox
def test_annotate_paragraphs_works(m_16_19_doc):
p = paragraphs.annotate_paragraphs(m_16_19_doc)
assert str(p[1][0]).startswith('In 2010, the Office')
assert str(p[2][0]).startswith('In December 2014, the')
# The first sentence on page 2 (a new paragraph)
assert str(p[4][0]).startswith('This memorandum defines')
# The last sentence on page 2 (an unfinished paragraph)
assert str(p[9][1]).strip().endswith('an existing data center')
# The first sentence on page 3 (continuation of paragraph from page 2)
assert str(p[9][2]).strip().startswith('without approval from OMB')
def test_annotate_paragraphs_works_with_indents(m_15_17_doc):
p = paragraphs.annotate_paragraphs(m_15_17_doc)
assert str(p[2][0]).startswith('Last summer, the President')
assert str(p[3][0]).startswith('Federal government funding for')
assert str(p[4][0]).startswith('This memorandum outlines')
assert str(p[5][0]).startswith('While the Administration')
assert str(p[6][0]).startswith('Native children are far')
@pytest.mark.xfail(raises=AssertionError)
def test_indents_2():
doc, _, lines = bbox.find_lines('http://localhost:8001/pdf/rawlayout/2011/m11-29.pdf?bbox=1,61,240.5,546,313.5#1') # NOQA
doc.annotators.require('paragraphs')
for line in lines:
assert isinstance(line.annotation, paragraphs.OMBParagraph)
```
#### File: ombpdf/tests/test_semdom.py
```python
from pathlib import Path
from ombpdf.semdom import to_dom
from .snapshot import assert_snapshot_matches
MY_DIR = Path(__file__).parent
def assert_dom_xml_snapshot_matches(doc, force_overwrite=False):
dom = to_dom(doc)
xml = dom.toprettyxml(indent=' ')
name = Path(doc.filename).stem
expected_xml_path = MY_DIR / f'test_semdom.snapshot.{name}.xml'
assert_snapshot_matches(expected_xml_path, xml, 'DOM XML',
force_overwrite=force_overwrite)
def test_m_14_10_import(m_14_10_doc):
assert_dom_xml_snapshot_matches(m_14_10_doc)
def test_m_16_19_import(m_16_19_doc):
assert_dom_xml_snapshot_matches(m_16_19_doc)
```
#### File: ombpdf/tests/test_views.py
```python
import pytest
from ombpdf.download_pdfs import download
PDFPATH = '2016/m_16_19_1.pdf'
def setup_module(module):
# Ensure we have at least one PDF for the webapp to process.
download(PDFPATH)
@pytest.mark.urls('ombpdf.urls')
def test_index_works(client):
assert PDFPATH.encode('ascii') in client.get('/').content
@pytest.mark.urls('ombpdf.urls')
def test_raw_pdf_works(client):
assert client.get(f'/raw/{PDFPATH}').status_code == 200
@pytest.mark.urls('ombpdf.urls')
def test_html_pdf_works(client):
assert client.get(f'/html/{PDFPATH}').status_code == 200
@pytest.mark.urls('ombpdf.urls')
def test_semhtml_pdf_works(client):
assert client.get(f'/semhtml/{PDFPATH}').status_code == 200
@pytest.mark.urls('ombpdf.urls')
def test_rawlayout_works(client):
assert client.get(f'/rawlayout/{PDFPATH}').status_code == 200
```
#### File: api/ombpdf/underlines.py
```python
from .horizlines import get_horizontal_lines
# Maximum distance an underline can be from the bottom of a line's bbox,
# as a fraction of the bbox height.
MAX_LINE_BBOX_DIST = 0.25
# Maximum distance the left/right of a character's bbox can be from the
# start/end of an underline (in points, I think).
EPSILON = 1.0
def set_underlines(doc):
underlines = []
for page in doc.pages:
underlines.extend(set_underlines_in_page(page))
return underlines
def set_underlines_in_page(page):
underlines = []
hlines = get_horizontal_lines(page)
for line in page:
for hline in hlines:
tl = line.lttextline
height = tl.y1 - tl.y0
if abs(tl.y0 - hline.y) < MAX_LINE_BBOX_DIST * height:
chars = []
in_underline = False
for char in line:
if in_underline:
if char.ltchar.x1 - EPSILON > hline.end:
break
else:
chars.append(char)
elif (str(char) != ' ' and
char.ltchar.x0 + EPSILON >= hline.start):
in_underline = True
chars.append(char)
if chars:
underlines.append(chars)
for char in chars:
char.set_underlined()
return underlines
def main(doc):
underlines = set_underlines(doc)
print("Underlined words:")
for chars in underlines:
text = ''.join([str(c) for c in chars])
print(f" {text}")
```
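To make the first tolerance concrete, a hedged numeric sketch of the proximity test in `set_underlines_in_page` (coordinates invented):

```python
# Hypothetical coordinates, with MAX_LINE_BBOX_DIST = 0.25 as above:
line_y0, line_y1 = 100.0, 112.0   # a text line's bbox (height 12.0)
underline_y = 102.5               # a horizontal rule just below the glyphs
height = line_y1 - line_y0
close_enough = abs(line_y0 - underline_y) < 0.25 * height  # 2.5 < 3.0 -> True
```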
#### File: api/ombpdf/views.py
```python
import json
from typing import Any, Dict # noqa
from django.contrib.staticfiles.templatetags.staticfiles import static
from django.http import FileResponse, Http404, HttpResponse
from django.shortcuts import render
from django.urls import reverse
from django.utils.safestring import SafeString
from ombpdf import html, rawlayout, semhtml
from ombpdf.document import OMBDocument
from ombpdf.download_pdfs import ROOT_DIR as DATA_DIR
from ombpdf.util import get_ltpages
def to_urlpath(path):
return '/'.join(path.relative_to(DATA_DIR).parts)
def get_pdfmap():
pdfmap = {}
for path in DATA_DIR.glob('**/*.pdf'):
pdfmap[to_urlpath(path)] = path
return pdfmap
_ltpages_cache: Dict[Any, Any] = {}
def to_doc(path):
if path not in _ltpages_cache:
with path.open('rb') as fp:
_ltpages_cache[path] = get_ltpages(fp)
return OMBDocument(
_ltpages_cache[path],
filename=path.relative_to(DATA_DIR),
)
def index(request):
return render(request, 'ombpdf/index.html', {
'name': __name__,
'pdfmap': get_pdfmap(),
})
def as_path(pathname):
pdfmap = get_pdfmap()
if pathname in pdfmap:
return pdfmap[pathname]
raise Http404()
def raw_pdf(request, pdf):
return FileResponse(as_path(pdf).open('rb'),
content_type='application/pdf')
def html_pdf(request, pdf):
return HttpResponse(html.to_html(to_doc(as_path(pdf))),
content_type='text/html')
def rawlayout_pdf(request, pdf):
doc = to_doc(as_path(pdf))
script_params = {
'pdfPath': reverse('raw_pdf', kwargs={'pdf': pdf}),
'workerSrc': static('bundles/js/pdf.worker.bundle.js'),
}
    layout_html, ctx = rawlayout.to_html(doc)
    return render(request, 'ombpdf/rawlayout.html', {
        'doc': doc,
        'html': SafeString(layout_html),
'script_params': SafeString(json.dumps(script_params)),
**ctx,
})
def semhtml_pdf(request, pdf):
return HttpResponse(semhtml.to_html(to_doc(as_path(pdf))),
content_type='text/html')
```
#### File: reqs/tests/link_agencies_tests.py
```python
from collections import namedtuple
import pytest
from model_mommy import mommy
from reqs.management.commands import link_agencies, sync_agencies
from reqs.models import Agency, AgencyGroup, Requirement
@pytest.fixture
def populate_agencies():
cmd = sync_agencies.Command()
cmd.create_system_groups()
mommy.make(Agency, name="Aquarius", omb_agency_code="123")
dod = mommy.make(Agency, name="Department of Defense", abbr="DOD",
omb_agency_code="111")
mommy.make(Agency, name="Department of Homeland Security", abbr="DHS",
omb_agency_code="100")
mommy.make(Agency, name="General Services Administration", abbr="GSA",
omb_agency_code="101")
mommy.make(Agency, name="Department of Justice", abbr="Justice",
omb_agency_code="102")
mommy.make(Agency, name="Department of Foo", abbr="FOO",
omb_agency_code="112")
MockAgencies = namedtuple("MockAgencies", ["dod", "all_agencies"])
all_agencies_group = AgencyGroup.objects.get(slug="all-agencies")
mock_agencies = MockAgencies(dod=dod, all_agencies=all_agencies_group)
yield mock_agencies
@pytest.mark.django_db
@pytest.mark.parametrize("impacted_entity,agencies,agency_groups", [
(("",), (set(),), (set(),)),
(("All agencies, Defense",), ({"DOD"},), ({"all-agencies"},)),
(("dhs, Defense",), ({"DHS", "DOD"},), (set(),)),
(("CFO, Defense",), ({"DOD"},), ({"cfo-act"},)),
(("CFO; Defense; all agencies",), ({"DOD"},),
({"all-agencies", "cfo-act"},)),
(("All agencies", "DOD"), (set(), {"DOD"}), ({"all-agencies"}, set())),
(("CFO\nDHS\nGSA\nDOJ\nDOD",), ({"DHS", "DOD", "Justice", "GSA"},),
({"cfo-act"},)),
(("All agencies\nDHS\nGSA\nDOJ\nDOD",),
({"DHS", "DOD", "Justice", "GSA"},), ({"all-agencies"},)),
(("DHS\nGSA\nDOJ\nDOD",), ({"DHS", "DOD", "Justice", "GSA"},),
(set(),)),
(("Federal CFO Council, DHS\nGSA\nDOJ\nDOD",),
({"DHS", "DOD", "Justice", "GSA"},), (set(),)),
(("All agencies except micro agencies\nDHS\nGSA\nDOJ\nDOD",),
({"DHS", "DOD", "Justice", "GSA"},), (set(),)),
(("All agencies except Paperwork\nDHS\nGSA\nDOJ\nDOD",),
({"DHS", "DOD", "Justice", "GSA"},), (set(),)),
(("department of homeland\ngeneral services\nJustice\nDefense",),
({"DHS", "DOD", "Justice", "GSA"},), (set(),)),
], ids=repr)
def test_linking(populate_agencies, impacted_entity, agencies,
agency_groups):
reqs = [mommy.make(Requirement, impacted_entity=_) for _ in
impacted_entity]
link_agencies.Command().handle()
for agency_set, agency_group, req in zip(agencies, agency_groups, reqs):
req_agencies = {a.abbr for a in req.agencies.all()}
assert req_agencies == agency_set
req_groups = {g.slug for g in req.agency_groups.all()}
assert req_groups == agency_group
@pytest.mark.django_db
def test_link_requirements(populate_agencies):
dod_req = mommy.make(Requirement, impacted_entity="DOD")
all_agencies_req = mommy.make(Requirement, impacted_entity="All agencies")
dod_agency = populate_agencies.dod
all_agencies_group = populate_agencies.all_agencies
assert Requirement.objects.count() == 2
assert Requirement.objects.filter(agencies=None,
agency_groups=None).count() == 2
cmd = link_agencies.Command()
cmd.process_requirements()
assert Requirement.objects.count() == 2
assert Requirement.objects.filter(agencies=None,
agency_groups=None).count() == 0
assert list(dod_req.agencies.all()) == [dod_agency]
assert list(dod_req.agency_groups.all()) == []
assert list(all_agencies_req.agencies.all()) == []
assert list(all_agencies_req.agency_groups.all()) == [all_agencies_group]
link_agencies.reset_agency_relationships()
assert Requirement.objects.count() == 2
assert Requirement.objects.filter(agencies=None,
agency_groups=None).count() == 2
```
#### File: reqs/tests/views_policies_tests.py
```python
from collections import namedtuple
import pytest
from django.http import Http404
from model_mommy import mommy
from rest_framework.test import APIClient
from reqs.models import Agency, AgencyGroup, Policy, Requirement, Topic
from reqs.views import policies as policies_views
PolicySetup = namedtuple('PolicySetup', ('policies', 'reqs'))
@pytest.fixture
def policy_setup():
policies = [mommy.make(Policy, policy_number='0',
workflow_phase='published'),
mommy.make(Policy, policy_number='1',
workflow_phase='published')]
reqs = [mommy.make(Requirement, policy=policies[0], _quantity=3),
mommy.make(Requirement, policy=policies[1], _quantity=4)]
yield PolicySetup(policies, reqs)
@pytest.fixture
def policy_topic_setup(policy_setup):
topics = mommy.make(Topic, _quantity=3)
reqs = policy_setup.reqs
reqs[0][0].topics.add(topics[0])
reqs[0][1].topics.add(topics[1])
reqs[0][2].topics.add(topics[0], topics[1])
reqs[1][0].topics.add(topics[1])
reqs[1][1].topics.add(topics[1], topics[2])
yield policy_setup, topics
@pytest.mark.django_db
def test_topics_counts_no_params(policy_topic_setup):
"""The API endpoint should include all requirements when no params are
given"""
(_, reqs), _ = policy_topic_setup
client = APIClient()
response = client.get("/policies/").json()
assert response['count'] == 2
assert response['results'][0]['total_reqs'] == len(reqs[0])
assert response['results'][0]['relevant_reqs'] == len(reqs[0])
assert response['results'][1]['total_reqs'] == len(reqs[1])
assert response['results'][1]['relevant_reqs'] == len(reqs[1])
@pytest.mark.django_db
def test_topics_counts_filter_req(policy_topic_setup):
"""The API endpoint should include only relevant policies when we filter
by an attribute of a requirement"""
(_, reqs), _ = policy_topic_setup
client = APIClient()
path = "/policies/?requirements__req_id=" + reqs[1][1].req_id
response = client.get(path).json()
assert response['count'] == 1
assert response['results'][0]['total_reqs'] == len(reqs[1])
assert response['results'][0]['relevant_reqs'] == 1
@pytest.mark.django_db
def test_topics_counts_filter_by_one_topic(policy_topic_setup):
"""The API endpoint should include only relevant policies when we filter
by a single topic"""
(_, reqs), topics = policy_topic_setup
client = APIClient()
path = "/policies/?requirements__topics__id__in={0}".format(topics[0].pk)
response = client.get(path).json()
assert response['count'] == 1
assert response['results'][0]['total_reqs'] == len(reqs[0])
# reqs[0][0] and reqs[0][2]
assert response['results'][0]['relevant_reqs'] == 2
@pytest.mark.django_db
def test_topics_counts_filter_by_multiple_topics(policy_topic_setup):
"""The API endpoint should include only relevant policies when we filter
by multiple topics"""
(_, reqs), topics = policy_topic_setup
client = APIClient()
path = "/policies/?requirements__topics__id__in={0},{1}".format(
topics[0].pk, topics[2].pk)
response = client.get(path).json()
assert response['count'] == 2
assert response['results'][0]['total_reqs'] == len(reqs[0])
# reqs[0][0] and reqs[0][2]
assert response['results'][0]['relevant_reqs'] == 2
assert response['results'][1]['total_reqs'] == len(reqs[1])
# reqs[1][1]
assert response['results'][1]['relevant_reqs'] == 1
@pytest.mark.django_db
def test_agencies_direct(policy_setup):
_, reqs = policy_setup
agencies = mommy.make(Agency, _quantity=3)
reqs[0][1].agencies.add(agencies[0], agencies[1])
reqs[1][0].agencies.add(agencies[2])
client = APIClient()
path = "/policies/?requirements__all_agencies__id__in={0}".format(
agencies[0].pk)
response = client.get(path).json()
assert response['count'] == 1
assert response['results'][0]['relevant_reqs'] == 1
path = "/policies/?requirements__agencies__id__in={0}".format(
agencies[0].pk)
response = client.get(path).json()
assert response['count'] == 1
assert response['results'][0]['relevant_reqs'] == 1
@pytest.mark.django_db
def test_agencies_indirect(policy_setup):
_, reqs = policy_setup
group = mommy.make(AgencyGroup)
agency_in_group, agency_no_group = mommy.make(Agency, _quantity=2)
group.agencies.add(agency_in_group)
reqs[0][0].agencies.add(agency_no_group)
reqs[1][0].agency_groups.add(group)
client = APIClient()
path = "/policies/?requirements__all_agencies__id__in={0}".format(
agency_in_group.pk)
response = client.get(path).json()
assert response['count'] == 1
assert response['results'][0]['relevant_reqs'] == 1
path = "/policies/?requirements__agencies__id__in={0}".format(
agency_in_group.pk)
response = client.get(path).json()
assert response['count'] == 0
path = "/policies/?requirements__agency_groups__id__in={0}".format(
group.pk)
response = client.get(path).json()
assert response['count'] == 1
assert response['results'][0]['relevant_reqs'] == 1
@pytest.mark.django_db
def test_nonpublic_reqs():
client = APIClient()
policy = mommy.make(Policy, workflow_phase='published')
mommy.make(Requirement, policy=policy, public=False)
assert client.get('/policies/').json()['count'] == 0
mommy.make(Requirement, _quantity=4, policy=policy)
response = client.get('/policies/').json()
assert response['count'] == 1
assert policy.requirements.count() == 5
assert response['results'][0]['relevant_reqs'] == 4
assert response['results'][0]['total_reqs'] == 4
@pytest.mark.django_db
def test_omb_policy_id():
client = APIClient()
omb_policy_id = "M-123-4"
path = "/policies/{0}".format(omb_policy_id)
response = client.get(path)
assert response.status_code == 301
mommy.make(Policy, omb_policy_id=omb_policy_id, workflow_phase='published')
response = client.get(path + '.json').json()
assert response['omb_policy_id'] == omb_policy_id
@pytest.mark.django_db
def test_pk_id():
client = APIClient()
pk_id = 123
path = "/policies/{0}".format(pk_id)
response = client.get(path)
assert response.status_code == 301
mommy.make(Policy, pk=pk_id, workflow_phase='published')
response = client.get(path + '.json').json()
assert response['id'] == pk_id
@pytest.mark.django_db
def test_slug():
client = APIClient()
slug = "hello-there"
path = f"/policies/{slug}.json"
response = client.get(path)
assert response.status_code == 404
mommy.make(Policy, slug=slug, pk=456, workflow_phase='published')
response = client.get(path).json()
assert response['id'] == 456
@pytest.mark.django_db
def test_policy_or_404():
policy = mommy.make(Policy, omb_policy_id='AAA-BBB-CCC',
workflow_phase='published')
assert policies_views.policy_or_404(f"{policy.pk}") == policy
assert policies_views.policy_or_404("AAA-BBB-CCC") == policy
with pytest.raises(Http404):
policies_views.policy_or_404('does-not-exist')
```
#### File: devops/integration_tests/conftest.py
```python
from collections import namedtuple
import pytest
AppUrls = namedtuple('AppUrls', ('ui', 'api', 'admin'))
@pytest.fixture
def selenium(selenium):
"""Configure selenium"""
selenium.implicitly_wait(10)
return selenium
def pytest_addoption(parser):
parser.addoption('--ui-baseurl', default='http://proxy:9002/',
help='base url for the agency UI')
parser.addoption('--api-baseurl', default='http://proxy:9001/',
help='base url for the API')
parser.addoption('--admin-baseurl', default='http://proxy:9001/admin/',
help='base url for the admin')
@pytest.fixture(scope='session')
def app_urls(request):
return AppUrls(
request.config.getoption('--ui-baseurl'),
request.config.getoption('--api-baseurl'),
request.config.getoption('--admin-baseurl')
)
@pytest.fixture()
def admin_login(selenium, app_urls):
selenium.get(app_urls.admin)
selenium.find_element_by_name('username').send_keys('admin')
selenium.find_element_by_name('password').send_keys('<PASSWORD>')
selenium.find_element_by_name('password').submit()
```
#### File: devops/integration_tests/smoke_test.py
```python
def test_policies_tab(selenium, app_urls):
selenium.get(app_urls.ui + 'requirements')
link = selenium.find_element_by_link_text('Policies')
link.click()
assert selenium.current_url == app_urls.ui + 'policies'
def test_requirements_tab(selenium, app_urls):
selenium.get(app_urls.ui + 'policies')
link = selenium.find_element_by_link_text('Requirements')
link.click()
assert selenium.current_url == app_urls.ui + 'requirements'
def test_html_api_loads(selenium, app_urls):
selenium.get(app_urls.api)
html = selenium.find_element_by_tag_name('html')
assert '"topics"' in html.text
def test_admin_loads(selenium, app_urls):
selenium.get(app_urls.admin)
form = selenium.find_element_by_tag_name('form')
assert 'Username' in form.text
assert 'Password' in form.text
def test_admin_lists_models(selenium, admin_login):
assert selenium.find_element_by_link_text('Topics') is not None
assert selenium.find_element_by_link_text('Policies') is not None
assert selenium.find_element_by_link_text('Requirements') is not None
def test_ui_proxies_404(selenium, app_urls):
selenium.get(app_urls.ui + 'requirements?page=9999')
html = selenium.find_element_by_tag_name('html')
assert 'Server Error' not in html.text
assert 'Page not found' in html.text
``` |
{
"source": "18F/openFEC",
"score": 3
} |
#### File: 18F/openFEC/jdbc_utils.py
```python
import re
def to_jdbc_url(dbi_url):
DB_URL_REGEX = re.compile(r'postgresql://(?P<username>[^:]*):?(?P<password>\S*)@(?P<host_port>\S*)$')
match = DB_URL_REGEX.match(dbi_url)
if match:
jdbc_url = 'jdbc:postgresql://{}?user={}'.format(
match.group('host_port'), match.group('username'))
if match.group('password'):
jdbc_url += '&password={}'.format(match.group('password'))
return jdbc_url
return None
```
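Illustrative conversions (credentials and host are made up):

```python
from jdbc_utils import to_jdbc_url  # assumes the module above is importable

to_jdbc_url('postgresql://fec:hunter2@db.example.gov:5432/cfdm')
# -> 'jdbc:postgresql://db.example.gov:5432/cfdm?user=fec&password=hunter2'
to_jdbc_url('postgresql://fec@db.example.gov:5432/cfdm')  # no password given
# -> 'jdbc:postgresql://db.example.gov:5432/cfdm?user=fec'
to_jdbc_url('not-a-postgres-url')
# -> None
```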
#### File: openFEC/tests/test_load_legal_docs.py
```python
import unittest
from unittest.mock import patch
from webservices.legal_docs import (
delete_murs_from_es,
delete_murs_from_s3,
index_regulations,
index_statutes,
load_archived_murs,
create_docs_index
)
from webservices.legal_docs.load_legal_docs import (
get_subject_tree,
get_title_26_statutes,
get_title_52_statutes,
get_xml_tree_from_url,
)
from zipfile import ZipFile
from tempfile import NamedTemporaryFile
import json
def test_get_subject_tree():
assert get_subject_tree("foo") == [{"text": "Foo"}]
assert get_subject_tree("<li>foo</li>") == [{"text": "Foo"}]
assert get_subject_tree(
"foo<ul class='no-top-margin'><li>bar</li><li>baz</li></ul>") == [
{"text": "Foo", "children": [{"text": "Bar"}, {"text": "Baz"}]}]
class ElasticSearchMock:
class ElasticSearchIndicesMock:
def delete(self, index):
assert index in ['docs', 'archived_murs', 'docs_index']
def create(self, index, mappings):
assert index in ['docs', 'archived_murs']
assert mappings
def update_aliases(self, body):
pass
def __init__(self, dictToIndex):
self.dictToIndex = dictToIndex
self.indices = ElasticSearchMock.ElasticSearchIndicesMock()
def index(self, index, doc_type, doc, id):
assert self.dictToIndex == doc
def delete_by_query(self, index, body, doc_type):
assert index == 'docs_index'
def get_es_with_doc(doc):
def get_es():
return ElasticSearchMock(doc)
return get_es
def mock_xml(xml):
def request_zip(url, stream=False):
with NamedTemporaryFile('w+') as f:
f.write(xml)
f.seek(0)
with NamedTemporaryFile('w+') as n:
with ZipFile(n.name, 'w') as z:
z.write(f.name)
return open(n.name, 'rb')
return request_zip
def mock_archived_murs_get_request(html):
def request_murs_data(url, stream=False):
if stream:
return [b'ABC', b'def']
else:
return RequestResult(html)
return request_murs_data
class Engine:
def __init__(self, legal_loaded):
self.legal_loaded = legal_loaded
def __iter__(self):
return self.result.__iter__()
def __next__(self):
return self.result.__next__()
def fetchone(self):
return self.result[0]
def fetchall(self):
return self.result
def connect(self):
return self
def execution_options(self, stream_results):
return self
def execute(self, sql):
if sql == 'select document_id from document':
self.result = [(1,), (2,)]
if 'fileimage' in sql:
return [(1, 'ABC'.encode('utf8'))]
if 'EXISTS' in sql:
self.result = [(self.legal_loaded,)]
if 'COUNT' in sql:
self.result = [(5,)]
if 'aouser.players' in sql:
self.result = [{'name': '<NAME>', 'description': 'Individual'},
{'name': '<NAME>', 'description': 'Individual'}]
if 'SELECT ao_no, category, ocrtext' in sql:
self.result = [
{'ao_no': '1993-01', 'category': 'Votes', 'ocrtext': 'test 1993-01 test 2015-105 and 2014-1'},
{'ao_no': '2007-05', 'category': 'Final Opinion', 'ocrtext': 'test2 1993-01 test2'}]
if 'SELECT ao_no, name FROM' in sql:
self.result = [{'ao_no': '1993-01', 'name': 'RNC'}, {'ao_no': '2007-05', 'name': 'Church'},
{'ao_no': '2014-01', 'name': 'DNC'}, {'ao_no': '2015-105', 'name': 'Outkast'}]
if 'document_id' in sql:
self.result = [{'document_id': 123, 'ocrtext': 'textAB', 'description': 'description123',
'category': 'Votes', 'ao_id': 'id123',
'name': 'name4U', 'summary': 'summaryABC', 'tags': 'tags123',
'ao_no': '1993-01', 'document_date': 'date123', 'is_pending': True}]
return self
class Db:
def __init__(self, legal_loaded=True):
self.engine = Engine(legal_loaded)
def get_credential_mock(var, default):
return 'https://eregs.api.com/'
class RequestResult:
def __init__(self, result):
self.result = result
self.text = result
def json(self):
return self.result
def mock_get_regulations(url):
if url.endswith('regulation'):
return RequestResult({'versions': [{'version': 'versionA',
'regulation': 'reg104'}]})
if url.endswith('reg104/versionA'):
return RequestResult({'children': [{'children': [{'label': ['104', '1'],
'title': 'Section 104.1 Title',
'text': 'sectionContentA',
'children': [{'text': 'sectionContentB',
'children': []}]}]}]})
class obj:
def __init__(self, key):
self.key = key
def delete(self):
pass
class S3Objects:
def __init__(self):
self.objects = []
def filter(self, Prefix):
return [o for o in self.objects if o.key.startswith(Prefix)]
class BucketMock:
def __init__(self, key):
self.objects = S3Objects()
self.key = key
def put_object(self, Key, Body, ContentType, ACL):
assert Key == self.key
def get_bucket_mock(key):
def get_bucket():
return BucketMock(key)
return get_bucket
class IndexStatutesTest(unittest.TestCase):
@patch('webservices.legal_docs.load_legal_docs.requests.get', mock_xml('<test></test>'))
def test_get_xml_tree_from_url(self):
etree = get_xml_tree_from_url('anything.com')
assert etree.getroot().tag == 'test'
@patch('webservices.utils.get_elasticsearch_connection',
get_es_with_doc({'name': 'title',
'chapter': '1', 'title': '26', 'no': '123',
'text': ' title content ', 'doc_id': '/us/usc/t26/s123',
'url': 'http://api.fdsys.gov/link?collection=uscode&title=26&' +
'year=mostrecent§ion=123',
'sort1': 26, 'sort2': 123}))
@patch('webservices.legal_docs.load_legal_docs.requests.get', mock_xml(
"""<?xml version="1.0" encoding="UTF-8"?>
<uscDoc xmlns="http://xml.house.gov/schemas/uslm/1.0">
<subtitle identifier="/us/usc/t26/stH">
<chapter identifier="/us/usc/t26/stH/ch1">
<section identifier="/us/usc/t26/s123">
<heading>title</heading>
<subsection>content</subsection>
</section></chapter></subtitle></uscDoc>
"""))
def test_title_26(self):
get_title_26_statutes()
@patch('webservices.utils.get_elasticsearch_connection',
get_es_with_doc({'subchapter': 'I',
'doc_id': '/us/usc/t52/s123', 'chapter': '1',
'text': ' title content ',
'url': 'http://api.fdsys.gov/link?collection=uscode&title=52&' +
'year=mostrecent§ion=123',
'title': '52', 'name': 'title', 'no': '123',
'sort1': 52, 'sort2': 123}))
@patch('webservices.legal_docs.load_legal_docs.requests.get', mock_xml(
"""<?xml version="1.0" encoding="UTF-8"?>
<uscDoc xmlns="http://xml.house.gov/schemas/uslm/1.0">
<subtitle identifier="/us/usc/t52/stIII">
<subchapter identifier="/us/usc/t52/stIII/ch1/schI">
<section identifier="/us/usc/t52/s123">
<heading>title</heading>
<subsection>content</subsection>
</section></subchapter></subtitle></uscDoc>
"""))
def test_title_52(self):
get_title_52_statutes()
@patch('webservices.legal_docs.load_legal_docs.get_title_52_statutes', lambda: '')
@patch('webservices.legal_docs.load_legal_docs.get_title_26_statutes', lambda: '')
def test_index_statutes(self):
index_statutes()
class IndexRegulationsTest(unittest.TestCase):
@patch('webservices.legal_docs.load_legal_docs.env.get_credential', get_credential_mock)
@patch('webservices.legal_docs.load_legal_docs.requests.get', mock_get_regulations)
@patch('webservices.utils.get_elasticsearch_connection',
get_es_with_doc({'text': 'sectionContentA sectionContentB',
'no': '104.1', 'name': 'Title',
'url': '/regulations/104-1/versionA#104-1',
'doc_id': '104_1',
'sort1': 104, 'sort2': 1}))
def test_index_regulations(self):
index_regulations()
@patch('webservices.legal_docs.load_legal_docs.env.get_credential', lambda e, d: '')
def test_no_env_variable(self):
index_regulations()
class InitializeLegalDocsTest(unittest.TestCase):
@patch('webservices.utils.get_elasticsearch_connection',
get_es_with_doc({}))
def test_create_docs_index(self):
create_docs_index()
def raise_pdf_exception(PDF):
raise Exception('Could not parse PDF')
class LoadArchivedMursTest(unittest.TestCase):
@patch('webservices.utils.get_elasticsearch_connection',
get_es_with_doc(json.load(open('tests/data/archived_mur_doc.json'))))
@patch('webservices.legal_docs.load_legal_docs.get_bucket',
get_bucket_mock('legal/murs/1.pdf'))
@patch('webservices.legal_docs.load_legal_docs.slate.PDF', lambda t: ['page1', 'page2'])
@patch('webservices.legal_docs.load_legal_docs.env.get_credential', lambda e: 'bucket123')
@patch('webservices.legal_docs.load_legal_docs.requests.get',
mock_archived_murs_get_request(open('tests/data/archived_mur_data.html').read()))
def test_base_case(self):
load_archived_murs(specific_mur_no=1)
@patch('webservices.utils.get_elasticsearch_connection',
get_es_with_doc(json.load(open('tests/data/archived_mur_empty_doc.json'))))
@patch('webservices.legal_docs.load_legal_docs.get_bucket',
get_bucket_mock('legal/murs/1.pdf'))
@patch('webservices.legal_docs.load_legal_docs.slate.PDF', lambda t: ['page1', 'page2'])
@patch('webservices.legal_docs.load_legal_docs.env.get_credential', lambda e: 'bucket123')
@patch('webservices.legal_docs.load_legal_docs.requests.get',
mock_archived_murs_get_request(open('tests/data/archived_mur_empty_data.html').read()))
def test_with_empty_data(self):
load_archived_murs()
@patch('webservices.utils.get_elasticsearch_connection',
get_es_with_doc(json.load(open('tests/data/archived_mur_empty_doc.json'))))
@patch('webservices.legal_docs.load_legal_docs.get_bucket',
get_bucket_mock('legal/murs/1.pdf'))
@patch('webservices.legal_docs.load_legal_docs.slate.PDF', lambda t: ['page1', 'page2'])
@patch('webservices.legal_docs.load_legal_docs.env.get_credential', lambda e: 'bucket123')
@patch('webservices.legal_docs.load_legal_docs.requests.get',
mock_archived_murs_get_request(open('tests/data/archived_mur_bad_subject.html').read()))
def test_bad_parse(self):
with self.assertRaises(Exception):
load_archived_murs()
@patch('webservices.utils.get_elasticsearch_connection',
get_es_with_doc(json.load(open('tests/data/archived_mur_empty_doc.json'))))
@patch('webservices.legal_docs.load_legal_docs.get_bucket',
get_bucket_mock('legal/murs/1.pdf'))
@patch('webservices.legal_docs.load_legal_docs.slate.PDF', lambda t: ['page1', 'page2'])
@patch('webservices.legal_docs.load_legal_docs.env.get_credential', lambda e: 'bucket123')
@patch('webservices.legal_docs.load_legal_docs.requests.get',
mock_archived_murs_get_request(open('tests/data/archived_mur_bad_citation.html').read()))
def test_bad_citation(self):
with self.assertRaises(Exception):
load_archived_murs()
@patch('webservices.utils.get_elasticsearch_connection',
get_es_with_doc(json.load(open('tests/data/archived_mur_bad_pdf_doc.json'))))
@patch('webservices.legal_docs.load_legal_docs.get_bucket',
get_bucket_mock('legal/murs/1.pdf'))
@patch('webservices.legal_docs.load_legal_docs.env.get_credential', lambda e: 'bucket123')
@patch('webservices.legal_docs.load_legal_docs.requests.get',
mock_archived_murs_get_request(open('tests/data/archived_mur_data.html').read()))
@patch('webservices.legal_docs.load_legal_docs.slate.PDF', raise_pdf_exception)
def test_with_bad_pdf(self):
load_archived_murs(specific_mur_no=1)
@patch('webservices.legal_docs.load_legal_docs.get_bucket',
get_bucket_mock('legal/murs/1.pdf'))
def test_delete_murs_from_s3(self):
delete_murs_from_s3()
@patch('webservices.utils.get_elasticsearch_connection', get_es_with_doc({}))
def test_delete_murs_from_es(self):
delete_murs_from_es()
``` |
{
"source": "18F/openFEC-web-app",
"score": 2
} |
#### File: openFEC-web-app/openfecwebapp/routes.py
```python
import http
import datetime
import re
import furl
from webargs import fields
from webargs.flaskparser import use_kwargs
from flask import render_template, request, redirect, url_for, abort
from collections import OrderedDict
from openfecwebapp import views
from openfecwebapp import utils
from openfecwebapp import config
from openfecwebapp import constants
from openfecwebapp import api_caller
from openfecwebapp.app import app
@app.route('/')
def search():
"""Renders the top-level /data page.
If there's a query string, it will
load candidate and committee search results.
If the string is a 16 or 11 digit number then it will redirect to
the page-by-page viewer.
If there's no query, then we'll load the main landing page with all the
necessary data.
"""
query = request.args.get('search')
if query:
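# FEC image numbers are 11 or 16 digits long; send those straight to the
# docquery page-by-page image viewer instead of running a search.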
if re.match(r'\d{16}', query) or re.match(r'\d{11}', query):
url = 'http://docquery.fec.gov/cgi-bin/fecimg/?' + query
return redirect(url)
else:
results = api_caller.load_search_results(query)
return views.render_search_results(results, query)
else:
top_candidates_raising = api_caller.load_top_candidates('-receipts', per_page=3)
return render_template('landing.html',
page='home',
parent='data',
dates=utils.date_ranges(),
top_candidates_raising=top_candidates_raising['results'] if top_candidates_raising else None,
first_of_year=datetime.date(datetime.date.today().year, 1, 1).strftime('%m/%d/%Y'),
last_of_year=datetime.date(datetime.date.today().year, 12, 31).strftime('%m/%d/%Y'),
title='Campaign finance data')
@app.route('/api/')
def api():
"""Redirect to API as described at
https://18f.github.io/API-All-the-X/pages/developer_hub_kit.
"""
return redirect(config.api_location, http.client.MOVED_PERMANENTLY)
@app.route('/developers/')
def developers():
"""Redirect to developer portal as described at
https://18f.github.io/API-All-the-X/pages/developer_hub_kit.
"""
url = furl.furl(config.api_location)
url.path.add('developers')
return redirect(url.url, http.client.MOVED_PERMANENTLY)
@app.route('/candidate/<c_id>/')
@use_kwargs({
'cycle': fields.Int(),
'election_full': fields.Bool(missing=True),
})
def candidate_page(c_id, cycle=None, election_full=True):
"""Fetch and render data for candidate detail page.
:param int cycle: Optional cycle for associated committees and financials.
:param bool election_full: Load full election period
"""
candidate, committees, cycle = api_caller.load_with_nested(
'candidate', c_id, 'committees',
cycle=cycle, cycle_key='two_year_period',
election_full=election_full,
)
if election_full and cycle and cycle not in candidate['election_years']:
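# Find the next election year after the requested cycle, falling back to the
# candidate's latest election year.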
next_cycle = next(
(
year for year in sorted(candidate['election_years'])
if year > cycle
),
max(candidate['election_years']),
)
# If the next_cycle is odd set it to whatever the cycle value was
# and then set election_full to false
# This solves an issue with special elections
if next_cycle % 2 > 0:
next_cycle = cycle
election_full = False
return redirect(
url_for('candidate_page', c_id=c_id, cycle=next_cycle, election_full=election_full)
)
return views.render_candidate(candidate, committees, cycle, election_full)
@app.route('/committee/<c_id>/')
@use_kwargs({
'cycle': fields.Int(),
})
def committee_page(c_id, cycle=None):
"""Fetch and render data for committee detail page.
:param int cycle: Optional cycle for financials.
"""
# If the cycle param is explicitly defined, then load that cycle
# Otherwise, redirect to the last cycle with reports, as determined in render_committee()
redirect_to_previous = not cycle
committee, candidates, cycle = api_caller.load_with_nested('committee', c_id, 'candidates', cycle)
return views.render_committee(committee, candidates, cycle, redirect_to_previous)
@app.route('/advanced/')
def advanced():
return render_template(
'advanced.html',
parent='data',
title='Advanced data'
)
@app.route('/candidates/')
@use_kwargs({
'page': fields.Int(missing=1)
})
def candidates(**kwargs):
candidates = api_caller._call_api('candidates', **kwargs)
return render_template(
'datatable.html',
parent='data',
result_type='candidates',
slug='candidates',
title='Candidates',
data=candidates['results'],
query=kwargs,
columns=constants.table_columns['candidates']
)
@app.route('/candidates/<office>/')
def candidates_office(office):
if office.lower() not in ['president', 'senate', 'house']:
abort(404)
return render_template(
'datatable.html',
parent='data',
result_type='candidates',
title='candidates for ' + office,
slug='candidates-office',
table_context=OrderedDict([('office', office)]),
columns=constants.table_columns['candidates-office-' + office.lower()]
)
@app.route('/committees/')
@use_kwargs({
'page': fields.Int(missing=1)
})
def committees(**kwargs):
committees = api_caller._call_api('committees', **kwargs)
return render_template(
'datatable.html',
parent='data',
result_type='committees',
slug='committees',
title='Committees',
data=committees['results'],
query=kwargs,
columns=constants.table_columns['committees']
)
@app.route('/receipts/')
def receipts():
return render_template(
'datatable.html',
parent='data',
slug='receipts',
title='Receipts',
dates=utils.date_ranges(),
columns=constants.table_columns['receipts'],
has_data_type_toggle=True
)
@app.route('/receipts/individual-contributions/')
def individual_contributions():
return render_template(
'datatable.html',
parent='data',
result_type='receipts',
title='Individual contributions',
slug='individual-contributions',
dates=utils.date_ranges(),
columns=constants.table_columns['individual-contributions']
)
@app.route('/disbursements/')
def disbursements():
return render_template(
'datatable.html',
parent='data',
slug='disbursements',
title='Disbursements',
dates=utils.date_ranges(),
columns=constants.table_columns['disbursements'],
has_data_type_toggle=True
)
@app.route('/filings/')
def filings():
return render_template(
'datatable.html',
parent='data',
slug='filings',
title='Filings',
dates=utils.date_ranges(),
result_type='committees',
has_data_type_toggle=True,
columns=constants.table_columns['filings']
)
@app.route('/independent-expenditures/')
def independent_expenditures():
return render_template(
'datatable.html',
parent='data',
slug='independent-expenditures',
title='Independent expenditures',
dates=utils.date_ranges(),
columns=constants.table_columns['independent-expenditures'],
has_data_type_toggle=True
)
@app.route('/electioneering-communications/')
def electioneering_communications():
return render_template(
'datatable.html',
parent='data',
slug='electioneering-communications',
title='Electioneering communications',
dates=utils.date_ranges(),
columns=constants.table_columns['electioneering-communications']
)
@app.route('/communication-costs/')
def communication_costs():
return render_template(
'datatable.html',
parent='data',
slug='communication-costs',
title='Communication costs',
dates=utils.date_ranges(),
columns=constants.table_columns['communication-costs']
)
@app.route('/loans/')
def loans():
return render_template(
'datatable.html',
parent='data',
result_type='loans',
slug='loans',
title='Loans',
columns=constants.table_columns['loans']
)
@app.route('/party-coordinated-expenditures/')
def party_coordinated_expenditures():
return render_template(
'datatable.html',
parent='data',
slug='party-coordinated-expenditures',
title='Party coordinated expenditures',
dates=utils.date_ranges(),
columns=constants.table_columns['party-coordinated-expenditures']
)
@app.route('/reports/<form_type>/')
def reports(form_type):
if form_type.lower() not in ['presidential', 'house-senate', 'pac-party', 'ie-only']:
abort(404)
if form_type.lower() == 'presidential':
title = 'Presidential committee reports'
if form_type.lower() == 'house-senate':
title = 'House and Senate committee reports'
if form_type.lower() == 'pac-party':
title = 'PAC and party committee reports'
if form_type.lower() == 'ie-only':
title = 'Independent expenditure only committee reports'
context = OrderedDict([('form_type', form_type.lower())])
return render_template(
'datatable.html',
parent='data',
slug='reports',
title=title,
table_context=context,
dates=utils.date_ranges(),
has_data_type_toggle=True,
columns=constants.table_columns['reports-' + form_type.lower()]
)
@app.route('/elections/')
def election_lookup():
return render_template('election-lookup.html', parent='data')
@app.route('/elections/<office>/<int:cycle>/')
@app.route('/elections/<office>/<state>/<int:cycle>/')
@app.route('/elections/<office>/<state>/<district>/<int:cycle>/')
def elections(office, cycle, state=None, district=None):
# Validate the office and state from the URL before doing any lookups
if office.lower() not in ['president', 'senate', 'house']:
abort(404)
if state and state.upper() not in constants.states:
abort(404)
# Get all cycles up until the cycle from the URL if it's beyond the current cycle
# this fixes the issue of an election page not showing the user-provided cycle
# in the cycle select
max_cycle = cycle if cycle > utils.current_cycle() else utils.current_cycle()
cycles = utils.get_cycles(max_cycle)
if office.lower() == 'president':
cycles = [each for each in cycles if each % 4 == 0]
elif office.lower() == 'senate':
cycles = utils.get_state_senate_cycles(state)
return render_template(
'elections.html',
office=office,
office_code=office[0],
parent='data',
cycle=cycle,
cycles=cycles,
state=state,
state_full=constants.states[state.upper()] if state else None,
district=district,
title=utils.election_title(cycle, office, state, district),
)
@app.route('/election-page/')
@use_kwargs({
'state': fields.Str(),
'district': fields.Str(),
})
def election_page(state=None, district=None):
"""This route is used to redirect users to a specific senate or house district page
"""
if state and district:
# If district is S, redirect to a senate page
if district == 'S':
# Find the state's senate cycles given the classes and then choose the first one
cycles = utils.get_state_senate_cycles(state)
cycle = cycles[0]
redirect_url = url_for('elections',
office='senate',
state=state,
cycle=cycle)
else:
redirect_url = url_for('elections',
office='house',
district=district,
state=state,
cycle=constants.DEFAULT_TIME_PERIOD)
return redirect(redirect_url)
else:
return redirect(url_for('election_lookup', state=state, district=district))
@app.route('/raising/')
@use_kwargs({
'top_category': fields.Str(load_from='top_category', missing='P'),
'cycle': fields.Int(load_from='cycle', missing=2016),
})
def raising_breakdown(top_category, cycle):
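# Show the top 10 raisers for the requested category: PACs, party committees,
# or candidates for a given office code.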
if top_category in ['pac']:
top_raisers = api_caller.load_top_pacs('-receipts', cycle=cycle, per_page=10)
elif top_category in ['party']:
top_raisers = api_caller.load_top_parties('-receipts', cycle=cycle, per_page=10)
else:
top_raisers = api_caller.load_top_candidates('-receipts', office=top_category, cycle=cycle, per_page=10)
if cycle == datetime.datetime.today().year:
coverage_end_date = datetime.datetime.today()
else:
coverage_end_date = datetime.date(cycle, 12, 31)
return render_template(
'raising-breakdown.html',
parent='data',
title='Raising breakdown',
top_category=top_category,
coverage_start_date=datetime.date(cycle - 1, 1, 1),
coverage_end_date=coverage_end_date,
cycle=cycle,
top_raisers=top_raisers['results'],
page_info=utils.page_info(top_raisers['pagination'])
)
@app.route('/spending/')
@use_kwargs({
'top_category': fields.Str(load_from='top_category', missing='P'),
'cycle': fields.Int(load_from='cycle', missing=2016),
})
def spending_breakdown(top_category, cycle):
if top_category in ['pac']:
top_spenders = api_caller.load_top_pacs('-disbursements', cycle=cycle, per_page=10)
elif top_category in ['party']:
top_spenders = api_caller.load_top_parties('-disbursements', cycle=cycle, per_page=10)
else:
top_spenders = api_caller.load_top_candidates('-disbursements', office=top_category, cycle=cycle, per_page=10)
if cycle == datetime.datetime.today().year:
coverage_end_date = datetime.datetime.today()
else:
coverage_end_date = datetime.date(cycle, 12, 31)
return render_template(
'spending-breakdown.html',
parent='data',
title='Spending breakdown',
top_category=top_category,
coverage_start_date=datetime.date(cycle - 1, 1, 1),
coverage_end_date=coverage_end_date,
cycle=cycle,
top_spenders=top_spenders['results'],
page_info=utils.page_info(top_spenders['pagination'])
)
@app.route('/legal/search/')
@use_kwargs({
'query': fields.Str(load_from='search'),
'result_type': fields.Str(load_from='search_type', missing='all')
})
def legal_search(query, result_type):
if result_type != 'all':
# search_type is used for google analytics
return redirect(url_for(result_type, search=query, search_type=result_type))
results = {}
# Only hit the API if there's an actual query
if query:
results = api_caller.load_legal_search_results(query, result_type, limit=3)
return views.render_legal_search_results(results, query, result_type)
def legal_doc_search(query, result_type, **kwargs):
"""Legal search for a specific document type."""
results = {}
# Only hit the API if there's an actual query or if the result_type is AOs
if query or result_type in ['advisory_opinions', 'murs']:
results = api_caller.load_legal_search_results(query, result_type, **kwargs)
return views.render_legal_doc_search_results(results, query, result_type)
@app.route('/legal/advisory-opinions/')
def advisory_opinions_landing():
return views.render_legal_ao_landing()
@app.route('/legal/statutes/')
def statutes_landing():
return render_template('legal-statutes-landing.html',
parent='legal',
result_type='statutes',
display_name='statutes')
@app.route('/legal/search/advisory-opinions/')
@use_kwargs({
'query': fields.Str(load_from='search'),
'offset': fields.Int(missing=0)
})
def advisory_opinions(query, offset):
return legal_doc_search(query, 'advisory_opinions')
@app.route('/legal/search/statutes/')
@use_kwargs({
'query': fields.Str(load_from='search'),
'offset': fields.Int(missing=0),
})
def statutes(query, offset):
return legal_doc_search(query, 'statutes', offset=offset)
@app.route('/legal/search/enforcement/')
@use_kwargs({
'query': fields.Str(load_from='search'),
'mur_no': fields.Str(load_from='mur_no'),
'mur_respondents': fields.Str(load_from='mur_respondents'),
'mur_election_cycles': fields.Int(load_from='mur_election_cycles'),
'offset': fields.Int(missing=0),
})
def murs(query, offset, mur_no=None, mur_respondents=None, mur_election_cycles=None,
**kwargs):
return legal_doc_search(query, 'murs',
mur_no=mur_no,
mur_respondents=mur_respondents,
mur_election_cycles=mur_election_cycles,
offset=offset)
# TODO migrating from /legal/regulations -> /legal/search/regulations,
# eventually there will be a regulations landing page
@app.route('/legal/regulations/')
def regulations_landing(*args, **kwargs):
return redirect(url_for('regulations', *args, **kwargs))
@app.route('/legal/search/regulations/')
@use_kwargs({
'query': fields.Str(load_from='search'),
'offset': fields.Int(missing=0),
})
def regulations(query, offset):
return legal_doc_search(query, 'regulations', offset=offset)
@app.route('/legal/advisory-opinions/<ao_no>/')
def advisory_opinion_page(ao_no):
advisory_opinion = api_caller.load_legal_advisory_opinion(ao_no)
if not advisory_opinion:
abort(404)
return views.render_legal_advisory_opinion(advisory_opinion)
@app.route('/legal/matter-under-review/<mur_no>/')
def mur_page(mur_no):
mur = api_caller.load_legal_mur(mur_no)
if not mur:
abort(404)
return views.render_legal_mur(mur)
```
#### File: openFEC-web-app/openfecwebapp/views.py
```python
import datetime
import furl
from flask.views import MethodView
from flask import request, render_template, redirect, url_for, jsonify
from flask_cors import cross_origin
from webargs import fields
from webargs.flaskparser import use_kwargs
from marshmallow import ValidationError
from collections import OrderedDict
import github3
from werkzeug.utils import cached_property
from openfecwebapp import config
from openfecwebapp import api_caller
from openfecwebapp import utils
def render_search_results(results, query):
return render_template(
'search-results.html',
parent='data',
results=results,
query=query,
)
def render_legal_search_results(results, query, result_type):
return render_template(
'legal-search-results.html',
parent='legal',
query=query,
results=results,
result_type=result_type,
category_order=get_legal_category_order(results),
)
def render_legal_doc_search_results(results, query, result_type):
return render_template(
'legal-search-results-%s.html' % result_type,
parent='legal',
results=results,
result_type=result_type,
query=query
)
def render_legal_advisory_opinion(advisory_opinion):
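# Pull out the document categorized as 'Final Opinion', if one exists.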
final_opinion = [doc for doc in advisory_opinion['documents'] if doc['category'] == 'Final Opinion']
final_opinion = final_opinion[0] if len(final_opinion) > 0 else None
return render_template(
'legal-advisory-opinion.html',
advisory_opinion=advisory_opinion,
final_opinion=final_opinion,
parent='legal'
)
def render_legal_mur(mur):
return render_template(
'legal-%s-mur.html' % mur['mur_type'],
mur=mur,
parent='legal'
)
def render_legal_ao_landing():
today = datetime.date.today()
ao_min_date = today - datetime.timedelta(weeks=26)
recent_aos = api_caller.load_legal_search_results(
query='',
query_type='advisory_opinions',
ao_category=['F', 'W'],
ao_min_issue_date=ao_min_date
)
pending_aos = api_caller.load_legal_search_results(
query='',
query_type='advisory_opinions',
ao_category='R',
ao_is_pending=True
)
return render_template('legal-advisory-opinions-landing.html',
parent='legal',
result_type='advisory_opinions',
display_name='advisory opinions',
recent_aos=recent_aos['advisory_opinions'],
pending_aos=pending_aos['advisory_opinions'])
def to_date(committee, cycle):
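# House (H), Senate (S) and presidential (P) committees are shown without a
# "to date" cutoff; other committees are capped at the current year or the
# end of the requested cycle, whichever is earlier.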
if committee['committee_type'] in ['H', 'S', 'P']:
return None
return min(datetime.datetime.now().year, cycle)
def render_committee(committee, candidates, cycle, redirect_to_previous):
# committee fields will be top-level in the template
tmpl_vars = committee
tmpl_vars['parent'] = 'data'
tmpl_vars['cycle'] = cycle
tmpl_vars['year'] = to_date(committee, cycle)
tmpl_vars['result_type'] = 'committees'
# Link to current cycle if candidate has a corresponding page, else link
# without cycle query parameter
# See https://github.com/18F/openFEC/issues/1536
for candidate in candidates:
election_years = [
election_year for election_year in candidate['election_years']
if election_year - election_durations[candidate['office']] < cycle <= election_year
]
candidate['related_cycle'] = max(election_years) if election_years else None
# add related candidates a level below
tmpl_vars['candidates'] = candidates
financials = api_caller.load_cmte_financials(committee['committee_id'], cycle=cycle)
tmpl_vars['report_type'] = report_types.get(committee['committee_type'], 'pac-party')
tmpl_vars['reports'] = financials['reports']
tmpl_vars['totals'] = financials['totals']
tmpl_vars['context_vars'] = {
'cycle': cycle,
'timePeriod': str(cycle - 1) + '–' + str(cycle),
'name': committee['name'],
}
if financials['reports'] and financials['totals']:
# Format the current two-year-period's totals using the process utilities
if committee['committee_type'] == 'I':
# IE-only committees have very little data, so they just get this one
tmpl_vars['ie_summary'] = utils.process_ie_data(financials['totals'][0])
else:
# All other committees have three tables
tmpl_vars['raising_summary'] = utils.process_raising_data(financials['totals'][0])
tmpl_vars['spending_summary'] = utils.process_spending_data(financials['totals'][0])
tmpl_vars['cash_summary'] = utils.process_cash_data(financials['totals'][0])
if redirect_to_previous and not financials['reports']:
# If there's no reports, find the first year with reports and redirect there
for c in sorted(committee['cycles'], reverse=True):
financials = api_caller.load_cmte_financials(committee['committee_id'], cycle=c)
if financials['reports']:
return redirect(
url_for('committee_page', c_id=committee['committee_id'], cycle=c)
)
# If it's not a senate committee and we're in the current cycle
# check if there's any raw filings in the last two days
if committee['committee_type'] != 'S' and cycle == utils.current_cycle():
raw_filings = api_caller._call_api(
'efile', 'filings',
cycle=cycle,
committee_id=committee['committee_id'],
min_receipt_date=utils.two_days_ago()
)
if raw_filings.get('results'):
tmpl_vars['has_raw_filings'] = True
else:
tmpl_vars['has_raw_filings'] = False
return render_template('committees-single.html', **tmpl_vars)
def groupby(values, keygetter):
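# Group values into a dict keyed by keygetter(value); order within each group
# follows the input order.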
ret = {}
for value in values:
key = keygetter(value)
ret.setdefault(key, []).append(value)
return ret
election_durations = {
'P': 4,
'S': 6,
'H': 2,
}
report_types = {
'P': 'presidential',
'S': 'house-senate',
'H': 'house-senate',
'I': 'ie-only'
}
def render_candidate(candidate, committees, cycle, election_full=True):
# candidate fields will be top-level in the template
tmpl_vars = candidate
tmpl_vars['parent'] = 'data'
tmpl_vars['cycle'] = cycle
tmpl_vars['election_year'] = next(
(year for year in sorted(candidate['election_years']) if year >= cycle),
None,
)
tmpl_vars['result_type'] = 'candidates'
tmpl_vars['duration'] = election_durations.get(candidate['office'], 2)
tmpl_vars['min_cycle'] = cycle - tmpl_vars['duration'] if election_full else cycle
tmpl_vars['election_full'] = election_full
tmpl_vars['report_type'] = report_types.get(candidate['office'])
tmpl_vars['context_vars'] = {
'cycles': candidate['cycles'],
'name': candidate['name'],
'cycle': cycle,
'electionFull': election_full,
'candidateID': candidate['candidate_id']
}
# In the case of when a presidential or senate candidate has filed
# for a future year that's beyond the current cycle,
# set a max_cycle var to the current cycle we're in
# and when calling the API for totals, set election_full to False.
# The max_cycle value is also referenced in the templates for setting
# the cycle for itemized tables. Because these are only in 2-year chunks,
# the cycle should never be beyond the one we're in.
tmpl_vars['cycles'] = [cycle for cycle in candidate['cycles'] if cycle <= utils.current_cycle()]
tmpl_vars['max_cycle'] = cycle if cycle <= utils.current_cycle() else utils.current_cycle()
tmpl_vars['show_full_election'] = election_full if cycle <= utils.current_cycle() else False
# Annotate committees with most recent available cycle
tmpl_vars['aggregate_cycles'] = (
list(range(cycle, cycle - tmpl_vars['duration'], -2))
if election_full
else [cycle]
)
for committee in committees:
committee['related_cycle'] = (
max(cycle for cycle in tmpl_vars['aggregate_cycles'] if cycle in committee['cycles'])
if election_full
else candidate['two_year_period']
)
# Group the committees by designation
committee_groups = groupby(committees, lambda each: each['designation'])
committees_authorized = committee_groups.get('P', []) + committee_groups.get('A', [])
tmpl_vars['committee_groups'] = committee_groups
tmpl_vars['committees_authorized'] = committees_authorized
tmpl_vars['committee_ids'] = [committee['committee_id'] for committee in committees_authorized]
# Get aggregate totals for the financial summary
# And pass through the data processing utils
aggregate = api_caller.load_candidate_totals(
candidate['candidate_id'],
cycle=tmpl_vars['max_cycle'],
election_full=tmpl_vars['show_full_election'],
)
if aggregate:
tmpl_vars['raising_summary'] = utils.process_raising_data(aggregate)
tmpl_vars['spending_summary'] = utils.process_spending_data(aggregate)
tmpl_vars['cash_summary'] = utils.process_cash_data(aggregate)
tmpl_vars['aggregate'] = aggregate
# Get totals for the last two-year period of a cycle for showing on
# raising and spending tabs
two_year_totals = api_caller.load_candidate_totals(
candidate['candidate_id'],
cycle=tmpl_vars['max_cycle'],
election_full=False
)
if two_year_totals:
tmpl_vars['two_year_totals'] = two_year_totals
# Get the statements of candidacy
statement_of_candidacy = api_caller.load_candidate_statement_of_candidacy(
candidate['candidate_id'],
cycle=cycle
)
if statement_of_candidacy:
for statement in statement_of_candidacy:
# convert string to python datetime and parse for readable output
statement['receipt_date'] = datetime.datetime.strptime(statement['receipt_date'], '%Y-%m-%dT%H:%M:%S')
statement['receipt_date'] = statement['receipt_date'].strftime('%m/%d/%Y')
tmpl_vars['statement_of_candidacy'] = statement_of_candidacy
# Get all the elections
tmpl_vars['elections'] = sorted(
zip(candidate['election_years'], candidate['election_districts']),
key=lambda pair: pair[0],
reverse=True,
)
return render_template('candidates-single.html', **tmpl_vars)
def validate_referer(referer):
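# Only accept feedback posted from pages on this app's own host.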
if furl.furl(referer).host != furl.furl(request.url).host:
raise ValidationError('Invalid referer.')
class GithubView(MethodView):
decorators = [cross_origin()]
@cached_property
def repo(self):
client = github3.login(token=config.github_token)
return client.repository('18F', 'fec')
@use_kwargs({
'referer': fields.Url(
required=True,
validate=validate_referer,
location='headers',
),
'action': fields.Str(),
'feedback': fields.Str(),
'about': fields.Str(),
'chart_reaction': fields.Str(),
'chart_location': fields.Str(),
'chart_name': fields.Str(),
'chart_comment': fields.Str()
})
def post(self, **kwargs):
if not any([kwargs['action'], kwargs['feedback'], kwargs['about'], kwargs['chart_comment']]):
return jsonify({
'message': 'Must provide one of "action", "feedback", or "about".',
}), 422
if kwargs['chart_comment']:
title = 'Chart reaction on {} page'.format(kwargs['chart_location'])
else:
title = 'User feedback on {}'.format(kwargs['referer'])
body = render_template('feedback.html', headers=request.headers, **kwargs)
issue = self.repo.create_issue(title, body=body)
return jsonify(issue.to_json()), 201
def get_legal_category_order(results):
""" Return categories in pre-defined order, moving categories with empty results
to the end.
"""
categories = ["advisory_opinions", "murs", "regulations", "statutes"]
category_order = [x for x in categories if results.get("total_" + x, 0) > 0] +\
[x for x in categories if results.get("total_" + x, 0) == 0]
return category_order
```
#### File: openFEC-web-app/tests/test_issues.py
```python
import mock
import pytest
from webtest import TestApp as WebTestApp
import github3
from flask import url_for
from openfecwebapp import config
from openfecwebapp.app import app
@pytest.yield_fixture
def client():
with app.test_request_context():
yield WebTestApp(app)
class TestGithub:
@pytest.fixture
def mock_repo(self):
return mock.Mock()
@pytest.fixture
def mock_client(self, mock_repo):
client = mock.Mock()
client.repository.return_value = mock_repo
return client
@pytest.fixture
def mock_login(self, monkeypatch, mock_client):
login = mock.Mock()
login.return_value = mock_client
monkeypatch.setattr(github3, 'login', login)
return login
def test_missing_referer(self, client):
res = client.post_json(
url_for('issue'),
{'feedback': 'i like it'},
expect_errors=True,
)
assert res.status_code == 422
def test_invalid_referer(self, client):
res = client.post_json(
url_for('issue'),
{'feedback': 'i do not like it'},
headers={'referer': 'http://fec.gov'},
expect_errors=True,
)
assert res.status_code == 422
def test_missing_input(self, client):
res = client.post_json(
url_for('issue'),
{},
headers={'referer': 'http://localhost:5000'},
expect_errors=True,
)
assert res.status_code == 422
def test_submit(self, client, mock_login, mock_client, mock_repo):
referer = 'http://localhost:5000'
mock_issue = mock.Mock()
mock_issue.to_json.return_value = {'body': 'it broke'}
mock_repo.create_issue.return_value = mock_issue
res = client.post_json(
url_for('issue'),
{
'action': 'i tried to use it',
'feedback': 'but nothing happened',
'about': 'i like data',
},
headers={'referer': referer},
)
assert res.status_code == 201
mock_login.assert_called_with(token=config.github_token)
mock_client.repository.assert_called_with('18F', 'fec')
assert len(mock_repo.create_issue.call_args_list) == 1
args, kwargs = mock_repo.create_issue.call_args
assert referer in args[0]
assert 'i tried to use it' in kwargs['body']
assert 'but nothing happened' in kwargs['body']
assert 'i like data' in kwargs['body']
assert res.json == {'body': 'it broke'}
```
#### File: openFEC-web-app/tests/test_utils.py
```python
import locale
import datetime
import unittest
from unittest import mock
from collections import OrderedDict
from openfecwebapp import filters
from openfecwebapp import utils
from openfecwebapp.app import app, get_election_url
def test_currency_filter_not_none():
locale.setlocale(locale.LC_ALL, '')
assert filters.currency_filter(1.05) == '$1.05'
def test_currency_filter_none():
assert filters.currency_filter(None) is None
def test_fmt_year_range_int():
assert filters.fmt_year_range(1985) == '1984–1985'
def test_fmt_year_range_not_int():
assert filters.fmt_year_range('1985') is None
assert filters.fmt_year_range(None) is None
def test_fmt_state_full():
value = 'ny'
assert filters.fmt_state_full(value) == 'New York'
def test_election_url():
with app.test_request_context():
candidate = {'office_full': 'President', 'state': 'US', 'district': None}
assert get_election_url(candidate, 2012) == '/elections/president/2012/'
candidate = {'office_full': 'Senate', 'state': 'NJ', 'district': None}
assert get_election_url(candidate, 2012) == '/elections/senate/NJ/2012/'
candidate = {'office_full': 'Senate', 'state': 'NJ', 'district': '00'}
assert get_election_url(candidate, 2012) == '/elections/senate/NJ/2012/'
candidate = {'office_full': 'House', 'state': 'NJ', 'district': '02'}
assert get_election_url(candidate, 2012) == '/elections/house/NJ/02/2012/'
def test_financial_summary_processor():
totals = {
'receipts': 200,
'disbursements': 100
}
formatter = OrderedDict([
('receipts', ('Total receipts', '1')),
('disbursements', ('Total disbursements', '1'))
])
assert utils.financial_summary_processor(totals, formatter) == [(200, ('Total receipts', '1')), (100, ('Total disbursements', '1'))]
def current_cycle():
return 2016
class TestCycles(unittest.TestCase):
@mock.patch('openfecwebapp.utils.current_cycle')
def test_get_cycles(self, current_cycle):
# Mock out the current_cycle so it doesn't change in the future
current_cycle.return_value = 2016
# Check that it returns the correct default when no arg supplied
assert utils.get_cycles() == range(2016, 1979, -2)
# Check that it returns the correct range when an arg is supplied
assert utils.get_cycles(2020) == range(2020, 1979, -2)
def test_get_senate_cycles(self):
assert utils.get_senate_cycles(1) == range(2018, 1979, -6)
def test_state_senate_cycles(self):
# Testing with an example state, Wisconsin
# There should be an election in 2016 but not 2014
# because of the classes the state has
wisconsin = utils.get_state_senate_cycles('wi')
assert 2016 in wisconsin
assert 2014 not in wisconsin
``` |
{
"source": "18F/openFEC",
"score": 2
} |
#### File: openFEC/webservices/args.py
```python
import functools
from marshmallow.compat import text_type
import sqlalchemy as sa
from webargs import ValidationError, fields, validate
from webservices import docs
from webservices.common.models import db
from webservices.config import SQL_CONFIG
def _validate_natural(value):
if value < 0:
raise ValidationError('Must be a natural number')
Natural = functools.partial(fields.Int, validate=_validate_natural)
per_page = Natural(
missing=20,
description='The number of results returned per page. Defaults to 20.',
)
class Currency(fields.Decimal):
def __init__(self, places=2, **kwargs):
super().__init__(places=places, **kwargs)
def _validated(self, value):
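# Accept values like '$1,000.50' by stripping a leading dollar sign and
# thousands separators before decimal validation.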
if isinstance(value, text_type):
value = value.lstrip('$').replace(',', '')
return super()._validated(value)
class IStr(fields.Str):
def _deserialize(self, value, attr, data):
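# Case-insensitive string field: normalize input to upper case.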
return super()._deserialize(value, attr, data).upper()
class District(fields.Str):
def _validate(self, value):
super()._validate(value)
try:
value = int(value)
except (TypeError, ValueError):
raise ValidationError('District must be a number')
if value < 0:
raise ValidationError('District must be a natural number')
def _deserialize(self, value, attr, data):
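# Zero-pad district numbers to two characters, e.g. '2' -> '02'.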
return '{0:0>2}'.format(value)
election_full = fields.Bool(missing=False, description='Aggregate values over full election period')
paging = {
'page': Natural(missing=1, description='For paginating through results, starting at page 1'),
'per_page': per_page,
}
class OptionValidator(object):
"""Ensure that value is one of acceptable options.
:param list values: Valid options.
"""
def __init__(self, values):
self.values = values
def __call__(self, value):
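# A leading '-' means descending sort; strip it before checking the option.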
if value.lstrip('-') not in self.values:
raise ValidationError(
'Cannot sort on value "{0}"'.format(value),
status_code=422
)
class IndexValidator(OptionValidator):
"""Ensure that value is an indexed column on the specified model.
:param Base model: SQLALchemy model.
:param list exclude: Optional list of columns to exclude.
"""
def __init__(self, model, extra=None, exclude=None, schema=None):
self.model = model
self.extra = extra or []
self.exclude = exclude or []
self.database_schema = schema
@property
def values(self):
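# Inspect the database for indexes on the model's table and translate the
# indexed column names back to their mapped attribute names, skipping any
# excluded columns and appending the extra sortable values.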
inspector = sa.inspect(db.engine)
column_map = {
column.key: label
for label, column in self.model.__mapper__.columns.items()
}
return [
column_map[column['column_names'][0]]
for column in inspector.get_indexes(self.model.__tablename__, self.database_schema)
if not self._is_excluded(column_map.get(column['column_names'][0]))
] + self.extra
def _is_excluded(self, value):
return not value or value in self.exclude
class IndicesValidator(IndexValidator):
def __call__(self, value):
for sort_column in value:
if sort_column.lstrip('-') not in self.values:
raise ValidationError(
'Cannot sort on value "{0}"'.format(value),
status_code=422
)
def make_sort_args(default=None, validator=None, default_hide_null=False, default_reverse_nulls=True,
default_nulls_only=False):
return {
'sort': fields.Str(
missing=default,
validate=validator,
description='Provide a field to sort by. Use - for descending order.',
),
'sort_hide_null': fields.Bool(
missing=default_hide_null,
description='Hide null values on sorted column(s).'
),
'sort_null_only': fields.Bool(
missing=default_nulls_only,
description='Toggle that filters out all rows whose sort column is non-null'
)
}
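# A minimal usage sketch (model and extra column names here are hypothetical,
# for illustration only): a resource would typically merge paging and sort
# arguments into a single webargs spec, e.g.
#
#   sort_args = make_sort_args(
#       default='name',
#       validator=IndexValidator(models.Committee, extra=['receipts']),
#   )
#   committee_args = dict(paging, **sort_args)
#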
def make_multi_sort_args(default=None, validator=None, default_hide_null=False, default_reverse_nulls=True,
default_nulls_only=False):
args = make_sort_args(default, validator, default_hide_null, default_reverse_nulls, default_nulls_only)
args['sort'] = fields.List(fields.Str, missing=default, validate=validator, required=False, allow_none=True,
description='Provide a field to sort by. Use - for descending order.',)
return args
def make_seek_args(field=fields.Int, description=None):
return {
'per_page': per_page,
'last_index': field(
missing=None,
description=description or 'Index of last result from previous page',
),
}
names = {
'q': fields.List(fields.Str, required=True, description='Name (candidate or committee) to search for'),
}
query = {
'q': fields.Str(required=False, description='Text to search legal documents for.'),
'from_hit': fields.Int(required=False, description='Get results starting from this index.'),
'hits_returned': fields.Int(required=False, description='Number of results to return (max 10).'),
'type': fields.Str(required=False, description='Document type to refine search by'),
'ao_no': fields.List(IStr, required=False, description='Force advisory opinion number'),
'ao_name': fields.List(IStr, required=False, description='Force advisory opinion name'),
'ao_min_issue_date': fields.Date(description="Earliest issue date of advisory opinion"),
'ao_max_issue_date': fields.Date(description="Latest issue date of advisory opinion"),
'ao_min_request_date': fields.Date(description="Earliest request date of advisory opinion"),
'ao_max_request_date': fields.Date(description="Latest request date of advisory opinion"),
'ao_category': fields.List(IStr(validate=validate.OneOf(['F', 'V', 'D', 'R', 'W', 'C', 'S'])),
description="Category of the document"),
'ao_is_pending': fields.Bool(description="AO is pending"),
'ao_status': fields.Str(description="Status of AO (pending, withdrawn, or final)"),
'ao_requestor': fields.Str(description="The requestor of the advisory opinion"),
'ao_requestor_type': fields.List(fields.Integer(validate=validate.OneOf(range(1, 17))),
description="Code of the advisory opinion requestor type."),
'ao_regulatory_citation': fields.List(IStr, required=False, description="Search for regulatory citations"),
'ao_statutory_citation': fields.List(IStr, required=False, description="Search for statutory citations"),
'ao_citation_require_all': fields.Bool(
description="Require all citations to be in document (default behavior is any)"),
'ao_entity_name': fields.List(IStr, required=False, description='Search by name of commenter or representative'),
'mur_no': fields.List(IStr, required=False, description='Filter MURs by case number'),
'mur_respondents': fields.Str(required=False, description='Filter MURs by respondents'),
'mur_dispositions': fields.List(IStr, required=False, description='Filter MURs by dispositions'),
'mur_election_cycles': fields.Int(required=False, description='Filter MURs by election cycles'),
'mur_document_category': fields.List(IStr, required=False,
description='Filter MURs by category of associated documents'),
'mur_min_open_date': fields.Date(required=False, description='Filter MURs by earliest date opened'),
'mur_max_open_date': fields.Date(required=False, description='Filter MURs by latest date opened'),
'mur_min_close_date': fields.Date(required=False,
description='Filter MURs by earliest date closed'),
'mur_max_close_date': fields.Date(required=False,
description='Filter MURs by latest date closed'),
}
candidate_detail = {
'cycle': fields.List(fields.Int, description=docs.CANDIDATE_CYCLE),
'election_year': fields.List(fields.Int, description=docs.ELECTION_YEAR),
'office': fields.List(fields.Str(validate=validate.OneOf(['', 'H', 'S', 'P'])), description=docs.OFFICE),
'state': fields.List(IStr, description=docs.STATE),
'party': fields.List(IStr, description=docs.PARTY),
'year': fields.Str(attribute='year', description=docs.YEAR),
'district': fields.List(District, description=docs.DISTRICT),
'candidate_status': fields.List(
IStr(validate=validate.OneOf(['', 'C', 'F', 'N', 'P'])),
description=docs.CANDIDATE_STATUS,
),
'incumbent_challenge': fields.List(
IStr(validate=validate.OneOf(['', 'I', 'C', 'O'])),
description=docs.INCUMBENT_CHALLENGE,
),
'federal_funds_flag': fields.Bool(description=docs.FEDERAL_FUNDS_FLAG),
'has_raised_funds': fields.Bool(description=docs.HAS_RAISED_FUNDS),
'name': fields.List(fields.Str, description='Name (candidate or committee) to search for. Alias for \'q\'.'),
}
candidate_list = {
'q': fields.List(fields.Str, description=docs.CANDIDATE_NAME),
'candidate_id': fields.List(IStr, description=docs.CANDIDATE_ID),
'min_first_file_date': fields.Date(description='Selects all candidates whose first filing was received by the FEC after this date'),
'max_first_file_date': fields.Date(description='Selects all candidates whose first filing was received by the FEC before this date'),
}
candidate_history = {
'election_full': election_full,
}
committee = {
'year': fields.List(fields.Int, description=docs.COMMITTEE_YEAR),
'cycle': fields.List(fields.Int, description=docs.COMMITTEE_CYCLE),
'designation': fields.List(
IStr(validate=validate.OneOf(['', 'A', 'J', 'P', 'U', 'B', 'D'])),
description=docs.DESIGNATION,
),
'organization_type': fields.List(
IStr(validate=validate.OneOf(['', 'C', 'L', 'M', 'T', 'V', 'W'])),
description=docs.ORGANIZATION_TYPE,
),
'committee_type': fields.List(
IStr(validate=validate.OneOf(['', 'C', 'D', 'E', 'H', 'I', 'N', 'O', 'P', 'Q', 'S', 'U', 'V', 'W', 'X', 'Y', 'Z'])),
description=docs.COMMITTEE_TYPE,
),
}
committee_list = {
'q': fields.List(fields.Str, description=docs.COMMITTEE_NAME),
'committee_id': fields.List(IStr, description=docs.COMMITTEE_ID),
'candidate_id': fields.List(IStr, description=docs.CANDIDATE_ID),
'state': fields.List(IStr, description=docs.STATE_GENERIC),
'party': fields.List(IStr, description=docs.PARTY),
'min_first_file_date': fields.Date(description='Selects all committees whose first filing was received by the FEC after this date'),
'max_first_file_date': fields.Date(description='Selects all committees whose first filing was received by the FEC before this date'),
'treasurer_name': fields.List(fields.Str, description=docs.TREASURER_NAME),
}
committee_history = {
'election_full': election_full,
}
filings = {
'committee_type': fields.Str(description=docs.COMMITTEE_TYPE),
'cycle': fields.List(fields.Int, description=docs.RECORD_CYCLE),
'is_amended': fields.Bool(description='Filing has been amended'),
'most_recent': fields.Bool(description='Filing is either new or is the most-recently filed amendment'),
'report_type': fields.List(IStr, description=docs.REPORT_TYPE),
'request_type': fields.List(IStr, description=docs.REQUEST_TYPE),
'document_type': fields.List(IStr, description=docs.DOC_TYPE),
'beginning_image_number': fields.List(fields.Str, description=docs.BEGINNING_IMAGE_NUMBER),
'report_year': fields.List(fields.Int, description=docs.REPORT_YEAR),
'min_receipt_date': fields.Date(description='Selects all items received by FEC after this date'),
'max_receipt_date': fields.Date(description='Selects all items received by FEC before this date'),
'form_type': fields.List(IStr, description=docs.FORM_TYPE),
'state': fields.List(IStr, description=docs.STATE),
'district': fields.List(IStr, description=docs.DISTRICT),
'office': fields.List(fields.Str(validate=validate.OneOf(['', 'H', 'S', 'P'])), description=docs.OFFICE),
'party': fields.List(IStr, description=docs.PARTY),
'filer_type': fields.Str(
validate=validate.OneOf(['e-file', 'paper']),
description=docs.MEANS_FILED,
),
'file_number': fields.List(fields.Int, description=docs.FILE_NUMBER),
'primary_general_indicator': fields.List(IStr, description='Primary, general or special election indicator'),
'amendment_indicator': fields.List(
IStr(validate=validate.OneOf(['', 'N', 'A', 'T', 'C', 'M', 'S'])),
description=docs.AMENDMENT_INDICATOR),
}
efilings = {
'file_number': fields.List(fields.Int, description=docs.FILE_NUMBER),
'committee_id': fields.List(IStr, description=docs.COMMITTEE_ID),
'min_receipt_date': fields.DateTime(description='Selects all items received by FEC after this date or datetime'),
'max_receipt_date': fields.DateTime(description='Selects all items received by FEC before this date or datetime'),
}
reports = {
'year': fields.List(fields.Int, description=docs.REPORT_YEAR),
'cycle': fields.List(fields.Int, description=docs.RECORD_CYCLE),
'beginning_image_number': fields.List(fields.Str, description=docs.BEGINNING_IMAGE_NUMBER),
'report_type': fields.List(fields.Str, description=docs.REPORT_TYPE_W_EXCLUDE),
'is_amended': fields.Bool(description='Report has been amended'),
'most_recent': fields.Bool(description='Report is either new or is the most-recently filed amendment'),
'filer_type': fields.Str(
validate=validate.OneOf(['e-file', 'paper']),
description=docs.MEANS_FILED,
),
'min_disbursements_amount': Currency(description=docs.MIN_FILTER),
'max_disbursements_amount': Currency(description=docs.MAX_FILTER),
'min_receipts_amount': Currency(description=docs.MIN_FILTER),
'max_receipts_amount': Currency(description=docs.MAX_FILTER),
'min_receipt_date': fields.DateTime(description='Selects all items received by FEC after this date or datetime'),
'max_receipt_date': fields.DateTime(description='Selects all items received by FEC before this date or datetime'),
'min_cash_on_hand_end_period_amount': Currency(description=docs.MIN_FILTER),
'max_cash_on_hand_end_period_amount': Currency(description=docs.MAX_FILTER),
'min_debts_owed_amount': Currency(description=docs.MIN_FILTER),
'max_debts_owed_expenditures': Currency(description=docs.MAX_FILTER),
'min_independent_expenditures': Currency(description=docs.MIN_FILTER),
'max_independent_expenditures': Currency(description=docs.MAX_FILTER),
'min_party_coordinated_expenditures': Currency(description=docs.MIN_FILTER),
'max_party_coordinated_expenditures': Currency(description=docs.MAX_FILTER),
'min_total_contributions': Currency(description=docs.MIN_FILTER),
'max_total_contributions': Currency(description=docs.MAX_FILTER),
'type': fields.List(fields.Str, description=docs.COMMITTEE_TYPE),
'candidate_id': fields.Str(description=docs.CANDIDATE_ID),
'committee_id': fields.List(fields.Str, description=docs.COMMITTEE_ID),
'amendment_indicator': fields.List(
IStr(validate=validate.OneOf(['', 'N', 'A', 'T', 'C', 'M', 'S'])),
description=docs.AMENDMENT_INDICATOR),
}
committee_reports = {
'year': fields.List(fields.Int, description=docs.REPORT_YEAR),
'cycle': fields.List(fields.Int, description=docs.RECORD_CYCLE),
'beginning_image_number': fields.List(fields.Str, description=docs.BEGINNING_IMAGE_NUMBER),
'report_type': fields.List(fields.Str, description=docs.REPORT_TYPE_W_EXCLUDE),
'is_amended': fields.Bool(description='Report has been amended'),
'min_disbursements_amount': Currency(description=docs.MIN_FILTER),
'max_disbursements_amount': Currency(description=docs.MAX_FILTER),
'min_receipts_amount': Currency(description=docs.MIN_FILTER),
'max_receipts_amount': Currency(description=docs.MAX_FILTER),
'min_cash_on_hand_end_period_amount': Currency(description=docs.MIN_FILTER),
'max_cash_on_hand_end_period_amount': Currency(description=docs.MAX_FILTER),
'min_debts_owed_amount': Currency(description=docs.MIN_FILTER),
'max_debts_owed_expenditures': Currency(description=docs.MAX_FILTER),
'min_independent_expenditures': Currency(description=docs.MIN_FILTER),
'max_independent_expenditures': Currency(description=docs.MAX_FILTER),
'min_party_coordinated_expenditures': Currency(description=docs.MIN_FILTER),
'max_party_coordinated_expenditures': Currency(description=docs.MAX_FILTER),
'min_total_contributions': Currency(description=docs.MIN_FILTER),
'max_total_contributions': Currency(description=docs.MAX_FILTER),
'type': fields.List(fields.Str, description=docs.COMMITTEE_TYPE),
'candidate_id': fields.Str(description=docs.CANDIDATE_ID),
}
totals = {
'cycle': fields.List(fields.Int, description=docs.RECORD_CYCLE),
'type': fields.Str(description=docs.COMMITTEE_TYPE),
'designation': fields.Str(description=docs.DESIGNATION),
}
totals_all = {
'cycle': fields.List(fields.Int, description=docs.RECORD_CYCLE),
'committee_type_full': fields.Str(description=docs.COMMITTEE_TYPE),
'committee_designation_full': fields.Str(description=docs.DESIGNATION),
'committee_id': fields.Str(description=docs.COMMITTEE_ID),
}
candidate_committee_totals = {
'full_election': fields.Bool(description='Get totals for full election period.')
}
itemized = {
# TODO(jmcarp) Request integer image numbers from FEC and update argument types
'image_number': fields.List(
fields.Str,
description='The image number of the page where the schedule item is reported',
),
'min_image_number': fields.Str(),
'max_image_number': fields.Str(),
'min_amount': Currency(description='Filter for all amounts greater than a value.'),
'max_amount': Currency(description='Filter for all amounts less than a value.'),
'min_date': fields.Date(description='Minimum date'),
'max_date': fields.Date(description='Maximum date'),
'line_number': fields.Str(description='Filter for form and line number using the following format: '
'`FORM-LINENUMBER`. For example an argument such as `F3X-16` would filter'
' down to all entries from form `F3X` line number `16`.')
}
reporting_dates = {
'min_due_date': fields.Date(description='Date the report is due'),
'max_due_date': fields.Date(description='Date the report is due'),
'report_year': fields.List(fields.Int, description='Year of report'),
'report_type': fields.List(fields.Str, description=docs.REPORT_TYPE),
'min_create_date': fields.Date(description='Date this record was added to the system'),
'max_create_date': fields.Date(description='Date this record was added to the system'),
'min_update_date': fields.Date(description='Date this record was last updated'),
'max_update_date': fields.Date(description='Date this record was last updated'),
}
election_dates = {
'election_state': fields.List(fields.Str, description='State or territory of the office sought'),
'election_district': fields.List(fields.Str, description='House district of the office sought, if applicable.'),
'election_party': fields.List(fields.Str, description='Party, if applicable.'),
'office_sought': fields.List(fields.Str(validate=validate.OneOf(['H', 'S', 'P'])), description='House, Senate or presidential office'),
'min_election_date': fields.Date(description='Date of election'),
'max_election_date': fields.Date(description='Date of election'),
'election_type_id': fields.List(fields.Str, description='Election type'),
'min_update_date': fields.Date(description='Date this record was last updated'),
'max_update_date': fields.Date(description='Date this record was last updated'),
'min_create_date': fields.Date(description='Date this record was added to the system'),
'max_create_date': fields.Date(description='Date this record was added to the system'),
'election_year': fields.List(fields.Str, description='Year of election'),
'min_primary_general_date': fields.Date(description='Date of primary or general election'),
'max_primary_general_date': fields.Date(description='Date of primary or general election'),
}
calendar_dates = {
'calendar_category_id': fields.List(fields.Int, description=docs.CATEGORY),
'description': fields.List(IStr, description=docs.CAL_DESCRIPTION),
'summary': fields.List(IStr, description=docs.SUMMARY),
'min_start_date': fields.DateTime(description='The minimum start date and time'),
'min_end_date': fields.DateTime(description='The minimum end date and time'),
'max_start_date': fields.DateTime(description='The maximum start date and time'),
'max_end_date': fields.DateTime(description='The maximum end date and time'),
'event_id': fields.Int(description=docs.EVENT_ID),
}
schedule_a = {
'committee_id': fields.List(IStr, description=docs.COMMITTEE_ID),
'contributor_id': fields.List(IStr, description=docs.CONTRIBUTOR_ID),
'contributor_name': fields.List(fields.Str, description=docs.CONTRIBUTOR_NAME),
'contributor_city': fields.List(IStr, description=docs.CONTRIBUTOR_CITY),
'contributor_state': fields.List(IStr, description=docs.CONTRIBUTOR_STATE),
'contributor_zip': fields.List(IStr, description=docs.CONTRIBUTOR_ZIP),
'contributor_employer': fields.List(fields.Str, description=docs.CONTRIBUTOR_EMPLOYER),
'contributor_occupation': fields.List(fields.Str, description=docs.CONTRIBUTOR_OCCUPATION),
'last_contribution_receipt_date': fields.Date(missing=None, description='When sorting by `contribution_receipt_date`, this is populated with the `contribution_receipt_date` of the last result. However, you will need to pass the index of that last result to `last_index` to get the next page.'),
'last_contribution_receipt_amount': fields.Float(missing=None, description='When sorting by `contribution_receipt_amount`, this is populated with the `contribution_receipt_amount` of the last result. However, you will need to pass the index of that last result to `last_index` to get the next page.'),
'last_contributor_aggregate_ytd': fields.Float(missing=None, description='When sorting by `contributor_aggregate_ytd`, this is populated with the `contributor_aggregate_ytd` of the last result. However, you will need to pass the index of that last result to `last_index` to get the next page.'),
'is_individual': fields.Bool(missing=None, description=docs.IS_INDIVIDUAL),
'contributor_type': fields.List(
fields.Str(validate=validate.OneOf(['individual', 'committee'])),
description='Filters individual or committee contributions based on line number'
),
'two_year_transaction_period': fields.Int(
description=docs.TWO_YEAR_TRANSACTION_PERIOD,
required=True,
missing=SQL_CONFIG['CYCLE_END_YEAR_ITEMIZED']
),
}
schedule_a_e_file = {
'committee_id': fields.List(IStr, description=docs.COMMITTEE_ID),
#'contributor_id': fields.List(IStr, description=docs.CONTRIBUTOR_ID),
'contributor_name': fields.List(fields.Str, description=docs.CONTRIBUTOR_NAME),
'contributor_city': fields.List(IStr, description=docs.CONTRIBUTOR_CITY),
'contributor_state': fields.List(IStr, description=docs.CONTRIBUTOR_STATE),
'contributor_employer': fields.List(fields.Str, description=docs.CONTRIBUTOR_EMPLOYER),
'contributor_occupation': fields.List(fields.Str, description=docs.CONTRIBUTOR_OCCUPATION),
}
schedule_a_by_size = {
'cycle': fields.List(fields.Int, description=docs.RECORD_CYCLE),
'size': fields.List(fields.Int(validate=validate.OneOf([0, 200, 500, 1000, 2000])), description=docs.SIZE),
}
schedule_a_by_state = {
'cycle': fields.List(fields.Int, description=docs.RECORD_CYCLE),
'state': fields.List(IStr, description='State of contributor'),
'hide_null': fields.Bool(missing=False, description='Exclude values with missing state'),
}
schedule_a_by_zip = {
'cycle': fields.List(fields.Int, description=docs.RECORD_CYCLE),
'zip': fields.List(fields.Str, description='Zip code'),
'state': fields.List(IStr, description='State of contributor'),
}
schedule_a_by_employer = {
'cycle': fields.List(fields.Int, description=docs.RECORD_CYCLE),
'employer': fields.List(IStr, description=docs.EMPLOYER),
}
schedule_a_by_occupation = {
'cycle': fields.List(fields.Int, description=docs.RECORD_CYCLE),
'occupation': fields.List(IStr, description=docs.OCCUPATION),
}
schedule_a_by_contributor = {
'cycle': fields.List(fields.Int, description=docs.RECORD_CYCLE),
'contributor_id': fields.List(IStr, description=docs.CONTRIBUTOR_ID),
}
schedule_b_by_recipient = {
'cycle': fields.List(fields.Int, description=docs.RECORD_CYCLE),
'recipient_name': fields.List(fields.Str, description=docs.RECIPIENT_NAME),
}
schedule_b_by_recipient_id = {
'cycle': fields.List(fields.Int, description=docs.RECORD_CYCLE),
'recipient_id': fields.List(IStr, description=docs.RECIPIENT_ID),
}
schedule_b = {
'committee_id': fields.List(IStr, description=docs.COMMITTEE_ID),
'recipient_committee_id': fields.List(IStr, description='The FEC identifier should be represented here if the contributor is registered with the FEC.'),
'recipient_name': fields.List(fields.Str, description='Name of recipient'),
'disbursement_description': fields.List(fields.Str, description='Description of disbursement'),
'recipient_city': fields.List(IStr, description='City of recipient'),
'recipient_state': fields.List(IStr, description='State of recipient'),
'disbursement_purpose_category': fields.List(IStr, description='Disbursement purpose category'),
'last_disbursement_date': fields.Date(missing=None, description='When sorting by `disbursement_date`, this is populated with the `disbursement_date` of the last result. However, you will need to pass the index of that last result to `last_index` to get the next page.'),
'last_disbursement_amount': fields.Float(missing=None, description='When sorting by `disbursement_amount`, this is populated with the `disbursement_amount` of the last result. However, you will need to pass the index of that last result to `last_index` to get the next page.'),
'two_year_transaction_period': fields.Int(
description=docs.TWO_YEAR_TRANSACTION_PERIOD,
required=True,
missing=SQL_CONFIG['CYCLE_END_YEAR_ITEMIZED']
),
}
schedule_b_efile = {
'committee_id': fields.List(IStr, description=docs.COMMITTEE_ID),
#'recipient_committee_id': fields.List(IStr, description='The FEC identifier should be represented here if the contributor is registered with the FEC.'),
#'recipient_name': fields.List(fields.Str, description='Name of recipient'),
'disbursement_description': fields.List(fields.Str, description='Description of disbursement'),
'image_number': fields.List(
fields.Str,
description='The image number of the page where the schedule item is reported',
),
'recipient_city': fields.List(IStr, description='City of recipient'),
'recipient_state': fields.List(IStr, description='State of recipient'),
    'max_date': fields.Date(missing=None, description='Maximum disbursement date'),
    'min_date': fields.Date(missing=None, description='Minimum disbursement date'),
    'min_amount': Currency(description='Filter for all amounts greater than a value.'),
'max_amount': Currency(description='Filter for all amounts less than a value.'),
}
schedule_b_by_purpose = {
'cycle': fields.List(fields.Int, description=docs.RECORD_CYCLE),
'purpose': fields.List(fields.Str, description='Disbursement purpose category'),
}
schedule_c = {
'committee_id': fields.List(IStr, description=docs.COMMITTEE_ID),
'candidate_name': fields.List(fields.Str, description=docs.CANDIDATE_NAME),
'loaner_name': fields.List(fields.Str, description=docs.LOAN_SOURCE),
'min_payment_to_date': fields.Int(description='Minimum payment to date'),
'max_payment_to_date': fields.Int(description='Maximum payment to date'),
}
schedule_e_by_candidate = {
'cycle': fields.List(fields.Int, description=docs.RECORD_CYCLE),
'candidate_id': fields.List(IStr, description=docs.CANDIDATE_ID),
'support_oppose': IStr(
missing=None,
validate=validate.OneOf(['S', 'O']),
description='Support or opposition'
),
}
#These arguments will evolve with updated filtering needs
schedule_d = {
'min_payment_period': fields.Float(),
'max_payment_period': fields.Float(),
'min_amount_incurred': fields.Float(),
'max_amount_incurred': fields.Float(),
'candidate_id': fields.List(IStr, description=docs.CANDIDATE_ID),
'creditor_debtor_name': fields.List(fields.Str),
'nature_of_debt': fields.Str(),
'committee_id': fields.List(IStr, description=docs.COMMITTEE_ID),
}
schedule_f = {
'candidate_id': fields.List(IStr, description=docs.CANDIDATE_ID),
'payee_name': fields.List(fields.Str),
'committee_id': fields.List(IStr, description=docs.COMMITTEE_ID),
'cycle': fields.List(fields.Int, description=docs.RECORD_CYCLE),
}
communication_cost = {
'committee_id': fields.List(IStr, description=docs.COMMITTEE_ID),
'candidate_id': fields.List(IStr, description=docs.CANDIDATE_ID),
'support_oppose_indicator': fields.List(
IStr(validate=validate.OneOf(['S', 'O'])),
description='Support or opposition',
),
}
electioneering = {
'committee_id': fields.List(IStr, description=docs.COMMITTEE_ID),
'candidate_id': fields.List(IStr, description=docs.CANDIDATE_ID),
'report_year': fields.List(fields.Int, description=docs.REPORT_YEAR),
'min_amount': Currency(description='Filter for all amounts greater than a value.'),
'max_amount': Currency(description='Filter for all amounts less than a value.'),
'min_date': fields.Date(description='Minimum disbursement date'),
'max_date': fields.Date(description='Maximum disbursement date'),
    'description': fields.Str(description='Disbursement description'),
}
electioneering_by_candidate = {
'cycle': fields.List(fields.Int, description=docs.RECORD_CYCLE),
'candidate_id': fields.List(IStr, description=docs.CANDIDATE_ID),
}
elections_list = {
'state': fields.List(IStr, description=docs.STATE),
'district': fields.List(District, description=docs.DISTRICT),
'cycle': fields.List(fields.Int, description=docs.CANDIDATE_CYCLE),
'zip': fields.List(fields.Int, description=docs.ZIP_CODE),
'office': fields.List(
fields.Str(validate=validate.OneOf(['house', 'senate', 'president'])),
),
}
elections = {
'state': IStr(description=docs.STATE),
'district': District(description=docs.DISTRICT),
'cycle': fields.Int(description=docs.CANDIDATE_CYCLE),
'office': fields.Str(
validate=validate.OneOf(['house', 'senate', 'president']),
description=docs.OFFICE,
),
'election_full': election_full,
}
schedule_a_candidate_aggregate = {
'candidate_id': fields.List(IStr, required=True, description=docs.CANDIDATE_ID),
'cycle': fields.List(fields.Int, required=True, description=docs.RECORD_CYCLE),
'election_full': election_full,
}
candidate_totals = {
'q': fields.List(fields.Str, description=docs.CANDIDATE_NAME),
'candidate_id': fields.List(IStr, description=docs.CANDIDATE_ID),
'election_year': fields.List(fields.Int, description=docs.RECORD_CYCLE),
'cycle': fields.List(fields.Int, description=docs.RECORD_CYCLE),
'office': fields.List(fields.Str(validate=validate.OneOf(['', 'H', 'S', 'P'])), description='Governmental office candidate runs for: House, Senate or presidential'),
'election_full': election_full,
'state': fields.List(IStr, description='State of candidate'),
'district': fields.List(District, description='District of candidate'),
'party': fields.List(IStr, description='Three-letter party code'),
'min_receipts': Currency(description='Minimum aggregated receipts'),
'max_receipts': Currency(description='Maximum aggregated receipts'),
'min_disbursements': Currency(description='Minimum aggregated disbursements'),
'max_disbursements': Currency(description='Maximum aggregated disbursements'),
'min_cash_on_hand_end_period': Currency(description='Minimum cash on hand'),
'max_cash_on_hand_end_period': Currency(description='Maximum cash on hand'),
'min_debts_owed_by_committee': Currency(description='Minimum debt'),
'max_debts_owed_by_committee': Currency(description='Maximum debt'),
'federal_funds_flag': fields.Bool(description=docs.FEDERAL_FUNDS_FLAG),
'has_raised_funds': fields.Bool(description=docs.HAS_RAISED_FUNDS),
}
totals_committee_aggregate = {
'min_receipts': Currency(description='Minimum aggregated receipts'),
'max_receipts': Currency(description='Maximum aggregated receipts'),
'min_disbursements': Currency(description='Minimum aggregated disbursements'),
'max_disbursements': Currency(description='Maximum aggregated disbursements'),
}
communication_cost_by_candidate = {
'candidate_id': fields.List(IStr, description=docs.CANDIDATE_ID),
'cycle': fields.List(fields.Int, description=docs.RECORD_CYCLE),
'support_oppose': IStr(
missing=None,
validate=validate.OneOf(['S', 'O']),
description='Support or opposition',
),
}
entities = {
'committee_id': fields.List(IStr, description=docs.COMMITTEE_ID),
'candidate_id': fields.List(IStr, description=docs.CANDIDATE_ID),
}
schedule_e = {
'cycle': fields.List(fields.Int, description=docs.RECORD_CYCLE),
'committee_id': fields.List(IStr, description=docs.COMMITTEE_ID),
'candidate_id': fields.List(IStr, description=docs.CANDIDATE_ID),
'filing_form': fields.List(IStr, description='Filing form'),
'last_expenditure_date': fields.Date(missing=None, description='When sorting by `expenditure_date`, this is populated with the `expenditure_date` of the last result. However, you will need to pass the index of that last result to `last_index` to get the next page.'),
'last_expenditure_amount': fields.Float(missing=None, description='When sorting by `expenditure_amount`, this is populated with the `expenditure_amount` of the last result. However, you will need to pass the index of that last result to `last_index` to get the next page.'),
'last_office_total_ytd': fields.Float(missing=None, description='When sorting by `office_total_ytd`, this is populated with the `office_total_ytd` of the last result. However, you will need to pass the index of that last result to `last_index` to get the next page.'),
'payee_name': fields.List(fields.Str, description='Name of the entity that received the payment'),
'support_oppose_indicator': fields.List(
IStr(validate=validate.OneOf(['S', 'O'])),
description='Support or opposition',
),
'is_notice': fields.List(fields.Bool, description='Record filed as 24- or 48-hour notice'),
}
schedule_e_efile = {
'committee_id': fields.List(IStr, description=docs.COMMITTEE_ID),
'candidate_id': fields.List(IStr, description=docs.CANDIDATE_ID),
'payee_name': fields.List(fields.Str, description='Name of the entity that received the payment'),
'candidate_name': fields.List(fields.Str, description=docs.CANDIDATE_NAME),
'image_number': fields.List(
fields.Str,
description='The image number of the page where the schedule item is reported',
),
'support_oppose_indicator': fields.List(
IStr(validate=validate.OneOf(['S', 'O'])),
description='Support or opposition',
),
    'min_expenditure_date': fields.Date(description=docs.EXPENDITURE_MIN_DATE),
    'max_expenditure_date': fields.Date(description=docs.EXPENDITURE_MAX_DATE),
    'min_expenditure_amount': Currency(description=docs.EXPENDITURE_MIN_AMOUNT),
    'max_expenditure_amount': Currency(description=docs.EXPENDITURE_MAX_AMOUNT),
}
rad_analyst = {
'committee_id': fields.List(IStr, description=docs.COMMITTEE_ID),
'analyst_id': fields.List(fields.Int(), description='ID of RAD analyst'),
'analyst_short_id': fields.List(fields.Int(), description='Short ID of RAD analyst'),
'telephone_ext': fields.List(fields.Int(), description='Telephone extension of RAD analyst'),
'name': fields.List(fields.Str, description='Name of RAD analyst'),
'email': fields.List(fields.Str, description='Email of RAD analyst'),
'title': fields.List(fields.Str, description='Title of RAD analyst'),
}
large_aggregates = {'cycle': fields.Int(required=True, description=docs.RECORD_CYCLE)}
schedule_a_by_state_recipient_totals = {
'cycle': fields.List(fields.Int, description=docs.RECORD_CYCLE),
'state': fields.List(IStr, description=docs.STATE_GENERIC),
'committee_type': fields.List(
IStr,
description=docs.COMMITTEE_TYPE_STATE_AGGREGATE_TOTALS
),
}
# endpoint audit-primary-category
PrimaryCategory = {
'primary_category_id': fields.List(fields.Str(), description=docs.PRIMARY_CATEGORY_ID),
'primary_category_name': fields.List(fields.Str, description=docs.PRIMARY_CATEGORY_NAME),
# 'tier': fields.List(fields.Int, description=docs.AUDIT_TIER),
}
# endpoint audit-category
Category = {
'primary_category_id': fields.List(fields.Str(), description=docs.PRIMARY_CATEGORY_ID),
'primary_category_name': fields.List(fields.Str, description=docs.PRIMARY_CATEGORY_NAME),
}
# endpoint audit-case
AuditCase = {
'primary_category_id': fields.List(fields.Str(), missing='all', description=docs.PRIMARY_CATEGORY_ID),
'sub_category_id': fields.List(fields.Str(), missing='all', description=docs.SUB_CATEGORY_ID),
'audit_case_id': fields.List(fields.Str(), description=docs.AUDIT_CASE_ID),
'cycle': fields.List(fields.Int(), description=docs.CYCLE),
'committee_id': fields.List(fields.Str(), description=docs.COMMITTEE_ID),
'committee_name': fields.List(fields.Str(), description=docs.COMMITTEE_NAME),
'committee_type': fields.List(fields.Str(), description=docs.COMMITTEE_TYPE),
'audit_id': fields.List(fields.Int(), description=docs.AUDIT_ID),
'candidate_id': fields.List(fields.Str(), description=docs.CANDIDATE_ID),
'candidate_name': fields.List(fields.Str(), description=docs.CANDIDATE_NAME),
'min_election_cycle': fields.Int(description=docs.CYCLE),
'max_election_cycle': fields.Int(description=docs.CYCLE),
}
```
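The dictionaries above are plain webargs/marshmallow field maps. As a rough, self-contained sketch (not openFEC code; it assumes webargs 6+ with Flask and uses a made-up view plus a hypothetical subset of the `schedule_a_by_size` arguments), this is how such a map is parsed and validated from a query string:
```python
from flask import Flask
from webargs import fields, validate
from webargs.flaskparser import use_kwargs

app = Flask(__name__)

# Hypothetical stand-in for the schedule_a_by_size argument map defined above.
by_size_args = {
    'cycle': fields.List(fields.Int()),
    'size': fields.List(fields.Int(validate=validate.OneOf([0, 200, 500, 1000, 2000]))),
}

@app.route('/schedules/schedule_a/by_size/')
@use_kwargs(by_size_args, location='query')
def schedule_a_by_size_view(cycle=None, size=None):
    # The real endpoint feeds these values into SQLAlchemy filters; this sketch just echoes them.
    return {'cycle': cycle or [], 'size': size or []}
```
A request like `/schedules/schedule_a/by_size/?cycle=2016&size=200` would arrive in the view as `cycle=[2016]` and `size=[200]`, with `validate.OneOf` rejecting any size outside the allowed buckets.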
#### File: common/models/base.py
```python
import random
import celery
from flask_sqlalchemy import SQLAlchemy
from flask_sqlalchemy import SignallingSession
class RoutingSession(SignallingSession):
"""Route requests to database leader or follower as appropriate.
Based on http://techspot.zzzeek.org/2012/01/11/django-style-database-routers-in-sqlalchemy/
"""
@property
def followers(self):
return self.app.config['SQLALCHEMY_FOLLOWERS']
@property
def follower_tasks(self):
return self.app.config['SQLALCHEMY_FOLLOWER_TASKS']
@property
def restrict_follower_traffic_to_tasks(self):
return self.app.config['SQLALCHEMY_RESTRICT_FOLLOWER_TRAFFIC_TO_TASKS']
@property
def use_follower(self):
# Check for read operations and configured followers.
use_follower = (
not self._flushing and
len(self.followers) > 0
)
# Optionally restrict traffic to followers for only supported tasks.
if use_follower and self.restrict_follower_traffic_to_tasks:
use_follower = (
celery.current_task and
celery.current_task.name in self.follower_tasks
)
return use_follower
def get_bind(self, mapper=None, clause=None):
if self.use_follower:
return random.choice(self.followers)
return super().get_bind(mapper=mapper, clause=clause)
class RoutingSQLAlchemy(SQLAlchemy):
def create_session(self, options):
return RoutingSession(self, **options)
db = RoutingSQLAlchemy()
class BaseModel(db.Model):
__abstract__ = True
idx = db.Column(db.Integer, primary_key=True)
```
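A minimal wiring sketch for the routing session above (illustration only; the database URLs and task name are placeholders, and it assumes the flask_sqlalchemy version this module targets): reads outside a flush are bound to a random follower engine, while writes fall through to the leader.
```python
from flask import Flask
import sqlalchemy as sa

app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql:///fec'  # leader (placeholder URL)
app.config['SQLALCHEMY_FOLLOWERS'] = [
    sa.create_engine('postgresql:///fec_follower_1'),  # placeholder follower engines
    sa.create_engine('postgresql:///fec_follower_2'),
]
app.config['SQLALCHEMY_FOLLOWER_TASKS'] = ['tasks.some_read_only_task']  # hypothetical task name
app.config['SQLALCHEMY_RESTRICT_FOLLOWER_TRAFFIC_TO_TASKS'] = False

db.init_app(app)  # `db` is the RoutingSQLAlchemy instance defined above

with app.app_context():
    # RoutingSession.get_bind picks a random follower for this read;
    # anything issued during a flush (inserts/updates) uses the leader bind instead.
    db.session.execute(sa.text('SELECT 1'))
```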
#### File: common/models/candidates.py
```python
import sqlalchemy as sa
from sqlalchemy.dialects.postgresql import ARRAY, TSVECTOR
from sqlalchemy.ext.declarative import declared_attr
from webservices import docs
from .base import db, BaseModel
class CandidateSearch(BaseModel):
__tablename__ = 'ofec_candidate_fulltext_mv'
id = db.Column(db.String)
name = db.Column(db.String, doc=docs.CANDIDATE_NAME)
office_sought = db.Column(db.String, doc=docs.OFFICE)
fulltxt = db.Column(TSVECTOR)
receipts = db.Column(db.Numeric(30, 2))
class CandidateFlags(db.Model):
__tablename__ = 'ofec_candidate_flag_mv'
candidate_id = db.Column(db.String, index=True, primary_key=True, doc=docs.CANDIDATE_ID)
federal_funds_flag = db.Column(db.Boolean, index=True, doc=docs.FEDERAL_FUNDS_FLAG)
has_raised_funds = db.Column(db.Boolean, index=True, doc=docs.HAS_RAISED_FUNDS)
class BaseCandidate(BaseModel):
__abstract__ = True
name = db.Column(db.String(100), index=True, doc=docs.CANDIDATE_NAME)
office = db.Column(db.String(1), index=True, doc=docs.OFFICE)
office_full = db.Column(db.String(9), doc=docs.OFFICE_FULL)
party = db.Column(db.String(3), index=True, doc=docs.PARTY)
party_full = db.Column(db.String(255), doc=docs.PARTY_FULL)
state = db.Column(db.String(2), index=True, doc=docs.STATE)
district = db.Column(db.String(2), index=True, doc=docs.DISTRICT)
# ? difference between district and district_number
    district_number = db.Column(db.Integer, index=True, doc=docs.DISTRICT)
election_districts = db.Column(ARRAY(db.String), index=True, doc=docs.DISTRICT)
election_years = db.Column(ARRAY(db.Integer), index=True, doc='Years in which a candidate ran for office.')
cycles = db.Column(ARRAY(db.Integer), index=True, doc=docs.CANDIDATE_CYCLE)
candidate_status = db.Column(db.String(1), index=True, doc=docs.CANDIDATE_STATUS)
incumbent_challenge = db.Column(db.String(1), index=True, doc=docs.INCUMBENT_CHALLENGE)
incumbent_challenge_full = db.Column(db.String(10), doc=docs.INCUMBENT_CHALLENGE_FULL)
load_date = db.Column(db.Date, index=True, doc=docs.LOAD_DATE)
first_file_date = db.Column(db.Date, index=True, doc=docs.FIRST_CANDIDATE_FILE_DATE)
last_file_date = db.Column(db.Date, doc=docs.LAST_CANDIDATE_FILE_DATE)
last_f2_date = db.Column(db.Date, doc=docs.LAST_F2_DATE)
@declared_attr
def flags(self):
return sa.orm.relationship(
CandidateFlags,
primaryjoin=sa.orm.foreign(CandidateFlags.candidate_id) == self.candidate_id,
uselist=False,
)
class BaseConcreteCandidate(BaseCandidate):
__tablename__ = 'ofec_candidate_detail_mv'
candidate_id = db.Column(db.String, unique=True, doc=docs.CANDIDATE_ID)
class Candidate(BaseConcreteCandidate):
__table_args__ = {'extend_existing': True}
__tablename__ = 'ofec_candidate_detail_mv'
active_through = db.Column(db.Integer, doc=docs.ACTIVE_THROUGH)
# Customize join to restrict to principal committees
principal_committees = db.relationship(
'Committee',
secondary='ofec_cand_cmte_linkage_mv',
secondaryjoin='''and_(
Committee.committee_id == ofec_cand_cmte_linkage_mv.c.cmte_id,
ofec_cand_cmte_linkage_mv.c.cmte_dsgn == 'P',
)''',
order_by=(
'desc(ofec_cand_cmte_linkage_mv.c.cand_election_yr),'
'desc(Committee.last_file_date),'
)
)
class CandidateDetail(BaseConcreteCandidate):
__table_args__ = {'extend_existing': True}
__tablename__ = 'ofec_candidate_detail_mv'
address_city = db.Column(db.String(100), doc='City of candidate\'s address, as reported on their Form 2.')
address_state = db.Column(db.String(2), doc='State of candidate\'s address, as reported on their Form 2.')
address_street_1 = db.Column(db.String(200), doc='Street of candidate\'s address, as reported on their Form 2.')
address_street_2 = db.Column(db.String(200), doc='Additional street information of candidate\'s address, as reported on their Form 2.')
address_zip = db.Column(db.String(10), doc='Zip code of candidate\'s address, as reported on their Form 2.')
candidate_inactive = db.Column(db.Boolean, doc='True indicates that a candidate is inactive.')
active_through = db.Column(db.Integer, doc=docs.ACTIVE_THROUGH)
class CandidateHistory(BaseCandidate):
__tablename__ = 'ofec_candidate_history_mv'
candidate_id = db.Column(db.String, primary_key=True, index=True, doc=docs.CANDIDATE_ID)
two_year_period = db.Column(db.Integer, primary_key=True, index=True, doc=docs.CANDIDATE_CYCLE)
candidate_election_year = db.Column(db.Integer, doc="The last year of the cycle for this election.")
address_city = db.Column(db.String(100), doc='City of candidate\'s address, as reported on their Form 2.')
address_state = db.Column(db.String(2), doc='State of candidate\'s address, as reported on their Form 2.')
address_street_1 = db.Column(db.String(200), doc='Street of candidate\'s address, as reported on their Form 2.')
address_street_2 = db.Column(db.String(200), doc='Additional street information of candidate\'s address, as reported on their Form 2.')
address_zip = db.Column(db.String(10), doc='Zip code of candidate\'s address, as reported on their Form 2.')
candidate_inactive = db.Column(db.Boolean, doc='True indicates that a candidate is inactive.')
active_through = db.Column(db.Integer, doc=docs.ACTIVE_THROUGH)
class CandidateHistoryLatest(BaseCandidate):
__tablename__ = 'ofec_candidate_history_latest_mv'
#Is there any good reason to have this as a separate model?
candidate_id = db.Column(db.String, primary_key=True, index=True)
two_year_period = db.Column(db.Integer, primary_key=True, index=True)
candidate_election_year = db.Column(db.Integer, doc="The last year of the cycle for this election.")
address_city = db.Column(db.String(100))
address_state = db.Column(db.String(2))
address_street_1 = db.Column(db.String(200))
address_street_2 = db.Column(db.String(200))
address_zip = db.Column(db.String(10))
candidate_inactive = db.Column(db.Boolean)
class CandidateTotal(db.Model):
__tablename__ = 'ofec_candidate_totals_mv'
candidate_id = db.Column(db.String, index=True, primary_key=True)
election_year = db.Column(db.Integer, index=True, primary_key=True)
cycle = db.Column(db.Integer, index=True, primary_key=True)
is_election = db.Column(db.Boolean, index=True, primary_key=True)
receipts = db.Column(db.Numeric(30, 2), index=True)
disbursements = db.Column(db.Numeric(30, 2), index=True)
cash_on_hand_end_period = db.Column(db.Numeric(30, 2))
debts_owed_by_committee = db.Column(db.Numeric(30, 2))
coverage_start_date = db.Column(db.Date, doc=docs.COVERAGE_START_DATE)
coverage_end_date = db.Column(db.Date, doc=docs.COVERAGE_END_DATE)
class CandidateElection(db.Model):
__tablename__ = 'ofec_candidate_election_mv'
candidate_id = db.Column(db.String, primary_key=True, index=True, doc=docs.CANDIDATE_ID)
cand_election_year = db.Column(db.Integer, primary_key=True, index=True, doc='Year a candidate runs for federal office.')
prev_election_year = db.Column(db.Integer, index=True)
```
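A short query sketch for the history models above (illustration only; it assumes an active application context and populated `ofec_*` materialized views). Because `CandidateHistory` stores one row per candidate per two-year period, the latest characteristics come from the maximum `two_year_period`:
```python
import sqlalchemy as sa

def latest_candidate_history(session, candidate_id):
    """Return the most recent CandidateHistory row for a single candidate ID."""
    return (
        session.query(CandidateHistory)
        .filter(CandidateHistory.candidate_id == candidate_id)
        .order_by(sa.desc(CandidateHistory.two_year_period))
        .first()
    )

# e.g. latest_candidate_history(db.session, 'P00000001')  # placeholder candidate ID
```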
#### File: openFEC/webservices/decoders.py
```python
import json
election_types = {
'GR': 'General runoff',
'SG': 'Special election general',
'SGR': 'Special election general runoff',
'C': 'Convention',
'SPR': 'Special primary runoff',
'SC': 'Special convention',
'PR': 'Primary runoff',
'G': 'General election',
'P': 'Primary election',
'SP': 'Special primary',
'R': 'Runoff',
'SR': 'Special runoff',
}
form_types = {
'F1': 'Statement of organization',
'F1M': 'Notification of multicandidate status',
'F2': 'Statement of candidacy',
'F9': '24-hour notice of disbursements for electioneering communications',
'F10': '24-hour notice of expenditure of personal funds',
'F11': '24-hour notice of opposition personal funds amount',
'F12': '24-hour notice of suspension of increased limits',
'F99': 'Miscellaneous document',
'F6': '48-hour notice of contribution/loans received',
}
def dumper(f3p_col_a, f3p_col_b, f3p_description, dumped):
for row in dumped:
description, col_a, col_b = row
f3p_col_a.append(col_a)
f3p_col_b.append(col_b)
f3p_description.append(description)
fp = open("data/" + "efile_guide_f3p.json", 'r')
dumped = json.load(fp)
f3p_col_a = []
f3p_col_b = []
f3p_description = []
dumper(f3p_col_a, f3p_col_b, f3p_description, dumped)
f3_col_a = []
f3_col_b = []
f3_description = []
fp = open("data/" + "efile_guide_f3.json", 'r')
dumped = json.load(fp)
dumper(f3_col_a, f3_col_b, f3_description, dumped)
fp = open("data/" + "efile_guide_f3x.json", 'r')
dumped = json.load(fp)
f3x_col_a = []
f3x_col_b = []
f3x_description = []
dumper(f3x_col_a, f3x_col_b, f3x_description, dumped)
```
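The JSON files loaded above are rows shaped like `[description, column_a, column_b]`, which `dumper` splits into parallel lists. A small sketch (not part of the module) of folding those lists back into a lookup keyed by description:
```python
def build_guide(descriptions, col_a, col_b):
    """Map a summary-line description to its Column A / Column B values."""
    return {
        description: {'column_a': a, 'column_b': b}
        for description, a, b in zip(descriptions, col_a, col_b)
    }

# e.g. f3x_guide = build_guide(f3x_description, f3x_col_a, f3x_col_b)
```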
#### File: openFEC/webservices/docs.py
```python
BEGINNING_IMAGE_NUMBER = '''
Unique identifier for the electronic or paper report. This number is used to construct
PDF URLs to the original document.
'''
CANDIDATE_ID = '''
A unique identifier assigned to each candidate registered with the FEC.
If a person runs for several offices, that person will have separate candidate IDs for each office.
'''
COMMITTEE_ID = '''
A unique identifier assigned to each committee or filer registered with the FEC. In general, \
committee IDs begin with the letter C, followed by eight digits.
'''
CANDIDATE_CYCLE = '''
Two-year election cycle in which a candidate runs for office.
Calculated from FEC Form 2. The cycle begins with
an odd year and is named for its ending, even year. This cycle follows
the traditional house election cycle and subdivides the presidential
and Senate elections into comparable two-year blocks. To see data for
the entire four years of a presidential term or six years of a senatorial term,
you will need the `election_full` flag.
'''
COMMITTEE_CYCLE = '''
A two-year election cycle in which the committee was active (after its original registration
date but before its expiration date on FEC Form 1). The cycle begins with
an odd year and is named for its ending, even year.
'''
RECORD_CYCLE = '''
Filter records to only those that were applicable to a given
two-year period. The cycle begins with an odd year and is named
for its ending, even year.
'''
RECORD_YEAR = '''
Filter records to only those that were applicable to a given year.
'''
# committee uses a different definition for cycle because it is less straightforward
CYCLE = '''
Filter records to only those that are applicable to a given two-year
period. This cycle follows the traditional House election cycle and
subdivides the presidential and Senate elections into comparable
two-year blocks. The cycle begins with an odd year and is named for its
ending, even year.
'''
API_DESCRIPTION = '''
This API allows you to explore the way candidates and committees fund their campaigns.
The FEC API is a RESTful web service supporting full-text and field-specific searches on
FEC data. [Bulk downloads](http://fec.gov/data/DataCatalog.do) are available on the current
site. Information is tied to the underlying forms by file ID and image ID. Data is updated
nightly.
There is a lot of data, but a good place to start is to use search to find
interesting candidates and committees. Then, you can use their IDs to find report or line
item details with the other endpoints. If you are interested in individual donors, check
out contributor information in schedule_a.
Get an [API key here](https://api.data.gov/signup/). That will enable you to place up to 1,000
calls an hour. Each call is limited to 100 results per page. You can email questions, comments or
a request to get a key for 120 calls per minute to [<EMAIL>](<EMAIL>). You can also
ask questions and discuss the data in the [FEC data Google Group](https://groups.google.com/forum/#!forum/fec-data).
API changes will also be added to this group in advance of the change.
The model definitions and schema are available at [/swagger](/swagger/). This is useful for
making wrappers and exploring the data.
A few restrictions limit the way you can use FEC data. For example, you can’t use contributor
lists for commercial purposes or to solicit donations.
[Learn more here](https://transition.fec.gov/pages/brochures/saleuse.shtml).
[View our source code](https://github.com/fecgov/openFEC). We welcome issues and pull requests!
'''
CANDIDATE_TAG = '''
Candidate endpoints give you access to information about the people running for office.
This information is organized by candidate_id. If you're unfamiliar with candidate IDs,
using `/candidates/search` will help you locate a particular candidate.
Officially, a candidate is an individual seeking nomination for election to a federal
office. People become candidates when they (or agents working on their behalf)
raise contributions or make expenditures that exceed $5,000.
The candidate endpoints primarily use data from FEC registration
[Form 2](http://www.fec.gov/pdf/forms/fecfrm2.pdf), for candidate information, and
[Form 1](http://www.fec.gov/pdf/forms/fecfrm1.pdf), for committee information.
'''
NAME_SEARCH = '''
Search for candidates or committees by name. If you're looking for information on a
particular person or group, using a name to find the `candidate_id` or `committee_id` on
this endpoint can be a helpful first step.
'''
LEGAL_SEARCH = '''
Search for legal documents.
'''
CANDIDATE_LIST = '''
Fetch basic information about candidates, and use parameters to filter results to the
candidates you're looking for.
Each result reflects a unique FEC candidate ID. That ID is particular to the candidate for a
particular office sought. If a candidate runs for the same office multiple times, the ID
stays the same. If the same person runs for another office — for example, a House
candidate runs for a Senate office — that candidate will get a unique ID for each office.
'''
CANDIDATE_HISTORY = '''
Find out a candidate's characteristics over time. This is particularly useful if the
candidate runs for the same office in different districts or you want to know more about a candidate's
previous races.
This information is organized by `candidate_id`, so it won't help you find a candidate
who ran for different offices over time; candidates get a new ID for each office.
'''
CANDIDATE_SEARCH = '''
Fetch basic information about candidates and their principal committees.
Each result reflects a unique FEC candidate ID. That ID is assigned to the candidate for a
particular office sought. If a candidate runs for the same office over time, that ID
stays the same. If the same person runs for multiple offices — for example, a House
candidate runs for a Senate office — that candidate will get a unique ID for each office.
The candidate endpoints primarily use data from FEC registration
[Form 2](http://www.fec.gov/pdf/forms/fecfrm2.pdf), for candidate information, and
[Form 1](http://www.fec.gov/pdf/forms/fecfrm1.pdf), for committee information, with additional information
to provide context.
'''
CANDIDATE_DETAIL = '''
This endpoint is useful for finding detailed information about a particular candidate. Use the
`candidate_id` to find the most recent information about that candidate.
'''
COMMITTEE_TAG = '''
Committees are entities that spend and raise money in an election. Their characteristics and
relationships with candidates can change over time.
You might want to use filters or search endpoints to find the committee you're looking
for. Then you can use other committee endpoints to explore information about the committee
that interests you.
Financial information is organized by `committee_id`, so finding the committee you're interested in
will lead you to more granular financial information.
The committee endpoints include all FEC filers, even if they aren't registered as a committee.
Officially, committees include the committees and organizations that file with the FEC.
Several different types of organizations file financial reports with the FEC:
* Campaign committees authorized by particular candidates to raise and spend funds in
their campaigns
* Non-party committees (e.g., PACs), some of which may be sponsored by corporations,
unions, trade or membership groups, etc.
* Political party committees at the national, state, and local levels
* Groups and individuals making only independent expenditures
* Corporations, unions, and other organizations making internal communications
The committee endpoints primarily use data from FEC registration Form 1 and Form 2.
'''
COMMITTEE_LIST = '''
Fetch basic information about committees and filers. Use parameters to filter for
particular characteristics.
'''
COMMITTEE_DETAIL = '''
This endpoint is useful for finding detailed information about a particular committee or
filer. Use the `committee_id` to find the most recent information about the committee.
'''
COMMITTEE_HISTORY = '''
Explore a filer's characteristics over time. This can be particularly useful if the
committee changes treasurers, designation, or `committee_type`.
'''
ELECTION_SEARCH = '''
List elections by cycle, office, state, and district.
'''
ELECTIONS = '''
Look at the top-level financial information for all candidates running for the same
office.
Choose a 2-year cycle, and `house`, `senate` or `president`.
If you are looking for a Senate seat, you will need to select the state using a two-letter
abbreviation.
House races require state and a two-digit district number.
Since this endpoint reflects financial information, it will only have candidates once they file
financial reporting forms. Query the `/candidates` endpoint to see an up-to-date list of all the
candidates that filed to run for a particular seat.
'''
FINANCIAL_TAG = '''
Fetch key information about a committee's Form 3, Form 3X, or Form 3P financial reports.
Most committees are required to summarize their financial activity in each filing; those summaries
are included in these files. Generally, committees file reports on a quarterly or monthly basis, but
some must also submit a report 12 days before primary elections. Therefore, during the primary
season, the period covered by this file may be different for different committees. These totals
also incorporate any changes made by committees, if any report covering the period is amended.
Information is made available on the API as soon as it's processed. Keep in mind, complex
paper filings take longer to process.
The financial endpoints use data from FEC [form 5](http://www.fec.gov/pdf/forms/fecfrm5.pdf),
for filers of independent expenditures; or the summary and detailed summary pages of the FEC
[Form 3](http://www.fec.gov/pdf/forms/fecfrm3.pdf), for House and Senate committees;
[Form 3X](http://www.fec.gov/pdf/forms/fecfrm3x.pdf), for PACs and parties;
and [Form 3P](http://www.fec.gov/pdf/forms/fecfrm3p.pdf), for presidential committees.
'''
WIP_TAG = '''
DISCLAIMER: The field labels contained within this resource are subject to change. We are attempting to succinctly
label these fields while conveying clear meaning to ensure accessibility for all users.
'''
REPORTS = '''
Each report represents the summary information from FEC Form 3, Form 3X and Form 3P.
These reports have key statistics that illuminate the financial status of a given committee.
Things like cash on hand, debts owed by committee, total receipts, and total disbursements
are especially helpful for understanding a committee's financial dealings.
By default, this endpoint includes both amended and final versions of each report. To restrict
to only the final versions of each report, use `is_amended=false`; to view only reports that
have been amended, use `is_amended=true`.
Several different reporting structures exist, depending on the type of organization that
submits financial information. To see an example of these reporting requirements,
look at the summary and detailed summary pages of FEC Form 3, Form 3X, and Form 3P.
'''
REPORTS += WIP_TAG
REPORT_YEAR = '''
Year that the record applies to. Sometimes records are amended in subsequent
years, so this can differ from the underlying form's receipt date.
'''
TWO_YEAR_TRANSACTION_PERIOD = '''
This is a two-year period that is derived from the year a transaction took place in the
Itemized Schedule A and Schedule B tables. In cases where we have the date of the transaction
(contribution_receipt_date in schedules/schedule_a, disbursement_date in schedules/schedule_b)
the two_year_transaction_period is named after the ending, even-numbered year. If we do not
have the date of the transaction, we fall back to using the report year (report_year in both
tables) instead, making the same cycle adjustment as necessary. If no transaction year is
specified, the results default to the most current cycle.
'''
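# Illustration only (not part of the docs module): the naming rule described in
# TWO_YEAR_TRANSACTION_PERIOD above maps any transaction year to the even, ending
# year of its two-year period.
def _example_two_year_transaction_period(year):
    return year + (year % 2)  # 2015 -> 2016, 2016 -> 2016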
TOTALS = '''
This endpoint provides information about a committee's Form 3, Form 3X, or Form 3P financial reports,
which are aggregated by two-year period. We refer to two-year periods as a `cycle`.
The cycle is named after the even-numbered year and includes the year before it. To see
totals from 2013 and 2014, you would use 2014. In odd-numbered years, the current cycle
is the next year — for example, in 2015, the current cycle is 2016.
For presidential and Senate candidates, multiple two-year cycles exist between elections.
'''
SCHEDULE_A_TAG = '''
Schedule A records describe itemized receipts reported by a committee. This is where
you can look for individual contributors. If you are interested in
individual donors, `/schedules/schedule_a` will be the endpoint you use.
Once a person gives more than a total of $200, the donations of that person must be
reported by committees that file F3, F3X and F3P forms.
Contributions $200 and under are not required to be itemized, but you can find the total
amount of these small donations by looking up the "unitemized" field in the `/reports`
or `/totals` endpoints.
When comparing the totals from reports to line items, the totals will not match unless you
only look at items where `"is_individual":true`, since the same transaction is in the data
multiple ways to explain the way it may move through different committees as an earmark.
For the Schedule A aggregates, such as by_occupation and by_state, include only unique individual
contributions. See below for full methodology.
### Methodology for determining unique, individual contributions
For receipts over $200, we use the FEC line_number code to identify individuals.
The following line numbers identify individuals and are automatically included:
Line number with description
- 10 Contribution to Independent Expenditure-Only Committees (Super PACs),\n\
Political Committees with non-contribution accounts (Hybrid PACs)\n\
and nonfederal party "soft money" accounts (1991-2002)\n\
from a person (individual, partnership, limited liability company,\n\
corporation, labor organization, or any other organization or\n\
group of persons)
- 15 Contribution to political committees (other than Super PACs\n\
and Hybrid PACs) from an individual, partnership or\n\
limited liability company
- 15E Earmarked contributions to political committees\n\
(other than Super PACs and Hybrid PACs) from an individual,\n\
partnership or limited liability company
- 15J Memo - Recipient committee's percentage of contribution\n\
from an individual, partnership or limited liability\n\
company given to joint fundraising committee
- 18J Memo - Recipient committee's percentage of contribution\n\
from a registered committee given to joint fundraising committee\n\
- 30, 30T, 31, 31T, 32 Individual party codes\n\
For receipts under $200:
We check the following codes and see if there is "earmark" (or a variation) in the `memo_text`
description of the contribution.
Line number with description
- 11AI The itemized individual contributions from F3 schedule A\n\
- 12 Nonfederal other receipt - Levin Account (Line 2)\n\
- 17 Itemized individual contributions from Form 3P\n\
- 17A Itemized individual contributions from Form 3P\n\
- 18 Itemized individual contributions from Form 3P\n\
Of those transactions [under $200 and having "earmark" in the memo text, OR transactions having the codes 11A, 12, 17, 17A, or 18], we then want to exclude earmarks.
This is [the sql function](https://github.com/fecgov/openFEC/blob/develop/data/functions/individual.sql) that defines individual contributions:
'''
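# Rough illustration only; the authoritative logic is the individual.sql function linked
# above. This sketch applies the line-number rules described in SCHEDULE_A_TAG.
def _example_is_individual(line_number, memo_text):
    line_number = (line_number or '').upper()
    memo_text = (memo_text or '').upper()
    if line_number in {'10', '15', '15E', '15J', '18J', '30', '30T', '31', '31T', '32'}:
        return True
    if line_number in {'11AI', '12', '17', '17A', '18'}:
        # Receipts on these lines count as individual unless they look like earmarks.
        return 'EARMARK' not in memo_text
    return False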
SCHEDULE_A = SCHEDULE_A_TAG + '''
The data is divided in two-year periods, called `two_year_transaction_period`, which
is derived from the `contribution_receipt_date`. If no value is supplied, the results
will default to the most recent two-year period that is named after the ending,
even-numbered year.
Due to the large quantity of Schedule A filings, this endpoint is not paginated by
page number. Instead, you can request the next page of results by adding the values in
the `last_indexes` object from `pagination` to the URL of your last request. For
example, when sorting by `contribution_receipt_date`, you might receive a page of
results with the following pagination information:
```
pagination: {\n\
pages: 2152643,\n\
per_page: 20,\n\
count: 43052850,\n\
last_indexes: {\n\
last_index: "230880619",\n\
last_contribution_receipt_date: "2014-01-01"\n\
}\n\
}\n\
```
To fetch the next page of sorted results, append `last_index=230880619` and
`last_contribution_receipt_date=2014-01-01` to the URL. We strongly advise paging through
these results by using sort indices (defaults to sort by contribution date), otherwise some resources may be
unintentionally filtered out. This resource uses keyset pagination to improve query performance and these indices
are required to properly page through this large dataset.
Note: because the Schedule A data includes many records, counts for
large result sets are approximate; you will want to page through the records until no records are returned.
'''
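# Illustration only (not part of the docs module; assumes the `requests` package and an
# api.data.gov key): the keyset pagination described in SCHEDULE_A above, feeding each
# page's pagination.last_indexes back into the next request until no results remain.
def _example_page_schedule_a(api_key, **filters):
    import requests
    url = 'https://api.open.fec.gov/v1/schedules/schedule_a/'
    params = dict(filters, api_key=api_key, sort='-contribution_receipt_date')
    while True:
        payload = requests.get(url, params=params).json()
        results = payload.get('results', [])
        if not results:
            break
        yield from results
        params.update(payload['pagination']['last_indexes'])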
SUB_ID = '''
A unique database identifier for itemized receipts or disbursements.
'''
SCHEDULE_B_TAG = '''
Schedule B filings describe itemized disbursements. This data
explains how committees and other filers spend their money. These figures are
reported as part of forms F3, F3X and F3P.
'''
SCHEDULE_B = SCHEDULE_B_TAG + '''
The data is divided in two-year periods, called `two_year_transaction_period`, which
is derived from the `disbursement_date`. If no value is supplied, the results will
default to the most recent two-year period that is named after the ending,
even-numbered year.
Due to the large quantity of Schedule B filings, this endpoint is not paginated by
page number. Instead, you can request the next page of results by adding the values in
the `last_indexes` object from `pagination` to the URL of your last request. For
example, when sorting by `disbursement_date`, you might receive a page of
results with the following pagination information:
```
pagination: {\n\
pages: 965191,\n\
per_page: 20,\n\
count: 19303814,\n\
last_indexes: {\n\
last_index: "230906248",\n\
last_disbursement_date: "2014-07-04"\n\
}\n\
}\n\
```
To fetch the next page of sorted results, append `last_index=230906248` and
`last_disbursement_date=2014-07-04` to the URL. We strongly advise paging through
these results by using the sort indices (defaults to sort by disbursement date, e.g. `last_disbursement_date`), otherwise
some resources may be unintentionally filtered out. This resource uses keyset pagination to improve query performance
and these indices are required to properly page through this large dataset.
Note: because the Schedule B data includes many records, counts for
large result sets are approximate; you will want to page through the records until no records are returned.
'''
SCHEDULE_B_BY_PURPOSE = '''
Schedule B receipts aggregated by disbursement purpose category. To avoid double counting, memoed items are not included.
Purpose is a combination of transaction codes, category codes and disbursement description. See [the sql function](https://github.com/fecgov/openFEC/blob/7d2c058706f1b385b2cc18d75eb3ad0a1fba9d52/data/functions/purpose.sql)
'''
SCHEDULE_C_TAG = '''
Schedule C shows all loans, endorsements and loan guarantees a committee
receives or makes.
'''
SCHEDULE_C = SCHEDULE_C_TAG + '''
The committee continues to report the loan until it is repaid.
'''
SCHEDULE_D_TAG = '''
Schedule D shows debts and obligations owed to or by the committee that are
required to be disclosed.
'''
SCHEDULE_D = SCHEDULE_D_TAG + '''
'''
SCHEDULE_E_TAG = '''
Schedule E covers the line item expenditures for independent expenditures. For example, if a super PAC
bought ads on TV to oppose a federal candidate, each ad purchase would be recorded here with
the expenditure amount, name and id of the candidate, and whether the ad supported or opposed the candidate.
An independent expenditure is an expenditure for a communication "expressly advocating the election or
defeat of a clearly identified candidate that is not made in cooperation, consultation, or concert with,
or at the request or suggestion of, a candidate, a candidate’s authorized committee, or their agents, or
a political party or its agents."
Aggregates by candidate do not include 24 and 48 hour reports. This ensures we don't double count expenditures
and the totals are more accurate. You can still find the information from 24 and 48 hour reports in
`/schedules/schedule_e/`.
'''
SCHEDULE_E = SCHEDULE_E_TAG + '''
Due to the large quantity of Schedule E filings, this endpoint is not paginated by
page number. Instead, you can request the next page of results by adding the values in
the `last_indexes` object from `pagination` to the URL of your last request. For
example, when sorting by `expenditure_amount`, you might receive a page of
results with the following pagination information:
```
"pagination": {\n\
"count": 152623,\n\
"last_indexes": {\n\
"last_index": "3023037",\n\
"last_expenditure_amount": -17348.5\n\
},\n\
"per_page": 20,\n\
"pages": 7632\n\
}\n\
```
To fetch the next page of sorted results, append `last_index=3023037` and
`last_expenditure_amount=-17348.5` to the URL. We strongly advise paging through
these results by using the sort indices (defaults to sort by expenditure date, e.g. `last_expenditure_date`), otherwise
some resources may be unintentionally filtered out. This resource uses keyset pagination to improve query performance
and these indices are required to properly page through this large dataset.
Note: because the Schedule E data includes many records, counts for
large result sets are approximate; you will want to page through the records until no records are returned.
'''
SCHEDULE_F_TAG = '''
Schedule F shows all special expenditures a national or state party committee makes in connection with
the general election campaigns of federal candidates.
'''
SCHEDULE_F = SCHEDULE_F_TAG + '''
These coordinated party expenditures do not count against the contribution limits but are subject to other limits;
these limits are detailed in Chapter 7 of the FEC Campaign Guide for Political Party Committees.
'''
SIZE_DESCRIPTION = '''
This endpoint aggregates Schedule A donations based on size:
- $200 and under\n\
- $200.01 - $499.99\n\
- $500 - $999.99\n\
- $1000 - $1999.99\n\
- $2000 +\n\
In cases where the donations are $200 or less, the results include small donations
that are reported on Schedule A, but filers are not required to itemize those small
donations, so we also add unitemized contributions. Unitemized contributions come
from the summary section of the forms and represent the total money brought in from
donors who are not reported on Schedule A and have given $200 or less.
'''
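# Illustration only: the contribution-size buckets described in SIZE_DESCRIPTION above,
# keyed by the lower bound of each range (the same values accepted by the `size` filter).
def _example_size_bucket(amount):
    if amount <= 200:
        return 0
    if amount < 500:
        return 200
    if amount < 1000:
        return 500
    if amount < 2000:
        return 1000
    return 2000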
SIZE = '''
The total of all contributions in the following ranges:
```
-0 $200 and under\n\
-200 $200.01 - $499.99\n\
-500 $500 - $999.99\n\
-1000 $1000 - $1999.99\n\
-2000 $2000 +\n\
```
Unitemized contributions are included in the `0` category.
'''
STATE_AGGREGATE = '''
Schedule A individual receipts aggregated by contributor state.
This is an aggregate of only individual contributions. To avoid double counting,
memoed items are not included. Transactions $200 and under do not have to be
itemized; if those contributions are not itemized, they will not be included in the
state totals.
'''
STATE_AGGREGATE_RECIPIENT_TOTALS = STATE_AGGREGATE + '''
These receipts are then added together by committee type for the total amount
of each type, grouped by state and cycle.
'''
API_KEY_DESCRIPTION = '''
API key for https://api.data.gov. Get one at https://api.data.gov/signup.
'''
SEARCH_TAG = '''
Search for candidates or committees by name.
'''
FILINGS_TAG = '''
Search for financial reports and other FEC documents.
'''
FILINGS = '''
All official records and reports filed by or delivered to the FEC.
Note: because the filings data includes many records, counts for large
result sets are approximate; you will want to page through the records until no records are returned.
'''
DOC_TYPE = '''
The type of document,\n\
for documents other\n\
than reports:\n\
- 2 24 Hour\n\
Contribution Notice\n\
- 4 48 Hour\n\
Contribution Notice\n\
- A Debt Settlement\n\
Statement\n\
- B Acknowledgment of\n\
Receipt of Debt\n\
Settlement\n\
Statement\n\
- C RFAI: Debt\n\
Settlement\n\
First Notice\n\
- D Commission Debt\n\
Settlement Review\n\
- E Commission Response\n\
TO Debt\n\
Settlement Request\n\
- F Administrative\n\
Termination\n\
- G Debt Settlement\n\
Plan Amendment\n\
- H Disavowal Notice\n\
- I Disavowal Response\n\
- J Conduit Report\n\
- K Termination\n\
Approval\n\
- L Repeat Non-Filer\n\
Notice\n\
- M Filing Frequency\n\
Change Notice\n\
- N Paper Amendment to\n\
Electronic Report\n\
- O Acknowledgment of\n\
Filing Frequency\n\
Change\n\
- S RFAI: Debt\n\
Settlement Second\n\
- T Miscellaneous\n\
Report TO FEC\n\
- V Repeat Violation\n\
Notice\n\
(441A OR 441B)\n\
- P Notice of\n\
Paper Filing\n\
- R F3L Filing\n\
Frequency\n\
Change Notice\n\
- Q Acknowledgment of\n\
F3L Filing \n\
Frequency Change\n\
- U Unregistered\n\
Committee Notice\n\
'''
DATES_TAG = '''
Reporting deadlines, election dates, FEC meetings, events, etc.
'''
CALENDAR_DATES = '''
Combines the election and reporting dates with Commission meetings, conferences, outreach, Advisory Opinions, rules, litigation dates and other
events into one calendar.
State and report type filtering is no longer available.
'''
CALENDAR_EXPORT = '''
Returns CSV or ICS for downloading directly into calendar applications like Google, Outlook or other applications.
Combines the election and reporting dates with Commission meetings, conferences, outreach, Advisory Opinions, rules, litigation dates and other
events into one calendar.
State filtering now applies to elections, reports and reporting periods.
Presidential pre-primary report due dates are not shown on even years.
Filers generally opt to file monthly rather than submit over 50 pre-primary election
reports. All reporting deadlines are available at /reporting-dates/ for reference.
This is [the sql function](https://github.com/fecgov/openFEC/blob/develop/data/migrations/V40__omnibus_dates.sql)
that creates the calendar.
'''
COMMUNICATION_TAG = '''
Reports of communication costs by corporations and membership organizations
from the FEC [F7 forms](http://www.fec.gov/pdf/forms/fecform7.pdf).
'''
ELECTIONEERING = '''
An electioneering communication is any broadcast, cable or satellite communication that fulfills each of the following conditions:
- The communication refers to a clearly identified federal candidate;\n\
- The communication is publicly distributed by a television station,\n\
radio station, cable television system or satellite system for a fee; and\n\
- The communication is distributed within 60 days prior\n\
to a general election or 30 days prior to a primary election to federal office.\n\
'''
COMMUNICATION_COST = '''
52 U.S.C. 30118 allows "communications by a corporation to its stockholders and
executive or administrative personnel and their families or by a labor organization
to its members and their families on any subject," including the express advocacy of
the election or defeat of any Federal candidate. The costs of such communications
must be reported to the Federal Election Commission under certain circumstances.
'''
FILER_RESOURCES = '''
Useful tools for those who file with the FEC.
Look up RAD analyst with telephone extension by committee_id.
'''
RAD_ANALYST = '''
Use this endpoint to look up the RAD Analyst for a committee.
The mission of the Reports Analysis Division (RAD) is to ensure that
campaigns and political committees file timely and accurate reports that fully disclose
their financial activities. RAD is responsible for reviewing statements and financial
reports filed by political committees participating in federal elections, providing
assistance and guidance to the committees to properly file their reports, and for taking
appropriate action to ensure compliance with the Federal Election Campaign Act (FECA).
'''
# fields and filters
# shared
LOAD_DATE = 'Date the information was loaded into the FEC systems. This can be affected by \
resetting systems and other factors; refer to receipt_date for the day that the FEC received \
the paper or electronic document. Keep in mind that paper filings take more time to process \
and there can be a lag between load_date and receipt_date. This field can be helpful to \
identify paper records that have been processed recently.'
PARTY = 'Three-letter code for the party affiliated with a candidate or committee. For example, DEM for Democratic Party and REP for Republican Party.'
PARTY_FULL = 'Party affiliated with a candidate or committee'
FORM_TYPE = 'The form where the underlying data comes from, for example, Form 1 would appear as F1:\n\
- F1 Statement of\n\
Organization\n\
- F1M Notification of\n\
Multicandidate\n\
Status\n\
- F2 Statement of Candidacy\n\
- F3 Report of Receipts and\n\
Disbursements for an \n\
Authorized Committee\n\
- F3P Report of Receipts and\n\
Disbursements by an\n\
Authorized Committee\n\
of a Candidate for\n\
The Office of President\n\
or Vice President\n\
- F3L Report of Contributions\n\
Bundled by Lobbyists/\n\
Registrants and Lobbyist/\n\
Registrant PACs\n\
- F3X Report of Receipts and\n\
Disbursements for other \n\
than an Authorized \n\
Committee\n\
- F4 Report of Receipts and \n\
Disbursements for a \n\
Committee or Organization\n\
Supporting a Nomination\n\
Convention\n\
- F5 Report of Independent \n\
Expenditures Made and \n\
Contributions Received\n\
- F6 48 Hour Notice of \n\
Contributions/Loans\n\
Received\n\
- F7 Report of Communication\n\
Costs by Corporations \n\
and Membership \n\
Organizations\n\
- F8 Debt Settlement Plan\n\
- F9 24 Hour Notice of \n\
Disbursements for \n\
Electioneering \n\
Communications\n\
- F13 Report of Donations \n\
Accepted for Inaugural\n\
Committee\n\
- F99 Miscellaneous Text\n\
- FRQ Request for Additional\n\
Information\n\
'
REPORT_TYPE = 'Name of report where the underlying data comes from:\n\
- 10D Pre-Election\n\
- 10G Pre-General\n\
- 10P Pre-Primary\n\
- 10R Pre-Run-Off\n\
- 10S Pre-Special\n\
- 12C Pre-Convention\n\
- 12G Pre-General\n\
- 12P Pre-Primary\n\
- 12R Pre-Run-Off\n\
- 12S Pre-Special\n\
- 30D Post-Election\n\
- 30G Post-General\n\
- 30P Post-Primary\n\
- 30R Post-Run-Off\n\
- 30S Post-Special\n\
- 60D Post-Convention\n\
- M1 January Monthly\n\
- M10 October Monthly\n\
- M11 November Monthly\n\
- M12 December Monthly\n\
- M2 February Monthly\n\
- M3 March Monthly\n\
- M4 April Monthly\n\
- M5 May Monthly\n\
- M6 June Monthly\n\
- M7 July Monthly\n\
- M8 August Monthly\n\
- M9 September Monthly\n\
- MY Mid-Year Report\n\
- Q1 April Quarterly\n\
- Q2 July Quarterly\n\
- Q3 October Quarterly\n\
- TER Termination Report\n\
- YE Year-End\n\
- 90S Post Inaugural\n\
Supplement\n\
- 90D Post Inaugural\n\
- 48 48 Hour Notification\n\
- 24 24 Hour Notification\n\
- M7S July Monthly/\n\
Semi-Annual\n\
- MSA Monthly Semi-Annual\n\
(MY)\n\
- MYS Monthly Year End/\n\
Semi-Annual\n\
- Q2S July Quarterly/\n\
Semi-Annual\n\
- QSA Quarterly Semi-Annual\n\
(MY)\n\
- QYS Quarterly Year End/\n\
Semi-Annual\n\
- QYE Quarterly Semi-Annual\n\
(YE)\n\
- QMS Quarterly Mid-Year/\n\
Semi-Annual\n\
- MSY Monthly Semi-Annual\n\
(YE)\n\
'
REQUEST_TYPE = 'Requests for additional information (RFAIs)\n\
sent to filers.\n\
The request type is based on\n\
the type of document filed:\n\
- 1 Statement of \n\
Organization\n\
- 2 Report of Receipts\n\
and Expenditures\n\
(Form 3 and 3X)\n\
- 3 Second Notice - Reports\n\
- 4 Request for\n\
Additional Information\n\
- 5 Informational - Reports\n\
- 6 Second Notice -\n\
Statement of Organization\n\
- 7 Failure to File\n\
- 8 From Public Disclosure\n\
- 9 From Multi\n\
Candidate Status\n\
'
REPORT_TYPE_W_EXCLUDE = 'Report type; prefix with "-" to exclude. '+REPORT_TYPE
RECEIPT_DATE = 'Date the FEC received the electronic or paper record'
STATE_GENERIC = 'US state or territory'
ZIP_CODE = 'Zip code'
#candidates
CANDIDATE_NAME = 'Name of candidate running for office'
OFFICE_FULL = 'Federal office candidate runs for: House, Senate or presidential'
OFFICE = 'Federal office candidate runs for: H, S or P'
STATE = 'US state or territory where a candidate runs for office'
YEAR = 'See records pertaining to a particular election year. The list of election years \
is based on a candidate filing a statement of candidacy (F2) for that year.'
DISTRICT = 'Two-digit US House district of the office the candidate is running for. \
Presidential, Senate and House at-large candidates will have District 00.'
CANDIDATE_STATUS = 'One-letter code explaining if the candidate is:\n\
- C present candidate\n\
- F future candidate\n\
- N not yet a candidate\n\
- P prior candidate\n\
'
LAST_F2_DATE = 'The day the FEC received the candidate\'s most recent Form 2'
FIRST_CANDIDATE_FILE_DATE = 'The day the FEC received the candidate\'s first filing. \
This is a F2 candidate registration.'
LAST_CANDIDATE_FILE_DATE = 'The day the FEC received the candidate\'s most recent filing'
INCUMBENT_CHALLENGE = "One-letter code ('I', 'C', 'O') explaining if the candidate is an incumbent, a challenger, or if the seat is open."
INCUMBENT_CHALLENGE_FULL = 'Explains if the candidate is an incumbent, a challenger, or if the seat is open.'
ACTIVE_THROUGH = 'Last year a candidate was active. This field is specific to the candidate_id so if the same person runs for another office, there may be a different record for them.'
HAS_RAISED_FUNDS = 'A boolean that describes if a candidate\'s committee has ever received any receipts for their campaign for this particular office. (Candidates have separate candidate IDs for each office.)'
FEDERAL_FUNDS_FLAG = 'A boolean that describes if a presidential candidate has accepted federal funds. The flag will be false for House and Senate candidates.'
# committees
COMMITTEE_NAME = 'The name of the committee. If a committee changes its name, \
the most recent name will be shown. Committee names are not unique. Use committee_id \
for looking up records.'
COMMITTEE_YEAR = 'A year in which the committee was active (after its original registration date \
or filing but before its expiration date)'
DESIGNATION = 'The one-letter designation\n\
code of the organization:\n\
- A authorized by\n\
a candidate\n\
- J joint fundraising\n\
committee\n\
- P principal campaign\n\
committee of a\n\
candidate\n\
- U unauthorized\n\
- B lobbyist/registrant\n\
PAC\n\
- D leadership PAC\n\
'
ORGANIZATION_TYPE = 'The one-letter\n\
code for the kind\n\
of organization:\n\
- C corporation\n\
- L labor\n\
organization\n\
- M membership\n\
organization\n\
- T trade association\n\
- V cooperative\n\
- W corporation without\n\
capital stock\n\
'
COMMITTEE_TYPE = 'The one-letter type\n\
code of the\n\
organization:\n\
- C communication\n\
cost\n\
- D delegate\n\
- E electioneering\n\
communication\n\
- H House\n\
- I independent\n\
expenditor\n\
(person or group)\n\
- N PAC -\n\
nonqualified\n\
- O independent\n\
expenditure-only\n\
(super PACs)\n\
- P presidential\n\
- Q PAC - qualified\n\
- S Senate\n\
- U single candidate\n\
independent\n\
expenditure\n\
- V PAC with\n\
non-contribution\n\
account,\n\
nonqualified\n\
- W PAC with\n\
non-contribution\n\
account,\n\
qualified\n\
- X party,\n\
nonqualified\n\
- Y party, qualified\n\
- Z national party\n\
non-federal\n\
account\n\
'
COMMITTEE_TYPE_STATE_AGGREGATE_TOTALS = COMMITTEE_TYPE + '\
- all All Committee Types\n\
- all_candidates\n\
All Candidate Committee\n\
Types (H, S, P)\n\
- all_pacs All PAC\n\
Committee Types\n\
(N, O, Q, V, W)\n\
'
PAC_PARTY_TYPE = 'The one-letter type\n\
code of a PAC/Party\n\
organization:\n\
- N PAC - nonqualified\n\
- O independent\n\
expenditure-only\n\
(super PACs)\n\
- Q PAC - qualified\n\
- V PAC with\n\
non-contribution\n\
account,\n\
nonqualified\n\
- W PAC with\n\
non-contribution\n\
account,\n\
qualified\n\
- X party,\n\
nonqualified\n\
- Y party,\n\
qualified\n\
'
TREASURER_NAME = 'Name of the Committee\'s treasurer. If multiple treasurers for the \
committee, the most recent treasurer will be shown.'
COMMITTEE_STATE = 'State of the committee\'s address as filed on the Form 1'
FIRST_FILE_DATE = 'The day the FEC received the committee\'s first filing. \
This is usually a Form 1 committee registration.'
LAST_FILE_DATE = 'The day the FEC received the committee\'s most recent filing'
LAST_F1_DATE = 'The day the FEC received the committee\'s most recent Form 1'
MEANS_FILED = 'The method used to file with the FEC, either electronic or on paper.'
# schedules
MEMO_CODE = "'X' indicates that the amount is NOT to be included in the itemization total."
# schedule A
CONTRIBUTOR_ID = 'The FEC identifier should be represented here if the contributor is registered with the FEC.'
EMPLOYER = 'Employer of contributor as reported on the committee\'s filing'
OCCUPATION = 'Occupation of contributor as reported on the committee\'s filing'
CONTRIBUTOR_NAME = 'Name of contributor'
CONTRIBUTOR_CITY = 'City of contributor'
CONTRIBUTOR_STATE = 'State of contributor'
CONTRIBUTOR_EMPLOYER = 'Employer of contributor, filers need to make an effort to gather this information'
CONTRIBUTOR_OCCUPATION = 'Occupation of contributor, filers need to make an effort to gather this information'
CONTRIBUTOR_ZIP = 'Zip code of contributor'
IS_INDIVIDUAL = 'Restrict to non-earmarked individual contributions where memo code is true. \
Filtering individuals is useful to make sure contributions are not double reported and in creating \
breakdowns of the amount of money coming from individuals.'
# schedule B
RECIPIENT_NAME = 'Name of the entity receiving the disbursement'
RECIPIENT_ID = 'The FEC identifier should be represented here if the entity receiving \
the disbursement is registered with the FEC.'
# communication cost and electioneering
SUPPORT_OPPOSE_INDICATOR = 'Explains if the money was spent in order to support or oppose a candidate or candidates. (Coded S or O for support or oppose.) This indicator applies to independent expenditures and communication costs.'
# schedule B
PURPOSE = 'Purpose of the expenditure'
# schedule E
EXPENDITURE_MAX_DATE = 'Selects all items expended by this committee before this date'
EXPENDITURE_MIN_DATE = 'Selects all items expended by this committee after this date'
EXPENDITURE_MIN_AMOUNT = 'Selects all items expended by this committee greater than this amount'
EXPENDITURE_MAX_AMOUNT = 'Selects all items expended by this committee less than this amount'
# dates
DUE_DATE = 'Date the report is due'
CREATE_DATE = 'Date the record was created'
UPDATE_DATE = 'Date the record was updated'
ELECTION_DATE = 'Date of election'
ELECTION_YEAR = 'Year of election'
#? TODO: add more categories
ELECTION_TYPE = 'Election type \n\
Convention, Primary,\n\
General, Special,\n\
Runoff etc.\n\
'
SENATE_CLASS = 'Senators serve six-year terms and each state has two senators. General elections \
are held every two years. Senate elections are staggered, so the Senate is broken up into three \
groups called classes; in a given state only one Senate seat is up for election at a time, and \
once every six years a state has no Senate election. Senators in the same class are up for \
election at the same time. This is less straightforward when a special election is held to fill \
a vacancy in the Senate; in those cases, class refers to the seat grouping and not the timing \
of the election.'
# filings
ENDING_IMAGE_NUMBER = 'An image number is a unique identifier for each page of the electronic or paper \
report. The last image number corresponds to the image number for the last page of the document.'
IMAGE_NUMBER = 'A unique identifier for each page of the electronic or paper \
report.'
# Reports and Totals
def add_period(var):
return var + ' total for the reporting period'
def add_ytd(var):
return var + ' total for the year to date'
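# Illustrative usage of the two helpers above (the input string is made up for illustration):
#   add_period('Total receipts')  -> 'Total receipts total for the reporting period'
#   add_ytd('Total receipts')     -> 'Total receipts total for the year to date'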
# shared
CASH_ON_HAND_BEGIN_PERIOD = 'Balance for the committee at the start of the two-year period'
CASH_ON_HAND_END_PERIOD = 'Ending cash balance on the most recent filing'
COVERAGE_START_DATE = 'Beginning date of the reporting period'
COVERAGE_END_DATE = 'Ending date of the reporting period'
DEBTS_OWED_BY_COMMITTEE = 'Debts owed by the committee'
DEBTS_OWED_TO_COMMITTEE = 'Debts owed to the committee'
# shared receipts
RECEIPTS = 'Anything of value (money, goods, services or property) received by a political committee'
# can't tack on period or year without being really confusing
INDIVIDUAL_ITEMIZED_CONTRIBUTIONS = 'Individual itemized contributions are from individuals whose aggregate contributions total over $200 per individual per year. Be aware, some filers choose to itemize donations $200 or less.'
INDIVIDUAL_ITEMIZED_CONTRIBUTIONS_PERIOD = 'Individual itemized contributions are from individuals whose aggregate contributions total over $200 per individual per year. This amount represents the total of these receipts for the reporting period.'
INDIVIDUAL_ITEMIZED_CONTRIBUTIONS_YTD = 'Individual itemized contributions are from individuals whose aggregate contributions total over $200 per individual per year. This amount represents the total of these receipts for the year to date.'
INDIVIDUAL_UNITEMIZED_CONTRIBUTIONS = 'Unitemized contributions are made by individuals whose aggregate contributions total $200 or less per individual per year. Be aware, some filers choose to itemize donations $200 or less and in that case those donations will appear in the itemized total.'
INDIVIDUAL_UNITEMIZED_CONTRIBUTIONS_PERIOD = 'Unitemized contributions are from individuals whose aggregate contributions total $200 or less per individual per year. This amount represents the total of these receipts for the reporting period.'
INDIVIDUAL_UNITEMIZED_CONTRIBUTIONS_YTD = 'Unitemized contributions are from individuals whose aggregate contributions total $200 or less per individual per year. This amount represents the total of these receipts for the year to date.'
POLITICAL_PARTY_COMMITTEE_CONTRIBUTIONS = 'Party committees contributions'
INDIVIDUAL_CONTRIBUTIONS = 'Individual contributions'
OTHER_POLITICAL_COMMITTEE_CONTRIBUTIONS = 'Other committees contributions'
OFFSETS_TO_OPERATING_EXPENDITURES = 'Offsets to operating expenditures'
CONTRIBUTIONS = 'Contribution'
# house senate and presidential
CANDIDATE_CONTRIBUTION = 'Candidate contributions'
OTHER_RECEIPTS = 'Other receipts'
# house senate and PAC party
NET_CONTRIBUTIONS = 'Net contributions'
# shared disbursements
DISBURSEMENTS = 'Disbursements'
REFUNDED_INDIVIDUAL_CONTRIBUTIONS = 'Individual refunds'
OPERATING_EXPENDITURES = 'Total operating expenditures'
OTHER_DISBURSEMENTS = 'Other disbursements'
REFUNDED_POLITICAL_PARTY_COMMITTEE_CONTRIBUTIONS = 'Political party refunds'
CONTRIBUTION_REFUNDS = 'Total contribution refunds'
REFUNDED_OTHER_POLITICAL_COMMITTEE_CONTRIBUTIONS = 'Other committee refunds'
#loans
LOAN_SOURCE = "Source of the loan (i.e., bank loan, brokerage account, credit card, home equity line of credit, " \
"other line of credit, or personal funds of the candidate)"
# presidential
# receipts
FEDERAL_FUNDS = 'Federal funds: Public funding of presidential elections means that qualified presidential candidates receive federal government funds to pay for the valid expenses of their political campaigns in both the primary and general elections.'
TRANSFERS_FROM_AFFILIATED_COMMITTEE = 'Transfers from affiliated committees'
LOANS_RECEIVED_FROM_CANDIDATE = 'Loans made by candidate'
OTHER_LOANS_RECEIVED = 'Other loans'
LOANS_RECEIVED = 'Total loans received'
OFFSETS_TO_FUNDRAISING_EXPENDITURES = 'Fundraising offsets'
OFFSETS_TO_LEGAL_ACCOUNTING = 'Legal and accounting offsets'
TOTAL_OFFSETS_TO_OPERATING_EXPENDITURES = 'Total offsets'
# disbursements
TRANSFERS_TO_OTHER_AUTHORIZED_COMMITTEE = 'Transfers to authorized committees'
REPAYMENTS_LOANS_MADE_BY_CANDIDATE = 'Candidate loan repayments'
REPAYMENTS_OTHER_LOANS = 'Other loan repayments'
LOAN_REPAYMENTS_MADE = 'Total loan repayments'
# House Senate
# receipts
TRANSFERS_FROM_OTHER_AUTHORIZED_COMMITTEE = 'Transfers from authorized committees'
LOANS_MADE_BY_CANDIDATE = 'Loans made by candidate'
ALL_OTHER_LOANS = 'Other loans'
LOANS = 'Total loans received'
# disbursements
NET_OPERATING_EXPENDITURES = 'Net operating expenditures'
TRANSFERS_TO_OTHER_AUTHORIZED_COMMITTEE = 'Transfers to authorized committees'
LOAN_REPAYMENTS_CANDIDATE_LOANS = 'Candidate loan repayments'
LOAN_REPAYMENTS_OTHER_LOANS = 'Other loan repayments'
OTHER_DISBURSEMENTS = 'Other disbursements'
# PAC and Party
# Receipts
TRANSFERS_FROM_AFFILIATED_PARTY = 'Transfers from affiliated committees'
ALL_LOANS_RECEIVED = 'Loans received'
LOAN_REPAYMENTS_RECEIVED = 'Loan repayments received'
FED_CANDIDATE_CONTRIBUTION_REFUNDS = 'Candidate refunds'
OTHER_FED_RECEIPTS = 'Other receipts'
TRANSFERS_FROM_NONFED_ACCOUNT = 'Non-federal transfers'
TRANSFERS_FROM_NONFED_LEVIN = 'Levin funds'
TRANSFERS_FROM_NONFED_ACCOUNT = 'Total non-federal transfers'
FED_RECEIPTS = 'Total federal receipts'
# disbursement
SHARED_FED_OPERATING_EXPENDITURES = 'Federal allocated operating expenditures'
SHARED_NONFED_OPERATING_EXPENDITURES = 'Non-federal operating expenditures'
OTHER_FED_OPERATING_EXPENDITURES = 'Other federal operating expenditures'
NET_OPERATING_EXPENDITURES = 'Net operating expenditures'
TRANSFERS_TO_AFFILIATED_COMMITTEE = 'Transfers to affiliated committees'
FED_CANDIDATE_COMMITTEE_CONTRIBUTIONS = 'Contributions to other federal committees'
INDEPENDENT_EXPENDITURES = 'Independent expenditures'
COORDINATED_EXPENDITURES_BY_PARTY_COMMITTEE = 'Coordinated party expenditures'
LOANS_MADE = 'Loans made'
LOAN_REPAYMENTS_MADE = 'Loan repayments made'
SHARED_FED_ACTIVITY = 'Allocated federal election activity - federal share'
ALLOCATED_FEDERAL_ELECTION_LEVIN_SHARE = 'Allocated federal election activity - Levin share'
NON_ALLOCATED_FED_ELECTION_ACTIVITY = 'Federal election activity - federal only'
FED_ELECTION_ACTIVITY = 'Total federal election activity'
FED_DISBURSEMENTS = 'Total federal disbursements'
# calendar
CATEGORY = '''
Each type of event has a calendar category with an integer id. Options are: Open Meetings: 32, Executive Sessions: 39, Public Hearings: 40,
Conferences: 33, Roundtables: 34, Election Dates: 36, Federal Holidays: 37, FEA Periods: 38, Commission Meetings: 20,
Reporting Deadlines: 21, Conferences and Outreach: 22, AOs and Rules: 23, Other: 24, Quarterly: 25, Monthly: 26,
Pre and Post-Elections: 27, EC Periods: 28, and IE Periods: 29
'''
CAL_STATE = 'The state field only applies to election dates and reporting deadlines; reporting periods and all other dates do not have an array of states to filter on'
CAL_DESCRIPTION = 'Brief description of event'
SUMMARY = 'Longer description of event'
EVENT_ID = 'A unique ID for an event. Useful for downloading a single event to your calendar. This ID is not a permanent, persistent ID.'
# efiling
EFILING_TAG = '''
Efiling endpoints provide real-time campaign finance data for electronic filers.
These endpoints are perfect for watching filings roll in when you want to know the latest information. Efiling endpoints
only contain the most recent two years worth of data and don't contain the processed and coded data that
you can find on the other endpoints. Those endpoints are better for in-depth analysis.
Senate candidates and committees are required to file by paper. Other committees that raise and spend less than $50,000
in a calendar year can choose whether to file electronically or by paper.
'''
EFILING_TAG += WIP_TAG
EFILE_FILES = 'Basic information about electronic files coming into the FEC, posted as they are received.'
FILE_NUMBER = 'Filing ID number'
AMENDMENT_CHAIN = '''
The first value in the chain is the original filing. The ordering in the chain reflects the order the
amendments were filed up to the amendment being viewed.
'''
AMENDMENT_INDICATOR = '''
-N new\n\
-A amendment\n\
-T terminated\n\
-C consolidated\n\
-M multi-candidate\n\
-S secondary\n\n\
Null might be\n\
new or amendment.\n\
If amendment\n\
indicator is null\n\
and the filings\n\
is the first\n\
or first in a\n\
chain treat it\n\
as if it were new.\n\
If it is not the\n\
first or first\n\
in a chain then\n\
treat the filing\n\
as an amendment.\n\
'''
AMENDED_BY = '''
If this report has been amended, this field gives the file_number of the report that should be used. For example,
if a report is amended multiple times, the first report and the first amendment will have the file_number of the final amended
report in the amended_by field and the final report will have no id in the amended_by field.
'''
AMENDS_FILE = '''
For amendments, this file_number is the file_number of the previous report that is being amended. See amended_by
for the most recent version of the report.
'''
AMENDMENT_NUMBER = '''
Number of times the report has been amended.
'''
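# Illustrative example of the amendment fields above (hypothetical file numbers):
# if original filing 1146890 is amended by 1147822 and then by 1148900, then
#   - amendment_chain on the final filing is [1146890, 1147822, 1148900]
#   - amended_by is 1148900 for both 1146890 and 1147822, and empty on the final report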
EFILE_REPORTS = '''
Key financial data reported periodically by committees as they are reported. This feed includes summary
information from the House F3 reports, the presidential F3p reports and the PAC and party
F3x reports.
Generally, committees file reports on a quarterly or monthly basis, but
some must also submit a report 12 days before primary elections. Therefore, during the primary
season, the period covered by this file may be different for different committees. These totals
also incorporate any changes made by committees, if any report covering the period is amended.
'''
EFILE_REPORTS += WIP_TAG
MIN_FILTER = 'Filter for all amounts greater than a value.'
MAX_FILTER = 'Filter for all amounts less than a value.'
ENTITY_RECEIPTS_TOTLAS = '''
Provides cumulative receipt totals by entity type, over a two year cycle. Totals are adjusted to avoid double counting.
This is [the sql](https://github.com/fecgov/openFEC/blob/develop/data/migrations/V41__large_aggregates.sql) that creates these calculations.
'''
ENTITY_DISBURSEMENTS_TOTLAS = '''
Provides cumulative disbursement totals by entity type, over a two year cycle. Totals are adjusted to avoid double counting.
This is [the sql](https://github.com/fecgov/openFEC/blob/develop/data/migrations/V41__large_aggregates.sql) that creates these calculations.
'''
# Audit api
AUDIT = '''
The agency’s monitoring process may detect potential violations through a review of a committee’s reports or through a
Commission audit. By law, all enforcement cases must remain confidential until they’re closed.
The Commission is required by law to audit Presidential campaigns that accept public funds. In addition, the Commission
audits a committee when it appears not to have met the threshold requirements for substantial compliance with the Federal
Election Campaign Act. The audit determines whether the committee complied with limitations, prohibitions and disclosure
requirements.
These endpoints contain Final Audit Reports approved by the Commission since inception.
'''
#endpoint: audit-case
AUDIT_CASE = '''
This endpoint contains Final Audit Reports approved by the Commission since inception.
The search can be based on information about the audited committee (Name, FEC ID Number, Type, \n\
Election Cycle) or the issues covered in the report.
'''
#endpoint: audit-primary-category
AUDIT_PRIMARY_CATEGORY = '''
This lists the options for the primary categories available in the /audit-search/ endpoint.
'''
#endpoint: audit-category
AUDIT_CATEGORY = '''
This lists the options for the categories and subcategories available in the /audit-search/ endpoint.
'''
AUDIT_ID = '''
The audit issue. Each subcategory has a unique ID
'''
AUDIT_CASE_ID = '''
Primary/foreign key for audit tables
'''
PRIMARY_CATEGORY_ID = '''
Audit category ID (table PK)
'''
PRIMARY_CATEGORY_NAME = 'Primary Audit Category\n\
- No Findings or\n\
Issues/Not a Committee\n\
- Net Outstanding Campaign\n\
/Convention Expenditures/\n\
Obligations\n\
- Payments/Disgorgements\n\
- Allocation Issues\n\
- Prohibited Contributions\n\
- Disclosure\n\
- Recordkeeping\n\
- Repayment to US Treasury\n\
- Other\n\
- Misstatement of \n\
Financial Activity\n\
- Excessive Contributions\n\
- Failure to File\n\
Reports/Schedules/Notices\n\
- Loans\n\
- Referred Findings Not Listed\n\
'
SUB_CATEGORY_ID = '''
The finding id of an audit. Findings are a category of broader issues. Each category has a unique ID.
'''
SUB_CATEGORY_NAME = '''
The audit issue. Each subcategory has a unique ID.
'''
AUDIT_TIER = '''
1 specifies a primary category and 2 specifies a subcategory
'''
COMMITTEE_DESCRIPTION = 'Type of committee:\n\
- H or S - Congressional\n\
- P - Presidential\n\
- X or Y or Z - Party\n\
- N or Q - PAC\n\
- I - Independent expenditure\n\
- O - Super PAC \n\
'
FAR_RELEASE_DATE = '''
Final audit report release date
'''
LINK_TO_REPORT = '''
URL for retrieving the PDF document
'''
```
#### File: openFEC/webservices/filters.py
```python
import sqlalchemy as sa
from webservices import utils
from webservices import exceptions
from webservices.common import models
def is_exclude_arg(arg):
# Handle string and int excludes
return str(arg).startswith('-')
def parse_exclude_arg(arg):
# Integers will come in as negative and strings will start with "-"
if isinstance(arg, int):
return abs(arg)
else:
return arg[1:]
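# Illustrative behaviour of the two helpers above:
#   is_exclude_arg('-C')     -> True
#   is_exclude_arg(-12)      -> True   (str(-12) starts with '-')
#   parse_exclude_arg('-C')  -> 'C'
#   parse_exclude_arg(-12)   -> 12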
def filter_match(query, kwargs, fields):
for key, column in fields:
if kwargs.get(key) is not None:
if is_exclude_arg(kwargs[key]):
query = query.filter(sa.or_(column != parse_exclude_arg(kwargs[key]),
column == None))
else:
query = query.filter(column == kwargs[key])
return query
def filter_multi(query, kwargs, fields):
for key, column in fields:
if kwargs.get(key):
# handle combination exclude/include lists
exclude_list = [parse_exclude_arg(value) for value in kwargs[key] if is_exclude_arg(value)]
include_list = [value for value in kwargs[key] if not is_exclude_arg(value)]
if exclude_list:
query = query.filter(sa.or_(column.notin_(exclude_list),
column == None))
if include_list:
query = query.filter(column.in_(include_list))
return query
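# Illustrative example for filter_multi (the field mapping below is hypothetical):
#   fields = [('committee_type', Committee.committee_type)]
#   kwargs = {'committee_type': ['P', '-C']}
# adds the filters:
#   (committee_type NOT IN ('C') OR committee_type IS NULL) AND committee_type IN ('P')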
def filter_range(query, kwargs, fields):
for (min_key, max_key), column in fields:
if kwargs.get(min_key) is not None:
query = query.filter(column >= kwargs[min_key])
if kwargs.get(max_key) is not None:
query = query.filter(column <= kwargs[max_key])
return query
def filter_fulltext(query, kwargs, fields):
for key, column in fields:
if kwargs.get(key):
exclude_list = [parse_exclude_arg(value) for value in kwargs[key] if is_exclude_arg(value)]
include_list = [value for value in kwargs[key] if not is_exclude_arg(value)]
if exclude_list:
filters = [
sa.not_(column.match(utils.parse_fulltext(value)))
for value in exclude_list
]
query = query.filter(sa.and_(*filters))
if include_list:
filters = [
column.match(utils.parse_fulltext(value))
for value in include_list
]
query = query.filter(sa.or_(*filters))
return query
def filter_contributor_type(query, column, kwargs):
if kwargs.get('contributor_type') == ['individual']:
return query.filter(column == 'IND')
if kwargs.get('contributor_type') == ['committee']:
return query.filter(sa.or_(column != 'IND', column == None)) # noqa
return query
def filter_election(query, kwargs, candidate_column, cycle_column=None, year_column=None):
if not kwargs.get('office'):
return query
utils.check_election_arguments(kwargs)
cycle = get_cycle(kwargs)
query = query.join(
models.CandidateHistory,
candidate_column == models.CandidateHistory.candidate_id,
).filter(
models.CandidateHistory.two_year_period == cycle,
models.CandidateHistory.office == kwargs['office'][0].upper(),
)
if kwargs.get('state'):
query = query.filter(models.CandidateHistory.state == kwargs['state'])
if kwargs.get('district'):
query = query.filter(models.CandidateHistory.district == kwargs['district'])
return query
def get_cycle(kwargs):
if isinstance(kwargs['cycle'], list):
if len(kwargs['cycle']) != 1:
raise exceptions.ApiError(
'Must include exactly one argument "cycle"',
status_code=422,
)
return kwargs['cycle'][0]
return kwargs['cycle']
```
#### File: webservices/legal_docs/index_management.py
```python
import logging
import elasticsearch
from webservices import utils
logger = logging.getLogger(__name__)
MAPPINGS = {
"_default_": {
"properties": {
"sort1": {
"type": "integer",
"include_in_all": False
},
"sort2": {
"type": "integer",
"include_in_all": False
},
}
},
"citations": {
"properties": {
"citation_type": {
"type": "string",
"index": "not_analyzed"
},
"citation_text": {
"type": "string",
"index": "not_analyzed"
}
}
},
"murs": {
"properties": {
"no": {
"type": "string",
"index": "not_analyzed"
},
"doc_id": {
"type": "string",
"index": "no"
},
"mur_type": {
"type": "string"
},
"name": {
"type": "string",
"analyzer": "english"
},
"election_cycles": {
"type": "long"
},
"open_date": {
"type": "date",
"format": "dateOptionalTime"
},
"close_date": {
"type": "date",
"format": "dateOptionalTime"
},
"url": {
"type": "string",
"index": "no"
},
"subjects": {
"type": "string"
},
"commission_votes": {
"properties": {
"text": {
"type": "string"
},
"vote_date": {
"type": "date",
"format": "dateOptionalTime"
}
}
},
"dispositions": {
"properties": {
"citations": {
"properties": {
"text": {
"type": "string"
},
"title": {
"type": "string"
},
"type": {
"type": "string"
},
"url": {
"type": "string"
}
}
},
"disposition": {
"type": "string",
"index": "not_analyzed"
},
"penalty": {
"type": "double"
},
"respondent": {
"type": "string"
}
}
},
"documents": {
"type": "nested",
"properties": {
"category": {
"type": "string",
"index": "not_analyzed"
},
"description": {
"type": "string"
},
"document_date": {
"type": "date",
"format": "dateOptionalTime"
},
"document_id": {
"type": "long",
"index": "no"
},
"length": {
"type": "long",
"index": "no"
},
"text": {
"type": "string"
},
"url": {
"type": "string",
"index": "no"
}
}
},
"participants": {
"properties": {
"citations": {
"type": "object"
},
"name": {
"type": "string"
},
"role": {
"type": "string"
}
}
},
"respondents": {
"type": "string"
}
}
},
"statutes": {
"properties": {
"doc_id": {
"type": "string",
"index": "no"
},
"name": {
"type": "string",
"analyzer": "english"
},
"text": {
"type": "string",
"analyzer": "english"
},
"no": {
"type": "string",
"index": "not_analyzed"
},
"title": {
"type": "string"
},
"chapter": {
"type": "string"
},
"subchapter": {
"type": "string"
},
"url": {
"type": "string",
"index": "no"
}
}
},
"regulations": {
"properties": {
"doc_id": {
"type": "string",
"index": "no"
},
"name": {
"type": "string",
"analyzer": "english"
},
"text": {
"type": "string",
"analyzer": "english"
},
"no": {
"type": "string",
"index": "not_analyzed"
},
"url": {
"type": "string",
"index": "no"
}
}
},
"advisory_opinions": {
"properties": {
"no": {
"type": "string",
"index": "not_analyzed"
},
"name": {
"type": "string",
"analyzer": "english"
},
"summary": {
"type": "string",
"analyzer": "english"
},
"issue_date": {
"type": "date",
"format": "dateOptionalTime"
},
"is_pending": {
"type": "boolean"
},
"status": {
"type": "string"
},
"ao_citations": {
"properties": {
"name": {
"type": "string"
},
"no": {
"type": "string"
}
}
},
"aos_cited_by": {
"properties": {
"name": {
"type": "string"
},
"no": {
"type": "string"
}
}
},
"statutory_citations": {
"type": "nested",
"properties": {
"section": {
"type": "long"
},
"title": {
"type": "long"
}
}
},
"regulatory_citations": {
"type": "nested",
"properties": {
"part": {
"type": "long"
},
"section": {
"type": "long"
},
"title": {
"type": "long"
}
}
},
"requestor_names": {
"type": "string"
},
"requestor_types": {
"type": "string",
"index": "not_analyzed"
},
"documents": {
"type": "nested",
"properties": {
"document_id": {
"type": "long",
"index": "no"
},
"category": {
"type": "string",
"index": "not_analyzed"
},
"description": {
"type": "string"
},
"date": {
"type": "date",
"format": "dateOptionalTime"
},
"text": {
"type": "string"
},
"url": {
"type": "string",
"index": "no"
}
}
}
}
}
}
ANALYZER_SETTINGS = {
"analysis": {"analyzer": {"default": {"type": "english"}}}
}
def create_docs_index():
"""
Initialize Elasticsearch for storing legal documents.
Create the `docs` index, and set up the aliases `docs_index` and `docs_search`
to point to the `docs` index. If the `docs` index already exists, delete it.
"""
es = utils.get_elasticsearch_connection()
try:
logger.info("Delete index 'docs'")
es.indices.delete('docs')
except elasticsearch.exceptions.NotFoundError:
pass
try:
logger.info("Delete index 'docs_index'")
es.indices.delete('docs_index')
except elasticsearch.exceptions.NotFoundError:
pass
logger.info("Create index 'docs'")
es.indices.create('docs', {
"mappings": MAPPINGS,
"settings": ANALYZER_SETTINGS,
"aliases": {
'docs_index': {},
'docs_search': {}
}
})
def create_archived_murs_index():
"""
Initialize Elasticsearch for storing archived MURs.
If the `archived_murs` index already exists, delete it.
Create the `archived_murs` index.
Set up the alias `archived_murs_index` to point to the `archived_murs` index.
Set up the alias `docs_search` to point to the `archived_murs` index, allowing the
legal search to work across current and archived MURs.
"""
es = utils.get_elasticsearch_connection()
try:
logger.info("Delete index 'archived_murs'")
es.indices.delete('archived_murs')
except elasticsearch.exceptions.NotFoundError:
pass
logger.info("Create index 'archived_murs' with aliases 'docs_search' and 'archived_murs_index'")
es.indices.create('archived_murs', {
"mappings": MAPPINGS,
"settings": ANALYZER_SETTINGS,
"aliases": {
'archived_murs_index': {},
'docs_search': {}
}
})
def delete_docs_index():
"""
Delete index `docs`.
This is usually done in preparation for restoring indexes from a snapshot backup.
"""
es = utils.get_elasticsearch_connection()
try:
logger.info("Delete index 'docs'")
es.indices.delete('docs')
except elasticsearch.exceptions.NotFoundError:
pass
def create_staging_index():
"""
Create the index `docs_staging`.
Move the alias docs_index to point to `docs_staging` instead of `docs`.
"""
es = utils.get_elasticsearch_connection()
try:
logger.info("Delete index 'docs_staging'")
es.indices.delete('docs_staging')
except elasticsearch.exceptions.NotFoundError:
pass
logger.info("Create index 'docs_staging'")
es.indices.create('docs_staging', {
"mappings": MAPPINGS,
"settings": ANALYZER_SETTINGS,
})
logger.info("Move alias 'docs_index' to point to 'docs_staging'")
es.indices.update_aliases(body={"actions": [
{"remove": {"index": 'docs', "alias": 'docs_index'}},
{"add": {"index": 'docs_staging', "alias": 'docs_index'}}
]})
def restore_from_staging_index():
"""
A 4-step process:
1. Move the alias docs_search to point to `docs_staging` instead of `docs`.
2. Reinitialize the index `docs`.
3. Reindex `docs_staging` to `docs`
4. Move `docs_index` and `docs_search` aliases to point to the `docs` index.
Delete index `docs_staging`.
"""
es = utils.get_elasticsearch_connection()
logger.info("Move alias 'docs_search' to point to 'docs_staging'")
es.indices.update_aliases(body={"actions": [
{"remove": {"index": 'docs', "alias": 'docs_search'}},
{"add": {"index": 'docs_staging', "alias": 'docs_search'}}
]})
logger.info("Delete and re-create index 'docs'")
es.indices.delete('docs')
es.indices.create('docs', {
"mappings": MAPPINGS,
"settings": ANALYZER_SETTINGS
})
logger.info("Reindex all documents from index 'docs_staging' to index 'docs'")
body = {
"source": {
"index": "docs_staging",
},
"dest": {
"index": "docs"
}
}
es.reindex(body=body, wait_for_completion=True, request_timeout=1500)
logger.info("Move aliases 'docs_index' and 'docs_search' to point to 'docs'")
es.indices.update_aliases(body={"actions": [
{"remove": {"index": 'docs_staging', "alias": 'docs_index'}},
{"remove": {"index": 'docs_staging', "alias": 'docs_search'}},
{"add": {"index": 'docs', "alias": 'docs_index'}},
{"add": {"index": 'docs', "alias": 'docs_search'}}
]})
logger.info("Delete index 'docs_staging'")
es.indices.delete('docs_staging')
def move_archived_murs():
'''
Move archived MURs from `docs` index to `archived_murs_index`
This should only need to be run once.
Once archived MURs are on their own index, we will be able to
re-index current legal docs after a schema change much more quickly.
'''
es = utils.get_elasticsearch_connection()
body = {
"source": {
"index": "docs",
"type": "murs",
"query": {
"match": {
"mur_type": "archived"
}
}
},
"dest": {
"index": "archived_murs"
}
}
logger.info("Copy archived MURs from 'docs' index to 'archived_murs' index")
es.reindex(body=body, wait_for_completion=True, request_timeout=1500)
```
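A minimal usage sketch of the staging workflow described in the docstrings above. The module path follows the file header, and the document-loading step is a placeholder; this driver is an assumption, not part of the original file:

```python
# Hypothetical driver for the staging/restore workflow in index_management.py.
from webservices.legal_docs import index_management

# Point the 'docs_index' alias at a fresh 'docs_staging' index.
index_management.create_staging_index()

# ... reload legal documents here; writers should target the 'docs_index' alias ...

# Reindex 'docs_staging' back into 'docs' and move both aliases back to 'docs'.
index_management.restore_from_staging_index()
```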
#### File: webservices/resources/elections.py
```python
import sqlalchemy as sa
from sqlalchemy import cast, Integer
from flask_apispec import doc, marshal_with
from webservices import args
from webservices import docs
from webservices import utils
from webservices import filters
from webservices import schemas
from webservices.utils import use_kwargs
from webservices.common.models import (
db, CandidateHistory, CandidateCommitteeLink,
CommitteeTotalsPresidential, CommitteeTotalsHouseSenate,
ElectionResult, ElectionsList, ZipsDistricts, ScheduleEByCandidate,
)
office_totals_map = {
'president': CommitteeTotalsPresidential,
'senate': CommitteeTotalsHouseSenate,
'house': CommitteeTotalsHouseSenate,
}
office_args_map = {
'house': ['state', 'district'],
'senate': ['state'],
}
def cycle_length(elections):
return sa.case(
[
(elections.c.office == 'P', 4),
(elections.c.office == 'S', 6),
(elections.c.office == 'H', 2),
]
)
@doc(
description=docs.ELECTION_SEARCH,
tags=['financial']
)
class ElectionsListView(utils.Resource):
model = ElectionsList
schema = schemas.ElectionsListSchema
page_schema = schemas.ElectionsListPageSchema
filter_multi_fields = [
('cycle', ElectionsList.cycle),
]
@use_kwargs(args.paging)
@use_kwargs(args.elections_list)
@use_kwargs(args.make_multi_sort_args(default=['sort_order', 'district',]))
@marshal_with(schemas.ElectionsListPageSchema())
def get(self, **kwargs):
query = self._get_elections(kwargs)
return utils.fetch_page(query, kwargs, model=ElectionsList, multi=True)
def _get_elections(self, kwargs):
"""Get elections from ElectionsList model."""
query = db.session.query(ElectionsList)
if kwargs.get('office'):
values = [each[0].upper() for each in kwargs['office']]
query = query.filter(ElectionsList.office.in_(values))
if kwargs.get('state'):
query = query.filter(
sa.or_(
ElectionsList.state.in_(kwargs['state']),
ElectionsList.office == 'P',
)
)
if kwargs.get('district'):
query = query.filter(
sa.or_(
ElectionsList.district.in_(kwargs['district']),
ElectionsList.office.in_(['P', 'S']),
),
)
if kwargs.get('zip'):
query = self._filter_zip(query, kwargs)
return filters.filter_multi(query, kwargs, self.filter_multi_fields)
def _filter_zip(self, query, kwargs):
"""Filter query by zip codes."""
districts = db.session.query(ZipsDistricts).filter(
cast(ZipsDistricts.zip_code, Integer).in_(kwargs['zip']),
ZipsDistricts.active == 'Y'
).subquery()
return query.join(
districts,
sa.or_(
# House races from matching states and districts
sa.and_(
ElectionsList.district == districts.c['district'],
ElectionsList.state == districts.c['state_abbrevation'],
),
# Senate and presidential races from matching states
sa.and_(
sa.or_(
ElectionsList.district == '00'
),
ElectionsList.state.in_([districts.c['state_abbrevation'], 'US'])
),
)
)
@doc(
description=docs.ELECTIONS,
tags=['financial']
)
class ElectionView(utils.Resource):
@use_kwargs(args.paging)
@use_kwargs(args.elections)
@use_kwargs(args.make_sort_args(default='-total_receipts'))
@marshal_with(schemas.ElectionPageSchema())
def get(self, **kwargs):
query = self._get_records(kwargs)
return utils.fetch_page(query, kwargs, cap=0)
def _get_records(self, kwargs):
utils.check_election_arguments(kwargs)
totals_model = office_totals_map[kwargs['office']]
pairs = self._get_pairs(totals_model, kwargs).subquery()
aggregates = self._get_aggregates(pairs).subquery()
outcomes = self._get_outcomes(kwargs).subquery()
latest = self._get_latest(pairs).subquery()
return db.session.query(
aggregates,
latest,
sa.case(
[(outcomes.c.cand_id != None, True)], # noqa
else_=False,
).label('won'),
).outerjoin(
latest,
aggregates.c.candidate_id == latest.c.candidate_id,
).outerjoin(
outcomes,
aggregates.c.candidate_id == outcomes.c.cand_id,
).distinct()
def _get_pairs(self, totals_model, kwargs):
pairs = CandidateHistory.query.with_entities(
CandidateHistory.candidate_id,
CandidateHistory.name,
CandidateHistory.party_full,
CandidateHistory.incumbent_challenge_full,
CandidateHistory.office,
CandidateHistory.two_year_period,
CandidateHistory.candidate_election_year,
CandidateCommitteeLink.committee_id,
totals_model.receipts,
totals_model.disbursements,
totals_model.last_cash_on_hand_end_period.label('cash_on_hand_end_period'),
totals_model.coverage_end_date,
)
pairs = join_candidate_totals(pairs, kwargs, totals_model)
pairs = filter_candidate_totals(pairs, kwargs, totals_model)
return pairs
def _get_latest(self, pairs):
latest = db.session.query(
pairs.c.cash_on_hand_end_period,
).distinct(
pairs.c.candidate_id,
pairs.c.cmte_id,
).order_by(
pairs.c.candidate_id,
pairs.c.cmte_id,
sa.desc(pairs.c.two_year_period),
).subquery()
return db.session.query(
latest.c.candidate_id,
sa.func.sum(sa.func.coalesce(latest.c.cash_on_hand_end_period,0.0)).label('cash_on_hand_end_period'),
).group_by(
latest.c.candidate_id,
)
def _get_aggregates(self, pairs):
return db.session.query(
pairs.c.candidate_id,
pairs.c.candidate_election_year,
sa.func.max(pairs.c.name).label('candidate_name'),
sa.func.max(pairs.c.party_full).label('party_full'),
sa.func.max(pairs.c.incumbent_challenge_full).label('incumbent_challenge_full'),
sa.func.max(pairs.c.office).label('office'),
sa.func.sum(sa.func.coalesce(pairs.c.receipts, 0.0)).label('total_receipts'),
sa.func.sum(sa.func.coalesce(pairs.c.disbursements, 0.0)).label('total_disbursements'),
sa.func.sum(sa.func.coalesce(pairs.c.cash_on_hand_end_period, 0.0)).label('cash_on_hand_end_period'),
sa.func.array_agg(sa.distinct(pairs.c.cmte_id)).label('committee_ids'),
sa.func.max(pairs.c.coverage_end_date).label('coverage_end_date'),
).group_by(
pairs.c.candidate_id,
pairs.c.candidate_election_year
)
def _get_outcomes(self, kwargs):
return db.session.query(
ElectionResult.cand_id
).filter(
ElectionResult.election_yr == kwargs['cycle'],
ElectionResult.cand_office == kwargs['office'][0].upper(),
ElectionResult.cand_office_st == (kwargs.get('state', 'US')),
ElectionResult.cand_office_district == (kwargs.get('district', '00')),
)
@doc(
description=docs.ELECTION_SEARCH,
tags=['financial']
)
class ElectionSummary(utils.Resource):
@use_kwargs(args.elections)
@marshal_with(schemas.ElectionSummarySchema())
def get(self, **kwargs):
utils.check_election_arguments(kwargs)
aggregates = self._get_aggregates(kwargs).subquery()
expenditures = self._get_expenditures(kwargs).subquery()
return db.session.query(
aggregates.c.count,
aggregates.c.receipts,
aggregates.c.disbursements,
expenditures.c.independent_expenditures,
).first()._asdict()
def _get_aggregates(self, kwargs):
totals_model = office_totals_map[kwargs['office']]
aggregates = CandidateHistory.query.with_entities(
sa.func.count(sa.distinct(CandidateHistory.candidate_id)).label('count'),
sa.func.sum(totals_model.receipts).label('receipts'),
sa.func.sum(totals_model.disbursements).label('disbursements'),
)
aggregates = join_candidate_totals(aggregates, kwargs, totals_model)
aggregates = filter_candidate_totals(aggregates, kwargs, totals_model)
return aggregates
def _get_expenditures(self, kwargs):
expenditures = db.session.query(
sa.func.sum(ScheduleEByCandidate.total).label('independent_expenditures'),
).select_from(
CandidateHistory
).join(
ScheduleEByCandidate,
sa.and_(
CandidateHistory.candidate_id == ScheduleEByCandidate.candidate_id,
ScheduleEByCandidate.cycle == kwargs['cycle'],
),
)
expenditures = filter_candidates(expenditures, kwargs)
return expenditures
election_durations = {
'senate': 6,
'president': 4,
'house': 2,
}
def join_candidate_totals(query, kwargs, totals_model):
return query.outerjoin(
CandidateCommitteeLink,
sa.and_(
CandidateHistory.candidate_id == CandidateCommitteeLink.candidate_id,
CandidateHistory.two_year_period == CandidateCommitteeLink.fec_election_year,
)
).outerjoin(
totals_model,
sa.and_(
CandidateCommitteeLink.committee_id == totals_model.committee_id,
CandidateCommitteeLink.fec_election_year == totals_model.cycle,
)
)
def filter_candidates(query, kwargs):
duration = (
election_durations.get(kwargs['office'], 2)
if kwargs.get('election_full')
else 2
)
query = query.filter(
CandidateHistory.two_year_period <= kwargs['cycle'],
CandidateHistory.two_year_period > (kwargs['cycle'] - duration),
# CandidateHistory.cycles.any(kwargs['cycle']),
CandidateHistory.candidate_election_year + (CandidateHistory.candidate_election_year % 2) == kwargs['cycle'],
CandidateHistory.office == kwargs['office'][0].upper(),
)
if kwargs.get('state'):
query = query.filter(CandidateHistory.state == kwargs['state'])
if kwargs.get('district'):
query = query.filter(CandidateHistory.district == kwargs['district'])
return query
def filter_candidate_totals(query, kwargs, totals_model):
query = filter_candidates(query, kwargs)
query = query.filter(
CandidateHistory.candidate_inactive == False, # noqa
# CandidateCommitteeLink.committee_designation.in_(['P', 'A']),
).distinct()
return query
```
#### File: webservices/resources/sched_b.py
```python
import sqlalchemy as sa
from flask_apispec import doc
from webservices import args
from webservices import docs
from webservices import utils
from webservices import schemas
from webservices.common import models
from webservices.common import views
from webservices.common.views import ItemizedResource
@doc(
tags=['disbursements'],
description=docs.SCHEDULE_B,
)
class ScheduleBView(ItemizedResource):
model = models.ScheduleB
schema = schemas.ScheduleBSchema
page_schema = schemas.ScheduleBPageSchema
@property
def year_column(self):
return self.model.two_year_transaction_period
@property
def index_column(self):
return self.model.sub_id
filter_multi_fields = [
('image_number', models.ScheduleB.image_number),
('committee_id', models.ScheduleB.committee_id),
('recipient_city', models.ScheduleB.recipient_city),
('recipient_state', models.ScheduleB.recipient_state),
('recipient_committee_id', models.ScheduleB.recipient_committee_id),
('disbursement_purpose_category', models.ScheduleB.disbursement_purpose_category),
]
filter_match_fields = [
('two_year_transaction_period', models.ScheduleB.two_year_transaction_period),
]
filter_fulltext_fields = [
('recipient_name', models.ScheduleB.recipient_name_text),
('disbursement_description', models.ScheduleB.disbursement_description_text),
]
filter_range_fields = [
(('min_date', 'max_date'), models.ScheduleB.disbursement_date),
(('min_amount', 'max_amount'), models.ScheduleB.disbursement_amount),
(('min_image_number', 'max_image_number'), models.ScheduleB.image_number),
]
@property
def args(self):
return utils.extend(
args.itemized,
args.schedule_b,
args.make_seek_args(),
args.make_sort_args(
default='-disbursement_date',
validator=args.OptionValidator(['disbursement_date', 'disbursement_amount']),
)
)
def build_query(self, **kwargs):
query = super(ScheduleBView, self).build_query(**kwargs)
query = query.options(sa.orm.joinedload(models.ScheduleB.committee))
query = query.options(sa.orm.joinedload(models.ScheduleB.recipient_committee))
# might be worth factoring these out into the filter script
if kwargs.get('sub_id'):
query = query.filter_by(sub_id=int(kwargs.get('sub_id')))
if kwargs.get('line_number'):
if len(kwargs.get('line_number').split('-')) == 2:
form, line_no = kwargs.get('line_number').split('-')
query = query.filter_by(filing_form=form.upper())
query = query.filter_by(line_number=line_no)
return query
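# Illustrative example (hypothetical request): passing line_number='F3X-21B'
# filters the query to filing_form == 'F3X' and line_number == '21B'.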
@doc(
tags=['disbursements'],
description=docs.EFILING_TAG
)
class ScheduleBEfileView(views.ApiResource):
model = models.ScheduleBEfile
schema = schemas.ItemizedScheduleBfilingsSchema
page_schema = schemas.ScheduleBEfilePageSchema
filter_multi_fields = [
('image_number', models.ScheduleBEfile.image_number),
('committee_id', models.ScheduleBEfile.committee_id),
('recipient_city', models.ScheduleBEfile.recipient_city),
('recipient_state', models.ScheduleBEfile.recipient_state),
#('recipient_committee_id', models.ScheduleBEfile.recipient_committee_id),
#('disbursement_purpose_category', models.ScheduleB.disbursement_purpose_category),
]
filter_fulltext_fields = [
#('recipient_name', models.ScheduleB.recipient_name_text),
('disbursement_description', models.ScheduleBEfile.disbursement_description),
]
filter_range_fields = [
(('min_date', 'max_date'), models.ScheduleBEfile.disbursement_date),
(('min_amount', 'max_amount'), models.ScheduleBEfile.disbursement_amount),
#(('min_image_number', 'max_image_number'), models.ScheduleBE.image_number),
]
@property
def args(self):
return utils.extend(
args.paging,
args.schedule_b_efile,
args.make_sort_args(
default='-disbursement_date',
validator=args.OptionValidator(['disbursement_date', 'disbursement_amount']),
),
)
```
#### File: openFEC/webservices/sorting.py
```python
import sqlalchemy as sa
from webservices.exceptions import ApiError
from webservices.common.util import get_class_by_tablename
def parse_option(option, model=None, aliases=None, join_columns=None, query=None):
"""Parse sort option to SQLAlchemy order expression.
:param str option: Column name, possibly prefixed with "-"
:param model: Optional SQLAlchemy model to sort on
:param join_columns: Mapping of column names to sort and join rules; used
for sorting on related columns
:raises: ApiError if column not found on model
"""
aliases = aliases or {}
join_columns = join_columns or {}
order = sa.desc if option.startswith('-') else sa.asc
column = option.lstrip('-')
relationship = None
if column in aliases:
column = aliases[column]
elif column in join_columns:
column, relationship = join_columns[column]
elif model:
try:
column = getattr(model, column)
except AttributeError:
raise ApiError('Field "{0}" not found'.format(column))
else:
for entity in query._entities:
if entity._label_name == column:
single_model = get_class_by_tablename(entity.namespace)
if not single_model:
column = entity.column
break
column = getattr(single_model, column)
break
return column, order, relationship
return column, order, relationship
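# Illustrative example (ScheduleB is a model referenced elsewhere in this repo):
#   parse_option('-disbursement_amount', model=ScheduleB)
#       -> (ScheduleB.disbursement_amount, sa.desc, None)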
def multi_sort(query, keys, model, aliases=None, join_columns=None, clear=False,
hide_null=False, index_column=None):
for key in keys:
query,_ = sort(query, key, model, aliases, join_columns, clear, hide_null, index_column)
return query,_
def sort(query, key, model, aliases=None, join_columns=None, clear=False,
hide_null=False, index_column=None):
"""Sort query using string-formatted columns.
:param query: Original query
:param options: Sort column name; prepend with "-" for descending sort
:param model: SQLAlchemy model
:param join_columns: Mapping of column names to sort and join rules; used
for sorting on related columns
:param clear: Clear existing sort conditions
:param hide_null: Exclude null values on sorted column(s)
:param index_column:
:param reverse_nulls: Swap order of null values on sorted column(s) in results;
Ignored if hide_null is True
"""
# Start off assuming we are dealing with a sort column, not a sort
# expression.
is_expression = False
expression_field = None
expression_type = None
null_sort = None
if clear:
query = query.order_by(False)
# If the query contains multiple entities (i.e., isn't a simple query on a
# model), looking up the sort key on the model may lead to invalid queries.
# In this case, use the string name of the sort key.
sort_model = (
model
if len(query._entities) == 1 and hasattr(query._entities[0], 'mapper')
else None
)
column, order, relationship = parse_option(
key,
model=sort_model,
aliases=aliases,
join_columns=join_columns,
query=query
)
# Store the text representation (name) of the sorting column in case we
# swap it for an expression instead.
if hasattr(column, 'key'):
column_name = column.key
else:
column_name = column
if model:
# Check to see if the model has a sort_expressions attribute on it,
# which contains a dictionary of column mappings to SQL expressions.
# If the model has this and there is a matching expression for the
# column, use the expression instead.
if hasattr(model, 'sort_expressions') and column_name in model.sort_expressions:
column = model.sort_expressions[column_name]['expression']
expression_field = model.sort_expressions[column_name]['field']
expression_type = model.sort_expressions[column_name]['type']
null_sort = model.sort_expressions[column_name].get(
'null_sort',
model.sort_expressions[column_name]['expression']
)
is_expression = True
sort_column = order(column)
query = query.order_by(sort_column)
if relationship:
query = query.join(relationship)
if hide_null:
query = query.filter(column != None) # noqa
return query, (
column,
order,
column_name,
is_expression,
expression_field,
expression_type,
null_sort,
)
```
#### File: webservices/tasks/legal_docs.py
```python
import logging
from celery_once import QueueOnce
from webservices.tasks import app
from webservices.rest import db
from webservices.legal_docs.advisory_opinions import load_advisory_opinions
from webservices.legal_docs.current_murs import load_current_murs
logger = logging.getLogger(__name__)
RECENTLY_MODIFIED_STARTING_AO = """
SELECT ao_no, pg_date
FROM aouser.aos_with_parsed_numbers
WHERE pg_date >= NOW() - '1 day'::INTERVAL
ORDER BY ao_year, ao_serial
LIMIT 1;
"""
RECENTLY_MODIFIED_STARTING_MUR = """
SELECT case_no, pg_date
FROM fecmur.cases_with_parsed_case_serial_numbers
WHERE pg_date >= NOW() - '1 day'::INTERVAL
AND case_type = 'MUR'
ORDER BY case_serial
LIMIT 1;
"""
@app.task(once={'graceful': True}, base=QueueOnce)
def refresh():
with db.engine.connect() as conn:
refresh_aos(conn)
refresh_murs(conn)
def refresh_aos(conn):
row = conn.execute(RECENTLY_MODIFIED_STARTING_AO).first()
if row:
logger.info("AO found %s modified at %s", row["ao_no"], row["pg_date"])
load_advisory_opinions(row["ao_no"])
else:
logger.info("No modified AOs found")
def refresh_murs(conn):
row = conn.execute(RECENTLY_MODIFIED_STARTING_MUR).first()
if row:
logger.info("Current MUR found %s modified at %s", row["case_no"], row["pg_date"])
load_current_murs(row["case_no"])
else:
logger.info("No modified current MURs found")
```
#### File: webservices/tasks/refresh.py
```python
import logging
import manage
from webservices import utils
from webservices.env import env
from webservices.tasks import app, download
logger = logging.getLogger(__name__)
@app.task
def refresh():
"""Update incremental aggregates, itemized schedules, materialized views,
then slack a notification to the development team.
"""
manage.logger.info('Starting nightly refresh...')
try:
manage.refresh_itemized()
manage.refresh_materialized()
download.clear_bucket()
slack_message = '*Success* nightly updates for {0} completed'.format(env.get_credential('NEW_RELIC_APP_NAME'))
utils.post_to_slack(slack_message, '#bots')
manage.logger.info(slack_message)
except Exception as error:
manage.logger.exception(error)
slack_message = '*ERROR* nightly update failed for {0}. Check logs.'.format(env.get_credential('NEW_RELIC_APP_NAME'))
utils.post_to_slack(slack_message, '#bots')
manage.logger.exception(error)
```
#### File: openFEC/webservices/utils.py
```python
import re
import functools
import json
import logging
import requests
import six
import sqlalchemy as sa
from collections import defaultdict
from datetime import date
from sqlalchemy.orm import foreign
from sqlalchemy.ext.declarative import declared_attr
from sqlalchemy.dialects import postgresql
from webservices.env import env
from elasticsearch import Elasticsearch
import flask_restful as restful
from marshmallow_pagination import paginators
from webargs import fields
from flask_apispec import use_kwargs as use_kwargs_original
from flask_apispec.views import MethodResourceMeta
from webservices import docs
from webservices import sorting
from webservices import decoders
from webservices import exceptions
from webservices.common.models import db
logger = logging.getLogger(__name__)
use_kwargs = functools.partial(use_kwargs_original, locations=('query', ))
class Resource(six.with_metaclass(MethodResourceMeta, restful.Resource)):
pass
API_KEY_ARG = fields.Str(
required=True,
missing='DEMO_KEY',
description=docs.API_KEY_DESCRIPTION,
)
if env.get_credential('PRODUCTION'):
Resource = use_kwargs({'api_key': API_KEY_ARG})(Resource)
fec_url_map = {'9': 'http://docquery.fec.gov/dcdev/posted/{0}.fec'}
fec_url_map = defaultdict(lambda : 'http://docquery.fec.gov/paper/posted/{0}.fec', fec_url_map)
def check_cap(kwargs, cap):
if cap:
if not kwargs.get('per_page') or kwargs['per_page'] > cap:
raise exceptions.ApiError(
'Parameter "per_page" must be between 1 and {}'.format(cap),
status_code=422,
)
def fetch_page(query, kwargs, model=None, aliases=None, join_columns=None, clear=False,
count=None, cap=100, index_column=None, multi=False):
check_cap(kwargs, cap)
sort, hide_null, reverse_nulls = kwargs.get('sort'), kwargs.get('sort_hide_null'), kwargs.get('sort_reverse_nulls')
if sort and multi:
query, _ = sorting.multi_sort(
query, sort, model=model, aliases=aliases, join_columns=join_columns,
clear=clear, hide_null=hide_null, index_column=index_column
)
elif sort:
query, _ = sorting.sort(
query, sort, model=model, aliases=aliases, join_columns=join_columns,
clear=clear, hide_null=hide_null, index_column=index_column
)
paginator = paginators.OffsetPaginator(query, kwargs['per_page'], count=count)
return paginator.get_page(kwargs['page'])
class SeekCoalescePaginator(paginators.SeekPaginator):
def __init__(self, cursor, per_page, index_column, sort_column=None, count=None):
self.max_column_map = {
"date": date.max,
"float": float("inf"),
"int": float("inf")
}
self.min_column_map = {
"date": date.min,
"float": float("inf"),
"int": float("inf")
}
super(SeekCoalescePaginator, self).__init__(cursor, per_page, index_column, sort_column, count)
def _fetch(self, last_index, sort_index=None, limit=None, eager=True):
cursor = self.cursor
direction = self.sort_column[1] if self.sort_column else sa.asc
lhs, rhs = (), ()
if sort_index is not None:
left_index = self.sort_column[0]
# Check if we're using a sort expression and if so, use the type
# associated with it instead of deriving it from the column.
if not self.sort_column[3]:
comparator = self.max_column_map.get(
str(left_index.property.columns[0].type).lower()
)
else:
comparator = self.max_column_map.get(self.sort_column[5])
if 'coalesce' not in str(left_index):
left_index = sa.func.coalesce(left_index, comparator)
lhs += (left_index,)
rhs += (sort_index,)
if last_index is not None:
lhs += (self.index_column,)
rhs += (last_index,)
lhs = sa.tuple_(*lhs)
rhs = sa.tuple_(*rhs)
if rhs.clauses:
filter = lhs > rhs if direction == sa.asc else lhs < rhs
cursor = cursor.filter(filter)
query = cursor.order_by(direction(self.index_column)).limit(limit)
return query.all() if eager else query
def _get_index_values(self, result):
"""Get index values from last result, to be used in seeking to the next
page. Optionally include sort values, if any.
"""
ret = {
'last_index': str(paginators.convert_value(
result,
self.index_column
))
}
if self.sort_column:
key = 'last_{0}'.format(self.sort_column[2])
# Check to see if we are dealing with a sort column or sort
# expression. If we're dealing with a sort expression, we need to
# override the value serialization with the sort expression
# information.
if not self.sort_column[3]:
ret[key] = paginators.convert_value(
result,
self.sort_column[0]
)
else:
# Create a new query based on the result returned and replace
# the SELECT portion with just the sort expression criteria.
# Also augment the WHERE clause with a match for the value of
# the index column found in the result so we only retrieve the
# single row matching the result.
# NOTE: This ensures we maintain existing clauses such as the
# check constraint needed for partitioned tables.
sort_column_query = self.cursor.with_entities(self.sort_column[0]).filter(
getattr(result.__class__, self.index_column.key) == getattr(result, self.index_column.key)
)
# Execute the new query to retrieve the value of the sort
# expression.
expression_value = db.engine.execute(
sort_column_query.statement
).scalar()
# Serialize the value using the mapped marshmallow field
# defined with the sort expression.
ret[key] = self.sort_column[4]()._serialize(
expression_value,
None,
None
)
if ret[key] is None:
ret.pop(key)
ret['sort_null_only'] = True
return ret
def fetch_seek_page(query, kwargs, index_column, clear=False, count=None, cap=100, eager=True):
paginator = fetch_seek_paginator(query, kwargs, index_column, clear=clear, count=count, cap=cap)
if paginator.sort_column is not None:
sort_index = kwargs['last_{0}'.format(paginator.sort_column[2])]
null_sort_by = paginator.sort_column[0]
# Check to see if we are sorting by an expression. If we are, we need
# to account for an alternative way to sort and page by null values.
if paginator.sort_column[3]:
null_sort_by = paginator.sort_column[6]
if not sort_index and kwargs['sort_null_only'] and paginator.sort_column[1] == sa.asc:
print('In fetch_seek_page method')
sort_index = None
query = query.filter(null_sort_by == None)
paginator.cursor = query
else:
sort_index = None
return paginator.get_page(last_index=kwargs['last_index'], sort_index=sort_index, eager=eager)
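# Illustrative keyset-paging example (hypothetical values): with sort='-disbursement_date'
# on Schedule B, _get_index_values on the previous page's last row yields
#   {'last_index': '123456789', 'last_disbursement_date': '2018-01-15'}
# and passing those back as query arguments makes _fetch add the filter
#   (coalesce(disbursement_date, date.max), sub_id) < ('2018-01-15', 123456789)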
def fetch_seek_paginator(query, kwargs, index_column, clear=False, count=None, cap=100):
check_cap(kwargs, cap)
model = index_column.parent.class_
sort, hide_null = kwargs.get('sort'), kwargs.get('sort_hide_null')
if sort:
query, sort_column = sorting.sort(
query, sort,
model=model, clear=clear, hide_null=hide_null
)
else:
sort_column = None
return SeekCoalescePaginator(
query,
kwargs['per_page'],
index_column,
sort_column=sort_column,
count=count,
)
def extend(*dicts):
ret = {}
for each in dicts:
ret.update(each)
return ret
def parse_fulltext(text):
return ' & '.join([
part + ':*'
for part in re.sub(r'\W', ' ', text).split()
])
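# Illustrative example: parse_fulltext('new york') -> 'new:* & york:*'
# (non-word characters are replaced with spaces before building the prefix-match terms)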
office_args_required = ['office', 'cycle']
office_args_map = {
'house': ['state', 'district'],
'senate': ['state'],
}
def check_election_arguments(kwargs):
for arg in office_args_required:
if kwargs.get(arg) is None:
raise exceptions.ApiError(
'Required parameter "{0}" not found.'.format(arg),
status_code=422,
)
conditional_args = office_args_map.get(kwargs['office'], [])
for arg in conditional_args:
if kwargs.get(arg) is None:
raise exceptions.ApiError(
'Must include argument "{0}" with office type "{1}"'.format(
arg,
kwargs['office'],
),
status_code=422,
)
def get_model(name):
from webservices.common.models import db
return db.Model._decl_class_registry.get(name)
def related(related_model, id_label, related_id_label=None, cycle_label=None,
related_cycle_label=None, use_modulus=True):
from webservices.common.models import db
related_model = get_model(related_model)
related_id_label = related_id_label or id_label
related_cycle_label = related_cycle_label or cycle_label
@declared_attr
def related(cls):
id_column = getattr(cls, id_label)
related_id_column = getattr(related_model, related_id_label)
filters = [foreign(id_column) == related_id_column]
if cycle_label:
cycle_column = getattr(cls, cycle_label)
if use_modulus:
cycle_column = cycle_column + cycle_column % 2
related_cycle_column = getattr(related_model, related_cycle_label)
filters.append(cycle_column == related_cycle_column)
return db.relationship(
related_model,
primaryjoin=sa.and_(*filters),
)
return related
related_committee = functools.partial(related, 'CommitteeDetail', 'committee_id')
related_candidate = functools.partial(related, 'CandidateDetail', 'candidate_id')
related_committee_history = functools.partial(
related,
'CommitteeHistory',
'committee_id',
related_cycle_label='cycle',
)
related_candidate_history = functools.partial(
related,
'CandidateHistory',
'candidate_id',
related_cycle_label='two_year_period',
)
related_efile_summary = functools.partial(
related,
'EFilings',
'file_number',
related_id_label='file_number',
)
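# Usage sketch (hypothetical model, for illustration only): the partials above are
# used as declarative attributes, e.g.
#   class ScheduleA(db.Model):
#       committee_id = db.Column(db.String)
#       committee = related_committee()
# which builds a relationship joining foreign(ScheduleA.committee_id) to
# CommitteeDetail.committee_id.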
def document_description(report_year, report_type=None, document_type=None, form_type=None):
if report_type:
clean = re.sub(r'\{[^)]*\}', '', report_type)
elif document_type:
clean = document_type
elif form_type and form_type in decoders.form_types:
clean = decoders.form_types[form_type]
else:
clean = 'Document'
if form_type and (form_type == 'RFAI' or form_type == 'FRQ'):
clean = 'RFAI: ' + clean
return '{0} {1}'.format(clean.strip(), report_year)
def make_report_pdf_url(image_number):
if image_number:
return 'http://docquery.fec.gov/pdf/{0}/{1}/{1}.pdf'.format(
str(image_number)[-3:],
image_number,
)
else:
return None
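# Example (illustrative image number): the last three digits pick the docquery
# directory, e.g.
#   make_report_pdf_url(29002441350) ==
#       'http://docquery.fec.gov/pdf/350/29002441350/29002441350.pdf'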
def make_schedule_pdf_url(image_number):
if image_number:
return 'http://docquery.fec.gov/cgi-bin/fecimg/?' + image_number
def make_csv_url(file_num):
file_number = str(file_num)
if file_num > -1 and file_num < 100:
return 'http://docquery.fec.gov/csv/000/{0}.csv'.format(file_number)
elif file_num >= 100:
return 'http://docquery.fec.gov/csv/{0}/{1}.csv'.format(file_number[-3:], file_number)
def make_fec_url(image_number, file_num):
image_number = str(image_number)
    if file_num is None or file_num < 0:
return
file_num = str(file_num)
indicator = -1
if len(image_number) == 18:
indicator = image_number[8]
elif len(image_number) == 11:
indicator = image_number[2]
return fec_url_map[indicator].format(file_num)
def get_index_column(model):
column = model.__mapper__.primary_key[0]
return getattr(model, column.key)
def cycle_param(**kwargs):
ret = {
'name': 'cycle',
'type': 'integer',
'in': 'path',
}
ret.update(kwargs)
return ret
def get_election_duration(column):
return sa.case(
[
(column == 'S', 6),
(column == 'P', 4),
],
else_=2,
)
def get_elasticsearch_connection():
es_conn = env.get_service(name='fec-api-search')
if es_conn:
url = es_conn.get_url(url='uri')
else:
url = 'http://localhost:9200'
es = Elasticsearch(url)
return es
def print_literal_query_string(query):
print(str(query.statement.compile(dialect=postgresql.dialect())))
def create_eregs_link(part, section):
url_part_section = part
if section:
url_part_section += '-' + section
return '/regulations/{}/CURRENT'.format(url_part_section)
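# Examples (illustrative):
#   create_eregs_link('110', '3')  == '/regulations/110-3/CURRENT'
#   create_eregs_link('110', None) == '/regulations/110/CURRENT'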
def post_to_slack(message, channel):
response = requests.post(
env.get_credential('SLACK_HOOK'),
data=json.dumps({
'text': message, 'channel': channel, 'link_names': 1,
'username': 'Ms. Robot', 'icon_emoji': ':robot_face:',
}),
headers={'Content-Type': 'application/json'},
)
if response.status_code != 200:
        logger.error('SLACK ERROR - Message failed to send: {0}'.format(message))
``` |
{
"source": "18F/osbu-forecast-api",
"score": 2
} |
#### File: forecast/opportunities/tests.py
```python
import os, csv
from datetime import date
from django.test import TestCase, RequestFactory
from opportunities.models import Office, Opportunity, OSBUAdvisor
from django.contrib.auth.models import User
from opportunities.serializers import OpportunitySerializer
from rest_framework.test import APITestCase
from opportunities.admin import OpportunityAdmin
from django.contrib.admin.sites import AdminSite
from opportunities.validators import validate_NAICS
from django.core.exceptions import ValidationError
from django.core.management import call_command
from opportunities.management.commands.load_opportunities import OpportunitiesLoader
from django.template import Context, Template
class OfficeTestCase(TestCase):
# Create your tests here.
def setUp(self):
self.o = Office(organization="PBS-Public Buildings Service",
region="R1-New England Region")
def test_office_str(self):
self.assertTrue(isinstance(self.o, Office))
self.assertEqual(str(self.o),
"%s (%s)" % (self.o.organization, self.o.region))
class OSBUAdvisorTestCase(TestCase):
# Create your tests here.
def setUp(self):
self.o = OSBUAdvisor(name="<NAME>", phone="202-555-5555",
email="<EMAIL>")
def test_osbu_str(self):
self.assertTrue(isinstance(self.o, OSBUAdvisor))
self.assertEqual(str(self.o),
"%s (%s, %s)" % (self.o.name, self.o.email, self.o.phone))
class OpportunityTestCase(TestCase):
def setUp(self):
Opportunity.objects.create(description="Test Opportunity",
estimated_fiscal_year="2016")
def test_opportunity_created(self):
award = Opportunity.objects.get(description="Test Opportunity")
self.assertTrue(award)
def test_opportunity_str(self):
award = Opportunity.objects.get(description="Test Opportunity")
self.assertEqual(str(award), "Test Opportunity (2016)")
def test_opportunity_NAICS_error(self):
o = Opportunity.objects.create(naics="2016")
        self.assertRaises(ValidationError, o.save)
# Testing the Award API
class OpportunityAPITest(APITestCase):
def setUp(self):
self.o = Office(organization="PBS-Public Buildings Service",
region="R1-New England Region")
self.a = Opportunity(office=self.o, description="Test Opportunity",
estimated_fiscal_year="2016")
def test_API(self):
response = self.client.get('/api/opportunities/')
self.assertEqual(200, response.status_code)
class ValidatorsTestCase(TestCase):
# This tests whether a 4-digit NAICS code raises a Validation Error
# Passing the test means that the 4-digit data results in error.
def test_NAICS_validation(self):
with self.assertRaises(ValidationError):
validate_NAICS("5010")
    # This test ensures that only digits can be used, else ValidationError.
    # Passing the test means that a non-digit character results in an error.
    def test_NAICS_validation_non_digit(self):
with self.assertRaises(ValidationError):
validate_NAICS("1010a")
# This tests whether a 6-digit NAICS code can be saved
def test_NAICS_validation_no_error(self):
self.assertTrue(validate_NAICS("501056"))
class ImporterTestCase(TestCase):
sample_filename = 'test/data/sample.csv'
bad_filename = 'test/data/bad_sample.csv'
def load(self, filename, **kwargs):
call_command(
'load_opportunities',
filename=os.path.join(os.path.dirname(__file__), filename)
)
def test_loads_sample(self):
filename = os.path.join(os.path.dirname(__file__), self.sample_filename)
self.load(self.sample_filename)
        with open(filename) as f:
            has_header = csv.Sniffer().has_header(f.read(1024))
            f.seek(0)  # rewind so the reader sees the whole file, not just the tail
            file_read = csv.reader(f)
            rows = sum(1 for row in file_read)
            if has_header:
                rows -= 1  # don't count the header row as an Opportunity
        self.assertEquals(Opportunity.objects.count(), rows)
def test_parse_date(self):
parse_date = OpportunitiesLoader.parse_date
self.assertEquals(parse_date('12/8/2015'), date(2015, 12, 8))
self.assertEquals(parse_date('06/03/2014'), date(2014, 6, 3))
self.assertEquals(parse_date('5/1/2016'), date(2016, 5, 1))
self.assertIsNone(parse_date(''))
def test_parse_fiscal_dates(self):
parse_fiscal_dates = OpportunitiesLoader.parse_fiscal_dates
self.assertEquals(parse_fiscal_dates("FY 2016-2nd Quarter"),("2016","2nd"))
self.assertEquals(parse_fiscal_dates("FY 2016-Quarter To Be Determined"),
("2016","To Be Determined"))
self.assertEquals(parse_fiscal_dates("FY 2016-Quarter 4"), ("2016","4th"))
def test_parse_dollars(self):
parse_dollars = OpportunitiesLoader.parse_dollars
self.assertEquals(parse_dollars('$1000'), 1000)
self.assertEquals(parse_dollars('$5,000'), 5000)
self.assertIsNone(parse_dollars(1000))
def test_parse_advisor(self):
parse_advisor = OpportunitiesLoader.parse_advisor
self.assertEquals(parse_advisor('Really Fakeperson, 555-555-5555, <EMAIL>'),
['Really Fakeperson', '555-555-5555', '<EMAIL>'])
self.assertEquals(parse_advisor('Different Fakeperson, 555-555-5555 <EMAIL>'),
['Different Fakeperson', '555-555-5555', '<EMAIL>'])
self.assertEquals(parse_advisor('Just the Name'),['Just the Name','',''])
class CutTests(TestCase):
TEMPLATE = Template("{% load filters %} {{ '123-456-7890'|cut:'-' }}")
def setUp(self):
self.entry = '1234567890'
def test_cut(self):
rendered = self.TEMPLATE.render(Context({}))
self.assertIn(self.entry, rendered)
class CurrencyTests(TestCase):
TEMPLATE = Template("{% load filters %} {{ '1234.56'|currency:'-' }}")
def setUp(self):
self.entry = '$1,234.56'
def test_currency(self):
rendered = self.TEMPLATE.render(Context({}))
self.assertIn(self.entry, rendered)
```
#### File: forecast/opportunities/validators.py
```python
from django.core.exceptions import ValidationError
import re
# Enforce NAICS code as 6 digits
def validate_NAICS(naics):
    if re.match(r"^\d{6}$", naics) is None:
        raise ValidationError('NAICS code must be 6 digits long')
return True
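# Examples (illustrative):
#   validate_NAICS("541511")  -> True (six digits)
#   validate_NAICS("5415")    -> raises ValidationError (too short)
#   validate_NAICS("54151a")  -> raises ValidationError (non-digit character)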
```
#### File: forecast/opportunities/views.py
```python
from django.shortcuts import render
from .models import Opportunity, Office, OSBUAdvisor
from rest_framework import viewsets, filters
from rest_framework.pagination import PageNumberPagination
from rest_framework.settings import api_settings
from rest_framework_csv.renderers import CSVRenderer
from django.core import serializers
from django.shortcuts import get_object_or_404
import json
import django_filters
import rest_framework
from .serializers import (
OpportunitySerializer, OfficeSerializer, OSBUAdvisorSerializer
)
class PaginatedCSVRenderer(CSVRenderer):
results_field = 'results'
def render(self, data, media_type=None, renderer_context=None):
if not isinstance(data, list):
data = data.get(self.results_field, [])
return super(PaginatedCSVRenderer, self).render(data, media_type, renderer_context)
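# Sketch (assumed input shape): DRF pagination wraps rows as
#   {'count': 2, 'next': None, 'previous': None, 'results': [{'id': 1}, {'id': 2}]}
# so render() unwraps the 'results' list before handing it to CSVRenderer; a plain
# list is passed through unchanged.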
class LargeResultsSetPagination(PageNumberPagination):
page_size = 1000
page_size_query_param = 'page_size'
max_page_size = 10000
def home(request):
# opportunities = Opportunity.objects.all().select_related('office__id')
agency = Opportunity.objects.values('agency').order_by('agency').distinct('agency')
socioeconomic = Opportunity.objects.values('socioeconomic').order_by('socioeconomic').distinct('socioeconomic')
place_of_performance_state = Opportunity.objects.values('place_of_performance_state').order_by('place_of_performance_state').distinct('place_of_performance_state')
naics = Opportunity.objects.values('naics').order_by('naics').distinct('naics')
estimated_fiscal_year_quarter = Opportunity.objects.values('estimated_fiscal_year_quarter').order_by('estimated_fiscal_year_quarter').distinct('estimated_fiscal_year_quarter')
contract_type = Opportunity.objects.values('contract_type').order_by('contract_type').distinct('contract_type')
award_status = Opportunity.objects.values('award_status').order_by('award_status').distinct('award_status')
return render(request, 'main.html', {
'o': {},
'agency': agency,
'socioeconomic': socioeconomic,
'place_of_performance_state': place_of_performance_state,
'naics': naics,
'estimated_fiscal_year_quarter': estimated_fiscal_year_quarter,
'contract_type': contract_type,
'award_status': award_status
})
def details(request, id):
"""
A page displaying details about a particular contracting opportunity
"""
opportunity = get_object_or_404(Opportunity.objects.filter(id=id).select_related('office__id', 'osbu_advisor__id'))
return render(request, 'detail.html', {'o': opportunity})
class OpportunityFilter(django_filters.FilterSet):
"""
Filters available when calling the API endpoint
"""
description = django_filters.CharFilter(lookup_type='icontains')
dollar_value_min = django_filters.NumberFilter(lookup_type='gt')
dollar_value_max = django_filters.NumberFilter(lookup_type='lt')
class Meta:
model = Opportunity
fields = ['socioeconomic','place_of_performance_state','naics','description',
'estimated_fiscal_year_quarter', 'dollar_value_min', 'dollar_value_max',
'agency', 'contract_type', 'award_status']
class OpportunityViewSet(viewsets.ReadOnlyModelViewSet):
"""
    API endpoint that allows contracting opportunities to be viewed.
"""
renderer_classes = (
rest_framework.renderers.JSONRenderer,
rest_framework.renderers.BrowsableAPIRenderer,
PaginatedCSVRenderer,
)
pagination_class = LargeResultsSetPagination
queryset = Opportunity.objects.all().filter(published=True)
serializer_class = OpportunitySerializer
filter_backends = (filters.DjangoFilterBackend,)
filter_class = OpportunityFilter
class OfficeViewSet(viewsets.ReadOnlyModelViewSet):
"""
    API endpoint that allows offices to be viewed.
"""
queryset = Office.objects.all()
serializer_class = OfficeSerializer
class OSBUAdvisorViewSet(viewsets.ReadOnlyModelViewSet):
"""
    API endpoint that allows OSBU advisors to be viewed.
"""
queryset = OSBUAdvisor.objects.all()
serializer_class = OSBUAdvisorSerializer
``` |
{
"source": "18F/peacecorps-site",
"score": 2
} |
#### File: peacecorps/contenteditor/admin.py
```python
from adminsortable.admin import SortableAdminMixin
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.models import User
from tinymce.widgets import TinyMCE
from tinymce.models import HTMLField
from django.forms import TextInput
from django.db.models import CharField
from peacecorps import models
from .forms import (
LoggingAuthenticationForm, StrictAdminPasswordChangeForm,
StrictUserCreationForm)
class StrictUserAdmin(UserAdmin):
add_form = StrictUserCreationForm
change_password_form = StrictAdminPasswordChangeForm
class AccountAdmin(admin.ModelAdmin):
list_display = ['code', 'name']
search_fields = ['code', 'name']
class AccountInline(admin.StackedInline):
model = models.Account
class CampaignAdmin(admin.ModelAdmin):
formfield_overrides = {
CharField: {'widget': TextInput(attrs={'size': '80'})},
}
fieldsets = (
('Info', {
'fields': ['account', 'name', ('campaigntype', 'country'), 'icon',
'featured_image']
}),
('Text', {
'fields': ['slug', 'description', 'abstract', 'tagline', 'call',
'published']
}),
)
prepopulated_fields = {"slug": ("name",)}
list_display = ['account', 'name']
list_filter = ['campaigntype']
search_fields = ['account__code', 'name', 'country__name']
raw_id_fields = ['account', 'icon', 'country']
# filter_horizontal = ['featuredprojects']
exclude = ['featuredprojects']
class FeaturedProjectFrontPageAdmin(admin.ModelAdmin):
list_display = ["project", "funded_status"]
raw_id_fields = ['project']
def funded_status(self, obj):
if obj.project.account.funded():
return "Funded"
else:
return "Not Funded"
class IssueAdmin(admin.ModelAdmin):
filter_horizontal = ['campaigns']
search_fields = ['name']
class MediaAdmin(admin.ModelAdmin):
fieldsets = (
('File', {
'fields': ['file']
}),
('Info', {
'fields': ['title', ('mediatype', 'country'), 'caption']
}),
('508 Compliance', {
'fields': ['description', 'transcript'],
'description': """<h4>Images must have a description.
Audio/video files must be transcribed.</h4>"""
}),
)
class ProjectAdmin(admin.ModelAdmin):
list_display = ['account', 'title', 'country',
'volunteername', 'funded_status']
prepopulated_fields = {"slug": ("title",)}
filter_horizontal = ('campaigns',)
search_fields = ['account__code', 'volunteername', 'country__name',
'title']
raw_id_fields = ['account', 'overflow',
'volunteerpicture', 'featured_image']
exclude = ['media']
readonly_fields = ['funded_status']
def funded_status(self, obj):
if obj.account.funded():
return "Funded"
else:
return "Not Funded"
fieldsets = (
('Account Info', {
'fields': ['account', 'overflow', 'country', 'funded_status'],
}),
('Volunteer Info', {
'fields': ['volunteername',
'volunteerhomestate',
'volunteerpicture'],
'classes': ['wide']
}),
('Media', {
'fields': ['featured_image']
}),
('Project Info', {
'fields': ['title', 'tagline', 'slug',
'description', 'abstract', 'published']
}),
('Campaigns', {
'fields': ['campaigns']
}),
)
class VignetteAdmin(admin.ModelAdmin):
actions = None
list_display = ('slug', 'location')
fields = ('location', 'instructions', 'content')
readonly_fields = ('location', 'instructions')
def has_add_permission(self, request):
return False
def has_delete_permission(self, request, obj=None):
return False
class FAQAdmin(SortableAdminMixin, admin.ModelAdmin):
pass
class PayGovAlertAdmin(admin.ModelAdmin):
formfield_overrides = {
HTMLField: {'widget': TinyMCE(attrs={'cols': 80, 'rows': 10})},
}
admin.site.register(models.Account, AccountAdmin)
admin.site.register(models.Campaign, CampaignAdmin)
admin.site.register(models.Country)
admin.site.register(models.FeaturedCampaign)
admin.site.register(models.FeaturedProjectFrontPage,
FeaturedProjectFrontPageAdmin)
admin.site.register(models.Media, MediaAdmin)
admin.site.register(models.Project, ProjectAdmin)
# These aren't used anywhere yet
# admin.site.register(models.Vignette, VignetteAdmin)
admin.site.register(models.Issue, IssueAdmin)
admin.site.register(models.FAQ, FAQAdmin)
admin.site.register(models.PayGovAlert, PayGovAlertAdmin)
admin.site.unregister(User)
admin.site.register(User, StrictUserAdmin)
admin.site.login_form = LoggingAuthenticationForm
``` |
{
"source": "18F/pre-award-product-planner",
"score": 3
} |
#### File: 18F/pre-award-product-planner/create_document.py
```python
import datetime
from docx import Document
from models import Agency, RFQ, ContentComponent, Deliverable, CustomComponent, session
BIG_HEADING = 1
SUB_HEADING = 2
SMALL_HEADING = 4
user_dict = {
"external_people": "External People/The Public",
"external_it": "External IT/Developers",
"internal_people": "Internal People/Government Employees",
"internal_it": "Internal IT/Developers",
}
def make_dict(components):
component_dict = {}
for component in components:
component_dict[component.name] = component.text
return component_dict
def make_custom_component_list(components):
custom_component_list = []
for component in components:
this_component = {}
this_component['name'] = component.name
this_component['text'] = component.text
this_component['title'] = component.title
custom_component_list.append(this_component)
return custom_component_list
def get_users(cc, user_types):
users = []
for user in user_types:
if cc[user] == "true":
users.append(user)
return users
# Add global counter for headings
# def section_heading(document, headingTitle):
# title = str(sectionNumber)+'. '+headingTitle
# document.add_heading(headingTitle, level=BIG_HEADING)
# global sectionNumberCounter
# sectionNumberCounter = sectionNumberCounter+1
# return document
def overview(document, rfq):
# table of contents & basic info
agency_full_name = session.query(Agency).filter_by(abbreviation=rfq.agency).first().full_name
title = "RFQ for the " + agency_full_name
document.add_heading(title, level=BIG_HEADING)
doc_date = str(datetime.date.today())
document.add_heading(doc_date, level=3)
# table of contents
document.add_heading("Table of Contents", level=SUB_HEADING)
    sections = ["Definitions", "Services", "Objectives", "Key Personnel",
                "Invoicing & Funding", "Inspection and Delivery", "Government Roles",
                "Special Requirements", "Additional Contract Clauses",
                "Instructions to Offerors", "Evaluation Criteria", "Appendix"]
for section in sections:
document.add_paragraph(section, style='ListNumber')
text = "Note: All sections of this RFQ will be incorporated into the contract except the Statement of Objectives, Instructions, and Evaluation Factors."
document.add_paragraph(text)
document.add_page_break()
return document
def definitions(document, rfq):
document.add_heading("1. Definitions", level=BIG_HEADING)
all_definitions = session.query(ContentComponent).filter_by(document_id=rfq.id).filter_by(section=1).first()
for definition in all_definitions.text.split("\n\n"):
document.add_paragraph(definition)
return document
def services(document, rfq):
document.add_heading("2. Services", level=BIG_HEADING)
content_components = session.query(ContentComponent).filter_by(document_id=rfq.id).filter_by(section=2).all()
# include vendor number
cc = make_dict(content_components)
optionPeriods = cc["optionPeriods"]
document.add_heading("Brief Description of Services & Type of Contract", level=SUB_HEADING)
document.add_paragraph(cc["descriptionOfServices"])
document.add_paragraph(cc["naicsText"])
document.add_heading("Budget", level=SUB_HEADING)
max_text = "The government is willing to invest a maximum budget of $" + cc["maxBudget"] + " in this endeavor."
document.add_paragraph(max_text)
# travel
if cc["travelRequirement"] == "yes":
travel_text = "The Government anticipates travel will be required under this effort. Contractor travel expenses will not exceed $" + cc["travelBudget"] + "."
document.add_paragraph(travel_text)
document.add_paragraph(cc["travelLanguage"])
else:
document.add_paragraph("The Government does not anticipate significant travel under this effort.")
# @TODO make top column bold, add award fee/incentive information (if applicable)
# base period
document.add_heading("Contract Line Item Number (CLIN) Format", level=SUB_HEADING)
document.add_paragraph("\n")
table = document.add_table(rows=2, cols=1)
table.style = 'TableGrid'
table.rows[0].cells[0].text = "Base Period: " + str(cc["basePeriodDurationNumber"]) + ' ' + cc["basePeriodDurationUnit"]
table.rows[1].cells[0].text = "CLIN 0001, FFP- Completion - The Contractor shall provide services for the Government in accordance with the Performance Work Statement (PWS)"
table = document.add_table(rows=4, cols=2)
table.style = 'TableGrid'
table.rows[0].cells[0].text = "Iteration Period of Performance"
table.rows[0].cells[1].text = cc["iterationPoPNumber"] + ' ' + cc["iterationPoPUnit"]
table.rows[1].cells[0].text = "Price Per Iteration"
table.rows[1].cells[1].text = "$XXXXX (Vendor Completes)"
table.rows[2].cells[0].text = "Period of Performance"
table.rows[2].cells[1].text = cc["basePeriodDurationNumber"] + ' ' + cc["basePeriodDurationUnit"]
table.rows[3].cells[0].text = "Firm Fixed Price (Completion):"
table.rows[3].cells[1].text = "$XXXXX (Vendor Completes)"
# @TODO if base fee, add base fee clin row
document.add_paragraph("\n")
# option periods
for i in range(1, int(optionPeriods)+1):
table = document.add_table(rows=2, cols=1)
table.style = 'TableGrid'
table.rows[0].cells[0].text = "Option Period " + str(i) + ": " + str(cc["optionPeriodDurationNumber"]) + ' ' + cc["optionPeriodDurationUnit"]
table.rows[1].cells[0].text = "CLIN " + str(i) + "0001, FFP- Completion - The Contractor shall provide services for the Government in accordance with the Performance Work Statement (PWS)"
table = document.add_table(rows=4, cols=2)
table.style = 'TableGrid'
table.rows[0].cells[0].text = "Iteration Period of Performance"
table.rows[0].cells[1].text = cc["iterationPoPNumber"] + ' ' + cc["iterationPoPUnit"]
table.rows[1].cells[0].text = "Price Per Iteration"
table.rows[1].cells[1].text = "$XXXXX (Vendor Completes)"
table.rows[2].cells[0].text = "Period of Performance"
table.rows[2].cells[1].text = cc["optionPeriodDurationNumber"] + ' ' + cc["optionPeriodDurationUnit"]
table.rows[3].cells[0].text = "Firm Fixed Price (Completion):"
table.rows[3].cells[1].text = "$XXXXX (Vendor Completes)"
# @TODO if option fee, add option fee clin row
document.add_paragraph("\n")
# @TODO add custom CLIN(s)
document.add_heading("Payment Schedule", level=SUB_HEADING)
document.add_paragraph(cc["paymentSchedule"])
return document
def objectives(document, rfq):
document.add_heading("3. Objectives", level=BIG_HEADING)
content_components = session.query(ContentComponent).filter_by(document_id=rfq.id).filter_by(section=3).all()
cc = make_dict(content_components)
document.add_heading("General Background", level=SUB_HEADING)
if len(cc["generalBackground"]) > 0:
document.add_paragraph(cc["generalBackground"])
else:
document.add_paragraph("Please provide several paragraphs about your project's history, mission, and current state.")
document.add_heading("Program History", level=SUB_HEADING)
if len(cc["programHistory"]) > 0:
document.add_paragraph(cc["programHistory"])
else:
document.add_paragraph("If you have any information about the current vendors and specific technology being used please provide it here.")
document.add_heading("Users", level=SUB_HEADING)
user_types = ["external_people", "external_it", "internal_people", "internal_it"]
users = get_users(cc, user_types)
if len(users) == 0:
document.add_paragraph("The primary users may include any of the following:")
for i, user in enumerate(user_dict):
document.add_paragraph(str(i+1) + ". " + user_dict[user])
else:
document.add_paragraph("The primary users will include the following:")
for i, user in enumerate(users):
document.add_paragraph(str(i+1) + ". " + user_dict[user])
# for user in users, add text of each user's needs
needs = ['external_people_needs', 'external_it_needs', 'internal_it_needs', 'internal_people_needs']
document.add_heading("User Research", level=SUB_HEADING)
user_research_options = {
"done": "Research has already been conducted, either internally or by another vendor.",
"internal": "We intend to conduct user research internally prior to the start date of this engagement.",
"vendor": "The vendor will be responsible for the user research.",
}
if cc["userResearchStrategy"] == "vendor":
document.add_paragraph(user_research_options["vendor"])
document.add_heading("Understand What People Need", level=SUB_HEADING)
document.add_paragraph(cc["whatPeopleNeed"])
document.add_heading("Address the whole experience, from start to finish", level=SUB_HEADING)
document.add_paragraph(cc["startToFinish"])
if cc["userResearchStrategy"] == "done":
document.add_paragraph(user_research_options["done"])
if cc["userResearchStrategy"] == "internal":
document.add_paragraph(user_research_options["internal"])
if cc["userResearchStrategy"] == "none":
pass
document.add_paragraph(cc['userAccess'])
document.add_heading("Universal Requirements", level=BIG_HEADING)
document.add_heading("Build the service using agile and iterative practices", level=SUB_HEADING)
document.add_paragraph(cc["agileIterativePractices"])
document.add_heading("Make it simple and intuitive", level=SUB_HEADING)
document.add_paragraph(cc["simpleAndIntuitive"])
document.add_heading("Use data to drive decisions", level=SUB_HEADING)
document.add_paragraph(cc["dataDrivenDecisions"])
document.add_heading("Specific Tasks and Deliverables", level=SUB_HEADING)
text = "This " + rfq.doc_type + " will require the following services:"
document.add_paragraph(text)
deliverables = session.query(Deliverable).filter_by(document_id=rfq.id).filter_by(value="true").all()
for deliverable in deliverables:
document.add_paragraph(" " + deliverable.display)
document.add_heading("Deliverables", level=SUB_HEADING)
document.add_paragraph(cc["definitionOfDone"])
for deliverable in deliverables:
document.add_heading(deliverable.display, level=SMALL_HEADING)
document.add_paragraph(deliverable.text)
document.add_heading("Place of Performance", level=SUB_HEADING)
if cc['locationRequirement'] == "no":
document.add_paragraph("The contractor is not required to have a full-time working staff presence on-site.")
else:
if len(cc['locationText']) > 0:
location = cc["locationText"]
else:
location = "[LOCATION HERE]"
location_text = "The contractor shall have a full-time working staff presence at " + location + ". The contractor shall have additional facilities to perform contract functions as necessary."
document.add_paragraph(location_text)
document.add_paragraph(cc["offSiteDevelopmentCompliance"])
document.add_heading("Kick Off Meeting", level=SUB_HEADING)
kickoff_text = ""
if cc["kickOffMeeting"] == "none":
kickoff_text = "A formal kick-off meeting will not be required."
if cc["kickOffMeeting"] == "in-person":
kickoff_text = cc["kickOffMeetingInPerson"]
if cc["kickOffMeeting"] == "remote":
kickoff_text = cc["kickOffMeetingRemote"]
document.add_paragraph(kickoff_text)
return document
def personnel(document, rfq):
document.add_heading("4. Key Personnel", level=BIG_HEADING)
content_components = session.query(ContentComponent).filter_by(document_id=rfq.id).filter_by(section=4).all()
cc = make_dict(content_components)
document.add_paragraph(cc["keyPersonnelIntro"])
document.add_heading("Security Clearance and Onsite Presence", level=SUB_HEADING)
if cc["clearanceRequired"] == "None":
document.add_paragraph("Contractor personnel will not be required to have a security clearance.")
else:
document.add_paragraph("Some contractor personnel will be required to have a clearance at the level of " + cc["clearanceRequired"] + ".")
if cc["onSiteRequired"] == "yes":
document.add_paragraph("An onsite presence by the contractor will be required.")
else:
document.add_paragraph("An onsite presence by the contractor will not be required.")
document.add_heading("Key Personnel Evaluation", level=SUB_HEADING)
if cc["evaluateKeyPersonnel"] == "yes":
document.add_paragraph(cc["keyPersonnelRequirements"])
else:
document.add_paragraph(cc["notEvaluateKeyPersonnel"])
document.add_heading("Performance Work Statement", level=SUB_HEADING)
document.add_paragraph(cc["performanceWorkStatement"])
return document
def invoicing(document, rfq):
document.add_heading("5. Invoicing & Funding", level=BIG_HEADING)
content_components = session.query(ContentComponent).filter_by(document_id=rfq.id).filter_by(section=5).all()
cc = make_dict(content_components)
document.add_paragraph(cc["invoicing"])
document.add_paragraph("The Contractor shall submit an original invoice for payment to the following office:")
if len(cc['billingAddress']) < 1:
document.add_paragraph("ADD BILLING ADDRESS HERE")
else:
document.add_paragraph(cc["billingAddress"])
document.add_paragraph(cc["duplicateInvoice"])
return document
def inspection_and_delivery(document, rfq):
document.add_heading("6. Inspection and Delivery", level=BIG_HEADING)
content_components = session.query(ContentComponent).filter_by(document_id=rfq.id).filter_by(section=6).all()
cc = make_dict(content_components)
document.add_heading("Overview", level=SUB_HEADING)
document.add_paragraph(cc["guidingPrinciples"])
document.add_heading("Delivery and Timing", level=SUB_HEADING)
document.add_paragraph(cc["inspectionOverview"])
document.add_heading("Late Delivery", level=SUB_HEADING)
document.add_paragraph(cc["lateDelivery"])
document.add_heading("Collaboration Environment", level=SUB_HEADING)
if cc["workspaceExists"] == "yes":
if len(cc["workspaceName"]) > 0:
document.add_paragraph(rfq.agency + " is currently using " + cc["workspaceName"] + " as their primary collaborative workspace tool. The contractor is required to establish a collaborative workspace using either this tool or another that both the contractor and the CO can agree upon.")
document.add_paragraph(cc["deliveringDeliverables"])
document.add_heading("Transition Activities", level=SUB_HEADING)
document.add_paragraph(cc["transitionActivities"])
return document
def government_roles(document, rfq):
document.add_heading("7. Government Roles", level=BIG_HEADING)
content_components = session.query(ContentComponent).filter_by(document_id=rfq.id).filter_by(section=7).all()
cc = make_dict(content_components)
custom_components = session.query(CustomComponent).filter_by(document_id=rfq.id).filter_by(section=7).order_by(CustomComponent.id).all()
document.add_paragraph(cc["stakeholderIntro"])
component_list = make_custom_component_list(custom_components)
for component in component_list:
document.add_heading(component['title'], level=SUB_HEADING)
document.add_paragraph(component['text'])
return document
def special_requirements(document, rfq):
document.add_heading("8. Special Requirements", level=BIG_HEADING)
custom_components = session.query(CustomComponent).filter_by(document_id=rfq.id).filter_by(section=8).order_by(CustomComponent.id).all()
component_list = make_custom_component_list(custom_components)
for component in component_list:
document.add_heading(component['title'], level=SUB_HEADING)
document.add_paragraph(component['text'])
return document
def contract_clauses(document, rfq):
document.add_heading("9. Additional Contract Clauses", level=BIG_HEADING)
contract_clauses = session.query(ContentComponent).filter_by(document_id=rfq.id).filter_by(section=9).first()
document.add_paragraph(contract_clauses.text)
return document
def instructions_to_offerors(document, rfq):
document.add_heading("10. Instructions to Offerors", level=BIG_HEADING)
instructions = session.query(ContentComponent).filter_by(document_id=rfq.id).filter_by(section=10).first()
document.add_paragraph(instructions.text)
return document
def evaluation_criteria(document, rfq):
document.add_heading("11. Evaluation Criteria", level=BIG_HEADING)
instructions = session.query(ContentComponent).filter_by(document_id=rfq.id).filter_by(section=11).first()
document.add_paragraph(instructions.text)
return document
def appendix(document, rfq):
document.add_heading("12. Appendix", level=BIG_HEADING)
return document
def create_document(rfq_id):
rfq = session.query(RFQ).filter_by(id=rfq_id).first()
document = Document()
document = overview(document, rfq)
document = definitions(document, rfq)
document = services(document, rfq)
document = objectives(document, rfq)
document = personnel(document, rfq)
document = invoicing(document, rfq)
document = inspection_and_delivery(document, rfq)
document = government_roles(document, rfq)
document = special_requirements(document, rfq)
document = contract_clauses(document, rfq)
document = instructions_to_offerors(document, rfq)
document = evaluation_criteria(document, rfq)
document = appendix(document, rfq)
return document
```
#### File: 18F/pre-award-product-planner/models.py
```python
import sys, os
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, Text, Boolean, String, ForeignKey, create_engine
from sqlalchemy.orm import sessionmaker, relationship, scoped_session
from flask_sqlalchemy import SQLAlchemy
from config import ProductionConfig
from passlib.apps import custom_app_context as pwd_context
from itsdangerous import (TimedJSONWebSignatureSerializer as Serializer, BadSignature, SignatureExpired)
import seed
engine = create_engine(ProductionConfig.SQLALCHEMY_DATABASE_URI)
session_factory = sessionmaker(bind=engine)
session = scoped_session(session_factory)
Base = declarative_base()
content_components = seed.content_components
deliverables = seed.deliverables
custom_components = seed.custom_components
class User(Base):
__tablename__ = 'users'
id = Column(Integer, primary_key = True)
username = Column(String(32), index = True)
password_hash = Column(String(128))
def hash_password(self, password):
self.password_hash = pwd_context.encrypt(password)
def verify_password(self, password):
return pwd_context.verify(password, self.password_hash)
def generate_auth_token(self, expiration = 600):
s = Serializer(os.environ.get('SECRET_KEY', "None"), expires_in = expiration)
return s.dumps({ 'id': self.id })
@staticmethod
def verify_auth_token(token):
s = Serializer(os.environ.get('SECRET_KEY', "None"))
try:
data = s.loads(token)
except SignatureExpired:
return None # valid token, but expired
except BadSignature:
return None # invalid token
user = session.query(User).get(data['id'])
return user
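# Usage sketch (illustrative): tokens round-trip through the itsdangerous serializer
# keyed by SECRET_KEY, e.g.
#   token = some_user.generate_auth_token(expiration=3600)
#   User.verify_auth_token(token)  # -> the same User, or None if expired/invalid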
class Agency(Base):
__tablename__ = 'agencies'
id = Column(Integer, primary_key=True)
full_name = Column(String, unique=True)
abbreviation = Column(String, unique=True)
def __repr__(self):
return "<Agency(id='%d', full_name='%s', abbreviation='%s')>" % (self.id, self.full_name, self.abbreviation)
def to_dict(self):
return {c.name: getattr(self, c.name) for c in self.__table__.columns}
class RFQ(Base):
__tablename__ = 'rfqs'
id = Column(Integer, primary_key=True)
user_id = Column(Integer, ForeignKey('users.id'))
agency = Column(String)
doc_type = Column(String)
program_name = Column(String)
setaside = Column(String)
base_number = Column(String)
content_components = relationship("ContentComponent")
deliverables = relationship("Deliverable")
custom_components = relationship("CustomComponent")
def to_dict(self):
return {c.name: getattr(self, c.name) for c in self.__table__.columns}
def __repr__(self):
return "<RFQ(id='%d', agency='%s', doc_type='%s', program_name='%s')>" % (self.id, self.agency, self.doc_type, self.program_name)
def __init__(self, user_id, agency, doc_type, program_name, setaside, base_number=None):
# working-with-related-objects
base_number_value = None
        if base_number:  # base_number defaults to None, so guard before using it
base_number_value = base_number
# seed each section of the new document with the template content
self.user_id = user_id
self.agency = agency
self.doc_type = doc_type
self.program_name = program_name
self.setaside = setaside
self.base_number = base_number_value
vehicle = ""
agency_full_name = session.query(Agency).filter_by(abbreviation=agency).first().full_name
if doc_type != "Purchase Order":
vehicle = "(vehicle number " + base_number_value + ") "
for section in content_components:
text = section['text']
section['text'] = text.replace("{AGENCY}", agency).replace("{DOC_TYPE}", doc_type).replace("{AGENCY_FULL_NAME}", agency_full_name).replace("{PROGRAM_NAME}", program_name).replace("{VEHICLE}", vehicle)
self.content_components.append(ContentComponent(**section))
for deliverable in deliverables:
deliverable["text"] = str(deliverable['text'])
deliverable["display"] = str(deliverable['display'])
self.deliverables.append(Deliverable(**deliverable))
for component in custom_components:
text = component['text']
title = component['title']
component['text'] = text.replace("{AGENCY}", agency).replace("{DOC_TYPE}", doc_type).replace("{AGENCY_FULL_NAME}", agency_full_name).replace("{PROGRAM_NAME}", program_name).replace("{VEHICLE}", vehicle)
component['title'] = title.replace("{AGENCY}", agency).replace("{DOC_TYPE}", doc_type).replace("{AGENCY_FULL_NAME}", agency_full_name).replace("{PROGRAM_NAME}", program_name).replace("{VEHICLE}", vehicle)
self.custom_components.append(CustomComponent(**component))
class ContentComponent(Base):
__tablename__ = 'content_components'
document_id = Column(Integer, ForeignKey('rfqs.id'), primary_key=True)
section = Column(Integer, primary_key=True)
name = Column(String, primary_key=True)
text = Column(Text)
def to_dict(self):
return {c.name: getattr(self, c.name) for c in self.__table__.columns}
def __repr__(self):
return "<ContentComponent(name='%s', doc_id='%d', text='%s')>" % (self.name, self.document_id, self.text)
class Deliverable(Base):
__tablename__ = 'deliverables'
id = Column(Integer, primary_key=True)
document_id = Column(Integer, ForeignKey('rfqs.id'), primary_key=True)
name = Column(String, primary_key=True)
display = Column(String)
value = Column(String)
text = Column(Text)
def to_dict(self):
return {c.name: getattr(self, c.name) for c in self.__table__.columns}
def __repr__(self):
return "<Deliverable(name='%s', doc_id='%d', text='%s', value='%s', display='%s')>" % (self.name, self.document_id, self.text, self.value, self.display)
class AdditionalClin(Base):
__tablename__ = 'additional_clins'
id = Column(Integer, primary_key=True)
document_id = Column(Integer, ForeignKey('rfqs.id'))
row1 = Column(Text)
row2 = Column(Text)
row3a = Column(Text)
row3b = Column(Text)
row4a = Column(Text)
row4b = Column(Text)
row5a = Column(Text)
row5b = Column(Text)
row6a = Column(Text)
row6b = Column(Text)
def to_dict(self):
return {c.name: getattr(self, c.name) for c in self.__table__.columns}
def __repr__(self):
return "<Clin(id='%d', row1='%s', row2='%s', row3a='%s')>" % (self.document_id, self.row1, self.row2, self.row3a)
class CustomComponent(Base):
__tablename__ = 'custom_components'
id = Column(Integer, primary_key=True)
document_id = Column(Integer, ForeignKey('rfqs.id'))
title = Column(String)
name = Column(String)
text = Column(Text)
section = Column(Integer)
def to_dict(self):
return {c.name: getattr(self, c.name) for c in self.__table__.columns}
def __repr__(self):
return "<AdditionalComponent(id='%d', title='%s', text='%s')>" % (self.document_id, self.title, self.text)
``` |
{
"source": "18F/PriceHistoryAuth",
"score": 3
} |
#### File: 18F/PriceHistoryAuth/auth.py
```python
import os
import random,string
import datetime
import LogActivity
import pickle
import hashlib
from PriceHistoryGUI.ppGuiConfig import RelativePathToHashesFile,TokenTimeout
hashes = {}
GLOBAL_BAD_LOGIN = {}
LIMIT_NUMBER_BAD_LOGINS = 5
# We'll make them wait one hour if they have 5 bad logins.
LIMIT_TIME_TO_RETRY = 60*60
# Load from disk
def loadHashes():
hshs_file = RelativePathToHashesFile
if os.path.exists(hshs_file):
with open(hshs_file, "rb") as hs:
hashes = pickle.load(hs)
else:
hashes = {}
return hashes
def record_bad_login(username):
if username not in GLOBAL_BAD_LOGIN:
GLOBAL_BAD_LOGIN[username] = [0,datetime.datetime.now()]
else:
GLOBAL_BAD_LOGIN[username][0] = GLOBAL_BAD_LOGIN[username][0]+1
GLOBAL_BAD_LOGIN[username][1] = datetime.datetime.now()
def does_authenticate(username,cred,p3apisalt):
hashes = loadHashes()
if username in GLOBAL_BAD_LOGIN:
timenow = datetime.datetime.now()
timestamp = GLOBAL_BAD_LOGIN[username][1]
timedelta = timenow - timestamp
if (timedelta >= datetime.timedelta(seconds=LIMIT_TIME_TO_RETRY)):
            # An hour has gone by, so we give them a pass....
GLOBAL_BAD_LOGIN.pop(username, None)
if username in GLOBAL_BAD_LOGIN:
count = GLOBAL_BAD_LOGIN[username][0]
if (count >= LIMIT_NUMBER_BAD_LOGINS):
# Probably should have a separate log message for this..
LogActivity.logTooManyLoginAttempts(username)
return False;
if username not in hashes:
LogActivity.logBadCredentials(username)
record_bad_login(username)
return False;
if hashes[username] == hashlib.sha256(cred+p3apisalt).hexdigest():
return True;
else:
LogActivity.logBadCredentials(username)
record_bad_login(username)
return False;
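# Sketch (assumption about how the hashes file is produced): each entry in the
# pickled dict maps a username to sha256(credential + salt), e.g.
#   hashes['alice'] = hashlib.sha256('secret' + p3apisalt).hexdigest()
#   pickle.dump(hashes, open(RelativePathToHashesFile, 'wb'))
# does_authenticate() recomputes that digest from the submitted credential and
# compares it to the stored value.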
GLOBAL_SESSION_DICT = {}
def create_session_id():
session_id = get_rand_string(13);
acsrf = get_rand_string(13);
timestamp = datetime.datetime.now();
GLOBAL_SESSION_DICT[session_id] = [acsrf,timestamp]
return session_id;
def update_acsrf_nonce_form(session_id):
acsrf = get_rand_string(13);
return update_new_acsrf(session_id,acsrf)
def update_acsrf(session_id):
acsrf = GLOBAL_SESSION_DICT[session_id][0];
return update_new_acsrf(session_id,acsrf)
def update_new_acsrf(session_id,acsrf):
timestamp = datetime.datetime.now();
GLOBAL_SESSION_DICT[session_id] = [acsrf,timestamp]
LogActivity.logDebugInfo("SETTING ACSRF session, acsrf "+session_id+"."+GLOBAL_SESSION_DICT[session_id][0])
return session_id;
CHARS = string.ascii_letters + string.digits
def get_rand_string(length):
return ''.join(random.choice(CHARS) for i in range(length))
def is_valid_acsrf_old(session_id):
if (session_id in GLOBAL_SESSION_DICT):
timestamp = GLOBAL_SESSION_DICT[session_id][1]
timenow = datetime.datetime.now()
timedelta = timenow - timestamp
if (timedelta < datetime.timedelta(seconds=TokenTimeout)):
return True
else:
LogActivity.logTimeout(session_id)
return False
else:
LogActivity.logMissingSession(session_id)
return False;
def is_valid_acsrf(session_id,acsrf):
if (session_id in GLOBAL_SESSION_DICT):
timestamp = GLOBAL_SESSION_DICT[session_id][1]
timenow = datetime.datetime.now()
timedelta = timenow - timestamp
if (timedelta < datetime.timedelta(seconds=TokenTimeout)):
if (acsrf != GLOBAL_SESSION_DICT[session_id][0]):
LogActivity.logDebugInfo("ACSRF Mismatch provided vs. stored :"+acsrf+","+GLOBAL_SESSION_DICT[session_id][0])
return False
else:
return True
else:
LogActivity.logTimeout(session_id)
return False
else:
LogActivity.logMissingSession(session_id)
return False;
def get_acsrf(session_id):
return GLOBAL_SESSION_DICT[session_id][0]
def del_session(session_id):
obj = (GLOBAL_SESSION_DICT.pop(session_id, None))
if session_id in GLOBAL_SESSION_DICT:
LogActivity.logMissingSession(str(session_id)+"failed to remove")
else:
LogActivity.logMissingSession(str(session_id)+"removed")
```
#### File: 18F/PriceHistoryAuth/pycas.py
```python
import requests
# Name field for pycas cookie
PYCAS_NAME = "pycas"
# CAS Staus Codes: returned to calling program by login() function.
CAS_OK = 0 # CAS authentication successful.
CAS_COOKIE_EXPIRED = 1 # PYCAS cookie exceeded its lifetime.
CAS_COOKIE_INVALID = 2 # PYCAS cookie is invalid (probably corrupted).
CAS_TICKET_INVALID = 3 # CAS server ticket invalid.
CAS_GATEWAY = 4 # CAS server returned without ticket while in gateway mode.
# Status codes returned internally by function get_cookie_status().
COOKIE_AUTH = 0 # PYCAS cookie is valid.
COOKIE_NONE = 1 # No PYCAS cookie found.
COOKIE_GATEWAY = 2 # PYCAS gateway cookie found.
COOKIE_INVALID = 3 # Invalid PYCAS cookie found.
COOKIE_EXPIRED = 4 # PYCAS cookie exceeded its lifetime (only used when a lifetime is given).
# Status codes returned internally by function get_ticket_status().
TICKET_OK = 0 # Valid CAS server ticket found.
TICKET_NONE = 1 # No CAS server ticket found.
TICKET_INVALID = 2 # Invalid CAS server ticket found.
TICKET_NOPIV = 3 # Ticket valid, but PIV/assurance-level requirement not met.
CAS_MSG = (
"CAS authentication successful.",
"PYCAS cookie exceeded its lifetime.",
"PYCAS cookie is invalid (probably corrupted).",
"CAS server ticket invalid.",
"CAS server returned without ticket while in gateway mode.",
)
###Optional log file for debugging
LOG_FILE="/tmp/cas.log"
#-----------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------
import os
import cgi
import md5
import time
import urllib
import urlparse
#-----------------------------------------------------------------------
# Functions
#-----------------------------------------------------------------------
# For debugging.
def writelog(msg):
with open(LOG_FILE,"a") as f:
timestr = time.strftime("%Y-%m-%d %H:%M:%S ");
f.write(timestr + msg + "\n");
# Used for parsing xml. Search str for the first occurrence of
# <tag>.....</tag> and return the text (stripped of leading and
# trailing whitespace) between the tags. Return "" if the tag is not
# found.
def parse_tag(s,tag):
tag1_pos1 = s.find("<" + tag)
# No tag found, return empty string.
if tag1_pos1==-1: return ""
tag1_pos2 = s.find(">",tag1_pos1)
if tag1_pos2==-1: return ""
tag2_pos1 = s.find("</" + tag,tag1_pos2)
if tag2_pos1==-1: return ""
return s[tag1_pos2+1:tag2_pos1].strip()
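# Example (illustrative): extracting the user id from a CAS 2.0 validation response.
#   parse_tag("<cas:user>jdoe</cas:user>", "cas:user")  == "jdoe"
#   parse_tag("<cas:user>jdoe</cas:user>", "cas:proxy") == ""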
# Split string in exactly two pieces, return '' for missing pieces.
def split2(s,sep):
parts = s.split(sep,1) + ["",""]
return parts[0], parts[1]
# Use MD5 and the shared secret to hash a string (first 8 hex chars).
def makehash(s, secret):
    m = md5.new()
    m.update(s)
    m.update(secret)
    return m.hexdigest()[0:8]
# Form cookie
def make_pycas_cookie(val, domain, path, secure, expires=None):
cookie = "Set-Cookie: %s=%s;domain=%s;path=%s" % (PYCAS_NAME, val, domain, path)
if secure:
cookie += ";secure"
if expires:
cookie += ";expires=" + expires
return cookie
# Send redirect to client. This function does not return, i.e. it terminates this script.
def do_redirect(cas_host, service_url, opt, secure):
cas_url = cas_host + "/cas/login?service=" + service_url
if opt in ("renew","gateway"):
cas_url += "&%s=true" % opt
# Print redirect page to browser
print "Refresh: 0; url=%s" % cas_url
print "Content-type: text/html"
if opt=="gateway":
domain,path = urlparse.urlparse(service_url)[1:3]
print make_pycas_cookie("gateway", domain, path, secure)
print """
If your browser does not redirect you, then please follow <a href="%s">this link</a>.
""" % (cas_url)
raise SystemExit
# Build the CAS login URL for a client redirect and return it as a string.
def get_url_redirect_as_string(cas_host, service_url, opt, secure):
cas_url = cas_host + "/cas/login?service=" + service_url
if opt in ("renew","gateway"):
cas_url += "&%s=true" % opt
# Print redirect page to browser
return cas_url
def get_cookie_as_string(cas_host, service_url, opt, secure):
if opt=="gateway":
domain,path = urlparse.urlparse(service_url)[1:3]
return make_pycas_cookie("gateway", domain, path, secure)
# I'm not sure what the Else should be---it would have been clearer if
# this were functional rather than imperative.
# Retrieve id from pycas cookie and test data for validity
# (to prevent mailicious users from falsely authenticating).
# Return status and id (id will be empty string if unknown).
def decode_cookie(cookie_vals,cas_secret,lifetime=None):
    # Test for no cookies
if cookie_vals is None:
return COOKIE_NONE, ""
# Test each cookie value
cookie_attrs = []
for cookie_val in cookie_vals:
# Remove trailing ;
if cookie_val and cookie_val[-1]==";":
cookie_val = cookie_val[0:-1]
# Test for pycas gateway cookie
if cookie_val=="gateway":
cookie_attrs.append(COOKIE_GATEWAY)
# Test for valid pycas authentication cookie.
else:
# Separate cookie parts
oldhash = cookie_val[0:8]
timestr, id_ = split2(cookie_val[8:],":")
# Verify hash
            newhash = makehash(timestr + ":" + id_, cas_secret)
            if oldhash == newhash:
                # Check lifetime
                if lifetime:
                    # Cookie is valid only while creation time + lifetime is still in the future.
                    if int(timestr) + int(lifetime) > int(time.time()):
                        # OK: Cookie still valid.
                        cookie_attrs.append(COOKIE_AUTH)
                    else:
                        # ERROR: Cookie exceeded lifetime
                        cookie_attrs.append(COOKIE_EXPIRED)
                else:
                    # OK: Cookie valid (it has no lifetime)
                    cookie_attrs.append(COOKIE_AUTH)
            else:
                # ERROR: Cookie values are not consistent
                cookie_attrs.append(COOKIE_INVALID)
# Return status according to attribute values
# Valid authentication cookie takes precedence
if COOKIE_AUTH in cookie_attrs:
return COOKIE_AUTH, id_
# Gateway cookie takes next precedence
if COOKIE_GATEWAY in cookie_attrs:
return COOKIE_GATEWAY, ""
# If we've gotten here, there should be only one attribute left.
return cookie_attrs[0], ""
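# Cookie layout (as produced in check_authenticated_p below; example values are
# illustrative): an 8-character MD5 prefix, the creation timestamp, a colon, then
# the user id, e.g.
#   "3f2a9c1b" + "1428000000" + ":" + "jdoe"
# decode_cookie() recomputes the hash over "timestamp:id" with the shared secret
# and only trusts the cookie when the prefixes match.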
# Validate ticket using cas 1.0 protocol
def validate_cas_1(cas_host, service_url, ticket):
# Second Call to CAS server: Ticket found, verify it.
cas_validate = cas_host + "/cas/validate?ticket=" + ticket + "&service=" + service_url
f_validate = urllib.urlopen(cas_validate)
# Get first line - should be yes or no
response = f_validate.readline()
# Ticket does not validate, return error
if response=="no\n":
f_validate.close()
return TICKET_INVALID, ""
# Ticket validates
else:
# Get id
id_ = f_validate.readline()
f_validate.close()
id_ = id_.strip()
return TICKET_OK, id_
# Validate ticket using cas 2.0 protocol
# The 2.0 protocol allows the use of the mutually exclusive "renew" and "gateway" options.
def validate_cas_2(cas_host, service_url, ticket, opt):
# Second Call to CAS server: Ticket found, verify it.
cas_validate = cas_host + "/cas/serviceValidate?ticket=" + ticket + "&service=" + service_url
if opt:
cas_validate += "&%s=true" % opt
f_validate = urllib.urlopen(cas_validate)
# Get first line - should be yes or no
response = f_validate.read()
id_ = parse_tag(response,"cas:user")
# Ticket does not validate, return error
if id_ == "":
return TICKET_INVALID, ""
# Ticket validates
else:
return TICKET_OK, id_
# Validate ticket using cas 2.0 protocol
# The 2.0 protocol allows the use of the mutually exclusive "renew" and "gateway" options.
def validate_cas_2x_urllib(cas_host, cas_proxy, service_url, ticket, opt):
# Second Call to CAS server: Ticket found, verify it.
cas_validate = cas_host + "/cas/serviceValidate?ticket=" + ticket + "&service=" + service_url
if opt:
cas_validate += "&%s=true" % opt
f_validate = urllib.urlopen(cas_validate)
# Get first line - should be yes or no
response = f_validate.read()
id_ = parse_tag(response,"cas:user")
# Ticket does not validate, return error
    if id_ == "":
return TICKET_INVALID, "", "", ""
# Ticket validates
else:
pivcard = parse_tag(response,"maxAttribute:samlAuthenticationStatementAuthMethod")
agencyThatRequired = parse_tag(response,"maxAttribute:EAuth-LOA")
return TICKET_OK, id_, pivcard, agencyThatRequired
def validate_cas_2x(cas_host, cas_proxy, service_url, ticket, opt):
# Second Call to CAS server: Ticket found, verify it.
cas_validate = cas_host + "/cas/serviceValidate?ticket=" + ticket + "&service=" + service_url
if opt:
cas_validate += "&%s=true" % opt
# writelog("cas_validate = "+cas_validate)
# f_validate = urllib.urlopen(cas_validate)
# Get first line - should be yes or no
# response = f_validate.read()
# writelog("response = "+response)
r = requests.get(cas_validate,proxies=cas_proxy)
response = r.text
id_ = parse_tag(response,"cas:user")
# Ticket does not validate, return error
if id_ == "":
return TICKET_INVALID, "", "", ""
# Ticket validates
else:
# writelog("validate response = "+response)
pivcard = parse_tag(response,"maxAttribute:samlAuthenticationStatementAuthMethod")
eauth_but_not_valid = parse_tag(response,"maxAttribute:EAuth-LOA")
# writelog("pivcard = "+pivcard)
# writelog("agencyThatRequired = "+agencyThatRequired)
return TICKET_OK, id_, pivcard, eauth_but_not_valid
# Read cookies from env variable HTTP_COOKIE.
def get_cookies():
# Read all cookie pairs
try:
cookie_pairs = os.getenv("HTTP_COOKIE").split()
except AttributeError:
cookie_pairs = []
cookies = {}
for cookie_pair in cookie_pairs:
key,val = split2(cookie_pair.strip(),"=")
if cookies.has_key(key):
cookies[key].append(val)
else:
cookies[key] = [val,]
return cookies
# Check pycas cookie
def get_cookie_status(cas_secret):
cookies = get_cookies()
return decode_cookie(cookies.get(PYCAS_NAME),cas_secret)
def get_ticket_status(cas_host,service_url,protocol,opt):
if cgi.FieldStorage().has_key("ticket"):
ticket = cgi.FieldStorage()["ticket"].value
return get_ticket_status_from_ticket(ticket,cas_host,service_url,protocol,opt)
else:
writelog("returning TICKET_NONE ")
return TICKET_NONE, ""
def get_ticket_status_from_ticket(ticket,cas_host,service_url,protocol,opt):
if protocol==1:
        # CAS 1.0 validation does not take the renew/gateway option.
        ticket_status, id_ = validate_cas_1(cas_host, service_url, ticket)
else:
ticket_status, id_=validate_cas_2(cas_host, service_url, ticket, opt)
# writelog("ticket status"+repr(ticket_status))
# Make cookie and return id
if ticket_status==TICKET_OK:
return TICKET_OK, id_
# Return error status
else:
return ticket_status, ""
def get_ticket_status_from_ticket_piv_required(assurancelevel_p,ticket,cas_host,cas_proxy,service_url,protocol,opt):
if protocol==1:
        # CAS 1.0 validation does not take the renew/gateway option.
        ticket_status, id_ = validate_cas_1(cas_host, service_url, ticket)
else:
ticket_status, id_, piv, eauth=validate_cas_2x(cas_host, cas_proxy, service_url, ticket, opt)
# writelog("ticket status"+repr(ticket_status))
# writelog("piv status"+repr(piv))
# writelog("pivx status"+repr(pivx))
# Make cookie and return id
# MAX is actually returning a value here (in pivx), I think I need
# to search for "assurancelevel3", because it is sending
# "assurance2" when there is no PIV card!
# if ticket_status==TICKET_OK and (piv == "urn:max:fips-201-pivcard" or (assurancelevel_p(pivx))):
# This is supposed to be a simple boolean! But...
# it is returning a set containing a boolean! I know not why.
if ticket_status==TICKET_OK and (True in assurancelevel_p(eauth,piv)):
return TICKET_OK, id_
# Return error status
else:
if ticket_status==TICKET_OK:
return TICKET_NOPIV, ""
else:
return TICKET_NONE, ""
#-----------------------------------------------------------------------
# Exported functions
#-----------------------------------------------------------------------
# This function should be merged with the above function "login"
# Note: assurance_level_p is a function applied to the returned attributes.
# It takes two arguments and should look something like this:
# CAS_LEVEL_OF_ASSURANCE_PREDICATE_LOA2_AND_PIV = lambda loa,piv: {
# (("http://idmanagement.gov/icam/2009/12/saml_2.0_profile/assurancelevel2" == loa)
# or
# ("http://idmanagement.gov/icam/2009/12/saml_2.0_profile/assurancelevel3" == loa))
# and
# ("urn:max:fips-201-pivcard" == piv)
#
# }
def check_authenticated_p(assurance_level_p,ticket,cas_host,cas_proxy,cas_secret,service_url, lifetime=None, secure=1, protocol=2, path="/", opt=""):
# writelog("login begun")
# Check cookie for previous pycas state, with is either
# COOKIE_AUTH - client already authenticated by pycas.
# COOKIE_GATEWAY - client returning from CAS_SERVER with gateway option set.
# Other cookie status are
# COOKIE_NONE - no cookie found.
# COOKIE_INVALID - invalid cookie found.
cookie_status, id_ = get_cookie_status(cas_secret)
# writelog("got cookie status")
if cookie_status==COOKIE_AUTH:
writelog("CAS_OK")
return CAS_OK, id_, ""
if cookie_status==COOKIE_INVALID:
return CAS_COOKIE_INVALID, "", ""
# Check ticket ticket returned by CAS server, ticket status can be
# TICKET_OK - a valid authentication ticket from CAS server
# TICKET_INVALID - an invalid authentication ticket.
# TICKET_NONE - no ticket found.
# If ticket is ok, then user has authenticated, return id and
# a pycas cookie for calling program to send to web browser.
# writelog("getting cookie status")
ticket_status, id_ = get_ticket_status_from_ticket_piv_required(assurance_level_p,ticket,cas_host,cas_proxy,service_url,protocol,opt)
if ticket_status==TICKET_OK:
timestr = str(int(time.time()))
hash_ = makehash(timestr + ":" + id_, cas_secret)
cookie_val = hash_ + timestr + ":" + id_
domain = urlparse.urlparse(service_url)[1]
return CAS_OK, id_, make_pycas_cookie(cookie_val, domain, path, secure)
elif ticket_status==TICKET_INVALID:
return CAS_TICKET_INVALID, "", ""
    # If unauthenticated and in gateway mode, return gateway status and clear
# pycas cookie (which was set to gateway by do_redirect()).
if opt=="gateway":
if cookie_status==COOKIE_GATEWAY:
domain,path = urlparse.urlparse(service_url)[1:3]
# Set cookie expiration in the past to clear the cookie.
past_date = time.strftime("%a, %d-%b-%Y %H:%M:%S %Z",time.localtime(time.time()-48*60*60))
return CAS_GATEWAY, "", make_pycas_cookie("",domain,path,secure,past_date)
return CAS_TICKET_INVALID, "", ""
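# Minimal usage sketch for check_authenticated_p (illustrative only: the host,
# proxy, secret, and service URL values are placeholders, and the predicate is
# the example constant described above):
#
# status, user_id, cookie = check_authenticated_p(
#     CAS_LOA2_OR_LOA3_AND_PIV, ticket,
#     "cas.example.gov", "cas-proxy.example.gov", "some-shared-secret",
#     "https://app.example.gov/cgi-bin/app.cgi")
# if status == CAS_OK:
#     print("Set-Cookie: " + cookie)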
``` |
{
"source": "18F/project-monitor",
"score": 3
} |
#### File: project-monitor/projmon/test.py
```python
from __future__ import absolute_import, division, print_function
from future import standard_library; standard_library.install_aliases()
import json, unittest, re
from operator import itemgetter
from . import PROJECTS_FILE
class TestProjects (unittest.TestCase):
def test_projects(self):
        '''
        Check that every project in projects.json has a unique, well-formed GUID,
        a unique name, and a Travis URL.
        '''
with open(PROJECTS_FILE) as file:
projects = json.load(file)
guids = list(map(itemgetter('guid'), projects))
self.assertEqual(len(guids), len(projects), 'A GUID in every project')
self.assertEqual(len(guids), len(set(guids)), 'Non-unique GUIDs')
matches = [bool(re.match(r'^\w+(-\w+)*$', guid)) for guid in guids]
self.assertFalse(False in matches, r'GUIDs all match "^\w+(-\w+)*$"')
names = list(map(itemgetter('name'), projects))
self.assertEqual(len(names), len(projects), 'A name in every project')
self.assertEqual(len(names), len(set(names)), 'Non-unique names')
urls = list(map(itemgetter('travis url'), projects))
self.assertEqual(len(urls), len(projects), 'A URL in every project')
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "18F/projects",
"score": 2
} |
#### File: management/commands/import_tock_projects.py
```python
import textwrap
import logging
import json
from argparse import RawTextHelpFormatter
from django.core.management.base import BaseCommand
from django.db import transaction
from django.utils.text import slugify
from projects.models import Project
logger = logging.getLogger(__name__)
class DryRunFinished(Exception):
pass
class Command(BaseCommand):
help = textwrap.dedent("""
Imports projects exported from the Tock API.
To export projects from the Tock API, save the following URL
while logged-in to Tock from your browser:
https://tock.18f.gov/api/projects.json?page_size=1000
""")
def create_parser(self, *args, **kwargs):
# http://stackoverflow.com/a/35470682
parser = super(Command, self).create_parser(*args, **kwargs)
parser.formatter_class = RawTextHelpFormatter
return parser
def add_arguments(self, parser):
parser.add_argument(
'filename',
help='Path to JSON export from Tock API'
)
parser.add_argument(
'--dry-run',
default=False,
help='Don\'t commit imported data to database.',
action='store_true'
)
def load(self, filename):
with open(filename) as f:
content = json.load(f)
assert content['next'] is None and content['previous'] is None
results = content['results']
for result in results:
tock_id = result['id']
name = result['name']
logname = '%s (#%d)' % (name, tock_id)
if Project.objects.filter(tock_id=tock_id).exists():
logger.info('%s exists.' % logname)
continue
print("Creating entry for %s." % logname)
project = Project(
name=name,
slug=slugify(name),
is_billable=result['billable'],
tock_id=tock_id
)
project.save()
def handle(self, **options):
try:
with transaction.atomic():
self.load(options['filename'])
if options['dry_run']:
raise DryRunFinished()
except DryRunFinished:
logger.info('Dry run complete.')
```
#### File: projects/tests/test_forms.py
```python
from django.test import TestCase
from ..forms import ProjectForm
class ProjectFormTestCase(TestCase):
def test_form_contains_autocomplete_url(self):
self.assertTrue('/client-autocomplete/' in ProjectForm().as_p())
```
#### File: projects/tests/test_migrations.py
```python
from unittest import SkipTest
from django.apps import apps
from django.test import TransactionTestCase
from django.db.migrations.executor import MigrationExecutor
from django.db import connection
from django.conf import settings
from test_without_migrations.management.commands.test import DisableMigrations
class MigrationTestCase(TransactionTestCase):
# https://www.caktusgroup.com/blog/2016/02/02/writing-unit-tests-django-migrations/
@property
def app(self):
return apps.get_containing_app_config(type(self).__module__).name
migrate_from = None
migrate_to = None
def setUp(self):
if isinstance(settings.MIGRATION_MODULES, DisableMigrations):
raise SkipTest('migrations are disabled')
assert self.migrate_from and self.migrate_to, \
("TestCase '{}' must define migrate_from and "
"migrate_to properties".format(type(self).__name__))
self.migrate_from = [(self.app, self.migrate_from)]
self.migrate_to = [(self.app, self.migrate_to)]
executor = MigrationExecutor(connection)
old_apps = executor.loader.project_state(self.migrate_from).apps
# Reverse to the original migration
executor.migrate(self.migrate_from)
self.setUpBeforeMigration(old_apps)
# This wasn't part of the original code from the blog post, but
# was mentioned as a required workaround in the comments.
executor = MigrationExecutor(connection)
# Run the migration to test
executor.migrate(self.migrate_to)
self.apps = executor.loader.project_state(self.migrate_to).apps
def setUpBeforeMigration(self, apps):
pass
class TestPopulateIsBillable(MigrationTestCase):
migrate_from = '0012_add_is_billable'
migrate_to = '0013_populate_is_billable'
BILLABLE = 0
NON_BILLABLE = 1
def setUpBeforeMigration(self, apps):
Project = apps.get_model('projects', 'Project')
self.billable_id = Project.objects.create(
billable=self.BILLABLE
).id
self.non_billable_id = Project.objects.create(
billable=self.NON_BILLABLE
).id
def test_migration_works(self):
Project = self.apps.get_model('projects', 'Project')
self.assertTrue(
Project.objects.get(id=self.billable_id).is_billable
)
self.assertFalse(
Project.objects.get(id=self.non_billable_id).is_billable,
)
``` |
{
"source": "18F/pulse-labs",
"score": 2
} |
#### File: pulse-labs/data/env.py
```python
import os
import sys
import yaml
DATA_DIR = os.path.dirname(__file__)
# App-level metadata.
META = yaml.safe_load(open(os.path.join(DATA_DIR, "../meta.yml")))
DOMAINS = os.environ.get("DOMAINS", META["data"]["domains_url"])
# domain-scan paths (MUST be set in env)
SCAN_COMMAND = os.environ.get("DOMAIN_SCAN_PATH", None)
GATHER_COMMAND = os.environ.get("DOMAIN_GATHER_PATH", None)
# post-processing and uploading information
PARENTS_DATA = os.path.join(DATA_DIR, "./output/parents")
PARENTS_RESULTS = os.path.join(DATA_DIR, "./output/parents/results")
SUBDOMAIN_DATA = os.path.join(DATA_DIR, "./output/subdomains")
SUBDOMAIN_DATA_GATHERED = os.path.join(DATA_DIR, "./output/subdomains/gather")
SUBDOMAIN_DATA_SCANNED = os.path.join(DATA_DIR, "./output/subdomains/scan")
DB_DATA = os.path.join(DATA_DIR, "./db.json")
BUCKET_NAME = META['bucket']
AWS_REGION = META['aws_region']
# DAP source data
ANALYTICS_URL = META["data"]["analytics_url"]
# a11y source data
A11Y_CONFIG = META["a11y"]["config"]
A11Y_REDIRECTS = META["a11y"]["redirects"]
### Parent domain scanning information
#
scanner_string = os.environ.get("SCANNERS", "pshtt,sslyze,analytics,a11y,third_parties")
SCANNERS = scanner_string.split(",")
### subdomain gathering/scanning information
GATHER_SUFFIXES = os.environ.get("GATHER_SUFFIXES", ".gov,.fed.us")
# names and options must be in corresponding order
GATHERER_NAMES = [
"censys-snapshot", "rdns-snapshot",
"dap", "eot2016", "parents"
]
GATHERER_OPTIONS = [
"--censys-snapshot=%s" % META["data"]["censys_snapshot_url"],
"--rdns-snapshot=%s" % META["data"]["rdns_snapshot_url"],
"--dap=%s" % META["data"]["analytics_subdomains_url"],
"--eot2016=%s" % META["data"]["eot_subdomains_url"],
"--parents=%s" % DOMAINS
]
# Run these scanners over *all* (which is a lot) discovered subdomains.
SUBDOMAIN_SCANNERS = ["pshtt", "sslyze"]
# Used if --lambda is enabled during the scan.
LAMBDA_WORKERS = 900
# Quick and dirty CLI options parser.
def options():
options = {}
for arg in sys.argv[1:]:
if arg.startswith("--"):
if "=" in arg:
key, value = arg.split('=')
else:
key, value = arg, "true"
key = key.split("--")[1]
key = key.lower()
value = value.lower()
if value == 'true': value = True
elif value == 'false': value = False
options[key] = value
return options
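# Usage sketch for options() (the flags below are illustrative; it accepts any
# "--key" or "--key=value" pair):
#   python env.py --scan=here --lambda
# would yield {'scan': 'here', 'lambda': True}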
``` |
{
"source": "18F/raktabija",
"score": 2
} |
#### File: raktabija/scripts/config_go.py
```python
import argparse
import xml.etree.ElementTree as et
import chandika_client
import re
def add_pipeline(pipelines, name, url):
pipeline = et.SubElement(pipelines, 'pipeline', {'name':name})
materials = et.SubElement(pipeline, 'materials')
git = et.SubElement(materials, 'git', {'url':url, 'branch':'deploy'})
stage = et.SubElement(pipeline,'stage', {'name':'bootstrap'})
jobs = et.SubElement(stage,'jobs')
job = et.SubElement(jobs,'job', {'name':'exec'})
tasks = et.SubElement(job,'tasks')
et.SubElement(tasks, 'exec', {'args':'deploy', 'command':'/bin/bash'})
parser = argparse.ArgumentParser(description='Configure go.')
parser.add_argument('config', help="Go's config file")
parser.add_argument('chandika', help="Chandika's hostname")
parser.add_argument('chandika_api_key', help="Chandika API key")
args = parser.parse_args()
# get go server id from original config file
tree = et.parse(args.config)
root = tree.getroot()
schemaVersion = root.get('schemaVersion')
server = root.findall('server')[0]
serverId = server.get('serverId')
# get template
tree = et.parse('packer/cookbooks/gocd/templates/cruise-config.xml.erb')
root = tree.getroot()
root.set('schemaVersion', schemaVersion)
pipelines = root.findall('pipelines')[0]
# set server id
server = root.findall('server')[0]
server.set('serverId', serverId)
# add new pipeline for kali
pipeline = et.SubElement(pipelines, 'pipeline', {'name':'kali'})
timer = et.SubElement(pipeline, 'timer', {'onlyOnChanges':'false'})
timer.text = '0 0 22 ? * *'
materials = et.SubElement(pipeline, 'materials')
git = et.SubElement(materials, 'git', {'url':'https://github.com/18F/raktabija.git'})
vcs_filter = et.SubElement(git, 'filter')
et.SubElement(vcs_filter, 'ignore', {'pattern':'**'})
stage = et.SubElement(pipeline,'stage', {'name':'bootstrap'})
jobs = et.SubElement(stage,'jobs')
job = et.SubElement(jobs,'job', {'name':'exec'})
tasks = et.SubElement(job,'tasks')
et.SubElement(tasks, 'exec', {'args':'scripts/kali_cron', 'command':'/bin/bash'})
aws_creds = chandika_client.aws_credentials()
account = chandika_client.chandika_metadata(aws_creds['account_id'], args.chandika, args.chandika_api_key)
urls = {}
for system in account['Systems']:
repository = system['Repository']
if repository and repository.strip():
name = re.sub('[^A-Za-z0-9_]', '_', system['Name'])
urls[name] = repository
for name in urls:
add_pipeline(pipelines, name, urls[name])
# write out config file
tree.write(args.config)
``` |
{
"source": "18F/snap-api-prototype",
"score": 2
} |
#### File: snap_financial_factors/income/net_income.py
```python
from typing import Dict
from snap_financial_factors.input_data.input_data import InputData
from snap_financial_factors.deductions.earned_income_deduction import EarnedIncomeDeduction
from snap_financial_factors.deductions.dependent_care_deduction import DependentCareDeduction
from snap_financial_factors.deductions.medical_expenses_deduction import MedicalExpensesDeduction
from snap_financial_factors.deductions.child_support_payments_deduction import ChildSupportPaymentsDeduction
from snap_financial_factors.deductions.standard_deduction import StandardDeduction
from snap_financial_factors.deductions.excess_shelter_deduction import ExcessShelterDeduction
from snap_financial_factors.income.income_result import IncomeResult
class NetIncome:
'''
Returns the adjusted net income (gross income minus deductions).
'''
def __init__(self,
input_data: InputData,
gross_income: int,
standard_deductions: Dict,
max_shelter_deductions: Dict,
child_support_payments_treatment: str,
mandatory_standard_utility_allowances: bool,
standard_utility_allowances: Dict,
standard_medical_deduction: bool,
standard_medical_deduction_amount: int) -> None:
# Load user input data
self.input_data = input_data
self.monthly_job_income = input_data.monthly_job_income
self.state_or_territory = input_data.state_or_territory
self.household_size = input_data.household_size
self.dependent_care_costs = input_data.dependent_care_costs
self.household_includes_elderly_or_disabled = input_data.household_includes_elderly_or_disabled
self.medical_expenses_for_elderly_or_disabled = input_data.medical_expenses_for_elderly_or_disabled
self.court_ordered_child_support_payments = input_data.court_ordered_child_support_payments
self.rent_or_mortgage = input_data.rent_or_mortgage
self.homeowners_insurance_and_taxes = input_data.homeowners_insurance_and_taxes
self.utility_costs = input_data.utility_costs
self.utility_allowance = input_data.utility_allowance
# Load calculated inputs
self.gross_income = gross_income
# Load state-level data
self.standard_deductions = standard_deductions
self.max_shelter_deductions = max_shelter_deductions
self.child_support_payments_treatment = child_support_payments_treatment
self.mandatory_standard_utility_allowances = mandatory_standard_utility_allowances
self.standard_utility_allowances = standard_utility_allowances
self.standard_medical_deduction = standard_medical_deduction
self.standard_medical_deduction_amount = standard_medical_deduction_amount
def calculate(self):
explanation = []
explanation_intro = (
'To find out if this household is eligible for SNAP and estimate ' +
'the benefit amount, we start by calculating net income. Net income ' +
'is equal to total gross monthly income, minus deductions.'
)
explanation.append(explanation_intro)
# Add up income.
income_explanation = (
"Let's start with total household income. " +
f"This household's gross income is ${self.gross_income}."
)
explanation.append(income_explanation)
# Add up deductions:
deductions_before_excess_shelter = [
StandardDeduction(
state_or_territory=self.state_or_territory,
household_size=self.household_size,
standard_deductions=self.standard_deductions
),
EarnedIncomeDeduction(monthly_job_income=self.monthly_job_income),
DependentCareDeduction(dependent_care_costs=self.dependent_care_costs),
MedicalExpensesDeduction(
household_includes_elderly_or_disabled=self.household_includes_elderly_or_disabled,
medical_expenses_for_elderly_or_disabled=self.medical_expenses_for_elderly_or_disabled,
standard_medical_deduction=self.standard_medical_deduction,
standard_medical_deduction_amount=self.standard_medical_deduction_amount
),
ChildSupportPaymentsDeduction(
child_support_payments_treatment=self.child_support_payments_treatment,
court_ordered_child_support_payments=self.court_ordered_child_support_payments,
)
]
deduction_results_before_excess_shelter = []
for deduction in deductions_before_excess_shelter:
calculation = deduction.calculate()
deduction_explanations = calculation.explanation
# Append each deduction's explanations to overall Net Income explanation
for deduction_explanation in deduction_explanations:
explanation.append(deduction_explanation)
deduction_results_before_excess_shelter.append(calculation.result)
total_deductions_before_excess_shelter = sum(deduction_results_before_excess_shelter)
adjusted_income_before_excess_shelter = (
self.gross_income - total_deductions_before_excess_shelter
)
        excess_shelter_deduction_calculator = ExcessShelterDeduction(
adjusted_income=adjusted_income_before_excess_shelter,
rent_or_mortgage=self.rent_or_mortgage,
homeowners_insurance_and_taxes=self.homeowners_insurance_and_taxes,
household_includes_elderly_or_disabled=self.household_includes_elderly_or_disabled,
state_or_territory=self.state_or_territory,
household_size=self.household_size,
max_shelter_deductions=self.max_shelter_deductions,
utility_costs=self.utility_costs,
utility_allowance=self.utility_allowance,
mandatory_standard_utility_allowances=self.mandatory_standard_utility_allowances,
standard_utility_allowances=self.standard_utility_allowances,
)
        excess_shelter_calculation = excess_shelter_deduction_calculator.calculate()
excess_shelter_calculation_result = excess_shelter_calculation.result
excess_shelter_calculation_explanation = excess_shelter_calculation.explanation
deduction_results = (
deduction_results_before_excess_shelter + [excess_shelter_calculation_result]
)
total_deductions_value = sum(deduction_results)
for deduction_explanation in excess_shelter_calculation_explanation:
explanation.append(deduction_explanation)
total_deductions_explanation = (
"Next, we add all applicable deductions together: "
)
explanation.append(total_deductions_explanation)
explanation.append('')
# Construct math explanation for total deductions:
total_deductions_math_explanation = ''
applicable_deductions = [
deduction for deduction in deduction_results if deduction > 0
]
applicable_deductions_len = len(applicable_deductions)
for index, deduction_value in enumerate(applicable_deductions):
if index == (applicable_deductions_len - 1):
total_deductions_math_explanation += f"${deduction_value} = "
else:
total_deductions_math_explanation += f"${deduction_value} + "
total_deductions_math_explanation += f"${total_deductions_value}"
explanation.append(total_deductions_math_explanation)
total_deductions_summary = (
f"The total of all deductions is <strong>${total_deductions_value}</strong>. "
)
explanation.append(total_deductions_summary)
net_income = self.gross_income - total_deductions_value
# Adjusted net income can't be negative
if 0 > net_income:
net_income = 0
calculation_explanation = (
f"Gross income (<strong>${self.gross_income}</strong>) minus " +
f"total deductions (<strong>${total_deductions_value}</strong>) " +
f"equals net income: <strong>${net_income}.</strong>"
)
explanation.append(calculation_explanation)
return IncomeResult(
name='Net Income',
result=net_income,
explanation=explanation,
sort_order=1
)
```
#### File: snap_financial_factors/program_data_api/fetch_program_data.py
```python
import yaml
import pkgutil
class FetchProgramData:
'''
Small class to wrap fetching and parsing program data from static YAML files.
'''
def __init__(self, filename: str) -> None:
self.filename = filename
def parse_data(self):
raw_data = pkgutil.get_data(
"snap_financial_factors.program_data", self.filename
)
return yaml.safe_load(raw_data)
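# Usage sketch (the YAML filename is illustrative; callers pass whichever
# program-data file they need):
#   deductions = FetchProgramData("standard_deductions.yaml").parse_data()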
``` |
{
"source": "18F/State-TalentMAP-API",
"score": 2
} |
#### File: talentmap_api/bidding/models.py
```python
import logging
from django.utils import timezone
from django.db.models import Q, Value, Case, When, BooleanField
from django.db import models
from django.db.models.signals import pre_save, post_save, post_delete, m2m_changed
from django.dispatch import receiver
from django.contrib.postgres.fields import ArrayField
from simple_history.models import HistoricalRecords
from djchoices import DjangoChoices, ChoiceItem
import talentmap_api.position.models
from talentmap_api.common.models import StaticRepresentationModel
from talentmap_api.messaging.models import Notification
from talentmap_api.user_profile.models import UserProfile
class BidCycle(StaticRepresentationModel):
'''
The bid cycle model represents an individual bidding cycle
'''
name = models.TextField(null=False, help_text="The name of the bid cycle")
cycle_start_date = models.DateTimeField(null=True, help_text="The start date for the bid cycle")
cycle_deadline_date = models.DateTimeField(null=True, help_text="The deadline date for the bid cycle")
cycle_end_date = models.DateTimeField(null=True, help_text="The end date for the bid cycle")
active = models.BooleanField(default=False, help_text="Whether this bidcycle is active or not")
positions = models.ManyToManyField('position.Position', related_name="bid_cycles")
history = HistoricalRecords()
_id = models.TextField(null=True)
_positions_seq_nums = ArrayField(models.TextField(), default=list)
_category_code = models.TextField(null=True)
def __str__(self):
return f"{self.name}"
@property
def annotated_positions(self):
'''
Returns a queryset of all positions, annotated with whether it is accepting bids or not
'''
bids = self.bids.filter(Bid.get_unavailable_status_filter()).values_list('position_id', flat=True)
case = Case(When(id__in=bids,
then=Value(False)),
default=Value(True),
output_field=BooleanField())
positions = self.positions.annotate(accepting_bids=case)
return positions
def update_relationships(self):
# For each position in our _positions_seq_nums, find it and add it to our positions
pos = talentmap_api.position.models.Position.objects.filter(_seq_num__in=self._positions_seq_nums)
self.positions.add(*list(pos))
class Meta:
managed = True
ordering = ["cycle_start_date"]
class BiddingStatus(StaticRepresentationModel):
'''
The status of the bid in a given bid cycle
'''
bidcycle = models.ForeignKey('bidding.BidCycle', on_delete=models.CASCADE, related_name="statuses")
position = models.ForeignKey('position.Position', on_delete=models.CASCADE, related_name="bid_cycle_statuses")
status_code = models.CharField(max_length=120, default="OP", null=True, help_text="Cycle status code")
status = models.CharField(max_length=120, default="Open", null=True, help_text="Cycle status text")
class Meta:
managed = True
ordering = ["bidcycle__cycle_start_date"]
class StatusSurvey(StaticRepresentationModel):
'''
    The status survey model represents eligibility status self-identification information
on a per-bidcycle basis
'''
user = models.ForeignKey("user_profile.UserProfile", on_delete=models.CASCADE, related_name="status_surveys")
bidcycle = models.ForeignKey(BidCycle, on_delete=models.DO_NOTHING, related_name="status_surveys")
is_differential_bidder = models.BooleanField(default=False)
is_fairshare = models.BooleanField(default=False)
is_six_eight = models.BooleanField(default=False)
class Meta:
managed = True
ordering = ["bidcycle"]
unique_together = (("user", "bidcycle"),)
class UserBidStatistics(StaticRepresentationModel):
'''
Stores bid statistics for any particular bidcycle for each user
'''
bidcycle = models.ForeignKey('bidding.BidCycle', on_delete=models.CASCADE, related_name='user_bid_statistics')
user = models.ForeignKey('user_profile.UserProfile', on_delete=models.CASCADE, related_name='bid_statistics')
draft = models.IntegerField(default=0)
submitted = models.IntegerField(default=0)
handshake_offered = models.IntegerField(default=0)
handshake_accepted = models.IntegerField(default=0)
handshake_declined = models.IntegerField(default=0)
in_panel = models.IntegerField(default=0)
approved = models.IntegerField(default=0)
declined = models.IntegerField(default=0)
closed = models.IntegerField(default=0)
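    # Note: the counter fields above intentionally share their names with the
    # Bid.Status codes, which is what lets update_statistics() populate them
    # with setattr().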
def update_statistics(self):
for status_code, _ in Bid.Status.choices:
setattr(self, status_code, Bid.objects.filter(user=self.user, bidcycle=self.bidcycle, status=status_code).count())
self.save()
class Meta:
managed = True
ordering = ["bidcycle__cycle_start_date"]
unique_together = (("bidcycle", "user",),)
class Bid(StaticRepresentationModel):
'''
The bid object represents an individual bid, the position, user, and process status
'''
MAXIMUM_SUBMITTED_BIDS = 10
class Status(DjangoChoices):
draft = ChoiceItem("draft")
submitted = ChoiceItem("submitted")
handshake_offered = ChoiceItem("handshake_offered", "handshake_offered")
handshake_accepted = ChoiceItem("handshake_accepted", "handshake_accepted")
handshake_declined = ChoiceItem("handshake_declined", "handshake_declined")
in_panel = ChoiceItem("in_panel", "in_panel")
approved = ChoiceItem("approved")
declined = ChoiceItem("declined")
closed = ChoiceItem("closed")
status = models.TextField(default=Status.draft, choices=Status.choices)
draft_date = models.DateTimeField(auto_now_add=True)
submitted_date = models.DateTimeField(null=True, help_text="The date the bid was submitted")
handshake_offered_date = models.DateTimeField(null=True, help_text="The date the handshake was offered")
handshake_accepted_date = models.DateTimeField(null=True, help_text="The date the handshake was accepted")
handshake_declined_date = models.DateTimeField(null=True, help_text="The date the handshake was declined")
in_panel_date = models.DateTimeField(null=True, help_text="The date the bid was scheduled for panel")
scheduled_panel_date = models.DateTimeField(null=True, help_text="The date of the paneling meeting")
approved_date = models.DateTimeField(null=True, help_text="The date the bid was approved")
declined_date = models.DateTimeField(null=True, help_text="The date the bid was declined")
closed_date = models.DateTimeField(null=True, help_text="The date the bid was closed")
bidcycle = models.ForeignKey('bidding.BidCycle', on_delete=models.CASCADE, related_name="bids", help_text="The bidcycle for this bid")
user = models.ForeignKey('user_profile.UserProfile', on_delete=models.CASCADE, related_name="bidlist", help_text="The user owning this bid")
position = models.ForeignKey('position.Position', on_delete=models.CASCADE, related_name="bids", help_text="The position this bid is for")
is_priority = models.BooleanField(default=False, help_text="Flag indicating if this bid is the bidder's priority bid")
panel_reschedule_count = models.IntegerField(default=0, help_text="The number of times this bid's panel date has been rescheduled")
reviewer = models.ForeignKey('user_profile.UserProfile', on_delete=models.DO_NOTHING, null=True, related_name="reviewing_bids", help_text="The bureau reviewer for this bid")
create_date = models.DateTimeField(auto_now_add=True)
update_date = models.DateTimeField(auto_now=True)
def __str__(self):
return f"{self.id} {self.user}#{self.position.position_number} ({self.status})"
@property
def is_paneling_today(self):
return timezone.now().date() == self.scheduled_panel_date.date()
@staticmethod
def get_approval_statuses():
'''
Returns an array of statuses that denote some approval of the bid (handshake->approved)
'''
return [Bid.Status.handshake_offered, Bid.Status.handshake_accepted, Bid.Status.in_panel, Bid.Status.approved]
@staticmethod
def get_priority_statuses():
'''
Returns an array of statuses that correspond to a priority bid (handshake_accepted->approved)
'''
return [Bid.Status.handshake_accepted, Bid.Status.in_panel, Bid.Status.approved]
@staticmethod
def get_unavailable_status_filter():
'''
Returns a Q object which will return bids which are unavailable for bids (i.e. at or further than handshake status)
'''
# We must not have a status of a handshake; or any status further in the process
qualified_statuses = Bid.get_approval_statuses()
q_obj = Q()
# Here we construct a Q object looking for statuses matching any of the qualified statuses
for status in qualified_statuses:
q_obj = q_obj | Q(status=status)
return q_obj
def generate_status_messages(self):
return {
"handshake_offered_owner": f"Your bid for {self.position} has been offered a handshake.",
"handshake_offered_other": f"A competing bid for {self.position} has been offered a handshake.",
"in_panel_owner": f"Your bid for {self.position} has been scheduled for panel review.",
"approved_owner": f"Your bid for {self.position} has been approved by panel.",
"declined_owner": f"Your bid for {self.position} has been declined."
}
class Meta:
managed = True
ordering = ["bidcycle__cycle_start_date", "update_date"]
class Waiver(StaticRepresentationModel):
'''
The waiver model represents an individual waiver for a particular facet of a bid's requirements
'''
class Category(DjangoChoices):
retirement = ChoiceItem('retirement')
language = ChoiceItem('language')
six_eight = ChoiceItem('six_eight', 'six_eight')
fairshare = ChoiceItem('fairshare')
skill = ChoiceItem('skill')
class Type(DjangoChoices):
partial = ChoiceItem("partial")
full = ChoiceItem("full")
class Status(DjangoChoices):
approved = ChoiceItem("approved")
requested = ChoiceItem("requested")
denied = ChoiceItem("denied")
category = models.TextField(choices=Category.choices)
type = models.TextField(default=Type.full, choices=Type.choices)
status = models.TextField(default=Status.requested, choices=Status.choices)
bid = models.ForeignKey(Bid, on_delete=models.DO_NOTHING, related_name='waivers')
position = models.ForeignKey('position.Position', on_delete=models.DO_NOTHING, related_name='waivers')
user = models.ForeignKey('user_profile.UserProfile', on_delete=models.DO_NOTHING, related_name='waivers')
reviewer = models.ForeignKey('user_profile.UserProfile', on_delete=models.DO_NOTHING, null=True, related_name='reviewed_waivers')
description = models.TextField(null=True, help_text="Description of the waiver request")
create_date = models.DateTimeField(auto_now_add=True)
update_date = models.DateTimeField(auto_now=True)
def generate_status_messages(self):
return {
"approved_owner": f"The requested waiver for {self.user} ({self.type} {self.category}) for position {self.position} has been approved.",
"requested_cdo": f"{self.user} has requested a {self.type} {self.category} waiver for position {self.position}.",
"denied_owner": f"The requested waiver for {self.user} ({self.type} {self.category}) for position {self.position} has been denied."
}
def __str__(self):
return f"{self.type} {self.category} for {self.user} at {self.position}, {self.status}"
class Meta:
managed = True
ordering = ["update_date"]
@receiver(m2m_changed, sender=BidCycle.positions.through, dispatch_uid="bidcycle_m2m_changed")
def bidcycle_positions_update(sender, instance, action, reverse, model, pk_set, **kwargs):
if action == "pre_add":
# Create a new statistics item when a position is placed in the bid cycle
for position_id in pk_set:
talentmap_api.position.models.PositionBidStatistics.objects.create(bidcycle=instance, position_id=position_id)
BiddingStatus.objects.get_or_create(bidcycle=instance, position_id=position_id)
elif action == "pre_remove":
# Delete statistics items when removed from the bidcycle
talentmap_api.position.models.PositionBidStatistics.objects.filter(bidcycle=instance, position_id__in=pk_set).delete()
BiddingStatus.objects.filter(bidcycle=instance, position_id__in=pk_set).delete()
@receiver(pre_save, sender=Bid, dispatch_uid="bid_status_changed")
def bid_status_changed(sender, instance, **kwargs):
notification_bodies = instance.generate_status_messages()
# If our instance has an id, we're performing an update (and not a create)
if instance.id:
# Get our bid as it exists in the database
old_bid = Bid.objects.get(id=instance.id)
# Set the bid's priority flag
instance.is_priority = instance.status in instance.get_priority_statuses()
# Check if our old bid's status equals the new instance's status
if old_bid.status != instance.status:
# Create notifications for the owner of the bid, and other bidders on the same position
owner = [instance.user]
            others = [x for x in instance.position.bids.values_list('user__id', flat=True) if x != instance.user.id]
others = list(UserProfile.objects.filter(id__in=others))
for notification, users in [(f"{instance.status}_owner", owner), (f"{instance.status}_other", others)]:
# If we have that notification status in the notification bodies, create the notification
if notification in notification_bodies:
for user in users:
Notification.objects.create(owner=user,
tags=['bidding', f'{instance.status}'],
message=notification_bodies[notification])
@receiver(pre_save, sender=Bid, dispatch_uid="bid_panel_date_changed")
def bid_panel_date_changed(sender, instance, **kwargs):
# If our instance has an id, we're performing an update (and not a create)
if instance.id and instance.scheduled_panel_date and instance.status in [Bid.Status.handshake_accepted, Bid.Status.in_panel]:
# Get our bid as it exists in the database
old_bid = Bid.objects.get(id=instance.id)
verb = 'scheduled'
# If we have an old date, this a re-schedule
if old_bid.scheduled_panel_date:
verb = 'rescheduled'
instance.panel_reschedule_count = old_bid.panel_reschedule_count + 1
# Check if our old bid's paneling date is the same as the new one
if old_bid.scheduled_panel_date != instance.scheduled_panel_date:
Notification.objects.create(owner=instance.user,
tags=['bidding', f'{instance.status}', f'{verb}'],
message=f"Your bid for {instance.position} has been {verb} for paneling on {instance.scheduled_panel_date}.")
@receiver(post_save, sender=Bid, dispatch_uid="save_update_bid_statistics")
@receiver(post_delete, sender=Bid, dispatch_uid="delete_update_bid_statistics")
def delete_update_bid_statistics(sender, instance, **kwargs):
# Get the position associated with this bid and update the statistics
statistics, _ = talentmap_api.position.models.PositionBidStatistics.objects.get_or_create(bidcycle=instance.bidcycle, position=instance.position)
statistics.update_statistics()
# Update the user's bid statistics
statistics, _ = UserBidStatistics.objects.get_or_create(user=instance.user, bidcycle=instance.bidcycle)
statistics.update_statistics()
@receiver(pre_save, sender=Waiver, dispatch_uid="waiver_status_changed")
def waiver_status_changed(sender, instance, **kwargs):
notification_bodies = instance.generate_status_messages()
# If our instance has an id, we're performing an update (and not a create)
if instance.id:
# Get our waiver as it exists in the database
old_waiver = Waiver.objects.get(id=instance.id)
# Check if our old waiver's status equals the new instance's status
if old_waiver.status != instance.status:
# Perform an action based upon the new status
            if instance.status == Waiver.Status.approved:
Notification.objects.create(owner=instance.user,
tags=['waiver', f'{instance.status}'],
message=notification_bodies['approved_owner'])
            elif instance.status == Waiver.Status.denied:
Notification.objects.create(owner=instance.user,
tags=['waiver', f'{instance.status}'],
message=notification_bodies['denied_owner'])
else:
# It's a new waiver request, notify the CDO
if instance.user.cdo:
Notification.objects.create(owner=instance.user.cdo,
tags=['waiver', f'{instance.status}'],
message=notification_bodies['requested_cdo'])
```
#### File: bidding/serializers/serializers.py
```python
from datetime import datetime
from rest_framework import serializers
from talentmap_api.common.common_helpers import ensure_date
from talentmap_api.common.serializers import PrefetchedSerializer, StaticRepresentationField
from talentmap_api.position.serializers import PositionSerializer
from talentmap_api.bidding.models import BidCycle, Bid, StatusSurvey, UserBidStatistics, Waiver
class BidCycleSerializer(PrefetchedSerializer):
def validate(self, data):
datasource = self.initial_data
# Convert incoming string dates into date objects for validation
for date_key in ["cycle_end_date", "cycle_deadline_date", "cycle_start_date"]:
date = datasource.get(date_key, None)
if date:
datasource[date_key] = ensure_date(date)
# Update our current data if we have any with new data
if self.instance:
instance_data = self.instance.__dict__
instance_data.update(datasource)
datasource = instance_data
# Validate our dates are in a chronologically sound order
start_date = datasource.get("cycle_start_date")
end_date = datasource.get("cycle_end_date")
deadline_date = datasource.get("cycle_deadline_date")
if end_date < start_date:
raise serializers.ValidationError("Cycle start date must be before cycle end date")
if end_date < deadline_date:
raise serializers.ValidationError("Cycle deadline date must be on or before the cycle end date")
if deadline_date < start_date:
raise serializers.ValidationError("Cycle deadline date must be after cycle start date")
return data
class Meta:
model = BidCycle
fields = ("id", "name", "cycle_start_date", "cycle_deadline_date", "cycle_end_date", "active")
writable_fields = ("name", "cycle_start_date", "cycle_deadline_date", "cycle_end_date", "active")
class BidCycleStatisticsSerializer(PrefetchedSerializer):
total_positions = serializers.SerializerMethodField()
available_positions = serializers.SerializerMethodField()
available_domestic_positions = serializers.SerializerMethodField()
available_international_positions = serializers.SerializerMethodField()
total_bids = serializers.SerializerMethodField()
total_bidders = serializers.SerializerMethodField()
approved_bidders = serializers.SerializerMethodField()
in_panel_bidders = serializers.SerializerMethodField()
bidding_days_remaining = serializers.SerializerMethodField()
def get_total_positions(self, obj):
return obj.positions.count()
def get_available_positions(self, obj):
return obj.annotated_positions.filter(accepting_bids=True).count()
def get_available_domestic_positions(self, obj):
return obj.annotated_positions.filter(accepting_bids=True, post__location__country__code="USA").count()
def get_available_international_positions(self, obj):
return obj.annotated_positions.filter(accepting_bids=True).exclude(post__location__country__code="USA").count()
def get_total_bids(self, obj):
return obj.bids.count()
def get_total_bidders(self, obj):
return obj.bids.values('user').distinct().count()
def get_in_panel_bidders(self, obj):
return obj.bids.filter(status=Bid.Status.in_panel).values('user').distinct().count()
def get_approved_bidders(self, obj):
return obj.bids.filter(status=Bid.Status.approved).values('user').distinct().count()
def get_bidding_days_remaining(self, obj):
return (obj.cycle_deadline_date.date() - datetime.now().date()).days
class Meta:
model = BidCycle
fields = ("id", "name", "cycle_start_date", "cycle_deadline_date", "cycle_end_date",
"total_positions", "available_positions", "available_domestic_positions", "available_international_positions",
"total_bids", "total_bidders", "in_panel_bidders", "approved_bidders", "bidding_days_remaining",)
class SurveySerializer(PrefetchedSerializer):
calculated_values = serializers.SerializerMethodField()
def get_calculated_values(self, obj):
calculated_values = {}
calculated_values['is_fairshare'] = obj.user.is_fairshare
calculated_values['is_six_eight'] = obj.user.is_six_eight
return calculated_values
class Meta:
model = StatusSurvey
fields = "__all__"
writable_fields = ("bidcycle", "is_differential_bidder", "is_fairshare", "is_six_eight")
class BidSerializer(PrefetchedSerializer):
bidcycle = StaticRepresentationField(read_only=True)
user = StaticRepresentationField(read_only=True)
position = StaticRepresentationField(read_only=True)
waivers = StaticRepresentationField(read_only=True, many=True)
is_paneling_today = serializers.BooleanField(read_only=True)
class Meta:
model = Bid
fields = "__all__"
nested = {
"position": {
"class": PositionSerializer,
"field": "position",
"kwargs": {
"override_fields": [
"id",
"position_number",
"bureau",
"title",
"skill",
"grade",
"post__id",
"post__location",
"update_date",
"create_date"
],
"read_only": True
}
},
"reviewer": {
"class": "talentmap_api.user_profile.serializers.UserProfileShortSerializer",
"field": "reviewer",
"kwargs": {
"read_only": True
}
}
}
class UserBidStatisticsSerializer(PrefetchedSerializer):
bidcycle = StaticRepresentationField(read_only=True)
user = StaticRepresentationField(read_only=True)
class Meta:
model = UserBidStatistics
fields = "__all__"
class BidWritableSerializer(PrefetchedSerializer):
'''
This is only used by AOs to schedule the panel date
'''
def validate(self, data):
datasource = self.initial_data
# Convert incoming string dates into date objects for validation
date = datasource.get('scheduled_panel_date', None)
if date:
datasource['scheduled_panel_date'] = ensure_date(date)
# Update our current data if we have any with new data
if self.instance:
instance_data = self.instance.__dict__
instance_data.update(datasource)
datasource = instance_data
return data
class Meta:
model = Bid
fields = ("id", "scheduled_panel_date")
writable_fields = ("scheduled_panel_date")
class WaiverSerializer(PrefetchedSerializer):
'''
For read-only usages
'''
bid = StaticRepresentationField(read_only=True)
user = StaticRepresentationField(read_only=True)
reviewer = StaticRepresentationField(read_only=True)
position = StaticRepresentationField(read_only=True)
class Meta:
model = Waiver
fields = "__all__"
class WaiverClientSerializer(PrefetchedSerializer):
'''
For client/CDO creation (no status editing)
'''
class Meta:
model = Waiver
fields = "__all__"
writable_fields = ("bid", "position", "type", "category", "description")
```
#### File: management/commands/soap_api_test.py
```python
from django.core.management.base import BaseCommand
import logging
import defusedxml.lxml as ET
from talentmap_api.common.common_helpers import xml_etree_to_dict
from talentmap_api.integrations.synchronization_helpers import get_soap_client
class Command(BaseCommand):
help = 'Tests the connection to the SOAP webservices'
logger = logging.getLogger(__name__)
def __init__(self, *args, **kwargs):
super(Command, self).__init__(*args, **kwargs)
def add_arguments(self, parser):
parser.add_argument('command', nargs='?', type=str, help="The command to run")
parser.add_argument('arguments', nargs='*', type=str, help="The arguments for the command, as named pairs; i.e. USCity=Fairfax")
def handle(self, *args, **options):
client = get_soap_client()
if not options['command']:
self.logger.info('No command specified, dumping wsdl information')
client.wsdl.dump()
return
arguments = {x.split('=')[0]: x.split('=')[1] for x in options["arguments"]}
self.logger.info(f'Calling command {options["command"]} with parameters {arguments}')
response = getattr(client.service, options['command'])(**arguments)
dict_response = xml_etree_to_dict(response)
self.logger.info(type(response))
if not isinstance(response, str):
response = ET.tostring(response, pretty_print=True)
self.logger.info(f'SOAP call response:')
self.logger.info(response.decode('unicode_escape'))
self.logger.info(f'Dictionary parsed response: {dict_response}')
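    # Example invocation (the SOAP operation name below is illustrative only):
    #   python manage.py soap_api_test SomeOperation USCity=Fairfax
    # With no command argument, the WSDL description is dumped instead.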
```
#### File: common/tests/test_common_helpers.py
```python
import pytest
import datetime
from dateutil import parser, tz
from django.contrib.contenttypes.models import ContentType
from django.contrib.auth.models import User
from django.core.exceptions import PermissionDenied
from model_mommy import mommy
from talentmap_api.common.common_helpers import get_permission_by_name, get_group_by_name, in_group_or_403, has_permission_or_403, ensure_date, safe_navigation, order_dict, serialize_instance, in_superuser_group
from talentmap_api.position.models import Position
@pytest.mark.django_db()
def test_ensure_date():
    # ensure_date should reject anything that is not a date object or string
with pytest.raises(Exception, match="Parameter must be a date object or string"):
ensure_date(201225123)
date = parser.parse("1000-01-01T00:00:00-05:00")
# Now check it
assert ensure_date("1000-01-01", utc_offset=-5) == date
@pytest.mark.django_db()
def test_serialize_instance():
    # Serialize a position instance and check that its id round-trips
p = mommy.make('position.Position')
assert serialize_instance(p, 'talentmap_api.position.serializers.PositionSerializer').get('id') == p.id
@pytest.mark.django_db()
def test_order_dict():
ordered_dict = {
"a": 1,
"b": 2,
"c": 3
}
unordered_dict = {
"b": 2,
"a": 1,
"c": 3
}
assert order_dict(unordered_dict) == ordered_dict
nested_ordered_dict = {
"a": 1,
"b": {
"a": 1,
"b": 2,
"c": 3
},
"c": 3
}
nested_unordered_dict = {
"b": {
"b": 2,
"a": 1,
"c": 3
},
"a": 1,
"c": 3
}
assert order_dict(nested_unordered_dict) == nested_ordered_dict
@pytest.mark.django_db()
def test_safe_navigation():
position = mommy.make('position.Position')
assert safe_navigation(position, "post") is None
position.post = mommy.make('organization.Post')
position.save()
assert safe_navigation(position, "post") is not None
assert safe_navigation(position, "post.location") is None
position.post.location = mommy.make('organization.Location')
position.save()
assert safe_navigation(position, "post.location") is not None
@pytest.mark.django_db()
def test_get_permission_by_name():
# Try to get a permission without it existing
with pytest.raises(Exception, match="Permission position.test_permission not found."):
get_permission_by_name("position.test_permission")
# Create a permission
mommy.make('auth.Permission', name="test_permission", codename="test_permission", content_type=ContentType.objects.get_for_model(Position))
# Now check it
assert get_permission_by_name("position.test_permission")
@pytest.mark.django_db()
def test_has_permission_or_403(authorized_user):
# Create a permission
permission = mommy.make('auth.Permission', name="test_permission", codename="test_permission", content_type=ContentType.objects.get_for_model(Position))
# Check for 403 since we don't have permission
with pytest.raises(PermissionDenied):
has_permission_or_403(authorized_user, "position.test_permission")
# Add the permission to the user
authorized_user.user_permissions.add(permission)
# Should not raise an exception (we re-get the user due to permission caching)
has_permission_or_403(User.objects.get(id=authorized_user.id), "position.test_permission")
@pytest.mark.django_db()
def test_get_group_by_name():
    # Try to get a group without it existing
    with pytest.raises(Exception, match="Group test_group not found."):
get_group_by_name("test_group")
    # Create a group
mommy.make('auth.Group', name="test_group")
# Now check it
assert get_group_by_name("test_group")
@pytest.mark.django_db()
def test_in_group_or_403(authorized_user):
group = mommy.make('auth.Group', name="test_group")
# Check for 403 since we're not in the group
with pytest.raises(PermissionDenied):
in_group_or_403(authorized_user, "test_group")
# Add the user to the group
group.user_set.add(authorized_user)
# Should not raise an exception
in_group_or_403(authorized_user, "test_group")
@pytest.mark.django_db()
def test_in_superuser_group(authorized_user):
group = mommy.make('auth.Group', name="superuser")
assert not in_superuser_group(authorized_user)
# Add the user to the group
group.user_set.add(authorized_user)
# Should not raise an exception
assert in_superuser_group(authorized_user)
```
#### File: talentmap_api/position/serializers.py
```python
from rest_framework import serializers
from talentmap_api.common.serializers import PrefetchedSerializer, StaticRepresentationField
from talentmap_api.bidding.models import BiddingStatus
from talentmap_api.position.models import Position, Grade, Skill, SkillCone, CapsuleDescription, Classification, Assignment, PositionBidStatistics
from talentmap_api.language.serializers import LanguageQualificationSerializer
from talentmap_api.organization.serializers import PostSerializer
class CapsuleDescriptionSerializer(PrefetchedSerializer):
last_editing_user = StaticRepresentationField(read_only=True)
# This is a dynamic flag used by the front end to simplify checking if the current user has permissions
is_editable_by_user = serializers.SerializerMethodField()
date_created = serializers.DateTimeField(read_only=True)
date_updated = serializers.DateTimeField(read_only=True)
def get_is_editable_by_user(self, obj):
try:
return self.context.get("request").user.has_perm(f"position.{obj.position.post.permission_edit_post_capsule_description_codename}")
except AttributeError:
# The position doesn't have a post, or otherwise
return False
class Meta:
model = CapsuleDescription
fields = "__all__"
writable_fields = ("content", "point_of_contact", "website",)
class CurrentAssignmentSerializer(PrefetchedSerializer):
user = StaticRepresentationField(read_only=True)
tour_of_duty = StaticRepresentationField(read_only=True)
class Meta:
model = Assignment
exclude = ("position",)
class AssignmentSerializer(CurrentAssignmentSerializer):
class Meta:
model = Assignment
fields = "__all__"
nested = {
"position": {
"class": "talentmap_api.position.serializers.PositionSerializer",
"field": "position",
"kwargs": {
"override_fields": [
"position_number",
"bureau",
"skill",
"title",
"post__location",
],
"read_only": True
}
}
}
class ClassificationSerializer(PrefetchedSerializer):
class Meta:
model = Classification
fields = "__all__"
class BiddingStatusSerializer(PrefetchedSerializer):
bidcycle = StaticRepresentationField(read_only=True)
position = StaticRepresentationField(read_only=True)
class Meta:
model = BiddingStatus
fields = "__all__"
class PositionWritableSerializer(PrefetchedSerializer):
class Meta:
model = Position
fields = ("classifications",)
writable_fields = ("classifications",)
class PositionBidStatisticsSerializer(PrefetchedSerializer):
bidcycle = StaticRepresentationField(read_only=True)
class Meta:
model = PositionBidStatistics
exclude = ("position",)
class PositionSerializer(PrefetchedSerializer):
grade = StaticRepresentationField(read_only=True)
skill = StaticRepresentationField(read_only=True)
bureau = serializers.SerializerMethodField()
organization = serializers.SerializerMethodField()
tour_of_duty = StaticRepresentationField(read_only=True)
classifications = StaticRepresentationField(read_only=True, many=True)
representation = serializers.SerializerMethodField()
is_highlighted = serializers.ReadOnlyField()
# This method returns the string representation of the bureau, or the code
# if it doesn't currently exist in the database
def get_bureau(self, obj):
if obj.bureau:
return obj.bureau._string_representation
else:
return obj._bureau_code
# This method returns the string representation of the parent org, or the code
# if it doesn't currently exist in the database
def get_organization(self, obj):
if obj.organization:
return obj.organization._string_representation
else:
return obj._org_code
class Meta:
model = Position
fields = "__all__"
nested = {
"bid_cycle_statuses": {
"class": BiddingStatusSerializer,
"kwargs": {
"many": True,
"read_only": True
}
},
"bid_statistics": {
"class": PositionBidStatisticsSerializer,
"kwargs": {
"many": True,
"read_only": True
}
},
"languages": {
"class": LanguageQualificationSerializer,
"kwargs": {
"many": True,
"read_only": True
}
},
"post": {
"class": PostSerializer,
"field": "post",
"kwargs": {
"many": False,
"read_only": True
}
},
"description": {
"class": CapsuleDescriptionSerializer,
"field": "description",
"kwargs": {
"read_only": True
}
},
"latest_bidcycle": {
"class": "talentmap_api.bidding.serializers.serializers.BidCycleSerializer",
"field": "latest_bidcycle",
"kwargs": {
"read_only": True
}
},
"current_assignment": {
"class": CurrentAssignmentSerializer,
"field": "current_assignment",
"kwargs": {
"override_fields": [
"user",
"status",
"start_date",
"tour_of_duty",
"estimated_end_date"
],
"read_only": True
}
}
}
class GradeSerializer(PrefetchedSerializer):
class Meta:
model = Grade
fields = ("id", "code")
class SkillSerializer(PrefetchedSerializer):
cone = StaticRepresentationField(read_only=True)
class Meta:
model = Skill
fields = "__all__"
class SkillConeSerializer(PrefetchedSerializer):
skills = StaticRepresentationField(read_only=True, many=True)
class Meta:
model = SkillCone
fields = "__all__"
```
#### File: position/tests/test_position_endpoints.py
```python
import pytest
from model_mommy import mommy
from rest_framework import status
from itertools import cycle
from django.contrib.auth.models import User
from django.utils import timezone
from talentmap_api.common.common_helpers import get_permission_by_name
from talentmap_api.position.tests.mommy_recipes import bidcycle_positions
# Might move this fixture to a session fixture if we end up needing languages elsewhere
@pytest.fixture
def test_position_endpoints_fixture():
# Create a specific language, proficiency, and qualification
language = mommy.make('language.Language', code="DE", long_description="German", short_description="Ger")
mommy.make('language.Language', code="FR", long_description="French", short_description="Fch")
proficiency = mommy.make('language.Proficiency', code="3")
proficiency_2 = mommy.make('language.Proficiency', code="4")
qualification = mommy.make('language.Qualification', language=language, spoken_proficiency=proficiency, reading_proficiency=proficiency)
qualification_2 = mommy.make('language.Qualification', language=language, spoken_proficiency=proficiency_2, reading_proficiency=proficiency_2)
# Create some grades
grade = mommy.make('position.Grade', code="00")
grade_2 = mommy.make('position.Grade', code="01")
mommy.make_recipe('talentmap_api.position.tests.grade', _quantity=8)
# Create some skills
skill = mommy.make('position.Skill', code="0010")
skill_2 = mommy.make('position.Skill', code="0020")
mommy.make_recipe('talentmap_api.position.tests.skill', _quantity=8)
bc = mommy.make('bidding.BidCycle')
# Create a position with the specific qualification
bc.positions.add(mommy.make('position.Position', languages=[qualification], grade=grade, skill=skill))
bc.positions.add(mommy.make('position.Position', languages=[qualification_2], grade=grade_2, skill=skill_2))
is_overseas = [True, False]
# Create some junk positions to add numbers
for _ in range(0, 8):
bc.positions.add(mommy.make('position.Position',
organization=mommy.make_recipe('talentmap_api.organization.tests.orphaned_organization'),
bureau=mommy.make_recipe('talentmap_api.organization.tests.orphaned_organization'),
is_overseas=cycle(is_overseas)))
@pytest.mark.django_db()
@pytest.mark.usefixtures("test_position_endpoints_fixture")
def test_position_list(client):
response = client.get('/api/v1/position/')
assert response.status_code == status.HTTP_200_OK
assert len(response.data["results"]) == 10
@pytest.mark.django_db()
@pytest.mark.usefixtures("test_position_endpoints_fixture")
def test_position_filtering(client):
response = client.get('/api/v1/position/?languages__language__name=German')
assert response.status_code == status.HTTP_200_OK
assert len(response.data["results"]) == 2
response = client.get('/api/v1/position/?languages__spoken_proficiency__at_least=3')
assert response.status_code == status.HTTP_200_OK
assert len(response.data["results"]) == 2
response = client.get('/api/v1/position/?languages__spoken_proficiency__at_most=3')
assert response.status_code == status.HTTP_200_OK
assert len(response.data["results"]) == 1
response = client.get('/api/v1/position/?languages__spoken_proficiency__at_least=4')
assert response.status_code == status.HTTP_200_OK
assert len(response.data["results"]) == 1
response = client.get('/api/v1/position/?languages__spoken_proficiency__at_most=4')
assert response.status_code == status.HTTP_200_OK
assert len(response.data["results"]) == 2
@pytest.mark.django_db()
@pytest.mark.usefixtures("test_position_endpoints_fixture")
def test_position_grade_skill_filters(client):
response = client.get('/api/v1/position/?grade__code=00')
assert response.status_code == status.HTTP_200_OK
assert len(response.data["results"]) == 1
response = client.get('/api/v1/position/?skill__code=0010')
assert response.status_code == status.HTTP_200_OK
assert len(response.data["results"]) == 1
@pytest.mark.django_db()
@pytest.mark.usefixtures("test_position_endpoints_fixture")
def test_grade_list(client):
response = client.get('/api/v1/grade/')
assert response.status_code == status.HTTP_200_OK
assert len(response.data["results"]) == 10
@pytest.mark.django_db()
@pytest.mark.usefixtures("test_position_endpoints_fixture")
def test_grade_filtering(client):
response = client.get('/api/v1/grade/?code=00')
assert response.status_code == status.HTTP_200_OK
assert len(response.data["results"]) == 1
response = client.get('/api/v1/grade/?code__in=00,01')
assert response.status_code == status.HTTP_200_OK
assert len(response.data["results"]) == 2
@pytest.mark.django_db()
@pytest.mark.usefixtures("test_position_endpoints_fixture")
def test_skill_list(client):
response = client.get('/api/v1/skill/')
assert response.status_code == status.HTTP_200_OK
assert len(response.data["results"]) == 10
@pytest.mark.django_db()
@pytest.mark.usefixtures("test_position_endpoints_fixture")
def test_skill_filtering(client):
response = client.get('/api/v1/skill/?code=0010')
assert response.status_code == status.HTTP_200_OK
assert len(response.data["results"]) == 1
response = client.get('/api/v1/skill/?code__in=0010,0020')
assert response.status_code == status.HTTP_200_OK
assert len(response.data["results"]) == 2
@pytest.mark.django_db()
@pytest.mark.usefixtures("test_position_endpoints_fixture")
@pytest.mark.parametrize("endpoint, available, expected_count", [
("/api/v1/language/", True, 1),
("/api/v1/language/", False, 1),
("/api/v1/language_proficiency/", True, 2),
("/api/v1/language_proficiency/", False, 0),
("/api/v1/grade/", True, 2),
("/api/v1/grade/", False, 8),
("/api/v1/skill/", True, 2),
("/api/v1/skill/", False, 8),
])
def test_available_filtering(client, endpoint, available, expected_count):
response = client.get(f'{endpoint}?is_available={available}')
assert response.status_code == status.HTTP_200_OK
assert len(response.data["results"]) == expected_count
@pytest.mark.django_db()
@pytest.mark.usefixtures("test_position_endpoints_fixture")
def test_domestic_filtering(client):
response_1 = client.get('/api/v1/position/?is_domestic=true')
response_2 = client.get('/api/v1/position/?is_overseas=false')
assert response_1.data == response_2.data
response_1 = client.get('/api/v1/position/?is_domestic=false')
response_2 = client.get('/api/v1/position/?is_overseas=true')
assert response_1.data == response_2.data
@pytest.mark.django_db()
def test_position_assignment_list(authorized_client, authorized_user):
# Give the user AO permissions
group = mommy.make("auth.Group", name="bureau_ao")
group.user_set.add(authorized_user)
position = mommy.make("position.Position")
mommy.make("position.Assignment", position=position, user=authorized_user.profile, tour_of_duty=mommy.make("organization.TourOfDuty"), _quantity=5)
response = authorized_client.get(f'/api/v1/position/{position.id}/assignments/')
assert response.status_code == status.HTTP_200_OK
assert len(response.data['results']) == 5
@pytest.mark.django_db()
def test_position_bid_list(authorized_client, authorized_user):
# Create a bureau for the position
bureau = mommy.make('organization.Organization', code='12345')
position = mommy.make('position.Position', bureau=bureau)
bidcycle = mommy.make('bidding.BidCycle')
bidcycle.positions.add(position)
mommy.make('bidding.Bid', user=authorized_user.profile, position=position, bidcycle=bidcycle, _quantity=5)
# Create valid permissions to view this position's bids
group = mommy.make('auth.Group', name='bureau_ao')
group.user_set.add(authorized_user)
group = mommy.make('auth.Group', name=f'bureau_ao_{bureau.code}')
group.user_set.add(authorized_user)
response = authorized_client.get(f'/api/v1/position/{position.id}/bids/')
assert response.status_code == status.HTTP_200_OK
assert len(response.data['results']) == 5
@pytest.mark.django_db()
def test_favorite_action_endpoints(authorized_client, authorized_user):
position = mommy.make_recipe('talentmap_api.position.tests.position')
response = authorized_client.get(f'/api/v1/position/{position.id}/favorite/')
assert response.status_code == status.HTTP_404_NOT_FOUND
response = authorized_client.put(f'/api/v1/position/{position.id}/favorite/')
assert response.status_code == status.HTTP_204_NO_CONTENT
response = authorized_client.get(f'/api/v1/position/{position.id}/favorite/')
assert response.status_code == status.HTTP_204_NO_CONTENT
response = authorized_client.delete(f'/api/v1/position/{position.id}/favorite/')
assert response.status_code == status.HTTP_204_NO_CONTENT
response = authorized_client.get(f'/api/v1/position/{position.id}/favorite/')
assert response.status_code == status.HTTP_404_NOT_FOUND
@pytest.mark.django_db(transaction=True)
def test_highlight_action_endpoints(authorized_client, authorized_user):
bureau = mommy.make('organization.Organization', code="123456", short_description="Test Bureau")
bureau.create_permissions()
permission = get_permission_by_name(f"organization.can_highlight_positions_{bureau.code}")
position = bidcycle_positions(bureau=bureau)
response = authorized_client.get(f'/api/v1/position/{position.id}/highlight/')
assert response.status_code == status.HTTP_404_NOT_FOUND
# First, try to highlight without appropriate permissions
response = authorized_client.put(f'/api/v1/position/{position.id}/highlight/')
assert response.status_code == status.HTTP_403_FORBIDDEN
    # Now, try to unhighlight without appropriate permissions
response = authorized_client.delete(f'/api/v1/position/{position.id}/highlight/')
assert response.status_code == status.HTTP_403_FORBIDDEN
# Add the permission to our user
authorized_user.user_permissions.add(permission)
group = mommy.make('auth.Group', name='superuser')
group.user_set.add(authorized_user)
response = authorized_client.put(f'/api/v1/position/{position.id}/highlight/')
assert response.status_code == status.HTTP_204_NO_CONTENT
response = authorized_client.get(f'/api/v1/position/{position.id}/highlight/')
assert response.status_code == status.HTTP_204_NO_CONTENT
response = authorized_client.delete(f'/api/v1/position/{position.id}/highlight/')
assert response.status_code == status.HTTP_204_NO_CONTENT
response = authorized_client.get(f'/api/v1/position/{position.id}/highlight/')
assert response.status_code == status.HTTP_404_NOT_FOUND
@pytest.mark.django_db(transaction=True)
def test_position_waiver_actions(authorized_client, authorized_user):
# Create a bureau for the position
bureau = mommy.make('organization.Organization', code='12345')
position = bidcycle_positions(bureau=bureau)
bidcycle = mommy.make('bidding.BidCycle')
bid = mommy.make('bidding.Bid', user=authorized_user.profile, position=position, bidcycle=bidcycle)
waiver = mommy.make('bidding.Waiver', user=authorized_user.profile, position=position, bid=bid)
# Create valid permissions to view this position's waivers
group = mommy.make('auth.Group', name='bureau_ao')
group.user_set.add(authorized_user)
group = mommy.make('auth.Group', name=f'bureau_ao_{bureau.code}')
group.user_set.add(authorized_user)
assert waiver.status == waiver.Status.requested
# Pull a list of all the waivers
response = authorized_client.get(f'/api/v1/position/{position.id}/waivers/')
assert response.status_code == status.HTTP_200_OK
assert len(response.data["results"]) == 1
# Approve the waiver
response = authorized_client.get(f'/api/v1/position/{position.id}/waivers/{waiver.id}/approve/')
assert response.status_code == status.HTTP_204_NO_CONTENT
waiver.refresh_from_db()
assert waiver.status == waiver.Status.approved
assert waiver.reviewer == authorized_user.profile
    # Deny the waiver
response = authorized_client.get(f'/api/v1/position/{position.id}/waivers/{waiver.id}/deny/')
assert response.status_code == status.HTTP_204_NO_CONTENT
waiver.refresh_from_db()
assert waiver.status == waiver.Status.denied
assert waiver.reviewer == authorized_user.profile
@pytest.mark.django_db(transaction=True)
def test_position_vacancy_filter_aliases(authorized_client, authorized_user):
one_year_tod = mommy.make('organization.TourOfDuty', months=12)
two_year_tod = mommy.make('organization.TourOfDuty', months=24)
three_year_tod = mommy.make('organization.TourOfDuty', months=36)
today = timezone.now().replace(hour=0, minute=0, second=0, microsecond=0)
mommy.make('position.Assignment', position=bidcycle_positions(), start_date=today, tour_of_duty=one_year_tod, user=authorized_user.profile)
mommy.make('position.Assignment', position=bidcycle_positions(), start_date=today, tour_of_duty=two_year_tod, user=authorized_user.profile)
mommy.make('position.Assignment', position=bidcycle_positions(), start_date=today, tour_of_duty=three_year_tod, user=authorized_user.profile)
response = authorized_client.get('/api/v1/position/?vacancy_in_years=1')
assert response.status_code == status.HTTP_200_OK
assert len(response.data["results"]) == 1
response = authorized_client.get('/api/v1/position/?vacancy_in_years=2')
assert response.status_code == status.HTTP_200_OK
assert len(response.data["results"]) == 2
response = authorized_client.get('/api/v1/position/?vacancy_in_years=3')
assert response.status_code == status.HTTP_200_OK
assert len(response.data["results"]) == 3
@pytest.mark.django_db(transaction=True)
def test_position_similar_list(client):
post = mommy.make_recipe('talentmap_api.organization.tests.post')
skill = mommy.make('position.Skill')
grade = mommy.make('position.Grade')
position = bidcycle_positions(post=post, skill=skill, grade=grade)
bidcycle_positions(post=post, skill=skill, grade=grade, _quantity=3)
bidcycle_positions(post=mommy.make_recipe('talentmap_api.organization.tests.post'), skill=skill, grade=grade, _quantity=3)
bidcycle_positions(post=mommy.make_recipe('talentmap_api.organization.tests.post'), skill=mommy.make('position.Skill'), grade=grade, _quantity=3)
response = client.get(f'/api/v1/position/{position.id}/similar/')
assert response.status_code == status.HTTP_200_OK
assert response.data["count"] == 3
position.post = None
position.save()
position.refresh_from_db()
response = client.get(f'/api/v1/position/{position.id}/similar/')
assert response.status_code == status.HTTP_200_OK
assert response.data["count"] == 6
position.skill = None
position.save()
position.refresh_from_db()
response = client.get(f'/api/v1/position/{position.id}/similar/')
assert response.status_code == status.HTTP_200_OK
assert response.data["count"] == 9
``` |
{
"source": "18F/tock-blcoks",
"score": 3
} |
#### File: tock-blcoks/spec/utilization_summary_spec.py
```python
import datetime
import unittest
import sys
from collections import namedtuple
sys.path.append('..')
import utilization_summary
TODAY = datetime.date.today()
class UtilizationSummaryTestCase(unittest.TestCase):
"""Tests for `utilization_summary.py`."""
def test_calc_billable_hours(self):
"""Can it correctly calculate billable hours"""
test_entry_list = [
{'project_name': 'TTS Acq / Internal Acq', 'hours_spent': 1, 'billable': False},
{'project_name': '18F / Learn', 'hours_spent': 1, 'billable': False},
{'project_name': 'TTS Acq / Internal Acq', 'hours_spent': 1, 'billable': True},
{'project_name': 'TTS Acq / Learn', 'hours_spent': 4, 'billable': True}
]
self.assertEqual(utilization_summary.calc_billable_hours(test_entry_list), 5)
def test_calc_internal_hours(self):
"""Can it correctly calculate internal project hours"""
test_entry_list = [
{'project_name': 'TTS Acq / Internal Acq', 'hours_spent': 1, 'billable': False},
{'project_name': 'TTS Acq / Internal Acq', 'hours_spent': 1, 'billable': True},
{'project_name': 'TTS Acq / Learn', 'hours_spent': 4, 'billable': True}
]
self.assertEqual(utilization_summary.calc_internal_hours(test_entry_list), 1)
def test_calc_total_hours(self):
"""Does the sample list of entries return a total number of hours equal to the sum?"""
test_entry_list = [
{'hours_spent': 1},
{'hours_spent': 1},
{'hours_spent': 1},
{'hours_spent': 4}
]
self.assertEqual(utilization_summary.calc_total_hours(test_entry_list), 7)
def test_month_average_and_goal_row(self):
"""Can it correctly calculate quarterly averages and utilization hours goals"""
test_user_list_row = [
'name',
'type',
'team',
[0.5, 4.0, 4.5],
[1.5, 2.0, 4.5],
[0.5, 3.0, 4.5],
[0.5, 3.0, 4.5]
]
result = [4.0, 2.0, 3.0, 3.0, 2.7, '22.9', '30.9']
self.assertEqual(utilization_summary.month_average_and_goal_row(
test_user_list_row, 1), result)
def test_mean(self):
"""Does a list of 4 2s have a mean of 2?"""
self.assertEqual(utilization_summary.mean([2, 2, 2, 2]), 2)
def test_find_months_raise_error(self):
"""Does August and Oktoberfest render an error?"""
Args = namedtuple('MyStruct', 'beginmonth lastmonth')
args = Args(beginmonth='August', lastmonth='Oktoberfest')
self.assertRaises(ValueError, lambda: utilization_summary.find_months(TODAY, args))
def test_find_months_december(self):
"""Does January and December render a list of [1,13]?"""
Args = namedtuple('MyStruct', 'beginmonth lastmonth')
args = Args(beginmonth='January', lastmonth='December')
self.assertEqual(utilization_summary.find_months(TODAY, args), [1, 12])
def test_find_months(self):
"""Does August and October render a list of [8,10]?"""
Args = namedtuple('MyStruct', 'beginmonth lastmonth')
args = Args(beginmonth='August', lastmonth='October')
self.assertEqual(utilization_summary.find_months(TODAY, args), [8, 10])
if __name__ == '__main__':
unittest.main()
```
#### File: 18F/tock-blcoks/utilization_summary.py
```python
import csv
import os
import datetime
import urllib.request
from urllib.error import HTTPError as HTTPError
import json
import tock_blocks
TOCK_API_KEY = os.environ['TOCK_API_KEY']
MONTH_NAME_LIST = ["January", "February", "March",
"April", "May", "June", "July", "August",
"September", "October", "November", "December"
]
PRINT_PREFIX = tock_blocks.Color.PURPLE+"TOCK BLOCKS:"+tock_blocks.Color.END
def all_users_from_file(userfile, args):
"""
Generate the entire utilization report from a csv of users
"""
data_source = 'api'
if args.file is not None:
data_source = args.file
print("{} Generating the utilization report from the data in {}.".format(
PRINT_PREFIX, data_source))
users = tock_blocks.read_csv_to_list(userfile)
today = datetime.date.today()
if data_source != 'api':
time_entries = tock_blocks.read_csv_to_list(args.file)
months = find_months(today, args)
user_list = [0] * len(users)
for user_index in range(len(users)):
print('{} Downloading data from tock & processing for {}.'.format(
PRINT_PREFIX,
users[user_index][0]
)
)
if data_source == 'api':
time_entries = get_data_from_tock(today, users[user_index][0])
user_list[user_index] = users[user_index] + \
utilization_calculator(
users[user_index][0], months, time_entries, today)
write_output(args, user_list, months, today)
def get_data_from_tock(today, tock_user_name):
"""
Pulls api data from tock
Args:
today (datetime): datetime for current time
tock_user_name (str): username of current tock query
Returns:
        An array of tock_entries as dicts for that user from the past year,
        or an empty array if there is a failure
"""
last_year = today.year - 1
query_month = today.month + 1
url = 'https://tock.18f.gov/api/timecards.json?after={}-{}-01&user={}'.format(
str(last_year),
query_month,
tock_user_name
)
headers = {}
headers['Authorization'] = 'token %s' % TOCK_API_KEY
req = urllib.request.Request(url, headers=headers)
try:
html = urllib.request.urlopen(req).read()
        parsed_response = json.loads(html.decode("utf-8"))
        return parsed_response
except HTTPError:
print('Failed to download data for {}'.format(tock_user_name))
return []
def find_months(today, args):
"""
    Convert the supplied month-name arguments into a range of months to iterate over
"""
months = [0, 0]
if args.beginmonth is None:
months[0] = today.month - 11
if args.lastmonth is None:
months[1] = today.month + 1
if months[1] == 0:
months[1] = MONTH_NAME_LIST.index(args.lastmonth)+1
if months[0] == 0:
months[0] = MONTH_NAME_LIST.index(args.beginmonth)+1
if months[0] >= months[1]:
months[0] = months[0] - 12
return months
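# Illustrative example (not from the original code): with today in June and neither
# month argument supplied, find_months returns [-5, 7]; calculate_month_year below
# maps the non-positive indices to last year's months (e.g. -5 -> "<last year>-07"),
# so the range covers last July through this June.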
def utilization_calculator(user, months, time_entries, today):
"""
Figure out the utilization and billable levels for a user
"""
# Grab user
user_entries = tock_blocks.get_user_entries(user, time_entries)
# Calculate each month billable/ utilization / total for that user
user_values = [0] * (months[1]-months[0])
array_ind = 0
for ind in range(months[0], months[1]):
start_month = calculate_month_year(ind, today)
month_time_entries = tock_blocks.get_entries_in_month(
start_month+"-01", user_entries
)
billable_hours = calc_billable_hours(month_time_entries)
internal_hours = calc_internal_hours(month_time_entries)
total_hours = calc_total_hours(month_time_entries)
user_values[array_ind] = find_percentages(
billable_hours, internal_hours, total_hours)
array_ind += 1
return user_values
def find_percentages(billable_hours, internal_hours, total_hours):
"""
    Take total billable, internal, and total hours and find the percentage of each.
"""
billable_percent = 0.0
internal_percent = 0.0
if total_hours > 0:
billable_percent = round(billable_hours/total_hours*100, 1)
internal_percent = round(internal_hours/total_hours*100, 1)
utilizable_percent = round((billable_percent + internal_percent), 1)
return [billable_percent, internal_percent, utilizable_percent]
return [0.0, 0.0, 0.0]
def calculate_month_year(month_value, today):
"""
Convert a month index to a string to be supplied in a filter
"""
year_to_use = today.year
ind = month_value
if month_value <= 0:
year_to_use = year_to_use - 1
ind = month_value + 12
start_month = ""
if ind < 10:
start_month = "0"+str(ind)
elif month_value == 12:
start_month = "12"
else:
start_month = str(ind)
start_month = str(year_to_use) + '-' + start_month
return start_month
def calc_hour_generator(calculator_method):
    """
    Build an hour calculator from an entry-level predicate, so the different
    calculators below can be expressed as lambdas (completing the original TODO)
    """
    def nested_entry_iterator(entries):
        """
        Internal function for the lambda
        """
        hour_count = 0
        for entry in entries:
            if calculator_method(entry):
                hour_count += float(entry['hours_spent'])
        return hour_count
    return nested_entry_iterator
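# Illustrative usage of calc_hour_generator (the names below are hypothetical):
# calc_billable = calc_hour_generator(lambda entry: entry['billable'])
# calc_billable(entries) would then mirror calc_billable_hours(entries) below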
def calc_billable_hours(entries):
"""
Calculates billable hours from an array of entry dictionaries
"""
billable_hours_count = 0.0
for entry in entries:
if entry['billable']:
billable_hours_count = billable_hours_count + float(entry['hours_spent'])
return billable_hours_count
def calc_internal_hours(entries):
"""
Calculates internal utilizable hours from an array of entry dictionaries
"""
internal_hours = 0.0
for entry in entries:
if entry['project_name'][:22] == "TTS Acq / Internal Acq" and not entry['billable']:
internal_hours = internal_hours + float(entry['hours_spent'])
return internal_hours
def calc_total_hours(entries):
"""
Calculates sum of hours from an array of entry dictionaries
"""
total_hours = 0.0
for entry in entries:
total_hours = total_hours + float(entry['hours_spent'])
return total_hours
def month_average_and_goal_row(user_list_row, sub_array_ind):
"""
    Append quarterly averages and utilization-goal columns to the user's monthly data
"""
filtered_list = [i[sub_array_ind] for i in user_list_row[3:]]
quarterly_average = round(mean(filtered_list[-3:]), 1)
filtered_list = filtered_list + [
quarterly_average,
weekly_difference_to_goal(quarterly_average, 60),
weekly_difference_to_goal(quarterly_average, 80)
]
return filtered_list
def weekly_difference_to_goal(average_value, level):
"""
    Calculate how many more hours a week the person would need to be utilized to
    achieve a utilization target
"""
weekly_difference = round((level-average_value) * 0.4, 1)
return str(weekly_difference)
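# Worked example (matching the values in the spec's month_average_and_goal_row test):
# a quarterly average of 2.7% against a 60% target gives round((60 - 2.7) * 0.4, 1) = 22.9
# extra hours per week, and 30.9 for the 80% target; the 0.4 factor presumably reflects
# that 1% of a 40-hour week is 0.4 hours.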
def mean(numbers):
"""
    Calculates the mean of an array
"""
return float(sum(numbers)) / max(len(numbers), 1)
def write_output(args, user_list, months, today):
"""
    Builds a CSV of the utilization summary
"""
file_to_write = develop_filename(args, today)
with open(file_to_write, 'w') as outcsv:
writer = csv.writer(outcsv, delimiter=',',
quotechar='|',
quoting=csv.QUOTE_MINIMAL,
lineterminator='\n'
)
if months[0] <= 0: # check if starting in previous year
first_month = months[0] + 11
months_to_print = MONTH_NAME_LIST[first_month:] + MONTH_NAME_LIST[:months[1]-1]
else:
            months_to_print = MONTH_NAME_LIST[months[0]-1:months[1]-1]
final_columns = [
'Average for last quarter',
'60 % Util Hours / Week',
'80 % Util - Hours / Week'
]
header_row = ['Name', 'Position', 'Team', 'Project type'] + months_to_print+final_columns
writer.writerow(header_row)
for item in user_list:
toprow = [item[0], item[1], item[2], 'Billable'] + month_average_and_goal_row(item, 0)
middlelist = ['', '', '', 'Internal projects'] + month_average_and_goal_row(item, 1)
bottom = ['', '', '', 'Utilization percentage'] + month_average_and_goal_row(item, 2)
writer.writerow(toprow)
writer.writerow(middlelist)
writer.writerow(bottom)
writer.writerow(['']*(len(item)+1))
print("{} Completed generating the utilization summary. Please view the report in the file {}.".format(
PRINT_PREFIX,
file_to_write
)
)
def develop_filename(args, today):
"""
Figues out whether to use a supplied filename or a date stamped entry
"""
if args.outfile is not None:
return args.outfile
return 'utlization-summary-{}.csv'.format(today.strftime("%Y-%m-%d"))
``` |
{
"source": "18F/tock-manangement-reports",
"score": 3
} |
#### File: tock-manangement-reports/lib/timecards.py
```python
import os
import requests
import pandas as pd
import json
from lib.roster import Roster
class Timecards:
def __init__(self, start_date):
r = Roster().roster
self.timecards = self.get_timecards(start_date)
self.timecards = self.merge_roster(r)
def get_timecards(self, start_date):
headers = {'Authorization': 'Token ' + os.environ.get('TOCK_API_KEY')}
url = 'https://tock.18f.gov/api/timecards.json?date=' + str(start_date)
tock_data = requests.get(url, headers=headers).json()
timecards = pd.read_json(json.dumps(tock_data))
return timecards
def merge_roster(self, roster):
return self.timecards.merge(roster, left_on="user", right_on="employee", how="left")
```
#### File: tock-manangement-reports/tests/test_roster.py
```python
from lib.roster import Roster
def test_create_roster_object():
r = Roster()
assert len(r.roster) == 193
assert r.roster["employee"][146] == "aaron.snow"
``` |
{
"source": "18F/trello_utils",
"score": 2
} |
#### File: 18F/trello_utils/dump_board.py
```python
import json
import sys
import click
import yaml
## API Reference: https://pythonhosted.org/trello/trello.html
## Advanced: https://developers.trello.com/advanced-reference/
## Python: https://pythonhosted.org/trello/
# Trello API Info: https://trello.com/app-key
from trello import TrelloApi
# Get an AUTH_TOKEN Example: https://pythonhosted.org/trello/examples.html
# : To get an AUTH_TOKEN use "get_auth_token()"
# Help: http://www.trello.org/help.html
def setup_trello():
with open('env.json') as env:
config = json.load(env)
trello = TrelloApi( config['TRELLO_APP_KEY'] )
trello.set_token( config['TRELLO_AUTH_TOKEN'] )
return config, trello
@click.group()
def cli():
pass
def dump_list_cards(in_list):
list_cards = trello.lists.get(in_list['id'], cards="all")['cards']
for c in list_cards:
print "\t* ###", c['name'].encode('utf8'), "###"
if c['desc']:
print "\t--------------------------"
print "\t", c['desc'].encode('utf8')
print "\t--------------------------"
text = []
actions = trello.cards.get_field("actions", c['id'])
for a in actions:
if a.get('data', {}).get('text', None):
text.append(a['data']['text'].encode('utf8'))
if text:
print "\t`"
print "\t", "\n".join(text)
print "\t`"
@cli.command()
def dump_board():
board_lists = trello.boards.get_list( config['BOARD_ID'] )
for l in board_lists:
print "##", l['name'].encode('utf8')
dump_list_cards(l)
config, trello = setup_trello()
if __name__ == "__main__":
cli()
```
#### File: 18F/trello_utils/tag_cards.py
```python
import json
import sys
## API Reference: https://pythonhosted.org/trello/trello.html
## Advanced: https://developers.trello.com/advanced-reference/
## Python: https://pythonhosted.org/trello/
# Trello API Info: https://trello.com/app-key
from trello import TrelloApi
# Get an AUTH_TOKEN Example: https://pythonhosted.org/trello/examples.html
# : To get an AUTH_TOKEN use "get_auth_token()"
# Help: http://www.trello.org/help.html
def del_all_cards(trello, board_id):
for card in trello.boards.get_card(board_id):
trello.cards.delete( card['id'] )
def get_auth_token(trello):
print("Call this URL in your browser and write down the code in env.json")
print(trello.get_token_url('trello_utils', expires='never', write_access=True))
def get_labels(trello, board_id):
b = trello.boards.get( board_id )
return b['labelNames']
def label_all_cards(trello, board_id, list_name, label_name):
print("Tagging all cards in <%s> with <%s>" % (list_name, label_name))
board_lists = trello.boards.get_list( board_id )
    in_list = [l for l in board_lists if l['name'] == list_name][0]
print("\tIn:", in_list['name'], in_list['id'])
print("")
list_cards = trello.lists.get(in_list['id'], cards="all")['cards']
for c in list_cards:
print("Tagging:", c['name'], c['id'])
try:
trello.cards.new_label( c['id'], label_name ) #throws HTTP 400 if label already exists
except:
pass
def unlabel_all_cards(trello, board_id, list_name, label_name):
print("Tagging all cards in <%s> with <%s>" % (list_name, label_name))
board_lists = trello.boards.get_list( board_id )
    in_list = [l for l in board_lists if l['name'] == list_name][0]
print("\tIn:", in_list['name'], in_list['id'])
print("")
list_cards = trello.lists.get(in_list['id'], cards="all")['cards']
for c in list_cards:
print("Tagging:", c['name'], c['id'])
try:
trello.cards.delete_label_color( label_name, c['id'] )
except:
pass
def setup_trello():
with open('env.json') as env:
config = json.load(env)
trello = TrelloApi( config['TRELLO_APP_KEY'] )
trello.set_token( config['TRELLO_AUTH_TOKEN'] )
return config, trello
if __name__ == "__main__":
config, trello = setup_trello()
list_name = sys.argv[1]
try:
tag_color = sys.argv[2]
except:
print(json.dumps( get_labels(trello, config['BOARD_ID'] ), indent=4, sort_keys=True))
sys.exit(-1)
# Auth Manual Step Required: Call the URL printed by below via your browser, while logged in
#trello = TrelloApi( config["TRELLO_APP_KEY"] )
#get_auth_token(trello)
label_all_cards(trello, config['BOARD_ID'], list_name, tag_color)
#unlabel_all_cards(trello, config['BOARD_ID'], list_name, tag_color)
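    # Illustrative usage (the list and label names are hypothetical):
    #   python tag_cards.py "To Do" green
    # Invoking with only a list name prints the board's label names and exits.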
``` |
{
"source": "18F/trustymail",
"score": 3
} |
#### File: trustymail/trustymail/domain.py
```python
from publicsuffix import PublicSuffixList
from trustymail import trustymail
public_list = PublicSuffixList()
class Domain:
base_domains = {}
def __init__(self, domain_name):
self.domain_name = domain_name
self.base_domain_name = public_list.get_public_suffix(domain_name)
if self.base_domain_name != self.domain_name:
if self.base_domain_name not in Domain.base_domains:
domain = Domain(self.base_domain_name)
# Populate DMARC for parent.
trustymail.dmarc_scan(domain)
Domain.base_domains[self.base_domain_name] = domain
self.base_domain = Domain.base_domains[self.base_domain_name]
else:
self.base_domain = None
# Start off assuming the host is live unless an error tells us otherwise.
self.is_live = True
# Keep entire record for potential future use.
self.mx_records = []
self.spf = []
self.dmarc = []
self.dmarc_policy = None
# Syntax validity - default spf to false as the lack of an SPF is a bad thing.
self.valid_spf = False
self.valid_dmarc = True
self.syntax_errors = []
# Mail Info
self.mail_servers = []
# A list of any errors that occurred while scanning records.
self.errors = []
def has_mail(self):
return len(self.mail_servers) > 0
def has_spf(self):
return len(self.spf) > 0
def has_dmarc(self):
return len(self.dmarc) > 0
def add_mx_record(self, record):
self.mx_records.append(record)
self.mail_servers.append(record[1])
def parent_has_dmarc(self):
if self.base_domain is None:
return None
return self.base_domain.has_dmarc()
def parent_valid_dmarc(self):
if self.base_domain is None:
return None
return self.base_domain.valid_dmarc
def parent_dmarc_results(self):
if self.base_domain is None:
return None
return self.format_list(self.base_domain.dmarc)
def get_dmarc_policy(self):
# If the policy was never set, or isn't in the list of valid policies, check the parents.
if self.dmarc_policy is None or self.dmarc_policy.lower() not in ["quarantine", "reject", "none"]:
if self.base_domain is None:
return ""
else:
return self.base_domain.get_dmarc_policy()
return self.dmarc_policy
def generate_results(self):
results = {
"Domain": self.domain_name,
"Base Domain": self.base_domain_name,
"Live": self.is_live,
"MX Record": self.has_mail(),
"Mail Servers": self.format_list(self.mail_servers),
"SPF Record": self.has_spf(),
"Valid SPF": self.valid_spf,
"SPF Results": self.format_list(self.spf),
"DMARC Record": self.has_dmarc(),
"Valid DMARC": self.has_dmarc() and self.valid_dmarc,
"DMARC Results": self.format_list(self.dmarc),
"DMARC Record on Base Domain": self.parent_has_dmarc(),
"Valid DMARC Record on Base Domain": self.parent_has_dmarc() and self.parent_valid_dmarc(),
"DMARC Results on Base Domain": self.parent_dmarc_results(),
"DMARC Policy": self.get_dmarc_policy(),
"Syntax Errors": self.format_list(self.syntax_errors)
}
return results
# Format a list into a string to increase readability in CSV.
def format_list(self, record_list):
if not record_list:
return ""
return ", ".join(record_list)
``` |
{
"source": "18F/tts-bug-bounty-dashboard",
"score": 3
} |
#### File: bugbounty/tests/test_settings_utils.py
```python
import unittest
import json
from ..settings_utils import (load_cups_from_vcap_services,
load_database_url_from_vcap_services)
def make_vcap_services_env(vcap_services):
return {
'VCAP_SERVICES': json.dumps(vcap_services)
}
class CupsTests(unittest.TestCase):
def test_noop_if_vcap_services_not_in_env(self):
env = {}
load_cups_from_vcap_services('blah', env=env)
self.assertEqual(env, {})
def test_irrelevant_cups_are_ignored(self):
env = make_vcap_services_env({
"user-provided": [
{
"label": "user-provided",
"name": "NOT-boop-env",
"syslog_drain_url": "",
"credentials": {
"boop": "jones"
},
"tags": []
}
]
})
load_cups_from_vcap_services('boop-env', env=env)
self.assertFalse('boop' in env)
def test_credentials_are_loaded(self):
env = make_vcap_services_env({
"user-provided": [
{
"label": "user-provided",
"name": "boop-env",
"syslog_drain_url": "",
"credentials": {
"boop": "jones"
},
"tags": []
}
]
})
load_cups_from_vcap_services('boop-env', env=env)
self.assertEqual(env['boop'], 'jones')
def test_database_settings_loaded():
uri = "this is the database url"
env = make_vcap_services_env({"aws-rds": [{"credentials": {"uri": uri}}]})
load_database_url_from_vcap_services("boop-env", "aws-rds", env=env)
assert env['DATABASE_URL'] == uri
```
#### File: bugbounty/tests/test_system_check.py
```python
import pytest
from django.core.management import call_command
@pytest.mark.django_db
def test_system_check_works():
call_command('check', '--fail-level', 'WARNING')
```
#### File: management/commands/runscheduler.py
```python
import time
import logging
from django.core.management import call_command
from django.core.management.base import BaseCommand
logger = logging.getLogger('scheduler')
class Command(BaseCommand):
help = 'Runs the scheduler process'
def run_cmd(self, cmd, *args, **options):
cmdline = f'manage.py {cmd}'
logger.info(f'Running "{cmdline}".')
try:
call_command(cmd, *args, **options)
except Exception as e:
logger.exception(f'An error occurred when running "{cmdline}".')
def sleep(self, seconds):
logger.info(f'Waiting {seconds} seconds.')
time.sleep(seconds)
def handle(self, *args, **options):
while True:
self.run_cmd('h1sync')
self.sleep(600)
```
#### File: dashboard/tests/test_h1.py
```python
from django.test import override_settings
from unittest import mock
from .. import h1
from h1 import models as h1_models
@override_settings(H1_PROGRAMS=[
h1.ProgramConfiguration(handle='baz',
api_username='foo',
api_password='<PASSWORD>')
])
@mock.patch('dashboard.h1.HackerOneClient')
def test_find_reports_works(fake_client_class):
fake_client = mock.MagicMock()
fake_client_class.return_value = fake_client
fake_client.find_resources.return_value = ["stuff"]
results = [r for r in h1.find_reports(blah=1)]
assert results == ["stuff"]
fake_client_class.assert_called_once_with('foo', 'bar')
fake_client.find_resources.assert_called_once_with(
h1.h1_models.Report,
program=['baz'],
blah=1
)
def test_program_configuration_parse_works():
pc = h1.ProgramConfiguration.parse('prog:user:pass:!?:')
assert pc.handle == 'prog'
assert pc.api_username == 'user'
assert pc.api_password == '<PASSWORD>:!?:'
def test_program_configuration_parse_list_from_environ_works():
pcs = h1.ProgramConfiguration.parse_list_from_environ(
environ={
'H1_PROGRAM_1': 'a:b:c',
'H1_PROGRAM_2': 'd:e:f',
},
prefix='H1_PROGRAM_',
)
assert len(pcs) == 2
assert pcs[0].handle == 'a'
assert pcs[0].api_username == 'b'
assert pcs[0].api_password == 'c'
assert pcs[1].handle == 'd'
assert pcs[1].api_username == 'e'
assert pcs[1].api_password == 'f'
def test_newer_report_works():
report = h1.NewerReport(None, {
'type': 'report',
'attributes': {
'created_at': '2016-02-02T04:05:06.000Z',
'last_activity_at': None,
'first_program_activity_at': None,
'last_program_activity_at': None,
'last_reporter_activity_at': None,
'triaged_at': None,
'swag_awarded_at': None,
'bounty_awarded_at': None,
'closed_at': None,
'disclosed_at': None,
'title': 'XSS in login form',
'state': 'new',
},
'relationships': {
'structured_scope': {
'data': {
"id": "57",
"type": "structured-scope",
"attributes": {
"asset_identifier": "api.example.com",
"asset_type": "url",
"confidentiality_requirement": "high",
"integrity_requirement": "high",
"availability_requirement": "high",
"max_severity": "critical",
"created_at": "2015-02-02T04:05:06.000Z",
"updated_at": "2016-05-02T04:05:06.000Z",
"instruction": None,
"eligible_for_bounty": True,
"eligible_for_submission": True
}
}
}
}
})
assert report.structured_scope.asset_identifier == 'api.example.com'
assert report.structured_scope.asset_type == 'url'
assert report.structured_scope.eligible_for_bounty is True
def test_bounty_amounts_containing_commas():
# Frustratingly, h1's API returns bounty amounts over 999 that contain
# commas. This tests that our monkeypatch handles this correctly.
b = h1_models.Bounty(None, {
'type': 'bounty',
'attributes': {
'created_at': '2016-02-02T04:05:06.000Z',
'amount': '2,000',
'bonus_amount': '1,500'
}
})
assert b.amount == 2000
```
#### File: dashboard/tests/test_views.py
```python
import pytest
from django.contrib.auth.models import User
from django.utils.safestring import SafeString
from .. import views
@pytest.fixture
def some_user(db):
user = User(username='foo', email='<EMAIL>')
user.save()
return user
@pytest.fixture
def some_user_client(some_user, client):
client.force_login(some_user)
return client
def test_get_bookmarklet_url_works(rf):
request = rf.get('/')
request.META['HTTP_HOST'] = 'boop.gov'
url = views.get_bookmarklet_url(request)
assert url.startswith('javascript:')
assert '"' not in url
assert 'https://boop.gov' in url
assert isinstance(url, SafeString)
def test_logout_works(some_user_client):
response = some_user_client.get('/logout/')
assert response.status_code == 200
assert not response.wsgi_request.user.is_authenticated()
def test_index_returns_200(some_user_client):
response = some_user_client.get('/', **{'HTTP_HOST': 'boop.gov'})
assert response.status_code == 200
def test_index_requires_logged_in_user(client):
response = client.get('/')
assert response.status_code == 302
assert response['location'] == '/auth/login?next=/'
def test_bounty_list(some_user_client):
response = some_user_client.get('/bounties/')
assert response.status_code == 200
``` |
{
"source": "18F/wic_rules",
"score": 2
} |
#### File: wic_rules/rules_server/models.py
```python
from copy import deepcopy
import jsonschema
from django.contrib.postgres.fields.jsonb import JSONField
from django.core import exceptions
from django.db import DataError, connection, models
from prettytable import from_db_cursor
from rest_framework import exceptions
from .utils import DefaultValidatingDraft4Validator, values_from_json
class Ruleset(models.Model):
program = models.TextField(null=False, blank=False)
entity = models.TextField(null=False, blank=False)
sample_input = JSONField(null=True, blank=True)
null_sources = JSONField(null=True, blank=True, default={})
class Meta:
unique_together = (("program", "entity"), )
def validate(self, applications):
"""
Validate payload against this ruleset's syntax schemas.
Includes using it to fill in default values from the schema.
Returns the validated payload.
"""
for syntax_schema in self.syntaxschema_set.all():
try:
DefaultValidatingDraft4Validator(
syntax_schema.code).validate(applications)
except jsonschema.ValidationError as valerr:
raise exceptions.ParseError(str(valerr))
return applications
@property
def schema(self):
return self.syntaxschema_set.first()
def flattened(self, payload):
applicants = payload.pop('applicants')
for applicant in applicants:
applicant_info = deepcopy(payload)
applicant_info.update(applicant)
yield applicant_info
def null_source_sql(self, raw):
for (key, val) in self.null_sources.items():
if key not in raw:
yield " %s as ( select * from %s ) " % (key, val)
def source_sql_statements(self, raw):
with connection.cursor() as cursor:
for (source_sql, source_data) in values_from_json(
raw, self.schema):
table_name = source_sql.split()[0]
source_sql = "with " + source_sql + " select * from " + table_name
source_sql = source_sql.replace("%s", "'%s'") % source_data
yield (source_sql)
cursor.execute(source_sql)
yield str(from_db_cursor(cursor))
def values_from_json(self, raw):
(source_sql,
source_data) = zip(*(values_from_json(raw, schema=self.schema)))
source_sql += tuple(self.null_source_sql(raw))
source_clause = 'WITH ' + ',\n'.join(source_sql)
return (source_clause, source_data)
def calc(self, application):
overall_result = {}
for applicant in self.flattened(application):
eligibility = True
result = {'requirements': {}}
(source_clause, source_data) = self.values_from_json(applicant)
for node in self.node_set.filter(parent__isnull=True):
node_result = node.calc(source_clause, source_data)
result['requirements'][node.name] = node_result
if node.name != 'categories':
eligibility &= node_result['eligible']
result['eligible'] = eligibility
overall_result[int(applicant['id'])] = result
categories = result['requirements'].pop('categories', {})
category_names = [
key
for (key, val) in categories.get('subfindings', {}).items()
if val['eligible']
]
result['categories'] = {
'applicable': category_names,
'findings': categories.get('subfindings', {})
}
overall_result[int(applicant['id'])] = result
return overall_result
def sql(self, application):
for applicant in self.flattened(application):
(source_clause, source_data) = self.values_from_json(applicant)
for node in self.node_set.all():
yield from node.sql(source_clause, source_data)
class Node(models.Model):
name = models.TextField(null=False, blank=False)
parent = models.ForeignKey('self', on_delete=models.CASCADE, null=True)
ruleset = models.ForeignKey(Ruleset, null=True, on_delete=models.CASCADE)
requires_all = models.BooleanField(null=False, blank=False, default=False)
class Meta:
unique_together = (("name", "parent", "ruleset"), )
@property
def get_ruleset(self):
return self.ruleset or self.parent.get_ruleset
def sql(self, source_clause, source_data):
for rule in self.rule_set.all():
yield rule.sql(source_clause, source_data)
def calc(self, source_clause, source_data):
if self.requires_all:
eligibility = True
else:
eligibility = False
node_result = {'limitation': [], 'explanation': [], 'subfindings': {}}
for child_node in self.node_set.all():
child_node_result = child_node.calc(source_clause, source_data)
if self.requires_all:
eligibility &= child_node_result['eligible']
else:
eligibility |= child_node_result['eligible']
node_result['explanation'].append(child_node_result['explanation'])
if child_node_result['eligible'] and child_node_result['limitation']:
node_result['limitation'].append(
child_node_result['limitation'])
node_result['subfindings'][child_node.name] = child_node_result
for rule in self.rule_set.all():
rule_result = rule.calc(source_clause, source_data)
node_result['explanation'].append(rule_result['explanation'])
if self.requires_all:
eligibility &= rule_result['eligible']
else:
eligibility |= rule_result['eligible']
if rule_result['eligible'] and rule_result['limitation']:
node_result['limitation'].append(rule_result['limitation'])
node_result['subfindings'][rule.name] = rule_result
node_result['eligible'] = eligibility
return node_result
class Rule(models.Model):
name = models.TextField(null=False, blank=False)
code = models.TextField(null=True, blank=True)
node = models.ForeignKey(Node, on_delete=models.CASCADE)
class Meta:
unique_together = (("name", "node"), )
@property
def ruleset(self):
return self.node.get_ruleset
_SQL = """with source as (%s %s)
select (source.result).eligible,
(source.result).explanation,
((source.result).limitation).end_date,
((source.result).limitation).normal,
((source.result).limitation).description,
((source.result).limitation).explanation AS limitation_explanation
from source"""
def calc(self, source_clause, source_data):
with connection.cursor() as cursor:
sql = self._SQL % (source_clause, self.code)
try:
cursor.execute(sql, tuple(source_data))
except Exception as exc:
msg = ("Error executing rule %s\n" % self.name + str(exc) +
'\n\n in sql:\n\n' + sql)
raise DataError(msg)
findings = cursor.fetchone()
limitation = dict(
zip(('end_date', 'normal', 'description', 'explanation'),
findings[2:]))
if (not limitation['end_date']) and (not limitation['description']):
limitation = None
return {
'eligible': findings[0],
'explanation': findings[1],
'limitation': limitation
}
def sql(self, source_clause, source_data):
result = self._SQL % (source_clause, self.code)
result = result.replace("%s", "'%s'")
return result % source_data
class SyntaxSchema(models.Model):
ruleset = models.ForeignKey(Ruleset, on_delete=models.CASCADE)
type = models.TextField(null=False, blank=False, default='jsonschema')
code = JSONField(null=False, blank=False)
def walk(self, node=None):
"""Yields all the dictionaries in a nested structure."""
node = node or self.code
if isinstance(node, list):
for itm in node:
yield from self.walk(itm)
else:
yield node
for (key, val) in node.items():
if isinstance(val, dict):
yield from self.walk(val)
_JSONSCHEMA_TO_PG_TYPES = {
'integer': 'integer',
'number': 'numeric',
'string': 'text',
'date': 'date',
'boolean': 'boolean',
}
def _col_data_type(self, col_data):
if col_data.get('format') == 'date-time':
return 'date'
elif col_data.get('$ref') == '#/definitions/ynexception':
return 'text'
else:
data_type = col_data.get('type', 'text')
if isinstance(data_type, list):
data_type = [dt for dt in data_type if dt != 'null']
if len(data_type) > 1:
data_type = 'text'
else:
data_type = data_type[0]
return self._JSONSCHEMA_TO_PG_TYPES.get(data_type)
def data_types(self):
result = {}
for node in self.walk():
for (col_name, col_data) in node.get('properties', {}).items():
col_type_from_schema = self._col_data_type(col_data)
if col_type_from_schema:
result[col_name] = self._col_data_type(col_data)
return result
# todo: this should be one-to-one, or sorted so that the
# type-determiner comes first?
```
#### File: rules_server/tests/test_income.py
```python
import json
from copy import deepcopy
from os.path import join
import pytest
from django.core.management import call_command
from rest_framework import status
from rest_framework.test import APIClient
client = APIClient()
with open(join('examples', 'wic-federal0.json')) as infile:
payload0 = json.load(infile)
@pytest.fixture(autouse=True)
def rule_models():
call_command('loaddata', 'rules_server/fixtures/federal_wic.json')
@pytest.mark.django_db
def test_response_form():
url = '/rulings/wic/federal/'
response = client.post(url, payload0, format='json')
assert response.status_code == status.HTTP_200_OK
findings = response.json()['findings']
assert len(findings) == 2
assert len(findings['1']) == 4
assert len(findings['2']) == 1
for application in findings.values():
for applicant in application.values():
assert 'eligible' in applicant
assert 'categories' in applicant
assert 'requirements' in applicant
assert 'income' in applicant['requirements']
assert 'standard income' in applicant['requirements']['income'][
'subfindings']
assert applicant['eligible'] in (True, False)
@pytest.mark.django_db
def test_identity_required():
url = '/rulings/wic/federal/'
payload1 = deepcopy(payload0)
for application in payload1:
for applicant in application['applicants']:
applicant['proof_of_identity'] = True
response = client.post(url, payload1, format='json')
n_true = 0
for application in response.json()['findings'].values():
for applicant in application.values():
if applicant['eligible']:
n_true += 1
for explanation in applicant['requirements']['identity'][
'explanation']:
assert 'identity requirements' not in explanation
assert n_true > 0
payload1 = deepcopy(payload0)
for application in payload1:
for applicant in application['applicants']:
applicant['proof_of_identity'] = False
response = client.post(url, payload1, format='json')
for application in response.json()['findings'].values():
for applicant in application.values():
assert not applicant['eligible']
```
#### File: wic_rules/rules_server/utils.py
```python
import json
from collections import defaultdict
from datetime import date
from django.utils.dateparse import parse_date
from jsonschema import Draft4Validator, validators
def relationalize(target,
name='data_source',
results=None,
ids=None,
parent_id=None,
parent_name=None):
if results is None:
results = defaultdict(list)
if ids is None:
ids = defaultdict(int)
if not isinstance(target, list):
target = [
target,
]
for itm in target:
row = {'id': ids[name] + 1}
ids[name] += 1
if parent_name:
row[parent_name + '_id'] = parent_id
if isinstance(itm, dict):
for (key, val) in itm.items():
if isinstance(val, list) or isinstance(val, dict):
relationalize(
target=val,
name=key,
results=results,
ids=ids,
parent_id=row['id'],
parent_name=name)
else:
row[key] = val
else:
row[name] = itm
results[name].append(row)
return results
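# Illustrative example (not taken from the tests):
# relationalize({'name': 'Pat', 'kids': [{'age': 4}]}, name='applicant')
# returns a defaultdict roughly equal to
# {'kids': [{'id': 1, 'applicant_id': 1, 'age': 4}],
#  'applicant': [{'id': 1, 'name': 'Pat'}]}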
def datatype_is_ok(datatype, value):
if datatype == int:
is_float = datatype_is_ok(float, value)
return is_float and float(value).is_integer()
elif datatype == bool:
return isinstance(value, bool)
elif datatype == date:
try:
result = parse_date(value)
return bool(result)
except TypeError:
return False
try:
datatype(value)
return True
except (TypeError, ValueError):
return False
def datatype(values):
"""
Most restrictive Python type that works for all values.
>>> data = [4, 'cows', 0.3]
>>> datatype(data)
<class 'str'>
>>> data = [4, 12, 9]
>>> datatype(data)
<class 'int'>
>>> data = [True, False, False]
>>> datatype(data)
<class 'bool'>
"""
types = [bool, int, float, str]
for value in values:
while not datatype_is_ok(types[0], value):
types.pop(0)
if not types:
raise TypeError('No known type for %s' % value)
return types[0]
PY_TO_PG_DATATYPES = {
bool: 'bool',
int: 'integer',
float: 'numeric',
str: 'text',
date: 'date',
}
def all_values_in_list_of_dicts(data):
"""
>>> data = [{'a': 1}, {'b': 2}, {'a': 3, 'b': 4}]
>>> dict(all_values_in_list_of_dicts(data))
{'a': [1, 3], 'b': [2, 4]}
"""
values = defaultdict(list)
for row in data:
for (key, val) in row.items():
values[key].append(val)
return values
def column_types(data):
"""
For a list of dicts `data`, find PostgreSQL types
>>> data = [{'a': 1.5}, {'b': 'cows', 'c': 2}, {'a': 3, 'b': 4}]
>>> column_types(data)
{'a': 'numeric', 'b': 'text', 'c': 'integer'}
"""
result = {}
for (key, values) in all_values_in_list_of_dicts(data).items():
dtype = PY_TO_PG_DATATYPES.get(datatype(values))
result[key] = dtype
return result
def update_only_existing_keys(dict1, dict2):
"""
Like `dict1.update(dict2)`, only for keys already in dict1
"""
partial_dict2 = {k: v for (k, v) in dict2.items() if k in dict1}
dict1.update(partial_dict2)
return dict1
def record_type(data, schema):
"""
Generates PostgreSQL record type SQL
"""
data_types = column_types(data)
if schema:
data_types = update_only_existing_keys(data_types, schema.data_types())
return ', '.join(
'%s %s' % (key, dtype) for (key, dtype) in data_types.items())
def sql(name, data, schema=None):
types = record_type(data=data, schema=schema)
return """%s AS
(SELECT * FROM JSON_TO_RECORDSET(%%s) AS x(%s))""" % (name, types)
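# Illustrative: sql('applicant', [{'id': 1, 'age': 30}]) returns roughly
# "applicant AS (SELECT * FROM JSON_TO_RECORDSET(%s) AS x(id integer, age integer))",
# ready to be joined into the WITH clause built by values_from_json below.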
def values_from_json(raw, schema=None):
relationalized = relationalize(raw, 'applicant')
for (table_name, data) in relationalized.items():
yield (sql(table_name, data, schema=schema), json.dumps(data))
def extend_with_default(validator_class):
validate_properties = validator_class.VALIDATORS["properties"]
def set_defaults(validator, properties, instance, schema):
for (property, subschema) in properties.items():
if "default" in subschema:
instance.setdefault(property, subschema["default"])
for error in validate_properties(
validator,
properties,
instance,
schema,
):
yield error
return validators.extend(
validator_class,
{"properties": set_defaults},
)
DefaultValidatingDraft4Validator = extend_with_default(Draft4Validator)
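# Illustrative: validating an empty dict against a schema whose property declares
# a default fills the default in place, e.g.
#   instance = {}
#   DefaultValidatingDraft4Validator({'properties': {'n': {'default': 5}}}).validate(instance)
# leaves instance == {'n': 5}.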
if __name__ == "__main__":
import doctest
doctest.testmod()
``` |
{
"source": "18h61a05b0/Video-Analysis-for-Surveillance",
"score": 3
} |
#### File: 18h61a05b0/Video-Analysis-for-Surveillance/extract.py
```python
import os
import cv2
import numpy as np
from datetime import datetime
import classify
import glob
files = glob.glob('./images/*')
for f in files:
os.remove(f)
print("Extracting frames from the video.....")
print("Please Wait...\n\n")
def video2frames(path, output=None, skip=1, mirror=False):
video_object = cv2.VideoCapture(path)
# setup the output folder
if output is None:
output = path[:-4]
else:
if not output.endswith('/') and not output.endswith('\\'):
output += '/'
output += 'py_image'
index = 0
last_mirrored = True
while True:
success, frame = video_object.read()
if success:
if index % skip == 0:
if mirror and last_mirrored:
frame = _mirror_image(frame)
last_mirrored = not last_mirrored
                cv2.imwrite(output + "_" + str(datetime.now()) + ".jpg", frame)  # save the frame with a timestamped filename
else:
break
index += 1
def _mirror_image(image):
return np.fliplr(image)
def main():
import argparse
parser = argparse.ArgumentParser("Enter the filename of a video")
parser.add_argument('filename')
parser.add_argument('-o', '--output')
parser.add_argument('--skip', help="Only save every nth frame")
parser.add_argument('--mirror', action='store_true', help="Flip every other image")
args = parser.parse_args()
if args.skip is None:
args.skip = 1
if args.mirror is None:
args.mirror = False
# In case the filename points to a directory
if os.path.isdir(args.filename):
files = [os.path.join(args.filename, f) for f in os.listdir(args.filename) if os.path.isfile(os.path.join(args.filename, f))]
for video in files:
video2frames(video, output=args.output, skip=int(args.skip), mirror=bool(args.mirror))
else:
video2frames(args.filename, output=args.output, skip=int(args.skip), mirror=bool(args.mirror))
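# Illustrative command-line usage (the video filename is hypothetical):
#   python extract.py recording.mp4 -o ./images --skip 5 --mirror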
if __name__ == "__main__":
main()
print("100% Extracted")
print("Begin Analysis.....")
print("Please Wait....\n\n")
classify.run_classifier()
``` |
{
"source": "18harsh/Nirikshak-Bot-NB--eyantra",
"score": 2
} |
#### File: task2/NB_2082_Task_2B/task_2b.py
```python
import cv2
import numpy as np
import os, sys, platform
import traceback
import math
import time
##############################################################
# Importing the sim module for Remote API connection with CoppeliaSim
try:
import sim
except Exception:
print('\n[ERROR] It seems the sim.py OR simConst.py files are not found!')
print('\n[WARNING] Make sure to have following files in the directory:')
print('sim.py, simConst.py and appropriate library - remoteApi.dll (if on Windows), remoteApi.so (if on Linux) or remoteApi.dylib (if on Mac).\n')
sys.exit()
# Global variable "client_id" for storing ID of starting the CoppeliaSim Remote connection
# NOTE: DO NOT change the value of this "client_id" variable here
client_id = -1
##############################################################
################# ADD UTILITY FUNCTIONS HERE #################
## You can define any utility functions for your code. ##
## Please add proper comments to ensure that your code is ##
## readable and easy to understand. ##
##############################################################
##############################################################
def init_remote_api_server():
"""
Purpose:
---
This function should first close any open connections and then start
communication thread with server i.e. CoppeliaSim.
NOTE: In this Task, do not call the exit_remote_api_server function in case of failed connection to the server.
The test_task_2a executable script will handle that condition.
Input Arguments:
---
None
Returns:
---
`client_id` : [ integer ]
the client_id generated from start connection remote API, it should be stored in a global variable
Example call:
---
client_id = init_remote_api_server()
NOTE: This function will be automatically called by test_task_2a executable before starting the simulation.
"""
global client_id
############## ADD YOUR CODE HERE ##############
sim.simxFinish(-1)
client_id = sim.simxStart('127.0.0.1',19997,True,True,5000,5)
##################################################
return client_id
def get_vision_sensor_image():
"""
Purpose:
---
This function should first get the handle of the Vision Sensor object from the scene.
After that it should get the Vision Sensor's image array from the CoppeliaSim scene.
Input Arguments:
---
None
Returns:
---
`vision_sensor_image` : [ list ]
the image array returned from the get vision sensor image remote API
`image_resolution` : [ list ]
the image resolution returned from the get vision sensor image remote API
`return_code` : [ integer ]
the return code generated from the remote API
Example call:
---
vision_sensor_image, image_resolution, return_code = get_vision_sensor_image()
NOTE: This function will be automatically called by test_task_2a executable at regular intervals.
"""
global client_id
vision_sensor_image = []
image_resolution = []
return_code = 0
############## ADD YOUR CODE HERE ##############
return_code,v0=sim.simxGetObjectHandle(client_id,'Vision_sensor',sim.simx_opmode_blocking)
return_code, resolution, image = sim.simxGetVisionSensorImage(client_id, v0, 0, sim.simx_opmode_blocking)
vision_sensor_image = np.array(image)
image_resolution = np.array(resolution)
##################################################
return vision_sensor_image, image_resolution, return_code
def transform_vision_sensor_image(vision_sensor_image, image_resolution):
"""
Purpose:
---
Transforms the image data returned by simxGetVisionSensorImage into a numpy
array that is possible to process using OpenCV library.
This function should:
1. First convert the vision_sensor_image list to a NumPy array with data-type as uint8.
2. Since the image returned from Vision Sensor is in the form of a 1-D (one dimensional) array,
the new NumPy array should then be resized to a 3-D (three dimensional) NumPy array.
3. Change the color of the new image array from BGR to RGB.
4. Flip the resultant image array about the X-axis.
The resultant image NumPy array should be returned.
Input Arguments:
---
`vision_sensor_image` : [ list ]
the image array returned from the get vision sensor image remote API
`image_resolution` : [ list ]
the image resolution returned from the get vision sensor image remote API
Returns:
---
`transformed_image` : [ numpy array ]
the resultant transformed image array after performing above 4 steps
that can be processed further using OpenCV library
Example call:
---
transformed_image = transform_vision_sensor_image(vision_sensor_image, image_resolution)
NOTE: This function will be automatically called by test_task_2a executable at regular intervals.
"""
transformed_image = None
############## ADD YOUR CODE HERE ##############
vision_sensor_image = np.uint8(vision_sensor_image)
vision_sensor_image = vision_sensor_image.reshape(image_resolution[0],image_resolution[1],3)
transformed_image = cv2.cvtColor(vision_sensor_image, cv2.COLOR_BGR2RGB)
    transformed_image = cv2.flip(transformed_image, 0)
##################################################
return transformed_image
def send_data(maze_array):
"""
Purpose:
---
Sends data to CoppeliaSim via Remote API.
Input Arguments:
---
`maze_array` : [ nested list of lists ]
encoded maze in the form of a 2D array returned by detectMaze() function
Returns:
---
`return_code` : [ integer ]
the return code generated from the call script function remote API
Example call:
---
send_data(maze_array)
NOTE: You might want to study this link to understand simx.callScriptFunction() better
https://www.coppeliarobotics.com/helpFiles/en/remoteApiExtension.htm
"""
global client_id
return_code = -1
    ############## ADD YOUR CODE HERE ##############
maze_array = np.array(maze_array)
inputInts=maze_array.flatten()
inputFloats=[]
inputStrings=[]
inputBuffer=bytearray()
return_code,retInts,retFloats,retStrings,retBuffer=sim.simxCallScriptFunction(client_id,'Base',sim.sim_scripttype_customizationscript,'receiveData',inputInts,inputFloats,inputStrings,inputBuffer,sim.simx_opmode_blocking)
# if (return_code == sim.simx_return_ok):
# print(return_code,retInts,retFloats,retStrings,retBuffer)
##################################################
return return_code
def exit_remote_api_server():
"""
Purpose:
---
This function should wait for the last command sent to arrive at the Coppeliasim server
before closing the connection and then end the communication thread with server
i.e. CoppeliaSim using simxFinish Remote API.
Input Arguments:
---
None
Returns:
---
None
Example call:
---
exit_remote_api_server()
NOTE: This function will be automatically called by test_task_2a executable after ending the simulation.
"""
global client_id
############## ADD YOUR CODE HERE ##############
sim.simxGetPingTime(client_id)
sim.simxFinish(client_id)
##################################################
# NOTE: YOU ARE NOT ALLOWED TO MAKE ANY CHANGE TO THIS FUNCTION
#
# Function Name: main
# Inputs: None
# Outputs: None
# Purpose: This part of the code is only for testing your solution. The function does the following:
# - takes maze00.jpg as input
# - applies the Perspective transform and encodes the maze in the form of a 2D array by
# calling the detectMaze() function (same as Task 1B)
# - connects with the remote API server (CoppeliaSim scene)
# - starts the simulation
# - receives the output of the Vision Sensor in the CoppeliaSim scene
# - saves the output of vision sensor as an image
# - stops the simulation
# - Disconnects with the remote API server
# It then asks the whether to repeat the same above steps on all maze images present in
# 'test_cases' folder or not. Write your solution ONLY in the space provided in the
# transform_vision_sensor_image() and send_data() functions.
if __name__ == "__main__":
# Import 'task_1b.py' file as module
try:
import task_1b
except ImportError:
print('\n[ERROR] task_1b.py file is not present in the current directory.')
print('Your current directory is: ', os.getcwd())
print('Make sure task_1b.py is present in this current directory.\n')
sys.exit()
except Exception as e:
        print('Your task_1b.py threw an Exception, kindly debug your code!\n')
traceback.print_exc(file=sys.stdout)
sys.exit()
# Initiate the Remote API connection with CoppeliaSim server
print('\nConnection to CoppeliaSim Remote API Server initiated.')
print('Trying to connect to Remote API Server...')
try:
client_id = init_remote_api_server()
if (client_id != -1):
print('\nConnected successfully to Remote API Server in CoppeliaSim!')
else:
print('\n[ERROR] Failed connecting to Remote API server!')
print('[WARNING] Make sure the CoppeliaSim software is running and')
print('[WARNING] Make sure the Port number for Remote API Server is set to 19997.')
print('[ERROR] OR init_remote_api_server function is not configured correctly, check the code!')
print()
sys.exit()
except Exception:
        print('\n[ERROR] Your init_remote_api_server function threw an Exception, kindly debug your code!')
print('Stop the CoppeliaSim simulation manually if started.\n')
traceback.print_exc(file=sys.stdout)
print()
sys.exit()
# Flag to check whether maze array is generated or not, initially set to 0
maze_array_generated_flag = 0
# path directory of images in 'test_cases' folder
img_dir_path = 'test_cases/'
# path directory to 'generated_images' folder
generated_dir_path = 'generated_images/'
# path to 'maze00.jpg' image file
file_num = 0
img_file_path = img_dir_path + 'maze0' + str(file_num) + '.jpg'
if os.path.exists(img_file_path):
# print('\nFound maze0' + str(file_num) + '.jpg')
pass
else:
print('\n[ERROR] maze0' + str(file_num) + '.jpg not found. Make sure "test_cases" folder is present in current directory.')
print('Your current directory is: ', os.getcwd())
sys.exit()
print('\n============================================')
print('\nFor maze0' + str(file_num) + '.jpg')
# read the 'maze00.jpg' image file
input_img = cv2.imread(img_file_path)
if type(input_img) is np.ndarray:
try:
# get the resultant warped maze image after applying Perspective Transform
warped_img = task_1b.applyPerspectiveTransform(input_img)
if type(warped_img) is np.ndarray:
try:
# get the encoded maze in the form of a 2D array
maze_array = task_1b.detectMaze(warped_img)
if (type(maze_array) is list) and (len(maze_array) == 10):
print('\nEncoded Maze Array = %s' % (maze_array))
print('\n============================================')
# Flag for maze array generated updated to 1
maze_array_generated_flag = 1
else:
                        print('\n[ERROR] maze_array returned by detectMaze function in \'task_1b.py\' is not in the expected format! Check the code.')
print()
sys.exit()
except Exception:
                    print('\n[ERROR] Your detectMaze function in \'task_1b.py\' threw an Exception, kindly debug your code!')
traceback.print_exc(file=sys.stdout)
print()
sys.exit()
else:
                print('\n[ERROR] applyPerspectiveTransform function in \'task_1b.py\' is not returning the warped maze image in the expected format! Check the code.')
print()
sys.exit()
except Exception:
            print('\n[ERROR] Your applyPerspectiveTransform function in \'task_1b.py\' threw an Exception, kindly debug your code!')
traceback.print_exc(file=sys.stdout)
print()
sys.exit()
else:
print('\n[ERROR] maze0' + str(file_num) + '.jpg was not read correctly, something went wrong!')
print()
sys.exit()
# Check if connected to Remote API server and maze array has been generated successfully
if ((client_id != -1) and (maze_array_generated_flag == 1)):
try:
# Send maze array data to CoppeliaSim via Remote API
return_code = send_data(maze_array)
if (return_code == sim.simx_return_ok):
# Start the simulation
return_code = sim.simxStartSimulation(client_id, sim.simx_opmode_oneshot)
# Making sure that last command sent out had time to arrive
sim.simxGetPingTime(client_id)
if ((return_code == sim.simx_return_novalue_flag) or (return_code == sim.simx_return_ok)):
print('\nSimulation started correctly in CoppeliaSim.')
time.sleep(2)
try:
                        # Get image array and its resolution from Vision Sensor in CoppeliaSim scene
vision_sensor_image, image_resolution, return_code = get_vision_sensor_image()
if ((return_code == sim.simx_return_ok) and (len(image_resolution) == 2) and (len(vision_sensor_image) > 0)):
print('\nImage captured from Vision Sensor in CoppeliaSim successfully!')
# Get the transformed vision sensor image captured in correct format
try:
transformed_image = transform_vision_sensor_image(vision_sensor_image, image_resolution)
if (type(transformed_image) is np.ndarray):
# Save generated maze image in 'generated_images' folder
generated_img_file_path = generated_dir_path + 'result_maze0' + str(file_num) + '.jpg'
# Create the 'generated_images' folder and save the transformed image
if os.path.isdir(generated_dir_path) == True:
pass
else:
os.mkdir(generated_dir_path)
return_code = cv2.imwrite(generated_img_file_path, transformed_image)
if return_code == True:
print('\nTransformed maze image from CoppeliaSim: ' + str(generated_img_file_path) + ' was saved in \'generated_images\' folder successfully!')
else:
print('\n[ERROR] Failed to save Transformed maze image from CoppeliaSim in \'generated_images\' folder.')
# Stop the simulation
return_code = sim.simxStopSimulation(client_id, sim.simx_opmode_oneshot)
# Making sure that last command sent out had time to arrive
sim.simxGetPingTime(client_id)
if ((return_code == sim.simx_return_novalue_flag) or (return_code == sim.simx_return_ok)):
print('\nSimulation stopped correctly.')
time.sleep(2)
# Stop the Remote API connection with CoppeliaSim server
try:
exit_remote_api_server()
if (sim.simxStartSimulation(client_id, sim.simx_opmode_oneshot) == sim.simx_return_initialize_error_flag):
print('\nDisconnected successfully from Remote API Server in CoppeliaSim!')
else:
print('\n[ERROR] Failed disconnecting from Remote API server!')
print('[ERROR] exit_remote_api_server function is not configured correctly, check the code!')
except Exception:
                                        print('\n[ERROR] Your exit_remote_api_server function threw an Exception, kindly debug your code!')
print('Stop the CoppeliaSim simulation manually.\n')
traceback.print_exc(file=sys.stdout)
print()
sys.exit()
except Exception:
                                print('\n[ERROR] Your transform_vision_sensor_image function threw an Exception, kindly debug your code!')
print('Stop the CoppeliaSim simulation manually.\n')
traceback.print_exc(file=sys.stdout)
print()
sys.exit()
except Exception:
                        print('\n[ERROR] Your get_vision_sensor_image function threw an Exception, kindly debug your code!')
print('Stop the CoppeliaSim simulation manually.\n')
traceback.print_exc(file=sys.stdout)
print()
sys.exit()
except Exception:
            print('\n[ERROR] Your send_data function threw an Exception, kindly debug your code!')
traceback.print_exc(file=sys.stdout)
print()
sys.exit()
choice = input('\nDo you want to run your script on all maze images ? => "y" or "n": ')
if choice == 'y':
try:
client_id = init_remote_api_server()
if (client_id != -1):
print('\nConnected successfully to Remote API Server in CoppeliaSim!')
else:
print('\n[ERROR] Failed connecting to Remote API server!')
print('[WARNING] Make sure the CoppeliaSim software is running and')
print('[WARNING] Make sure the Port number for Remote API Server is set to 19997.')
print('[ERROR] OR init_remote_api_server function is not configured correctly, check the code!')
print()
sys.exit()
except Exception:
            print('\n[ERROR] Your init_remote_api_server function threw an Exception, kindly debug your code!')
print('Stop the CoppeliaSim simulation manually if started.\n')
traceback.print_exc(file=sys.stdout)
print()
sys.exit()
for file_num in range(1,9):
# Reset the flag to check whether maze array is generated or not to 0
maze_array_generated_flag = 0
# path to image file
img_file_path = img_dir_path + 'maze0' + str(file_num) + '.jpg'
if os.path.exists(img_file_path):
# print('\nFound maze0' + str(file_num) + '.jpg')
pass
else:
print('\n[ERROR] maze0' + str(file_num) + '.jpg not found. Make sure "test_cases" folder is present in current directory.')
print('Your current directory is: ', os.getcwd())
sys.exit()
print('\n============================================')
print('\nFor maze0' + str(file_num) + '.jpg')
# read the image file
input_img = cv2.imread(img_file_path)
if type(input_img) is np.ndarray:
try:
# get the resultant warped maze image after applying Perspective Transform
warped_img = task_1b.applyPerspectiveTransform(input_img)
if type(warped_img) is np.ndarray:
try:
# get the encoded maze in the form of a 2D array
maze_array = task_1b.detectMaze(warped_img)
if (type(maze_array) is list) and (len(maze_array) == 10):
print('\nEncoded Maze Array = %s' % (maze_array))
print('\n============================================')
# Flag for maze array generated updated to 1
maze_array_generated_flag = 1
else:
                                print('\n[ERROR] maze_array returned by detectMaze function in \'task_1b.py\' is not in the expected format! Check the code.')
print()
sys.exit()
except Exception:
                            print('\n[ERROR] Your detectMaze function in \'task_1b.py\' threw an Exception, kindly debug your code!')
traceback.print_exc(file=sys.stdout)
print()
sys.exit()
else:
                        print('\n[ERROR] applyPerspectiveTransform function in \'task_1b.py\' is not returning the warped maze image in the expected format! Check the code.')
print()
sys.exit()
except Exception:
                    print('\n[ERROR] Your applyPerspectiveTransform function in \'task_1b.py\' threw an Exception, kindly debug your code!')
traceback.print_exc(file=sys.stdout)
print()
sys.exit()
else:
print('\n[ERROR] maze0' + str(file_num) + '.jpg was not read correctly, something went wrong!')
print()
sys.exit()
# Check if connected to Remote API server and maze array has been generated successfully
if ((client_id != -1) and (maze_array_generated_flag == 1)):
try:
# Send maze array data to CoppeliaSim via Remote API
return_code = send_data(maze_array)
if (return_code == sim.simx_return_ok):
# Start the simulation
return_code = sim.simxStartSimulation(client_id, sim.simx_opmode_oneshot)
# Making sure that last command sent out had time to arrive
sim.simxGetPingTime(client_id)
if ((return_code == sim.simx_return_novalue_flag) or (return_code == sim.simx_return_ok)):
print('\nSimulation started correctly in CoppeliaSim.')
time.sleep(2)
try:
                                # Get image array and its resolution from Vision Sensor in CoppeliaSim scene
vision_sensor_image, image_resolution, return_code = get_vision_sensor_image()
if ((return_code == sim.simx_return_ok) and (len(image_resolution) == 2) and (len(vision_sensor_image) > 0)):
print('\nImage captured from Vision Sensor in CoppeliaSim successfully!')
# Get the transformed vision sensor image captured in correct format
try:
transformed_image = transform_vision_sensor_image(vision_sensor_image, image_resolution)
if (type(transformed_image) is np.ndarray):
# Save generated maze image in 'generated_images' folder
generated_img_file_path = generated_dir_path + 'result_maze0' + str(file_num) + '.jpg'
# Create the 'generated_images' folder and save the transformed image
if os.path.isdir(generated_dir_path) == True:
pass
else:
os.mkdir(generated_dir_path)
return_code = cv2.imwrite(generated_img_file_path, transformed_image)
if return_code == True:
print('\nTransformed maze image from CoppeliaSim: ' + str(generated_img_file_path) + ' was saved in \'generated_images\' folder successfully!')
else:
print('\n[ERROR] Failed to save Transformed maze image from CoppeliaSim in \'generated_images\' folder.')
# Stop the simulation
return_code = sim.simxStopSimulation(client_id, sim.simx_opmode_oneshot)
# Making sure that last command sent out had time to arrive
sim.simxGetPingTime(client_id)
if ((return_code == sim.simx_return_novalue_flag) or (return_code == sim.simx_return_ok)):
print('\nSimulation stopped correctly.')
time.sleep(2)
except Exception:
                                        print('\n[ERROR] Your transform_vision_sensor_image function threw an Exception, kindly debug your code!')
print('Stop the CoppeliaSim simulation manually.\n')
traceback.print_exc(file=sys.stdout)
print()
sys.exit()
except Exception:
                                print('\n[ERROR] Your get_vision_sensor_image function threw an Exception, kindly debug your code!')
print('Stop the CoppeliaSim simulation manually.\n')
traceback.print_exc(file=sys.stdout)
print()
sys.exit()
except Exception:
                    print('\n[ERROR] Your send_data function threw an Exception, kindly debug your code!')
traceback.print_exc(file=sys.stdout)
print()
sys.exit()
# Stop the Remote API connection with CoppeliaSim server
try:
exit_remote_api_server()
if (sim.simxStartSimulation(client_id, sim.simx_opmode_oneshot) == sim.simx_return_initialize_error_flag):
print('\nDisconnected successfully from Remote API Server in CoppeliaSim!')
else:
print('\n[ERROR] Failed disconnecting from Remote API server!')
print('[ERROR] exit_remote_api_server function is not configured correctly, check the code!')
except Exception:
            print('\n[ERROR] Your exit_remote_api_server function threw an Exception, kindly debug your code!')
print('Stop the CoppeliaSim simulation manually.\n')
traceback.print_exc(file=sys.stdout)
print()
sys.exit()
else:
print('')
``` |
{
"source": "18jeffreyma/tensorflow",
"score": 2
} |
#### File: keras/engine/training_utils_test.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import multiprocessing.pool
import time
from absl.testing import parameterized
import numpy as np
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import readers
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_util
from tensorflow.python.keras import backend
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import testing_utils
from tensorflow.python.keras.engine import keras_tensor
from tensorflow.python.keras.engine import training_utils
from tensorflow.python.keras.utils import tf_utils
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
class ModelInputsTest(test.TestCase):
def test_single_thing(self):
a = np.ones(10)
model_inputs = training_utils.ModelInputs(a)
self.assertEqual(['input_1'], model_inputs.get_input_names())
vals = model_inputs.get_symbolic_inputs()
self.assertTrue(tensor_util.is_tensor(vals))
vals = model_inputs.get_symbolic_inputs(return_single_as_list=True)
self.assertEqual(1, len(vals))
self.assertTrue(tensor_util.is_tensor(vals[0]))
self.assertEqual(backend.floatx(), vals[0].dtype)
def test_single_thing_eager(self):
if not context.executing_eagerly():
self.skipTest('Run in eager mode only.')
with testing_utils.use_keras_tensors_scope(False):
a = np.ones(10, dtype=np.int32)
model_inputs = training_utils.ModelInputs(a)
self.assertEqual(['input_1'], model_inputs.get_input_names())
val = model_inputs.get_symbolic_inputs()
self.assertTrue(tf_utils.is_symbolic_tensor(val))
vals = model_inputs.get_symbolic_inputs(return_single_as_list=True)
self.assertEqual(1, len(vals))
self.assertTrue(tf_utils.is_symbolic_tensor(vals[0]))
self.assertEqual(dtypes.int32, vals[0].dtype)
with testing_utils.use_keras_tensors_scope(True):
a = np.ones(10, dtype=np.int32)
model_inputs = training_utils.ModelInputs(a)
self.assertEqual(['input_1'], model_inputs.get_input_names())
val = model_inputs.get_symbolic_inputs()
self.assertIsInstance(val, keras_tensor.KerasTensor)
vals = model_inputs.get_symbolic_inputs(return_single_as_list=True)
self.assertEqual(1, len(vals))
self.assertIsInstance(vals[0], keras_tensor.KerasTensor)
self.assertEqual(dtypes.int32, vals[0].dtype)
def test_list(self):
a = [np.ones(10), np.ones(20)]
model_inputs = training_utils.ModelInputs(a)
self.assertEqual(['input_1', 'input_2'], model_inputs.get_input_names())
vals = model_inputs.get_symbolic_inputs()
self.assertTrue(tensor_util.is_tensor(vals[0]))
self.assertTrue(tensor_util.is_tensor(vals[1]))
def test_list_eager(self):
if not context.executing_eagerly():
self.skipTest('Run in eager mode only.')
with testing_utils.use_keras_tensors_scope(False):
a = [np.ones(10), np.ones(20)]
model_inputs = training_utils.ModelInputs(a)
self.assertEqual(['input_1', 'input_2'], model_inputs.get_input_names())
vals = model_inputs.get_symbolic_inputs()
self.assertTrue(tf_utils.is_symbolic_tensor(vals[0]))
self.assertTrue(tf_utils.is_symbolic_tensor(vals[1]))
with testing_utils.use_keras_tensors_scope(True):
a = [np.ones(10), np.ones(20)]
model_inputs = training_utils.ModelInputs(a)
self.assertEqual(['input_1', 'input_2'], model_inputs.get_input_names())
vals = model_inputs.get_symbolic_inputs()
self.assertIsInstance(vals[0], keras_tensor.KerasTensor)
self.assertIsInstance(vals[1], keras_tensor.KerasTensor)
def test_dict(self):
a = {'b': np.ones(10), 'a': np.ones(20)}
model_inputs = training_utils.ModelInputs(a)
self.assertEqual(['a', 'b'], model_inputs.get_input_names())
vals = model_inputs.get_symbolic_inputs()
self.assertTrue(tensor_util.is_tensor(vals['a']))
self.assertTrue(tensor_util.is_tensor(vals['b']))
def test_dict_eager(self):
if not context.executing_eagerly():
self.skipTest('Run in eager mode only.')
with testing_utils.use_keras_tensors_scope(False):
a = {'b': np.ones(10), 'a': np.ones(20)}
model_inputs = training_utils.ModelInputs(a)
self.assertEqual(['a', 'b'], model_inputs.get_input_names())
vals = model_inputs.get_symbolic_inputs()
self.assertTrue(tf_utils.is_symbolic_tensor(vals['a']))
self.assertTrue(tf_utils.is_symbolic_tensor(vals['b']))
with testing_utils.use_keras_tensors_scope(True):
a = {'b': np.ones(10), 'a': np.ones(20)}
model_inputs = training_utils.ModelInputs(a)
self.assertEqual(['a', 'b'], model_inputs.get_input_names())
vals = model_inputs.get_symbolic_inputs()
self.assertIsInstance(vals['a'], keras_tensor.KerasTensor)
self.assertIsInstance(vals['b'], keras_tensor.KerasTensor)
class DatasetUtilsTest(test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(
# pylint: disable=g-long-lambda
('Batch', lambda: dataset_ops.Dataset.range(5).batch(2)),
('Cache', lambda: dataset_ops.Dataset.range(5).cache()),
('Concatenate', lambda: dataset_ops.Dataset.range(5).concatenate(
dataset_ops.Dataset.range(5))),
('FlatMap', lambda: dataset_ops.Dataset.range(5).flat_map(
lambda _: dataset_ops.Dataset.from_tensors(0))),
('FlatMap_Shuffle', lambda: dataset_ops.Dataset.range(5).flat_map(
lambda _: dataset_ops.Dataset.from_tensors(0).shuffle(1)), True),
('Filter', lambda: dataset_ops.Dataset.range(5).filter(lambda _: True)),
('FixedLengthRecordDatasetV2',
lambda: readers.FixedLengthRecordDatasetV2([], 42)),
('FromTensors', lambda: dataset_ops.Dataset.from_tensors(0)),
('FromTensorSlices',
lambda: dataset_ops.Dataset.from_tensor_slices([0, 0, 0])),
('Interleave', lambda: dataset_ops.Dataset.range(5).interleave(
lambda _: dataset_ops.Dataset.from_tensors(0), cycle_length=1)),
('Interleave_Shuffle', lambda: dataset_ops.Dataset.range(5).interleave(
lambda _: dataset_ops.Dataset.from_tensors(0).shuffle(1),
cycle_length=1), True),
('Map', lambda: dataset_ops.Dataset.range(5).map(lambda x: x)),
('Options',
lambda: dataset_ops.Dataset.range(5).with_options(dataset_ops.Options())
),
('PaddedBatch', lambda: dataset_ops.Dataset.range(5).padded_batch(2, [])),
('ParallelInterleave', lambda: dataset_ops.Dataset.range(5).interleave(
lambda _: dataset_ops.Dataset.from_tensors(0),
cycle_length=1,
num_parallel_calls=1)),
('ParallelMap', lambda: dataset_ops.Dataset.range(5).map(
lambda x: x, num_parallel_calls=1)),
('Prefetch', lambda: dataset_ops.Dataset.range(5).prefetch(1)),
('Range', lambda: dataset_ops.Dataset.range(0)),
('Repeat', lambda: dataset_ops.Dataset.range(0).repeat(0)),
('Shuffle', lambda: dataset_ops.Dataset.range(5).shuffle(1), True),
('Skip', lambda: dataset_ops.Dataset.range(5).skip(2)),
('Take', lambda: dataset_ops.Dataset.range(5).take(2)),
('TextLineDataset', lambda: readers.TextLineDatasetV2([])),
('TFRecordDataset', lambda: readers.TFRecordDatasetV2([])),
('Window', lambda: dataset_ops.Dataset.range(5).window(2)),
('Zip', lambda: dataset_ops.Dataset.zip(dataset_ops.Dataset.range(5))),
# pylint: enable=g-long-lambda
)
def test_verify_dataset_shuffled(self, dataset_fn, expect_shuffled=False):
dataset = dataset_fn()
if not expect_shuffled:
with test.mock.patch.object(logging, 'warning') as mock_log:
shuffled = training_utils.verify_dataset_shuffled(dataset)
self.assertRegex(
str(mock_log.call_args), 'input dataset `x` is not shuffled.')
self.assertFalse(shuffled)
else:
self.assertTrue(training_utils.verify_dataset_shuffled(dataset))
class StandardizeWeightsTest(keras_parameterized.TestCase):
def test_sample_weights(self):
y = np.array([0, 1, 0, 0, 2])
sample_weights = np.array([0.5, 1., 1., 0., 2.])
weights = training_utils.standardize_weights(y, sample_weights)
self.assertAllClose(weights, sample_weights)
def test_class_weights(self):
y = np.array([0, 1, 0, 0, 2])
class_weights = {0: 0.5, 1: 1., 2: 1.5}
weights = training_utils.standardize_weights(y, class_weight=class_weights)
self.assertAllClose(weights, np.array([0.5, 1., 0.5, 0.5, 1.5]))
def test_sample_weights_and_class_weights(self):
y = np.array([0, 1, 0, 0, 2])
sample_weights = np.array([0.5, 1., 1., 0., 2.])
class_weights = {0: 0.5, 1: 1., 2: 1.5}
weights = training_utils.standardize_weights(y, sample_weights,
class_weights)
expected = sample_weights * np.array([0.5, 1., 0.5, 0.5, 1.5])
self.assertAllClose(weights, expected)
def test_dataset_with_class_weight(self):
model = testing_utils.get_small_functional_mlp(1, 4, input_dim=3)
model.compile('rmsprop', 'mse')
inputs = np.zeros((10, 3), np.float32)
targets = np.zeros((10, 4), np.float32)
dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
dataset = dataset.repeat(100)
dataset = dataset.batch(10)
class_weight_np = np.array([0.25, 0.25, 0.25, 0.25])
class_weight = dict(enumerate(class_weight_np))
model.fit(
dataset,
epochs=1,
steps_per_epoch=2,
verbose=1,
class_weight=class_weight)
class MonitoredPool(multiprocessing.pool.ThreadPool):
def __init__(self, *args, **kwargs):
self._apply_counter = 0
self._func_wrapper = None
super(MonitoredPool, self).__init__(*args, **kwargs)
def apply_async(self, func, *args, **kwargs):
self._apply_counter += 1
if self._func_wrapper:
func = self._func_wrapper(func) # pylint: disable=not-callable
return super(MonitoredPool, self).apply_async(func, *args, **kwargs)
def add_sleep(f):
@functools.wraps(f)
def wrapped(*args, **kwargs):
time.sleep(1.)
return f(*args, **kwargs)
return wrapped
def cause_error(f):
@functools.wraps(f)
def wrapped(batch_element, batch_start, batch_end, is_finished): # pylint: disable=unused-argument
# Induce a TypeError during assignment.
return f(None, None, None, is_finished)
return wrapped
_TEST_DATA = np.array((
(3, 1, 3, 1, 2, 0, 3, 3, 1, 2),
(0, 1, 2, 1, 3, 0, 0, 1, 3, 0),
(3, 2, 1, 1, 1, 1, 1, 3, 2, 3),
(2, 2, 0, 1, 0, 3, 3, 2, 1, 1),
(3, 0, 3, 3, 3, 2, 1, 0, 0, 1),
(1, 0, 3, 3, 3, 2, 1, 2, 3, 1),))
class AggregationTest(keras_parameterized.TestCase):
def setUp(self):
super(AggregationTest, self).setUp()
self._old_pool = training_utils._COPY_POOL
self._old_threshold = training_utils.SliceAggregator._BINARY_SIZE_THRESHOLD
self._old_timeout = training_utils.SliceAggregator._MAX_COPY_SECONDS
training_utils._COPY_POOL = MonitoredPool(training_utils._COPY_THREADS)
def tearDown(self):
super(AggregationTest, self).tearDown()
training_utils._COPY_POOL = self._old_pool
training_utils.SliceAggregator._BINARY_SIZE_THRESHOLD = self._old_threshold
training_utils.SliceAggregator._MAX_COPY_SECONDS = self._old_timeout
def _run_with_steps(self):
aggregator = training_utils.OutputsAggregator(use_steps=True)
for i, batch in enumerate(np.array_split(_TEST_DATA, 4)):
if i == 0:
aggregator.create(batch)
aggregator.aggregate(batch)
assert len(aggregator.results) == 1
assert isinstance(aggregator.results[0], training_utils.ConcatAggregator)
aggregator.finalize()
return aggregator.results
def _run_without_steps(self):
aggregator = training_utils.OutputsAggregator(
use_steps=False, num_samples=6)
batch_start = 0
for i, batch in enumerate(np.array_split(_TEST_DATA, 4)):
if i == 0:
aggregator.create(batch)
batch_end = batch_start + batch.shape[0]
aggregator.aggregate(batch, batch_start, batch_end)
batch_start = batch_end
assert len(aggregator.results) == 1
assert isinstance(aggregator.results[0], training_utils.SliceAggregator)
aggregator.finalize()
return aggregator.results
def test_with_steps(self):
self.assertAllEqual(self._run_with_steps(), _TEST_DATA)
def test_without_steps(self):
self.assertAllEqual(self._run_without_steps(), _TEST_DATA)
def test_nested_aggregation(self):
aggregator = training_utils.OutputsAggregator(
use_steps=False, num_samples=6)
batches = np.array_split(_TEST_DATA, 4)
batch_start = 0
for i, batch in enumerate(zip(batches, batches)):
if i == 0:
aggregator.create(batch)
batch_end = batch_start + batch[0].shape[0]
aggregator.aggregate(batch, batch_start, batch_end)
batch_start = batch_end
assert len(aggregator.results) == 2
aggregator.finalize()
self.assertAllEqual(aggregator.results, (_TEST_DATA, _TEST_DATA))
def test_concat_single_batch(self):
aggregator = training_utils.OutputsAggregator(use_steps=True)
data = _TEST_DATA.copy()
aggregator.create(data)
assert len(aggregator.results) == 1
assert isinstance(aggregator.results[0], training_utils.ConcatAggregator)
aggregator.aggregate(data)
aggregator.finalize()
assert aggregator.results is data # No copy.
def test_slice_single_batch(self):
aggregator = training_utils.OutputsAggregator(
use_steps=False, num_samples=6)
data = _TEST_DATA.copy()
aggregator.create(data)
assert len(aggregator.results) == 1
assert isinstance(aggregator.results[0], training_utils.SliceAggregator)
aggregator.aggregate(data, 0, 6)
aggregator.finalize()
assert aggregator.results is data # No copy.
def test_async_copy(self):
training_utils.SliceAggregator._BINARY_SIZE_THRESHOLD = 15
self.assertAllEqual(self._run_without_steps(), _TEST_DATA)
# Two of the four batches will have 20 elements and two will have 10.
self.assertEqual(training_utils._COPY_POOL._apply_counter, 2)
def test_async_copy_timeout(self):
training_utils.SliceAggregator._BINARY_SIZE_THRESHOLD = 15
training_utils.SliceAggregator._MAX_COPY_SECONDS = 0.1
training_utils._COPY_POOL._func_wrapper = add_sleep
with self.assertRaisesRegex(ValueError, 'Timed out waiting for copy'):
self._run_without_steps()
def test_async_copy_reraise(self):
training_utils.SliceAggregator._BINARY_SIZE_THRESHOLD = 15
training_utils.SliceAggregator._MAX_COPY_SECONDS = 1.
training_utils._COPY_POOL._func_wrapper = cause_error
with self.assertRaisesRegex(TypeError, 'NoneType'):
self._run_without_steps()
if __name__ == '__main__':
test.main()
``` |
{
"source": "18kotov/python_training_1",
"score": 3
} |
#### File: 18kotov/python_training_1/contact.py
```python
class Contact:
def __init__(self, firstname, middlename, lastname, nickname, title, company, address, home, email):
self.firstname=firstname
self.middlename=middlename
self.lastname=lastname
self.nickname=nickname
self.title=title
self.company=company
self.address=address
self.home=home
self.email=email
``` |
{
"source": "18pattonj/RAiDER",
"score": 2
} |
#### File: test/weather_model/test_processWM.py
```python
import os
import pytest
from RAiDER.processWM import getWMFilename
def test_getWMFilename():
import datetime
f = getWMFilename(
'ERA5',
datetime.datetime(2020, 1, 1, 0, 0, 0),
'test_out_loc'
)
assert f[0]
assert f[1] == os.path.join(
'test_out_loc',
'ERA5_2020_01_01_T00_00_00.nc'
)
```
#### File: tools/RAiDER/checkArgs.py
```python
import os
from datetime import datetime
import numpy as np
import pandas as pd
import RAiDER.utilFcns
from RAiDER.constants import Zenith
from RAiDER.llreader import readLL
def checkArgs(args, p):
'''
    Helper fcn that checks argument compatibility and returns the
    correct variables
'''
# Argument checking
if args.heightlvs is not None:
if args.outformat is not None:
if args.outformat.lower() != 'hdf5':
raise RuntimeError('HDF5 must be used with height levels')
# Query Area
lat, lon, llproj, bounds, flag = readLL(args.query_area)
if (np.min(lat) < -90) | (np.max(lat) > 90):
raise RuntimeError('Lats are out of N/S bounds; are your lat/lon coordinates switched?')
# Line of sight calc
if args.lineofsight is not None:
los = ('los', args.lineofsight)
elif args.statevectors is not None:
los = ('sv', args.statevectors)
else:
los = Zenith
# Weather
weather_model_name = args.model
if weather_model_name == 'WRF' and args.files is None:
raise RuntimeError('Argument --files is required with --model WRF')
_, model_obj = RAiDER.utilFcns.modelName2Module(args.model)
if args.model == 'WRF':
weathers = {'type': 'wrf', 'files': args.files,
'name': 'wrf'}
elif args.model == 'HDF5':
weathers = {'type': 'HDF5', 'files': args.files,
'name': args.model}
else:
try:
weathers = {'type': model_obj(), 'files': args.files,
'name': args.model}
except:
raise NotImplementedError('{} is not implemented'.format(weather_model_name))
# zref
zref = args.zref
# parallel or concurrent runs
parallel = args.parallel
if not parallel==1:
import multiprocessing
        # assess the number of concurrent jobs to be executed
max_threads = multiprocessing.cpu_count()
if parallel == 'all':
parallel = max_threads
parallel = parallel if parallel < max_threads else max_threads
# handle the datetimes requested
datetimeList = [datetime.combine(d, args.time) for d in args.dateList]
# Misc
download_only = args.download_only
verbose = args.verbose
useWeatherNodes = flag == 'bounding_box'
# Output
out = args.out
if out is None:
out = os.getcwd()
if args.outformat is None:
if args.heightlvs is not None:
outformat = 'hdf5'
elif flag == 'station_file':
outformat = 'csv'
elif useWeatherNodes:
outformat = 'hdf5'
else:
outformat = 'envi'
else:
outformat = args.outformat.lower()
if args.wmLoc is not None:
wmLoc = args.wmLoc
else:
wmLoc = os.path.join(args.out, 'weather_files')
if not os.path.exists(wmLoc):
os.mkdir(wmLoc)
wetNames, hydroNames = [], []
for time in datetimeList:
if flag == 'station_file':
wetFilename = os.path.join(out, '{}_Delay_{}_Zmax{}.csv'
.format(weather_model_name, time.strftime('%Y%m%dT%H%M%S'), zref))
hydroFilename = wetFilename
# copy the input file to the output location for editing
indf = pd.read_csv(args.query_area)
indf.to_csv(wetFilename, index=False)
else:
wetFilename, hydroFilename = \
RAiDER.utilFcns.makeDelayFileNames(time, los, outformat, weather_model_name, out)
wetNames.append(wetFilename)
hydroNames.append(hydroFilename)
# DEM
if args.dem is not None:
heights = ('dem', args.dem)
elif args.heightlvs is not None:
heights = ('lvs', args.heightlvs)
elif flag == 'station_file':
indf = pd.read_csv(args.query_area)
try:
hgts = indf['Hgt_m'].values
heights = ('pandas', wetNames)
except:
heights = ('merge', wetNames)
elif useWeatherNodes:
heights = ('skip', None)
else:
heights = ('download', os.path.join(out, 'geom', 'warpedDEM.dem'))
# put all the arguments in a dictionary
outArgs = {}
outArgs['los']=los
outArgs['lats']=lat
outArgs['lons']=lon
outArgs['ll_bounds']=bounds
outArgs['heights']=heights
outArgs['flag']=flag
outArgs['weather_model']=weathers
outArgs['wmLoc']=wmLoc
outArgs['zref']=zref
outArgs['outformat']=outformat
outArgs['times']=datetimeList
outArgs['download_only']=download_only
outArgs['out']=out
outArgs['verbose']=verbose
outArgs['wetFilenames']=wetNames
outArgs['hydroFilenames']=hydroNames
outArgs['parallel']=parallel
return outArgs
#return los, lat, lon, bounds, heights, flag, weathers, wmLoc, zref, outformat, datetimeList, out, download_only, verbose, wetNames, hydroNames, parallel
```
#### File: tools/RAiDER/processWM.py
```python
import contextlib
import os
import sys
import numpy as np
from datetime import datetime, date
from RAiDER.logger import *
from RAiDER.utilFcns import getTimeFromFile
def getWMFilename(weather_model_name, time, outLoc):
'''
    Check whether the output weather model file already exists and return
    the download flag together with the target filename.
'''
with contextlib.suppress(FileExistsError):
os.mkdir('weather_files')
download_flag = True
f = os.path.join(
outLoc,
'{}_{}.nc'.format(
weather_model_name,
datetime.strftime(time, '%Y_%m_%d_T%H_%M_%S')
)
)
if weather_model_name in ['GMAO', 'MERRA2']:
if time.date() < date(2017, 12, 1) and weather_model_name == 'GMAO':
ext = 'nc'
else:
ext = 'h5'
f = f[:-2]+ ext
logger.debug('Storing weather model at: %s', f)
if os.path.exists(f):
logger.warning('Weather model already exists, skipping download')
download_flag = False
return download_flag, f
def prepareWeatherModel(
weatherDict,
wmFileLoc,
lats=None,
lons=None,
los=None,
zref=None,
time=None,
download_only=False,
makePlots=False
):
'''
Parse inputs to download and prepare a weather model grid for interpolation
'''
weather_model, weather_files, weather_model_name = \
weatherDict['type'], weatherDict['files'], weatherDict['name']
# check whether weather model files are supplied
if weather_files is None:
if time is None:
raise RuntimeError('prepareWeatherModel: Either a file or a time must be specified')
download_flag,f = getWMFilename(weather_model.Model(), time, wmFileLoc)
weather_model.files = [f]
else:
download_flag = False
time = getTimeFromFile(weather_files[0])
        if (time < datetime(2013, 6, 26, 0, 0, 0)) and (weather_model._Name == 'HRES'):
weather_model.update_a_b()
# if no weather model files supplied, check the standard location
if download_flag:
weather_model.fetch(*weather_model.files, lats, lons, time)
# exit on download if download_only requested
if download_only:
logger.warning(
'download_only flag selected. No further processing will happen.'
)
return None, None, None
# Load the weather model data
if weather_model.files is not None:
weather_model.load(*weather_model.files, outLats=lats, outLons=lons, los=los, zref=zref)
download_flag = False
else:
weather_model.load(f, outLats=lats, outLons=lons, los=los, zref=zref)
logger.debug('Number of weather model nodes: %d', np.prod(weather_model.getWetRefractivity().shape))
logger.debug('Shape of weather model: %s', weather_model.getWetRefractivity().shape)
logger.debug(
'Bounds of the weather model: %.2f/%.2f/%.2f/%.2f (SNWE)',
np.nanmin(weather_model._ys), np.nanmax(weather_model._ys),
np.nanmin(weather_model._xs), np.nanmax(weather_model._xs)
)
logger.debug('Weather model: %s', weather_model.Model())
logger.debug(
'Mean value of the wet refractivity: %f',
np.nanmean(weather_model.getWetRefractivity())
)
logger.debug(
'Mean value of the hydrostatic refractivity: %f',
np.nanmean(weather_model.getHydroRefractivity())
)
logger.debug(weather_model)
if makePlots:
p = weather_model.plot('wh', True)
p = weather_model.plot('pqt', True)
return weather_model, lats, lons
```
#### File: tools/RAiDER/utilFcns.py
```python
import importlib
import multiprocessing as mp
import os
import re
from datetime import datetime, timedelta
import h5py
import numpy as np
import pandas as pd
import pyproj
from osgeo import gdal, osr
import RAiDER.mathFcns as mathFcns
from RAiDER.constants import Zenith
from RAiDER import Geo2rdr
from RAiDER.logger import *
gdal.UseExceptions()
def lla2ecef(lat, lon, height):
ecef = pyproj.Proj(proj='geocent')
lla = pyproj.Proj(proj='latlong')
return pyproj.transform(lla, ecef, lon, lat, height, always_xy=True)
def enu2ecef(east, north, up, lat0, lon0, h0):
"""Return ecef from enu coordinates."""
# I'm looking at
# https://github.com/scivision/pymap3d/blob/master/pymap3d/__init__.py
x0, y0, z0 = lla2ecef(lat0, lon0, h0)
t = mathFcns.cosd(lat0) * up - mathFcns.sind(lat0) * north
w = mathFcns.sind(lat0) * up + mathFcns.cosd(lat0) * north
u = mathFcns.cosd(lon0) * t - mathFcns.sind(lon0) * east
v = mathFcns.sind(lon0) * t + mathFcns.cosd(lon0) * east
my_ecef = np.stack((x0 + u, y0 + v, z0 + w))
return my_ecef
def gdal_extents(fname):
if os.path.exists(fname + '.vrt'):
fname = fname + '.vrt'
try:
ds = gdal.Open(fname, gdal.GA_ReadOnly)
except Exception:
raise OSError('File {} could not be opened'.format(fname))
# Check whether the file is georeferenced
proj = ds.GetProjection()
gt = ds.GetGeoTransform()
if not proj or not gt:
raise AttributeError('File {} does not contain geotransform information'.format(fname))
xSize, ySize = ds.RasterXSize, ds.RasterYSize
return [gt[0], gt[0] + (xSize - 1) * gt[1] + (ySize - 1) * gt[2], gt[3], gt[3] + (xSize - 1) * gt[4] + (ySize - 1) * gt[5]]
def gdal_open(fname, returnProj=False, userNDV=None):
if os.path.exists(fname + '.vrt'):
fname = fname + '.vrt'
try:
ds = gdal.Open(fname, gdal.GA_ReadOnly)
except:
raise OSError('File {} could not be opened'.format(fname))
proj = ds.GetProjection()
gt = ds.GetGeoTransform()
val = []
for band in range(ds.RasterCount):
b = ds.GetRasterBand(band + 1) # gdal counts from 1, not 0
data = b.ReadAsArray()
if userNDV is not None:
logger.debug('Using user-supplied NoDataValue')
data[data == userNDV] = np.nan
else:
try:
ndv = b.GetNoDataValue()
data[data == ndv] = np.nan
except:
logger.debug('NoDataValue attempt failed*******')
val.append(data)
b = None
ds = None
if len(val) > 1:
data = np.stack(val)
else:
data = val[0]
if not returnProj:
return data
else:
return data, proj, gt
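# Illustrative usage (filename is hypothetical): a single-band raster comes back as a 2-D array,
# a multi-band raster as a stacked 3-D array; pass returnProj=True to also receive the
# projection string and geotransform.
#   data, proj, gt = gdal_open('warpedDEM.dem', returnProj=True)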
def writeResultsToHDF5(lats, lons, hgts, wet, hydro, filename, delayType=None):
'''
    write 1-D arrays to an HDF5 file
'''
if delayType is None:
delayType = "Zenith"
with h5py.File(filename, 'w') as f:
f['lat'] = lats
f['lon'] = lons
f['hgts'] = hgts
f['wetDelay'] = wet
f['hydroDelay'] = hydro
f['wetDelayUnit'] = "m"
f['hydroDelayUnit'] = "m"
f['hgtsUnit'] = "m"
f.attrs['DelayType'] = delayType
def writeArrayToRaster(array, filename, noDataValue=0., fmt='ENVI', proj=None, gt=None):
'''
write a numpy array to a GDAL-readable raster
'''
array_shp = np.shape(array)
if array.ndim != 2:
raise RuntimeError('writeArrayToRaster: cannot write an array of shape {} to a raster image'.format(array_shp))
dType = array.dtype
if 'complex' in str(dType):
dType = gdal.GDT_CFloat32
elif 'float' in str(dType):
dType = gdal.GDT_Float32
else:
dType = gdal.GDT_Byte
driver = gdal.GetDriverByName(fmt)
ds = driver.Create(filename, array_shp[1], array_shp[0], 1, dType)
if proj is not None:
ds.SetProjection(proj)
if gt is not None:
ds.SetGeoTransform(gt)
b1 = ds.GetRasterBand(1)
b1.WriteArray(array)
b1.SetNoDataValue(noDataValue)
ds = None
b1 = None
def writeArrayToFile(lats, lons, array, filename, noDataValue=-9999):
'''
Write a single-dim array of values to a file
'''
array[np.isnan(array)] = noDataValue
with open(filename, 'w') as f:
f.write('Lat,Lon,Hgt_m\n')
for l, L, a in zip(lats, lons, array):
f.write('{},{},{}\n'.format(l, L, a))
def round_date(date, precision):
# First try rounding up
# Timedelta since the beginning of time
datedelta = datetime.min - date
# Round that timedelta to the specified precision
rem = datedelta % precision
# Add back to get date rounded up
round_up = date + rem
# Next try rounding down
datedelta = date - datetime.min
rem = datedelta % precision
round_down = date - rem
# It's not the most efficient to calculate both and then choose, but
# it's clear, and performance isn't critical here.
up_diff = round_up - date
down_diff = date - round_down
return round_up if up_diff < down_diff else round_down
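# Worked example (illustrative, standard-library datetime/timedelta):
#   round_date(datetime(2020, 1, 1, 0, 40), timedelta(hours=1))  ->  datetime(2020, 1, 1, 1, 0)
#   round_date(datetime(2020, 1, 1, 0, 20), timedelta(hours=1))  ->  datetime(2020, 1, 1, 0, 0)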
def _least_nonzero(a):
"""Fill in a flat array with the first non-nan value in the last dimension.
Useful for interpolation below the bottom of the weather model.
"""
mgrid_index = tuple(slice(None, d) for d in a.shape[:-1])
return a[tuple(np.mgrid[mgrid_index]) + ((~np.isnan(a)).argmax(-1),)]
def robmin(a):
'''
Get the minimum of an array, accounting for empty lists
'''
try:
return np.nanmin(a)
except ValueError:
return 'N/A'
def robmax(a):
'''
    Get the maximum of an array, accounting for empty lists
'''
try:
return np.nanmax(a)
except ValueError:
return 'N/A'
def _get_g_ll(lats):
'''
Compute the variation in gravity constant with latitude
'''
# TODO: verify these constants. In particular why is the reference g different from self._g0?
    return 9.80616 * (1 - 0.002637 * mathFcns.cosd(2 * lats) + 0.0000059 * (mathFcns.cosd(2 * lats))**2)
def _get_Re(lats):
'''
Returns the ellipsoid as a fcn of latitude
'''
# TODO: verify constants, add to base class constants?
Rmax = 6378137
Rmin = 6356752
    return np.sqrt(1 / (((mathFcns.cosd(lats)**2) / Rmax**2) + ((mathFcns.sind(lats)**2) / Rmin**2)))
def _geo_to_ht(lats, hts, g0=9.80556):
"""Convert geopotential height to altitude."""
# Convert geopotential to geometric height. This comes straight from
# TRAIN
# Map of g with latitude (I'm skeptical of this equation - Ray)
g_ll = _get_g_ll(lats)
Re = _get_Re(lats)
# Calculate Geometric Height, h
h = (hts * Re) / (g_ll / g0 * Re - hts)
return h
def padLower(invar):
'''
add a layer of data below the lowest current z-level at height zmin
'''
new_var = _least_nonzero(invar)
return np.concatenate((new_var[:, :, np.newaxis], invar), axis=2)
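# Shape sketch (illustrative): for a weather cube of shape (ny, nx, nz), padLower returns an
# array of shape (ny, nx, nz + 1) whose new bottom layer holds, for each column, the first
# non-nan value along the vertical axis (computed by _least_nonzero above).
#   padLower(np.full((10, 10, 20), 1.0)).shape  ->  (10, 10, 21)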
def makeDelayFileNames(time, los, outformat, weather_model_name, out):
'''
return names for the wet and hydrostatic delays.
# Examples:
>>> makeDelayFileNames(time(0, 0, 0), None, "h5", "model_name", "some_dir")
('some_dir/model_name_wet_00_00_00_ztd.h5', 'some_dir/model_name_hydro_00_00_00_ztd.h5')
>>> makeDelayFileNames(None, None, "h5", "model_name", "some_dir")
('some_dir/model_name_wet_ztd.h5', 'some_dir/model_name_hydro_ztd.h5')
'''
format_string = "{model_name}_{{}}_{time}{los}.{ext}".format(
model_name=weather_model_name,
time=time.strftime("%H_%M_%S_") if time is not None else "",
los="ztd" if los is None else "std",
ext=outformat
)
hydroname, wetname = (
format_string.format(dtyp) for dtyp in ('hydro', 'wet')
)
hydro_file_name = os.path.join(out, hydroname)
wet_file_name = os.path.join(out, wetname)
return wet_file_name, hydro_file_name
def make_weather_model_filename(name, time, ll_bounds):
if ll_bounds[0] < 0:
S = 'S'
else:
S = 'N'
if ll_bounds[1] < 0:
N = 'S'
else:
N = 'N'
if ll_bounds[2] < 0:
W = 'W'
else:
W = 'E'
if ll_bounds[3] < 0:
E = 'W'
else:
E = 'E'
return '{}_{}_{}{}_{}{}_{}{}_{}{}.h5'.format(
name, time.strftime("%Y-%m-%dT%H_%M_%S"), np.abs(ll_bounds[0]), S, np.abs(ll_bounds[1]), N, np.abs(ll_bounds[2]), W, np.abs(ll_bounds[3]), E
)
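# Example output (illustrative values; ll_bounds appears to be ordered S, N, W, E):
#   make_weather_model_filename('ERA5', datetime(2020, 1, 1), (30.0, 40.0, -120.0, -110.0))
#   ->  'ERA5_2020-01-01T00_00_00_30.0N_40.0N_120.0W_110.0W.h5'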
def checkShapes(los, lats, lons, hts):
'''
Make sure that by the time the code reaches here, we have a
consistent set of line-of-sight and position data.
'''
if los is None:
los = Zenith
test1 = hts.shape == lats.shape == lons.shape
try:
test2 = los.shape[:-1] == hts.shape
except AttributeError:
test2 = los is Zenith
    if not (test1 and test2):
raise ValueError(
'I need lats, lons, heights, and los to all be the same shape. ' +
'lats had shape {}, lons had shape {}, '.format(lats.shape, lons.shape) +
'heights had shape {}, and los was not Zenith'.format(hts.shape))
def checkLOS(los, Npts):
'''
Check that los is either:
(1) Zenith,
(2) a set of scalar values of the same size as the number
    of points, which represent the projection value, or
(3) a set of vectors, same number as the number of points.
'''
# los is a bunch of vectors or Zenith
if los is not Zenith:
los = los.reshape(-1, 3)
if los is not Zenith and los.shape[0] != Npts:
raise RuntimeError('Found {} line-of-sight values and only {} points'
.format(los.shape[0], Npts))
return los
def modelName2Module(model_name):
"""Turn an arbitrary string into a module name.
Takes as input a model name, which hopefully looks like ERA-I, and
    converts it to a module name, which will look like erai. It doesn't
always produce a valid module name, but that's not the goal. The
goal is just to handle common cases.
Inputs:
model_name - Name of an allowed weather model (e.g., 'era-5')
Outputs:
module_name - Name of the module
wmObject - callable, weather model object
"""
module_name = 'RAiDER.models.' + model_name.lower().replace('-', '')
model_module = importlib.import_module(module_name)
wmObject = getattr(model_module, model_name.upper().replace('-', ''))
return module_name, wmObject
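# Illustrative call (the era5 module / ERA5 class names assume the convention described above):
#   modelName2Module('ERA-5')  ->  ('RAiDER.models.era5', <ERA5 weather-model class>)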
def read_hgt_file(filename):
'''
Read height data from a comma-delimited file
'''
data = pd.read_csv(filename)
hgts = data['Hgt_m'].values
return hgts
def roundTime(dt, roundTo=60):
'''
Round a datetime object to any time lapse in seconds
dt: datetime.datetime object
roundTo: Closest number of seconds to round to, default 1 minute.
Source: https://stackoverflow.com/questions/3463930/how-to-round-the-minute-of-a-datetime-object/10854034#10854034
'''
seconds = (dt.replace(tzinfo=None) - dt.min).seconds
rounding = (seconds+roundTo/2) // roundTo * roundTo
return dt + timedelta(0,rounding-seconds,-dt.microsecond)
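# Example (illustrative), rounding to the nearest minute:
#   roundTime(datetime(2012, 12, 31, 23, 44, 59), roundTo=60)  ->  datetime(2012, 12, 31, 23, 45)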
def writeDelays(flag, wetDelay, hydroDelay, lats, lons,
wetFilename, hydroFilename=None, zlevels=None, delayType=None,
outformat=None, proj=None, gt=None, ndv=0.):
'''
Write the delay numpy arrays to files in the format specified
'''
# Need to consistently handle noDataValues
wetDelay[np.isnan(wetDelay)] = ndv
hydroDelay[np.isnan(hydroDelay)] = ndv
# Do different things, depending on the type of input
if flag == 'station_file':
df = pd.read_csv(wetFilename)
# quick check for consistency
assert(np.all(np.abs(lats - df['Lat']) < 0.01))
df['wetDelay'] = wetDelay
df['hydroDelay'] = hydroDelay
df['totalDelay'] = wetDelay + hydroDelay
df.to_csv(wetFilename, index=False)
elif outformat == 'hdf5':
writeResultsToHDF5(lats, lons, zlevels, wetDelay, hydroDelay, wetFilename, delayType=delayType)
else:
writeArrayToRaster(wetDelay, wetFilename, noDataValue=ndv,
fmt=outformat, proj=proj, gt=gt)
writeArrayToRaster(hydroDelay, hydroFilename, noDataValue=ndv,
fmt=outformat, proj=proj, gt=gt)
def getTimeFromFile(filename):
'''
Parse a filename to get a date-time
'''
fmt = '%Y_%m_%d_T%H_%M_%S'
p = re.compile(r'\d{4}_\d{2}_\d{2}_T\d{2}_\d{2}_\d{2}')
try:
out = p.search(filename).group()
return datetime.strptime(out, fmt)
except:
raise RuntimeError('The filename for {} does not include a datetime in the correct format'.format(filename))
def writePnts2HDF5(lats, lons, hgts, los, outName='testx.h5', chunkSize=None, noDataValue=0.):
'''
Write query points to an HDF5 file for storage and access
'''
epsg = 4326
projname = 'projection'
checkLOS(los, np.prod(lats.shape))
in_shape = lats.shape
# create directory if needed
os.makedirs(os.path.abspath(os.path.dirname(outName)), exist_ok=True)
if chunkSize is None:
minChunkSize = 100
maxChunkSize = 10000
cpu_count = mp.cpu_count()
chunkSize = tuple(max(min(maxChunkSize, s // cpu_count), min(s, minChunkSize)) for s in in_shape)
logger.debug('Chunk size is {}'.format(chunkSize))
logger.debug('Array shape is {}'.format(in_shape))
with h5py.File(outName, 'w') as f:
f.attrs['Conventions'] = np.string_("CF-1.8")
x = f.create_dataset('lon', data=lons, chunks=chunkSize, fillvalue=noDataValue)
y = f.create_dataset('lat', data=lats, chunks=chunkSize, fillvalue=noDataValue)
z = f.create_dataset('hgt', data=hgts, chunks=chunkSize, fillvalue=noDataValue)
los = f.create_dataset('LOS', data=los, chunks=chunkSize + (3,), fillvalue=noDataValue)
x.attrs['Shape'] = in_shape
y.attrs['Shape'] = in_shape
z.attrs['Shape'] = in_shape
f.attrs['ChunkSize'] = chunkSize
f.attrs['NoDataValue'] = noDataValue
# CF 1.8 Convention stuff
srs = osr.SpatialReference()
srs.ImportFromEPSG(epsg)
projds = f.create_dataset(projname, (), dtype='i')
projds[()] = epsg
# WGS84 ellipsoid
projds.attrs['semi_major_axis'] = 6378137.0
projds.attrs['inverse_flattening'] = 298.257223563
projds.attrs['ellipsoid'] = np.string_("WGS84")
projds.attrs['epsg_code'] = epsg
projds.attrs['spatial_ref'] = np.string_(srs.ExportToWkt())
# Geodetic latitude / longitude
if epsg == 4326:
# Set up grid mapping
projds.attrs['grid_mapping_name'] = np.string_('latitude_longitude')
projds.attrs['longitude_of_prime_meridian'] = 0.0
x.attrs['standard_name'] = np.string_("longitude")
x.attrs['units'] = np.string_("degrees_east")
y.attrs['standard_name'] = np.string_("latitude")
y.attrs['units'] = np.string_("degrees_north")
z.attrs['standard_name'] = np.string_("height")
z.attrs['units'] = np.string_("m")
else:
            raise NotImplementedError
start_positions = f.create_dataset('Rays_SP', in_shape + (3,), chunks=los.chunks, dtype='<f8', fillvalue=noDataValue)
lengths = f.create_dataset('Rays_len', in_shape, chunks=x.chunks, dtype='<f8', fillvalue=noDataValue)
scaled_look_vecs = f.create_dataset('Rays_SLV', in_shape + (3,), chunks=los.chunks, dtype='<f8', fillvalue=noDataValue)
los.attrs['grid_mapping'] = np.string_(projname)
start_positions.attrs['grid_mapping'] = np.string_(projname)
lengths.attrs['grid_mapping'] = np.string_(projname)
scaled_look_vecs.attrs['grid_mapping'] = np.string_(projname)
f.attrs['NumRays'] = len(x)
def writeWeatherVars2HDF5(lat, lon, x, y, z, q, p, t, proj, outName=None):
'''
Write the OpenDAP/PyDAP-retrieved weather model data (GMAO and MERRA-2) to an HDF5 file
that can be accessed by external programs.
The point of doing this is to alleviate some of the memory load of keeping
the full model in memory and make it easier to scale up the program.
'''
if outName is None:
outName = os.path.join(
os.getcwd()+'/weather_files',
self._Name + datetime.strftime(
self._time, '_%Y_%m_%d_T%H_%M_%S'
) + '.h5'
)
with h5py.File(outName, 'w') as f:
lon = f.create_dataset('lons', data=lon.astype(np.float64))
lat = f.create_dataset('lats', data=lat.astype(np.float64))
X = f.create_dataset('x', data=x)
Y = f.create_dataset('y', data=y)
Z = f.create_dataset('z', data=z)
Q = f.create_dataset('q', data=q)
P = f.create_dataset('p', data=p)
T = f.create_dataset('t', data=t)
f.create_dataset('Projection', data=proj.to_json())
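# Usage sketch (hypothetical output path): pass outName explicitly, since the default filename
# above is built from self._Name / self._time, which are not defined inside this module-level
# function.
#   writeWeatherVars2HDF5(lat, lon, x, y, z, q, p, t, proj,
#                         outName='weather_files/GMAO_2020_01_01_T00_00_00.h5')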
``` |
{
"source": "18praneeth/Hacktoberfest-2021",
"score": 4
} |
#### File: Hacktoberfest-2021/Python/141-linked-cycle.py
```python
class Solution:
def hasCycle(self, head: ListNode) -> bool:
if not head:
return False
slow = head
fast = head.next
while fast and fast.next:
slow = slow.next
fast = fast.next.next
if slow == fast:
return True
return False
# Input: head = [3,2,0,-4], pos = 1
# Output: true
# Explanation: There is a cycle in the linked list, where the tail connects to the 1st node (0-indexed).
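# Approach note: this is Floyd's tortoise-and-hare cycle detection, running in O(n) time with
# O(1) extra space.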
``` |
{
"source": "18praneeth/udayagiri-scl-maxo",
"score": 3
} |
#### File: udayagiri-scl-maxo/students/models.py
```python
from django.db import models
from datetime import datetime
class Student(models.Model):
first_name = models.CharField(max_length=100)
last_name = models.CharField(max_length=100)
tuber_id = models.IntegerField()
tuber_name = models.CharField(max_length=100)
city = models.CharField(max_length=100)
phone = models.CharField(max_length=100)
email = models.CharField(max_length=200)
state = models.CharField(max_length=100)
message = models.TextField(blank=True)
user_id = models.IntegerField(blank=True)
created_date = models.DateTimeField(blank=True, default=datetime.now)
def __str__(self):
return self.email
```
#### File: udayagiri-scl-maxo/students/signals.py
```python
# from django.db.models.signals import post_save
# from django.contrib.auth.models import User
# from django.dispatch import receiver
# from .models import Student
# @receiver(post_save, sender=Student)
# def send_mail(sender, instance, created, **kwargs):
# if created:
# # first_name = instance.first_name
# print('-'*100)
# print(instance)
# print(type(instance))
# print(dir(instance))
# print(instance.email)
# # email = instance.email
# tuber = instance.tuber_name
# print(tuber)
# print('-'*100)
# subject = f'''
# Hi,
# We have received your request, you will receive another confirmation mail soon.
# Happy learning :)
# Tutorvita team
# '''
# send_mail(
# f'TutorVita: Confirmation mail for reaching our {tuber}',
# subject,
# [email],
# )
```
#### File: udayagiri-scl-maxo/teachers/models.py
```python
from django.db import models
from datetime import datetime
from ckeditor.fields import RichTextField
from django_countries.fields import CountryField
class Teacher(models.Model):
subject_choice = (
('English Language Arts', 'English Language Arts'),
('Art / Music / Theater', 'Art / Music / Theater'),
('Social Sciences', 'Social Sciences'),
('Mathematics', 'Mathematics'),
('Science', 'Science'),
('World Language', 'World Language'),
('Professional Career', 'Professional Career'),
('Multiple Subjects', 'Multiple Subjects'),
('Other', 'Other')
)
name = models.CharField(max_length=255)
photo = models.ImageField(upload_to='media/ytubers/%Y/%m/')
country = CountryField()
mail = models.CharField(max_length=300, blank=True)
subject = models.CharField(choices=subject_choice, max_length=255)
video_url = models.CharField(max_length=255)
description = RichTextField()
city = models.CharField(max_length=255)
age = models.IntegerField()
is_verified = models.BooleanField(default=False)
created_date = models.DateTimeField(default=datetime.now, blank=True)
twitter_url = models.CharField(max_length=400, blank=True)
linkedin_url = models.CharField(max_length=400, blank=True)
facebook_url = models.CharField(max_length=400, blank=True)
instagram_url = models.CharField(max_length=400, blank=True)
teaching_experience = models.IntegerField(default=1)
qualification = models.CharField(max_length=300, blank=True)
designation = models.CharField(max_length=500, blank=True)
area_of_interest = models.CharField(max_length=500, blank=True)
rating = models.CharField(max_length=300, blank=True)
requests = models.CharField(max_length=300, blank=True)
awards = models.CharField(max_length=700, blank=True)
books_published = models.CharField(max_length=500, blank=True)
def __str__(self):
return self.name
```
#### File: udayagiri-scl-maxo/webpages/views.py
```python
from django.shortcuts import render, redirect
from django.contrib import messages
from .models import Contact
from django.contrib.auth.decorators import login_required
def home(request):
if request.user.is_authenticated:
return render(request, 'webpages/home.html')
else:
return render(request, 'webpages/index.html')
def about(request):
return render(request, 'webpages/about.html')
@login_required
def team(request):
return render(request, 'webpages/team.html')
@login_required
def privacy(request):
return render(request, 'webpages/privacy.html')
@login_required
def license(request):
return render(request, 'webpages/license.html')
@login_required
def contact(request):
if request.POST:
name = request.POST['name']
email = request.POST['email']
subject = request.POST['subject']
comment = request.POST['message']
message = Contact()
message.name = name
message.email = email
message.subject = subject
message.comments = comment
message.save()
messages.success(request, 'Your response is recorded')
return redirect('contact')
else:
return render(request, 'webpages/contact.html',{})
``` |
{
"source": "18praveenb/hknweb",
"score": 4
} |
#### File: events/google_calendar_creds/get_token.py
```python
import datetime
import pickle
import os.path
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
def generate_token(token_path='./token.pickle', credential_path='./credentials.json'):
"""
Based on:
https://developers.google.com/calendar/quickstart/python?authuser=3
What this script does is refresh the Google Calendar authentication token
that is needed to update the calendar.
How to Use:
Open the https://developers.google.com/calendar/quickstart/python tutorial.
(1) Follow step 1 and download credentials.json to this directory.
(2) Run this script. It should pop up in a browser window.
(3) Select the google account you want for calendar and authorize.
THIS SHOULD NOT BE YOUR PERSONAL EMAIL OR ANY EMAIL WITH EXISTING
CALENDAR DATA -- THE CALENDAR WILL BE DELETED AND OVERWRITTEN WITH
THE HKN CALENDAR!!!
The `token.pickle` will be saved in this folder. You can then scp it
over to the server. Put it in the same directory
(hknweb/events/google_calendar_creds/token.pickle) and the server should
be able to update Google Calendar now.
"""
creds = None
print("Checking token status...")
if os.path.exists(token_path):
print("Token exists. Checking...")
with open(token_path, 'rb') as token:
creds = pickle.load(token)
if creds.valid:
print("Token OK!")
return
if creds and creds.expired and creds.refresh_token:
print("Token expired. Refreshing...")
creds.refresh(Request())
else:
print("No valid token existed. Generating...")
if not os.path.exists(credential_path):
print(f"ERROR: No credentials file at {credential_path}")
return
flow = InstalledAppFlow.from_client_secrets_file(
credential_path,
['https://www.googleapis.com/auth/calendar',
'https://www.googleapis.com/auth/calendar.events'])
creds = flow.run_local_server(port=0)
print("Writing token...")
with open(token_path, 'wb') as token:
pickle.dump(creds, token)
print(f"Token written to {token_path}")
def main():
generate_token()
if __name__ == "__main__":
main()
``` |
{
"source": "18rberry/Operationalizing-Equity-Tiebreaker-in-SF",
"score": 3
} |
#### File: src/d01_data/abstract_data_api.py
```python
import pandas as pd
import numpy as np
_raw_file_path = "/share/data/school_choice/"
_out_file_path = _raw_file_path + "dssg/"
class AbstractDataApi:
def __init__(self):
pass
@staticmethod
def read_dta(path):
return pd.read_stata(path)
@staticmethod
def read_csv(path):
df = pd.read_csv(path, low_memory=False)
return df
@staticmethod
def read_excel(path):
df = pd.read_excel(path, sheet_name=None, engine="openpyxl")
return df
@staticmethod
def read_pickle(path):
df = pd.read_pickle(path)
return df
def read_data(self, file_name, shared=True, user=""):
if shared:
file_type = file_name.split(".")[-1]
path = _raw_file_path + file_name
else:
# In this case the file is in the dssg folder and we need to add the user name to the front:
file_type = file_name.split(".")[-1]
path = _out_file_path + user + "_" + file_name
if file_type == "csv":
return self.read_csv(path)
elif file_type == "dta":
return self.read_dta(path)
elif file_type == "xlsx":
return self.read_excel(path)
elif file_type == "pkl":
return self.read_pickle(path)
else:
raise Exception("Format .%s not implemented" % file_type)
def get_data(self):
raise NotImplementedError("Method not implemented for abstract class")
@staticmethod
def save_data(df, file_name):
df.to_csv(_out_file_path + file_name)
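# Hypothetical usage sketch (not part of this module): concrete APIs are expected
# to subclass AbstractDataApi and implement get_data() on top of read_data().
# The class and file names below are placeholders, not actual repo code:
#
#   class ExampleDataApi(AbstractDataApi):
#       _file_name = "some_table.csv"  # placeholder path relative to the shared folder
#
#       def get_data(self):
#           return self.read_data(self._file_name)
#
#   df = ExampleDataApi().get_data()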
```
#### File: src/d02_intermediate/classifier_data_api.py
```python
import numpy as np
import pandas as pd
import geopandas as gpd
import matplotlib.pyplot as plt
from time import time
from src.d01_data.block_data_api import BlockDataApi, _default_frl_key
from src.d01_data.student_data_api import StudentDataApi, _block_features, _census_block_column, \
_diversity_index_features
from src.d00_utils.file_paths import GEO_DATA_PATH, REDLINE_DATA_PATH
from src.d00_utils.utils import get_group_value, add_percent_columns
from src.d00_utils.file_paths import REDLINING_PATH
geoid_name = 'geoid'
block_data_api = BlockDataApi()
periods_list = ["1415", "1516", "1617", "1718", "1819", "1920"]
student_data_api = StudentDataApi()
# Those are the demographic columns we want:
block_columns = ['BlockGroup','CTIP_2013 assignment','SF Analysis Neighborhood','SFHA_ex_Sr']
block_columns_rename = {'CTIP_2013 assignment': 'CTIP13',
'SF Analysis Neighborhood':'Neighborhood',
'SFHA_ex_Sr':'Housing'}
frl_column_dict = {'Geoid Group': 'group', '4YR AVG Student Count': 'n', '4YR AVG FRL Count': 'nFRL',
'4YR AVG Eth Flag Count': 'nAALPI', '4YR AVG Combo Flag Count': 'nBoth'}
class ClassifierDataApi:
__block_data = None
__redline_data = None
__map_data = None
def __init__(self):
pass
def refresh(self):
"""
Reset the block data
:return:
"""
self.__block_data = None
def get_block_data(self, redline=True, frl_key=_default_frl_key, pct_frl=False):
"""
Query block data from all three sources.
:param redline: boolean to add redline status
:param frl_key: string that identifies which FRL data should be loaded ('tk5' or tk12')
:param pct_frl: boolean to add the percent values of the frl variables
:return:
"""
if self.__block_data is None:
e = time()
print("Loading Block FRL data...", end="")
frl_df = self.get_frl_data(frl_key=frl_key)
if pct_frl:
frl_df = add_percent_columns(frl_df)
print("%.4f" % (time()-e))
e = time()
print("Loading Block Demographic data...", end="")
demo_df = self.get_demo_data()
print("%.4f" % (time()-e))
e = time()
print("Loading Student Demographic data...", end="")
stud_df = self.get_student_data()
print("%.4f" % (time()-e))
df = pd.concat([demo_df,
stud_df.reindex(demo_df.index),
frl_df.reindex(demo_df.index)],
axis=1,
ignore_index=False)
self.__block_data = df
# Add the redline status:
if redline:
block_gdf = self.get_map_df_data(cols="BlockGroup")
self.__block_data["Redline"] = self.get_redline_status(block_gdf)
return self.__block_data.copy()
def get_map_data(self):
"""
Query map data used to build the geographic maps
"""
if self.__map_data is None:
geodata_path = GEO_DATA_PATH
file_name = 'geo_export_e77bce0b-6556-4358-b36b-36cfcf826a3c'
data_types = ['.shp', '.dbf', '.prj', '.shx']
sfusd_map = gpd.read_file(geodata_path + file_name + data_types[0])
sfusd_map[geoid_name] = sfusd_map['geoid10'].astype('int64')
sfusd_map.set_index(geoid_name, inplace=True)
self.__map_data = sfusd_map
return self.__map_data
def get_redline_map_data(self):
"""
Query HOLC grades map data used in the redline criterion
"""
if self.__redline_data is None:
file_path = REDLINING_PATH
redline_map = gpd.read_file(file_path)
self.__redline_data = redline_map
return self.__redline_data
def get_map_df_data(self, cols):
"""
Append block data to the map data geopandas.DataFrame
:param cols: Columns from block data that should be appended to the map data
:return:
"""
block_data = self.get_block_data()
map_data = self.get_map_data()
if cols == [geoid_name]:
map_df_data = map_data.reindex(block_data.index)
else:
map_df_data = pd.concat([map_data.reindex(block_data.index), block_data[cols]],
axis=1, ignore_index=False)
return map_df_data
@staticmethod
def get_frl_data(frl_key=_default_frl_key):
"""
Query FRL data
:param frl_key: string that identifies which FRL data should be loaded ('tk5' or tk12')
:return:
"""
block_data_api.load_data(frl=True, frl_key=frl_key)
block_data_api.add_aa2frl(frl_key=frl_key)
frl_df = block_data_api.get_data(frl=True, frl_key=frl_key).set_index('Geoid10')
# print(frl_df)
frl_df.index.name = geoid_name
frl_df.rename(columns=frl_column_dict, inplace=True)
frl_df['nFocal'] = frl_df.apply(lambda row: row['nFRL'] + row['nAALPI'] - row['nBoth'],
axis=1, raw=False)
frl_df['nAAFRL'] = frl_df.apply(lambda row: row['nBoth'] * row['pctAA'], axis=1)
# we want to find the blocks that share a group index
mask = frl_df['group'] < 1000
last_group_index = frl_df.loc[mask, 'group'].max()
# then we generate a new set of group indexes for the standalone blocks that is more coherent
# with the indexes of the grouped blocks
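        # (illustrative numbers only: if the largest shared group index were 120 and
        # there were 3 standalone blocks, they would be assigned 121, 122 and 123)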
num_of_new_indexes = np.sum(~mask)
new_group_index = np.arange(num_of_new_indexes) + 1 + last_group_index
        frl_df.loc[~mask, 'group'] = new_group_index  # use .loc for boolean-mask assignment (.at only supports scalar label access)
frl_df['group'] = frl_df['group'].astype('int64')
return frl_df
@staticmethod
def get_demo_data():
"""
Query demographic data
:return:
"""
# Collect the meaningful demographic columns:
demo_df = block_data_api.get_data().set_index('Block')[block_columns].dropna(subset=['BlockGroup'])
# Clean the SFHA column:
demo_df = demo_df.replace({'SFHA_ex_Sr': {'yes': True, 'no': False}})
        # Rename the columns for easier access:
demo_df.rename(columns=block_columns_rename, inplace=True)
# Set index as geoid
demo_df.index.name = geoid_name
return demo_df
@staticmethod
def get_student_data():
"""
Query student data
:return:
"""
df_students = student_data_api.get_data(periods_list)
mask = df_students[_census_block_column] == 'NaN'
df_students.drop(df_students.index[mask], inplace=True)
df_students[geoid_name]=df_students['census_block'].astype('int64')
stud_df = df_students.groupby(geoid_name)[_diversity_index_features].agg(get_group_value)
return stud_df
def get_redline_status(self, map_data):
"""
        Appends the redline status to the block dataframe, i.e. whether the block was in a grade D HOLC area
        :param map_data: block GeoDataFrame indexed by geoids with geometries of each block
        :return: pandas (Boolean) Series of whether each block intersects the redline geometries
"""
if self.__redline_data is None:
self.__redline_data = self.get_redline_map_data()
redlining_by_grade = self.__redline_data.dissolve(by='holc_grade').to_crs(map_data.crs)
redline_series = map_data.buffer(0).intersects(redlining_by_grade['geometry']['D'].buffer(0), align=False)
return redline_series
@staticmethod
def plot_map_column(map_df_data, col, missing_vals=None, cmap="viridis", ax=None, save=False,
fig=None, title=None, legend=True, show=True):
"""
Plot map data with color set to the columns `col`
:param map_df_data: geopandas.DataFrame of SFUSD
:param missing_vals: geopandas.DataFrame of blocks with missing values
:param col: column of `map_df_data` with the value of interest
:param cmap: color map for the plot
:param ax: (optional) axis values. Must also provide `fig` value
:param save: boolean to save the plot
:param fig: (optional) figure values. Must also provide `ax` value
:param title: title of the figure
:param legend: boolean to show legend of the plot
:param show: boolean to show the figure
:return:
"""
if ax is None:
fig, ax = plt.subplots(figsize=(6,6))
# Missing values workaround for the gentrification plot:
if missing_vals is not None:
map_df_data.plot(column=col, ax=ax, cmap=cmap,
legend=legend, legend_kwds={'orientation': "horizontal"})
missing_vals.plot(color="lightgrey", hatch = "///", label = "Missing values", ax=ax)
else:
map_df_data.plot(column=col, ax=ax, cmap=cmap,
legend=legend, legend_kwds={'orientation': "horizontal"},
missing_kwds={'color': 'lightgrey'})
if title is None:
ax.set_title(col, fontsize=12)
else:
ax.set_title(title, fontsize=12)
if show:
plt.axis('off')
plt.tight_layout()
plt.show()
if save:
fname = 'outputs/' + col + '.png'
fig.savefig(fname)
return ax
@staticmethod
def plot_map_column_new(map_df_data, col, cmap="YlOrRd", ax=None, save=False, fig=None,
title=None, legend=True, show=True):
if ax is None:
fig, ax = plt.subplots(figsize=(4.8,4.8))
save = True
map_df_data.plot(column=col, ax=ax, cmap=cmap, marker = 'o', color = 'black',
legend=legend, legend_kwds={'orientation': "horizontal"},
missing_kwds={'color': 'lightgrey'})
if title is None:
ax.set_title(col, fontsize=12)
else:
ax.set_title(title, fontsize=12)
if show:
plt.axis('off')
plt.tight_layout()
plt.show()
if save:
fname = 'outputs/' + col + '.png'
fig.savefig(fname)
return ax
def plot_redline_data(self, ax=None, size=20, show=True):
sf_gdf = self.get_map_df_data("Neighborhood")
redline_gdf = self.get_redline_map_data().dissolve(by='holc_grade').to_crs(sf_gdf.crs)
if ax is None:
fig, ax = plt.subplots(figsize=(size,size))
color_mapping = {"A": "green", "B": "blue", "C":"yellow", "D":"red"}
ax = sf_gdf.plot(color='gray', ax=ax, alpha=0.9)
redline_gdf.plot(ax=ax, categorical=True,
color=redline_gdf.index.map(color_mapping), alpha=0.6,
legend=True, legend_kwds={"fontsize":30})
if show:
plt.show()
return ax
if __name__ == "__main__":
obj = ClassifierDataApi()
block_data = obj.get_block_data()
map_data = obj.get_map_data()
map_df_data = obj.get_map_df_data(['group', 'pctFRL', 'pctAALPI', 'pctBoth'])
obj.plot_map_column(map_df_data, 'pctFRL', cmap="YlOrRd")
``` |
{
"source": "18SebastianVC/tytus",
"score": 2
} |
#### File: parse/sql_ddl/create.py
```python
from jsonMode import createDatabase, createTable, dropDatabase
from parse.ast_node import ASTNode
from parse.symbol_table import SymbolTable, DatabaseSymbol, TableSymbol, FieldSymbol, TypeSymbol
from parse.errors import Error, ErrorType
class CreateEnum(ASTNode):
def __init__(self, name, value_list, line, column):
ASTNode.__init__(self, line, column)
self.name = name # type name
self.value_list = value_list # list of possible values
def execute(self, table: SymbolTable, tree):
super().execute(table, tree)
result_values = self.value_list.execute(table, tree)
symbol = TypeSymbol(self.name, result_values)
return table.add(symbol)
class CreateDatabase(ASTNode):
def __init__(self, name, owner, mode, replace, line, column):
ASTNode.__init__(self, line, column)
self.name = name # database name
self.owner = owner # optional owner
self.mode = mode # mode integer
self.replace = replace # boolean type
def execute(self, table: SymbolTable, tree):
super().execute(table, tree)
# result_name = self.name.execute(table, tree)
result_name = self.name
result_owner = self.owner.execute(table, tree) if self.owner else None # Owner seems to be stored only to ST
        result_mode = self.mode.execute(table, tree) if self.mode is not None else 6  # Change to 1 when default mode available
if self.replace:
dropDatabase(result_name)
result = 0
if result_mode == 6: # add more ifs when modes from EDD available
result = createDatabase(result_name)
if result == 1:
# log error on operation
raise Error(0, 0, ErrorType.RUNTIME, '5800: system_error')
return False
elif result == 2:
# log error because db already exists
raise Error(0, 0, ErrorType.RUNTIME, '42P04: duplicate_database')
return False
else:
return table.add(DatabaseSymbol(result_name, result_owner, result_mode))
class CreateTable(ASTNode): # TODO: Check grammar, complex instructions are not added yet
def __init__(self, name, inherits_from, fields, line, column):
ASTNode.__init__(self, line, column)
self.name = name # table name
self.inherits_from = inherits_from # optional inheritance
self.fields = fields # list of fields
def execute(self, table: SymbolTable, tree):
super().execute(table, tree)
result_name = self.name.execute(table, tree)
result_inherits_from = self.inherits_from.execute(table, tree) if self.inherits_from else None
result_fields = []
if result_inherits_from:
# get inheritance table, if doesn't exists throws semantic error, else append result
result_fields.append(table.get_fields_from_table(result_inherits_from))
result = createTable('db_from_st', result_name, len(result_fields))
if result == 1:
raise Error(0, 0, ErrorType.RUNTIME, '5800: system_error')
return False
elif result == 2:
raise Error(0, 0, ErrorType.RUNTIME, '42P04: database_does_not_exists')
return False
elif result == 3:
raise Error(0, 0, ErrorType.RUNTIME, '42P07: duplicate_table')
return False
else:
table.add(TableSymbol(table.get_current_db().name, result_name))
result_fields = self.fields.execute(table, tree) # A list of TableField assumed
for field in result_fields:
field.table_name = result_name
table.add(field)
return
class TableField(ASTNode): # returns an item, grammar has to add it to a list and synthesize value to table
def __init__(self, name, field_type, length, allows_null, is_pk, line, column):
ASTNode.__init__(self, line, column)
self.name = name # field name
self.field_type = field_type # type of field
self.length = length
self.allows_null = allows_null # if true then NULL or default, if false the means is NOT NULL
self.is_pk = is_pk # field is primary key
def execute(self, table, tree):
super().execute(table, tree)
result_name = self.name.execute(table, tree)
result_field_type = self.field_type.execute(table, tree)
result_length = self.length.execute(table, tree)
return FieldSymbol(
table.get_current_db().name,
None,
result_name,
result_field_type,
result_length,
self.allows_null,
self.is_pk
)
# table = SymbolTable([])
# cdb_obj = CreateDatabase('db_test2', None, None, False, 1, 2)
# print(cdb_obj.execute(table, None))
```
#### File: parser/team06/ventana.py
```python
import os
import sys
import platform
from nodeAst import nodeAst
import ascendente as analizador
#import accionesIDE as accionesVarias
#import mostrarLineas
#To display pdfs
import webbrowser
#Interface toolkit of python tk interface
import tkinter as tk
from tkinter import filedialog
from tkinter import messagebox
#Custom text is for painting colors in a text area
from CustomText import CustomText
#For managing the Line Numbers in the text area
from TextLine import TextLineNumbers
class Interfaz(tk.Frame):
def __init__(self, *args, **kwargs):
self.root = root
tk.Frame.__init__(self, *args, **kwargs)
self.filename = None
self.terminal = tk.Text(root, width=75, height=1, background="black",foreground="#00AA00")
self.terminal.pack(side="right", fill="both", expand=True)
# Special Text
self.ter = tk.Scrollbar(orient="vertical", command=self.terminal.yview)
self.terminal.configure(yscrollcommand=self.ter.set)
self.ter.pack(side="right", fill="y")
# Special Text
self.text = CustomText(self)
self.vsb = tk.Scrollbar(orient="vertical", command=self.text.yview)
self.text.configure(yscrollcommand=self.vsb.set)
# Text line number
self.linenumbers = TextLineNumbers(self, width=70)
self.linenumbers.attach(self.text)
self.vsb.pack(side="right", fill="y")
self.linenumbers.pack(side="left", fill="y")
self.text.pack(side="right", fill="both", expand=True)
self.text.bind("<<Change>>", self._on_change)
self.text.bind("<Configure>", self._on_change)
#Menu bar
menubar = tk.Menu(self)
root.config(menu=menubar)
file_dropdown = tk.Menu(menubar, tearoff=0)
run_dropdown = tk.Menu(menubar, tearoff=0)
report_dropdown = tk.Menu(menubar, tearoff=0)
help_dropdown = tk.Menu(menubar, tearoff=0)
file_dropdown.add_command(label="Nuevo", command=self.new_file)
file_dropdown.add_command(label="Abrir", command=self.open_file)
file_dropdown.add_command(label="Guardar", command=self.save)
file_dropdown.add_command(label="Guardar Como", command=self.save_as)
file_dropdown.add_separator()
file_dropdown.add_command(label="Salir", command=self.end)
run_dropdown.add_command(label="Ejecutar Ascendente", command=self.ejecutar_ascendente)
run_dropdown.add_command(label="Ejecutar Descendente")
report_dropdown.add_command(label="Reporte de Errores", command=self.generarReporteErrores )
report_dropdown.add_command(label="Reporte AST", command=self.astReport)
report_dropdown.add_command(label="Reporte de Gramatical", command=self.generarReporteGramatical)
report_dropdown.add_command(label="Tabla de Simbolos", command=self.generarReporteSimbolos )
help_dropdown.add_command(label="Acerca de", command=self.about)
help_dropdown.add_command(label="Manual de Usuario", command=self.m_user)
help_dropdown.add_command(label="Manual Técnico", command=self.m_tecnic)
menubar.add_cascade(label="Archivo", menu=file_dropdown)
menubar.add_cascade(label="Ejecutar", menu=run_dropdown)
menubar.add_cascade(label="Reportes", menu=report_dropdown)
menubar.add_cascade(label="Ayuda", menu=help_dropdown)
    #-------------------------------------------------------Report methods---------------------------------------------------------------------
def generarReporteGramatical(self):
try:
state_script_dir = os.getcwd()
report_dir = state_script_dir + "\\Reportes\\reporteGramatical.html"
print(report_dir)
analizador.genenerarReporteGramaticalAscendente(report_dir)
print("Si se genero el reporte :D!")
edge_path = 'C://Program Files (x86)//Microsoft//Edge//Application/msedge.exe %s'
webbrowser.get(edge_path).open(report_dir)
except:
print("no se genero el reporte :(")
box_tilte = "Report Error"
box_msg = "El archivo del reporte no existe"
messagebox.showinfo(box_tilte, box_msg)
def generarReporteErrores(self):
try:
state_script_dir = os.getcwd()
report_dir = state_script_dir + "\\Reportes\\reporteDeErrores.html"
print(report_dir)
analizador.genenerarReporteErroresAscendente(report_dir)
print("Si se genero el reporte de errores :D!")
edge_path = 'C://Program Files (x86)//Microsoft//Edge//Application/msedge.exe %s'
webbrowser.get(edge_path).open(report_dir)
except:
print("no se genero el reporte :(")
box_tilte = "Report Error"
box_msg = "El archivo del reporte no existe"
messagebox.showinfo(box_tilte, box_msg)
def generarReporteSimbolos(self):
try:
state_script_dir = os.getcwd()
report_dir = state_script_dir + "\\Reportes\\TablaDeSimbolos.html"
print(report_dir)
analizador.generarReporteSimbolos(report_dir)
print("Si se genero el reporte :D!")
edge_path = 'C://Program Files (x86)//Microsoft//Edge//Application/msedge.exe %s'
webbrowser.get(edge_path).open(report_dir)
except:
print("no se genero el reporte :(")
box_tilte = "Report Error"
box_msg = "El archivo del reporte no existe"
messagebox.showinfo(box_tilte, box_msg)
def astReport(self):
analizador.generarASTReport()
#-------------------------------------------------------Color Tags for the Paint Method---------------------------------------------------------------------
"""self.text.tag_configure("reserved", foreground="red")
self.text.tag_configure("var", foreground="#008000")
self.text.tag_configure("int", foreground="#0000FF")
self.text.tag_configure("boolean", foreground="#0000FF")
self.text.tag_configure("string", foreground="#FFFF00")
self.text.tag_configure("comment", foreground="#808080")
self.text.tag_configure("operator", foreground="#FFA500")"""
#-------------------------------------------------------Line Number Method---------------------------------------------------------------------
def _on_change(self, event):
self.linenumbers.redraw()
self.text.tag_remove('resaltado', '1.0', tk.END)
#-------------------------------------------------------File Menu Methods---------------------------------------------------------------------
def set_window_title(self, name=None):
if name:
self.root.title(name)
else:
self.root.title("Sin titulo.txt")
def new_file(self):
self.text.delete(1.0, tk.END)
self.filename = None
self.set_window_title()
def open_file(self):
self.filename = filedialog.askopenfilename(defaultextension="*.*",
filetypes=[("All Files","*.*")])
if self.filename:
self.text.delete(1.0, tk.END)
with open(self.filename, "r") as f:
self.text.insert(1.0, f.read())
self.set_window_title(self.filename)
def save(self):
if self.filename:
try:
textarea_content = self.text.get(1.0, tk.END)
with open(self.filename, "w") as f:
f.write(textarea_content)
except Exception as e:
print(e)
else:
self.save_as()
def save_as(self):
try:
new_file = filedialog.asksaveasfilename(initialfile="Sin titulo.txt", defaultextension="*.*",
filetypes=[("All Files","*.*"),("JS Files",".js"),("CSS Files",".css"),("HTML Files",".html")])
textarea_content = self.text.get(1.0, tk.END)
with open(new_file,"w") as f:
f.write(textarea_content)
self.filename = new_file
self.set_window_title(self.filename)
except Exception as e:
print(e)
def end(self):
value = messagebox.askokcancel("Salir", "Está seguro que desea salir?")
if value :
root.destroy()
#-------------------------------------------------------Execution Menu Methods---------------------------------------------------------------------
def ejecutar_ascendente(self):
x= self.text.get(1.0, tk.END)
self.terminal.delete(1.0, tk.END)
#try:
salida=analizador.ejecucionAscendente(x)
#salida+="\n---------------------FIN EJECUCION ASCENDENTE--------------------------\n"
#except:
#salida="Grupo6>Se genero un error de analisis"
self.terminal.insert(tk.END,salida)
#-------------------------------------------------------Help Menu Methods---------------------------------------------------------------------
def about(self):
box_tilte ="Autor"
box_msg = "GRUPO 6\n"
"<NAME> 2012-22615\n"
"<NAME> 2013-13692\n"
"<NAME> 2013-13734\n"
"<NAME> 2013-13875"
messagebox.showinfo(box_tilte,box_msg)
def m_user(self):
script_dir = os.path.dirname(os.path.abspath(__file__))
direction = script_dir + "\\Manuales\\Manual de Usuario.pdf"
try:
webbrowser.open_new(r'file://'+direction)
except Exception as e:
box_tilte ="Path Error"
box_msg = "El archivo que trata de acceder no existe"
messagebox.showerror(box_tilte,box_msg)
def m_tecnic(self):
script_dir = os.path.dirname(os.path.abspath(__file__))
direction = script_dir + "\\Manuales\\Manual Tecnico.pdf"
try:
webbrowser.open_new(r'file://'+direction)
except Exception as e:
box_tilte ="Path Error"
box_msg = "El archivo que trata de acceder no existe"
messagebox.showerror(box_tilte,box_msg)
#-------------------------------------------------------Reports---------------------------------------------------------------------
"""def error(self,entrada,tipo):
if(len(entrada)==0):
box_tilte = "Tabla de Errores"
box_msg = "No existe ningun error"
messagebox.showinfo(box_tilte, box_msg)
else:
errorList(entrada,tipo)
def errorReport(self):
error_script_dir = os.path.dirname(os.path.abspath(__file__))
print("DIR:"+error_script_dir)
report_dir = error_script_dir + "\\Reportes\\errorList.html"
print("DIRECCION:"+report_dir)
if(os.path.exists(report_dir)):
webbrowser.open_new(r'file://' + report_dir)
else:
print(report_dir)
box_tilte = "Report Error"
box_msg = "El archivo del reporte no existe"
messagebox.showinfo(box_tilte, box_msg)
def css_state(self,entrada,tipo):
if(len(entrada)==0):
box_tilte = "Reporte de estados"
box_msg = "No existe ningun estado"
messagebox.showinfo(box_tilte, box_msg)
else:
stateList(entrada,tipo)
def state_report(self):
state_script_dir = os.path.dirname(os.path.abspath(__file__))
report_dir = state_script_dir + "\\Reportes\\css_states.html"
if(os.path.exists(report_dir)):
webbrowser.open_new(r'file://' + report_dir)
else:
box_tilte = "Report Error"
box_msg = "El archivo del reporte no existe"
messagebox.showinfo(box_tilte, box_msg)
def rmt_lines(self,entrada,tipo):
if(len(entrada)==0):
box_tilte = "Reporte de RMT"
box_msg = "No existe ninguna linea"
messagebox.showinfo(box_tilte, box_msg)
else:
rmtList(entrada,tipo)
def rmt_report(self):
rmt_script_dir = os.path.dirname(os.path.abspath(__file__))
report_dir = rmt_script_dir + "\\Reportes\\rmt.html"
if(os.path.exists(report_dir)):
webbrowser.open_new(r'file://' + report_dir)
else:
box_tilte = "Report Error"
box_msg = "El archivo del reporte no existe"
messagebox.showinfo(box_tilte, box_msg)
def js_report(self):
js_script_dir = os.path.dirname(os.path.abspath(__file__))
String = js_script_dir + "\\Grafos\\String.gv.pdf"
Unicomentario = js_script_dir + "\\Grafos\\UniComentario.gv.pdf"
ID = js_script_dir + "\\Grafos\\ID.gv.pdf"
try:
webbrowser.open_new(r'file://' + String)
webbrowser.open_new(r'file://' + Unicomentario)
webbrowser.open_new(r'file://' + ID)
except Exception as e:
box_tilte = "Report Error"
messagebox.showinfo(box_tilte, e)"""
#-------------------------------------------------------Paint Words---------------------------------------------------------------------
"""def pintar(self,token):
for last in token:
if(last[0]!=None):
if(last[2]=="reservada"):
posicionInicial = f'{last[0]}.{last[1]-1}'
posicionFinal = f'{posicionInicial}+{len(str(last[3]))}c'
self.text.tag_add('reserved', posicionInicial, posicionFinal)
elif(last[3].lower()=="var"):
posicionInicial = f'{last[0]}.{last[1]-1}'
posicionFinal = f'{posicionInicial}+{len(str(last[3]))}c'
self.text.tag_add('var', posicionInicial, posicionFinal)
elif(last[2].lower()=="string"):
posicionInicial = f'{last[0]}.{last[1]-1}'
posicionFinal = f'{posicionInicial}+{len(str(last[3]))}c'
self.text.tag_add('string', posicionInicial, posicionFinal)
elif(last[2].lower()=="TAG"):
posicionInicial = f'{last[0]}.{last[1]-1}'
posicionFinal = f'{posicionInicial}+{len(str(last[3]))}c'
self.text.tag_add('string', posicionInicial, posicionFinal)
elif(last[2].lower()=="integer"):
posicionInicial = f'{last[0]}.{last[1]-1}'
posicionFinal = f'{posicionInicial}+{len(str(last[3]))}c'
self.text.tag_add('int', posicionInicial, posicionFinal)
elif(last[2].lower()=="decimal"):
posicionInicial = f'{last[0]}.{last[1]-1}'
posicionFinal = f'{posicionInicial}+{len(str(last[3]))}c'
self.text.tag_add('int', posicionInicial, posicionFinal)
elif(last[3].lower()=="true" or last[3].lower()=="false"):
posicionInicial = f'{last[0]}.{last[1]-1}'
posicionFinal = f'{posicionInicial}+{len(str(last[3]))}c'
self.text.tag_add('boolean', posicionInicial, posicionFinal)
elif(last[2].lower()=="comentario"):
posicionInicial = f'{last[0]}.{last[1]-1}'
posicionFinal = f'{posicionInicial}+{len(str(last[3]))}c'
self.text.tag_add('comment', posicionInicial, posicionFinal)
elif(last[2].lower()=="operador"):
posicionInicial = f'{last[0]}.{last[1]-1}'
posicionFinal = f'{posicionInicial}+{len(str(last[3]))}c'
self.text.tag_add('operator', posicionInicial, posicionFinal)
elif(last[2].upper()=="PARA"):
posicionInicial = f'{last[0]}.{last[1]-1}'
posicionFinal = f'{posicionInicial}+{len(str(last[3]))}c'
self.text.tag_add('operator', posicionInicial, posicionFinal)
elif(last[2].upper()=="PARC"):
posicionInicial = f'{last[0]}.{last[1]-1}'
posicionFinal = f'{posicionInicial}+{len(str(last[3]))}c'
self.text.tag_add('operator', posicionInicial, posicionFinal)
elif(last[2].upper()=="POR"):
posicionInicial = f'{last[0]}.{last[1]-1}'
posicionFinal = f'{posicionInicial}+{len(str(last[3]))}c'
self.text.tag_add('operator', posicionInicial, posicionFinal)
elif(last[2].upper()=="DIV"):
posicionInicial = f'{last[0]}.{last[1]-1}'
posicionFinal = f'{posicionInicial}+{len(str(last[3]))}c'
self.text.tag_add('operator', posicionInicial, posicionFinal)
elif(last[2].upper()=="MAS"):
posicionInicial = f'{last[0]}.{last[1]-1}'
posicionFinal = f'{posicionInicial}+{len(str(last[3]))}c'
self.text.tag_add('operator', posicionInicial, posicionFinal)
elif(last[2].upper()=="MENOS"):
posicionInicial = f'{last[0]}.{last[1]-1}'
posicionFinal = f'{posicionInicial}+{len(str(last[3]))}c'
self.text.tag_add('operator', posicionInicial, posicionFinal)
else:
pass
else:
pass"""
#-------------------------------------------------------Main---------------------------------------------------------------------
if __name__ == "__main__":
root = tk.Tk()
root.title("TYTUS SQL Grupo 6")
Interfaz(root).pack(side="top", fill="both", expand=True)
root.mainloop()
```
#### File: Instrucciones/Expresiones/Relacional.py
```python
from Instrucciones.TablaSimbolos.Instruccion import Instruccion
from Instrucciones.TablaSimbolos.Tipo import Tipo_Dato, Tipo
from Instrucciones.Excepcion import Excepcion
class Relacional(Instruccion):
def __init__(self, opIzq, opDer, operador, linea, columna):
Instruccion.__init__(self,Tipo(Tipo_Dato.BOOLEAN),linea,columna)
self.opIzq = opIzq
self.opDer = opDer
self.operador = operador
def ejecutar(self, tabla, arbol):
super().ejecutar(tabla,arbol)
        # If the left operand produced an error, return it.
resultadoIzq = self.opIzq.ejecutar(tabla, arbol)
if isinstance(resultadoIzq, Excepcion):
return resultadoIzq
        # If the right operand produced an error, return it.
resultadoDer = self.opDer.ejecutar(tabla, arbol)
if isinstance(resultadoDer, Excepcion):
return resultadoDer
        # Check which relational operator was used
if self.operador == '>':
if self.opIzq.tipo.tipo == Tipo_Dato.INTEGER and self.opDer.tipo.tipo == Tipo_Dato.INTEGER:
return resultadoIzq > resultadoDer
else:
error = Excepcion('42883',"Semántico","el operador no existe: "+self.opIzq.tipo.toString()+" > "+self.opDer.tipo.toString(),self.linea,self.columna)
arbol.excepciones.append(error)
return error
elif self.operador == '<':
if self.opIzq.tipo.tipo == Tipo_Dato.INTEGER and self.opDer.tipo.tipo == Tipo_Dato.INTEGER:
return resultadoIzq < resultadoDer
else:
error = Excepcion('42883',"Semántico","el operador no existe: "+self.opIzq.tipo.toString()+" < "+self.opDer.tipo.toString(),self.linea,self.columna)
arbol.excepciones.append(error)
return error
elif self.operador == '>=':
if self.opIzq.tipo.tipo == Tipo_Dato.INTEGER and self.opDer.tipo.tipo == Tipo_Dato.INTEGER:
return resultadoIzq >= resultadoDer
else:
error = Excepcion('42883',"Semántico","el operador no existe: "+self.opIzq.tipo.toString()+" >= "+self.opDer.tipo.toString(),self.linea,self.columna)
arbol.excepciones.append(error)
return error
elif self.operador == '<=':
if self.opIzq.tipo.tipo == Tipo_Dato.INTEGER and self.opDer.tipo.tipo == Tipo_Dato.INTEGER:
return resultadoIzq <= resultadoDer
else:
error = Excepcion('42883',"Semántico","el operador no existe: "+self.opIzq.tipo.toString()+" <= "+self.opDer.tipo.toString(),self.linea,self.columna)
arbol.excepciones.append(error)
return error
elif self.operador == '=':
if self.opIzq.tipo.tipo == Tipo_Dato.INTEGER and self.opDer.tipo.tipo == Tipo_Dato.INTEGER:
return resultadoIzq == resultadoDer
else:
error = Excepcion('42883',"Semántico","el operador no existe: "+self.opIzq.tipo.toString()+" = "+self.opDer.tipo.toString(),self.linea,self.columna)
arbol.excepciones.append(error)
return error
elif self.operador == '<>':
if self.opIzq.tipo.tipo == Tipo_Dato.INTEGER and self.opDer.tipo.tipo == Tipo_Dato.INTEGER:
return resultadoIzq != resultadoDer
else:
error = Excepcion('42883',"Semántico","el operador no existe: "+self.opIzq.tipo.toString()+" <> "+self.opDer.tipo.toString(),self.linea,self.columna)
arbol.excepciones.append(error)
return error
else:
error = Excepcion('42804',"Semántico","Operador desconocido.",self.linea,self.columna)
arbol.excepciones.append(error)
return error
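# Sketch of an equivalent, more compact dispatch (not used by the class above);
# it assumes both operands were already validated as INTEGER, as in each branch:
#
#   import operator
#   _OPERADORES = {'>': operator.gt, '<': operator.lt, '>=': operator.ge,
#                  '<=': operator.le, '=': operator.eq, '<>': operator.ne}
#   # resultado = _OPERADORES[self.operador](resultadoIzq, resultadoDer)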
```
#### File: Tytus_SQLPARSER_G8/Instrucciones/Identificador.py
```python
from Instrucciones.Excepcion import Excepcion
from lexico import columas
from tkinter.constants import FALSE
from Instrucciones.Sql_create.ShowDatabases import ShowDatabases
from Instrucciones.TablaSimbolos.Instruccion import *
from Instrucciones.Tablas.BaseDeDatos import BaseDeDatos
from Instrucciones.TablaSimbolos.Simbolo import Simbolo
from Instrucciones.TablaSimbolos.Tipo import Tipo, Tipo_Dato
from storageManager.jsonMode import *
class Identificador(Instruccion):
def __init__(self, id, linea, columna):
Instruccion.__init__(self,None,linea,columna)
self.id = id
def ejecutar(self, tabla, arbol):
super().ejecutar(tabla,arbol)
variable = tabla.getVariable(self.id)
if variable == None:
error = Excepcion("42P10","Semantico","La columna "+str(self.id)+" no existe",self.linea,self.columna)
arbol.excepciones.append(error)
arbol.consola.append(error.toString())
return error
self.tipo = variable.tipo
return variable.valor.ejecutar(tabla, arbol)
    def devolverTabla(self, tabla, arbol):
        super().ejecutar(tabla, arbol)
        valor = arbol.devolverTabla(self.id)
        if(valor == 0):
            print("Esto provoca un error, tabla no existe")
        else:
            print("tabla encontrada")
        return valor
```
#### File: Tytus_SQLPARSER_G8/Instrucciones/Relaciones.py
```python
from Instrucciones.TablaSimbolos.Instruccion import Instruccion
class Relaciones(Instruccion):
def __init__(self, lista, opcion, query, linea, columna):
Instruccion.__init__(self,None,linea,columna)
self.query = query
self.lista = lista
self.opcion = opcion
def ejecutar(self, tabla, arbol):
super().ejecutar(tabla,arbol)
print("entro a relaciones")
```
#### File: Instrucciones/Sql_alter/AlterTableAddCheck.py
```python
from Instrucciones.TablaSimbolos.Instruccion import Instruccion
from Instrucciones.Excepcion import Excepcion
#from storageManager.jsonMode import *
class AlterTableDropConstraint(Instruccion):
def __init__(self, tabla, condicion, linea, columna):
Instruccion.__init__(self,None,linea,columna)
self.tabla = tabla
self.condicion = condicion
def ejecutar(self, tabla, arbol):
super().ejecutar(tabla,arbol)
print(tabla)
arbol.consola.append("Consulta devuelta correctamente.")
```
#### File: Instrucciones/Sql_alter/AlterTableAddConstraint.py
```python
from Instrucciones.TablaSimbolos.Instruccion import Instruccion
from Instrucciones.Excepcion import Excepcion
#from storageManager.jsonMode import *
class AlterTableAddConstraint(Instruccion):
def __init__(self, tabla, id, lista_col, linea, columna):
Instruccion.__init__(self,None,linea,columna)
self.tabla = tabla
self.id = id
self.lista_col = lista_col
def ejecutar(self, tabla, arbol):
super().ejecutar(tabla,arbol)
arbol.consola.append("Consulta devuelta correctamente.")
```
#### File: Instrucciones/Sql_alter/AlterTableAddFK.py
```python
from Instrucciones.TablaSimbolos.Instruccion import Instruccion
from Instrucciones.Excepcion import Excepcion
#from storageManager.jsonMode import *
# Associates referential integrity between foreign keys and primary keys;
# for phase 1 this request is ignored.
class AlterTableAddFK(Instruccion):
def __init__(self, tabla, lista_col, tabla_ref, lista_fk, linea, columna):
Instruccion.__init__(self,None,linea,columna)
self.tabla = tabla
self.lista_col = lista_col
self.tabla_ref = tabla_ref
self.lista_fk = lista_fk
def ejecutar(self, tabla, arbol):
super().ejecutar(tabla,arbol)
#resultado = alterAddFK(arbol.getBaseDatos(), self.tabla, self.lista_fk)
arbol.consola.append("Consulta devuelta correctamente.")
```
#### File: Instrucciones/Sql_alter/AlterTableAlterColumn.py
```python
from Instrucciones.TablaSimbolos.Instruccion import Instruccion
from Instrucciones.Excepcion import Excepcion
#from storageManager.jsonMode import *
# Just recognize it in the grammar and update it in your type table
class AlterTableAlterColumn(Instruccion):
def __init__(self, tabla, col, linea, columna):
Instruccion.__init__(self,None,linea,columna)
self.tabla = tabla
self.col = col
def ejecutar(self, tabla, arbol):
super().ejecutar(tabla,arbol)
        # Add NOT NULL to the column
arbol.consola.append("Consulta devuelta correctamente.")
```
#### File: Instrucciones/Sql_create/CreateType.py
```python
from Instrucciones.TablaSimbolos.Instruccion import Instruccion
from Instrucciones.Expresiones.Primitivo import Primitivo
from storageManager.jsonMode import *
class CreateType(Instruccion):
def __init__(self, id, tipo, listaExpre, linea, columna):
Instruccion.__init__(self,tipo,linea,columna)
self.valor = id
self.listaExpre = listaExpre
def ejecutar(self, tabla, arbol):
super().ejecutar(tabla,arbol)
lista = []
if(self.listaExpre):
print("------VALORES------")
for x in range(0,len(self.listaExpre)):
                # check whether the expression is a primitive value
if(type(self.listaExpre[x]) is Primitivo):
valor = self.listaExpre[x].ejecutar(tabla,arbol)
lista.append(valor)
print(valor)
print(lista)
print(self.valor + " linea: " + str(self.linea) + " columna: " + str(self.columna))
'''
instruccion = CreateType("hola mundo",None, 1,2)
instruccion.ejecutar(None,None)
'''
```
#### File: Instrucciones/Sql_select/SelectLista.py
```python
from Instrucciones.TablaSimbolos.Instruccion import Instruccion
from Instrucciones.TablaSimbolos.Tipo import Tipo_Dato, Tipo
from Instrucciones.Excepcion import Excepcion
class SelectLista(Instruccion):
def __init__(self, lista, linea, columna):
Instruccion.__init__(self,None,linea,columna)
self.lista = lista
def ejecutar(self, tabla, arbol):
super().ejecutar(tabla,arbol)
columnas = []
valores = []
for ins in self.lista:
if isinstance(ins, Alias):
resultado = ins.expresion.ejecutar(tabla, arbol)
if isinstance(resultado, Excepcion):
return resultado
valores.append(str(resultado))
columnas.append(ins.id)
else:
resultado = ins.ejecutar(tabla, arbol)
if isinstance(resultado, Excepcion):
return resultado
valores.append(str(resultado))
columnas.append('col')
valores = [valores]
lf = []
for i in range(0,len(columnas)):
temporal = []
temporal.append(len(columnas[i]))
for l in valores:
temporal.append(len(str(l[i])))
lf.append(max(temporal))
        # Header row
cad = ''
for s in range(0,len(lf)):
cad += '+---'+'-'*lf[s]
cad += '+\n'
for s in range(0,len(lf)):
cad += '| ' +str(columnas[s]) + ' ' *((lf[s]+4)-(2+len(str(columnas[s]))))
cad += '|\n'
cad += '|'
for s in range(0,len(lf)):
cad += '---'+'-'*lf[s]+ '+'
size = len(cad)
cad = cad[:size - 1] + "|\n"
        # Value rows
for i in valores:
for j in range(0,len(lf)):
cad += '| ' + str(i[j]) + ' ' *((lf[j]+4)-(2+len(str(i[j]))))
cad += "|\n"
        # Closing line
for s in range(0,len(columnas)):
cad += '+---'+'-'*lf[s]
cad += '+\n'
arbol.consola.append(cad)
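# Rough example of the ASCII table appended to arbol.consola by ejecutar(), for a
# single expression whose result is 3 (column names/values here are illustrative):
#
#   +------+
#   | col  |
#   |------|
#   | 3    |
#   +------+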
class Alias():
def __init__(self, id, expresion):
self.id = id
self.expresion = expresion
```
#### File: parser/team13/principal.py
```python
from sentencias import *
from storageManager import jsonMode as jBase
import TablaSimbolos as TS
import Error as Error
import re
consola = ""
useActual = ""
listaSemanticos = []
listaConstraint = []
listaFK = []
def interpretar_sentencias(arbol, tablaSimbolos):
jBase.dropAll()
global consola
for nodo in arbol:
if isinstance(nodo, SCrearBase):
print("Creando Base-----")
crearBase(nodo, tablaSimbolos)
            # the method that executes CREATE DATABASE goes here
elif isinstance(nodo, SShowBase):
print("Mostrando Base-----")
if nodo.like == False:
bases = jBase.showDatabases()
for base in bases:
consola += base + "\n"
else:
bases = jBase.showDatabases()
basn = []
for base in bases:
basn.append(base)
basn2 = []
r = re.compile(".*" + nodo.cadena + ".*")
basn2 = list(filter(r.match, basn))
for bas in basn2:
consola += bas + "\n"
            # the method that executes SHOW DATABASES goes here
elif isinstance(nodo, SUse):
global useActual
useActual = nodo.id
elif isinstance(nodo, SAlterBase):
print("Alterando Base-----")
AlterDatabase(nodo, tablaSimbolos)
# aqui va el metodo para ejecutar alter base
elif isinstance(nodo, SDropBase):
print("Drop Base-----")
if nodo.exists == False:
db = jBase.dropDatabase(nodo.id.valor)
if db == 2:
listaSemanticos.append(
Error.ErrorS("Error Semantico", "Error la base de datos " + nodo.id.valor + " no existe"))
elif db == 1:
listaSemanticos.append(Error.ErrorS("Error Semantico", "Error en la operacion."))
else:
b = tablaSimbolos.eliminar(nodo.id.valor)
if b == True:
consola += "La base de datos " + nodo.id.valor + " se elimino con exito. \n"
else:
db = jBase.dropDatabase(nodo.id.valor)
if db == 1:
listaSemanticos.append(Error.ErrorS("Error Semantico", "Error en la operacion."))
elif db == 0:
b = tablaSimbolos.eliminar(nodo.id.valor)
if b == True:
consola += "La base de datos " + nodo.id.valor + " se elimino con exito. \n"
else:
consola += "Error no se pudo elminar la base " + nodo.id.valor + " de la tabla de simbolos \n"
            # the method that executes DROP DATABASE goes here
elif isinstance(nodo, STypeEnum):
print("Enum Type------")
print(nodo.id)
for val in nodo.lista:
print(val.valor)
elif isinstance(nodo, SUpdateBase):
print("Update Table-----------")
print(nodo.id)
for val in nodo.listaSet:
print("columna------")
print(val.columna)
print("------------")
if isinstance(val.valor, SOperacion):
val2 = val.valor
print(val2.opIzq.valor)
print(val2.operador)
print(val2.opDer.valor)
else:
val2 = val.valor
print(val2.valor)
print(nodo.listaWhere)
elif isinstance(nodo, SDeleteBase):
print("Delete Table-------------")
print(nodo.id)
print("Tiene where?")
print(nodo.listaWhere)
elif isinstance(nodo, STruncateBase):
print("Truncate Table------------")
for id in nodo.listaIds:
print(id)
elif isinstance(nodo, SInsertBase):
print("Insert Table-------------")
print("nombre tabla")
print(nodo.id)
print("valores")
for val in nodo.listValores:
if isinstance(val, SExpresion):
print(val.valor)
elif isinstance(nodo, SShowTable):
print("Mostrando tablas----------")
tablas = jBase.showTables(useActual)
for tabla in tablas:
consola += tabla + "\n"
elif isinstance(nodo, SDropTable):
print("Drop table-----------")
bandera = True
for fk in listaFK:
if fk.idtlocal == nodo.id:
bandera = False
if bandera:
b = jBase.dropTable(useActual, nodo.id)
if b == 0:
base = tablaSimbolos.get(useActual)
if base.deleteTable(nodo.id) == True:
consola += "La tabla " + nodo.id + " de la base " + useActual + " se eliminó con éxito. \n"
else:
consola += "Error no se pudo eliminar la tabla " + nodo.id + " de la tabla de simbolos \n"
elif b == 2:
listaSemanticos.append(Error.ErrorS("Error Semantico",
"Error la base de datos " + useActual + " no existe, No se puede eliminar la tabla " + nodo.id))
elif b == 3:
listaSemanticos.append(Error.ErrorS("Error Semantico",
"Error la tabla " + nodo.id + " no existe en la base de datos " + useActual))
elif b == 1:
listaSemanticos.append(Error.ErrorS("Error Semantico", "Error en la operacion."))
else:
consola += "No se puede eliminar la tabla debido a que esta siendo referenciada por una llave foranea \n"
elif isinstance(nodo, SAlterTableRenameColumn):
print("Cambiando nombre columna---")
AlterRenameColumn(nodo, tablaSimbolos)
elif isinstance(nodo, SAlterRenameTable):
AlterRenameTable(nodo, tablaSimbolos)
elif isinstance(nodo, SAlterTableAddColumn):
print("Agregando Columna-----")
AlterAddColumn(nodo, tablaSimbolos)
elif isinstance(nodo, SAlterTableCheck):
print("Agregando check--------")
AlterTableCheck(nodo, tablaSimbolos)
elif isinstance(nodo, SAlterTableAddUnique):
print("Agregando unique-------")
AlterTableUnique(nodo, tablaSimbolos)
elif isinstance(nodo, SAlterTableAddFK):
print("Agregando llave foranea--------")
AlterTableFK(nodo, tablaSimbolos)
elif isinstance(nodo, SAlterTable_AlterColumn):
print("Alter column--------------")
print(nodo.idtabla)
for col in nodo.columnas:
print(col.idcolumna)
elif isinstance(nodo, SAlterTableDrop):
print("Alter drop----------")
print(nodo.idtabla)
print("Es un constraint?")
print(nodo.idco)
elif isinstance(nodo, SCrearTabla):
crearTabla(nodo, tablaSimbolos)
# FRANCISCO
elif isinstance(nodo, Squeries):
print("Entró a Query")
if nodo.ope == False:
print("Query Simple")
if isinstance(nodo.query1, SQuery):
Qselect = nodo.query1.select
Qffrom = nodo.query1.ffrom
Qwhere = nodo.query1.where
Qgroupby = nodo.query1.groupby
Qhaving = nodo.query1.having
Qorderby = nodo.query1.orderby
Qlimit = nodo.query1.limit
# SELECT
if isinstance(Qselect, SSelectCols):
print("Entro a Select")
# Distinct
if Qselect.distinct != False:
print("Distinct True")
# Cantidad de columnas
if Qselect.cols == "*":
print("Todas las Columnas")
else:
print("Columnas Específicas")
for col in Qselect.cols:
##LISTAS
if isinstance(col.cols, SExpresion):
print("Expre")
print(col.cols.valor)
# print("Tipo")
# print(col.cols.tipo)
elif isinstance(col.cols, SOperacion):
print("Operación")
if isinstance(col.cols.opIzq, SExpresion):
print(col.cols.opIzq.valor)
print(col.cols.operador)
print(col.cols.opDer.valor)
##FUNCIONES DE AGREGACION
elif isinstance(col.cols, SFuncAgregacion):
print("Funcion Agregación:")
print(col.cols.funcion)
if isinstance(col.cols.param, SExpresion):
print("val")
print(col.cols.param.valor)
else:
print("val")
print(col.cols.param)
##FUNCIONES MATH
elif isinstance(col.cols, SFuncMath):
print("Funcion Math:")
print(col.cols.funcion)
if isinstance(col.cols.param, SExpresion):
print("param")
print(col.cols.param.valor)
else:
print("param")
print(col.cols.param)
elif isinstance(col.cols, SFuncMath2):
print("Funcion Math2:")
print(col.cols.funcion)
if isinstance(col.cols.param, SExpresion):
print("params")
print(col.cols.param.valor)
print(col.cols.param2.valor)
else:
print("params")
print(col.cols.param)
print(col.cols.param2)
elif isinstance(col.cols, SFuncMathSimple):
print("Funcion MathSimple:")
print(col.cols.funcion)
##FUNCIONES TRIG
elif isinstance(col.cols, SFuncTrig):
print("Funcion Trig1:")
print(col.cols.funcion)
if isinstance(col.cols.param, SExpresion):
print("param")
print(col.cols.param.valor)
else:
print("param")
print(col.cols.param)
elif isinstance(col.cols, SFuncTrig2):
print("Funcion Trig2:")
print(col.cols.funcion)
if isinstance(col.cols.param, SExpresion):
print("params")
print(col.cols.param.valor)
print(col.cols.param2.valor)
else:
print("params")
print(col.cols.param)
print(col.cols.param2)
##FUNCIONES BINARIAS
elif isinstance(col.cols, SFuncBinary):
print("Funcion Binaria1:")
print(col.cols.funcion)
if isinstance(col.cols.param, SExpresion):
print("param")
print(col.cols.param.valor)
else:
print("param")
print(col.cols.param)
elif isinstance(col.cols, SFuncBinary2):
print("Funcion Binaria2:")
print(col.cols.funcion)
if isinstance(col.cols.param, SExpresion):
print("params")
print(col.cols.param.valor)
print(col.cols.param2.valor)
else:
print("params")
print(col.cols.param)
print(col.cols.param2)
elif isinstance(col.cols, SFuncBinary3):
print("Funcion Binaria3:")
print(col.cols.funcion)
if isinstance(col.cols.param, SExpresion):
print("params")
print(col.cols.param.valor)
print(col.cols.param.det)
print(col.cols.param2.valor)
else:
print("params")
print(col.cols.param)
print(col.cols.det)
print(col.cols.param2)
elif isinstance(col.cols, SFuncBinary4):
print("Funcion Binaria4:")
print(col.cols.funcion)
if isinstance(col.cols.param, SExpresion):
print("params")
print(col.cols.param.valor)
print(col.cols.param2.valor)
print(col.cols.param3.valor)
else:
print("params")
print(col.cols.param)
print(col.cols.param2)
print(col.cols.param3)
# EXTRACT
elif isinstance(col.cols, SExtract):
print("Funcion Extract:")
if isinstance(col.cols.field, STipoDato):
print(col.cols.field.dato)
print(col.cols.field.tipo)
print(col.cols.field.cantidad)
print(col.cols.timestampstr)
elif isinstance(col.cols, SExtract2):
print("Funcion Extract2:")
if isinstance(col.cols.field, STipoDato):
print(col.cols.field.dato)
print(col.cols.dtype.dato)
if isinstance(col.cols.timestampstr, SExpresion):
print("param")
print(col.cols.timestampstr.valor)
# FUNCIONES DE FECHA
elif isinstance(col.cols, SSelectFunc):
print("Funcion getFecha:")
print(col.cols.id)
elif isinstance(col.cols, SFechaFunc):
print("Funcion Fecha:")
if isinstance(col.cols.param, STipoDato):
print(col.cols.param.valor)
print(col.cols.param2.valor)
else:
print(col.cols.param)
print(col.cols.param2)
elif isinstance(col.cols, SFechaFunc2):
print("Funcion Fecha2:")
print(col.cols.id)
print(col.cols.param)
print(col.cols.tipo)
print(col.cols.param2)
# CASE
elif isinstance(col.cols, SCase):
print("Funcion Case:")
if isinstance(col.cols.casos, SCaseList):
print(col.cols.casos.param)
print(col.cols.casos.param2)
print(col.cols.casos.clist)
elif isinstance(col.cols, SCaseElse):
print("Funcion CaseElse:")
if isinstance(col.cols.casos, SCaseList):
print(col.cols.casos.param)
print(col.cols.casos.param2)
print(col.cols.casos.clist)
print(col.cols.casoelse)
# OTRAS FUNCIONES
elif isinstance(col.cols, SColumnasSubstr):
print("Funcion Substr:")
print(col.cols.st)
print(col.cols.st2)
print(col.cols.st3)
elif isinstance(col, SColumnasGreatest):
print("Funcion Greatest:")
print(col.cols)
elif isinstance(col.cols, SColumnasLeast):
print("Funcion Least:")
print(col.cols)
else:
print("Otro")
print(col.cols)
# ALIAS
if col.id != False:
if isinstance(col.id, SExpresion):
print("Alias")
print(col.id.valor)
# FROM
if isinstance(Qffrom, SFrom):
print("entro al From")
for col in Qffrom.clist:
if isinstance(col, SAlias):
if col.alias == False:
print("id")
print(col.id)
else:
print("id/alias")
print(col.id)
print(col.alias)
elif isinstance(Qffrom, SFrom2):
print("entro al From2")
# Subquerie
print(Qffrom.clist)
print(Qffrom.id)
else:
print("Otro From")
# WHERE
if isinstance(Qwhere, SWhere):
print("entro al Where")
for col in Qwhere.clist:
if isinstance(col, SWhereCond1):
print("Es where1")
print(col.conds)
# print(col.conds.param.opIzq.valor)
# print(col.conds.param.operador)
# print(col.conds.param.opDer.valor)
elif isinstance(col, SWhereCond2):
print("Es where2")
print(col.conds)
print(col.isnotNull)
elif isinstance(col, SWhereCond3):
print("Es where3")
print(col.conds)
print(col.directiva)
elif isinstance(col, SWhereCond4):
print("Es where4")
print(col.conds)
print(col.ffrom)
elif isinstance(col, SWhereCond5):
print("Es where5")
print(col.c1)
print(col.c2)
print(col.c3)
elif isinstance(col, SWhereCond6):
print("Es where6")
print(col.cols)
elif isinstance(col, SWhereCond7):
print("Es where7")
print(col.efunc)
print(col.qcols)
print(col.anyallsome)
print(col.operador)
elif isinstance(col, SWhereCond8):
print("Es where8")
print(col.qcols)
print(col.efunc)
elif isinstance(col, SWhereCond9):
print("Es where9")
print(col.between)
print(col.efunc)
print(col.efunc2)
else:
print("Otro Where")
# GROUP BY
if isinstance(Qgroupby, SGroupBy):
print("entro al Group By")
for col in Qgroupby.slist:
if isinstance(col, SExpresion):
print("Agrupado por")
print(col.valor)
else:
print("Agrupado por")
print(col)
# HAVING
if isinstance(Qhaving, SHaving):
print("entro al Having")
print(Qhaving.efunc)
# ORDER BY
if isinstance(Qorderby, sOrderBy):
print("entro al Order By")
for col in Qorderby.slist:
if isinstance(col, SListOrderBy):
if col.ascdesc == False and col.firstlast == False:
print("OrderBy1")
print(col.listorder)
elif col.ascdesc == False and col.firstlast != False:
print("OrderBy2")
print(col.listorder)
print(col.firstlast)
elif col.ascdesc != False and col.firstlast == False:
print("OrderBy3")
print(col.listorder)
print(col.ascdesc)
elif col.ascdesc != False and col.firstlast != False:
print("OrderBy4")
print(col.listorder)
print(col.ascdesc)
print(col.firstlast)
# LIMIT
if isinstance(Qlimit, SLimit):
print("Entro a Limit")
if isinstance(Qlimit.limit, SExpresion):
print(Qlimit.limit.valor)
else:
print(Qlimit.limit)
if isinstance(Qlimit.offset, SExpresion):
print(Qlimit.offset.valor)
else:
print(Qlimit.offset)
else:
print("Query anidada")
for i in listaSemanticos:
print(i)
return consola
def crearBase(nodo, tablaSimbolos):
    val = nodo.id.valor
    global consola
    # Normalize the optional OWNER and MODE values (False means "not specified")
    owner = nodo.owner if nodo.owner != False else None
    mode = nodo.mode if nodo.mode != False else None
    if nodo.replace != False and nodo.exists == False:
        # CREATE OR REPLACE: drop any existing database before creating it again
        jBase.dropDatabase(val)
    if nodo.exists == False:
        # plain CREATE DATABASE (or CREATE OR REPLACE after the drop above)
        if jBase.createDatabase(val) == 0:
            bd = TS.SimboloBase(val, owner, mode)
            tablaSimbolos.put(val, bd)
            consola += "Base de datos " + val + " creada. \n"
        else:
            consola += "Error al crear la base de datos \n"
    elif nodo.replace == False and nodo.exists != False:
        # CREATE DATABASE IF NOT EXISTS: create once and report if it already existed
        resultado = jBase.createDatabase(val)
        if resultado == 0:
            bd = TS.SimboloBase(val, owner, mode)
            tablaSimbolos.put(val, bd)
            consola += "Base de datos " + val + " creada. \n"
        elif resultado == 2:
            consola += "La base de datos " + val + " ya existe. \n"
        else:
            consola += "Error al crear la base de datos \n"
def crearTabla(nodo, tablaSimbolos):
val = nodo.id
global consola
if nodo.herencia == False:
contador = 0
nueva = TS.SimboloTabla(val, None)
for col in nodo.columnas:
pk = False
default_ = None
check = None
null = True
unique = False
if isinstance(col, SColumna):
if col.opcionales != None:
for opc in col.opcionales:
if isinstance(opc, SOpcionales):
if opc.tipo == TipoOpcionales.PRIMARYKEY:
pk = True
elif opc.tipo == TipoOpcionales.DEFAULT:
default_ = opc.valor
elif opc.tipo == TipoOpcionales.CHECK:
if opc.id == None:
check = {"id": col.id + "_check", "condicion": opc.valor}
listaConstraint.append(
TS.Constraints(useActual, val, col.id + "_check", col.id, "check"))
else:
check = {"id": opc.id, "condicion": opc.valor}
listaConstraint.append(
TS.Constraints(useActual, val, opc.id, col.id, "check"))
elif opc.tipo == TipoOpcionales.NULL:
null = True
elif opc.tipo == TipoOpcionales.NOTNULL:
null = False
elif opc.tipo == TipoOpcionales.UNIQUE:
if opc.id == None:
unique = col.id + "_unique"
listaConstraint.append(
TS.Constraints(useActual, val, col.id + "_unique", col.id, "unique"))
else:
unique = opc.id
listaConstraint.append(
TS.Constraints(useActual, val, opc.id, col.id, "unique"))
colnueva = TS.SimboloColumna(col.id, col.tipo, pk, None, unique, default_, null, check)
nueva.crearColumna(col.id, colnueva)
if colnueva == None:
listaSemanticos.append(
Error.ErrorS("Error Semantico", "Ya existe una columna con el nombre " + col.id))
else:
auxc = TS.SimboloColumna(col.id, col.tipo, False, False, False, False, False, False)
nueva.crearColumna(col.id, auxc)
elif isinstance(col, SColumnaUnique):
for id in col.id:
if nueva.modificarUnique(id.valor, True, id.valor + "_unique") == None:
listaSemanticos.append(
Error.ErrorS("Error Semantico", "No se encontró la columna con id " + id.valor))
else:
listaConstraint.append(TS.Constraints(useActual, val, id.valor + "_unique", id.valor, "unique"))
elif isinstance(col, SColumnaCheck):
condicion = col.condicion
opIzq = condicion.opIzq
idcol = opIzq.valor
result = False
if col.id == None:
result = nueva.modificarCheck(idcol, col.condicion, idcol + "_check")
listaConstraint.append(TS.Constraints(useActual, val, idcol + "_check", idcol, "check"))
else:
result = nueva.modificarCheck(idcol, condicion, col.id)
listaConstraint.append(TS.Constraints(useActual, val, col.id, idcol, "check"))
if result != True:
listaSemanticos.append(Error.ErrorS("Error Semantico", "No se encontró la columna con id " + idcol))
elif isinstance(col, SColumnaFk):
for i in range(len(col.idlocal)):
idlocal = col.idlocal[i].valor
idfk = col.idfk[i].valor
columnafk = tablaSimbolos.getColumna(useActual, col.id, idfk)
columnalocal = nueva.getColumna(idlocal)
if columnafk != None and columnalocal != None:
if columnafk.tipo.tipo == columnalocal.tipo.tipo:
nueva.modificarFk(idlocal, col.id, idfk)
listaFK.append(TS.llaveForanea(useActual, val, col.id, idlocal, idfk))
else:
listaSemanticos.append(Error.ErrorS("Error Semantico",
"La columna %s y la columna %s no tienen el mismo tipo" % (
idlocal, idfk)))
else:
listaSemanticos.append(
Error.ErrorS("Error Semantico", "No se encontró la columna"))
elif isinstance(col, SColumnaPk):
for id in col.id:
if nueva.modificarPk(id.valor) == None:
listaSemanticos.append(
Error.ErrorS("Error Semantico", "No se encontró la columna " + id.valor))
contador += 1
base = tablaSimbolos.get(useActual)
base.crearTabla(val, nueva)
tt = jBase.createTable(useActual, nodo.id, contador)
if tt == 0:
consola += "La tabla " + nodo.id + " se creó con éxito. \n"
elif tt == 1:
consola += "Error en la operación al crear la tabla " + nodo.id + "\n"
elif tt == 2:
consola += "La base de datos " + useActual + " no existe. \n"
else:
consola += "La tabla " + nodo.id + " ya existe. \n"
def AlterDatabase(nodo, tablaSimbolos):
global consola
if nodo.rename:
b = jBase.alterDatabase(nodo.id.valor, nodo.idnuevo)
if b == 0:
base = tablaSimbolos.renameBase(nodo.id.valor, nodo.idnuevo)
if base:
for fk in listaFK:
if fk.idbase == nodo.id.valor:
fk.idbase = nodo.idnuevo
for cons in listaConstraint:
if cons.idbase == nodo.id.valor:
cons.idbase = nodo.idnuevo
consola += "La base se renombró con éxito " + nodo.idnuevo + " \n"
else:
consola += "Error no se pudo renombrar la base " + nodo.id.valor + " en la tabla de simbolos \n"
elif b == 2:
listaSemanticos.append(Error.ErrorS("Error Semantico", "La base de datos " + nodo.id.valor + " no existe"))
elif b == 3:
listaSemanticos.append(Error.ErrorS("Error Semantico", "La base de datos ya existe " + nodo.idnuevo))
elif b == 1:
listaSemanticos.append(Error.ErrorS("Error Semantico", "Error en la operacion."))
def AlterAddColumn(nodo, tablaSimbolos):
global consola
global useActual
base = tablaSimbolos.get(useActual)
tabla = base.getTabla(nodo.idtabla)
for col in nodo.listaColumnas:
auxcol = TS.SimboloColumna(col.idcolumna, col.tipo, False, None, None, None, True, None)
if tabla.crearColumna(col.idcolumna, auxcol):
b = jBase.alterAddColumn(useActual, nodo.idtabla, col.idcolumna)
if b == 0:
consola += "La columna " + col.idcolumna + " se agregó a la tabla " + nodo.idtabla + " \n"
elif b == 1:
listaSemanticos.append(Error.ErrorS("Error Semantico", "Error en la operacion."))
elif b == 2:
listaSemanticos.append(Error.ErrorS("Error Semantico", "Error la base " + useActual + "no existe"))
elif b == 3:
listaSemanticos.append(Error.ErrorS("Error Semantico", "Error la tabla " + nodo.idtabla + "no existe"))
else:
consola += "Error al crear la columna " + col.idcolumna + " \n"
def AlterRenameColumn(nodo, tablaSimbolos):
base = tablaSimbolos.get(useActual)
tabla = base.getTabla(nodo.idtabla)
global consola
op = tabla.renameColumna(nodo.idcolumna, nodo.idnuevo)
if op == 0:
for fk in listaFK:
if fk.idcfk == nodo.idcolumna:
fk.idcfk = nodo.idnuevo
tablaRF = base.getTabla(fk.idtlocal)
columnaRF = tablaRF.getColumna(fk.idclocal)
columnaRF.foreign_key["columna"] = nodo.idnuevo
elif fk.idclocal == nodo.idcolumna:
fk.idclocal = nodo.idnuevo
for cons in listaConstraint:
if cons.idcol == nodo.idcolumna:
cons.idcol = nodo.idnuevo
consola += "Se cambio el nombre de la columna " + nodo.idcolumna + " a " + nodo.idnuevo + " con exito \n"
elif op == 1:
listaSemanticos.append(Error.ErrorS("Error Semantico", "La columna con nombre " + nodo.idnuevo + " ya existe"))
elif op == 2:
listaSemanticos.append(Error.ErrorS("Error Semantico", "La columna con nombre " + nodo.idactual + " no existe"))
def AlterRenameTable(nodo, tablaSimbolos):
global useActual
global consola
base = tablaSimbolos.get(useActual)
op = base.renameTable(nodo.idactual, nodo.idnuevo)
if op == 0:
lib = jBase.alterTable(useActual, nodo.idactual, nodo.idnuevo)
if lib == 0:
for fk in listaFK:
if fk.idtfk == nodo.idactual:
fk.idtfk = nodo.idnuevo
tablaRF = base.getTabla(fk.idtlocal)
columnaRF = tablaRF.getColumna(fk.idclocal)
columnaRF.foreign_key["tabla"] = nodo.idnuevo
elif fk.idtlocal == nodo.idactual:
fk.idtlocal = nodo.idnuevo
for cons in listaConstraint:
if cons.idtabla == nodo.idactual:
cons.idtabla = nodo.idnuevo
consola += "La tabla " + nodo.idactual + " se cambio a " + nodo.idnuevo + " exitosamente \n"
elif lib == 1:
listaSemanticos.append(Error.ErrorS("Error Semantico", "Error en la operacion."))
elif lib == 2:
listaSemanticos.append(Error.ErrorS("Error Semantico", "La base de datos " + useActual + " no existe"))
elif lib == 3:
listaSemanticos.append(Error.ErrorS("Error Semantico", "La tabla " + nodo.idactual + " no existe"))
elif lib == 4:
listaSemanticos.append(Error.ErrorS("Error Semantico", "La tabla " + nodo.idnuevo + " ya existe"))
elif op == 1:
listaSemanticos.append(Error.ErrorS("Error Semantico", "La tabla con nombre " + nodo.idnuevo + " ya existe"))
elif op == 2:
listaSemanticos.append(Error.ErrorS("Error Semantico", "La tabla con nombre " + nodo.idactual + " no existe"))
def AlterTableCheck(nodo, tablaSimbolos):
global useActual
base = tablaSimbolos.get(useActual)
tabla = base.getTabla(nodo.idtabla)
condicion = nodo.expresion
opIzq = condicion.opIzq
idcol = opIzq.valor
result = False
global consola
if nodo.idcons == None:
result = tabla.modificarCheck(idcol, condicion, idcol + "_check")
listaConstraint.append(TS.Constraints(useActual, nodo.idtabla, idcol + "_check", idcol, "check"))
consola += "Se agrego el check a la columna " + idcol + " exitosamente \n"
else:
result = tabla.modificarCheck(idcol, condicion, nodo.idcons)
listaConstraint.append(TS.Constraints(useActual, nodo.idtabla, nodo.idcons, idcol, "check"))
consola += "Se agrego el check a la columna " + idcol + " exitosamente \n"
if result != True:
listaSemanticos.append(Error.ErrorS("Error Semantico", "No se encontró la columna con id " + idcol))
def AlterTableUnique(nodo, tablaSimbolos):
global consola
global useActual
base = tablaSimbolos.get(useActual)
tabla = base.getTabla(nodo.idtabla)
if tabla.modificarUnique(nodo.idcolumna, True, nodo.idconstraint):
listaConstraint.append(TS.Constraints(useActual, nodo.idtabla, nodo.idconstraint, nodo.idcolumna, "unique"))
consola += "Se agrego el unique a la columna " + nodo.idcolumna + " exitosamente \n"
else:
listaSemanticos.append(
Error.ErrorS("Error Semantico", "No se encontró la columna con id " + nodo.idcolumna))
def AlterTableFK(nodo, tablaSimbolos):
global useActual
global consola
base = tablaSimbolos.get(useActual)
tabla = base.getTabla(nodo.idtabla)
for i in range(len(nodo.idlocal)):
idlocal = nodo.idlocal[i].valor
idfk = nodo.idfk[i].valor
columnafk = tablaSimbolos.getColumna(useActual, nodo.idtablafk, idfk)
columnalocal = tabla.getColumna(idlocal)
if columnafk != None and columnalocal != None:
if columnafk.tipo.tipo == columnalocal.tipo.tipo:
tabla.modificarFk(idlocal, nodo.idtablafk, idfk)
listaFK.append(TS.llaveForanea(useActual, nodo.idtabla, nodo.idtablafk, idlocal, idfk))
consola += "Se agrego la llave foranea a " + idlocal + " exitosamente \n"
else:
listaSemanticos.append(Error.ErrorS("Error Semantico",
"La columna %s y la columna %s no tienen el mismo tipo" % (
idlocal, idfk)))
else:
listaSemanticos.append(
Error.ErrorS("Error Semantico", "No se encontró la columna"))
```
#### File: team14/Expresion/Terminal.py
```python
from Expresion.Expresion import Expresion
from datetime import date
from datetime import datetime
from Entorno import Entorno
import random as rn
import math
class Terminal(Expresion) :
'''
Esta clase representa un terminal.
'''
def __init__(self,tipo,valor) :
Expresion.__init__(self)
self.tipo=tipo
self.valor=valor
def getval(self,entorno):
if self.tipo=='identificador':
'buscar columna'
if self.valor == 'CURRENT_DATE':
return date.today()
elif self.valor== 'CURRENT_TIME' or (self.valor=='now' and self.tipo=='timestamp without time zone'):
return datetime.now()
elif(self.valor=='random'):
return rn.random()  # SQL random() yields a float in [0, 1)
elif (self.valor=="pi"):
return math.pi
return self.valor
```
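The `getval` dispatch above special-cases a few built-in identifiers (CURRENT_DATE, CURRENT_TIME/now, random, pi) and otherwise returns the literal value. A minimal usage sketch, assuming the repository's `Expresion` and `Entorno` packages are importable so `Terminal` is in scope; the `entorno` argument is unused for these constant cases:

```python
# Hypothetical driver, not part of the original file.
t1 = Terminal('date', 'CURRENT_DATE')
t2 = Terminal('decimal', 'pi')
t3 = Terminal('integer', 42)

print(t1.getval(None))   # today's date (datetime.date)
print(t2.getval(None))   # 3.141592653589793
print(t3.getval(None))   # any other literal falls through unchanged: 42
```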
#### File: BDTytus/TypeCheck/ListaAtributos.py
```python
import TypeCheck.Atributo as Atributo
class ListaAtributos:
def __init__(self):
self.columnNumber = 0
self.primero=None
self.ultimo=None
def agregarAtributo(self,nuevo:Atributo):
self.columnNumber = self.columnNumber + 1
nuevo.columnNumber = self.columnNumber
if self.primero is None:
self.primero=nuevo
self.ultimo=nuevo
else:
self.ultimo.siguiente=nuevo
nuevo.anterior=self.ultimo
self.ultimo=nuevo
def existeAtributo(self,columna:str):
existe = False
actual = self.primero
while(actual!=None):
if actual.nombre == columna:
existe = True
break
actual = actual.siguiente
return existe
def obtenerTipoAtributo(self,columna:str):
actual = self.primero
while(actual!=None):
if actual.nombre == columna:
return actual.tipo
actual = actual.siguiente
return None
def alterAddPK(self,columns:list):
#0:Encontro todas las columnas, 5:columnas fuera de limites
for columna in columns:
encontrado = False
actual = self.primero
while(actual!=None):
if(actual.nombre==columna):
actual.isPrimary = True
encontrado = True
break
actual = actual.siguiente
if encontrado == False:
return 5
return 0
def alterDropPK(self):
actual = self.primero
while(actual!=None):
actual.isPrimary = False
actual = actual.siguiente
def eliminiarNAtributo(self,niteracion:int):
contador = 1
actual = self.primero
atras = None
while(contador<=niteracion and actual!=None):
if contador == niteracion:
if actual == self.primero:
self.primero = self.primero.siguiente
self.primero.anterior = None
else:
atras.siguiente = actual.siguiente
actual.siguiente.anterior = actual.anterior
contador += 1
atras = actual
actual = actual.siguiente
```
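ListaAtributos is a doubly linked list of column attributes keyed by name, with integer return codes for the ALTER operations (0 means every listed column was found, 5 means one was missing). A self-contained sketch of how it is typically driven; the `Atributo` stub below is hypothetical and only mirrors the fields this list actually touches (`nombre`, `tipo`, `isPrimary`, `columnNumber`, `siguiente`, `anterior`):

```python
# Hypothetical stand-in for TypeCheck.Atributo, for illustration only.
class Atributo:
    def __init__(self, nombre, tipo):
        self.nombre = nombre
        self.tipo = tipo
        self.isPrimary = False
        self.columnNumber = 0
        self.siguiente = None
        self.anterior = None

lista = ListaAtributos()                      # class defined in the file above
lista.agregarAtributo(Atributo('id', 'integer'))
lista.agregarAtributo(Atributo('nombre', 'varchar'))

print(lista.existeAtributo('id'))             # True
print(lista.obtenerTipoAtributo('nombre'))    # 'varchar'
print(lista.alterAddPK(['id']))               # 0: every listed column exists
print(lista.alterAddPK(['no_existe']))        # 5: column not found
lista.alterDropPK()                           # clears isPrimary on every column
```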
#### File: BDTytus/TypeCheck/Type_Checker.py
```python
import TypeCheck.ListaBases as ListaBases
import TypeCheck.Base as Base
import TypeCheck.ListaTablas as ListaTablas
import TypeCheck.Tabla as Tabla
import TypeCheck.ListaAtributos as ListaAtributos
import TypeCheck.Atributo as Atributo
import TypeCheck.ListaEnums as ListaEnums
import TypeCheck.Enum as Enum
import TypeCheck.ListaConstraints as ListaConstraints
import TypeCheck.Constraint as Constraint
import data.jsonMode as JM
lista_bases = ListaBases.ListaBases()
lista_enums = ListaEnums.ListaEnums()
# Clase TypeChecker del proyecto que representa la comprobación de tipos
def createDataBase(basedatos: str, modo: int = 1, owner=None):
# 0: exitoso, 1: error en la operación, 2: base de datos existente
respuesta = JM.createDatabase(basedatos)
if respuesta != 0:
return respuesta
if lista_bases.existeBaseDatos(basedatos):
return 2
else:
lista_bases.agregarBase(Base.Base(basedatos, owner, modo))
return 0
def showDataBases():
return JM.showDatabases()
def alterDataBase(dataBaseOld: str, dataBaseNew: str):
# 0: exitoso, 1: error en la operación, 2: dataBaseOld no existente, 3: dataBaseNew existente
respuesta = JM.alterDatabase(dataBaseOld,dataBaseNew)
if respuesta == 0:
return lista_bases.modificarNombreBase(dataBaseOld, dataBaseNew)
return respuesta
def alterDataBaseOwner(database:str,owner:str):
# 0: exitoso, 1: error en la operación, 2: dataBase no existente
return lista_bases.modificarOwnerBase(database,owner)
def dropDataBase(database: str):
# 0:operación exitosa, 1: error en la operación, 2: base de datos no existente
respuesta = JM.dropDatabase(database)
if respuesta == 0:
return lista_bases.eliminarBaseDatos(database)
return respuesta
def obtenerBase(database: str):
actual = lista_bases.primero
while(actual != None):
if actual.nombreBase == database:
break
actual = actual.siguiente
return actual
def createTable(database: str, table: str, numberColumns: int):
# 0:operación exitosa, 1: error en la operación, 2: base de datos inexistente, 3: tabla existente
respuesta = JM.createTable(database,table,numberColumns)
#si correcto entonces guardo en typechecker
if respuesta == 0:
actual = obtenerBase(database)
if(actual != None):
actual.listaTablas.agregarTabla(Tabla.Tabla(table))
return respuesta
def showTables(database:str):
respuesta = JM.showTables(database)
return respuesta
def createColumn(database:str,table:str,nombre:str,tipo:str):
# 0:operación exitosa, 1: error en la operación, 2: base de datos inexistente, 3: tabla inexistente, 4: columna ya existente
actualBase = obtenerBase(database)
if(actualBase!=None):
# Verificamos si la tabla existe
if not actualBase.listaTablas.existeTabla(table):
return 3
else:
actualTabla = actualBase.listaTablas.obtenerTabla(table)
if actualTabla.listaAtributos.existeAtributo(nombre):
return 4
else:
actualTabla.listaAtributos.agregarAtributo(Atributo.Atributo(nombre,tipo))
return 0
else:
return 2
def createAtributo(database:str,table:str,nombreCol:str,nuevo:Atributo):
# 0:operación exitosa, 1: error en la operación, 2: base de datos inexistente, 3: tabla inexistente, 4: columna ya existente
actualBase = obtenerBase(database)
if(actualBase!=None):
# Verificamos si la tabla existe
if not actualBase.listaTablas.existeTabla(table):
return 3
else:
actualTabla = actualBase.listaTablas.obtenerTabla(table)
if actualTabla.listaAtributos.existeAtributo(nombreCol):
return 4
else:
actualTabla.listaAtributos.agregarAtributo(nuevo)
return 0
else:
return 2
def obtenerTipoColumna(database:str,table:str,nombreColumna:str):
# Retorna el tipo de la columna, sino retorna None
actualBase = obtenerBase(database)
if(actualBase!=None):
# Verificamos si la tabla existe
if not actualBase.listaTablas.existeTabla(table):
return None
else:
actualTabla = actualBase.listaTablas.obtenerTabla(table)
if actualTabla.listaAtributos.existeAtributo(nombreColumna):
return actualTabla.listaAtributos.obtenerTipoAtributo(nombreColumna)
return None
else:
return None
def registarEnum(nombre:str,tipos:list):
# 0: operación exitosa, 1: Enum ya existe
if not lista_enums.existeEnum(nombre):
lista_enums.createEnum(nombre,tipos)
return 0
return 1
def obtenerTiposEnum(nombre:str):
# Devuelve los tipos o None
if lista_enums.existeEnum(nombre):
return lista_enums.obtenerTipos(nombre)
return None
def createConstraint(database:str, table:str, nuevo:Constraint):
# 0:operación exitosa, 1: error en la operación
actualBase = obtenerBase(database)
if(actualBase!=None):
# Verificamos si la tabla existe
if actualBase.listaTablas.existeTabla(table):
actualTabla = actualBase.listaTablas.obtenerTabla(table)
actualTabla.listaConstraints.agregarConstraint(nuevo)
return 0
return 1
def alterAddPK(database:str,table:str,columns:list):
#0:operación exitosa, 1:error en la operación, 2:database no existente, 3:table no existente, 4:llave primaria existente, 5:columnas fuera de límites
respuesta = JM.alterAddPK(database,table,columns)
if respuesta == 0:
baseActual = obtenerBase(database)
if(baseActual!=None):
actualTabla = baseActual.listaTablas.obtenerTabla(table)
if(actualTabla!=None):
actualTabla.listaAtributos.alterAddPK(columns)
return respuesta
def alterDropPk(database:str,table:str):
# 0:operación exitosa, 1:error en la operación, 2:database no existente, 3:table no existente, 4:pk no existente
respuesta = JM.alterDropPK(database,table)
if respuesta == 0:
baseActual = obtenerBase(database)
if (baseActual != None):
actualTabla = baseActual.listaTablas.obtenerTabla(table)
if (actualTabla != None):
actualTabla.listaAtributos.alterDropPK()
return respuesta
def alterTable(database: str, tableOld: str, tableNew: str) -> int:
#0 operación exitosa, 1 error en la operación, 2 database no existente, 3 tableOld no existente, 4 tableNew existente.
respuesta = JM.alterTable(database,tableOld,tableNew)
if respuesta == 0:
baseActual = obtenerBase(database)
if (baseActual != None):
actualTabla = baseActual.listaTablas.obtenerTabla(tableOld)
if(actualTabla!=None):
actualTabla.nombreTabla = tableNew
return respuesta
def alterAddColumn(database: str, table: str, default: any) -> int:
#0 operación exitosa, 1 error en la operación, 2 database no existente, 3 table no existente.
respuesta = JM.alterAddColumn(database,table,default)
if respuesta == 0:
baseActual = obtenerBase(database)
if (baseActual != None):
actualTabla = baseActual.listaTablas.obtenerTabla(table)
if (actualTabla != None):
actualTabla.listaAtributos.agregarAtributo(Atributo.Atributo.iniciar_Solo_Default(default))
return respuesta
def alterDropColumn(database: str, table: str, columnNumber: int) -> int:
#0 operación exitosa, 1 error en la operación, 2 database no existente, 3 table no existente, 4 llave no puede eliminarse o tabla
respuesta = JM.alterDropColumn(database,table,columnNumber)
if respuesta == 0:
baseActual = obtenerBase(database)
if (baseActual != None):
actualTabla = baseActual.listaTablas.obtenerTabla(table)
if (actualTabla != None):
actualTabla.listaAtributos.eliminiarNAtributo(columnNumber)
return respuesta
```
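The functions above are thin wrappers that first hit the `data.jsonMode` storage layer and, on a 0 return code, mirror the change into the in-memory lists. A usage sketch under the assumption that this file is importable as `TypeCheck.Type_Checker` and that the TytusDB `data.jsonMode` backend is available; the database, table, and column names are made up:

```python
import TypeCheck.Type_Checker as tc

# 0: success, 1: operation error, 2: database already exists
print(tc.createDataBase('ventas', modo=1, owner='admin'))

# 0: success, 2: database missing, 3: table already exists
print(tc.createTable('ventas', 'clientes', 2))

# 0: success, 2: database missing, 3: table missing, 4: column already exists
print(tc.createColumn('ventas', 'clientes', 'id', 'integer'))
print(tc.createColumn('ventas', 'clientes', 'nombre', 'varchar'))

print(tc.obtenerTipoColumna('ventas', 'clientes', 'nombre'))   # 'varchar'
print(tc.alterAddPK('ventas', 'clientes', ['id']))             # 0 on success
```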
#### File: execution/AST/sentence.py
```python
class Sentence:
''' '''
class CreateDatabase(Sentence):
def __init__(self, name, ifNotExistsFlag, OrReplace, OwnerMode):
self.name = name
self.ifNotExistsFlag = ifNotExistsFlag
self.OrReplace = OrReplace
self.OwnerMode = OwnerMode
def graphAST(self, dot, padre):
dot += padre + '->' + str(hash(self)) + '\n'
dot += str(hash(self)) + '[label=\"CreateDatabase\"]\n'
dot += str(hash(self)) + '->' + \
str(hash("CREATE") + hash(self)) + '\n'
dot += str(hash("CREATE") + hash(self)) + \
'[label=\"' + "CREATE" + '\"]\n'
if(self.OrReplace):
dot += str(hash(self)) + '->' + \
str(hash("OR") + hash(self)) + '\n'
dot += str(hash("OR") + hash(self)) + \
'[label=\"' + "OR" + '\"]\n'
dot += str(hash(self)) + '->' + \
str(hash("REPLACE") + hash(self)) + '\n'
dot += str(hash("REPLACE") + hash(self)) + \
'[label=\"' + "REPLACE" + '\"]\n'
dot += str(hash(self)) + '->' + \
str(hash("DATABASE") + hash(self)) + '\n'
dot += str(hash("DATABASE") + hash(self)) + \
'[label=\"' + "DATABASE" + '\"]\n'
if(self.ifNotExistsFlag):
dot += str(hash(self)) + '->' + \
str(hash("IF") + hash(self)) + '\n'
dot += str(hash("IF") + hash(self)) + \
'[label=\"' + "IF" + '\"]\n'
dot += str(hash(self)) + '->' + \
str(hash("NOT") + hash(self)) + '\n'
dot += str(hash("NOT") + hash(self)) + \
'[label=\"' + "NOT" + '\"]\n'
dot += str(hash(self)) + '->' + \
str(hash("EXISTS") + hash(self)) + '\n'
dot += str(hash("EXISTS") + hash(self)) + \
'[label=\"' + "EXISTS" + '\"]\n'
dot += str(hash(self)) + '->' + \
str(hash(self.name) + hash(self)) + '\n'
dot += str(hash(self.name) + hash(self)) + \
'[label=\"' + self.name + '\"]\n'
if(self.OwnerMode[0] != None or self.OwnerMode[1] != None):
dot += str(hash(self)) + '->' + \
str(hash("ownerMode") + hash(self)) + '\n'
dot += str(hash("ownerMode") + hash(self)) + \
'[label=\"' + "ownerMode" + '\"]\n'
if(self.OwnerMode[0] != None):
dot += str(hash("ownerMode") + hash(self)) + '->' + \
str(hash("OWNER") + hash("ownerMode") + hash(self)) + '\n'
dot += str(hash("OWNER") + hash("ownerMode") + hash(self)) + \
'[label=\"' + "OWNER" + '\"]\n'
dot += str(hash("ownerMode") + hash(self)) + '->' + \
str(hash(self.OwnerMode[0]) + hash("ownerMode") + hash(self)) + '\n'
dot += str(hash(self.OwnerMode[0]) + hash("ownerMode") + hash(self)) + \
'[label=\"' + self.OwnerMode[0] + '\"]\n'
if(self.OwnerMode[1] != None):
dot += str(hash("ownerMode") + hash(self)) + '->' + \
str(hash("MODE") + hash("ownerMode") + hash(self)) + '\n'
dot += str(hash("MODE") + hash("ownerMode") + hash(self)) + \
'[label=\"' + "MODE" + '\"]\n'
dot += str(hash("ownerMode") + hash(self)) + '->' + \
str(hash("Expression") + hash("ownerMode") + hash(self)) + '\n'
dot += str(hash("Expression") + hash("ownerMode") + hash(self)) + \
'[label=\"' + "Expression" + '\"]\n'
#selfOwnerMode[1].graphAST(self,'',hash("Expression") + hash("ownerMode") + hash(self))
return dot
class ShowDatabases(Sentence):
''''''
def graphAST(self, dot, padre):
return ""
class DropDatabase(Sentence):
def __init__(self, name, ifExistsFlag):
self.name = name
self.ifExistsFlag = ifExistsFlag
def graphAST(self, dot, padre):
return ""
class DropTable(Sentence):
def __init__(self, name):
self.name = name
def graphAST(self, dot, padre):
return ""
class Use(Sentence):
def __init__(self, name):
self.name = name
def graphAST(self, dot, padre):
return ""
class AlterDatabaseRename(Sentence):
def __init__(self, oldname,newname):
self.oldname = oldname
self.newname = newname
def graphAST(self, dot, padre):
return ""
class AlterDatabaseOwner(Sentence):
def __init__(self, name, newowner):
self.name = name
self.newowner = newowner
def graphAST(self, dot, padre):
return ""
class AlterTableDropColumn(Sentence):
def __init__(self, table, column):
self.table = table
self.column = column
def graphAST(self, dot, padre):
return ""
class AlterTableAddConstraintUnique(Sentence):
def __init__(self, table, constraint, column):
self.table = table
self.constraint = constraint
self.column = column
def graphAST(self, dot, padre):
return ""
class AlterTableAddForeignKey(Sentence):
def __init__(self, table, column, rel_table, rel_column):
self.table = table
self.column = column
self.rel_table = rel_table
self.rel_column = rel_column
def graphAST(self, dot, padre):
return ""
class AlterTableAlterColumnSetNull(Sentence):
def __init__(self, table, column, null):
self.table = table
self.column = column
self.null = null
def graphAST(self, dot, padre):
return ""
class AlterTableAlterColumnType(Sentence):
def __init__(self, table, column, newtype):
self.table = table
self.column = column
self.newtype = newtype # type [type,length] or type = [type]
def graphAST(self, dot, padre):
return ""
class AlterTableAddColumn(Sentence):
def __init__(self, table, column, newtype):
self.table = table
self.column = column
self.type = newtype # newtype = [type,length] or newtype = [type]
def graphAST(self, dot, padre):
return ""
class AlterTableDropConstraint(Sentence):
def __init__(self, table, constraint):
self.table = table
self.constraint = constraint
def graphAST(self, dot, padre):
return ""
class Insert(Sentence):
def __init__(self, table, columns, values):
self.table = table
self.columns = columns
self.values = values
def graphAST(self, dot, padre):
return ""
class InsertAll(Sentence):
def __init__(self, table, values):
self.table = table
self.values = values
def graphAST(self, dot, padre):
return ""
class Delete(Sentence):
def __init__(self, table, expression):
self.table = table
self.expression = expression
def graphAST(self, dot, padre):
return ""
class Truncate(Sentence):
def __init__(self, tables):
self.tables = tables
def graphAST(self, dot, padre):
return ""
class Update(Sentence):
def __init__(self, table, values, expression):
self.table = table
self.values = values #values = [value1,value2,...,valuen] -> value = [id,expression]
self.expression = expression
def graphAST(self, dot, padre):
return ""
class CreateType(Sentence):
def __init__(self, name, expressions):
self.name = name
self.expressions = expressions #expressions = [expression1,expression2,...,expressionn]
def graphAST(self, dot, padre):
return ""
class CreateTable(Sentence):
def __init__(self, name, columns, inherits):
self.name = name
self.columns = columns #columns = [column1,column2,...,columnn] Every Column is an instance of {'id','check','constraint','unique','primary','foreign'}
self.inherits = inherits
#Types:
#column -> {ColumnId,ColumnCheck,ColumnConstraint,ColumnUnique,ColumnPrimaryKey,ColumnForeignKey}
def graphAST(self, dot, padre):
return ""
class Select(Sentence):
def __init__(self, columns, distinct, tables, options):
self.columns = columns
self.distinct = distinct
self.tables = tables
self.options = options # options = {'where','orderby','limit','offset','groupby','having'} or None
# options se puede acceder a los items de la forma options['nombrepropiedad'] si no existe devuelve 'nombrepropiedad'
# where -> Expression
# orderby -> SortExpressionList
# sortExpressionList -> lista de expresiones de la forma [Expression,ASC/DESC]
# limit -> Expression/ALL ALL is the same as omitting the LIMIT clause
# offset -> Expression OFFSET says to skip that many rows before beginning to return rows. OFFSET 0 is the same as omitting the OFFSET clause.
# If both OFFSET and LIMIT appear, then OFFSET rows are skipped before starting to count the LIMIT rows that are returned.
# groupby -> ExpressionList
# having -> Expression
def graphAST(self, dot, padre):
return ""
class SelectMultiple(Sentence):
def __init__(self, select1, operator, select2):
self.select1 = select1
self.operator = operator
self.select2 = select2
def graphAST(self, dot, padre):
return ""
class CreateTableOpt:
''' '''
class ColumnId(CreateTableOpt):
def __init__(self, name, typo, options):
self.name = name
self.type = typo
self.options = options #options = {'default','null','primary','reference','unique','constraint','check'}
# options se puede acceder a los items de la forma options['nombrepropiedad'] si no existe devuelve 'nombrepropiedad'
# default -> Expression
# null -> True/False
# primary -> True
# reference -> ID
# unique -> True
# constraintunique -> ID
# check -> Expression
# constraintcheck -> ID,Expression
def graphAST(self, dot, padre):
return ""
class ColumnCheck(CreateTableOpt):
def __init__(self, expression):
self.expression = expression
def graphAST(self, dot, padre):
return ""
class ColumnConstraint(CreateTableOpt):
def __init__(self, name,expression):
self.name = name
self.expression = expression
def graphAST(self, dot, padre):
return ""
class ColumnUnique(CreateTableOpt):
def __init__(self, columnslist):
self.columnslist = columnslist # is an idList [columnname1,columnname2,...,columnnamen]
def graphAST(self, dot, padre):
return ""
class ColumnPrimaryKey(CreateTableOpt):
def __init__(self, columnslist):
self.columnslist = columnslist # is an idList [columnname1,columnname2,...,columnnamen]
def graphAST(self, dot, padre):
return ""
class ColumnForeignKey(CreateTableOpt):
def __init__(self, columnslist, columnslist_ref):
self.columnslist = columnslist # is an idList [columnname1,columnname2,...,columnnamen]
self.columnslist_ref = columnslist_ref # is an idList [refcolumnname1,refcolumnname2,...,refcolumnnamen]
def graphAST(self, dot, padre):
return ""
```
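Each `graphAST` method appends Graphviz edge and label lines for its node and returns the accumulated string; the caller is expected to wrap that text in a `digraph`. A hypothetical driver (not part of the original file) for the one node type that already emits output, `CreateDatabase`:

```python
# Hypothetical driver: render a CreateDatabase node as Graphviz DOT text.
stmt = CreateDatabase(name='ventas', ifNotExistsFlag=True,
                      OrReplace=False, OwnerMode=(None, None))

root = 'n0'
body = stmt.graphAST('', root)          # parent node id; returns edges + labels
dot = 'digraph AST {\n' + root + '[label="ROOT"]\n' + body + '}'

with open('ast.dot', 'w') as f:
    f.write(dot)                         # render with: dot -Tpng ast.dot -o ast.png
```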
#### File: parser/team22/type_checker.py
```python
import re
import os
import json
from ts import Simbolo
from storageManager import jsonMode as jsonMode
from tabla_errores import *
from columna import *
class TypeChecker():
'Esta clase representa el type checker para la comprobación de tipos'
def __init__(self, tabla_simbolos, tabla_errores, consola, salida):
self.type_checker = {}
self.actual_database = ''
self.tabla_simbolos = tabla_simbolos
self.tabla_errores = tabla_errores
self.consola = consola
self.salida = salida
jsonMode.dropAll()
self.initCheck()
def createDatabase(self, database: str, line: int, mode: int = 1):
# 0 -> operación exitosa,
# 1 -> error en la operación,
# 2 -> base de datos existente
query_result = jsonMode.createDatabase(database)
if query_result == 0:
self.type_checker[database] = {}
self.tabla_simbolos.agregar(Simbolo(database, 'DATABASE', '', line))
self.consola.append(Codigos().database_successful_completion(database))
self.saveTypeChecker()
elif query_result == 1:
self.addError(Codigos().database_internal_error(database), line)
else:
self.addError(Codigos().database_duplicate_database(database), line)
def showDatabase(self, like: str = ''):
query_result = jsonMode.showDatabases()
if like == '':
self.salida.append(query_result)
else:
# SQL LIKE: '%' matches any sequence (possibly empty), '_' matches exactly one character
pattern = '^' + like.replace('%', '.*').replace('_', '.') + '$'
filtrada = []
for base in query_result:
if re.match(pattern, base):
filtrada.append(base)
self.salida.append(filtrada)
self.consola.append(Codigos().successful_completion('SHOW DATABASE'))
def alterDatabase(self, databaseOld: str, databaseNew: str, line: int):
# 0 -> operación exitosa
# 1 -> error en la operación
# 2 -> databaseOld no existente
# 3 -> databaseNew existente
query_result = jsonMode.alterDatabase(databaseOld, databaseNew)
if query_result == 0:
self.consola.append(Codigos().successful_completion('ALTER DATABASE'))
self.type_checker[databaseNew] = self.type_checker.pop(databaseOld)
self.tabla_simbolos.simbolos[databaseNew] = self.tabla_simbolos.simbolos.pop(databaseOld)
self.saveTypeChecker()
elif query_result == 1:
self.addError(Codigos().database_internal_error(databaseOld), line)
elif query_result == 2:
self.addError(Codigos().database_undefined_object(databaseOld), line)
else:
self.addError(Codigos().database_duplicate_database(databaseNew), line)
def dropDatabase(self, database: str, line: int):
# 0 -> operación exitosa
# 1 -> error en la operación
# 2 -> base de datos no existente
query_result = jsonMode.dropDatabase(database)
if query_result == 0:
self.consola.append(Codigos().successful_completion('DROP DATABASE «' + database + '»'))
self.type_checker.pop(database)
self.tabla_simbolos.simbolos.pop(database)
if self.actual_database == database:
self.actual_database = ''
self.saveTypeChecker()
elif query_result == 1:
self.addError(Codigos().database_internal_error(database), line)
else:
self.addError(Codigos().database_undefined_object(database), line)
def useDatabase(self, database: str, line: int):
if database in self.type_checker:
self.actual_database = database
self.consola.append(Codigos().successful_completion('USE DATABASE'))
else:
self.addError(Codigos().database_undefined_object(database), line)
def createTable(self, table: str, columns: list, line: int):
# 0 -> operación exitosa
# 1 -> error en la operación
# 2 -> base de datos inexistente
# 3 -> tabla existente
query_result = jsonMode.createTable(self.actual_database, table, len(columns))
if query_result == 0:
self.consola.append(Codigos().table_successful(table))
self.type_checker[self.actual_database][table] = {}
for columna in columns:
self.type_checker[self.actual_database][table][columna['nombre']] = columna['col']
self.saveTypeChecker()
elif query_result == 1:
self.addError(Codigos().database_internal_error(table), line)
elif query_result == 2:
self.addError(Codigos().database_undefined_object(self.actual_database), line)
else:
self.addError(Codigos().table_duplicate_table(table), line)
def initCheck(self):
if not os.path.exists('data'):
os.makedirs('data')
if not os.path.exists('data/json'):
os.makedirs('data/json')
if not os.path.exists('data/json/type_check'):
data = {}
with open('data/json/type_check', 'w') as file:
json.dump(data, file)
else:
with open('data/json/type_check') as file:
data = json.load(file)
for database in data:
for tabla in data[database]:
for columna in data[database][tabla]:
data[database][tabla][columna] = Columna(
tipo = data[database][tabla][columna]['tipo'],
default = data[database][tabla][columna]['default'],
is_null = TipoNull[data[database][tabla][columna]['is_null']],
is_primary = data[database][tabla][columna]['is_primary'],
references = data[database][tabla][columna]['references'],
is_unique = data[database][tabla][columna]['is_unique'],
constraints = data[database][tabla][columna]['constraints']
)
def saveTypeChecker(self):
with open('data/json/type_check', 'w') as file:
data = self.type_checker
for database in data:
for tabla in data[database]:
for columna in data[database][tabla]:
data[database][tabla][columna] = data[database][tabla][columna].json()
json.dump(data, file)
def addError(self, error, line):
self.consola.append(error)
self.tabla_errores.agregar(Error('Semántico', error, line))
```
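showDatabase translates an SQL LIKE pattern into a regular expression before filtering the database names. A standalone sketch of that translation (using the `%` to `.*` and `_` to `.` mapping; regex metacharacters in the pattern are not escaped here):

```python
import re

def like_to_regex(like: str) -> str:
    # SQL LIKE: '%' matches any sequence (possibly empty), '_' exactly one character
    return '^' + like.replace('%', '.*').replace('_', '.') + '$'

bases = ['ventas', 'ventas2020', 'rrhh']
print([b for b in bases if re.match(like_to_regex('venta%'), b)])  # ['ventas', 'ventas2020']
print([b for b in bases if re.match(like_to_regex('rr__'), b)])    # ['rrhh']
```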
#### File: team23/instruccion/select_normal.py
```python
from abstract.instruccion import *
from tools.tabla_tipos import *
from tools.console_text import *
class select_normal(instruccion):
def __init__(self,distinto,listaO,expresiones,fin, line, column, num_nodo):
super().__init__(line,column)
self.distinto=distinto
self.fin=fin
self.listaO=listaO
self.nodo = nodo_AST('SELECT',num_nodo)
self.nodo.hijos.append(nodo_AST('SELECT',num_nodo+1))
self.expresiones=expresiones
if (distinto!=None):
self.nodo.hijos.append(nodo_AST(distinto,num_nodo+2))
if (listaO=='*'):
self.nodo.hijos.append(nodo_AST(listaO,num_nodo+3))
else:
if listaO != None:
for element3 in listaO:
if element3 != None:
self.nodo.hijos.append(element3.nodo)
self.nodo.hijos.append(nodo_AST('FROM',num_nodo+4))
if expresiones != None:
for element2 in expresiones:
if element2 != None:
self.nodo.hijos.append(element2.nodo)
if fin != None:
for element in fin:
if element != None:
self.nodo.hijos.append(element.nodo)
def ejecutar(self):
pass
```
#### File: parser/team24/InstruccionesDGA.py
```python
import jsonMode as func
import tablaDGA as TS
#VARIABLES GLOBALES
resultadotxt = ""
tabla = TS.Tabla()
cont = 0
contambito = 0
NombreDB = ""
def Textoresultado():
global tabla
global resultadotxt
print(resultadotxt)
for simbolo in tabla.simbolos:
print("ID: " + str(tabla.simbolos[simbolo].id) + " Nombre: " + tabla.simbolos[simbolo].nombre + " Ambito: " + str(tabla.simbolos[simbolo].ambito))
print("\n")
resultadotxt = ""
class instruccion:
"""INSTRUCCION"""
"""RODUCCIONES GENERALES"""
class cond(instruccion):
def __init__(self,iden, signo,tipo):
self.iden = iden
self.signo = signo
self.tipo = tipo
class wherecond(instruccion):
def __init__(self,iden, tipo, tipo2):
self.iden = iden
self.tipo = tipo
self.tipo2 = tipo2
class wherecond1(instruccion):
def __init__(self,iden, tipo):
self.iden = iden
self.tipo = tipo
"""MANIPULACION DE BASES DE DATOS"""
#CREATEDB----------------------------
class createdb(instruccion):
def __init__(self,replacedb,ifnotexists,iden,owner,mode):
self.replacedb = replacedb
self.ifnotexists = ifnotexists
self.iden = iden
self.owner = owner
self.mode = mode
def ejecutar(self):
global resultadotxt
global cont
global tabla
global contambito
try:
resultado = func.createDatabase(self.iden)
if resultado == 0:
resultadotxt += "Se creo la base de datos " + self.iden + "\n"
NuevoSimbolo = TS.Simbolo(cont,self.iden,TS.TIPO.DATABASE,contambito)
cont+=1
contambito += 1
tabla.agregar(NuevoSimbolo)
elif resultado == 2 and not self.replacedb:
resultadotxt += "Ya existe la base de datos " + self.iden + "\n"
elif resultado == 2 and self.replacedb:
func.dropDatabase(self.iden)
buscar = tabla.BuscarNombre(self.iden)
tabla.simbolos.pop(buscar.id)
func.createDatabase(self.iden)
NuevoSimbolo = TS.Simbolo(cont,self.iden,TS.TIPO.DATABASE,contambito)
cont+=1
contambito+=1
tabla.agregar(NuevoSimbolo)
resultadotxt += "Se reemplazo la base de datos: " + self.iden + "\n"
else:
resultadotxt += "Error al crear base de datos: " + self.iden + "\n"
except:
"""ERROR SEMANTICO"""
#SHOWDB----------------------------------
class showdb(instruccion):
def __init__(self,nombre):
self.nombre = nombre
def ejecutar(self):
global resultadotxt
global cont
global tabla
contador = 0
try:
resultado = func.showDatabases()
if len(resultado) > 0:
resultadotxt += "\nBases de datos existentes:\n"
for base in resultado:
resultadotxt += str(contador) + ". " + base + "\n"
contador += 1
else:
resultadotxt += "No existen bases de datos"
except:
"""ERROR SEMANTICO"""
#ALTERDB------------------------------------
class alterdb(instruccion):
def __init__(self,alterdb2):
self.alterdb2 = alterdb2
def ejecutar(self):
global resultadotxt
global cont
global tabla
try:
if self.alterdb2.iden != "" and self.alterdb2.alterdb3.iden != "":
resultado = func.alterDatabase(self.alterdb2.iden, self.alterdb2.alterdb3.iden)
if resultado == 2:
resultadotxt += "No existe la base de datos " + self.alterdb2.iden + "\n"
elif resultado == 3:
resultadotxt += "Ya existe la base de datos " + self.alterdb2.alterdb3.iden + "\n"
else:
buscar = tabla.BuscarNombre(self.alterdb2.iden)
buscar.nombre = self.alterdb2.alterdb3.iden
tabla.actualizar(buscar)
resultadotxt += "Se actualizo la base de datos " + self.alterdb2.iden + " a " + self.alterdb2.alterdb3.iden + "\n"
except:
"""ERROR SEMANTICO"""
class alterdb2(instruccion):
def __init__(self,iden, alterdb3):
self.iden = iden
self.alterdb3 = alterdb3
class alterdb21(instruccion):
def __init__(self,iden):
self.iden = iden
class alterdb3(instruccion):
def __init__(self,iden):
self.iden = iden
class alterdb31(instruccion):
def __init__(self,iden, iden2, iden3):
self.iden = iden
self.iden2 = iden2
self.iden3 = iden3
#DROPDB--------------------------------------
class dropdb(instruccion):
def __init__(self,ifexists, iden):
self.ifexists = ifexists
self.iden =iden
def ejecutar(self):
global resultadotxt
global cont
global tabla
try:
resultado = func.dropDatabase(self.iden)
if(resultado == 2):
resultadotxt += "No existe la base de datos " + self.iden + "\n"
else:
BaseDatos = tabla.BuscarNombre(self.iden)
eliminar = []
for simbolo in tabla.simbolos:
if tabla.simbolos[simbolo].ambito == BaseDatos.id and not tabla.simbolos[simbolo].tipo == TS.TIPO.DATABASE:
TablaExistente = tabla.simbolos[simbolo]
eliminar.append(TablaExistente)
for simbolo2 in tabla.simbolos:
if tabla.simbolos[simbolo2].ambito == TablaExistente.id and not tabla.simbolos[simbolo2].tipo == TS.TIPO.DATABASE and not tabla.simbolos[simbolo2].tipo == TS.TIPO.TABLE:
eliminar.append(tabla.simbolos[simbolo2])
for element in eliminar:
tabla.simbolos.pop(element.id)
tabla.simbolos.pop(BaseDatos.id)
resultadotxt += "Se elimino la base de datos " + self.iden + "\n"
except:
"""ERROR SEMANTICO"""
#USEDB----------------------------------------
class usedb(instruccion):
def __init__(self, iden):
self.iden =iden
def ejecutar(self):
global resultadotxt
global NombreDB
NombreDB = self.iden
resultadotxt += "Usando la base de datos " + self.iden + "\n"
#MANIPULACION DE TABLAS
#CREATE TABLE---------------------------------------
class createtb(instruccion):
def __init__(self,iden, coltb, inherits):
self.iden = iden
self.coltb = coltb
self.inherits = inherits
def ejecutar(self):
global resultadotxt
global cont
global tabla
global NombreDB
try:
resultado = func.createTable(NombreDB, self.iden,0)
if(resultado == 2):
resultadotxt += "No existe la base de datos: " + NombreDB + "\n"
elif(resultado == 3):
resultadotxt += "La tabla ya existe: " + self.iden + "\n"
else:
buscar = tabla.BuscarNombre(NombreDB)
NuevoSimbolo = TS.Simbolo(cont,self.iden,TS.TIPO.TABLE,buscar.id,0)
cont+=1
tabla.agregar(NuevoSimbolo)
"""SE CREAN LAS COLUMNAS PARA LA TABLA"""
inicio = 0
for columna in self.coltb:
try:
if "primary key " in columna.key.lower():
NuevaColumna = TS.Simbolo(cont,columna.iden,TS.TIPO.COLUMN,NuevoSimbolo.id,0,columna.tipo,1,columna.references,columna.default,False,columna.constraint,inicio)
listacol = []
listacol.append(NuevaColumna.numcol)
resultado = func.alterAddPK(NombreDB,NuevoSimbolo.nombre,listacol)
resultado2 = func.alterAddColumn(NombreDB,self.iden,columna)
else:
NuevaColumna = TS.Simbolo(cont,columna.iden,TS.TIPO.COLUMN,NuevoSimbolo.id,0,columna.tipo,0,columna.references,columna.default,False,columna.constraint,inicio)
resultado = func.alterAddColumn(NombreDB,self.iden,columna)
if resultado == 2:
resultadotxt += "No existe la base de datos " + NombreDB + "\n"
elif resultado == 3:
resultadotxt += "No existe la tabla " + self.iden + "\n"
elif resultado == 4:
resultadotxt += "Ya existe una llave primaria en " + self.iden + "\n"
else:
if columna.notnull.lower() == "not null":
NuevaColumna.nullcol = True
else:
NuevaColumna.nullcol = False
cont+=1
inicio+=1
NuevoSimbolo.coltab+=1
tabla.actualizar(NuevoSimbolo)
tabla.agregar(NuevaColumna)
resultadotxt += "Se agrego la columna " + columna.iden + " a la tabla " + self.iden + "\n"
except:
"""ERROR SEMANTICO"""
resultadotxt += "Se creo la tabla: " + self.iden + " En la base de datos: " + NombreDB + "\n"
except:
"""ERROR SEMANTICO"""
class columna(instruccion):
def __init__(self,iden, tipo, notnull, key, references, default, constraint):
self.iden = iden
self.tipo = tipo
self.notnull = notnull
self.key = key
self.references = references
self.default = default
self.constraint = constraint
#DROP TABLE--------------------------------------
class droptb(instruccion):
def __init__(self,iden):
self.iden = iden
def ejecutar(self):
global resultadotxt
global cont
global tabla
global NombreDB
try:
resultado = func.dropTable(NombreDB, self.iden)
if(resultado == 2):
resultadotxt += "No existe la base de datos " + NombreDB + "\n"
elif(resultado == 3):
resultadotxt += "La tabla " + self.iden + " no existe en " + NombreDB + "\n"
else:
buscar = tabla.BuscarNombre(self.iden)
eliminar = []
for simbolo in tabla.simbolos:
if tabla.simbolos[simbolo].ambito == buscar.id:
eliminar.append(tabla.simbolos[simbolo])
for element in eliminar:
tabla.simbolos.pop(element.id)
tabla.simbolos.pop(buscar.id)
resultadotxt += "Se elimino la tabla: " + self.iden + " de la base de datos: " + NombreDB + "\n"
except:
"""ERROR SEMANTICO"""
#ALTER TABLE-------------------------------------
class altertb(instruccion):
def __init__(self,iden, altertb2):
self.iden = iden
self.altertb2 = altertb2
def ejecutar(self):
global resultadotxt
global cont
global tabla
global NombreDB
if self.altertb2.text.lower() == "add column":
try:
resultado = func.alterAddColumn(NombreDB,self.iden,self.altertb2.iden)
if resultado == 2:
resultadotxt += "No existe la base de datos " + NombreDB + "\n"
elif resultado == 3:
resultadotxt += "No existe la tabla " + self.iden + "\n"
else:
buscar = tabla.BuscarNombre(self.iden)
columna = self.altertb2
buscar.coltab+=1
tabla.actualizar(buscar)
NuevaColumna = TS.Simbolo(cont,columna.iden,TS.TIPO.COLUMN,buscar.id,0,columna.tipo,0,"","",False,"",(buscar.coltab-1))
cont+=1
tabla.agregar(NuevaColumna)
resultadotxt += "Se agrego la columna " + self.altertb2.iden + " a la tabla " + self.iden + "\n"
except:
"""ERROR SEMANTICO"""
elif self.altertb2.text.lower() == "drop column":
try:
delcolumna = tabla.BuscarNombre(self.altertb2.iden)
resultado = func.alterDropColumn(NombreDB,self.iden,delcolumna.numcol)
if resultado == 2:
resultadotxt += "La base de datos " + NombreDB + " No existe \n"
elif resultado == 3:
resultadotxt += "No se encontro la tabla " + self.iden + " en la base de datos " + NombreDB + "\n"
elif resultado == 4:
resultadotxt += "La columna " + self.altertb2.iden + " Es llave primaria" + "\n"
elif resultado == 5:
resultadotxt += "La columna " + self.altertb2.iden + " No existe" + "\n"
else:
tabla.simbolos.pop(delcolumna.id)
resultadotxt += "Se elimino la columna " + self.altertb2.iden + " de la tabla " + self.iden + "\n"
except:
"""ERROR SEMANTICO"""
class altertb2(instruccion):
def __init__(self,text,iden, tipo):
self.text = text
self.iden = iden
self.tipo = tipo
class altertb21(instruccion):
def __init__(self,text,iden):
self.text = text
self.iden = iden
class altertb211(instruccion):
def __init__(self,addprop):
self.addprop = addprop
class addprop(instruccion):
def __init__(self,cond):
self.cond = cond
class addprop1(instruccion):
def __init__(self,iden, iden2):
self.iden = iden
self.iden2 = iden2
class addprop11(instruccion):
def __init__(self,colkey, colkey2):
self.colkey = colkey
self.colkey2 = colkey2
class altcol(instruccion):
def __init__(self,altcol, alter):
self.altcol = altcol
self.alter = alter
class alter(instruccion):
def __init__(self,iden, propaltcol):
self.iden = iden
self.propaltcol = propaltcol
#MANIPULACION DE DATOS
#INSERT-------------------------------------
class insert(instruccion):
def __init__(self,iden, valores):
self.iden = iden
self.valores = valores
def ejecutar(self):
global resultadotxt
global cont
global tabla
global NombreDB
try:
columnasdetabla = []
tablas = tabla.BuscarNombre(self.iden)
for simbolo in tabla.simbolos:
if tabla.simbolos[simbolo].ambito == tablas.id and not tabla.simbolos[simbolo].tipo == TS.TIPO.DATABASE and not tabla.simbolos[simbolo].tipo == TS.TIPO.TABLE and not tabla.simbolos[simbolo].tipo == TS.TIPO.TUPLA:
columnasdetabla.append(tabla.simbolos[simbolo])
colcorrecta = []
iter = 0
for columna in columnasdetabla:
if VerificarTipo(columna.tipocol, self.valores[iter]):
colcorrecta.append(self.valores[iter])
iter+=1
resultado = func.insert(NombreDB,self.iden,colcorrecta)
if resultado == 2:
resultadotxt += "No existe la base de datos " + NombreDB + "\n"
elif resultado == 3:
resultadotxt += "No existe la base tabla " + NombreDB + "\n"
elif resultado == 5:
resultadotxt += "La cantidad de valores no coincide con la cantidad de columnas\n"
else:
nombre = ""
for element in colcorrecta:
nombre += str(element) + " "
NuevoRegistro = TS.Simbolo(cont,nombre,TS.TIPO.TUPLA,tablas.id)
tabla.agregar(NuevoRegistro)
resultadotxt += "El registro " + self.valores[0] + " fue agregado a la tabla " + self.iden + "\n"
except:
"""ERRORES SEMANTICOS"""
"""PENDIENDTE"""
def VerificarTipo(TipoColumna,ValorColumna):
return True
#UPDATE-----------------------------------------
class update(instruccion):
def __init__(self,iden, cond, wherecond):
self.iden = iden
self.cond = cond
self.wherecond = wherecond
def ejecutar(self):
global resultadotxt
global cont
global tabla
global NombreDB
#DELETE-------------------------------------------
class delete(instruccion):
def __init__(self,iden, wherecond):
self.iden = iden
self.wherecond = wherecond
```
#### File: parser/team24/reportTable.py
```python
from graphviz import Digraph
from tablaDGA import Tabla
def graphTable(tabla : Tabla):
s = Digraph('structs', filename='reporteTabla.gv', node_attr={'shape': 'plaintext'})
c = 'lista [label = <<TABLE> \n <TR><TD>ID</TD><TD>Tipo</TD><TD>Valor</TD><TD>Ambito</TD></TR> '
for x in tabla.simbolos.values():
c+= '<TR>\n'
c+= '<TD>\n'
c+= str(x.id)
c+= '\n</TD><TD>'
c+= str(x.tipo)
c+= '\n</TD><TD>'
c+= str(x.valor)
c+= '\n</TD><TD>'
c+= str(x.ambito)
c+= '\n</TD></TR>'
c += '</TABLE>>, ];'
s.body.append(c)
s.view()
```
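graphTable only iterates `tabla.simbolos.values()` and reads each symbol's `id`, `tipo`, `valor` and `ambito`, so any duck-typed object with those attributes can drive it. A hypothetical sketch (requires the `graphviz` package plus the Graphviz binaries; the stub classes below are not the real `tablaDGA` types):

```python
# Hypothetical stand-ins for tablaDGA.Simbolo / tablaDGA.Tabla.
class SimboloStub:
    def __init__(self, id, tipo, valor, ambito):
        self.id, self.tipo, self.valor, self.ambito = id, tipo, valor, ambito

class TablaStub:
    def __init__(self):
        self.simbolos = {}

t = TablaStub()
t.simbolos[0] = SimboloStub(0, 'DATABASE', 'ventas', 0)
t.simbolos[1] = SimboloStub(1, 'TABLE', 'clientes', 0)

graphTable(t)   # writes reporteTabla.gv and opens the rendered table
```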
#### File: G26/Expresiones/Condicionales.py
```python
import sys
sys.path.append('../G26/Instrucciones')
sys.path.append('../G26/Utils')
sys.path.append('../G26/Expresiones')
from instruccion import *
from Error import *
from Primitivo import *
from Identificador import *
from datetime import *
class Condicionales(Instruccion):
def __init__(self, leftOperator, rightOperator, sign, extra):
self.leftOperator = leftOperator
self.rightOperator = rightOperator
self.sign = sign
self.extra = extra
def execute(self):
left = self.leftOperator.execute()
if isinstance(left, Error) :
return left
right = self.rightOperator.execute()
if isinstance(right, Error) :
return right
if self.sign == '>' :
if (left.type == 'integer' and right.type == 'integer') or (left.type == 'float' and right.type == 'float') or (left.type == 'float' and right.type == 'integer') or (left.type == 'integer' and right.type == 'float'):
comp = int(left.val) > int(right.val)
return Primitive('boolean', comp)
else:
error = Error('Semántico', 'Error de tipos en MAYOR QUE, no se puede operar ' + left.type + ' con ' + right.type, 0, 0)
return error
else :
if (left.type == 'integer' and right.type == 'integer') or (left.type == 'float' and right.type == 'float') or (left.type == 'float' and right.type == 'integer') or (left.type == 'integer' and right.type == 'float'):
comp = int(left.val) < int(right.val)
return Primitive('boolean', comp)
else:
error = Error('Semántico', 'Error de tipos en MENOR QUE, no se puede operar ' + left.type + ' con ' + right.type, 0, 0)
return error
def __repr__(self):
return str(self.__dict__)
def executeInsert(self, data, valoresTabla):
try:
left = self.leftOperator.execute()
except:
left = self.leftOperator.execute(data, valoresTabla)
if isinstance(left, Error):
return left
try:
right = self.rightOperator.execute()
except:
right = self.rightOperator.execute(data, valoresTabla)
if isinstance(right, Error):
return right
if self.sign == '>':
if left.type == 'integer' or left.type == 'float':
if right.type == 'integer' or right.type == 'float':
return int(left.val) > int(right.val)
elif (left.type == 'string' and right.type == 'time') or (right.type == 'string' and left.type == 'time'):
try:
horaIzq = left.val
horaValIzq = datetime.strptime(horaIzq, '%H:%M:%S')
horaDer = right.val
horaValDer = datetime.strptime(horaDer, '%H:%M:%S')
return horaValIzq > horaValDer
except:
return Error('Semántico', 'Error de tipos en la comparacion de TIME.', 0, 0)
elif (left.type == 'string' and right.type == 'date') or (right.type == 'string' and left.type == 'date'):
try:
fechaI = left.val
fechaIzq = fechaI.replace('/', '-')
fechaValIzq = datetime.strptime(fechaIzq, '%d-%m-%Y')
except:
try:
fechaI = left.val
fechaIzq = fechaI.replace('/', '-')
fechaValIzq = datetime.strptime(fechaIzq, '%d-%m-%Y %H:%M:%S')
except:
return Error('Semántico', 'Error de tipos en la comparacion de DATE.', 0, 0)
try:
fechaD = right.val
fechaDer = fechaD.replace('/', '-')
fechaValDer = datetime.strptime(fechaDer, '%d-%m-%Y')
except:
try:
fechaD = right.val
fechaDer = fechaD.replace('/', '-')
fechaValDer = datetime.strptime(fechaDer, '%d-%m-%Y %H:%M:%S')
except:
error = Error('Semántico', 'Error de tipos en la comparacion de DATE.', 0, 0)
return error
return fechaValIzq > fechaValDer
else:
return Error('Semántico', 'Error de tipos en MAYOR QUE, no se puede operar ' + left.type + ' con ' + right.type, 0, 0)
elif self.sign == '<':
if left.type == 'integer' or left.type == 'float':
if right.type == 'integer' or right.type == 'float':
return int(left.val) < int(right.val)
elif (left.type == 'string' and right.type == 'time') or (right.type == 'string' and left.type == 'time'):
try:
horaIzq = left.val
horaValIzq = datetime.strptime(horaIzq, '%H:%M:%S')
horaDer = right.val
horaValDer = datetime.strptime(horaDer, '%H:%M:%S')
return horaValIzq < horaValDer
except:
return Error('Semántico', 'Error de tipos en la comparacion de TIME.', 0, 0)
elif (left.type == 'string' and right.type == 'date') or (right.type == 'string' and left.type == 'date'):
try:
fechaI = left.val
fechaIzq = fechaI.replace('/', '-')
fechaValIzq = datetime.strptime(fechaIzq, '%d-%m-%Y')
except:
try:
fechaI = left.val
fechaIzq = fechaI.replace('/', '-')
fechaValIzq = datetime.strptime(fechaIzq, '%d-%m-%Y %H:%M:%S')
except:
error = Error('Semántico', 'Error de tipos en la comparacion de DATE.', 0, 0)
return error
try:
fechaD = right.val
fechaDer = fechaD.replace('/', '-')
fechaValDer = datetime.strptime(fechaDer, '%d-%m-%Y')
except:
try:
fechaD = right.val
fechaDer = fechaD.replace('/', '-')
fechaValDer = datetime.strptime(fechaDer, '%d-%m-%Y %H:%M:%S')
except:
return Error('Semántico', 'Error de tipos en la comparacion de DATE.', 0, 0)
return fechaValIzq < fechaValDer
else:
return Error('Semántico', 'Error de tipos en MENOR QUE, no se puede operar ' + left.type + ' con ' + right.type, 0, 0)
elif self.sign == '<=':
if left.type == 'integer' or left.type == 'float':
if right.type == 'integer' or right.type == 'float':
return int(left.val) <= int(right.val)
elif (left.type == 'string' and right.type == 'time') or (right.type == 'string' and left.type == 'time'):
try:
horaIzq = left.val
horaValIzq = datetime.strptime(horaIzq, '%H:%M:%S')
horaDer = right.val
horaValDer = datetime.strptime(horaDer, '%H:%M:%S')
return horaValIzq <= horaValDer
except:
return Error('Semántico', 'Error de tipos en la comparacion de TIME.', 0, 0)
elif (left.type == 'string' and right.type == 'date') or (right.type == 'string' and left.type == 'date'):
try:
fechaI = left.val
fechaIzq = fechaI.replace('/', '-')
fechaValIzq = datetime.strptime(fechaIzq, '%d-%m-%Y')
except:
try:
fechaI = left.val
fechaIzq = fechaI.replace('/', '-')
fechaValIzq = datetime.strptime(fechaIzq, '%d-%m-%Y %H:%M:%S')
except:
return Error('Semántico', 'Error de tipos en la comparacion de DATE.', 0, 0)
try:
fechaD = right.val
fechaDer = fechaD.replace('/', '-')
fechaValDer = datetime.strptime(fechaDer, '%d-%m-%Y')
except:
try:
fechaD = right.val
fechaDer = fechaD.replace('/', '-')
fechaValDer = datetime.strptime(fechaDer, '%d-%m-%Y %H:%M:%S')
except:
return Error('Semántico', 'Error de tipos en la comparacion de DATE.', 0, 0)
return fechaValIzq <= fechaValDer
else:
return Error('Semántico', 'Error de tipos en MENOR IGUAL QUE, no se puede operar ' + left.type + ' con ' + right.type, 0, 0)
elif self.sign == '>=':
if left.type == 'integer' or left.type == 'float':
if right.type == 'integer' or right.type == 'float':
return int(left.val) >= int(right.val)
elif (left.type == 'string' and right.type == 'time') or (right.type == 'string' and left.type == 'time'):
try:
horaIzq = left.val
horaValIzq = datetime.strptime(horaIzq, '%H:%M:%S')
horaDer = right.val
horaValDer = datetime.strptime(horaDer, '%H:%M:%S')
return horaValIzq >= horaValDer
except:
return Error('Semántico', 'Error de tipos en la comparacion de TIME.', 0, 0)
elif (left.type == 'string' and right.type == 'date') or (right.type == 'string' and left.type == 'date'):
try:
fechaI = left.val
fechaIzq = fechaI.replace('/', '-')
fechaValIzq = datetime.strptime(fechaIzq, '%d-%m-%Y')
except:
try:
fechaI = left.val
fechaIzq = fechaI.replace('/', '-')
fechaValIzq = datetime.strptime(fechaIzq, '%d-%m-%Y %H:%M:%S')
except:
return Error('Semántico', 'Error de tipos en la comparacion de DATE.', 0, 0)
try:
fechaD = right.val
fechaDer = fechaD.replace('/', '-')
fechaValDer = datetime.strptime(fechaDer, '%d-%m-%Y')
except:
try:
fechaD = right.val
fechaDer = fechaD.replace('/', '-')
fechaValDer = datetime.strptime(fechaDer, '%d-%m-%Y %H:%M:%S')
except:
return Error('Semántico', 'Error de tipos en la comparacion de DATE.', 0, 0)
return fechaValIzq >= fechaValDer
else:
return Error('Semántico', 'Error de tipos en MAYOR IGUAL QUE, no se puede operar ' + left.type + ' con ' + right.type, 0, 0)
elif self.sign == '<>' or self.sign == '!=':
if (left.type == 'integer' and right.type == 'integer') or (left.type == 'float' and right.type == 'float') or (left.type == 'float' and right.type == 'integer') or (left.type == 'integer' and right.type == 'float'):
return int(left.val) != int(right.val)
elif (left.type == 'string' and right.type == 'string') or (left.type == 'boolean' and right.type == 'boolean'):
return left.val != right.val
elif (left.type == 'string' and right.type == 'time') or (right.type == 'string' and left.type == 'time'):
try:
horaIzq = left.val
horaValIzq = datetime.strptime(horaIzq, '%H:%M:%S')
horaDer = right.val
horaValDer = datetime.strptime(horaDer, '%H:%M:%S')
return horaValIzq != horaValDer
except:
return Error('Semántico', 'Error de tipos en la comparacion de TIME.', 0, 0)
elif (left.type == 'string' and right.type == 'date') or (right.type == 'string' and left.type == 'date'):
try:
fechaI = left.val
fechaIzq = fechaI.replace('/', '-')
fechaValIzq = datetime.strptime(fechaIzq, '%d-%m-%Y')
except:
try:
fechaI = left.val
fechaIzq = fechaI.replace('/', '-')
fechaValIzq = datetime.strptime(fechaIzq, '%d-%m-%Y %H:%M:%S')
except:
return Error('Semántico', 'Error de tipos en la comparacion de DATE.', 0, 0)
try:
fechaD = right.val
fechaDer = fechaD.replace('/', '-')
fechaValDer = datetime.strptime(fechaDer, '%d-%m-%Y')
except:
try:
fechaD = right.val
fechaDer = fechaD.replace('/', '-')
fechaValDer = datetime.strptime(fechaDer, '%d-%m-%Y %H:%M:%S')
except:
return Error('Semántico', 'Error de tipos en la comparacion de DATE.', 0, 0)
return fechaValIzq != fechaValDer
elif left.type == 'string' and right.type == 'string':
return str(left.val) != str(right.val)
else:
return Error('Semántico', 'Error de tipos en DIFERENTE QUE, no se puede operar ' + left.type + ' con ' + right.type, 0, 0)
elif self.sign == '=':
if (left.type == 'integer' and right.type == 'integer') or (left.type == 'float' and right.type == 'float') or (left.type == 'float' and right.type == 'integer') or (left.type == 'integer' and right.type == 'float'):
return int(left.val) == int(right.val)
elif (left.type == 'string' and right.type == 'string') or (left.type == 'boolean' and right.type == 'boolean'):
return left.val == right.val
elif (left.type == 'string' and right.type == 'time') or (right.type == 'string' and left.type == 'time'):
try:
horaIzq = left.val
horaValIzq = datetime.strptime(horaIzq, '%H:%M:%S')
horaDer = right.val
horaValDer = datetime.strptime(horaDer, '%H:%M:%S')
return horaValIzq == horaValDer
except:
return Error('Semántico', 'Error de tipos en la comparacion de TIME.', 0, 0)
elif (left.type == 'string' and right.type == 'date') or (right.type == 'string' and left.type == 'date'):
try:
fechaI = left.val
fechaIzq = fechaI.replace('/', '-')
fechaValIzq = datetime.strptime(fechaIzq, '%d-%m-%Y')
except:
try:
fechaI = left.val
fechaIzq = fechaI.replace('/', '-')
fechaValIzq = datetime.strptime(fechaIzq, '%d-%m-%Y %H:%M:%S')
except:
return Error('Semántico', 'Error de tipos en la comparacion de DATE.', 0, 0)
try:
fechaD = right.val
fechaDer = fechaD.replace('/', '-')
fechaValDer = datetime.strptime(fechaDer, '%d-%m-%Y')
except:
try:
fechaD = right.val
fechaDer = fechaD.replace('/', '-')
fechaValDer = datetime.strptime(fechaDer, '%d-%m-%Y %H:%M:%S')
except:
return Error('Semántico', 'Error de tipos en la comparacion de DATE.', 0, 0)
return fechaValIzq == fechaValDer
elif left.type == 'string' and right.type == 'string':
return str(left.val) == str(right.val)
else:
return Error('Semántico', 'Error de tipos en IGUAL, no se puede operar ' + left.type + ' con ' + right.type, 0, 0)
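# Note: the DATE comparisons above re-parse both operands with the same try/except
# fallback ('%d-%m-%Y', then '%d-%m-%Y %H:%M:%S') for every operator. A small helper
# (hypothetical, not part of the original grammar classes) could centralize that logic:
#   def parse_fecha(texto):
#       texto = texto.replace('/', '-')
#       for fmt in ('%d-%m-%Y', '%d-%m-%Y %H:%M:%S'):
#           try:
#               return datetime.strptime(texto, fmt)
#           except ValueError:
#               pass
#       return None  # the caller would then return the semantic Error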
class Between(Instruccion):
def __init__(self, type, val1, val2):
self.type = type
self.val1 = val1
self.val2 = val2
def execute(self):
return self
def __repr__(self):
return str(self.__dict__)
class IsNotOptions(Instruccion):
def __init__(self, notv, val, distinct):
self.notv = notv
self.val = val
self.distinct = distinct
def execute(self):
return self.notv
def __repr__(self):
return str(self.__dict__)
```
#### File: execution/expression/id.py
```python
import sys
sys.path.append('../tytus/parser/team27/G-27/execution/abstract')
sys.path.append('../tytus/parser/team27/G-27/execution/symbol')
from environment import *
from expression import *
class Id(Expression):
"""
    id: receives an id that will reference a variable.
"""
def __init__(self,id, row, column):
Expression.__init__(self, row, column)
self.id = id
def execute(self, environment):
# ir a buscar el id
if not isinstance(self.id,str):
return {'Error': 'El id no es una cadena.', 'Fila':self.row, 'Columna': self.column }
variable = environment.buscarVariable(self.id)
if variable == None:
return {'Error': 'El id: '+self.id+' de la columna indicada no existe.', 'Fila':self.row, 'Columna': self.column }
valor = variable['value']
tipo = variable['tipo']
return {'value':valor, 'typ':tipo}
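    # Note: execute() returns either {'value': ..., 'typ': ...} on success or a dictionary
    # containing an 'Error' key; callers are expected to check for 'Error' before using the value.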
```
#### File: function/trigonometric/atan2.py
```python
import sys
sys.path.append('../tytus/parser/team27/G-27/execution/abstract')
sys.path.append('../tytus/parser/team27/G-27/execution/expression')
sys.path.append('../tytus/parser/team27/G-27/execution/symbol')
sys.path.append('../tytus/parser/team27/G-27/libraries')
sys.path.append('../tytus/parser/team27/G-27/execution/expression')
from function import *
from typ import *
from trigonometric_functions import atan2
class Atan2(Function):
def __init__(self, divisor, dividend, row, column):
Function.__init__(self,row,column)
self.divisor = divisor
self.dividend = dividend
def execute(self, environment):
#Input es una lista
if isinstance(self.dividend,list):
# el rango de los numeros en los parametros es de [-infinito,infinito]
respuesta = []
for i in range(len(self.divisor)):
value = self.divisor[i].execute(environment)
value2 = self.dividend[i].execute(environment)
if value['typ'] != Type.INT and value['typ'] != Type.DECIMAL:
return {'Error':"El valor " + value['value'] + " no es decimal o entero", 'linea':self.row,'columna':self.column }
if value2['typ'] != Type.INT and value2['typ'] != Type.DECIMAL:
                    return {'Error':"El valor " + value2['value'] + " no es decimal o entero", 'linea':self.row,'columna':self.column }
result = atan2(value['value'], value2['value'])
respuesta.append({'value':result, 'typ': Type.INT})
return respuesta
#Input valor puntual
else:
value = self.divisor.execute(environment)
value2 = self.dividend.execute(environment)
if value['typ'] != Type.INT and value['typ'] != Type.DECIMAL:
return {'Error':"El valor " + value['value'] + " no es decimal o entero", 'linea':self.row,'columna':self.column }
if value2['typ'] != Type.INT and value2['typ'] != Type.DECIMAL:
                return {'Error':"El valor " + value2['value'] + " no es decimal o entero", 'linea':self.row,'columna':self.column }
return [{'value':atan2(value['value'],value2['value']), 'typ': Type.INT}]
```
#### File: execution/querie/add_column.py
```python
import sys
sys.path.append('../tytus/parser/team27/G-27/execution/abstract')
sys.path.append('../tytus/parser/team27/G-27/execution/symbol')
sys.path.append('../tytus/parser/team27/G-27/execution/querie')
sys.path.append('../tytus/storage')
from querie import *
from environment import *
from table import *
from column import *
from typ import *
from storageManager import jsonMode as admin
class Add_Column(Querie):
    '''
    columnName = expects a column name; it must be a string
    row = row number
    column = column number
    columnType = expects a data type; this is a dictionary with the following syntax:
    {'type':, 'length': must be an int, 'default':'' we send a default value for the data type }
    valor_type: here we send a type from the Database_Types class
    valor_length: if valor_type equals Varchar(number), send the number, i.e. the size of the varchar; if it is not varchar send -1
    valor_default: send a default value according to the data type (valor_type), example -> varchar(10) default -> '' (empty string)
    dictionary examples:
    {'type':DBType.numeric, 'length': -1, 'default':0 }, {'type':DBType.varchar, 'length': 20, 'default':'' }
    '''
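    # Minimal usage sketch (hypothetical values; DBType, environment and the storage admin
    # come from the surrounding project):
    #   nueva = Add_Column('edad', {'type': DBType.numeric, 'length': -1, 'default': 0}, 1, 1)
    #   nueva.execute(environment, 'estudiantes')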
def __init__(self, columnName,columnType, row, column):
Querie.__init__(self, row, column)
self.columnName = columnName
self.columnType = columnType
# columnType es un diccionario {'type':text, 'length':-1, 'default':''}
def execute(self, environment,tableName):
if not isinstance(self.columnName,str):
return {'Error': 'El nombre indicado de la columna no es una cadena.', 'Fila':self.row, 'Columna': self.column }
if not isinstance(tableName,str):
return {'Error': 'El nombre indicado de la tabla no es una cadena.', 'Fila':self.row, 'Columna': self.column }
# creo una nueva columna, agregar el length
newColumn = Column(self.columnName,self.columnType['type'],self.columnType['default'],self.columnType['length'])
db_name = environment.getActualDataBase()
database = environment.readDataBase(db_name)
table = database.getTable(tableName)
if table == None:
return {'Error': 'la tabla: '+ tableName +'no existe en la base de datos: '+db_name, 'Fila':self.row, 'Columna': self.column }
table.createColumn(newColumn)
result = admin.alterAddColumn(db_name,tableName,self.columnType['default'])
if result == 0:
return 'se inserto correctamente la columna: '+self.columnName+' en la tabla: '+tableName
elif result == 1:
return {'Error': 'Error al ejecutar la operacion add column', 'Fila':self.row, 'Columna': self.column }
elif result == 2:
return {'Error': 'La base de datos a la que hace referencia no existe', 'Fila':self.row, 'Columna': self.column }
elif result == 3:
return {'Error': 'La tabla: '+tableName+' no existe', 'Fila':self.row, 'Columna': self.column }
else:
return {'Error': 'Error desconocido en el add column', 'Fila':self.row, 'Columna': self.column }
```
#### File: instructions/DML/select.py
```python
from views.data_window import DataWindow
from models.instructions.shared import *
from models.instructions.Expression.expression import *
from models.instructions.DML.special_functions import *
class Union(Instruction):
def __init__(self, array_instr, type_union,line, column) :
self.array_instr = array_instr
self.type_union = type_union
self.line = line
self.column = column
def __repr__(self):
return str(vars(self))
def process(self, instrucction):
pass
class Select(Instruction):
'''
    SELECT receives an array with all the parameters
'''
def __init__(self, instrs, order_option, limit_option) :
self.instrs = instrs
self.order_option = order_option
self.limit_option = limit_option
def __repr__(self):
return str(vars(self))
def process(self, instrucction):
instr = self.instrs.process(instrucction)
return instr
class TypeQuerySelect(Instruction):
'''
    TypeQuerySelect receives which kind of query it will be:
    UNION
    INTERSECT
    EXCEPT
    and whether it comes with the ALL option
'''
def __init__(self, typeQuery, optionAll,line, column):
self.typeQuery = typeQuery
self.optionAll = optionAll
self.line = line
self.column = column
def __repr__(self):
return str(vars(self))
def process(self, instrucction):
pass
class Table:
def __init__(self, headers, values):
self.headers = headers
self.values = values
def __repr__(self):
return str(vars(self))
class SelectQ(Instruction):
    '''receives the list of parameters to select and which table is being selected from'''
def __init__(self, type_select, select_list, from_clause, where_or_grouphaving,line, column):
self.type_select = type_select
self.select_list = select_list
self.from_clause = from_clause
self.where_or_grouphaving = where_or_grouphaving
self.line = line
self.column = column
def __repr__(self):
return str(vars(self))
def process(self, instrucction):
list_select = None
if self.type_select == None and self.from_clause == None and self.where_or_grouphaving == None and self.select_list != None:
list_select = format_table_list(self.select_list, instrucction)
return DataWindow().consoleText(list_select)
elif self.type_select != None and self.from_clause != None and self.where_or_grouphaving == None and self.select_list != None:
pass
elif self.type_select != None and self.from_clause != None and self.where_or_grouphaving != None and self.select_list != None:
pass
elif self.type_select == None and self.from_clause != None and self.where_or_grouphaving != None and self.select_list != None:
pass
elif self.type_select == None and self.from_clause != None and self.where_or_grouphaving == None and self.select_list != None:
list_select = loop_list(self.select_list, instrucction)
list_from = self.from_clause.process(instrucction)
if '*' in list_select and len(list_select) == 1 and len(list_from) == 1:
tabla_all = select_all(list_from, self.line, self.column)
return DataWindow().consoleText(format_df(tabla_all))
elif '*' not in list_select and len(list_from) == 1:
table_i = select_all(list_from, self.line, self.column)
table_f = select_with_columns(list_select, table_i)
return DataWindow().consoleText(format_df(table_f))
return None
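    # Note: only two paths are implemented above: a bare select list without FROM
    # (handled by format_table_list) and a plain SELECT over a single table, with or
    # without an explicit column list; the branches that involve type_select or
    # where_or_grouphaving are still 'pass'.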
class SelectList(Instruction):
'''
    Stores the list of objects to select, which
    has the following options:
    -> *
    -> Id, Id.....
'''
def __init__(self, arrparams,line, column):
self.arrparams = arrparams
self.line = line
self.column = column
def __repr__(self):
return str(vars(self))
def process(self, instrucction):
pass
class OrderClause(Instruction):
'''
    Receives the parameters for the order clause:
    the list of parameters to order by and the
    order type, ASC or DESC
'''
def __init__(self, arrvaluesorder, type_order,line, column):
self.arrvaluesorder = arrvaluesorder
self.type_order = type_order
self.line = line
self.column = column
def __repr__(self):
return str(vars(self))
def process(self, instrucction):
pass
class LimitClause(Instruction):
'''
    Receives the parameters for the limit clause:
    either an established range or a single parameter,
    plus the OFFSET option
'''
def __init__(self, limitarr, offset,line, column):
self.limitarr = limitarr
self.offset = offset
self.line = line
self.column = column
def __repr__(self):
return str(vars(self))
def process(self, instrucction):
pass
class JoinClause(Instruction):
'''
    JoinClause receives the parameters:
    join type, table and expression
'''
def __init__(self, type_join, table, arr_expression,line, column):
self.type_join = type_join
self.table = table
self.arr_expression = arr_expression
self.line = line
self.column = column
def __repr__(self):
return str(vars(self))
def process(self, instrucction):
pass
class ExistsClause(Instruction):
'''
    ExistsClause receives a subquery
    as its parameter
'''
def __init__(self, subquery,line, column):
self.subquery = subquery
self.line = line
self.column = column
def __repr__(self):
return str(vars(self))
def process(self, instrucction):
pass
class NotOption(Instruction):
'''
    NotOption receives a list
    of instructions to be negated
'''
def __init__(self, arr_not,line, column):
self.arr_not = arr_not
self.line = line
self.column = column
def __repr__(self):
return str(vars(self))
def process(self, instrucction):
pass
class InClause(Instruction):
'''
InClause
'''
def __init__(self, arr_lista,line, column):
self.arr_lista = arr_lista
self.line = line
self.column = column
def __repr__(self):
return str(vars(self))
def process(self, instrucction):
pass
class LikeClause(Instruction):
'''
LikeClause
'''
def __init__(self, arr_list,line, column):
self.arr_list = arr_list
self.line = line
self.column = column
def __repr__(self):
return str(vars(self))
def process(self, instrucction):
pass
class isClause(Instruction):
'''
IsClause
'''
def __init__(self, arr_list,line, column):
self.arr_list = arr_list
self.line = line
self.column = column
def __repr__(self):
return str(vars(self))
def process(self, instrucction):
pass
class AgreggateFunctions(Instruction):
'''
AgreggateFunctions
'''
def __init__(self, type_agg, cont_agg, opt_alias,line, column):
self.type_agg = type_agg
self.cont_agg = cont_agg
self.opt_alias = opt_alias
self.line = line
self.column = column
def __repr__(self):
return str(vars(self))
def process(self, instrucction):
pass
class Case(Instruction):
'''
    CASE receives an array with all the options and an else
'''
def __init__(self, arr_op, c_else,line, column):
self.arr_op = arr_op
self.c_else = c_else
self.line = line
self.column = column
def __repr__(self):
return str(vars(self))
def process(self, instrucction):
pass
class CaseOption(Instruction):
'''
CASE OPTION
'''
def __init__(self, when_exp, then_exp,line, column):
self.when_exp = when_exp
self.then_exp = then_exp
self.line = line
self.column = column
def __repr__(self):
return str(vars(self))
def process(self, instrucction):
pass
```
#### File: analizer/functions/TrigonometricFunctions.py
```python
import math
import analizer.functions.MathFunctions as mt
def acos(column):
i = 0
column = mt.convert_num_col(column)
result = list()
while i < len(column):
valor = ""
if column[i] >= -1 and 1 >= column[i]:
valor = str(math.acos(column[i]))
else:
valor = "Error de dominio"
result.insert(i + 1, valor)
i += 1
return result
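# Example (assuming convert_num_col turns the input strings into numbers):
#   acos(["0.5", "2"]) -> [str(math.acos(0.5)), "Error de dominio"]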
def acosd(column):
return mt.degrees(acos(column))
def asin(column):
i = 0
column = mt.convert_num_col(column)
result = list()
while i < len(column):
valor = ""
if column[i] >= -1 and 1 >= column[i]:
valor = str(math.asin(column[i]))
else:
valor = "Error de dominio"
result.insert(i + 1, valor)
i += 1
return result
def asind(column):
return mt.degrees(asin(column))
def atan(column):
i = 0
column = mt.convert_num_col(column)
result = list()
while i < len(column):
result.insert(i + 1, math.atan(column[i]))
i += 1
return result
def atand(column):
return mt.degrees(atan(column))
def atan2(column1, column2):
i = 0
column1 = mt.convert_num_col(column1)
column2 = mt.convert_num_col(column2)
result = list()
while i < len(column1):
result.insert(i + 1, math.atan2(column1[i], column2[i]))
i += 1
return result
def atan2d(column1, column2):
return mt.degrees(atan2(column1, column2))
def cos(column):
i = 0
column = mt.convert_num_col(column)
result = list()
while i < len(column):
result.insert(i + 1, math.cos(column[i]))
i += 1
return result
def cosd(column):
return mt.degrees(cos(column))
def cot(column):
i = 0
column = mt.convert_num_col(column)
result = list()
while i < len(column):
if column[i] % math.pi != 0:
result.insert(i + 1, (math.cos(column[i]) / math.sin(column[i])))
else:
result.insert(i + 1, "Error de dominio")
i += 1
return result
def cotd(column):
return mt.degrees(cot(column))
def sin(column):
i = 0
column = mt.convert_num_col(column)
result = list()
while i < len(column):
result.insert(i + 1, math.sin(column[i]))
i += 1
return result
def sind(column):
return mt.degrees(sin(column))
def tan(column):
i = 0
column = mt.convert_num_col(column)
result = list()
while i < len(column):
if (column[i] - (math.pi / 2)) % (math.pi) != 0:
result.insert(i + 1, math.tan(column[i]))
else:
result.insert(i + 1, "Error en el dominio")
i += 1
return result
def tand(column):
return mt.degrees(tan(column))
def sinh(column):
i = 0
column = mt.convert_num_col(column)
result = list()
while i < len(column):
result.insert(i + 1, math.sinh(column[i]))
i += 1
return result
def cosh(column):
i = 0
column = mt.convert_num_col(column)
result = list()
while i < len(column):
result.insert(i + 1, math.cosh(column[i]))
i += 1
return result
def tanh(column):
i = 0
column = mt.convert_num_col(column)
result = list()
while i < len(column):
result.insert(i + 1, math.tanh(column[i]))
i += 1
return result
def asinh(column):
i = 0
column = mt.convert_num_col(column)
result = list()
while i < len(column):
result.insert(i + 1, math.asinh(column[i]))
i += 1
return result
def acosh(column):
i = 0
column = mt.convert_num_col(column)
result = list()
while i < len(column):
if column[i] >= 1:
result.insert(i + 1, math.acosh(column[i]))
else:
result.insert(i + 1, "Error de dominio")
i += 1
return result
def atanh(column):
i = 0
column = mt.convert_num_col(column)
result = list()
while i < len(column):
if column[i] < 1 and column[i] > -1:
result.insert(i + 1, math.atanh(column[i]))
else:
result.insert(i + 1, "Error de dominio")
i += 1
return result
```
#### File: team04/Server/server.py
```python
import cgi
from http.server import HTTPServer, BaseHTTPRequestHandler
import socketserver
import io
import os
# Setting server port
PORT = 8000
#Def. requests handler.
class MyRequestHandler(BaseHTTPRequestHandler):
def _set_headers(self):
self.send_response(200)
self.send_header("Content-type", "text/plain") #Setting headers
self.end_headers()
def do_GET(self):
if self.path == '/':
pass
try:
myFile = open(self.path[1:]).read()
self.send_response(200)
except:
myFile = "File not found"
self.send_response(404)
self.end_headers()
self.wfile.write(bytes(myFile, 'utf-8'))
def do_POST(self):
dataSize = int(self.headers['Content-Length']) #Getting size of data
myData = self.rfile.read(dataSize) #Reading data (form)
decodedData = myData.decode("utf-8")#Decoding data
self._set_headers()
self.saveFile("database.tytus", bytes(decodedData, 'utf-8'))
self.wfile.write(bytes(decodedData, 'utf-8'))
#Method to create file on server
def saveFile(self, filename, content):
#Checking if data directory exists and created if not
if not os.path.exists('./data'):
os.makedirs('./data')
#Setting full path
myPath = "./data/" + filename
newFile = open(myPath, "wb") #Temporary example
newFile.write(content)
newFile.close()
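    # Hypothetical manual check: with the server running, a POST such as
    #   curl -d "create database test;" http://localhost:8000/
    # should echo the body back and write it to ./data/database.tytus.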
# Setting and starting server
myServer = HTTPServer(('localhost', PORT), MyRequestHandler)
print("Server running at localhost: " + str(PORT))
myServer.serve_forever()
```
#### File: storage/team07/Interfaz.py
```python
import tkinter as tk
from tkinter import *
from tkinter import ttk
class Aplicacion:
def __init__(self):
self.ventana1=tk.Tk()
self.ventana1.geometry("1400x300+100+100")
s = ttk.Style()
s.configure('Red.TLabelframe.Label', font=('Roboto Condensed', 12))
s.configure('Red.TLabelframe.Label', foreground ='green')
self.labelframe1=ttk.LabelFrame(self.ventana1, text="Base de Datos :", style = "Red.TLabelframe")
self.labelframe1.grid(column=0, row=0, padx=5, pady=10)
self.AgregarH()
self.labelframe2=ttk.LabelFrame(self.ventana1, text="Modo: ", style = "Red.TLabelframe")
self.labelframe2.grid(column=1, row=0, padx=5, pady=10)
self.avl()
self.labelframe3 = ttk.LabelFrame(self.ventana1, text="Tablas: ", style="Red.TLabelframe")
self.labelframe3.grid(column=2, row=0, padx=5, pady=10)
self.Tablas()
self.labelframe4 = ttk.LabelFrame(self.ventana1, text="Reportes: ", style="Red.TLabelframe")
self.labelframe4.grid(column=0, row=1, padx=0, pady=10)
self.Reportes()
#self.operaciones()
self.ventana1.mainloop()
    # We implement the login method:
    # The goal of the login method's algorithm is to create the 2 Labels, 2 Entries and Button
    # and add them inside the LabelFrame:
def AgregarH(self):
self.label1 = ttk.Label(self.labelframe1, text="Agregar:")
self.label1.grid(column=0, row=0, padx=4, pady=4, sticky="w")
self.AgregarHash = StringVar()
self.entry1 = ttk.Entry(self.labelframe1, width=30, textvariable=self.AgregarHash)
self.entry1.grid(column=1, row=0, padx=4, pady=4)
self.label1 = ttk.Label(self.labelframe1, text="Eliminar:")
self.label1.grid(column=0, row=1, padx=4, pady=4, sticky="w")
self.EliminarrHash = StringVar()
self.entry1 = ttk.Entry(self.labelframe1, width=30, textvariable=self.EliminarrHash)
self.entry1.grid(column=1, row=1, padx=4, pady=4)
self.label1 = ttk.Label(self.labelframe1, text="Modificar:")
self.label1.grid(column=0, row=2, padx=4, pady=4, sticky="w")
self.ModificarHash = StringVar()
self.entry1 = ttk.Entry(self.labelframe1, width=30, textvariable=self.ModificarHash)
self.entry1.grid(column=1, row=2, padx=4, pady=4)
self.label2 = ttk.Label(self.labelframe1, text="Buscar:")
self.label2.grid(column=0, row=3, padx=4, pady=4, sticky="w")
self.BuscarHash = StringVar()
self.entry2 = ttk.Entry(self.labelframe1, width=30, textvariable=self.BuscarHash)
self.entry2.grid(column=1, row=3, padx=4, pady=4, sticky="w")
self.boton1 = ttk.Button(self.labelframe1, text="Agregar", command=self.metodo)
self.boton1.grid(column=0, row=4, padx=4, pady=4)
self.boton2 = ttk.Button(self.labelframe1, text="Eliminar", command=self.metodo)
self.boton2.grid(column=1 , row=4, padx=4, pady=4)
self.boton3 = ttk.Button(self.labelframe1, text="Modificar" ,command=self.metodo)
self.boton3.grid(column=2, row=4, padx=4, pady=4)
self.boton4 = ttk.Button(self.labelframe1, text="Buscar", command=self.metodo)
self.boton4.grid(column=3, row=4, padx=4, pady=4)
def avl(self):
self.label1 = ttk.Label(self.labelframe2, text="Agregar:")
self.label1.grid(column=0, row=0, padx=4, pady=4, sticky="w")
self.AgregarAvl = StringVar()
self.entry1 = ttk.Entry(self.labelframe2, width=30, textvariable=self.AgregarAvl)
self.entry1.grid(column=1, row=0, padx=4, pady=4)
self.label1 = ttk.Label(self.labelframe2, text="Eliminar:")
self.label1.grid(column=0, row=1, padx=4, pady=4, sticky="w")
self.EliminarAvl = StringVar()
self.entry1 = ttk.Entry(self.labelframe2, width=30, textvariable=self.EliminarAvl)
self.entry1.grid(column=1, row=1, padx=4, pady=4)
self.label1 = ttk.Label(self.labelframe2, text="Modificar:")
self.label1.grid(column=0, row=2, padx=4, pady=4, sticky="w")
self.ModificarAvl = StringVar()
self.entry1 = ttk.Entry(self.labelframe2, width=30, textvariable=self.ModificarAvl)
self.entry1.grid(column=1, row=2, padx=4, pady=4)
self.label2 = ttk.Label(self.labelframe2, text="Buscar:")
self.label2.grid(column=0, row=3, padx=4, pady=4, sticky="w")
self.BuscarAvl = StringVar()
self.entry2 = ttk.Entry(self.labelframe2, width=30, textvariable=self.BuscarAvl)
self.entry2.grid(column=1, row=3, padx=4, pady=4, sticky="w")
self.boton1 = ttk.Button(self.labelframe2, text="Agregar", command=self.metodo)
self.boton1.grid(column=0, row=4, padx=4, pady=4)
self.boton2 = ttk.Button(self.labelframe2, text="Eliminar", command=self.metodo)
self.boton2.grid(column=1, row=4, padx=4, pady=4)
self.boton3 = ttk.Button(self.labelframe2, text="Modificar", command=self.metodo)
self.boton3.grid(column=2, row=4, padx=4, pady=4)
self.boton4 = ttk.Button(self.labelframe2, text="Buscar", command=self.metodo)
self.boton4.grid(column=3, row=4, padx=4, pady=4)
def Tablas(self):
self.label1 = ttk.Label(self.labelframe3, text="Agregar:")
self.label1.grid(column=0, row=0, padx=4, pady=4, sticky="w")
self.AgregarHash = StringVar()
self.entry1 = ttk.Entry(self.labelframe3, width=30, textvariable=self.AgregarHash)
self.entry1.grid(column=1, row=0, padx=4, pady=4)
self.label1 = ttk.Label(self.labelframe3, text="Eliminar:")
self.label1.grid(column=0, row=1, padx=4, pady=4, sticky="w")
self.EliminarrHash = StringVar()
self.entry1 = ttk.Entry(self.labelframe3, width=30, textvariable=self.EliminarrHash)
self.entry1.grid(column=1, row=1, padx=4, pady=4)
self.label1 = ttk.Label(self.labelframe3, text="Modificar:")
self.label1.grid(column=0, row=2, padx=4, pady=4, sticky="w")
self.ModificarHash = StringVar()
self.entry1 = ttk.Entry(self.labelframe3, width=30, textvariable=self.ModificarHash)
self.entry1.grid(column=1, row=2, padx=4, pady=4)
self.label2 = ttk.Label(self.labelframe3, text="Buscar:")
self.label2.grid(column=0, row=3, padx=4, pady=4, sticky="w")
self.BuscarHash = StringVar()
self.entry2 = ttk.Entry(self.labelframe3, width=30, textvariable=self.BuscarHash)
self.entry2.grid(column=1, row=3, padx=4, pady=4, sticky="w")
self.boton1 = ttk.Button(self.labelframe3, text="Agregar", command=self.metodo)
self.boton1.grid(column=0, row=4, padx=4, pady=4)
self.boton2 = ttk.Button(self.labelframe3, text="Eliminar", command=self.metodo)
self.boton2.grid(column=1, row=4, padx=4, pady=4)
self.boton3 = ttk.Button(self.labelframe3, text="Modificar", command=self.metodo)
self.boton3.grid(column=2, row=4, padx=4, pady=4)
self.boton4 = ttk.Button(self.labelframe3, text="Buscar", command=self.metodo)
self.boton4.grid(column=3, row=4, padx=4, pady=4)
def Reportes(self):
self.boton1 = ttk.Button(self.labelframe4, text="Bases de Datos", command=self.metodo)
self.boton1.grid(column=0, row=0, padx=4, pady=4)
self.boton2 = ttk.Button(self.labelframe4, text="Conjunto de Tablas", command=self.metodo)
self.boton2.grid(column=1, row=0, padx=4, pady=4)
self.boton3 = ttk.Button(self.labelframe4, text="Tabla", command=self.metodo)
self.boton3.grid(column=2, row=0, padx=4, pady=4)
self.boton4 = ttk.Button(self.labelframe4, text="Tupla", command=self.metodo)
self.boton4.grid(column=3, row=0, padx=4, pady=4)
def metodo(self):
print(self.AgregarHash.get())
aplicacion1=Aplicacion()
```
#### File: storage/team11/Manager.py
```python
from DataBase import DataBase
from Binary import verify_string, verify_columns
from ArbolAVLManager import ArbolAVLDB
class Manager:
def __init__(self):
self.__database = None
        self.__tree__db = ArbolAVLDB()  # Stores all the databases
    # Returns the database name
    def get_database(self):
        return self.__database
    # Sets the new database name
    def set_database(self, database):
        self.__database = database
    # Returns the tree where the datatables are stored
    def get_tree_db(self):
        return self.__tree__db
    # Functions for databases
def createDataBase(self, db_nombre):
if verify_string(db_nombre):
if self.__tree__db.search_value(db_nombre) is None:
new_db = DataBase(db_nombre)
self.__tree__db.add(new_db)
return 0
else:
return 2
else:
return 1
    # Returns the list of existing databases
    def showDatabases(self):
        lista2 = list()
        lista = self.__tree__db.get_databases()
        for db in lista:
            lista2.append(db.get_database())
return lista2
def alterDatabase(self,old_db,new_db):
bandera = self.__tree__db.search_value(new_db)
if bandera is None:
if verify_string(new_db):
db = self.__tree__db.search_value(old_db)
if db is not None:
self.__tree__db.delete_nodo(db.get_element().get_database())
db = db.get_element()
db.set_database(new_db)
self.__tree__db.add(db)
return 0
else:
return 2
else:
return 1
else:
return 3
def dropDatabase(self,db_name):
if verify_string(db_name):
db_search = self.__tree__db.search_value(db_name)
if db_search is not None:
self.__tree__db.delete_nodo(db_name)
return 0
else:
return 2
else:
return 1
    # Functions for tables
def createTable(self,database,table_name,number_columns):
db = self.__tree__db.search_value(database)
if db is not None:
respuesta = db.get_element().create_table(table_name,number_columns)
else:
return 2
return respuesta
def showTables(self,database):
db = self.__tree__db.search_value(database)
if db is not None:
tablas = db.get_element().show_tables()
else:
return None
return tablas
```
#### File: storage/team12/frm_tablas.py
```python
from tkinter import *
from tkinter import ttk
import tkinter.font as tkFont
from crud_bd import CRUD_DataBase
def mostrarTablas(text):
crud = CRUD_DataBase()
objeto = crud.searchDatabase(text)
# print("objeto: {}".format(objeto.name))
window = Tk()
# Centrado de la Ventana
ancho_ventana = 700
alto_ventana = 450
x_ventana = window.winfo_screenwidth() // 2 - ancho_ventana // 2
y_ventana = window.winfo_screenheight() // 2 - alto_ventana // 2
posicion = str(ancho_ventana) + "x" + str(alto_ventana) + "+" + str(x_ventana) + "+" + str(y_ventana)
window.geometry(posicion)
# Edicion de la Ventana
window.resizable(0,0)
window.title("Tablas")
window.geometry('700x450')
window.mainloop()
def iniciar():
crud = CRUD_DataBase()
list_words = crud.showDatabases()
var = 0
# Esta es la ventana principal
ventana_principal = Tk()
ventana_principal.title('show Databases')
ventana_principal.geometry("550x500")
#---------------------------------------------------------------------------------
#---------------------------------------------------------------------------------
# Edicion de la Ventana
ancho_ventana = 550
alto_ventana = 500
x_ventana = ventana_principal.winfo_screenwidth() // 2 - ancho_ventana // 2
y_ventana = ventana_principal.winfo_screenheight() // 2 - alto_ventana // 2
posicion = str(ancho_ventana) + "x" + str(alto_ventana) + "+" + str(x_ventana) + "+" + str(y_ventana)
ventana_principal.geometry(posicion)
# Edicion de la Ventana
ventana_principal.resizable(0,0)
dimension = str(ancho_ventana)+'x'+str(alto_ventana)
ventana_principal.geometry(dimension)
ventana_principal.configure(bg="white")
#---------------------------------------------------------------------------------
#---------------------------------------------------------------------------------
# Se crea un marco principal
marco_principal = Frame(ventana_principal)
marco_principal.pack(fill=BOTH, expand=1)
# Se crea un canvas
var_canvas = Canvas(marco_principal)
var_canvas.config(bg="red")
var_canvas.pack(side=LEFT, fill=BOTH, expand=1)
# Se agrega un scrollbar al canvas
var_scrollbar = Scrollbar(marco_principal, orient=VERTICAL, command=var_canvas.yview)
var_scrollbar.pack(side=RIGHT, fill=Y)
# Se configura el canvas
var_canvas.configure(yscrollcommand=var_scrollbar.set)
var_canvas.bind('<Configure>', lambda e: var_canvas.configure(scrollregion = var_canvas.bbox("all")))
# Se crea otro marco dentro del canvas
second_frame = Frame(var_canvas)
# Se agrega ese nuevo marco a la ventana en el canvas
var_canvas.create_window((0,0), window=second_frame, anchor="nw")
var_font = tkFont.Font(size=13, weight="bold", family="Arial")
for word in list_words:
btn = Button(second_frame, text=word, width=58, height=2, bg="#DBE2FC", font=var_font, command=lambda txt=word:mostrarTablas(txt))
btn.grid(row=var, column=0, pady=1)
var += 1
ventana_principal.mainloop()
```
#### File: storage/team12/Ultimas 5 funciones.py
```python
def insert(database, table, register):
#El método BuscarBase(), Pendiente de definirlo
BaseDatos = BuscarBase(database)
if BaseDatos == None:
#La base de datos no se encontró
return 2
else:
existe = False
for i in range(BaseDatos.getTotalTablas()):
if BaseDatos.Tablas[i].getNombre() == table:
existe = True
if BaseDatos.Tablas[i].getColumnas() != len(register):
#El numero de columnas a insertar no coincide con el numero de columnas de la tabla
return 5
repetidas = VerificarLLavesDuplicadas(BaseDatos.Tablas[i].getPK(),BaseDatos.Tablas[i].VerTabla(),register)
if repetidas[0] == True:
return 4
BaseDatos.Tablas[i].agregarRegistro(register)
#Operación exitosa
return 0
if existe == False:
#La tabla no existe
return 3
return 1
# Method to change the values of a specific record in a table
def update(database, table, register, columns):
BaseDatos = BuscarBase(database)
if BaseDatos == None:
#La base de datos no se encontró
return 2
else:
existe = False
for i in range(BaseDatos.getTotalTablas()):
if BaseDatos.Tablas[i].getNombre() == table:
existe = True
repetidas = VerificarLLavesDuplicadas(BaseDatos.Tablas[i].getPK(),BaseDatos.Tablas[i].VerTabla(),columns)
if repetidas[0] == False:
#La llave no se ha encontrado
return 4
for key in register:
BaseDatos.Tablas[i].actualizarRegistro(repetidas[1],key,register[key])
#Operacion exitosa
return 0
if existe == False:
#La tabla no existe
return 3
return 1
# Method to delete a record from a table in a database
def delete(database, table, columns):
BaseDatos = BuscarBase(database)
if BaseDatos == None:
#La base de datos no se encontró
return 2
else:
existe = False
for i in range(BaseDatos.getTotalTablas()):
if BaseDatos.Tablas[i].getNombre() == table:
existe = True
repetidas = VerificarLLavesDuplicadas(BaseDatos.Tablas[i].getPK(),BaseDatos.Tablas[i].VerTabla(),columns)
if repetidas[0] == False:
#La llave no se ha encontrado
return 4
BaseDatos.Tablas[i].eliminarRegistro(repetidas[1])
#Operacion exitosa
return 0
if existe == False:
#La tabla no existe
return 3
return 1
# def truncate(database: str, table: str) -> int:
def truncate(database, table):
BaseDatos = BuscarBase(database)
if BaseDatos == None:
#La base de datos no se encontró
return 2
else:
existe = False
for i in range(BaseDatos.getTotalTablas()):
if BaseDatos.Tablas[i].getNombre() == table:
existe = True
BaseDatos.Tablas[i].vaciarTabla()
#Operacion exitosa
return 0
#No se encontro la tabla
return 3
#Algún problema en ejecucion
return 1
#def extractRow(database: str, table: str, columns: list) -> list:
def extractRow(database, table, columns):
BaseDatos = BuscarBase(database)
if BaseDatos == None:
#La base de datos no se encontró
return []
else:
existe = False
for i in range(BaseDatos.getTotalTablas()):
if BaseDatos.Tablas[i].getNombre() == table:
existe = True
repetidas = VerificarLLavesDuplicadas(BaseDatos.Tablas[i].getPK(),BaseDatos.Tablas[i].VerTabla(),columns)
if repetidas[0] == False:
#La llave no se ha encontrado
return []
#Operacion exitosa
return BaseDatos.Tablas[i].extraerRegistro(repetidas[1])
if existe == False:
#La tabla no existe
return []
return []
# Method to check whether there are duplicate keys for the given record
def VerificarLLavesDuplicadas(llaves,arreglo,registro):
Repetidos = []
Comparar = []
for i in range(len(arreglo)):
#print(type(llaves))
if type(llaves) == int and type(registro)==int:
#print("Se compara "+str(arreglo[i][llaves])+" con "+str(registro)+" comparacion 1")
if str(arreglo[i][llaves]) == str(registro):
# La llave primaria está duplicada
return [True,i]
elif type(llaves) == int:
#print("Se compara "+str(arreglo[i][llaves])+" con "+str(registro[llaves])+" comparacion 2")
if str(arreglo[i][llaves]) == str(registro[llaves]):
# La llave primaria está duplicada
return [True,i]
else:
for j in range(len(llaves)):
#print("Comparacion 3")
if str(arreglo[i][llaves[j]]) == str(registro[j]):
# La llave primaria está duplicada
Repetidos.append(True)
Comparar.append(True)
else:
Repetidos.append(False)
Comparar.append(True)
if Repetidos == Comparar and Repetidos!=[]:
return [True,i]
Repetidos=[]
Comparar=[]
return [False,0]
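# Return format sketch: [True, i] when the key(s) of row i match the given register/columns,
# otherwise [False, 0]. For example (hypothetical data), with llaves=0 and
# arreglo=[["1","a"],["2","b"]], VerificarLLavesDuplicadas(0, arreglo, ["2","x"]) -> [True, 1].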
```
#### File: team16/DataAccessLayer/handler.py
```python
import pickle
import os
class Handler:
# Databases
@staticmethod
def actualizarArchivoDB(databases):
f = open('data/root.dat', 'wb')
pickle.dump(databases, f)
f.close()
@staticmethod
def leerArchivoDB() -> list:
if not os.path.exists('data'):
os.makedirs('data')
if not os.path.exists('data/root.dat'):
f = open('data/root.dat', 'wb')
f.close()
if os.path.getsize('data/root.dat') > 0:
with open('data/root.dat', 'rb') as f:
return pickle.load(f)
return []
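    # Note: root.dat holds a single pickled list of database objects, so these two
    # methods are meant to be used as a write/read pair, e.g. (sketch):
    #   Handler.actualizarArchivoDB(dbs); dbs = Handler.leerArchivoDB()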
# Tables
@staticmethod
def actualizarArchivoTB(tabletrees, database: str, tableName: str):
f = open('data/' + str(tableName) + '-' + str(database) + '.tbl', 'wb')
pickle.dump(tabletrees, f)
f.close()
@staticmethod
def siExiste(database: str, tableName: str):
return os.path.isfile('data/' + str(tableName) + '-' + str(database) + '.tbl')
@staticmethod
def leerArchivoTB(database: str, tableName: str):
if os.path.getsize('data/' + str(tableName) + '-' + str(database) + '.tbl') > 0:
with open('data/' + str(tableName) + '-' + str(database) + '.tbl', 'rb') as f:
return pickle.load(f)
else:
return None
@staticmethod
def borrarArchivo(filename):
try:
os.remove('data/' + filename)
except:
print("No se encontró el archivo")
@staticmethod
def renombrarArchivo(oldName, newName):
try:
os.rename('data/' + oldName, 'data/' + newName)
except:
print("No se pudo renombrar")
@staticmethod
def findCoincidences(database, tablesName):
tmp = []
for i in tablesName:
try:
if os.path.isfile('data/' + str(i) + '-' + str(database) + '.tbl'):
tmp.append(str(i))
except:
continue
return tmp
``` |
{
"source": "18swenskiq/Squidski-Bot-PY",
"score": 2
} |
#### File: SquidskiBot/SquidskiBotMain/CommandHandler.py
```python
import discord
import sys
# Makes the Commands and CSharp layers sections work
sys.path.append('./CasinoModule')
sys.path.append('./Commands')
sys.path.append('./csLayers')
# Commands
from apiworkshopsearch import apiworkshopsearch
from CasinoModule import CasinoModule
from bruhmoment import bruhmoment
from currency import currency
from fanfic import fanfic
from ketalquote import ketalquote
from help import help
from helpadmin import helpadmin
from mute import mute
from pings import pings
from purge import purge
from rolepinger import rolepinger
from seinfeldme import seinfeldme
from squidskifact import squidskifact
from sws import sws
# CSharp Layers
from VDCsearch import VDCsearch
class CommandHandler():
async def commandParser(self, message, globalCall):
messageContent = message.content.lower()
# Casino Module
if (messageContent.startswith(globalCall + "c ")):
gambleTime = CasinoModule()
await gambleTime.commandReciever(message)
return
# Help message
if (messageContent == globalCall + "help"):
myHelp = help()
await myHelp.myEmbed(message)
return
# Convert Currency
if (messageContent.startswith(globalCall + "currency")):
newCurrency = currency()
await newCurrency.currencyConverter(message)
return
# Search workshop
if (messageContent.startswith((globalCall + 'sws'))):
searchWorkshop = sws()
await message.channel.send("Searching the workshop. Please wait...")
searchIt = searchWorkshop.theMain(message.content)
if not searchIt:
await message.channel.send("That's a fat error from me dawg. The search came up empty.")
return
else:
await message.channel.send(searchIt)
return
# Get random Seinfeld quote
if (messageContent.startswith((globalCall + 'seinfeldme'))):
getSeinfeldQuote = seinfeldme()
await getSeinfeldQuote.getQuote(message)
return
# Get random ketal quote
if (messageContent.startswith((globalCall + 'ketalquote'))):
getKetalQuote = ketalquote()
await getKetalQuote.getQuote(message)
return
# Get random Squidski fact
if (messageContent.startswith((globalCall + 'squidskifact'))):
squidFact = squidskifact()
await squidFact.getQuote(message)
return
# Give/Remove Pings role
if (messageContent.startswith((globalCall + 'pings'))):
myPings = pings()
await myPings.changePingRoleState(message)
return
# Admin only commands (Administrator only)
if (((messageContent).split(" "))[0].startswith(globalCall + "helpadmin")):
myAdminCommands = helpadmin()
await myAdminCommands.checkPerms(message)
return
# Purge messages (Administrator Only)
if (((messageContent).split(" "))[0].startswith(globalCall + "purge")):
myPurge = purge()
await myPurge.purger(message)
return
# Ping the Pings role (Administrator Only)
if (messageContent.startswith((globalCall + 'rolepinger'))):
myPinger = rolepinger()
await myPinger.pinger(message)
return
# Mute a bad user (Administrator Only)
if (messageContent.startswith((globalCall + 'mute'))):
myMuter = mute()
await myMuter.mute_users(message)
return
# Checks if is bruh moment
if (messageContent.startswith((globalCall + 'bruhmoment'))):
myBruh = bruhmoment()
await myBruh.isBruhMoment(message)
return
# Searches the Steam Workshop via the API
if (messageContent.startswith((globalCall + 'wtest'))):
mySAPI = apiworkshopsearch()
args = message.content.split(" ")
await mySAPI.requestInfo(message, args[1], " ".join(args[2:]))
return
# Pulls a random line from the Source Engine fanfictions
if (messageContent.startswith((globalCall + 'fanfic'))):
getFanfic = fanfic()
await getFanfic.getLine(message)
return
# Search VDC
if (messageContent.startswith((globalCall + 'v'))):
searchVDC = VDCsearch()
await searchVDC.searchTheVDC(message)
return
```
#### File: SquidskiBotMain/Commands/help.py
```python
import discord
class help():
# Builds a discord embed object to display as the help message
async def myEmbed(self, message):
embed = discord.Embed(title="Squidski-Bot PY", description="I am a bot made by Squidski#9545. I can do multiple things and I am still in development", color=0x00ff00)
embed.add_field(name="Search Workshop", value=">sws <game> <type> <search term>", inline=False)
embed.add_field(name="Get Random Seinfeld Quote", value=">seinfeldme", inline=False)
embed.add_field(name="Subscribe or Unsubscribe from pings", value=">pings", inline=False)
embed.add_field(name="Check if something is a bruh moment", value=">bruhmoment", inline=False)
embed.add_field(name="Get a random fact about Squidski", value=">squidskifact", inline = False)
embed.add_field(name="Get a random line from an SE Discord fanfic", value=">fanfic", inline = False)
embed.add_field(name="Get a ketal quote", value=">ketalquote", inline= False)
embed.add_field(name="Admin only commands", value=">helpadmin",inline=False)
await message.channel.send(embed = embed)
```
#### File: SquidskiBotMain/Commands/pings.py
```python
import discord
import json
import sys
class pings():
# Subscribe or unsubsribe to 'pings' role
# TODO: Make it pull the name using the ID, rather than hardcoding the ID
async def changePingRoleState(self, msg):
with open('settings.json') as json_file:
settingsFile = json.load(json_file)
# Unsubscribes from 'pings' role
if settingsFile["notificationsRole"] in str(msg.author.roles):
await msg.author.remove_roles(discord.utils.get(msg.guild.roles, name='Pings'))
await msg.channel.send("Removed the 'Pings' role!")
# Subscribes to 'pings' role
else:
await msg.author.add_roles(discord.utils.get(msg.guild.roles, name='Pings'))
await msg.channel.send("Added the 'Pings' role!")
```
#### File: SquidskiBotMain/Commands/rolepinger.py
```python
import discord
import json
import sys
class rolepinger():
async def pinger(self, message):
# Checks for Admin
with open('settings.json') as json_file:
settingsFile = json.load(json_file)
if settingsFile["adminRoleId"] in str(message.author.roles):
# Makes the Pings role mentionable
await (discord.utils.get(message.guild.roles, name='Pings')).edit(mentionable=True)
# Sends ping
await message.channel.send(f'<@&{settingsFile["notificationsRole"]}>')
await message.channel.send(f'*To unsubscribe from pings, type >pings in <#{settingsFile["botChannel"]}>*')
# Makes the Pings role unmentionable
await (discord.utils.get(message.guild.roles, name='Pings')).edit(mentionable=False)
else:
await message.channel.send("You must have the `Administrator` role to do this...")
```
#### File: SquidskiBot/SquidskiBotMain/GenerateWorkshopURL.py
```python
from urllib.parse import quote
class GenerateWorkshopURL():
def __init__(self):
        self.classType = ""
# Generates the actual Steam workshop URL based on the user's arguments
def genURL(self, gameID, myType, searchTerm):
        steamString = "https://steamcommunity.com/workshop/browse/?appid=" + str(gameID) + "&searchtext=" + searchTerm + "&childpublishedfileid=0&browsesort=trend&section="
# What to add to the URL depends on type & gameID
if gameID == 4000:
# Garry's Mod type handling
if (myType == 'collection'): steamString += 'collections'
elif (myType == 'item'): steamString += 'readytouseitems'
else: steamString += 'readytouseitems&requiredtags[]=' + myType
else:
# Default type handling
if (myType == 'collection'): steamString += 'collections'
elif (myType == 'item'):
if (gameID == 730): steamString += 'mtxitems'
else: steamString += 'readytouseitems'
elif (myType == 'map'): steamString += 'readytouseitems'
else: steamString += 'merchandise'
# This string means sort by top all time
return (steamString + "&actualsort=trend&p=1&days=-1")
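        # For illustration (hypothetical inputs), genURL(730, 'map', 'dust') builds:
        # https://steamcommunity.com/workshop/browse/?appid=730&searchtext=dust&childpublishedfileid=0&browsesort=trend&section=readytouseitems&actualsort=trend&p=1&days=-1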
def validateSearch(self, gameID, type, searchTerm):
print(" ")
print("=============Workshop Search=============")
print("Searching '" + str(gameID) + "' workshop for '" + str(searchTerm) + "' of type: " + type)
# Checks the type. if it is invalid, raises an error
validTypes = ['item','items','addons','maps','merchandise','collections','map','collection','addon', 'merch', 'skin','skins']
if (type.lower() not in validTypes):
return "E0"
else:
print("User search type is " + type)
type = self.trueType(type)
            self.classType = type
# Steam only supports certain types for each individual thing, hence the "interpreted" type
print("The interpreted search type is " + type)
# Tests for invalid combos of game and type
if (gameID == 730):
validCSGOTypes = ['item', 'map', 'collection','merchandise']
if (type not in validCSGOTypes): return "E1"
elif (gameID == 620):
validP2Types = ['item', 'map','merchandise','collection']
if (type not in validP2Types): return "E1"
elif (gameID == 4000):
validGMODTypes = ['item', 'collection', 'map', 'addon']
if (type not in validGMODTypes): return "E1"
elif (gameID == 550):
validL4D2Types = ['item', 'collection']
if (type not in validL4D2Types): return "E1"
else: return "E5"
print(str(gameID) + " and " + type + " are a valid combination.")
# Scrubs user inputs to be URL friendly
searchTerm = str(searchTerm).replace(" ", "+")
searchTerm = quote(searchTerm)
print("New user scrubbed inputs: " + str(gameID), type, searchTerm)
return self.genURL(gameID, type, searchTerm)
# Gives the 'interpreted' search type
def trueType(self, type):
if (type.lower() == 'item' or type.lower() == 'items'): return 'item'
elif (type.lower() == 'collection' or type.lower() == 'collections'): return 'collection'
elif (type.lower() == 'merchandise' or type.lower() == 'merch'): return 'merchandise'
elif (type.lower() == 'addons' or type.lower() == 'addon'): return 'item'
elif (type.lower() == 'map' or type.lower() == 'maps'): return 'map'
elif (type.lower() == 'skin' or type.lower() == 'skins'): return 'item'
else: return "E3"
```
#### File: SquidskiBot/SquidskiBotMain/SquidskiBotMain.py
```python
import asyncio
import datetime
import discord
import json
import sys
import logging
# Various other files
from CommandHandler import CommandHandler
# This settings file contains a bunch of variables
with open('settings.json') as json_file:
settingsFile = json.load(json_file)
# Picks the symbol to prefix onto commands
globalCall = settingsFile["globalCall"]
# Initialization alerts
print("The call symbol for the bot is " + globalCall)
# Logger setup
logger = logging.getLogger('discord')
logger.setLevel(logging.CRITICAL)
handler = logging.FileHandler(filename='discord.log', encoding='utf-8', mode='w')
handler.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s:%(name)s: %(message)s'))
logger.addHandler(handler)
# Here's where the magic happens
class MyClient(discord.Client):
# Adds Pings role on join
async def on_member_join(self, member):
await member.add_roles(discord.utils.get(member.guild.roles, name='Pings'))
print(str(member) + " was given the pings role!")
# Initializes stuff
async def on_ready(self):
print(f"Logged on as {self.user}")
# Respond to messages starts here
async def on_message(self, message):
if message.author == self.user:
return
# Handle the Commands with globalCall
if (message.content.startswith(globalCall)):
myCommand = CommandHandler()
await myCommand.commandParser(message, globalCall)
# Good night response
if (message.content.lower() in ["good night","gn","goodnight"]):
await (message.channel.send("Good night " + str(message.author)[:-5]))
# Instantiate client
client = MyClient()
# Read the bot token and start the bot (run() blocks until the client shuts down)
client.run(settingsFile["botKey"])
``` |
{
"source": "18wh1a0590/NLP-Text-Generation",
"score": 2
} |
#### File: NLP-Text-Generation/tests/test_video_reader.py
```python
from re import A
import pytest
import torch
from video_reader import VideoDataset
@pytest.fixture(scope='module')
def args():
class Object(object):
pass
args = Object()
args.dataset = "data/ssv2small"
args.seq_len = 8
args.img_size = 224
args.way = 5
args.shot = 3
args.query_per_class = 2
yield args
def test_num_vids_read(args):
vd = VideoDataset(args)
assert len(vd.train_split.videos) == 50
def test_check_returned_data_size(args):
vd = VideoDataset(args)
assert vd[0]['support_set'].shape == torch.Size([vd.args.seq_len * vd.args.way * vd.args.shot, 3, vd.args.img_size, vd.args.img_size])
def test_single_video_mode(args):
vd = VideoDataset(args, meta_batches=False)
vid, gt = vd[0]
assert vid.shape == torch.Size([vd.args.seq_len, 3, vd.args.img_size, vd.args.img_size])
```
#### File: NLP-Text-Generation/videotransforms/stack_transforms.py
```python
import numpy as np
import PIL
import torch
from videotransforms.utils import images as imageutils
class ToStackedTensor(object):
"""Converts a list of m (H x W x C) numpy.ndarrays in the range [0, 255]
or PIL Images to a torch.FloatTensor of shape (m*C x H x W)
in the range [0, 1.0]
"""
def __init__(self, channel_nb=3):
self.channel_nb = channel_nb
def __call__(self, clip):
"""
Args:
clip (list of numpy.ndarray or PIL.Image.Image): clip
(list of images) to be converted to tensor.
"""
# Retrieve shape
if isinstance(clip[0], np.ndarray):
h, w, ch = clip[0].shape
assert ch == self.channel_nb, 'got {} channels instead of 3'.format(
ch)
elif isinstance(clip[0], PIL.Image.Image):
w, h = clip[0].size
else:
raise TypeError('Expected numpy.ndarray or PIL.Image\
but got list of {0}'.format(type(clip[0])))
np_clip = np.zeros([self.channel_nb * len(clip), int(h), int(w)])
# Convert
for img_idx, img in enumerate(clip):
if isinstance(img, np.ndarray):
pass
elif isinstance(img, PIL.Image.Image):
img = np.array(img, copy=False)
else:
raise TypeError('Expected numpy.ndarray or PIL.Image\
but got list of {0}'.format(type(clip[0])))
img = imageutils.convert_img(img)
np_clip[img_idx * self.channel_nb:(
img_idx + 1) * self.channel_nb, :, :] = img
tensor_clip = torch.from_numpy(np_clip)
return tensor_clip.float().div(255)
``` |
{
"source": "1900zyh/BasicSR",
"score": 2
} |
#### File: BasicSR/basicsr/test.py
```python
import logging
import random
import torch
import argparse
import sys
from os import path as osp
root_path = osp.abspath(osp.join(__file__, osp.pardir, osp.pardir))
sys.path.append(root_path)
from basicsr.data import build_dataloader, build_dataset
from basicsr.models import build_model
from basicsr.utils import get_env_info, get_root_logger, get_time_str, make_exp_dirs, set_random_seed
from basicsr.utils.options import dict2str, parse
def test_pipeline(opt):
# random seed
seed = opt.get('manual_seed')
if seed is None:
seed = random.randint(1, 10000)
opt['manual_seed'] = seed
set_random_seed(seed + opt['rank'])
torch.backends.cudnn.benchmark = True
# torch.backends.cudnn.deterministic = True
# mkdir and initialize loggers
make_exp_dirs(opt)
log_file = osp.join(opt['path']['log'], f"test_{opt['name']}_{get_time_str()}.log")
logger = get_root_logger(logger_name='basicsr', log_level=logging.INFO, log_file=log_file)
logger.info(get_env_info())
logger.info(dict2str(opt))
# create test dataset and dataloader
test_loaders = []
for phase, dataset_opt in sorted(opt['datasets'].items()):
test_set = build_dataset(dataset_opt)
test_loader = build_dataloader(
test_set, dataset_opt, num_gpu=opt['num_gpu'], dist=opt['dist'], sampler=None, seed=opt['manual_seed'])
logger.info(f"Number of test images in {dataset_opt['name']}: {len(test_set)}")
test_loaders.append(test_loader)
# create model
model = build_model(opt)
for test_loader in test_loaders:
test_set_name = test_loader.dataset.opt['name']
logger.info(f'Testing {test_set_name}...')
model.validation(test_loader, current_iter=opt['name'], tb_logger=None, save_img=opt['val']['save_img'])
if __name__ == '__main__':
root_path = osp.abspath(osp.join(__file__, osp.pardir, osp.pardir))
parser = argparse.ArgumentParser()
parser.add_argument('-opt', type=str, required=True, help='Path to option YAML file.')
parser.add_argument('--auto_resume', action='store_true')
parser.add_argument('--dist', action='store_true')
parser.add_argument('--mnt', type=str, default='~/BasicSR')
parser.add_argument('--nfs', type=str, default='~/BasicSR')
args = parser.parse_args()
opt = parse(args.opt, mnt=args.mnt, nfs=args.nfs, dist=args.dist, is_train=False)
opt['auto_resume'] = args.auto_resume
opt['rank'] = 0
test_pipeline(opt)
``` |
{
"source": "1902309/Aula-Git-23MAR2020",
"score": 2
} |
#### File: 1902309/Aula-Git-23MAR2020/teste.py
```python
import pytest
from principal import somar
def test_soma():
assert somar(2,6)==8
``` |
{
"source": "1904labs/docker-twitter-streamer",
"score": 2
} |
#### File: docker-twitter-streamer/src/kinesis_producer.py
```python
import os
import sys
import logging
# addlib
import boto3
log = logging.getLogger(__name__)
class RecordAccumulator(object):
def __init__(self):
self.limit = 20
self.container = []
def empty(self):
result, self.container = self.container, []
return result
def full(self):
return True if len(self.container) >= self.limit else False
def append(self, record):
self.container.append(record)
class KinesisProducer(object):
def __init__(self, api_name, region_name, stream_name):
self.client = boto3.client(api_name, region_name=region_name)
self.stream_name = stream_name
self.accumulator = RecordAccumulator()
def send(self, topic, data):
self.accumulator.append({
"Data": data.encode('utf-8'),
})
if self.accumulator.full():
return self.client.put_record_batch(
Records=self.accumulator.empty(),
DeliveryStreamName=self.stream_name,
)
else:
return True
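        # Note on batching: put_record_batch is only called once the accumulator reaches its
        # limit of 20 records; until then send() returns True without flushing, so up to 19
        # records can sit in memory if the tweet stream goes quiet.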
```
#### File: docker-twitter-streamer/src/tweepy_stream.py
```python
import os
import sys
import logging
from tweepy import Stream, StreamListener, OAuthHandler
log = logging.getLogger(__name__)
class TweepyStream(Stream):
def __init__(self, **kwargs):
producer = kwargs.pop("producer", StdoutProducer())
filteron = kwargs.pop("filter", "#")
authkeys = ['consumer_key', 'consumer_secret', 'access_token', 'access_token_secret']
authargs = {key: kwargs.pop(key) for key in authkeys}
# add listener, remove producer
if 'listener' not in kwargs:
kwargs['listener'] = TweepyStreamListener(producer, filteron)
# add auth remove login info
if 'auth' not in kwargs:
kwargs['auth'] = self._get_auth(**authargs)
super().__init__(**kwargs)
def _get_auth(self, consumer_key, consumer_secret, access_token, access_token_secret):
auth = OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
return auth
class TweepyStreamListener(StreamListener):
def __init__(self, producer, topic):
self.producer = producer
self.topic = topic
def on_data(self, data):
self.producer.send(self.topic, data)
return True
def on_error(self, status):
print("Error: " + str(status))
class StdoutProducer(object):
def send(self, topic, data):
print(f"{topic}: {data}\n")
return True
``` |
{
"source": "19-1-skku-oss/2019-1-OSS-E3",
"score": 3
} |
#### File: freegames/Code Description/bounce_d.py
```python
"""Bounce, a simple animation demo. # Bounce, 간단한 애니메이션 데모
Exercises # 연습문제들
1. Make the ball speed up and down. # 1. 공의 속도를 빠르거나 느리게 만들자
2. Change how the ball bounces when it hits a wall. # 2. 공이 벽이랑 부딪힐때 튀기는 방법을 바꿔보자
3. Make the ball leave a trail. # 3. 공이 이동하면서 자취를 남기도록 해보자
4. Change the ball color based on position. # 4. 위치에 따른 공의 색깔을 바꿔보자
Hint: colormode(255); color(0, 100, 200)
"""
from random import * # random 모듈을 불러온다
from turtle import * # turtle 모듈을 불러온다
from freegames import vector # freegames utils.py에서 선언된 vector를 불러온다
def value(): # (-5, -3) 그리고 (3, 5) 사이에만 있는 랜덤한 수를 생성하는 함수
"Randomly generate value between (-5, -3) or (3, 5)."
return (3 + random() * 2) * choice([1, -1])
ball = vector(0, 0) # 공의 초기 좌표를 (0, 0)으로 설정해준다
aim = vector(value(), value()) # aim의 초기 좌표는 value 함수에서 구한 랜덤 값을 각각 x좌표와 y좌표에 넣어준다
def draw(): # 공을 움직여주고 그 화면을 나타내주는 함수
"Move ball and draw game."
ball.move(aim) # 공을 aim에 설정되있는 좌표만큼 움직여준다
x = ball.x # x와 y를 ball.x와 ball.y 값으로 설정해준다
y = ball.y
if x < -200 or x > 200: # 만약 x가 -200보다 작거나 200보다 크면 aim의 x좌표의 부호를 바꿔준다
aim.x = -aim.x
if y < -200 or y > 200: # 만약 y가 -200보다 작거나 200보다 크면 aim의 x좌표의 부호를 바꿔준다
aim.y = -aim.y
clear() # turtle 모듈의 함수인데 거북이를 그대로 둔 채 화면을 지워준다
goto(x, y) # 거북이를 (x, y)좌표로 이동시켜준다
dot(10) # 거북이가 있는 위치에 반경 10인 원을 그려준다
ontimer(draw, 50) # 50ms 마다 draw 함수가 실행되도록 해준다
setup(420, 420, 370, 0) # 초기 그래픽 설정을 해준다
hideturtle() # turtle 모듈의 거북이를 숨겨준다
tracer(False) # 거북이가 움직이는 자취를 숨겨준다
up() # 펜을 들게 해준다
draw() # 들고 있는 펜으로 화면 위에 그림을 그려준다
done() # turtle 모듈을 종료 시켜준다
```
#### File: freegames/Code Description/connect_d.py
```python
"""Connect Four Exercises # 연습문제
1. Change the colors. # 1. 색깔을 바꿔보세요.
2. Draw squares instead of circles for open spaces. # 2. 빈공간에 원을 대신해 사각형을 그려보세요.
3. Add logic to detect a full row. # 3. 꽉 찬 열을 판단하는 코드를 작성해 보세요.
4. Create a random computer player. # 4. 가상의 컴퓨터 플레이어를 만들어보세요.
5. How would you detect a winner? # 5. 어떻게하면 승자를 판단할 수 있을까요?
"""
from turtle import *  # import the turtle module
from freegames import line  # import the line function from the freegames module
turns = {'red': 'yellow', 'yellow': 'red'}  # colors used to alternate turns
state = {'player': 'yellow', 'rows': [0] * 8}  # current player's color and the fill count of each column
def grid():
    "Draw Connect Four grid."  # draws the background board
    bgcolor('light blue')  # set the background color
    for x in range(-150, 200, 50):  # draw the vertical dividing lines
        line(x, -200, x, 200)
    for x in range(-175, 200, 50):  # draw the empty slots over the background
        for y in range(-175, 200, 50):
            up()
            goto(x, y)
            dot(40, 'white')  # draw a circle (size, color)
    update()
def tap(x, y):  # handle a click from the user
    "Draw red or yellow circle in tapped row."  # drop a red or yellow disc
    player = state['player']  # current player taken from the state dict
    rows = state['rows']  # per-column fill counts taken from the state dict
    row = int((x + 200) // 50)
    count = rows[row]  # number of discs already in the clicked column
    x = ((x + 200) // 50) * 50 - 200 + 25
    y = count * 50 - 200 + 25
    up()
    goto(x, y)  # move to the slot position (x, y)
    dot(40, player)  # draw the disc
    update()
    rows[row] = count + 1
    state['player'] = turns[player]
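# --- Hypothetical sketch for exercise 3 (not in the original code): each column
# holds 8 slots, so the clicked column is full once its count reaches 8, e.g.
#   def full(row):
#       return state['rows'][row] >= 8
# which tap() could check before drawing a disc.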
setup(420, 420, 370, 0)  # set up the window that holds the grid
hideturtle()  # hide the turtle cursor
tracer(False)  # turn off drawing animation
grid()  # draw the grid
onscreenclick(tap)  # call tap whenever the screen is clicked
done()  # run the turtle event loop
```
#### File: freegames/Code Description/pong_d.py
```python
"""pong.py를 한국어로 알기 쉽게 설명하는 파일"""
"""Pong, classic arcade game. # Pong, 클래식 아케이드 게임
Excercises # 연습문제들
1. Change the colors. # 1. pong에서 색깔을 어떻게 수정하느냐
2. What is the frame rate? Make it faster or slower. # 2. 프레임의 속도는 어떤가? 이를 수정해보라
3. Change the speed of the ball. # 3. 공의 속도를 수정해보라
4. Change the size of the paddles. # 4. 페달의 사이즈를 수정해보라
5. Change how the ball bounces off walls. # 5. 공이 벽을 넘어갔을 때 어떻게 되는지를 수정하라
6. How would you add a computer player? # 6. 컴퓨터 플레이어를 어떻게 추가할까?
6. Add a second ball. # 7. 두번째 공을 추가해보라
"""
from random import choice, random  # import choice and random from the random module
from turtle import *  # import the turtle module
from freegames import vector  # import the vector class declared in freegames utils.py
global round  # a module-level round counter (left at 0 in this version)
round = 0
def value():  # value() function
    "Randomly generate value between (-5, -3) or (3, 5)."  # returns a random value from one of the two ranges
    return ((2 + round) + random() * 3) * choice([1, -1])
ball = vector(0, 0)  # start the ball at the origin
aim = vector(value(), value())  # initialize the ball's direction to vector(value(), value())
state = {1: 0, 2: 0}  # initialize both players' paddle positions to 0
def move(player, change):  # move(player, change) function
    "Move player position by change."  # shifts a player's paddle by change
    # player = 1: left paddle, player = 2: right paddle
    state[player] += change  # increase state[player] (the paddle's y coordinate) by change
def rectangle(x, y, width, height):  # rectangle(x, y, width, height) function
    "Draw rectangle at (x, y) with given width and height."  # used to draw the paddles
    up()
    goto(x, y)
    down()
    begin_fill()
    for count in range(2):
        forward(width)
        left(90)
        forward(height)
        left(90)
    end_fill()
def draw():
    "Draw game and move pong ball."
    clear()
    rectangle(-200, state[1], 10, 50)  # draw both paddles; the playing field spans (-200, -200) to (200, 200)
    rectangle(190, state[2], 10, 50)
    ball.move(aim)  # move the ball by aim,
    x = ball.x  # then read its updated x and y coordinates
    y = ball.y
    up()  # drawing setup for the ball
    goto(x, y)
    dot(10)
    update()
    if y < -200 or y > 200:  # if the ball's y coordinate goes above 200 or below -200, flip aim.y
        aim.y = -aim.y
    if x < -185:  # if x drops below -185, the left paddle spans low = state[1] to high = state[1] + 50 (the paddle is 50 tall)
        low = state[1]
        high = state[1] + 50
        if low <= y <= high:  # if y is between low and high, reverse the ball's x direction (it bounces back)
            aim.x = -aim.x
        else:
            return  # otherwise the game ends
    if x > 185:
        low = state[2]  # if x exceeds 185, handle the right paddle the same way
        high = state[2] + 50
        if low <= y <= high:
            aim.x = -aim.x
        else:
            return
    ontimer(draw, 50)  # schedule draw again 0.05 s later, so draw keeps re-running every 0.05 s
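# --- Hypothetical sketch for exercise 6 (not in the original code): a trivial
# computer player could steer the right paddle toward the ball on every frame,
# for example by calling, inside draw():
#   if ball.y > state[2] + 25: move(2, 5)
#   elif ball.y < state[2] + 25: move(2, -5)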
setup(420, 420, 370, 0)  # window setup using the turtle module's setup() function
hideturtle()  # hide the turtle cursor
tracer(False)
listen()
onkey(lambda: move(1, 20), 'w')  # w: raise the left paddle by 20, s: lower it by 20; i: raise the right paddle by 20, k: lower it by 20
onkey(lambda: move(1, -20), 's')
onkey(lambda: move(2, 20), 'i')
onkey(lambda: move(2, -20), 'k')
draw()  # start the drawing loop
done()  # run the turtle event loop once draw() has been scheduled
```
#### File: freegames/Code Description/tron_d.py
```python
"""Tron, classic arcade game. # Tron, 클래식 아케이드 게임
Exercises # 연습문제들
1. Make the tron players faster/slower. # 1. tron 플레이어들의 속도를 빠르게/느리게 만들자
2. Stop a tron player from running into itself. # 2. tron 플레이어가 스스로 뛰어드는 것을 막자
3. Allow the tron player to go around the edge of the screen. # 3. tron 플레이어가 화면의 가장자리를 돌아다닐 수 있게 허용하자
4. How would you create a computer player? # 4. 컴퓨터 플레이어는 어떻게 만들 수 있을까?
"""
from turtle import *  # import the turtle module
from freegames import square, vector  # import square and vector declared in freegames utils.py
p1xy = vector(-100, 0)  # set player 1's starting position to (-100, 0)
p1aim = vector(4, 0)  # set player 1's starting direction to (4, 0)
p1body = set()  # p1body is a set holding player 1's trail
p2xy = vector(100, 0)  # set player 2's starting position to (100, 0)
p2aim = vector(-4, 0)  # set player 2's starting direction to (-4, 0)
p2body = set()  # p2body is a set holding player 2's trail
def inside(head):  # restrict how far a tron head may travel
    "Return True if head inside screen."  # keep the head's x and y coordinates within (-200, 200)
    return -200 < head.x < 200 and -200 < head.y < 200
def draw():  # advance both players and redraw the screen
    "Advance players and draw game."
    p1xy.move(p1aim)  # move p1xy by the offset stored in p1aim
    p1head = p1xy.copy()  # take a copy of p1xy and call it p1head
    p2xy.move(p2aim)  # move p2xy by the offset stored in p2aim
    p2head = p2xy.copy()  # take a copy of p2xy and call it p2head
    if not inside(p1head) or p1head in p2body:  # if p1head leaves the screen or hits p2body, end the game
        print('Player blue wins!')
        return
    if not inside(p2head) or p2head in p1body:  # if p2head leaves the screen or hits p1body, end the game
        print('Player red wins!')
        return
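    # --- Hypothetical sketch for exercise 2 (not in the original code): to stop a
    # player from crashing into its own trail, also check here, e.g.
    #   if p1head in p1body or p2head in p2body: return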
    p1body.add(p1head)  # add the copied p1head to p1body
    p2body.add(p2head)  # add the copied p2head to p2body
    square(p1xy.x, p1xy.y, 3, 'red')  # draw a red square wherever p1xy passes
    square(p2xy.x, p2xy.y, 3, 'blue')  # draw a blue square wherever p2xy passes
    update()  # refresh the screen
    ontimer(draw, 50)  # run draw again every 50 ms
setup(420, 420, 370, 0)  # initial graphics setup
hideturtle()  # hide the turtle cursor
tracer(False)  # turn off drawing animation
listen()  # accept keyboard input from the user
onkey(lambda: p1aim.rotate(90), 'a')  # for player 1, 'a' turns left and 'd' turns right;
onkey(lambda: p1aim.rotate(-90), 'd')  # for player 2, 'j' turns left and 'l' turns right
onkey(lambda: p2aim.rotate(90), 'j')
onkey(lambda: p2aim.rotate(-90), 'l')
draw()  # start the drawing loop
done()  # run the turtle event loop
``` |
{
"source": "19-1-skku-oss/2019-1-OSS-L1",
"score": 2
} |
#### File: pytest_doc_code/Chapter9/test_module.py
```python
def setup_function(function):
print("setting up %s" % function)
def test_func1():
assert True
def test_func2():
assert False
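# Presumably run with `pytest -s test_module.py`: -s lets the print from
# setup_function show; test_func1 passes while test_func2 fails, which appears
# intended to demonstrate pytest's failure report.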
```
#### File: 2019-1-OSS-L1/test_jy/test_1.py
```python
def RectangleArea(width, height):
    return width * height
def test_case():
    assert RectangleArea(5, 8) == 40  # passes: 5 * 8 == 40
    assert RectangleArea(5, 8) == 20  # fails: 5 * 8 is 40, not 20
    assert RectangleArea(9, 5) == 40  # never reached: 9 * 5 is 45, not 40
``` |
{
"source": "19-1-skku-oss/2019-1-OSS-L3",
"score": 2
} |
#### File: ext/telegram_chatterbot/telegrambot.py
```python
from telegram.ext import Updater, MessageHandler, Filters, CommandHandler
from chatterbot.ext.telegram_chatterbot import conf
from chatterbot.ext.telegram_chatterbot.bothandler import BotHandler
class TelegramBot:
def __init__(self, token=conf.TOKEN, name=conf.NAME, handlers=None):
self.bot_token = token
self.bot_name = name
self.updater = Updater(token)
self.add_handler(handlers)
def add_handler(self, handlers=None):
if handlers is None:
bot_handler = BotHandler()
handlers = bot_handler.get_lists()
for handler in handlers:
self.updater.dispatcher.add_handler(handler)
def start(self):
self.updater.start_polling(timeout=3, clean=True)
self.updater.idle()
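# --- Hypothetical usage sketch (not part of the original file) ---
# conf.TOKEN and conf.NAME supply defaults, so a bare TelegramBot() builds its
# handlers via BotHandler and starts long polling.
if __name__ == "__main__":
    bot = TelegramBot()
    bot.start()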
``` |