code | apis | extract_api
---|---|---
import sys
import os
import argparse
import time
import numpy as np
from os.path import dirname
from sklearn import svm
from sklearn.externals import joblib
from matplotlib import image as img
from lib.utils.dr_utils import invert_dr
from lib.utils.dr_utils import gradient_transform
from lib.attacks.svm_attacks import min_dist_calc
def resolve_path():
return dirname(dirname(dirname(os.path.abspath(__file__))))
#------------------------------------------------------------------------------#
def resolve_path_m(model_dict):
"""
Resolve absolute paths of models for different datasets
Parameters
----------
model_dict : dictionary
contains model's parameters
Returns
-------
absolute path to models directory
"""
dataset = model_dict['dataset']
channels = model_dict['channels']
script_dir = resolve_path()
rel_path_m = 'svm_models/' + dataset
if dataset == 'GTSRB':
rel_path_m += str(channels)
abs_path_m = os.path.join(script_dir, rel_path_m + '/')
if not os.path.exists(abs_path_m):
os.makedirs(abs_path_m)
return abs_path_m
#------------------------------------------------------------------------------#
def resolve_path_o(model_dict):
"""
Resolve absolute paths of output data for different datasets
Parameters
----------
model_dict : dictionary
contains model's parameters
Returns
-------
absolute path to output directory
"""
dataset = model_dict['dataset']
channels = model_dict['channels']
script_dir = resolve_path()
rel_path_o = 'svm_output_data/' + dataset
if dataset == 'GTSRB':
rel_path_o += str(channels)
abs_path_o = os.path.join(script_dir, rel_path_o + '/')
if not os.path.exists(abs_path_o):
os.makedirs(abs_path_o)
return abs_path_o
#------------------------------------------------------------------------------#
def resolve_path_v(model_dict):
"""
Resolve absolute paths of visual data for different datasets
Parameters
----------
model_dict : dictionary
contains model's parameters
Returns
-------
absolute path to output directory
"""
model_name = get_svm_model_name(model_dict)
dataset = model_dict['dataset']
channels = model_dict['channels']
script_dir = resolve_path()
rel_path_v = 'svm_visual_data/' + dataset + '/' + model_name
if dataset == 'GTSRB':
rel_path_v += str(channels)
abs_path_v = os.path.join(script_dir, rel_path_v + '/')
if not os.path.exists(abs_path_v):
os.makedirs(abs_path_v)
return abs_path_v
#------------------------------------------------------------------------------#
def svm_model_dict_create():
"""
Parse arguments to strategic_svm.py and create model_dict containing the
parameters
"""
# Parse arguments
parser = argparse.ArgumentParser()
parser.add_argument(
'-st',
'--svm_type',
default='linear',
type=str,
help='Specify type of SVM to be used (default: linear)')
parser.add_argument('--dataset', default='MNIST', type=str,
help='Specify dataset (default: MNIST)')
parser.add_argument(
'-c',
'--channels',
default=1,
type=int,
help='Specify number of input channels (1 (default) or 3)')
parser.add_argument(
'--two_classes',
action='store_true',
help='Train SVM on two classes instead of all available classes')
parser.add_argument('-dr', '--dim_red', default='pca', type=str,
help='Specify dimension reduction (default: pca)')
parser.add_argument(
'--rev',
action='store_true',
help='Train SVM and attack on DR sample reverted to original space')
parser.add_argument('-C', '--penconst', default=1.0, type=float,
help='Specify penalty parameter C (default: 1.0)')
parser.add_argument(
'-p',
'--penalty',
default='l2',
type=str,
help='Specify norm to use in penalization (l1 or l2 (default))')
parser.add_argument(
'-pp',
'--preprocess',
default=None,
type=str,
help='Specify preprocessing on dataset (std, whiten, antiwhiten(*)) \
(default: None) \n (*) is degree of covariance (>= -1)')
args = parser.parse_args()
# Create and update model_dict
model_dict = {}
model_dict.update({'svm_type': args.svm_type})
model_dict.update({'dataset': args.dataset})
model_dict.update({'channels': args.channels})
model_dict.update({'dim_red': args.dim_red})
model_dict.update({'penconst': args.penconst})
model_dict.update({'penalty': args.penalty})
model_dict.update({'preprocess': args.preprocess})
if args.rev:
model_dict.update({'rev': 1})
else:
model_dict.update({'rev': None})
if args.two_classes:
model_dict.update({'classes': 2})
else:
        # TODO: preferably put this somewhere else
dataset = model_dict['dataset']
if (dataset == 'MNIST'):
model_dict.update({'classes': 10})
elif (dataset == 'GTSRB'):
model_dict.update({'classes': 43})
elif (dataset == 'HAR'):
model_dict.update({'classes': 6})
return model_dict
#------------------------------------------------------------------------------#
def get_svm_model_name(model_dict, rd=None, rev=None):
"""
Helper function to get model name from <model_dict>, <rd> and <rev>
"""
model_name = 'svm_{}_cls{}'.format(
model_dict['svm_type'],
model_dict['classes'])
if model_dict['preprocess'] is not None:
model_name += ('_' + model_dict['preprocess'])
if rd is not None:
model_name += '_{}{}'.format(model_dict['dim_red'], rd)
if rev is not None:
model_name += '_rev'
model_name += '_{}_C{:.0e}'.format(model_dict['penalty'],
model_dict['penconst'])
return model_name
#------------------------------------------------------------------------------#
def model_loader(model_dict, rd=None, rev=None):
"""
Returns a classifier object if it already exists. Returns None, otherwise.
"""
print('Loading model...')
abs_path_m = resolve_path_m(model_dict)
try:
clf = joblib.load(abs_path_m + get_svm_model_name(model_dict, rd, rev) +
'.pkl')
except BaseException:
clf = None
return clf
#------------------------------------------------------------------------------#
def model_trainer(model_dict, X_train, y_train, rd=None, rev=None):
"""Trains and returns SVM. Also save SVM to file."""
print('Training model...')
start_time = time.time()
abs_path_m = resolve_path_m(model_dict)
svm_model = model_dict['svm_type']
C = model_dict['penconst']
penalty = model_dict['penalty']
# Create model based on parameters
if svm_model == 'linear':
dual = False
# if penalty == 'l1':
# dual = False
clf = svm.LinearSVC(C=C, penalty=penalty, dual=dual)
elif svm_model != 'linear':
clf = svm.SVC(C=C, kernel=svm_model)
# Train model
clf.fit(X_train, y_train)
    print('Finished training in {:d}s'.format(int(time.time() - start_time)))
# Save model
joblib.dump(clf, abs_path_m + get_svm_model_name(model_dict, rd, rev) + '.pkl')
return clf
#------------------------------------------------------------------------------#
def model_creator(model_dict, X_train, y_train, rd=None, rev=None):
"""Returns a SVM classifier"""
# Load model based on model_dict
clf = model_loader(model_dict, rd, rev)
# If model does not exist, train a new SVM
if clf is None:
clf = model_trainer(model_dict, X_train, y_train, rd, rev)
return clf
#------------------------------------------------------------------------------#
def model_transform(model_dict, clf, dr_alg):
"""
Modify SVM's decision function to take into account transformation
matrix to transform input data in original space
"""
A = gradient_transform(model_dict, dr_alg)
clf.coef_ = np.dot(clf.coef_, A)
return clf
#------------------------------------------------------------------------------#
def model_tester(model_dict, clf, X_test, y_test, rd=None, rev=None):
"""
Calculate model's accuracy and average normalized distance from correctly
classified samples to separating hyperplane of corresponding class
"""
predicted_labels = clf.predict(X_test)
if model_dict['svm_type'] == 'linear':
# Magnitude of weight vectors for each class
norm = np.linalg.norm(clf.coef_, axis=1)
else:
        # norm is arbitrarily set to one for kernel SVM
norm = np.ones(model_dict['classes'])
test_len = len(X_test)
sum_dist = 0
n_correct = 0
for i in range(test_len):
if predicted_labels[i] == y_test[i]:
n_correct += 1
            # Sum normalized distance to separating hyperplane
_, min_dist = min_dist_calc(X_test[i], clf)
sum_dist += min_dist
DR = model_dict['dim_red']
# Resolve path to utility output file
abs_path_o = resolve_path_o(model_dict)
fname = 'utility_' + get_svm_model_name(model_dict)
    if rd is not None:
        fname += '_' + DR
    if rev is not None:
        fname += '_rev'
    ofile = open(abs_path_o + fname + '.txt', 'a')
    if rd is None:
        ofile.write('No_' + DR + ' ')
    else:
        ofile.write(str(rd) + ' ')
# Format: <dimensions> <accuracy> <avg. dist.>
ofile.write('{:.2f} {:.3f} \n'.format(clf.score(X_test,y_test),
sum_dist / n_correct))
ofile.write('\n\n')
#------------------------------------------------------------------------------#
def acc_calc_all(clf, X_adv, y_test, y_ini):
"""
Return attack success rate on <clf> based on initially correctly predicted
samples
"""
o_list = []
y_adv = clf.predict(X_adv)
# Accuracy vs. true labels
atk_success = (y_adv != y_test)
acc_t = np.sum(atk_success) / float(len(X_adv))
o_list.append(acc_t)
# Accuracy vs. predicted labels
atk_success = (y_adv != y_ini)
acc_p = np.sum(atk_success) / float(len(X_adv))
o_list.append(acc_p)
# Accuracy for adv. examples generated from correctly classified examples
    i_c = np.where(y_ini == y_test)
atk_success = (y_adv[i_c] != y_test[i_c])
acc_c = np.sum(atk_success) / float(len(y_adv[i_c]))
o_list.append(acc_c)
return o_list
#------------------------------------------------------------------------------#
def file_create(model_dict, rd=None, strat=None, rev=None):
"""
Creates and returns a file descriptor, named corresponding to model,
attack type, strat, and rev
"""
# Resolve absolute path to output directory
abs_path_o = resolve_path_o(model_dict)
fname = get_svm_model_name(model_dict)
if strat is not None:
fname += '_strat'
if rd is not None:
fname += '_' + model_dict['dim_red']
if rev is not None:
fname += '_rev'
plotfile = open(abs_path_o + fname + '.txt', 'a')
return plotfile, fname
#------------------------------------------------------------------------------#
def print_svm_output(model_dict, output_list, dev_list, rd=None, strat=None,
rev=None):
"""
Creates an output file reporting accuracy and confidence of attack
"""
plotfile, fname = file_create(model_dict, rd, strat, rev)
plotfile.write('\\\\small{{{}}}\n'.format(rd))
for i in range(len(dev_list)):
plotfile.write('{0:.3f}'.format(dev_list[i]))
for item in output_list[i]:
plotfile.write(' {0:.3f}'.format(item))
plotfile.write('\n')
plotfile.write('\n\n')
plotfile.close()
return fname
#------------------------------------------------------------------------------#
def save_svm_images(model_dict, data_dict, X_test, adv_x, dev_mag, rd=None,
dr_alg=None, rev=None):
"""
Save <no_of_img> adv. samples as image files in visual_data folder
"""
no_of_img = 1 # Number of images to save
indices = range(no_of_img)
X_curr = X_test[indices]
dataset = model_dict['dataset']
DR = model_dict['dim_red']
abs_path_v = resolve_path_v(model_dict)
no_of_features = data_dict['no_of_features']
height = int(np.sqrt(no_of_features))
width = height
# TODO: invert preprocessing
# if model_dict['preprocess'] is not None:
channels = 1
if channels == 1:
if (rd is not None) and (rev is None):
# Invert dr samples to their original space
adv_x_curr = adv_x[indices, :] + dr_alg.mean_
for i in indices:
adv = adv_x_curr[i].reshape((height, width))
orig = X_curr[i].reshape((height, width))
img.imsave(
abs_path_v +
'{}_{}_{}_mag{}.png'.format(i, DR, rd, dev_mag),
adv * 255,
vmin=0,
vmax=255,
cmap='gray')
img.imsave(abs_path_v + '{}_{}_{}_orig.png'.format(i, DR, rd),
orig * 255, vmin=0, vmax=255, cmap='gray')
elif (rd is None) or (rev is not None):
adv_x_curr = adv_x[indices, :]
for i in indices:
adv = adv_x_curr[i].reshape((height, width))
orig = X_curr[i].reshape((height, width))
if rd is not None:
fname = abs_path_v + '{}_{}_rev_{}'.format(i, DR, rd)
elif rd is None:
fname = abs_path_v + '{}'.format(i)
img.imsave(fname + '_mag{}.png'.format(dev_mag), adv * 255,
vmin=0, vmax=255, cmap='gray')
img.imsave(fname + '_orig.png', orig * 255, vmin=0, vmax=255,
cmap='gray')
else:
adv = adv_x[i].swapaxes(0, 2).swapaxes(0, 1)
orig = X_test[i].swapaxes(0, 2).swapaxes(0, 1)
#------------------------------------------------------------------------------#
# def plotter(acc_def, acc, dev_list, rd_list, recons_flag=0, strat_flag=0):
#
# import matplotlib
# matplotlib.use('Agg')
# import matplotlib.pyplot as plt
# from matplotlib.lines import Line2D
# import glob as glob
# import os
# from matplotlib.pyplot import cm
# from cycler import cycler
#
# if strat_flag == 1: title = 'Strategic gradient '
# elif strat_flag == 0: title = 'Gradient '
# title += 'on DCA reduced dimensions for MNIST data with '
# fname ='MNIST_svm_dca'
# if recons_flag == 1:
# title += 'recons defense'
# fname += '_recon.png'
# elif recons_flag == 0:
# title += 'retrain defense'
# fname += '_retrain'
# if strat_flag == 1: fname += '_strat'
# fname += '.png'
#
# font = {'size': 17}
# matplotlib.rc('font', **font)
# cm = plt.get_cmap('gist_rainbow')
# fig, ax = plt.subplots(1, 1, figsize=(12,9))
# ax.get_xaxis().tick_bottom()
# ax.get_yaxis().tick_left()
# colors = ('b', 'g', 'r', 'c', 'm', 'y', 'k')
# markers = ('o', '^', 'x', 'D', 's', '|', 'v')
# handle_list = []
# count = 0
# for item in rd_list:
# count += 1
# color = colors[count % len(colors)]
# style = markers[count % len(markers)]
# handle_list.append(plt.plot(dev_list, np.multiply(100, acc_def[count-1, :]),
# linestyle='-', marker=style, color=color, markersize=10, label=item))
# handle_list.append(plt.plot(dev_list, np.multiply(100, acc),
# linestyle='-', marker='o', color='b', markersize=10, label='No defense'))
#
# plt.xlabel(r'Adversarial perturbation')
# plt.ylabel('Adversarial success')
# plt.title(title)
# plt.xticks()
# plt.legend(loc=2, fontsize=14)
# plt.ylim(0, 100)
# plt.savefig(fname, bbox_inches='tight')
# plt.show()
# #------------------------------------------------------------------------------#
| [
"os.path.exists",
"numpy.sqrt",
"numpy.ones",
"argparse.ArgumentParser",
"os.makedirs",
"numpy.where",
"sklearn.svm.LinearSVC",
"os.path.join",
"matplotlib.image.imsave",
"numpy.sum",
"numpy.dot",
"lib.attacks.svm_attacks.min_dist_calc",
"numpy.linalg.norm",
"os.path.abspath",
"lib.utils.dr_utils.gradient_transform",
"time.time",
"sklearn.svm.SVC"
] | [((1007, 1049), 'os.path.join', 'os.path.join', (['script_dir', "(rel_path_m + '/')"], {}), "(script_dir, rel_path_m + '/')\n", (1019, 1049), False, 'import os\n'), ((1739, 1781), 'os.path.join', 'os.path.join', (['script_dir', "(rel_path_o + '/')"], {}), "(script_dir, rel_path_o + '/')\n", (1751, 1781), False, 'import os\n'), ((2538, 2580), 'os.path.join', 'os.path.join', (['script_dir', "(rel_path_v + '/')"], {}), "(script_dir, rel_path_v + '/')\n", (2550, 2580), False, 'import os\n'), ((2930, 2955), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2953, 2955), False, 'import argparse\n'), ((6890, 6901), 'time.time', 'time.time', ([], {}), '()\n', (6899, 6901), False, 'import time\n'), ((8275, 8313), 'lib.utils.dr_utils.gradient_transform', 'gradient_transform', (['model_dict', 'dr_alg'], {}), '(model_dict, dr_alg)\n', (8293, 8313), False, 'from lib.utils.dr_utils import gradient_transform\n'), ((8331, 8351), 'numpy.dot', 'np.dot', (['clf.coef_', 'A'], {}), '(clf.coef_, A)\n', (8337, 8351), True, 'import numpy as np\n'), ((10593, 10618), 'numpy.where', 'np.where', (['(y_ini == y_test)'], {}), '(y_ini == y_test)\n', (10601, 10618), True, 'import numpy as np\n'), ((1061, 1087), 'os.path.exists', 'os.path.exists', (['abs_path_m'], {}), '(abs_path_m)\n', (1075, 1087), False, 'import os\n'), ((1097, 1120), 'os.makedirs', 'os.makedirs', (['abs_path_m'], {}), '(abs_path_m)\n', (1108, 1120), False, 'import os\n'), ((1793, 1819), 'os.path.exists', 'os.path.exists', (['abs_path_o'], {}), '(abs_path_o)\n', (1807, 1819), False, 'import os\n'), ((1829, 1852), 'os.makedirs', 'os.makedirs', (['abs_path_o'], {}), '(abs_path_o)\n', (1840, 1852), False, 'import os\n'), ((2592, 2618), 'os.path.exists', 'os.path.exists', (['abs_path_v'], {}), '(abs_path_v)\n', (2606, 2618), False, 'import os\n'), ((2628, 2651), 'os.makedirs', 'os.makedirs', (['abs_path_v'], {}), '(abs_path_v)\n', (2639, 2651), False, 'import os\n'), ((7214, 7260), 'sklearn.svm.LinearSVC', 'svm.LinearSVC', ([], {'C': 'C', 'penalty': 'penalty', 'dual': 'dual'}), '(C=C, penalty=penalty, dual=dual)\n', (7227, 7260), False, 'from sklearn import svm\n'), ((8841, 8874), 'numpy.linalg.norm', 'np.linalg.norm', (['clf.coef_'], {'axis': '(1)'}), '(clf.coef_, axis=1)\n', (8855, 8874), True, 'import numpy as np\n'), ((8956, 8986), 'numpy.ones', 'np.ones', (["model_dict['classes']"], {}), "(model_dict['classes'])\n", (8963, 8986), True, 'import numpy as np\n'), ((10292, 10311), 'numpy.sum', 'np.sum', (['atk_success'], {}), '(atk_success)\n', (10298, 10311), True, 'import numpy as np\n'), ((10440, 10459), 'numpy.sum', 'np.sum', (['atk_success'], {}), '(atk_success)\n', (10446, 10459), True, 'import numpy as np\n'), ((10678, 10697), 'numpy.sum', 'np.sum', (['atk_success'], {}), '(atk_success)\n', (10684, 10697), True, 'import numpy as np\n'), ((12656, 12679), 'numpy.sqrt', 'np.sqrt', (['no_of_features'], {}), '(no_of_features)\n', (12663, 12679), True, 'import numpy as np\n'), ((7307, 7337), 'sklearn.svm.SVC', 'svm.SVC', ([], {'C': 'C', 'kernel': 'svm_model'}), '(C=C, kernel=svm_model)\n', (7314, 7337), False, 'from sklearn import svm\n'), ((9236, 9265), 'lib.attacks.svm_attacks.min_dist_calc', 'min_dist_calc', (['X_test[i]', 'clf'], {}), '(X_test[i], clf)\n', (9249, 9265), False, 'from lib.attacks.svm_attacks import min_dist_calc\n'), ((391, 416), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (406, 416), False, 'import os\n'), ((7435, 7446), 'time.time', 'time.time', ([], {}), '()\n', (7444, 7446), 
False, 'import time\n'), ((14121, 14195), 'matplotlib.image.imsave', 'img.imsave', (["(fname + '_orig.png')", '(orig * 255)'], {'vmin': '(0)', 'vmax': '(255)', 'cmap': '"""gray"""'}), "(fname + '_orig.png', orig * 255, vmin=0, vmax=255, cmap='gray')\n", (14131, 14195), True, 'from matplotlib import image as img\n')] |
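The helpers above are meant to be driven by strategic_svm.py; the following minimal usage sketch is an assumption-laden illustration: the model_dict literal mirrors the argparse defaults of svm_model_dict_create(), and the data is random filler standing in for flattened MNIST images.
# Minimal usage sketch for the helpers above (model_dict mirrors the argparse
# defaults of svm_model_dict_create(); the data below is random and illustrative).
import numpy as np

model_dict = {'svm_type': 'linear', 'dataset': 'MNIST', 'channels': 1,
              'dim_red': 'pca', 'penconst': 1.0, 'penalty': 'l2',
              'preprocess': None, 'rev': None, 'classes': 10}

X_train = np.random.rand(100, 784)                  # stand-in for flattened MNIST images
y_train = np.random.randint(0, 10, 100)
X_test = np.random.rand(20, 784)
y_test = np.random.randint(0, 10, 20)

print(get_svm_model_name(model_dict))             # e.g. svm_linear_cls10_l2_C1e+00
clf = model_creator(model_dict, X_train, y_train)    # loads a cached model or trains a new one
model_tester(model_dict, clf, X_test, y_test)       # appends accuracy / avg. distance to a file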
####################################################################
# ## NATURAL AND ADVERSARIAL TRAINING FOR WIDERESNET CLASSIFIER ## #
####################################################################
import torch as t
import torch.nn as nn
from torch.utils.data import DataLoader
import json
import datetime
from nets import WideResNet, conv_init
from utils import setup_exp, import_data
# json file with experiment config
CONFIG_FILE = './config_train_clf/cifar10_nat.json'
###############
# ## SETUP ## #
###############
# load experiment config
with open(CONFIG_FILE) as file:
config = json.load(file)
# directory for experiment results
exp_dir = config['exp_dir'] + '_' + datetime.datetime.now().strftime('%d-%m-%Y_%I-%M-%S_%p') + '_/'
# setup folders, save code, set seed and get device
setup_exp(exp_dir, config['seed'], ['checkpoints'], ['train_clf.py', 'nets.py', 'utils.py', CONFIG_FILE])
print('Processing data...')
# import train and test datasets and set up data loaders
train_data, num_classes = import_data(config['data_type'], True, True)
train_data_loader = DataLoader(train_data, config['batch_size'], shuffle=True, num_workers=config['num_workers'])
test_data = import_data(config['data_type'], False, False)[0]
test_data_loader = DataLoader(test_data, config['batch_size'], shuffle=False, num_workers=config['num_workers'])
print('Setting up network and optimizer...')
# network structure and weight init
clf = WideResNet(num_classes=num_classes).cuda()
clf.apply(conv_init)
# initialize optim
assert len(config['lr_list']) == len(config['lr_schedule']), 'lr_list and lr_schedule must have the same length'
optim = t.optim.SGD(clf.parameters(), config['lr_list'][0], config['momentum'], weight_decay=config['weight_decay'])
# loss criterion for logits
criterion = nn.CrossEntropyLoss()
# rescale adversarial parameters for attacks on images with pixel intensities in the range [-1, 1]
config['adv_eps'] *= 2.0 / 255.0
config['adv_eta'] *= 2.0 / 255.0
###############################################
# ## FUNCTIONS FOR ATTACK, TRAIN, AND TEST ## #
###############################################
# l_inf pgd attack
def attack(X, y, adv_steps):
min_mask = t.clamp(X - config['adv_eps'], min=-1.0, max=1.0)
max_mask = t.clamp(X + config['adv_eps'], min=-1.0, max=1.0)
# random initialization in epsilon ball
X_adv = t.clamp(X + config['adv_eps']*(2*t.rand_like(X)-1), min=-1.0, max=1.0)
X_adv = t.autograd.Variable(X_adv, requires_grad=True)
for step in range(adv_steps):
# l_infinity attack on images by climbing loss within epsilon-ball
attack_grad = t.autograd.grad(criterion(clf(X_adv), y), [X_adv])[0]
X_adv.data += config['adv_eta'] * t.sign(attack_grad)
X_adv.data = t.min(max_mask, other=t.max(min_mask, other=X_adv.data))
return X_adv.detach()
# train model for single epoch through data
def train(epoch):
clf.train()
train_loss = 0
correct = 0
total = 0
# update learning rate
if (epoch + 1) in config['lr_schedule']:
lr_ind = config['lr_schedule'].index(epoch + 1)
for lr_gp in optim.param_groups:
lr_gp['lr'] = config['lr_list'][lr_ind]
for batch, (X_batch, y_batch) in enumerate(train_data_loader):
X_train, y_train = X_batch.clone().cuda(), y_batch.cuda()
if config['adv_steps_train'] > 0 and (epoch+1) >= config['adv_train_start']:
# adversarial attacks on input images
X_train = attack(X_train, y_train, config['adv_steps_train'])
# logits for prediction and loss for weight update
logits = clf(X_train)
loss = criterion(logits, y_train)
# update classifier weights
optim.zero_grad()
loss.backward()
optim.step()
# record batch info
train_loss += loss.item()
_, y_pred = t.max(logits.detach(), 1)
correct += t.eq(y_pred, y_train).sum().cpu()
total += y_train.nelement()
# get and print train accuracy
train_acc = 100 * float(correct) / float(total)
print('Epoch {}: Train Loss={} Train Acc={}%'.format(epoch+1, train_loss / (batch+1), train_acc))
# test model on withheld data
def test(epoch):
clf.eval()
test_loss = 0
correct = 0
total = 0
for batch, (X_batch, y_batch) in enumerate(test_data_loader):
X_test, y_test = X_batch.clone().cuda(), y_batch.cuda()
if config['adv_steps_test'] > 0:
# attack images
X_test = attack(X_test, y_test, config['adv_steps_test'])
# check test images
with t.no_grad():
logits = clf(X_test)
loss = criterion(logits, y_test)
# record batch info
test_loss += loss.item()
_, y_pred = t.max(logits, 1)
correct += t.eq(y_pred, y_test).sum().cpu()
total += y_test.nelement()
# get and print test accuracy
test_acc = 100 * float(correct) / float(total)
print('Epoch {}: Test Loss={} Test Acc={}%'.format(epoch+1, test_loss / (batch+1), test_acc))
#######################
# ## LEARNING LOOP ## #
#######################
print('Training has begun.')
for epoch in range(config['num_epochs']):
train(epoch)
if (epoch+1) % config['test_and_log_freq'] == 0:
# save checkpoint
t.save(clf.state_dict(), exp_dir + 'checkpoints/clf_' + str(epoch + 1) + '.pth')
# save optim
t.save(optim.state_dict(), exp_dir + 'checkpoints/optim.pth')
# evaluate test data
test(epoch)
| [
"utils.setup_exp",
"torch.nn.CrossEntropyLoss",
"torch.autograd.Variable",
"torch.rand_like",
"nets.WideResNet",
"torch.max",
"torch.sign",
"torch.eq",
"datetime.datetime.now",
"torch.utils.data.DataLoader",
"json.load",
"torch.no_grad",
"utils.import_data",
"torch.clamp"
] | [((810, 919), 'utils.setup_exp', 'setup_exp', (['exp_dir', "config['seed']", "['checkpoints']", "['train_clf.py', 'nets.py', 'utils.py', CONFIG_FILE]"], {}), "(exp_dir, config['seed'], ['checkpoints'], ['train_clf.py',\n 'nets.py', 'utils.py', CONFIG_FILE])\n", (819, 919), False, 'from utils import setup_exp, import_data\n'), ((1028, 1072), 'utils.import_data', 'import_data', (["config['data_type']", '(True)', '(True)'], {}), "(config['data_type'], True, True)\n", (1039, 1072), False, 'from utils import setup_exp, import_data\n'), ((1093, 1191), 'torch.utils.data.DataLoader', 'DataLoader', (['train_data', "config['batch_size']"], {'shuffle': '(True)', 'num_workers': "config['num_workers']"}), "(train_data, config['batch_size'], shuffle=True, num_workers=\n config['num_workers'])\n", (1103, 1191), False, 'from torch.utils.data import DataLoader\n'), ((1268, 1366), 'torch.utils.data.DataLoader', 'DataLoader', (['test_data', "config['batch_size']"], {'shuffle': '(False)', 'num_workers': "config['num_workers']"}), "(test_data, config['batch_size'], shuffle=False, num_workers=\n config['num_workers'])\n", (1278, 1366), False, 'from torch.utils.data import DataLoader\n'), ((1804, 1825), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (1823, 1825), True, 'import torch.nn as nn\n'), ((607, 622), 'json.load', 'json.load', (['file'], {}), '(file)\n', (616, 622), False, 'import json\n'), ((1199, 1245), 'utils.import_data', 'import_data', (["config['data_type']", '(False)', '(False)'], {}), "(config['data_type'], False, False)\n", (1210, 1245), False, 'from utils import setup_exp, import_data\n'), ((2202, 2251), 'torch.clamp', 't.clamp', (["(X - config['adv_eps'])"], {'min': '(-1.0)', 'max': '(1.0)'}), "(X - config['adv_eps'], min=-1.0, max=1.0)\n", (2209, 2251), True, 'import torch as t\n'), ((2267, 2316), 'torch.clamp', 't.clamp', (["(X + config['adv_eps'])"], {'min': '(-1.0)', 'max': '(1.0)'}), "(X + config['adv_eps'], min=-1.0, max=1.0)\n", (2274, 2316), True, 'import torch as t\n'), ((2456, 2502), 'torch.autograd.Variable', 't.autograd.Variable', (['X_adv'], {'requires_grad': '(True)'}), '(X_adv, requires_grad=True)\n', (2475, 2502), True, 'import torch as t\n'), ((1450, 1485), 'nets.WideResNet', 'WideResNet', ([], {'num_classes': 'num_classes'}), '(num_classes=num_classes)\n', (1460, 1485), False, 'from nets import WideResNet, conv_init\n'), ((2730, 2749), 'torch.sign', 't.sign', (['attack_grad'], {}), '(attack_grad)\n', (2736, 2749), True, 'import torch as t\n'), ((4601, 4612), 'torch.no_grad', 't.no_grad', ([], {}), '()\n', (4610, 4612), True, 'import torch as t\n'), ((4786, 4802), 'torch.max', 't.max', (['logits', '(1)'], {}), '(logits, 1)\n', (4791, 4802), True, 'import torch as t\n'), ((694, 717), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (715, 717), False, 'import datetime\n'), ((2793, 2826), 'torch.max', 't.max', (['min_mask'], {'other': 'X_adv.data'}), '(min_mask, other=X_adv.data)\n', (2798, 2826), True, 'import torch as t\n'), ((2406, 2420), 'torch.rand_like', 't.rand_like', (['X'], {}), '(X)\n', (2417, 2420), True, 'import torch as t\n'), ((3915, 3936), 'torch.eq', 't.eq', (['y_pred', 'y_train'], {}), '(y_pred, y_train)\n', (3919, 3936), True, 'import torch as t\n'), ((4826, 4846), 'torch.eq', 't.eq', (['y_pred', 'y_test'], {}), '(y_pred, y_test)\n', (4830, 4846), True, 'import torch as t\n')] |
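The script reads every hyperparameter from the JSON file at CONFIG_FILE. Below is a hedged sketch of such a config that lists only the keys the script actually accesses; the values are illustrative guesses, not the repository's real cifar10_nat.json.
# Illustrative config covering every key the training script reads; values are guesses.
import json

config_example = {
    'exp_dir': './out/cifar10_nat',
    'seed': 1,
    'data_type': 'cifar10',
    'batch_size': 128,
    'num_workers': 4,
    'lr_list': [0.1, 0.01, 0.001],       # must have the same length as lr_schedule
    'lr_schedule': [1, 75, 90],
    'momentum': 0.9,
    'weight_decay': 0.0005,
    'adv_eps': 8.0,                      # rescaled in-script to the [-1, 1] pixel range
    'adv_eta': 2.0,
    'adv_steps_train': 0,                # 0 disables adversarial training
    'adv_train_start': 1,
    'adv_steps_test': 0,
    'num_epochs': 100,
    'test_and_log_freq': 10,
}

with open('./config_train_clf/cifar10_nat.json', 'w') as f:
    json.dump(config_example, f, indent=2)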
from os import path, remove
from .settings import Settings
import arrow
class Export:
def __init__(self, settings: Settings):
self.settings = settings
def export_to_file(self, filename):
curdatestr = arrow.now().format()
if path.exists(filename):
remove(filename)
with open(filename, mode='wt') as fp:
fp.write('# Settings Exported\n')
fp.write('# on: ' + curdatestr + '\n')
fp.write('# for appname = ' + self.settings.appname + '\n' )
for item in self.settings.items:
s = f"{item['name']}={item['value']}\n"
fp.write(s)
return
| [
"os.path.exists",
"arrow.now",
"os.remove"
] | [((259, 280), 'os.path.exists', 'path.exists', (['filename'], {}), '(filename)\n', (270, 280), False, 'from os import path, remove\n'), ((294, 310), 'os.remove', 'remove', (['filename'], {}), '(filename)\n', (300, 310), False, 'from os import path, remove\n'), ((227, 238), 'arrow.now', 'arrow.now', ([], {}), '()\n', (236, 238), False, 'import arrow\n')] |
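A rough usage sketch for Export follows; the stub class only mimics the two Settings attributes the exporter touches (appname and items) and is not the real Settings class.
# Hypothetical stand-in for Settings: export_to_file() only needs .appname and .items.
class FakeSettings:
    appname = 'demo-app'
    items = [{'name': 'theme', 'value': 'dark'},
             {'name': 'autosave', 'value': True}]

Export(FakeSettings()).export_to_file('/tmp/settings_export.txt')
# The file now contains a dated header plus one name=value line per item.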
from .MobileAgent import MobileAgent
import numpy as np
from cvxopt import solvers, matrix
from numpy.matlib import repmat
from numpy import zeros, eye, ones, sqrt, asscalar, log
class BarrierFunction(MobileAgent):
def __init__(self):
self.t = 0.5
self.gamma = 2
self.half_plane_ABC = []
self.d_min = 2
MobileAgent.__init__(self);
self.safe_set = [0,0,0]
def calc_control_input(self, dT, goal, fx, fu, Xr, Xh, dot_Xr, dot_Xh, Mr, Mh, p_Mr_p_Xr, p_Mh_p_Xh, u0, min_u, max_u):
dim = np.shape(Mr)[0] // 2
p_idx = np.arange(dim)
v_idx = p_idx + dim
d = np.linalg.norm(Mr[p_idx] - Mh[p_idx])
dot_Mr = p_Mr_p_Xr * dot_Xr
dot_Mh = p_Mh_p_Xh * dot_Xh
dM = Mr - Mh
dot_dM = dot_Mr - dot_Mh
dp = dM[p_idx,0]
dv = dM[v_idx,0]
dot_dp = dot_dM[p_idx,0]
dot_dv = dot_dM[v_idx,0]
        # dot_d is the component of the relative velocity along the dp direction
dot_d = asscalar(dp.T * dv / d)
d = 1e-3 if d == 0 else d
dot_d = 1e-3 if dot_d == 0 else dot_d
p_d_p_Mr = np.vstack([ dp / d, zeros((dim,1))])
p_d_p_Xr = p_Mr_p_Xr.T * p_d_p_Mr
p_dot_d_p_dp = dv / d - asscalar(dp.T * dv) * dp / (d**3)
p_dot_d_p_dv = dp / d
p_dp_p_Mr = np.hstack([eye(dim), zeros((dim,dim))])
p_dv_p_Mr = np.hstack([zeros((dim,dim)), eye(dim)])
p_dot_d_p_Mr = p_dp_p_Mr.T * p_dot_d_p_dp + p_dv_p_Mr.T * p_dot_d_p_dv
h = (d ** 2 + dot_d * self.t - self.d_min)
h = 1e-100 if h < 0 else h
p_h_p_Mr = (2 * d * p_d_p_Mr + p_dot_d_p_Mr * self.t)
p_h_p_Mh = -p_h_p_Mr
B = -1*log(h / (1+h))
p_B_p_h = (-1 / (h+1) / h)
p_B_p_Xr = p_B_p_h * p_Mr_p_Xr.T * p_h_p_Mr
p_B_p_Xh = p_B_p_h * p_Mh_p_Xh.T * p_h_p_Mh
LfB = p_B_p_Xr.T * fx
LgB = p_B_p_Xr.T * fu
A = matrix(LgB)
b = matrix(self.gamma / B - LfB - p_B_p_Xh.T * dot_Xh)
A = A / abs(b)
b = b / abs(b)
Q = matrix(eye(np.shape(u0)[0]))
p = matrix(- 2 * u0)
nu = np.shape(u0)[0]
G = matrix(np.vstack([eye(nu), -eye(nu)]))
r = matrix(np.vstack([max_u, -min_u]))
A = matrix([[A,G]])
b = matrix([[b,r]])
u = u0
self.fuck = False
try:
solvers.options['feastol']=1e-9
solvers.options['show_progress'] = False
sol=solvers.qp(Q, p, A, b)
u = np.vstack(sol['x'])
except:
pass
self.half_plane_ABC = matrix([[A],[-b - A[:,0]*Mr[0,0] - A[:,1]*Mr[1,0]]])
self.ABC = matrix([[A],[-b]])
return u | [
"numpy.eye",
"numpy.log",
"numpy.asscalar",
"numpy.zeros",
"numpy.vstack",
"cvxopt.matrix",
"numpy.linalg.norm",
"cvxopt.solvers.qp",
"numpy.shape",
"numpy.arange"
] | [((605, 619), 'numpy.arange', 'np.arange', (['dim'], {}), '(dim)\n', (614, 619), True, 'import numpy as np\n'), ((661, 698), 'numpy.linalg.norm', 'np.linalg.norm', (['(Mr[p_idx] - Mh[p_idx])'], {}), '(Mr[p_idx] - Mh[p_idx])\n', (675, 698), True, 'import numpy as np\n'), ((1031, 1054), 'numpy.asscalar', 'asscalar', (['(dp.T * dv / d)'], {}), '(dp.T * dv / d)\n', (1039, 1054), False, 'from numpy import zeros, eye, ones, sqrt, asscalar, log\n'), ((1997, 2008), 'cvxopt.matrix', 'matrix', (['LgB'], {}), '(LgB)\n', (2003, 2008), False, 'from cvxopt import solvers, matrix\n'), ((2021, 2071), 'cvxopt.matrix', 'matrix', (['(self.gamma / B - LfB - p_B_p_Xh.T * dot_Xh)'], {}), '(self.gamma / B - LfB - p_B_p_Xh.T * dot_Xh)\n', (2027, 2071), False, 'from cvxopt import solvers, matrix\n'), ((2173, 2188), 'cvxopt.matrix', 'matrix', (['(-2 * u0)'], {}), '(-2 * u0)\n', (2179, 2188), False, 'from cvxopt import solvers, matrix\n'), ((2331, 2347), 'cvxopt.matrix', 'matrix', (['[[A, G]]'], {}), '([[A, G]])\n', (2337, 2347), False, 'from cvxopt import solvers, matrix\n'), ((2359, 2375), 'cvxopt.matrix', 'matrix', (['[[b, r]]'], {}), '([[b, r]])\n', (2365, 2375), False, 'from cvxopt import solvers, matrix\n'), ((2687, 2748), 'cvxopt.matrix', 'matrix', (['[[A], [-b - A[:, 0] * Mr[0, 0] - A[:, 1] * Mr[1, 0]]]'], {}), '([[A], [-b - A[:, 0] * Mr[0, 0] - A[:, 1] * Mr[1, 0]]])\n', (2693, 2748), False, 'from cvxopt import solvers, matrix\n'), ((2759, 2778), 'cvxopt.matrix', 'matrix', (['[[A], [-b]]'], {}), '([[A], [-b]])\n', (2765, 2778), False, 'from cvxopt import solvers, matrix\n'), ((1770, 1786), 'numpy.log', 'log', (['(h / (1 + h))'], {}), '(h / (1 + h))\n', (1773, 1786), False, 'from numpy import zeros, eye, ones, sqrt, asscalar, log\n'), ((2204, 2216), 'numpy.shape', 'np.shape', (['u0'], {}), '(u0)\n', (2212, 2216), True, 'import numpy as np\n'), ((2290, 2316), 'numpy.vstack', 'np.vstack', (['[max_u, -min_u]'], {}), '([max_u, -min_u])\n', (2299, 2316), True, 'import numpy as np\n'), ((2543, 2565), 'cvxopt.solvers.qp', 'solvers.qp', (['Q', 'p', 'A', 'b'], {}), '(Q, p, A, b)\n', (2553, 2565), False, 'from cvxopt import solvers, matrix\n'), ((2582, 2601), 'numpy.vstack', 'np.vstack', (["sol['x']"], {}), "(sol['x'])\n", (2591, 2601), True, 'import numpy as np\n'), ((568, 580), 'numpy.shape', 'np.shape', (['Mr'], {}), '(Mr)\n', (576, 580), True, 'import numpy as np\n'), ((1176, 1191), 'numpy.zeros', 'zeros', (['(dim, 1)'], {}), '((dim, 1))\n', (1181, 1191), False, 'from numpy import zeros, eye, ones, sqrt, asscalar, log\n'), ((1381, 1389), 'numpy.eye', 'eye', (['dim'], {}), '(dim)\n', (1384, 1389), False, 'from numpy import zeros, eye, ones, sqrt, asscalar, log\n'), ((1391, 1408), 'numpy.zeros', 'zeros', (['(dim, dim)'], {}), '((dim, dim))\n', (1396, 1408), False, 'from numpy import zeros, eye, ones, sqrt, asscalar, log\n'), ((1442, 1459), 'numpy.zeros', 'zeros', (['(dim, dim)'], {}), '((dim, dim))\n', (1447, 1459), False, 'from numpy import zeros, eye, ones, sqrt, asscalar, log\n'), ((1460, 1468), 'numpy.eye', 'eye', (['dim'], {}), '(dim)\n', (1463, 1468), False, 'from numpy import zeros, eye, ones, sqrt, asscalar, log\n'), ((1277, 1296), 'numpy.asscalar', 'asscalar', (['(dp.T * dv)'], {}), '(dp.T * dv)\n', (1285, 1296), False, 'from numpy import zeros, eye, ones, sqrt, asscalar, log\n'), ((2143, 2155), 'numpy.shape', 'np.shape', (['u0'], {}), '(u0)\n', (2151, 2155), True, 'import numpy as np\n'), ((2250, 2257), 'numpy.eye', 'eye', (['nu'], {}), '(nu)\n', (2253, 2257), False, 'from numpy import zeros, eye, ones, 
sqrt, asscalar, log\n'), ((2260, 2267), 'numpy.eye', 'eye', (['nu'], {}), '(nu)\n', (2263, 2267), False, 'from numpy import zeros, eye, ones, sqrt, asscalar, log\n')] |
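calc_control_input ultimately solves a small quadratic program: stay as close as possible to the reference control u0 while respecting the barrier and box constraints. Below is a stripped-down sketch of the same cvxopt pattern with arbitrary numbers; the barrier-function row is omitted.
# Stand-alone sketch of the QP solved above: minimise ||u - u0||^2 subject to
# box constraints -1 <= u_i <= 1 (the barrier constraint row is left out here).
import numpy as np
from cvxopt import matrix, solvers

solvers.options['show_progress'] = False
u0 = np.array([[0.5], [2.0]])                      # reference control
Q = matrix(np.eye(2))                              # quadratic term u'Qu
p = matrix(-2.0 * u0)                              # linear term, as in the class above
G = matrix(np.vstack([np.eye(2), -np.eye(2)]))     # u <= 1 and -u <= 1
h = matrix(np.ones((4, 1)))
sol = solvers.qp(Q, p, G, h)
print(np.array(sol['x']))                         # approximately [[0.5], [1.0]]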
# -*- encoding: utf-8 -*-
#
# Copyright (c) 2021 <NAME> <<EMAIL>>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
import os
import uuid
import fnmatch
class BatchFile():
def __init__(self, filename_dbrecord, filename, email, model_name):
self.filename_dbrecord = filename_dbrecord
self.filename = filename
self.email = email
self.model_name = model_name
class BatchFilesDB():
ENTRIES = '/srv/data/entries'
SEPARATOR = "\t"
g_check_directory = True
def create(self, filename, email, model_name):
if self.g_check_directory:
self.g_check_directory = False
if not os.path.exists(self.ENTRIES):
os.makedirs(self.ENTRIES)
filename_dbrecord = str(uuid.uuid4())
filename_dbrecord = os.path.join(self.ENTRIES, filename_dbrecord)
with open(filename_dbrecord, "w") as fh:
line = f"{filename}{self.SEPARATOR}{email}{self.SEPARATOR}{model_name}"
fh.write(line)
return filename_dbrecord
def _find(self, directory, pattern):
filelist = []
for root, dirs, files in os.walk(directory):
for basename in files:
if fnmatch.fnmatch(basename, pattern):
filename = os.path.join(root, basename)
filelist.append(filename)
return filelist
def _read_record(self, filename_dbrecord):
with open(filename_dbrecord, "r") as fh:
line = fh.readline()
components = line.split(self.SEPARATOR)
return BatchFile(filename_dbrecord, components[0], components[1], components[2])
def select(self):
filenames = self._find(self.ENTRIES, "*")
records = []
for filename in filenames:
record = self._read_record(filename)
records.append(record)
return records
def delete(self, filename):
os.remove(filename)
| [
"os.path.exists",
"os.makedirs",
"os.path.join",
"uuid.uuid4",
"fnmatch.fnmatch",
"os.walk",
"os.remove"
] | [((1478, 1523), 'os.path.join', 'os.path.join', (['self.ENTRIES', 'filename_dbrecord'], {}), '(self.ENTRIES, filename_dbrecord)\n', (1490, 1523), False, 'import os\n'), ((1817, 1835), 'os.walk', 'os.walk', (['directory'], {}), '(directory)\n', (1824, 1835), False, 'import os\n'), ((2611, 2630), 'os.remove', 'os.remove', (['filename'], {}), '(filename)\n', (2620, 2630), False, 'import os\n'), ((1436, 1448), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (1446, 1448), False, 'import uuid\n'), ((1331, 1359), 'os.path.exists', 'os.path.exists', (['self.ENTRIES'], {}), '(self.ENTRIES)\n', (1345, 1359), False, 'import os\n'), ((1377, 1402), 'os.makedirs', 'os.makedirs', (['self.ENTRIES'], {}), '(self.ENTRIES)\n', (1388, 1402), False, 'import os\n'), ((1891, 1925), 'fnmatch.fnmatch', 'fnmatch.fnmatch', (['basename', 'pattern'], {}), '(basename, pattern)\n', (1906, 1925), False, 'import fnmatch\n'), ((1958, 1986), 'os.path.join', 'os.path.join', (['root', 'basename'], {}), '(root, basename)\n', (1970, 1986), False, 'import os\n')] |
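A short usage sketch: ENTRIES normally points at /srv/data/entries, so the example redirects it to a temporary directory, and the email/model values are made up.
# Usage sketch; ENTRIES is redirected to a temp dir so nothing is written to /srv/data.
import tempfile

db = BatchFilesDB()
db.ENTRIES = tempfile.mkdtemp()        # per-run scratch directory (hypothetical)
record = db.create('/tmp/book.txt', 'someone@example.com', 'eng-cat')

for entry in db.select():
    print(entry.filename, entry.email, entry.model_name)

db.delete(record)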
from kata.calculator import stringCalculator
def test_shouldBeZeroIfInputIsEmpty():
sc = stringCalculator()
sum = sc.add("")
assert sum == 0
| [
"kata.calculator.stringCalculator"
] | [((95, 113), 'kata.calculator.stringCalculator', 'stringCalculator', ([], {}), '()\n', (111, 113), False, 'from kata.calculator import stringCalculator\n')] |
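Assuming the usual string-calculator kata behaviour (comma-separated integers are summed), a natural follow-up test might look like the sketch below; it is a suggestion, not part of the original suite.
def test_shouldSumTwoCommaSeparatedNumbers():
    # assumes stringCalculator.add() follows the standard kata: "1,2" -> 3
    sc = stringCalculator()
    assert sc.add("1,2") == 3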
"""
Make a basic DataObject.
We will make a class and call it `MyFavoriteStuff`. We
will inherit the DataObject class to gain all its wonderful features.
Here you can see we must define the '_restrictions' attribute.
"""
from do_py import DataObject, R
class MyFavoriteStuff(DataObject):
"""
A DataObject that contains all of my favorite items.
:restriction favorite_number: The number I favor the most. Strings not allowed.
:restriction favorite_candy: My favorite candy, this is restricted by value.
:restriction favorite_movie: My favorite movie. This is optional because a `None` IS allowed!
"""
# There are two kinds of restrictions, type and value.
_restrictions = {
# Type restrictions restrict the type a value can have: int, str, bool, or other DataObjects's
'favorite_number': R.INT,
# Value restrictions restrict the value to a specific value in a list.
'favorite_candy': R('Jolly Ranchers', 'Nerds'),
# This is a type restriction that allows `None` as a value.
'favorite_movie': R.NULL_STR
}
# Instantiate your new DataObject.
instance = MyFavoriteStuff({
'favorite_number': 1985,
'favorite_candy': '<NAME>',
'favorite_movie': 'Jolly Green Giant'
})
print(instance)
# output: MyFavoriteStuff{"favorite_candy": "<NAME>", "favorite_number": 1985, "favorite_movie": "Jolly Green Giant"}
# You can access values using dot notation or like a `dict`.
print(instance.favorite_number == instance['favorite_number'])
# output: True
print(instance.favorite_number)
print(instance.favorite_candy)
print(instance.favorite_movie)
# output: 1985
# output: Jolly Ranchers
# output: Jolly Green Giant
# Editing the values can also be done very easily.
instance.favorite_number = 2013
print(instance.favorite_number)
# output: 2013
| [
"do_py.R"
] | [((949, 977), 'do_py.R', 'R', (['"""Jolly Ranchers"""', '"""Nerds"""'], {}), "('Jolly Ranchers', 'Nerds')\n", (950, 977), False, 'from do_py import DataObject, R\n')] |
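Restrictions are also enforced when the supplied data is invalid; here is a hedged sketch of what a violation looks like. The exact exception class raised by do_py is not shown in this snippet, so it is caught broadly.
# Passing a string where R.INT is required should be rejected by the DataObject.
# The precise exception type depends on do_py, so Exception is caught broadly here.
try:
    MyFavoriteStuff({
        'favorite_number': 'nineteen eighty-five',  # violates R.INT
        'favorite_candy': 'Nerds',
        'favorite_movie': None                      # allowed: R.NULL_STR permits None
    })
except Exception as err:
    print('Restriction violated:', err)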
"""This script tunes the simulator's hyperparameters by utilizing Bayesian Optimization,
and measures a hyperparameter set's fitness according to Sweden's death data with the
simulator output's time to peak and the difference over the rise of the curve. The
hyperparameters that are tuned in the script include spread rate and social distancing
rate
"""
import dataclasses
from dataclasses import dataclass
from typing import cast, Optional
import GPyOpt
import matplotlib.pyplot as plt
import numpy as np
from pandas import read_csv
from tqdm import trange
import pandemic_simulator as ps
SEED = 30
MAX_EVAL_TRIALS_TO_VALID = 5
np.random.seed(SEED)
@dataclass
class CalibrationData:
deaths: np.ndarray
hospitalizations: np.ndarray
def is_valid(self) -> bool:
return bool(np.sum(self.deaths) > 0)
def reset_patient_capacity(sim_config: ps.env.PandemicSimConfig) -> None:
for lc in sim_config.location_configs:
if issubclass(lc.location_type, ps.env.Hospital):
lc.state_opts['patient_capacity'] = sim_config.num_persons
sim_config.__post_init__()
def eval_params(params: np.ndarray,
max_episode_length: int,
trial_cnt: int = 0) -> CalibrationData:
"""Evaluate the params and return the result
:param params: spread rate and social distancing rate
:param max_episode_length: length of simulation run in days
:param trial_cnt: evaluation trial count
:returns: CalibrationData instance
"""
if trial_cnt >= MAX_EVAL_TRIALS_TO_VALID:
raise Exception(f'Could not find a valid evaluation for the params: {params} within the specified number'
f'of trials: {MAX_EVAL_TRIALS_TO_VALID}.')
spread_rate = np.round(params[:, 0][0], decimals=3)
social_distancing = np.round(params[:, 1][0], decimals=3)
deaths = []
hospitalizations = []
seed = SEED + trial_cnt
if trial_cnt == 0:
print(f'Running with spread rate: {spread_rate} and social distancing: {social_distancing}')
else:
print(f'Re-Running with a different seed: {seed}')
ps.init_globals(seed=seed)
sim_config = ps.sh.small_town_config
reset_patient_capacity(sim_config)
sim_opts = ps.env.PandemicSimOpts(infection_spread_rate_mean=spread_rate)
sim = ps.env.PandemicSim.from_config(sim_config, sim_opts)
# using swedish stage 1 regulation with the given social distancing to calibrate
covid_regulation = dataclasses.replace(ps.sh.swedish_regulations[1], social_distancing=social_distancing)
sim.impose_regulation(regulation=covid_regulation)
hospital_ids = sim.registry.location_ids_of_type(ps.env.Hospital)
hospital_weekly = 0
for i in trange(max_episode_length, desc='Simulating day'):
sim.step_day()
state = sim.state
num_deaths = state.global_infection_summary[ps.env.InfectionSummary.DEAD]
deaths.append(num_deaths)
num_hospitalizations = sum([cast(ps.env.HospitalState, state.id_to_location_state[loc_id]).num_admitted_patients
for loc_id in hospital_ids])
hospital_weekly += num_hospitalizations
if i % 7 == 0:
hospitalizations.append(hospital_weekly)
hospital_weekly = 0
deaths_arr = np.asarray(deaths)
deaths_arr = deaths_arr[1:] - deaths_arr[:-1]
hosp_arr = np.asarray(hospitalizations)
hosp_arr = hosp_arr[1:] - hosp_arr[:-1]
eval_result = CalibrationData(deaths=deaths_arr, hospitalizations=hosp_arr)
return eval_result if eval_result.is_valid() else eval_params(params, max_episode_length, trial_cnt=trial_cnt + 1)
def real_world_data() -> CalibrationData:
"""Extract and treat real-world data from WHO
:returns: real-world death data
"""
# using Sweden's death and hospitalization data
deaths_url = 'https://raw.githubusercontent.com/owid/covid-19-data/master/public/data/ecdc/new_deaths.csv'
deaths_df = read_csv(deaths_url, header=0)
real_deaths = deaths_df['Sweden'].values
real_deaths = real_deaths[~np.isnan(real_deaths)]
hosp_url = 'https://raw.githubusercontent.com/owid/covid-19-data/master/scripts/grapher/' \
'COVID-2019%20-%20Hospital%20&%20ICU.csv'
hosp_df = read_csv(hosp_url, header=0)
real_hosp = np.array(hosp_df[hosp_df['entity'] == 'Sweden']['Weekly new ICU admissions'])
real_hosp = np.round(real_hosp[~np.isnan(real_hosp)]).astype('int')
return CalibrationData(deaths=real_deaths, hospitalizations=np.asarray(real_hosp))
def process_data(data: np.ndarray, data_len: Optional[int] = None, five_day_average: bool = False) -> np.ndarray:
# trim initial zeros
data = np.trim_zeros(data, 'f')[:data_len]
# calculate sliding average
if five_day_average:
data = np.convolve(data, np.ones(5) / 5, mode='same')
# normalize
data = data / np.max(data)
return data
def obj_func(params: np.ndarray) -> float:
"""Objective function calculates fitness score for a given parameter set
:param params: spread rate and social distancing rate to be evaluated
:returns: fitness score of parameter set
"""
# get sim data
sim_result: CalibrationData = eval_params(params, 60)
sim_data = process_data(sim_result.hospitalizations)
# get real data
real_result: CalibrationData = real_world_data()
real_data = process_data(real_result.hospitalizations, data_len=len(sim_data))
# compare only until the rise of real_peak
real_peak = np.argmax(real_data).item()
real_data = real_data[:real_peak + 1]
sim_data = sim_data[:real_peak + 1]
# get score
score = np.linalg.norm(real_data - sim_data)
print('score: ', score)
return float(score)
def make_plots(params: np.ndarray) -> None:
"""Plot final parameter set output against real world data
:param params: resulting spread rate and social distancing rate
"""
# get sim data
sim_result: CalibrationData = eval_params(params, 100)
sim_data = process_data(sim_result.hospitalizations)
# get real data
real_result: CalibrationData = real_world_data()
real_data = process_data(real_result.hospitalizations, len(sim_data))
# plot calibrated simulator run against real-world data
plt.plot(sim_data)
plt.plot(real_data)
plt.legend(["Simulated", "Real world (Sweden)"])
plt.xlabel("Weeks Passed")
plt.ylabel("Hospitalizations Per Week (normalized)")
plt.show()
if __name__ == '__main__':
bounds2d = [{'name': 'spread rate', 'type': 'continuous', 'domain': (0.005, 0.03)},
{'name': 'contact rate', 'type': 'continuous', 'domain': (0., 0.4)}]
myBopt_2d = GPyOpt.methods.BayesianOptimization(obj_func, domain=bounds2d)
myBopt_2d.run_optimization()
print("=" * 20)
print("Value of (spread rate, contact rate) that minimises the objective:" + str(myBopt_2d.x_opt))
print("Minimum value of the objective: " + str(myBopt_2d.fx_opt))
print("=" * 20)
make_plots(np.array([[myBopt_2d.x_opt[0], myBopt_2d.x_opt[1]]], dtype=np.float64))
myBopt_2d.plot_acquisition()
| [
"pandas.read_csv",
"matplotlib.pyplot.ylabel",
"GPyOpt.methods.BayesianOptimization",
"numpy.array",
"pandemic_simulator.init_globals",
"numpy.linalg.norm",
"pandemic_simulator.env.PandemicSimOpts",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.asarray",
"numpy.max",
"numpy.random.seed",
"numpy.round",
"numpy.trim_zeros",
"numpy.ones",
"numpy.argmax",
"numpy.isnan",
"dataclasses.replace",
"pandemic_simulator.env.PandemicSim.from_config",
"tqdm.trange",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show",
"typing.cast",
"numpy.sum"
] | [((633, 653), 'numpy.random.seed', 'np.random.seed', (['SEED'], {}), '(SEED)\n', (647, 653), True, 'import numpy as np\n'), ((1748, 1785), 'numpy.round', 'np.round', (['params[:, 0][0]'], {'decimals': '(3)'}), '(params[:, 0][0], decimals=3)\n', (1756, 1785), True, 'import numpy as np\n'), ((1810, 1847), 'numpy.round', 'np.round', (['params[:, 1][0]'], {'decimals': '(3)'}), '(params[:, 1][0], decimals=3)\n', (1818, 1847), True, 'import numpy as np\n'), ((2118, 2144), 'pandemic_simulator.init_globals', 'ps.init_globals', ([], {'seed': 'seed'}), '(seed=seed)\n', (2133, 2144), True, 'import pandemic_simulator as ps\n'), ((2240, 2302), 'pandemic_simulator.env.PandemicSimOpts', 'ps.env.PandemicSimOpts', ([], {'infection_spread_rate_mean': 'spread_rate'}), '(infection_spread_rate_mean=spread_rate)\n', (2262, 2302), True, 'import pandemic_simulator as ps\n'), ((2313, 2365), 'pandemic_simulator.env.PandemicSim.from_config', 'ps.env.PandemicSim.from_config', (['sim_config', 'sim_opts'], {}), '(sim_config, sim_opts)\n', (2343, 2365), True, 'import pandemic_simulator as ps\n'), ((2475, 2566), 'dataclasses.replace', 'dataclasses.replace', (['ps.sh.swedish_regulations[1]'], {'social_distancing': 'social_distancing'}), '(ps.sh.swedish_regulations[1], social_distancing=\n social_distancing)\n', (2494, 2566), False, 'import dataclasses\n'), ((2726, 2775), 'tqdm.trange', 'trange', (['max_episode_length'], {'desc': '"""Simulating day"""'}), "(max_episode_length, desc='Simulating day')\n", (2732, 2775), False, 'from tqdm import trange\n'), ((3301, 3319), 'numpy.asarray', 'np.asarray', (['deaths'], {}), '(deaths)\n', (3311, 3319), True, 'import numpy as np\n'), ((3386, 3414), 'numpy.asarray', 'np.asarray', (['hospitalizations'], {}), '(hospitalizations)\n', (3396, 3414), True, 'import numpy as np\n'), ((3978, 4008), 'pandas.read_csv', 'read_csv', (['deaths_url'], {'header': '(0)'}), '(deaths_url, header=0)\n', (3986, 4008), False, 'from pandas import read_csv\n'), ((4276, 4304), 'pandas.read_csv', 'read_csv', (['hosp_url'], {'header': '(0)'}), '(hosp_url, header=0)\n', (4284, 4304), False, 'from pandas import read_csv\n'), ((4321, 4398), 'numpy.array', 'np.array', (["hosp_df[hosp_df['entity'] == 'Sweden']['Weekly new ICU admissions']"], {}), "(hosp_df[hosp_df['entity'] == 'Sweden']['Weekly new ICU admissions'])\n", (4329, 4398), True, 'import numpy as np\n'), ((5675, 5711), 'numpy.linalg.norm', 'np.linalg.norm', (['(real_data - sim_data)'], {}), '(real_data - sim_data)\n', (5689, 5711), True, 'import numpy as np\n'), ((6300, 6318), 'matplotlib.pyplot.plot', 'plt.plot', (['sim_data'], {}), '(sim_data)\n', (6308, 6318), True, 'import matplotlib.pyplot as plt\n'), ((6323, 6342), 'matplotlib.pyplot.plot', 'plt.plot', (['real_data'], {}), '(real_data)\n', (6331, 6342), True, 'import matplotlib.pyplot as plt\n'), ((6347, 6395), 'matplotlib.pyplot.legend', 'plt.legend', (["['Simulated', 'Real world (Sweden)']"], {}), "(['Simulated', 'Real world (Sweden)'])\n", (6357, 6395), True, 'import matplotlib.pyplot as plt\n'), ((6400, 6426), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Weeks Passed"""'], {}), "('Weeks Passed')\n", (6410, 6426), True, 'import matplotlib.pyplot as plt\n'), ((6431, 6483), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Hospitalizations Per Week (normalized)"""'], {}), "('Hospitalizations Per Week (normalized)')\n", (6441, 6483), True, 'import matplotlib.pyplot as plt\n'), ((6488, 6498), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6496, 6498), True, 'import matplotlib.pyplot as 
plt\n'), ((6717, 6779), 'GPyOpt.methods.BayesianOptimization', 'GPyOpt.methods.BayesianOptimization', (['obj_func'], {'domain': 'bounds2d'}), '(obj_func, domain=bounds2d)\n', (6752, 6779), False, 'import GPyOpt\n'), ((4710, 4734), 'numpy.trim_zeros', 'np.trim_zeros', (['data', '"""f"""'], {}), "(data, 'f')\n", (4723, 4734), True, 'import numpy as np\n'), ((4901, 4913), 'numpy.max', 'np.max', (['data'], {}), '(data)\n', (4907, 4913), True, 'import numpy as np\n'), ((7043, 7113), 'numpy.array', 'np.array', (['[[myBopt_2d.x_opt[0], myBopt_2d.x_opt[1]]]'], {'dtype': 'np.float64'}), '([[myBopt_2d.x_opt[0], myBopt_2d.x_opt[1]]], dtype=np.float64)\n', (7051, 7113), True, 'import numpy as np\n'), ((4085, 4106), 'numpy.isnan', 'np.isnan', (['real_deaths'], {}), '(real_deaths)\n', (4093, 4106), True, 'import numpy as np\n'), ((4535, 4556), 'numpy.asarray', 'np.asarray', (['real_hosp'], {}), '(real_hosp)\n', (4545, 4556), True, 'import numpy as np\n'), ((5536, 5556), 'numpy.argmax', 'np.argmax', (['real_data'], {}), '(real_data)\n', (5545, 5556), True, 'import numpy as np\n'), ((799, 818), 'numpy.sum', 'np.sum', (['self.deaths'], {}), '(self.deaths)\n', (805, 818), True, 'import numpy as np\n'), ((4837, 4847), 'numpy.ones', 'np.ones', (['(5)'], {}), '(5)\n', (4844, 4847), True, 'import numpy as np\n'), ((2978, 3040), 'typing.cast', 'cast', (['ps.env.HospitalState', 'state.id_to_location_state[loc_id]'], {}), '(ps.env.HospitalState, state.id_to_location_state[loc_id])\n', (2982, 3040), False, 'from typing import cast, Optional\n'), ((4435, 4454), 'numpy.isnan', 'np.isnan', (['real_hosp'], {}), '(real_hosp)\n', (4443, 4454), True, 'import numpy as np\n')] |
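To make the fitness measure concrete, here is a tiny synthetic illustration of what obj_func computes: both curves are peak-normalised by process_data and compared with an L2 norm only up to the real-world peak. The arrays are invented.
# Synthetic illustration of the scoring used in obj_func; the arrays are made up.
sim_curve = process_data(np.array([0, 0, 1, 3, 6, 10, 9, 7]))
real_curve = process_data(np.array([0, 2, 5, 9, 12, 11, 8]), data_len=len(sim_curve))
real_peak = np.argmax(real_curve).item()
score = np.linalg.norm(real_curve[:real_peak + 1] - sim_curve[:real_peak + 1])
print(score)   # smaller is better: the simulated rise tracks the real one more closely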
import graphene
import random
from graphql_ws.pubsub import GeventRxPubsub
from rx import Observable
pubsub = GeventRxPubsub()
class Query(graphene.ObjectType):
base = graphene.String()
def resolve_base(root, info):
return 'Hello World!'
class MutationExample(graphene.Mutation):
class Arguments:
input_text = graphene.String()
output_text = graphene.String()
def mutate(self, info, input_text):
pubsub.publish('BASE', input_text)
return MutationExample(output_text=input_text)
class Mutations(graphene.ObjectType):
mutation_example = MutationExample.Field()
class RandomType(graphene.ObjectType):
seconds = graphene.Int()
random_int = graphene.Int()
class Subscription(graphene.ObjectType):
count_seconds = graphene.Int(up_to=graphene.Int())
random_int = graphene.Field(RandomType)
mutation_example = graphene.String()
def resolve_mutation_example(root, info):
# subscribe_to_channel method returns an observable
return pubsub.subscribe_to_channel('BASE')\
.map(lambda i: "{0}".format(i))
def resolve_count_seconds(root, info, up_to=5):
return Observable.interval(1000)\
.map(lambda i: "{0}".format(i))\
.take_while(lambda i: int(i) <= up_to)
def resolve_random_int(root, info):
return Observable.interval(1000).map(
lambda i: RandomType(seconds=i, random_int=random.randint(0, 500)))
schema = graphene.Schema(query=Query, mutation=Mutations,
subscription=Subscription)
| [
"graphene.String",
"graphene.Field",
"rx.Observable.interval",
"graphql_ws.pubsub.GeventRxPubsub",
"graphene.Int",
"graphene.Schema",
"random.randint"
] | [((112, 128), 'graphql_ws.pubsub.GeventRxPubsub', 'GeventRxPubsub', ([], {}), '()\n', (126, 128), False, 'from graphql_ws.pubsub import GeventRxPubsub\n'), ((1522, 1597), 'graphene.Schema', 'graphene.Schema', ([], {'query': 'Query', 'mutation': 'Mutations', 'subscription': 'Subscription'}), '(query=Query, mutation=Mutations, subscription=Subscription)\n', (1537, 1597), False, 'import graphene\n'), ((176, 193), 'graphene.String', 'graphene.String', ([], {}), '()\n', (191, 193), False, 'import graphene\n'), ((382, 399), 'graphene.String', 'graphene.String', ([], {}), '()\n', (397, 399), False, 'import graphene\n'), ((681, 695), 'graphene.Int', 'graphene.Int', ([], {}), '()\n', (693, 695), False, 'import graphene\n'), ((713, 727), 'graphene.Int', 'graphene.Int', ([], {}), '()\n', (725, 727), False, 'import graphene\n'), ((843, 869), 'graphene.Field', 'graphene.Field', (['RandomType'], {}), '(RandomType)\n', (857, 869), False, 'import graphene\n'), ((893, 910), 'graphene.String', 'graphene.String', ([], {}), '()\n', (908, 910), False, 'import graphene\n'), ((345, 362), 'graphene.String', 'graphene.String', ([], {}), '()\n', (360, 362), False, 'import graphene\n'), ((810, 824), 'graphene.Int', 'graphene.Int', ([], {}), '()\n', (822, 824), False, 'import graphene\n'), ((1400, 1425), 'rx.Observable.interval', 'Observable.interval', (['(1000)'], {}), '(1000)\n', (1419, 1425), False, 'from rx import Observable\n'), ((1195, 1220), 'rx.Observable.interval', 'Observable.interval', (['(1000)'], {}), '(1000)\n', (1214, 1220), False, 'from rx import Observable\n'), ((1486, 1508), 'random.randint', 'random.randint', (['(0)', '(500)'], {}), '(0, 500)\n', (1500, 1508), False, 'import random\n')] |
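A quick, hedged way to exercise the schema without standing up a websocket server is to execute a query directly; this only touches the synchronous Query root, while subscriptions still need graphql_ws and a real transport.
# Sanity-check the Query root directly; subscriptions require graphql_ws plus a server.
result = schema.execute('{ base }')
print(result.data['base'])   # -> 'Hello World!'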
import numpy as np
import matplotlib.pyplot as plt
import pymongo
# Make a chart of average comments vs upvotes for each flair
# All charts are saved in the graph folder
def intilise_database():
"""
    Initialise the database and make a table instance
Returns
pymongo object of the table
"""
myclient = pymongo.MongoClient("mongodb://localhost:27017/")
mydb=myclient['subreddit']
maintable = mydb["posts2"]
return maintable
def make_graph(c,u, xx):
N = 11
ind = np.arange(N) # the x locations for the groups
width = 0.27 # the width of the bars
fig = plt.figure()
ax = fig.add_subplot(111)
yvals = u
rects1 = ax.bar(ind, yvals, width, color='c')
zvals = c
rects2 = ax.bar(ind+width, zvals, width, color='y')
ax.set_ylabel('Average Number')
ax.set_xticks(ind+width/2)
ax.set_xticklabels( xx)
ax.legend( (rects1[0], rects2[0]), ('Upvotes', 'Comments') )
def autolabel(rects):
for rect in rects:
h = rect.get_height()
ax.text(rect.get_x()+rect.get_width()/2., 1.05*h, '%d'%int(h),
ha='center', va='bottom')
autolabel(rects1)
autolabel(rects2)
plt.show()
flairs = ['Photography','Politics', 'Non-Political', 'AskIndia', '[R]eddiquette', 'Policy/Economy', 'Business/Finance', 'Science/Technology', 'Scheduled', 'Sports', 'Food' ]
if __name__ == "__main__":
xticket = tuple(flairs)
db = intilise_database()
final_comment = []
final_upv = []
for flair in flairs:
count = db.find({'flair':flair}).count()
comment = 0
upvotes = 0
for post in db.find({'flair':flair}):
comment += post['no_comments']
upvotes += post['upvote']
comment = int(comment/count)
upvotes = int(upvotes/count)
final_comment.append(comment)
final_upv.append(upvotes)
make_graph(final_comment,final_upv,xticket)
| [
"pymongo.MongoClient",
"matplotlib.pyplot.figure",
"numpy.arange",
"matplotlib.pyplot.show"
] | [((309, 358), 'pymongo.MongoClient', 'pymongo.MongoClient', (['"""mongodb://localhost:27017/"""'], {}), "('mongodb://localhost:27017/')\n", (328, 358), False, 'import pymongo\n'), ((491, 503), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (500, 503), True, 'import numpy as np\n'), ((596, 608), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (606, 608), True, 'import matplotlib.pyplot as plt\n'), ((1195, 1205), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1203, 1205), True, 'import matplotlib.pyplot as plt\n')] |
import random
import re
class CharmmBond:
# (bond)\s+(\d+)\s+(\d+)\s+([-+]?[0-9]*\.?[0-9]+)\s+([-+]?[0-9]*\.?[0-9]+)
def __init__(self, atom_class1, atom_class2, uk1, uk2):
self.type = "bond"
self.atom_class1 = int(atom_class1)
self.atom_class2 = int(atom_class2)
self.uk1 = float(uk1)
self.uk2 = float(uk2)
def __str__(self):
# bond 1 10 330.00 1.1000
return '{:>4} {:>5}{:>5} {:>11.2f} {:>11.4f}'.format(self.type, self.atom_class1, self.atom_class2,
self.uk1, self.uk2)
@staticmethod
def parse(string):
match = re.match(r'(bond)\s+(\d+)\s+(\d+)\s+([-+]?[0-9]*\.?[0-9]+)\s+([-+]?[0-9]*\.?[0-9]+)', string)
if match:
return CharmmBond(match.group(2), match.group(3), match.group(4), match.group(5))
else:
raise ValueError("Invalid Bond input: ", string)
def random_edit(self, percentage):
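        # Apply the same random percentage change to both bond parameters (uk1 and uk2)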
rand = random.uniform(-percentage, percentage)
change = rand / 100.0
return CharmmBond(self.atom_class1, self.atom_class2, (1 + change) * self.uk1,
(1 + change) * self.uk2)
# f = open('charmm22_2380.prm', 'r')
# param_file = f.read().split('\n')
#
# charmm_atoms = []
#
# for line in param_file:
# if "bond " in line:
# charmm_atoms.append(CharmmBond.parse(line))
#
# for atom in charmm_atoms:
# print(atom)
| [
"random.uniform",
"re.match"
] | [((716, 826), 're.match', 're.match', (['"""(bond)\\\\s+(\\\\d+)\\\\s+(\\\\d+)\\\\s+([-+]?[0-9]*\\\\.?[0-9]+)\\\\s+([-+]?[0-9]*\\\\.?[0-9]+)"""', 'string'], {}), "(\n '(bond)\\\\s+(\\\\d+)\\\\s+(\\\\d+)\\\\s+([-+]?[0-9]*\\\\.?[0-9]+)\\\\s+([-+]?[0-9]*\\\\.?[0-9]+)'\n , string)\n", (724, 826), False, 'import re\n'), ((1052, 1091), 'random.uniform', 'random.uniform', (['(-percentage)', 'percentage'], {}), '(-percentage, percentage)\n', (1066, 1091), False, 'import random\n')] |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import six
from . import fake
class BackendTestMixin(object):
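    """Shared tests for storage backends; concrete test cases supply
    ``self.backend`` and the put_file/get_file/file_exists helpers."""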
def put_file(self, filename, content):
raise NotImplementedError('You must implement this method')
def get_file(self, filename):
raise NotImplementedError('You must implement this method')
def file_exists(self, filename):
raise NotImplementedError('You must implement this method')
def assert_bin_equal(self, filename, expected):
data = self.get_file(filename)
self.assertEqual(data, self.b(expected))
def assert_text_equal(self, filename, expected):
data = self.get_file(filename)
self.assertEqual(data, six.b(expected))
def test_exists(self):
self.put_file('file.test', 'test')
self.assertTrue(self.backend.exists('file.test'))
self.assertFalse(self.backend.exists('other.test'))
def test_open_read(self):
content = self.text()
self.put_file('file.test', content)
with self.backend.open('file.test') as f:
data = f.read()
self.assertIsInstance(data, six.text_type)
self.assertEqual(data, content)
def test_open_read_binary(self):
content = self.binary()
self.put_file('file.test', content)
with self.backend.open('file.test', 'rb') as f:
data = f.read()
self.assertIsInstance(data, six.binary_type)
self.assertEqual(data, content)
def test_open_write_new_file(self):
filename = 'test.text'
content = self.text()
with self.backend.open(filename, 'w') as f:
f.write(content)
self.assert_text_equal(filename, content)
def test_open_write_new_binary_file(self):
filename = 'test.bin'
content = self.binary()
with self.backend.open(filename, 'wb') as f:
f.write(content)
self.assert_bin_equal(filename, content)
def test_open_write_existing_file(self):
filename = 'test.txt'
content = self.text()
self.put_file(filename, self.text())
with self.backend.open(filename, 'w') as f:
f.write(content)
self.assert_text_equal(filename, content)
def test_read(self):
content = self.text()
self.put_file('file.test', content)
self.assertEqual(self.backend.read('file.test'), six.b(content))
def test_write_text(self):
content = self.text()
self.backend.write('test.txt', content)
self.assert_text_equal('test.txt', content)
def test_write_binary(self):
content = self.binary()
self.backend.write('test.bin', content)
self.assert_bin_equal('test.bin', content)
def test_write_file(self):
content = self.binary()
self.backend.write('test.bin', self.file(content))
self.assert_bin_equal('test.bin', content)
def test_delete(self):
content = fake.sentence()
self.put_file('file.test', content)
self.backend.delete('file.test')
self.assertFalse(self.file_exists('file.test'))
def test_save_content(self):
content = self.text()
storage = self.filestorage('test.txt', content)
self.backend.save(storage, 'test.txt')
self.assert_text_equal('test.txt', content)
def test_save_from_file(self):
content = self.binary()
f = self.file(content)
self.backend.save(f, 'test.png')
f.seek(0)
self.assert_bin_equal('test.png', content)
def test_save_with_filename(self):
filename = 'somewhere/test.test'
content = self.text()
storage = self.filestorage('test.txt', content)
self.backend.save(storage, filename)
self.assert_text_equal(filename, content)
| [
"six.b"
] | [((716, 731), 'six.b', 'six.b', (['expected'], {}), '(expected)\n', (721, 731), False, 'import six\n'), ((2425, 2439), 'six.b', 'six.b', (['content'], {}), '(content)\n', (2430, 2439), False, 'import six\n')] |
#!/usr/bin/env python
# ------------------------------------------------------------------------------------------------------%
# Created by "<NAME>" at 21:41, 16/03/2020 %
# %
# Email: <EMAIL> %
# Homepage: https://www.researchgate.net/profile/Thieu_Nguyen6 %
# Github: https://github.com/thieunguyen5991 %
#-------------------------------------------------------------------------------------------------------%
from pandas import DataFrame
from mealpy.evolutionary_based.DE import BaseDE
from examples.setting_function import func_paras, func_names, problem_size
from os import getcwd, path, makedirs
model_name = "DE"
num_runs = 5
PATH_RESULTS = "history/results/"
check_dir1 = getcwd() + "/" + PATH_RESULTS
if not path.exists(check_dir1):
makedirs(check_dir1)
## Setting parameters
epoch = 10
pop_size = 50
wf = 0.8
cr = 0.9
## Run model
best_fit_full = {}
best_fit_columns = []
error_full = {}
error_columns = []
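# Run DE on each benchmark function, recording its loss history and best fitness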
for id_paras in range(len(func_paras)):
md = BaseDE(func_paras[id_paras], epoch, pop_size, wf, cr)
_, best_fit, list_loss = md._train__()
error_full[func_names[id_paras]] = list_loss
error_columns.append(func_names[id_paras])
best_fit_full[func_names[id_paras]] = [best_fit]
best_fit_columns.append(func_names[id_paras])
df_err = DataFrame(error_full, columns=error_columns)
df_err.to_csv(PATH_RESULTS + str(problem_size) + "D_" + model_name + "_error.csv", header=True, index=False)
df_fit = DataFrame(best_fit_full, columns=best_fit_columns)
df_fit.to_csv(PATH_RESULTS + str(problem_size) + "D_" + model_name + "_best_fit.csv", header=True, index=False)
| [
"os.path.exists",
"os.makedirs",
"mealpy.evolutionary_based.DE.BaseDE",
"os.getcwd",
"pandas.DataFrame"
] | [((1611, 1655), 'pandas.DataFrame', 'DataFrame', (['error_full'], {'columns': 'error_columns'}), '(error_full, columns=error_columns)\n', (1620, 1655), False, 'from pandas import DataFrame\n'), ((1775, 1825), 'pandas.DataFrame', 'DataFrame', (['best_fit_full'], {'columns': 'best_fit_columns'}), '(best_fit_full, columns=best_fit_columns)\n', (1784, 1825), False, 'from pandas import DataFrame\n'), ((1047, 1070), 'os.path.exists', 'path.exists', (['check_dir1'], {}), '(check_dir1)\n', (1058, 1070), False, 'from os import getcwd, path, makedirs\n'), ((1076, 1096), 'os.makedirs', 'makedirs', (['check_dir1'], {}), '(check_dir1)\n', (1084, 1096), False, 'from os import getcwd, path, makedirs\n'), ((1303, 1356), 'mealpy.evolutionary_based.DE.BaseDE', 'BaseDE', (['func_paras[id_paras]', 'epoch', 'pop_size', 'wf', 'cr'], {}), '(func_paras[id_paras], epoch, pop_size, wf, cr)\n', (1309, 1356), False, 'from mealpy.evolutionary_based.DE import BaseDE\n'), ((1010, 1018), 'os.getcwd', 'getcwd', ([], {}), '()\n', (1016, 1018), False, 'from os import getcwd, path, makedirs\n')] |
"""Imitate the .websocket_server functionality using RS232 instead of TCP/IP.
"""
import logging
from typing import Callable
import serial
from .decoder import Decoder
from .. import logger
LOGGER = logging.getLogger("serial_server")
class SerialServer:
"""A server that listens on and sends through a serial port."""
def __init__(self, device: str,
received_msg_callback: Callable[[str], None] = None,
baudrate: int = 19200) -> None:
try:
self._dev = serial.Serial(port=device, baudrate=baudrate)
except (FileNotFoundError, serial.SerialException):
LOGGER.error("Failed to open serial connection for serving.")
raise ConnectionError("Starting serial connection for server "
"failed.")
self._rcv_callback = received_msg_callback
LOGGER.info("Creating instance. Do call the async_init() fcn.")
def publish(self, data: str) -> None:
"""Send the given string over the serial interface."""
bytestream = data.encode()
n_bytes = len(bytestream)
n_transmitted_bytes = self._dev.write(bytestream)
if n_transmitted_bytes == n_bytes:
LOGGER.debug("Sent message: %s", logger.ellipsicate(data))
else:
LOGGER.warning("Error transmitting Message: %s",
logger.ellipsicate(data))
def serve(self) -> None:
"""Start listening. This blocks indefinitely. Use threading."""
LOGGER.info("Listening on port %s", self._dev.port)
collector = Decoder()
while True:
# Receive data. This will block until some data is received.
# This will avoid busy waiting.
data = self._dev.read(1)
data += self._dev.read(self._dev.in_waiting)
collector.feed(data)
if collector.n_pending() > 0:
messages = collector.harvest()
for msg in messages:
LOGGER.debug("Received message: %s", msg)
if callable(self._rcv_callback):
self._rcv_callback(msg)
| [
"logging.getLogger",
"serial.Serial"
] | [((202, 236), 'logging.getLogger', 'logging.getLogger', (['"""serial_server"""'], {}), "('serial_server')\n", (219, 236), False, 'import logging\n'), ((520, 565), 'serial.Serial', 'serial.Serial', ([], {'port': 'device', 'baudrate': 'baudrate'}), '(port=device, baudrate=baudrate)\n', (533, 565), False, 'import serial\n')] |
import pickle
from typing import List
from bee.data.option import Options, Option
from bee.data.map import Map
from bee.errors.error import CodedError
from bee.net.rpc.codec import Stream, IClientCodec, IServerCodec, CodecBuilder, Request as _Request, RequestHead as _RequestHead, ResponseHead as _ResponseHead, \
Response as _Response, Result as _Result, register_codec
from bee.data import const
# Default maximum message size (2 << 20 bytes = 2 MiB)
from bee.net.rpc.codecs.proto.msg_pb2 import Label, Response, Request
const.default_max_message_size = 2 << 20
def reset_req(req: Request):
    # repeated protobuf fields cannot be reassigned, so clear them in place
    if req.args != None:
        del req.args[:]
    if req.labels != None:
        del req.labels[:]
def reset_resp(resp: Response):
    # scalar protobuf fields cannot be assigned None, so clear the fields instead
    resp.ClearField("result")
    resp.ClearField("error")
class ClientCodec(IClientCodec):
"""
    Client-side codec
"""
def __init__(self, s: Stream, req: Request = None, resp: Response = None, max_msg_size=0):
self.s = s
self.req = req
self.resp = resp
self.max_msg_size = max_msg_size
def stream(self) -> Stream:
return self.s
def encode(self, req: _Request):
self.req.id = req.head.id
self.req.service = req.head.service
self.req.method = req.head.method
if len(req.args) > 0:
for v in req.args:
# convert v to bytes, v is object
self.req.args.append(pickle.dumps(v))
if len(req.head.labels) > 0:
for v in req.head.labels:
self.req.labels.append(Label(name=v.name, value=v.value))
self._write(self.req.SerializeToString())
def _write(self, data: bytes):
        # write the data's length, occupying 4 bytes
self.s.write(len(data).to_bytes(4, byteorder='little'))
# write data
self.s.write(data)
self.s.flush()
def decode_head(self, head: _ResponseHead):
        length_bytes = self.s.read(4)
length = int.from_bytes(length_bytes, byteorder="little")
if length > self.max_msg_size:
raise BaseException("message too big: %s" % length)
data = self.s.read(length)
self.resp.ParseFromString(data)
head.id = self.resp.id
def decode_result(self, result: _Result):
if self.resp.result != None:
result.error = None
result.value = pickle.loads(self.resp.result)
else:
result.error = CodedError(self.resp.error)
def discard_result(self):
return None
class ServerCodec(IServerCodec):
"""
    Server-side codec
"""
def __init__(self, s: Stream, req: Request = None, resp: Response = None, max_msg_size=0):
self.s = s
self.req = req
self.resp = resp
self.max_msg_size = max_msg_size
def stream(self) -> Stream:
return self.s
def encode(self, resp: _Response):
self.resp.id = resp.head.id
if resp.result.error == None:
if resp.result.value != None:
self.resp.result = pickle.dumps(resp.result.value)
else:
self.resp.result = None
self.resp.error = resp.result.error
self._write(self.resp.SerializeToString())
def _write(self, data: bytes):
        # write the data's length, occupying 4 bytes
self.s.write(len(data).to_bytes(4, byteorder='little'))
# write data
self.s.write(data)
self.s.flush()
def decode_head(self, head: _RequestHead):
        length_bytes = self.s.read(4)
length = int.from_bytes(length_bytes, byteorder="little")
if length > self.max_msg_size:
raise BaseException("message too big: %s" % length)
data = self.s.read(length)
self.req.ParseFromString(data)
head.id = self.req.id
head.service = self.req.service
head.method = self.req.method
if self.req.labels:
opts = Options()
for v in self.req.labels:
opts.append(Option(name=v.name, value=v.value))
head.labels = opts
if head.id != None and head.id > 0:
print("%s, %s.%s(%s), labels(%s)" % (head.id, head.service, head.method, "", head.labels))
def decode_args(self, args: List[object]):
if self.req.args != None:
for i, v in enumerate(self.req.args):
args.insert(i, pickle.loads(v))
def discard_args(self):
return None
class Builder(CodecBuilder):
name="proto"
def new_client_codec(self, s: Stream, opts: Map) -> ClientCodec:
max_msg_size = self.max_msg_size(opts)
return ClientCodec(s=s, req=Request(), resp=Response(), max_msg_size=max_msg_size)
def new_server_codec(self, s: Stream, opts: Map) -> ServerCodec:
max_msg_size = self.max_msg_size(opts)
return ServerCodec(s=s, req=Request(), resp=Response(), max_msg_size=max_msg_size)
def max_msg_size(self, opts: Map):
size = None
if opts != None:
if opts.get("max_msg_size") != None:
size = int(opts.get("max_msg_size"))
if size == None or size == 0:
size = const.default_max_message_size
return size
def init():
register_codec("proto", Builder())
| [
"bee.errors.error.CodedError",
"pickle.dumps",
"bee.data.option.Option",
"bee.net.rpc.codecs.proto.msg_pb2.Response",
"bee.net.rpc.codecs.proto.msg_pb2.Label",
"bee.net.rpc.codecs.proto.msg_pb2.Request",
"pickle.loads",
"bee.data.option.Options"
] | [((2325, 2355), 'pickle.loads', 'pickle.loads', (['self.resp.result'], {}), '(self.resp.result)\n', (2337, 2355), False, 'import pickle\n'), ((2397, 2424), 'bee.errors.error.CodedError', 'CodedError', (['self.resp.error'], {}), '(self.resp.error)\n', (2407, 2424), False, 'from bee.errors.error import CodedError\n'), ((3874, 3883), 'bee.data.option.Options', 'Options', ([], {}), '()\n', (3881, 3883), False, 'from bee.data.option import Options, Option\n'), ((2989, 3020), 'pickle.dumps', 'pickle.dumps', (['resp.result.value'], {}), '(resp.result.value)\n', (3001, 3020), False, 'import pickle\n'), ((4595, 4604), 'bee.net.rpc.codecs.proto.msg_pb2.Request', 'Request', ([], {}), '()\n', (4602, 4604), False, 'from bee.net.rpc.codecs.proto.msg_pb2 import Label, Response, Request\n'), ((4611, 4621), 'bee.net.rpc.codecs.proto.msg_pb2.Response', 'Response', ([], {}), '()\n', (4619, 4621), False, 'from bee.net.rpc.codecs.proto.msg_pb2 import Label, Response, Request\n'), ((4803, 4812), 'bee.net.rpc.codecs.proto.msg_pb2.Request', 'Request', ([], {}), '()\n', (4810, 4812), False, 'from bee.net.rpc.codecs.proto.msg_pb2 import Label, Response, Request\n'), ((4819, 4829), 'bee.net.rpc.codecs.proto.msg_pb2.Response', 'Response', ([], {}), '()\n', (4827, 4829), False, 'from bee.net.rpc.codecs.proto.msg_pb2 import Label, Response, Request\n'), ((1383, 1398), 'pickle.dumps', 'pickle.dumps', (['v'], {}), '(v)\n', (1395, 1398), False, 'import pickle\n'), ((1515, 1548), 'bee.net.rpc.codecs.proto.msg_pb2.Label', 'Label', ([], {'name': 'v.name', 'value': 'v.value'}), '(name=v.name, value=v.value)\n', (1520, 1548), False, 'from bee.net.rpc.codecs.proto.msg_pb2 import Label, Response, Request\n'), ((3950, 3984), 'bee.data.option.Option', 'Option', ([], {'name': 'v.name', 'value': 'v.value'}), '(name=v.name, value=v.value)\n', (3956, 3984), False, 'from bee.data.option import Options, Option\n'), ((4327, 4342), 'pickle.loads', 'pickle.loads', (['v'], {}), '(v)\n', (4339, 4342), False, 'import pickle\n')] |
""" Path library for the template engine. """
__author__ = "<NAME>"
__copyright__ = "Copyright 2016-2019"
__license__ = "Apache License 2.0"
import os
class _PathLib(object):
""" Path based functions. """
@property
def sep(self):
""" The path separator for the current platform. """
return os.sep
def join(self, *parts):
""" Join a path. """
return os.path.join(*parts)
def split(self, path):
""" Split a path into a head and a tail. """
return os.path.split(path)
def splitext(self, path):
""" Split the extension out of the path. """
return os.path.splitext(path)
def dirname(self, path):
""" Return the directory name of a path. """
return os.path.dirname(path)
def basename(self, path):
""" Return the base name of a path. """
return os.path.basename(path)
def relpath(self, target, fromdir):
""" Return a relative path to target from the directory fromdir. """
return os.path.relpath(target, fromdir)
FACTORY = _PathLib
| [
"os.path.splitext",
"os.path.join",
"os.path.split",
"os.path.dirname",
"os.path.basename",
"os.path.relpath"
] | [((415, 435), 'os.path.join', 'os.path.join', (['*parts'], {}), '(*parts)\n', (427, 435), False, 'import os\n'), ((532, 551), 'os.path.split', 'os.path.split', (['path'], {}), '(path)\n', (545, 551), False, 'import os\n'), ((651, 673), 'os.path.splitext', 'os.path.splitext', (['path'], {}), '(path)\n', (667, 673), False, 'import os\n'), ((772, 793), 'os.path.dirname', 'os.path.dirname', (['path'], {}), '(path)\n', (787, 793), False, 'import os\n'), ((888, 910), 'os.path.basename', 'os.path.basename', (['path'], {}), '(path)\n', (904, 910), False, 'import os\n'), ((1044, 1076), 'os.path.relpath', 'os.path.relpath', (['target', 'fromdir'], {}), '(target, fromdir)\n', (1059, 1076), False, 'import os\n')] |
from django.contrib import admin
from .models import System, State, Event, Transition
admin.site.register(System)
admin.site.register(State)
admin.site.register(Event)
admin.site.register(Transition)
| [
"django.contrib.admin.site.register"
] | [((88, 115), 'django.contrib.admin.site.register', 'admin.site.register', (['System'], {}), '(System)\n', (107, 115), False, 'from django.contrib import admin\n'), ((116, 142), 'django.contrib.admin.site.register', 'admin.site.register', (['State'], {}), '(State)\n', (135, 142), False, 'from django.contrib import admin\n'), ((143, 169), 'django.contrib.admin.site.register', 'admin.site.register', (['Event'], {}), '(Event)\n', (162, 169), False, 'from django.contrib import admin\n'), ((170, 201), 'django.contrib.admin.site.register', 'admin.site.register', (['Transition'], {}), '(Transition)\n', (189, 201), False, 'from django.contrib import admin\n')] |
# -*- coding: utf-8 -*-
# Copyright 2015 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Simple log collection script for Mob* Monitor"""
from __future__ import print_function
import glob
import os
import tempfile
import shutil
from chromite.lib import cros_build_lib
from chromite.lib import osutils
TMPDIR = '/mnt/moblab/tmp'
TMPDIR_PREFIX = 'moblab_logs_'
LOG_DIRS = {
'apache_errors': '/var/log/apache2/error_log',
'devserver_logs': '/var/log/devserver',
'dhcp_leases': '/var/lib/dhcp',
'messages': '/var/log/messages',
'mysql': '/var/log/mysql',
'servod': '/var/log/servod.log',
'scheduler': '/usr/local/autotest/logs/scheduler.latest'
}
def remove_old_tarballs():
paths = glob.iglob(os.path.join(TMPDIR, '%s*.tgz' % TMPDIR_PREFIX))
for path in paths:
os.remove(path)
def collect_logs():
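  # Copy each known log location into a temporary directory, capture the
  # mobmonitor status, and bundle everything into a tarball.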
remove_old_tarballs()
osutils.SafeMakedirs(TMPDIR)
tempdir = tempfile.mkdtemp(prefix=TMPDIR_PREFIX, dir=TMPDIR)
os.chmod(tempdir, 0o777)
try:
for name, path in LOG_DIRS.iteritems():
if not os.path.exists(path):
continue
if os.path.isdir(path):
shutil.copytree(path, os.path.join(tempdir, name))
else:
shutil.copyfile(path, os.path.join(tempdir, name))
cmd = ['mobmoncli', 'GetStatus']
cros_build_lib.RunCommand(
cmd,
log_stdout_to_file=os.path.join(tempdir, 'mobmonitor_getstatus')
)
finally:
tarball = '%s.tgz' % tempdir
cros_build_lib.CreateTarball(tarball, tempdir)
osutils.RmDir(tempdir, ignore_missing=True)
return tarball
| [
"os.path.exists",
"chromite.lib.cros_build_lib.CreateTarball",
"os.path.join",
"os.chmod",
"chromite.lib.osutils.RmDir",
"os.path.isdir",
"tempfile.mkdtemp",
"chromite.lib.osutils.SafeMakedirs",
"os.remove"
] | [((967, 995), 'chromite.lib.osutils.SafeMakedirs', 'osutils.SafeMakedirs', (['TMPDIR'], {}), '(TMPDIR)\n', (987, 995), False, 'from chromite.lib import osutils\n'), ((1008, 1058), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {'prefix': 'TMPDIR_PREFIX', 'dir': 'TMPDIR'}), '(prefix=TMPDIR_PREFIX, dir=TMPDIR)\n', (1024, 1058), False, 'import tempfile\n'), ((1061, 1083), 'os.chmod', 'os.chmod', (['tempdir', '(511)'], {}), '(tempdir, 511)\n', (1069, 1083), False, 'import os\n'), ((829, 876), 'os.path.join', 'os.path.join', (['TMPDIR', "('%s*.tgz' % TMPDIR_PREFIX)"], {}), "(TMPDIR, '%s*.tgz' % TMPDIR_PREFIX)\n", (841, 876), False, 'import os\n'), ((903, 918), 'os.remove', 'os.remove', (['path'], {}), '(path)\n', (912, 918), False, 'import os\n'), ((1559, 1605), 'chromite.lib.cros_build_lib.CreateTarball', 'cros_build_lib.CreateTarball', (['tarball', 'tempdir'], {}), '(tarball, tempdir)\n', (1587, 1605), False, 'from chromite.lib import cros_build_lib\n'), ((1610, 1653), 'chromite.lib.osutils.RmDir', 'osutils.RmDir', (['tempdir'], {'ignore_missing': '(True)'}), '(tempdir, ignore_missing=True)\n', (1623, 1653), False, 'from chromite.lib import osutils\n'), ((1199, 1218), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (1212, 1218), False, 'import os\n'), ((1151, 1171), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (1165, 1171), False, 'import os\n'), ((1459, 1504), 'os.path.join', 'os.path.join', (['tempdir', '"""mobmonitor_getstatus"""'], {}), "(tempdir, 'mobmonitor_getstatus')\n", (1471, 1504), False, 'import os\n'), ((1250, 1277), 'os.path.join', 'os.path.join', (['tempdir', 'name'], {}), '(tempdir, name)\n', (1262, 1277), False, 'import os\n'), ((1321, 1348), 'os.path.join', 'os.path.join', (['tempdir', 'name'], {}), '(tempdir, name)\n', (1333, 1348), False, 'import os\n')] |
# Higher-order functions
# Squaring each number in the sequence using a for loop
numbers = [1, 1, 2, 3, 5, 8, 13, 21, 34, 55]
result = []
for number in numbers:
result.append(number**2)
print(result)
# Squaring each number in the sequence
# using a for loop, with part of the logic moved into a separate function
numbers = [1, 1, 2, 3, 5, 8, 13, 21, 34, 55]
def pow_number(x):
return x**2
result = []
for number in numbers:
result.append(pow_number(number))
print(result)
# Squaring each number in the sequence
# using a for loop, with part of the logic moved into a separate lambda function
numbers = [1, 1, 2, 3, 5, 8, 13, 21, 34, 55]
def pow_number(x):
return x**2
result = []
result_lambda = []
for number in numbers:
result.append(pow_number(number))
result_lambda.append((lambda x: x**2)(number))
print(result)
# Squaring each number in the sequence using the map() function
numbers = [1, 1, 2, 3, 5, 8, 13, 21, 34, 55]
result = list(map(lambda x: x**2, numbers))
print(result)
# Filtering the sequence of numbers to keep only the even numbers using a loop with a condition
import sys
numbers = [1, 1, 2, 3, 5, 8, 13, 21, 34, 55]
filtered_numbers = []
for number in numbers:
if number % 2 == 0:
filtered_numbers.append(number)
result = list(map(lambda x: x**2, filtered_numbers))
print(sys.getsizeof(filtered_numbers))
print(result)
# Filtering the sequence of numbers to keep only the even numbers using the filter() function
numbers = [1, 1, 2, 3, 5, 8, 13, 21, 34, 55]
filtered_numbers = filter(lambda x: x % 2 == 0, numbers)
print(sys.getsizeof(filtered_numbers))
print(sys.getsizeof(list(filtered_numbers)))
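# Note: list(filtered_numbers) above already consumed the filter iterator,
# so the map below produces an empty list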
result = list(map(lambda x: x**2, filtered_numbers))
print(result)
# Filtering the sequence of numbers, using the map() and filter() functions in a single line
numbers = [1, 1, 2, 3, 5, 8, 13, 21, 34, 55]
result = list(map(lambda x: x**2, filter(lambda x: x % 2 == 0, numbers)))
print(result)
# Computing the sum of squares of the even numbers among the first ten Fibonacci numbers
numbers = [1, 1, 2, 3, 5, 8, 13, 21, 34, 55]
result = list(map(lambda x: x**2, filter(lambda x: x % 2 == 0, numbers)))
print(result)
sum_ = 0
for number in result:
sum_ += number
print(sum_)
# Computing the sum of squares of the even numbers among the first ten Fibonacci numbers using the reduce() function
from functools import reduce
numbers = [1, 1, 2, 3, 5, 8, 13, 21, 34, 55]
result = reduce(lambda x, y: x + y, map(lambda x: x**2, filter(lambda x: x % 2 == 0, numbers)))
print(result)
# Computing the factorial of a number using the reduce() function
from functools import reduce
numbers = range(1, 10)
def f(x, y):
return x * y
result = reduce(f, numbers)
result_lambda = reduce(lambda x, y: x * y, numbers)
print(result)
print(result_lambda)
| [
"functools.reduce",
"sys.getsizeof"
] | [((2811, 2829), 'functools.reduce', 'reduce', (['f', 'numbers'], {}), '(f, numbers)\n', (2817, 2829), False, 'from functools import reduce\n'), ((2847, 2882), 'functools.reduce', 'reduce', (['(lambda x, y: x * y)', 'numbers'], {}), '(lambda x, y: x * y, numbers)\n', (2853, 2882), False, 'from functools import reduce\n'), ((1429, 1460), 'sys.getsizeof', 'sys.getsizeof', (['filtered_numbers'], {}), '(filtered_numbers)\n', (1442, 1460), False, 'import sys\n'), ((1685, 1716), 'sys.getsizeof', 'sys.getsizeof', (['filtered_numbers'], {}), '(filtered_numbers)\n', (1698, 1716), False, 'import sys\n')] |
import os
from torch.utils.data import Dataset, DataLoader
import torch
import numpy as np
from .base import print_loaded_dataset_shapes, log_call_parameters
class DSpritesDataset(Dataset):
def __init__(self, indices, classification=False, colored=False, data_file=None):
super(DSpritesDataset, self).__init__()
if data_file is None:
data_file = os.path.join(os.environ['DATA_DIR'], 'dsprites-dataset',
'dsprites_ndarray_co1sh3sc6or40x32y32_64x64.npz')
data = np.load(data_file, encoding='latin1', allow_pickle=True)
self.indices = indices
# color related stuff
self.colored = colored
self.colors = None
self.n_colors = 1
indices_without_color = indices
if colored:
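            # Every base dSprites image is repeated once per color, so map the
            # colored indices back onto the underlying grayscale images.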
color_file = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'resources/rainbow-7.npy')
self.colors = np.load(color_file)
self.n_colors = len(self.colors)
indices_without_color = [idx // self.n_colors for idx in indices]
# factor_names and factor_sizes
meta = data['metadata'].item()
self.factor_names = list(meta['latents_names'][1:])
self.factor_sizes = list(meta['latents_sizes'][1:])
if colored:
self.factor_names.append('color')
self.factor_sizes.append(self.n_colors)
self.n_factors = len(self.factor_names)
# save relevant part of the grid
self.imgs = data['imgs'][indices_without_color]
# factor values, classes, possible_values
self.factor_values = data['latents_values'][indices_without_color]
self.factor_values = [arr[1:] for arr in self.factor_values]
self.factor_classes = data['latents_classes'][indices_without_color]
self.factor_classes = [arr[1:] for arr in self.factor_classes]
self.possible_values = []
for name in ['shape', 'scale', 'orientation', 'posX', 'posY']:
self.possible_values.append(meta['latents_possible_values'][name])
if colored:
for i, idx in enumerate(self.indices):
color_class = idx % self.n_colors
color_value = color_class / (self.n_colors - 1.0)
self.factor_classes[i] = np.append(self.factor_classes[i], color_class)
self.factor_values[i] = np.append(self.factor_values[i], color_value)
self.possible_values.append(list(np.arange(0, self.n_colors) / (self.n_colors - 1.0)))
self.classification = classification
# dataset name
self.dataset_name = 'dsprites'
if self.colored:
self.dataset_name += '-colored'
# factor types
self.is_categorical = [True, False, False, False, False]
if self.colored:
self.is_categorical.append(True)
# normalization values
means = torch.tensor([0.456])
stds = torch.tensor([0.224])
if self.colored:
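            # these are the standard ImageNet channel statistics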
means = torch.tensor([0.485, 0.456, 0.406])
stds = torch.tensor([0.229, 0.224, 0.225])
self.statistics = (means, stds)
def __len__(self):
return len(self.imgs)
def __getitem__(self, idx):
sample = self.imgs[idx]
sample = torch.tensor(sample, dtype=torch.float).unsqueeze(dim=0) # (1, H, W)
factors = (self.factor_classes[idx] if self.classification else self.factor_values[idx])
factors = torch.tensor(factors, dtype=torch.float)
if self.colored:
color_class = self.factor_classes[idx][-1]
color = self.colors[color_class]
sample = torch.cat([sample * color[0], sample * color[1], sample * color[2]], dim=0) # (3, H, W)
means, stds = self.statistics
sample = ((sample - means.unsqueeze(dim=-1).unsqueeze(dim=-1)) /
stds.unsqueeze(dim=-1).unsqueeze(dim=-1))
return sample, factors
@print_loaded_dataset_shapes
@log_call_parameters
def load_dsprites_datasets(val_ratio=0.2, test_ratio=0.2, seed=42,
classification=False, colored=False, data_file=None, **kwargs):
N = 737280
if colored:
N *= 7
val_cnt = int(val_ratio * N)
test_cnt = int(test_ratio * N)
train_cnt = N - val_cnt - test_cnt
np.random.seed(seed)
perm = np.random.permutation(N)
train_indices = perm[:train_cnt]
val_indices = perm[train_cnt:train_cnt + val_cnt]
test_indices = perm[train_cnt + val_cnt:]
train_dataset = DSpritesDataset(indices=train_indices, colored=colored,
classification=classification, data_file=data_file)
val_dataset = DSpritesDataset(indices=val_indices, colored=colored,
classification=classification, data_file=data_file)
test_dataset = DSpritesDataset(indices=test_indices, colored=colored,
classification=classification, data_file=data_file)
return train_dataset, val_dataset, test_dataset, None
@log_call_parameters
def load_dsprites_loaders(val_ratio=0.2, test_ratio=0.2, batch_size=128,
seed=42, classification=False, colored=False,
drop_last=False, **kwargs):
train_dataset, val_dataset, test_dataset, info = load_dsprites_datasets(
val_ratio=val_ratio, test_ratio=test_ratio, seed=seed,
classification=classification, colored=colored, **kwargs)
train_loader = None
val_loader = None
test_loader = None
if len(train_dataset) > 0:
train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True,
num_workers=4, drop_last=drop_last)
if len(val_dataset) > 0:
val_loader = DataLoader(val_dataset, batch_size=batch_size, shuffle=False,
num_workers=4, drop_last=drop_last)
if len(test_dataset) > 0:
test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False,
num_workers=4, drop_last=drop_last)
return train_loader, val_loader, test_loader, info
| [
"numpy.arange",
"os.path.join",
"numpy.append",
"torch.tensor",
"numpy.random.seed",
"torch.utils.data.DataLoader",
"os.path.abspath",
"numpy.load",
"torch.cat",
"numpy.random.permutation"
] | [((4367, 4387), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (4381, 4387), True, 'import numpy as np\n'), ((4399, 4423), 'numpy.random.permutation', 'np.random.permutation', (['N'], {}), '(N)\n', (4420, 4423), True, 'import numpy as np\n'), ((542, 598), 'numpy.load', 'np.load', (['data_file'], {'encoding': '"""latin1"""', 'allow_pickle': '(True)'}), "(data_file, encoding='latin1', allow_pickle=True)\n", (549, 598), True, 'import numpy as np\n'), ((2961, 2982), 'torch.tensor', 'torch.tensor', (['[0.456]'], {}), '([0.456])\n', (2973, 2982), False, 'import torch\n'), ((2998, 3019), 'torch.tensor', 'torch.tensor', (['[0.224]'], {}), '([0.224])\n', (3010, 3019), False, 'import torch\n'), ((3518, 3558), 'torch.tensor', 'torch.tensor', (['factors'], {'dtype': 'torch.float'}), '(factors, dtype=torch.float)\n', (3530, 3558), False, 'import torch\n'), ((5658, 5761), 'torch.utils.data.DataLoader', 'DataLoader', (['train_dataset'], {'batch_size': 'batch_size', 'shuffle': '(True)', 'num_workers': '(4)', 'drop_last': 'drop_last'}), '(train_dataset, batch_size=batch_size, shuffle=True, num_workers=\n 4, drop_last=drop_last)\n', (5668, 5761), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((5841, 5942), 'torch.utils.data.DataLoader', 'DataLoader', (['val_dataset'], {'batch_size': 'batch_size', 'shuffle': '(False)', 'num_workers': '(4)', 'drop_last': 'drop_last'}), '(val_dataset, batch_size=batch_size, shuffle=False, num_workers=4,\n drop_last=drop_last)\n', (5851, 5942), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((6023, 6126), 'torch.utils.data.DataLoader', 'DataLoader', (['test_dataset'], {'batch_size': 'batch_size', 'shuffle': '(False)', 'num_workers': '(4)', 'drop_last': 'drop_last'}), '(test_dataset, batch_size=batch_size, shuffle=False, num_workers=\n 4, drop_last=drop_last)\n', (6033, 6126), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((383, 493), 'os.path.join', 'os.path.join', (["os.environ['DATA_DIR']", '"""dsprites-dataset"""', '"""dsprites_ndarray_co1sh3sc6or40x32y32_64x64.npz"""'], {}), "(os.environ['DATA_DIR'], 'dsprites-dataset',\n 'dsprites_ndarray_co1sh3sc6or40x32y32_64x64.npz')\n", (395, 493), False, 'import os\n'), ((979, 998), 'numpy.load', 'np.load', (['color_file'], {}), '(color_file)\n', (986, 998), True, 'import numpy as np\n'), ((3065, 3100), 'torch.tensor', 'torch.tensor', (['[0.485, 0.456, 0.406]'], {}), '([0.485, 0.456, 0.406])\n', (3077, 3100), False, 'import torch\n'), ((3120, 3155), 'torch.tensor', 'torch.tensor', (['[0.229, 0.224, 0.225]'], {}), '([0.229, 0.224, 0.225])\n', (3132, 3155), False, 'import torch\n'), ((3706, 3781), 'torch.cat', 'torch.cat', (['[sample * color[0], sample * color[1], sample * color[2]]'], {'dim': '(0)'}), '([sample * color[0], sample * color[1], sample * color[2]], dim=0)\n', (3715, 3781), False, 'import torch\n'), ((2344, 2390), 'numpy.append', 'np.append', (['self.factor_classes[i]', 'color_class'], {}), '(self.factor_classes[i], color_class)\n', (2353, 2390), True, 'import numpy as np\n'), ((2431, 2476), 'numpy.append', 'np.append', (['self.factor_values[i]', 'color_value'], {}), '(self.factor_values[i], color_value)\n', (2440, 2476), True, 'import numpy as np\n'), ((3332, 3371), 'torch.tensor', 'torch.tensor', (['sample'], {'dtype': 'torch.float'}), '(sample, dtype=torch.float)\n', (3344, 3371), False, 'import torch\n'), ((860, 885), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (875, 885), False, 'import os\n'), ((2522, 2549), 
'numpy.arange', 'np.arange', (['(0)', 'self.n_colors'], {}), '(0, self.n_colors)\n', (2531, 2549), True, 'import numpy as np\n')] |
import os
from pygame import *
from Hover import Hover
from PaintLayout import PaintLayout
class MusicBox(object):
def __init__(self, surface):
super().__init__()
self.noSound = False
self.surface = surface
try:
mixer.init()
except Exception as _:
self.noSound = True
# music box
self.musicNameRect = Rect(810, 10, 180, 25)
draw.rect(surface, (0, 0, 0), self.musicNameRect, 1)
PaintLayout.drawText(surface, "Music Box", (0, 0, 0), self.musicNameRect, 25)
# play and stop box
self.musicPlayRect = Rect(815, 36, 84, 43)
self.musicStopRect = Rect(900, 36, 84, 43)
Hover.addHover(self.musicPlayRect)
Hover.addHover(self.musicStopRect)
PaintLayout.drawText(surface, "Play", (0, 0, 0), self.musicPlayRect, 25)
PaintLayout.drawText(surface, "Stop", (0, 0, 0), self.musicStopRect, 25)
self.musicNextRect = Rect(900, 80, 84, 25)
self.musicBackRect = Rect(815, 80, 84, 25)
PaintLayout.createNextBox(surface, self.musicNextRect)
PaintLayout.createBackBox(surface, self.musicBackRect)
self.curSong = 0
self.songs = []
def setup(self):
self.songs = []
path = "Resources/music box/Songs"
files = os.listdir(path)
files.sort() # because listdir does not guarantee order
for file in files:
if file.endswith(".ogg"):
self.songs.append(f"{path}/{file}")
def handle(self):
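        # Update the song label, then act on whichever music-box button the
        # mouse is currently over.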
if self.noSound: return None
PaintLayout.drawText(self.surface, f"Music Box: {self.curSong + 1}", (0, 0, 0), self.musicNameRect, 25)
mx, my = mouse.get_pos()
if self.musicNextRect.collidepoint(mx, my):
            self.curSong = (self.curSong + 1) % len(self.songs)
        elif self.musicBackRect.collidepoint(mx, my):
            self.curSong = (self.curSong + len(self.songs) - 1) % len(self.songs)
elif self.musicPlayRect.collidepoint(mx, my):
mixer.music.stop()
mixer.music.load(self.songs[self.curSong])
mixer.music.play(-1)
elif self.musicStopRect.collidepoint(mx, my):
mixer.music.stop()
| [
"PaintLayout.PaintLayout.createNextBox",
"os.listdir",
"PaintLayout.PaintLayout.createBackBox",
"PaintLayout.PaintLayout.drawText",
"Hover.Hover.addHover"
] | [((482, 559), 'PaintLayout.PaintLayout.drawText', 'PaintLayout.drawText', (['surface', '"""Music Box"""', '(0, 0, 0)', 'self.musicNameRect', '(25)'], {}), "(surface, 'Music Box', (0, 0, 0), self.musicNameRect, 25)\n", (502, 559), False, 'from PaintLayout import PaintLayout\n'), ((700, 734), 'Hover.Hover.addHover', 'Hover.addHover', (['self.musicPlayRect'], {}), '(self.musicPlayRect)\n', (714, 734), False, 'from Hover import Hover\n'), ((743, 777), 'Hover.Hover.addHover', 'Hover.addHover', (['self.musicStopRect'], {}), '(self.musicStopRect)\n', (757, 777), False, 'from Hover import Hover\n'), ((786, 858), 'PaintLayout.PaintLayout.drawText', 'PaintLayout.drawText', (['surface', '"""Play"""', '(0, 0, 0)', 'self.musicPlayRect', '(25)'], {}), "(surface, 'Play', (0, 0, 0), self.musicPlayRect, 25)\n", (806, 858), False, 'from PaintLayout import PaintLayout\n'), ((867, 939), 'PaintLayout.PaintLayout.drawText', 'PaintLayout.drawText', (['surface', '"""Stop"""', '(0, 0, 0)', 'self.musicStopRect', '(25)'], {}), "(surface, 'Stop', (0, 0, 0), self.musicStopRect, 25)\n", (887, 939), False, 'from PaintLayout import PaintLayout\n'), ((1051, 1105), 'PaintLayout.PaintLayout.createNextBox', 'PaintLayout.createNextBox', (['surface', 'self.musicNextRect'], {}), '(surface, self.musicNextRect)\n', (1076, 1105), False, 'from PaintLayout import PaintLayout\n'), ((1114, 1168), 'PaintLayout.PaintLayout.createBackBox', 'PaintLayout.createBackBox', (['surface', 'self.musicBackRect'], {}), '(surface, self.musicBackRect)\n', (1139, 1168), False, 'from PaintLayout import PaintLayout\n'), ((1323, 1339), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (1333, 1339), False, 'import os\n'), ((1590, 1697), 'PaintLayout.PaintLayout.drawText', 'PaintLayout.drawText', (['self.surface', 'f"""Music Box: {self.curSong + 1}"""', '(0, 0, 0)', 'self.musicNameRect', '(25)'], {}), "(self.surface, f'Music Box: {self.curSong + 1}', (0, 0,\n 0), self.musicNameRect, 25)\n", (1610, 1697), False, 'from PaintLayout import PaintLayout\n')] |
import sys
from matplotlib import pyplot as plt
import numpy as npx
import torch
from torch import nn
from torch.utils import data
from torchvision import transforms, datasets
from .utils import *
from .chaos import import_np, load_array as _load_array
_name = __name__.split('.')[-1]
np = import_np(_name)
np.pi = torch.tensor(npx.pi) # np.acos(np.zeros(1)) * 2
xint = sys.modules[__name__]
# ======================================
# Framework-specific classes, functions, etc.
def get_dataloader_workers():
"""在非Windows的平台上,使用4个进程来读取的数据。"""
return 0 if sys.platform.startswith('win') else 4
def load_data_fashion_mnist(batch_size, resize=None):
"""下载Fashion-MNIST数据集,然后将其加载到内存中。"""
trans = [transforms.ToTensor()]
if resize:
trans.insert(0, transforms.Resize(resize))
trans = transforms.Compose(trans)
mnist_train = datasets.FashionMNIST(root="../data",
train=True,
transform=trans,
download=True)
mnist_test = datasets.FashionMNIST(root="../data",
train=False,
transform=trans,
download=True)
return (data.DataLoader(mnist_train, batch_size, shuffle=True,
num_workers=get_dataloader_workers()),
data.DataLoader(mnist_test, batch_size, shuffle=False,
num_workers=get_dataloader_workers()))
def load_data_mnist(batch_size, resize=None):
"""下载 MNIST 数据集,然后将其加载到内存中。"""
trans = [transforms.ToTensor()]
if resize:
trans.insert(0, transforms.Resize(resize))
trans = transforms.Compose(trans)
mnist_train = datasets.MNIST(root="../data",
train=True,
transform=trans,
download=True)
mnist_test = datasets.MNIST(root="../data",
train=False,
transform=trans,
download=True)
return (data.DataLoader(mnist_train, batch_size, shuffle=True,
num_workers=get_dataloader_workers()),
data.DataLoader(mnist_test, batch_size, shuffle=False,
num_workers=get_dataloader_workers()))
def accuracy(y_hat, y):
"""计算预测正确的数量。"""
if len(y_hat.shape) > 1 and y_hat.shape[1] > 1:
y_hat = y_hat.argmax(axis=1)
cmp = y_hat.type(y.dtype) == y
return float(sum(cmp.type(y.dtype)))
def train_epoch(net, train_iter, loss, updater, num_classes=10):
"""训练模型一个迭代周期(定义见第3章)。"""
# 将模型设置为训练模式
if isinstance(net, nn.Module):
net.train()
    # Sum of training loss, sum of training accuracy, number of examples
metric = Accumulator(3)
for X, y in train_iter:
        # Compute gradients and update parameters
y_hat = net(X)
if isinstance(loss, nn.MSELoss):
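            # MSE loss expects one-hot float targets rather than integer class labels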
y_ = nn.functional.one_hot(y, num_classes=num_classes)
y_ = y_.type(torch.float)
l = loss(y_hat, y_)
else:
l = loss(y_hat, y)
if isinstance(updater, torch.optim.Optimizer):
            # Use PyTorch's built-in optimizer and loss function
updater.zero_grad()
l.backward()
updater.step()
metric.add(
float(l) * len(y), accuracy(y_hat, y),
y.size().numel())
else:
            # Use the custom-built updater and loss function
l.sum().backward()
updater(X.shape[0])
metric.add(float(l.sum()), accuracy(y_hat, y), y.numel())
    # Return the training loss and training accuracy
return metric[0] / metric[2], metric[1] / metric[2]
def sgd(params, lr, batch_size):
"""小批量随机梯度下降。"""
with torch.no_grad():
for param in params:
param -= lr * param.grad / batch_size
param.grad.zero_()
def updater(params, lr, batch_size):
return sgd(params, lr, batch_size)
def try_gpu(i=0):
"""如果存在,则返回gpu(i),否则返回cpu()。"""
if torch.cuda.device_count() >= i + 1:
return torch.device(f'cuda:{i}')
return torch.device('cpu')
def try_all_gpus():
"""返回所有可用的GPU,如果没有GPU,则返回[cpu(),]。"""
devices = [
torch.device(f'cuda:{i}') for i in range(torch.cuda.device_count())]
return devices if devices else [torch.device('cpu')]
try_gpu(), try_gpu(10), try_all_gpus()
# ======================================
## Common APIs
def load_array(data_arrays, batch_size, is_train=True):
return _load_array(_name, data_arrays, batch_size, is_train)
def normal(x, mu, sigma):
p = 1 / np.sqrt(2 * np.pi * sigma**2)
return p * np.exp(-0.5 / sigma**2 * (x - mu)**2)
def one_hot(arr, num_classes):
return np.eye(num_classes)[arr]
def softmax(X):
X_exp = np.exp(X)
partition = np.sum(X_exp, axis=1, keepdims=True)
return X_exp / partition # 这里应用了广播机制
def cross_entropy(y_hat, y):
return -np.log(y_hat[range(len(y_hat)), y])
def evaluate_accuracy(net, data_iter):
"""计算在指定数据集上模型的精度"""
    metric = Accumulator(2)  # number of correct predictions, total number of predictions
for X, y in data_iter:
metric.add(accuracy(net(X), y), len(y))
return metric[0] / metric[1]
def evaluate_loss(net, data_iter, loss):
"""Evaluate the loss of a model on the given dataset."""
metric = Accumulator(2) # Sum of losses, no. of examples
for X, y in data_iter:
l = loss(net(X), y)
metric.add(float(sum(l)), len(l))
return metric[0] / metric[1]
def train(net, train_iter, test_iter, loss, num_epochs, updater, ylim=None):
"""训练模型(定义见第3章)。"""
animator = Animator(xlabel='epoch', xlim=[1, num_epochs], ylim=ylim,
legend=['train loss', 'train acc', 'test acc'])
for epoch in range(num_epochs):
train_metrics = train_epoch(net, train_iter, loss, updater)
test_acc = evaluate_accuracy(net, test_iter)
animator.add(epoch + 1, train_metrics + (test_acc,))
#train_loss, train_acc = train_metrics
| [
"torchvision.datasets.FashionMNIST",
"sys.platform.startswith",
"torch.cuda.device_count",
"torch.tensor",
"torch.nn.functional.one_hot",
"torchvision.datasets.MNIST",
"torchvision.transforms.Resize",
"torch.no_grad",
"torchvision.transforms.ToTensor",
"torchvision.transforms.Compose",
"torch.device"
] | [((319, 339), 'torch.tensor', 'torch.tensor', (['npx.pi'], {}), '(npx.pi)\n', (331, 339), False, 'import torch\n'), ((789, 814), 'torchvision.transforms.Compose', 'transforms.Compose', (['trans'], {}), '(trans)\n', (807, 814), False, 'from torchvision import transforms, datasets\n'), ((833, 919), 'torchvision.datasets.FashionMNIST', 'datasets.FashionMNIST', ([], {'root': '"""../data"""', 'train': '(True)', 'transform': 'trans', 'download': '(True)'}), "(root='../data', train=True, transform=trans, download\n =True)\n", (854, 919), False, 'from torchvision import transforms, datasets\n'), ((1052, 1138), 'torchvision.datasets.FashionMNIST', 'datasets.FashionMNIST', ([], {'root': '"""../data"""', 'train': '(False)', 'transform': 'trans', 'download': '(True)'}), "(root='../data', train=False, transform=trans,\n download=True)\n", (1073, 1138), False, 'from torchvision import transforms, datasets\n'), ((1717, 1742), 'torchvision.transforms.Compose', 'transforms.Compose', (['trans'], {}), '(trans)\n', (1735, 1742), False, 'from torchvision import transforms, datasets\n'), ((1761, 1835), 'torchvision.datasets.MNIST', 'datasets.MNIST', ([], {'root': '"""../data"""', 'train': '(True)', 'transform': 'trans', 'download': '(True)'}), "(root='../data', train=True, transform=trans, download=True)\n", (1775, 1835), False, 'from torchvision import transforms, datasets\n'), ((1952, 2027), 'torchvision.datasets.MNIST', 'datasets.MNIST', ([], {'root': '"""../data"""', 'train': '(False)', 'transform': 'trans', 'download': '(True)'}), "(root='../data', train=False, transform=trans, download=True)\n", (1966, 2027), False, 'from torchvision import transforms, datasets\n'), ((4085, 4104), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (4097, 4104), False, 'import torch\n'), ((540, 570), 'sys.platform.startswith', 'sys.platform.startswith', (['"""win"""'], {}), "('win')\n", (563, 570), False, 'import sys\n'), ((688, 709), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (707, 709), False, 'from torchvision import transforms, datasets\n'), ((1616, 1637), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1635, 1637), False, 'from torchvision import transforms, datasets\n'), ((3729, 3744), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3742, 3744), False, 'import torch\n'), ((3997, 4022), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (4020, 4022), False, 'import torch\n'), ((4048, 4073), 'torch.device', 'torch.device', (['f"""cuda:{i}"""'], {}), "(f'cuda:{i}')\n", (4060, 4073), False, 'import torch\n'), ((4193, 4218), 'torch.device', 'torch.device', (['f"""cuda:{i}"""'], {}), "(f'cuda:{i}')\n", (4205, 4218), False, 'import torch\n'), ((750, 775), 'torchvision.transforms.Resize', 'transforms.Resize', (['resize'], {}), '(resize)\n', (767, 775), False, 'from torchvision import transforms, datasets\n'), ((1678, 1703), 'torchvision.transforms.Resize', 'transforms.Resize', (['resize'], {}), '(resize)\n', (1695, 1703), False, 'from torchvision import transforms, datasets\n'), ((2955, 3004), 'torch.nn.functional.one_hot', 'nn.functional.one_hot', (['y'], {'num_classes': 'num_classes'}), '(y, num_classes=num_classes)\n', (2976, 3004), False, 'from torch import nn\n'), ((4298, 4317), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (4310, 4317), False, 'import torch\n'), ((4234, 4259), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (4257, 4259), False, 'import torch\n')] |
"""Views for the django ``Brewery`` application.
"""
# pylint: disable=too-many-ancestors
from django.core.exceptions import ObjectDoesNotExist
from django.http import HttpResponse
from django.http import HttpResponseForbidden
from django.http import JsonResponse
import logging
from rest_framework import filters
from rest_framework import generics
from rest_framework import status
from joulia.filters import SearchOrIdFilter
from rest_framework.permissions import IsAuthenticated
from rest_framework.views import APIView
from brewery import models
from brewery import permissions
from brewery import serializers
from joulia import http
LOGGER = logging.getLogger(__name__)
class JouliaControllerReleaseApiMixin(APIView):
"""Common REST API view information for ``JouliaControllerRelease`` model.
"""
serializer_class = serializers.JouliaControllerReleaseSerializers
queryset = models.JouliaControllerRelease.objects.all()
permission_classes = (
IsAuthenticated, permissions.IsContinuousIntegrationToEdit)
class JouliaControllerReleaseListView(JouliaControllerReleaseApiMixin,
generics.ListCreateAPIView):
"""List REST API view for ``JouliaControllerRelease`` model."""
# TODO(willjschmitt): Add Create functionality for release software to
# programmatically update this.
pass
class JouliaControllerReleaseDetailView(JouliaControllerReleaseApiMixin,
generics.RetrieveAPIView):
"""Retrieve REST API view for ``JouliaControllerRelease`` model."""
pass
class BrewingStateAPIMixin(APIView):
"""Common REST API view information for ``BrewingState`` model."""
serializer_class = serializers.BrewingStateSerializer
queryset = models.BrewingState.objects.all().order_by('index')
filter_fields = ('id', 'software_release',)
permission_classes = (
IsAuthenticated, permissions.IsContinuousIntegrationToEdit)
class BrewingStateListCreateView(BrewingStateAPIMixin,
generics.ListCreateAPIView):
"""List/create REST API view for ``BrewingState`` model.
Ordering is guaranteed to be sorted by index in the List view.
"""
pass
class BrewingStateDetailView(BrewingStateAPIMixin,
generics.RetrieveAPIView):
"""Get REST API view for ``BrewingState`` model."""
pass
class BrewingCompanyApiMixin(APIView):
"""Common REST API view information for ``BrewingCompany`` model."""
serializer_class = serializers.BrewingCompanySerializer
permission_classes = (IsAuthenticated, permissions.IsMember)
def get_queryset(self):
return models.BrewingCompany.objects.filter(
group__user=self.request.user)
class BrewingCompanyListView(BrewingCompanyApiMixin,
generics.ListCreateAPIView):
"""List and Create REST API view for ``BrewingCompany`` model."""
pass
class BrewingCompanyDetailView(BrewingCompanyApiMixin,
generics.RetrieveUpdateDestroyAPIView):
"""Retrieve, Update, and Destroy REST API view for ``BrewingCompany``model.
"""
pass
class BreweryApiMixin(APIView):
"""Common REST API view information for ``Brewery`` model."""
serializer_class = serializers.BrewerySerializer
permission_classes = (IsAuthenticated, permissions.IsMemberOfBrewingCompany)
def get_queryset(self):
return models.Brewery.objects.filter(
company__group__user=self.request.user)
class BreweryListView(BreweryApiMixin, generics.ListCreateAPIView):
"""List and Create REST API view for ``Brewery`` model."""
pass
class BreweryDetailView(BreweryApiMixin, generics.RetrieveUpdateDestroyAPIView):
"""Retrieve, Update, and Destroy REST API view for ``Brewery``model."""
pass
class BrewhouseApiMixin(APIView):
"""Common REST API view information for ``Brewhouse`` model."""
serializer_class = serializers.BrewhouseSerializer
permission_classes = (IsAuthenticated, permissions.IsMemberOfBrewery)
filter_fields = ('id', 'brewery',)
def get_queryset(self):
return models.Brewhouse.objects.filter(
brewery__company__group__user=self.request.user)
class BrewhouseListView(BrewhouseApiMixin, generics.ListCreateAPIView):
"""List and Create REST API view for ``Brewhouse`` model."""
pass
class BrewhouseDetailView(BrewhouseApiMixin,
generics.RetrieveUpdateDestroyAPIView):
"""Retrieve, Update, and Destroy REST API view for ``Brewhouse``model."""
pass
class BeerStyleApiMixin(APIView):
"""Common REST API view information for ``BeerStyle`` model."""
queryset = models.BeerStyle.objects.all()
serializer_class = serializers.BeerStyleSerializer
permission_classes = (IsAuthenticated, permissions.IsAdminToEdit)
class BeerStyleListView(BeerStyleApiMixin, generics.ListAPIView):
"""List REST API view for ``BeerStyle`` model."""
filter_backends = (SearchOrIdFilter,)
search_fields = ('name',)
class BeerStyleDetailView(BeerStyleApiMixin, generics.RetrieveAPIView):
"""Retrieve REST API view for ``BeerStyle``model."""
pass
class YeastIngredientAPIMixin(APIView):
"""Common REST API view information for ``YeastIngredient`` model."""
serializer_class = serializers.YeastIngredientSerializer
permission_classes = (IsAuthenticated, permissions.IsAdminToEdit)
queryset = models.YeastIngredient.objects.all()
class YeastIngredientListView(YeastIngredientAPIMixin,
generics.ListCreateAPIView):
"""List and Create REST API view for ``YeastIngredient`` model."""
filter_backends = (SearchOrIdFilter,)
search_fields = ('name',)
class YeastIngredientDetailView(YeastIngredientAPIMixin,
generics.RetrieveUpdateDestroyAPIView):
"""Retrieve, Update, and Destroy REST API view for ``YeastIngredient``
model."""
pass
class MaltIngredientAPIMixin(APIView):
"""Common REST API view information for ``MaltIngredient`` model."""
serializer_class = serializers.MaltIngredientSerializer
permission_classes = (IsAuthenticated, permissions.IsAdminToEdit)
queryset = models.MaltIngredient.objects.all()
class MaltIngredientListView(MaltIngredientAPIMixin,
generics.ListCreateAPIView):
"""List and Create REST API view for ``MaltIngredient`` model."""
filter_backends = (SearchOrIdFilter,)
search_fields = ('name',)
class MaltIngredientDetailView(MaltIngredientAPIMixin,
generics.RetrieveUpdateDestroyAPIView):
"""Retrieve, Update, and Destroy REST API view for ``MaltIngredient``
model."""
pass
class BitteringIngredientAPIMixin(APIView):
"""Common REST API view information for ``BitteringIngredient`` model."""
serializer_class = serializers.BitteringIngredientSerializer
permission_classes = (IsAuthenticated, permissions.IsAdminToEdit)
queryset = models.BitteringIngredient.objects.all()
class BitteringIngredientListView(BitteringIngredientAPIMixin,
generics.ListCreateAPIView):
"""List and Create REST API view for ``BitteringIngredient`` model."""
filter_backends = (SearchOrIdFilter,)
search_fields = ('name',)
class BitteringIngredientDetailView(BitteringIngredientAPIMixin,
generics.RetrieveUpdateDestroyAPIView):
"""Retrieve, Update, and Destroy REST API view for ``BitteringIngredient``
model."""
pass
class MaltIngredientAdditionAPIMixin(APIView):
"""Common REST API view information for ``MaltIngredientAddition`` model."""
filter_fields = ('id', 'recipe',)
serializer_class = serializers.MaltIngredientAdditionSerializer
permission_classes = (IsAuthenticated, permissions.OwnsRecipe)
queryset = models.MaltIngredientAddition.objects.all()
class MaltIngredientAdditionListView(MaltIngredientAdditionAPIMixin,
generics.ListCreateAPIView):
"""List and Create REST API view for ``MaltIngredientAddition`` model."""
pass
class MaltIngredientAdditionDetailView(MaltIngredientAdditionAPIMixin,
generics.RetrieveUpdateDestroyAPIView):
"""Retrieve, Update, and Destroy REST API view for
``MaltIngredientAddition`` model.
"""
pass
class BitteringIngredientAdditionAPIMixin(APIView):
"""Common REST API view information for ``BitteringIngredientAddition``
model.
"""
filter_fields = ('id', 'recipe',)
serializer_class = serializers.BitteringIngredientAdditionSerializer
permission_classes = (IsAuthenticated, permissions.OwnsRecipe)
queryset = models.BitteringIngredientAddition.objects.all()
class BitteringIngredientAdditionListView(BitteringIngredientAdditionAPIMixin,
generics.ListCreateAPIView):
"""List and Create REST API view for ``BitteringIngredientAddition``
model."""
pass
class BitteringIngredientAdditionDetailView(
BitteringIngredientAdditionAPIMixin,
generics.RetrieveUpdateDestroyAPIView):
"""Retrieve, Update, and Destroy REST API view for
``BitteringIngredientAddition`` model."""
pass
class RecipeAPIMixin(APIView):
"""Common REST API view information for ``Recipe`` model."""
serializer_class = serializers.RecipeSerializer
permission_classes = (IsAuthenticated, permissions.IsMemberOfBrewingCompany)
def get_queryset(self):
return models.Recipe.objects.filter(
company__group__user=self.request.user)
class RecipeListView(RecipeAPIMixin, generics.ListCreateAPIView):
"""List and Create REST API view for ``Recipe`` model."""
pass
class RecipeDetailView(RecipeAPIMixin, generics.RetrieveUpdateDestroyAPIView):
"""Retrieve, Update, and Destroy REST API view for ``Recipe`` model."""
pass
class MashPointAPIMixin(APIView):
"""Common REST API view information for ``MashPoint`` model."""
serializer_class = serializers.MashPointSerializer
permission_classes = (IsAuthenticated, permissions.OwnsRecipe)
filter_fields = ('id', 'recipe',)
def get_queryset(self):
return models.MashPoint.objects.filter(
recipe__company__group__user=self.request.user).order_by('index')
class MashPointListView(MashPointAPIMixin, generics.ListCreateAPIView):
"""List and create REST API for ``MashPoint`` model."""
pass
class MashPointDetailView(MashPointAPIMixin,
generics.RetrieveUpdateDestroyAPIView):
"""Retrieve, Update, and Destroy REST API view for ``MashPoint`` model."""
pass
class RecipeInstanceApiMixin(APIView):
"""Common REST API view information for ``RecipeInstance`` model."""
serializer_class = serializers.RecipeInstanceSerializer
permission_classes = (IsAuthenticated, permissions.OwnsRecipe)
filter_backends = (filters.DjangoFilterBackend, filters.OrderingFilter)
filter_fields = ('id', 'active', 'brewhouse',)
ordering_fields = ('date',)
def get_queryset(self):
return models.RecipeInstance.objects.filter(
recipe__company__group__user=self.request.user)
class RecipeInstanceListView(RecipeInstanceApiMixin,
generics.ListCreateAPIView):
"""List and Create REST API view for ``RecipeInstance`` model."""
pass
class RecipeInstanceDetailView(RecipeInstanceApiMixin,
generics.RetrieveUpdateAPIView):
"""Retrieve and Update REST API view for ``RecipeInstance`` model."""
pass
class TimeSeriesNewHandler(generics.CreateAPIView):
"""Create REST API view for ``TimeSeriesDataPoint`` model."""
serializer_class = serializers.TimeSeriesDataPointSerializer
permission_classes = (IsAuthenticated, permissions.OwnsSensor)
def get_queryset(self):
return models.TimeSeriesDataPoint.objects.filter(
            sensor__brewhouse__brewery__company__group__user=self.request.user)
class TimeSeriesIdentifyHandler(APIView):
"""Identifies a time series group by the name of an AssetSensor.
Can only be handled as a POST request.
"""
@staticmethod
def post(request):
"""Identifies a time series group by the name of an AssetSensor.
If the AssetSensor does not yet exist, creates it.
Args:
recipe_instance: (Optional) POST argument with the
recipe_instance pk. Used to retrieve the Brewhouse equipment
associated with the request. Used for some cases when the
equipment has more readily available access to the
recipe_instance rather than the Brewhouse directly.
            brewhouse: (Optional) POST argument with the Brewhouse pk. Required
if recipe_instance is not submitted.
name: Name for the AssetSensor to be used in the time series data.
See AssetSensor for more information on naming.
variable_type: Type of AssetSensor to be used (e.g. "value" or
"override"). Defaults to 'value'.
Returns:
JsonResponse with the pk to the sensor as the property "sensor".
            Response status is 200 if an existing sensor is returned and 201 if a
            new AssetSensor had to be created.
"""
name = request.data['name']
variable_type = request.data.get('variable_type', 'value')
if 'recipe_instance' in request.data:
recipe_instance_id = request.data['recipe_instance']
recipe_instance = models.RecipeInstance.objects.get(
id=recipe_instance_id)
brewhouse = recipe_instance.brewhouse
else:
brewhouse_id = request.data['brewhouse']
brewhouse = models.Brewhouse.objects.get(id=brewhouse_id)
if not permissions.is_member_of_brewing_company(
request.user, brewhouse.brewery.company):
return HttpResponseForbidden(
'Access not permitted to brewing equipment.')
# See if we can get an existing AssetSensor.
try:
sensor = models.AssetSensor.objects.get(name=name,
brewhouse=brewhouse,
variable_type=variable_type)
status_code = status.HTTP_200_OK
# Otherwise create one for recording data
except ObjectDoesNotExist:
LOGGER.debug('Creating new asset sensor %s for asset %s',
name, brewhouse)
sensor = models.AssetSensor.objects.create(
name=name, brewhouse=brewhouse, variable_type=variable_type)
status_code = status.HTTP_201_CREATED
response = JsonResponse({'sensor': sensor.pk})
response.status_code = status_code
return response
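# Illustrative client-side sketch (not part of the original module) of how brewing
# equipment might call the identify endpoint described above; the route and token
# below are assumptions for demonstration only.
#
#   import requests
#   url = 'https://<host>/brewery/api/time_series/identify/'  # hypothetical route
#   resp = requests.post(url, data={'brewhouse': 1, 'name': 'temperature'},
#                        headers={'Authorization': 'Token <api-token>'})
#   sensor_pk = resp.json()['sensor']  # status 200 if the sensor existed, 201 if created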
class BrewhouseIdByToken(APIView):
"""Retrieves the brewhouse ID for a user authenticated with a Token."""
@staticmethod
def get(request):
try:
brewhouse = request.user.auth_token.brewhouse
return JsonResponse({'brewhouse': brewhouse.pk})
except ObjectDoesNotExist:
return HttpResponseForbidden()
class BrewhouseLaunchView(APIView):
"""Launches a recipe instance on a Brewhouse."""
@staticmethod
def post(request):
brewhouse_pk = http.get_data_value_or_400(request, 'brewhouse')
recipe_pk = http.get_data_value_or_400(request, 'recipe')
brewhouse = http.get_object_or_404(models.Brewhouse, brewhouse_pk)
if not permissions.IsMemberOfBrewery().has_object_permission(
request, None, brewhouse):
raise http.HTTP403("No permission to access requested brewhouse.")
recipe = http.get_object_or_404(models.Recipe, recipe_pk)
if not permissions.IsMemberOfBrewingCompany().has_object_permission(
request, None, recipe):
raise http.HTTP403("No permission to access requested recipe.")
recipe_instance = models.RecipeInstance.objects.create(
brewhouse=brewhouse, recipe=recipe, active=True)
return JsonResponse({"id": recipe_instance.pk})
class BrewhouseEndView(APIView):
"""Ends a recipe instance on a Brewhouse."""
@staticmethod
def post(request):
recipe_instance_pk = http.get_data_value_or_400(request,
'recipe_instance')
recipe_instance = http.get_object_or_404(models.RecipeInstance,
recipe_instance_pk)
if not permissions.OwnsRecipe().has_object_permission(
request, None, recipe_instance):
raise http.HTTP403(
"No permission to access requested recipe_instance.")
recipe_instance.active = False
recipe_instance.save()
return HttpResponse()
| [
"logging.getLogger",
"brewery.models.BitteringIngredient.objects.all",
"brewery.models.Brewhouse.objects.filter",
"brewery.models.JouliaControllerRelease.objects.all",
"brewery.models.BeerStyle.objects.all",
"brewery.models.YeastIngredient.objects.all",
"brewery.models.MashPoint.objects.filter",
"brewery.models.AssetSensor.objects.get",
"django.http.HttpResponse",
"brewery.models.Recipe.objects.filter",
"brewery.models.Brewery.objects.filter",
"brewery.permissions.IsMemberOfBrewingCompany",
"brewery.models.BrewingState.objects.all",
"brewery.models.BitteringIngredientAddition.objects.all",
"joulia.http.get_data_value_or_400",
"brewery.models.Brewhouse.objects.get",
"brewery.permissions.IsMemberOfBrewery",
"brewery.models.BrewingCompany.objects.filter",
"django.http.JsonResponse",
"brewery.models.AssetSensor.objects.create",
"brewery.models.RecipeInstance.objects.get",
"joulia.http.HTTP403",
"brewery.models.MaltIngredient.objects.all",
"brewery.models.TimeSeriesDataPoint.objects.filter",
"brewery.models.RecipeInstance.objects.filter",
"brewery.models.RecipeInstance.objects.create",
"brewery.permissions.OwnsRecipe",
"joulia.http.get_object_or_404",
"django.http.HttpResponseForbidden",
"brewery.models.MaltIngredientAddition.objects.all",
"brewery.permissions.is_member_of_brewing_company"
] | [((653, 680), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (670, 680), False, 'import logging\n'), ((903, 947), 'brewery.models.JouliaControllerRelease.objects.all', 'models.JouliaControllerRelease.objects.all', ([], {}), '()\n', (945, 947), False, 'from brewery import models\n'), ((4740, 4770), 'brewery.models.BeerStyle.objects.all', 'models.BeerStyle.objects.all', ([], {}), '()\n', (4768, 4770), False, 'from brewery import models\n'), ((5492, 5528), 'brewery.models.YeastIngredient.objects.all', 'models.YeastIngredient.objects.all', ([], {}), '()\n', (5526, 5528), False, 'from brewery import models\n'), ((6276, 6311), 'brewery.models.MaltIngredient.objects.all', 'models.MaltIngredient.objects.all', ([], {}), '()\n', (6309, 6311), False, 'from brewery import models\n'), ((7066, 7106), 'brewery.models.BitteringIngredient.objects.all', 'models.BitteringIngredient.objects.all', ([], {}), '()\n', (7104, 7106), False, 'from brewery import models\n'), ((7945, 7988), 'brewery.models.MaltIngredientAddition.objects.all', 'models.MaltIngredientAddition.objects.all', ([], {}), '()\n', (7986, 7988), False, 'from brewery import models\n'), ((8817, 8865), 'brewery.models.BitteringIngredientAddition.objects.all', 'models.BitteringIngredientAddition.objects.all', ([], {}), '()\n', (8863, 8865), False, 'from brewery import models\n'), ((2694, 2761), 'brewery.models.BrewingCompany.objects.filter', 'models.BrewingCompany.objects.filter', ([], {'group__user': 'self.request.user'}), '(group__user=self.request.user)\n', (2730, 2761), False, 'from brewery import models\n'), ((3470, 3539), 'brewery.models.Brewery.objects.filter', 'models.Brewery.objects.filter', ([], {'company__group__user': 'self.request.user'}), '(company__group__user=self.request.user)\n', (3499, 3539), False, 'from brewery import models\n'), ((4179, 4264), 'brewery.models.Brewhouse.objects.filter', 'models.Brewhouse.objects.filter', ([], {'brewery__company__group__user': 'self.request.user'}), '(brewery__company__group__user=self.request.user\n )\n', (4210, 4264), False, 'from brewery import models\n'), ((9639, 9707), 'brewery.models.Recipe.objects.filter', 'models.Recipe.objects.filter', ([], {'company__group__user': 'self.request.user'}), '(company__group__user=self.request.user)\n', (9667, 9707), False, 'from brewery import models\n'), ((11233, 11322), 'brewery.models.RecipeInstance.objects.filter', 'models.RecipeInstance.objects.filter', ([], {'recipe__company__group__user': 'self.request.user'}), '(recipe__company__group__user=self.\n request.user)\n', (11269, 11322), False, 'from brewery import models\n'), ((12023, 12137), 'brewery.models.TimeSeriesDataPoint.objects.filter', 'models.TimeSeriesDataPoint.objects.filter', ([], {'sensor__brewhouse__brewery__company__group__user': 'self.request.user'}), '(\n sensor__brewhouse__brewery__company__group__user=self.request.user)\n', (12064, 12137), False, 'from brewery import models\n'), ((14946, 14981), 'django.http.JsonResponse', 'JsonResponse', (["{'sensor': sensor.pk}"], {}), "({'sensor': sensor.pk})\n", (14958, 14981), False, 'from django.http import JsonResponse\n'), ((15569, 15617), 'joulia.http.get_data_value_or_400', 'http.get_data_value_or_400', (['request', '"""brewhouse"""'], {}), "(request, 'brewhouse')\n", (15595, 15617), False, 'from joulia import http\n'), ((15638, 15683), 'joulia.http.get_data_value_or_400', 'http.get_data_value_or_400', (['request', '"""recipe"""'], {}), "(request, 'recipe')\n", (15664, 15683), False, 'from joulia import 
http\n'), ((15705, 15759), 'joulia.http.get_object_or_404', 'http.get_object_or_404', (['models.Brewhouse', 'brewhouse_pk'], {}), '(models.Brewhouse, brewhouse_pk)\n', (15727, 15759), False, 'from joulia import http\n'), ((15969, 16017), 'joulia.http.get_object_or_404', 'http.get_object_or_404', (['models.Recipe', 'recipe_pk'], {}), '(models.Recipe, recipe_pk)\n', (15991, 16017), False, 'from joulia import http\n'), ((16238, 16327), 'brewery.models.RecipeInstance.objects.create', 'models.RecipeInstance.objects.create', ([], {'brewhouse': 'brewhouse', 'recipe': 'recipe', 'active': '(True)'}), '(brewhouse=brewhouse, recipe=recipe,\n active=True)\n', (16274, 16327), False, 'from brewery import models\n'), ((16353, 16393), 'django.http.JsonResponse', 'JsonResponse', (["{'id': recipe_instance.pk}"], {}), "({'id': recipe_instance.pk})\n", (16365, 16393), False, 'from django.http import JsonResponse\n'), ((16549, 16603), 'joulia.http.get_data_value_or_400', 'http.get_data_value_or_400', (['request', '"""recipe_instance"""'], {}), "(request, 'recipe_instance')\n", (16575, 16603), False, 'from joulia import http\n'), ((16686, 16751), 'joulia.http.get_object_or_404', 'http.get_object_or_404', (['models.RecipeInstance', 'recipe_instance_pk'], {}), '(models.RecipeInstance, recipe_instance_pk)\n', (16708, 16751), False, 'from joulia import http\n'), ((17102, 17116), 'django.http.HttpResponse', 'HttpResponse', ([], {}), '()\n', (17114, 17116), False, 'from django.http import HttpResponse\n'), ((1777, 1810), 'brewery.models.BrewingState.objects.all', 'models.BrewingState.objects.all', ([], {}), '()\n', (1808, 1810), False, 'from brewery import models\n'), ((13736, 13792), 'brewery.models.RecipeInstance.objects.get', 'models.RecipeInstance.objects.get', ([], {'id': 'recipe_instance_id'}), '(id=recipe_instance_id)\n', (13769, 13792), False, 'from brewery import models\n'), ((13951, 13996), 'brewery.models.Brewhouse.objects.get', 'models.Brewhouse.objects.get', ([], {'id': 'brewhouse_id'}), '(id=brewhouse_id)\n', (13979, 13996), False, 'from brewery import models\n'), ((14013, 14099), 'brewery.permissions.is_member_of_brewing_company', 'permissions.is_member_of_brewing_company', (['request.user', 'brewhouse.brewery.company'], {}), '(request.user, brewhouse.brewery.\n company)\n', (14053, 14099), False, 'from brewery import permissions\n'), ((14132, 14199), 'django.http.HttpResponseForbidden', 'HttpResponseForbidden', (['"""Access not permitted to brewing equipment."""'], {}), "('Access not permitted to brewing equipment.')\n", (14153, 14199), False, 'from django.http import HttpResponseForbidden\n'), ((14305, 14400), 'brewery.models.AssetSensor.objects.get', 'models.AssetSensor.objects.get', ([], {'name': 'name', 'brewhouse': 'brewhouse', 'variable_type': 'variable_type'}), '(name=name, brewhouse=brewhouse,\n variable_type=variable_type)\n', (14335, 14400), False, 'from brewery import models\n'), ((15293, 15334), 'django.http.JsonResponse', 'JsonResponse', (["{'brewhouse': brewhouse.pk}"], {}), "({'brewhouse': brewhouse.pk})\n", (15305, 15334), False, 'from django.http import JsonResponse\n'), ((15891, 15951), 'joulia.http.HTTP403', 'http.HTTP403', (['"""No permission to access requested brewhouse."""'], {}), "('No permission to access requested brewhouse.')\n", (15903, 15951), False, 'from joulia import http\n'), ((16153, 16210), 'joulia.http.HTTP403', 'http.HTTP403', (['"""No permission to access requested recipe."""'], {}), "('No permission to access requested recipe.')\n", (16165, 16210), False, 'from 
joulia import http\n'), ((16932, 16998), 'joulia.http.HTTP403', 'http.HTTP403', (['"""No permission to access requested recipe_instance."""'], {}), "('No permission to access requested recipe_instance.')\n", (16944, 16998), False, 'from joulia import http\n'), ((10334, 10413), 'brewery.models.MashPoint.objects.filter', 'models.MashPoint.objects.filter', ([], {'recipe__company__group__user': 'self.request.user'}), '(recipe__company__group__user=self.request.user)\n', (10365, 10413), False, 'from brewery import models\n'), ((14764, 14862), 'brewery.models.AssetSensor.objects.create', 'models.AssetSensor.objects.create', ([], {'name': 'name', 'brewhouse': 'brewhouse', 'variable_type': 'variable_type'}), '(name=name, brewhouse=brewhouse,\n variable_type=variable_type)\n', (14797, 14862), False, 'from brewery import models\n'), ((15389, 15412), 'django.http.HttpResponseForbidden', 'HttpResponseForbidden', ([], {}), '()\n', (15410, 15412), False, 'from django.http import HttpResponseForbidden\n'), ((15775, 15806), 'brewery.permissions.IsMemberOfBrewery', 'permissions.IsMemberOfBrewery', ([], {}), '()\n', (15804, 15806), False, 'from brewery import permissions\n'), ((16033, 16071), 'brewery.permissions.IsMemberOfBrewingCompany', 'permissions.IsMemberOfBrewingCompany', ([], {}), '()\n', (16069, 16071), False, 'from brewery import permissions\n'), ((16817, 16841), 'brewery.permissions.OwnsRecipe', 'permissions.OwnsRecipe', ([], {}), '()\n', (16839, 16841), False, 'from brewery import permissions\n')] |
import math
import turtle
SQRT2 = math.sqrt(2)
def house(length):
"""
    Draw a simple house whose square base is ``length`` units long, and return
    the turtle to its original position and heading at the end.
"""
    inside = SQRT2 * length  # diagonal of the square base
    roof = inside / 2.  # each roof edge is half the diagonal (45-45-90 triangle)
turtle.forward(length)
turtle.left(90)
turtle.forward(length)
# roof
turtle.left(45)
turtle.forward(roof)
turtle.left(90)
turtle.forward(roof)
turtle.left(45)
# interior
turtle.forward(length)
turtle.left(135)
turtle.forward(inside)
turtle.left(135)
turtle.forward(length)
turtle.left(135)
turtle.forward(inside)
# back into position
turtle.left(45)
turtle.backward(length)
turtle.shape("turtle")
house(100)
turtle.penup()
turtle.forward(120)
turtle.pendown()
house(80)
turtle.exitonclick()
| [
"turtle.shape",
"turtle.pendown",
"turtle.penup",
"math.sqrt",
"turtle.forward",
"turtle.exitonclick",
"turtle.left",
"turtle.backward"
] | [((35, 47), 'math.sqrt', 'math.sqrt', (['(2)'], {}), '(2)\n', (44, 47), False, 'import math\n'), ((712, 734), 'turtle.shape', 'turtle.shape', (['"""turtle"""'], {}), "('turtle')\n", (724, 734), False, 'import turtle\n'), ((747, 761), 'turtle.penup', 'turtle.penup', ([], {}), '()\n', (759, 761), False, 'import turtle\n'), ((762, 781), 'turtle.forward', 'turtle.forward', (['(120)'], {}), '(120)\n', (776, 781), False, 'import turtle\n'), ((782, 798), 'turtle.pendown', 'turtle.pendown', ([], {}), '()\n', (796, 798), False, 'import turtle\n'), ((811, 831), 'turtle.exitonclick', 'turtle.exitonclick', ([], {}), '()\n', (829, 831), False, 'import turtle\n'), ((257, 279), 'turtle.forward', 'turtle.forward', (['length'], {}), '(length)\n', (271, 279), False, 'import turtle\n'), ((284, 299), 'turtle.left', 'turtle.left', (['(90)'], {}), '(90)\n', (295, 299), False, 'import turtle\n'), ((304, 326), 'turtle.forward', 'turtle.forward', (['length'], {}), '(length)\n', (318, 326), False, 'import turtle\n'), ((343, 358), 'turtle.left', 'turtle.left', (['(45)'], {}), '(45)\n', (354, 358), False, 'import turtle\n'), ((363, 383), 'turtle.forward', 'turtle.forward', (['roof'], {}), '(roof)\n', (377, 383), False, 'import turtle\n'), ((388, 403), 'turtle.left', 'turtle.left', (['(90)'], {}), '(90)\n', (399, 403), False, 'import turtle\n'), ((408, 428), 'turtle.forward', 'turtle.forward', (['roof'], {}), '(roof)\n', (422, 428), False, 'import turtle\n'), ((433, 448), 'turtle.left', 'turtle.left', (['(45)'], {}), '(45)\n', (444, 448), False, 'import turtle\n'), ((469, 491), 'turtle.forward', 'turtle.forward', (['length'], {}), '(length)\n', (483, 491), False, 'import turtle\n'), ((496, 512), 'turtle.left', 'turtle.left', (['(135)'], {}), '(135)\n', (507, 512), False, 'import turtle\n'), ((517, 539), 'turtle.forward', 'turtle.forward', (['inside'], {}), '(inside)\n', (531, 539), False, 'import turtle\n'), ((544, 560), 'turtle.left', 'turtle.left', (['(135)'], {}), '(135)\n', (555, 560), False, 'import turtle\n'), ((565, 587), 'turtle.forward', 'turtle.forward', (['length'], {}), '(length)\n', (579, 587), False, 'import turtle\n'), ((592, 608), 'turtle.left', 'turtle.left', (['(135)'], {}), '(135)\n', (603, 608), False, 'import turtle\n'), ((613, 635), 'turtle.forward', 'turtle.forward', (['inside'], {}), '(inside)\n', (627, 635), False, 'import turtle\n'), ((666, 681), 'turtle.left', 'turtle.left', (['(45)'], {}), '(45)\n', (677, 681), False, 'import turtle\n'), ((686, 709), 'turtle.backward', 'turtle.backward', (['length'], {}), '(length)\n', (701, 709), False, 'import turtle\n')] |
# Copyright 2015 Ufora Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ufora.BackendGateway.ComputedGraph.ComputedGraph as ComputedGraph
import ufora.native.Cumulus as CumulusNative
import ufora.native.FORA as ForaNative
import ufora.BackendGateway.ComputedValue.ComputedValueGateway as ComputedValueGateway
import time
import ufora.BackendGateway.ComputedGraph.BackgroundUpdateQueue as BackgroundUpdateQueue
getGateway = ComputedValueGateway.getGateway
class ViewOfEntireCumulusSystem(ComputedGraph.Location):
viewOfSystem_ = ComputedGraph.Mutable(object, lambda: ())
recentGlobalUserFacingLogMessages_ = ComputedGraph.Mutable(object, lambda: ())
totalMessageCountsEver_ = ComputedGraph.Mutable(object, lambda: 0)
@ComputedGraph.ExposedProperty()
def mostRecentMessages(self):
return self.recentGlobalUserFacingLogMessages_
@ComputedGraph.ExposedProperty()
def totalMessagesEver(self):
return self.totalMessageCountsEver_
@ComputedGraph.ExposedFunction()
def clearMostRecentMessages(self, arg):
self.recentGlobalUserFacingLogMessages_ = ()
@ComputedGraph.ExposedFunction()
def clearAndReturnMostRecentMessages(self, arg):
messages = self.recentGlobalUserFacingLogMessages_
self.recentGlobalUserFacingLogMessages_ = ()
return messages
@ComputedGraph.ExposedProperty()
def viewOfCumulusSystem(self):
return self.viewOfSystem_
@ComputedGraph.ExposedFunction()
def pushNewGlobalUserFacingLogMessage(self, msg):
self.totalMessageCountsEver_ = self.totalMessageCountsEver_ + 1
self.recentGlobalUserFacingLogMessages_ = (
            self.recentGlobalUserFacingLogMessages_ +
            ({"timestamp": msg.timestamp,
              "message": msg.message,
              "isDeveloperFacing": msg.isDeveloperFacing},)
            )
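# Rough usage sketch (not from the original codebase; 'view' and 'msg' are
# hypothetical): each pushed message is appended as a dict to the mutable tuple,
# and clearAndReturnMostRecentMessages drains and returns the accumulated batch.
#
#   view.pushNewGlobalUserFacingLogMessage(msg)
#   pending = view.clearAndReturnMostRecentMessages(None)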
| [
"ufora.BackendGateway.ComputedGraph.ComputedGraph.ExposedProperty",
"ufora.BackendGateway.ComputedGraph.ComputedGraph.ExposedFunction",
"ufora.BackendGateway.ComputedGraph.ComputedGraph.Mutable"
] | [((1064, 1106), 'ufora.BackendGateway.ComputedGraph.ComputedGraph.Mutable', 'ComputedGraph.Mutable', (['object', '(lambda : ())'], {}), '(object, lambda : ())\n', (1085, 1106), True, 'import ufora.BackendGateway.ComputedGraph.ComputedGraph as ComputedGraph\n'), ((1147, 1189), 'ufora.BackendGateway.ComputedGraph.ComputedGraph.Mutable', 'ComputedGraph.Mutable', (['object', '(lambda : ())'], {}), '(object, lambda : ())\n', (1168, 1189), True, 'import ufora.BackendGateway.ComputedGraph.ComputedGraph as ComputedGraph\n'), ((1219, 1260), 'ufora.BackendGateway.ComputedGraph.ComputedGraph.Mutable', 'ComputedGraph.Mutable', (['object', '(lambda : 0)'], {}), '(object, lambda : 0)\n', (1240, 1260), True, 'import ufora.BackendGateway.ComputedGraph.ComputedGraph as ComputedGraph\n'), ((1266, 1297), 'ufora.BackendGateway.ComputedGraph.ComputedGraph.ExposedProperty', 'ComputedGraph.ExposedProperty', ([], {}), '()\n', (1295, 1297), True, 'import ufora.BackendGateway.ComputedGraph.ComputedGraph as ComputedGraph\n'), ((1393, 1424), 'ufora.BackendGateway.ComputedGraph.ComputedGraph.ExposedProperty', 'ComputedGraph.ExposedProperty', ([], {}), '()\n', (1422, 1424), True, 'import ufora.BackendGateway.ComputedGraph.ComputedGraph as ComputedGraph\n'), ((1508, 1539), 'ufora.BackendGateway.ComputedGraph.ComputedGraph.ExposedFunction', 'ComputedGraph.ExposedFunction', ([], {}), '()\n', (1537, 1539), True, 'import ufora.BackendGateway.ComputedGraph.ComputedGraph as ComputedGraph\n'), ((1643, 1674), 'ufora.BackendGateway.ComputedGraph.ComputedGraph.ExposedFunction', 'ComputedGraph.ExposedFunction', ([], {}), '()\n', (1672, 1674), True, 'import ufora.BackendGateway.ComputedGraph.ComputedGraph as ComputedGraph\n'), ((1870, 1901), 'ufora.BackendGateway.ComputedGraph.ComputedGraph.ExposedProperty', 'ComputedGraph.ExposedProperty', ([], {}), '()\n', (1899, 1901), True, 'import ufora.BackendGateway.ComputedGraph.ComputedGraph as ComputedGraph\n'), ((1977, 2008), 'ufora.BackendGateway.ComputedGraph.ComputedGraph.ExposedFunction', 'ComputedGraph.ExposedFunction', ([], {}), '()\n', (2006, 2008), True, 'import ufora.BackendGateway.ComputedGraph.ComputedGraph as ComputedGraph\n')] |
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 27 11:27:20 2022
@author: samue
"""
import pandas as pd
import re
amenitiesList = ["Internet", "Air conditioning", "Breakfast", "TV", "Bathub", "Dryer", "Elevator in building", "Parking",
"Gym", "Heating", "Kitchen", "Pets allowed", "Pool", "Smoking allowed", "Washer", "Wheelchair accessible"]
def getColEncoded(name, dataset):
    return [x for x in dataset.columns if re.search(name + r" \d+", str(x))]
def GetDataSet(path):
dataset = pd.read_csv(path, dtype={'Zipcode': "string"})
dataset = dataset[dataset["Review Scores Value"].notna()]
neiColEncodedNeighbourhood = getColEncoded("Neighbourhood", dataset)
neiColEncodedPropertyType = getColEncoded("Property Type", dataset)
neiColEncodedRoomType = getColEncoded("Room Type", dataset)
neiColEncodedBedType = getColEncoded("Bed Type", dataset)
X = dataset[["Square Meter"] +
["Accommodates"] +
["Bathrooms"] +
["Bedrooms"] +
["Review Scores Value"] +
amenitiesList +
neiColEncodedNeighbourhood +
neiColEncodedPropertyType +
neiColEncodedRoomType +
neiColEncodedBedType].values
y = dataset['Price']
return X, y, dataset
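# Hypothetical usage sketch (the CSV path is an assumption; the file must contain
# the columns referenced above, including the one-hot encoded "... <n>" columns):
#
#   X, y, dataset = GetDataSet("listings_encoded.csv")
#   print(X.shape, y.shape)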
| [
"pandas.read_csv"
] | [((513, 559), 'pandas.read_csv', 'pd.read_csv', (['path'], {'dtype': "{'Zipcode': 'string'}"}), "(path, dtype={'Zipcode': 'string'})\n", (524, 559), True, 'import pandas as pd\n')] |
import errno
import os
import pickle
from abc import ABCMeta, abstractmethod
from pathlib import Path
import numpy as np
from sklearn.utils import resample
from stat_test import perform_t_test, get_box_plots_mod
from util.util import twitter_datetime_str_to_object
class BaseFeatureHelper(metaclass=ABCMeta):
@abstractmethod
def get_feature_group_name(self):
pass
@abstractmethod
def get_micro_feature_method_references(self):
pass
@abstractmethod
def get_micro_feature_method_names(self):
pass
@abstractmethod
def get_micro_feature_short_names(self):
pass
@abstractmethod
def get_macro_feature_method_references(self):
pass
@abstractmethod
def get_macro_feature_method_names(self):
pass
@abstractmethod
def get_macro_feature_short_names(self):
pass
def get_dump_file_name(self, news_source, micro_features, macro_features, label, file_dir):
file_tags = [news_source, label, self.get_feature_group_name()]
if micro_features:
file_tags.append("micro")
if macro_features:
file_tags.append("macro")
return "{}/{}.pkl".format(file_dir, "_".join(file_tags))
def get_features_array(self, prop_graphs, micro_features, macro_features, news_source=None, label=None,
file_dir="/content/FakeNewsPropagation/data/features", use_cache=False):
function_refs = []
file_name = self.get_dump_file_name(news_source, micro_features, macro_features, label, file_dir)
data_file = Path(file_name)
if use_cache and data_file.is_file():
return pickle.load(open(file_name, "rb"))
if micro_features:
function_refs.extend(self.get_micro_feature_method_references())
if macro_features:
function_refs.extend(self.get_macro_feature_method_references())
if len(function_refs) == 0:
return None
all_features = []
for function_reference in function_refs:
features_set = get_sample_feature_value(prop_graphs, function_reference)
all_features.append(features_set)
feature_array = np.transpose(get_numpy_array(all_features))
pickle.dump(feature_array, open(file_name, "wb"))
return feature_array
def get_feature_names(self, micro_features, macro_features):
features_names = []
short_feature_names = []
if micro_features:
features_names.extend(self.get_micro_feature_method_names())
short_feature_names.extend(self.get_micro_feature_short_names())
if macro_features:
features_names.extend(self.get_macro_feature_method_names())
short_feature_names.extend(self.get_macro_feature_short_names())
return features_names, short_feature_names
def print_statistics_for_all_features(self, feature_array=None, prop_graphs=None, micro_features=None,
macro_features=None):
if feature_array is None:
feature_array = self.get_features_array(prop_graphs, micro_features, macro_features)
[feature_names, short_feature_names] = self.get_feature_names(micro_features, macro_features)
for idx in range(len(feature_names)):
feature_values = feature_array[:, idx]
print_stat_values(feature_names[idx], feature_values, short_feature_names[idx])
def save_blox_plots_for_features(self, fake_feature_array=None, real_feature_array=None, fake_prop_graphs=None,
real_prop_graphs=None, micro_features=None, macro_features=None, save_folder=None):
if fake_feature_array is None:
fake_feature_array = self.get_features_array(fake_prop_graphs, micro_features, macro_features)
real_feature_array = self.get_features_array(real_prop_graphs, micro_features, macro_features)
[feature_names, short_feature_names] = self.get_feature_names(micro_features, macro_features)
for idx in range(len(feature_names)):
fake_feature_values = fake_feature_array[:, idx]
real_feature_values = real_feature_array[:, idx]
get_box_plots_mod(fake_feature_values, real_feature_values, save_folder, feature_names[idx],
short_feature_names[idx])
def get_feature_significance_t_tests(self, fake_feature_array, real_feature_array, micro_features=None,
macro_features=None):
[feature_names, short_feature_names] = self.get_feature_names(micro_features, macro_features)
for idx in range(len(feature_names)):
fake_feature_values = fake_feature_array[:, idx]
real_feature_values = real_feature_array[:, idx]
print("Feature {} : {}".format(short_feature_names[idx], feature_names[idx]))
perform_t_test(fake_feature_values, real_feature_values)
def get_feature_significance_bootstrap_tests(self, fake_feature_array, real_feature_array, micro_features=None,
macro_features=None):
[feature_names, short_feature_names] = self.get_feature_names(micro_features, macro_features)
for idx in range(len(feature_names)):
fake_feature_values = fake_feature_array[:, idx]
real_feature_values = real_feature_array[:, idx]
perms_fake = []
perms_real = []
combined = np.concatenate((fake_feature_values, real_feature_values), axis=0)
print("combined shape : ", combined.shape)
for i in range(10000):
np.random.seed(i)
perms_fake.append(resample(combined, n_samples=len(fake_feature_values)))
perms_real.append(resample(combined, n_samples=len(real_feature_values)))
dif_bootstrap_means = (np.mean(perms_fake, axis=1) - np.mean(perms_real, axis=1))
print("diff bootstrap means : ", dif_bootstrap_means.shape)
obs_difs = (np.mean(fake_feature_values) - np.mean(real_feature_values))
p_value = dif_bootstrap_means[dif_bootstrap_means >= obs_difs].shape[0] / 10000
print("Feature {} : {}".format(short_feature_names[idx], feature_names[idx]))
print("t- value : {} p-value : {}".format(obs_difs, p_value))
def get_sample_feature_value(news_graphs: list, get_feature_fun_ref):
    result = []
    for graph in news_graphs:
result.append(get_feature_fun_ref(graph))
return result
def create_dir(dir_name):
if not os.path.exists(dir_name):
try:
os.makedirs(dir_name)
except OSError as exc: # Guard against race condition
if exc.errno != errno.EEXIST:
raise
def get_epoch_timestamp_from_retweet(retweet):
return twitter_datetime_str_to_object(retweet["created_at"])
def sort_retweet_object_by_time(retweets: list):
retweets.sort(key=get_epoch_timestamp_from_retweet)
return retweets
def get_noise_news_ids():
with open("data/news_id_ignore_list") as file:
lines = file.readlines()
return [line.strip() for line in lines]
def load_prop_graph(data_folder, news_source, news_label):
news_graphs = pickle.load(open("{}/{}_{}_news_prop_graphs.pkl".format(data_folder, news_source, news_label), "rb"))
return news_graphs
def remove_prop_graph_noise(news_graphs, noise_ids):
noise_ids = set(noise_ids)
return [graph for graph in news_graphs if graph.tweet_id not in noise_ids]
def sort_tweet_node_object_by_created_time(tweet_nodes: list):
tweet_nodes.sort(key=lambda x: x.created_time)
return tweet_nodes
def equal_samples(sample1, sample2):
target_len = min(len(sample1), len(sample2))
np.random.seed(0)
np.random.shuffle(sample1)
np.random.shuffle(sample2)
return sample1[:target_len], sample2[:target_len]
# def get_propagation_graphs(data_folder, news_source):
# fake_propagation_graphs = load_prop_graph(data_folder, news_source, "fake")
# real_propagation_graphs = load_prop_graph(data_folder, news_source, "real")
#
# print("Before filtering no. of FAKE prop graphs: {}".format(len(fake_propagation_graphs)))
# print("Before filtering no. of REAL prop graphs: {}".format(len(real_propagation_graphs)))
#
# fake_propagation_graphs = remove_prop_graph_noise(fake_propagation_graphs, get_noise_news_ids())
# real_propagation_graphs = remove_prop_graph_noise(real_propagation_graphs, get_noise_news_ids())
#
# print("After filtering no. of FAKE prop graphs: {}".format(len(fake_propagation_graphs)))
# print("After filtering no. of REAL prop graphs: {}".format(len(real_propagation_graphs)))
# print(flush=True)
#
# return fake_propagation_graphs, real_propagation_graphs
def get_numpy_array(list_of_list):
np_array_lists = []
for list_obj in list_of_list:
np_array_lists.append(np.array(list_obj))
return np.array(np_array_lists)
def print_stat_values(feature_name, values, short_feature_name=""):
print("=========================================")
print("Feature {} : {}".format(short_feature_name, feature_name))
print("Min value : {}".format(min(values)))
print("Max value : {}".format(max(values)))
print("Mean value : {}".format(np.mean(np.array(values))))
print("=========================================")
| [
"os.path.exists",
"numpy.mean",
"stat_test.perform_t_test",
"os.makedirs",
"pathlib.Path",
"stat_test.get_box_plots_mod",
"numpy.array",
"numpy.random.seed",
"numpy.concatenate",
"util.util.twitter_datetime_str_to_object",
"numpy.random.shuffle"
] | [((6924, 6977), 'util.util.twitter_datetime_str_to_object', 'twitter_datetime_str_to_object', (["retweet['created_at']"], {}), "(retweet['created_at'])\n", (6954, 6977), False, 'from util.util import twitter_datetime_str_to_object\n'), ((7868, 7885), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (7882, 7885), True, 'import numpy as np\n'), ((7891, 7917), 'numpy.random.shuffle', 'np.random.shuffle', (['sample1'], {}), '(sample1)\n', (7908, 7917), True, 'import numpy as np\n'), ((7922, 7948), 'numpy.random.shuffle', 'np.random.shuffle', (['sample2'], {}), '(sample2)\n', (7939, 7948), True, 'import numpy as np\n'), ((9069, 9093), 'numpy.array', 'np.array', (['np_array_lists'], {}), '(np_array_lists)\n', (9077, 9093), True, 'import numpy as np\n'), ((1602, 1617), 'pathlib.Path', 'Path', (['file_name'], {}), '(file_name)\n', (1606, 1617), False, 'from pathlib import Path\n'), ((6663, 6687), 'os.path.exists', 'os.path.exists', (['dir_name'], {}), '(dir_name)\n', (6677, 6687), False, 'import os\n'), ((4265, 4387), 'stat_test.get_box_plots_mod', 'get_box_plots_mod', (['fake_feature_values', 'real_feature_values', 'save_folder', 'feature_names[idx]', 'short_feature_names[idx]'], {}), '(fake_feature_values, real_feature_values, save_folder,\n feature_names[idx], short_feature_names[idx])\n', (4282, 4387), False, 'from stat_test import perform_t_test, get_box_plots_mod\n'), ((4955, 5011), 'stat_test.perform_t_test', 'perform_t_test', (['fake_feature_values', 'real_feature_values'], {}), '(fake_feature_values, real_feature_values)\n', (4969, 5011), False, 'from stat_test import perform_t_test, get_box_plots_mod\n'), ((5553, 5619), 'numpy.concatenate', 'np.concatenate', (['(fake_feature_values, real_feature_values)'], {'axis': '(0)'}), '((fake_feature_values, real_feature_values), axis=0)\n', (5567, 5619), True, 'import numpy as np\n'), ((6714, 6735), 'os.makedirs', 'os.makedirs', (['dir_name'], {}), '(dir_name)\n', (6725, 6735), False, 'import os\n'), ((9037, 9055), 'numpy.array', 'np.array', (['list_obj'], {}), '(list_obj)\n', (9045, 9055), True, 'import numpy as np\n'), ((5728, 5745), 'numpy.random.seed', 'np.random.seed', (['i'], {}), '(i)\n', (5742, 5745), True, 'import numpy as np\n'), ((5962, 5989), 'numpy.mean', 'np.mean', (['perms_fake'], {'axis': '(1)'}), '(perms_fake, axis=1)\n', (5969, 5989), True, 'import numpy as np\n'), ((5992, 6019), 'numpy.mean', 'np.mean', (['perms_real'], {'axis': '(1)'}), '(perms_real, axis=1)\n', (5999, 6019), True, 'import numpy as np\n'), ((6118, 6146), 'numpy.mean', 'np.mean', (['fake_feature_values'], {}), '(fake_feature_values)\n', (6125, 6146), True, 'import numpy as np\n'), ((6149, 6177), 'numpy.mean', 'np.mean', (['real_feature_values'], {}), '(real_feature_values)\n', (6156, 6177), True, 'import numpy as np\n'), ((9428, 9444), 'numpy.array', 'np.array', (['values'], {}), '(values)\n', (9436, 9444), True, 'import numpy as np\n')] |
import firebase_admin
from firebase_admin import storage
import asyncio
import logging
import functools
import random
import subprocess
import discord
from discord.ext import commands
from tle.util import codeforces_api as cf
from tle.util import db
logger = logging.getLogger(__name__)
_CF_COLORS = (0xFFCA1F, 0x198BCC, 0xFF2020)
_SUCCESS_GREEN = 0x28A745
_ALERT_AMBER = 0xFFBF00
def embed_neutral(desc, color=discord.Embed.Empty):
return discord.Embed(description=str(desc), color=color)
def embed_success(desc):
return discord.Embed(description=str(desc), color=_SUCCESS_GREEN)
def embed_alert(desc):
return discord.Embed(description=str(desc), color=_ALERT_AMBER)
def cf_color_embed(**kwargs):
return discord.Embed(**kwargs, color=random.choice(_CF_COLORS))
def attach_image(embed, img_file):
embed.set_image(url=f'attachment://{img_file.filename}')
def set_author_footer(embed, user):
embed.set_footer(text=f'Requested by {user}', icon_url=user.avatar_url)
def send_error_if(*error_cls):
"""Decorator for `cog_command_error` methods. Decorated methods send the error in an alert embed
when the error is an instance of one of the specified errors, otherwise the wrapped function is
invoked.
"""
def decorator(func):
@functools.wraps(func)
async def wrapper(cog, ctx, error):
if isinstance(error, error_cls):
await ctx.send(embed=embed_alert(error))
error.handled = True
else:
await func(cog, ctx, error)
return wrapper
return decorator
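# Illustrative usage sketch of send_error_if (the cog below is hypothetical and
# not part of this module):
#
#   class HandleCog(commands.Cog):
#       @send_error_if(cf.CodeforcesApiError, db.DatabaseDisabledError)
#       async def cog_command_error(self, ctx, error):
#           pass  # errors of other types would be handled here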
async def bot_error_handler(ctx, exception):
if getattr(exception, 'handled', False):
# Errors already handled in cogs should have .handled = True
return
if isinstance(exception, db.DatabaseDisabledError):
await ctx.send(embed=embed_alert('Sorry, the database is not available. Some features are disabled.'))
elif isinstance(exception, commands.NoPrivateMessage):
await ctx.send(embed=embed_alert('Commands are disabled in private channels'))
elif isinstance(exception, commands.DisabledCommand):
await ctx.send(embed=embed_alert('Sorry, this command is temporarily disabled'))
elif isinstance(exception, cf.CodeforcesApiError):
await ctx.send(embed=embed_alert(exception))
else:
exc_info = type(exception), exception, exception.__traceback__
logger.exception('Ignoring exception in command {}:'.format(ctx.command), exc_info=exc_info)
def uploadData():
cred = firebase_admin.credentials.Certificate('key.json')
firebase_admin.initialize_app(cred, {
'databaseURL': 'https://smart-india-hackathon-d1f21.appspot.com/',
'storageBucket': 'smart-india-hackathon-d1f21.appspot.com/',
})
blob = storage.bucket('smart-india-hackathon-d1f21.appspot.com').blob('data.zip') # intended name of file in Firebase Storage
blob.upload_from_filename('data.zip') # path to file on local disk
async def presence(bot):
await bot.change_presence(activity=discord.Activity(
type=discord.ActivityType.listening,
name='your commands'))
await asyncio.sleep(60)
while True:
target = random.choice([
member for member in bot.get_all_members()
if 'Purgatory' not in {role.name for role in member.roles}
])
Activity_Type = random.randint(0, 2)
if Activity_Type == 0:
await bot.change_presence(activity=discord.Activity(
type=discord.ActivityType.listening, name="Kalasala"))
elif Activity_Type == 1:
await bot.change_presence(activity=discord.Game(
name=f'{target.display_name} orz'))
elif Activity_Type == 2:
await bot.change_presence(activity=discord.Activity(
type=discord.ActivityType.watching, name="Hariyali"))
logger.info(f"Starting Backup...")
bashCommand = "zip data.zip data/db/cache.db data/db/user.db"
output, error = subprocess.Popen(bashCommand.split(), stdout=subprocess.PIPE).communicate()
logger.info(f"Uploading to cloud...")
uploadData()
bashCommand = "rm data.zip"
output, error = subprocess.Popen(bashCommand.split(), stdout=subprocess.PIPE).communicate()
logger.info(f"Backup Complete")
await asyncio.sleep(10 * 60)
| [
"logging.getLogger",
"random.choice",
"firebase_admin.initialize_app",
"discord.Game",
"functools.wraps",
"firebase_admin.credentials.Certificate",
"discord.Activity",
"asyncio.sleep",
"random.randint",
"firebase_admin.storage.bucket"
] | [((262, 289), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (279, 289), False, 'import logging\n'), ((2559, 2609), 'firebase_admin.credentials.Certificate', 'firebase_admin.credentials.Certificate', (['"""key.json"""'], {}), "('key.json')\n", (2597, 2609), False, 'import firebase_admin\n'), ((2614, 2787), 'firebase_admin.initialize_app', 'firebase_admin.initialize_app', (['cred', "{'databaseURL': 'https://smart-india-hackathon-d1f21.appspot.com/',\n 'storageBucket': 'smart-india-hackathon-d1f21.appspot.com/'}"], {}), "(cred, {'databaseURL':\n 'https://smart-india-hackathon-d1f21.appspot.com/', 'storageBucket':\n 'smart-india-hackathon-d1f21.appspot.com/'})\n", (2643, 2787), False, 'import firebase_admin\n'), ((1291, 1312), 'functools.wraps', 'functools.wraps', (['func'], {}), '(func)\n', (1306, 1312), False, 'import functools\n'), ((3170, 3187), 'asyncio.sleep', 'asyncio.sleep', (['(60)'], {}), '(60)\n', (3183, 3187), False, 'import asyncio\n'), ((3398, 3418), 'random.randint', 'random.randint', (['(0)', '(2)'], {}), '(0, 2)\n', (3412, 3418), False, 'import random\n'), ((763, 788), 'random.choice', 'random.choice', (['_CF_COLORS'], {}), '(_CF_COLORS)\n', (776, 788), False, 'import random\n'), ((2811, 2868), 'firebase_admin.storage.bucket', 'storage.bucket', (['"""smart-india-hackathon-d1f21.appspot.com"""'], {}), "('smart-india-hackathon-d1f21.appspot.com')\n", (2825, 2868), False, 'from firebase_admin import storage\n'), ((4382, 4404), 'asyncio.sleep', 'asyncio.sleep', (['(10 * 60)'], {}), '(10 * 60)\n', (4395, 4404), False, 'import asyncio\n'), ((3066, 3141), 'discord.Activity', 'discord.Activity', ([], {'type': 'discord.ActivityType.listening', 'name': '"""your commands"""'}), "(type=discord.ActivityType.listening, name='your commands')\n", (3082, 3141), False, 'import discord\n'), ((3497, 3567), 'discord.Activity', 'discord.Activity', ([], {'type': 'discord.ActivityType.listening', 'name': '"""Kalasala"""'}), "(type=discord.ActivityType.listening, name='Kalasala')\n", (3513, 3567), False, 'import discord\n'), ((3666, 3713), 'discord.Game', 'discord.Game', ([], {'name': 'f"""{target.display_name} orz"""'}), "(name=f'{target.display_name} orz')\n", (3678, 3713), False, 'import discord\n'), ((3812, 3881), 'discord.Activity', 'discord.Activity', ([], {'type': 'discord.ActivityType.watching', 'name': '"""Hariyali"""'}), "(type=discord.ActivityType.watching, name='Hariyali')\n", (3828, 3881), False, 'import discord\n')] |
# --------------
# Importing header files
import numpy as np
import pandas as pd
from scipy.stats import mode
import warnings
warnings.filterwarnings('ignore')
#Reading file
bank_data = pd.read_csv(path)
bank = pd.DataFrame(bank_data)
categorical_var = bank.select_dtypes(include = 'object')
print(categorical_var)
numerical_var = bank.select_dtypes(include = 'number')
print(numerical_var)
banks = bank.drop(columns = ['Loan_ID'])
print(banks.isnull().sum())
bank_mode = banks.mode()
# take the first mode row so each column's missing values are filled with that column's mode
banks.fillna(bank_mode.iloc[0], inplace=True)
print(banks.isnull().sum())
avg_loan_amount = pd.pivot_table(banks,index=['Gender','Married','Self_Employed'],values=["LoanAmount"],aggfunc=np.mean)
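# avg_loan_amount is a pivot table whose rows are indexed by the
# (Gender, Married, Self_Employed) combinations and whose single column holds the
# mean LoanAmount for each group.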
print(avg_loan_amount)
loan_approved_se = banks.loc[(banks["Self_Employed"]=="Yes") & (banks["Loan_Status"]=="Y"),["Loan_Status"]].count()
loan_approved_nse = banks.loc[(banks["Self_Employed"]=="No") & (banks["Loan_Status"]=="Y"),["Loan_Status"]].count()
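# 614 is assumed to be the total number of loan records in this dataset, so the
# figures below are percentages of all applications.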
percentage_se = (loan_approved_se * 100) / 614
print("%.2f"%percentage_se)
percentage_nse = (loan_approved_nse * 100) / 614
print("%.2f"%percentage_nse)
loan_term = banks['Loan_Amount_Term'].apply(lambda x : x/12)
big_loan_term=len(loan_term[loan_term>=25])
print(big_loan_term)
loan_groupby = banks.groupby('Loan_Status')
loan_groupby = loan_groupby[['ApplicantIncome','Credit_History']]
mean_values = loan_groupby.mean()
print("%.2f"%mean_values.iloc[1,0],2)
#Code starts here
| [
"pandas.DataFrame",
"pandas.pivot_table",
"warnings.filterwarnings",
"pandas.read_csv"
] | [((135, 168), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (158, 168), False, 'import warnings\n'), ((201, 218), 'pandas.read_csv', 'pd.read_csv', (['path'], {}), '(path)\n', (212, 218), True, 'import pandas as pd\n'), ((227, 250), 'pandas.DataFrame', 'pd.DataFrame', (['bank_data'], {}), '(bank_data)\n', (239, 250), True, 'import pandas as pd\n'), ((606, 718), 'pandas.pivot_table', 'pd.pivot_table', (['banks'], {'index': "['Gender', 'Married', 'Self_Employed']", 'values': "['LoanAmount']", 'aggfunc': 'np.mean'}), "(banks, index=['Gender', 'Married', 'Self_Employed'], values=\n ['LoanAmount'], aggfunc=np.mean)\n", (620, 718), True, 'import pandas as pd\n')] |
from pritunl_api import *
import pprint
# pprint.pprint(json)  # disabled: 'json' is never imported or defined in this snippet
pri = Pritunl(url="https://yoursite.com",
token="###",
secret="###")
# delete user
org_id = ""
users = pri.user.get(org_id=org_id)
email_delete = "<EMAIL>"
for user in users:
if user["name"] == email_delete:
pri.user.delete(org_id=user["organization"], usr_id=user["id"])
print(pri.ping())
# view org
q = pri.user.get(org_id="")
# View users id
# 'x' is assumed to hold the list of organizations fetched earlier (not shown in this snippet)
q = pri.user.get(org_id=x[0]['id'])
# Delete users
pri.user.delete(org_id=x[0]['id'], user_id=q[1]['id'])
print(q)
| [
"pprint.pprint"
] | [((40, 59), 'pprint.pprint', 'pprint.pprint', (['json'], {}), '(json)\n', (53, 59), False, 'import pprint\n')] |
import sys
import csv
import datetime
def log(message):
script_name = sys.argv[0]
print(str(datetime.datetime.now()) + '\t'+ script_name + ': ' + message)
class Sorter:
field_indices = {}
field_names_dict = {}
def read_from_stream_into_dict(self, file_name):
dict = {}
fieldnames = None
with open(file_name, 'r', newline='') as infile:
reader = csv.DictReader(infile, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
fieldnames = reader.fieldnames
for record in reader:
if not record.get('geo_coord_original'):
continue
key = record['geo_coord_original']
if (len(key) < 6):
continue
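                # rebuild the key as characters 3-6 followed by characters 1-2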
record['geo_coord_original'] = key[2:6] + key[0:2]
dict[record['resource_name']] = record
log(str("{: >4d}".format(len(dict))) + ' records read from ' + file_name)
return fieldnames, dict
def get_record_key(self, array_record):
return array_record[self.field_indices['geo_coord_original']] + ' ' + array_record[self.field_indices['resource_name']]
    def to_array(self, dict_record):
        # relies on the record's keys following the CSV field order, so each value
        # is inserted at the index recorded in field_indices
        arr = []
for key, value in dict_record.items():
arr.insert(self.field_indices[key], value)
return arr
def to_dict(self, array_record):
dict_record = {}
i = 0
for v in array_record:
dict_record[self.field_names_dict[i]] = v
i += 1
return dict_record
def main(self):
fieldnames, dict_records = self.read_from_stream_into_dict('data/transformed.csv')
i = 0
for f in fieldnames:
self.field_indices[f] = i
self.field_names_dict[i] = f
i += 1
array_records = []
for r in dict_records.values():
array_records.append(self.to_array(r))
sorted_by_value = sorted(array_records, key=self.get_record_key)
with open('data/sorted.csv', 'w', newline='') as outfile:
writer = csv.DictWriter(outfile, fieldnames, delimiter=',', quotechar='"',
quoting=csv.QUOTE_MINIMAL)
writer.writeheader()
for record in sorted_by_value:
writer.writerow(self.to_dict(record))
log(str("{: >4d}".format(len(sorted_by_value))) + ' records written to sorted.csv')
if '__main__' == __name__:
print('sorter.py functionality now merged into mapper.py')
# Sorter().main()
| [
"csv.DictWriter",
"datetime.datetime.now",
"csv.DictReader"
] | [((405, 484), 'csv.DictReader', 'csv.DictReader', (['infile'], {'delimiter': '""","""', 'quotechar': '"""\\""""', 'quoting': 'csv.QUOTE_MINIMAL'}), '(infile, delimiter=\',\', quotechar=\'"\', quoting=csv.QUOTE_MINIMAL)\n', (419, 484), False, 'import csv\n'), ((2088, 2185), 'csv.DictWriter', 'csv.DictWriter', (['outfile', 'fieldnames'], {'delimiter': '""","""', 'quotechar': '"""\\""""', 'quoting': 'csv.QUOTE_MINIMAL'}), '(outfile, fieldnames, delimiter=\',\', quotechar=\'"\', quoting=\n csv.QUOTE_MINIMAL)\n', (2102, 2185), False, 'import csv\n'), ((101, 124), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (122, 124), False, 'import datetime\n')] |
import pyflann
import scipy.io as sio
import numpy as np
import cv2 as cv
from util.synthetic_util import SyntheticUtil
from util.iou_util import IouUtil
from util.projective_camera import ProjectiveCamera
from util.iou_util import ut_homography_warp
from utils import mouse_handler
from utils import get_two_points
from options.test_options import TestOptions
from models.models import create_model
import os
import argparse
import torch
import torchvision.transforms as transforms
import torch.backends.cudnn as cudnn
import time
from PIL import Image
from deep.siamese import BranchNetwork, SiameseNetwork
from deep.camera_dataset import CameraDataset
def generate_HOG_feature(edge_map):
# Generate HoG feature from testset edge images
# HoG parameters
win_size = (128, 128)
block_size = (32, 32)
block_stride = (32, 32)
cell_size = (32, 32)
n_bins = 9
im_h, im_w = 180, 320
hog = cv.HOGDescriptor(win_size, block_size, block_stride, cell_size, n_bins)
h, w, c = edge_map.shape
    # a single edge image is processed here
features = []
edge_image = edge_map[:,:,:]
edge_image = cv.resize(edge_image, (im_w, im_h))
edge_image = cv.cvtColor(edge_image, cv.COLOR_BGR2GRAY)
feat = hog.compute(edge_image)
features.append(feat)
features = np.squeeze(np.asarray(features), axis=2)
return features
def initialize_deep_feature(deep_model_directory):
cuda_id = -1 #use -1 for CPU and 0 for GPU
# 2: load network
branch = BranchNetwork()
net = SiameseNetwork(branch)
if os.path.isfile(deep_model_directory):
checkpoint = torch.load(deep_model_directory, map_location=lambda storage, loc: storage)
net.load_state_dict(checkpoint['state_dict'])
print('load model file from {}.'.format(deep_model_directory))
else:
print('Error: file not found at {}'.format(deep_model_directory))
# 3: setup computation device
device = 'cpu'
if torch.cuda.is_available():
device = torch.device('cuda:{}'.format(cuda_id))
net = net.to(device)
cudnn.benchmark = True
print('computation device: {}'.format(device))
normalize = transforms.Normalize(mean=[0.0188],
std=[0.128])
data_transform = transforms.Compose(
[ transforms.ToTensor(),
normalize,
]
)
return net,data_transform , device
def generate_deep_feature(edge_map,net,data_transform, device):
"""
Extract feature from a siamese network
input: network and edge images
output: feature and camera
"""
#parameters
batch_size = 1
#resize image
pivot_image = edge_map
pivot_image = cv.resize(pivot_image ,(320,180))
pivot_image = cv.cvtColor(pivot_image, cv.COLOR_RGB2GRAY)
pivot_images = np.reshape(pivot_image,(1,pivot_image.shape[0],pivot_image.shape[1]))
print('Note: assume input image resolution is 180 x 320 (h x w)')
data_loader = CameraDataset(pivot_images,
pivot_images,
batch_size,
-1,
data_transform,
is_train=False)
features = []
with torch.no_grad():
for i in range(len(data_loader)):
x, _ = data_loader[i]
x = x.to(device)
feat = net.feature_numpy(x) # N x C
features.append(feat)
# append to the feature list
    features = np.vstack(features)
return features, pivot_image
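# Note: for the single edge map passed in, 'features' is a (1, C) array, so the
# caller indexes it with test_features[query_index] further below.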
def initialize_two_GAN(directory):
opt = TestOptions().parse(directory)
opt.nThreads = 1 # test code only supports nThreads = 1
opt.batchSize = 1 # test code only supports batchSize = 1
opt.serial_batches = True # no shuffle
opt.no_flip = True # no flip
opt.continue_train = False
model = create_model(opt)
return model
def testing_two_GAN(image, model):
# test
if __name__ == '__main__':
image=Image.fromarray(image)
osize = [512,256]
cropsize = osize
image=transforms.Compose([transforms.Scale(osize, Image.BICUBIC),transforms.RandomCrop(cropsize),transforms.ToTensor(),transforms.Normalize((0.5, 0.5, 0.5),(0.5, 0.5, 0.5))])(image)
image=image.unsqueeze(0)
model.set_input(image)
model.test()
visuals = model.get_current_visuals()
edge_map= visuals['fake_D']
seg_map = visuals['fake_C']
edge_map = cv.resize(edge_map,(1280,720),interpolation=5)
seg_map = cv.resize(seg_map,(1280,720))
return edge_map,seg_map
################################### STEP 0 : get the addresses ###########################################
address_parser = argparse.ArgumentParser()
address_parser.add_argument('--image', required=True, type=str, help='sth like "./my_pic.png" ')
address_parser.add_argument('--advertising_image', required=False, type=str, help='sth like "./my_billboard.png" ')
address_args = address_parser.parse_args()
#########################################################################################################
################################## The base parameter which you have to set ############################################
feature_type ='deep'
print(feature_type)
#########################################################################################################################
query_index = 0
"""
Estimate a homography using edge images
"""
######################## Step 1: get the current directory working:######################################################
from pathlib import Path
current_directory = str(Path(__file__).resolve().parent) # like this /home/skovorodkin/stack/scripts
print("current_directory is: "+ current_directory)
# ##################################################### Step 2: load data ###############################################
# database
if feature_type == "deep":
deep_database_directory = current_directory + "/data_2/features/feature_camera_91k.mat"
data=sio.loadmat(deep_database_directory)
database_features = data['features']
database_cameras = data['cameras']
deep_model_directory = current_directory + "/deep/deep_network.pth"
net,data_transform ,device= initialize_deep_feature(deep_model_directory)
else: #HOG feature database
HOG_database_directory = current_directory + "/data_2/features/database_camera_feature_HoG.mat"
data = sio.loadmat(HOG_database_directory)
database_features=data['features']
database_cameras = data['cameras']
#-------------------------------------------------------------------------------------------------------------
# testing edge image from two-GAN
model = initialize_two_GAN(current_directory)
cap = cv.VideoCapture(address_args.image)
warped_out = cv.VideoWriter(current_directory + r"/warped_output.avi",cv.VideoWriter_fourcc('M','J','P','G'), 1, (460,296))
retrieved_out = cv.VideoWriter(current_directory + r"/retrieved_output.avi",cv.VideoWriter_fourcc('M','J','P','G'), 1, (1280,720))
if address_args.advertising_image :
overlayed_out = cv.VideoWriter(current_directory + r"/overlayed_output.avi",cv.VideoWriter_fourcc('M','J','P','G'), 1, (1280,720))
#ret1=cap.set(cv.CAP_PROP_FRAME_WIDTH,1024)
#ret2=cap.set(cv.CAP_PROP_FRAME_HEIGHT,720)
print(cap.get(3))
print(cap.get(4))
print(cap.get(7))
while(cap.isOpened()):
start_time = time.time() ## ===> for measuring execution time
for i in range(int(cap.get(5))): ##just every second
ret, frame = cap.read()
if cap.get(3)!= 1280.0 or cap.get(4)!= 720.0 :
        frame = cv.resize(frame,(1280,720))  # ===> for videos whose resolution is not 1280x720
cv.waitKey(1)
edge_map ,seg_map = testing_two_GAN (frame,model)
########################################################################################
if feature_type == "deep":
test_features , reduced_edge_map = generate_deep_feature(edge_map ,net,data_transform,device)
else: #HOG feature
test_features = generate_HOG_feature(edge_map)
#--------------------------------------------------------------------------------------------------------
# World Cup soccer template
data = sio.loadmat(current_directory + "/data_2/worldcup2014.mat")
model_points = data['points']
model_line_index = data['line_segment_index']
template_h = 74 # yard, soccer template
template_w = 115
##########################################################################################################################################
############################################ Step 2: retrieve a camera using deep features ################################################
flann = pyflann.FLANN()
result, _ = flann.nn(database_features, test_features[query_index], 1, algorithm="kdtree", trees=8, checks=64)
retrieved_index = result[0]
"""
Retrieval camera: get the nearest-neighbor camera from database
"""
retrieved_camera_data = database_cameras[retrieved_index]
u, v, fl = retrieved_camera_data[0:3]
rod_rot = retrieved_camera_data[3:6]
cc = retrieved_camera_data[6:9]
retrieved_camera = ProjectiveCamera(fl, u, v, cc, rod_rot)
retrieved_h = IouUtil.template_to_image_homography_uot(retrieved_camera, template_h, template_w)
retrieved_image = SyntheticUtil.camera_to_edge_image(retrieved_camera_data, model_points, model_line_index,
im_h=720, im_w=1280, line_width=2)
"""
Refine camera: refine camera pose using Lucas-Kanade algorithm
"""
dist_threshold = 50
query_dist = SyntheticUtil.distance_transform(edge_map)
retrieved_dist = SyntheticUtil.distance_transform(retrieved_image)
query_dist[query_dist > dist_threshold] = dist_threshold
retrieved_dist[retrieved_dist > dist_threshold] = dist_threshold
h_retrieved_to_query = SyntheticUtil.find_transform(retrieved_dist, query_dist)
refined_h = h_retrieved_to_query@retrieved_h
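    # Composition note: retrieved_h maps the soccer template into the retrieved
    # camera's image plane, and h_retrieved_to_query warps that image onto the
    # current frame, so refined_h maps template coordinates -> query frame.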
####################################################################################################################
## Warp source image to destination based on homography
im_out = cv.warpPerspective(seg_map, np.linalg.inv(refined_h), (115,74), borderMode=cv.BORDER_CONSTANT)
frame = cv.resize(frame,(1280,720),interpolation = cv.INTER_CUBIC)
################################################### advertisement overlaying #######################################
if address_args.advertising_image :
billboard = cv.imread(address_args.advertising_image)
billboard= np.tile(billboard, (1,2, 1))
billboard = cv.resize(billboard,(115,74),interpolation = cv.INTER_CUBIC)
im_out_2 = cv.warpPerspective(billboard,refined_h, (1280,720), borderMode=cv.BORDER_CONSTANT)
im_out_2 = cv.addWeighted(frame,0.9,im_out_2,0.2,0.0)
#cv.imshow("ss",im_out_2 )
#cv.waitKey()
###################################################################################################################
model_address=current_directory + "/model.jpg"
model_image=cv.imread(model_address)
model_image=cv.resize(model_image,(115,74))
new_image=cv.addWeighted(model_image,1,im_out,1,0)
new_image=cv.resize(new_image,(460,296),interpolation=1)
# Display images
"""cv.waitKey(200)
cv.imshow('frame',frame)
cv.waitKey()
cv.imshow('overlayed image', im_out_2)
cv.waitKey()
cv.imshow('Edge image of retrieved camera', retrieved_image)
cv.waitKey()
cv.imshow("Warped Source Image", new_image)
cv.waitKey()"""
if address_args.advertising_image :
overlayed_out.write(im_out_2)
retrieved_out.write(retrieved_image)
warped_out.write(new_image)
print("--- %s seconds ---" % (time.time() - start_time))
if cv.waitKey(1) & 0xFF == ord('q'):
break
cap.release()
warped_out.release()
retrieved_out.release()
if address_args.advertising_image :
overlayed_out.release()
cv.destroyAllWindows()
| [
"util.synthetic_util.SyntheticUtil.distance_transform",
"scipy.io.loadmat",
"deep.camera_dataset.CameraDataset",
"deep.siamese.SiameseNetwork",
"cv2.warpPerspective",
"torch.cuda.is_available",
"cv2.destroyAllWindows",
"numpy.reshape",
"argparse.ArgumentParser",
"util.synthetic_util.SyntheticUtil.camera_to_edge_image",
"pathlib.Path",
"numpy.asarray",
"deep.siamese.BranchNetwork",
"cv2.addWeighted",
"numpy.vstack",
"cv2.VideoWriter_fourcc",
"torchvision.transforms.ToTensor",
"cv2.waitKey",
"numpy.tile",
"torchvision.transforms.Scale",
"os.path.isfile",
"options.test_options.TestOptions",
"cv2.cvtColor",
"torchvision.transforms.Normalize",
"cv2.resize",
"time.time",
"cv2.imread",
"PIL.Image.fromarray",
"torch.load",
"util.projective_camera.ProjectiveCamera",
"models.models.create_model",
"cv2.HOGDescriptor",
"torchvision.transforms.RandomCrop",
"pyflann.FLANN",
"numpy.linalg.inv",
"cv2.VideoCapture",
"torch.no_grad",
"util.synthetic_util.SyntheticUtil.find_transform",
"util.iou_util.IouUtil.template_to_image_homography_uot"
] | [((4924, 4949), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (4947, 4949), False, 'import argparse\n'), ((6984, 7019), 'cv2.VideoCapture', 'cv.VideoCapture', (['address_args.image'], {}), '(address_args.image)\n', (6999, 7019), True, 'import cv2 as cv\n'), ((12394, 12416), 'cv2.destroyAllWindows', 'cv.destroyAllWindows', ([], {}), '()\n', (12414, 12416), True, 'import cv2 as cv\n'), ((934, 1005), 'cv2.HOGDescriptor', 'cv.HOGDescriptor', (['win_size', 'block_size', 'block_stride', 'cell_size', 'n_bins'], {}), '(win_size, block_size, block_stride, cell_size, n_bins)\n', (950, 1005), True, 'import cv2 as cv\n'), ((1138, 1173), 'cv2.resize', 'cv.resize', (['edge_image', '(im_w, im_h)'], {}), '(edge_image, (im_w, im_h))\n', (1147, 1173), True, 'import cv2 as cv\n'), ((1191, 1233), 'cv2.cvtColor', 'cv.cvtColor', (['edge_image', 'cv.COLOR_BGR2GRAY'], {}), '(edge_image, cv.COLOR_BGR2GRAY)\n', (1202, 1233), True, 'import cv2 as cv\n'), ((1514, 1529), 'deep.siamese.BranchNetwork', 'BranchNetwork', ([], {}), '()\n', (1527, 1529), False, 'from deep.siamese import BranchNetwork, SiameseNetwork\n'), ((1540, 1562), 'deep.siamese.SiameseNetwork', 'SiameseNetwork', (['branch'], {}), '(branch)\n', (1554, 1562), False, 'from deep.siamese import BranchNetwork, SiameseNetwork\n'), ((1571, 1607), 'os.path.isfile', 'os.path.isfile', (['deep_model_directory'], {}), '(deep_model_directory)\n', (1585, 1607), False, 'import os\n'), ((1988, 2013), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2011, 2013), False, 'import torch\n'), ((2211, 2259), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': '[0.0188]', 'std': '[0.128]'}), '(mean=[0.0188], std=[0.128])\n', (2231, 2259), True, 'import torchvision.transforms as transforms\n'), ((2752, 2786), 'cv2.resize', 'cv.resize', (['pivot_image', '(320, 180)'], {}), '(pivot_image, (320, 180))\n', (2761, 2786), True, 'import cv2 as cv\n'), ((2819, 2862), 'cv2.cvtColor', 'cv.cvtColor', (['pivot_image', 'cv.COLOR_RGB2GRAY'], {}), '(pivot_image, cv.COLOR_RGB2GRAY)\n', (2830, 2862), True, 'import cv2 as cv\n'), ((2887, 2959), 'numpy.reshape', 'np.reshape', (['pivot_image', '(1, pivot_image.shape[0], pivot_image.shape[1])'], {}), '(pivot_image, (1, pivot_image.shape[0], pivot_image.shape[1]))\n', (2897, 2959), True, 'import numpy as np\n'), ((3051, 3144), 'deep.camera_dataset.CameraDataset', 'CameraDataset', (['pivot_images', 'pivot_images', 'batch_size', '(-1)', 'data_transform'], {'is_train': '(False)'}), '(pivot_images, pivot_images, batch_size, -1, data_transform,\n is_train=False)\n', (3064, 3144), False, 'from deep.camera_dataset import CameraDataset\n'), ((3573, 3592), 'numpy.vstack', 'np.vstack', (['features'], {}), '(features)\n', (3582, 3592), True, 'import numpy as np\n'), ((3952, 3969), 'models.models.create_model', 'create_model', (['opt'], {}), '(opt)\n', (3964, 3969), False, 'from models.models import create_model\n'), ((6256, 6292), 'scipy.io.loadmat', 'sio.loadmat', (['deep_database_directory'], {}), '(deep_database_directory)\n', (6267, 6292), True, 'import scipy.io as sio\n'), ((6666, 6701), 'scipy.io.loadmat', 'sio.loadmat', (['HOG_database_directory'], {}), '(HOG_database_directory)\n', (6677, 6701), True, 'import scipy.io as sio\n'), ((7091, 7132), 'cv2.VideoWriter_fourcc', 'cv.VideoWriter_fourcc', (['"""M"""', '"""J"""', '"""P"""', '"""G"""'], {}), "('M', 'J', 'P', 'G')\n", (7112, 7132), True, 'import cv2 as cv\n'), ((7221, 7262), 'cv2.VideoWriter_fourcc', 'cv.VideoWriter_fourcc', 
(['"""M"""', '"""J"""', '"""P"""', '"""G"""'], {}), "('M', 'J', 'P', 'G')\n", (7242, 7262), True, 'import cv2 as cv\n'), ((7634, 7645), 'time.time', 'time.time', ([], {}), '()\n', (7643, 7645), False, 'import time\n'), ((7935, 7948), 'cv2.waitKey', 'cv.waitKey', (['(1)'], {}), '(1)\n', (7945, 7948), True, 'import cv2 as cv\n'), ((8492, 8551), 'scipy.io.loadmat', 'sio.loadmat', (["(current_directory + '/data_2/worldcup2014.mat')"], {}), "(current_directory + '/data_2/worldcup2014.mat')\n", (8503, 8551), True, 'import scipy.io as sio\n'), ((9003, 9018), 'pyflann.FLANN', 'pyflann.FLANN', ([], {}), '()\n', (9016, 9018), False, 'import pyflann\n'), ((9462, 9501), 'util.projective_camera.ProjectiveCamera', 'ProjectiveCamera', (['fl', 'u', 'v', 'cc', 'rod_rot'], {}), '(fl, u, v, cc, rod_rot)\n', (9478, 9501), False, 'from util.projective_camera import ProjectiveCamera\n'), ((9521, 9607), 'util.iou_util.IouUtil.template_to_image_homography_uot', 'IouUtil.template_to_image_homography_uot', (['retrieved_camera', 'template_h', 'template_w'], {}), '(retrieved_camera, template_h,\n template_w)\n', (9561, 9607), False, 'from util.iou_util import IouUtil\n'), ((9627, 9755), 'util.synthetic_util.SyntheticUtil.camera_to_edge_image', 'SyntheticUtil.camera_to_edge_image', (['retrieved_camera_data', 'model_points', 'model_line_index'], {'im_h': '(720)', 'im_w': '(1280)', 'line_width': '(2)'}), '(retrieved_camera_data, model_points,\n model_line_index, im_h=720, im_w=1280, line_width=2)\n', (9661, 9755), False, 'from util.synthetic_util import SyntheticUtil\n'), ((9932, 9974), 'util.synthetic_util.SyntheticUtil.distance_transform', 'SyntheticUtil.distance_transform', (['edge_map'], {}), '(edge_map)\n', (9964, 9974), False, 'from util.synthetic_util import SyntheticUtil\n'), ((9996, 10045), 'util.synthetic_util.SyntheticUtil.distance_transform', 'SyntheticUtil.distance_transform', (['retrieved_image'], {}), '(retrieved_image)\n', (10028, 10045), False, 'from util.synthetic_util import SyntheticUtil\n'), ((10205, 10261), 'util.synthetic_util.SyntheticUtil.find_transform', 'SyntheticUtil.find_transform', (['retrieved_dist', 'query_dist'], {}), '(retrieved_dist, query_dist)\n', (10233, 10261), False, 'from util.synthetic_util import SyntheticUtil\n'), ((10615, 10674), 'cv2.resize', 'cv.resize', (['frame', '(1280, 720)'], {'interpolation': 'cv.INTER_CUBIC'}), '(frame, (1280, 720), interpolation=cv.INTER_CUBIC)\n', (10624, 10674), True, 'import cv2 as cv\n'), ((11479, 11503), 'cv2.imread', 'cv.imread', (['model_address'], {}), '(model_address)\n', (11488, 11503), True, 'import cv2 as cv\n'), ((11520, 11553), 'cv2.resize', 'cv.resize', (['model_image', '(115, 74)'], {}), '(model_image, (115, 74))\n', (11529, 11553), True, 'import cv2 as cv\n'), ((11567, 11611), 'cv2.addWeighted', 'cv.addWeighted', (['model_image', '(1)', 'im_out', '(1)', '(0)'], {}), '(model_image, 1, im_out, 1, 0)\n', (11581, 11611), True, 'import cv2 as cv\n'), ((11623, 11672), 'cv2.resize', 'cv.resize', (['new_image', '(460, 296)'], {'interpolation': '(1)'}), '(new_image, (460, 296), interpolation=1)\n', (11632, 11672), True, 'import cv2 as cv\n'), ((1322, 1342), 'numpy.asarray', 'np.asarray', (['features'], {}), '(features)\n', (1332, 1342), True, 'import numpy as np\n'), ((1630, 1705), 'torch.load', 'torch.load', (['deep_model_directory'], {'map_location': '(lambda storage, loc: storage)'}), '(deep_model_directory, map_location=lambda storage, loc: storage)\n', (1640, 1705), False, 'import torch\n'), ((3311, 3326), 'torch.no_grad', 
'torch.no_grad', ([], {}), '()\n', (3324, 3326), False, 'import torch\n'), ((4104, 4126), 'PIL.Image.fromarray', 'Image.fromarray', (['image'], {}), '(image)\n', (4119, 4126), False, 'from PIL import Image\n'), ((4643, 4692), 'cv2.resize', 'cv.resize', (['edge_map', '(1280, 720)'], {'interpolation': '(5)'}), '(edge_map, (1280, 720), interpolation=5)\n', (4652, 4692), True, 'import cv2 as cv\n'), ((4708, 4739), 'cv2.resize', 'cv.resize', (['seg_map', '(1280, 720)'], {}), '(seg_map, (1280, 720))\n', (4717, 4739), True, 'import cv2 as cv\n'), ((7393, 7434), 'cv2.VideoWriter_fourcc', 'cv.VideoWriter_fourcc', (['"""M"""', '"""J"""', '"""P"""', '"""G"""'], {}), "('M', 'J', 'P', 'G')\n", (7414, 7434), True, 'import cv2 as cv\n'), ((7855, 7884), 'cv2.resize', 'cv.resize', (['frame', '(1280, 720)'], {}), '(frame, (1280, 720))\n', (7864, 7884), True, 'import cv2 as cv\n'), ((10535, 10559), 'numpy.linalg.inv', 'np.linalg.inv', (['refined_h'], {}), '(refined_h)\n', (10548, 10559), True, 'import numpy as np\n'), ((10856, 10897), 'cv2.imread', 'cv.imread', (['address_args.advertising_image'], {}), '(address_args.advertising_image)\n', (10865, 10897), True, 'import cv2 as cv\n'), ((10917, 10946), 'numpy.tile', 'np.tile', (['billboard', '(1, 2, 1)'], {}), '(billboard, (1, 2, 1))\n', (10924, 10946), True, 'import numpy as np\n'), ((10966, 11027), 'cv2.resize', 'cv.resize', (['billboard', '(115, 74)'], {'interpolation': 'cv.INTER_CUBIC'}), '(billboard, (115, 74), interpolation=cv.INTER_CUBIC)\n', (10975, 11027), True, 'import cv2 as cv\n'), ((11055, 11144), 'cv2.warpPerspective', 'cv.warpPerspective', (['billboard', 'refined_h', '(1280, 720)'], {'borderMode': 'cv.BORDER_CONSTANT'}), '(billboard, refined_h, (1280, 720), borderMode=cv.\n BORDER_CONSTANT)\n', (11073, 11144), True, 'import cv2 as cv\n'), ((11175, 11221), 'cv2.addWeighted', 'cv.addWeighted', (['frame', '(0.9)', 'im_out_2', '(0.2)', '(0.0)'], {}), '(frame, 0.9, im_out_2, 0.2, 0.0)\n', (11189, 11221), True, 'import cv2 as cv\n'), ((2350, 2371), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (2369, 2371), True, 'import torchvision.transforms as transforms\n'), ((3674, 3687), 'options.test_options.TestOptions', 'TestOptions', ([], {}), '()\n', (3685, 3687), False, 'from options.test_options import TestOptions\n'), ((12219, 12232), 'cv2.waitKey', 'cv.waitKey', (['(1)'], {}), '(1)\n', (12229, 12232), True, 'import cv2 as cv\n'), ((5861, 5875), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (5865, 5875), False, 'from pathlib import Path\n'), ((12181, 12192), 'time.time', 'time.time', ([], {}), '()\n', (12190, 12192), False, 'import time\n'), ((4213, 4251), 'torchvision.transforms.Scale', 'transforms.Scale', (['osize', 'Image.BICUBIC'], {}), '(osize, Image.BICUBIC)\n', (4229, 4251), True, 'import torchvision.transforms as transforms\n'), ((4252, 4283), 'torchvision.transforms.RandomCrop', 'transforms.RandomCrop', (['cropsize'], {}), '(cropsize)\n', (4273, 4283), True, 'import torchvision.transforms as transforms\n'), ((4284, 4305), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (4303, 4305), True, 'import torchvision.transforms as transforms\n'), ((4306, 4360), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.5, 0.5, 0.5)', '(0.5, 0.5, 0.5)'], {}), '((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n', (4326, 4360), True, 'import torchvision.transforms as transforms\n')] |
import os, pickle
from glob import glob
from document import Document, Paragraph, Table
from utils import *
# declare global variable for the demo
data_dir = "O:/SU_March_Valeurs/Centre d'Excellence en Dérivés/Lab Fintech/Réglementation/Atelier CRM/"
ressource_lst = ["Assurances et institutions de dépôt",
"Distribution de produits et services financiers",
"Instruments dérivés",
"Valeurs mobilières"]
consitituion_lst = ["Loi", "Règlement", "Avis", "Directive", "Guide"]
langauge_lst = ["anglais", "français"]
def read_consitituion(ressource, consitituion, langauge):
def keep_file(filename):
return ressource in filename and consitituion in filename and langauge in filename
print("Reading file in pickle format")
file_lst = [y for x in os.walk(data_dir) for y in glob(os.path.join(x[0], '*pkl'))]
file_lst = [filename for filename in file_lst if keep_file(filename)]
for filename in file_lst:
print("print the content of %s" % filename)
read_pkl(filename)
print("Reading file in docx format")
file_lst = [y for x in os.walk(data_dir) for y in glob(os.path.join(x[0], '*docx'))]
file_lst = [filename for filename in file_lst if keep_file(filename)]
for filename in file_lst:
print("print the content of %s" % filename)
read_docx(filename)
def read_pkl(filename):
# opening the pickle
with open(filename, 'rb') as fp:
chapter_lst = pickle.load(fp)
    # Files written in pkl format are lists of chapters
    for chap_id, chapter in enumerate(chapter_lst):
        # each chapter has a header and a section list that
        # can be empty
header = chapter.header
section_lst = chapter.section_lst
# print header id (e.g. CHAPTER I) and the title (e.g. SPECIAL PROVISION)
print(header.tid)
print(header.text)
for section in section_lst:
print("Begin of Section", "\n")
# each section is composed of subsection_lst
for subsection in section.subsection_lst:
                # a subsection can be a list of paragraphs
if isinstance(subsection, list):
for paragraph in subsection:
print(paragraph.tid, paragraph.text)
# or a single paragraph
else:
print(subsection.tid, subsection.text)
            # Also, each section has historical_notes (i.e. footnotes)
            # that refer to other parts of the law or even to other laws
for historical_note in section.historical_note_lst:
print(historical_note.text)
print("End of Section", "\n")
def read_docs_from_pkl(filename):
doc_string = ""
# opening the pickle
with open(filename, 'rb') as fp:
chapter_lst = pickle.load(fp)
    # Files written in pkl format are lists of chapters
    for chap_id, chapter in enumerate(chapter_lst):
        # each chapter has a header and a section list that
        # can be empty
header = chapter.header
section_lst = chapter.section_lst
# print header id (e.g. CHAPTER I) and the title (e.g. SPECIAL PROVISION)
#print(header.tid)
#print(header.text)
doc_string += header.text
doc_string += "\n"
for section in section_lst:
#print("Begin of Section", "\n")
# each section is composed of subsection_lst
for subsection in section.subsection_lst:
                # a subsection can be a list of paragraphs
if isinstance(subsection, list):
for paragraph in subsection:
#print(paragraph.tid, paragraph.text)
doc_string += paragraph.text
doc_string += "\n"
# or a single paragraph
else:
#print(subsection.tid, subsection.text)
                    doc_string += subsection.text
doc_string += "\n"
            # Also, each section has historical_notes (i.e. footnotes)
            # that refer to other parts of the law or even to other laws
for historical_note in section.historical_note_lst:
#print(historical_note.text)
doc_string += historical_note.text
doc_string += "\n"
#print("End of Section", "\n")
return doc_string
def read_pass_from_pkl(filename):
passages = []
# opening the pickle
with open(filename, 'rb') as fp:
chapter_lst = pickle.load(fp)
    # Files written in pkl format are lists of chapters
    for chap_id, chapter in enumerate(chapter_lst):
        # each chapter has a header and a section list that
        # can be empty
header = chapter.header
section_lst = chapter.section_lst
# print header id (e.g. CHAPTER I) and the title (e.g. SPECIAL PROVISION)
#print(header.tid)
#print(header.text)
doc_tid = header.tid
doc_header = header.text
for section in section_lst:
#print("Begin of Section", "\n")
sec_string = doc_tid + " " + doc_header + "\n"
# each section is composed of subsection_lst
for subsection in section.subsection_lst:
                # a subsection can be a list of paragraphs
if isinstance(subsection, list):
for paragraph in subsection:
#print(paragraph.tid, paragraph.text)
sec_string += paragraph.tid + " " + paragraph.text + "\n"
# or a single paragraph
else:
#print(subsection.tid, subsection.text)
                    sec_string += subsection.tid + " " + subsection.text + "\n"
            # Also, each section has historical_notes (i.e. footnotes)
            # that refer to other parts of the law or even to other laws
for historical_note in section.historical_note_lst:
#print(historical_note.text)
sec_string += historical_note.text + "\n"
passages.append(sec_string)
return passages
def read_docx(filename):
doc = Document(filename=filename, meta={})
# this function will print the document
doc_elements = doc.print()
    # a docx file is a list of blocks (Paragraph or Table)
for idx, block in enumerate(doc.story):
if isinstance(block, Paragraph):
#block.text
# a paragraph contains a list of Run
for run in block.runs:
# Run contains .text attribute
text = run.text
                # and a Font class (attrs: bold, italic, etc ...)
is_bold = run.font.bold
elif isinstance(block, Table):
            # Table contains a rows attribute (a list of lists)
# iterating on each row in the table
for row in block.rows:
# iterating on each cell in the row
for run in row:
# Run contains .text attribute
text = run.text
                    # and a Font class (attrs: bold, italic, etc ...)
is_bold = run.font.bold
return doc_elements
def read_pass_from_docx(filename):
doc = Document(filename=filename, meta={})
# this function will print the document
doc_elements = doc.print()
    # a docx file is a list of blocks (Paragraph or Table)
for idx, block in enumerate(doc.story):
if isinstance(block, Paragraph):
#print(block.text)
# a paragraph contains a list of Run
for run in block.runs:
# Run contains .text attribute
text = run.text
                # and a Font class (attrs: bold, italic, etc ...)
is_bold = run.font.bold
elif isinstance(block, Table):
            # Table contains a rows attribute (a list of lists)
# iterating on each row in the table
for row in block.rows:
# iterating on each cell in the row
for run in row:
# Run contains .text attribute
text = run.text
                    # and a Font class (attrs: bold, italic, etc ...)
is_bold = run.font.bold
return doc_elements
def decompose_docx(filename):
def cond(block):
if any([run.font.size and run.font.bold for run in block.runs]): return True
letter = [c for c in block.text if c.isalpha()]
if len(letter) > 15 and len([c for c in letter if c.isupper()]) == len(letter): return True
return False
doc = Document(filename=filename, meta={})
sections, par_lst = [], []
for idx, block in enumerate(doc.story):
if isinstance(block, Paragraph):
if cond(block) and len(par_lst) > 4:
sections.append(par_lst)
par_lst = []
if block.text:
par_lst.append(block.text)
if par_lst:
sections.append(par_lst)
return sections
#filename = "full/path/to/file.docx"
#sections = decompose_docx(filename)
#for par_lst in sections:
# print("\n".join(par_lst))
# print("\n\n\n\n\n\n")
# demo to read .pkl files
#read_consitituion("Valeurs mobilières", "Loi", "anglais")
# demo to read .docx file
#read_consitituion("Valeurs mobilières", "Règlement", "anglais")
| [
"os.path.join",
"document.Document",
"pickle.load",
"os.walk"
] | [((6419, 6455), 'document.Document', 'Document', ([], {'filename': 'filename', 'meta': '{}'}), '(filename=filename, meta={})\n', (6427, 6455), False, 'from document import Document, Paragraph, Table\n'), ((7542, 7578), 'document.Document', 'Document', ([], {'filename': 'filename', 'meta': '{}'}), '(filename=filename, meta={})\n', (7550, 7578), False, 'from document import Document, Paragraph, Table\n'), ((8955, 8991), 'document.Document', 'Document', ([], {'filename': 'filename', 'meta': '{}'}), '(filename=filename, meta={})\n', (8963, 8991), False, 'from document import Document, Paragraph, Table\n'), ((1531, 1546), 'pickle.load', 'pickle.load', (['fp'], {}), '(fp)\n', (1542, 1546), False, 'import os, pickle\n'), ((2954, 2969), 'pickle.load', 'pickle.load', (['fp'], {}), '(fp)\n', (2965, 2969), False, 'import os, pickle\n'), ((4740, 4755), 'pickle.load', 'pickle.load', (['fp'], {}), '(fp)\n', (4751, 4755), False, 'import os, pickle\n'), ((841, 858), 'os.walk', 'os.walk', (['data_dir'], {}), '(data_dir)\n', (848, 858), False, 'import os, pickle\n'), ((1163, 1180), 'os.walk', 'os.walk', (['data_dir'], {}), '(data_dir)\n', (1170, 1180), False, 'import os, pickle\n'), ((873, 899), 'os.path.join', 'os.path.join', (['x[0]', '"""*pkl"""'], {}), "(x[0], '*pkl')\n", (885, 899), False, 'import os, pickle\n'), ((1195, 1222), 'os.path.join', 'os.path.join', (['x[0]', '"""*docx"""'], {}), "(x[0], '*docx')\n", (1207, 1222), False, 'import os, pickle\n')] |
# =========================================================================
# Copyright 2020 <NAME>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#==========================================================================
import json
import os.path
import asyncio
class File_Builder():
'''
    This class provides helpers for reading, writing and deleting JSON files
'''
# Async write and rewrite json files
async def writeToFileAsync(self, file, data):
sites = []
count = 0
if os.path.exists(file):
sites = await self.readFromFileAsync(file)
if 'manga_sites.json' in file:
for index, site in enumerate(sites):
if site['name'] == data['name']:
sites[index] = data
else: count += 1
if count == len(sites): sites.append(data)
elif 'manga.json' in file:
# if type(data) is list: sites = self.fromMangaToDict(data)
# else: sites.append(data)
for index, site in enumerate(sites):
if site['url'] == data['url']:
sites[index] = data
else: count += 1
if count == len(sites): sites.append(data)
try:
with open(file, 'w', encoding='utf-8') as f:
json.dump(sites, f, ensure_ascii=False)
except:
raise Exception('Failed to async write data to the file!')
# Write and rewrite json files
def writeToFile(self, file, data):
sites = []
count = 0
if os.path.exists(file):
sites = self.readFromFile(file)
if 'manga_sites.json' in file:
for index, site in enumerate(sites):
if site['name'] == data['name']:
sites[index] = data
else: count += 1
if count == len(sites): sites.append(data)
elif 'manga.json' in file:
for index, site in enumerate(sites):
if site['url'] == data['url']:
sites[index] = data
else: count += 1
if count == len(sites): sites.append(data)
try:
with open(file, 'w', encoding='utf-8') as f:
json.dump(sites, f, ensure_ascii=False)
except:
raise Exception('Failed to write data to the file!')
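    # Minimal usage sketch (hypothetical data): writeToFile upserts an entry,
    # matching on 'name' for manga_sites.json and on 'url' for manga.json:
    #   File_Builder().writeToFile('manga_sites.json', {'name': 'ExampleSite', 'optimized': True, 'test_link': '', 'xpaths': {}})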
# Async read data from json file
async def readFromFileAsync(self, file):
if os.path.exists(file):
try:
with open(file, 'r', encoding='utf-8') as f:
data = json.load(f)
return data
except:
raise Exception('Failed to async load data!')
else: return None
# Read data from json file
def readFromFile(self, file):
if os.path.exists(file):
try:
with open(file, 'r', encoding='utf-8') as f:
data = json.load(f)
return data
except:
raise Exception('Failed to load data!')
else: return None
# Delete data from file
def deleteFromFile(self, file, data):
if 'manga_sites.json' in file:
deleted_item = self.manga_site_to_dict(list([data]))
elif 'manga.json' in file:
deleted_item = self.manga_to_dict(list([data]))
file_content = self.readFromFile(file)
file_content.remove(deleted_item[0])
with open(file, 'w', encoding='utf-8') as f:
json.dump(file_content, f, ensure_ascii=False)
# Convert manga objects to dict
def manga_to_dict(self, manga_list):
list_of_dict = []
for manga in manga_list:
manga_dict = {
'url': manga.data['url'],
'name': manga.data['name'],
'site': manga.site.name,
'info': {
'descr': manga.data['info']['descr'],
'img': manga.data['info']['img'],
'latest': manga.data['info']['latest']
}
}
list_of_dict.append(manga_dict)
return list_of_dict
    # Convert manga site objects to dict
def manga_site_to_dict(self, site_list):
list_of_dict = []
for site in site_list:
data = {
'name': site.name,
'optimized': site.optimized,
'test_link': site.test_link,
'xpaths': {
'title': site.xpaths['title'],
'descr': site.xpaths['descr'],
'img': site.xpaths['img'],
'latest': site.xpaths['latest']
}
}
list_of_dict.append(data)
return list_of_dict | [
"json.load",
"json.dump"
] | [((3981, 4027), 'json.dump', 'json.dump', (['file_content', 'f'], {'ensure_ascii': '(False)'}), '(file_content, f, ensure_ascii=False)\n', (3990, 4027), False, 'import json\n'), ((1785, 1824), 'json.dump', 'json.dump', (['sites', 'f'], {'ensure_ascii': '(False)'}), '(sites, f, ensure_ascii=False)\n', (1794, 1824), False, 'import json\n'), ((2714, 2753), 'json.dump', 'json.dump', (['sites', 'f'], {'ensure_ascii': '(False)'}), '(sites, f, ensure_ascii=False)\n', (2723, 2753), False, 'import json\n'), ((3056, 3068), 'json.load', 'json.load', (['f'], {}), '(f)\n', (3065, 3068), False, 'import json\n'), ((3409, 3421), 'json.load', 'json.load', (['f'], {}), '(f)\n', (3418, 3421), False, 'import json\n')] |
from src.utils.initialize import *
import pprint
import pickle
with open('data/processed/movies_with_overviews.pkl','rb') as f:
movies_with_overviews=pickle.load(f)
print("Loaded the list of movies that have overviews from data/processed/movies_with_overviews.pkl.\n")
# list of genres and movie ids in prep for binarization
print("Extracting the genres and movie ids in prep for binarization...")
genres=[]
all_ids=[]
for i in range(len(movies_with_overviews)):
movie=movies_with_overviews[i]
id=movie['id']
genre_ids=movie['genre_ids']
genres.append(genre_ids)
all_ids.extend(genre_ids)
with open('data/processed/genre_ids.pkl','wb') as f:
pickle.dump(genres,f)
print('Saved the genre ids as data/processed/genre_ids.pkl.\n')
# the tmdb package provides a method that returns a dictionary mapping genre ids to genre names.
# we may need to add something if that list is incorrect.
print("Creating a mapping from the genre ids to the genre names...")
genres=tmdb.Genres()
# the movie_list() method of the Genres() class returns a listing of all genres in the form of a dictionary.
list_of_genres=genres.movie_list()['genres']
Genre_ID_to_name={}
for i in range(len(list_of_genres)):
genre_id=list_of_genres[i]['id']
genre_name=list_of_genres[i]['name']
Genre_ID_to_name[genre_id]=genre_name
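# For illustration only (the exact contents depend on the TMDb API response), the
# resulting mapping looks roughly like {28: 'Action', 12: 'Adventure', 35: 'Comedy', ...}.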
for i in set(all_ids):
if i not in Genre_ID_to_name.keys():
print(i)
if i == 10769:
Genre_ID_to_name[10769]="Foreign" # look up what the above genre ids are. see if there's a programmatic way to do it
print("Mapping from genre id to genre name is saved in the Genre_ID_to_name dictionary:")
pprint.pprint(Genre_ID_to_name, indent=4)
print('\n')
with open('data/processed/genre_id_to_name_dict.pkl','wb') as f:
pickle.dump(Genre_ID_to_name,f)
print('Saved the mapping from genre id to genre name as data/processed/genre_id_to_name_dict.pkl.')
| [
"pickle.dump",
"pickle.load",
"pprint.pprint"
] | [((1680, 1721), 'pprint.pprint', 'pprint.pprint', (['Genre_ID_to_name'], {'indent': '(4)'}), '(Genre_ID_to_name, indent=4)\n', (1693, 1721), False, 'import pprint\n'), ((155, 169), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (166, 169), False, 'import pickle\n'), ((677, 699), 'pickle.dump', 'pickle.dump', (['genres', 'f'], {}), '(genres, f)\n', (688, 699), False, 'import pickle\n'), ((1804, 1836), 'pickle.dump', 'pickle.dump', (['Genre_ID_to_name', 'f'], {}), '(Genre_ID_to_name, f)\n', (1815, 1836), False, 'import pickle\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: test_emaillib.py
#
# Copyright 2017 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
"""
Tests for `emaillib` module.
.. _Google Python Style Guide:
http://google.github.io/styleguide/pyguide.html
"""
from betamax.fixtures import unittest
from emaillib.emaillib import Message, SmtpServer, EasySender
__author__ = '''<NAME> <<EMAIL>>'''
__docformat__ = '''google'''
__date__ = '''16-09-2017'''
__copyright__ = '''Copyright 2017, <NAME>'''
__credits__ = ["<NAME>"]
__license__ = '''MIT'''
__maintainer__ = '''<NAME>'''
__email__ = '''<<EMAIL>>'''
__status__ = '''Development''' # "Prototype", "Development", "Production".
class TestEmaillib(unittest.BetamaxTestCase):
def setUp(self):
"""
Test set up
This is where you can setup things that you use throughout the tests. This method is called before every test.
"""
pass
def testNoSender(self):
info = {'sender': '',
'recipients': '<EMAIL>',
'cc': ['<EMAIL>'],
'bcc': '<EMAIL>,<EMAIL>',
'subject': 'Τεστ test',
'body': 'This is a τεστ on utf8'}
with self.assertRaises(ValueError):
message = Message(**info)
def testInvalidSender(self):
invalid = ['a',
'a@',
'@.',
'@a.',
'.@',
'a@@',
'a@@.']
for sender in invalid:
info = {'sender': sender,
'recipients': '<EMAIL>',
'subject': 'Τεστ test',
'body': 'This is a τεστ on utf8'}
with self.assertRaises(ValueError):
message = Message(**info)
def testNoRecipient(self):
info = {'sender': '<EMAIL>',
'recipients': '',
'cc': ['<EMAIL>'],
'bcc': '<EMAIL>,<EMAIL>',
'subject': 'Τεστ test',
'body': 'This is a τεστ on utf8'}
with self.assertRaises(ValueError):
message = Message(**info)
def testInvalidRecipient(self):
invalid = ['a',
'a@',
'@.',
'@a.',
'.@',
'a@@',
'a@@.',
'<EMAIL>,a.',
'inv.domain.com,<EMAIL>']
for recipients in invalid:
info = {'sender': '<EMAIL>',
'recipients': recipients,
'subject': 'Τεστ test',
'body': 'This is a τεστ on utf8'}
with self.assertRaises(ValueError):
print(recipients)
message = Message(**info)
def testInvalidCC(self):
invalid = ['a',
'a@',
'@.',
'@a.',
'.@',
'a@@',
'a@@.',
'<EMAIL>,a.',
'inv.domain.com,<EMAIL>']
for cc in invalid:
info = {'sender': '<EMAIL>',
'recipients': '<EMAIL>',
'cc': cc,
'subject': 'Τεστ test',
'body': 'This is a τεστ on utf8'}
with self.assertRaises(ValueError):
message = Message(**info)
def testInvalidBCC(self):
invalid = ['a',
'a@',
'@.',
'@a.',
'.@',
'a@@',
'a@@.',
'<EMAIL>,a.',
                   'inv.domain.com,<EMAIL>']
for bcc in invalid:
info = {'sender': '<EMAIL>',
'recipients': '<EMAIL>',
'bcc': bcc,
'subject': 'Τεστ test',
'body': 'This is a τεστ on utf8'}
with self.assertRaises(ValueError):
message = Message(**info)
def testValid(self):
info = {'sender': '<EMAIL>',
'recipients': '<EMAIL>',
'cc': ['<EMAIL>'],
'bcc': '<EMAIL>,<EMAIL>',
'subject': 'Τεστ test',
'body': 'This is a τεστ on utf8'}
message = Message(**info)
self.assertTrue(message.subject == u'Τεστ test')
self.assertTrue(message.body == u'This is a τεστ on utf8')
def testInvalidContent(self):
info = {'sender': '',
'recipients': '<EMAIL>',
'cc': ['<EMAIL>'],
'bcc': '<EMAIL>,<EMAIL>',
'subject': 'Τεστ test',
'body': 'This is a τεστ on utf8',
'content': 'Texd'}
with self.assertRaises(ValueError):
message = Message(**info)
def testHtmlContent(self):
info = {'sender': '<EMAIL>',
'recipients': '<EMAIL>',
'cc': ['<EMAIL>'],
'bcc': '<EMAIL>,<EMAIL>',
'subject': 'Τεστ test',
'body': 'This is a τεστ on utf8',
'content': 'html'}
message = Message(**info)
self.assertTrue(message.subject == u'Τεστ test')
self.assertTrue(message.body == u'This is a τεστ on utf8')
def testTextContent(self):
info = {'sender': '<EMAIL>',
'recipients': '<EMAIL>',
'cc': ['<EMAIL>'],
'bcc': '<EMAIL>,<EMAIL>',
'subject': 'Τεστ test',
'body': 'This is a τεστ on utf8',
'content': 'text'}
message = Message(**info)
self.assertTrue(message.subject == u'Τεστ test')
self.assertTrue(message.body == u'This is a τεστ on utf8')
def testRecipients(self):
info = {'sender': '<EMAIL>',
'recipients': '<EMAIL>',
'cc': ['<EMAIL>'],
'bcc': '<EMAIL>,<EMAIL>',
'subject': 'Τεστ test',
'body': 'This is a τεστ on utf8',
'content': 'html'}
message = Message(**info)
self.assertTrue(message.recipients == ['<EMAIL>',
'<EMAIL>',
'<EMAIL>',
'<EMAIL>'])
def testSmtpInstance(self):
info = {'smtp_address': 'smtp.test.com',
'username': 'hacker',
'password': '<PASSWORD>',
'ssl': False,
'tls': True,
'port': 587}
smtp = SmtpServer(**info)
        self.assertTrue(smtp.address == 'smtp.test.com')
self.assertTrue(smtp.username == 'hacker')
self.assertTrue(smtp.password == '<PASSWORD>')
self.assertFalse(smtp.ssl)
self.assertTrue(smtp.tls)
self.assertTrue(smtp.port == 587)
self.assertFalse(smtp.connected)
def tearDown(self):
"""
Test tear down
This is where you should tear down what you've setup in setUp before. This method is called after every test.
"""
pass
| [
"emaillib.emaillib.Message",
"emaillib.emaillib.SmtpServer"
] | [((5336, 5351), 'emaillib.emaillib.Message', 'Message', ([], {}), '(**info)\n', (5343, 5351), False, 'from emaillib.emaillib import Message, SmtpServer, EasySender\n'), ((6196, 6211), 'emaillib.emaillib.Message', 'Message', ([], {}), '(**info)\n', (6203, 6211), False, 'from emaillib.emaillib import Message, SmtpServer, EasySender\n'), ((6666, 6681), 'emaillib.emaillib.Message', 'Message', ([], {}), '(**info)\n', (6673, 6681), False, 'from emaillib.emaillib import Message, SmtpServer, EasySender\n'), ((7135, 7150), 'emaillib.emaillib.Message', 'Message', ([], {}), '(**info)\n', (7142, 7150), False, 'from emaillib.emaillib import Message, SmtpServer, EasySender\n'), ((7649, 7667), 'emaillib.emaillib.SmtpServer', 'SmtpServer', ([], {}), '(**info)\n', (7659, 7667), False, 'from emaillib.emaillib import Message, SmtpServer, EasySender\n'), ((2293, 2308), 'emaillib.emaillib.Message', 'Message', ([], {}), '(**info)\n', (2300, 2308), False, 'from emaillib.emaillib import Message, SmtpServer, EasySender\n'), ((3159, 3174), 'emaillib.emaillib.Message', 'Message', ([], {}), '(**info)\n', (3166, 3174), False, 'from emaillib.emaillib import Message, SmtpServer, EasySender\n'), ((5850, 5865), 'emaillib.emaillib.Message', 'Message', ([], {}), '(**info)\n', (5857, 5865), False, 'from emaillib.emaillib import Message, SmtpServer, EasySender\n'), ((2807, 2822), 'emaillib.emaillib.Message', 'Message', ([], {}), '(**info)\n', (2814, 2822), False, 'from emaillib.emaillib import Message, SmtpServer, EasySender\n'), ((3796, 3811), 'emaillib.emaillib.Message', 'Message', ([], {}), '(**info)\n', (3803, 3811), False, 'from emaillib.emaillib import Message, SmtpServer, EasySender\n'), ((4413, 4428), 'emaillib.emaillib.Message', 'Message', ([], {}), '(**info)\n', (4420, 4428), False, 'from emaillib.emaillib import Message, SmtpServer, EasySender\n'), ((5031, 5046), 'emaillib.emaillib.Message', 'Message', ([], {}), '(**info)\n', (5038, 5046), False, 'from emaillib.emaillib import Message, SmtpServer, EasySender\n')] |
from .graphs import build_Syk, build_receptor, recruit_to_receptor, recruit_to_ligand
from .templates import TransPhosphorylationModel
from wc_rules.modeling.pattern import GraphContainer, Pattern
from wc_rules.modeling.model import AggregateModel
gRecSyk = recruit_to_receptor(build_receptor(['alpha','gamma']), build_Syk(['tsh2','aloop']))
gSykOnSyk = recruit_to_ligand(2,gRecSyk,gRecSyk.duplicate())
class SykKinaseModel(TransPhosphorylationModel):
def __init__(self,name,aloop_state):
g = Pattern(gSykOnSyk, constraints = [f'aloop_1.ph == {aloop_state}'])
super().__init__(name,'aloop_2',g)
self.verify(self.defaults)
model = AggregateModel(
name = 'syk_kinase',
models = [
SykKinaseModel(name='aloop_unphosphorylated',aloop_state=False),
SykKinaseModel(name='aloop_phosphorylated',aloop_state=True)
]
)
data = {
'aloop_unphosphorylated': {'phosphorylation_rate': 100},
'aloop_phosphorylated': {'phosphorylation_rate': 200},
}
model.verify(data)
model.defaults = data
| [
"wc_rules.modeling.pattern.Pattern"
] | [((500, 564), 'wc_rules.modeling.pattern.Pattern', 'Pattern', (['gSykOnSyk'], {'constraints': "[f'aloop_1.ph == {aloop_state}']"}), "(gSykOnSyk, constraints=[f'aloop_1.ph == {aloop_state}'])\n", (507, 564), False, 'from wc_rules.modeling.pattern import GraphContainer, Pattern\n')] |
# -*- coding: utf-8 -*-
## Kookmin University
## School of Computer Science
## Capstone #4 Flex Ads
## 20132651 <NAME>
# username_dynamodb.py takes a user_name and a user_id from outside and
# updates the user_name value stored in DynamoDB for that user_id.
# To do this, import the boto3 package (to connect to AWS) and the sys package (to read the arguments).
import boto3
import sys
# A separate python file, dynamo_credential, keeps the credentials
# needed for DynamoDB access out of this script.
import dynamo_credential
# Get the user_name and user_id passed in from the shell script.
user_name = sys.argv[1]
user_id = sys.argv[2]
# Take the id and key from the module imported above and store them in variables.
dynamo_id = dynamo_credential.key_id()
dynamo_key = dynamo_credential.access_key()
# Create the DynamoDB resource with those credentials and connect to it.
# The region is set to us-west-2 (Oregon).
dynamodb = boto3.resource(
'dynamodb',
aws_access_key_id=dynamo_id,
aws_secret_access_key=dynamo_key,
region_name = 'us-west-2'
)
# Select the Recommendation table in DynamoDB.
table = dynamodb.Table('Recommendation')
# Use update_item on that table: look up the item by user_id, then
# replace user_name with the user_name that was passed in.
response = table.update_item(
Key={
'user_id':user_id,
'update_ver' : 1
},
UpdateExpression="set user_name = :u",
ExpressionAttributeValues={
':u': user_name
},
ReturnValues="UPDATED_NEW"
)
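# Note: with ReturnValues="UPDATED_NEW", boto3 returns only the updated attributes,
# e.g. response['Attributes'] == {'user_name': <the new name>} (illustrative).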
# To verify the name was stored correctly, check it with get_item.
check_response = table.get_item(
Key={
'user_id' : user_id,
'update_ver': 1
}
)
check_username = check_response['Item']['user_name']
print('<<', check_username, '>> name has been registered to <<', user_id, '>> user number.')
| [
"boto3.resource",
"dynamo_credential.access_key",
"dynamo_credential.key_id"
] | [((588, 614), 'dynamo_credential.key_id', 'dynamo_credential.key_id', ([], {}), '()\n', (612, 614), False, 'import dynamo_credential\n'), ((628, 658), 'dynamo_credential.access_key', 'dynamo_credential.access_key', ([], {}), '()\n', (656, 658), False, 'import dynamo_credential\n'), ((764, 882), 'boto3.resource', 'boto3.resource', (['"""dynamodb"""'], {'aws_access_key_id': 'dynamo_id', 'aws_secret_access_key': 'dynamo_key', 'region_name': '"""us-west-2"""'}), "('dynamodb', aws_access_key_id=dynamo_id,\n aws_secret_access_key=dynamo_key, region_name='us-west-2')\n", (778, 882), False, 'import boto3\n')] |
"""Unit of Work"""
import abc
from functools import singledispatch
import logging
import typing
from karp.domain import errors, index, network, repository
RepositoryType = typing.TypeVar(
"RepositoryType", repository.Repository, index.Index, network.Network
)
logger = logging.getLogger("karp")
class UnitOfWork(typing.Generic[RepositoryType], abc.ABC):
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.rollback()
def commit(self):
self._commit()
def collect_new_events(self) -> typing.Iterable:
for entity in self.repo.seen:
while entity.events:
yield entity.events.pop(0)
@abc.abstractmethod
def _commit(self):
pass
@abc.abstractmethod
def rollback(self):
pass
@property
@abc.abstractmethod
def repo(self) -> RepositoryType:
pass
class ResourceUnitOfWork(UnitOfWork[repository.ResourceRepository]):
@property
def resources(self) -> repository.ResourceRepository:
return self.repo
class EntryUnitOfWork(UnitOfWork[repository.EntryRepository]):
_registry = {}
type = None
def __init_subclass__(
cls, entry_repository_type: str, is_default: bool = False, **kwargs
) -> None:
super().__init_subclass__(**kwargs)
print(
f"""EntryUnitOfWork.__init_subclass__ called with:
entry_repository_type={entry_repository_type} and
is_default={is_default}"""
)
if entry_repository_type is None:
raise RuntimeError(
"Unallowed entry_repository_type: entry_repository_type = None"
)
if entry_repository_type in cls._registry:
raise RuntimeError(
f"An EntryUnitOfWork with type '{entry_repository_type}' already exists: {cls._registry[entry_repository_type]!r}"
)
# if is_default and None in cls._registry:
# raise RuntimeError(f"A default EntryRepository is already set. Default type is {cls._registry[None]!r}")
cls.type = entry_repository_type
cls._registry[entry_repository_type] = cls
if is_default or None not in cls._registry:
logger.info(
"Setting default EntryUnitOfWork type to '%s'", entry_repository_type
)
cls._registry[None] = entry_repository_type
@classmethod
def get_default_entry_repository_type(cls) -> typing.Optional[str]:
return cls._registry[None]
@classmethod
def create(
cls,
entry_repository_type: typing.Optional[str],
settings: typing.Dict,
resource_config: typing.Dict,
**kwargs,
):
print(f"_registry={cls._registry}")
if entry_repository_type is None:
entry_repository_type = cls._registry[None]
try:
uow_cls = cls._registry[entry_repository_type]
except KeyError as err:
raise errors.ConfigurationError(
f"Can't create an EntryUnitOfWork with type '{entry_repository_type}'"
) from err
print(f"kwargs = {kwargs}")
return uow_cls.from_dict(settings, resource_config, **kwargs)
@property
def entries(self) -> repository.EntryRepository:
return self.repo
class IndexUnitOfWork(UnitOfWork[index.Index]):
_registry = {}
type = None
def __init_subclass__(
cls, index_type: str, is_default: bool = False, **kwargs
) -> None:
super().__init_subclass__(**kwargs)
print(
f"""IndexUnitOfWork.__init_subclass__ called with:
index_type={index_type} and
is_default={is_default}"""
)
if index_type is None:
raise RuntimeError("Unallowed index_type: index_type = None")
if index_type in cls._registry:
raise RuntimeError(
f"An IndexUnitOfWork with type '{index_type}' already exists: {cls._registry[index_type]!r}"
)
# if is_default and None in cls._registry:
# raise RuntimeError(f"A default EntryRepository is already set. Default type is {cls._registry[None]!r}")
cls.type = index_type
cls._registry[index_type] = cls
if is_default or None not in cls._registry:
logger.info("Setting default IndexUnitOfWork type to '%s'", index_type)
cls._registry[None] = index_type
@classmethod
def get_default_index_type(cls) -> typing.Optional[str]:
return cls._registry[None]
@classmethod
def create(
cls, index_type: typing.Optional[str], **kwargs
): # , settings: typing.Dict, **kwargs):
print(f"_registry={cls._registry}")
if index_type is None:
index_type = cls._registry[None]
try:
uow_cls = cls._registry[index_type]
except KeyError as err:
raise errors.ConfigurationError(
f"Can't create an IndexUnitOfWork with type '{index_type}'"
) from err
print(f"kwargs = {kwargs}")
return uow_cls.from_dict(**kwargs)
class EntriesUnitOfWork:
def __init__(self, entry_uows=None):
self.entry_uows: typing.Dict[str, EntryUnitOfWork] = (
{key: uow for key, uow in entry_uows} if entry_uows else {}
)
def get(self, resource_id: str) -> EntryUnitOfWork:
return self.entry_uows[resource_id]
def get_uow(self, resource_id: str) -> EntryUnitOfWork:
return self.entry_uows[resource_id]
def set_uow(self, resource_id: str, uow: EntryUnitOfWork):
self.entry_uows[resource_id] = uow
@property
def repo(self):
return self
def collect_new_events(self) -> typing.Iterable:
for uow in self.entry_uows.values():
yield from uow.collect_new_events()
class EntryUowFactory(abc.ABC):
@abc.abstractmethod
def create(
self,
resource_id: str,
resource_config: typing.Dict,
entry_repository_settings: typing.Optional[typing.Dict],
) -> EntryUnitOfWork:
raise NotImplementedError
class DefaultEntryUowFactory(EntryUowFactory):
def create(
self,
resource_id: str,
resource_config: typing.Dict,
entry_repository_settings: typing.Optional[typing.Dict],
) -> EntryUnitOfWork:
entry_repository_type = resource_config["entry_repository_type"]
if not entry_repository_settings:
entry_repository_settings = (
repository.EntryRepository.create_repository_settings(
resource_id=resource_id,
repository_type=entry_repository_type,
resource_config=resource_config,
)
)
# entry_repository = repository.EntryRepository.create(
# entry_repository_type, settings=entry_repository_settings
# )
return EntryUnitOfWork.create(
entry_repository_type=entry_repository_type,
settings=entry_repository_settings,
resource_config=resource_config,
)
# return create_entry_unit_of_work(entry_repository)
@singledispatch
def create_entry_unit_of_work(repo) -> EntryUnitOfWork:
raise NotImplementedError(f"Can't handle repository '{repo!r}'")
| [
"logging.getLogger",
"karp.domain.repository.EntryRepository.create_repository_settings",
"karp.domain.errors.ConfigurationError",
"typing.TypeVar"
] | [((174, 263), 'typing.TypeVar', 'typing.TypeVar', (['"""RepositoryType"""', 'repository.Repository', 'index.Index', 'network.Network'], {}), "('RepositoryType', repository.Repository, index.Index,\n network.Network)\n", (188, 263), False, 'import typing\n'), ((277, 302), 'logging.getLogger', 'logging.getLogger', (['"""karp"""'], {}), "('karp')\n", (294, 302), False, 'import logging\n'), ((6567, 6727), 'karp.domain.repository.EntryRepository.create_repository_settings', 'repository.EntryRepository.create_repository_settings', ([], {'resource_id': 'resource_id', 'repository_type': 'entry_repository_type', 'resource_config': 'resource_config'}), '(resource_id=\n resource_id, repository_type=entry_repository_type, resource_config=\n resource_config)\n', (6620, 6727), False, 'from karp.domain import errors, index, network, repository\n'), ((3007, 3109), 'karp.domain.errors.ConfigurationError', 'errors.ConfigurationError', (['f"""Can\'t create an EntryUnitOfWork with type \'{entry_repository_type}\'"""'], {}), '(\n f"Can\'t create an EntryUnitOfWork with type \'{entry_repository_type}\'")\n', (3032, 3109), False, 'from karp.domain import errors, index, network, repository\n'), ((4950, 5041), 'karp.domain.errors.ConfigurationError', 'errors.ConfigurationError', (['f"""Can\'t create an IndexUnitOfWork with type \'{index_type}\'"""'], {}), '(\n f"Can\'t create an IndexUnitOfWork with type \'{index_type}\'")\n', (4975, 5041), False, 'from karp.domain import errors, index, network, repository\n')] |
from __future__ import annotations
import os
import glob
import pathlib
from typing import Dict, Iterable, List, Union, TypeVar
# import itertools
from functools import wraps
# import inspect
from parse import parse as parse_
from pformat import *
from .util import *
Ps = TypeVar('Ps', bound='Paths')
P = TypeVar('P', bound='Path')
_TREE_DEF_TYPE = Dict[str, Union[str, Dict]]
def tree(root: Union[str, _TREE_DEF_TYPE]=None, paths: Union[str, _TREE_DEF_TYPE]=None, data: Dict=None):
'''Build paths from a directory spec.
Arguments:
root (str): the root directory.
paths (dict): the directory structure.
Returns:
The initialized Paths object
.. code-block:: python
import pathtrees
# define the file structure
path = pathtrees.tree('{project}', {
'data': {
'{sensor_id}': {
'': 'sensor',
'audio': { '{file_id:04d}.flac': 'audio' },
'spl': { 'spl_{file_id:04d}.csv': 'spl' },
'embeddings': { 'emb_{file_id:04d}.csv': 'embeddings' },
},
},
})
.. note::
use empty strings to reference the directory. This works because
``os.path.join(path, '') == path``
'''
if root is not None and not isinstance(root, (str, os.PathLike)):
root, paths = paths, root
paths = paths or {}
if isinstance(paths, Paths):
return paths.rjoinpath(root) if root else paths
root = root or '.'
if isinstance(paths, (list, tuple, set)):
paths = {k: k for k in paths}
# if isinstance():
# pass
return Paths(
{v: Path(*k) for k, v in _get_keys({str(root or ''): paths})},
data or {})
def _get_keys(data: _TREE_DEF_TYPE, keys: tuple=None, iters_as_keys: bool=False):
'''Recursively traverse a nested dict and return the trail of keys, and the final value'''
keys = tuple(keys or ())
for key, value in data.items():
ks = keys + (key,)
if isinstance(value, dict):
for ksi, val in _get_keys(value, ks, iters_as_keys):
yield ksi, val
elif iters_as_keys and isinstance(value, (tuple, list, set)):
for val in value:
yield ks, val
else:
yield ks, value
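# Illustrative example (hypothetical spec) of the flattening performed by _get_keys:
#   list(_get_keys({'root': {'a.txt': 'a', 'sub': {'b.txt': 'b'}}}))
#   -> [(('root', 'a.txt'), 'a'), (('root', 'sub', 'b.txt'), 'b')]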
class Underspecified(KeyError):
'''Raised when you try to format a Path without enough data.
It's basically a KeyError with more information.
'''
class Paths:
'''A hierarchy of paths in your project.
You can arbitrarily nest them and it will join all of the keys
leading down to that path. The value is the name that you
can refer to it by.
.. code-block:: python
# define your file structure.
# a common ML experiment structure (for me anyways)
paths = Paths.define('./logs', {
'{log_id}': {
'model.h5': 'model',
'model_spec.pkl': 'model_spec',
'plots': {
'epoch_{step_name}': {
'{plot_name}.png': 'plot',
'': 'plot_dir'
}
},
# a path join hack that gives you: log_dir > ./logs/{log_id}
                '': 'log_dir',
}
})
paths.update(log_id='test1', step_name='epoch_100')
# get paths by name
paths.model # logs/test1/model.h5
paths.model_spec # logs/test1/model_spec.pkl
paths.plot # logs/test1/plots/{step_name}/{plot_name}.png
# for example, a keras callback that saves a matplotlib plot every epoch
class MyCallback(Callback):
def on_epoch_end(self, epoch, logs):
# creates a copy of the path tree that has step_name=epoch
epoch_paths = paths.specify(step_name=epoch)
...
# save one plot
plt.imsave(epoch_paths.plot.specify(plot_name='confusion_matrix'))
...
# save another plot
plt.imsave(epoch_paths.plot.specify(plot_name='auc'))
# you can glob over any missing data (e.g. step_name => '*')
# equivalent to: glob("logs/test1/plots/{step_name}/auc.png")
for path in paths.plot.specify(plot_name='auc').glob():
print(path)
'''
def __init__(self, paths: Dict[str, 'Path'], data: dict=None):
self.paths = paths
for path in paths.values():
path._tree = self
self.data = {} if data is None else data
def __repr__(self) -> str:
return '<Paths data={} \n{}\n>'.format(self.data, '\n'.join([
'\t{} : {}'.format(name, p.partial_format())
for name, p in self.paths.items()
]))
@classmethod
def define(cls, paths: _TREE_DEF_TYPE=None, root: P=None, data: dict=None) -> 'Paths':
return tree(paths, root, data)
define.__doc__ = tree.__doc__
def __contains__(self, name: str) -> bool:
'''Check if a label is in the tree.'''
return name in self.paths
def __iter__(self) -> Iterable['Path']:
'''Iterate over paths in the tree.'''
return iter(self.paths.values())
# def __call__(self, **kw):
# return self.specify(**kw)
def keys(self) -> Iterable[str]:
'''Iterate over path names in the tree.'''
return self.paths.keys()
def __getattr__(self, name) -> 'Path':
'''Get a path by name.'''
if name in self.paths:
return self.paths[name]
raise AttributeError(name)
def __getitem__(self, name) -> 'Path':
'''Get a path by name.'''
if isinstance(name, tuple):
return Paths({n: self._paths[n] for n in name}, data=dict(self.data))
return self.paths[name]
def add(self: Ps, root=None, paths=None) -> Ps:
'''Build paths from a directory spec.
Arguments:
root (str): the root directory.
paths (dict): the directory structure.
Returns:
The initialized Paths object
'''
paths = tree(root, paths)
children = {k: p.copy for k, p in paths.paths.items()}
self.paths.update(**children)
for path in children.values():
path._tree = self
return self
def rjoinpath(self: Ps, path) -> Ps:
"""Give these paths a new root! Basically doing root / path for all paths in this tree.
        This is useful if you want to nest a folder inside another.
"""
return Paths({name: p.rjoinpath(path) for name, p in self.paths.items()}, dict(self.data))
def relative_to(self: Ps, path) -> Ps:
"""Make these paths relative to another path! Basically doing path.relative_to(root) for all paths in this tree.
Use this with ``with_root`` to change the root directory of the paths.
"""
return Paths({name: p.relative_to(path) for name, p in self.paths.items()}, dict(self.data))
def parse(self, path, name) -> dict:
'''Parse data from a formatted string (reverse of string format)
Arguments:
path (str): the string to parse
name (str): the name of the path pattern to use.
'''
return self[name].parse(path)
def translate(self, file, name, to, **kw) -> 'Path':
        '''Parse data from ``file`` using the path named ``name``, and use it to specify the path named ``to``.'''
return self.paths[to].specify(**self.paths[name].parse(file, **kw))
@property
def copy(self: Ps) -> Ps:
"""Create a copy of a path tree and its paths."""
return Paths({name: path.copy for name, path in self.paths.items()}, dict(self.data))
def update(self: Ps, **kw) -> Ps:
'''Update specified data in place.
.. code-block:: python
paths = pathtrees.tree({'{a}': aaa})
assert not paths.fully_specified
paths.update(a=5)
assert paths.fully_specified
assert paths.data['a'] == 5
'''
self.data.update(kw)
return self
def specify(self: Ps, **kw) -> Ps:
'''Creates a copy of the path tree then updates the copy's data.
.. code-block:: python
paths = pathtrees.tree({'{a}': aaa})
paths2 = paths.specify(a=5)
assert not paths.fully_specified
assert paths2.fully_specified
assert 'a' not in paths.data
assert paths2.data['a'] == 5
Equivalent to:
.. code-block:: python
paths.copy.update(**kw)
'''
return self.copy.update(**kw)
def unspecify(self: Ps, *keys, inplace=False, children=True) -> Ps:
'''Remove keys from paths dictionary.
.. code-block:: python
paths = pathtrees.tree({'{a}': aaa})
paths.update(a=5)
assert paths.fully_specified
assert paths.data['a'] == 5
paths.unspecify('a')
assert not paths.fully_specified
assert 'a' not in paths.data
'''
p = self if inplace else self.copy
for key in keys:
p.data.pop(key, None)
if children:
for p in self:
p.unspecify(*keys, parent=False)
return p
@property
def fully_specified(self) -> bool:
'''Are all paths fully specified?
.. code-block:: python
paths = pathtrees.tree({'{a}': aaa})
assert not paths.fully_specified
paths.update(a=5)
assert paths.fully_specified
'''
return all(p.fully_specified for p in self.paths.values())
def format(self, **kw) -> Dict[str, str]:
'''Try to format all paths as strings. Raises Underspecified if data is missing.
Arguments:
**kw: additional data specified for formatting.
Returns:
dict: key is the name of the path, and the value is the formatted ``pathlib.Path``.
'''
return {name: self[name]._format(**kw) for name in self.paths}
def maybe_format(self, **kw) -> Dict[str, Union[str, 'Path']]:
'''Return a dictionary where all fully specified paths are converted to strings
and underspecified strings are left as Path objects.
Arguments:
**kw: additional data specified for formatting.
'''
return {name: self[name].maybe_format(**kw) for name in self.paths}
def partial_format(self, **kw) -> Dict[str, str]:
'''Return a dictionary where all paths are converted to strings
and underspecified fields are left in for later formatting.
Arguments:
**kw: additional data specified for formatting.
'''
return {name: self[name]._partial_format(**kw) for name in self.paths}
BuiltinPath = type(pathlib.Path())
class Path(BuiltinPath):
'''Represents a ``pathlib.Path`` with placeholders for bits of data.
It uses python string formatting to let you fill in the
missing bits at a later date.
.. code-block::
path = pathtrees.Path('projects/{name}/images/frame_{frame_id:04d}.jpg')
path.update(name='my_project')
# loop over all frames
for f in path.glob():
# print out some info about each frame
data = path.parse(f)
print("frame ID:", data['frame_id'])
print("path:", f)
... # do something - load an image idk
There are quite a few methods that had to be wrapped from the original path
object so that, if we manipulate the path in any way, the result keeps the extra
attributes needed to manage the data.
'''
__slots__ = ['data', '_tree'] #, '_tree_root'
def __new__(cls, *args, data: dict=None, tree: Paths=None): # , root=None
p = super().__new__(cls, *args)
p.data = {} if data is None else data
p._tree = tree
# p._tree_root = root
return p
# str representations
def _add_extra_parts(self, p: 'Path', copy_data: bool=False) -> 'Path':
p.data = dict(self.data) if copy_data else self.data
p._tree = self._tree
# p._tree_root = self._tree_root
return p
def __repr__(self) -> str:
return 'Path({}, data={}, parent_data={})'.format(super().__str__(), self.data, self._tree.data if self._tree else None)
def __fspath__(self) -> str:
return self._format()
def __str__(self) -> str:
return self._partial_format()
def __hash__(self):
return hash(self._partial_format())
def __call__(self, **kw) -> BuiltinPath:
return self.format(**kw)
def __eq__(self, __o) -> bool:
if isinstance(__o, str):
return self._partial_format() == __o
return super().__eq__(__o)
@property
def raw(self) -> str:
return super().__str__()
def rjoinpath(self, root: BuiltinPath) -> 'Path':
'''Return an absolute form of the path.
TODO: is there a better way?
'''
return Path(root / self, data=self.data, tree=self._tree)
# data management
@property
def copy(self: P) -> P:
'''Creates a copy of the path object so that data can be altered without affecting
the original object.'''
return self._add_extra_parts(
self._from_parsed_parts(self._drv, self._root, self._parts),
copy_data=True)
def update(self: P, **kw) -> P:
'''Update specified data in place'''
self.data.update(kw)
return self
def specify(self: P, **kw) -> P:
'''Update specified data and return a new object.'''
return self.copy.update(**kw)
def unspecify(self: P, *keys, inplace: bool=True, parent: bool=True) -> P:
'''Remove keys from path dictionary'''
p = self if inplace else self.copy
if parent and p._tree:
p._tree = p._tree.unspecify(*keys, children=False)
for key in keys:
p.data.pop(key, None)
return p
@property
def fully_specified(self) -> bool:
'''Check if the path is fully specified (if True, it can be
formatted without raising an Underspecified error.).'''
try:
self._format()
return True
except KeyError:
return False
# formatting
def _get_data(self, **kw) -> dict:
return {**(self._tree.data if self._tree else {}), **self.data, **kw}
def get(self, key, default=None):
if key in self.data:
return self.data[key]
if self._tree:
return self._tree.data.get(key, default)
return default
def _format(self, **kw) -> str:
return fformat(super().__str__(), **self._get_data(**kw))
def _partial_format(self, **kw) -> str:
return pformat(super().__str__(), **self._get_data(**kw))
def _glob_format(self, **kw) -> str:
return gformat(super().__str__(), **self._get_data(**kw))
def _format_path(self, **kw) -> BuiltinPath:
return BuiltinPath(self._format(**kw))
def _partial_format_path(self: P, **kw) -> P:
return Path(self._partial_format(**kw), data=self.data, tree=self._tree)
def _glob_format_path(self, **kw) -> BuiltinPath:
return BuiltinPath(self._glob_format(**kw))
def format(self, **kw) -> str:
'''Insert data into the path string. (Works like string format.)
Raises:
KeyError if the format string is underspecified.
'''
return self._format(**kw)
def partial_format(self, **kw) -> str:
'''Format a field, leaving all unspecified fields to be filled later.'''
return self._partial_format(**kw)
def glob_format(self, **kw) -> str:
'''Format a field, setting all unspecified fields as a wildcard (asterisk).'''
return self._glob_format(**kw)
def format_path(self, **kw) -> BuiltinPath:
'''Insert data into the path string. (Works like string format.)
Raises:
KeyError if the format string is underspecified.
'''
return self._format_path(**kw)
def partial_format_path(self: P, **kw) -> P:
'''Format a field, leaving all unspecified fields to be filled later.'''
return self._partial_format_path(**kw)
def glob_format_path(self, **kw) -> BuiltinPath:
'''Format a field, setting all unspecified fields as a wildcard (asterisk).'''
return self._glob_format_path(**kw)
def maybe_format(self: P, **kw) -> Union[str, P]:
'''Try to format a field. If it fails, return as a Path object.'''
p = self.specify(**kw) if kw else self
try:
return p.format()
except KeyError:
return p
# glob
def glob(self, *fs) -> List[str]:
'''Glob over all unspecified variables.
Arguments:
*path (str): additional paths to join. e.g. for a directory
you can use ``"*.txt"`` to get all .txt files.
Returns:
list: The paths matching the glob pattern.
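A minimal sketch (illustrative pattern):
.. code-block:: python
path = Path('frames/{i}.jpg')
files = path.glob()  # equivalent to glob.glob('frames/*.jpg')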
'''
return glob.glob(os.path.join(self._glob_format(), *fs))
def iglob(self, *fs) -> Iterable[str]:
'''Iterable glob over all unspecified variables. See :func:`glob` for signature.'''
return glob.iglob(os.path.join(self._glob_format(), *fs))
def rglob(self, *fs) -> List[str]:
'''Recursive glob over all unspecified variables. See :func:`glob` for signature.'''
return glob.glob(os.path.join(self._glob_format(), *fs), recursive=True)
def irglob(self, *fs) -> Iterable[str]:
'''Iterable, recursive glob over all unspecified variables. See :func:`glob` for signature.'''
return glob.iglob(os.path.join(self._glob_format(), *fs), recursive=True)
def parse(self, path: str, use_data: bool=True) -> dict:
'''Extract variables from a compiled path.
See ``parse`` to understand the amazing witchery that
makes this possible!
https://pypi.org/project/parse/
Arguments:
path (str): The path containing data to parse.
use_data (bool): Should we fill in the data we already
have before parsing? This means fewer variables that
need to be parsed. Set False if you do not wish to use
the data.
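A minimal sketch (illustrative pattern):
.. code-block:: python
path = Path('projects/{name}/frame_{i:d}.jpg').specify(name='demo')
data = path.parse('projects/demo/frame_12.jpg')
assert data['i'] == 12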
'''
path = str(path)
pattern = self._partial_format() if use_data else super().__str__()
r = parse_(pattern, path)
if r is None:
raise ValueError('''Could not parse path using pattern.
path: {}
pattern: {}
`path.parse(path)` will call self.partial_format() by default before parsing
so any specified keys will be fixed. This is helpful to dodge ambiguous parsing
cases. To disable this pass `use_data=False` to parse.'''.format(path, pattern))
return self._get_data(**r.named)
def translate(self: P, path: str, to: str, **kw) -> P:
'''Translate the paths to another pattern'''
return self._tree[to].specify(**self.parse(path, **kw))
# fixes
@property
def parents(self) -> _PathParents:
return _PathParents(self)
def fformat(x: str, **kw) -> str:
try:
return x.format(**kw)
except KeyError as e:
raise Underspecified(
f'Path "{pformat(x, **kw)}" is missing a value for "{str(e)[1:-1]}" in data {set(kw)}.')
# hot fix for copying over data
def _fix_parts(func):
@wraps(func)
def inner(self, *a, **kw):
return self._add_extra_parts(func(self, *a, **kw))
return inner
_uses_parsed_parts = ['_make_child', '_make_child_relpath', 'with_name', 'with_suffix', 'relative_to']
_uses_parts = ['__rtruediv__', 'absolute', 'resolve', 'readlink', 'expanduser']
for _method in _uses_parsed_parts + _uses_parts:
try:
setattr(Path, _method, _fix_parts(getattr(BuiltinPath, _method)))
except AttributeError:
pass
Path.parent = property(_fix_parts(BuiltinPath.parent.fget))
class _PathParents(pathlib._PathParents):
__slots__ = ['data', 'parent']
def __init__(self, path):
super().__init__(path)
self.data = path.data
self.parent = path.parents
_add_extra_parts = Path._add_extra_parts
def __getitem__(self, i):
x = super().__getitem__(i)
return x if isinstance(i, slice) else self._add_extra_parts(x)
if __name__ == '__main__':
def main():
p = Path("path/{hello}/{hi}.txt")
import fire
fire.Fire(main) | [
"parse.parse",
"fire.Fire",
"pathlib.Path",
"functools.wraps",
"typing.TypeVar"
] | [((274, 302), 'typing.TypeVar', 'TypeVar', (['"""Ps"""'], {'bound': '"""Paths"""'}), "('Ps', bound='Paths')\n", (281, 302), False, 'from typing import Dict, Iterable, List, Union, TypeVar\n'), ((307, 333), 'typing.TypeVar', 'TypeVar', (['"""P"""'], {'bound': '"""Path"""'}), "('P', bound='Path')\n", (314, 333), False, 'from typing import Dict, Iterable, List, Union, TypeVar\n'), ((10867, 10881), 'pathlib.Path', 'pathlib.Path', ([], {}), '()\n', (10879, 10881), False, 'import pathlib\n'), ((19566, 19577), 'functools.wraps', 'wraps', (['func'], {}), '(func)\n', (19571, 19577), False, 'from functools import wraps\n'), ((20597, 20612), 'fire.Fire', 'fire.Fire', (['main'], {}), '(main)\n', (20606, 20612), False, 'import fire\n'), ((18573, 18594), 'parse.parse', 'parse_', (['pattern', 'path'], {}), '(pattern, path)\n', (18579, 18594), True, 'from parse import parse as parse_\n')] |
from datamart.upload.store import Datamart_dataset
# this sample will save the following online csv datasets into datamart in blaze graph
a = Datamart_dataset()
all_dir = ["https://raw.githubusercontent.com/usc-isi-i2/datamart-userend/master/example_datasets/List_of_United_States_counties_by_per_capita_income.csv",
"https://raw.githubusercontent.com/usc-isi-i2/datamart-userend/master/example_datasets/Most-Recent-Cohorts-Scorecard-Elements.csv",
"https://raw.githubusercontent.com/usc-isi-i2/datamart-userend/master/example_datasets/Unemployment.csv",
"https://raw.githubusercontent.com/usc-isi-i2/datamart-userend/master/example_datasets/educate.csv",
"https://raw.githubusercontent.com/usc-isi-i2/datamart-userend/master/example_datasets/population.csv",
"https://raw.githubusercontent.com/usc-isi-i2/datamart-userend/master/example_datasets/poverty.csv"
]
for input_dir in all_dir:
df,meta=a.load_and_preprocess(input_dir=input_dir,file_type="online_csv")
# there should only be one table extracted from a single online csv address
a.model_data(df, meta, 0)
a.upload()
all_dir_wikipedia_test = ["https://en.wikipedia.org/wiki/1962_Washington_Senators_season", "https://en.wikipedia.org/wiki/2017%E2%80%9318_New_Orleans_Privateers_women%27s_basketball_team", "https://en.wikipedia.org/wiki/List_of_Asian_Games_medalists_in_cue_sports"]
for input_dir in all_dir_wikipedia_test:
df,meta=a.load_and_preprocess(input_dir=input_dir,file_type="wikitable")
for i in range(len(df)):
a.model_data(df, meta, i)
a.upload()
# input_dir = "/Users/minazuki/Downloads/usda/population_new.csv"
# df,meta=a.load_and_preprocess(input_dir)
# a.model_data(df, meta)
# a.output_to_ttl("2")
# input_dir = "/Users/minazuki/Downloads/usda/poverty_new.csv"
# df,meta=a.load_and_preprocess(input_dir)
# a.model_data(df, meta)
# a.output_to_ttl("3")
# input_dir = "/Users/minazuki/Downloads/usda/Unemployment_new.csv"
# df,meta=a.load_and_preprocess(input_dir)
# a.model_data(df, meta)
# a.output_to_ttl("4")
| [
"datamart.upload.store.Datamart_dataset"
] | [((142, 160), 'datamart.upload.store.Datamart_dataset', 'Datamart_dataset', ([], {}), '()\n', (158, 160), False, 'from datamart.upload.store import Datamart_dataset\n')] |
# Refer to pytorch/examples/VAE
from model.abstract_VAE import VAE
import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Variable
from scipy.stats import norm
class StableBCELoss(nn.modules.Module):
def __init__(self):
super(StableBCELoss, self).__init__()
def forward(self, input, target):
neg_abs = - input.abs()
loss = input.clamp(min=0) - input * target + (1 + neg_abs.exp()).log()
return loss.sum()
# All MLP!
class NaiveVAE(VAE):
def __init__(self, input_dims, code_dims,
hidden=400, activacation="lrelu",
decoder="Bernoulli"):
super(NaiveVAE, self).__init__(input_dims,
code_dims)
self.name = "NaiveVAE"
self.nx = int(np.prod(input_dims))
self.nz = int(np.prod(code_dims))
if activacation == "lrelu":
self.act = nn.LeakyReLU()
else:
self.act = nn.ReLU()
if decoder == "Bernoulli":
self.reconstruct_loss = StableBCELoss()
else:
self.reconstruct_loss = nn.MSELoss()
# encoding part
self.fc1 = nn.Linear(self.nx, hidden)
# mu and sigma
self.fc21 = nn.Linear(hidden, self.nz)
self.fc22 = nn.Linear(hidden, self.nz)
# decoding part
self.fc3 = nn.Linear(self.nz, hidden)
self.fc4 = nn.Linear(hidden, self.nx)
def encode(self, x):
x = x.view(x.size(0), -1)
h1 = self.act(self.fc1(x))
return self.fc21(h1), self.fc22(h1)
def reparametrize(self, mu, logvar):
std = logvar.mul(0.5).exp_()
if isinstance(mu, torch.cuda.FloatTensor):
eps = torch.cuda.FloatTensor(std.size()).normal_()
else:
eps = torch.FloatTensor(std.size()).normal_()
eps = Variable(eps)
return eps.mul(std).add_(mu)
def decode(self, z):
h3 = self.act(self.fc3(z))
return self.fc4(h3)
def forward(self, x):
mu, logvar = self.encode(x.view(x.size(0), -1))
z = self.reparametrize(mu, logvar)
return self.decode(z), mu, logvar, z
def loss(self, recon_x, x, mu, logvar, z):
x = x.view(x.size(0), -1)
BCE = self.reconstruct_loss(recon_x, x) / x.size(0)
# see Appendix B from VAE paper:
# <NAME>. Auto-Encoding Variational Bayes. ICLR, 2014
# https://arxiv.org/abs/1312.6114
# 0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2)
KLD_element = mu.pow(2).add_(logvar.exp()).mul_(-1).add_(1).add_(logvar)
KLD = torch.sum(KLD_element).mul_(-0.5) / x.size(0)
return BCE + KLD, BCE, KLD
def mutual_info_q(self, x):
mu, logvar = self.encode(x.view(x.size(0), -1))
z = self.reparametrize(mu, logvar)
l = z.size(0)
z = z.repeat(l, 1, 1)
mu = mu.unsqueeze(2).repeat(1,1,l).transpose(1,2)
logvar = logvar.unsqueeze(2).repeat(1,1,l).transpose(1,2)
p_matrix = ( - torch.sum((z - mu) ** 2 / logvar.exp(), dim=2) / 2.0 - 0.5 * torch.sum(logvar, dim=2)).exp_()
p_split_matrix = (- (z - mu) ** 2 / logvar.exp() / 2.0 - 0.5 * logvar ).exp_()
p_split_vector = torch.sum(p_split_matrix, dim=1)
p_vector = torch.sum(p_matrix, dim=1)
I = torch.FloatTensor([np.log(l)])
I_split = torch.FloatTensor([np.log(l)] * int(z.size(2)))
for i in range(l):
I += (p_matrix[i][i].log() - p_vector[i].log()).data / l
I_split += (p_split_matrix[i][i].log() - p_split_vector[i].log()).data / l
# q(z_i) is not independent..
# assert np.allclose(I.numpy(), np.sum(I_split.numpy()))
return I, I_split
# more flexiable VAE
class BetaVAE(NaiveVAE):
def __init__(self, input_dims, code_dims, layers=[2, 2], beta=1.0,
hidden=400, activacation="lrelu",
decoder="Bernoulli"):
super(BetaVAE, self).__init__(input_dims, code_dims,
hidden=hidden,
activacation=activacation,
decoder=decoder)
self.beta = beta
self.encode_layers = nn.ModuleList([self.fc1])
for i in range(layers[0]-2):
l = nn.Linear(hidden, hidden)
self.encode_layers.append(l)
self.decode_layers = nn.ModuleList([self.fc3])
for i in range(layers[1]-2):
l = nn.Linear(hidden, hidden)
self.decode_layers.append(l)
def encode(self, x):
h = x.view(x.size(0), -1)
for fc in self.encode_layers:
h = self.act(fc(h))
return self.fc21(h), self.fc22(h)
def decode(self, z):
h = z
for fc in self.decode_layers:
h = self.act(fc(h))
return self.fc4(h)
def loss(self, recon_x, x, mu, logvar, z):
x = x.view(x.size(0), -1)
BCE = self.reconstruct_loss(recon_x, x) / x.size(0)
KLD_element = mu.pow(2).add_(logvar.exp()).mul_(-1).add_(1).add_(logvar)
KLD = torch.sum(KLD_element).mul_(-0.5) / x.size(0)
return BCE + self.beta * KLD, BCE, KLD
class MMDVAE(BetaVAE):
def compute_kernel(self, x, y):
x_size = x.size(0)
y_size = y.size(0)
dim = x.size(1)
tiled_x = x.unsqueeze(1).repeat(1, y_size, 1)
tiled_y = y.unsqueeze(0).repeat(x_size, 1, 1)
return ((-(tiled_x - tiled_y) ** 2).mean(dim=2) / float(dim)).exp_()
def compute_mmd(self, x, y, sigma_sqr=1.0):
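# biased MMD^2 estimate with an RBF kernel:
# mean[k(x, x)] + mean[k(y, y)] - 2 * mean[k(x, y)]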
x_kernel = self.compute_kernel(x, x)
y_kernel = self.compute_kernel(y, y)
xy_kernel = self.compute_kernel(x, y)
return torch.mean(x_kernel) + torch.mean(y_kernel) - 2 * torch.mean(xy_kernel)
def loss(self, recon_x, x, mu, logvar, z):
x = x.view(x.size(0), -1)
BCE = self.reconstruct_loss(recon_x, x) / x.size(0)
true_samples = Variable(torch.FloatTensor(x.size(0), self.nz).normal_())
MMD = self.compute_mmd(true_samples, z)
return BCE + self.beta * MMD , BCE, MMD
| [
"numpy.prod",
"torch.nn.ReLU",
"torch.nn.LeakyReLU",
"torch.nn.ModuleList",
"torch.mean",
"numpy.log",
"torch.nn.MSELoss",
"torch.sum",
"torch.nn.Linear",
"torch.autograd.Variable"
] | [((1219, 1245), 'torch.nn.Linear', 'nn.Linear', (['self.nx', 'hidden'], {}), '(self.nx, hidden)\n', (1228, 1245), True, 'import torch.nn as nn\n'), ((1289, 1315), 'torch.nn.Linear', 'nn.Linear', (['hidden', 'self.nz'], {}), '(hidden, self.nz)\n', (1298, 1315), True, 'import torch.nn as nn\n'), ((1336, 1362), 'torch.nn.Linear', 'nn.Linear', (['hidden', 'self.nz'], {}), '(hidden, self.nz)\n', (1345, 1362), True, 'import torch.nn as nn\n'), ((1407, 1433), 'torch.nn.Linear', 'nn.Linear', (['self.nz', 'hidden'], {}), '(self.nz, hidden)\n', (1416, 1433), True, 'import torch.nn as nn\n'), ((1453, 1479), 'torch.nn.Linear', 'nn.Linear', (['hidden', 'self.nx'], {}), '(hidden, self.nx)\n', (1462, 1479), True, 'import torch.nn as nn\n'), ((1898, 1911), 'torch.autograd.Variable', 'Variable', (['eps'], {}), '(eps)\n', (1906, 1911), False, 'from torch.autograd import Variable\n'), ((3272, 3304), 'torch.sum', 'torch.sum', (['p_split_matrix'], {'dim': '(1)'}), '(p_split_matrix, dim=1)\n', (3281, 3304), False, 'import torch\n'), ((3325, 3351), 'torch.sum', 'torch.sum', (['p_matrix'], {'dim': '(1)'}), '(p_matrix, dim=1)\n', (3334, 3351), False, 'import torch\n'), ((4269, 4294), 'torch.nn.ModuleList', 'nn.ModuleList', (['[self.fc1]'], {}), '([self.fc1])\n', (4282, 4294), True, 'import torch.nn as nn\n'), ((4444, 4469), 'torch.nn.ModuleList', 'nn.ModuleList', (['[self.fc3]'], {}), '([self.fc3])\n', (4457, 4469), True, 'import torch.nn as nn\n'), ((824, 843), 'numpy.prod', 'np.prod', (['input_dims'], {}), '(input_dims)\n', (831, 843), True, 'import numpy as np\n'), ((867, 885), 'numpy.prod', 'np.prod', (['code_dims'], {}), '(code_dims)\n', (874, 885), True, 'import numpy as np\n'), ((955, 969), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ([], {}), '()\n', (967, 969), True, 'import torch.nn as nn\n'), ((1007, 1016), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1014, 1016), True, 'import torch.nn as nn\n'), ((1163, 1175), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (1173, 1175), True, 'import torch.nn as nn\n'), ((4348, 4373), 'torch.nn.Linear', 'nn.Linear', (['hidden', 'hidden'], {}), '(hidden, hidden)\n', (4357, 4373), True, 'import torch.nn as nn\n'), ((4523, 4548), 'torch.nn.Linear', 'nn.Linear', (['hidden', 'hidden'], {}), '(hidden, hidden)\n', (4532, 4548), True, 'import torch.nn as nn\n'), ((3383, 3392), 'numpy.log', 'np.log', (['l'], {}), '(l)\n', (3389, 3392), True, 'import numpy as np\n'), ((5766, 5786), 'torch.mean', 'torch.mean', (['x_kernel'], {}), '(x_kernel)\n', (5776, 5786), False, 'import torch\n'), ((5789, 5809), 'torch.mean', 'torch.mean', (['y_kernel'], {}), '(y_kernel)\n', (5799, 5809), False, 'import torch\n'), ((5816, 5837), 'torch.mean', 'torch.mean', (['xy_kernel'], {}), '(xy_kernel)\n', (5826, 5837), False, 'import torch\n'), ((2651, 2673), 'torch.sum', 'torch.sum', (['KLD_element'], {}), '(KLD_element)\n', (2660, 2673), False, 'import torch\n'), ((3432, 3441), 'numpy.log', 'np.log', (['l'], {}), '(l)\n', (3438, 3441), True, 'import numpy as np\n'), ((5146, 5168), 'torch.sum', 'torch.sum', (['KLD_element'], {}), '(KLD_element)\n', (5155, 5168), False, 'import torch\n'), ((3126, 3150), 'torch.sum', 'torch.sum', (['logvar'], {'dim': '(2)'}), '(logvar, dim=2)\n', (3135, 3150), False, 'import torch\n')] |
from sqlalchemy import Column, DateTime, ForeignKey, Integer, String, func, Boolean
from sqlalchemy.dialects.postgresql import JSON
from sqlalchemy.orm import backref, relationship
from datetime import datetime
from database import Base
class Users(Base):
__tablename__ = 'users'
id = Column(Integer, primary_key=True)
email = Column(String(250), nullable=False, unique=True)
given_name = Column(String(250))
family_name = Column(String(250))
role = Column(Integer)
#catalogs = relationship("catalog", backref="user")
def __repr__(self):
return '["{}","{}","{}","{}","{}"]'.format(self.id,self.email,self.given_name,self.family_name,self.role)
class Catalog(Base):
__tablename__ = 'catalog'
id = Column(Integer, primary_key=True)
user_id = Column(Integer, ForeignKey('users.id'))
status = Column(String(250), default="Draft")
status2 = Column(String(250))
title = Column(String(250))
number = Column(String(250))
created = Column(DateTime, default=datetime.now)
updated = Column(DateTime, default=datetime.now)
collection = Column(Boolean)
publish_r = Column(Boolean, default=False)
publish_a = Column(Boolean, default=False)
published = Column(Boolean, default=False)
archive = Column(Boolean, default=False)
def __repr__(self):
return '["{}","{}","{}","{}","{}","{}","{}"."{}","{}","{}","{}","{}","{}"]'.format(self.id,self.user_id,self.status,self.status2, self.title, self.number,
self.created, self.updated, self.collection, self.publish_r, self.publish_a, self.published, self.archive)
class Meta(Base):
__tablename__ = 'meta'
id = Column(Integer,primary_key=True)
def __repr__(self):
return '["{}"]'.format(self.id)
class Apikeys(Base):
__tablename__ = 'apikeys'
id = Column(Integer, primary_key=True)
user_id = Column(Integer, ForeignKey('users.id'))
api_url = Column(String(250))
api_key = Column(String(250))
def __repr__(self):
return '["{}","{}","{}","{}"]'.format(self.id,self.user_id,self.api_url,self.api_key)
| [
"sqlalchemy.String",
"datetime.datetime.now",
"sqlalchemy.ForeignKey",
"sqlalchemy.Column"
] | [((294, 327), 'sqlalchemy.Column', 'Column', (['Integer'], {'primary_key': '(True)'}), '(Integer, primary_key=True)\n', (300, 327), False, 'from sqlalchemy import Column, DateTime, ForeignKey, Integer, String, func, Boolean\n'), ((475, 490), 'sqlalchemy.Column', 'Column', (['Integer'], {}), '(Integer)\n', (481, 490), False, 'from sqlalchemy import Column, DateTime, ForeignKey, Integer, String, func, Boolean\n'), ((749, 782), 'sqlalchemy.Column', 'Column', (['Integer'], {'primary_key': '(True)'}), '(Integer, primary_key=True)\n', (755, 782), False, 'from sqlalchemy import Column, DateTime, ForeignKey, Integer, String, func, Boolean\n'), ((1113, 1128), 'sqlalchemy.Column', 'Column', (['Boolean'], {}), '(Boolean)\n', (1119, 1128), False, 'from sqlalchemy import Column, DateTime, ForeignKey, Integer, String, func, Boolean\n'), ((1145, 1175), 'sqlalchemy.Column', 'Column', (['Boolean'], {'default': '(False)'}), '(Boolean, default=False)\n', (1151, 1175), False, 'from sqlalchemy import Column, DateTime, ForeignKey, Integer, String, func, Boolean\n'), ((1192, 1222), 'sqlalchemy.Column', 'Column', (['Boolean'], {'default': '(False)'}), '(Boolean, default=False)\n', (1198, 1222), False, 'from sqlalchemy import Column, DateTime, ForeignKey, Integer, String, func, Boolean\n'), ((1239, 1269), 'sqlalchemy.Column', 'Column', (['Boolean'], {'default': '(False)'}), '(Boolean, default=False)\n', (1245, 1269), False, 'from sqlalchemy import Column, DateTime, ForeignKey, Integer, String, func, Boolean\n'), ((1284, 1314), 'sqlalchemy.Column', 'Column', (['Boolean'], {'default': '(False)'}), '(Boolean, default=False)\n', (1290, 1314), False, 'from sqlalchemy import Column, DateTime, ForeignKey, Integer, String, func, Boolean\n'), ((1676, 1709), 'sqlalchemy.Column', 'Column', (['Integer'], {'primary_key': '(True)'}), '(Integer, primary_key=True)\n', (1682, 1709), False, 'from sqlalchemy import Column, DateTime, ForeignKey, Integer, String, func, Boolean\n'), ((1839, 1872), 'sqlalchemy.Column', 'Column', (['Integer'], {'primary_key': '(True)'}), '(Integer, primary_key=True)\n', (1845, 1872), False, 'from sqlalchemy import Column, DateTime, ForeignKey, Integer, String, func, Boolean\n'), ((347, 358), 'sqlalchemy.String', 'String', (['(250)'], {}), '(250)\n', (353, 358), False, 'from sqlalchemy import Column, DateTime, ForeignKey, Integer, String, func, Boolean\n'), ((413, 424), 'sqlalchemy.String', 'String', (['(250)'], {}), '(250)\n', (419, 424), False, 'from sqlalchemy import Column, DateTime, ForeignKey, Integer, String, func, Boolean\n'), ((451, 462), 'sqlalchemy.String', 'String', (['(250)'], {}), '(250)\n', (457, 462), False, 'from sqlalchemy import Column, DateTime, ForeignKey, Integer, String, func, Boolean\n'), ((813, 835), 'sqlalchemy.ForeignKey', 'ForeignKey', (['"""users.id"""'], {}), "('users.id')\n", (823, 835), False, 'from sqlalchemy import Column, DateTime, ForeignKey, Integer, String, func, Boolean\n'), ((857, 868), 'sqlalchemy.String', 'String', (['(250)'], {}), '(250)\n', (863, 868), False, 'from sqlalchemy import Column, DateTime, ForeignKey, Integer, String, func, Boolean\n'), ((908, 919), 'sqlalchemy.String', 'String', (['(250)'], {}), '(250)\n', (914, 919), False, 'from sqlalchemy import Column, DateTime, ForeignKey, Integer, String, func, Boolean\n'), ((940, 951), 'sqlalchemy.String', 'String', (['(250)'], {}), '(250)\n', (946, 951), False, 'from sqlalchemy import Column, DateTime, ForeignKey, Integer, String, func, Boolean\n'), ((973, 984), 'sqlalchemy.String', 'String', (['(250)'], 
{}), '(250)\n', (979, 984), False, 'from sqlalchemy import Column, DateTime, ForeignKey, Integer, String, func, Boolean\n'), ((1903, 1925), 'sqlalchemy.ForeignKey', 'ForeignKey', (['"""users.id"""'], {}), "('users.id')\n", (1913, 1925), False, 'from sqlalchemy import Column, DateTime, ForeignKey, Integer, String, func, Boolean\n'), ((1948, 1959), 'sqlalchemy.String', 'String', (['(250)'], {}), '(250)\n', (1954, 1959), False, 'from sqlalchemy import Column, DateTime, ForeignKey, Integer, String, func, Boolean\n'), ((1982, 1993), 'sqlalchemy.String', 'String', (['(250)'], {}), '(250)\n', (1988, 1993), False, 'from sqlalchemy import Column, DateTime, ForeignKey, Integer, String, func, Boolean\n'), ((1025, 1039), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1037, 1039), False, 'from datetime import datetime\n'), ((1080, 1094), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1092, 1094), False, 'from datetime import datetime\n')] |
import fnmatch
import re
import os
import mkdocs
import mkdocs.plugins
import mkdocs.structure.files
class Include(mkdocs.plugins.BasePlugin):
"""A mkdocs plugin that adds all matching files from the input list."""
config_scheme = (
('ext', mkdocs.config.config_options.Type((str, list), default=[
".md", ".markdown", ".mdown", ".mkdn", ".mkd", ".css",
".js", ".javascript", ".html", ".htm", ".xml", ".json",
".bmp", ".tif", ".tiff", ".gif", ".svg", ".jpeg",
".jpg", ".jif", ".jfif", ".jp2", ".jpx", ".j2k",
".j2c", ".fpx", ".pcd", ".png", ".pdf", "CNAME",
".snippet", ".pages"
])),
('glob', mkdocs.config.config_options.Type((str, list), default=None)),
('regex', mkdocs.config.config_options.Type((str, list), default=None)),
)
def on_files(self, files, config):
exts = self.config['ext'] or []
if not isinstance(exts, list):
exts = [exts]
globs = self.config['glob'] or []
if not isinstance(globs, list):
globs = [globs]
regexes = self.config['regex'] or []
if not isinstance(regexes, list):
regexes = [regexes]
out = []
def include(name):
if os.path.splitext(name)[1] in exts:
return True
for g in globs:
if fnmatch.fnmatchcase(name, g):
return True
for r in regexes:
if re.match(r, name):
return True
return False
for i in files:
name = i.src_path
if not include(name):
continue
# Windows reports filenames as eg. a\\b\\c instead of a/b/c.
# To make the same globs/regexes match filenames on Windows and
# other OSes, let's try matching against converted filenames.
# On the other hand, Unix actually allows filenames to contain
# literal \\ characters (although it is rare), so we won't
# always convert them. We only convert if os.sep reports
# something unusual. Conversely, some future mkdocs might
# report Windows filenames using / separators regardless of
# os.sep, so we *always* test with / above.
if os.sep != '/':
namefix = name.replace(os.sep, '/')
if not include(namefix):
continue
out.append(i)
return mkdocs.structure.files.Files(out)
| [
"fnmatch.fnmatchcase",
"os.path.splitext",
"re.match",
"mkdocs.structure.files.Files",
"mkdocs.config.config_options.Type"
] | [((2527, 2560), 'mkdocs.structure.files.Files', 'mkdocs.structure.files.Files', (['out'], {}), '(out)\n', (2555, 2560), False, 'import mkdocs\n'), ((260, 613), 'mkdocs.config.config_options.Type', 'mkdocs.config.config_options.Type', (['(str, list)'], {'default': "['.md', '.markdown', '.mdown', '.mkdn', '.mkd', '.css', '.js',\n '.javascript', '.html', '.htm', '.xml', '.json', '.bmp', '.tif',\n '.tiff', '.gif', '.svg', '.jpeg', '.jpg', '.jif', '.jfif', '.jp2',\n '.jpx', '.j2k', '.j2c', '.fpx', '.pcd', '.png', '.pdf', 'CNAME',\n '.snippet', '.pages']"}), "((str, list), default=['.md', '.markdown',\n '.mdown', '.mkdn', '.mkd', '.css', '.js', '.javascript', '.html',\n '.htm', '.xml', '.json', '.bmp', '.tif', '.tiff', '.gif', '.svg',\n '.jpeg', '.jpg', '.jif', '.jfif', '.jp2', '.jpx', '.j2k', '.j2c',\n '.fpx', '.pcd', '.png', '.pdf', 'CNAME', '.snippet', '.pages'])\n", (293, 613), False, 'import mkdocs\n'), ((699, 759), 'mkdocs.config.config_options.Type', 'mkdocs.config.config_options.Type', (['(str, list)'], {'default': 'None'}), '((str, list), default=None)\n', (732, 759), False, 'import mkdocs\n'), ((780, 840), 'mkdocs.config.config_options.Type', 'mkdocs.config.config_options.Type', (['(str, list)'], {'default': 'None'}), '((str, list), default=None)\n', (813, 840), False, 'import mkdocs\n'), ((1393, 1421), 'fnmatch.fnmatchcase', 'fnmatch.fnmatchcase', (['name', 'g'], {}), '(name, g)\n', (1412, 1421), False, 'import fnmatch\n'), ((1504, 1521), 're.match', 're.match', (['r', 'name'], {}), '(r, name)\n', (1512, 1521), False, 'import re\n'), ((1283, 1305), 'os.path.splitext', 'os.path.splitext', (['name'], {}), '(name)\n', (1299, 1305), False, 'import os\n')] |
import sys
arguments = sys.argv
test_case = open(arguments[1], "r").read()
team_output = open(arguments[2], "r").read()
if team_output != test_case:
sys.exit(43)
else:
sys.exit(42)
| [
"sys.exit"
] | [((156, 168), 'sys.exit', 'sys.exit', (['(43)'], {}), '(43)\n', (164, 168), False, 'import sys\n'), ((179, 191), 'sys.exit', 'sys.exit', (['(42)'], {}), '(42)\n', (187, 191), False, 'import sys\n')] |
from flask import Blueprint
blueprint = Blueprint(
"account", __name__, url_prefix="/account", static_folder="../static"
)
from app.account import views, admin
| [
"flask.Blueprint"
] | [((41, 126), 'flask.Blueprint', 'Blueprint', (['"""account"""', '__name__'], {'url_prefix': '"""/account"""', 'static_folder': '"""../static"""'}), "('account', __name__, url_prefix='/account', static_folder='../static'\n )\n", (50, 126), False, 'from flask import Blueprint\n')] |
__author__ = "<NAME>"
__copyright__ = "Copyright 2019, <NAME>"
__email__ = "<EMAIL>"
__license__ = "MIT"
__version__ = "0.0.1"
# Imports
from snakemake.shell import shell
import tempfile
import os
# Shortcuts
opt = snakemake.params.get("opt", "")
threads = snakemake.threads if snakemake.threads >= 4 else 4
view_threads = sort_threads = threads//4
align_threads = threads - (view_threads + sort_threads)
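# e.g. with threads = 8: view_threads = 2, sort_threads = 2, align_threads = 4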
outdir = os.path.dirname(os.path.abspath(snakemake.output.bam))
fastq = snakemake.input.fastq
ref = snakemake.input.ref
bam = snakemake.output.bam
# Run shell commands
shell("echo '#### NGMLR + SAMTOOLS VIEW & SORT LOG ####' > {snakemake.log}")
with tempfile.TemporaryDirectory(dir=outdir) as temp_dir:
shell("ngmlr -t {align_threads} {opt} -r {ref} -q {fastq} 2>> {snakemake.log}|\
samtools view -@ {view_threads} -bh 2>> {snakemake.log} |\
samtools sort -@ {sort_threads} -T {temp_dir} -O bam > {bam} 2>> {snakemake.log}")
shell("samtools index {bam}")
| [
"os.path.abspath",
"snakemake.shell.shell",
"tempfile.TemporaryDirectory"
] | [((576, 652), 'snakemake.shell.shell', 'shell', (['"""echo \'#### NGMLR + SAMTOOLS VIEW & SORT LOG ####\' > {snakemake.log}"""'], {}), '("echo \'#### NGMLR + SAMTOOLS VIEW & SORT LOG ####\' > {snakemake.log}")\n', (581, 652), False, 'from snakemake.shell import shell\n'), ((956, 985), 'snakemake.shell.shell', 'shell', (['"""samtools index {bam}"""'], {}), "('samtools index {bam}')\n", (961, 985), False, 'from snakemake.shell import shell\n'), ((432, 469), 'os.path.abspath', 'os.path.abspath', (['snakemake.output.bam'], {}), '(snakemake.output.bam)\n', (447, 469), False, 'import os\n'), ((659, 698), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {'dir': 'outdir'}), '(dir=outdir)\n', (686, 698), False, 'import tempfile\n'), ((716, 960), 'snakemake.shell.shell', 'shell', (['"""ngmlr -t {align_threads} {opt} -r {ref} -q {fastq} 2>> {snakemake.log}| samtools view -@ {view_threads} -bh 2>> {snakemake.log} | samtools sort -@ {sort_threads} -T {temp_dir} -O bam > {bam} 2>> {snakemake.log}"""'], {}), "(\n 'ngmlr -t {align_threads} {opt} -r {ref} -q {fastq} 2>> {snakemake.log}| samtools view -@ {view_threads} -bh 2>> {snakemake.log} | samtools sort -@ {sort_threads} -T {temp_dir} -O bam > {bam} 2>> {snakemake.log}'\n )\n", (721, 960), False, 'from snakemake.shell import shell\n')] |
import numpy as np
from classification.classifier import Classifier
class NaiveBayes(Classifier):
def __init__(self, name='Naive Bayes'):
super().__init__(1,1,name,_type=3)
self.name = name
def _predict(self, x):
if len(self._labels) == 0:
print("It is necessary to train the classifier before using it to make predictions")
return
# it might look complex but it's just prior + posterior probabilities for each label
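# i.e. argmax_i [ log P(C_i) + sum_j log N(x_j | mean_ij, var_ij) ] -- the log-posterior up to a constant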
probs = np.array([np.log(self.P_C[i])+np.sum(np.log(self.gaussian(x,i))) for i in range(self.M)])
return self._labels[np.argmax(probs)]
def fit(self, X, Y, *args, **kwargs):
if not isinstance(Y, np.ndarray):
Y = np.array(Y)
self._labels = np.unique(Y)
self.N = X.shape[1]
self.M = len(self._labels)
self.mean = np.zeros(X.shape)
self.var = np.zeros(X.shape)
self.P_C = np.array([len(np.where(Y==i)[0])/float(len(Y)) for i in self._labels])
# for each class
for idx, cl in enumerate(self._labels):
indexes = np.where(Y==cl)[0]
self.mean[idx,: ] = np.mean(X[indexes], axis=0)
self.var[idx,:] = np.var(X[indexes], axis=0)
def gaussian(self, x, idx):
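# per-feature Gaussian likelihood N(x_j | mean[idx, j], var[idx, j]) for class index idx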
return np.exp(-((x-self.mean[idx])**2/(2*self.var[idx])))/np.sqrt(2*np.pi*self.var[idx])
def _save(self, file):
file.create_dataset('labels', self._labels.shape, self._labels.dtype, self._labels, compression="gzip")
file.create_dataset('mean', self.mean.shape, self.mean.dtype, self.mean, compression="gzip")
file.create_dataset('variance', self.var.shape, self.var.dtype, self.var, compression="gzip")
file.create_dataset('priors', self.P_C.shape, self.P_C.dtype, self.P_C, compression="gzip")
def _load(self, file):
self._labels = np.array(file['labels'])
self.mean = np.array(file['mean'])
self.var = np.array(file['variance'])
self.P_C = np.array(file['priors']) | [
"numpy.mean",
"numpy.sqrt",
"numpy.unique",
"numpy.where",
"numpy.log",
"numpy.argmax",
"numpy.exp",
"numpy.array",
"numpy.zeros",
"numpy.var"
] | [((794, 806), 'numpy.unique', 'np.unique', (['Y'], {}), '(Y)\n', (803, 806), True, 'import numpy as np\n'), ((893, 910), 'numpy.zeros', 'np.zeros', (['X.shape'], {}), '(X.shape)\n', (901, 910), True, 'import numpy as np\n'), ((931, 948), 'numpy.zeros', 'np.zeros', (['X.shape'], {}), '(X.shape)\n', (939, 948), True, 'import numpy as np\n'), ((1914, 1938), 'numpy.array', 'np.array', (["file['labels']"], {}), "(file['labels'])\n", (1922, 1938), True, 'import numpy as np\n'), ((1960, 1982), 'numpy.array', 'np.array', (["file['mean']"], {}), "(file['mean'])\n", (1968, 1982), True, 'import numpy as np\n'), ((2003, 2029), 'numpy.array', 'np.array', (["file['variance']"], {}), "(file['variance'])\n", (2011, 2029), True, 'import numpy as np\n'), ((2050, 2074), 'numpy.array', 'np.array', (["file['priors']"], {}), "(file['priors'])\n", (2058, 2074), True, 'import numpy as np\n'), ((633, 649), 'numpy.argmax', 'np.argmax', (['probs'], {}), '(probs)\n', (642, 649), True, 'import numpy as np\n'), ((756, 767), 'numpy.array', 'np.array', (['Y'], {}), '(Y)\n', (764, 767), True, 'import numpy as np\n'), ((1192, 1219), 'numpy.mean', 'np.mean', (['X[indexes]'], {'axis': '(0)'}), '(X[indexes], axis=0)\n', (1199, 1219), True, 'import numpy as np\n'), ((1251, 1277), 'numpy.var', 'np.var', (['X[indexes]'], {'axis': '(0)'}), '(X[indexes], axis=0)\n', (1257, 1277), True, 'import numpy as np\n'), ((1329, 1387), 'numpy.exp', 'np.exp', (['(-((x - self.mean[idx]) ** 2 / (2 * self.var[idx])))'], {}), '(-((x - self.mean[idx]) ** 2 / (2 * self.var[idx])))\n', (1335, 1387), True, 'import numpy as np\n'), ((1380, 1414), 'numpy.sqrt', 'np.sqrt', (['(2 * np.pi * self.var[idx])'], {}), '(2 * np.pi * self.var[idx])\n', (1387, 1414), True, 'import numpy as np\n'), ((1140, 1157), 'numpy.where', 'np.where', (['(Y == cl)'], {}), '(Y == cl)\n', (1148, 1157), True, 'import numpy as np\n'), ((524, 543), 'numpy.log', 'np.log', (['self.P_C[i]'], {}), '(self.P_C[i])\n', (530, 543), True, 'import numpy as np\n'), ((983, 999), 'numpy.where', 'np.where', (['(Y == i)'], {}), '(Y == i)\n', (991, 999), True, 'import numpy as np\n')] |
import pytest
def test_noun_chunks_is_parsed_nb(nb_tokenizer):
"""Test that noun_chunks raises Value Error for 'nb' language if Doc is not parsed."""
doc = nb_tokenizer("Smørsausen brukes bl.a. til")
with pytest.raises(ValueError):
list(doc.noun_chunks)
| [
"pytest.raises"
] | [((219, 244), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (232, 244), False, 'import pytest\n')] |
from unittest import TestCase
import unittest
from app import format_dimension_result, DUMMY, get_power_of_dummy_var, \
get_complete_dim_word, SHORTCUT_DICT, get_word_partition, replace_acronyms_and_return_word
from pint import UnitRegistry, formatter, pi_theorem
class TestFormat_dimension_result(TestCase):
def test_format_dimension_result(self):
#dummy top
res = DUMMY + " / L"
self.assertEqual(format_dimension_result(res).strip(), "L")
res = "( " + DUMMY + " * T ) / L"
self.assertEqual(format_dimension_result(res).strip(), "L / T")
res = "( T * " + DUMMY + " ) / L"
self.assertEqual(format_dimension_result(res).strip(), "L / T")
#dummy bottom
res = " T / ( V * " + DUMMY + ")"
self.assertEqual(format_dimension_result(res).strip(), "T / V")
res = " T / ( " + DUMMY + " * V )"
self.assertEqual(format_dimension_result(res).strip(), "T / V")
res = " T / " + DUMMY
self.assertEqual(format_dimension_result(res).strip(), "T")
#dummy power on bottom
res = " T * V / " + DUMMY + "^5"
self.assertEqual(format_dimension_result(res).strip(), "(T * V)^(1/5)")
res = " T / ( V * " + DUMMY + "^5 )"
self.assertEqual(format_dimension_result(res).strip(), "(T / V)^(1/5)")
res = " T / " + DUMMY + "^5"
self.assertEqual(format_dimension_result(res).strip(), "T^(1/5)")
res = DUMMY + "^5 / T "
self.assertEqual(format_dimension_result(res).strip(), "T^(1/5)")
res = DUMMY + "^5 * V / T "
self.assertEqual(format_dimension_result(res).strip(), "(T / V)^(1/5)")
res = "V * " + DUMMY + "^5 / T "
self.assertEqual(format_dimension_result(res).strip(), "(T / V)^(1/5)")
def test_get_complete_dim_word(self):
for key in SHORTCUT_DICT:
self.assertEqual(get_complete_dim_word(key), SHORTCUT_DICT[key])
key_with_spaces = " ".join(key)
self.assertEqual(get_complete_dim_word(key_with_spaces), SHORTCUT_DICT[key])
def test_get_word_partition(self):
self.assertEqual(get_word_partition("[T] / [V]"), ["[T]", " / " , "[V]"])
self.assertEqual(get_word_partition("[T] * [M] / [V]"), ["[T]", " * ", "[M]"," / ", "[V]"])
def test_replace_acronyms_and_return_word(self):
self.assertEqual(replace_acronyms_and_return_word(["[t]"]),"[time]")
self.assertEqual(replace_acronyms_and_return_word(["[t]", " * ", "[M]"]), "[time] * [mass]")
def test_pi_theorem(self):
ureg = UnitRegistry()
quantities = {"Va": "[time]", "Vb": "[length]", "Vc": "[length] / [time]"}
pi = pi_theorem(quantities, ureg)
pretty_result = formatter(pi[0].items(), single_denominator=True, power_fmt='{}^{}')
self.assertEqual(pretty_result, "Va * Vc / Vb")
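# sanity check: [time] * ([length]/[time]) / [length] is dimensionless, as the pi theorem requires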
quantities = {"Va" : "[time]", "Vb" : "[length]", "Vc" : "[acceleration]"}
pi = pi_theorem(quantities, ureg)
pretty_result = formatter(pi[0].items(), single_denominator=True, power_fmt='{}^{}')
self.assertEqual(pretty_result, "Va^2 * Vc / Vb")
def test_get_power_of_dummy_var(self):
res = DUMMY + "^10"
self.assertEqual(get_power_of_dummy_var(res), "10")
res = DUMMY
self.assertEqual(get_power_of_dummy_var(res), "")
res = "V * " + DUMMY + "^5"
self.assertEqual(get_power_of_dummy_var(res), "5")
if __name__ == "__main__":
unittest.main()
| [
"pint.pi_theorem",
"app.get_power_of_dummy_var",
"app.get_complete_dim_word",
"app.get_word_partition",
"app.format_dimension_result",
"unittest.main",
"app.replace_acronyms_and_return_word",
"pint.UnitRegistry"
] | [((3478, 3493), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3491, 3493), False, 'import unittest\n'), ((2574, 2588), 'pint.UnitRegistry', 'UnitRegistry', ([], {}), '()\n', (2586, 2588), False, 'from pint import UnitRegistry, formatter, pi_theorem\n'), ((2685, 2713), 'pint.pi_theorem', 'pi_theorem', (['quantities', 'ureg'], {}), '(quantities, ureg)\n', (2695, 2713), False, 'from pint import UnitRegistry, formatter, pi_theorem\n'), ((2960, 2988), 'pint.pi_theorem', 'pi_theorem', (['quantities', 'ureg'], {}), '(quantities, ureg)\n', (2970, 2988), False, 'from pint import UnitRegistry, formatter, pi_theorem\n'), ((2138, 2169), 'app.get_word_partition', 'get_word_partition', (['"""[T] / [V]"""'], {}), "('[T] / [V]')\n", (2156, 2169), False, 'from app import format_dimension_result, DUMMY, get_power_of_dummy_var, get_complete_dim_word, SHORTCUT_DICT, get_word_partition, replace_acronyms_and_return_word\n'), ((2220, 2257), 'app.get_word_partition', 'get_word_partition', (['"""[T] * [M] / [V]"""'], {}), "('[T] * [M] / [V]')\n", (2238, 2257), False, 'from app import format_dimension_result, DUMMY, get_power_of_dummy_var, get_complete_dim_word, SHORTCUT_DICT, get_word_partition, replace_acronyms_and_return_word\n'), ((2374, 2415), 'app.replace_acronyms_and_return_word', 'replace_acronyms_and_return_word', (["['[t]']"], {}), "(['[t]'])\n", (2406, 2415), False, 'from app import format_dimension_result, DUMMY, get_power_of_dummy_var, get_complete_dim_word, SHORTCUT_DICT, get_word_partition, replace_acronyms_and_return_word\n'), ((2451, 2506), 'app.replace_acronyms_and_return_word', 'replace_acronyms_and_return_word', (["['[t]', ' * ', '[M]']"], {}), "(['[t]', ' * ', '[M]'])\n", (2483, 2506), False, 'from app import format_dimension_result, DUMMY, get_power_of_dummy_var, get_complete_dim_word, SHORTCUT_DICT, get_word_partition, replace_acronyms_and_return_word\n'), ((3237, 3264), 'app.get_power_of_dummy_var', 'get_power_of_dummy_var', (['res'], {}), '(res)\n', (3259, 3264), False, 'from app import format_dimension_result, DUMMY, get_power_of_dummy_var, get_complete_dim_word, SHORTCUT_DICT, get_word_partition, replace_acronyms_and_return_word\n'), ((3317, 3344), 'app.get_power_of_dummy_var', 'get_power_of_dummy_var', (['res'], {}), '(res)\n', (3339, 3344), False, 'from app import format_dimension_result, DUMMY, get_power_of_dummy_var, get_complete_dim_word, SHORTCUT_DICT, get_word_partition, replace_acronyms_and_return_word\n'), ((3411, 3438), 'app.get_power_of_dummy_var', 'get_power_of_dummy_var', (['res'], {}), '(res)\n', (3433, 3438), False, 'from app import format_dimension_result, DUMMY, get_power_of_dummy_var, get_complete_dim_word, SHORTCUT_DICT, get_word_partition, replace_acronyms_and_return_word\n'), ((1892, 1918), 'app.get_complete_dim_word', 'get_complete_dim_word', (['key'], {}), '(key)\n', (1913, 1918), False, 'from app import format_dimension_result, DUMMY, get_power_of_dummy_var, get_complete_dim_word, SHORTCUT_DICT, get_word_partition, replace_acronyms_and_return_word\n'), ((2013, 2051), 'app.get_complete_dim_word', 'get_complete_dim_word', (['key_with_spaces'], {}), '(key_with_spaces)\n', (2034, 2051), False, 'from app import format_dimension_result, DUMMY, get_power_of_dummy_var, get_complete_dim_word, SHORTCUT_DICT, get_word_partition, replace_acronyms_and_return_word\n'), ((433, 461), 'app.format_dimension_result', 'format_dimension_result', (['res'], {}), '(res)\n', (456, 461), False, 'from app import format_dimension_result, DUMMY, get_power_of_dummy_var, 
get_complete_dim_word, SHORTCUT_DICT, get_word_partition, replace_acronyms_and_return_word\n'), ((543, 571), 'app.format_dimension_result', 'format_dimension_result', (['res'], {}), '(res)\n', (566, 571), False, 'from app import format_dimension_result, DUMMY, get_power_of_dummy_var, get_complete_dim_word, SHORTCUT_DICT, get_word_partition, replace_acronyms_and_return_word\n'), ((657, 685), 'app.format_dimension_result', 'format_dimension_result', (['res'], {}), '(res)\n', (680, 685), False, 'from app import format_dimension_result, DUMMY, get_power_of_dummy_var, get_complete_dim_word, SHORTCUT_DICT, get_word_partition, replace_acronyms_and_return_word\n'), ((794, 822), 'app.format_dimension_result', 'format_dimension_result', (['res'], {}), '(res)\n', (817, 822), False, 'from app import format_dimension_result, DUMMY, get_power_of_dummy_var, get_complete_dim_word, SHORTCUT_DICT, get_word_partition, replace_acronyms_and_return_word\n'), ((909, 937), 'app.format_dimension_result', 'format_dimension_result', (['res'], {}), '(res)\n', (932, 937), False, 'from app import format_dimension_result, DUMMY, get_power_of_dummy_var, get_complete_dim_word, SHORTCUT_DICT, get_word_partition, replace_acronyms_and_return_word\n'), ((1011, 1039), 'app.format_dimension_result', 'format_dimension_result', (['res'], {}), '(res)\n', (1034, 1039), False, 'from app import format_dimension_result, DUMMY, get_power_of_dummy_var, get_complete_dim_word, SHORTCUT_DICT, get_word_partition, replace_acronyms_and_return_word\n'), ((1152, 1180), 'app.format_dimension_result', 'format_dimension_result', (['res'], {}), '(res)\n', (1175, 1180), False, 'from app import format_dimension_result, DUMMY, get_power_of_dummy_var, get_complete_dim_word, SHORTCUT_DICT, get_word_partition, replace_acronyms_and_return_word\n'), ((1277, 1305), 'app.format_dimension_result', 'format_dimension_result', (['res'], {}), '(res)\n', (1300, 1305), False, 'from app import format_dimension_result, DUMMY, get_power_of_dummy_var, get_complete_dim_word, SHORTCUT_DICT, get_word_partition, replace_acronyms_and_return_word\n'), ((1394, 1422), 'app.format_dimension_result', 'format_dimension_result', (['res'], {}), '(res)\n', (1417, 1422), False, 'from app import format_dimension_result, DUMMY, get_power_of_dummy_var, get_complete_dim_word, SHORTCUT_DICT, get_word_partition, replace_acronyms_and_return_word\n'), ((1500, 1528), 'app.format_dimension_result', 'format_dimension_result', (['res'], {}), '(res)\n', (1523, 1528), False, 'from app import format_dimension_result, DUMMY, get_power_of_dummy_var, get_complete_dim_word, SHORTCUT_DICT, get_word_partition, replace_acronyms_and_return_word\n'), ((1610, 1638), 'app.format_dimension_result', 'format_dimension_result', (['res'], {}), '(res)\n', (1633, 1638), False, 'from app import format_dimension_result, DUMMY, get_power_of_dummy_var, get_complete_dim_word, SHORTCUT_DICT, get_word_partition, replace_acronyms_and_return_word\n'), ((1731, 1759), 'app.format_dimension_result', 'format_dimension_result', (['res'], {}), '(res)\n', (1754, 1759), False, 'from app import format_dimension_result, DUMMY, get_power_of_dummy_var, get_complete_dim_word, SHORTCUT_DICT, get_word_partition, replace_acronyms_and_return_word\n')] |
import random
from typing import List
from google.protobuf import wrappers_pb2
from google.protobuf.wrappers_pb2 import BytesValue
from dialog_bot_sdk.entities.Avatar import Avatar
from dialog_bot_sdk.entities.Group import Group
from dialog_bot_sdk.entities.Permissions import Permissions, GroupPermission
from dialog_bot_sdk.entities.User import User
from dialog_bot_sdk.entities.Peer import Peer, PeerType
from dialog_bot_sdk.utils import async_dec, AsyncTask, get_peer
from .service import ManagedService
from dialog_api import search_pb2, groups_pb2, peers_pb2, sequence_and_updates_pb2
class Groups(ManagedService):
"""Class for handling groups
"""
@async_dec()
def create_public_group(self, title: str, short_name: str) -> Group:
"""Create public group
:param title: title of group
:param short_name: group name
:return: Group
"""
request = groups_pb2.RequestCreateGroup(
title=title,
username=wrappers_pb2.StringValue(value=short_name),
group_type=groups_pb2.GROUPTYPE_GROUP
)
return self.__create_group(request)
@async_dec()
def create_private_group(self, title: str) -> Group:
"""Create private group
:param title: title of group
:return: Group
"""
request = groups_pb2.RequestCreateGroup(
title=title,
group_type=groups_pb2.GROUPTYPE_GROUP
)
return self.__create_group(request)
@async_dec()
def create_public_channel(self, title: str, short_name: str) -> Group:
"""Create public channel
:param title: title of group
:param short_name: group name
:return: Group
"""
request = groups_pb2.RequestCreateGroup(
title=title,
username=wrappers_pb2.StringValue(value=short_name),
group_type=groups_pb2.GROUPTYPE_CHANNEL
)
return self.__create_group(request)
@async_dec()
def create_private_channel(self, title: str) -> Group:
"""Create private channel
:param title: title of group
:return: Group
"""
request = groups_pb2.RequestCreateGroup(
title=title,
group_type=groups_pb2.GROUPTYPE_CHANNEL
)
return self.__create_group(request)
@async_dec()
def find_group_by_short_name(self, short_name: str) -> Group or None:
"""Find a Group by short_name
:param short_name: short_name of group
:return: Group or None if could not find
"""
request = search_pb2.RequestPeerSearch(
query=[
search_pb2.SearchCondition(
searchPeerTypeCondition=search_pb2.SearchPeerTypeCondition(
peer_type=search_pb2.SEARCHPEERTYPE_GROUPS
)
),
search_pb2.SearchCondition(
searchPieceText=search_pb2.SearchPieceText(query=short_name)
)
]
)
response = self.internal.search.PeerSearch(request).search_results
for result in response:
if result.peer.type == PeerType.PEERTYPE_GROUP and hasattr(result, 'shortname') and \
result.shortname.value == short_name:
return self.find_group_by_id(result.peer.id).wait()
@async_dec()
def find_group_by_id(self, group_id: int) -> Group or None:
"""Find and return Group by id
:param group_id: group's id
:return: Group or None if could not find
"""
out_peer = self.__get_out_peer(Peer(group_id, PeerType.PEERTYPE_GROUP))
if out_peer is None:
return None
request = sequence_and_updates_pb2.RequestGetReferencedEntitites(
groups=[out_peer]
)
result = self.internal.updates.GetReferencedEntitites(request).groups
self.manager.add_out_peer(peers_pb2.OutPeer(id=result[0].id, type=PeerType.PEERTYPE_GROUP,
access_hash=result[0].access_hash))
return Group.from_api(result[0])
@async_dec()
def load_members(self, group_peer: Peer or AsyncTask, limit: int = 0, cursor: bytes = b"",
cursor_flag: bool = False) -> List[User] or List[User] and bytes or None:
"""Load Group members by peer
:param group_peer: Peer or AsyncTask (in which located Group)
:param limit: count members
:param cursor: bytes object that specify to the user from whom to start (returned from this method)
:param cursor_flag: returned cursor? (True/False)
:return: list of User's
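Example (illustrative sketch; ``bot`` is a configured DialogBot): page through members 50 at a time:
members, cursor = bot.groups.load_members(group_peer, limit=50, cursor_flag=True).wait()
more, cursor = bot.groups.load_members(group_peer, limit=50, cursor=cursor, cursor_flag=True).wait()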
"""
group_peer = get_peer(group_peer)
out_peer = self.__get_out_peer(group_peer)
if out_peer is None:
return None
request = groups_pb2.RequestLoadMembers(
group=out_peer,
limit=limit,
next=BytesValue(value=cursor)
)
response = self.internal.groups.LoadMembers(request)
members = response.members
cursor = response.cursor.value
request = sequence_and_updates_pb2.RequestGetReferencedEntitites(
group_members=[
sequence_and_updates_pb2.GroupMembersSubset(
group_peer=out_peer,
member_ids=[member.uid for member in members]
)
]
)
if cursor_flag:
return [User.from_api(x) for x in self.internal.updates.GetReferencedEntitites(request).users], cursor
return [User.from_api(x) for x in self.internal.updates.GetReferencedEntitites(request).users]
@async_dec()
def kick_user(self, group_peer: Peer or AsyncTask, user_peer: Peer or AsyncTask) -> None:
"""Kick user from Group
:param group_peer: Peer or AsyncTask (in which located Group)
:param user_peer: Peer or AsyncTask (in which located User)
:return: None
"""
group_peer, user_peer = get_peer(group_peer), get_peer(user_peer)
group_out_peer, user_out_peer = self.__get_out_peer(group_peer), self.__get_out_peer(user_peer)
request = groups_pb2.RequestKickUser(
group_peer=group_out_peer,
user=user_out_peer,
rid=random.randint(0, 100000000),
)
self.internal.groups.KickUser(request)
@async_dec()
def invite_user(self, group_peer: Peer or AsyncTask, user_peer: Peer or AsyncTask) -> None:
"""Invite user in Group
:param group_peer: Peer or AsyncTask (in which located Group)
:param user_peer: Peer or AsyncTask (in which located User)
:return: None
"""
group_peer, user_peer = get_peer(group_peer), get_peer(user_peer)
group_out_peer, user_out_peer = self.__get_out_peer(group_peer), self.__get_out_peer(user_peer)
request = groups_pb2.RequestInviteUser(
group_peer=group_out_peer,
user=user_out_peer,
rid=random.randint(0, 100000000),
)
self.internal.groups.InviteUser(request)
@async_dec()
def set_default_group_permissions(self, group_peer: Peer or AsyncTask,
add_permissions: List[GroupPermission] = None,
del_permissions: List[GroupPermission] = None) -> None:
"""add/del default group permissions
:param group_peer: Peer or AsyncTask (in which located Group)
:param add_permissions: list of permissions to add
:param del_permissions: list of permissions to delete
:return: None
"""
group_peer = get_peer(group_peer)
if del_permissions is None:
del_permissions = []
if add_permissions is None:
add_permissions = []
group_out_peer = self.__get_out_peer(group_peer)
add_request = groups_pb2.RequestEditGroupBasePermissions(
group_peer=group_out_peer,
random_id=random.randint(0, 100000000),
granted_permissions=[x for x in add_permissions]
)
del_request = groups_pb2.RequestEditGroupBasePermissions(
group_peer=group_out_peer,
random_id=random.randint(0, 100000000),
revoked_permissions=[x for x in del_permissions]
)
self.internal.groups.EditGroupBasePermissions(add_request)
self.internal.groups.EditGroupBasePermissions(del_request)
@async_dec()
def set_member_permissions(self, group_peer: Peer or AsyncTask, user_peer: Peer or AsyncTask,
add_permissions: List[GroupPermission] = None,
del_permissions: List[GroupPermission] = None) -> None:
"""add/del group's member permissions
:param group_peer: Peer or AsyncTask (in which located Group)
:param user_peer: Peer or AsyncTask (in which located User)
:param add_permissions: list of permissions to add
:param del_permissions: list of permissions to delete
:return: None
"""
group_peer, user_peer = get_peer(group_peer), get_peer(user_peer)
if del_permissions is None:
del_permissions = []
if add_permissions is None:
add_permissions = []
group_out_peer, user_out_peer = self.__get_out_peer(group_peer), self.__get_out_peer(user_peer)
add_request = groups_pb2.RequestEditMemberPermissions(
group_peer=group_out_peer,
user_peer=user_out_peer,
granted_permissions=[x for x in add_permissions]
)
del_request = groups_pb2.RequestEditMemberPermissions(
group_peer=group_out_peer,
user_peer=user_out_peer,
revoked_permissions=[x for x in del_permissions]
)
self.internal.groups.EditMemberPermissions(add_request)
self.internal.groups.EditMemberPermissions(del_request)
@async_dec()
def get_group_member_permissions(self, group_peer: Peer or AsyncTask, user_peers: List[Peer or AsyncTask]) \
-> List[Permissions]:
"""return group member's permissions
:param group_peer: Peer or AsyncTask (in which located Group)
:param user_peers: Peer or AsyncTask (in which located User)
:return: group member's permissions
"""
group_peer, user_peers = get_peer(group_peer), [get_peer(x) for x in user_peers]
request = groups_pb2.RequestGetGroupMemberPermissions(
group_id=group_peer.id,
user_ids=[peer.id for peer in user_peers]
)
return [Permissions.from_api(x) for x in self.internal.groups.GetGroupMemberPermissions(request).permissions]
@async_dec()
def edit_group_title(self, group_peer: Peer or AsyncTask, title: str) -> None:
"""change group's title
:param group_peer: Peer or AsyncTask (in which located Group)
:param title: new title
:return: None
"""
group_peer = get_peer(group_peer)
out_peer = self.__get_out_peer(group_peer)
request = groups_pb2.RequestEditGroupTitle(
group_peer=out_peer,
rid=random.randint(0, 100000000),
title=title
)
self.internal.groups.EditGroupTitle(request)
@async_dec()
def edit_avatar(self, group_peer: Peer or AsyncTask, file: str) -> Avatar or None:
"""change group's avatar
:param group_peer: Peer or AsyncTask (in which located Group)
:param file: file path
:return: Avatar
"""
group_peer = get_peer(group_peer)
out_peer = self.__get_out_peer(group_peer)
location = self.internal.uploading.upload_file(file).wait()
if location is None:
return None
request = groups_pb2.RequestEditGroupAvatar(
group_peer=out_peer,
rid=random.randint(0, 100000000),
file_location=location.to_api()
)
return Avatar.from_api(self.internal.groups.EditGroupAvatar(request).avatar)
@async_dec()
def remove_group_avatar(self, group_peer: Peer or AsyncTask) -> None:
"""deleted group's avatar
:param group_peer: Peer or AsyncTask (in which located User)
:return: None
"""
group_peer = get_peer(group_peer)
out_peer = self.__get_out_peer(group_peer)
request = groups_pb2.RequestRemoveGroupAvatar(
group_peer=out_peer,
rid=random.randint(0, 100000000),
)
self.internal.groups.RemoveGroupAvatar(request)
@async_dec()
def edit_group_about(self, group_peer: Peer or AsyncTask, about: str) -> None:
"""change group's "about"
        :param group_peer: Peer or AsyncTask (in which located Group)
:param about: about text
:return: None
"""
group_peer = get_peer(group_peer)
out_peer = self.__get_out_peer(group_peer)
request = groups_pb2.RequestEditGroupAbout(
group_peer=out_peer,
rid=random.randint(0, 100000000),
about=wrappers_pb2.StringValue(value=about)
)
self.internal.groups.EditGroupAbout(request)
@async_dec()
def leave_group(self, group_peer: Peer or AsyncTask) -> None:
"""leave from group
:param group_peer: Peer or AsyncTask (in which located Group)
:return: None
"""
group_peer = get_peer(group_peer)
out_peer = self.__get_out_peer(group_peer)
request = groups_pb2.RequestLeaveGroup(
group_peer=out_peer,
rid=random.randint(0, 100000000),
)
self.internal.groups.LeaveGroup(request)
@async_dec()
def make_user_admin(self, group_peer: Peer or AsyncTask, user_peer: Peer or AsyncTask,
permissions: List[GroupPermission]) -> None:
"""Set new user's permissions (old permissions will be revoke)
:param group_peer: Peer or AsyncTask (in which located Group)
:param user_peer: Peer or AsyncTask (in which located User)
:param permissions: permissions list (for admin)
:return: None
"""
group_peer, user_peer = get_peer(group_peer), get_peer(user_peer)
group_out_peer, user_out_peer = self.__get_out_peer(group_peer), self.__get_out_peer(user_peer)
request = groups_pb2.RequestMakeUserAdmin(
group_peer=group_out_peer,
user_peer=user_out_peer,
permissions=permissions
)
self.internal.groups.MakeUserAdmin(request)
@async_dec()
def transfer_ownership(self, group_peer: Peer or AsyncTask, user_peer: Peer or AsyncTask) -> None:
"""change group's owner to user
:param group_peer: Peer or AsyncTask (in which located Group)
:param user_peer: Peer or AsyncTask (in which located User)
:return: None
"""
group_peer, user_peer = get_peer(group_peer), get_peer(user_peer)
out_peer = self.__get_out_peer(group_peer)
request = groups_pb2.RequestTransferOwnership(
group_peer=out_peer,
new_owner=user_peer.id
)
self.internal.groups.TransferOwnership(request)
@async_dec()
def get_group_invite_url(self, group_peer: Peer or AsyncTask) -> str:
"""return group's invite url
:param group_peer: Peer or AsyncTask (in which located Group)
:return: invite url
"""
group_peer = get_peer(group_peer)
out_peer = self.__get_out_peer(group_peer)
request = groups_pb2.RequestGetGroupInviteUrl(
group_peer=out_peer
)
return self.internal.groups.GetGroupInviteUrl(request).url
@async_dec()
def get_group_invite_url_base(self) -> str:
"""return group's invite url without token/short_name (example https://domain/@)
:return: invite url (string)
"""
request = groups_pb2.RequestGetGroupInviteUrlBase()
return self.internal.groups.GetGroupInviteUrlBase(request).url
@async_dec()
def revoke_invite_url(self, group_peer: Peer or AsyncTask) -> str:
"""revoke current invite url and return new group's invite url
:return: invite url
"""
group_peer = get_peer(group_peer)
out_peer = self.__get_out_peer(group_peer)
request = groups_pb2.RequestRevokeInviteUrl(
group_peer=out_peer
)
return self.internal.groups.RevokeInviteUrl(request).url
@async_dec()
def join_group(self, token_or_url: str) -> Group:
"""join to group by token or invite url (used for private groups)
:param token_or_url: group's token or invite url
:return: Group
"""
request = groups_pb2.RequestJoinGroup(
token=token_or_url
)
response = self.internal.groups.JoinGroup(request)
self.manager.add_out_peer(peers_pb2.OutPeer(id=response.group.id, access_hash=response.group.access_hash,
type=PeerType.PEERTYPE_GROUP))
return Group.from_api(response.group)
@async_dec()
def join_group_by_peer(self, group_peer: Peer or AsyncTask) -> None:
"""join to group by group's peer (used for public groups)
:param group_peer: Peer or AsyncTask (in which located Group)
:return: None
"""
group_peer = get_peer(group_peer)
out_peer = self.__get_out_peer(group_peer)
request = groups_pb2.RequestJoinGroupByPeer(
peer=out_peer
)
self.internal.groups.JoinGroupByPeer(request)
def __get_out_peer(self, peer: peers_pb2.Peer) -> peers_pb2.GroupOutPeer or peers_pb2.UserOutPeer or None:
out_peer = self.manager.get_out_peer(peer)
if out_peer is None:
return None
if peer.type == PeerType.PEERTYPE_GROUP:
return peers_pb2.GroupOutPeer(group_id=out_peer.id, access_hash=out_peer.access_hash)
elif peer.type == PeerType.PEERTYPE_PRIVATE:
return peers_pb2.UserOutPeer(uid=out_peer.id, access_hash=out_peer.access_hash)
def __create_group(self, request: groups_pb2.RequestCreateGroup) -> Group:
group = self.internal.groups.CreateGroup(request).group
self.manager.add_out_peer(
peers_pb2.OutPeer(id=group.id, access_hash=group.access_hash, type=PeerType.PEERTYPE_GROUP))
return Group.from_api(group)
| [
"dialog_api.peers_pb2.OutPeer",
"google.protobuf.wrappers_pb2.BytesValue",
"dialog_api.peers_pb2.UserOutPeer",
"dialog_bot_sdk.utils.get_peer",
"dialog_api.groups_pb2.RequestGetGroupMemberPermissions",
"dialog_api.search_pb2.SearchPieceText",
"dialog_bot_sdk.utils.async_dec",
"dialog_api.sequence_and_updates_pb2.GroupMembersSubset",
"dialog_bot_sdk.entities.Group.Group.from_api",
"random.randint",
"dialog_api.peers_pb2.GroupOutPeer",
"dialog_api.groups_pb2.RequestGetGroupInviteUrlBase",
"dialog_api.search_pb2.SearchPeerTypeCondition",
"dialog_api.groups_pb2.RequestMakeUserAdmin",
"dialog_api.groups_pb2.RequestEditMemberPermissions",
"dialog_api.groups_pb2.RequestGetGroupInviteUrl",
"dialog_api.groups_pb2.RequestCreateGroup",
"google.protobuf.wrappers_pb2.StringValue",
"dialog_bot_sdk.entities.Peer.Peer",
"dialog_api.groups_pb2.RequestTransferOwnership",
"dialog_api.groups_pb2.RequestRevokeInviteUrl",
"dialog_bot_sdk.entities.Permissions.Permissions.from_api",
"dialog_api.groups_pb2.RequestJoinGroup",
"dialog_api.groups_pb2.RequestJoinGroupByPeer",
"dialog_api.sequence_and_updates_pb2.RequestGetReferencedEntitites",
"dialog_bot_sdk.entities.User.User.from_api"
] | [((672, 683), 'dialog_bot_sdk.utils.async_dec', 'async_dec', ([], {}), '()\n', (681, 683), False, 'from dialog_bot_sdk.utils import async_dec, AsyncTask, get_peer\n'), ((1148, 1159), 'dialog_bot_sdk.utils.async_dec', 'async_dec', ([], {}), '()\n', (1157, 1159), False, 'from dialog_bot_sdk.utils import async_dec, AsyncTask, get_peer\n'), ((1506, 1517), 'dialog_bot_sdk.utils.async_dec', 'async_dec', ([], {}), '()\n', (1515, 1517), False, 'from dialog_bot_sdk.utils import async_dec, AsyncTask, get_peer\n'), ((1989, 2000), 'dialog_bot_sdk.utils.async_dec', 'async_dec', ([], {}), '()\n', (1998, 2000), False, 'from dialog_bot_sdk.utils import async_dec, AsyncTask, get_peer\n'), ((2354, 2365), 'dialog_bot_sdk.utils.async_dec', 'async_dec', ([], {}), '()\n', (2363, 2365), False, 'from dialog_bot_sdk.utils import async_dec, AsyncTask, get_peer\n'), ((3391, 3402), 'dialog_bot_sdk.utils.async_dec', 'async_dec', ([], {}), '()\n', (3400, 3402), False, 'from dialog_bot_sdk.utils import async_dec, AsyncTask, get_peer\n'), ((4161, 4172), 'dialog_bot_sdk.utils.async_dec', 'async_dec', ([], {}), '()\n', (4170, 4172), False, 'from dialog_bot_sdk.utils import async_dec, AsyncTask, get_peer\n'), ((5714, 5725), 'dialog_bot_sdk.utils.async_dec', 'async_dec', ([], {}), '()\n', (5723, 5725), False, 'from dialog_bot_sdk.utils import async_dec, AsyncTask, get_peer\n'), ((6430, 6441), 'dialog_bot_sdk.utils.async_dec', 'async_dec', ([], {}), '()\n', (6439, 6441), False, 'from dialog_bot_sdk.utils import async_dec, AsyncTask, get_peer\n'), ((7152, 7163), 'dialog_bot_sdk.utils.async_dec', 'async_dec', ([], {}), '()\n', (7161, 7163), False, 'from dialog_bot_sdk.utils import async_dec, AsyncTask, get_peer\n'), ((8524, 8535), 'dialog_bot_sdk.utils.async_dec', 'async_dec', ([], {}), '()\n', (8533, 8535), False, 'from dialog_bot_sdk.utils import async_dec, AsyncTask, get_peer\n'), ((10010, 10021), 'dialog_bot_sdk.utils.async_dec', 'async_dec', ([], {}), '()\n', (10019, 10021), False, 'from dialog_bot_sdk.utils import async_dec, AsyncTask, get_peer\n'), ((10786, 10797), 'dialog_bot_sdk.utils.async_dec', 'async_dec', ([], {}), '()\n', (10795, 10797), False, 'from dialog_bot_sdk.utils import async_dec, AsyncTask, get_peer\n'), ((11367, 11378), 'dialog_bot_sdk.utils.async_dec', 'async_dec', ([], {}), '()\n', (11376, 11378), False, 'from dialog_bot_sdk.utils import async_dec, AsyncTask, get_peer\n'), ((12128, 12139), 'dialog_bot_sdk.utils.async_dec', 'async_dec', ([], {}), '()\n', (12137, 12139), False, 'from dialog_bot_sdk.utils import async_dec, AsyncTask, get_peer\n'), ((12651, 12662), 'dialog_bot_sdk.utils.async_dec', 'async_dec', ([], {}), '()\n', (12660, 12662), False, 'from dialog_bot_sdk.utils import async_dec, AsyncTask, get_peer\n'), ((13266, 13277), 'dialog_bot_sdk.utils.async_dec', 'async_dec', ([], {}), '()\n', (13275, 13277), False, 'from dialog_bot_sdk.utils import async_dec, AsyncTask, get_peer\n'), ((13762, 13773), 'dialog_bot_sdk.utils.async_dec', 'async_dec', ([], {}), '()\n', (13771, 13773), False, 'from dialog_bot_sdk.utils import async_dec, AsyncTask, get_peer\n'), ((14644, 14655), 'dialog_bot_sdk.utils.async_dec', 'async_dec', ([], {}), '()\n', (14653, 14655), False, 'from dialog_bot_sdk.utils import async_dec, AsyncTask, get_peer\n'), ((15292, 15303), 'dialog_bot_sdk.utils.async_dec', 'async_dec', ([], {}), '()\n', (15301, 15303), False, 'from dialog_bot_sdk.utils import async_dec, AsyncTask, get_peer\n'), ((15789, 15800), 'dialog_bot_sdk.utils.async_dec', 'async_dec', ([], {}), '()\n', (15798, 15800), 
False, 'from dialog_bot_sdk.utils import async_dec, AsyncTask, get_peer\n'), ((16125, 16136), 'dialog_bot_sdk.utils.async_dec', 'async_dec', ([], {}), '()\n', (16134, 16136), False, 'from dialog_bot_sdk.utils import async_dec, AsyncTask, get_peer\n'), ((16579, 16590), 'dialog_bot_sdk.utils.async_dec', 'async_dec', ([], {}), '()\n', (16588, 16590), False, 'from dialog_bot_sdk.utils import async_dec, AsyncTask, get_peer\n'), ((17208, 17219), 'dialog_bot_sdk.utils.async_dec', 'async_dec', ([], {}), '()\n', (17217, 17219), False, 'from dialog_bot_sdk.utils import async_dec, AsyncTask, get_peer\n'), ((1340, 1426), 'dialog_api.groups_pb2.RequestCreateGroup', 'groups_pb2.RequestCreateGroup', ([], {'title': 'title', 'group_type': 'groups_pb2.GROUPTYPE_GROUP'}), '(title=title, group_type=groups_pb2.\n GROUPTYPE_GROUP)\n', (1369, 1426), False, 'from dialog_api import search_pb2, groups_pb2, peers_pb2, sequence_and_updates_pb2\n'), ((2186, 2274), 'dialog_api.groups_pb2.RequestCreateGroup', 'groups_pb2.RequestCreateGroup', ([], {'title': 'title', 'group_type': 'groups_pb2.GROUPTYPE_CHANNEL'}), '(title=title, group_type=groups_pb2.\n GROUPTYPE_CHANNEL)\n', (2215, 2274), False, 'from dialog_api import search_pb2, groups_pb2, peers_pb2, sequence_and_updates_pb2\n'), ((3753, 3826), 'dialog_api.sequence_and_updates_pb2.RequestGetReferencedEntitites', 'sequence_and_updates_pb2.RequestGetReferencedEntitites', ([], {'groups': '[out_peer]'}), '(groups=[out_peer])\n', (3807, 3826), False, 'from dialog_api import search_pb2, groups_pb2, peers_pb2, sequence_and_updates_pb2\n'), ((4129, 4154), 'dialog_bot_sdk.entities.Group.Group.from_api', 'Group.from_api', (['result[0]'], {}), '(result[0])\n', (4143, 4154), False, 'from dialog_bot_sdk.entities.Group import Group\n'), ((4739, 4759), 'dialog_bot_sdk.utils.get_peer', 'get_peer', (['group_peer'], {}), '(group_peer)\n', (4747, 4759), False, 'from dialog_bot_sdk.utils import async_dec, AsyncTask, get_peer\n'), ((7710, 7730), 'dialog_bot_sdk.utils.get_peer', 'get_peer', (['group_peer'], {}), '(group_peer)\n', (7718, 7730), False, 'from dialog_bot_sdk.utils import async_dec, AsyncTask, get_peer\n'), ((9477, 9622), 'dialog_api.groups_pb2.RequestEditMemberPermissions', 'groups_pb2.RequestEditMemberPermissions', ([], {'group_peer': 'group_out_peer', 'user_peer': 'user_out_peer', 'granted_permissions': '[x for x in add_permissions]'}), '(group_peer=group_out_peer,\n user_peer=user_out_peer, granted_permissions=[x for x in add_permissions])\n', (9516, 9622), False, 'from dialog_api import search_pb2, groups_pb2, peers_pb2, sequence_and_updates_pb2\n'), ((9687, 9832), 'dialog_api.groups_pb2.RequestEditMemberPermissions', 'groups_pb2.RequestEditMemberPermissions', ([], {'group_peer': 'group_out_peer', 'user_peer': 'user_out_peer', 'revoked_permissions': '[x for x in del_permissions]'}), '(group_peer=group_out_peer,\n user_peer=user_out_peer, revoked_permissions=[x for x in del_permissions])\n', (9726, 9832), False, 'from dialog_api import search_pb2, groups_pb2, peers_pb2, sequence_and_updates_pb2\n'), ((10517, 10631), 'dialog_api.groups_pb2.RequestGetGroupMemberPermissions', 'groups_pb2.RequestGetGroupMemberPermissions', ([], {'group_id': 'group_peer.id', 'user_ids': '[peer.id for peer in user_peers]'}), '(group_id=group_peer.id,\n user_ids=[peer.id for peer in user_peers])\n', (10560, 10631), False, 'from dialog_api import search_pb2, groups_pb2, peers_pb2, sequence_and_updates_pb2\n'), ((11071, 11091), 'dialog_bot_sdk.utils.get_peer', 'get_peer', (['group_peer'], {}), 
'(group_peer)\n', (11079, 11091), False, 'from dialog_bot_sdk.utils import async_dec, AsyncTask, get_peer\n'), ((11658, 11678), 'dialog_bot_sdk.utils.get_peer', 'get_peer', (['group_peer'], {}), '(group_peer)\n', (11666, 11678), False, 'from dialog_bot_sdk.utils import async_dec, AsyncTask, get_peer\n'), ((12373, 12393), 'dialog_bot_sdk.utils.get_peer', 'get_peer', (['group_peer'], {}), '(group_peer)\n', (12381, 12393), False, 'from dialog_bot_sdk.utils import async_dec, AsyncTask, get_peer\n'), ((12938, 12958), 'dialog_bot_sdk.utils.get_peer', 'get_peer', (['group_peer'], {}), '(group_peer)\n', (12946, 12958), False, 'from dialog_bot_sdk.utils import async_dec, AsyncTask, get_peer\n'), ((13498, 13518), 'dialog_bot_sdk.utils.get_peer', 'get_peer', (['group_peer'], {}), '(group_peer)\n', (13506, 13518), False, 'from dialog_bot_sdk.utils import async_dec, AsyncTask, get_peer\n'), ((14431, 14544), 'dialog_api.groups_pb2.RequestMakeUserAdmin', 'groups_pb2.RequestMakeUserAdmin', ([], {'group_peer': 'group_out_peer', 'user_peer': 'user_out_peer', 'permissions': 'permissions'}), '(group_peer=group_out_peer, user_peer=\n user_out_peer, permissions=permissions)\n', (14462, 14544), False, 'from dialog_api import search_pb2, groups_pb2, peers_pb2, sequence_and_updates_pb2\n'), ((15115, 15200), 'dialog_api.groups_pb2.RequestTransferOwnership', 'groups_pb2.RequestTransferOwnership', ([], {'group_peer': 'out_peer', 'new_owner': 'user_peer.id'}), '(group_peer=out_peer, new_owner=user_peer.id\n )\n', (15150, 15200), False, 'from dialog_api import search_pb2, groups_pb2, peers_pb2, sequence_and_updates_pb2\n'), ((15547, 15567), 'dialog_bot_sdk.utils.get_peer', 'get_peer', (['group_peer'], {}), '(group_peer)\n', (15555, 15567), False, 'from dialog_bot_sdk.utils import async_dec, AsyncTask, get_peer\n'), ((15637, 15693), 'dialog_api.groups_pb2.RequestGetGroupInviteUrl', 'groups_pb2.RequestGetGroupInviteUrl', ([], {'group_peer': 'out_peer'}), '(group_peer=out_peer)\n', (15672, 15693), False, 'from dialog_api import search_pb2, groups_pb2, peers_pb2, sequence_and_updates_pb2\n'), ((16006, 16047), 'dialog_api.groups_pb2.RequestGetGroupInviteUrlBase', 'groups_pb2.RequestGetGroupInviteUrlBase', ([], {}), '()\n', (16045, 16047), False, 'from dialog_api import search_pb2, groups_pb2, peers_pb2, sequence_and_updates_pb2\n'), ((16341, 16361), 'dialog_bot_sdk.utils.get_peer', 'get_peer', (['group_peer'], {}), '(group_peer)\n', (16349, 16361), False, 'from dialog_bot_sdk.utils import async_dec, AsyncTask, get_peer\n'), ((16431, 16485), 'dialog_api.groups_pb2.RequestRevokeInviteUrl', 'groups_pb2.RequestRevokeInviteUrl', ([], {'group_peer': 'out_peer'}), '(group_peer=out_peer)\n', (16464, 16485), False, 'from dialog_api import search_pb2, groups_pb2, peers_pb2, sequence_and_updates_pb2\n'), ((16830, 16877), 'dialog_api.groups_pb2.RequestJoinGroup', 'groups_pb2.RequestJoinGroup', ([], {'token': 'token_or_url'}), '(token=token_or_url)\n', (16857, 16877), False, 'from dialog_api import search_pb2, groups_pb2, peers_pb2, sequence_and_updates_pb2\n'), ((17171, 17201), 'dialog_bot_sdk.entities.Group.Group.from_api', 'Group.from_api', (['response.group'], {}), '(response.group)\n', (17185, 17201), False, 'from dialog_bot_sdk.entities.Group import Group\n'), ((17485, 17505), 'dialog_bot_sdk.utils.get_peer', 'get_peer', (['group_peer'], {}), '(group_peer)\n', (17493, 17505), False, 'from dialog_bot_sdk.utils import async_dec, AsyncTask, get_peer\n'), ((17575, 17623), 'dialog_api.groups_pb2.RequestJoinGroupByPeer', 
'groups_pb2.RequestJoinGroupByPeer', ([], {'peer': 'out_peer'}), '(peer=out_peer)\n', (17608, 17623), False, 'from dialog_api import search_pb2, groups_pb2, peers_pb2, sequence_and_updates_pb2\n'), ((18507, 18528), 'dialog_bot_sdk.entities.Group.Group.from_api', 'Group.from_api', (['group'], {}), '(group)\n', (18521, 18528), False, 'from dialog_bot_sdk.entities.Group import Group\n'), ((3640, 3679), 'dialog_bot_sdk.entities.Peer.Peer', 'Peer', (['group_id', 'PeerType.PEERTYPE_GROUP'], {}), '(group_id, PeerType.PEERTYPE_GROUP)\n', (3644, 3679), False, 'from dialog_bot_sdk.entities.Peer import Peer, PeerType\n'), ((3961, 4064), 'dialog_api.peers_pb2.OutPeer', 'peers_pb2.OutPeer', ([], {'id': 'result[0].id', 'type': 'PeerType.PEERTYPE_GROUP', 'access_hash': 'result[0].access_hash'}), '(id=result[0].id, type=PeerType.PEERTYPE_GROUP,\n access_hash=result[0].access_hash)\n', (3978, 4064), False, 'from dialog_api import search_pb2, groups_pb2, peers_pb2, sequence_and_updates_pb2\n'), ((5621, 5637), 'dialog_bot_sdk.entities.User.User.from_api', 'User.from_api', (['x'], {}), '(x)\n', (5634, 5637), False, 'from dialog_bot_sdk.entities.User import User\n'), ((6057, 6077), 'dialog_bot_sdk.utils.get_peer', 'get_peer', (['group_peer'], {}), '(group_peer)\n', (6065, 6077), False, 'from dialog_bot_sdk.utils import async_dec, AsyncTask, get_peer\n'), ((6079, 6098), 'dialog_bot_sdk.utils.get_peer', 'get_peer', (['user_peer'], {}), '(user_peer)\n', (6087, 6098), False, 'from dialog_bot_sdk.utils import async_dec, AsyncTask, get_peer\n'), ((6775, 6795), 'dialog_bot_sdk.utils.get_peer', 'get_peer', (['group_peer'], {}), '(group_peer)\n', (6783, 6795), False, 'from dialog_bot_sdk.utils import async_dec, AsyncTask, get_peer\n'), ((6797, 6816), 'dialog_bot_sdk.utils.get_peer', 'get_peer', (['user_peer'], {}), '(user_peer)\n', (6805, 6816), False, 'from dialog_bot_sdk.utils import async_dec, AsyncTask, get_peer\n'), ((9171, 9191), 'dialog_bot_sdk.utils.get_peer', 'get_peer', (['group_peer'], {}), '(group_peer)\n', (9179, 9191), False, 'from dialog_bot_sdk.utils import async_dec, AsyncTask, get_peer\n'), ((9193, 9212), 'dialog_bot_sdk.utils.get_peer', 'get_peer', (['user_peer'], {}), '(user_peer)\n', (9201, 9212), False, 'from dialog_bot_sdk.utils import async_dec, AsyncTask, get_peer\n'), ((10443, 10463), 'dialog_bot_sdk.utils.get_peer', 'get_peer', (['group_peer'], {}), '(group_peer)\n', (10451, 10463), False, 'from dialog_bot_sdk.utils import async_dec, AsyncTask, get_peer\n'), ((10678, 10701), 'dialog_bot_sdk.entities.Permissions.Permissions.from_api', 'Permissions.from_api', (['x'], {}), '(x)\n', (10698, 10701), False, 'from dialog_bot_sdk.entities.Permissions import Permissions, GroupPermission\n'), ((14267, 14287), 'dialog_bot_sdk.utils.get_peer', 'get_peer', (['group_peer'], {}), '(group_peer)\n', (14275, 14287), False, 'from dialog_bot_sdk.utils import async_dec, AsyncTask, get_peer\n'), ((14289, 14308), 'dialog_bot_sdk.utils.get_peer', 'get_peer', (['user_peer'], {}), '(user_peer)\n', (14297, 14308), False, 'from dialog_bot_sdk.utils import async_dec, AsyncTask, get_peer\n'), ((15004, 15024), 'dialog_bot_sdk.utils.get_peer', 'get_peer', (['group_peer'], {}), '(group_peer)\n', (15012, 15024), False, 'from dialog_bot_sdk.utils import async_dec, AsyncTask, get_peer\n'), ((15026, 15045), 'dialog_bot_sdk.utils.get_peer', 'get_peer', (['user_peer'], {}), '(user_peer)\n', (15034, 15045), False, 'from dialog_bot_sdk.utils import async_dec, AsyncTask, get_peer\n'), ((16993, 17107), 'dialog_api.peers_pb2.OutPeer', 
'peers_pb2.OutPeer', ([], {'id': 'response.group.id', 'access_hash': 'response.group.access_hash', 'type': 'PeerType.PEERTYPE_GROUP'}), '(id=response.group.id, access_hash=response.group.\n access_hash, type=PeerType.PEERTYPE_GROUP)\n', (17010, 17107), False, 'from dialog_api import search_pb2, groups_pb2, peers_pb2, sequence_and_updates_pb2\n'), ((17984, 18062), 'dialog_api.peers_pb2.GroupOutPeer', 'peers_pb2.GroupOutPeer', ([], {'group_id': 'out_peer.id', 'access_hash': 'out_peer.access_hash'}), '(group_id=out_peer.id, access_hash=out_peer.access_hash)\n', (18006, 18062), False, 'from dialog_api import search_pb2, groups_pb2, peers_pb2, sequence_and_updates_pb2\n'), ((18399, 18495), 'dialog_api.peers_pb2.OutPeer', 'peers_pb2.OutPeer', ([], {'id': 'group.id', 'access_hash': 'group.access_hash', 'type': 'PeerType.PEERTYPE_GROUP'}), '(id=group.id, access_hash=group.access_hash, type=PeerType\n .PEERTYPE_GROUP)\n', (18416, 18495), False, 'from dialog_api import search_pb2, groups_pb2, peers_pb2, sequence_and_updates_pb2\n'), ((994, 1036), 'google.protobuf.wrappers_pb2.StringValue', 'wrappers_pb2.StringValue', ([], {'value': 'short_name'}), '(value=short_name)\n', (1018, 1036), False, 'from google.protobuf import wrappers_pb2\n'), ((1833, 1875), 'google.protobuf.wrappers_pb2.StringValue', 'wrappers_pb2.StringValue', ([], {'value': 'short_name'}), '(value=short_name)\n', (1857, 1875), False, 'from google.protobuf import wrappers_pb2\n'), ((4984, 5008), 'google.protobuf.wrappers_pb2.BytesValue', 'BytesValue', ([], {'value': 'cursor'}), '(value=cursor)\n', (4994, 5008), False, 'from google.protobuf.wrappers_pb2 import BytesValue\n'), ((6337, 6365), 'random.randint', 'random.randint', (['(0)', '(100000000)'], {}), '(0, 100000000)\n', (6351, 6365), False, 'import random\n'), ((7057, 7085), 'random.randint', 'random.randint', (['(0)', '(100000000)'], {}), '(0, 100000000)\n', (7071, 7085), False, 'import random\n'), ((8054, 8082), 'random.randint', 'random.randint', (['(0)', '(100000000)'], {}), '(0, 100000000)\n', (8068, 8082), False, 'import random\n'), ((8282, 8310), 'random.randint', 'random.randint', (['(0)', '(100000000)'], {}), '(0, 100000000)\n', (8296, 8310), False, 'import random\n'), ((10466, 10477), 'dialog_bot_sdk.utils.get_peer', 'get_peer', (['x'], {}), '(x)\n', (10474, 10477), False, 'from dialog_bot_sdk.utils import async_dec, AsyncTask, get_peer\n'), ((11244, 11272), 'random.randint', 'random.randint', (['(0)', '(100000000)'], {}), '(0, 100000000)\n', (11258, 11272), False, 'import random\n'), ((11953, 11981), 'random.randint', 'random.randint', (['(0)', '(100000000)'], {}), '(0, 100000000)\n', (11967, 11981), False, 'import random\n'), ((12549, 12577), 'random.randint', 'random.randint', (['(0)', '(100000000)'], {}), '(0, 100000000)\n', (12563, 12577), False, 'import random\n'), ((13111, 13139), 'random.randint', 'random.randint', (['(0)', '(100000000)'], {}), '(0, 100000000)\n', (13125, 13139), False, 'import random\n'), ((13159, 13196), 'google.protobuf.wrappers_pb2.StringValue', 'wrappers_pb2.StringValue', ([], {'value': 'about'}), '(value=about)\n', (13183, 13196), False, 'from google.protobuf import wrappers_pb2\n'), ((13667, 13695), 'random.randint', 'random.randint', (['(0)', '(100000000)'], {}), '(0, 100000000)\n', (13681, 13695), False, 'import random\n'), ((18135, 18207), 'dialog_api.peers_pb2.UserOutPeer', 'peers_pb2.UserOutPeer', ([], {'uid': 'out_peer.id', 'access_hash': 'out_peer.access_hash'}), '(uid=out_peer.id, access_hash=out_peer.access_hash)\n', (18156, 18207), 
False, 'from dialog_api import search_pb2, groups_pb2, peers_pb2, sequence_and_updates_pb2\n'), ((5272, 5388), 'dialog_api.sequence_and_updates_pb2.GroupMembersSubset', 'sequence_and_updates_pb2.GroupMembersSubset', ([], {'group_peer': 'out_peer', 'member_ids': '[member.uid for member in members]'}), '(group_peer=out_peer, member_ids\n =[member.uid for member in members])\n', (5315, 5388), False, 'from dialog_api import search_pb2, groups_pb2, peers_pb2, sequence_and_updates_pb2\n'), ((5510, 5526), 'dialog_bot_sdk.entities.User.User.from_api', 'User.from_api', (['x'], {}), '(x)\n', (5523, 5526), False, 'from dialog_bot_sdk.entities.User import User\n'), ((2743, 2821), 'dialog_api.search_pb2.SearchPeerTypeCondition', 'search_pb2.SearchPeerTypeCondition', ([], {'peer_type': 'search_pb2.SEARCHPEERTYPE_GROUPS'}), '(peer_type=search_pb2.SEARCHPEERTYPE_GROUPS)\n', (2777, 2821), False, 'from dialog_api import search_pb2, groups_pb2, peers_pb2, sequence_and_updates_pb2\n'), ((2967, 3011), 'dialog_api.search_pb2.SearchPieceText', 'search_pb2.SearchPieceText', ([], {'query': 'short_name'}), '(query=short_name)\n', (2993, 3011), False, 'from dialog_api import search_pb2, groups_pb2, peers_pb2, sequence_and_updates_pb2\n')] |
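A minimal usage sketch for the group API wrapped above. It assumes the class is exposed as bot.groups on an already-initialized dialog_bot_sdk bot and that the decorated methods return AsyncTask objects exposing .wait(); the peer id, title and about text are made up.

# Hypothetical usage; `bot` and the ids below are assumptions, not part of the SDK code above.
from dialog_bot_sdk.entities.Peer import Peer, PeerType

group_peer = Peer(123, PeerType.PEERTYPE_GROUP)   # assumed group id

bot.groups.edit_group_title(group_peer, "Release planning").wait()
bot.groups.edit_group_about(group_peer, "Weekly planning sync").wait()
invite_url = bot.groups.get_group_invite_url(group_peer).wait()
print(invite_url)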
from rest_framework import serializers
from posts.models import Post
from comments.comments_api.serializers import CommentSerializer
from comments.models import Comment
class PostListSerializer(serializers.ModelSerializer):
url = serializers.HyperlinkedIdentityField(
view_name='posts_api:pk_detail', lookup_field='pk'
)
delete_url = serializers.HyperlinkedIdentityField(
view_name='posts_api:delete', lookup_field='pk'
)
user = serializers.SerializerMethodField()
image = serializers.SerializerMethodField()
markdown = serializers.SerializerMethodField()
comments = serializers.SerializerMethodField()
def get_comments(self, obj):
content_type = obj.get_content_type
object_id = obj.id
comments_queryset = Comment.objects.filter_by_instance(obj)
comments = CommentSerializer(comments_queryset, many=True).data
return comments
def get_user(self, obj):
return str(obj.user.username)
def get_markdown(self, obj):
return obj.get_markdown()
def get_image(self, obj):
try:
img = obj.image.url
        except Exception:  # post may have no image attached
img = None
return img
class Meta:
model = Post
fields = ['url', 'user', 'id', 'title', 'markdown', 'image', 'delete_url', 'comments']
class PostSerializer(serializers.ModelSerializer):
class Meta:
model = Post
fields = ['id', 'title', 'slug', 'content', 'publish']
class PostCreateSerializer(serializers.ModelSerializer):
class Meta:
model = Post
fields = [
"title",
"content",
"publish",
]
| [
"comments.comments_api.serializers.CommentSerializer",
"comments.models.Comment.objects.filter_by_instance",
"rest_framework.serializers.SerializerMethodField",
"rest_framework.serializers.HyperlinkedIdentityField"
] | [((236, 328), 'rest_framework.serializers.HyperlinkedIdentityField', 'serializers.HyperlinkedIdentityField', ([], {'view_name': '"""posts_api:pk_detail"""', 'lookup_field': '"""pk"""'}), "(view_name='posts_api:pk_detail',\n lookup_field='pk')\n", (272, 328), False, 'from rest_framework import serializers\n'), ((356, 445), 'rest_framework.serializers.HyperlinkedIdentityField', 'serializers.HyperlinkedIdentityField', ([], {'view_name': '"""posts_api:delete"""', 'lookup_field': '"""pk"""'}), "(view_name='posts_api:delete',\n lookup_field='pk')\n", (392, 445), False, 'from rest_framework import serializers\n'), ((467, 502), 'rest_framework.serializers.SerializerMethodField', 'serializers.SerializerMethodField', ([], {}), '()\n', (500, 502), False, 'from rest_framework import serializers\n'), ((515, 550), 'rest_framework.serializers.SerializerMethodField', 'serializers.SerializerMethodField', ([], {}), '()\n', (548, 550), False, 'from rest_framework import serializers\n'), ((566, 601), 'rest_framework.serializers.SerializerMethodField', 'serializers.SerializerMethodField', ([], {}), '()\n', (599, 601), False, 'from rest_framework import serializers\n'), ((617, 652), 'rest_framework.serializers.SerializerMethodField', 'serializers.SerializerMethodField', ([], {}), '()\n', (650, 652), False, 'from rest_framework import serializers\n'), ((786, 825), 'comments.models.Comment.objects.filter_by_instance', 'Comment.objects.filter_by_instance', (['obj'], {}), '(obj)\n', (820, 825), False, 'from comments.models import Comment\n'), ((845, 892), 'comments.comments_api.serializers.CommentSerializer', 'CommentSerializer', (['comments_queryset'], {'many': '(True)'}), '(comments_queryset, many=True)\n', (862, 892), False, 'from comments.comments_api.serializers import CommentSerializer\n')] |
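The serializers above are only half of an API; a sketch of how they might be plugged into DRF generic views follows. The view classes and the user-attachment behaviour are assumptions, and the hyperlinked fields still require matching url patterns named posts_api:pk_detail and posts_api:delete.

# Hypothetical view wiring for the serializers above (class names are assumed).
from rest_framework.generics import ListAPIView, CreateAPIView
from posts.models import Post

class PostListAPIView(ListAPIView):
    queryset = Post.objects.all()
    serializer_class = PostListSerializer

class PostCreateAPIView(CreateAPIView):
    queryset = Post.objects.all()
    serializer_class = PostCreateSerializer

    def perform_create(self, serializer):
        # attach the requesting user, since PostListSerializer exposes a `user` field
        serializer.save(user=self.request.user)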
import os
import logging
import argparse
import builtins
from ghcopy.utils import set_config, activate_virtual_environment, set_localization, get_logger
########################################################################################################################
# Configuration #
########################################################################################################################
parser = argparse.ArgumentParser(prog='ghcopy')
home = os.getenv("HOME")
parser.add_argument('-c', '--config', help='config file', default='~/.ghcopy/config.json')
parser.add_argument('-o', '--output', help='output directory', default='~/RemoteCopies')
parser.add_argument('-b', '--hub', help='repository type: github, bitbucket')
parser.add_argument('-u', '--user', help='user name')
parser.add_argument('-p', '--password', help='password')
parser.add_argument('-t', '--token', help='token')
parser.add_argument('-l', '--log_level', help='logging level: CRITICAL, ERROR, WARNING, INFO, DEBUG or NOTSET',
default='INFO')
cmd_args = parser.parse_args()
config_args = set_config(cmd_args.config.replace('~', home))
cmd_args.output = cmd_args.output.replace('~', home)
########################################################################################################################
# Localization #
########################################################################################################################
set_localization(**config_args)
translate = _ = builtins.__dict__.get('_', lambda x: x)
########################################################################################################################
# Logging #
########################################################################################################################
try:
log_level, level_error = logging._nameToLevel[cmd_args.log_level], False
except KeyError:
level_error = True
log_level = logging._nameToLevel['INFO']
logger = get_logger('ghcopy', config_args.get("log_format", "%(levelname)-10s|%(asctime)s|"
"%(process)d|%(thread)d| %(name)s --- "
"%(message)s"),
config_args.get('log_file', '~/.report/report.log').replace('~', home), log_level)
if level_error:
logger.warning('%s \'%s\', %s \'INFO\' %s' % (_('incorrect logging level'), cmd_args.log_level, _('used'),
_('by default')))
cmd_args.log_level = 'INFO'
########################################################################################################################
# Virtual environment #
########################################################################################################################
if config_args.get('environment') != "":
activate_virtual_environment(**config_args)
logger.info('%s \'%s\'' % (_('activated virtual environment'), config_args.get('environment')))
| [
"argparse.ArgumentParser",
"os.getenv",
"ghcopy.utils.set_localization",
"builtins.__dict__.get",
"ghcopy.utils.activate_virtual_environment"
] | [((527, 565), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'prog': '"""ghcopy"""'}), "(prog='ghcopy')\n", (550, 565), False, 'import argparse\n'), ((573, 590), 'os.getenv', 'os.getenv', (['"""HOME"""'], {}), "('HOME')\n", (582, 590), False, 'import os\n'), ((1670, 1701), 'ghcopy.utils.set_localization', 'set_localization', ([], {}), '(**config_args)\n', (1686, 1701), False, 'from ghcopy.utils import set_config, activate_virtual_environment, set_localization, get_logger\n'), ((1718, 1757), 'builtins.__dict__.get', 'builtins.__dict__.get', (['"""_"""', '(lambda x: x)'], {}), "('_', lambda x: x)\n", (1739, 1757), False, 'import builtins\n'), ((3300, 3343), 'ghcopy.utils.activate_virtual_environment', 'activate_virtual_environment', ([], {}), '(**config_args)\n', (3328, 3343), False, 'from ghcopy.utils import set_config, activate_virtual_environment, set_localization, get_logger\n')] |
from abc import ABC, abstractmethod
import logging
import re
from typing import Iterable, Mapping, Optional, Tuple
from sciencebeam_parser.document.semantic_document import (
SemanticContentFactoryProtocol,
SemanticContentWrapper,
SemanticNote
)
from sciencebeam_parser.document.layout_document import EMPTY_BLOCK, LayoutBlock, LayoutTokensText
LOGGER = logging.getLogger(__name__)
class ModelSemanticExtractor(ABC):
@abstractmethod
def iter_semantic_content_for_entity_blocks(
self,
entity_tokens: Iterable[Tuple[str, LayoutBlock]],
**kwargs
) -> Iterable[SemanticContentWrapper]:
pass
def get_regex_cleaned_layout_block_with_prefix_suffix(
layout_block: LayoutBlock,
regex_pattern: Optional[str]
) -> Tuple[LayoutBlock, LayoutBlock, LayoutBlock]:
if not layout_block or not layout_block.lines or not regex_pattern:
return EMPTY_BLOCK, layout_block, EMPTY_BLOCK
layout_tokens_text = LayoutTokensText(layout_block)
text = str(layout_tokens_text)
m = re.match(regex_pattern, text, re.IGNORECASE)
if not m:
LOGGER.debug('text does not match regex: %r', text)
return EMPTY_BLOCK, layout_block, EMPTY_BLOCK
start = m.start(1)
end = m.end(1)
LOGGER.debug('start: %d, end: %d, len: %d (text: %r)', start, end, len(text), text)
return (
LayoutBlock.for_tokens(list(
layout_tokens_text.iter_layout_tokens_between(0, start)
)),
LayoutBlock.for_tokens(list(
layout_tokens_text.iter_layout_tokens_between(start, end)
)),
LayoutBlock.for_tokens(list(
layout_tokens_text.iter_layout_tokens_between(end, len(text))
))
)
class SimpleModelSemanticExtractor(ModelSemanticExtractor):
def __init__(
self,
semantic_content_class_by_tag: Optional[
Mapping[str, SemanticContentFactoryProtocol]
] = None
):
super().__init__()
self.semantic_content_class_by_tag = semantic_content_class_by_tag or {}
def get_semantic_content_for_entity_name(
self,
name: str,
layout_block: LayoutBlock
) -> SemanticContentWrapper:
semantic_content_class = self.semantic_content_class_by_tag.get(name)
if semantic_content_class:
return semantic_content_class(layout_block=layout_block)
return SemanticNote(
layout_block=layout_block,
note_type=name
)
| [
"logging.getLogger",
"sciencebeam_parser.document.semantic_document.SemanticNote",
"sciencebeam_parser.document.layout_document.LayoutTokensText",
"re.match"
] | [((369, 396), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (386, 396), False, 'import logging\n'), ((971, 1001), 'sciencebeam_parser.document.layout_document.LayoutTokensText', 'LayoutTokensText', (['layout_block'], {}), '(layout_block)\n', (987, 1001), False, 'from sciencebeam_parser.document.layout_document import EMPTY_BLOCK, LayoutBlock, LayoutTokensText\n'), ((1045, 1089), 're.match', 're.match', (['regex_pattern', 'text', 're.IGNORECASE'], {}), '(regex_pattern, text, re.IGNORECASE)\n', (1053, 1089), False, 'import re\n'), ((2401, 2456), 'sciencebeam_parser.document.semantic_document.SemanticNote', 'SemanticNote', ([], {'layout_block': 'layout_block', 'note_type': 'name'}), '(layout_block=layout_block, note_type=name)\n', (2413, 2456), False, 'from sciencebeam_parser.document.semantic_document import SemanticContentFactoryProtocol, SemanticContentWrapper, SemanticNote\n')] |
from visual_tools.visualize import display_segmentation
from .gui_viewer import GuiViewer
class GuiKittiViewer(GuiViewer):
def __init__(self, figurename, dataset):
super(GuiKittiViewer, self).__init__(figurename)
self.dataset = dataset
self.num_images = self.dataset.num_images
self.create_slider()
self.create_textbox()
self.display()
def display(self):
should_update = super(GuiKittiViewer, self).display()
if should_update:
image = self.dataset.load_image(self.image_id)
masks = self.dataset.load_mask(self.image_id)
display_segmentation(image, masks, self.dataset.class_names, ax=self.ax)
title = "ID: {}\nImage file name: {}".format(
self.image_id,
self.dataset.image_files[self.image_id]
)
self.fig.suptitle(title, fontsize=20)
self.fig.canvas.draw_idle()
| [
"visual_tools.visualize.display_segmentation"
] | [((635, 707), 'visual_tools.visualize.display_segmentation', 'display_segmentation', (['image', 'masks', 'self.dataset.class_names'], {'ax': 'self.ax'}), '(image, masks, self.dataset.class_names, ax=self.ax)\n', (655, 707), False, 'from visual_tools.visualize import display_segmentation\n')] |
import matplotlib.pyplot as plt
import pyshtools
import numpy as np
import sys
import gmi_misc
import convert_shtools_grids
try:
#raw_grid = gmi_misc.read_tess_output_global_grid_from_file(sys.argv[1])
raw_grid = gmi_misc.read_global_grid_from_xyz_file(sys.argv[1])
except IOError as err:
print("CAN NOT OPEN OBSERVED DATAFILE: {0}".format(err))
exit(-1)
shtools_inp_grid = pyshtools.SHGrid.from_array(raw_grid)
#get SH coefficients
shtools_coeff = shtools_inp_grid.expand(normalization='schmidt')
shtools_coeff_filt = gmi_misc.remove_lw_sh_coeff(shtools_coeff, 16)
shtools_grd_filt = shtools_coeff_filt.expand(grid='DH2')
shtools_grd_filt.to_file(sys.argv[1][0:len(sys.argv[1])-4]+'_filt.dat')
convert_shtools_grids.conv_grid(sys.argv[1][0:len(sys.argv[1])-4]+'_filt.dat')
| [
"pyshtools.SHGrid.from_array",
"gmi_misc.read_global_grid_from_xyz_file",
"gmi_misc.remove_lw_sh_coeff"
] | [((383, 420), 'pyshtools.SHGrid.from_array', 'pyshtools.SHGrid.from_array', (['raw_grid'], {}), '(raw_grid)\n', (410, 420), False, 'import pyshtools\n'), ((529, 575), 'gmi_misc.remove_lw_sh_coeff', 'gmi_misc.remove_lw_sh_coeff', (['shtools_coeff', '(16)'], {}), '(shtools_coeff, 16)\n', (556, 575), False, 'import gmi_misc\n'), ((219, 271), 'gmi_misc.read_global_grid_from_xyz_file', 'gmi_misc.read_global_grid_from_xyz_file', (['sys.argv[1]'], {}), '(sys.argv[1])\n', (258, 271), False, 'import gmi_misc\n')] |
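gmi_misc and convert_shtools_grids are project-specific, but the core pyshtools round trip above can be sketched on its own. The grid shape below is invented, and zeroing degrees below 16 only mimics what remove_lw_sh_coeff presumably does.

# Standalone sketch of the analyse/filter/synthesise round trip (grid shape is made up).
import numpy as np
import pyshtools

raw_grid = np.random.rand(180, 360)                # stand-in for the gridded input data
grid = pyshtools.SHGrid.from_array(raw_grid)
coeffs = grid.expand(normalization='schmidt')   # spherical-harmonic analysis
coeffs.coeffs[:, :16, :] = 0.0                  # drop degrees 0-15 (assumed remove_lw_sh_coeff behaviour)
filtered = coeffs.expand(grid='DH2')            # synthesise back onto a Driscoll-Healy grid
np.savetxt('filtered_grid.dat', filtered.to_array())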
import json
def respond(err, res=None):
response = {
"statusCode": "400" if err else "200",
"body": str(err) if err else json.dumps(res),
"headers": {
"Content-Type": "application/json",
"Access-Control-Allow-Origin": "https://example.com", # <- Also have to set CORS Headers in function response
"Access-Control-Allow-Credentials": True,
},
}
return response
def lambda_handler(event, context):
"""Sample HelloWorld Function"""
try:
# Accessing the principalId set via Cookie Authorizer
username = event["requestContext"]["authorizer"]["principalId"]
res = {"message": f"Hello {username}"}
return respond(None, res)
except Exception as e:
print(e)
        return respond(e)
| [
"json.dumps"
] | [((143, 158), 'json.dumps', 'json.dumps', (['res'], {}), '(res)\n', (153, 158), False, 'import json\n')] |
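The handler only reads the authorizer's principalId, so it can be smoke-tested locally with a hand-built event; the username below is a placeholder.

# Local smoke test for lambda_handler (minimal stand-in event, no real API Gateway).
if __name__ == "__main__":
    fake_event = {"requestContext": {"authorizer": {"principalId": "alice"}}}
    print(lambda_handler(fake_event, context=None))
    # expect statusCode "200" and body '{"message": "Hello alice"}'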
# -*- coding: utf-8 -*-
import os
from django.core.management import call_command
from django.core.management.base import BaseCommand
class Command(BaseCommand):
def handle(self, *args, **options):
base_dir = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
file_path = os.path.join(base_dir, 'fixtures/cidades.json.zip')
print(file_path)
call_command('loaddata', file_path)
| [
"os.path.dirname",
"os.path.join",
"django.core.management.call_command"
] | [((321, 372), 'os.path.join', 'os.path.join', (['base_dir', '"""fixtures/cidades.json.zip"""'], {}), "(base_dir, 'fixtures/cidades.json.zip')\n", (333, 372), False, 'import os\n'), ((406, 441), 'django.core.management.call_command', 'call_command', (['"""loaddata"""', 'file_path'], {}), "('loaddata', file_path)\n", (418, 441), False, 'from django.core.management import call_command\n'), ((272, 297), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (287, 297), False, 'import os\n')] |
"""Asyncio helper."""
import asyncio
from typing import Callable, Any, Coroutine
from pnp import validator
from pnp.typing import T
from pnp.utils import StopCycleError
SleepInterruptPredicate = Callable[[], Coroutine[Any, Any, bool]]
SleepInterruptCallback = Callable[[], Coroutine[Any, Any, None]]
async def async_interruptible_sleep(
wait: float, callback: SleepInterruptCallback, interval: float = 0.1
) -> None:
"""
Waits the specified amount of time. The waiting can be interrupted when the callback raises a
`StopCycleError`. The argument `interval` defines after how much waiting time the callback
should be called to determine if the sleep should be interrupted or not.
"""
wait = float(wait)
interval = float(interval)
complete_cycles = int(wait // interval)
try:
for _ in range(0, complete_cycles):
await callback() # Should raise a StopCycleError error when waiting should be aborted
await asyncio.sleep(interval)
await asyncio.sleep(wait % interval)
except StopCycleError:
pass
async def async_sleep_until_interrupt(
sleep_time: float, interrupt_fun: SleepInterruptPredicate, interval: float = 0.1
) -> None:
"""Call this method to sleep an interruptable sleep until the interrupt co-routine returns
True."""
validator.is_function(interrupt_fun=interrupt_fun)
async def callback() -> None:
if await interrupt_fun():
raise StopCycleError()
await async_interruptible_sleep(sleep_time, callback, interval=interval)
async def run_sync(func: Callable[..., T], *args: Any) -> T:
"""Runs sync code in an async compatible non-blocking way using an executor."""
loop = asyncio.get_event_loop()
return await loop.run_in_executor(None, func, *args)
| [
"pnp.utils.StopCycleError",
"asyncio.get_event_loop",
"pnp.validator.is_function",
"asyncio.sleep"
] | [((1340, 1390), 'pnp.validator.is_function', 'validator.is_function', ([], {'interrupt_fun': 'interrupt_fun'}), '(interrupt_fun=interrupt_fun)\n', (1361, 1390), False, 'from pnp import validator\n'), ((1730, 1754), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (1752, 1754), False, 'import asyncio\n'), ((1020, 1050), 'asyncio.sleep', 'asyncio.sleep', (['(wait % interval)'], {}), '(wait % interval)\n', (1033, 1050), False, 'import asyncio\n'), ((1478, 1494), 'pnp.utils.StopCycleError', 'StopCycleError', ([], {}), '()\n', (1492, 1494), False, 'from pnp.utils import StopCycleError\n'), ((981, 1004), 'asyncio.sleep', 'asyncio.sleep', (['interval'], {}), '(interval)\n', (994, 1004), False, 'import asyncio\n')] |
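A usage sketch for async_sleep_until_interrupt; the module-level flag and the timings are illustrative assumptions.

# Hypothetical caller: sleep up to 10 seconds, but wake early once the flag flips.
import asyncio

stop_requested = False

async def should_interrupt() -> bool:
    return stop_requested

async def main() -> None:
    await async_sleep_until_interrupt(10.0, should_interrupt, interval=0.1)

asyncio.run(main())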
import unittest
import os
from link.wrappers import DBConnectionWrapper, SqliteDBConnectionWrapper
from mock import Mock, MagicMock
from link.tests import *
class TestDBConnectionWrapper(unittest.TestCase):
"""
Test that we are wrapping DB connections correctly
"""
def setUp(self):
self.cw = DBConnectionWrapper()
#create a fake connection that is a mock as well
self.cw._wrapped = Mock()
self.cw.execute = MagicMock()
self.cw.execute.return_value = MagicMock()
self.cw.execute.return_value.fetchall = MagicMock()
self.cw.description = MagicMock()
def test_select_dataframe(self):
return_val = [(1,2), (3,4)]
headers = [['Col1'], ['col2']]
self.cw.execute.return_value.fetchall.return_value = return_val
self.cw.execute.return_value.description = headers
query = 'my select statement'
results = self.cw.select_dataframe(query)
#check to see that the headers match but all lower case
self.assertEquals([d for d in results.columns],
[c[0].lower() for c in headers])
def test_select(self):
query = 'my select statement'
results = self.cw.select(query)
expected = ((1,2), (3,4))
results._data = expected
self.assertEquals(query, results.query)
self.assertEquals(results.data, expected)
def check_chunk(self):
self.cw.chunks = {'this_chunk':True}
self.assertTrue(self.cw.chunk('this_chunk'))
class TestSqliteConnection(unittest.TestCase):
db_path = tst_db_path('test_db')
db_path_with_default = tst_db_path('test_db.db')
def setUp(self):
self.cw = SqliteDBConnectionWrapper(path = self.db_path, chunked = True)
self.cw_db = SqliteDBConnectionWrapper(path = self.db_path_with_default, chunked = True)
self.cw_no_chunk = SqliteDBConnectionWrapper(path = self.db_path_with_default)
def test_db_created(self):
# don't change this table please
data = self.cw_db.select('select * from test_table where column = 1').data
self.assertEquals(data[0][0], 1)
self.assertTrue(self.cw._wrapped == None)
def test_db_chunk_created(self):
# don't change this table please
data = self.cw_db.select('select * from test_table_chunk where column = 1',
chunk_name = 'my_chunk.db').data
self.assertEquals(data[0][0], 1)
data = self.cw_db.select('select * from test_table_chunk where column = 1',
chunk_name = 'my_chunk.db').data
self.assertEquals(data[0][0], 1)
    def test_db_no_chunk(self):
# don't change this table please
data = self.cw_no_chunk.select('select * from test_table where column = 1').data
self.assertEquals(data[0][0], 1)
self.assertRaises(Exception, self.cw_no_chunk.select,
'select * from test_table_chunk where column = 1',chunk_name = 'my_chunk.db')
if __name__ == '__main__':
import nose
nose.runmodule(argv=[__file__,'-vvs','-x','--pdb', '--pdb-failure'],
exit=False)
#class TestSqliteConnection(unittest.TestCase):
#TEST_DB = 'test_sqlite.db'
#def setUp(self):
## if it already exists then just remove it
#if os.path.exists(TEST_DB):
#os.remove(TEST_DB)
##create a new connection to the db
#self.connection = SqlLiteDBConnectionWrapper(path=TEST_DB)
#def test_execute(self):
#self.connection.execute("""create table test_table
#(col1 int, col2 int)""")
#self.assertTrue(True)
#def test_false(self):
#self.assertTrue(True)
| [
"link.wrappers.SqliteDBConnectionWrapper",
"mock.Mock",
"nose.runmodule",
"link.wrappers.DBConnectionWrapper",
"mock.MagicMock"
] | [((3098, 3185), 'nose.runmodule', 'nose.runmodule', ([], {'argv': "[__file__, '-vvs', '-x', '--pdb', '--pdb-failure']", 'exit': '(False)'}), "(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],\n exit=False)\n", (3112, 3185), False, 'import nose\n'), ((318, 339), 'link.wrappers.DBConnectionWrapper', 'DBConnectionWrapper', ([], {}), '()\n', (337, 339), False, 'from link.wrappers import DBConnectionWrapper, SqliteDBConnectionWrapper\n'), ((424, 430), 'mock.Mock', 'Mock', ([], {}), '()\n', (428, 430), False, 'from mock import Mock, MagicMock\n'), ((457, 468), 'mock.MagicMock', 'MagicMock', ([], {}), '()\n', (466, 468), False, 'from mock import Mock, MagicMock\n'), ((508, 519), 'mock.MagicMock', 'MagicMock', ([], {}), '()\n', (517, 519), False, 'from mock import Mock, MagicMock\n'), ((568, 579), 'mock.MagicMock', 'MagicMock', ([], {}), '()\n', (577, 579), False, 'from mock import Mock, MagicMock\n'), ((610, 621), 'mock.MagicMock', 'MagicMock', ([], {}), '()\n', (619, 621), False, 'from mock import Mock, MagicMock\n'), ((1720, 1778), 'link.wrappers.SqliteDBConnectionWrapper', 'SqliteDBConnectionWrapper', ([], {'path': 'self.db_path', 'chunked': '(True)'}), '(path=self.db_path, chunked=True)\n', (1745, 1778), False, 'from link.wrappers import DBConnectionWrapper, SqliteDBConnectionWrapper\n'), ((1804, 1875), 'link.wrappers.SqliteDBConnectionWrapper', 'SqliteDBConnectionWrapper', ([], {'path': 'self.db_path_with_default', 'chunked': '(True)'}), '(path=self.db_path_with_default, chunked=True)\n', (1829, 1875), False, 'from link.wrappers import DBConnectionWrapper, SqliteDBConnectionWrapper\n'), ((1907, 1964), 'link.wrappers.SqliteDBConnectionWrapper', 'SqliteDBConnectionWrapper', ([], {'path': 'self.db_path_with_default'}), '(path=self.db_path_with_default)\n', (1932, 1964), False, 'from link.wrappers import DBConnectionWrapper, SqliteDBConnectionWrapper\n')] |
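Outside the test suite, the wrapper being exercised above might be used roughly like this; the database path and table name are placeholders.

# Hypothetical direct usage of the wrapper under test (path and table are made up).
from link.wrappers import SqliteDBConnectionWrapper

db = SqliteDBConnectionWrapper(path='example.db')
result = db.select('select * from test_table')
print(result.query, result.data)
frame = db.select_dataframe('select * from test_table')
print(list(frame.columns))   # column headers come back lower-cased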
import bookworm.execute_command as execute_command
import bookworm.detect_user as detect_user
import bookworm_main.arg_processor as arg_processor
import sys
def warning(*objs):
"""
The function ``warning`` is a helper method for printing warnings.
"""
print('WARNING: ', *objs, file=sys.stderr)
def main(argv=sys.argv):
"""
The main pdf operations are:
1. Unpack a PDF.
2. Change image resolution.
3. Rescale image.
4. Expand image with fill.
"""
parser = arg_processor.arg_processor()
if detect_user.is_admin():
warning('You are currently running bookworm as superuser. '
'You really should not run this program with elevated privileges.')
if len(argv) < 2:
parser.print_help()
# An exit code of 2 is standard unix convention.
sys.exit(2)
args = parser.parse_args(argv[1:])
command = argv[1]
try:
command_dict = dict(command=command, args=vars(args))
action = execute_command.process_command(command_dict)
execute_command.run_command([action])
    except ValueError as e:
        print(e)
        sys.exit(1)
    except Exception as e:
        print(e)
        sys.exit(1)
| [
"bookworm_main.arg_processor.arg_processor",
"bookworm.execute_command.run_command",
"bookworm.detect_user.is_admin",
"sys.exit",
"bookworm.execute_command.process_command"
] | [((519, 548), 'bookworm_main.arg_processor.arg_processor', 'arg_processor.arg_processor', ([], {}), '()\n', (546, 548), True, 'import bookworm_main.arg_processor as arg_processor\n'), ((557, 579), 'bookworm.detect_user.is_admin', 'detect_user.is_admin', ([], {}), '()\n', (577, 579), True, 'import bookworm.detect_user as detect_user\n'), ((849, 860), 'sys.exit', 'sys.exit', (['(2)'], {}), '(2)\n', (857, 860), False, 'import sys\n'), ((1012, 1057), 'bookworm.execute_command.process_command', 'execute_command.process_command', (['command_dict'], {}), '(command_dict)\n', (1043, 1057), True, 'import bookworm.execute_command as execute_command\n'), ((1066, 1103), 'bookworm.execute_command.run_command', 'execute_command.run_command', (['[action]'], {}), '([action])\n', (1093, 1103), True, 'import bookworm.execute_command as execute_command\n'), ((1156, 1167), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1164, 1167), False, 'import sys\n'), ((1221, 1232), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1229, 1232), False, 'import sys\n')] |
import argparse
import glob
import logging
import os
import json
import random
import pickle as pkl
import numpy as np
import torch
from tqdm import tqdm, trange
from transformers import AlbertConfig, AlbertForTokenClassification, AutoTokenizer, AlbertForQuestionAnswering
from .config import *
from .logging_handler import logger
class AlbertQA:
def __init__(self, path, device='cpu'):
if not os.path.exists(path):
raise NotADirectoryError(
f"{os.path.abspath(path)} must be a directory containing the model files: config, tokenizer, weights.")
files = os.listdir(path)
if CONFIG_JSON_FILE not in files:
raise FileNotFoundError(f"{CONFIG_JSON_FILE} must be in {path}.")
if WEIGHTS_FILE not in files:
raise FileNotFoundError(f"{WEIGHTS_FILE} must be in {path}.")
with open(os.path.join(path, CONFIG_JSON_FILE), "r") as f:
config = json.load(f)
self.tokenizer = AutoTokenizer.from_pretrained(path)
weights = torch.load(os.path.join(path, WEIGHTS_FILE),
map_location=lambda storage, loc: storage)
# Load pretrained model/tokenizer
config = AlbertConfig.from_dict(config)
self.model = AlbertForQuestionAnswering(config)
self.model.load_state_dict(weights)
self.model = self.model.eval()
self.args = albert_args_squad
if device == "cuda":
logger.debug("Setting model with CUDA")
self.args['device'] = 'cuda'
self.model.to('cuda')
def answer(self, question, context, **kwargs):
for key in kwargs:
if key in self.args:
self.args[key] = kwargs[key]
inputs = self.tokenizer.encode_plus(question, context, **self.args)
for key in inputs.keys():
inputs[key] = inputs[key].to(self.args['device'])
input_ids = inputs["input_ids"].tolist()[0]
answer_start_scores, answer_end_scores = self.model(**inputs)
answer_start = torch.argmax(answer_start_scores) # Get the most likely beginning of answer
answer_end = torch.argmax(answer_end_scores) + 1 # Get the most likely end of answer
answer = self.tokenizer.convert_tokens_to_string(
self.tokenizer.convert_ids_to_tokens(
input_ids[answer_start:answer_end]
)
)
answer = answer.replace("[CLS]", "").replace("[SEP]", " ").replace("<s>", "").replace("</s>", "")
return answer
class AlbertNER:
def __init__(self, path ,device='cpu'):
""" Method for pretrained model loading. """
if not os.path.exists(path):
raise NotADirectoryError(
f"{os.path.abspath(path)} must be a directory containing the model files: config, tokenizer, weights.")
files = os.listdir(path)
if CONFIG_JSON_FILE not in files:
raise FileNotFoundError(f"{CONFIG_JSON_FILE} must be in {path}.")
if WEIGHTS_FILE not in files:
raise FileNotFoundError(f"{WEIGHTS_FILE} must be in {path}.")
with open(os.path.join(path, CONFIG_JSON_FILE), "r") as f:
config = json.load(f)
self.tokenizer = AutoTokenizer.from_pretrained(path)
weights = torch.load(os.path.join(path, WEIGHTS_FILE),
map_location=lambda storage, loc: storage)
# Load pretrained model/tokenizer
config = AlbertConfig.from_dict(config)
self.model = AlbertForTokenClassification(config)
self.model.load_state_dict(weights)
self.model = self.model.eval()
self.args = albert_args_ner
if device == "cuda":
logger.debug("Setting model with CUDA")
self.args['device'] = 'cuda'
self.model.to('cuda')
def extract(self, text, **kwargs):
for key in kwargs:
if key in self.args:
self.args[key] = kwargs[key]
tokens = self.tokenizer.tokenize(self.tokenizer.decode(self.tokenizer.encode(text)))
inputs = self.tokenizer.encode(text, return_tensors="pt")
outputs = self.model(inputs, **kwargs)[0]
predictions = torch.argmax(outputs, dim=2)
return [(token, label_list[prediction]) for token, prediction in zip(tokens, predictions[0].tolist())]
| [
"os.path.exists",
"os.listdir",
"transformers.AlbertConfig.from_dict",
"transformers.AlbertForQuestionAnswering",
"os.path.join",
"transformers.AutoTokenizer.from_pretrained",
"json.load",
"os.path.abspath",
"transformers.AlbertForTokenClassification",
"torch.argmax"
] | [((608, 624), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (618, 624), False, 'import os\n'), ((984, 1019), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (['path'], {}), '(path)\n', (1013, 1019), False, 'from transformers import AlbertConfig, AlbertForTokenClassification, AutoTokenizer, AlbertForQuestionAnswering\n'), ((1219, 1249), 'transformers.AlbertConfig.from_dict', 'AlbertConfig.from_dict', (['config'], {}), '(config)\n', (1241, 1249), False, 'from transformers import AlbertConfig, AlbertForTokenClassification, AutoTokenizer, AlbertForQuestionAnswering\n'), ((1271, 1305), 'transformers.AlbertForQuestionAnswering', 'AlbertForQuestionAnswering', (['config'], {}), '(config)\n', (1297, 1305), False, 'from transformers import AlbertConfig, AlbertForTokenClassification, AutoTokenizer, AlbertForQuestionAnswering\n'), ((2059, 2092), 'torch.argmax', 'torch.argmax', (['answer_start_scores'], {}), '(answer_start_scores)\n', (2071, 2092), False, 'import torch\n'), ((2870, 2886), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (2880, 2886), False, 'import os\n'), ((3246, 3281), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (['path'], {}), '(path)\n', (3275, 3281), False, 'from transformers import AlbertConfig, AlbertForTokenClassification, AutoTokenizer, AlbertForQuestionAnswering\n'), ((3481, 3511), 'transformers.AlbertConfig.from_dict', 'AlbertConfig.from_dict', (['config'], {}), '(config)\n', (3503, 3511), False, 'from transformers import AlbertConfig, AlbertForTokenClassification, AutoTokenizer, AlbertForQuestionAnswering\n'), ((3533, 3569), 'transformers.AlbertForTokenClassification', 'AlbertForTokenClassification', (['config'], {}), '(config)\n', (3561, 3569), False, 'from transformers import AlbertConfig, AlbertForTokenClassification, AutoTokenizer, AlbertForQuestionAnswering\n'), ((4221, 4249), 'torch.argmax', 'torch.argmax', (['outputs'], {'dim': '(2)'}), '(outputs, dim=2)\n', (4233, 4249), False, 'import torch\n'), ((411, 431), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (425, 431), False, 'import os\n'), ((946, 958), 'json.load', 'json.load', (['f'], {}), '(f)\n', (955, 958), False, 'import json\n'), ((1049, 1081), 'os.path.join', 'os.path.join', (['path', 'WEIGHTS_FILE'], {}), '(path, WEIGHTS_FILE)\n', (1061, 1081), False, 'import os\n'), ((2157, 2188), 'torch.argmax', 'torch.argmax', (['answer_end_scores'], {}), '(answer_end_scores)\n', (2169, 2188), False, 'import torch\n'), ((2673, 2693), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (2687, 2693), False, 'import os\n'), ((3208, 3220), 'json.load', 'json.load', (['f'], {}), '(f)\n', (3217, 3220), False, 'import json\n'), ((3311, 3343), 'os.path.join', 'os.path.join', (['path', 'WEIGHTS_FILE'], {}), '(path, WEIGHTS_FILE)\n', (3323, 3343), False, 'import os\n'), ((876, 912), 'os.path.join', 'os.path.join', (['path', 'CONFIG_JSON_FILE'], {}), '(path, CONFIG_JSON_FILE)\n', (888, 912), False, 'import os\n'), ((3138, 3174), 'os.path.join', 'os.path.join', (['path', 'CONFIG_JSON_FILE'], {}), '(path, CONFIG_JSON_FILE)\n', (3150, 3174), False, 'import os\n'), ((490, 511), 'os.path.abspath', 'os.path.abspath', (['path'], {}), '(path)\n', (505, 511), False, 'import os\n'), ((2752, 2773), 'os.path.abspath', 'os.path.abspath', (['path'], {}), '(path)\n', (2767, 2773), False, 'import os\n')] |
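Assuming a model directory containing the config, tokenizer files and weights the constructors look for, the two wrappers above could be driven like this from the same module; the directory names and texts are placeholders.

# Hypothetical usage; the model directories are placeholders, not shipped artifacts.
qa = AlbertQA("albert-qa-model/", device="cpu")
print(qa.answer(
    "Where is the Eiffel Tower located?",
    "The Eiffel Tower is a wrought-iron lattice tower in Paris, France.",
))

ner = AlbertNER("albert-ner-model/", device="cpu")
print(ner.extract("Paris is the capital of France."))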
# Generated by Django 2.1.5 on 2020-02-24 22:18
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('events', '0014_auto_20200219_2322'),
]
operations = [
migrations.AddField(
model_name='event',
name='registration_type',
field=models.PositiveSmallIntegerField(choices=[(1, 'Register to attend event'), (2, 'Apply to attend event'), (3, 'Visit event website')], default=1),
),
]
| [
"django.db.models.PositiveSmallIntegerField"
] | [((343, 491), 'django.db.models.PositiveSmallIntegerField', 'models.PositiveSmallIntegerField', ([], {'choices': "[(1, 'Register to attend event'), (2, 'Apply to attend event'), (3,\n 'Visit event website')]", 'default': '(1)'}), "(choices=[(1, 'Register to attend event'),\n (2, 'Apply to attend event'), (3, 'Visit event website')], default=1)\n", (375, 491), False, 'from django.db import migrations, models\n')] |
"""
make sure server.py is running before passing a message as commandline \
argument after client.py.
for example, a user will pass a message in terminal as below:
python client.py this message will be echoed back
Once you enter the message, it will be printed on server terminal as well as \
client terminal.
And subsequently the socket on client terminal will be closed.
"""
import socket
import time
import sys
def client():
socket.getaddrinfo('127.0.0.1', 3000)
infos = socket.getaddrinfo('127.0.0.1', 3000)
len(infos)
stream_info = [i for i in infos if i[1] == socket.SOCK_STREAM][0]
socket.getaddrinfo('127.0.0.1', 3000)
stream_info
client = socket.socket(*stream_info[:3])
client
client.connect(stream_info[-1])
buffer_size = 8
now = time.strftime('%H:%M:%S %d/%m/%Y')
    print('Connecting to server on port 3000 at {}'.format(now))
try:
message = sys.argv[1:]
message = ' '.join(message)
message = message.encode('utf8')
print('sending {}'.format(message))
client.sendall(message + b'\r')
amount_received = 0
amount_expected = len(message)
echo_message = b''
while amount_received < amount_expected:
data = client.recv(buffer_size)
amount_received += len(data)
echo_message += data
now = time.strftime('%H:%M:%S %d/%m/%Y')
print('[{}] Echoed: {}'.format(now, echo_message.decode('utf8')))
finally:
print('closing socket')
client.close()
if __name__ == "__main__":
client()
| [
"time.strftime",
"socket.getaddrinfo",
"socket.socket"
] | [((442, 479), 'socket.getaddrinfo', 'socket.getaddrinfo', (['"""127.0.0.1"""', '(3000)'], {}), "('127.0.0.1', 3000)\n", (460, 479), False, 'import socket\n'), ((492, 529), 'socket.getaddrinfo', 'socket.getaddrinfo', (['"""127.0.0.1"""', '(3000)'], {}), "('127.0.0.1', 3000)\n", (510, 529), False, 'import socket\n'), ((619, 656), 'socket.getaddrinfo', 'socket.getaddrinfo', (['"""127.0.0.1"""', '(3000)'], {}), "('127.0.0.1', 3000)\n", (637, 656), False, 'import socket\n'), ((686, 717), 'socket.socket', 'socket.socket', (['*stream_info[:3]'], {}), '(*stream_info[:3])\n', (699, 717), False, 'import socket\n'), ((795, 829), 'time.strftime', 'time.strftime', (['"""%H:%M:%S %d/%m/%Y"""'], {}), "('%H:%M:%S %d/%m/%Y')\n", (808, 829), False, 'import time\n'), ((1368, 1402), 'time.strftime', 'time.strftime', (['"""%H:%M:%S %d/%m/%Y"""'], {}), "('%H:%M:%S %d/%m/%Y')\n", (1381, 1402), False, 'import time\n')] |
import abc
from collections.abc import Iterable
from copy import deepcopy
import numpy as np
import mujoco_py
from scipy.linalg import expm
import robosuite.utils.macros as macros
import robosuite.utils.angle_transformation as at
from robosuite.utils.control_utils import *
import robosuite.utils.transform_utils as T
from scipy.spatial.transform import Rotation as R
import matplotlib.pyplot as plt
from scipy.integrate import odeint
from scipy.interpolate import interp1d
class Controller(object, metaclass=abc.ABCMeta):
"""
General controller interface.
Requires reference to mujoco sim object, eef_name of specific robot, relevant joint_indexes to that robot, and
whether an initial_joint is used for nullspace torques or not
Args:
sim (MjSim): Simulator instance this controller will pull robot state updates from
eef_name (str): Name of controlled robot arm's end effector (from robot XML)
joint_indexes (dict): Each key contains sim reference indexes to relevant robot joint information, namely:
:`'joints'`: list of indexes to relevant robot joints
:`'qpos'`: list of indexes to relevant robot joint positions
:`'qvel'`: list of indexes to relevant robot joint velocities
actuator_range (2-tuple of array of float): 2-Tuple (low, high) representing the robot joint actuator range
"""
def __init__(self,
sim,
eef_name,
joint_indexes,
actuator_range,
plotting,
collect_data,
simulation_total_time,
):
# Actuator range
self.actuator_min = actuator_range[0]
self.actuator_max = actuator_range[1]
# Attributes for scaling / clipping inputs to outputs
self.action_scale = None
self.action_input_transform = None
self.action_output_transform = None
# Private property attributes
self.control_dim = None
self.output_min = None
self.output_max = None
self.input_min = None
self.input_max = None
# mujoco simulator state
self.sim = sim
self.model_timestep = macros.SIMULATION_TIMESTEP
self.eef_name = eef_name
self.joint_index = joint_indexes["joints"]
self.qpos_index = joint_indexes["qpos"]
self.qvel_index = joint_indexes["qvel"]
# robot states
self.ee_pos = None
self.ee_ori_mat = None
self.ee_pos_vel = None
self.ee_ori_vel = None
self.joint_pos = None
self.joint_vel = None
# dynamics and kinematics
self.J_pos = None
self.J_ori = None
self.J_full = None
self.mass_matrix = None
self.interaction_forces = None
self.interaction_forces_vec = []
self.PD_force_command = []
self.desired_frame_FT_vec = []
self.desired_frame_imp_position_vec = []
self.desired_frame_imp_ori_vec = []
self.desired_frame_imp_vel_vec = []
self.desired_frame_imp_ang_vel_vec = []
# Joint dimension
self.joint_dim = len(joint_indexes["joints"])
# Torques being outputted by the controller
self.torques = None
# Update flag to prevent redundant update calls
self.new_update = True
# Move forward one timestep to propagate updates before taking first update
self.sim.forward()
# Initialize controller by updating internal state and setting the initial joint, pos, and ori
self.update()
self.initial_joint = self.joint_pos
self.initial_ee_pos = self.ee_pos
self.initial_ee_ori_mat = self.ee_ori_mat
# minimum jerk specification - EC
self.initial_position = self.initial_ee_pos
self.final_position = np.array(self.sim.data.site_xpos[self.sim.model.site_name2id("hole_middle_cylinder")])
self.final_position = [self.initial_position[0], self.initial_position[1]-0.2, self.initial_position[2]]
self.initial_orientation = self.initial_ee_ori_mat
# self.final_orientation = np.array([[1, 0, 0],
# [0, 0, 1],
# [0, -1, 0]]) # peg horizantal
self.final_orientation = np.array([[1, 0, 0],
[0, -1, 0],
[0, 0, -1]]) # peg vertical (pointing down)
self.euler_initial_orientation = R.from_matrix(self.initial_orientation).as_euler('xyz', degrees=False)
self.euler_final_orientation = R.from_matrix(self.final_orientation).as_euler('xyz', degrees=False)
indexes_for_correction = np.abs(self.euler_final_orientation - self.euler_initial_orientation) > np.pi
correction = np.sign(self.euler_final_orientation) * (2 * np.pi) * indexes_for_correction
self.euler_final_orientation = self.euler_final_orientation - correction
self.simulation_total_time = simulation_total_time # from main
# EC - Run further definition and class variables
self._specify_constants()
# EC - this is the vector for the impedance equations
self.X_m = np.zeros((12, 1))
self.is_contact = False # becomes true when the peg hits the table
self.contact_time = 0.0
self.first_contact = True
self.Delta_T = 0.002
self.f_0 = np.array([0, 0, 0, 0, 0, 0])
self.K = 5000
self.M = 5
Wn = np.sqrt(self.K / self.M)
zeta = 0.707
# zeta = 1
self.C = 2 * self.M * zeta * Wn
# C = 0
self.K_imp = self.K * np.array([[1, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0],
[0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 1]])
self.C_imp = self.C * np.array([[1, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0],
[0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 1]])
self.M_imp = self.M * np.array([[1, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0],
[0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 1]])
# define if you want to plot some data
self.collect_data = collect_data
self.plotting = plotting
def impedance_computations(self):
# EC - compute next impedance Xm(n+1) and Vm(n+1) in world base frame.
# state space formulation
# X=[xm;thm;xm_d;thm_d] U=[F_int;M_int]
M_inv = np.linalg.pinv(self.M_imp)
A_1 = np.concatenate((np.zeros([6, 6], dtype=int), np.identity(6)), axis=1)
A_2 = np.concatenate((np.dot(-M_inv, self.K_imp), np.dot(-M_inv, self.C_imp)), axis=1)
A = np.concatenate((A_1, A_2), axis=0)
B_1 = np.zeros([6, 6], dtype=int)
B_2 = M_inv
B = np.concatenate((B_1, B_2), axis=0)
# discrete state space A, B matrices interaction_forces
A_d = expm(A * self.Delta_T)
B_d = np.dot(np.dot(np.linalg.pinv(A), (A_d - np.identity(A_d.shape[0]))), B)
# convert the forces and torques to the desired frame
Rotation_world_to_desired = R.from_euler("xyz", self.min_jerk_orientation, degrees=False).as_matrix()
Rotation_desired_to_world = Rotation_world_to_desired.T
F_d = Rotation_desired_to_world @ self.interaction_forces[:3]
M_d = Rotation_desired_to_world @ self.interaction_forces[3:6]
f_0 = np.concatenate((Rotation_desired_to_world @ self.f_0[:3],
Rotation_desired_to_world @ self.f_0[3:6]), axis=0)
U = (f_0 + np.concatenate((F_d, M_d), axis=0)).reshape(6, 1)
# only for graphs!
if self.collect_data:
self.desired_frame_FT_vec.append(np.array(U))
self.desired_frame_imp_position_vec.append(np.array((self.X_m[:3]).reshape(3, )))
self.desired_frame_imp_ori_vec.append(np.array((self.X_m[3:6]).reshape(3, )))
self.desired_frame_imp_vel_vec.append(np.array((self.X_m[6:9]).reshape(3, )))
self.desired_frame_imp_ang_vel_vec.append(np.array((self.X_m[9:12]).reshape(3, )))
# discrete state solution X(k+1)=Ad*X(k)+Bd*U(k)
X_m_next = np.dot(A_d, self.X_m.reshape(12, 1)) + np.dot(B_d, U)
self.X_m = deepcopy(X_m_next)
return
def _min_jerk(self):
"""
EC
Compute the value of position velocity and acceleration in a minimum jerk trajectory
"""
t = self.time
x_traj = (self.X_final - self.X_init) / (self.tfinal ** 3) * (
6 * (t ** 5) / (self.tfinal ** 2) - 15 * (t ** 4) / self.tfinal + 10 * (t ** 3)) + self.X_init
y_traj = (self.Y_final - self.Y_init) / (self.tfinal ** 3) * (
6 * (t ** 5) / (self.tfinal ** 2) - 15 * (t ** 4) / self.tfinal + 10 * (t ** 3)) + self.Y_init
z_traj = (self.Z_final - self.Z_init) / (self.tfinal ** 3) * (
6 * (t ** 5) / (self.tfinal ** 2) - 15 * (t ** 4) / self.tfinal + 10 * (t ** 3)) + self.Z_init
self.min_jerk_position = np.array([x_traj, y_traj, z_traj])
# velocities
vx = (self.X_final - self.X_init) / (self.tfinal ** 3) * (
30 * (t ** 4) / (self.tfinal ** 2) - 60 * (t ** 3) / self.tfinal + 30 * (t ** 2))
vy = (self.Y_final - self.Y_init) / (self.tfinal ** 3) * (
30 * (t ** 4) / (self.tfinal ** 2) - 60 * (t ** 3) / self.tfinal + 30 * (t ** 2))
vz = (self.Z_final - self.Z_init) / (self.tfinal ** 3) * (
30 * (t ** 4) / (self.tfinal ** 2) - 60 * (t ** 3) / self.tfinal + 30 * (t ** 2))
self.min_jerk_velocity = np.array([vx, vy, vz])
# acceleration
ax = (self.X_final - self.X_init) / (self.tfinal ** 3) * (
120 * (t ** 3) / (self.tfinal ** 2) - 180 * (t ** 2) / self.tfinal + 60 * t)
ay = (self.Y_final - self.Y_init) / (self.tfinal ** 3) * (
120 * (t ** 3) / (self.tfinal ** 2) - 180 * (t ** 2) / self.tfinal + 60 * t)
az = (self.Z_final - self.Z_init) / (self.tfinal ** 3) * (
120 * (t ** 3) / (self.tfinal ** 2) - 180 * (t ** 2) / self.tfinal + 60 * t)
self.min_jerk_acceleration = np.array([ax, ay, az])
# euler xyz representation
alfa = (self.euler_final_orientation[0] - self.euler_initial_orientation[0]) / (self.tfinal ** 3) * (
6 * (t ** 5) / (self.tfinal ** 2) - 15 * (t ** 4) / self.tfinal + 10 * (t ** 3)) + \
self.euler_initial_orientation[0]
beta = (self.euler_final_orientation[1] - self.euler_initial_orientation[1]) / (self.tfinal ** 3) * (
6 * (t ** 5) / (self.tfinal ** 2) - 15 * (t ** 4) / self.tfinal + 10 * (t ** 3)) + \
self.euler_initial_orientation[1]
gamma = (self.euler_final_orientation[2] - self.euler_initial_orientation[2]) / (self.tfinal ** 3) * (
6 * (t ** 5) / (self.tfinal ** 2) - 15 * (t ** 4) / self.tfinal + 10 * (t ** 3)) + \
self.euler_initial_orientation[2]
alfa_dot = (self.euler_final_orientation[0] - self.euler_initial_orientation[0]) / (self.tfinal ** 3) * (
30 * (t ** 4) / (self.tfinal ** 2) - 60 * (t ** 3) / self.tfinal + 30 * (t ** 2))
beta_dot = (self.euler_final_orientation[1] - self.euler_initial_orientation[1]) / (self.tfinal ** 3) * (
30 * (t ** 4) / (self.tfinal ** 2) - 60 * (t ** 3) / self.tfinal + 30 * (t ** 2))
gamma_dot = (self.euler_final_orientation[2] - self.euler_initial_orientation[2]) / (self.tfinal ** 3) * (
30 * (t ** 4) / (self.tfinal ** 2) - 60 * (t ** 3) / self.tfinal + 30 * (t ** 2))
self.min_jerk_orientation = np.array([alfa, beta, gamma])
self.min_jerk_orientation_dot = np.array([alfa_dot, beta_dot, gamma_dot])
R_world_to_body = R.from_euler('xyz', self.min_jerk_orientation, degrees=False).as_matrix()
# w = T*V -- the angular velocity
self.min_jerk_ang_vel = R_world_to_body @ (T.T_mat(self.min_jerk_orientation) @
self.min_jerk_orientation_dot.T)
return
def _specify_constants(self):
"""
EC
Assign constants in class variables
"""
self.X_init = self.initial_position[0]
self.Y_init = self.initial_position[1]
self.Z_init = self.initial_position[2]
self.X_final = self.final_position[0]
self.Y_final = self.final_position[1]
self.Z_final = self.final_position[2]
self.min_jerk_position = None
self.min_jerk_velocity = None
self.min_jerk_acceleration = None
self.min_jerk_orientation = None
self.min_jerk_orientation_dot = None
self.min_jerk_ang_vel = None
self.min_jerk_ang_acc = None
self.min_jerk_position_vec = []
self.min_jerk_velocity_vec = []
self.min_jerk_acceleration_vec = []
self.min_jerk_orientation_vec = []
self.min_jerk_orientation_dot_vec = []
self.min_jerk_angle_velocity_vec = []
self.tfinal = 2 # this is for the minimum jerk
self.time = 0.0
self.time_vec = []
self.real_position = None
self.real_velocity = None
self.real_orientation = None
self.real_angle_velocity = None
self.real_position_vec = []
self.real_velocity_vec = []
self.real_orientation_vec = []
self.real_angle_velocity_vec = []
self.impedance_orientation = []
self.impedance_position_vec = []
self.impedance_velocity_vec = []
self.impedance_acceleration_vec = []
self.impedance_orientation_vec = []
self.impedance_angle_velocity_vec = []
@abc.abstractmethod
def run_controller(self):
"""
Abstract method that should be implemented in all subclass controllers, and should convert a given action
into torques (pre gravity compensation) to be executed on the robot.
Additionally, resets the self.new_update flag so that the next self.update call will occur
"""
self.new_update = True
def scale_action(self, action):
"""
Clips @action to be within self.input_min and self.input_max, and then re-scale the values to be within
the range self.output_min and self.output_max
Args:
action (Iterable): Actions to scale
Returns:
np.array: Re-scaled action
"""
if self.action_scale is None:
self.action_scale = abs(self.output_max - self.output_min) / abs(self.input_max - self.input_min)
self.action_output_transform = (self.output_max + self.output_min) / 2.0
self.action_input_transform = (self.input_max + self.input_min) / 2.0
action = np.clip(action, self.input_min, self.input_max)
transformed_action = (action - self.action_input_transform) * self.action_scale + self.action_output_transform
return transformed_action
def update(self, force=False):
"""
Updates the state of the robot arm, including end effector pose / orientation / velocity, joint pos/vel,
jacobian, and mass matrix. By default, since this is a non-negligible computation, multiple redundant calls
will be ignored via the self.new_update attribute flag. However, if the @force flag is set, the update will
occur regardless of that state of self.new_update. This base class method of @run_controller resets the
self.new_update flag
Args:
force (bool): Whether to force an update to occur or not
"""
# Only run update if self.new_update or force flag is set
# if self.new_update or force:
self.sim.forward()
self.time = self.sim.data.time
self.peg_edge = np.array(self.sim.data.site_xpos[self.sim.model.site_name2id("peg_site")])
self.ee_pos = np.array(self.sim.data.site_xpos[self.sim.model.site_name2id(self.eef_name)])
self.ee_ori_mat = np.array(self.sim.data.site_xmat[self.sim.model.site_name2id(self.eef_name)].reshape([3, 3]))
self.ee_pos_vel = np.array(self.sim.data.site_xvelp[self.sim.model.site_name2id(self.eef_name)])
self.ee_ori_vel = np.array(self.sim.data.site_xvelr[self.sim.model.site_name2id(self.eef_name)])
self.joint_pos = np.array(self.sim.data.qpos[self.qpos_index])
self.joint_vel = np.array(self.sim.data.qvel[self.qvel_index])
self.J_pos = np.array(self.sim.data.get_site_jacp(self.eef_name).reshape((3, -1))[:, self.qvel_index])
self.J_ori = np.array(self.sim.data.get_site_jacr(self.eef_name).reshape((3, -1))[:, self.qvel_index])
self.J_full = np.array(np.vstack([self.J_pos, self.J_ori]))
mass_matrix = np.ndarray(shape=(len(self.sim.data.qvel) ** 2,), dtype=np.float64, order='C')
mujoco_py.cymj._mj_fullM(self.sim.model, mass_matrix, self.sim.data.qM)
mass_matrix = np.reshape(mass_matrix, (len(self.sim.data.qvel), len(self.sim.data.qvel)))
self.mass_matrix = mass_matrix[self.qvel_index, :][:, self.qvel_index]
# EC - force readings
# the forces needs to be transform to the world base frame
# the minus sign is because the measured forces are the forces that the robot apply on the environment
forces_world = np.dot(self.ee_ori_mat, -self.sim.data.sensordata[:3])
torques_world = np.dot(self.ee_ori_mat, -self.sim.data.sensordata[3:6])
self.interaction_forces = np.concatenate((forces_world, torques_world), axis=0)
# Clear self.new_update
self.new_update = False
def update_base_pose(self, base_pos, base_ori):
"""
Optional function to implement in subclass controllers that will take in @base_pos and @base_ori and update
internal configuration to account for changes in the respective states. Useful for controllers e.g. IK, which
is based on pybullet and requires knowledge of simulator state deviations between pybullet and mujoco
Args:
base_pos (3-tuple): x,y,z position of robot base in mujoco world coordinates
            base_ori (4-tuple): x,y,z,w orientation of robot base in mujoco world coordinates
"""
pass
def update_initial_joints(self, initial_joints):
"""
Updates the internal attribute self.initial_joints. This is useful for updating changes in controller-specific
behavior, such as with OSC where self.initial_joints is used for determine nullspace actions
This function can also be extended by subclassed controllers for additional controller-specific updates
Args:
initial_joints (Iterable): Array of joint position values to update the initial joints
"""
self.initial_joint = np.array(initial_joints)
self.update(force=True)
self.initial_ee_pos = self.ee_pos
self.initial_ee_ori_mat = self.ee_ori_mat
def clip_torques(self, torques):
"""
Clips the torques to be within the actuator limits
Args:
torques (Iterable): Torques to clip
Returns:
np.array: Clipped torques
"""
return np.clip(torques, self.actuator_min, self.actuator_max)
def reset_goal(self):
"""
Resets the goal -- usually by setting to the goal to all zeros, but in some cases may be different (e.g.: OSC)
"""
raise NotImplementedError
@staticmethod
def nums2array(nums, dim):
"""
Convert input @nums into numpy array of length @dim. If @nums is a single number, broadcasts it to the
corresponding dimension size @dim before converting into a numpy array
Args:
nums (numeric or Iterable): Either single value or array of numbers
dim (int): Size of array to broadcast input to env.sim.data.actuator_force
Returns:
np.array: Array filled with values specified in @nums
"""
# First run sanity check to make sure no strings are being inputted
if isinstance(nums, str):
raise TypeError("Error: Only numeric inputs are supported for this function, nums2array!")
# Check if input is an Iterable, if so, we simply convert the input to np.array and return
# Else, input is a single value, so we map to a numpy array of correct size and return
return np.array(nums) if isinstance(nums, Iterable) else np.ones(dim) * nums
@property
def torque_compensation(self):
"""
Gravity compensation for this robot arm
Returns:
np.array: torques
"""
return self.sim.data.qfrc_bias[self.qvel_index]
@property
def actuator_limits(self):
"""
Torque limits for this controller
Returns:
2-tuple:
- (np.array) minimum actuator torques
- (np.array) maximum actuator torques
"""
return self.actuator_min, self.actuator_max
@property
def control_limits(self):
"""
Limits over this controller's action space, which defaults to input min/max
Returns:
2-tuple:
- (np.array) minimum action values
- (np.array) maximum action values
"""
return self.input_min, self.input_max
@property
def name(self):
"""
Name of this controller
Returns:
str: controller name
"""
raise NotImplementedError
# EC
def add_path_parameter(self):
self.time_vec.append(self.time)
self.min_jerk_position_vec.append(self.min_jerk_position)
self.min_jerk_velocity_vec.append(self.min_jerk_velocity)
self.min_jerk_acceleration_vec.append(self.min_jerk_acceleration)
# self.min_jerk_orientation_vec.append(self.min_jerk_orientation)
self.min_jerk_orientation_vec.append(R.from_euler("xyz", self.min_jerk_orientation, degrees=False).as_rotvec())
self.min_jerk_orientation_dot_vec.append(self.min_jerk_orientation_dot)
self.min_jerk_angle_velocity_vec.append(self.min_jerk_ang_vel)
self.real_position_vec.append(self.real_position)
self.real_velocity_vec.append(self.real_velocity)
self.real_orientation_vec.append(self.real_orientation)
self.real_angle_velocity_vec.append(self.real_angle_velocity)
self.interaction_forces_vec.append(np.array(self.interaction_forces))
def plotter(self):
time = np.array(self.time_vec)
min_jerk_position = np.array(self.min_jerk_position_vec)
min_jerk_velocity = np.array(self.min_jerk_velocity_vec)
min_jerk_acceleration = np.array(self.min_jerk_acceleration_vec)
min_jerk_orientation = np.array(self.min_jerk_orientation_vec)
min_jerk_angle_velocity = np.array(self.min_jerk_angle_velocity_vec)
impedance_position = np.array(self.impedance_position_vec)
impedance_velocity = np.array(self.impedance_velocity_vec)
impedance_orientation = np.array(self.impedance_orientation_vec)
impedance_angle_velocity = np.array(self.impedance_angle_velocity_vec)
real_position = np.array(self.real_position_vec)
real_velocity = np.array(self.real_velocity_vec)
real_orientation = np.array(self.real_orientation_vec)
real_angle_velocity = np.array(self.real_angle_velocity_vec)
interaction_forces = np.array(self.interaction_forces_vec)
PD_force_command = np.array(self.PD_force_command)
plt.figure()
ax1 = plt.subplot(311)
ax1.plot(time, min_jerk_position[:, 0], 'g', label=" X reference")
ax1.plot(time, impedance_position[:, 0], 'b--', label=" X impedance")
ax1.plot(time, real_position[:, 0], 'r--', label=" X real")
ax1.legend()
ax1.set_title("X [m]")
ax2 = plt.subplot(312)
ax2.plot(time, min_jerk_position[:, 1], 'g', label=" Y reference")
ax2.plot(time, impedance_position[:, 1], 'b--', label=" Y impedance")
ax2.plot(time, real_position[:, 1], 'r--', label=" Y real")
ax2.legend()
ax2.set_title("Y [m]")
ax3 = plt.subplot(313)
ax3.plot(time, min_jerk_position[:, 2], 'g', label=" Z reference")
ax3.plot(time, impedance_position[:, 2], 'b--', label=" Z impedance")
ax3.plot(time, real_position[:, 2], 'r--', label=" Z real")
ax3.legend()
ax3.set_title("Z [m]")
plt.tight_layout()
# ----------------------------------------------------------------------
plt.figure()
ax1 = plt.subplot(311)
ax1.plot(time, min_jerk_velocity[:, 0], 'g', label="$\dot X$ minimum jerk")
ax1.plot(time, impedance_velocity[:, 0], 'b--', label="$\dot X$ impedance")
ax1.plot(time, real_velocity[:, 0], 'r--', label=" $\dot X$ real")
ax1.legend()
ax1.set_title("$\dot X$ [m/s]")
ax2 = plt.subplot(312)
ax2.plot(time, min_jerk_velocity[:, 1], 'g', label=" $\dot Y$ minimum jerk")
        ax2.plot(time, impedance_velocity[:, 1], 'b--', label="$\dot Y$ impedance")
ax2.plot(time, real_velocity[:, 1], 'r--', label=" $\dot Y$ real")
ax2.legend()
ax2.set_title("$\dot Y$ [m/s]")
ax3 = plt.subplot(313)
ax3.plot(time, min_jerk_velocity[:, 2], 'g', label=" $\dot Z$ minimum jerk")
        ax3.plot(time, impedance_velocity[:, 2], 'b--', label="$\dot Z$ impedance")
ax3.plot(time, real_velocity[:, 2], 'r--', label=" $\dot Z$ real")
ax3.legend()
ax3.set_title("$\dot Z$ [m/s]")
plt.tight_layout()
# ----------------------------------------------------------------------
plt.figure()
ax1 = plt.subplot(311)
ax1.plot(time, min_jerk_orientation[:, 0], 'g', label="orientation 1st element - minimum jerk")
ax1.plot(time, impedance_orientation[:, 0], 'b--', label="orientation 1st element - impedance")
ax1.plot(time, real_orientation[:, 0], 'r--', label="orientation 1st element - real")
ax1.legend()
ax1.set_title("orientation 1st element")
ax2 = plt.subplot(312)
ax2.plot(time, min_jerk_orientation[:, 1], 'g', label="orientation 2nd element - minimum jerk")
ax2.plot(time, impedance_orientation[:, 1], 'b--', label="orientation 2nd element - impedance")
ax2.plot(time, real_orientation[:, 1], 'r--', label="orientation 2nd element - real")
ax2.legend()
ax2.set_title("orientation 2nd element")
ax3 = plt.subplot(313)
ax3.plot(time, min_jerk_orientation[:, 2], 'g', label="orientation 3rd element - minimum jerk")
ax3.plot(time, impedance_orientation[:, 2], 'b--', label="orientation 3rd element - impedance")
ax3.plot(time, real_orientation[:, 2], 'r--', label="orientation 3rd element - real")
ax3.legend()
ax3.set_title("orientation 3rd element")
plt.tight_layout()
# -------------------------------------------------------------
plt.figure()
ax1 = plt.subplot(311)
ax1.plot(time, min_jerk_angle_velocity[:, 0], 'g', label="Wx - minimum jerk")
        ax1.plot(time, impedance_angle_velocity[:, 0], 'b--', label="Wx - impedance")
ax1.plot(time, real_angle_velocity[:, 0], 'r--', label="Wx - real")
ax1.legend()
ax1.set_title("Wx")
ax2 = plt.subplot(312)
ax2.plot(time, min_jerk_angle_velocity[:, 1], 'g', label="Wy - minimum jerk")
        ax2.plot(time, impedance_angle_velocity[:, 1], 'b--', label="Wy - impedance")
ax2.plot(time, real_angle_velocity[:, 1], 'r--', label="Wy - real")
ax2.legend()
ax2.set_title("Wy")
ax3 = plt.subplot(313)
ax3.plot(time, min_jerk_angle_velocity[:, 2], 'g', label="Wz - minimum jerk")
        ax3.plot(time, impedance_angle_velocity[:, 2], 'b--', label="Wz - impedance")
ax3.plot(time, real_angle_velocity[:, 2], 'r--', label="Wz - real")
ax3.legend()
ax3.set_title("Wz")
plt.tight_layout()
# -------------------------------------------------------------
plt.figure()
ax1 = plt.subplot(311)
ax1.plot(time, interaction_forces[:, 0], 'r--', label="from sensor")
ax1.plot(time, PD_force_command[:, 0], 'g--', label="from PD")
ax1.legend()
ax1.set_title("Fx [N]")
ax2 = plt.subplot(312)
ax2.plot(time, interaction_forces[:, 1], 'r--', label="from sensor")
ax2.plot(time, PD_force_command[:, 1], 'g--', label="from PD")
ax2.legend()
ax2.set_title("Fy [N]")
ax3 = plt.subplot(313)
        ax3.plot(time, interaction_forces[:, 2], 'r--', label="from sensor")
        ax3.plot(time, PD_force_command[:, 2], 'g--', label="from PD")
ax3.legend()
ax3.set_title("Fz [N]")
plt.tight_layout()
# -------------------------------------------------------------
plt.figure()
ax1 = plt.subplot(311)
ax1.plot(time, interaction_forces[:, 3], 'r--', label="from sensor")
ax1.plot(time, PD_force_command[:, 3], 'g--', label="from PD")
ax1.legend()
ax1.set_title("Mx [Nm]")
ax2 = plt.subplot(312)
ax2.plot(time, interaction_forces[:, 4], 'r--', label="from sensor")
ax2.plot(time, PD_force_command[:, 4], 'g--', label="from PD")
ax2.legend()
ax2.set_title("My [Nm]")
ax3 = plt.subplot(313)
ax3.plot(time, interaction_forces[:, 5], 'r--', label="from sensor")
ax3.plot(time, PD_force_command[:, 5], 'g--', label="from PD")
ax3.legend()
ax3.set_title("Mz [Nm]")
plt.tight_layout()
# plt.show()
# contact_time = self.contact_time
# contact_index = np.where(np.array(time) == contact_time)
# contact_index = int(contact_index[0])
# time = time[contact_index:]
# t = np.array(time)
# K = self.K
# M = self.M
# C = self.C
# FT = np.array(self.desired_frame_FT_vec)
# F = FT[:, 1]
# # F = FT[:, 3]
# y0 = [0, 0]
# pos = np.array(self.desired_frame_imp_position_vec)
# ori = np.array(self.desired_frame_imp_ori_vec)
# vel = np.array(self.desired_frame_imp_vel_vec)
# ang_vel = np.array(self.desired_frame_imp_ang_vel_vec)
#
# sol = odeint(self.pend, y0, t, args=(F, time, K, C, M))
# plt.figure()
# plt.plot(t, sol[:, 0], 'b', label='pos from ODE')
# plt.plot(time, pos[:, 1], 'g',
# label='from simulation')
# # plt.plot(time, ori[:, 0], 'g',
# # label='from simulation')
# # plt.plot(time, min_jerk_position[contact_index:, 0] - impedance_position[contact_index:, 0], 'g',
# # label='from simulation')
# plt.legend(loc='best')
# plt.xlabel('t')
# plt.grid()
#
# plt.figure()
# plt.plot(t, sol[:, 1], 'b', label='vel from ODE')
# plt.plot(time, vel[:, 1],
# 'g',
# label='from simulation')
# # plt.plot(time, ang_vel[:, 0],
# # 'g',
# # label='from simulation')
# plt.legend(loc='best')
# plt.xlabel('t')
# plt.grid()
plt.show()
return
def pend(self, y, t, F, time, K, C, M):
x, x_dot = y
f_interp = interp1d(time, F, axis=0, fill_value="extrapolate")
f = f_interp(t)
dydt = [x_dot, -K / M * x - C / M * x_dot + f / M]
return dydt
# EC
def get_path_info(self):
"""
Returns:
"""
info = {
"time": self.time_vec,
"min_jerk_position_vec": self.min_jerk_position_vec,
"min_jerk_velocity_vec": self.min_jerk_velocity_vec,
"min_jerk_acceleration_vec": self.min_jerk_acceleration_vec,
"min_jerk_orientation_vec": self.min_jerk_orientation_vec,
"min_jerk_angle_velocity_vec": self.min_jerk_angle_velocity_vec,
"impedance_position_vec": self.impedance_position_vec,
"impedance_velocity_vec": self.impedance_velocity_vec,
"impedance_orientation_vec": self.impedance_orientation_vec,
"impedance_angle_velocity_vec": self.impedance_angle_velocity_vec,
"real_position_vec": self.real_position_vec,
"real_velocity_vec": self.real_velocity_vec,
"real_orientation_vec": self.real_orientation_vec,
"real_angle_velocity_vec": self.real_angle_velocity_vec,
"interaction_forces_vec": self.interaction_forces_vec,
"K_imp": self.K,
"M_imp": self.M,
"C_imp": self.C,
"contact_time": self.contact_time,
"min_jerk_orientation_dot": self.min_jerk_orientation_dot_vec,
}
return info
| [
"numpy.clip",
"numpy.sqrt",
"numpy.linalg.pinv",
"scipy.interpolate.interp1d",
"numpy.array",
"copy.deepcopy",
"robosuite.utils.transform_utils.T_mat",
"scipy.spatial.transform.Rotation.from_euler",
"scipy.linalg.expm",
"numpy.dot",
"numpy.vstack",
"numpy.concatenate",
"mujoco_py.cymj._mj_fullM",
"numpy.identity",
"numpy.abs",
"numpy.ones",
"scipy.spatial.transform.Rotation.from_matrix",
"numpy.sign",
"matplotlib.pyplot.show",
"numpy.zeros",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.subplot"
] | [((4356, 4401), 'numpy.array', 'np.array', (['[[1, 0, 0], [0, -1, 0], [0, 0, -1]]'], {}), '([[1, 0, 0], [0, -1, 0], [0, 0, -1]])\n', (4364, 4401), True, 'import numpy as np\n'), ((5279, 5296), 'numpy.zeros', 'np.zeros', (['(12, 1)'], {}), '((12, 1))\n', (5287, 5296), True, 'import numpy as np\n'), ((5487, 5515), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 0, 0]'], {}), '([0, 0, 0, 0, 0, 0])\n', (5495, 5515), True, 'import numpy as np\n'), ((5571, 5595), 'numpy.sqrt', 'np.sqrt', (['(self.K / self.M)'], {}), '(self.K / self.M)\n', (5578, 5595), True, 'import numpy as np\n'), ((7114, 7140), 'numpy.linalg.pinv', 'np.linalg.pinv', (['self.M_imp'], {}), '(self.M_imp)\n', (7128, 7140), True, 'import numpy as np\n'), ((7332, 7366), 'numpy.concatenate', 'np.concatenate', (['(A_1, A_2)'], {'axis': '(0)'}), '((A_1, A_2), axis=0)\n', (7346, 7366), True, 'import numpy as np\n'), ((7382, 7409), 'numpy.zeros', 'np.zeros', (['[6, 6]'], {'dtype': 'int'}), '([6, 6], dtype=int)\n', (7390, 7409), True, 'import numpy as np\n'), ((7442, 7476), 'numpy.concatenate', 'np.concatenate', (['(B_1, B_2)'], {'axis': '(0)'}), '((B_1, B_2), axis=0)\n', (7456, 7476), True, 'import numpy as np\n'), ((7556, 7578), 'scipy.linalg.expm', 'expm', (['(A * self.Delta_T)'], {}), '(A * self.Delta_T)\n', (7560, 7578), False, 'from scipy.linalg import expm\n'), ((8057, 8171), 'numpy.concatenate', 'np.concatenate', (['(Rotation_desired_to_world @ self.f_0[:3], Rotation_desired_to_world @ self\n .f_0[3:6])'], {'axis': '(0)'}), '((Rotation_desired_to_world @ self.f_0[:3], \n Rotation_desired_to_world @ self.f_0[3:6]), axis=0)\n', (8071, 8171), True, 'import numpy as np\n'), ((8902, 8920), 'copy.deepcopy', 'deepcopy', (['X_m_next'], {}), '(X_m_next)\n', (8910, 8920), False, 'from copy import deepcopy\n'), ((9693, 9727), 'numpy.array', 'np.array', (['[x_traj, y_traj, z_traj]'], {}), '([x_traj, y_traj, z_traj])\n', (9701, 9727), True, 'import numpy as np\n'), ((10278, 10300), 'numpy.array', 'np.array', (['[vx, vy, vz]'], {}), '([vx, vy, vz])\n', (10286, 10300), True, 'import numpy as np\n'), ((10842, 10864), 'numpy.array', 'np.array', (['[ax, ay, az]'], {}), '([ax, ay, az])\n', (10850, 10864), True, 'import numpy as np\n'), ((12358, 12387), 'numpy.array', 'np.array', (['[alfa, beta, gamma]'], {}), '([alfa, beta, gamma])\n', (12366, 12387), True, 'import numpy as np\n'), ((12428, 12469), 'numpy.array', 'np.array', (['[alfa_dot, beta_dot, gamma_dot]'], {}), '([alfa_dot, beta_dot, gamma_dot])\n', (12436, 12469), True, 'import numpy as np\n'), ((15483, 15530), 'numpy.clip', 'np.clip', (['action', 'self.input_min', 'self.input_max'], {}), '(action, self.input_min, self.input_max)\n', (15490, 15530), True, 'import numpy as np\n'), ((17043, 17088), 'numpy.array', 'np.array', (['self.sim.data.qpos[self.qpos_index]'], {}), '(self.sim.data.qpos[self.qpos_index])\n', (17051, 17088), True, 'import numpy as np\n'), ((17114, 17159), 'numpy.array', 'np.array', (['self.sim.data.qvel[self.qvel_index]'], {}), '(self.sim.data.qvel[self.qvel_index])\n', (17122, 17159), True, 'import numpy as np\n'), ((17561, 17632), 'mujoco_py.cymj._mj_fullM', 'mujoco_py.cymj._mj_fullM', (['self.sim.model', 'mass_matrix', 'self.sim.data.qM'], {}), '(self.sim.model, mass_matrix, self.sim.data.qM)\n', (17585, 17632), False, 'import mujoco_py\n'), ((18042, 18096), 'numpy.dot', 'np.dot', (['self.ee_ori_mat', '(-self.sim.data.sensordata[:3])'], {}), '(self.ee_ori_mat, -self.sim.data.sensordata[:3])\n', (18048, 18096), True, 'import numpy as np\n'), ((18121, 18176), 'numpy.dot', 
'np.dot', (['self.ee_ori_mat', '(-self.sim.data.sensordata[3:6])'], {}), '(self.ee_ori_mat, -self.sim.data.sensordata[3:6])\n', (18127, 18176), True, 'import numpy as np\n'), ((18211, 18264), 'numpy.concatenate', 'np.concatenate', (['(forces_world, torques_world)'], {'axis': '(0)'}), '((forces_world, torques_world), axis=0)\n', (18225, 18264), True, 'import numpy as np\n'), ((19516, 19540), 'numpy.array', 'np.array', (['initial_joints'], {}), '(initial_joints)\n', (19524, 19540), True, 'import numpy as np\n'), ((19920, 19974), 'numpy.clip', 'np.clip', (['torques', 'self.actuator_min', 'self.actuator_max'], {}), '(torques, self.actuator_min, self.actuator_max)\n', (19927, 19974), True, 'import numpy as np\n'), ((23263, 23286), 'numpy.array', 'np.array', (['self.time_vec'], {}), '(self.time_vec)\n', (23271, 23286), True, 'import numpy as np\n'), ((23315, 23351), 'numpy.array', 'np.array', (['self.min_jerk_position_vec'], {}), '(self.min_jerk_position_vec)\n', (23323, 23351), True, 'import numpy as np\n'), ((23380, 23416), 'numpy.array', 'np.array', (['self.min_jerk_velocity_vec'], {}), '(self.min_jerk_velocity_vec)\n', (23388, 23416), True, 'import numpy as np\n'), ((23449, 23489), 'numpy.array', 'np.array', (['self.min_jerk_acceleration_vec'], {}), '(self.min_jerk_acceleration_vec)\n', (23457, 23489), True, 'import numpy as np\n'), ((23521, 23560), 'numpy.array', 'np.array', (['self.min_jerk_orientation_vec'], {}), '(self.min_jerk_orientation_vec)\n', (23529, 23560), True, 'import numpy as np\n'), ((23595, 23637), 'numpy.array', 'np.array', (['self.min_jerk_angle_velocity_vec'], {}), '(self.min_jerk_angle_velocity_vec)\n', (23603, 23637), True, 'import numpy as np\n'), ((23668, 23705), 'numpy.array', 'np.array', (['self.impedance_position_vec'], {}), '(self.impedance_position_vec)\n', (23676, 23705), True, 'import numpy as np\n'), ((23735, 23772), 'numpy.array', 'np.array', (['self.impedance_velocity_vec'], {}), '(self.impedance_velocity_vec)\n', (23743, 23772), True, 'import numpy as np\n'), ((23805, 23845), 'numpy.array', 'np.array', (['self.impedance_orientation_vec'], {}), '(self.impedance_orientation_vec)\n', (23813, 23845), True, 'import numpy as np\n'), ((23881, 23924), 'numpy.array', 'np.array', (['self.impedance_angle_velocity_vec'], {}), '(self.impedance_angle_velocity_vec)\n', (23889, 23924), True, 'import numpy as np\n'), ((23950, 23982), 'numpy.array', 'np.array', (['self.real_position_vec'], {}), '(self.real_position_vec)\n', (23958, 23982), True, 'import numpy as np\n'), ((24007, 24039), 'numpy.array', 'np.array', (['self.real_velocity_vec'], {}), '(self.real_velocity_vec)\n', (24015, 24039), True, 'import numpy as np\n'), ((24067, 24102), 'numpy.array', 'np.array', (['self.real_orientation_vec'], {}), '(self.real_orientation_vec)\n', (24075, 24102), True, 'import numpy as np\n'), ((24133, 24171), 'numpy.array', 'np.array', (['self.real_angle_velocity_vec'], {}), '(self.real_angle_velocity_vec)\n', (24141, 24171), True, 'import numpy as np\n'), ((24201, 24238), 'numpy.array', 'np.array', (['self.interaction_forces_vec'], {}), '(self.interaction_forces_vec)\n', (24209, 24238), True, 'import numpy as np\n'), ((24266, 24297), 'numpy.array', 'np.array', (['self.PD_force_command'], {}), '(self.PD_force_command)\n', (24274, 24297), True, 'import numpy as np\n'), ((24307, 24319), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (24317, 24319), True, 'import matplotlib.pyplot as plt\n'), ((24335, 24351), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(311)'], {}), 
'(311)\n', (24346, 24351), True, 'import matplotlib.pyplot as plt\n'), ((24640, 24656), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(312)'], {}), '(312)\n', (24651, 24656), True, 'import matplotlib.pyplot as plt\n'), ((24945, 24961), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(313)'], {}), '(313)\n', (24956, 24961), True, 'import matplotlib.pyplot as plt\n'), ((25244, 25262), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (25260, 25262), True, 'import matplotlib.pyplot as plt\n'), ((25353, 25365), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (25363, 25365), True, 'import matplotlib.pyplot as plt\n'), ((25381, 25397), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(311)'], {}), '(311)\n', (25392, 25397), True, 'import matplotlib.pyplot as plt\n'), ((25717, 25733), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(312)'], {}), '(312)\n', (25728, 25733), True, 'import matplotlib.pyplot as plt\n'), ((26054, 26070), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(313)'], {}), '(313)\n', (26065, 26070), True, 'import matplotlib.pyplot as plt\n'), ((26385, 26403), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (26401, 26403), True, 'import matplotlib.pyplot as plt\n'), ((26493, 26505), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (26503, 26505), True, 'import matplotlib.pyplot as plt\n'), ((26521, 26537), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(311)'], {}), '(311)\n', (26532, 26537), True, 'import matplotlib.pyplot as plt\n'), ((26925, 26941), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(312)'], {}), '(312)\n', (26936, 26941), True, 'import matplotlib.pyplot as plt\n'), ((27329, 27345), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(313)'], {}), '(313)\n', (27340, 27345), True, 'import matplotlib.pyplot as plt\n'), ((27727, 27745), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (27743, 27745), True, 'import matplotlib.pyplot as plt\n'), ((27826, 27838), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (27836, 27838), True, 'import matplotlib.pyplot as plt\n'), ((27854, 27870), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(311)'], {}), '(311)\n', (27865, 27870), True, 'import matplotlib.pyplot as plt\n'), ((28205, 28221), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(312)'], {}), '(312)\n', (28216, 28221), True, 'import matplotlib.pyplot as plt\n'), ((28555, 28571), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(313)'], {}), '(313)\n', (28566, 28571), True, 'import matplotlib.pyplot as plt\n'), ((28899, 28917), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (28915, 28917), True, 'import matplotlib.pyplot as plt\n'), ((28998, 29010), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (29008, 29010), True, 'import matplotlib.pyplot as plt\n'), ((29026, 29042), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(311)'], {}), '(311)\n', (29037, 29042), True, 'import matplotlib.pyplot as plt\n'), ((29259, 29275), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(312)'], {}), '(312)\n', (29270, 29275), True, 'import matplotlib.pyplot as plt\n'), ((29492, 29508), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(313)'], {}), '(313)\n', (29503, 29508), True, 'import matplotlib.pyplot as plt\n'), ((29719, 29737), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (29735, 29737), True, 'import matplotlib.pyplot as plt\n'), ((29818, 29830), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (29828, 29830), True, 
'import matplotlib.pyplot as plt\n'), ((29846, 29862), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(311)'], {}), '(311)\n', (29857, 29862), True, 'import matplotlib.pyplot as plt\n'), ((30080, 30096), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(312)'], {}), '(312)\n', (30091, 30096), True, 'import matplotlib.pyplot as plt\n'), ((30314, 30330), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(313)'], {}), '(313)\n', (30325, 30330), True, 'import matplotlib.pyplot as plt\n'), ((30542, 30560), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (30558, 30560), True, 'import matplotlib.pyplot as plt\n'), ((32202, 32212), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (32210, 32212), True, 'import matplotlib.pyplot as plt\n'), ((32313, 32364), 'scipy.interpolate.interp1d', 'interp1d', (['time', 'F'], {'axis': '(0)', 'fill_value': '"""extrapolate"""'}), "(time, F, axis=0, fill_value='extrapolate')\n", (32321, 32364), False, 'from scipy.interpolate import interp1d\n'), ((4774, 4843), 'numpy.abs', 'np.abs', (['(self.euler_final_orientation - self.euler_initial_orientation)'], {}), '(self.euler_final_orientation - self.euler_initial_orientation)\n', (4780, 4843), True, 'import numpy as np\n'), ((5723, 5857), 'numpy.array', 'np.array', (['[[1, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0], [0, 0, 0, 1, 0,\n 0], [0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 1]]'], {}), '([[1, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0], [0, 0,\n 0, 1, 0, 0], [0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 1]])\n', (5731, 5857), True, 'import numpy as np\n'), ((6084, 6218), 'numpy.array', 'np.array', (['[[1, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0], [0, 0, 0, 1, 0,\n 0], [0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 1]]'], {}), '([[1, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0], [0, 0,\n 0, 1, 0, 0], [0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 1]])\n', (6092, 6218), True, 'import numpy as np\n'), ((6445, 6579), 'numpy.array', 'np.array', (['[[1, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0], [0, 0, 0, 1, 0,\n 0], [0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 1]]'], {}), '([[1, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0], [0, 0,\n 0, 1, 0, 0], [0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 1]])\n', (6453, 6579), True, 'import numpy as np\n'), ((8867, 8881), 'numpy.dot', 'np.dot', (['B_d', 'U'], {}), '(B_d, U)\n', (8873, 8881), True, 'import numpy as np\n'), ((17414, 17449), 'numpy.vstack', 'np.vstack', (['[self.J_pos, self.J_ori]'], {}), '([self.J_pos, self.J_ori])\n', (17423, 17449), True, 'import numpy as np\n'), ((21132, 21146), 'numpy.array', 'np.array', (['nums'], {}), '(nums)\n', (21140, 21146), True, 'import numpy as np\n'), ((23188, 23221), 'numpy.array', 'np.array', (['self.interaction_forces'], {}), '(self.interaction_forces)\n', (23196, 23221), True, 'import numpy as np\n'), ((4562, 4601), 'scipy.spatial.transform.Rotation.from_matrix', 'R.from_matrix', (['self.initial_orientation'], {}), '(self.initial_orientation)\n', (4575, 4601), True, 'from scipy.spatial.transform import Rotation as R\n'), ((4672, 4709), 'scipy.spatial.transform.Rotation.from_matrix', 'R.from_matrix', (['self.final_orientation'], {}), '(self.final_orientation)\n', (4685, 4709), True, 'from scipy.spatial.transform import Rotation as R\n'), ((4873, 4910), 'numpy.sign', 'np.sign', (['self.euler_final_orientation'], {}), '(self.euler_final_orientation)\n', (4880, 4910), True, 'import numpy as np\n'), ((7171, 7198), 'numpy.zeros', 'np.zeros', (['[6, 6]'], {'dtype': 'int'}), '([6, 6], dtype=int)\n', 
(7179, 7198), True, 'import numpy as np\n'), ((7200, 7214), 'numpy.identity', 'np.identity', (['(6)'], {}), '(6)\n', (7211, 7214), True, 'import numpy as np\n'), ((7255, 7281), 'numpy.dot', 'np.dot', (['(-M_inv)', 'self.K_imp'], {}), '(-M_inv, self.K_imp)\n', (7261, 7281), True, 'import numpy as np\n'), ((7283, 7309), 'numpy.dot', 'np.dot', (['(-M_inv)', 'self.C_imp'], {}), '(-M_inv, self.C_imp)\n', (7289, 7309), True, 'import numpy as np\n'), ((7607, 7624), 'numpy.linalg.pinv', 'np.linalg.pinv', (['A'], {}), '(A)\n', (7621, 7624), True, 'import numpy as np\n'), ((7764, 7825), 'scipy.spatial.transform.Rotation.from_euler', 'R.from_euler', (['"""xyz"""', 'self.min_jerk_orientation'], {'degrees': '(False)'}), "('xyz', self.min_jerk_orientation, degrees=False)\n", (7776, 7825), True, 'from scipy.spatial.transform import Rotation as R\n'), ((8369, 8380), 'numpy.array', 'np.array', (['U'], {}), '(U)\n', (8377, 8380), True, 'import numpy as np\n'), ((12496, 12557), 'scipy.spatial.transform.Rotation.from_euler', 'R.from_euler', (['"""xyz"""', 'self.min_jerk_orientation'], {'degrees': '(False)'}), "('xyz', self.min_jerk_orientation, degrees=False)\n", (12508, 12557), True, 'from scipy.spatial.transform import Rotation as R\n'), ((12664, 12698), 'robosuite.utils.transform_utils.T_mat', 'T.T_mat', (['self.min_jerk_orientation'], {}), '(self.min_jerk_orientation)\n', (12671, 12698), True, 'import robosuite.utils.transform_utils as T\n'), ((21182, 21194), 'numpy.ones', 'np.ones', (['dim'], {}), '(dim)\n', (21189, 21194), True, 'import numpy as np\n'), ((7633, 7658), 'numpy.identity', 'np.identity', (['A_d.shape[0]'], {}), '(A_d.shape[0])\n', (7644, 7658), True, 'import numpy as np\n'), ((8216, 8250), 'numpy.concatenate', 'np.concatenate', (['(F_d, M_d)'], {'axis': '(0)'}), '((F_d, M_d), axis=0)\n', (8230, 8250), True, 'import numpy as np\n'), ((22668, 22729), 'scipy.spatial.transform.Rotation.from_euler', 'R.from_euler', (['"""xyz"""', 'self.min_jerk_orientation'], {'degrees': '(False)'}), "('xyz', self.min_jerk_orientation, degrees=False)\n", (22680, 22729), True, 'from scipy.spatial.transform import Rotation as R\n')] |
# -*- coding: utf-8 -*-
# Copyright (c) 2011, <NAME> <<EMAIL>>
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
test_setup
==========
Test the setup procedure of the extension, make sure that everything is
installed at the proper place after the extension setup finished.
.. moduleauthor:: <NAME> <<EMAIL>>
"""
from __future__ import (print_function, division, unicode_literals,
absolute_import)
import re
BUILTIN_TRACKER_NAME_PATTERN = re.compile('lookup_(.*)_issue')
import pytest
from sphinx.environment import SphinxStandaloneReader
from sphinxcontrib import issuetracker
from sphinxcontrib.issuetracker import resolvers
def pytest_funcarg__content(request):
"""
Dummy content for this test module, overrides the global ``content``
funcarg.
This test module doesn't need issue references, but just a loaded and
ready-to-build sphinx application. Thus the content doesn't matter, but
still a sphinx application needs some content to build.
"""
return 'dummy content'
def test_builtin_issue_trackers():
"""
Test that all builtin issue trackers are really declared in the
BUILTIN_ISSUE_TRACKERS dict.
"""
trackers = dict(resolvers.BUILTIN_ISSUE_TRACKERS)
for attr in dir(resolvers):
match = BUILTIN_TRACKER_NAME_PATTERN.match(attr)
if match:
tracker_name = match.group(1).replace('_', ' ')
assert tracker_name in trackers
trackers.pop(tracker_name)
assert not trackers
def test_unknown_tracker(app):
"""
Test that setting ``issuetracker`` to an unknown tracker fails.
"""
app.config.issuetracker = 'spamtracker'
with pytest.raises(KeyError):
issuetracker.connect_builtin_tracker(app)
def test_add_stylesheet(app):
"""
Test that the stylesheet is properly added.
"""
from sphinx.builders.html import StandaloneHTMLBuilder
assert '_static/issuetracker.css' in StandaloneHTMLBuilder.css_files
def test_transform_added(app):
"""
Test that the transformer is properly added.
"""
assert issuetracker.IssueReferences in SphinxStandaloneReader.transforms
@pytest.mark.confoverrides(issuetracker_plaintext_issues=False)
def test_transform_not_added(app):
"""
Test that the transformer is not added if transformations are disabled.
"""
transforms = SphinxStandaloneReader.transforms
assert issuetracker.IssueReferences not in transforms
| [
"pytest.mark.confoverrides",
"pytest.raises",
"sphinxcontrib.issuetracker.connect_builtin_tracker",
"re.compile"
] | [((1748, 1779), 're.compile', 're.compile', (['"""lookup_(.*)_issue"""'], {}), "('lookup_(.*)_issue')\n", (1758, 1779), False, 'import re\n'), ((3452, 3514), 'pytest.mark.confoverrides', 'pytest.mark.confoverrides', ([], {'issuetracker_plaintext_issues': '(False)'}), '(issuetracker_plaintext_issues=False)\n', (3477, 3514), False, 'import pytest\n'), ((2971, 2994), 'pytest.raises', 'pytest.raises', (['KeyError'], {}), '(KeyError)\n', (2984, 2994), False, 'import pytest\n'), ((3004, 3045), 'sphinxcontrib.issuetracker.connect_builtin_tracker', 'issuetracker.connect_builtin_tracker', (['app'], {}), '(app)\n', (3040, 3045), False, 'from sphinxcontrib import issuetracker\n')] |
'''
Urls for shortener app urlshortener/urls.py
'''
from django.urls import path
from .views import home_view, redirect_url_view
appname = "shortener"
urlpatterns = [
# Home view
path('', home_view, name='home'),
path('<str:shortened_part>', redirect_url_view, name='redirect'),
] | [
"django.urls.path"
] | [((190, 222), 'django.urls.path', 'path', (['""""""', 'home_view'], {'name': '"""home"""'}), "('', home_view, name='home')\n", (194, 222), False, 'from django.urls import path\n'), ((228, 292), 'django.urls.path', 'path', (['"""<str:shortened_part>"""', 'redirect_url_view'], {'name': '"""redirect"""'}), "('<str:shortened_part>', redirect_url_view, name='redirect')\n", (232, 292), False, 'from django.urls import path\n')] |
import random
#Importing all the words for the game from words python file.
from words import word_list
#defining a function , which will return a word for our game.
def get_word():
    word = random.choice(word_list) # "random.choice()" returns a single randomly selected element from the given sequence.
    return word.upper() # upper() is a built-in string method; it returns the chosen word in uppercase.
#This "play" function is for the actual interactive game_play.
def play(word):
    word_completion = "_" * len(word) # Contains one "_" (dash) for every letter of the randomly chosen word; initially all underscores.
guessed = False #Initialzed to false
#Creating two lists, where "guessed_letters[] will hold the letters user guessed & "guesses_words" will hold the words user guessed.
guessed_letters = []
guessed_words=[]
tries = 6 #Total tries given to the user.
print("Let's play Hangman!")
print(display_hangman(tries)) #Print the number of tries available for the user
print(word_completion) #Print the underscores for guessing the letter.
print('\n')
while not guessed and tries > 0: # Case -> When we haven't entered any letter(when we haven't guessed anything)
guess = input("Please guess a letter or word: ").upper() # Taking input from user for guessing or asking to input letters , which will be taken in UpperCase.
# If only one letter is guessed & isalpha() method returns True if all the characters are alphabet letters.
if len(guess) == 1 and guess.isalpha():
            if guess in guessed_letters: # The letter has already been guessed
print("You already guessed the letter", guess)
elif guess not in word: #If our guess is wrong or not included in the random word generated, then print this
print(guess, "is not in the word.")
tries -= 1 # Reducing our tries by minus 1 for incoreect guessing
guessed_letters.append(guess)
else:
print("Good Job,", guess, "is in the word!") # Case -> If guess is right.
guessed_letters.append(guess)
word_as_list =list(word_completion)
                # Find every index at which the guessed letter occurs in the word [ CASE -> RIGHT ]
indices = [ i for i, letter in enumerate(word) if letter == guess]
for index in indices:
word_as_list[index] = guess #Traversing through the loop to compare each index alphabet with our guessed alphabets.
word_completion = "".join(word_as_list) #Then joining all the alphabets together
if "_" not in word_completion:
guessed = True
elif len(guess) == len(word) and guess.isalpha():
if guess in guessed_words:
print("You already guessed the word", guess)
elif guess !=word:
print(guess, "is not the word.") #For Incorrect Case
tries -=1
guessed_words.append(guess)
else:
guessed = True
word_completion = word
else:
print("Not a valid guess.")
print(display_hangman(tries)) #Always remember indexing is very important in Python.
print(word_completion)
print('\n')
if guessed:
print("Congrats, you guessed the word! You win!")
else:
print("Sorry, you ran out of tries. The word was" + word + ". Maybe next time!")
def display_hangman(tries):
stages = [ #final state: head, torso, both arms, and both legs
"""
--------
| |
| O
| \|/
| |
| / \
-
""",
                #head, torso, both arms, and one leg
"""
--------
| |
| O
| \\|/
| |
| /
-
""",
# head, torso, and both arms
"""
--------
| |
| O
| \\|/
| |
|
-
""",
# head, torso, and one arm
"""
--------
| |
| O
| \\|
| |
|
-
""",
# head and torso
"""
--------
| |
| O
| |
| |
|
-
""",
# head
"""
--------
| |
| O
|
|
|
-
""",
# initial empty state
"""
--------
| |
|
|
|
|
-
"""
]
return stages[tries]
def main():
word = get_word()
play(word)
while input("Play Again? (Y/N) ").upper() == "Y":
word = get_word()
play(word)
if __name__ == "__main__":
main()
| [
"random.choice"
] | [((194, 218), 'random.choice', 'random.choice', (['word_list'], {}), '(word_list)\n', (207, 218), False, 'import random\n')] |
#!/usr/bin/env python3
import json
import sys
animation_filename = sys.argv[1]
num_boxes = int(sys.argv[2])
with open(animation_filename, 'r') as animation_file:
animation_data = animation_file.readlines()
json_data = {}
json_data['timeStep'] = float(animation_data[2])
json_data['name'] = "Jared's Animation"
# Setup objects
color = [1.0, 0.4, 0.3, 1.0]
names = []
json_data['objects'] = []
for i, primitive in enumerate(animation_data[4:4+num_boxes]):
primitive = primitive.split(',')
obj_mesh = 'cube'
obj_scale = [float(v) for v in primitive[4:7]]
obj_name = obj_mesh + str(i)
names.append(obj_name)
json_data['objects'].append({
'name': obj_name,
'mesh': obj_mesh,
'scale': obj_scale,
'material': { 'color': color },
})
# Setup frames
json_data['frames'] = []
for pos, rot in zip(animation_data[4+num_boxes+1::2], animation_data[4+num_boxes+2::2]):
frame = {}
for i, name in enumerate(names):
obj_t = [float(v) for v in pos.split(',')[i*3:i*3+3]]
obj_r = [float(v) for v in rot.split(',')[i*4:i*4+4]]
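        # Reorder the rotation so the first component ends up last, presumably converting
        # a (w, x, y, z) quaternion to the (x, y, z, w) ordering expected downstream.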
obj_r = [obj_r[1], obj_r[2], obj_r[3], obj_r[0]]
frame[name] = {
't': obj_t,
'r': obj_r
}
json_data['frames'].append(frame)
# 'duration' is never set above, so guard the recomputation to avoid a KeyError and
# otherwise keep the time step already read from the animation file.
if 'duration' in json_data:
    json_data['timeStep'] = json_data['duration'] / (len(json_data['frames']) - 1)
print(json.dumps(json_data, indent=2, separators=(',', ': ')))
| [
"json.dumps"
] | [((1374, 1429), 'json.dumps', 'json.dumps', (['json_data'], {'indent': '(2)', 'separators': "(',', ': ')"}), "(json_data, indent=2, separators=(',', ': '))\n", (1384, 1429), False, 'import json\n')] |
import warnings
import os
import numpy as np
import pytest
from pytopomat.irvsp_caller import IRVSPOutput
from pytopomat.analyzer import BandParity
test_dir = os.path.join(os.path.dirname(__file__), "../../test_files/")
class TestIrvsp(object):
@pytest.fixture
def bp(self):
"""Returns BandParity instance on test data."""
irvsp_out = IRVSPOutput(test_dir + "CrO2_outir.txt")
band_parity = BandParity(irvsp_out, spin_polarized=True, efermi=3.3923)
return band_parity
def test_compute_z2(self, bp):
z2 = bp.compute_z2(tol=2)
tz2 = np.array([0., 0., 0., 0.])
np.testing.assert_array_equal(z2, tz2)
def test_compute_z4(self, bp):
z4 = bp.compute_z4()
assert z4 == 3.0
if __name__ == "__main__":
pytest.main() | [
"pytopomat.irvsp_caller.IRVSPOutput",
"pytest.main",
"os.path.dirname",
"numpy.array",
"pytopomat.analyzer.BandParity",
"numpy.testing.assert_array_equal"
] | [((174, 199), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (189, 199), False, 'import os\n'), ((780, 793), 'pytest.main', 'pytest.main', ([], {}), '()\n', (791, 793), False, 'import pytest\n'), ((363, 403), 'pytopomat.irvsp_caller.IRVSPOutput', 'IRVSPOutput', (["(test_dir + 'CrO2_outir.txt')"], {}), "(test_dir + 'CrO2_outir.txt')\n", (374, 403), False, 'from pytopomat.irvsp_caller import IRVSPOutput\n'), ((426, 483), 'pytopomat.analyzer.BandParity', 'BandParity', (['irvsp_out'], {'spin_polarized': '(True)', 'efermi': '(3.3923)'}), '(irvsp_out, spin_polarized=True, efermi=3.3923)\n', (436, 483), False, 'from pytopomat.analyzer import BandParity\n'), ((590, 620), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0, 0.0])\n', (598, 620), True, 'import numpy as np\n'), ((623, 661), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['z2', 'tz2'], {}), '(z2, tz2)\n', (652, 661), True, 'import numpy as np\n')] |
from functools import reduce
from itertools import groupby
# ========= Mapping ==========
words = ['Deer', 'Bear', 'River', 'Car',
'Car', 'River', 'Deer', 'Car', 'Bear']
mapping = list(map(lambda x: (x, 1), words))  # materialize the map so the printed output matches the comment below
print(mapping)
# output:
# [('Deer', 1), ('Bear', 1), ('River', 1), ('Car', 1),
# ('Car', 1), ('River', 1), ('Deer', 1), ('Car', 1), ('Bear', 1)]
# ========= Shuffling ==========
sorted_mapping = sorted(mapping)
print(sorted_mapping)
# output:
# [('Bear', 1), ('Bear', 1), ('Car', 1), ('Car', 1),
# ('Car', 1), ('Deer', 1), ('Deer', 1), ('River', 1), ('River', 1)]
# ========= Reducing ==========
grouper = groupby(sorted_mapping, lambda p: p[0])
final = map(lambda l: (l[0], reduce(lambda x, y: x +
y, map(lambda p: p[1], l[1]))), grouper)
print(list(final))
# output:
# [('Bear', 2), ('Car', 3), ('Deer', 2), ('River', 2)]
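# For comparison (a sketch, not part of the original map/shuffle/reduce pipeline):
# collections.Counter produces the same word counts in a single step.
from collections import Counter
counter_result = sorted(Counter(words).items())
# counter_result == [('Bear', 2), ('Car', 3), ('Deer', 2), ('River', 2)]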
| [
"itertools.groupby"
] | [((650, 689), 'itertools.groupby', 'groupby', (['sorted_mapping', '(lambda p: p[0])'], {}), '(sorted_mapping, lambda p: p[0])\n', (657, 689), False, 'from itertools import groupby\n')] |
import discord
import urllib.request
import json
import re
import os
# Token
TOKEN = os.environ['DISCORD_BOT_TOKEN']
client = discord.Client()
ModeFlag = 0
citycodes = {
"土浦": '080020',
"水戸": '080010',
"札幌": '016010',
"仙台": '040010',
"東京": '130010',
"横浜": '140010',
"名古屋": '230010',
"大阪": '270000',
"広島": '340010',
"福岡": '400010',
"鹿児島": '460010',
"那覇": '471010'
}
taio = "札幌、仙台、土浦、水戸、東京、横浜、名古屋、大阪、広島、福岡、鹿児島、那覇"
@client.event
async def on_ready():
print("logged in as " + client.user.name)
await client.change_presence(status=discord.Status.idle,activity=discord.Game(name='創成の女神'))
@client.event
async def on_message(message):
if message.content == '対応都市':
await message.channel.send(taio)
if message.author != client.user:
reg_res = re.compile(u"ノア、(.+)の天気は?").search(message.content)
if reg_res:
if reg_res.group(1) in citycodes.keys():
citycode = citycodes[reg_res.group(1)]
resp = urllib.request.urlopen('http://weather.livedoor.com/forecast/webservice/json/v1?city=%s'%citycode).read()
resp = json.loads(resp.decode('utf-8'))
msg = resp['location']['city']
msg += "の天気は、\n"
for f in resp['forecasts']:
msg += f['dateLabel'] + "が" + f['telop'] + "\n"
msg += "です。\n```"
msg += resp['description']['text']
msg += "```"
await message.channel.send(message.author.mention + msg)
else:
await message.channel.send("そこの天気はわかりません...")
client.run(TOKEN)
| [
"discord.Client",
"re.compile",
"discord.Game"
] | [((126, 142), 'discord.Client', 'discord.Client', ([], {}), '()\n', (140, 142), False, 'import discord\n'), ((611, 637), 'discord.Game', 'discord.Game', ([], {'name': '"""創成の女神"""'}), "(name='創成の女神')\n", (623, 637), False, 'import discord\n'), ((807, 834), 're.compile', 're.compile', (['u"""ノア、(.+)の天気は?"""'], {}), "(u'ノア、(.+)の天気は?')\n", (817, 834), False, 'import re\n')] |
# UDP server example
import socket
import sys
server_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
if len(sys.argv) > 1:
port = int(sys.argv[1])
else:
port = 12345
server_socket.bind(("", port))
#http://stackoverflow.com/questions/166506/finding-local-ip-addresses-using-pythons-stdlib
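# The UDP "connect" below never sends any packets; it only makes the OS pick an
# outbound interface so getsockname() can report the machine's internal IP address.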
print("Internal : ")
print([(s.connect(('8.8.8.8', 80)), s.getsockname()[0], s.close()) for s in [socket.socket(socket.AF_INET, socket.SOCK_DGRAM)]][0][1])
print("UDPServer Waiting for client on port {}...".format(port))
while 1:
data, address = server_socket.recvfrom(256)
print("( {}:{} ) said {}".format(address[0], address[1], data))
| [
"socket.socket"
] | [((67, 115), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_DGRAM'], {}), '(socket.AF_INET, socket.SOCK_DGRAM)\n', (80, 115), False, 'import socket\n'), ((415, 463), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_DGRAM'], {}), '(socket.AF_INET, socket.SOCK_DGRAM)\n', (428, 463), False, 'import socket\n')] |
#! /usr/bin/env python3
import os
import readline
import importlib
from importlib import util
import xml.etree.ElementTree as ET
spec = importlib.util.find_spec('.subserv', package='lib')
m = spec.loader.load_module()
tree = ET.parse('./src/xssAttacks.xml')
root = tree.getroot()
def ListPayload():
print("\t**************************************************\n")
print(m.bcolors.GREEN + m.bcolors.BOLD + m.bcolors.UNDERLINE +"\tHere is the List of possible XSS Payloads\n" + m.bcolors.ENDC)
for attack in root.findall('attack'):
name = attack.find('name').text
print("\tName of XSS Attack: " + m.bcolors.ERROR + m.bcolors.BOLD + name + m.bcolors.ENDC)
def complete(text,state):
xss = ('XSS_Locator','XSS_Quick_Test','SCRIPT_w/Alert()',
'SCRIPT_w/Source_File','SCRIPT_w/Char_Code','BASE','BGSOUND','BODY_background-image','BODY_ONLOAD','DIV_background-image_1',
'DIV_background-image_2','DIV_expression','FRAME','IFRAME','INPUT_Image','IMG_w/JavaScript_Directive',
'IMG_No_Quotes/Semicolon','IMG_Dynsrc','IMG_Lowsrc','IMG_Embedded_commands_1',
'IMG_Embedded_commands_2','IMG_STYLE_w/expression','List-style-image','IMG_w/VBscript','LAYER','Livescript','US-ASCII_encoding','META',
'META_w/data:URL','META_w/additional_URL_parameter','Mocha','OBJECT','OBJECT_w/Embedded_XSS','Embed_Flash','OBJECT_w/Flash_2',
'STYLE','STYLE_w/Comment','STYLE_w/Anonymous_HTML','STYLE_w/background-image','STYLE_w/background','Stylesheet','Remote_Stylesheet_1',
'Remote_Stylesheet_2','Remote_Stylesheet_3','Remote_Stylesheet_4','TABLE','TD','XML_namespace','XML_data_island_w/CDATA',
'XML_data_island_w/comment','XML(locally-hosted)','XML_HTML+TIME','Commented-out_Block','Cookie_Manipulation','Local_.htc_file',
'Rename_.js_to_.jpg','SSI','PHP','JavaScript_Includes','Character_Encoding_Example','Case_Insensitive','HTML_Entities','Grave_Accents',
'Image_w/CharCode','UTF-8_Unicode_Encoding','Long_UTF-8_Unicode_w/out_Semicolons','DIV_w/Unicode',
'Hex_Encoding_w/out_Semicolons','UTF-7_Encoding','Escaping_JavaScript_escapes','End_title_tag','STYLE_w/broken_up_JavaScript','Embedded_Tab',
'Embedded_Encoded_Tab','Embedded_Newline','Embedded_Carriage_Return','Multiline_w/Carriage_Returns','Null_Chars_1','Null_Chars_2','Spaces/Meta_Chars',
'Non-Alpha/Non-Digit','Non-Alpha/Non-Digit_Part_2','No_Closing_Script_Tag','Protocol_resolution_in_script_tags','Half-Open_HTML/JavaScript','Double_open_angle_brackets',
'Extraneous_Open_Brackets','Malformed_IMG_Tags','No_Quotes/Semicolons','Event_Handlers_List_1','Event_Handlers_List_2','Event_Handlers_List_3',
'Evade_Regex_Filter_1','Evade_Regex_Filter_2','Evade_Regex_Filter_3','Evade_Regex_Filter_4','Evade_Regex_Filter_5','Filter_Evasion_1',
'Filter_Evasion_2','IP_Encoding','URL_Encoding','Dword_Encoding','Hex_Encoding','Octal_Encoding','Mixed_Encoding','Protocol_Resolution_Bypass',
'Firefox_Lookups_1','Firefox_Lookups_2','Firefox_Lookups_3','Removing_Cnames','Extra_dot_for_Absolute_DNS','JavaScript_Link_Location','Content_Replace'
)
options = [i for i in xss if i.startswith(text)]
if state < len(options):
return options[state]
else:
return None
def PickPayload():
readline.parse_and_bind("tab: complete")
readline.set_completer(complete)
print("\t**************************************************\n")
choice = input("\tWhich Payload do you want to use?: ")
for attack in root.findall('attack'):
name = attack.find('name').text
code = attack.find('code').text
desc = attack.find('desc').text
if name == choice:
print("\tName of XSS Attack: " + m.bcolors.ERROR + m.bcolors.BOLD + name + m.bcolors.ENDC)
print("\n\n\tThe C0de is: " + m.bcolors.ERROR + m.bcolors.BOLD + code + m.bcolors.ENDC)
print("\n\n\tDescription of attack: " + m.bcolors.ERROR + m.bcolors.BOLD + desc + m.bcolors.ENDC)
input("Press any key to go back to the menu!")
def XSS():
os.system("clear")
while (1):
print(m.bcolors.BLUE + "\t*******************************************************************" + m.bcolors.ENDC)
print(m.bcolors.BOLD + m.bcolors.GREEN + """
*******************************************************************
_ _ _ _ _ _ _ _ _ _ _ _ _ _
/ \ / \ / \ / \ / \ / \ / \ / \ / \ / \ / \ / \ / \ / \
( X | S | S ) ( P | a | y | l | o | a | d ) ( M | e | n | u )
\_/ \_/ \_/ \_/ \_/ \_/ \_/ \_/ \_/ \_/ \_/ \_/ \_/ \_/ """ + m.bcolors.ENDC)
print(
m.bcolors.ERROR + "\t*******************************************************************" + m.bcolors.ENDC)
print("\t(1)\tList XSS Payloads")
print("\t(2)\tPick XSS Payload")
print("\t(99)\tGo back to the Custom Main Menu")
print(m.bcolors.BLUE + "\t*******************************************************************" + m.bcolors.ENDC)
options = input("\nW4@+ Payload R U W@^t1ng Broliath: ")
if options == "1":
ListPayload()
elif options == "2":
PickPayload()
elif options == "99":
os.system("clear")
break
else:
input("GO CHIEFS! Come on pick something... ")
| [
"readline.set_completer",
"readline.parse_and_bind",
"xml.etree.ElementTree.parse",
"importlib.util.find_spec",
"os.system"
] | [((136, 187), 'importlib.util.find_spec', 'importlib.util.find_spec', (['""".subserv"""'], {'package': '"""lib"""'}), "('.subserv', package='lib')\n", (160, 187), False, 'import importlib\n'), ((225, 257), 'xml.etree.ElementTree.parse', 'ET.parse', (['"""./src/xssAttacks.xml"""'], {}), "('./src/xssAttacks.xml')\n", (233, 257), True, 'import xml.etree.ElementTree as ET\n'), ((3164, 3204), 'readline.parse_and_bind', 'readline.parse_and_bind', (['"""tab: complete"""'], {}), "('tab: complete')\n", (3187, 3204), False, 'import readline\n'), ((3207, 3239), 'readline.set_completer', 'readline.set_completer', (['complete'], {}), '(complete)\n', (3229, 3239), False, 'import readline\n'), ((3915, 3933), 'os.system', 'os.system', (['"""clear"""'], {}), "('clear')\n", (3924, 3933), False, 'import os\n'), ((5127, 5145), 'os.system', 'os.system', (['"""clear"""'], {}), "('clear')\n", (5136, 5145), False, 'import os\n')] |
#!/usr/bin/env python
import shlex
from prompt_toolkit import PromptSession
from prompt_toolkit.shortcuts import CompleteStyle
from prompt_toolkit.key_binding import KeyBindings
from prompt_toolkit.completion import Completion, Completer
from scripts.cli_command_list import context
import scripts.cli_command_exec
class NestedCompleter(Completer):
def __init__(self, words_dic=None, meta_dict=None, ignore_case=True, match_middle=False):
if meta_dict is None:
meta_dict = {}
if words_dic is None:
words_dic = {}
self.ignore_case = ignore_case
self.match_middle = match_middle
self.words_dic = words_dic
self.meta_dict = meta_dict
self.cmd_list = []
self.last_cmd = []
pass
def get_completions(self, document, complete_event):
text_before_cursor = document.text_before_cursor
if self.ignore_case:
text_before_cursor = text_before_cursor.lower()
text_before_cursor = str(text_before_cursor)
# Moved to shlex.split to keep quoted strings together
# text_arr = text_before_cursor.split(' ')
text_arr = shlex.split(text_before_cursor)
last_words = text_arr[-1]
words = self.__get_current_words(text_arr[:-1])
def word_matches(word):
""" True when the word before the cursor matches. """
if self.ignore_case:
word = word.lower()
# # Always return True if this is an open text match (%W) and something has been provided
# if word == "%w" and last_words != "":
# return True
# # print(word, last_words)
if self.match_middle:
return last_words in word
else:
return word.startswith(last_words)
if words:
self.last_cmd = []
for tmpa in words:
# print("get_completions:2", tmpa)
a = tmpa["command"]
if word_matches(a):
display_meta = self.meta_dict.get(a, '')
self.last_cmd.append(a)
yield Completion(a, -len(last_words), display_meta=display_meta)
def __get_current_words(self, text_arr):
current_dic = self.words_dic
for tmp in text_arr:
if tmp == ' ' or tmp == '':
continue
try:
for c in current_dic:
if len(tmp) > 0 and c["command"] == "%W":
self.cmd_list.append(c["command"])
if "subcommands" in c:
current_dic = c["subcommands"]
if tmp in c["command"]:
if c["command"] not in self.cmd_list:
self.cmd_list.append(c["command"])
if "subcommands" in c:
current_dic = c["subcommands"]
else:
return []
# print(tmp, c["command"], self.cmd_list)
except Exception:
return []
if current_dic:
return list(current_dic)
def join_contexts(curcontext):
"""
This function will join the global context with the current context.
It will overwrite any commands in the global context with any commands
that are defined in the current context. I.e., so if you have a
command that generally functions one way, you can define it in the
global context to always behave that way. Then, you can over-ride it
in one or more specific contexts which will over-ride that default
global behavior. It will also merge the first layer of sub-commands,
so if you have "show" in global and "show" in a given context, it
will merge whatever subcommands are part of both contexts.
:param curcontext: the current context that is selected
:return: combined list of current commands
"""
ctx_dict = {}
    ctx_join = context["global"][:]
    for c in range(0, len(ctx_join)):
        # if ctx_join[c].get("special"):
        #     print(ctx_join[c].get("special"))
        # Build a fresh subcommand index per top-level command so one command's
        # subcommands do not mask another's during the merge below.
        sub_dict = {}
        sub_list = ctx_join[c].get("subcommands", {})
for s in range(0, len(sub_list)):
sub_dict[sub_list[s]["command"]] = {"num": c}
ctx_dict[ctx_join[c]["command"]] = {"num": c, "sub": sub_dict}
for c in context[curcontext]:
# if c.get("special"):
# print(c.get("special"))
if c["command"] in ctx_dict:
if "subcommands" in c:
for sc in c["subcommands"]:
# if sc.get("special"):
# print(sc.get("special"))
if sc["command"] in ctx_dict[c["command"]]["sub"]:
pass
else:
ctx_join[ctx_dict[c["command"]]["num"]]["subcommands"] += [sc]
else:
ctx_join[ctx_dict[c["command"]]["num"]] = c
else:
ctx_join.append(c)
# check for special merge_context requests
for c in ctx_join:
if c.get("special") and "merge_context" in c.get("special"):
special_fxn = c.get("special").split("=")[1].split(":")
import_cxt = context[special_fxn[0]]
merge_cmds = special_fxn[1].split(";")
for cxt_cmds in import_cxt:
if cxt_cmds["command"] in merge_cmds:
if "subcommands" not in c:
c["subcommands"] = []
if cxt_cmds not in c["subcommands"]:
c["subcommands"].append(cxt_cmds)
# check for special request to merge commands into the "no" operator
do_merge_no = False
no_commands = []
merge_no_commands = None
for c in ctx_join:
if c.get("special") == "supports_no":
no_commands.append(c)
if c.get("special") and "merge_no_commands" in c.get("special"):
do_merge_no = True
merge_no_commands = c
if do_merge_no and merge_no_commands:
merge_no_commands["subcommands"] = no_commands
# print(ctx_join)
return ctx_join
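# Usage sketch (hypothetical contexts): if the global context defines "show" with the
# subcommand "version" and the current context defines "show" with the subcommand
# "files", join_contexts() returns a single "show" entry carrying both subcommands,
# while commands unique to either context are kept unchanged.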
def parse_input(strinput, curcontext, gethelp=False):
"""
This function is designed to parse the currently typed input to
determine what has been typed. It is called when the user
presses ? or when they press Enter.
:param strinput: Currently typed text at prompt
    :param curcontext: the current context that is selected
:param gethelp: True if the user enters ?, otherwise False
:return: String that will be printed to screen
"""
modifiers = ["begin", "include", "exclude", "section"]
basic_cmd_list = strinput.split(" ")
cmd_list = shlex.split(strinput)
temp_cmd = join_contexts(curcontext)
add_modifiers = False
mod_text = []
sel_modifier = ""
command_chain = []
cur_commands = []
out_help = []
out_command = {}
last_func = None
leftovers = None
# print(basic_cmd_list, len(basic_cmd_list), basic_cmd_list[len(basic_cmd_list) - 1])
# shlex.split cuts of trailing whitespace, so leverage the regular split to detect those
# print(len(cmd_list), len(basic_cmd_list))
# if cmd_list[len(cmd_list) - 1] == "" or # disabled
if basic_cmd_list[len(basic_cmd_list) - 1] == "":
showhelp = True
else:
showhelp = False
# print(showhelp, cmd_list, len(cmd_list) - 1, cmd_list[len(cmd_list) - 1])
for c in cmd_list:
cur_commands = []
for a in temp_cmd:
# print(a["command"], c, a["command"].find(c))
if c != "" and (a["command"].find(c) == 0 or a["command"] == "%W"):
command_chain.append(a["command"])
cur_commands.append(a["command"])
out_command = a
temp_cmd = a.get("subcommands", {})
if a.get("function", None) is not None:
last_func = a.get("function", None)
leftovers = "".join(strinput.split(c)[-1:]).strip()
if out_command == {} and not strinput == "":
return "% Invalid input detected" # at '^' marker."
if sel_modifier != "":
mod_text.append(c)
if add_modifiers:
for m in modifiers:
if m.find(c) == 0 and c != "":
sel_modifier = m
add_modifiers = False
if c == "|":
add_modifiers = True
# generate the unabbreviated version of the command that the user typed
curcmd = ""
for cc in range(0, len(command_chain)):
# if the position is a variable, show the user input instead of the variable
# print(cc, command_chain[cc], cmd_list[cc])
if command_chain[cc] == "%W":
curcmd += "'" + cmd_list[cc] + "' "
else:
curcmd += command_chain[cc] + " "
if gethelp:
msg = ""
# print(gethelp, showhelp, temp_cmd)
if showhelp:
if temp_cmd == {} or temp_cmd[0].get("optional"):
# for optional subcommands, list the subcommands first
if isinstance(temp_cmd, list) and temp_cmd[0].get("optional"):
for x in temp_cmd:
outcmd = x["command"].replace("%W", "WORD")
out_help.append(["", outcmd, "", x.get("help", "no help available")])
if sel_modifier != "":
out_help.append(["", "LINE", "", "Regular Expression"])
elif add_modifiers:
out_help.append(["", "begin", "", "Begin with the line that matches"])
out_help.append(["", "exclude", "", "Exclude lines that match"])
out_help.append(["", "include", "", "Include lines that match"])
out_help.append(["", "section", "", "Filter a section of output"])
else:
out_help.append(["", "|", "", "Output modifiers"])
out_help.append(["", "<cr>", "", ""])
else:
for x in temp_cmd:
outcmd = x["command"].replace("%W", "WORD")
# hide commands that are flagged as special=hidden
if x.get("special") == "hidden":
continue
out_help.append(["", outcmd, "", x.get("help", "no help available")])
msg += "?\n" + scripts.cli_command_exec.format_data(out_help)
else:
if add_modifiers:
out_help.append("|")
msg += "?\n" + " ".join(out_help) + "\n"
elif sel_modifier != "":
out_help.append(["", "LINE", "", "Search Text"])
msg += "?\n" + scripts.cli_command_exec.format_data(out_help)
else:
# print(cur_commands)
for cc in cur_commands:
out_help.append(cc.replace("%W", "WORD"))
msg += "?\n" + " ".join(out_help) + "\n"
return msg
else:
if temp_cmd != {} and not (temp_cmd[0].get("optional") or
(temp_cmd[0].get("optional_on_no") and command_chain[0] == "no")):
if curcmd == "":
return ""
else:
return '% Incomplete command. Type "' + curcmd + '?" for a list of subcommands'
else:
if len(cur_commands) > 1:
return "% Ambiguous command: " + strinput
else:
return {"command": out_command, "function": last_func, "context": curcontext, "remains": leftovers,
"chain": command_chain}
def add_filters(input, output):
"""
This function is designed to filter output when using include,
exclude, etc.
    :param input: the raw command string typed by the user, possibly containing a "|" filter
:param output: Raw output from command
:return: Filtered string
"""
if "|" in input:
newout = ""
incmd = input.split("|")[1].strip()
filterlist = incmd.split(" ")
outlist = output.split("\n")
# newout = outlist[0] + "\n"
if filterlist[0] in ["i", "in", "inc", "incl", "inclu", "includ", "include"]:
for o in outlist:
# print(" ".join(filterlist[1:]).lower(), o.lower())
if " ".join(filterlist[1:]).lower() in o.lower():
newout += o + "\n"
elif filterlist[0] in ["e", "ex", "exc", "excl", "exclu", "exclud", "exclude"]:
for o in outlist:
if " ".join(filterlist[1:]).lower() not in o.lower():
newout += o + "\n"
elif filterlist[0] in ["b", "be", "beg", "begi", "begin"]:
foundbeg = False
for o in outlist:
if " ".join(filterlist[1:]).lower() in o.lower():
foundbeg = True
if foundbeg:
newout += o + "\n"
elif filterlist[0] in ["s", "se", "sec", "sect", "secti", "sectio", "section"]:
foundbeg = False
for o in outlist:
filter_str = " ".join(filterlist[1:]).lower()
if foundbeg and o.lower()[0:1] not in [" ", "!"]:
foundbeg = False
# if o.lower()[0:len(filter_str)] == filter_str:
if o.lower()[0:1] not in [" ", "!"] and filter_str in o.lower():
foundbeg = True
if foundbeg:
newout += o + "\n"
if newout[-2:] == "\n\n":
return newout[0:-1]
else:
return newout
return output
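# Usage sketch (hypothetical values): add_filters("show users | include admin", raw_output)
# returns only the lines of raw_output that contain "admin" (case-insensitively); the
# "exclude", "begin", and "section" modifiers behave analogously per the branches above.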
def main():
curcontextdesc = "#"
curcontext = "root"
contextchain = [{"prompt": curcontextdesc, "contextname": curcontext, "elements": None, "selected": None, "selected_data": None}]
bindings = KeyBindings()
@bindings.add('?')
def _(event):
i = parse_input(session.app.current_buffer.text, contextchain[len(contextchain)-1]["contextname"], gethelp=True)
print(i, end="")
print("\n" + contextchain[len(contextchain)-1]["prompt"] + " " + session.app.current_buffer.text, end="")
print('Welcome to the AdP Sync shell. Type help or ? to list commands.\n')
session = PromptSession()
while True:
try:
n = NestedCompleter(words_dic=join_contexts(contextchain[len(contextchain)-1]["contextname"]))
text = session.prompt(contextchain[len(contextchain)-1]["prompt"] + " ", key_bindings=bindings, completer=n, complete_style=CompleteStyle.READLINE_LIKE)
except KeyboardInterrupt: # Ctrl-C
continue
except EOFError: # Ctrl-D
break
else:
pi = parse_input(text, contextchain[len(contextchain)-1]["contextname"])
if pi != "":
if isinstance(pi, dict) and pi["command"]:
fxn = pi.get("function")
execfx = getattr(scripts.cli_command_exec, fxn)
result, contextchain = execfx(pi, text, contextchain)
if result != "":
print(add_filters(text, result))
if contextchain == []:
exit()
else:
print(pi)
if __name__ == '__main__':
main()
def run(): # pragma: no cover
main()
| [
"shlex.split",
"prompt_toolkit.PromptSession",
"prompt_toolkit.key_binding.KeyBindings"
] | [((6793, 6814), 'shlex.split', 'shlex.split', (['strinput'], {}), '(strinput)\n', (6804, 6814), False, 'import shlex\n'), ((13908, 13921), 'prompt_toolkit.key_binding.KeyBindings', 'KeyBindings', ([], {}), '()\n', (13919, 13921), False, 'from prompt_toolkit.key_binding import KeyBindings\n'), ((14318, 14333), 'prompt_toolkit.PromptSession', 'PromptSession', ([], {}), '()\n', (14331, 14333), False, 'from prompt_toolkit import PromptSession\n'), ((1167, 1198), 'shlex.split', 'shlex.split', (['text_before_cursor'], {}), '(text_before_cursor)\n', (1178, 1198), False, 'import shlex\n')] |
from django.shortcuts import render, HttpResponse, redirect, \
get_object_or_404, reverse
from django.views.generic import ListView,DetailView,View
from django.contrib import messages
from django import forms
from django.conf import settings
from decimal import Decimal
from .models import Payment,Address
from django.contrib.auth.decorators import login_required
from django.views.decorators.csrf import csrf_exempt
from .forms import CheckoutForm
from paypal.standard.forms import PayPalPaymentsForm
from django.contrib import messages
from contacts.models import Whatsapp
from page_edits.models import GmailLink,InstagramAccount,TwitterAccount,FacebookAccount,PhoneNumber
from jobs.models import Order
# Create your views here.
#checkout view
@login_required()
def checkout_view(request,slug):
order = Order.objects.get(reference_code=slug)
gmail_links = GmailLink.objects.all()
instagram_accounts = InstagramAccount.objects.all()
fb_accounts = FacebookAccount.objects.all()
twitter_accounts = TwitterAccount.objects.all()
phone_numbers = PhoneNumber.objects.all()
whatsapp = Whatsapp.objects.all()
context = {
'gmail_links':gmail_links,
'instagram_accounts':instagram_accounts,
'fb_accounts':fb_accounts,
'twitter_accounts':twitter_accounts,
'phone_numbers':phone_numbers,
'whatsapp':whatsapp,
'order':order,
}
billing_address_qs = Address.objects.filter(
user=request.user,
default=True
)
if billing_address_qs.exists():
context.update(
{'default_billing_address': billing_address_qs[0]})
if request.method == 'POST':
        form = CheckoutForm(request.POST)
if form.is_valid():
use_default_billing = form.cleaned_data.get(
'use_default_billing')
if use_default_billing:
                print("Using the default billing address")
address_qs = Address.objects.filter(
user=request.user,
default=True
)
if address_qs.exists():
billing_address = address_qs[0]
order.billing_address = billing_address
order.save()
messages.success(request,"Using default Billing address. Click the Buy button to complete payment")
return redirect('/payments/payment/'+order.reference_code+'/')
else:
messages.warning(
request, "No default billing address available")
return redirect('/payments/checkout/'+order.reference_code+'/')
else:
# User is entering a new billing Address
m_billing_address = form.cleaned_data['billing_address']
m_billing_address2 = form.cleaned_data['billing_address2']
m_billing_zip = form.cleaned_data['billing_zip']
m_first_name = form.cleaned_data['first_name']
m_last_name = form.cleaned_data['last_name']
try:
user = request.user
if user.first_name and user.last_name:
address = Address(
user = request.user,
street_address=m_billing_address,
apartment_address=m_billing_address2,
first_name=m_first_name,
last_name=m_last_name,
zip=m_billing_zip)
address.save()
else:
user.first_name = m_first_name
user.save()
user.last_name = m_last_name
user.save()
address = Address(
user = request.user,
street_address=m_billing_address,
apartment_address=m_billing_address2,
first_name=m_first_name,
last_name=m_last_name,
zip=m_billing_zip)
address.save()
# Setting default billing address
set_default_billing = form.cleaned_data.get(
'set_default_billing')
if set_default_billing:
address.default = True
address.save()
order.billing_address = address
order.save()
messages.success(request,"Billing address saved succesfully. Click the Buy button to complete payment")
return redirect('/payments/payment/'+order.reference_code+'/')
except Exception as e:
messages.warning(request,"Please enter all the required fields")
print(e)
return redirect('/payments/checkout/'+order.reference_code+'/')
else:
            messages.warning(request, "Please complete all the required fields")
            print("Checkout form validation failed")
return redirect('/payments/checkout/'+order.reference_code+'/')
else:
form = CheckoutForm()
context.update({
'form':form
})
return render(request,'payments/checkout.htm',context)
@login_required()
def payment_view(request,slug):
order = Order.objects.get(reference_code=slug)
order_id = request.session.get('order_id')
host = request.get_host()
gmail_links = GmailLink.objects.all()
instagram_accounts = InstagramAccount.objects.all()
fb_accounts = FacebookAccount.objects.all()
twitter_accounts = TwitterAccount.objects.all()
phone_numbers = PhoneNumber.objects.all()
whatsapp = Whatsapp.objects.all()
paypal_dict = {
'business': settings.PAYPAL_RECEIVER_EMAIL,
'amount': '%.2f' % order.price,
'item_name': 'Order {}'.format(order.reference_code),
'invoice': str(order.reference_code),
'currency_code': 'USD',
'notify_url': 'http://{}{}'.format(host,
reverse('paypal-ipn')),
'return_url': 'http://{}{}'.format(host,
reverse('payment_done')),
'cancel_return': 'http://{}{}'.format(host,
reverse('payment_cancelled')),
}
form = PayPalPaymentsForm(initial=paypal_dict)
context = {
'gmail_links':gmail_links,
'instagram_accounts':instagram_accounts,
'fb_accounts':fb_accounts,
'twitter_accounts':twitter_accounts,
'phone_numbers':phone_numbers,
'whatsapp':whatsapp,
'order':order,
'form':form,
}
return render(request,'payments/payment.htm',context)
@csrf_exempt
def payment_done(request):
messages.success(request, "Your payment has been completed succesfully")
return redirect('/dashboard')
@csrf_exempt
def payment_canceled(request):
messages.warning(request, "Your payment has been cancelled. Please try again later")
return redirect('/dashboard')
| [
"django.shortcuts.render",
"contacts.models.Whatsapp.objects.all",
"page_edits.models.InstagramAccount.objects.all",
"page_edits.models.TwitterAccount.objects.all",
"page_edits.models.PhoneNumber.objects.all",
"paypal.standard.forms.PayPalPaymentsForm",
"django.contrib.messages.warning",
"django.shortcuts.redirect",
"page_edits.models.FacebookAccount.objects.all",
"jobs.models.Order.objects.get",
"django.contrib.auth.decorators.login_required",
"django.contrib.messages.success",
"django.shortcuts.reverse",
"page_edits.models.GmailLink.objects.all"
] | [((754, 770), 'django.contrib.auth.decorators.login_required', 'login_required', ([], {}), '()\n', (768, 770), False, 'from django.contrib.auth.decorators import login_required\n'), ((5660, 5676), 'django.contrib.auth.decorators.login_required', 'login_required', ([], {}), '()\n', (5674, 5676), False, 'from django.contrib.auth.decorators import login_required\n'), ((817, 855), 'jobs.models.Order.objects.get', 'Order.objects.get', ([], {'reference_code': 'slug'}), '(reference_code=slug)\n', (834, 855), False, 'from jobs.models import Order\n'), ((874, 897), 'page_edits.models.GmailLink.objects.all', 'GmailLink.objects.all', ([], {}), '()\n', (895, 897), False, 'from page_edits.models import GmailLink, InstagramAccount, TwitterAccount, FacebookAccount, PhoneNumber\n'), ((923, 953), 'page_edits.models.InstagramAccount.objects.all', 'InstagramAccount.objects.all', ([], {}), '()\n', (951, 953), False, 'from page_edits.models import GmailLink, InstagramAccount, TwitterAccount, FacebookAccount, PhoneNumber\n'), ((972, 1001), 'page_edits.models.FacebookAccount.objects.all', 'FacebookAccount.objects.all', ([], {}), '()\n', (999, 1001), False, 'from page_edits.models import GmailLink, InstagramAccount, TwitterAccount, FacebookAccount, PhoneNumber\n'), ((1025, 1053), 'page_edits.models.TwitterAccount.objects.all', 'TwitterAccount.objects.all', ([], {}), '()\n', (1051, 1053), False, 'from page_edits.models import GmailLink, InstagramAccount, TwitterAccount, FacebookAccount, PhoneNumber\n'), ((1074, 1099), 'page_edits.models.PhoneNumber.objects.all', 'PhoneNumber.objects.all', ([], {}), '()\n', (1097, 1099), False, 'from page_edits.models import GmailLink, InstagramAccount, TwitterAccount, FacebookAccount, PhoneNumber\n'), ((1115, 1137), 'contacts.models.Whatsapp.objects.all', 'Whatsapp.objects.all', ([], {}), '()\n', (1135, 1137), False, 'from contacts.models import Whatsapp\n'), ((5610, 5659), 'django.shortcuts.render', 'render', (['request', '"""payments/checkout.htm"""', 'context'], {}), "(request, 'payments/checkout.htm', context)\n", (5616, 5659), False, 'from django.shortcuts import render, HttpResponse, redirect, get_object_or_404, reverse\n'), ((5721, 5759), 'jobs.models.Order.objects.get', 'Order.objects.get', ([], {'reference_code': 'slug'}), '(reference_code=slug)\n', (5738, 5759), False, 'from jobs.models import Order\n'), ((5856, 5879), 'page_edits.models.GmailLink.objects.all', 'GmailLink.objects.all', ([], {}), '()\n', (5877, 5879), False, 'from page_edits.models import GmailLink, InstagramAccount, TwitterAccount, FacebookAccount, PhoneNumber\n'), ((5905, 5935), 'page_edits.models.InstagramAccount.objects.all', 'InstagramAccount.objects.all', ([], {}), '()\n', (5933, 5935), False, 'from page_edits.models import GmailLink, InstagramAccount, TwitterAccount, FacebookAccount, PhoneNumber\n'), ((5954, 5983), 'page_edits.models.FacebookAccount.objects.all', 'FacebookAccount.objects.all', ([], {}), '()\n', (5981, 5983), False, 'from page_edits.models import GmailLink, InstagramAccount, TwitterAccount, FacebookAccount, PhoneNumber\n'), ((6007, 6035), 'page_edits.models.TwitterAccount.objects.all', 'TwitterAccount.objects.all', ([], {}), '()\n', (6033, 6035), False, 'from page_edits.models import GmailLink, InstagramAccount, TwitterAccount, FacebookAccount, PhoneNumber\n'), ((6056, 6081), 'page_edits.models.PhoneNumber.objects.all', 'PhoneNumber.objects.all', ([], {}), '()\n', (6079, 6081), False, 'from page_edits.models import GmailLink, InstagramAccount, TwitterAccount, FacebookAccount, 
PhoneNumber\n'), ((6097, 6119), 'contacts.models.Whatsapp.objects.all', 'Whatsapp.objects.all', ([], {}), '()\n', (6117, 6119), False, 'from contacts.models import Whatsapp\n'), ((6754, 6793), 'paypal.standard.forms.PayPalPaymentsForm', 'PayPalPaymentsForm', ([], {'initial': 'paypal_dict'}), '(initial=paypal_dict)\n', (6772, 6793), False, 'from paypal.standard.forms import PayPalPaymentsForm\n'), ((7182, 7230), 'django.shortcuts.render', 'render', (['request', '"""payments/payment.htm"""', 'context'], {}), "(request, 'payments/payment.htm', context)\n", (7188, 7230), False, 'from django.shortcuts import render, HttpResponse, redirect, get_object_or_404, reverse\n'), ((7276, 7348), 'django.contrib.messages.success', 'messages.success', (['request', '"""Your payment has been completed succesfully"""'], {}), "(request, 'Your payment has been completed succesfully')\n", (7292, 7348), False, 'from django.contrib import messages\n'), ((7360, 7382), 'django.shortcuts.redirect', 'redirect', (['"""/dashboard"""'], {}), "('/dashboard')\n", (7368, 7382), False, 'from django.shortcuts import render, HttpResponse, redirect, get_object_or_404, reverse\n'), ((7433, 7521), 'django.contrib.messages.warning', 'messages.warning', (['request', '"""Your payment has been cancelled. Please try again later"""'], {}), "(request,\n 'Your payment has been cancelled. Please try again later')\n", (7449, 7521), False, 'from django.contrib import messages\n'), ((7529, 7551), 'django.shortcuts.redirect', 'redirect', (['"""/dashboard"""'], {}), "('/dashboard')\n", (7537, 7551), False, 'from django.shortcuts import render, HttpResponse, redirect, get_object_or_404, reverse\n'), ((5304, 5371), 'django.contrib.messages.warning', 'messages.warning', (['request', '"""Plese complete all the required fields"""'], {}), "(request, 'Plese complete all the required fields')\n", (5320, 5371), False, 'from django.contrib import messages\n'), ((5442, 5502), 'django.shortcuts.redirect', 'redirect', (["('/payments/checkout/' + order.reference_code + '/')"], {}), "('/payments/checkout/' + order.reference_code + '/')\n", (5450, 5502), False, 'from django.shortcuts import render, HttpResponse, redirect, get_object_or_404, reverse\n'), ((6465, 6486), 'django.shortcuts.reverse', 'reverse', (['"""paypal-ipn"""'], {}), "('paypal-ipn')\n", (6472, 6486), False, 'from django.shortcuts import render, HttpResponse, redirect, get_object_or_404, reverse\n'), ((6581, 6604), 'django.shortcuts.reverse', 'reverse', (['"""payment_done"""'], {}), "('payment_done')\n", (6588, 6604), False, 'from django.shortcuts import render, HttpResponse, redirect, get_object_or_404, reverse\n'), ((6705, 6733), 'django.shortcuts.reverse', 'reverse', (['"""payment_cancelled"""'], {}), "('payment_cancelled')\n", (6712, 6733), False, 'from django.shortcuts import render, HttpResponse, redirect, get_object_or_404, reverse\n'), ((2363, 2467), 'django.contrib.messages.success', 'messages.success', (['request', '"""Using default Billing address. Click the Buy button to complete payment"""'], {}), "(request,\n 'Using default Billing address. 
Click the Buy button to complete payment')\n", (2379, 2467), False, 'from django.contrib import messages\n'), ((2490, 2549), 'django.shortcuts.redirect', 'redirect', (["('/payments/payment/' + order.reference_code + '/')"], {}), "('/payments/payment/' + order.reference_code + '/')\n", (2498, 2549), False, 'from django.shortcuts import render, HttpResponse, redirect, get_object_or_404, reverse\n'), ((2588, 2653), 'django.contrib.messages.warning', 'messages.warning', (['request', '"""No default billing address available"""'], {}), "(request, 'No default billing address available')\n", (2604, 2653), False, 'from django.contrib import messages\n'), ((2706, 2766), 'django.shortcuts.redirect', 'redirect', (["('/payments/checkout/' + order.reference_code + '/')"], {}), "('/payments/checkout/' + order.reference_code + '/')\n", (2714, 2766), False, 'from django.shortcuts import render, HttpResponse, redirect, get_object_or_404, reverse\n'), ((4853, 4966), 'django.contrib.messages.success', 'messages.success', (['request', '"""Billing address saved succesfully. Click the Buy button to complete payment"""'], {}), "(request,\n 'Billing address saved succesfully. Click the Buy button to complete payment'\n )\n", (4869, 4966), False, 'from django.contrib import messages\n'), ((4984, 5043), 'django.shortcuts.redirect', 'redirect', (["('/payments/payment/' + order.reference_code + '/')"], {}), "('/payments/payment/' + order.reference_code + '/')\n", (4992, 5043), False, 'from django.shortcuts import render, HttpResponse, redirect, get_object_or_404, reverse\n'), ((5100, 5165), 'django.contrib.messages.warning', 'messages.warning', (['request', '"""Please enter all the required fields"""'], {}), "(request, 'Please enter all the required fields')\n", (5116, 5165), False, 'from django.contrib import messages\n'), ((5221, 5281), 'django.shortcuts.redirect', 'redirect', (["('/payments/checkout/' + order.reference_code + '/')"], {}), "('/payments/checkout/' + order.reference_code + '/')\n", (5229, 5281), False, 'from django.shortcuts import render, HttpResponse, redirect, get_object_or_404, reverse\n')] |
"""
Check compliance with some rules of rhythm, voice leading, and harmony.
Author: <NAME>
"""
from math import ceil
from typing import Callable, Dict, List
from rlmusician.utils.music_theory import ScaleElement, check_consonance
N_EIGHTHS_PER_MEASURE = 8
# Rhythm rules.
def check_validity_of_rhythmic_pattern(durations: List[int], **kwargs) -> bool:
"""
Check that current measure is properly divided by notes.
:param durations:
durations (in eighths) of all notes from a current measure
(including a new note); if a new note prolongs to the next measure,
its full duration is included; however, if the first note starts
in the previous measure, only its duration within the current measure
is included
:return:
indicator whether a continuation is in accordance with the rule
"""
valid_patterns = [
[4, 4],
[4, 2, 2],
[4, 2, 1, 1],
[2, 2, 2, 2],
[2, 2, 2, 1, 1],
[2, 1, 1, 2, 2],
[4, 8],
[2, 2, 8],
[2, 1, 1, 8],
]
for valid_pattern in valid_patterns:
if valid_pattern[:len(durations)] == durations:
return True
return False
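# Worked example (hypothetical values): durations [2, 2] is accepted because it is a
# prefix of the valid pattern [2, 2, 2, 2], whereas [1, 4] matches no valid pattern
# and is rejected.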
# Voice leading rules.
def check_stability_of_rearticulated_pitch(
counterpoint_continuation: 'LineElement',
movement: int,
**kwargs
) -> bool:
"""
Check that a pitch to be rearticulated (repeated) is stable.
:param counterpoint_continuation:
current continuation of counterpoint line
:param movement:
melodic interval (in scale degrees) for line continuation
:return:
indicator whether a continuation is in accordance with the rule
"""
if movement != 0:
return True
return counterpoint_continuation.scale_element.is_from_tonic_triad
def check_absence_of_stalled_pitches(
movement: int,
past_movements: List[int],
max_n_repetitions: int = 2,
**kwargs
) -> bool:
"""
Check that a pitch is not excessively repeated.
:param movement:
melodic interval (in scale degrees) for line continuation
:param past_movements:
list of past movements
:param max_n_repetitions:
maximum allowed number of repetitions in a row
:return:
indicator whether a continuation is in accordance with the rule
"""
if movement != 0:
return True
if len(past_movements) < max_n_repetitions - 1:
return True
changes = [x for x in past_movements[-max_n_repetitions+1:] if x != 0]
return len(changes) > 0
def check_absence_of_monotonous_long_motion(
counterpoint_continuation: 'LineElement',
current_motion_start_element: 'LineElement',
max_distance_in_semitones: int = 9,
**kwargs
) -> bool:
"""
Check that line does not move too far without any changes in direction.
:param counterpoint_continuation:
current continuation of counterpoint line
:param current_motion_start_element:
element of counterpoint line such that there are no
changes in direction after it
:param max_distance_in_semitones:
maximum allowed distance (in semitones)
:return:
indicator whether a continuation is in accordance with the rule
"""
current = counterpoint_continuation.scale_element.position_in_semitones
start = current_motion_start_element.scale_element.position_in_semitones
if abs(current - start) > max_distance_in_semitones:
return False
return True
def check_absence_of_skip_series(
movement: int,
past_movements: List[int],
max_n_skips: int = 2,
**kwargs
) -> bool:
"""
Check that there are no long series of skips.
:param movement:
melodic interval (in scale degrees) for line continuation
:param past_movements:
list of past movements
:param max_n_skips:
maximum allowed number of skips in a row
:return:
indicator whether a continuation is in accordance with the rule
"""
if abs(movement) <= 1:
return True
if len(past_movements) < max_n_skips:
return True
only_skips = all(abs(x) > 1 for x in past_movements[-max_n_skips:])
return not only_skips
def check_that_skip_is_followed_by_opposite_step_motion(
movement: int,
past_movements: List[int],
min_n_scale_degrees: int = 3,
**kwargs
) -> bool:
"""
Check that after a large skip there is a step motion in opposite direction.
:param movement:
melodic interval (in scale degrees) for line continuation
:param past_movements:
list of past movements
:param min_n_scale_degrees:
minimum size of a large enough skip (in scale degrees)
:return:
indicator whether a continuation is in accordance with the rule
"""
if len(past_movements) == 0:
return True
previous_movement = past_movements[-1]
if abs(previous_movement) < min_n_scale_degrees:
return True
return movement == -previous_movement / abs(previous_movement)
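# Worked example (hypothetical values): after a skip of +3 scale degrees the only
# allowed continuation is movement == -1 (a step back down); after a small movement
# (absolute value below min_n_scale_degrees) any continuation passes.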
def check_resolution_of_submediant_and_leading_tone(
line: List['LineElement'],
movement: int,
**kwargs
) -> bool:
"""
Check that a sequence of submediant and leading tone properly resolves.
If a line has submediant followed by leading tone, tonic must be used
after leading tone, because there is strong attraction to it;
similarly, if a line has leading tone followed by submediant,
dominant must be used after submediant.
:param line:
counterpoint line in progress
:param movement:
melodic interval (in scale degrees) for line continuation
:return:
indicator whether a continuation is in accordance with the rule
"""
if len(line) < 2:
return True
elif line[-1].scale_element.degree == 6 and line[-2].scale_element.degree == 7:
return movement == -1
elif line[-1].scale_element.degree == 7 and line[-2].scale_element.degree == 6:
return movement == 1
return True
def check_step_motion_to_final_pitch(
counterpoint_continuation: 'LineElement',
counterpoint_end: ScaleElement,
piece_duration: int,
prohibit_rearticulation: bool = True,
**kwargs
) -> bool:
"""
Check that there is a way to reach final pitch with step motion.
:param counterpoint_continuation:
current continuation of counterpoint line
:param counterpoint_end:
element that ends counterpoint line
:param piece_duration:
total duration of piece (in eighths)
:param prohibit_rearticulation:
if it is set to `True`, the last but one pitch can not be the same as
the final pitch
:return:
indicator whether a continuation is in accordance with the rule
"""
degrees_to_end_note = abs(
counterpoint_continuation.scale_element.position_in_degrees
- counterpoint_end.position_in_degrees
)
eighths_left = (
(piece_duration - N_EIGHTHS_PER_MEASURE)
- counterpoint_continuation.end_time_in_eighths
)
quarters_left = ceil(eighths_left / 2)
if quarters_left == 0 and degrees_to_end_note == 0:
return not prohibit_rearticulation
return degrees_to_end_note <= quarters_left + 1
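# Worked example (hypothetical values): with piece_duration = 80 eighths and a
# continuation ending at eighth 64, eighths_left = (80 - 8) - 64 = 8 and
# quarters_left = ceil(8 / 2) = 4, so the rule passes only if the continuation is at
# most 5 scale degrees away from the final pitch.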
# Harmony rules.
def check_consonance_on_strong_beat(
counterpoint_continuation: 'LineElement',
cantus_firmus_elements: List['LineElement'],
**kwargs
) -> bool:
"""
Check that there is consonance if current beat is strong.
:param counterpoint_continuation:
current continuation of counterpoint line
:param cantus_firmus_elements:
list of elements from cantus firmus that sound simultaneously with
the counterpoint element
:return:
indicator whether a continuation is in accordance with the rule
"""
if counterpoint_continuation.start_time_in_eighths % 4 != 0:
return True
return check_consonance(
counterpoint_continuation.scale_element,
cantus_firmus_elements[0].scale_element
)
def check_step_motion_to_dissonance(
counterpoint_continuation: 'LineElement',
cantus_firmus_elements: List['LineElement'],
movement: int,
**kwargs
) -> bool:
"""
Check that there is step motion to a dissonating element.
Note that this rule prohibits double neighboring tones.
:param counterpoint_continuation:
current continuation of counterpoint line
:param cantus_firmus_elements:
list of elements from cantus firmus that sound simultaneously with
the counterpoint element
:param movement:
melodic interval (in scale degrees) for line continuation
:return:
indicator whether a continuation is in accordance with the rule
"""
ctp_scale_element = counterpoint_continuation.scale_element
cf_scale_element = cantus_firmus_elements[0].scale_element
if check_consonance(ctp_scale_element, cf_scale_element):
return True
return movement in [-1, 1]
def check_step_motion_from_dissonance(
movement: int,
is_last_element_consonant: bool,
**kwargs
) -> bool:
"""
Check that there is step motion from a dissonating element.
Note that this rule prohibits double neighboring tones.
:param movement:
melodic interval (in scale degrees) for line continuation
:param is_last_element_consonant:
indicator whether last element of counterpoint line (not including
a new continuation in question) forms consonance with cantus firmus
:return:
indicator whether a continuation is in accordance with the rule
"""
if is_last_element_consonant:
return True
return movement in [-1, 1]
def check_resolution_of_suspended_dissonance(
line: List['LineElement'],
movement: int,
counterpoint_continuation: 'LineElement',
cantus_firmus_elements: List['LineElement'],
is_last_element_consonant: bool,
**kwargs
) -> bool:
"""
Check that suspended dissonance is resolved by downward step motion.
:param line:
counterpoint line in progress
:param movement:
melodic interval (in scale degrees) for line continuation
:param counterpoint_continuation:
current continuation of counterpoint line
:param cantus_firmus_elements:
list of elements from cantus firmus that sound simultaneously with
the counterpoint element
:param is_last_element_consonant:
indicator whether last element of counterpoint line (not including
a new continuation in question) forms consonance with cantus firmus
:return:
indicator whether a continuation is in accordance with the rule
"""
last_note_start = line[-1].start_time_in_eighths
last_note_end = line[-1].end_time_in_eighths
last_note_duration = last_note_end - last_note_start
if last_note_duration != N_EIGHTHS_PER_MEASURE:
return True
if is_last_element_consonant:
return True
if movement != -1:
return False
return check_consonance(
counterpoint_continuation.scale_element,
cantus_firmus_elements[-1].scale_element
)
def check_absence_of_large_intervals(
counterpoint_continuation: 'LineElement',
cantus_firmus_elements: List['LineElement'],
max_n_semitones: int = 16,
**kwargs
) -> bool:
"""
Check that there are no large intervals between adjacent pitches.
:param counterpoint_continuation:
current continuation of counterpoint line
:param cantus_firmus_elements:
list of elements from cantus firmus that sound simultaneously with
the counterpoint element
:param max_n_semitones:
maximum allowed interval in semitones between two
simultaneously sounding pitches
:return:
indicator whether a continuation is in accordance with the rule
"""
cpt_pitch = counterpoint_continuation.scale_element.position_in_semitones
for cantus_firmus_element in cantus_firmus_elements:
cf_pitch = cantus_firmus_element.scale_element.position_in_semitones
if abs(cpt_pitch - cf_pitch) > max_n_semitones:
return False
return True
def check_absence_of_lines_crossing(
counterpoint_continuation: 'LineElement',
cantus_firmus_elements: List['LineElement'],
is_counterpoint_above: bool,
prohibit_unisons: bool = True,
**kwargs
) -> bool:
"""
    Check that the counterpoint and cantus firmus lines do not cross.
:param counterpoint_continuation:
current continuation of counterpoint line
:param cantus_firmus_elements:
list of elements from cantus firmus that sound simultaneously with
the counterpoint element
:param is_counterpoint_above:
indicator whether counterpoint must be above cantus firmus
:param prohibit_unisons:
if it is set to `True`, unison are considered a special case of
lines crossing
:return:
indicator whether a continuation is in accordance with the rule
"""
initial_sign = 1 if is_counterpoint_above else -1
cpt_pitch = counterpoint_continuation.scale_element.position_in_semitones
for cantus_firmus_element in cantus_firmus_elements:
cf_pitch = cantus_firmus_element.scale_element.position_in_semitones
if prohibit_unisons and cpt_pitch == cf_pitch:
return False
elif initial_sign * (cpt_pitch - cf_pitch) < 0:
return False
return True
def check_absence_of_overlapping_motion(
counterpoint_continuation: 'LineElement',
previous_cantus_firmus_element: 'LineElement',
is_counterpoint_above: bool,
**kwargs
) -> bool:
"""
Check that there is no overlapping motion.
:param counterpoint_continuation:
current continuation of counterpoint line
:param previous_cantus_firmus_element:
the latest element of cantus firmus that sounds simultaneously
with the last counterpoint element (excluding its continuation)
:param is_counterpoint_above:
indicator whether counterpoint must be above cantus firmus
:return:
indicator whether a continuation is in accordance with the rule
"""
initial_sign = 1 if is_counterpoint_above else -1
cpt_pitch = counterpoint_continuation.scale_element.position_in_semitones
cf_pitch = previous_cantus_firmus_element.scale_element.position_in_semitones
return initial_sign * (cpt_pitch - cf_pitch) > 0
# Registry.
def get_rules_registry() -> Dict[str, Callable]:
"""
Get mapping from names to corresponding functions that check rules.
:return:
registry of functions checking rules of rhythm, voice leading,
and harmony
"""
registry = {
# Rhythm rules:
'rhythmic_pattern_validity': check_validity_of_rhythmic_pattern,
# Voice leading rules:
'rearticulation_stability': check_stability_of_rearticulated_pitch,
'absence_of_stalled_pitches': check_absence_of_stalled_pitches,
'absence_of_long_motion': check_absence_of_monotonous_long_motion,
'absence_of_skip_series': check_absence_of_skip_series,
'turn_after_skip': check_that_skip_is_followed_by_opposite_step_motion,
'VI_VII_resolution': check_resolution_of_submediant_and_leading_tone,
'step_motion_to_end': check_step_motion_to_final_pitch,
# Harmony rules:
'consonance_on_strong_beat': check_consonance_on_strong_beat,
'step_motion_to_dissonance': check_step_motion_to_dissonance,
'step_motion_from_dissonance': check_step_motion_from_dissonance,
'resolution_of_suspended_dissonance': check_resolution_of_suspended_dissonance,
'absence_of_large_intervals': check_absence_of_large_intervals,
'absence_of_lines_crossing': check_absence_of_lines_crossing,
'absence_of_overlapping_motion': check_absence_of_overlapping_motion,
}
return registry
| [
"rlmusician.utils.music_theory.check_consonance",
"math.ceil"
] | [((7208, 7230), 'math.ceil', 'ceil', (['(eighths_left / 2)'], {}), '(eighths_left / 2)\n', (7212, 7230), False, 'from math import ceil\n'), ((8061, 8163), 'rlmusician.utils.music_theory.check_consonance', 'check_consonance', (['counterpoint_continuation.scale_element', 'cantus_firmus_elements[0].scale_element'], {}), '(counterpoint_continuation.scale_element,\n cantus_firmus_elements[0].scale_element)\n', (8077, 8163), False, 'from rlmusician.utils.music_theory import ScaleElement, check_consonance\n'), ((9052, 9105), 'rlmusician.utils.music_theory.check_consonance', 'check_consonance', (['ctp_scale_element', 'cf_scale_element'], {}), '(ctp_scale_element, cf_scale_element)\n', (9068, 9105), False, 'from rlmusician.utils.music_theory import ScaleElement, check_consonance\n'), ((11234, 11337), 'rlmusician.utils.music_theory.check_consonance', 'check_consonance', (['counterpoint_continuation.scale_element', 'cantus_firmus_elements[-1].scale_element'], {}), '(counterpoint_continuation.scale_element,\n cantus_firmus_elements[-1].scale_element)\n', (11250, 11337), False, 'from rlmusician.utils.music_theory import ScaleElement, check_consonance\n')] |
# Generated by Django 4.0.3 on 2022-03-06 08:19
import django.utils.timezone
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("api", "0006_rename_athletemodel_athlete_and_more"),
]
operations = [
migrations.RemoveField(
model_name="lift",
name="session",
),
migrations.AddField(
model_name="lift",
name="session_datetime",
field=models.DateTimeField(blank=True, default=django.utils.timezone.now),
preserve_default=False,
),
migrations.AddField(
model_name="lift",
name="session_number",
field=models.IntegerField(blank=True, default=0),
preserve_default=False,
),
migrations.AlterField(
model_name="competition",
name="date_end",
field=models.DateField(blank=True),
),
migrations.AlterField(
model_name="competition",
name="date_start",
field=models.DateField(blank=True),
),
migrations.DeleteModel(
name="Session",
),
]
| [
"django.db.migrations.DeleteModel",
"django.db.models.DateField",
"django.db.models.IntegerField",
"django.db.models.DateTimeField",
"django.db.migrations.RemoveField"
] | [((278, 335), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""lift"""', 'name': '"""session"""'}), "(model_name='lift', name='session')\n", (300, 335), False, 'from django.db import migrations, models\n'), ((1131, 1169), 'django.db.migrations.DeleteModel', 'migrations.DeleteModel', ([], {'name': '"""Session"""'}), "(name='Session')\n", (1153, 1169), False, 'from django.db import migrations, models\n'), ((487, 554), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'blank': '(True)', 'default': 'django.utils.timezone.now'}), '(blank=True, default=django.utils.timezone.now)\n', (507, 554), False, 'from django.db import migrations, models\n'), ((716, 758), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'blank': '(True)', 'default': '(0)'}), '(blank=True, default=0)\n', (735, 758), False, 'from django.db import migrations, models\n'), ((923, 951), 'django.db.models.DateField', 'models.DateField', ([], {'blank': '(True)'}), '(blank=True)\n', (939, 951), False, 'from django.db import migrations, models\n'), ((1082, 1110), 'django.db.models.DateField', 'models.DateField', ([], {'blank': '(True)'}), '(blank=True)\n', (1098, 1110), False, 'from django.db import migrations, models\n')] |
# Copyright <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
from logging.handlers import TimedRotatingFileHandler
from flask import Flask, session
from flask_cors import CORS
from flask_sqlalchemy import SQLAlchemy
from flask_login import LoginManager
# init Flask
app = Flask(__name__)
app.config.from_pyfile('../app_config.py')
# init logging
logger_formatter = logging.Formatter(fmt='[%(levelname)s] %(asctime)s: %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p')
logger_handler = TimedRotatingFileHandler(filename=app.config['LOG_FILE_LOC'], when='midnight')
logger_handler.setFormatter(logger_formatter)
logger = logging.getLogger()
logger.setLevel(logging.WARNING)
logger.addHandler(logger_handler)
# allow CORS
cors = CORS(app, supports_credentials=True, expose_headers=['Content-Disposition'],
            resources={r'/api/*': {'origins': '*'}})
# init SQLAlchemy
db = SQLAlchemy(app)
from app.database.entities import *
db.create_all()
# create upload directory if it doesn't already exist
is_debug_mode = app.config['DEBUG']
if is_debug_mode:
upload_directory_path = app.config['UPLOAD_FOLDER']
upload_directory_exists = os.path.exists(upload_directory_path)
if not upload_directory_exists:
os.mkdir(upload_directory_path)
# register blueprints
from app.api import login_resource
from app.api import logout_resource
from app.api import file_resource
from app.api import admin_resource
app.register_blueprint(login_resource)
app.register_blueprint(logout_resource)
app.register_blueprint(file_resource)
app.register_blueprint(admin_resource)
# init Flask-Login
login_manager = LoginManager()
login_manager.session_protection = 'strong'
login_manager.init_app(app)
# implement session callback functions
from app.database.entities import PiFillingUser
@login_manager.user_loader
def load_user(user_id: int) -> PiFillingUser:
"""
Callback function that gets the user object whose ID
is stored in the session. This function gets called
every time an endpoint is decorated with the
@login_required decorator function
:param user_id: the user's ID
:return: the PiFillingUser object
"""
return PiFillingUser.query.get(user_id)
@app.before_request
def set_session_details() -> None:
"""
Callback function that sets the user's session details
before every request that is made. This function ensures
that the user's session will expire in the amount of
time designated by the configuration variable
SESSION_DURATION, but it also ensures that when the user
makes a request, their session will continue to stay
alive and will reset to the amount of time designated by
the same configuration variable mentioned above.
:return:
"""
session.permanent = True
session.modified = True
app.permanent_session_lifetime = app.config['SESSION_DURATION']
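    # Note: Flask's `permanent_session_lifetime` expects a datetime.timedelta (an
    # int number of seconds is also accepted and converted), so SESSION_DURATION in
    # app_config.py is presumably defined as one of those types.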
| [
"logging.getLogger",
"flask_login.LoginManager",
"os.path.exists",
"flask_cors.CORS",
"flask.Flask",
"logging.Formatter",
"logging.handlers.TimedRotatingFileHandler",
"app.database.entities.PiFillingUser.query.get",
"os.mkdir",
"flask_sqlalchemy.SQLAlchemy"
] | [((806, 821), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (811, 821), False, 'from flask import Flask, session\n'), ((900, 1002), 'logging.Formatter', 'logging.Formatter', ([], {'fmt': '"""[%(levelname)s] %(asctime)s: %(message)s"""', 'datefmt': '"""%m/%d/%Y %I:%M:%S %p"""'}), "(fmt='[%(levelname)s] %(asctime)s: %(message)s', datefmt=\n '%m/%d/%Y %I:%M:%S %p')\n", (917, 1002), False, 'import logging\n'), ((1015, 1093), 'logging.handlers.TimedRotatingFileHandler', 'TimedRotatingFileHandler', ([], {'filename': "app.config['LOG_FILE_LOC']", 'when': '"""midnight"""'}), "(filename=app.config['LOG_FILE_LOC'], when='midnight')\n", (1039, 1093), False, 'from logging.handlers import TimedRotatingFileHandler\n'), ((1150, 1169), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (1167, 1169), False, 'import logging\n'), ((1258, 1374), 'flask_cors.CORS', 'CORS', (['app'], {'supports_credentials': '(True)', 'expose_headers': "['Content-Disposition']", 'regex': "{'/api/*': {'origins': '*'}}"}), "(app, supports_credentials=True, expose_headers=['Content-Disposition'],\n regex={'/api/*': {'origins': '*'}})\n", (1262, 1374), False, 'from flask_cors import CORS\n'), ((1408, 1423), 'flask_sqlalchemy.SQLAlchemy', 'SQLAlchemy', (['app'], {}), '(app)\n', (1418, 1423), False, 'from flask_sqlalchemy import SQLAlchemy\n'), ((2143, 2157), 'flask_login.LoginManager', 'LoginManager', ([], {}), '()\n', (2155, 2157), False, 'from flask_login import LoginManager\n'), ((1673, 1710), 'os.path.exists', 'os.path.exists', (['upload_directory_path'], {}), '(upload_directory_path)\n', (1687, 1710), False, 'import os\n'), ((2694, 2726), 'app.database.entities.PiFillingUser.query.get', 'PiFillingUser.query.get', (['user_id'], {}), '(user_id)\n', (2717, 2726), False, 'from app.database.entities import PiFillingUser\n'), ((1755, 1786), 'os.mkdir', 'os.mkdir', (['upload_directory_path'], {}), '(upload_directory_path)\n', (1763, 1786), False, 'import os\n')] |
from ariadne import QueryType, make_executable_schema
type_defs = """
type Query {
hello: String!
}
"""
query = QueryType()
@query.field("hello")
def resolve_hello(*_):
return "Hello world!"
schema = make_executable_schema(type_defs, query) | [
"ariadne.make_executable_schema",
"ariadne.QueryType"
] | [((130, 141), 'ariadne.QueryType', 'QueryType', ([], {}), '()\n', (139, 141), False, 'from ariadne import QueryType, make_executable_schema\n'), ((224, 264), 'ariadne.make_executable_schema', 'make_executable_schema', (['type_defs', 'query'], {}), '(type_defs, query)\n', (246, 264), False, 'from ariadne import QueryType, make_executable_schema\n')] |
# Python
from typing import Any, Dict
# Django
from django import http
from django.http.response import HttpResponse, HttpResponseNotAllowed, HttpResponseRedirect
from django.forms.models import BaseModelForm
from django.urls.base import reverse_lazy
from django.views.generic.edit import CreateView
from django.contrib.auth.mixins import LoginRequiredMixin
# Apps
from systemtest.users.forms import SignUpForm
class SignUpView(LoginRequiredMixin, CreateView):
template_name = "users/signup.html"
success_url = reverse_lazy("users:login")
form_class = SignUpForm
def dispatch(self, request: http.HttpRequest, *args: Any, **kwargs: Any) -> http.HttpResponse:
if not self.request.user.is_staff:
return HttpResponseNotAllowed(["GET", "POST"])
return super().dispatch(request, *args, **kwargs)
def get_form_kwargs(self) -> Dict[str, Any]:
kwargs = super().get_form_kwargs()
kwargs["user"] = self.request.user
return kwargs
def form_valid(self, form: BaseModelForm) -> HttpResponse:
new_user = form.save()
new_user.department = self.request.user.department
new_user.shift = self.request.user.shift
new_user.groups.add(form.cleaned_data["groups"])
new_user.save()
return HttpResponseRedirect(str(self.success_url))
| [
"django.urls.base.reverse_lazy",
"django.http.response.HttpResponseNotAllowed"
] | [((526, 553), 'django.urls.base.reverse_lazy', 'reverse_lazy', (['"""users:login"""'], {}), "('users:login')\n", (538, 553), False, 'from django.urls.base import reverse_lazy\n'), ((744, 783), 'django.http.response.HttpResponseNotAllowed', 'HttpResponseNotAllowed', (["['GET', 'POST']"], {}), "(['GET', 'POST'])\n", (766, 783), False, 'from django.http.response import HttpResponse, HttpResponseNotAllowed, HttpResponseRedirect\n')] |
'''
Copyright 2022 Airbus SAS
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
# coding: utf-8
from sos_trades_core.tools.post_processing.charts.two_axes_instanciated_chart import InstanciatedSeries, TwoAxesInstanciatedChart
from sos_trades_core.tools.post_processing.charts.chart_filter import ChartFilter
import numpy as np
from climateeconomics.core.core_witness.policy_model import PolicyModel
from sos_trades_core.execution_engine.sos_discipline import SoSDiscipline
from climateeconomics.core.core_witness.climateeco_discipline import ClimateEcoDiscipline
class PolicyDiscipline(SoSDiscipline):
# ontology information
_ontology_data = {
'label': 'Policy Model',
'type': 'Research',
'source': 'SoSTrades Project',
'validated': '',
'validated_by': 'SoSTrades Project',
'last_modification_date': '',
'category': '',
'definition': '',
'icon': 'fas fa-balance-scale fa-fw',
'version': '',
}
_maturity = 'Research'
years = np.arange(2020, 2101)
DESC_IN = {
'year_start': ClimateEcoDiscipline.YEAR_START_DESC_IN,
'year_end': ClimateEcoDiscipline.YEAR_END_DESC_IN,
'CCS_price': {'type': 'dataframe', 'unit': '$/tCO2', 'visibility': SoSDiscipline.SHARED_VISIBILITY, 'namespace': 'ns_witness'},
'CO2_damage_price': {'type': 'dataframe', 'unit': '$/tCO2', 'visibility': SoSDiscipline.SHARED_VISIBILITY, 'namespace': 'ns_witness'},
'ccs_price_percentage': {'type': 'float', 'default': 100., 'unit': '%',
'visibility': SoSDiscipline.SHARED_VISIBILITY,
'namespace': 'ns_witness', 'user_level': 2},
'co2_damage_price_percentage': {'type': 'float', 'default': 100., 'unit': '%',
'visibility': SoSDiscipline.SHARED_VISIBILITY,
'namespace': 'ns_witness', 'user_level': 2},
}
DESC_OUT = {
'CO2_taxes': {'type': 'dataframe', 'visibility': 'Shared', 'namespace': 'ns_witness', 'unit': '$/tCO2'}
}
def init_execution(self):
param_in = self.get_sosdisc_inputs()
self.policy_model = PolicyModel()
def run(self):
param_in = self.get_sosdisc_inputs()
self.policy_model.compute_smax(param_in)
dict_values = {
'CO2_taxes': self.policy_model.CO2_tax}
# store data
self.store_sos_outputs_values(dict_values)
def compute_sos_jacobian(self):
"""
Compute sos jacobian
"""
dCO2_tax_dCO2_damage, dCO2_tax_dCCS_price = self.policy_model.compute_CO2_tax_dCCS_dCO2_damage_smooth()
self.set_partial_derivative_for_other_types(
('CO2_taxes', 'CO2_tax'), ('CO2_damage_price', 'CO2_damage_price'), np.identity(len(dCO2_tax_dCO2_damage)) * np.array(dCO2_tax_dCO2_damage))
self.set_partial_derivative_for_other_types(
('CO2_taxes', 'CO2_tax'), ('CCS_price', 'ccs_price_per_tCO2'), np.identity(len(dCO2_tax_dCCS_price)) * np.array(dCO2_tax_dCCS_price))
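        # Both derivative vectors hold per-year sensitivities, so each Jacobian is
        # diagonal: an identity matrix scaled element-wise by the vector returned by
        # the policy model (a year's CO2 tax depends only on that year's CCS price
        # and CO2 damage price).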
def get_chart_filter_list(self):
# For the outputs, making a graph for tco vs year for each range and for specific
        # value of ToT with a shift of five years between them
chart_filters = []
chart_list = ['CO2 tax']
# First filter to deal with the view : program or actor
chart_filters.append(ChartFilter(
'Charts', chart_list, chart_list, 'charts'))
return chart_filters
def get_post_processing_list(self, chart_filters=None):
# For the outputs, making a graph for tco vs year for each range and for specific
        # value of ToT with a shift of five years between them
instanciated_charts = []
# Overload default value with chart filter
if chart_filters is not None:
for chart_filter in chart_filters:
if chart_filter.filter_key == 'charts':
chart_list = chart_filter.selected_values
if 'CO2 tax' in chart_list:
CCS_price = self.get_sosdisc_inputs('CCS_price')
CO2_damage_price = self.get_sosdisc_inputs('CO2_damage_price')
CO2_tax = self.get_sosdisc_outputs('CO2_taxes')
years = list(CCS_price['years'].values)
chart_name = 'CO2 tax chart'
new_chart = TwoAxesInstanciatedChart('years', 'CO2 tax ($/tCO2)',
chart_name=chart_name)
new_series = InstanciatedSeries(
years, list(CCS_price['ccs_price_per_tCO2'].values), 'CCS price', 'lines')
new_series2 = InstanciatedSeries(
years, list(CO2_damage_price['CO2_damage_price'].values), 'CO2 damage', 'lines')
new_series3 = InstanciatedSeries(
years, list(CO2_tax['CO2_tax'].values), 'CO2 tax', 'lines')
new_chart.series.append(new_series)
new_chart.series.append(new_series2)
new_chart.series.append(new_series3)
instanciated_charts.append(new_chart)
return instanciated_charts
| [
"sos_trades_core.tools.post_processing.charts.two_axes_instanciated_chart.TwoAxesInstanciatedChart",
"numpy.array",
"climateeconomics.core.core_witness.policy_model.PolicyModel",
"numpy.arange",
"sos_trades_core.tools.post_processing.charts.chart_filter.ChartFilter"
] | [((1509, 1530), 'numpy.arange', 'np.arange', (['(2020)', '(2101)'], {}), '(2020, 2101)\n', (1518, 1530), True, 'import numpy as np\n'), ((2686, 2699), 'climateeconomics.core.core_witness.policy_model.PolicyModel', 'PolicyModel', ([], {}), '()\n', (2697, 2699), False, 'from climateeconomics.core.core_witness.policy_model import PolicyModel\n'), ((3923, 3978), 'sos_trades_core.tools.post_processing.charts.chart_filter.ChartFilter', 'ChartFilter', (['"""Charts"""', 'chart_list', 'chart_list', '"""charts"""'], {}), "('Charts', chart_list, chart_list, 'charts')\n", (3934, 3978), False, 'from sos_trades_core.tools.post_processing.charts.chart_filter import ChartFilter\n'), ((4877, 4953), 'sos_trades_core.tools.post_processing.charts.two_axes_instanciated_chart.TwoAxesInstanciatedChart', 'TwoAxesInstanciatedChart', (['"""years"""', '"""CO2 tax ($/tCO2)"""'], {'chart_name': 'chart_name'}), "('years', 'CO2 tax ($/tCO2)', chart_name=chart_name)\n", (4901, 4953), False, 'from sos_trades_core.tools.post_processing.charts.two_axes_instanciated_chart import InstanciatedSeries, TwoAxesInstanciatedChart\n'), ((3344, 3374), 'numpy.array', 'np.array', (['dCO2_tax_dCO2_damage'], {}), '(dCO2_tax_dCO2_damage)\n', (3352, 3374), True, 'import numpy as np\n'), ((3546, 3575), 'numpy.array', 'np.array', (['dCO2_tax_dCCS_price'], {}), '(dCO2_tax_dCCS_price)\n', (3554, 3575), True, 'import numpy as np\n')] |
from django.contrib import admin
from django.urls import path, include
from django.conf.urls.static import static
from django.conf import settings
urlpatterns = [
path('admin/', admin.site.urls),
path('' , include('guardian.urls')),
path('account/' , include('account.urls')),
path('address/' , include('address.urls')),
path('pharmacy/' , include('pharmacy.urls')),
path('counsellor/' , include('counsellor.urls')),
path('froala_editor/', include('froala_editor.urls')),
path('store/', include('store.urls')),
path('cart/', include('cart.urls')),
path('counselor/', include('counselor.urls')),
path('blog/', include('blog.urls')),
path('booking/', include('booking.urls')),
path('orders/', include('orders.urls')),
path('orders/', include('rating.urls')),
path('prescription/' , include('prescription.urls')),
]+ static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) | [
"django.conf.urls.static.static",
"django.urls.path",
"django.urls.include"
] | [((876, 937), 'django.conf.urls.static.static', 'static', (['settings.MEDIA_URL'], {'document_root': 'settings.MEDIA_ROOT'}), '(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n', (882, 937), False, 'from django.conf.urls.static import static\n'), ((168, 199), 'django.urls.path', 'path', (['"""admin/"""', 'admin.site.urls'], {}), "('admin/', admin.site.urls)\n", (172, 199), False, 'from django.urls import path, include\n'), ((215, 239), 'django.urls.include', 'include', (['"""guardian.urls"""'], {}), "('guardian.urls')\n", (222, 239), False, 'from django.urls import path, include\n'), ((264, 287), 'django.urls.include', 'include', (['"""account.urls"""'], {}), "('account.urls')\n", (271, 287), False, 'from django.urls import path, include\n'), ((312, 335), 'django.urls.include', 'include', (['"""address.urls"""'], {}), "('address.urls')\n", (319, 335), False, 'from django.urls import path, include\n'), ((361, 385), 'django.urls.include', 'include', (['"""pharmacy.urls"""'], {}), "('pharmacy.urls')\n", (368, 385), False, 'from django.urls import path, include\n'), ((413, 439), 'django.urls.include', 'include', (['"""counsellor.urls"""'], {}), "('counsellor.urls')\n", (420, 439), False, 'from django.urls import path, include\n'), ((470, 499), 'django.urls.include', 'include', (['"""froala_editor.urls"""'], {}), "('froala_editor.urls')\n", (477, 499), False, 'from django.urls import path, include\n'), ((521, 542), 'django.urls.include', 'include', (['"""store.urls"""'], {}), "('store.urls')\n", (528, 542), False, 'from django.urls import path, include\n'), ((563, 583), 'django.urls.include', 'include', (['"""cart.urls"""'], {}), "('cart.urls')\n", (570, 583), False, 'from django.urls import path, include\n'), ((609, 634), 'django.urls.include', 'include', (['"""counselor.urls"""'], {}), "('counselor.urls')\n", (616, 634), False, 'from django.urls import path, include\n'), ((655, 675), 'django.urls.include', 'include', (['"""blog.urls"""'], {}), "('blog.urls')\n", (662, 675), False, 'from django.urls import path, include\n'), ((699, 722), 'django.urls.include', 'include', (['"""booking.urls"""'], {}), "('booking.urls')\n", (706, 722), False, 'from django.urls import path, include\n'), ((745, 767), 'django.urls.include', 'include', (['"""orders.urls"""'], {}), "('orders.urls')\n", (752, 767), False, 'from django.urls import path, include\n'), ((790, 812), 'django.urls.include', 'include', (['"""rating.urls"""'], {}), "('rating.urls')\n", (797, 812), False, 'from django.urls import path, include\n'), ((842, 870), 'django.urls.include', 'include', (['"""prescription.urls"""'], {}), "('prescription.urls')\n", (849, 870), False, 'from django.urls import path, include\n')] |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-10-24 18:53
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('orders', '0006_auto_20161024_1127'),
]
operations = [
migrations.AddField(
model_name='orderdetails',
name='approved',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='orderdetails',
name='delivered',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='orderdetails',
name='processed',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='orderdetails',
name='returned',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='orderdetails',
name='shipped',
field=models.BooleanField(default=False),
),
]
| [
"django.db.models.BooleanField"
] | [((405, 439), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (424, 439), False, 'from django.db import migrations, models\n'), ((568, 602), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (587, 602), False, 'from django.db import migrations, models\n'), ((731, 765), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (750, 765), False, 'from django.db import migrations, models\n'), ((893, 927), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (912, 927), False, 'from django.db import migrations, models\n'), ((1054, 1088), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (1073, 1088), False, 'from django.db import migrations, models\n')] |
from bs4 import BeautifulSoup
import requests
from flask import Flask
app = Flask(__name__)
url ='https://www.worldometers.info/world-population/world-population-by-year/'
year = []
population = []
@app.route('/')
def main():
    # Reset the module-level lists so repeated requests do not accumulate duplicates
    year.clear()
    population.clear()
    r = requests.get(url).text
soup = BeautifulSoup(r, 'html.parser')
for data in soup.find('tbody'):
'''
Get every year then append every data on the year list
'''
year.append(data.td.get_text())
'''
Get every data, split the whitespaces get the second one which is the year then replace , with a blank string
'''
population.append(data.text.split()[1].replace(',', ''))
'''
Reverse the lists then put it on a dict and return it to the user
'''
year.reverse()
population.reverse()
json = {'info': f'this api is scraping {url} all of these are in order, it is optimized for a javascript library called Chart.js | Made by Yoshiinri | License: MIT | Repo: https://github.com/Yoshiinori/human-population-api','year': year, 'population': population}
return json
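    # Returning a dict lets Flask build the JSON response automatically; the local
    # name `json` above is just a variable, not the stdlib module.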
if '__main__' == __name__:
app.run(debug=True) | [
"bs4.BeautifulSoup",
"requests.get",
"flask.Flask"
] | [((77, 92), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (82, 92), False, 'from flask import Flask\n'), ((269, 300), 'bs4.BeautifulSoup', 'BeautifulSoup', (['r', '"""html.parser"""'], {}), "(r, 'html.parser')\n", (282, 300), False, 'from bs4 import BeautifulSoup\n'), ((237, 254), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (249, 254), False, 'import requests\n')] |
"""
A convenient global store for application default values.
This is *not* a config file - more on what to do about that coming here soon.
"""
import os
import pathlib
from datetime import datetime
# Data Locations
ROOT_DIR = str(pathlib.Path(__file__).parents[1].absolute()) # Project root
TRAIN_DATA_DIR = os.path.join(
ROOT_DIR, "data/array/pendulum"
) # Directories containing audio files
OUTPUT_DIR = os.path.join(
ROOT_DIR, "data/output"
) # Generated files (audio, images, etc.) go here
LOG_DIR = os.path.join(ROOT_DIR, "data/logs") # Python logs go here
TENSORBOARD_DIR = os.path.join(
ROOT_DIR, "data/tensorboard/" + datetime.now().strftime("%Y%m%d-%H%M%S")
)
MODEL_WEIGHTS = os.path.join(ROOT_DIR, "models", "cvae_pendulum_keras.h5")
# ========================================================================
# Don't modify anything below this line unless you know what you're doing.
# ========================================================================
# Program Defaults - Best not to touch these
NUM_CPUS = int(round(os.cpu_count() * 0.75)) # Use a portion of available CPUs
SAMPLE_RATE = 32768 # Audio files are resampled to this many samples per second
RESAMPLE_TYPE = "kaiser_fast" # Resampling algorithm used by Librosa
N_FFT = 4096 # STFT window size (in samples)
HOP_LENGTH = 256 # STFT stride length (in samples)
N_MELS = 512 # Number of frequency bins per frame (timestep)
TOP_DB = 80
AUDIO_FORMAT = "wav"
# Default Hyperparameters
TEST_FRACTION = 0.2
EPOCHS = 100
LATENT_DIMS = 64
BATCH_SIZE = 1 # Number of windows per data sample
WINDOW_SIZE = 1 # Number of spectrogram chunks per window
EXAMPLES_TO_GENERATE = 16
# Data Options
CHANNELS_LAST = True
SHUFFLE_BUFFER = 1024 # Buffer size for shuffling data samples
PREFETCH_DATA = (
32 # Data samples to prefetch (resource intensive, but uses GPU more efficiently)
)
DATA_PARALLEL = True # Parallelize data pre-processing (can be resource-intensive)
# CLI Defaults
AUDIO_OFFSET = 0.0
AUDIO_DURATION = None
CHUNK_SIZE = 640 # Number of frames per spectrogram chunk
TRUNCATE = True
IMAGE_FLIP = True
| [
"datetime.datetime.now",
"os.path.join",
"os.cpu_count",
"pathlib.Path"
] | [((311, 356), 'os.path.join', 'os.path.join', (['ROOT_DIR', '"""data/array/pendulum"""'], {}), "(ROOT_DIR, 'data/array/pendulum')\n", (323, 356), False, 'import os\n'), ((414, 451), 'os.path.join', 'os.path.join', (['ROOT_DIR', '"""data/output"""'], {}), "(ROOT_DIR, 'data/output')\n", (426, 451), False, 'import os\n'), ((517, 552), 'os.path.join', 'os.path.join', (['ROOT_DIR', '"""data/logs"""'], {}), "(ROOT_DIR, 'data/logs')\n", (529, 552), False, 'import os\n'), ((703, 761), 'os.path.join', 'os.path.join', (['ROOT_DIR', '"""models"""', '"""cvae_pendulum_keras.h5"""'], {}), "(ROOT_DIR, 'models', 'cvae_pendulum_keras.h5')\n", (715, 761), False, 'import os\n'), ((1054, 1068), 'os.cpu_count', 'os.cpu_count', ([], {}), '()\n', (1066, 1068), False, 'import os\n'), ((644, 658), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (656, 658), False, 'from datetime import datetime\n'), ((232, 254), 'pathlib.Path', 'pathlib.Path', (['__file__'], {}), '(__file__)\n', (244, 254), False, 'import pathlib\n')] |
'''
Created on Jul 2, 2013
@author: <NAME>
All rights reserved.
'''
import time
from multiprocessing.pool import Pool
parallelSolve = False
INF = 1 << 31
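# solve() appears to simulate the "Accordian" patience card game: the deck is dealt
# into single-card piles, and a pile's top card may be moved onto the pile one or
# three positions to its left whenever rank or suit matches; emptied piles are
# removed. The loop repeats until no move applies, then the remaining pile sizes
# are reported.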
def solve(par):
def match(c1, c2):
return c1[0] == c2[0] or c1[1] == c2[1]
deck = par
piles = []
for c in deck:
piles.append([c])
isMoved = True
while isMoved:
isMoved = False
P = list(piles)
for i, p in enumerate(P):
if i - 3 >= 0 and match(p[-1], piles[i - 3][-1]):
piles[i - 3].append(p.pop())
isMoved = True
if len(p) == 0:
del piles[i]
break
if i - 1 >= 0 and match(p[-1], piles[i - 1][-1]):
piles[i - 1].append(p.pop())
isMoved = True
if len(p) == 0:
del piles[i]
break
result = [len(p) for p in piles]
return ('%d piles remaining' % len(result)) + ' ' + ' '.join(str(e) for e in result)
class Solver:
def getInput(self):
self.numOfTests = 0
self.input = []
while True:
deck = self.fIn.readline().split()
if deck[0] == '#':
break
self.numOfTests += 1
deck += self.fIn.readline().split()
self.input.append((deck))
def __init__(self):
self.fIn = open('input.txt')
self.fOut = open('output.txt', 'w')
self.results = []
def parallel(self):
self.getInput()
p = Pool(4)
millis1 = int(round(time.time() * 1000))
self.results = p.map(solve, self.input)
millis2 = int(round(time.time() * 1000))
print("Time in milliseconds: %d " % (millis2 - millis1))
self.makeOutput()
def sequential(self):
self.getInput()
millis1 = int(round(time.time() * 1000))
for i in self.input:
self.results.append(solve(i))
millis2 = int(round(time.time() * 1000))
print("Time in milliseconds: %d " % (millis2 - millis1))
self.makeOutput()
def makeOutput(self):
for test in range(self.numOfTests):
self.fOut.write("%s\n" % self.results[test])
self.fIn.close()
self.fOut.close()
if __name__ == '__main__':
solver = Solver()
if parallelSolve:
solver.parallel()
else:
solver.sequential()
| [
"time.time",
"multiprocessing.pool.Pool"
] | [((1545, 1552), 'multiprocessing.pool.Pool', 'Pool', (['(4)'], {}), '(4)\n', (1549, 1552), False, 'from multiprocessing.pool import Pool\n'), ((1581, 1592), 'time.time', 'time.time', ([], {}), '()\n', (1590, 1592), False, 'import time\n'), ((1678, 1689), 'time.time', 'time.time', ([], {}), '()\n', (1687, 1689), False, 'import time\n'), ((1869, 1880), 'time.time', 'time.time', ([], {}), '()\n', (1878, 1880), False, 'import time\n'), ((1989, 2000), 'time.time', 'time.time', ([], {}), '()\n', (1998, 2000), False, 'import time\n')] |
import os
import keras
import tensorflow as tf
from dotenv import load_dotenv
def get_model(path_or_name):
    if isinstance(path_or_name, str):
        # A filesystem path was given: load the saved model from disk
        model = tf.keras.models.load_model(path_or_name)
        name = os.path.basename(path_or_name)
        return model
    if isinstance(path_or_name, (keras.Sequential, keras.Model)):
        # Already a Keras model instance; return it unchanged
        return path_or_name
    raise Exception("can not find any model")
if __name__ == '__main__':
load_dotenv()
p1 = "/media/andy/z/python/毕业实习/app/deep_learning/model/1646726472"
print(get_model(p1).name)
from deep_learning.models import TransferLearningModel
m = TransferLearningModel()("tf2-preview_mobilenet_v2_classification_4")
print(type(m))
print(get_model(m).name) | [
"tensorflow.keras.models.load_model",
"os.path.basename",
"deep_learning.models.TransferLearningModel",
"dotenv.load_dotenv"
] | [((480, 493), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (491, 493), False, 'from dotenv import load_dotenv\n'), ((160, 200), 'tensorflow.keras.models.load_model', 'tf.keras.models.load_model', (['path_or_name'], {}), '(path_or_name)\n', (186, 200), True, 'import tensorflow as tf\n'), ((216, 246), 'os.path.basename', 'os.path.basename', (['path_or_name'], {}), '(path_or_name)\n', (232, 246), False, 'import os\n'), ((663, 686), 'deep_learning.models.TransferLearningModel', 'TransferLearningModel', ([], {}), '()\n', (684, 686), False, 'from deep_learning.models import TransferLearningModel\n')] |
from __future__ import absolute_import, annotations
import csv
import importlib.metadata
import itertools
import json
import logging
import re
import typing as t
from enum import Enum
from pathlib import Path
import graphviz as gv
import networkx as nx
from arg_services.graph.v1 import graph_pb2
from arguebuf.models import Userdata
from arguebuf.models.analyst import Analyst
from arguebuf.models.edge import Edge
from arguebuf.models.metadata import Metadata
from arguebuf.models.node import (AtomNode, Attack, Node, Rephrase, SchemeNode,
Support)
from arguebuf.models.participant import Participant
from arguebuf.models.reference import Reference
from arguebuf.models.resource import Resource
from arguebuf.schema import aif, ova
from arguebuf.services import utils
from arguebuf.services.utils import ImmutableDict, ImmutableSet
from google.protobuf.json_format import MessageToDict, ParseDict
from lxml import html
log = logging.getLogger(__name__)
class GraphFormat(str, Enum):
ARGUEBUF = "arguebuf"
AIF = "aif"
# noinspection PyProtectedMember
class Graph:
"""Graph in AIF format.
No attribute is mandatory.
    All node and edge attributes are read-only.
"""
__slots__ = (
"name",
"_nodes",
"_atom_nodes",
"_scheme_nodes",
"_edges",
"_incoming_nodes",
"_incoming_edges",
"_outgoing_nodes",
"_outgoing_edges",
"_major_claim",
"_resources",
"_participants",
"_analysts",
"metadata",
"userdata",
"library_version",
"schema_version",
)
name: str
_nodes: ImmutableDict[str, Node]
_atom_nodes: ImmutableDict[str, AtomNode]
_scheme_nodes: ImmutableDict[str, SchemeNode]
_edges: ImmutableDict[str, Edge]
_incoming_nodes: ImmutableDict[Node, ImmutableSet[Node]]
_incoming_edges: ImmutableDict[Node, ImmutableSet[Edge]]
_outgoing_nodes: ImmutableDict[Node, ImmutableSet[Node]]
_outgoing_edges: ImmutableDict[Node, ImmutableSet[Edge]]
_resources: ImmutableDict[str, Resource]
_participants: ImmutableDict[str, Participant]
_major_claim: t.Optional[AtomNode]
_analysts: ImmutableDict[str, Analyst]
library_version: t.Optional[str]
schema_version: t.Optional[int]
metadata: Metadata
userdata: Userdata
@property
def edges(self) -> t.Mapping[str, Edge]:
return self._edges
@property
def nodes(self) -> t.Mapping[str, Node]:
return self._nodes
@property
def atom_nodes(self) -> t.Mapping[str, AtomNode]:
return self._atom_nodes
@property
def scheme_nodes(self) -> t.Mapping[str, SchemeNode]:
return self._scheme_nodes
def incoming_nodes(self, node: t.Union[str, Node]) -> t.AbstractSet[Node]:
if isinstance(node, str):
node = self._nodes[node]
return self._incoming_nodes[node]
def incoming_atom_nodes(self, node: t.Union[str, Node]) -> t.AbstractSet[AtomNode]:
if isinstance(node, str):
node = self._nodes[node]
incoming_nodes = list(self._incoming_nodes[node])
incoming_atom_nodes = set()
while incoming_nodes:
incoming_node = incoming_nodes.pop()
if isinstance(incoming_node, AtomNode):
incoming_atom_nodes.add(incoming_node)
else:
incoming_nodes.extend(self._incoming_nodes[incoming_node])
return incoming_atom_nodes
def outgoing_nodes(self, node: t.Union[str, Node]) -> t.AbstractSet[Node]:
if isinstance(node, str):
node = self._nodes[node]
return self._outgoing_nodes[node]
def outgoing_atom_nodes(self, node: t.Union[str, Node]) -> t.AbstractSet[AtomNode]:
if isinstance(node, str):
node = self._nodes[node]
outgoing_nodes = list(self._outgoing_nodes[node])
outgoing_atom_nodes = set()
while outgoing_nodes:
outgoing_node = outgoing_nodes.pop()
if isinstance(outgoing_node, AtomNode):
outgoing_atom_nodes.add(outgoing_node)
else:
outgoing_nodes.extend(self._outgoing_nodes[outgoing_node])
return outgoing_atom_nodes
def incoming_edges(self, node: t.Union[str, Node]) -> t.AbstractSet[Edge]:
if isinstance(node, str):
node = self._nodes[node]
return self._incoming_edges[node]
def outgoing_edges(self, node: t.Union[str, Node]) -> t.AbstractSet[Edge]:
if isinstance(node, str):
node = self._nodes[node]
return self._outgoing_edges[node]
def scheme_between(
self, premise: AtomNode, claim: AtomNode
) -> t.Optional[SchemeNode]:
candidates = set(self._outgoing_nodes[premise]).intersection(
self._incoming_nodes[claim]
)
if len(candidates) == 1:
scheme = next(iter(candidates))
if isinstance(scheme, SchemeNode):
return scheme
return None
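    # Minimal usage sketch for scheme_between (hypothetical objects, not part of
    # this module):
    #   g = Graph(); premise, claim = AtomNode("P"), AtomNode("C")
    #   scheme = SchemeNode(Support.DEFAULT)
    #   g.add_edge(Edge(premise, scheme)); g.add_edge(Edge(scheme, claim))
    #   g.scheme_between(premise, claim)  # -> scheme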
@property
def resources(self) -> t.Mapping[str, Resource]:
return self._resources
@property
def major_claim(self) -> t.Optional[AtomNode]:
if self._major_claim:
return self._major_claim
@property
def root_node(self) -> t.Optional[AtomNode]:
# If no major claim explicitly set, try to find one node with no outgoing edges.
# It is only returned if there exists exactly one node without connections.
# Otherwise, nothing is returned.
nodes = {
node
for node in self._atom_nodes.values()
if len(self._outgoing_nodes[node]) == 0
}
if len(nodes) == 1:
return next(iter(nodes))
return None
@major_claim.setter
def major_claim(self, value: t.Union[str, AtomNode, None]) -> None:
if isinstance(value, str):
value = self._atom_nodes[value]
elif not (value is None or isinstance(value, AtomNode)):
raise TypeError(utils.type_error(type(value), AtomNode))
self._major_claim = value
# self._metadata.update()
@property
def leaf_nodes(self) -> t.Set[Node]:
return {
node for node in self.nodes.values() if len(self.incoming_nodes(node)) == 0
}
@property
def leaf_atom_nodes(self) -> t.Set[AtomNode]:
return {
node
for node in self.atom_nodes.values()
if len(self.incoming_nodes(node)) == 0
}
@property
def leaf_scheme_nodes(self) -> t.Set[SchemeNode]:
return {
node
for node in self.scheme_nodes.values()
if len(self.incoming_nodes(node)) == 0
}
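    # A "leaf" here is a node without incoming edges, i.e. one that no other node
    # supports or attacks (for atoms, typically an unsupported premise).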
@property
def participants(self) -> t.Mapping[str, Participant]:
return self._participants
@property
def analysts(self) -> t.Mapping[str, Analyst]:
return self._analysts
def __init__(self, name: t.Optional[str] = None):
"""Create a graph from scratch."""
self.name = name or utils.uuid()
self._nodes = ImmutableDict()
self._atom_nodes = ImmutableDict()
self._scheme_nodes = ImmutableDict()
self._edges = ImmutableDict()
self._analysts = ImmutableDict()
self.userdata = {}
self._resources = ImmutableDict()
self._participants = ImmutableDict()
self.metadata = Metadata()
self._major_claim = None
self._incoming_nodes = ImmutableDict()
self._incoming_edges = ImmutableDict()
self._outgoing_nodes = ImmutableDict()
self._outgoing_edges = ImmutableDict()
self.library_version = None
self.schema_version = None
self.__post_init__()
def __post_init__(self):
pass
def __repr__(self):
return utils.class_repr(self, [self.name])
def add_node(self, node: Node) -> None:
"""Add a node to the graph.
Args:
node: Node object that is not already part of the graph.
Examples:
>>> g = Graph()
>>> g.add_node(AtomNode("Exemplary node"))
>>> len(g.nodes)
1
>>> g.add_node(SchemeNode(Support.DEFAULT))
>>> len(g.nodes)
2
>>> g.add_node("Test")
Traceback (most recent call last):
TypeError: Expected type '<class 'arguebuf.node.Node'>', but got '<class 'str'>'. Make sure that you are passing the correct method arguments.
"""
if not isinstance(node, Node):
raise TypeError(utils.type_error(type(node), Node))
if node.id in self._nodes:
raise ValueError(utils.duplicate_key_error(self.name, node.id))
self._nodes._store[node.id] = node
if isinstance(node, AtomNode):
self._atom_nodes._store[node.id] = node
if node.participant and node.participant.id not in self._participants:
self.add_participant(node.participant)
if (
node.reference
and node.reference.resource
and node.reference.resource.id not in self._resources
):
self.add_resource(node.reference.resource)
elif isinstance(node, SchemeNode):
self._scheme_nodes._store[node.id] = node
self._incoming_nodes._store[node] = ImmutableSet()
self._incoming_edges._store[node] = ImmutableSet()
self._outgoing_nodes._store[node] = ImmutableSet()
self._outgoing_edges._store[node] = ImmutableSet()
def remove_node(self, node: Node) -> None:
"""Remove a node and its corresponding edges from the graph.
Args:
node: Node object that is part of the graph.
Examples:
>>> g = Graph()
>>> n1 = AtomNode("Node1")
>>> n2 = SchemeNode(Support.DEFAULT)
>>> e = Edge(n1, n2)
>>> g.add_edge(e)
>>> len(g.nodes)
2
>>> len(g.edges)
1
>>> g.remove_node(n1)
>>> len(g.nodes)
1
>>> len(g.edges)
0
>>> g.remove_node(n1)
Traceback (most recent call last):
KeyError: Node not in graph.
"""
if node.id not in self.nodes:
raise KeyError(utils.missing_key_error(self.name, node.id))
del self._nodes._store[node.id]
if isinstance(node, AtomNode):
del self._atom_nodes._store[node.id]
elif isinstance(node, SchemeNode):
del self._scheme_nodes._store[node.id]
neighbor_edges = list(self._incoming_edges[node]) + list(
self._outgoing_edges[node]
)
for edge in neighbor_edges:
self.remove_edge(edge)
del self._incoming_nodes._store[node]
del self._incoming_edges._store[node]
del self._outgoing_nodes._store[node]
del self._outgoing_edges._store[node]
def add_edge(self, edge: Edge) -> None:
"""Add an edge and its nodes (if not already added).
Args:
edge: Edge object that is not part of the graph.
Examples:
>>> g = Graph()
>>> n1 = AtomNode("Premise")
>>> n2 = SchemeNode(Support.DEFAULT)
>>> n3 = AtomNode("Claim")
>>> e1 = Edge(n1, n2)
>>> e2 = Edge(n2, n3)
>>> g.add_edge(e1)
>>> len(g.edges)
1
>>> g.add_edge(e2)
>>> len(g.edges)
2
"""
if not isinstance(edge, Edge):
raise TypeError(utils.type_error(type(edge), Edge))
if edge.id in self._edges:
raise ValueError(utils.duplicate_key_error(self.name, edge.id))
self._edges._store[edge.id] = edge
if edge.source.id not in self.nodes:
self.add_node(edge.source)
if edge.target.id not in self.nodes:
self.add_node(edge.target)
self._outgoing_edges[edge.source]._store.add(edge)
self._incoming_edges[edge.target]._store.add(edge)
self._outgoing_nodes[edge.source]._store.add(edge.target)
self._incoming_nodes[edge.target]._store.add(edge.source)
def remove_edge(self, edge: Edge) -> None:
"""Remove an edge.
Args:
edge: Edge object that is part of the graph.
Examples:
>>> g = Graph()
>>> n1 = AtomNode("Node1")
>>> n2 = SchemeNode(Support.DEFAULT)
>>> e = Edge(n1, n2)
>>> g.add_edge(e)
>>> len(g.edges)
1
>>> len(g.nodes)
2
>>> g.remove_edge(e)
>>> len(g.edges)
0
>>> len(g.nodes)
2
"""
if not isinstance(edge, Edge):
raise TypeError(utils.type_error(type(edge), Edge))
if edge.id not in self._edges:
raise KeyError(utils.missing_key_error(self.name, edge.id))
del self._edges._store[edge.id]
self._outgoing_edges[edge.source]._store.remove(edge)
self._incoming_edges[edge.target]._store.remove(edge)
self._outgoing_nodes[edge.source]._store.remove(edge.target)
self._incoming_nodes[edge.target]._store.remove(edge.source)
##What does a Resource look like?
def add_resource(self, resource: Resource) -> None:
"""Add a resource.
Args:
resource: Resource object that is not part of the graph.
Examples:
>>> g = Graph()
>>> r1 = Resource("Resource1")
>>> g.add_resource(r1)
>>> len(g.resources)
1
"""
if not isinstance(resource, Resource):
raise TypeError(utils.type_error(type(resource), Resource))
if resource.id in self._resources:
raise ValueError(utils.duplicate_key_error(self.name, resource.id))
self._resources._store[resource.id] = resource
def remove_resource(self, resource: Resource) -> None:
"""Add a resource.
Args:
resource: Resource object that is part of the graph.
Examples:
>>> g = Graph()
>>> r1 = Resource("Resource1")
>>> g.add_resource(r1)
>>> len(g.resources)
1
>>> g.remove_resource(r1)
>>> len(g.resources)
0
"""
if not isinstance(resource, Resource):
raise TypeError(utils.type_error(type(resource), Resource))
if resource.id not in self._resources:
raise ValueError(utils.missing_key_error(self.name, resource.id))
del self._resources._store[resource.id]
for node in self._atom_nodes.values():
if node.reference and node.reference.resource == resource:
node.reference._resource = None
node.reference.offset = None
def clean_resources(self) -> None:
"""Remove resources from the graph that are used by no nodes"""
node_resources = {
node.reference.resource.id
for node in self._atom_nodes.values()
if node.reference and node.reference.resource
}
for resource_id in set(self._resources):
if resource_id not in node_resources:
del self._resources._store[resource_id]
def add_participant(self, participant: Participant) -> None:
"""Add a resource.
Args:
participant: Participant object that is not part of the graph.
Examples:
>>> g = Graph()
>>> p1 = Participant("Participant1")
>>> g.add_participant(p1)
>>> len(g.participants)
1
"""
if not isinstance(participant, Participant):
raise TypeError(utils.type_error(type(participant), Participant))
if participant.id in self._participants:
raise ValueError(utils.duplicate_key_error(self.name, participant.id))
self._participants._store[participant.id] = participant
def remove_participant(self, participant: Participant) -> None:
"""Add a resource.
Args:
participant: Participant object that is part of the graph.
Examples:
>>> g = Graph()
>>> p1 = Participant("Participant1")
>>> g.add_participant(p1)
>>> len(g.participants)
1
>>> g.remove_participant(p1)
>>> len(g.participants)
0
"""
if not isinstance(participant, Participant):
raise TypeError(utils.type_error(type(participant), Participant))
if participant.id not in self._participants:
raise ValueError(utils.missing_key_error(self.name, participant.id))
del self._participants._store[participant.id]
for node in self._atom_nodes.values():
if node.participant == participant:
node._participant = None
def clean_participants(self) -> None:
"""Remove resources from the graph that are used by no nodes"""
node_participants = {
node.participant.id
for node in self._atom_nodes.values()
if node.participant
}
for participant in set(self._participants):
if participant not in node_participants:
del self._participants._store[participant]
def add_analyst(self, analyst: Analyst) -> None:
"""Add a resource.
Args:
analyst: analyst object that is not part of the graph.
Examples:
>>> g = Graph()
>>> p1 = Analyst("Name")
>>> g.add_analyst(p1)
>>> len(g.analysts)
1
"""
if not isinstance(analyst, Analyst):
raise TypeError(utils.type_error(type(analyst), Analyst))
if analyst.id in self._analysts:
raise ValueError(utils.duplicate_key_error(self.name, analyst.id))
self._analysts._store[analyst.id] = analyst
def remove_analyst(self, analyst: Analyst) -> None:
"""Add a resource.
Args:
analyst: analyst object that is part of the graph.
Examples:
>>> import arguebuf
>>> g = Graph()
>>> p1 = Analyst("Name")
>>> g.add_analyst(p1)
>>> len(g.analysts)
1
>>> g.remove_analyst(p1)
>>> len(g.analysts)
0
"""
if not isinstance(analyst, Analyst):
raise TypeError(utils.type_error(type(analyst), Analyst))
if analyst.id not in self._analysts:
raise ValueError(utils.missing_key_error(self.name, analyst.id))
del self._analysts._store[analyst.id]
def node_distance(
self,
start_node: Node,
end_node: Node,
max_distance: t.Optional[int] = None,
directed: bool = True,
ignore_schemes: bool = False,
) -> t.Optional[int]:
"""Get the distance between `start_node` and `end_node` in the graph.
Args:
start_node: Node object that is part of the graph.
end_node: Node object that is part of the graph.
max_distance: Only search for nodes having at most a distance of this argument.
Especially helpful when dealing with large graphs where shorts paths are searched for.
directed: If `False`, also search for the direction `end_node` -> `start_node`.
Returns:
`None` if no path between
Examples:
>>> g = Graph()
>>> n1 = AtomNode("Premise")
>>> n2 = SchemeNode(Support.DEFAULT)
>>> n3 = AtomNode("Claim")
>>> e1 = Edge(n1, n2)
>>> e2 = Edge(n2, n3)
>>> g.add_node(n1)
>>> g.add_node(n2)
>>> len(g.nodes)
2
>>> g.add_edge(e1)
>>> g.add_edge(e2)
>>> len(g.edges)
2
>>> g.node_distance(n1, n3)
2
>>> g.node_distance(n3, n1)
"""
if start_node in self.nodes.values() and end_node in self.nodes.values():
if start_node == end_node:
return 0
connections = (
self.outgoing_atom_nodes if ignore_schemes else self.outgoing_nodes
)
dist = _node_distance(start_node, end_node, connections, max_distance)
if dist is None and not directed:
dist = _node_distance(end_node, start_node, connections, max_distance)
return dist
return None
@classmethod
def from_ova(
cls,
obj: ova.Graph,
name: t.Optional[str] = None,
atom_class: t.Type[AtomNode] = AtomNode,
scheme_class: t.Type[SchemeNode] = SchemeNode,
edge_class: t.Type[Edge] = Edge,
nlp: t.Optional[t.Callable[[str], t.Any]] = None,
) -> Graph:
"""Generate Graph structure from OVA argument graph file (reference: http://ova.uni-trier.de/)."""
g = cls(name)
resource = Resource.from_ova(obj["analysis"], nlp)
g.add_resource(resource)
for participant in obj["participants"]:
g.add_participant(Participant.from_ova(participant))
if analyst_name := obj["analysis"].get("annotatorName"):
g.add_analyst(Analyst(name=analyst_name))
for ova_node in obj["nodes"]:
node = (
atom_class.from_ova(ova_node, nlp)
if ova_node.get("type") == "I"
else scheme_class.from_ova(ova_node, nlp)
)
if node:
g.add_node(node)
if ova_node.get("major_claim") and isinstance(node, AtomNode):
g._major_claim = node
for ova_edge in obj["edges"]:
if edge := edge_class.from_ova(ova_edge, g._nodes):
g.add_edge(edge)
if (analysis := obj.get("analysis")) and (raw_text := analysis.get("txt")):
_inject_original_text(raw_text, g._atom_nodes, resource, nlp)
return g
@classmethod
def from_aif(
cls,
obj: aif.Graph,
name: t.Optional[str] = None,
atom_class: t.Type[AtomNode] = AtomNode,
scheme_class: t.Type[SchemeNode] = SchemeNode,
edge_class: t.Type[Edge] = Edge,
nlp: t.Optional[t.Callable[[str], t.Any]] = None,
) -> Graph:
"""Generate Graph structure from AIF argument graph file
(reference: http://www.wi2.uni-trier.de/shared/publications/2019_LenzOllingerSahitajBergmann_ICCBR.pdf)
"""
g = cls(name)
for aif_node in obj["nodes"]:
node = (
atom_class.from_aif(aif_node, nlp)
if aif_node["type"] == "I"
else scheme_class.from_aif(aif_node, nlp)
)
if node:
g.add_node(node)
for aif_edge in obj["edges"]:
if edge := edge_class.from_aif(aif_edge, g._nodes):
g.add_edge(edge)
return g
def to_aif(self) -> aif.Graph:
"""Export structure of Graph instance to AIF argument graph format."""
return {
"nodes": [node.to_aif() for node in self._nodes.values()],
"edges": [edge.to_aif() for edge in self._edges.values()],
"locutions": [],
}
def to_protobuf(self) -> graph_pb2.Graph:
"""Export structure of Graph instance to PROTOBUF argument graph format."""
try:
version = importlib.metadata.version("arg_services")
except importlib.metadata.PackageNotFoundError:
version = ""
g = graph_pb2.Graph(
schema_version=1,
library_version=version,
metadata=self.metadata.to_protobuf(),
)
for node_id, node in self._nodes.items():
g.nodes[node_id].CopyFrom(node.to_protobuf())
for edge_id, edge in self._edges.items():
g.edges[edge_id].CopyFrom(edge.to_protobuf())
if self._major_claim:
g.major_claim = self._major_claim.id
for resource_id, resource in self._resources.items():
g.resources[resource_id].CopyFrom(resource.to_protobuf())
for participant_id, participant in self._participants.items():
g.participants[participant_id].CopyFrom(participant.to_protobuf())
for analyst_id, analyst in self._analysts.items():
g.analysts[analyst_id].CopyFrom(analyst.to_protobuf())
g.userdata.update(self.userdata)
return g
@classmethod
def from_protobuf(
cls,
obj: graph_pb2.Graph,
name: t.Optional[str] = None,
atom_class: t.Type[AtomNode] = AtomNode,
scheme_class: t.Type[SchemeNode] = SchemeNode,
edge_class: t.Type[Edge] = Edge,
participant_class: t.Type[Participant] = Participant,
analyst_class: t.Type[Analyst] = Analyst,
resource_class: t.Type[Resource] = Resource,
reference_class: t.Type[Reference] = Reference,
nlp: t.Optional[t.Callable[[str], t.Any]] = None,
) -> Graph:
"""Generate Graph structure from PROTOBUF argument graph file.(Link?)"""
g = cls(name)
for resource_id, resource in obj.resources.items():
g.add_resource(resource_class.from_protobuf(resource_id, resource, nlp))
for participant_id, participant in obj.participants.items():
g.add_participant(
participant_class.from_protobuf(participant_id, participant)
)
for analyst_id, analyst in obj.analysts.items():
g.add_analyst(analyst_class.from_protobuf(analyst_id, analyst))
for node_id, node in obj.nodes.items():
if node.WhichOneof("type") == "atom":
g.add_node(
atom_class.from_protobuf(
node_id,
node,
g._resources,
g._participants,
reference_class,
nlp,
)
)
elif node.WhichOneof("type") == "scheme":
g.add_node(
scheme_class.from_protobuf(
node_id,
node,
g._resources,
g._participants,
reference_class,
nlp,
)
)
# TODO: Raise error if node is neither scheme nor atom
for edge_id, edge in obj.edges.items():
g.add_edge(edge_class.from_protobuf(edge_id, edge, g._nodes))
major_claim = g._nodes[obj.major_claim] if obj.major_claim else None
if major_claim and isinstance(major_claim, AtomNode):
g._major_claim = major_claim
g.userdata.update(obj.userdata)
g.metadata = Metadata.from_protobuf(obj.metadata)
g.library_version = obj.library_version
g.schema_version = obj.schema_version
return g
@classmethod
def from_dict(
cls,
obj: t.Mapping[str, t.Any],
name: t.Optional[str] = None,
atom_class: t.Type[AtomNode] = AtomNode,
scheme_class: t.Type[SchemeNode] = SchemeNode,
edge_class: t.Type[Edge] = Edge,
nlp: t.Optional[t.Callable[[str], t.Any]] = None,
) -> Graph:
"""Generate Graph structure from DICT argument graph file(Link?)."""
if "analysis" in obj:
return cls.from_ova(
t.cast(ova.Graph, obj), name, atom_class, scheme_class, edge_class, nlp
)
if "locutions" in obj:
return cls.from_aif(
t.cast(aif.Graph, obj), name, atom_class, scheme_class, edge_class, nlp
)
return cls.from_protobuf(
ParseDict(obj, graph_pb2.Graph()),
name,
atom_class,
scheme_class,
edge_class,
nlp=nlp,
)
def to_dict(self, format: GraphFormat) -> t.Dict[str, t.Any]:
"""Export structure of Graph instance to DICT argument graph format."""
if format == GraphFormat.AIF:
return t.cast(t.Dict[str, t.Any], self.to_aif())
return MessageToDict(self.to_protobuf(), including_default_value_fields=False)
@classmethod
def from_json(
cls,
obj: t.IO,
name: t.Optional[str] = None,
atom_class: t.Type[AtomNode] = AtomNode,
scheme_class: t.Type[SchemeNode] = SchemeNode,
edge_class: t.Type[Edge] = Edge,
nlp: t.Optional[t.Callable[[str], t.Any]] = None,
) -> Graph:
"""Generate Graph structure from JSON argument graph file(Link?)."""
return cls.from_dict(
json.load(obj), name, atom_class, scheme_class, edge_class, nlp
)
def to_json(
self,
obj: t.IO,
format: GraphFormat = GraphFormat.ARGUEBUF,
pretty: bool = False,
) -> None:
"""Export structure of Graph instance to JSON argument graph format."""
json.dump(
self.to_dict(format), obj, ensure_ascii=False, indent=4 if pretty else None
)
@classmethod
def from_brat(
cls,
obj: t.IO,
name: t.Optional[str] = None,
atom_class: t.Type[AtomNode] = AtomNode,
scheme_class: t.Type[SchemeNode] = SchemeNode,
edge_class: t.Type[Edge] = Edge,
nlp: t.Optional[t.Callable[[str], t.Any]] = None,
) -> Graph:
"""Generate Graph structure from BRAT argument graph file (reference: https://brat.nlplab.org/)"""
reader = csv.reader(obj, delimiter="\t")
g = cls(name)
atom_nodes = {}
mc = atom_class(utils.parse("", nlp))
g.add_node(mc)
g._major_claim = mc
for row in reader:
userdata = row[1].split()
if row[0].startswith("T"):
if userdata[0] == "MajorClaim":
mc.text = utils.parse(f"{mc.plain_text}. {row[2]}", nlp)
else:
atom = atom_class(utils.parse(row[2], nlp))
g.add_node(atom)
atom_nodes[row[0]] = atom
elif row[0].startswith("A") or row[0].startswith("R"):
if row[0].startswith("A"):
scheme_type = (
Attack.DEFAULT if userdata[2] == "Against" else Support.DEFAULT
)
source = atom_nodes[userdata[1]]
target = mc
else:
scheme_type = (
Attack.DEFAULT if userdata[0] == "attacks" else Support.DEFAULT
)
source = atom_nodes[userdata[1].split(":")[1]]
target = atom_nodes[userdata[2].split(":")[1]]
scheme = scheme_class(scheme_type)
g.add_node(scheme)
g.add_edge(edge_class(source, scheme))
g.add_edge(edge_class(scheme, target))
return g
@classmethod
def from_kialo(
cls,
obj: t.IO,
name: t.Optional[str] = None,
atom_class: t.Type[AtomNode] = AtomNode,
scheme_class: t.Type[SchemeNode] = SchemeNode,
edge_class: t.Type[Edge] = Edge,
nlp: t.Optional[t.Callable[[str], t.Any]] = None,
) -> Graph:
if name_match := re.search(r"Discussion Title: (.*)", obj.readline()):
name = name_match[1]
# After the title, an empty line should follow
assert obj.readline().strip() == ""
g = cls(name)
# Example: 1.1. Pro: Gold is better than silver.
# Pattern: {ID}.{ID}. {STANCE (OPTIONAL)}: {TEXT}
pattern = re.compile(r"^(1\.(?:\d+\.)+) (?:(Con|Pro): )?(.*)")
current_line = obj.readline()
next_line = obj.readline()
mc_match = re.search(r"^((?:\d+\.)+) (.*)", current_line)
if not mc_match:
raise ValueError("The major claim is not present in the third line!")
mc_id = mc_match[1]
mc_text = mc_match[2]
# See in the following while loop for explanation of this block
while next_line and not pattern.search(next_line):
mc_text = f"{mc_text}\n{next_line.strip()}"
next_line = obj.readline()
mc = _kialo_atom_node(mc_id, mc_text, nlp, atom_class)
g.add_node(mc)
g.major_claim = mc
current_line = next_line
next_line = obj.readline()
while current_line:
if current_match := pattern.search(current_line):
source_id = current_match[1]
source_id_parts = source_id[:-1].split(".")
level = len(source_id_parts)
stance = current_match[2]
text = current_match[3]
# The text of a node is allowed to span multiple lines.
# Thus, we need to look ahead to concatenate the complete text.
# As long as the pattern is not found in the next line,
# we assume that the text belongs to the previous statement.
while next_line and not pattern.search(next_line):
text = f"{text}\n{next_line.strip()}"
next_line = obj.readline()
assert source_id
assert text
if id_ref_match := re.search(r"^-> See ((?:\d+\.)+)", text):
id_ref = id_ref_match[1]
source = g.atom_nodes[id_ref]
else:
source = _kialo_atom_node(source_id, text, nlp, atom_class)
g.add_node(source)
if stance:
stance = stance.lower()
scheme = scheme_class(
Attack.DEFAULT if stance == "con" else Support.DEFAULT,
id=f"{source_id}scheme",
)
else:
scheme = scheme_class(Rephrase.DEFAULT, id=f"{source_id}scheme")
target_id = ".".join(source_id_parts[:-1] + [""])
target = g.atom_nodes[target_id]
g.add_node(scheme)
g.add_edge(edge_class(source, scheme, id=f"{source.id}->{scheme.id}"))
g.add_edge(edge_class(scheme, target, id=f"{scheme.id}->{target.id}"))
current_line = next_line
next_line = obj.readline()
return g
@classmethod
def from_io(
cls,
obj: t.IO,
suffix: str,
name: t.Optional[str] = None,
atom_class: t.Type[AtomNode] = AtomNode,
scheme_class: t.Type[SchemeNode] = SchemeNode,
edge_class: t.Type[Edge] = Edge,
nlp: t.Optional[t.Callable[[str], t.Any]] = None,
) -> Graph:
"""Generate Graph structure from IO argument graph file(Link?)."""
args = (obj, name, atom_class, scheme_class, edge_class, nlp)
if suffix == ".ann":
return cls.from_brat(*args)
if suffix == ".txt":
return cls.from_kialo(*args)
return cls.from_json(*args)
def to_io(
self,
obj: t.IO,
format: GraphFormat = GraphFormat.ARGUEBUF,
pretty: bool = False,
) -> None:
"""Export structure of Graph instance to IO argument graph format."""
self.to_json(obj, format, pretty)
@classmethod
def from_file(
cls,
path: Path,
atom_class: t.Type[AtomNode] = AtomNode,
scheme_class: t.Type[SchemeNode] = SchemeNode,
edge_class: t.Type[Edge] = Edge,
nlp: t.Optional[t.Callable[[str], t.Any]] = None,
) -> Graph:
"""Generate Graph structure from a File."""
with path.open("r", encoding="utf-8") as file:
return cls.from_io(
file, path.suffix, path.stem, atom_class, scheme_class, edge_class, nlp
)
def to_file(
self,
path: Path,
format: GraphFormat = GraphFormat.ARGUEBUF,
pretty: bool = False,
) -> None:
"""Export strucure of Graph instance into structure of File/Folder format."""
if path.is_dir() or not path.suffix:
path = path / f"{self.name}.json"
with path.open("w", encoding="utf-8") as file:
self.to_io(file, format, pretty)
to_folder = to_file
@classmethod
def from_folder(
cls,
path: Path,
atom_class: t.Type[AtomNode] = AtomNode,
scheme_class: t.Type[SchemeNode] = SchemeNode,
edge_class: t.Type[Edge] = Edge,
nlp: t.Optional[t.Callable[[str], t.Any]] = None,
suffixes: t.Iterable[str] = (".json"),
) -> t.List[Graph]:
"Generate Graph structure from Folder."
return [
cls.from_file(file, atom_class, scheme_class, edge_class, nlp)
for suffix in suffixes
for file in sorted(path.rglob(f"*{suffix}"))
]
def to_nx(
self,
atom_label: t.Optional[t.Callable[[AtomNode], str]] = None,
scheme_label: t.Optional[t.Callable[[SchemeNode], str]] = None,
) -> nx.DiGraph:
"""Transform a Graph instance into an instance of networkx directed graph. Refer to the networkx library for additional information.
Examples:
>>> g = Graph("Test")
>>> n1 = AtomNode("Node1")
>>> n2 = AtomNode("Node2")
>>> e = Edge(n1, n2)
>>> g.add_edge(e)
>>> gnx = g.to_nx()
>>> gnx.number_of_nodes()
2
"""
g = nx.DiGraph()
for node in self._atom_nodes.values():
node.to_nx(g, atom_label)
for node in self._scheme_nodes.values():
node.to_nx(g, scheme_label)
for edge in self._edges.values():
edge.to_nx(g)
return g
def to_gv(
self,
format: t.Optional[str] = None,
engine: t.Optional[str] = None,
nodesep: t.Optional[float] = None,
ranksep: t.Optional[float] = None,
wrap_col: t.Optional[int] = None,
margin: t.Optional[t.Tuple[float, float]] = None,
font_name: t.Optional[str] = None,
font_size: t.Optional[float] = None,
atom_label: t.Optional[t.Callable[[AtomNode], str]] = None,
scheme_label: t.Optional[t.Callable[[SchemeNode], str]] = None,
graph_attr: t.Optional[t.Mapping[str, str]] = None,
node_attr: t.Optional[t.Mapping[str, str]] = None,
edge_attr: t.Optional[t.Mapping[str, str]] = None,
) -> gv.Digraph:
"""Transform a Graph instance into an instance of GraphViz directed graph. Make sure that a GraphViz Executable path is set on your machine for visualization. Refer to the GraphViz library for additional information."""
gv_margin = lambda x: f"{x[0]},{x[1]}"
if not graph_attr:
graph_attr = {}
if not node_attr:
node_attr = {}
if not edge_attr:
edge_attr = {}
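# The defaults below (Arial, filled box nodes, bottom-to-top rank direction)
# apply unless overridden via the graph_attr/node_attr/edge_attr mappings.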
g = gv.Digraph(
name=str(self.name),
strict=True,
format=format or "pdf",
engine=engine or "dot",
node_attr={
"fontname": font_name or "Arial",
"fontsize": str(font_size or 11),
"margin": gv_margin(margin or (0.15, 0.1)),
"style": "filled",
"shape": "box",
"width": "0",
"height": "0",
**node_attr,
},
edge_attr={"color": "#666666", **edge_attr},
graph_attr={
"rankdir": "BT",
"margin": "0",
"nodesep": str(nodesep or 0.25),
"ranksep": str(ranksep or 0.5),
**graph_attr,
},
)
for node in self._atom_nodes.values():
node.to_gv(
g,
self.major_claim == node,
label_func=atom_label,
wrap_col=wrap_col or 36,
)
for node in self._scheme_nodes.values():
node.to_gv(
g,
self.major_claim == node,
label_func=scheme_label,
wrap_col=wrap_col or 36,
)
for edge in self._edges.values():
edge.to_gv(g)
return g
def strip_snodes(self) -> None:
"""Remove scheme nodes from graph and merge respective edges into singular edge"""
snodes = list(self._scheme_nodes.values())
for snode in snodes:
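# Bridge every incoming atom node directly to every outgoing atom node,
# then drop the scheme node itself.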
for incoming, outgoing in itertools.product(
self._incoming_edges[snode], self._outgoing_edges[snode]
):
if isinstance(incoming.source, AtomNode) and isinstance(
outgoing.target, AtomNode
):
self.add_edge(
Edge(
incoming.source,
outgoing.target,
id=f"{incoming.id}-{outgoing.id}",
)
)
self.remove_node(snode)
def copy(
self,
atom_class: t.Type[AtomNode] = AtomNode,
scheme_class: t.Type[SchemeNode] = SchemeNode,
edge_class: t.Type[Edge] = Edge,
nlp: t.Optional[t.Callable[[str], t.Any]] = None,
) -> Graph:
"""Contents of Graph instance are copied into new Graph object."""
return Graph.from_dict(
self.to_dict(format=GraphFormat.ARGUEBUF),
self.name,
atom_class,
scheme_class,
edge_class,
nlp,
)
def _node_distance(
node1: Node,
node2: Node,
connections: t.Callable[[Node], t.Iterable[Node]],
max_distance: t.Optional[int],
) -> t.Optional[int]:
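# Depth-first expansion from node1 along the supplied connections function;
# returns the distance at which node2 is first encountered, or None if it is
# not reached within max_distance.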
expansion: t.List[t.Tuple[Node, int]] = [(n, 1) for n in connections(node1)]
while len(expansion) > 0:
candidate, distance = expansion.pop()
if max_distance is not None and distance > max_distance:
continue
elif candidate == node2:
return distance
else:
expansion.extend((n, distance + 1) for n in connections(candidate))
return None
def _kialo_atom_node(
id: str,
text: str,
nlp: t.Optional[t.Callable[[str], t.Any]],
atom_class: t.Type[AtomNode],
) -> AtomNode:
# Remove backslashes before parentheses/brackets
text = re.sub(r"\\([\[\]\(\)])", r"\1", text)
# Remove markdown links
text = re.sub(
r"\[(.*?)\]\(.*?\)",
r"\1",
text,
)
# Apply user-provided nlp function
text = utils.parse(text, nlp)
return atom_class(text, id=id)
def render(
g: t.Union[gv.Graph, gv.Digraph],
path: Path,
view: bool = False,
) -> None:
"""Visualize a Graph instance using a GraphViz backend. Make sure that a GraphViz Executable path is set on your machine for visualization."""
filename = path.stem
directory = path.parent
try:
g.render(
filename=filename,
directory=str(directory),
cleanup=True,
view=view,
)
except gv.ExecutableNotFound:
log.error("Rendering not possible. GraphViz might not be installed.")
def _inject_original_text(
raw_text: str,
nodes: t.Mapping[str, AtomNode],
resource: Resource,
nlp: t.Optional[t.Callable[[str], t.Any]],
) -> None:
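# Rebuild the plain text from the HTML body and attach a Reference (resource,
# character offset, parsed span text) to each atom node that is referenced by
# a <span id="nodeX"> element.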
doc = html.fromstring(f"<html><head></head><body>{raw_text}</body></html>")
text = ""
for elem in doc.body.iter():
# Span elements need special handling
if elem.tag == "span":
# The id is prefixed with 'node', e.g. 'node5'.
node_key = elem.attrib["id"].replace("node", "")
if node := nodes.get(node_key):
node._reference = Reference(
resource, len(text), utils.parse(elem.text, nlp)
)
if elem.text:
text += elem.text
elif elem.tag == "br":
text += "\n"
elif elem.text:
text += elem.text
# Text after a tag should always be added to the overall text
if elem.tail:
text += elem.tail
| [
"logging.getLogger",
"arguebuf.models.metadata.Metadata.from_protobuf",
"arguebuf.models.edge.Edge",
"re.compile",
"arguebuf.models.participant.Participant.from_ova",
"arguebuf.services.utils.duplicate_key_error",
"re.search",
"networkx.DiGraph",
"itertools.product",
"arg_services.graph.v1.graph_pb2.Graph",
"arguebuf.services.utils.missing_key_error",
"arguebuf.services.utils.parse",
"arguebuf.models.metadata.Metadata",
"csv.reader",
"arguebuf.services.utils.ImmutableSet",
"arguebuf.services.utils.ImmutableDict",
"re.sub",
"arguebuf.models.analyst.Analyst",
"arguebuf.models.resource.Resource.from_ova",
"typing.cast",
"lxml.html.fromstring",
"arguebuf.services.utils.class_repr",
"arguebuf.services.utils.uuid",
"json.load"
] | [((964, 991), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (981, 991), False, 'import logging\n'), ((42921, 42964), 're.sub', 're.sub', (['"""\\\\\\\\([\\\\[\\\\]\\\\(\\\\)])"""', '"""\\\\1"""', 'text'], {}), "('\\\\\\\\([\\\\[\\\\]\\\\(\\\\)])', '\\\\1', text)\n", (42927, 42964), False, 'import re\n'), ((43000, 43043), 're.sub', 're.sub', (['"""\\\\[(.*?)\\\\]\\\\(.*?\\\\)"""', '"""\\\\1"""', 'text'], {}), "('\\\\[(.*?)\\\\]\\\\(.*?\\\\)', '\\\\1', text)\n", (43006, 43043), False, 'import re\n'), ((43123, 43145), 'arguebuf.services.utils.parse', 'utils.parse', (['text', 'nlp'], {}), '(text, nlp)\n', (43134, 43145), False, 'from arguebuf.services import utils\n'), ((43930, 43999), 'lxml.html.fromstring', 'html.fromstring', (['f"""<html><head></head><body>{raw_text}</body></html>"""'], {}), "(f'<html><head></head><body>{raw_text}</body></html>')\n", (43945, 43999), False, 'from lxml import html\n'), ((7167, 7182), 'arguebuf.services.utils.ImmutableDict', 'ImmutableDict', ([], {}), '()\n', (7180, 7182), False, 'from arguebuf.services.utils import ImmutableDict, ImmutableSet\n'), ((7210, 7225), 'arguebuf.services.utils.ImmutableDict', 'ImmutableDict', ([], {}), '()\n', (7223, 7225), False, 'from arguebuf.services.utils import ImmutableDict, ImmutableSet\n'), ((7255, 7270), 'arguebuf.services.utils.ImmutableDict', 'ImmutableDict', ([], {}), '()\n', (7268, 7270), False, 'from arguebuf.services.utils import ImmutableDict, ImmutableSet\n'), ((7293, 7308), 'arguebuf.services.utils.ImmutableDict', 'ImmutableDict', ([], {}), '()\n', (7306, 7308), False, 'from arguebuf.services.utils import ImmutableDict, ImmutableSet\n'), ((7334, 7349), 'arguebuf.services.utils.ImmutableDict', 'ImmutableDict', ([], {}), '()\n', (7347, 7349), False, 'from arguebuf.services.utils import ImmutableDict, ImmutableSet\n'), ((7403, 7418), 'arguebuf.services.utils.ImmutableDict', 'ImmutableDict', ([], {}), '()\n', (7416, 7418), False, 'from arguebuf.services.utils import ImmutableDict, ImmutableSet\n'), ((7448, 7463), 'arguebuf.services.utils.ImmutableDict', 'ImmutableDict', ([], {}), '()\n', (7461, 7463), False, 'from arguebuf.services.utils import ImmutableDict, ImmutableSet\n'), ((7488, 7498), 'arguebuf.models.metadata.Metadata', 'Metadata', ([], {}), '()\n', (7496, 7498), False, 'from arguebuf.models.metadata import Metadata\n'), ((7564, 7579), 'arguebuf.services.utils.ImmutableDict', 'ImmutableDict', ([], {}), '()\n', (7577, 7579), False, 'from arguebuf.services.utils import ImmutableDict, ImmutableSet\n'), ((7611, 7626), 'arguebuf.services.utils.ImmutableDict', 'ImmutableDict', ([], {}), '()\n', (7624, 7626), False, 'from arguebuf.services.utils import ImmutableDict, ImmutableSet\n'), ((7658, 7673), 'arguebuf.services.utils.ImmutableDict', 'ImmutableDict', ([], {}), '()\n', (7671, 7673), False, 'from arguebuf.services.utils import ImmutableDict, ImmutableSet\n'), ((7705, 7720), 'arguebuf.services.utils.ImmutableDict', 'ImmutableDict', ([], {}), '()\n', (7718, 7720), False, 'from arguebuf.services.utils import ImmutableDict, ImmutableSet\n'), ((7905, 7940), 'arguebuf.services.utils.class_repr', 'utils.class_repr', (['self', '[self.name]'], {}), '(self, [self.name])\n', (7921, 7940), False, 'from arguebuf.services import utils\n'), ((9470, 9484), 'arguebuf.services.utils.ImmutableSet', 'ImmutableSet', ([], {}), '()\n', (9482, 9484), False, 'from arguebuf.services.utils import ImmutableDict, ImmutableSet\n'), ((9529, 9543), 'arguebuf.services.utils.ImmutableSet', 'ImmutableSet', ([], 
{}), '()\n', (9541, 9543), False, 'from arguebuf.services.utils import ImmutableDict, ImmutableSet\n'), ((9588, 9602), 'arguebuf.services.utils.ImmutableSet', 'ImmutableSet', ([], {}), '()\n', (9600, 9602), False, 'from arguebuf.services.utils import ImmutableDict, ImmutableSet\n'), ((9647, 9661), 'arguebuf.services.utils.ImmutableSet', 'ImmutableSet', ([], {}), '()\n', (9659, 9661), False, 'from arguebuf.services.utils import ImmutableDict, ImmutableSet\n'), ((21330, 21369), 'arguebuf.models.resource.Resource.from_ova', 'Resource.from_ova', (["obj['analysis']", 'nlp'], {}), "(obj['analysis'], nlp)\n", (21347, 21369), False, 'from arguebuf.models.resource import Resource\n'), ((27233, 27269), 'arguebuf.models.metadata.Metadata.from_protobuf', 'Metadata.from_protobuf', (['obj.metadata'], {}), '(obj.metadata)\n', (27255, 27269), False, 'from arguebuf.models.metadata import Metadata\n'), ((29988, 30019), 'csv.reader', 'csv.reader', (['obj'], {'delimiter': '"""\t"""'}), "(obj, delimiter='\\t')\n", (29998, 30019), False, 'import csv\n'), ((32120, 32174), 're.compile', 're.compile', (['"""^(1\\\\.(?:\\\\d+\\\\.)+) (?:(Con|Pro): )?(.*)"""'], {}), "('^(1\\\\.(?:\\\\d+\\\\.)+) (?:(Con|Pro): )?(.*)')\n", (32130, 32174), False, 'import re\n'), ((32266, 32313), 're.search', 're.search', (['"""^((?:\\\\d+\\\\.)+) (.*)"""', 'current_line'], {}), "('^((?:\\\\d+\\\\.)+) (.*)', current_line)\n", (32275, 32313), False, 'import re\n'), ((38003, 38015), 'networkx.DiGraph', 'nx.DiGraph', ([], {}), '()\n', (38013, 38015), True, 'import networkx as nx\n'), ((7132, 7144), 'arguebuf.services.utils.uuid', 'utils.uuid', ([], {}), '()\n', (7142, 7144), False, 'from arguebuf.services import utils\n'), ((29119, 29133), 'json.load', 'json.load', (['obj'], {}), '(obj)\n', (29128, 29133), False, 'import json\n'), ((30091, 30111), 'arguebuf.services.utils.parse', 'utils.parse', (['""""""', 'nlp'], {}), "('', nlp)\n", (30102, 30111), False, 'from arguebuf.services import utils\n'), ((41042, 41117), 'itertools.product', 'itertools.product', (['self._incoming_edges[snode]', 'self._outgoing_edges[snode]'], {}), '(self._incoming_edges[snode], self._outgoing_edges[snode])\n', (41059, 41117), False, 'import itertools\n'), ((8768, 8813), 'arguebuf.services.utils.duplicate_key_error', 'utils.duplicate_key_error', (['self.name', 'node.id'], {}), '(self.name, node.id)\n', (8793, 8813), False, 'from arguebuf.services import utils\n'), ((10455, 10498), 'arguebuf.services.utils.missing_key_error', 'utils.missing_key_error', (['self.name', 'node.id'], {}), '(self.name, node.id)\n', (10478, 10498), False, 'from arguebuf.services import utils\n'), ((11851, 11896), 'arguebuf.services.utils.duplicate_key_error', 'utils.duplicate_key_error', (['self.name', 'edge.id'], {}), '(self.name, edge.id)\n', (11876, 11896), False, 'from arguebuf.services import utils\n'), ((13095, 13138), 'arguebuf.services.utils.missing_key_error', 'utils.missing_key_error', (['self.name', 'edge.id'], {}), '(self.name, edge.id)\n', (13118, 13138), False, 'from arguebuf.services import utils\n'), ((14026, 14075), 'arguebuf.services.utils.duplicate_key_error', 'utils.duplicate_key_error', (['self.name', 'resource.id'], {}), '(self.name, resource.id)\n', (14051, 14075), False, 'from arguebuf.services import utils\n'), ((14765, 14812), 'arguebuf.services.utils.missing_key_error', 'utils.missing_key_error', (['self.name', 'resource.id'], {}), '(self.name, resource.id)\n', (14788, 14812), False, 'from arguebuf.services import utils\n'), ((16117, 16169), 
'arguebuf.services.utils.duplicate_key_error', 'utils.duplicate_key_error', (['self.name', 'participant.id'], {}), '(self.name, participant.id)\n', (16142, 16169), False, 'from arguebuf.services import utils\n'), ((16919, 16969), 'arguebuf.services.utils.missing_key_error', 'utils.missing_key_error', (['self.name', 'participant.id'], {}), '(self.name, participant.id)\n', (16942, 16969), False, 'from arguebuf.services import utils\n'), ((18123, 18171), 'arguebuf.services.utils.duplicate_key_error', 'utils.duplicate_key_error', (['self.name', 'analyst.id'], {}), '(self.name, analyst.id)\n', (18148, 18171), False, 'from arguebuf.services import utils\n'), ((18869, 18915), 'arguebuf.services.utils.missing_key_error', 'utils.missing_key_error', (['self.name', 'analyst.id'], {}), '(self.name, analyst.id)\n', (18892, 18915), False, 'from arguebuf.services import utils\n'), ((21482, 21515), 'arguebuf.models.participant.Participant.from_ova', 'Participant.from_ova', (['participant'], {}), '(participant)\n', (21502, 21515), False, 'from arguebuf.models.participant import Participant\n'), ((21609, 21635), 'arguebuf.models.analyst.Analyst', 'Analyst', ([], {'name': 'analyst_name'}), '(name=analyst_name)\n', (21616, 21635), False, 'from arguebuf.models.analyst import Analyst\n'), ((27881, 27903), 'typing.cast', 't.cast', (['ova.Graph', 'obj'], {}), '(ova.Graph, obj)\n', (27887, 27903), True, 'import typing as t\n'), ((28048, 28070), 'typing.cast', 't.cast', (['aif.Graph', 'obj'], {}), '(aif.Graph, obj)\n', (28054, 28070), True, 'import typing as t\n'), ((28196, 28213), 'arg_services.graph.v1.graph_pb2.Graph', 'graph_pb2.Graph', ([], {}), '()\n', (28211, 28213), False, 'from arg_services.graph.v1 import graph_pb2\n'), ((30348, 30394), 'arguebuf.services.utils.parse', 'utils.parse', (['f"""{mc.plain_text}. {row[2]}"""', 'nlp'], {}), "(f'{mc.plain_text}. {row[2]}', nlp)\n", (30359, 30394), False, 'from arguebuf.services import utils\n'), ((33785, 33826), 're.search', 're.search', (['"""^-> See ((?:\\\\d+\\\\.)+)"""', 'text'], {}), "('^-> See ((?:\\\\d+\\\\.)+)', text)\n", (33794, 33826), False, 'import re\n'), ((44376, 44403), 'arguebuf.services.utils.parse', 'utils.parse', (['elem.text', 'nlp'], {}), '(elem.text, nlp)\n', (44387, 44403), False, 'from arguebuf.services import utils\n'), ((30455, 30479), 'arguebuf.services.utils.parse', 'utils.parse', (['row[2]', 'nlp'], {}), '(row[2], nlp)\n', (30466, 30479), False, 'from arguebuf.services import utils\n'), ((41346, 41419), 'arguebuf.models.edge.Edge', 'Edge', (['incoming.source', 'outgoing.target'], {'id': 'f"""{incoming.id}-{outgoing.id}"""'}), "(incoming.source, outgoing.target, id=f'{incoming.id}-{outgoing.id}')\n", (41350, 41419), False, 'from arguebuf.models.edge import Edge\n')] |
"""XJTU dataset."""
import os
from pathlib import Path
import itertools
import json
import numpy as np
import tensorflow as tf
import tensorflow_datasets as tfds
import pandas as pd
# from scipy.io import loadmat
_METAINFO = pd.read_csv(Path(__file__).parent/'metainfo.csv')
_DESCRIPTION = """
XJTU-SY Bearing Datasets.
Description
===========
XJTU-SY bearing datasets are provided by the Institute of Design Science and Basic Component at Xi’an Jiaotong University (XJTU), Shaanxi, P.R. China (http://gr.xjtu.edu.cn/web/yaguolei) and the Changxing Sumyoung Technology Co.,
Ltd. (SY), Zhejiang, P.R. China (https://www.sumyoungtech.com.cn). The datasets contain complete run-to-failure data of 15 rolling element bearings that were acquired by conducting many accelerated degradation experiments. These datasets are publicly available and anyone can use them to validate prognostics algorithms of rolling element bearings. Publications making use of the XJTU-SY bearing datasets are requested to cite the following paper.
Citation
--------
<NAME>, <NAME>, <NAME>, <NAME>, “A Hybrid Prognostics Approach for Estimating Remaining Useful Life of Rolling Element Bearings”, IEEE Transactions on Reliability, pp. 1-12, 2018. DOI: 10.1109/TR.2018.2882682.
Homepage
--------
https://biaowang.tech/xjtu-sy-bearing-datasets/
Original data
=============
Sampling rate: 25.6 kHz
Duration: 1.28 seconds
Signal length: 32768
Sampling period: 1 minute
Number of channels: 2, horizontal and vertical acceleration
Original split: 3 operating conditions (rotating speed and radial force), with 5 bearings each
- 1) 2100 rpm (35 Hz) and 12 kN;
- 2) 2250 rpm (37.5 Hz) and 11 kN;
- 3) 2400 rpm (40 Hz) and 10 kN.
Download
--------
https://www.dropbox.com/sh/qka3b73wuvn5l7a/AADdQk8ZCsNkzjz11JewU7cBa/Data?dl=0&subfolder_nav_tracking=1
Notes
=====
The original dataset contains 6 rar files which need to be extracted together.
"""
_CITATION = """
@article{wang2018hybrid,
title={A hybrid prognostics approach for estimating remaining useful life of rolling element bearings},
author={<NAME> and <NAME> and <NAME> and <NAME>},
journal={IEEE Transactions on Reliability},
volume={69},
number={1},
pages={401--412},
year={2018},
publisher={IEEE}
}
"""
_SPLIT_PATH_MATCH = {
'condition1': '35Hz12kN',
'condition2': '37.5Hz11kN',
'condition3': '40Hz10kN',
}
# _PARSER_MATCH = {
# # 'file name pattern':
# }
# _DATA_URLS = ''
class XJTU(tfds.core.GeneratorBasedBuilder):
"""DatasetBuilder for XJTU dataset."""
VERSION = tfds.core.Version('1.0.0')
RELEASE_NOTES = {
'1.0.0': 'Initial release.',
}
MANUAL_DOWNLOAD_INSTRUCTIONS = """
Please download all data via the following link:
https://www.dropbox.com/sh/qka3b73wuvn5l7a/AADdQk8ZCsNkzjz11JewU7cBa/Data?dl=0&subfolder_nav_tracking=1
or via any other links listed on the author's website:
https://biaowang.tech/xjtu-sy-bearing-datasets/
and extract all 6 rar files located in the subfolder `Data` (unrarring the first one, `XJTU-SY_Bearing_Datasets.part01.rar`, will automatically extract all other files). Then proceed with the installation manually, e.g. from the terminal via the command
`$ tfds build xjtu --manual_dir $PATH_TO_EXTRACTED_FILES`
"""
def _info(self) -> tfds.core.DatasetInfo:
"""Returns the dataset metadata."""
return tfds.core.DatasetInfo(
builder=self,
description=_DESCRIPTION,
features=tfds.features.FeaturesDict({
# Two fixed channels; the signal length is left variable (None)
'signal': tfds.features.Tensor(shape=(None, 2), dtype=tf.float64),
# 'label': tfds.features.ClassLabel(names=['Healthy', 'Faulty', 'Unknown']),
'metadata': {
'OperatingCondition': tf.int32, # Operating condition
'BearingID': tf.int32, # ID of the bearing
'FaultComponent': tf.string, # Component of the fault, e.g. {'Roller', 'InnerRing'}
'Lifetime': tf.float32,
'FileName': tf.string, # Original filename with path
}
}),
# If there's a common (input, target) tuple from the
# features, specify them here. They'll be used if
# `as_supervised=True` in `builder.as_dataset`.
# supervised_keys=('signal', 'label'), # Set to `None` to disable
supervised_keys=None,
homepage='',
citation=_CITATION,
)
def _split_generators(self, dl_manager: tfds.download.DownloadManager):
if dl_manager._manual_dir.exists(): # prefer to use manually downloaded data
datadir = dl_manager._manual_dir
else: # automatically download data
# For too large dataset or unsupported format
raise FileNotFoundError(self.MANUAL_DOWNLOAD_INSTRUCTIONS)
return {
sp: self._generate_examples(datadir/fn) for sp, fn in _SPLIT_PATH_MATCH.items()
# 'train': self._generate_examples_all(datadir),
}
def _generate_examples(self, path):
# assert path.exists()
# If the download files are extracted
for fp in path.rglob('*.csv'):
# print(fp)
# parse the filename
_condition, _bearing = fp.parent.name[7:].split('_')
# print(int(_condition), int(_bearing))
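# Parent folders are named like "Bearing1_2"; strip the "Bearing" prefix, then
# match the metainfo row by operating condition and bearing ID.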
metadata = _METAINFO.loc[(_METAINFO['OperatingCondition']==int(_condition)) & (_METAINFO['BearingID']==int(_bearing))].iloc[0].to_dict()
metadata['FileName'] = os.path.join(*fp.parts[-3:])
x = pd.read_csv(fp).values
yield hash(frozenset(metadata.items())), {
'signal': x,
# 'label': 'Faulty',
'metadata': metadata
}
@staticmethod
def get_references():
try:
with open(Path(__file__).parent / 'Exported Items.bib') as fp:
return fp.read()
except Exception:
pass
| [
"pandas.read_csv",
"tensorflow_datasets.features.Tensor",
"pathlib.Path",
"os.path.join",
"tensorflow_datasets.core.Version"
] | [((2536, 2562), 'tensorflow_datasets.core.Version', 'tfds.core.Version', (['"""1.0.0"""'], {}), "('1.0.0')\n", (2553, 2562), True, 'import tensorflow_datasets as tfds\n'), ((239, 253), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (243, 253), False, 'from pathlib import Path\n'), ((5372, 5400), 'os.path.join', 'os.path.join', (['*fp.parts[-3:]'], {}), '(*fp.parts[-3:])\n', (5384, 5400), False, 'import os\n'), ((5412, 5427), 'pandas.read_csv', 'pd.read_csv', (['fp'], {}), '(fp)\n', (5423, 5427), True, 'import pandas as pd\n'), ((3536, 3591), 'tensorflow_datasets.features.Tensor', 'tfds.features.Tensor', ([], {'shape': '(None, 2)', 'dtype': 'tf.float64'}), '(shape=(None, 2), dtype=tf.float64)\n', (3556, 3591), True, 'import tensorflow_datasets as tfds\n'), ((5638, 5652), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (5642, 5652), False, 'from pathlib import Path\n')] |
import redis
import sys
if sys.argv[1] == "prod":
HOST = 'redis_master_1'
else:
HOST = 'XX'
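# decode_responses=True makes the client return str values instead of raw bytes.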
db = redis.Redis(host=HOST, port=6379, db=0,
password='<PASSWORD>', decode_responses=True)
| [
"redis.Redis"
] | [((107, 196), 'redis.Redis', 'redis.Redis', ([], {'host': 'HOST', 'port': '(6379)', 'db': '(0)', 'password': '"""<PASSWORD>"""', 'decode_responses': '(True)'}), "(host=HOST, port=6379, db=0, password='<PASSWORD>',\n decode_responses=True)\n", (118, 196), False, 'import redis\n')] |
import logging
from flask import Blueprint
from burgeon.api.tracks.create_track_api import CreateTrackAPI
from burgeon.api.tracks.update_track_api import UpdateTrackAPI
from burgeon.api.tracks.delete_track_api import DeleteTrackAPI
from burgeon.api.tracks.get_track_api import GetTrackAPI
from burgeon.api.tracks.get_tracks_api import GetTracksAPI
log = logging.getLogger('burgeon.tracks')
tracks_blueprint = Blueprint('tracks', __name__)
# API resources
get_tracks_api_view = GetTracksAPI.as_view('get_tracks_api')
get_track_api_view = GetTrackAPI.as_view('get_track_api')
create_track_api_view = CreateTrackAPI.as_view('create_tracks_api')
update_track_api_view = UpdateTrackAPI.as_view('update_tracks_api')
delete_track_api_view = DeleteTrackAPI.as_view('delete_tracks_api')
### API Endpoints
## GET
tracks_blueprint.add_url_rule(
'/tracks',
view_func=get_tracks_api_view,
methods=['GET']
)
tracks_blueprint.add_url_rule(
'/tracks/<int:track_id>',
view_func=get_track_api_view,
methods=['GET']
)
## POST
tracks_blueprint.add_url_rule(
'/tracks',
view_func=create_track_api_view,
methods=['POST']
)
tracks_blueprint.add_url_rule(
'/tracks/<int:track_id>',
view_func=update_track_api_view,
methods=['PUT']
)
tracks_blueprint.add_url_rule(
'/tracks/<int:track_id>',
view_func=delete_track_api_view,
methods=['DELETE']
)
| [
"logging.getLogger",
"burgeon.api.tracks.create_track_api.CreateTrackAPI.as_view",
"burgeon.api.tracks.delete_track_api.DeleteTrackAPI.as_view",
"burgeon.api.tracks.get_track_api.GetTrackAPI.as_view",
"burgeon.api.tracks.get_tracks_api.GetTracksAPI.as_view",
"burgeon.api.tracks.update_track_api.UpdateTrackAPI.as_view",
"flask.Blueprint"
] | [((356, 391), 'logging.getLogger', 'logging.getLogger', (['"""burgeon.tracks"""'], {}), "('burgeon.tracks')\n", (373, 391), False, 'import logging\n'), ((412, 441), 'flask.Blueprint', 'Blueprint', (['"""tracks"""', '__name__'], {}), "('tracks', __name__)\n", (421, 441), False, 'from flask import Blueprint\n'), ((483, 521), 'burgeon.api.tracks.get_tracks_api.GetTracksAPI.as_view', 'GetTracksAPI.as_view', (['"""get_tracks_api"""'], {}), "('get_tracks_api')\n", (503, 521), False, 'from burgeon.api.tracks.get_tracks_api import GetTracksAPI\n'), ((546, 582), 'burgeon.api.tracks.get_track_api.GetTrackAPI.as_view', 'GetTrackAPI.as_view', (['"""get_track_api"""'], {}), "('get_track_api')\n", (565, 582), False, 'from burgeon.api.tracks.get_track_api import GetTrackAPI\n'), ((607, 650), 'burgeon.api.tracks.create_track_api.CreateTrackAPI.as_view', 'CreateTrackAPI.as_view', (['"""create_tracks_api"""'], {}), "('create_tracks_api')\n", (629, 650), False, 'from burgeon.api.tracks.create_track_api import CreateTrackAPI\n'), ((675, 718), 'burgeon.api.tracks.update_track_api.UpdateTrackAPI.as_view', 'UpdateTrackAPI.as_view', (['"""update_tracks_api"""'], {}), "('update_tracks_api')\n", (697, 718), False, 'from burgeon.api.tracks.update_track_api import UpdateTrackAPI\n'), ((743, 786), 'burgeon.api.tracks.delete_track_api.DeleteTrackAPI.as_view', 'DeleteTrackAPI.as_view', (['"""delete_tracks_api"""'], {}), "('delete_tracks_api')\n", (765, 786), False, 'from burgeon.api.tracks.delete_track_api import DeleteTrackAPI\n')] |
# ResNet
# When tuning, start with the learning rate -> mini-batch size ->
# momentum -> #hidden units -> learning-rate decay -> #layers
import tensorflow.keras as keras
import numpy as np
import pandas as pd
import time
import matplotlib
class Classifier_RESNET:
def __init__(self, input_shape, nb_classes):
self.model = self.build_model(input_shape, nb_classes)
def build_model(self, input_shape, nb_classes):
n_feature_maps = 64
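# Three residual blocks (Conv1D with kernel sizes 8/5/3 + BatchNorm + ReLU,
# plus a shortcut connection per block), followed by global average pooling
# and a softmax classifier.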
input_layer = keras.layers.Input(input_shape)
# BLOCK 1
conv_x = keras.layers.Conv1D(filters=n_feature_maps, kernel_size=8, padding='same')(input_layer)
conv_x = keras.layers.BatchNormalization()(conv_x)
conv_x = keras.layers.Activation('relu')(conv_x)
conv_y = keras.layers.Conv1D(filters=n_feature_maps, kernel_size=5, padding='same')(conv_x)
conv_y = keras.layers.BatchNormalization()(conv_y)
conv_y = keras.layers.Activation('relu')(conv_y)
conv_z = keras.layers.Conv1D(filters=n_feature_maps, kernel_size=3, padding='same')(conv_y)
conv_z = keras.layers.BatchNormalization()(conv_z)
# expand channels for the sum with a 1 x 1 convolution
shortcut_y = keras.layers.Conv1D(filters=n_feature_maps, kernel_size=1, padding='same')(input_layer)
shortcut_y = keras.layers.BatchNormalization()(shortcut_y)
output_block_1 = keras.layers.add([shortcut_y, conv_z])
output_block_1 = keras.layers.Activation('relu')(output_block_1)
# BLOCK 2
conv_x = keras.layers.Conv1D(filters=n_feature_maps * 2, kernel_size=8, padding='same')(output_block_1)
conv_x = keras.layers.BatchNormalization()(conv_x)
conv_x = keras.layers.Activation('relu')(conv_x)
conv_y = keras.layers.Conv1D(filters=n_feature_maps * 2, kernel_size=5, padding='same')(conv_x)
conv_y = keras.layers.BatchNormalization()(conv_y)
conv_y = keras.layers.Activation('relu')(conv_y)
conv_z = keras.layers.Conv1D(filters=n_feature_maps * 2, kernel_size=3, padding='same')(conv_y)
conv_z = keras.layers.BatchNormalization()(conv_z)
# expand channels for the sum
shortcut_y = keras.layers.Conv1D(filters=n_feature_maps * 2, kernel_size=1, padding='same')(output_block_1)
shortcut_y = keras.layers.BatchNormalization()(shortcut_y)
output_block_2 = keras.layers.add([shortcut_y, conv_z])
output_block_2 = keras.layers.Activation('relu')(output_block_2)
# BLOCK 3
conv_x = keras.layers.Conv1D(filters=n_feature_maps * 2, kernel_size=8, padding='same')(output_block_2)
conv_x = keras.layers.BatchNormalization()(conv_x)
conv_x = keras.layers.Activation('relu')(conv_x)
conv_y = keras.layers.Conv1D(filters=n_feature_maps * 2, kernel_size=5, padding='same')(conv_x)
conv_y = keras.layers.BatchNormalization()(conv_y)
conv_y = keras.layers.Activation('relu')(conv_y)
conv_z = keras.layers.Conv1D(filters=n_feature_maps * 2, kernel_size=3, padding='same')(conv_y)
conv_z = keras.layers.BatchNormalization()(conv_z)
# no need to expand channels because they are equal
shortcut_y = keras.layers.BatchNormalization()(output_block_2)
output_block_3 = keras.layers.add([shortcut_y, conv_z])
output_block_3 = keras.layers.Activation('relu')(output_block_3)
# FINAL
gap_layer = keras.layers.GlobalAveragePooling1D()(output_block_3)
# GlobalAveragePooling1D: global average pooling over the temporal dimension.
output_layer = keras.layers.Dense(nb_classes, activation='softmax')(gap_layer)
model = keras.models.Model(inputs=input_layer, outputs=output_layer)
return model
| [
"tensorflow.keras.layers.Input",
"tensorflow.keras.layers.add",
"tensorflow.keras.layers.BatchNormalization",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.layers.GlobalAveragePooling1D",
"tensorflow.keras.models.Model",
"tensorflow.keras.layers.Activation",
"tensorflow.keras.layers.Conv1D"
] | [((480, 511), 'tensorflow.keras.layers.Input', 'keras.layers.Input', (['input_shape'], {}), '(input_shape)\n', (498, 511), True, 'import tensorflow.keras as keras\n'), ((1384, 1422), 'tensorflow.keras.layers.add', 'keras.layers.add', (['[shortcut_y, conv_z]'], {}), '([shortcut_y, conv_z])\n', (1400, 1422), True, 'import tensorflow.keras as keras\n'), ((2377, 2415), 'tensorflow.keras.layers.add', 'keras.layers.add', (['[shortcut_y, conv_z]'], {}), '([shortcut_y, conv_z])\n', (2393, 2415), True, 'import tensorflow.keras as keras\n'), ((3280, 3318), 'tensorflow.keras.layers.add', 'keras.layers.add', (['[shortcut_y, conv_z]'], {}), '([shortcut_y, conv_z])\n', (3296, 3318), True, 'import tensorflow.keras as keras\n'), ((3637, 3697), 'tensorflow.keras.models.Model', 'keras.models.Model', ([], {'inputs': 'input_layer', 'outputs': 'output_layer'}), '(inputs=input_layer, outputs=output_layer)\n', (3655, 3697), True, 'import tensorflow.keras as keras\n'), ((549, 623), 'tensorflow.keras.layers.Conv1D', 'keras.layers.Conv1D', ([], {'filters': 'n_feature_maps', 'kernel_size': '(8)', 'padding': '"""same"""'}), "(filters=n_feature_maps, kernel_size=8, padding='same')\n", (568, 623), True, 'import tensorflow.keras as keras\n'), ((654, 687), 'tensorflow.keras.layers.BatchNormalization', 'keras.layers.BatchNormalization', ([], {}), '()\n', (685, 687), True, 'import tensorflow.keras as keras\n'), ((713, 744), 'tensorflow.keras.layers.Activation', 'keras.layers.Activation', (['"""relu"""'], {}), "('relu')\n", (736, 744), True, 'import tensorflow.keras as keras\n'), ((771, 845), 'tensorflow.keras.layers.Conv1D', 'keras.layers.Conv1D', ([], {'filters': 'n_feature_maps', 'kernel_size': '(5)', 'padding': '"""same"""'}), "(filters=n_feature_maps, kernel_size=5, padding='same')\n", (790, 845), True, 'import tensorflow.keras as keras\n'), ((871, 904), 'tensorflow.keras.layers.BatchNormalization', 'keras.layers.BatchNormalization', ([], {}), '()\n', (902, 904), True, 'import tensorflow.keras as keras\n'), ((930, 961), 'tensorflow.keras.layers.Activation', 'keras.layers.Activation', (['"""relu"""'], {}), "('relu')\n", (953, 961), True, 'import tensorflow.keras as keras\n'), ((988, 1062), 'tensorflow.keras.layers.Conv1D', 'keras.layers.Conv1D', ([], {'filters': 'n_feature_maps', 'kernel_size': '(3)', 'padding': '"""same"""'}), "(filters=n_feature_maps, kernel_size=3, padding='same')\n", (1007, 1062), True, 'import tensorflow.keras as keras\n'), ((1088, 1121), 'tensorflow.keras.layers.BatchNormalization', 'keras.layers.BatchNormalization', ([], {}), '()\n', (1119, 1121), True, 'import tensorflow.keras as keras\n'), ((1203, 1277), 'tensorflow.keras.layers.Conv1D', 'keras.layers.Conv1D', ([], {'filters': 'n_feature_maps', 'kernel_size': '(1)', 'padding': '"""same"""'}), "(filters=n_feature_maps, kernel_size=1, padding='same')\n", (1222, 1277), True, 'import tensorflow.keras as keras\n'), ((1312, 1345), 'tensorflow.keras.layers.BatchNormalization', 'keras.layers.BatchNormalization', ([], {}), '()\n', (1343, 1345), True, 'import tensorflow.keras as keras\n'), ((1448, 1479), 'tensorflow.keras.layers.Activation', 'keras.layers.Activation', (['"""relu"""'], {}), "('relu')\n", (1471, 1479), True, 'import tensorflow.keras as keras\n'), ((1533, 1611), 'tensorflow.keras.layers.Conv1D', 'keras.layers.Conv1D', ([], {'filters': '(n_feature_maps * 2)', 'kernel_size': '(8)', 'padding': '"""same"""'}), "(filters=n_feature_maps * 2, kernel_size=8, padding='same')\n", (1552, 1611), True, 'import tensorflow.keras as keras\n'), ((1645, 
1678), 'tensorflow.keras.layers.BatchNormalization', 'keras.layers.BatchNormalization', ([], {}), '()\n', (1676, 1678), True, 'import tensorflow.keras as keras\n'), ((1704, 1735), 'tensorflow.keras.layers.Activation', 'keras.layers.Activation', (['"""relu"""'], {}), "('relu')\n", (1727, 1735), True, 'import tensorflow.keras as keras\n'), ((1762, 1840), 'tensorflow.keras.layers.Conv1D', 'keras.layers.Conv1D', ([], {'filters': '(n_feature_maps * 2)', 'kernel_size': '(5)', 'padding': '"""same"""'}), "(filters=n_feature_maps * 2, kernel_size=5, padding='same')\n", (1781, 1840), True, 'import tensorflow.keras as keras\n'), ((1866, 1899), 'tensorflow.keras.layers.BatchNormalization', 'keras.layers.BatchNormalization', ([], {}), '()\n', (1897, 1899), True, 'import tensorflow.keras as keras\n'), ((1925, 1956), 'tensorflow.keras.layers.Activation', 'keras.layers.Activation', (['"""relu"""'], {}), "('relu')\n", (1948, 1956), True, 'import tensorflow.keras as keras\n'), ((1983, 2061), 'tensorflow.keras.layers.Conv1D', 'keras.layers.Conv1D', ([], {'filters': '(n_feature_maps * 2)', 'kernel_size': '(3)', 'padding': '"""same"""'}), "(filters=n_feature_maps * 2, kernel_size=3, padding='same')\n", (2002, 2061), True, 'import tensorflow.keras as keras\n'), ((2087, 2120), 'tensorflow.keras.layers.BatchNormalization', 'keras.layers.BatchNormalization', ([], {}), '()\n', (2118, 2120), True, 'import tensorflow.keras as keras\n'), ((2189, 2267), 'tensorflow.keras.layers.Conv1D', 'keras.layers.Conv1D', ([], {'filters': '(n_feature_maps * 2)', 'kernel_size': '(1)', 'padding': '"""same"""'}), "(filters=n_feature_maps * 2, kernel_size=1, padding='same')\n", (2208, 2267), True, 'import tensorflow.keras as keras\n'), ((2305, 2338), 'tensorflow.keras.layers.BatchNormalization', 'keras.layers.BatchNormalization', ([], {}), '()\n', (2336, 2338), True, 'import tensorflow.keras as keras\n'), ((2441, 2472), 'tensorflow.keras.layers.Activation', 'keras.layers.Activation', (['"""relu"""'], {}), "('relu')\n", (2464, 2472), True, 'import tensorflow.keras as keras\n'), ((2526, 2604), 'tensorflow.keras.layers.Conv1D', 'keras.layers.Conv1D', ([], {'filters': '(n_feature_maps * 2)', 'kernel_size': '(8)', 'padding': '"""same"""'}), "(filters=n_feature_maps * 2, kernel_size=8, padding='same')\n", (2545, 2604), True, 'import tensorflow.keras as keras\n'), ((2638, 2671), 'tensorflow.keras.layers.BatchNormalization', 'keras.layers.BatchNormalization', ([], {}), '()\n', (2669, 2671), True, 'import tensorflow.keras as keras\n'), ((2697, 2728), 'tensorflow.keras.layers.Activation', 'keras.layers.Activation', (['"""relu"""'], {}), "('relu')\n", (2720, 2728), True, 'import tensorflow.keras as keras\n'), ((2755, 2833), 'tensorflow.keras.layers.Conv1D', 'keras.layers.Conv1D', ([], {'filters': '(n_feature_maps * 2)', 'kernel_size': '(5)', 'padding': '"""same"""'}), "(filters=n_feature_maps * 2, kernel_size=5, padding='same')\n", (2774, 2833), True, 'import tensorflow.keras as keras\n'), ((2859, 2892), 'tensorflow.keras.layers.BatchNormalization', 'keras.layers.BatchNormalization', ([], {}), '()\n', (2890, 2892), True, 'import tensorflow.keras as keras\n'), ((2918, 2949), 'tensorflow.keras.layers.Activation', 'keras.layers.Activation', (['"""relu"""'], {}), "('relu')\n", (2941, 2949), True, 'import tensorflow.keras as keras\n'), ((2976, 3054), 'tensorflow.keras.layers.Conv1D', 'keras.layers.Conv1D', ([], {'filters': '(n_feature_maps * 2)', 'kernel_size': '(3)', 'padding': '"""same"""'}), "(filters=n_feature_maps * 2, kernel_size=3, 
padding='same')\n", (2995, 3054), True, 'import tensorflow.keras as keras\n'), ((3080, 3113), 'tensorflow.keras.layers.BatchNormalization', 'keras.layers.BatchNormalization', ([], {}), '()\n', (3111, 3113), True, 'import tensorflow.keras as keras\n'), ((3204, 3237), 'tensorflow.keras.layers.BatchNormalization', 'keras.layers.BatchNormalization', ([], {}), '()\n', (3235, 3237), True, 'import tensorflow.keras as keras\n'), ((3344, 3375), 'tensorflow.keras.layers.Activation', 'keras.layers.Activation', (['"""relu"""'], {}), "('relu')\n", (3367, 3375), True, 'import tensorflow.keras as keras\n'), ((3430, 3467), 'tensorflow.keras.layers.GlobalAveragePooling1D', 'keras.layers.GlobalAveragePooling1D', ([], {}), '()\n', (3465, 3467), True, 'import tensorflow.keras as keras\n'), ((3556, 3608), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['nb_classes'], {'activation': '"""softmax"""'}), "(nb_classes, activation='softmax')\n", (3574, 3608), True, 'import tensorflow.keras as keras\n')] |
"""Defines common utilities for creating pluggable Order models."""
from django.conf import settings
from django.db import models
try:
from django.dispatch import Signal
except ImportError:
from mauveinternet.signals import Signal
# base set of order status options;
# these are expected to be present for payment handlers to use
STATUS_OPTIONS=[
('N', 'New'),
('P', 'Paid'),
('C', 'Completed'), # ie. paid and fulfilled
('D', 'Declined'), # declined by payment gateway
('F', 'Failed'), # error from payment gateway
('R', 'Rejected'), # manually rejected by user or administrator
]
class OrderManager(models.Manager):
def for_order_number(self, number):
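# Customer-facing order numbers are the database id offset by
# settings.ORDER_NUMBER_BASE (see OrderBase.order_number below).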
try:
id = int(number) - settings.ORDER_NUMBER_BASE
except ValueError:
from mauveinternet.ordering.models import get_order_model
model = get_order_model()
raise model.DoesNotExist("Not a valid order number")
return self.get(id=id)
def new(self):
return self.exclude(status__in=['R', 'C'])
class OrderBase(object):
"""Mix-in class providing some additional methods for order implementations"""
# Signal so that apps can opt to receive order status change events
status_change = Signal(providing_args=['status', 'previous_status'])
def order_number(self):
return '%05d' % (self.id + settings.ORDER_NUMBER_BASE)
def get_total(self):
return self.total
def set_status(self, status, message):
prevstatus = self.status
self.orderstatuschange_set.create(previous_status=prevstatus, message=message)
self.status = status
self.save()
# Dispatch signal
if status != prevstatus:
self.status_change.send(sender=self, status=status, previous_status=prevstatus)
def history(self):
return self.orderstatuschange_set.order_by('date')
| [
"mauveinternet.signals.Signal",
"mauveinternet.ordering.models.get_order_model"
] | [((1357, 1409), 'mauveinternet.signals.Signal', 'Signal', ([], {'providing_args': "['status', 'previous_status']"}), "(providing_args=['status', 'previous_status'])\n", (1363, 1409), False, 'from mauveinternet.signals import Signal\n'), ((968, 985), 'mauveinternet.ordering.models.get_order_model', 'get_order_model', ([], {}), '()\n', (983, 985), False, 'from mauveinternet.ordering.models import get_order_model\n')] |
import os
import unicodedata
import discord # type: ignore
from discord.ext import commands # type: ignore
import botto
from botto.core.models.kanjidic2 import Kanji, KanjiMeaningsReadings
from botto.utils import kanjivg_gif, kanimaji
class KanjiSearch(commands.Cog):
def __init__(self, bot: botto.Botto) -> None:
self.bot: botto.Botto = bot
async def get_stroke_diagram(self, character: str) -> discord.File:
codepoint = f"{ord(character):05x}"
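# KanjiVG assets are keyed by the character's zero-padded 5-digit hex codepoint.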
# filename = f"resources/data/kanjivg_gif/{codepoint}.gif"
# if os.path.isfile(filename):
# return discord.File(
# filename, f"{unicodedata.name(character)}.gif".replace(" ", "_")
# )
# else:
# return await self.create_kanji_vg_gif(character)
filename = f"resources/data/kanjivg_kanimaji_gif/{codepoint}_anim.gif"
if os.path.isfile(filename):
return discord.File(
filename, f"{unicodedata.name(character)}.gif".replace(" ", "_")
)
else:
return await self.create_kanimaji_gif(character)
async def create_kanji_vg_gif(self, character: str) -> discord.File:
codepoint = f"{ord(character):05x}"
filename = f"resources/data/kanjivg_svg/{codepoint}.svg"
if not os.path.isfile(filename):
raise ValueError("No stroke diagram found.")
output = f"resources/data/kanjivg_gif/{codepoint}.gif"
await self.bot.loop.run_in_executor(
None, kanjivg_gif.create_gif, filename, output
)
return discord.File(
output, f"{unicodedata.name(character)}.gif".replace(" ", "_")
)
async def create_kanimaji_gif(self, character: str) -> discord.File:
codepoint = f"{ord(character):05x}"
filename = f"resources/data/kanjivg_svg/{codepoint}.svg"
if not os.path.isfile(filename):
raise ValueError("No stroke diagram found.")
output = f"resources/data/kanjivg_kanimaji_gif/{codepoint}_anim.gif"
await self.bot.loop.run_in_executor(
None, kanimaji.create_gif, filename, output
)
return discord.File(
output, f"{unicodedata.name(character)}.gif".replace(" ", "_")
)
@botto.command(name="kanji", aliases=["k", "かんじ", "漢字"])
async def kanji_search(self, ctx: botto.Context, kanji: str) -> None:
"""Look up a kanji character."""
if len(kanji) > 1:
await ctx.send("Try doing one character at a time.")
return
legal_prefixes = ("CJK UNIFIED IDEOGRAPH", "CJK COMPATIBILITY IDEOGRAPH")
if not unicodedata.name(kanji, "").startswith(legal_prefixes):
await ctx.send(
"Not found in the Japanese Industrial Standard (JIS) X kanji sets."
)
return
_kanji = await Kanji.query.where(Kanji.character == kanji).gino.first()
if _kanji is None:
await ctx.send(
"Not found in the Japanese Industrial Standard (JIS) X kanji sets."
)
return
meanings_readings = await KanjiMeaningsReadings.query.where(
KanjiMeaningsReadings.character == kanji
).gino.all()
embed: discord.Embed = discord.Embed(colour=botto.config["MAIN_COLOUR"])
embed.set_author(name=f"Kanji Lookup - {_kanji}")
embed.description = f"Stroke count: {_kanji.stroke_count}"
if _kanji.grade:
embed.description += f"\nGrade: {_kanji.grade}"
if _kanji.frequency_rank:
embed.description += f"\nFrequency rank: #{_kanji.frequency_rank}"
if _kanji.old_jlpt_level:
embed.description += f"\nFormer JLPT level: {_kanji.old_jlpt_level}"
lines = []
for i, mr_object in enumerate(meanings_readings):
if mr_object.meanings:
lines.append("__" + "/".join(mr_object.meanings) + "__")
else:
lines.append("*(miscellaneous readings)*")
if mr_object.kun_readings:
lines.append(
"**kun:** " + "\N{IDEOGRAPHIC COMMA}".join(mr_object.kun_readings)
)
if mr_object.on_readings:
lines.append(
"**on:** " + "\N{IDEOGRAPHIC COMMA}".join(mr_object.on_readings)
)
if i + 1 != len(meanings_readings):
lines.append("\n")
if meanings_readings:
embed.add_field(
name="Meanings and Readings", value="\n".join(lines), inline=False
)
if _kanji.nanori:
embed.add_field(
name="Nanori (Pronunciation in names)",
value="\N{IDEOGRAPHIC COMMA}".join(_kanji.nanori),
inline=False,
)
other_kwargs = {}
try:
stroke_diagram = await self.get_stroke_diagram(kanji)
embed.set_thumbnail(url=f"attachment://{stroke_diagram.filename}")
other_kwargs["file"] = stroke_diagram
except ValueError:
pass
await ctx.send(embed=embed, **other_kwargs)
@kanji_search.help_embed
async def kanji_help_embed(self, help_command) -> discord.Embed:
embed: discord.Embed = discord.Embed(colour=botto.config["MAIN_COLOUR"])
embed.set_author(name=self.kanji_search.name + " " + self.kanji_search.signature)
embed.description = (
f"{self.kanji_search.short_doc}\n\n" # pylint: disable=no-member
f"Kanji are the adopted logographic Chinese characters that are used in "
f"the Japanese writing system. They are used alongside the Japanese "
f"syllabic scripts hiragana and katakana. "
f"The Japanese term kanji for the Chinese characters literally means "
f'"Han characters". It is written with the same characters in the Chinese '
f"language to refer to the character writing system, hanzi (漢字).\n\n"
f"Tango looks through "
f"[KANJIDIC2](http://www.edrdg.org/wiki/index.php/KANJIDIC_Project) "
f"to provide you information on a kanji character.\n\n"
f"Animated stroke diagrams, when available, are generated using "
f"data from [KanjiVG](https://kanjivg.tagaini.net/) and "
f"[Yorwba's script](https://github.com/Yorwba/kanjivg-gif).\n\n"
f"You can also use the Jisho command to look up more information regarding "
f"a kanji character and its usages. To learn more, type "
f"`tango help jisho`."
)
embed.add_field(
name="Command Aliases",
value=" / ".join(self.kanji_search.aliases), # pylint: disable=no-member
)
return embed
@botto.command(aliases=["so", "ひつじゅん", "筆順", "かきじゅん", "書き順"])
async def strokeorder(self, ctx: botto.Context, kanji: str) -> None:
"""View an animated stroke diagram of a kanji."""
if len(kanji) > 1:
await ctx.send("Try doing one character at a time.")
return
legal_prefixes = ("CJK UNIFIED IDEOGRAPH", "CJK COMPATIBILITY IDEOGRAPH")
if not unicodedata.name(kanji, "").startswith(legal_prefixes):
await ctx.send(
"Not found in the Japanese Industrial Standard (JIS) X kanji sets."
)
return
async with ctx.typing():
try:
stroke_diagram = await self.get_stroke_diagram(kanji)
except ValueError as exc:
await ctx.send(exc)
return
await ctx.send(file=stroke_diagram)
@strokeorder.help_embed
async def strokeorder_help_embed(self, help_command) -> discord.Embed:
embed: discord.Embed = discord.Embed(colour=botto.config["MAIN_COLOUR"])
embed.set_author(name=self.strokeorder.name + " " + self.strokeorder.signature)
embed.description = (
f"{self.strokeorder.short_doc}\n\n" # pylint: disable=no-member
f"Animated stroke diagrams are generated using data from "
f"[KanjiVG](https://kanjivg.tagaini.net/) and "
f"[Yorwba's script](https://github.com/Yorwba/kanjivg-gif)."
)
embed.add_field(
name="Command Aliases",
value=" / ".join(self.strokeorder.aliases), # pylint: disable=no-member
)
return embed
def setup(bot: botto.Botto) -> None:
# Temporary solution to issue where commands in cog instances lose their help embed
cog = KanjiSearch(bot)
cog.kanji_search.help_embed(cog.kanji_help_embed)
cog.strokeorder.help_embed(cog.strokeorder_help_embed)
bot.add_cog(cog)
| [
"botto.core.models.kanjidic2.KanjiMeaningsReadings.query.where",
"os.path.isfile",
"unicodedata.name",
"botto.command",
"discord.Embed",
"botto.core.models.kanjidic2.Kanji.query.where"
] | [((2276, 2331), 'botto.command', 'botto.command', ([], {'name': '"""kanji"""', 'aliases': "['k', 'かんじ', '漢字']"}), "(name='kanji', aliases=['k', 'かんじ', '漢字'])\n", (2289, 2331), False, 'import botto\n'), ((6834, 6894), 'botto.command', 'botto.command', ([], {'aliases': "['so', 'ひつじゅん', '筆順', 'かきじゅん', '書き順']"}), "(aliases=['so', 'ひつじゅん', '筆順', 'かきじゅん', '書き順'])\n", (6847, 6894), False, 'import botto\n'), ((886, 910), 'os.path.isfile', 'os.path.isfile', (['filename'], {}), '(filename)\n', (900, 910), False, 'import os\n'), ((3287, 3336), 'discord.Embed', 'discord.Embed', ([], {'colour': "botto.config['MAIN_COLOUR']"}), "(colour=botto.config['MAIN_COLOUR'])\n", (3300, 3336), False, 'import discord\n'), ((5310, 5359), 'discord.Embed', 'discord.Embed', ([], {'colour': "botto.config['MAIN_COLOUR']"}), "(colour=botto.config['MAIN_COLOUR'])\n", (5323, 5359), False, 'import discord\n'), ((7834, 7883), 'discord.Embed', 'discord.Embed', ([], {'colour': "botto.config['MAIN_COLOUR']"}), "(colour=botto.config['MAIN_COLOUR'])\n", (7847, 7883), False, 'import discord\n'), ((1313, 1337), 'os.path.isfile', 'os.path.isfile', (['filename'], {}), '(filename)\n', (1327, 1337), False, 'import os\n'), ((1885, 1909), 'os.path.isfile', 'os.path.isfile', (['filename'], {}), '(filename)\n', (1899, 1909), False, 'import os\n'), ((2656, 2683), 'unicodedata.name', 'unicodedata.name', (['kanji', '""""""'], {}), "(kanji, '')\n", (2672, 2683), False, 'import unicodedata\n'), ((7235, 7262), 'unicodedata.name', 'unicodedata.name', (['kanji', '""""""'], {}), "(kanji, '')\n", (7251, 7262), False, 'import unicodedata\n'), ((2881, 2924), 'botto.core.models.kanjidic2.Kanji.query.where', 'Kanji.query.where', (['(Kanji.character == kanji)'], {}), '(Kanji.character == kanji)\n', (2898, 2924), False, 'from botto.core.models.kanjidic2 import Kanji, KanjiMeaningsReadings\n'), ((3146, 3221), 'botto.core.models.kanjidic2.KanjiMeaningsReadings.query.where', 'KanjiMeaningsReadings.query.where', (['(KanjiMeaningsReadings.character == kanji)'], {}), '(KanjiMeaningsReadings.character == kanji)\n', (3179, 3221), False, 'from botto.core.models.kanjidic2 import Kanji, KanjiMeaningsReadings\n'), ((1625, 1652), 'unicodedata.name', 'unicodedata.name', (['character'], {}), '(character)\n', (1641, 1652), False, 'import unicodedata\n'), ((2208, 2235), 'unicodedata.name', 'unicodedata.name', (['character'], {}), '(character)\n', (2224, 2235), False, 'import unicodedata\n'), ((974, 1001), 'unicodedata.name', 'unicodedata.name', (['character'], {}), '(character)\n', (990, 1001), False, 'import unicodedata\n')] |
import tensorflow as tf
import numpy as np
import Net
import os
import matplotlib
# OSX fix
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import seaborn as sns
plainTextLength = 8
keyLength = 8
N = plainTextLength / 2
batch = 4096
learningRate = 0.0008
TRAIN_STEP= 10000
iterations = 1
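# Plaintexts and keys are sampled as random vectors of +/-1 bits (shape: batch x N).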
def get_random_block(N, batch):
return 2 * np.random.randint(2, size=(batch, N)) - 1
def train():
with tf.name_scope('input_variable'):
plain = tf.placeholder(tf.float32, shape=[None, plainTextLength], name='plainText')
key = tf.placeholder(tf.float32, shape=[None, keyLength], name='keyText')
Zeros = tf.zeros_like(plain, dtype=tf.float32, name='zeroVector')
# Build the Alice (sender) and Bob (receiver) networks; Bob is trained to
# recover the plaintext from Alice's output and the shared key.
Alice_output, Bob_output = Net._build_Network(plain, key, plainTextLength, keyLength)
reshape_Bob_output = tf.reshape(Bob_output, shape=[-1, plainTextLength])
# Bob L1 loss
with tf.name_scope('Bob_loss'):
Bob_loss = tf.reduce_mean(tf.abs(reshape_Bob_output - plain))
tf.summary.scalar('Bob_loss_value', Bob_loss)
# bit error: compare the sign of Bob's reconstruction against the +/-1 plaintext bits
boolean_P = tf.greater(plain, Zeros)
boolean_B = tf.greater_equal(reshape_Bob_output, Zeros)
accuracy_B = tf.reduce_mean(tf.cast(tf.equal(boolean_B, boolean_P), dtype=tf.float32))
Bob_bits_wrong = plainTextLength - accuracy_B * plainTextLength
tf.summary.scalar('accuracy_B_value', accuracy_B)
tf.summary.scalar('Bob_bits_wrong', Bob_bits_wrong)
A_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'Alice')
B_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'Bob')
AB_vars = A_vars + B_vars
Alice_Bob_optimizer = tf.train.AdamOptimizer(learningRate).minimize(Bob_loss, var_list=AB_vars)
merged = tf.summary.merge_all()
with tf.Session() as session:
session.run(tf.global_variables_initializer())
bob01_bits_wrong = []
bob_acc = []
train_writer = tf.summary.FileWriter('./adver_logs', session.graph)
if not os.path.exists('adver_logs'):
os.makedirs('adver_logs')
for step in range(TRAIN_STEP):
# train Bob
print ('Training Alice and Bob, Epoch:', step + 1)
feedDict = {plain: get_random_block(plainTextLength, batch),
key: get_random_block(keyLength, batch)}
for index in range(iterations):
_, Bob_error, Bob_accuracy, Bob_wrong_bits,summary = session.run(
[Alice_Bob_optimizer, Bob_loss, accuracy_B, Bob_bits_wrong, merged], feed_dict=feedDict)
Bob_accuracy_bits = Bob_accuracy * plainTextLength
bob01_bits_wrong.append(Bob_wrong_bits)
bob_acc.append(Bob_accuracy_bits)
res_a = session.run([Alice_output], feedDict)
print(Bob_accuracy_bits)
print(Bob_wrong_bits)
train_writer.add_summary(summary, step)
sns.set_style("darkgrid")
plt.plot(bob01_bits_wrong)
plt.legend(['bob'])
plt.xlabel('Epoch')
plt.ylabel('bits_wrong achieved')
plt.savefig("Graphname.png")
saver = tf.train.Saver()
saver.save(session,'model/save_net.ckpt',global_step=TRAIN_STEP)
def main(argv=None):
train()
if __name__ == '__main__':
tf.app.run()
| [
"tensorflow.equal",
"matplotlib.pyplot.ylabel",
"seaborn.set_style",
"tensorflow.app.run",
"os.path.exists",
"tensorflow.placeholder",
"tensorflow.Session",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xlabel",
"tensorflow.greater",
"tensorflow.zeros_like",
"tensorflow.summary.scalar",
"tensorflow.train.AdamOptimizer",
"tensorflow.summary.merge_all",
"matplotlib.pyplot.savefig",
"matplotlib.use",
"tensorflow.reshape",
"tensorflow.summary.FileWriter",
"matplotlib.pyplot.legend",
"os.makedirs",
"tensorflow.train.Saver",
"Net._build_Network",
"tensorflow.global_variables_initializer",
"numpy.random.randint",
"tensorflow.name_scope",
"tensorflow.abs",
"tensorflow.greater_equal",
"tensorflow.get_collection"
] | [((93, 114), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (107, 114), False, 'import matplotlib\n'), ((631, 688), 'tensorflow.zeros_like', 'tf.zeros_like', (['plain'], {'dtype': 'tf.float32', 'name': '"""zeroVector"""'}), "(plain, dtype=tf.float32, name='zeroVector')\n", (644, 688), True, 'import tensorflow as tf\n'), ((727, 785), 'Net._build_Network', 'Net._build_Network', (['plain', 'key', 'plainTextLength', 'keyLength'], {}), '(plain, key, plainTextLength, keyLength)\n', (745, 785), False, 'import Net\n'), ((811, 862), 'tensorflow.reshape', 'tf.reshape', (['Bob_output'], {'shape': '[-1, plainTextLength]'}), '(Bob_output, shape=[-1, plainTextLength])\n', (821, 862), True, 'import tensorflow as tf\n'), ((991, 1036), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""Bob_loss_value"""', 'Bob_loss'], {}), "('Bob_loss_value', Bob_loss)\n", (1008, 1036), True, 'import tensorflow as tf\n'), ((1066, 1090), 'tensorflow.greater', 'tf.greater', (['plain', 'Zeros'], {}), '(plain, Zeros)\n', (1076, 1090), True, 'import tensorflow as tf\n'), ((1107, 1150), 'tensorflow.greater_equal', 'tf.greater_equal', (['reshape_Bob_output', 'Zeros'], {}), '(reshape_Bob_output, Zeros)\n', (1123, 1150), True, 'import tensorflow as tf\n'), ((1314, 1363), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""accuracy_B_value"""', 'accuracy_B'], {}), "('accuracy_B_value', accuracy_B)\n", (1331, 1363), True, 'import tensorflow as tf\n'), ((1368, 1419), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""Bob_bits_wrong"""', 'Bob_bits_wrong'], {}), "('Bob_bits_wrong', Bob_bits_wrong)\n", (1385, 1419), True, 'import tensorflow as tf\n'), ((1434, 1494), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.TRAINABLE_VARIABLES', '"""Alice"""'], {}), "(tf.GraphKeys.TRAINABLE_VARIABLES, 'Alice')\n", (1451, 1494), True, 'import tensorflow as tf\n'), ((1508, 1566), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.TRAINABLE_VARIABLES', '"""Bob"""'], {}), "(tf.GraphKeys.TRAINABLE_VARIABLES, 'Bob')\n", (1525, 1566), True, 'import tensorflow as tf\n'), ((1712, 1734), 'tensorflow.summary.merge_all', 'tf.summary.merge_all', ([], {}), '()\n', (1732, 1734), True, 'import tensorflow as tf\n'), ((3256, 3268), 'tensorflow.app.run', 'tf.app.run', ([], {}), '()\n', (3266, 3268), True, 'import tensorflow as tf\n'), ((411, 442), 'tensorflow.name_scope', 'tf.name_scope', (['"""input_variable"""'], {}), "('input_variable')\n", (424, 442), True, 'import tensorflow as tf\n'), ((460, 535), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, plainTextLength]', 'name': '"""plainText"""'}), "(tf.float32, shape=[None, plainTextLength], name='plainText')\n", (474, 535), True, 'import tensorflow as tf\n'), ((550, 617), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, keyLength]', 'name': '"""keyText"""'}), "(tf.float32, shape=[None, keyLength], name='keyText')\n", (564, 617), True, 'import tensorflow as tf\n'), ((890, 915), 'tensorflow.name_scope', 'tf.name_scope', (['"""Bob_loss"""'], {}), "('Bob_loss')\n", (903, 915), True, 'import tensorflow as tf\n'), ((1745, 1757), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (1755, 1757), True, 'import tensorflow as tf\n'), ((1900, 1952), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (['"""./adver_logs"""', 'session.graph'], {}), "('./adver_logs', session.graph)\n", (1921, 1952), True, 'import tensorflow as tf\n'), ((2887, 2912), 'seaborn.set_style', 'sns.set_style', 
(['"""darkgrid"""'], {}), "('darkgrid')\n", (2900, 2912), True, 'import seaborn as sns\n'), ((2921, 2947), 'matplotlib.pyplot.plot', 'plt.plot', (['bob01_bits_wrong'], {}), '(bob01_bits_wrong)\n', (2929, 2947), True, 'import matplotlib.pyplot as plt\n'), ((2956, 2975), 'matplotlib.pyplot.legend', 'plt.legend', (["['bob']"], {}), "(['bob'])\n", (2966, 2975), True, 'import matplotlib.pyplot as plt\n'), ((2984, 3003), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epoch"""'], {}), "('Epoch')\n", (2994, 3003), True, 'import matplotlib.pyplot as plt\n'), ((3012, 3045), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""bits_wrong achieved"""'], {}), "('bits_wrong achieved')\n", (3022, 3045), True, 'import matplotlib.pyplot as plt\n'), ((3054, 3082), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""Graphname.png"""'], {}), "('Graphname.png')\n", (3065, 3082), True, 'import matplotlib.pyplot as plt\n'), ((3100, 3116), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (3114, 3116), True, 'import tensorflow as tf\n'), ((346, 383), 'numpy.random.randint', 'np.random.randint', (['(2)'], {'size': '(batch, N)'}), '(2, size=(batch, N))\n', (363, 383), True, 'import numpy as np\n'), ((951, 985), 'tensorflow.abs', 'tf.abs', (['(reshape_Bob_output - plain)'], {}), '(reshape_Bob_output - plain)\n', (957, 985), True, 'import tensorflow as tf\n'), ((1191, 1221), 'tensorflow.equal', 'tf.equal', (['boolean_B', 'boolean_P'], {}), '(boolean_B, boolean_P)\n', (1199, 1221), True, 'import tensorflow as tf\n'), ((1624, 1660), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['learningRate'], {}), '(learningRate)\n', (1646, 1660), True, 'import tensorflow as tf\n'), ((1790, 1823), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (1821, 1823), True, 'import tensorflow as tf\n'), ((1968, 1996), 'os.path.exists', 'os.path.exists', (['"""adver_logs"""'], {}), "('adver_logs')\n", (1982, 1996), False, 'import os\n'), ((2010, 2035), 'os.makedirs', 'os.makedirs', (['"""adver_logs"""'], {}), "('adver_logs')\n", (2021, 2035), False, 'import os\n')] |
import mdtraj as md
import networkx as nx
import numpy as np
from msibi.utils.general import get_fn
from msibi.utils.find_exclusions import find_1_n_exclusions
from msibi.utils.find_exclusions import is_1_n
def test_select_pair_no_exclusions():
"""Test pair selection without exclusions"""
top = md.load(get_fn('2chains.hoomdxml')).top
pairs = top.select_pairs("name 'tail'", "name 'tail'")
assert pairs.shape[0] == 190
def test_find_1_n_exclusions():
top = md.load(get_fn('2chains.hoomdxml')).top
pairs = top.select_pairs("name 'tail'", "name 'tail'")
to_delete = find_1_n_exclusions(top, pairs, 3)
assert to_delete.shape[0] == 28
def test_select_pair_with_exclusions():
traj = md.load(get_fn('2chains.hoomdxml'))
pairs = traj.top.select_pairs("name 'tail'", "name 'tail'")
to_delete = find_1_n_exclusions(traj.top, pairs, 3)
pairs = np.delete(pairs, to_delete, axis=0)
assert pairs.shape[0] == 162
def test_is_exclusion():
top = md.load(get_fn('2chains.hoomdxml')).top
G = nx.Graph()
G.add_nodes_from([a.index for a in top.atoms])
bonds = [b for b in top.bonds]
bonds_by_index = [(b[0].index, b[1].index) for b in bonds]
G.add_edges_from(bonds_by_index)
tail_tail = top.select_pairs("name 'tail'", "name 'tail'")
assert is_1_n(tail_tail[0], 3, G)
assert is_1_n(tail_tail[0], 2, G)
assert is_1_n(tail_tail[0], 4, G)
assert is_1_n(tail_tail[2], 4, G)
assert not is_1_n(tail_tail[2], 3, G)
| [
"numpy.delete",
"networkx.Graph",
"msibi.utils.find_exclusions.find_1_n_exclusions",
"msibi.utils.general.get_fn",
"msibi.utils.find_exclusions.is_1_n"
] | [((598, 632), 'msibi.utils.find_exclusions.find_1_n_exclusions', 'find_1_n_exclusions', (['top', 'pairs', '(3)'], {}), '(top, pairs, 3)\n', (617, 632), False, 'from msibi.utils.find_exclusions import find_1_n_exclusions\n'), ((838, 877), 'msibi.utils.find_exclusions.find_1_n_exclusions', 'find_1_n_exclusions', (['traj.top', 'pairs', '(3)'], {}), '(traj.top, pairs, 3)\n', (857, 877), False, 'from msibi.utils.find_exclusions import find_1_n_exclusions\n'), ((890, 925), 'numpy.delete', 'np.delete', (['pairs', 'to_delete'], {'axis': '(0)'}), '(pairs, to_delete, axis=0)\n', (899, 925), True, 'import numpy as np\n'), ((1044, 1054), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (1052, 1054), True, 'import networkx as nx\n'), ((1315, 1341), 'msibi.utils.find_exclusions.is_1_n', 'is_1_n', (['tail_tail[0]', '(3)', 'G'], {}), '(tail_tail[0], 3, G)\n', (1321, 1341), False, 'from msibi.utils.find_exclusions import is_1_n\n'), ((1353, 1379), 'msibi.utils.find_exclusions.is_1_n', 'is_1_n', (['tail_tail[0]', '(2)', 'G'], {}), '(tail_tail[0], 2, G)\n', (1359, 1379), False, 'from msibi.utils.find_exclusions import is_1_n\n'), ((1391, 1417), 'msibi.utils.find_exclusions.is_1_n', 'is_1_n', (['tail_tail[0]', '(4)', 'G'], {}), '(tail_tail[0], 4, G)\n', (1397, 1417), False, 'from msibi.utils.find_exclusions import is_1_n\n'), ((1429, 1455), 'msibi.utils.find_exclusions.is_1_n', 'is_1_n', (['tail_tail[2]', '(4)', 'G'], {}), '(tail_tail[2], 4, G)\n', (1435, 1455), False, 'from msibi.utils.find_exclusions import is_1_n\n'), ((730, 756), 'msibi.utils.general.get_fn', 'get_fn', (['"""2chains.hoomdxml"""'], {}), "('2chains.hoomdxml')\n", (736, 756), False, 'from msibi.utils.general import get_fn\n'), ((1471, 1497), 'msibi.utils.find_exclusions.is_1_n', 'is_1_n', (['tail_tail[2]', '(3)', 'G'], {}), '(tail_tail[2], 3, G)\n', (1477, 1497), False, 'from msibi.utils.find_exclusions import is_1_n\n'), ((315, 341), 'msibi.utils.general.get_fn', 'get_fn', (['"""2chains.hoomdxml"""'], {}), "('2chains.hoomdxml')\n", (321, 341), False, 'from msibi.utils.general import get_fn\n'), ((491, 517), 'msibi.utils.general.get_fn', 'get_fn', (['"""2chains.hoomdxml"""'], {}), "('2chains.hoomdxml')\n", (497, 517), False, 'from msibi.utils.general import get_fn\n'), ((1004, 1030), 'msibi.utils.general.get_fn', 'get_fn', (['"""2chains.hoomdxml"""'], {}), "('2chains.hoomdxml')\n", (1010, 1030), False, 'from msibi.utils.general import get_fn\n')] |
from oc_util.email_util import fix_garbled_mail
fix_garbled_mail() | [
"oc_util.email_util.fix_garbled_mail"
] | [((49, 67), 'oc_util.email_util.fix_garbled_mail', 'fix_garbled_mail', ([], {}), '()\n', (65, 67), False, 'from oc_util.email_util import fix_garbled_mail\n')] |
from Bio.Seq import Seq
from Bio.Alphabet import IUPAC
from Bio.Seq import MutableSeq
# Seq doesn't inherit from String
print(Seq.__bases__)
# Example of f string formatting using Seq
my_seq = Seq("AGTACACTGGT", IUPAC.unambiguous_dna)
print(f"The value of my_seq is {my_seq}.")
# Example of using the format method on a Seq object
print("my_seq can be expressed as a string using the format method: {}".format(my_seq))
# Can Seq objects be overwritten by reverse complements or are they immutable?
my_seq = my_seq.reverse_complement()
print(f"The value of my_seq is {my_seq}.")
# What methods are available from MutableSeq
print(f"MutableSeq attributes and methods are {dir(MutableSeq)}.")
| [
"Bio.Seq.Seq"
] | [((195, 236), 'Bio.Seq.Seq', 'Seq', (['"""AGTACACTGGT"""', 'IUPAC.unambiguous_dna'], {}), "('AGTACACTGGT', IUPAC.unambiguous_dna)\n", (198, 236), False, 'from Bio.Seq import Seq\n')] |
import adv_test
from adv import *
import copy
from module.fsalt import *
def module():
return Albert
class Albert(Adv):
conf = {
'mod_a1':('fs','passive',0.5),
#'mod_wp':('s','passive',0.35),
}
def init(this):
this.fsa_conf = copy.deepcopy(this.conf)
this.fsa_conf.update( {
'fs_dmg':1.02,
'fs_sp':330,
'fs_recovery':26/60.0,
'x1fs_recovery':26/60.0,
})
this.s2timer = Timer(this.s2autocharge,1,1).on()
this.paralyze_count=3
this.s2buff = Selfbuff("s2_shapshift",1, 20,'ss','ss')
this.a2buff = Selfbuff('a2_str_passive',0.25,20,'att','passive')
this.fsalttimer = Timer(this.altend)
fs_alt_init(this, this.fsa_conf)
def altend(this,t):
fs_back(this)
def s2autocharge(this, t):
if not this.a2buff.get():
this.s2.charge(160000.0/40)
log('sp','s2autocharge')
def pre(this):
if this.condition('3s1 in on s2'):
this.conf['acl'] = """
`s2, s1.charged>=s1.sp-300
`s1
`s3, not this.s2buff.get()
`fs, seq=2
"""
if this.condition('big hitbox'):
this.s1_proc = this.c_s1_proc
def c_s1_proc(this, e):
if this.s2buff.get():
this.dmg_make("o_s1_s2boost",12.38-0.825+0.83*5)
def s1_proc(this, e):
if this.s2buff.get():
this.dmg_make("o_s1_s2boost",12.38-0.825+0.83*1)
def fs_proc(this, e):
if this.paralyze_count > 0:
if this.s2buff.get():
this.paralyze_count -= 1
this.dmg_make("o_s1_paralyze",0.803*3)
def s2_proc(this, e):
this.s2timer.on()
this.s2buff.on()
this.a2buff.on()
fs_alt(this)
this.fsalttimer(20)
if __name__ == '__main__':
conf = {}
conf['acl'] = """
`s2
`s1, this.s2.charged > 900
`s3
`fs, seq=2 and not this.s2buff.get()
"""
adv_test.test(module(), conf,verbose=0, mass=0)
| [
"copy.deepcopy"
] | [((287, 311), 'copy.deepcopy', 'copy.deepcopy', (['this.conf'], {}), '(this.conf)\n', (300, 311), False, 'import copy\n')] |