The dataset schema (reconstructed from the flattened column header; `⌀` marked a column as nullable):

| Column | Type | Min | Max | Nullable |
|---|---|---|---|---|
| hexsha | string (length) | 40 | 40 | |
| size | int64 | 5 | 2.06M | |
| ext | string (10 classes) | | | |
| lang | string (1 class) | | | |
| max_stars_repo_path | string (length) | 3 | 248 | |
| max_stars_repo_name | string (length) | 5 | 125 | |
| max_stars_repo_head_hexsha | string (length) | 40 | 78 | |
| max_stars_repo_licenses | list (length) | 1 | 10 | |
| max_stars_count | int64 | 1 | 191k | yes |
| max_stars_repo_stars_event_min_datetime | string (length) | 24 | 24 | yes |
| max_stars_repo_stars_event_max_datetime | string (length) | 24 | 24 | yes |
| max_issues_repo_path | string (length) | 3 | 248 | |
| max_issues_repo_name | string (length) | 5 | 125 | |
| max_issues_repo_head_hexsha | string (length) | 40 | 78 | |
| max_issues_repo_licenses | list (length) | 1 | 10 | |
| max_issues_count | int64 | 1 | 67k | yes |
| max_issues_repo_issues_event_min_datetime | string (length) | 24 | 24 | yes |
| max_issues_repo_issues_event_max_datetime | string (length) | 24 | 24 | yes |
| max_forks_repo_path | string (length) | 3 | 248 | |
| max_forks_repo_name | string (length) | 5 | 125 | |
| max_forks_repo_head_hexsha | string (length) | 40 | 78 | |
| max_forks_repo_licenses | list (length) | 1 | 10 | |
| max_forks_count | int64 | 1 | 105k | yes |
| max_forks_repo_forks_event_min_datetime | string (length) | 24 | 24 | yes |
| max_forks_repo_forks_event_max_datetime | string (length) | 24 | 24 | yes |
| content | string (length) | 5 | 2.06M | |
| avg_line_length | float64 | 1 | 1.02M | |
| max_line_length | int64 | 3 | 1.03M | |
| alphanum_fraction | float64 | 0 | 1 | |
| count_classes | int64 | 0 | 1.6M | |
| score_classes | float64 | 0 | 1 | |
| count_generators | int64 | 0 | 651k | |
| score_generators | float64 | 0 | 1 | |
| count_decorators | int64 | 0 | 990k | |
| score_decorators | float64 | 0 | 1 | |
| count_async_functions | int64 | 0 | 235k | |
| score_async_functions | float64 | 0 | 1 | |
| count_documentation | int64 | 0 | 1.04M | |
| score_documentation | float64 | 0 | 1 | |

Each data row below lists these columns in order, separated by `|`, with the file's full source code in the `content` column.
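A minimal sketch of how rows with this schema could be loaded and filtered. The file name `train.parquet` and the use of pandas are assumptions made for illustration; only the column names and their 0–1 score ranges come from the schema above.

```python
import pandas as pd

# Load one shard of the dataset (hypothetical file name).
df = pd.read_parquet("train.parquet")

# Keep smaller, well-documented Python files; the score_* columns range from 0 to 1.
selected = df[(df["score_documentation"] > 0.5) & (df["size"] < 100_000)]

# The actual source of each file lives in the `content` column.
print(selected[["max_stars_repo_name", "max_stars_repo_path"]].head())
```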
dfb68a5201db3b2abf55a2e729e1d1531d27950c | 77 | py | Python | src/buildercore/external.py | elifesciences/builder | 161829686f777f7ac7f97bd970395886ba5089c1 | [
"MIT"
] | 11 | 2017-03-01T18:00:30.000Z | 2021-12-10T05:11:02.000Z | src/buildercore/external.py | elifesciences/builder | 161829686f777f7ac7f97bd970395886ba5089c1 | [
"MIT"
] | 397 | 2016-07-08T14:39:46.000Z | 2022-03-30T12:45:09.000Z | src/buildercore/external.py | elifesciences/builder | 161829686f777f7ac7f97bd970395886ba5089c1 | [
"MIT"
] | 14 | 2016-07-13T08:33:28.000Z | 2020-04-22T21:42:21.000Z | import subprocess
def execute(cmd):
return subprocess.check_output(cmd)
| 15.4 | 39 | 0.779221 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
dfb696c1a314cee61ccd51a38771b72300f8407a | 648 | py | Python | Round 2/data_packing.py | kamyu104/GoogleCodeJam-2014 | ff29a677f502168eb0b92d6928ad6983d2622017 | [
"MIT"
] | 10 | 2016-04-10T22:50:54.000Z | 2021-04-17T18:17:02.000Z | Round 2/data_packing.py | kamyu104/GoogleCodeJam-2014 | ff29a677f502168eb0b92d6928ad6983d2622017 | [
"MIT"
] | null | null | null | Round 2/data_packing.py | kamyu104/GoogleCodeJam-2014 | ff29a677f502168eb0b92d6928ad6983d2622017 | [
"MIT"
] | 10 | 2016-07-19T08:43:38.000Z | 2021-07-22T22:38:44.000Z | # Copyright (c) 2021 kamyu. All rights reserved.
#
# Google Code Jam 2014 Round 2 - Problem A. Data Packing
# https://codingcompetitions.withgoogle.com/codejam/round/0000000000432fed/0000000000432b8d
#
# Time: O(NlogN)
# Space: O(1)
#
def data_packing():
N, X = map(int, raw_input().strip().split())
S = map(int, raw_input().strip().split())
S.sort()
result = 0
left, right = 0, len(S)-1
while left <= right:
if left < right and S[left]+S[right] <= X:
left += 1
right -= 1
result += 1
return result
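# Worked example (illustrative, not part of the original solution): for N=3, X=3 and
# S=[1, 2, 3], the sorted two-pointer sweep packs 1 and 2 together (1+2 <= X) and
# leaves 3 on its own disk, so data_packing() returns 2.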
for case in xrange(input()):
print 'Case #%d: %s' % (case+1, data_packing())
| 24.923077 | 91 | 0.598765 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 242 | 0.373457 |
dfb71ae9c49c8ec75050dd6031ca98dd54f66f9f | 18,950 | py | Python | BiModNeuroCNN/training/bimodal_classification.py | cfcooney/BiModNeuroCNN | f79da6150b4186bcbc15d876394f4af8a47076d0 | [
"MIT"
] | 4 | 2020-10-31T21:20:12.000Z | 2022-01-05T16:13:07.000Z | BiModNeuroCNN/training/bimodal_classification.py | cfcooney/BiModNeuroCNN | f79da6150b4186bcbc15d876394f4af8a47076d0 | [
"MIT"
] | null | null | null | BiModNeuroCNN/training/bimodal_classification.py | cfcooney/BiModNeuroCNN | f79da6150b4186bcbc15d876394f4af8a47076d0 | [
"MIT"
] | null | null | null | """
Description: Class for training CNNs using a nested cross-validation method. Train on the inner_fold to obtain
optimized hyperparameters. Train outer_fold to obtain classification performance.
"""
from braindecode.datautil.iterators import BalancedBatchSizeIterator
from braindecode.experiments.stopcriteria import MaxEpochs, NoDecrease, Or
from braindecode.torch_ext.util import set_random_seeds, np_to_var, var_to_np
from braindecode.datautil.signal_target import SignalAndTarget
from braindecode.torch_ext.functions import square, safe_log
import torch as th
from sklearn.model_selection import train_test_split
from BiModNeuroCNN.training.training_utils import current_acc, current_loss
from BiModNeuroCNN.data_loader.data_utils import smote_augmentation, multi_SignalAndTarget
from BiModNeuroCNN.results.results import Results as res
from torch.nn.functional import nll_loss, cross_entropy
from BiModNeuroCNN.training.bimodal_training import Experiment
import numpy as np
import itertools as it
import torch
from torch import optim
import logging
from ast import literal_eval
from BiModNeuroCNN.results.metrics import cross_entropy
import warnings
warnings.filterwarnings("ignore", category=UserWarning)
log = logging.getLogger(__name__)
torch.backends.cudnn.deterministic = True
class Classification:
def __init__(self, model, subnet1_params, subnet2_params, hyp_params, parameters, data_params, model_save_path, tag):
self.model = model
self.subnet1_params = subnet1_params
self.subnet2_params = subnet2_params
self.model_save_path = model_save_path
self.tag = tag
self.best_loss = parameters["best_loss"]
self.batch_size = parameters["batch_size"]
self.monitors = parameters["monitors"]
self.cuda = parameters["cuda"]
self.model_constraint = parameters["model_constraint"]
self.max_increase_epochs = parameters['max_increase_epochs']
self.lr_scheduler = parameters['learning_rate_scheduler']
self.lr_step = parameters['lr_step']
self.lr_gamma = parameters['lr_gamma']
self.n_classes = data_params["n_classes"]
self.n_chans_d1 = data_params["n_chans_d1"]
self.input_time_length_d1= data_params["input_time_length_d1"]
self.n_chans_d2 = data_params["n_chans_d2"]
self.input_time_length_d2 = data_params["input_time_length_d2"]
self.hyp_params = hyp_params
self.activation = "elu"
self.learning_rate = 0.001
self.dropout = 0.1
self.epochs = parameters['epochs']
self.window = None
self.structure = 'deep'
self.n_filts = 10 #n_filts in n-1 filters
self.first_pool = False
self.loss = nll_loss
for key in hyp_params:
setattr(self, key, hyp_params[key])
self.iterator = BalancedBatchSizeIterator(batch_size=self.batch_size)
self.best_params = None
self.model_number = 1
self.y_pred = np.array([])
self.y_true = np.array([])
self.probabilities = np.array([])
def call_model(self):
self.subnet1_params['structure'] = self.structure
self.subnet2_params['structure'] = self.structure
if self.model.__name__ == 'BiModalNet':
model = self.model(n_classes=self.n_classes, in_chans_1=self.n_chans_d1, input_time_1=self.input_time_length_d1,
SubNet_1_params=self.subnet1_params, in_chans_2=self.n_chans_d2,
input_time_2=self.input_time_length_d2, SubNet_2_params=self.subnet2_params,
linear_dims=100, drop_prob=.2, nonlin=torch.nn.functional.leaky_relu,
fc1_out_features=500, fc2_out_features=500, gru_hidden_size=250, gru_n_layers=1)
th.nn.init.kaiming_uniform_(model.fused_linear.weight)
th.nn.init.constant_(model.fused_linear.bias, 0)
elif self.model.__name__ == 'BiModalNet_w_Pool':
model = self.model(n_classes=self.n_classes, in_chans_1=self.n_chans_d1, input_time_1=self.input_time_length_d1,
SubNet_1_params=self.subnet1_params, in_chans_2=self.n_chans_d2,
input_time_2=self.input_time_length_d2, SubNet_2_params=self.subnet2_params,
linear_dims=100, drop_prob=.2, nonlin=torch.nn.functional.leaky_relu,
fc1_out_features=500, fc2_out_features=500, gru_hidden_size=250, gru_n_layers=1)
th.nn.init.kaiming_uniform_(model.fused_linear.weight)
th.nn.init.constant_(model.fused_linear.bias, 0)
return model
def train_model(self, train_set_1, val_set_1, test_set_1, train_set_2, val_set_2, test_set_2, save_model):
"""
:param train_set_1: (np.array) n_trials*n_channels*n_samples
:param val_set_1: (np.array) n_trials*n_channels*n_samples
:param test_set_1: (np.array) n_trials*n_channels*n_samples - can be None when training on inner-fold
:param train_set_2: (np.array) n_trials*n_channels*n_samples
:param val_set_2: (np.array) n_trials*n_channels*n_samples
:param test_set_2: (np.array) n_trials*n_channels*n_samples - can be None when training on inner-fold
:param save_model: (Bool) True if trained model is to be saved
:return: Accuracy and loss scores for the model trained with a given set of hyper-parameters
"""
model = self.call_model()
predictions = None
set_random_seeds(seed=20190629, cuda=self.cuda)
if self.cuda:
model.cuda()
torch.backends.cudnn.deterministic = True
model = torch.nn.DataParallel(model)
log.info(f"Cuda in use")
log.info("%s model: ".format(str(model)))
optimizer = optim.Adam(model.parameters(), lr=self.learning_rate, weight_decay=0.01, eps=1e-8, amsgrad=False)
stop_criterion = Or([MaxEpochs(self.epochs),
NoDecrease('valid_loss', self.max_increase_epochs)])
model_loss_function = None
#####Setup to run the selected model#####
model_test = Experiment(model, train_set_1, val_set_1, train_set_2, val_set_2, test_set_1=test_set_1, test_set_2=test_set_2,
iterator=self.iterator, loss_function=self.loss, optimizer=optimizer,
lr_scheduler=self.lr_scheduler(optimizer, step_size=self.lr_step, gamma=self.lr_gamma),
model_constraint=self.model_constraint, monitors=self.monitors, stop_criterion=stop_criterion,
remember_best_column='valid_misclass', run_after_early_stop=True, model_loss_function=model_loss_function,
cuda=self.cuda, save_file=self.model_save_path, tag=self.tag, save_model=save_model)
model_test.run()
model_acc = model_test.epochs_df['valid_misclass'].astype('float')
model_loss = model_test.epochs_df['valid_loss'].astype('float')
current_val_acc = 1 - current_acc(model_acc)
current_val_loss = current_loss(model_loss)
test_accuracy = None
        if test_set_1 is not None and test_set_2 is not None:
val_metric_index = self.get_model_index(model_test.epochs_df)
test_accuracy = round((1 - model_test.epochs_df['test_misclass'].iloc[val_metric_index]) * 100, 3)
predictions = model_test.model_predictions
probabilities = model_test.model_probabilities
return current_val_acc, current_val_loss, test_accuracy, model_test, predictions, probabilities
def train_inner(self, train_set_1, val_set_1, train_set_2, val_set_2, test_set_1=None, test_set_2=None, augment=False, save_model=False):
"""
:param train_set_1: (np.array) n_trials*n_channels*n_samples
:param val_set_1: (np.array) n_trials*n_channels*n_samples
:param test_set_1: (np.array) n_trials*n_channels*n_samples - can be None when performing HP optimization
:param train_set_2: (np.array) n_trials*n_channels*n_samples
:param val_set_2: (np.array) n_trials*n_channels*n_samples
:param test_set_2: (np.array) n_trials*n_channels*n_samples - can be None when performing HP optimization
:param augment: (Bool) True if data augmentation to be applied - currently only configured for SMOTE augmentation
:param save_model: (Bool) True if trained model is to be saved
:return: Accuracy, loss and cross entropy scores for the model trained with a given set of hyper-parameters
"""
val_acc, val_loss, val_cross_entropy = [], [], []
if augment:
# Only augment training data - never test or validation sets
train_set_1_os, train_labels_1_os = smote_augmentation(train_set_1.X, train_set_1.y, 2)
            train_set_2_os, train_labels_2_os = smote_augmentation(train_set_2.X, train_set_2.y, 2)
train_set_1, train_set_2 = multi_SignalAndTarget((train_set_1_os, train_labels_1_os), (train_set_2_os, train_labels_2_os))
names = list(self.hyp_params.keys())
hyp_param_combs = it.product(*(self.hyp_params[Name] for Name in names))
for hyp_combination in hyp_param_combs:
assert len(hyp_combination) == len(self.hyp_params), f"HP combination must be of equal length to original set."
for i in range(len(self.hyp_params)):
setattr(self, list(self.hyp_params.keys())[i], hyp_combination[i])
if 'window' in self.hyp_params.keys():
# when using classification window as a hyperparameter - currently data would have to be of same number of samples
train_set_1_w = SignalAndTarget(train_set_1.X[:, :, self.window[0]:self.window[1]], train_set_1.y)
val_set_1_w = SignalAndTarget(val_set_1.X[:, :, self.window[0]:self.window[1]], val_set_1.y)
train_set_2_w = SignalAndTarget(train_set_2.X[:, :, self.window[0]:self.window[1]], train_set_2.y)
val_set_2_w = SignalAndTarget(val_set_2.X[:, :, self.window[0]:self.window[1]], val_set_2.y)
current_val_acc, current_val_loss, _, _, _, probabilities = self.train_model(train_set_1_w, val_set_1_w, test_set_1, train_set_2_w,
val_set_2_w, test_set_2, save_model)
else:
current_val_acc, current_val_loss, _, _, _, probabilities = self.train_model(train_set_1, val_set_1, test_set_1, train_set_2,
val_set_2, test_set_2, save_model)
val_acc.append(current_val_acc)
val_loss.append(current_val_loss)
            probabilities = np.array(probabilities).reshape((val_set_1.y.shape[0], self.n_classes))
val_cross_entropy.append(cross_entropy(val_set_1.y, probabilities)) #1 CE value per-HP, repeat for n_folds
return val_acc, val_loss, val_cross_entropy
def train_outer(self, trainsetlist, testsetlist, augment=False, save_model=True, epochs_save_path=None, print_details=False):
"""
:param trainsetlist: (list) data as split by k-folds n_folds*(n_trials*n_channels*n_samples)
:param testsetlist: (list) data as split by k-folds n_folds*(n_trials*n_channels*n_samples)
:param augment: (Bool) True if data augmentation to be applied - currently only configured for SMOTE augmentation
:param save_model: (Bool) True if trained model is to be saved
"""
scores, all_preds, probabilities_list, outer_cross_entropy, fold_models = [],[],[],[],[]
fold_number = 1
for train_set, test_set in zip(trainsetlist, testsetlist):
train_set_1, train_set_2 = train_set[0], train_set[1]
test_set_1, test_set_2 = test_set[0], test_set[1]
train_set_1_X, val_set_1_X, train_set_1_y, val_set_1_y = train_test_split(train_set_1.X, train_set_1.y, test_size=0.2,
shuffle=True, random_state=42, stratify= train_set_1.y)
train_set_2_X, val_set_2_X, train_set_2_y, val_set_2_y = train_test_split(train_set_2.X, train_set_2.y, test_size=0.2,
shuffle=True, random_state=42, stratify= train_set_2.y)
train_set_1, val_set_1, train_set_2, val_set_2 = multi_SignalAndTarget((train_set_1_X, train_set_1_y), (val_set_1_X, val_set_1_y),
(train_set_2_X, train_set_2_y), (val_set_2_X, val_set_2_y))
if augment:
# Only augment training data - never test or validation sets
train_set_1_os, train_labels_1_os = smote_augmentation(train_set_1.X, train_set_1.y, 2)
train_set_2_os, train_labels_2_os = smote_augmentation(train_set_2.X, train_set_2.y, 2)
train_set_1 = SignalAndTarget(train_set_1_os, train_labels_1_os)
train_set_2 = SignalAndTarget(train_set_2_os, train_labels_2_os)
print(train_set_1.X.shape)
if 'window' in self.hyp_params.keys():
# when using classification window as a hyperparameter - currently data would have to be of same number of samples
if type(self.window) == str:
self.window = literal_eval(self.window) # extract tuple of indices
train_set_1_w = SignalAndTarget(train_set_1.X[:,:,self.window[0]:self.window[1]], train_set_1.y)
val_set_1_w = SignalAndTarget(val_set_1.X[:,:,self.window[0]:self.window[1]], val_set_1.y)
test_set_1_w = SignalAndTarget(test_set_1.X[:,:,self.window[0]:self.window[1]], test_set_1.y)
train_set_2_w = SignalAndTarget(train_set_2.X[:,:,self.window[0]:self.window[1]], train_set_2.y)
val_set_2_w = SignalAndTarget(val_set_2.X[:,:,self.window[0]:self.window[1]], val_set_2.y)
test_set_2_w = SignalAndTarget(test_set_2.X[:, :, self.window[0]:self.window[1]], test_set_2.y)
_, _, test_accuracy, optimised_model, predictions, probabilities = self.train_model(train_set_1_w, val_set_1_w, test_set_1_w,
train_set_2_w, val_set_2_w, test_set_2_w, save_model)
if print_details:
print(f"Data 1 train set: {train_set_1.y.shape} | Data 1 val_set: {val_set_1.y.shape} | Data 1 test_set: {test_set_1.y.shape}")
print(f"Data 2 train set: {train_set_2.y.shape} | Data 2 val_set: {val_set_2.y.shape} | Data 2 test_set: {test_set_2.y.shape}")
else:
_, _, test_accuracy, optimised_model, predictions, probabilities = self.train_model(train_set_1, val_set_1, test_set_1,
train_set_2, val_set_2, test_set_2, save_model)
if epochs_save_path != None:
try:
optimised_model.epochs_df.to_excel(f"{epochs_save_path}/epochs{fold_number}.xlsx")
except FileNotFoundError:
optimised_model.epochs_df.to_excel(f"{epochs_save_path}/epochs{fold_number}.xlsx", engine='xlsxwriter')
fold_models.append(optimised_model)
probs_array = []
for lst in probabilities:
for trial in lst:
probs_array.append(trial) # all probabilities for this test-set
probabilities_list.append(probs_array) #outer probabilities to be used for cross-entropy
print(f"/"*20)
scores.append(test_accuracy)
self.concat_y_pred(predictions)
self.concat_y_true(test_set_1.y)
fold_number += 1
for y_true, y_probs in zip(testsetlist, probabilities_list):
outer_cross_entropy.append(cross_entropy(y_true[0].y, y_probs))
return scores, fold_models, self.y_pred, probabilities_list, outer_cross_entropy, self.y_true
def set_best_params(self):
"""
Set optimal hyperparameter values selected from optimization - Best parameter values can be
accessed with BiModNeuroCNN.results.Results.get_best_params() and the list assigned to self.best_params.
"""
assert type(self.best_params) is list, "list of selected parameters required"
for i in range(len(self.hyp_params)):
setattr(self, list(self.hyp_params.keys())[i], self.best_params[i])
@staticmethod
def get_model_index(df):
"""
Returns the row index of a pandas dataframe used for storing epoch-by-epoch results.
:param df: pandas.DataFrame
:return: int index of the selected epoch based on validation metric
"""
valid_metric_index = df['valid_misclass'].idxmin()
best_val_acc = df.index[df['valid_misclass'] == df['valid_misclass'].iloc[valid_metric_index]]
previous_best = 1.0
i = 0
for n, index in enumerate(best_val_acc):
value = df['test_misclass'][index]
if value < previous_best:
previous_best = value
i = n
return best_val_acc[i]
def concat_y_pred(self, y_pred_fold):
"""
        Method for combining all outer-fold predicted labels.
        :param y_pred_fold: array of single-fold predictions.
        :return: all outer-fold predictions in a single array
"""
self.y_pred = np.concatenate((self.y_pred, np.array(y_pred_fold)))
def concat_y_true(self, y_true_fold):
"""
Method for combining all outer-fold ground-truth values.
:param y_true_fold: array of single-fold true values.
        :return: all outer-fold true values in a single array
"""
self.y_true = np.concatenate((self.y_true, np.array(y_true_fold)))
def concat_probabilities(self, probabilities_fold):
"""
        Method for combining all outer-fold class probabilities.
        :param probabilities_fold: array of single-fold probabilities.
        :return: all outer-fold probabilities in a single array
"""
self.probabilities = np.concatenate((self.probabilities, probabilities_fold))
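# Hypothetical usage sketch (not part of the original module; the parameter dicts,
# data splits and BiModalNet reference below are assumptions - only the Classification
# API defined above is taken from this file):
#
#   clf = Classification(BiModalNet, subnet1_params, subnet2_params, hyp_params,
#                        parameters, data_params, model_save_path="models/", tag="demo")
#   val_acc, val_loss, val_ce = clf.train_inner(train_set_1, val_set_1, train_set_2, val_set_2)
#   clf.best_params = selected_params   # list chosen from the inner-fold results
#   clf.set_best_params()
#   scores, models, y_pred, probs, ce, y_true = clf.train_outer(trainsetlist, testsetlist)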
| 57.95107 | 154 | 0.644063 | 17,628 | 0.930237 | 0 | 0 | 716 | 0.037784 | 0 | 0 | 4,701 | 0.248074 |
dfb822e8f7cafa7cb423cc71ade94b740d42328b | 7,462 | py | Python | simple_task_repeater/str_app.py | lavrpetrov/simple-task-repeater | cd56ed52143ac31171fc757c6e1f7740bebe1ed4 | [
"MIT"
] | null | null | null | simple_task_repeater/str_app.py | lavrpetrov/simple-task-repeater | cd56ed52143ac31171fc757c6e1f7740bebe1ed4 | [
"MIT"
] | null | null | null | simple_task_repeater/str_app.py | lavrpetrov/simple-task-repeater | cd56ed52143ac31171fc757c6e1f7740bebe1ed4 | [
"MIT"
] | 1 | 2021-04-20T15:38:44.000Z | 2021-04-20T15:38:44.000Z | import datetime
from collections import Counter
from functools import wraps
from dateparser import parse as parse_date
from calmlib import get_current_date, get_current_datetime, to_date, trim
from .base import Task
from .str_database import STRDatabase
from .telegram_bot import TelegramBot, command, catch_errors
DEFAULT_PERIOD = 4
TASK_PER_DAY_LIMIT = 3
class STRApp(TelegramBot):
# todo: rewrite all commands, add decorator that parses message and passes it to the command as kwargs.
@wraps(TelegramBot.__init__)
def __init__(self, db: STRDatabase, *args, **kwargs):
super().__init__(*args, **kwargs)
self.db = db
self._actualize_tasks()
self._last_actualize_date = get_current_date()
@staticmethod
def _tokenize_message(message):
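        # Illustrative example (the command text below is assumed, not from the original code):
        #   "/add groceries buy milk period: 2 date: tomorrow"
        # tokenises to
        #   {'shortcut': 'groceries', 'text': 'buy milk', 'period': '2', 'date': 'tomorrow'}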
result = {}
# cut off command code and get shortcut.
parts = message.split(maxsplit=2)
if len(parts) == 1:
raise ValueError("No task shortcut provided")
elif len(parts) == 2:
return {'shortcut': parts[1]}
_, result['shortcut'], message = parts
parts = message.split(':')
key = "text"
for part in parts[:-1]:
result[key], key = map(str.strip, part.rsplit(maxsplit=1))
result[key] = parts[-1].strip()
if not result['text']:
del result['text']
return result
def _parse_task(self, user, task):
"""
"""
if 'date' in task:
try:
task['date'] = parse_date(task['date'])
            except Exception:
raise ValueError(f"Failed to parse date {task['date']}")
else:
task['date'] = self._determine_suitable_date(user)
if 'period' in task:
task['period'] = int(task['period'])
else:
task['period'] = self._determine_suitable_period(user)
return task
def _determine_suitable_period(self, user):
# todo: count current tasks and estimate period necessary to stay below task_per_day_limit
# discard large-period tasks.
return DEFAULT_PERIOD
def _determine_suitable_date(self, user_name):
tasks = self.db.get_users_tasks(user_name)
tasks_dates = Counter([task.date.date() for task in tasks])
# find_date
task_date = get_current_datetime()
td = datetime.timedelta(days=1)
while tasks_dates[task_date.date()] >= TASK_PER_DAY_LIMIT:
task_date += td
# this naturally stops because each task register only once.
return task_date
def parse_message(self, user, message):
return self._parse_task(user, STRApp._tokenize_message(message))
@command
@catch_errors
def add(self, user, message):
"""
Add new task from message
Message should have format
{shortcut} {task text} period:1 {key}:{value}
"""
result = ""
task = {'user': user}
task.update(self.parse_message(user, message))
# todo: if date is not specified pick something suitable.
# todo: if period is not specified - pick something suitable depending on current load
task = Task(**task)
self.db.add_task(task)
result += f"Added task {task.shortcut}"
return result
@command
@catch_errors
def update(self, user, message):
update = self.parse_message(user, message)
task = self.db.get_task(user, update['shortcut'])
task.__dict__.update(update)
self.db.update_task(user, task)
return f"Successfully updated task {task.shortcut}"
@command
@catch_errors
def remove(self, user, message):
"""
Remove task.
"""
task = self.parse_message(user, message)
self.db.remove_task(user, task['shortcut'])
return f"Task {task['shortcut']} removed"
@command
@catch_errors
def get(self, user, message):
"""
Remove task.
"""
task = self.parse_message(user, message)
task = self.db.get_task(user, task['shortcut'])
return repr(task)
@command
@catch_errors
def start(self, user, message):
try:
self.db.add_user(user)
except ValueError:
return f"User {user} already active"
return f"Added user {user} successfully"
@command
@catch_errors
def stop(self, user, message):
try:
self.db.remove_user(user)
except ValueError:
return f"No user {user}"
return f"Removed user {user} successfully"
@command
def list_all(self, user, message):
"""
List shortcuts of users tasks
"""
# todo: make a short task repr.
return '\n'.join([task.shortcut for task in self.db.get_users_tasks(user)])
@command
def list(self, user, message):
"""
Get tasks for particular date.
"""
message = trim(message, '/list')
if message.strip():
date = parse_date(message)
else:
date = get_current_datetime()
self.actualize_tasks()
tasks = self.db.get_users_tasks(user)
# need to cast into date because date is datetime with hours etc.
tasks = [task for task in tasks if to_date(task.date) == to_date(date)]
response = date.strftime("Tasks for %a, %d %b\n")
response += "\n".join([task.text for task in tasks])
return response
@command
def complete(self, user, message):
"""
Register that you've completed a task
:param user:
:param message:
:return:
"""
task = self.parse_message(user, message)
if 'date' in task:
date = parse_date(task['date'])
else:
date = get_current_datetime()
task = self.db.get_task(user=user, shortcut=task['shortcut'])
task.completions.append(date)
task.date = date + datetime.timedelta(days=task.period)
self.db.update_task(task)
@command
def help(self, user, message):
"""
Return commands and shortened docstrings.
"""
reply = ""
# todo: add docstrings - instead of help message for each command.
# todo: how to make telegram list all possible commands?
reply += "Commands: \n"
reply += '\n '.join([command.__name__ for command in self.commands])
reply += "Task fields: \n"
reply += '\n '.join(Task.declared_fields.keys())
return reply
def run(self):
with self.db:
super().run()
def actualize_tasks(self):
if self._last_actualize_date < get_current_date():
self._actualize_tasks()
self._last_actualize_date = get_current_date()
def _actualize_tasks(self):
"""
Go over all tasks and update date/reschedule
"""
for user in self.db.user_names:
for task in self.db.get_users_tasks(user):
today = get_current_datetime()
while to_date(task.date) < to_date(today):
if task.reschedule:
# if task is past due and to be rescheduled - reschedule it on today
task.date = today
else:
task.date += datetime.timedelta(days=task.period)
self.db.update_task(task)
| 32.163793 | 107 | 0.590592 | 7,099 | 0.951354 | 0 | 0 | 4,697 | 0.629456 | 0 | 0 | 1,878 | 0.251675 |
dfb8674c6f7746d9692d1c11fcd1c8fdb24ebb98 | 258 | py | Python | Strings/conversion-operation.py | tverma332/python3 | 544c4ec9c726c37293c8da5799f50575cc50852d | [
"MIT"
] | 3 | 2022-03-28T09:10:08.000Z | 2022-03-29T10:47:56.000Z | Strings/conversion-operation.py | tverma332/python3 | 544c4ec9c726c37293c8da5799f50575cc50852d | [
"MIT"
] | 1 | 2022-03-27T11:52:58.000Z | 2022-03-27T11:52:58.000Z | Strings/conversion-operation.py | tverma332/python3 | 544c4ec9c726c37293c8da5799f50575cc50852d | [
"MIT"
] | null | null | null | # lower , title , upper operations on string
x = "spider"
y = "MAN"
v=x.upper() # all letters will become uppercase
w=y.lower() # all letters will become lowercase
z=y.title() # first letter of each word becomes uppercase, the rest lowercase
print(v,w,z)
| 19.846154 | 75 | 0.705426 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 190 | 0.736434 |
dfb9067db6876e985a83eb3d9d6219b06ce32b30 | 1,197 | py | Python | setup.py | adadesions/sfcpy | d395218ae9f72fed378c30ad604923373b7fbf3f | [
"MIT"
] | 2 | 2019-08-28T19:30:32.000Z | 2020-03-28T16:17:01.000Z | setup.py | adadesions/sfcpy | d395218ae9f72fed378c30ad604923373b7fbf3f | [
"MIT"
] | 5 | 2021-03-18T22:53:57.000Z | 2022-03-11T23:42:38.000Z | setup.py | adadesions/sfcpy | d395218ae9f72fed378c30ad604923373b7fbf3f | [
"MIT"
] | null | null | null | """Setup script for sfcpy"""
import os.path
from setuptools import setup
# The directory containing this file
HERE = os.path.abspath(os.path.dirname(__file__))
# The text of the README file
with open(os.path.join(HERE, "README.md"), encoding='utf-8') as fid:
README = fid.read()
# This call to setup() does all the work
setup(
name="sfcpy",
version="1.2.3",
description="Space-Filling Curve library for image-processing tasks",
long_description=README,
long_description_content_type="text/markdown",
url="https://github.com/adadesions/sfcpy",
author="adadesions",
author_email="[email protected]",
license="MIT",
classifiers=[
"License :: OSI Approved :: MIT License",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
],
packages=["sfcpy"],
include_package_data=True,
tests_require=['pytest'],
install_requires=[
"numpy", "matplotlib", "Pillow"
],
entry_points={"console_scripts": ["sfcpy=sfcpy.__main__:main"]},
)
| 29.925 | 73 | 0.652464 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 624 | 0.521303 |
dfb94390c72e2b9eb210dfba78b3240cd00784e2 | 7,921 | py | Python | make_DigitalCommons_spreadsheet.py | lsulibraries/CWBR_DigitalCommons | 6eb994d08d6de088075cde82f6dc2b3aed15bdda | [
"Apache-2.0"
] | null | null | null | make_DigitalCommons_spreadsheet.py | lsulibraries/CWBR_DigitalCommons | 6eb994d08d6de088075cde82f6dc2b3aed15bdda | [
"Apache-2.0"
] | null | null | null | make_DigitalCommons_spreadsheet.py | lsulibraries/CWBR_DigitalCommons | 6eb994d08d6de088075cde82f6dc2b3aed15bdda | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
import csv
import os
from collections import namedtuple
import string
from nameparser import HumanName
def csv_to_dict(filename):
file_dict = dict()
with open(filename, 'r', newline='', encoding='utf-8') as csvfile:
csvreader = csv.reader(csvfile, delimiter='\t', quotechar='"')
headers = next(csvreader)
CWBR = namedtuple('CWBR', headers)
for row in csvreader:
item = CWBR(*row)
if file_dict.get(item.ID):
print('**** Two examples of {} in these spreadsheets ****'.format(item.ID))
exit()
file_dict[item.ID] = item
return file_dict
def make_paragraphs_text(issue):
return '\n\t'.join([i for i in
issue.Review.replace('<br>', '<p>')
.replace('</br>', '</p>')
.replace('</p>', '')
.split('<p>')
if i])
def make_announcement_block(issue):
record_type = issue.Record_type
if record_type.lower() == 'classics':
return 'Feature Essay'
elif record_type.lower() in ('interview', 'editorial', 'review', ):
return record_type
def format_title_parts(string_segment):
string_segment = string.capwords(string_segment, ' ')
string_segment = string_segment.lstrip().replace("'S ", "'s ").replace('’', "'")
string_segment = string_segment.replace('“', '"')
string_segment = string_segment.replace('</p>', '').replace('<p>', '')
return string_segment
def make_title_block(issue):
title_parts, subtitle_parts = find_title_lines(issue)
title_string = ''.join([format_title_parts(title_part)
for title_part in title_parts if title_part])
subtitle_string = ''.join([format_title_parts(subtitle_part)
for subtitle_part in subtitle_parts if subtitle_part])
if title_string and subtitle_string:
return ': '.join([title_string, subtitle_string])
else:
return ''.join([title_string, subtitle_string])
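# Illustrative example: a headline of 'THE CIVIL WAR' with sub-headline 'a narrative'
# is returned as 'The Civil War: A Narrative'.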
def pull_title_from_Title(issue):
title = strip_bolds_breaks(issue.Title).replace('EDITORIAL:', '').replace('INTERVIEW:', '')
title_parts = [item for item in title.split('<p>') if item]
subtitle_parts = ''
return title_parts, subtitle_parts
def pull_title_from_Headline(issue):
title_parts = [item for item in issue.Headline.split('<p>') if item]
subtitle_parts = [item for item in issue.Sub_headline.split('<p>') if item]
return title_parts, subtitle_parts
def find_title_lines(issue):
if issue.Record_type not in ('Editorial', 'Interview'):
title_parts, subtitle_parts = pull_title_from_Headline(issue)
else:
title_parts, subtitle_parts = pull_title_from_Title(issue)
if not (title_parts or subtitle_parts):
title_parts, subtitle_parts = pull_title_from_Title(issue)
return title_parts, subtitle_parts
def strip_bolds_breaks(text):
for i in ('<br>', '</br>', '<BR>', '</BR>', '<b>', '</b>', '<B>', '</B>', ):
text = text.replace(i, '')
return text
def pick_authors(issue):
author_list = []
if issue.Record_type not in ('Review', 'Classics'):
for author in (issue.Auth_1, issue.Auth_2, issue.Auth_3):
if author:
author = author.replace('<br>', '<p>').replace('</br>', '</p>')
author_list.append(author)
return author_list
else:
if issue.Reviewer:
author_list.append(issue.Reviewer)
return author_list
def parse_name(name):
parsed_name = HumanName(name)
first = parsed_name.first
middle = parsed_name.middle
last = parsed_name.last
suffix = parsed_name.suffix
return (first, middle, last, suffix)
def reformat_issue_type(issue_type):
internal_external_dict = {'Editorial': 'editorial',
'Classics': 'feature_essay',
'Interview': 'author_interview',
'Review': 'review',
}
return internal_external_dict[issue_type]
def make_publication_date(issue_date):
season, year = issue_date.split(' ')
seasons_month_dict = {'Spring': '03',
'Summer': '06',
'Fall': '09',
'Winter': '12'}
month = seasons_month_dict[season]
return '{}-{}-01'.format(year, month)
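# For example (illustrative): make_publication_date('Spring 2014') returns '2014-03-01'.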
def make_season(issue_date):
return issue_date.split(' ')[0]
def make_url(issue_id):
return 'https://s3-us-west-2.amazonaws.com/cwbr-publicshare/{}.pdf'.format(issue_id)
def make_csv_data(issues_dict):
csv_data = []
csv_data.append(['title',
'book_id',
'fulltext_url',
'isbn',
'price',
'publication_date',
'season',
'document_type',
'publisher',
'book_pub_date',
'author1_fname',
'author1_mname',
'author1_lname',
'author1_suffix',
'author2_fname',
'author2_mname',
'author2_lname',
'author2_suffix',
'author3_fname',
'author3_mname',
'author3_lname',
'author3_suffix',
'abstract',
])
for k, issue in sorted(issues_dict.items()):
authors_list = pick_authors(issue)
author1_fname, author1_mname, author1_lname, author1_suffix = '', '', '', ''
author2_fname, author2_mname, author2_lname, author2_suffix = '', '', '', ''
author3_fname, author3_mname, author3_lname, author3_suffix = '', '', '', ''
if authors_list:
author1_fname, author1_mname, author1_lname, author1_suffix = parse_name(authors_list[0])
if len(authors_list) > 1:
author2_fname, author2_mname, author2_lname, author2_suffix = parse_name(authors_list[1])
if len(authors_list) > 2:
author3_fname, author3_mname, author3_lname, author3_suffix = parse_name(authors_list[2])
csv_data.append([make_title_block(issue),
issue.ID,
make_url(issue.ID),
issue.ISBN,
issue.Price,
make_publication_date(issue.Issue_date),
make_season(issue.Issue_date),
reformat_issue_type(issue.Record_type),
issue.Publisher,
issue.Pub_date,
author1_fname,
author1_mname,
author1_lname,
author1_suffix,
author2_fname,
author2_mname,
author2_lname,
author2_suffix,
author3_fname,
author3_mname,
author3_lname,
author3_suffix,
make_paragraphs_text(issue),
])
csv_writer(csv_data)
def csv_writer(data):
output_dir = 'uploadSpreadsheet'
os.makedirs(output_dir, exist_ok=True)
with open('uploadSpreadsheet/DigitalCommonsSpreadsheet.csv', "w", newline='', encoding='utf-8') as csv_file:
writer = csv.writer(csv_file, delimiter='\t', quotechar='"')
for line in data:
writer.writerow(line)
if __name__ == '__main__':
issues_dict = csv_to_dict('3rdStageSourceCSVs/Interviews.csv')
make_csv_data(issues_dict)
| 36.004545 | 112 | 0.550562 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,083 | 0.136656 |
dfb9a74f5e09588db5c20e479a0c85f0735ce76b | 7,524 | py | Python | pip_services3_redis/cache/RedisCache.py | pip-services-python/pip-services-redis-python | ecb2e667ab266af0274b0891a19e802cb256766a | [
"MIT"
] | null | null | null | pip_services3_redis/cache/RedisCache.py | pip-services-python/pip-services-redis-python | ecb2e667ab266af0274b0891a19e802cb256766a | [
"MIT"
] | null | null | null | pip_services3_redis/cache/RedisCache.py | pip-services-python/pip-services-redis-python | ecb2e667ab266af0274b0891a19e802cb256766a | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from typing import Optional, Any
import redis
from pip_services3_commons.config import IConfigurable, ConfigParams
from pip_services3_commons.errors import ConfigException, InvalidStateException
from pip_services3_commons.refer import IReferenceable, IReferences
from pip_services3_commons.run import IOpenable
from pip_services3_components.auth import CredentialResolver
from pip_services3_components.cache import ICache
from pip_services3_components.connect import ConnectionResolver
class RedisCache(ICache, IConfigurable, IReferenceable, IOpenable):
"""
Distributed cache that stores values in Redis in-memory database.
### Configuration parameters ###
- connection(s):
- discovery_key: (optional) a key to retrieve the connection from :class:`IDiscovery <pip_services3_components.connect.IDiscovery.IDiscovery>`
- host: host name or IP address
- port: port number
- uri: resource URI or connection string with all parameters in it
- credential(s):
- store_key: key to retrieve parameters from credential store
- username: user name (currently is not used)
- password: user password
- options:
- retries: number of retries (default: 3)
- timeout: default caching timeout in milliseconds (default: 1 minute)
- max_size: maximum number of values stored in this cache (default: 1000)
### References ###
- `*:discovery:*:*:1.0` (optional) :class:`IDiscovery <pip_services3_components.connect.IDiscovery.IDiscovery>` services to resolve connection
- `*:credential-store:*:*:1.0` (optional) Credential stores to resolve credential
Example:
.. code-block:: python
cache = RedisCache()
cache.configure(ConfigParams.from_tuples(
"host", "localhost",
"port", 6379
))
cache.open("123")
cache.store("123", "key1", "ABC", None)
value = cache.retrieve("123", "key1") # Result: "ABC"
"""
def __init__(self):
"""
Creates a new instance of this cache
"""
self.__connection_resolver: ConnectionResolver = ConnectionResolver()
self.__credential_resolver: CredentialResolver = CredentialResolver()
self.__timeout: int = 30000
self.__retries: int = 3
self.__client: redis.Redis = None
def configure(self, config: ConfigParams):
"""
Configures component by passing configuration parameters.
:param config: configuration parameters to be set.
"""
self.__connection_resolver.configure(config)
self.__credential_resolver.configure(config)
self.__timeout = config.get_as_integer_with_default('options.timeout', self.__timeout)
self.__retries = config.get_as_integer_with_default('options.retries', self.__retries)
def set_references(self, references: IReferences):
"""
Sets references to dependent components.
:param references: references to locate the component dependencies.
"""
self.__connection_resolver.set_references(references)
        self.__credential_resolver.set_references(references)
def is_open(self) -> bool:
"""
Checks if the component is opened.
:return: true if the component has been opened and false otherwise.
"""
return self.__client is not None
def open(self, correlation_id: Optional[str]):
"""
Opens the component.
:param correlation_id: (optional) transaction id to trace execution through call chain.
"""
connection = self.__connection_resolver.resolve(correlation_id)
if connection is None:
raise ConfigException(
correlation_id,
'NO_CONNECTION',
'Connection is not configured'
)
credential = self.__credential_resolver.lookup(correlation_id)
options = {
# connect_timeout: self.__timeout,
# max_attempts: self.__retries,
'retry_on_timeout': True,
# 'retry_strategy': lambda options: self.__retry_strategy(options) # TODO add reconnect callback
}
if connection.get_uri():
options['url'] = connection.get_uri()
else:
options['host'] = connection.get_host() or 'localhost'
options['port'] = connection.get_port() or 6379
if credential is not None:
options['password'] = credential.get_password()
self.__client = redis.Redis(**options)
def close(self, correlation_id: Optional[str]):
"""
Closes component and frees used resources.
:param correlation_id: (optional) transaction id to trace execution through call chain.
"""
if self.__client is None: return
self.__client.close()
self.__client = None
def __check_opened(self, correlation_id: Optional[str]):
if not self.is_open():
raise InvalidStateException(
correlation_id,
'NOT_OPENED',
'Connection is not opened'
)
def __retry_strategy(self, options: dict) -> Any:
if options['error'] and options['error']['code'] == 'ECONNREFUSED':
# End reconnecting on a specific error and flush all commands with
            # an individual error
return Exception('The server refused the connection')
if options['total_retry_time'] > self.__timeout:
# End reconnecting after a specific timeout and flush all commands
            # with an individual error
return Exception('Retry time exhausted')
if options['attempt'] > self.__retries:
# End reconnecting with built in error
return None
return min(int(options['attempt']) * 100, 3000)
def retrieve(self, correlation_id: Optional[str], key: str) -> Any:
"""
Retrieves cached value from the cache using its key.
If value is missing in the cache or expired it returns `None`.
:param correlation_id: (optional) transaction id to trace execution through call chain.
:param key: a unique value key.
:return: a retrieve cached value or `None` if nothing was found.
"""
self.__check_opened(correlation_id)
return self.__client.get(key)
def store(self, correlation_id: Optional[str], key: str, value: Any, timeout: int) -> Any:
"""
Stores value in the cache with expiration time.
:param correlation_id: (optional) transaction id to trace execution through call chain.
:param key: a unique value key.
:param value: a value to store.
:param timeout: expiration timeout in milliseconds.
:return: the stored value.
"""
self.__check_opened(correlation_id)
return self.__client.set(name=key, value=value, px=timeout)
def remove(self, correlation_id: Optional[str], key: str) -> Any:
"""
Removes a value from the cache by its key.
:param correlation_id: (optional) transaction id to trace execution through call chain.
:param key: a unique value key.
:return: the removed value.
"""
self.__check_opened(correlation_id)
return self.__client.delete(key)
| 37.064039 | 158 | 0.636895 | 7,010 | 0.931685 | 0 | 0 | 0 | 0 | 0 | 0 | 3,992 | 0.530569 |
dfbade8328cd7332030b49fd40ed470582f05c91 | 7,392 | py | Python | main/model/property.py | lipis/gae-init-magic | 6b1e0b50f8e5200cb2dacebca9ac65e796b241a9 | [
"MIT"
] | 1 | 2018-10-26T13:33:20.000Z | 2018-10-26T13:33:20.000Z | main/model/property.py | lipis/gae-init-magic | 6b1e0b50f8e5200cb2dacebca9ac65e796b241a9 | [
"MIT"
] | 652 | 2018-10-26T12:28:08.000Z | 2021-08-02T09:13:48.000Z | main/model/property.py | lipis/gae-init-magic | 6b1e0b50f8e5200cb2dacebca9ac65e796b241a9 | [
"MIT"
] | null | null | null | # coding: utf-8
from __future__ import absolute_import
from google.appengine.ext import ndb
from api import fields
import model
import util
class Property(model.Base):
name = ndb.StringProperty(required=True)
rank = ndb.IntegerProperty(default=0)
verbose_name = ndb.StringProperty(default='')
show_on_view = ndb.BooleanProperty(default=True, verbose_name='Show on View')
show_on_update = ndb.BooleanProperty(default=True, verbose_name='Show on Update')
show_on_list = ndb.BooleanProperty(default=True, verbose_name='Show on List')
show_on_admin_update = ndb.BooleanProperty(default=True, verbose_name='Show on Admin Update')
show_on_admin_list = ndb.BooleanProperty(default=True, verbose_name='Show on Admin List')
ndb_property = ndb.StringProperty(default='', verbose_name='NDB Property')
kind = ndb.StringProperty()
default = ndb.StringProperty()
required = ndb.BooleanProperty(default=False)
repeated = ndb.BooleanProperty(default=False)
tags = ndb.BooleanProperty(default=False)
indexed = ndb.BooleanProperty(default=True)
auto_now = ndb.BooleanProperty(default=False)
auto_now_add = ndb.BooleanProperty(default=False)
compressed = ndb.BooleanProperty(default=False)
ndb_choices = ndb.StringProperty(verbose_name='Choices')
field_property = ndb.StringProperty(default='')
wtf_property = ndb.StringProperty(default='', verbose_name='WTF Property')
description = ndb.StringProperty(default='')
strip_filter = ndb.BooleanProperty(default=False)
email_filter = ndb.BooleanProperty(default=False)
sort_filter = ndb.BooleanProperty(default=False)
choices = ndb.StringProperty()
forms_property = ndb.StringProperty(default='')
placeholder = ndb.StringProperty(default='')
autofocus = ndb.BooleanProperty(default=False)
readonly = ndb.BooleanProperty(default=False)
def ndb_field(self, include_babel=False):
args = [
'kind=%s' % self.kind if self.kind else '',
'default=%s' % self.default if self.default else '',
'required=True' if self.required else '',
'repeated=%s' % self.repeated if self.repeated else '',
'indexed=False' if not self.indexed else '',
'compressed=True' if self.compressed else '',
'choices=[%s]' % self.ndb_choices if self.ndb_choices else '',
]
if include_babel:
args.append("verbose_name=_(u'%s')" % self.verbose_name_)
else:
args.append("verbose_name=u'%s'" % self.verbose_name if self.verbose_name else '')
return '%s = %s(%s)' % (
self.name,
self.ndb_property,
', '.join([arg for arg in args if arg]),
)
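  # Illustrative output for a hypothetical required StringProperty named 'title':
  #   title = ndb.StringProperty(required=True, verbose_name=u'Title')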
@ndb.ComputedProperty
def api_field(self):
if not self.field_property:
return ''
if self.repeated:
return "'%s': fields.List(%s)," % (self.name, self.field_property)
return "'%s': %s," % (self.name, self.field_property)
@ndb.ComputedProperty
def wtf_field(self):
validators = ['wtforms.validators.%s()' % ('required' if self.required else 'optional')]
if self.ndb_property == 'ndb.StringProperty' and self.wtf_property in ['wtforms.TextAreaField', 'wtforms.StringField']:
validators.append('wtforms.validators.length(max=500)')
filters = [
'util.strip_filter' if self.strip_filter else '',
'util.email_filter' if self.email_filter else '',
'util.sort_filter' if self.sort_filter else '',
]
filters = [f for f in filters if f]
filters = ' filters=[%s],\n' % ', '.join(filters) if filters else ''
description = " description='%s',\n" % self.description if self.description else ''
choices = ''
if self.wtf_property in ['wtforms.RadioField', 'wtforms.SelectField', 'wtforms.SelectMultipleField']:
choices = ' choices=%s,\n' % (self.choices if self.choices else '[]')
date_format = ''
if self.wtf_property == 'wtforms.DateTimeField':
date_format = " format='%Y-%m-%dT%H:%M',\n"
title = '%r' % self.verbose_name_
if self.ndb_property:
title = 'model.%s.%s._verbose_name' % (self.key.parent().get().name, self.name)
if self.wtf_property == 'wtforms.GeoPtField':
validators += ['wtforms.validators.NumberRange(min=-90, max=90)']
validatorss = '[%s]' % ', '.join(validators)
lat = (
'%s_lat = wtforms.FloatField(\n'
' %s,\n'
' %s,\n%s%s%s%s'
' )'
% (self.name, title + " + ' Latitude'", validatorss, filters, choices, description, date_format))
validators.pop()
validators += ['wtforms.validators.NumberRange(min=-180, max=180)']
validatorss = '[%s]' % ', '.join(validators)
lon = (
'\n %s_lon = wtforms.FloatField(\n'
' %s,\n'
' %s,\n%s%s%s%s'
' )'
% (self.name, title + " + ' Longtitute'", validatorss, filters, choices, description, date_format))
return '%s %s' % (lat, lon)
validators = '[%s]' % ', '.join(validators)
return (
'%s = %s(\n'
' %s,\n'
' %s,\n%s%s%s%s'
' )'
% (self.name, self.wtf_property, title, validators, filters, choices, description, date_format))
@ndb.ComputedProperty
def forms_field(self):
autofocus = ', autofocus=True' if self.autofocus else ''
readonly = ', readonly=True' if self.readonly else ''
placeholder = ", placeholder='%s'" % self.placeholder if self.placeholder else ''
if self.forms_property == 'forms.geo_pt_field':
lat = "{{forms.number_field(form.%s_lat%s%s%s)}}" % (self.name, autofocus, readonly, placeholder)
lon = "{{forms.number_field(form.%s_lon%s%s%s)}}" % (self.name, autofocus, readonly, placeholder)
return ('<div class="row">\n'
' <div class="col-sm-6">%s</div>\n <div class="col-sm-6">%s</div>\n </div>' %(lat, lon))
return "{{%s(form.%s%s%s%s)}}" % (self.forms_property, self.name, autofocus, readonly, placeholder)
@ndb.ComputedProperty
def default_verbose_name(self):
return util.snake_to_verbose(self.name)
@ndb.ComputedProperty
def verbose_name_(self):
return self.verbose_name or self.default_verbose_name
def get_title_name(self):
if self.ndb_property != 'ndb.KeyProperty' or not self.kind:
return None
if self.kind == 'model.User':
return 'name'
model_qry = model.Model.query(ancestor=self.key.parent().parent())
model_qry = model_qry.filter(model.Model.name == self.kind.split('.')[1])
model_db = model_qry.get()
if model_db and model_db.title_property_key:
return model_db.title_property_key.get().name
return None
FIELDS = {
'auto_now': fields.Boolean,
'auto_now_add': fields.Boolean,
'autofocus': fields.Boolean,
'choices': fields.String,
'default': fields.String,
'description': fields.String,
'email_filter': fields.Boolean,
'field_property': fields.String,
'forms_property': fields.String,
'kind': fields.String,
'name': fields.String,
'ndb_property': fields.String,
'placeholder': fields.String,
'rank': fields.Integer,
'readonly': fields.Boolean,
'repeated': fields.Boolean,
'required': fields.Boolean,
'sort_filter': fields.Boolean,
'strip_filter': fields.Boolean,
'verbose_name': fields.String,
'wtf_property': fields.String,
}
FIELDS.update(model.Base.FIELDS)
| 38.701571 | 123 | 0.652056 | 7,246 | 0.980249 | 0 | 0 | 3,514 | 0.475379 | 0 | 0 | 1,772 | 0.239719 |
dfbc302b59b318fa83066ffc6aa91c4caa2533da | 1,189 | py | Python | tests/test_request.py | pauleveritt/wired_components | a9072d5fc48680d5ff895887842ffd0f06bc0081 | [
"MIT"
] | 1 | 2019-09-15T12:30:44.000Z | 2019-09-15T12:30:44.000Z | tests/test_request.py | pauleveritt/wired_components | a9072d5fc48680d5ff895887842ffd0f06bc0081 | [
"MIT"
] | null | null | null | tests/test_request.py | pauleveritt/wired_components | a9072d5fc48680d5ff895887842ffd0f06bc0081 | [
"MIT"
] | null | null | null | import pytest
from wired import ServiceContainer
@pytest.fixture
def request_container(registry, simple_root) -> ServiceContainer:
from wired_components.request import wired_setup as request_setup
from wired_components.resource import IRoot
from wired_components.url import IUrl, Url
# Outside system puts some things in the registry
registry.register_singleton(simple_root, IRoot)
request_setup(registry)
# Make a container and return it
container: ServiceContainer = registry.create_container(
context=simple_root
)
url = Url(path='somepath')
container.register_singleton(url, IUrl)
return container
def test_request_wired_setup(registry):
from wired_components.request import wired_setup
assert wired_setup(registry) is None
def test_request_instance(registry, request_container, simple_root):
# Get the request from the container
from wired_components.request import IRequest, Request
request: Request = request_container.get(IRequest)
# See if we're constructed correctly
assert request.context.title == 'My Site'
assert request.path == 'somepath'
assert request.root == simple_root
| 31.289474 | 69 | 0.764508 | 0 | 0 | 0 | 0 | 610 | 0.513036 | 0 | 0 | 182 | 0.15307 |
dfbd03cf9bf0d42acbc4621a1653916d133bdb8e | 958 | py | Python | Charts and Graphs/LollipopCharts.py | aprakash7/Buildyourown | 58f0530ea84bf9e91f258d947610ea1e93d7d456 | [
"MIT"
] | null | null | null | Charts and Graphs/LollipopCharts.py | aprakash7/Buildyourown | 58f0530ea84bf9e91f258d947610ea1e93d7d456 | [
"MIT"
] | null | null | null | Charts and Graphs/LollipopCharts.py | aprakash7/Buildyourown | 58f0530ea84bf9e91f258d947610ea1e93d7d456 | [
"MIT"
] | 1 | 2021-05-31T04:20:54.000Z | 2021-05-31T04:20:54.000Z | # -*- coding: utf-8 -*-
"""
Created on Mon May 17 21:24:53 2021
@author: Akshay Prakash
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
table = pd.read_csv(r'\1617table.csv')
table.head()
plt.hlines(y= np.arange(1, 21), xmin = 0, xmax = table['Pts'], color = 'skyblue')
plt.plot(table['Pts'], np.arange(1,21), "o")
plt.yticks(np.arange(1,21), table['team'])
plt.show()
teamColours = ['#034694','#001C58','#5CBFEB','#D00027',
'#EF0107','#DA020E','#274488','#ED1A3B',
'#000000','#091453','#60223B','#0053A0',
'#E03A3E','#1B458F','#000000','#53162f',
'#FBEE23','#EF6610','#C92520','#BA1F1A']
plt.hlines(y= np.arange(1, 21), xmin = 0, xmax = table['Pts'], color = teamColours)
plt.plot(table['Pts'], np.arange(1,21), "o")
plt.yticks(np.arange(1,21), table['team'])
plt.xlabel('Points')
plt.ylabel('Teams')
plt.title("Premier league 16/17")
| 30.903226 | 84 | 0.583507 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 377 | 0.393528 |
dfbf2ca5c949daa624f3881dc6dcb4567701067b | 1,126 | py | Python | python/merge-kml-files/merge-kml-files.py | bmaupin/graveyard | 71d52fe6589ce13dfe7433906d1aa50df48c9f94 | [
"MIT"
] | 1 | 2019-11-23T10:44:58.000Z | 2019-11-23T10:44:58.000Z | python/merge-kml-files/merge-kml-files.py | bmaupin/graveyard | 71d52fe6589ce13dfe7433906d1aa50df48c9f94 | [
"MIT"
] | 8 | 2020-07-16T07:14:12.000Z | 2020-10-14T17:25:33.000Z | python/merge-kml-files/merge-kml-files.py | bmaupin/graveyard | 71d52fe6589ce13dfe7433906d1aa50df48c9f94 | [
"MIT"
] | 1 | 2019-11-23T10:45:00.000Z | 2019-11-23T10:45:00.000Z | #!/usr/bin/env python
import sys
import lxml.etree
def main():
if len(sys.argv) < 3:
sys.stderr.write('ERROR: Must provide at least 2 KML files to merge\n')
sys.exit('Usage: {} FILE1 FILE2 ...'.format(sys.argv[0]))
first_kml_root = lxml.etree.parse(sys.argv[1]).getroot()
first_kml_ns = first_kml_root.nsmap[None]
first_kml_document = first_kml_root.find('{{{}}}Document'.format(
first_kml_ns))
for filename in sys.argv[2:]:
kml_root = lxml.etree.parse(filename).getroot()
kml_ns = kml_root.nsmap[None]
kml_document = kml_root.find('{{{}}}Document'.format(kml_ns))
# Add the Document node's child elements to the first KML file
for element in kml_document.iterchildren():
first_kml_document.append(element)
print(lxml.etree.tostring(
first_kml_root,
encoding='utf-8',
xml_declaration=True,
pretty_print=True,
# .decode('utf-8') is required for Python 3
).decode('utf-8'))
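# Example invocation (illustrative): merge two or more KML files and save the result:
#   ./merge-kml-files.py first.kml second.kml > merged.kml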
if __name__ == '__main__':
main()
| 31.277778 | 79 | 0.599467 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 262 | 0.232682 |
dfbf59c5b26596753447f4f968efc9068d24fa0b | 3,829 | py | Python | tccli/services/partners/v20180321/help.py | tarnover/tencentcloud-cli | 5b0537913a33884a20d7663405a8aa1c2276b41a | [
"Apache-2.0"
] | null | null | null | tccli/services/partners/v20180321/help.py | tarnover/tencentcloud-cli | 5b0537913a33884a20d7663405a8aa1c2276b41a | [
"Apache-2.0"
] | null | null | null | tccli/services/partners/v20180321/help.py | tarnover/tencentcloud-cli | 5b0537913a33884a20d7663405a8aa1c2276b41a | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
DESC = "partners-2018-03-21"
INFO = {
"AgentPayDeals": {
"params": [
{
"name": "OwnerUin",
"desc": "订单所有者uin"
},
{
"name": "AgentPay",
"desc": "代付标志,1:代付;0:自付"
},
{
"name": "DealNames",
"desc": "订单号数组"
}
],
"desc": "代理商支付订单接口,支持自付/代付"
},
"DescribeAgentBills": {
"params": [
{
"name": "SettleMonth",
"desc": "支付月份,如2018-02"
},
{
"name": "ClientUin",
"desc": "客户账号ID"
},
{
"name": "PayMode",
"desc": "支付方式,prepay/postpay"
},
{
"name": "OrderId",
"desc": "预付费订单号"
},
{
"name": "ClientRemark",
"desc": "客户备注名称"
},
{
"name": "Offset",
"desc": "偏移量"
},
{
"name": "Limit",
"desc": "限制数目"
}
],
"desc": "代理商可查询自己及名下代客所有业务明细"
},
"AgentTransferMoney": {
"params": [
{
"name": "ClientUin",
"desc": "客户账号ID"
},
{
"name": "Amount",
"desc": "转账金额,单位分"
}
],
"desc": "为合作伙伴提供转账给客户能力。仅支持合作伙伴为自己名下客户转账。"
},
"DescribeRebateInfos": {
"params": [
{
"name": "RebateMonth",
"desc": "返佣月份,如2018-02"
},
{
"name": "Offset",
"desc": "偏移量"
},
{
"name": "Limit",
"desc": "限制数目"
}
],
"desc": "代理商可查询自己名下全部返佣信息"
},
"ModifyClientRemark": {
"params": [
{
"name": "ClientRemark",
"desc": "客户备注名称"
},
{
"name": "ClientUin",
"desc": "客户账号ID"
}
],
"desc": "代理商可以对名下客户添加备注、修改备注"
},
"DescribeAgentClients": {
"params": [
{
"name": "ClientUin",
"desc": "客户账号ID"
},
{
"name": "ClientName",
"desc": "客户名称。由于涉及隐私,名称打码显示,故名称仅支持打码后的模糊搜索"
},
{
"name": "ClientFlag",
"desc": "客户类型,a/b,类型定义参考代理商相关政策文档"
},
{
"name": "OrderDirection",
"desc": "ASC/DESC, 不区分大小写,按申请时间排序"
},
{
"name": "Offset",
"desc": "偏移量"
},
{
"name": "Limit",
"desc": "限制数目"
}
],
"desc": "代理商可查询自己名下待审核客户列表"
},
"DescribeClientBalance": {
"params": [
{
"name": "ClientUin",
"desc": "客户(代客)账号ID"
}
],
"desc": "为合作伙伴提供查询客户余额能力。调用者必须是合作伙伴,只能查询自己名下客户余额"
},
"DescribeAgentAuditedClients": {
"params": [
{
"name": "ClientUin",
"desc": "客户账号ID"
},
{
"name": "ClientName",
"desc": "客户名称。由于涉及隐私,名称打码显示,故名称仅支持打码后的模糊搜索"
},
{
"name": "ClientFlag",
"desc": "客户类型,a/b,类型定义参考代理商相关政策文档"
},
{
"name": "OrderDirection",
"desc": "ASC/DESC, 不区分大小写,按审核通过时间排序"
},
{
"name": "ClientUins",
"desc": "客户账号ID列表"
},
{
"name": "HasOverdueBill",
"desc": "是否欠费。0:不欠费;1:欠费"
},
{
"name": "ClientRemark",
"desc": "客户备注"
},
{
"name": "Offset",
"desc": "偏移量"
},
{
"name": "Limit",
"desc": "限制数目"
},
{
"name": "ClientType",
"desc": "客户类型:可以为new(新拓)/assign(指定)/old(存量)/空"
},
{
"name": "ProjectType",
"desc": "项目类型:可以为self(自拓项目)/platform(合作项目)/repeat(复算项目 )/空"
}
],
"desc": "查询已审核客户列表"
},
"AuditApplyClient": {
"params": [
{
"name": "ClientUin",
"desc": "待审核客户账号ID"
},
{
"name": "AuditResult",
"desc": "审核结果,可能的取值:accept/reject"
},
{
"name": "Note",
"desc": "申请理由,B类客户审核通过时必须填写申请理由"
}
],
"desc": "代理商可以审核其名下申请中代客"
}
} | 19.049751 | 68 | 0.404022 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,081 | 0.627879 |
dfbfef0fe41686291ae36ae72197b63006cb0f9c | 83,387 | py | Python | src/main/python/lib/default/__init__.py | emilybache/texttest-runner | 2d5c42b8d37699a2cbcb8f19af7c271d6ad1024a | [
"MIT"
] | null | null | null | src/main/python/lib/default/__init__.py | emilybache/texttest-runner | 2d5c42b8d37699a2cbcb8f19af7c271d6ad1024a | [
"MIT"
] | null | null | null | src/main/python/lib/default/__init__.py | emilybache/texttest-runner | 2d5c42b8d37699a2cbcb8f19af7c271d6ad1024a | [
"MIT"
] | null | null | null |
""" The default configuration, from which all others should be derived """
import os, plugins, sandbox, console, rundependent, comparetest, batch, performance, subprocess, operator, logging
from copy import copy
from string import Template
from fnmatch import fnmatch
from threading import Thread
# For backwards compatibility
from runtest import RunTest, Running, Killed
from scripts import *
def getConfig(optionMap):
return Config(optionMap)
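# Illustrative sketch (not part of this module): a site- or application-specific configuration
# typically derives from Config below and overrides only the hooks it needs, exposing its own
# getConfig factory. The module and class names here are hypothetical, e.g.
#
#   import default
#
#   def getConfig(optionMap):
#       return MyConfig(optionMap)
#
#   class MyConfig(default.Config):
#       def getTestRunner(self):
#           # adjust or wrap the default behaviour here
#           return default.Config.getTestRunner(self)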
class Config:
loggingSetup = False
removePreviousThread = None
def __init__(self, optionMap):
self.optionMap = optionMap
self.filterFileMap = {}
if self.hasExplicitInterface():
self.trySetUpLogging()
from reconnect import ReconnectConfig
self.reconnectConfig = ReconnectConfig(optionMap)
def getMachineNameForDisplay(self, machine):
return machine # override for queuesystems
def getCheckoutLabel(self):
return "Use checkout"
def getMachineLabel(self):
return "Run on machine"
def addToOptionGroups(self, apps, groups):
recordsUseCases = len(apps) == 0 or self.anyAppHas(apps, lambda app: app.getConfigValue("use_case_record_mode") != "disabled")
useCatalogues = self.anyAppHas(apps, self.isolatesDataUsingCatalogues)
useCaptureMock = self.anyAppHas(apps, self.usesCaptureMock)
for group in groups:
if group.name.startswith("Select"):
group.addOption("t", "Test names containing", description="Select tests for which the name contains the entered text. The text can be a regular expression.")
group.addOption("ts", "Test paths containing", description="Select tests for which the full path to the test (e.g. suite1/subsuite/testname) contains the entered text. The text can be a regular expression. You can select tests by suite name this way.")
possibleDirs = self.getFilterFileDirectories(apps, useOwnTmpDir=True)
group.addOption("f", "Tests listed in file", possibleDirs=possibleDirs, selectFile=True)
group.addOption("desc", "Descriptions containing", description="Select tests for which the description (comment) matches the entered text. The text can be a regular expression.")
if self.anyAppHas(apps, self.hasPerformance):
group.addOption("r", "Execution time", description="Specify execution time limits, either as '<min>,<max>', or as a list of comma-separated expressions, such as >=0:45,<=1:00. Digit-only numbers are interpreted as minutes, while colon-separated numbers are interpreted as hours:minutes:seconds.")
group.addOption("grep", "Test-files containing", description="Select tests which have a file containing the entered text. The text can be a regular expression : e.g. enter '.*' to only look for the file without checking the contents.")
group.addOption("grepfile", "Test-file to search", allocateNofValues=2, description="When the 'test-files containing' field is non-empty, apply the search in files with the given stem. Unix-style file expansion (note not regular expressions) may be used. For example '*' will look in any file.")
elif group.name.startswith("Basic"):
if len(apps) > 0:
version = plugins.getAggregateString(apps, lambda app: app.getFullVersion())
checkout = plugins.getAggregateString(apps, lambda app: app.checkout)
machine = plugins.getAggregateString(apps, lambda app: app.getRunMachine())
else:
version, checkout, machine = "", "", ""
group.addOption("v", "Run this version", version)
group.addOption("c", self.getCheckoutLabel(), checkout)
group.addOption("m", self.getMachineLabel(), self.getMachineNameForDisplay(machine))
group.addOption("cp", "Times to run", 1, minimum=1, description="Set this to some number larger than 1 to run the same test multiple times, for example to try to catch indeterminism in the system under test")
if recordsUseCases:
group.addOption("delay", "Replay pause (sec)", 0.0, description="How long to wait, in seconds, between replaying each GUI action in the usecase file")
self.addDefaultSwitch(group, "gui", "Show GUI and record any extra actions",
description="Disable virtual display usage if any. Replay whatever is in the usecase file and enabled recording when done")
self.addDefaultSwitch(group, "screenshot", "Generate a screenshot after each replayed action",
description="The screenshots can be viewed via the 'View Screenshots' action in the test (left panel) context menu")
self.addDefaultSwitch(group, "stop", "Stop after first failure")
if useCatalogues:
self.addDefaultSwitch(group, "ignorecat", "Ignore catalogue file when isolating data", description="Treat test data identified by 'partial_copy_test_path' as if it were in 'copy_test_path', " +
"i.e. copy everything without taking notice of the catalogue file. Useful when many things have changed with the files written by the test")
if useCaptureMock:
self.addCaptureMockSwitch(group)
elif group.name.startswith("Advanced"):
self.addDefaultOption(group, "b", "Run batch mode session")
self.addDefaultOption(group, "name", "Name this run")
group.addOption("vanilla", "Ignore configuration files", self.defaultVanillaValue(),
possibleValues = [ "", "site", "personal", "all" ])
self.addDefaultSwitch(group, "keeptmp", "Keep temporary write-directories")
group.addSwitch("ignorefilters", "Ignore all run-dependent text filtering")
elif group.name.startswith("Self-diagnostics"):
self.addDefaultSwitch(group, "x", "Enable self-diagnostics")
defaultDiagDir = plugins.getPersonalDir("log")
group.addOption("xr", "Configure self-diagnostics from", os.path.join(defaultDiagDir, "logging.debug"),
possibleValues=[ os.path.join(plugins.installationDir("log"), "logging.debug") ])
group.addOption("xw", "Write self-diagnostics to", defaultDiagDir)
elif group.name.startswith("Invisible"):
# Options that don't make sense with the GUI should be invisible there...
group.addOption("a", "Load test applications named")
group.addOption("s", "Run this script")
group.addOption("d", "Look for test files under")
group.addSwitch("help", "Print configuration help text on stdout")
group.addSwitch("g", "use dynamic GUI")
group.addSwitch("gx", "use static GUI")
group.addSwitch("con", "use console interface")
group.addSwitch("coll", "Collect results for batch mode session")
group.addSwitch("collarchive", "Collect results for batch mode session using data in the archive, back to the given date")
group.addOption("tp", "Private: Tests with exact path") # use for internal communication
group.addOption("finverse", "Tests not listed in file")
group.addOption("fintersect", "Tests in all files")
group.addOption("funion", "Tests in any of files")
group.addOption("fd", "Private: Directory to search for filter files in")
group.addOption("td", "Private: Directory to search for temporary settings in")
group.addOption("count", "Private: How many tests we believe there will be")
group.addOption("o", "Overwrite failures, optionally using version")
group.addOption("reconnect", "Reconnect to previous run")
group.addSwitch("reconnfull", "Recompute file filters when reconnecting", options=self.getReconnFullOptions())
group.addSwitch("n", "Create new results files (overwrite everything)")
group.addSwitch("new", "Start static GUI with no applications loaded")
group.addOption("bx", "Select tests exactly as for batch mode session")
group.addSwitch("zen", "Make console output coloured, for use e.g. with ZenTest")
if recordsUseCases:
group.addSwitch("record", "Private: Record usecase rather than replay what is present")
group.addSwitch("autoreplay", "Private: Used to flag that the run has been autogenerated")
else:
# We may have other apps that do this, don't reject these options
group.addOption("delay", "Replay pause (sec)", 0)
group.addSwitch("gui", "Show GUI and record any extra actions")
group.addSwitch("screenshot", "Generate a screenshot after each replayed action")
if not useCatalogues:
group.addSwitch("ignorecat", "Ignore catalogue file when isolating data")
if not useCaptureMock:
self.addCaptureMockSwitch(group)
def addDefaultSwitch(self, group, key, name, *args, **kw):
group.addSwitch(key, name, self.optionIntValue(key), *args, **kw)
def addDefaultOption(self, group, key, name, *args, **kw):
group.addOption(key, name, self.optionValue(key), *args, **kw)
def addCaptureMockSwitch(self, group, value=0):
options = [ "Replay", "Record", "Mixed Mode", "Disabled" ]
descriptions = [ "Replay all existing interactions from the information in CaptureMock's mock files. Do not record anything new.",
"Ignore any existing CaptureMock files and record all the interactions afresh.",
"Replay all existing interactions from the information in the CaptureMock mock files. " + \
"Record any other interactions that occur.",
"Disable CaptureMock" ]
group.addSwitch("rectraffic", "CaptureMock", value=value, options=options, description=descriptions)
def getReconnFullOptions(self):
return ["Display results exactly as they were in the original run",
"Use raw data from the original run, but recompute run-dependent text, known bug information etc."]
def anyAppHas(self, apps, propertyMethod):
for app in apps:
for partApp in [ app ] + app.extras:
if propertyMethod(partApp):
return True
return False
def defaultVanillaValue(self):
if not self.optionMap.has_key("vanilla"):
return ""
given = self.optionValue("vanilla")
return given or "all"
def getRunningGroupNames(self):
return [ ("Basic", None, None), ("Self-diagnostics (internal logging)", "x", 0), ("Advanced", None, None) ]
def getAllRunningGroupNames(self, allApps):
if len(allApps) == 0:
return self.getRunningGroupNames()
names = []
for app in allApps:
for name in app.getRunningGroupNames():
if name not in names:
names.append(name)
return names
def createOptionGroups(self, allApps):
groupNames = [ "Selection", "Invisible" ] + [ x[0] for x in self.getAllRunningGroupNames(allApps) ]
optionGroups = map(plugins.OptionGroup, groupNames)
self.addToOptionGroups(allApps, optionGroups)
return optionGroups
def findAllValidOptions(self, allApps):
groups = self.createOptionGroups(allApps)
return reduce(operator.add, (g.keys() for g in groups), [])
def getActionSequence(self):
if self.optionMap.has_key("coll"):
return []
if self.isReconnecting():
return self.getReconnectSequence()
scriptObject = self.optionMap.getScriptObject()
if scriptObject:
if self.usesComparator(scriptObject):
return [ self.getWriteDirectoryMaker(), rundependent.FilterOriginalForScript(), scriptObject,
comparetest.MakeComparisons(ignoreMissing=True,enableColor=self.optionMap.has_key("zen")) ]
else:
return [ scriptObject ]
else:
return self.getTestProcessor()
def usesComparator(self, scriptObject):
try:
return scriptObject.usesComparator()
except AttributeError:
return False
def useGUI(self):
return self.optionMap.has_key("g") or self.optionMap.has_key("gx")
def useStaticGUI(self, app):
return self.optionMap.has_key("gx") or \
(not self.hasExplicitInterface() and app.getConfigValue("default_interface") == "static_gui")
def useConsole(self):
return self.optionMap.has_key("con")
def getExtraVersions(self, app):
fromConfig = self.getExtraVersionsFromConfig(app)
fromCmd = self.getExtraVersionsFromCmdLine(app, fromConfig)
return self.createComposites(fromConfig, fromCmd)
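    # For example, with config extra versions [ "a", "b" ] and command-line extra versions [ "x" ],
    # createComposites produces [ "a", "b", "x", "x.a", "x.b" ]: each command-line version appears
    # both on its own and composed with each version from the config.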
def createComposites(self, vlist1, vlist2):
allVersions = copy(vlist1)
for v2 in vlist2:
allVersions.append(v2)
for v1 in vlist1:
allVersions.append(v2 + "." + v1)
return allVersions
def getExtraVersionsFromCmdLine(self, app, fromConfig):
if self.isReconnecting():
return self.reconnectConfig.getExtraVersions(app, fromConfig)
else:
copyVersions = self.getCopyExtraVersions()
checkoutVersions, _ = self.getCheckoutExtraVersions(app)
            # Generated automatically so the runs can be distinguished; don't save them
for ver in copyVersions + checkoutVersions:
app.addConfigEntry("unsaveable_version", ver)
return self.createComposites(checkoutVersions, copyVersions)
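    # With "-cp 3", for example, this yields [ "copy_1", "copy_2" ]: the original run plus two extra copies of each test.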
def getCopyExtraVersions(self):
try:
copyCount = int(self.optionMap.get("cp", 1))
except TypeError:
copyCount = 1
return [ "copy_" + str(i) for i in range(1, copyCount) ]
def makeParts(self, c):
return c.replace("\\", "/").split("/")
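    # Derive a version name from a checkout path by dropping the path components it shares with the
    # other checkouts: e.g. checkouts "/repo/trunk" and "/repo/branch_1.0" yield "trunk" and "branch_1_0".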
def versionNameFromCheckout(self, c, checkoutNames):
checkoutParts = self.makeParts(c)
for other in checkoutNames:
if other != c:
for otherPart in self.makeParts(other):
if otherPart in checkoutParts:
checkoutParts.remove(otherPart)
return checkoutParts[-1].replace(".", "_")
def getCheckoutExtraVersions(self, app):
checkoutNames = plugins.commasplit(self.optionValue("c"))
if len(checkoutNames) > 1:
expandedNames = [ self.expandCheckout(c, app) for c in checkoutNames ]
extraCheckouts = expandedNames[1:]
return [ self.versionNameFromCheckout(c, expandedNames) for c in extraCheckouts ], extraCheckouts
else:
return [], []
def getBatchSession(self, app):
return self.optionValue("b")
def getBatchSessionForSelect(self, app):
return self.getBatchSession(app) or self.optionMap.get("bx")
def getExtraVersionsFromConfig(self, app):
basic = app.getConfigValue("extra_version")
batchSession = self.getBatchSessionForSelect(app)
if batchSession is not None:
for batchExtra in app.getCompositeConfigValue("batch_extra_version", batchSession):
if batchExtra not in basic:
basic.append(batchExtra)
if self.optionMap.has_key("count"):
return [] # dynamic GUI started from static GUI, rely on it telling us what to load
for extra in basic:
if extra in app.versions:
return []
return basic
def getDefaultInterface(self, allApps):
if self.optionMap.has_key("s"):
return "console"
elif len(allApps) == 0 or self.optionMap.has_key("new"):
return "static_gui"
defaultIntf = None
for app in allApps:
appIntf = app.getConfigValue("default_interface")
if defaultIntf and appIntf != defaultIntf:
raise plugins.TextTestError, "Conflicting default interfaces for different applications - " + \
appIntf + " and " + defaultIntf
defaultIntf = appIntf
return defaultIntf
def setDefaultInterface(self, allApps):
mapping = { "static_gui" : "gx", "dynamic_gui": "g", "console": "con" }
defaultInterface = self.getDefaultInterface(allApps)
if mapping.has_key(defaultInterface):
self.optionMap[mapping[defaultInterface]] = ""
else:
raise plugins.TextTestError, "Invalid value for default_interface '" + defaultInterface + "'"
def hasExplicitInterface(self):
return self.useGUI() or self.batchMode() or self.useConsole() or self.optionMap.has_key("o")
def getLogfilePostfixes(self):
if self.optionMap.has_key("x"):
return [ "debug" ]
elif self.optionMap.has_key("gx"):
return [ "gui", "static_gui" ]
elif self.optionMap.has_key("g"):
return [ "gui", "dynamic_gui" ]
elif self.batchMode():
return [ "console", "batch" ]
else:
return [ "console" ]
def trySetUpLogging(self):
if not self.loggingSetup:
self.setUpLogging()
Config.loggingSetup = True
def setUpLogging(self):
filePatterns = [ "logging." + postfix for postfix in self.getLogfilePostfixes() ]
includeSite, includePersonal = self.optionMap.configPathOptions()
allPaths = plugins.findDataPaths(filePatterns, includeSite, includePersonal, dataDirName="log")
if len(allPaths) > 0:
plugins.configureLogging(allPaths[-1]) # Won't have any effect if we've already got a log file
else:
plugins.configureLogging()
def getResponderClasses(self, allApps):
# Global side effects first :)
if not self.hasExplicitInterface():
self.setDefaultInterface(allApps)
self.trySetUpLogging()
return self._getResponderClasses(allApps)
def _getResponderClasses(self, allApps):
classes = []
if not self.optionMap.has_key("gx"):
if self.optionMap.has_key("new"):
raise plugins.TextTestError, "'--new' option can only be provided with the static GUI"
elif len(allApps) == 0:
raise plugins.TextTestError, "Could not find any matching applications (files of the form config.<app>) under " + " or ".join(self.optionMap.rootDirectories)
if self.useGUI():
self.addGuiResponder(classes)
else:
classes.append(self.getTextDisplayResponderClass())
if not self.optionMap.has_key("gx"):
classes += self.getThreadActionClasses()
if self.batchMode() and not self.optionMap.has_key("s"):
if self.optionMap.has_key("coll"):
arg = self.optionMap["coll"]
if arg != "mail":
classes.append(self.getWebPageResponder())
if not arg or "web" not in arg:
classes.append(batch.CollectFilesResponder)
else:
if self.optionValue("b") is None:
plugins.log.info("No batch session identifier provided, using 'default'")
self.optionMap["b"] = "default"
if self.anyAppHas(allApps, lambda app: self.emailEnabled(app)):
classes.append(batch.EmailResponder)
if self.anyAppHas(allApps, lambda app: self.getBatchConfigValue(app, "batch_junit_format") == "true"):
from batch.junitreport import JUnitResponder
classes.append(JUnitResponder)
if os.name == "posix" and self.useVirtualDisplay():
from virtualdisplay import VirtualDisplayResponder
classes.append(VirtualDisplayResponder)
stateSaver = self.getStateSaver()
if stateSaver is not None:
classes.append(stateSaver)
if not self.useGUI() and not self.batchMode():
classes.append(self.getTextResponder())
# At the end, so we've done the processing before we proceed
from storytext_interface import ApplicationEventResponder
classes.append(ApplicationEventResponder)
return classes
def emailEnabled(self, app):
return self.getBatchConfigValue(app, "batch_recipients") or \
self.getBatchConfigValue(app, "batch_use_collection") == "true"
def getBatchConfigValue(self, app, configName, **kw):
return app.getCompositeConfigValue(configName, self.getBatchSession(app), **kw)
def isActionReplay(self):
for option, _ in self.getInteractiveReplayOptions():
if self.optionMap.has_key(option):
return True
return False
def noFileAdvice(self):
# What can we suggest if files aren't present? In this case, not much
return ""
def useVirtualDisplay(self):
# Don't try to set it if we're using the static GUI or
# we've requested a slow motion replay or we're trying to record a new usecase.
return not self.isRecording() and not self.optionMap.has_key("gx") and \
not self.isActionReplay() and not self.optionMap.has_key("coll") and not self.optionMap.runScript()
def getThreadActionClasses(self):
from actionrunner import ActionRunner
return [ ActionRunner ]
def getTextDisplayResponderClass(self):
return console.TextDisplayResponder
def isolatesDataUsingCatalogues(self, app):
return app.getConfigValue("create_catalogues") == "true" and \
len(app.getConfigValue("partial_copy_test_path")) > 0
def usesCaptureMock(self, app):
return "traffic" in app.defFileStems()
def hasWritePermission(self, path):
if os.path.isdir(path):
return os.access(path, os.W_OK)
else:
return self.hasWritePermission(os.path.dirname(path))
def getWriteDirectories(self, app):
rootDir = self.optionMap.setPathFromOptionsOrEnv("TEXTTEST_TMP", app.getConfigValue("default_texttest_tmp")) # Location of temporary files from test runs
if not os.path.isdir(rootDir) and not self.hasWritePermission(os.path.dirname(rootDir)):
rootDir = self.optionMap.setPathFromOptionsOrEnv("", "$TEXTTEST_PERSONAL_CONFIG/tmp")
writeDir = os.path.join(rootDir, self.getWriteDirectoryName(app))
localRootDir = self.optionMap.getPathFromOptionsOrEnv("TEXTTEST_LOCAL_TMP", app.getConfigValue("default_texttest_local_tmp")) # Location of temporary files on local disk from test runs. Defaults to value of TEXTTEST_TMP
if localRootDir:
return writeDir, os.path.join(localRootDir, self.getLocalWriteDirectoryName(app))
else:
return writeDir, writeDir
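    # The per-run directory name joins the run descriptors, the start time and the process id with
    # dots, giving names of the form "console.<starttime>.<pid>" for a plain console run.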
def getWriteDirectoryName(self, app):
appDescriptor = self.getAppDescriptor()
parts = self.getBasicRunDescriptors(app, appDescriptor) + self.getVersionDescriptors(appDescriptor) + \
[ self.getTimeDescriptor(), str(os.getpid()) ]
return ".".join(parts)
def getLocalWriteDirectoryName(self, app):
return self.getWriteDirectoryName(app)
def getBasicRunDescriptors(self, app, appDescriptor):
appDescriptors = [ appDescriptor ] if appDescriptor else []
if self.useStaticGUI(app):
return [ "static_gui" ] + appDescriptors
elif appDescriptors:
return appDescriptors
elif self.getBatchSession(app):
return [ self.getBatchSession(app) ]
elif self.optionMap.has_key("g"):
return [ "dynamic_gui" ]
else:
return [ "console" ]
def getTimeDescriptor(self):
return plugins.startTimeString().replace(":", "")
def getAppDescriptor(self):
givenAppDescriptor = self.optionValue("a")
if givenAppDescriptor and "," not in givenAppDescriptor:
return givenAppDescriptor
def getVersionDescriptors(self, appDescriptor):
givenVersion = self.optionValue("v")
if givenVersion:
# Commas in path names are a bit dangerous, some applications may have arguments like
# -path path1,path2 and just do split on the path argument.
# We try something more obscure instead...
versionList = plugins.commasplit(givenVersion)
if appDescriptor:
parts = appDescriptor.split(".", 1)
if len(parts) > 1:
versionList = self.filterForApp(versionList, parts[1])
return [ "++".join(versionList) ] if versionList else []
else:
return []
def filterForApp(self, versionList, appVersionDescriptor):
filteredVersions = []
for version in versionList:
if version != appVersionDescriptor:
filteredVersions.append(version.replace(appVersionDescriptor + ".", ""))
return filteredVersions
def addGuiResponder(self, classes):
from gtkgui.controller import GUIController
classes.append(GUIController)
def getReconnectSequence(self):
actions = [ self.reconnectConfig.getReconnectAction() ]
actions += [ self.getOriginalFilterer(), self.getTemporaryFilterer(), \
self.getTestComparator(), self.getFailureExplainer() ]
return actions
def getOriginalFilterer(self):
if not self.optionMap.has_key("ignorefilters"):
return rundependent.FilterOriginal(useFilteringStates=not self.batchMode())
def getTemporaryFilterer(self):
if not self.optionMap.has_key("ignorefilters"):
return rundependent.FilterTemporary(useFilteringStates=not self.batchMode())
def filterErrorText(self, app, errFile):
filterAction = rundependent.FilterErrorText()
return filterAction.getFilteredText(app, errFile, app)
def applyFiltering(self, test, fileName, version):
app = test.getAppForVersion(version)
filterAction = rundependent.FilterAction()
return filterAction.getFilteredText(test, fileName, app)
def getTestProcessor(self):
catalogueCreator = self.getCatalogueCreator()
ignoreCatalogues = self.shouldIgnoreCatalogues()
collator = self.getTestCollator()
from traffic import SetUpCaptureMockHandlers, TerminateCaptureMockHandlers
trafficSetup = SetUpCaptureMockHandlers(self.optionIntValue("rectraffic"))
trafficTerminator = TerminateCaptureMockHandlers()
return [ self.getExecHostFinder(), self.getWriteDirectoryMaker(), \
self.getWriteDirectoryPreparer(ignoreCatalogues), \
trafficSetup, catalogueCreator, collator, self.getOriginalFilterer(), self.getTestRunner(), \
trafficTerminator, catalogueCreator, collator, self.getTestEvaluator() ]
def isRecording(self):
return self.optionMap.has_key("record")
def shouldIgnoreCatalogues(self):
return self.optionMap.has_key("ignorecat") or self.isRecording()
def hasPerformance(self, app, perfType=""):
extractors = app.getConfigValue("performance_logfile_extractor")
if (perfType and extractors.has_key(perfType)) or (not perfType and len(extractors) > 0):
return True
else:
return app.hasAutomaticCputimeChecking()
def hasAutomaticCputimeChecking(self, app):
return len(app.getCompositeConfigValue("performance_test_machine", "cputime")) > 0
def getFilterFileDirectories(self, apps, useOwnTmpDir):
#
# - For each application, collect
# - temporary filter dir
# - all dirs in filter_file_directory
#
# Add these to a list. Never add the same dir twice. The first item will
# be the default save/open dir, and the others will be added as shortcuts.
#
dirs = []
for app in apps:
writeDir = app.writeDirectory if useOwnTmpDir else None
dirs += self._getFilterFileDirs(app, app.getDirectory(), writeDir)
return dirs
def _getFilterFileDirs(self, suiteOrApp, rootDir, writeDir=None):
dirs = []
appDirs = suiteOrApp.getConfigValue("filter_file_directory")
tmpDir = self.getTmpFilterDir(writeDir)
if tmpDir and tmpDir not in dirs:
dirs.append(tmpDir)
for dir in appDirs:
if os.path.isabs(dir) and os.path.isdir(dir):
if dir not in dirs:
dirs.append(dir)
else:
newDir = os.path.join(rootDir, dir)
if not newDir in dirs:
dirs.append(newDir)
return dirs
def getTmpFilterDir(self, writeDir):
cmdLineDir = self.optionValue("fd")
if cmdLineDir:
return os.path.normpath(cmdLineDir)
elif writeDir:
return os.path.join(writeDir, "temporary_filter_files")
def getFilterClasses(self):
return [ TestNameFilter, plugins.TestSelectionFilter, TestRelPathFilter,
performance.TimeFilter, performance.FastestFilter, performance.SlowestFilter,
plugins.ApplicationFilter, TestDescriptionFilter ]
def getAbsoluteFilterFileName(self, suite, filterFileName):
if os.path.isabs(filterFileName):
if os.path.isfile(filterFileName):
return filterFileName
else:
raise plugins.TextTestError, "Could not find filter file at '" + filterFileName + "'"
else:
dirsToSearchIn = self._getFilterFileDirs(suite, suite.app.getDirectory())
absName = suite.app.getFileName(dirsToSearchIn, filterFileName)
if absName:
return absName
else:
raise plugins.TextTestError, "No filter file named '" + filterFileName + "' found in :\n" + \
"\n".join(dirsToSearchIn)
def optionListValue(self, options, key):
if options.has_key(key):
return plugins.commasplit(options[key])
else:
return []
def findFilterFileNames(self, app, options, includeConfig):
names = self.optionListValue(options, "f") + self.optionListValue(options, "fintersect")
if includeConfig:
names += app.getConfigValue("default_filter_file")
batchSession = self.getBatchSessionForSelect(app)
if batchSession:
names += app.getCompositeConfigValue("batch_filter_file", batchSession)
return names
def findAllFilterFileNames(self, app, options, includeConfig):
return self.findFilterFileNames(app, options, includeConfig) + \
self.optionListValue(options, "funion") + self.optionListValue(options, "finverse")
def getFilterList(self, app, suites, options=None, **kw):
if options is None:
return self.filterFileMap.setdefault(app, self._getFilterList(app, self.optionMap, suites, includeConfig=True, **kw))
else:
return self._getFilterList(app, options, suites, includeConfig=False, **kw)
def checkFilterFileSanity(self, suite):
        # Check up front that all filter files named in the input actually exist, and throw if any cannot be found.
        # We do this here because we don't want to throw later, in a thread, at the point where the filters
        # are actually needed, should they turn out not to be sensible for some reason.
self._checkFilterFileSanity(suite, self.optionMap, includeConfig=True)
def _checkFilterFileSanity(self, suite, options, includeConfig=False):
for filterFileName in self.findAllFilterFileNames(suite.app, options, includeConfig):
optionFinder = self.makeOptionFinder(suite, filterFileName)
self._checkFilterFileSanity(suite, optionFinder)
def _getFilterList(self, app, options, suites, includeConfig, **kw):
filters = self.getFiltersFromMap(options, app, suites, **kw)
for filterFileName in self.findFilterFileNames(app, options, includeConfig):
filters += self.getFiltersFromFile(app, filterFileName, suites)
if self.isReconnecting():
filters.append(self.reconnectConfig.getFilter())
orFilterFiles = self.optionListValue(options, "funion")
if len(orFilterFiles) > 0:
orFilterLists = [ self.getFiltersFromFile(app, f, suites) for f in orFilterFiles ]
filters.append(OrFilter(orFilterLists))
notFilterFile = options.get("finverse")
if notFilterFile:
filters.append(NotFilter(self.getFiltersFromFile(app, notFilterFile, suites)))
return filters
def makeOptionFinder(self, *args):
absName = self.getAbsoluteFilterFileName(*args)
fileData = ",".join(plugins.readList(absName))
return plugins.OptionFinder(fileData.split(), defaultKey="t")
def getFiltersFromFile(self, app, filename, suites):
for suite in suites:
if suite.app is app:
optionFinder = self.makeOptionFinder(suite, filename)
return self._getFilterList(app, optionFinder, suites, includeConfig=False)
def getFiltersFromMap(self, optionMap, app, suites, **kw):
filters = []
for filterClass in self.getFilterClasses():
argument = optionMap.get(filterClass.option)
if argument:
filters.append(filterClass(argument, app, suites))
batchSession = self.getBatchSessionForSelect(app)
if batchSession:
timeLimit = app.getCompositeConfigValue("batch_timelimit", batchSession)
if timeLimit:
filters.append(performance.TimeFilter(timeLimit))
if optionMap.has_key("grep"):
grepFile = optionMap.get("grepfile", app.getConfigValue("log_file"))
filters.append(GrepFilter(optionMap["grep"], grepFile, **kw))
return filters
def batchMode(self):
return self.optionMap.has_key("b")
def keepTemporaryDirectories(self):
if "keeptmp" in self.optionMap:
return self.optionMap.get("keeptmp") != "0"
else:
return self.batchMode() and not self.isReconnecting()
def hasKeeptmpFlag(self):
return "keeptmp" in self.optionMap and self.optionMap.get("keeptmp") != "0"
def cleanPreviousTempDirs(self):
return self.batchMode() and not self.isReconnecting() and "keeptmp" not in self.optionMap
def cleanWriteDirectory(self, suite):
if self.removePreviousThread and self.removePreviousThread.isAlive():
plugins.log.info("Waiting for removal of previous write directories to complete...")
self.removePreviousThread.join()
Config.removePreviousThread = None
if not self.hasKeeptmpFlag():
self._cleanLocalWriteDirectory(suite)
if not self.keepTemporaryDirectories():
self._cleanWriteDirectory(suite)
machine, tmpDir = self.getRemoteTmpDirectory(suite.app)
if tmpDir:
self.cleanRemoteDir(suite.app, machine, tmpDir)
def cleanRemoteDir(self, app, machine, tmpDir):
self.runCommandOn(app, machine, [ "rm", "-rf", tmpDir ])
def _cleanWriteDirectory(self, suite):
if os.path.isdir(suite.app.writeDirectory):
plugins.rmtree(suite.app.writeDirectory)
def _cleanLocalWriteDirectory(self, suite):
if suite.app.localWriteDirectory != suite.app.writeDirectory and os.path.isdir(suite.app.localWriteDirectory):
plugins.rmtree(suite.app.localWriteDirectory)
def findRemotePreviousDirInfo(self, app):
machine, tmpDir = self.getRemoteTmpDirectory(app)
if tmpDir: # Ignore the datetime and the pid at the end
searchParts = tmpDir.split(".")[:-2] + ["*"]
fileArg = ".".join(searchParts)
return machine, fileArg
else:
return None, None
def cleanPreviousWriteDirs(self, previousWriteDirs):
for previousWriteDir in previousWriteDirs:
plugins.rmtree(previousWriteDir, attempts=3)
def makeWriteDirectory(self, app, subdir=None):
if not self.removePreviousThread and self.cleanPreviousTempDirs():
previousWriteDirs = self.findPreviousWriteDirs(app.writeDirectory)
machine, fileArg = self.findRemotePreviousDirInfo(app)
if fileArg:
plugins.log.info("Removing previous remote write directories on " + machine + " matching " + fileArg)
self.runCommandOn(app, machine, [ "rm", "-rf", fileArg ])
for previousWriteDir in previousWriteDirs:
plugins.log.info("Removing previous write directory " + previousWriteDir + " in background")
if previousWriteDirs:
thread = Thread(target=self.cleanPreviousWriteDirs, args=(previousWriteDirs,))
thread.start()
Config.removePreviousThread = thread
dirToMake = app.writeDirectory
if subdir:
dirToMake = os.path.join(app.writeDirectory, subdir)
plugins.ensureDirectoryExists(dirToMake)
app.diag.info("Made root directory at " + dirToMake)
return dirToMake
def findPreviousWriteDirs(self, writeDir):
previousWriteDirs = []
rootDir, basename = os.path.split(writeDir)
if os.path.isdir(rootDir):
# Ignore the datetime and the pid at the end
searchParts = basename.split(".")[:-2]
for file in os.listdir(rootDir):
fileParts = file.split(".")
if fileParts[:-2] == searchParts:
previousWriteDir = os.path.join(rootDir, file)
if os.path.isdir(previousWriteDir) and not plugins.samefile(previousWriteDir, writeDir):
previousWriteDirs.append(previousWriteDir)
return previousWriteDirs
def isReconnecting(self):
return self.optionMap.has_key("reconnect")
def getWriteDirectoryMaker(self):
return sandbox.MakeWriteDirectory()
def getExecHostFinder(self):
return sandbox.FindExecutionHosts()
def getWriteDirectoryPreparer(self, ignoreCatalogues):
return sandbox.PrepareWriteDirectory(ignoreCatalogues)
def getTestRunner(self):
return RunTest()
def getTestEvaluator(self):
return [ self.getFileExtractor(), self.getTemporaryFilterer(), self.getTestComparator(), self.getFailureExplainer() ]
def getFileExtractor(self):
return [ self.getPerformanceFileMaker(), self.getPerformanceExtractor() ]
def getCatalogueCreator(self):
return sandbox.CreateCatalogue()
def getTestCollator(self):
return sandbox.CollateFiles()
def getPerformanceExtractor(self):
return sandbox.ExtractPerformanceFiles(self.getMachineInfoFinder())
def getPerformanceFileMaker(self):
return sandbox.MakePerformanceFile(self.getMachineInfoFinder())
def executingOnPerformanceMachine(self, test):
infoFinder = self.getMachineInfoFinder()
infoFinder.setUpApplication(test.app)
return infoFinder.allMachinesTestPerformance(test, "cputime")
def getMachineInfoFinder(self):
return sandbox.MachineInfoFinder()
def getFailureExplainer(self):
from knownbugs import CheckForBugs, CheckForCrashes
return [ CheckForCrashes(), CheckForBugs() ]
def showExecHostsInFailures(self, app):
return self.batchMode() or app.getRunMachine() != "localhost"
def getTestComparator(self):
return comparetest.MakeComparisons(enableColor=self.optionMap.has_key("zen"))
def getStateSaver(self):
if self.batchMode() and not self.isReconnecting():
return batch.SaveState
elif self.keepTemporaryDirectories():
return SaveState
def getConfigEnvironment(self, test, allVars):
testEnvironmentCreator = self.getEnvironmentCreator(test)
return testEnvironmentCreator.getVariables(allVars)
def getEnvironmentCreator(self, test):
return sandbox.TestEnvironmentCreator(test, self.optionMap)
def getInteractiveReplayOptions(self):
return [ ("gui", "visible GUI") ]
def getTextResponder(self):
return console.InteractiveResponder
def getWebPageResponder(self):
return batch.WebPageResponder
# Utilities, which prove useful in many derived classes
def optionValue(self, option):
return self.optionMap.get(option, "")
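    # Interpret a command-line option as an integer: absent means 0, present without a value means 1,
    # otherwise the supplied value is converted to an int.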
def optionIntValue(self, option):
if self.optionMap.has_key(option):
value = self.optionMap.get(option)
return int(value) if value is not None else 1
else:
return 0
def ignoreExecutable(self):
return self.optionMap.has_key("s") or self.ignoreCheckout() or self.optionMap.has_key("coll") or self.optionMap.has_key("gx")
def ignoreCheckout(self):
return self.isReconnecting() # No use of checkouts has yet been thought up when reconnecting :)
def setUpCheckout(self, app):
return self.getGivenCheckoutPath(app) if not self.ignoreCheckout() else ""
def verifyCheckoutValid(self, app):
if not os.path.isabs(app.checkout):
raise plugins.TextTestError, "could not create absolute checkout from relative path '" + app.checkout + "'"
elif not os.path.isdir(app.checkout):
self.handleNonExistent(app.checkout, "checkout", app)
def checkCheckoutExists(self, app):
if not app.checkout:
return "" # Allow empty checkout, means no checkout is set, basically
try:
self.verifyCheckoutValid(app)
except plugins.TextTestError, e:
if self.ignoreExecutable():
plugins.printWarning(str(e), stdout=True)
return ""
else:
raise
def checkSanity(self, suite):
if not self.ignoreCheckout():
self.checkCheckoutExists(suite.app)
if not self.ignoreExecutable():
self.checkExecutableExists(suite)
self.checkFilterFileSanity(suite)
self.checkCaptureMockMigration(suite)
self.checkConfigSanity(suite.app)
batchSession = self.getBatchSessionForSelect(suite.app)
if self.optionMap.has_key("coll") and batchSession is None:
raise plugins.TextTestError, "Must provide '-b' argument to identify the batch session when running with '-coll' to collect batch run data"
if batchSession is not None and not self.optionMap.has_key("coll"):
batchFilter = batch.BatchVersionFilter(batchSession)
batchFilter.verifyVersions(suite.app)
if self.isReconnecting():
self.reconnectConfig.checkSanity(suite.app)
# side effects really from here on :(
if self.readsTestStateFiles():
# Reading stuff from stored pickle files, need to set up categories independently
self.setUpPerformanceCategories(suite.app)
def checkCaptureMockMigration(self, suite):
if (suite.getCompositeConfigValue("collect_traffic", "asynchronous") or \
suite.getConfigValue("collect_traffic_python")) and \
not self.optionMap.runScript():
raise plugins.TextTestError, "collect_traffic settings have been deprecated.\n" + \
"They have been replaced by using the CaptureMock program which is now separate from TextTest.\n" + \
"Please run with '-s traffic.ConvertToCaptureMock' and consult the migration notes at\n" + \
os.path.join(plugins.installationDir("doc"), "MigrationNotes_from_3.20") + "\n"
def readsTestStateFiles(self):
return self.isReconnecting() or self.optionMap.has_key("coll")
def setUpPerformanceCategories(self, app):
# We don't create these in the normal way, so we don't know what they are.
allCategories = app.getConfigValue("performance_descriptor_decrease").values() + \
app.getConfigValue("performance_descriptor_increase").values()
for cat in allCategories:
if cat:
plugins.addCategory(*plugins.commasplit(cat))
def checkExecutableExists(self, suite):
executable = suite.getConfigValue("executable")
if self.executableShouldBeFile(suite.app, executable) and not os.path.isfile(executable):
self.handleNonExistent(executable, "executable program", suite.app)
for interpreterStr in suite.getConfigValue("interpreters").values():
interpreter = plugins.splitcmd(interpreterStr)[0]
if os.path.isabs(interpreter) and not os.path.exists(interpreter):
self.handleNonExistent(interpreter, "interpreter program", suite.app)
def pathExistsRemotely(self, app, path, machine):
exitCode = self.runCommandOn(app, machine, [ "test", "-e", path ], collectExitCode=True)
return exitCode == 0
def checkConnection(self, app, machine):
self.runCommandAndCheckMachine(app, machine, [ "echo", "hello" ])
def handleNonExistent(self, path, desc, app):
message = "The " + desc + " '" + path + "' does not exist"
remoteCopy = app.getConfigValue("remote_copy_program")
if remoteCopy:
runMachine = app.getRunMachine()
if runMachine != "localhost":
if not self.pathExistsRemotely(app, path, runMachine):
self.checkConnection(app, runMachine) # throws if we can't get to it
raise plugins.TextTestError, message + ", either locally or on machine '" + runMachine + "'."
else:
raise plugins.TextTestError, message + "."
def getRemoteTmpDirectory(self, app):
remoteCopy = app.getConfigValue("remote_copy_program")
if remoteCopy:
runMachine = app.getRunMachine()
if runMachine != "localhost":
return runMachine, "${HOME}/.texttest/tmp/" + os.path.basename(app.writeDirectory)
return "localhost", None
def getRemoteTestTmpDir(self, test):
machine, appTmpDir = self.getRemoteTmpDirectory(test.app)
if appTmpDir:
return machine, os.path.join(appTmpDir, test.app.name + test.app.versionSuffix(), test.getRelPath())
else:
return machine, appTmpDir
def hasChanged(self, var, value):
return os.getenv(var) != value
def executableShouldBeFile(self, app, executable):
if os.path.isabs(executable):
return True
# If it's part of the data it will be available as a relative path anyway
if executable in app.getDataFileNames():
return False
# For finding java classes, don't warn if they don't exist as files...
if executable.endswith(".jar"):
return False
interpreters = app.getConfigValue("interpreters").values()
return all(("java" not in i and "jython" not in i for i in interpreters))
def checkConfigSanity(self, app):
for key in app.getConfigValue("collate_file"):
if "." in key or "/" in key:
raise plugins.TextTestError, "Cannot collate files to stem '" + key + "' - '.' and '/' characters are not allowed"
definitionFileStems = app.defFileStems()
definitionFileStems += [ stem + "." + app.name for stem in definitionFileStems ]
for dataFileName in app.getDataFileNames():
if dataFileName in definitionFileStems:
raise plugins.TextTestError, "Cannot name data files '" + dataFileName + \
"' - this name is reserved by TextTest for a particular kind of definition file.\n" + \
"Please adjust the naming in your config file."
def getGivenCheckoutPath(self, app):
if self.optionMap.has_key("c"):
extraVersions, extraCheckouts = self.getCheckoutExtraVersions(app)
for versionName, checkout in zip(extraVersions, extraCheckouts):
if versionName in app.versions:
return checkout
checkout = self.getCheckout(app)
return self.expandCheckout(checkout, app)
def expandCheckout(self, checkout, app):
if os.path.isabs(checkout):
return os.path.normpath(checkout)
checkoutLocations = app.getCompositeConfigValue("checkout_location", checkout, expandVars=False)
if len(checkoutLocations) > 0:
return self.makeAbsoluteCheckout(checkoutLocations, checkout, app)
else:
return checkout
def getCheckout(self, app):
if self.optionMap.has_key("c"):
return plugins.commasplit(self.optionMap["c"])[0]
# Under some circumstances infer checkout from batch session
batchSession = self.getBatchSession(app)
if batchSession and batchSession != "default" and \
app.getConfigValue("checkout_location").has_key(batchSession):
return batchSession
else:
return app.getConfigValue("default_checkout")
def makeAbsoluteCheckout(self, locations, checkout, app):
isSpecific = app.getConfigValue("checkout_location").has_key(checkout)
for location in locations:
fullCheckout = self.absCheckout(location, checkout, isSpecific)
if os.path.isdir(fullCheckout):
return fullCheckout
return self.absCheckout(locations[0], checkout, isSpecific)
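    # Expand one checkout location: "$TEXTTEST_CHECKOUT_NAME" in the location is substituted with the
    # checkout name, so e.g. "/repos/$TEXTTEST_CHECKOUT_NAME" with checkout "trunk" becomes "/repos/trunk".
    # Old-style default locations without that variable get the checkout name appended as a subdirectory,
    # unless the location was configured specifically for this checkout.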
def absCheckout(self, location, checkout, isSpecific):
locationWithName = Template(location).safe_substitute(TEXTTEST_CHECKOUT_NAME=checkout)
fullLocation = os.path.normpath(os.path.expanduser(os.path.expandvars(locationWithName)))
if isSpecific or "TEXTTEST_CHECKOUT_NAME" in location:
return fullLocation
else:
# old-style: infer expansion in default checkout
return os.path.join(fullLocation, checkout)
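    # Recompute a test's result: completed tests with results get their standard files re-filtered and all
    # comparisons recalculated, producing a new "recalculated" state; tests still running only fetch any
    # remote files and recompute the progress comparison.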
def recomputeProgress(self, test, state, observers):
if state.isComplete():
if state.hasResults():
state.recalculateStdFiles(test)
fileFilter = rundependent.FilterResultRecompute()
fileFilter(test)
state.recalculateComparisons(test)
newState = state.makeNewState(test, "recalculated")
test.changeState(newState)
else:
collator = test.app.getTestCollator()
collator.tryFetchRemoteFiles(test)
fileFilter = rundependent.FilterProgressRecompute()
fileFilter(test)
comparator = self.getTestComparator()
comparator.recomputeProgress(test, state, observers)
def getRunDescription(self, test):
return RunTest().getRunDescription(test)
def expandExternalEnvironment(self):
return True
# For display in the GUI
def extraReadFiles(self, testArg):
return {}
def printHelpScripts(self):
pass
def printHelpDescription(self):
print "The " + self.__class__.__module__ + " configuration is a published configuration. Consult the online documentation."
def printHelpOptions(self):
pass
def printHelpText(self):
self.printHelpDescription()
print "\nAdditional Command line options supported :"
print "-------------------------------------------"
self.printHelpOptions()
print "\nPython scripts: (as given to -s <module>.<class> [args])"
print "--------------------------------------------------------"
self.printHelpScripts()
def getDefaultMailAddress(self):
user = os.getenv("USER", "$USER")
return user + "@localhost"
def getDefaultTestOverviewColours(self):
colours = {}
for wkday in plugins.weekdays:
colours["run_" + wkday + "_fg"] = "black"
colours["column_header_bg"] = "gray1"
colours["changes_header_bg"] = "#E2E2FF"
colours["row_header_bg"] = "#FFFFCC"
colours["performance_fg"] = "red6"
colours["memory_bg"] = "pink"
colours["success_bg"] = "#CEEFBD"
colours["failure_bg"] = "#FF3118"
colours["knownbug_bg"] = "#FF9900"
colours["incomplete_bg"] = "#8B1A1A"
colours["no_results_bg"] = "gray2"
colours["performance_bg"] = "#FFC6A5"
colours["test_default_fg"] = "black"
return colours
def getDefaultPageName(self, app):
pageName = app.fullName()
fullVersion = app.getFullVersion()
if fullVersion:
pageName += " - version " + fullVersion
return pageName
def getDefaultCollectCompulsoryVersions(self):
return { "default" : [] }
def setBatchDefaults(self, app):
# Batch values. Maps from session name to values
app.setConfigDefault("smtp_server", "localhost", "Server to use for sending mail in batch mode")
app.setConfigDefault("smtp_server_username", "", "Username for SMTP authentication when sending mail in batch mode")
app.setConfigDefault("smtp_server_password", "", "Password for SMTP authentication when sending mail in batch mode")
app.setConfigDefault("batch_result_repository", { "default" : "" }, "Directory to store historical batch results under")
app.setConfigDefault("file_to_url", {}, "Mapping of file locations to URLS, for linking to HTML reports")
app.setConfigDefault("historical_report_location", { "default" : "" }, "Directory to create reports on historical batch data under")
app.setConfigDefault("historical_report_page_name", { "default" : self.getDefaultPageName(app) }, "Header for page on which this application should appear")
app.setConfigDefault("historical_report_colours", self.getDefaultTestOverviewColours(), "Colours to use for historical batch HTML reports")
app.setConfigDefault("historical_report_subpages", { "default" : [ "Last six runs" ]}, "Names of subselection pages to generate as part of historical report")
app.setConfigDefault("historical_report_subpage_cutoff", { "default" : 100000, "Last six runs" : 6 }, "How many runs should the subpage show, starting from the most recent?")
app.setConfigDefault("historical_report_subpage_weekdays", { "default" : [] }, "Which weekdays should the subpage apply to (empty implies all)?")
app.setConfigDefault("historical_report_resource_pages", { "default": [ "" ] }, "Which performance/memory pages should be generated by default on running -coll")
app.setConfigDefault("historical_report_resource_page_tables", { "default": []}, "Resource names to generate the tables for the relevant performance/memory pages")
app.setConfigDefault("historical_report_piechart_summary", { "default": "false" }, "Generate pie chart summary page rather than default HTML tables.")
app.setConfigDefault("batch_sender", { "default" : self.getDefaultMailAddress() }, "Sender address to use sending mail in batch mode")
app.setConfigDefault("batch_recipients", { "default" : "" }, "Comma-separated addresses to send mail to in batch mode")
app.setConfigDefault("batch_timelimit", { "default" : "" }, "Maximum length of test to include in batch mode runs")
app.setConfigDefault("batch_filter_file", { "default" : [] }, "Generic filter for batch session, more flexible than timelimit")
app.setConfigDefault("batch_use_collection", { "default" : "false" }, "Do we collect multiple mails into one in batch mode")
app.setConfigDefault("batch_junit_format", { "default" : "false" }, "Do we write out results in junit format in batch mode")
app.setConfigDefault("batch_junit_folder", { "default" : "" }, "Which folder to write test results in junit format in batch mode. Only useful together with batch_junit_format")
app.setConfigDefault("batch_collect_max_age_days", { "default" : 100000 }, "When collecting multiple messages, what is the maximum age of run that we should accept?")
app.setConfigDefault("batch_collect_compulsory_version", self.getDefaultCollectCompulsoryVersions(), "When collecting multiple messages, which versions should be expected and give an error if not present?")
app.setConfigDefault("batch_mail_on_failure_only", { "default" : "false" }, "Send mails only if at least one test fails")
app.setConfigDefault("batch_use_version_filtering", { "default" : "false" }, "Which batch sessions use the version filtering mechanism")
app.setConfigDefault("batch_version", { "default" : [] }, "List of versions to allow if batch_use_version_filtering enabled")
app.setConfigAlias("testoverview_colours", "historical_report_colours")
def setPerformanceDefaults(self, app):
# Performance values
app.setConfigDefault("cputime_include_system_time", 0, "Include system time when measuring CPU time?")
app.setConfigDefault("default_performance_stem", "performance", "Which performance statistic to use when selecting tests by performance, placing performance in Junit XML reports etc")
app.setConfigDefault("performance_logfile", { "default" : [] }, "Which result file to collect performance data from")
app.setConfigDefault("performance_logfile_extractor", {}, "What string to look for when collecting performance data")
app.setConfigDefault("performance_test_machine", { "default" : [], "*mem*" : [ "any" ] }, \
"List of machines where performance can be collected")
app.setConfigDefault("performance_variation_%", { "default" : 10.0 }, "How much variation in performance is allowed")
app.setConfigDefault("performance_variation_serious_%", { "default" : 0.0 }, "Additional cutoff to performance_variation_% for extra highlighting")
app.setConfigDefault("use_normalised_percentage_change", { "default" : "true" }, \
"Do we interpret performance percentage changes as normalised (symmetric) values?")
app.setConfigDefault("performance_test_minimum", { "default" : 0.0 }, \
"Minimum time/memory to be consumed before data is collected")
app.setConfigDefault("performance_descriptor_decrease", self.defaultPerfDecreaseDescriptors(), "Descriptions to be used when the numbers decrease in a performance file")
app.setConfigDefault("performance_descriptor_increase", self.defaultPerfIncreaseDescriptors(), "Descriptions to be used when the numbers increase in a performance file")
app.setConfigDefault("performance_unit", self.defaultPerfUnits(), "Name to be used to identify the units in a performance file")
app.setConfigDefault("performance_ignore_improvements", { "default" : "false" }, "Should we ignore all improvements in performance?")
app.setConfigAlias("performance_use_normalised_%", "use_normalised_percentage_change")
app.setConfigAlias("batch_junit_performance", "default_performance_stem")
def setUsecaseDefaults(self, app):
app.setConfigDefault("use_case_record_mode", "disabled", "Mode for Use-case recording (GUI, console or disabled)")
app.setConfigDefault("use_case_recorder", "", "Which Use-case recorder is being used")
app.setConfigDefault("virtual_display_machine", [ "localhost" ], \
"(UNIX) List of machines to run virtual display server (Xvfb) on")
app.setConfigDefault("virtual_display_count", 1, \
"(UNIX) Number of virtual display server (Xvfb) instances to run, if enabled")
app.setConfigDefault("virtual_display_extra_args", "", \
"(UNIX) Extra arguments (e.g. bitdepth) to supply to virtual display server (Xvfb)")
app.setConfigDefault("virtual_display_hide_windows", "true", "(Windows) Whether to emulate the virtual display handling on Windows by hiding the SUT's windows")
def defaultPerfUnits(self):
units = {}
units["default"] = "seconds"
units["*mem*"] = "MB"
return units
def defaultPerfDecreaseDescriptors(self):
descriptors = {}
descriptors["default"] = ""
descriptors["memory"] = "smaller, memory-, used less memory"
descriptors["cputime"] = "faster, faster, ran faster"
return descriptors
def defaultPerfIncreaseDescriptors(self):
descriptors = {}
descriptors["default"] = ""
descriptors["memory"] = "larger, memory+, used more memory"
descriptors["cputime"] = "slower, slower, ran slower"
return descriptors
def defaultSeverities(self):
severities = {}
severities["errors"] = 1
severities["output"] = 1
severities["stderr"] = 1
severities["stdout"] = 1
severities["usecase"] = 1
severities["performance"] = 2
severities["catalogue"] = 2
severities["default"] = 99
return severities
def defaultDisplayPriorities(self):
prios = {}
prios["default"] = 99
return prios
def getDefaultCollations(self):
if os.name == "posix":
return { "stacktrace" : [ "core*" ] }
else:
return { "" : [] }
def getDefaultCollateScripts(self):
if os.name == "posix":
return { "default" : [], "stacktrace" : [ "interpretcore.py" ] }
else:
return { "default" : [] }
def getStdoutName(self, namingScheme):
if namingScheme == "classic":
return "output"
else:
return "stdout"
def getStderrName(self, namingScheme):
if namingScheme == "classic":
return "errors"
else:
return "stderr"
def getStdinName(self, namingScheme):
if namingScheme == "classic":
return "input"
else:
return "stdin"
def setComparisonDefaults(self, app, homeOS, namingScheme):
app.setConfigDefault("log_file", self.getStdoutName(namingScheme), "Result file to search, by default")
app.setConfigDefault("failure_severity", self.defaultSeverities(), \
"Mapping of result files to how serious diffs in them are")
app.setConfigDefault("failure_display_priority", self.defaultDisplayPriorities(), \
"Mapping of result files to which order they should be shown in the text info window.")
app.setConfigDefault("floating_point_tolerance", { "default" : 0.0 }, "Which tolerance to apply when comparing floating point values in output")
app.setConfigDefault("relative_float_tolerance", { "default" : 0.0 }, "Which relative tolerance to apply when comparing floating point values")
app.setConfigDefault("collate_file", self.getDefaultCollations(), "Mapping of result file names to paths to collect them from")
app.setConfigDefault("collate_script", self.getDefaultCollateScripts(), "Mapping of result file names to scripts which turn them into suitable text")
trafficText = "Deprecated. Use CaptureMock."
app.setConfigDefault("collect_traffic", { "default": [], "asynchronous": [] }, trafficText)
app.setConfigDefault("collect_traffic_environment", { "default" : [] }, trafficText)
app.setConfigDefault("collect_traffic_python", [], trafficText)
app.setConfigDefault("collect_traffic_python_ignore_callers", [], trafficText)
app.setConfigDefault("collect_traffic_use_threads", "true", trafficText)
app.setConfigDefault("collect_traffic_client_server", "false", trafficText)
app.setConfigDefault("run_dependent_text", { "default" : [] }, "Mapping of patterns to remove from result files")
app.setConfigDefault("unordered_text", { "default" : [] }, "Mapping of patterns to extract and sort from result files")
app.setConfigDefault("file_split_pattern", {}, "Pattern to use for splitting result files")
app.setConfigDefault("create_catalogues", "false", "Do we create a listing of files created/removed by tests")
app.setConfigDefault("catalogue_process_string", "", "String for catalogue functionality to identify processes created")
app.setConfigDefault("binary_file", [], "Which output files are known to be binary, and hence should not be shown/diffed?")
app.setConfigDefault("discard_file", [], "List of generated result files which should not be compared")
app.setConfigDefault("discard_file_text", { "default" : [] }, "List of generated result files which should not be compared if they contain the given patterns")
rectrafficValue = self.optionIntValue("rectraffic")
if rectrafficValue == 1:
# Re-record everything. Don't use this when only recording additional new stuff
# Should possibly have some way to configure this
app.addConfigEntry("implied", "rectraffic", "base_version")
if self.isRecording():
app.addConfigEntry("implied", "recusecase", "base_version")
if homeOS != "any" and homeOS != os.name:
app.addConfigEntry("implied", os.name, "base_version")
app.setConfigAlias("collect_traffic_py_module", "collect_traffic_python")
def defaultViewProgram(self, homeOS):
if os.name == "posix":
return "emacs"
else:
if homeOS == "posix":
# Notepad cannot handle UNIX line-endings: for cross platform suites use wordpad by default...
return "wordpad"
else:
return "notepad"
def defaultFollowProgram(self):
if os.name == "posix":
return "xterm -bg white -T $TEXTTEST_FOLLOW_FILE_TITLE -e tail -f"
else:
return "baretail"
def setExternalToolDefaults(self, app, homeOS):
app.setConfigDefault("text_diff_program", "diff", \
"External program to use for textual comparison of files")
app.setConfigDefault("lines_of_text_difference", 30, "How many lines to present in textual previews of file diffs")
app.setConfigDefault("max_width_text_difference", 500, "How wide lines can be in textual previews of file diffs")
app.setConfigDefault("max_file_size", { "default": "-1" }, "The maximum file size to load into external programs, in bytes. -1 means no limit.")
app.setConfigDefault("text_diff_program_filters", { "default" : [], "diff" : [ "^<", "^>" ]}, "Filters that should be applied for particular diff tools to aid with grouping in dynamic GUI")
app.setConfigDefault("diff_program", { "default": "tkdiff" }, "External program to use for graphical file comparison")
app.setConfigDefault("view_program", { "default": self.defaultViewProgram(homeOS) }, \
"External program(s) to use for viewing and editing text files")
app.setConfigDefault("view_file_on_remote_machine", { "default" : 0 }, "Do we try to start viewing programs on the test execution machine?")
app.setConfigDefault("follow_program", { "default": self.defaultFollowProgram() }, "External program to use for following progress of a file")
app.setConfigDefault("follow_file_by_default", 0, "When double-clicking running files, should we follow progress or just view them?")
app.setConfigDefault("bug_system_location", {}, "The location of the bug system we wish to extract failure information from.")
app.setConfigDefault("bug_system_username", {}, "Username to use when logging in to bug systems defined in bug_system_location")
app.setConfigDefault("bug_system_password", {}, "Password to use when logging in to bug systems defined in bug_system_location")
app.setConfigDefault("batch_jenkins_marked_artefacts", { "default": [] }, "Artefacts to highlight in the report when they are updated")
app.setConfigDefault("batch_jenkins_archive_file_pattern", { "default": "" }, "Path to the built files in the archive, in case Jenkins fingerprints need double-checking")
app.setConfigAlias("text_diff_program_max_file_size", "max_file_size")
def setInterfaceDefaults(self, app):
app.setConfigDefault("default_interface", "static_gui", "Which interface to start if none of -con, -g and -gx are provided")
# These configure the GUI but tend to have sensible defaults per application
app.setConfigDefault("gui_entry_overrides", { "default" : "<not set>" }, "Default settings for entries in the GUI")
app.setConfigDefault("gui_entry_options", { "default" : [] }, "Default drop-down box options for GUI entries")
app.setConfigDefault("suppress_stderr_text", [], "List of patterns which, if written on TextTest's own stderr, should not be propagated to popups and further logfiles")
app.setConfigAlias("suppress_stderr_popup", "suppress_stderr_text")
def getDefaultRemoteProgramOptions(self):
# The aim is to ensure they never hang, but always return errors if contact not possible
# Disable warnings, they get in the way of output
# Disable passwords: only use public key based authentication.
# Also disable hostkey checking, we assume we don't run tests on untrusted hosts.
# Also don't run tests on machines which take a very long time to connect to...
sshOptions = "-o StrictHostKeyChecking=no -o BatchMode=yes -o ConnectTimeout=10"
rsyncExcludeFile = plugins.installationPath("etc", "rsync_exclude_patterns")
return { "default": "", "ssh" : "-q " + sshOptions,
"rsync" : "-e 'ssh " + sshOptions + "' -av --copy-unsafe-links --exclude-from=" + rsyncExcludeFile, "scp": "-Crp " + sshOptions }
def getCommandArgsOn(self, app, machine, cmdArgs, graphical=False, agentForwarding=False):
if machine == "localhost":
return cmdArgs
else:
args = self.getRemoteProgramArgs(app, "remote_shell_program")
if graphical and args[0] == "ssh":
args.append("-Y")
if agentForwarding and args[0] == "ssh":
args.append("-A")
args.append(machine)
if graphical and args[0] == "rsh":
args += [ "env", "DISPLAY=" + self.getFullDisplay() ]
args += cmdArgs
if graphical:
remoteTmp = app.getRemoteTmpDirectory()[1]
if remoteTmp:
args[-1] = args[-1].replace(app.writeDirectory, remoteTmp)
for i in range(len(args)):
# Remote shells cause spaces etc to be interpreted several times
args[i] = args[i].replace(" ", "\ ")
return args
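    # Illustrative note (not from the original source): for machine="remotehost",
    # cmdArgs=["texttest", "-b", "nightly"], graphical=True and an "ssh" remote shell,
    # this typically builds something like
    #   ["ssh", "-q", "-o", "StrictHostKeyChecking=no", ..., "-Y", "remotehost",
    #    "texttest", "-b", "nightly"]
    # with embedded spaces escaped, because the extra shell layer re-interprets them.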
def getFullDisplay(self):
display = os.getenv("DISPLAY", "")
hostaddr = plugins.gethostname()
if display.startswith(":"):
return hostaddr + display
else:
return display.replace("localhost", hostaddr)
def runCommandOn(self, app, machine, cmdArgs, collectExitCode=False):
allArgs = self.getCommandArgsOn(app, machine, cmdArgs)
if allArgs[0] == "rsh" and collectExitCode:
searchStr = "remote cmd succeeded"
# Funny tricks here because rsh does not forward the exit status of the program it runs
allArgs += [ "&&", "echo", searchStr ]
diag = logging.getLogger("remote commands")
proc = subprocess.Popen(allArgs, stdin=open(os.devnull), stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
output = proc.communicate()[0]
diag.info("Running remote command " + repr(allArgs) + ", output was:\n" + output)
return searchStr not in output # Return an "exit code" which is 0 when we succeed!
else:
return subprocess.call(allArgs, stdin=open(os.devnull), stdout=open(os.devnull, "w"), stderr=subprocess.STDOUT)
def runCommandAndCheckMachine(self, app, machine, cmdArgs):
allArgs = self.getCommandArgsOn(app, machine, cmdArgs)
proc = subprocess.Popen(allArgs, stdin=open(os.devnull), stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
output = proc.communicate()[0]
exitCode = proc.returncode
if exitCode > 0:
raise plugins.TextTestError, "Unable to contact machine '" + machine + \
"'.\nMake sure you have passwordless access set up correctly. The failing command was:\n" + \
" ".join(allArgs) + "\n\nThe command produced the following output:\n" + output.strip()
def ensureRemoteDirExists(self, app, machine, *dirnames):
quotedDirs = map(plugins.quote, dirnames)
self.runCommandAndCheckMachine(app, machine, [ "mkdir", "-p" ] + quotedDirs)
@staticmethod
def getRemotePath(fileName, machine):
if machine == "localhost":
# Right now the only way we can run remote execution on a Windows system is using Cygwin
# Remote copy programs like 'scp' assume that colons separate hostnames and so don't work
# on classic Windows paths.
# Assume for now that we can convert it to a Cygwin path.
drive, tail = os.path.splitdrive(fileName)
if drive:
cygwinDrive = '/cygdrive/' + drive[0].lower()
return cygwinDrive + tail
else:
return fileName
else:
return machine + ":" + plugins.quote(fileName)
def copyFileRemotely(self, app, srcFile, srcMachine, dstFile, dstMachine):
srcPath = self.getRemotePath(srcFile, srcMachine)
dstPath = self.getRemotePath(dstFile, dstMachine)
args = self.getRemoteProgramArgs(app, "remote_copy_program") + [ srcPath, dstPath ]
return subprocess.call(args, stdin=open(os.devnull), stdout=open(os.devnull, "w"), stderr=subprocess.STDOUT)
def getRemoteProgramArgs(self, app, setting):
progStr = app.getConfigValue(setting)
progArgs = plugins.splitcmd(progStr)
argStr = app.getCompositeConfigValue("remote_program_options", progArgs[0])
return progArgs + plugins.splitcmd(argStr)
def setMiscDefaults(self, app, namingScheme):
app.setConfigDefault("default_texttest_tmp", "$TEXTTEST_PERSONAL_CONFIG/tmp", "Default value for $TEXTTEST_TMP, if it is not set")
app.setConfigDefault("default_texttest_local_tmp", "", "Default value for $TEXTTEST_LOCAL_TMP, if it is not set")
app.setConfigDefault("checkout_location", { "default" : []}, "Absolute paths to look for checkouts under")
app.setConfigDefault("default_checkout", "", "Default checkout, relative to the checkout location")
app.setConfigDefault("remote_shell_program", "ssh", "Program to use for running commands remotely")
app.setConfigDefault("remote_program_options", self.getDefaultRemoteProgramOptions(), "Default options to use for particular remote shell programs")
app.setConfigDefault("remote_copy_program", "", "(UNIX) Program to use for copying files remotely, in case of non-shared file systems")
app.setConfigDefault("default_filter_file", [], "Filter file to use by default, generally only useful for versions")
app.setConfigDefault("test_data_environment", {}, "Environment variables to be redirected for linked/copied test data")
app.setConfigDefault("filter_file_directory", [ "filter_files" ], "Default directories for test filter files, relative to an application directory.")
app.setConfigDefault("extra_version", [], "Versions to be run in addition to the one specified")
app.setConfigDefault("batch_extra_version", { "default" : [] }, "Versions to be run in addition to the one specified, for particular batch sessions")
app.setConfigDefault("save_filtered_file_stems", [], "Files where the filtered version should be saved rather than the SUT output")
# Applies to any interface...
app.setConfigDefault("auto_sort_test_suites", 0, "Automatically sort test suites in alphabetical order. 1 means sort in ascending order, -1 means sort in descending order.")
app.setConfigDefault("extra_test_process_postfix", [], "Postfixes to use on ordinary files to denote an additional run of the SUT to be triggered")
app.addConfigEntry("builtin", "options", "definition_file_stems")
app.addConfigEntry("regenerate", "usecase", "definition_file_stems")
app.addConfigEntry("builtin", self.getStdinName(namingScheme), "definition_file_stems")
app.addConfigEntry("builtin", "knownbugs", "definition_file_stems")
app.setConfigAlias("test_list_files_directory", "filter_file_directory")
def setApplicationDefaults(self, app):
homeOS = app.getConfigValue("home_operating_system")
namingScheme = app.getConfigValue("filename_convention_scheme")
self.setComparisonDefaults(app, homeOS, namingScheme)
self.setExternalToolDefaults(app, homeOS)
self.setInterfaceDefaults(app)
self.setMiscDefaults(app, namingScheme)
self.setBatchDefaults(app)
self.setPerformanceDefaults(app)
self.setUsecaseDefaults(app)
def setDependentConfigDefaults(self, app):
# For setting up configuration where the config file needs to have been read first
# Should return True if it does anything that could cause new config files to be found
interpreters = app.getConfigValue("interpreters").values()
if any(("python" in i or "storytext" in i for i in interpreters)):
app.addConfigEntry("default", "testcustomize.py", "definition_file_stems")
extraPostfixes = app.getConfigValue("extra_test_process_postfix")
for interpreterName in app.getConfigValue("interpreters").keys():
stem = interpreterName + "_options"
app.addConfigEntry("builtin", stem, "definition_file_stems")
for postfix in extraPostfixes:
app.addConfigEntry("builtin", stem + postfix, "definition_file_stems")
namingScheme = app.getConfigValue("filename_convention_scheme")
for postfix in extraPostfixes:
app.addConfigEntry("builtin", "options" + postfix, "definition_file_stems")
app.addConfigEntry("regenerate", "usecase" + postfix, "definition_file_stems")
app.addConfigEntry("builtin", self.getStdinName(namingScheme) + postfix, "definition_file_stems")
if app.getConfigValue("use_case_record_mode") == "GUI" and \
app.getConfigValue("use_case_recorder") in [ "", "storytext" ] and \
not any(("usecase" in k for k in app.getConfigValue("view_program"))):
app.addConfigEntry("*usecase*", "storytext_editor", "view_program")
return False
class SaveState(plugins.Responder):
def notifyComplete(self, test):
if test.state.isComplete(): # might look weird but this notification also comes in scripts etc.
test.saveState()
class OrFilter(plugins.Filter):
def __init__(self, filterLists):
self.filterLists = filterLists
def accepts(self, test):
return reduce(operator.or_, (test.isAcceptedByAll(filters, checkContents=False) for filters in self.filterLists), False)
def acceptsTestCase(self, test):
return self.accepts(test)
def acceptsTestSuite(self, suite):
return self.accepts(suite)
def acceptsTestSuiteContents(self, suite):
return reduce(operator.or_, (self.contentsAccepted(suite, filters) for filters in self.filterLists), False)
def contentsAccepted(self, suite, filters):
return reduce(operator.and_, (filter.acceptsTestSuiteContents(suite) for filter in filters), True)
class NotFilter(plugins.Filter):
def __init__(self, filters):
self.filters = filters
def acceptsTestCase(self, test):
return not test.isAcceptedByAll(self.filters)
class TestNameFilter(plugins.TextFilter):
option = "t"
def acceptsTestCase(self, test):
return self.stringContainsText(test.name)
class TestRelPathFilter(plugins.TextFilter):
option = "ts"
def parseInput(self, filterText, *args):
# Handle paths pasted from web page
return [ text.replace(" ", "/") for text in plugins.commasplit(filterText) ]
def acceptsTestCase(self, test):
return self.stringContainsText(test.getRelPath())
class GrepFilter(plugins.TextFilter):
def __init__(self, filterText, fileStem, useTmpFiles=False):
plugins.TextFilter.__init__(self, filterText)
self.fileStem = fileStem
self.useTmpFiles = useTmpFiles
def acceptsTestCase(self, test):
if self.fileStem == "free_text":
return self.stringContainsText(test.state.freeText)
for logFile in self.findAllFiles(test):
if self.matches(logFile):
return True
return False
def findAllFiles(self, test):
if self.useTmpFiles:
files = []
try:
for comparison in test.state.allResults:
if comparison.tmpFile and fnmatch(comparison.stem, self.fileStem) and os.path.isfile(comparison.tmpFile):
files.append(comparison.tmpFile)
return files
except AttributeError:
return []
else:
return self.findAllStdFiles(test)
def findAllStdFiles(self, test):
logFiles = []
for fileName in test.getFileNamesMatching(self.fileStem):
if os.path.isfile(fileName):
logFiles.append(fileName)
else:
test.refreshFiles()
return self.findAllStdFiles(test)
return logFiles
def matches(self, logFile):
for line in open(logFile).xreadlines():
if self.stringContainsText(line):
return True
return False
class TestDescriptionFilter(plugins.TextFilter):
option = "desc"
def acceptsTestCase(self, test):
return self.stringContainsText(test.description)
| 52.876982 | 316 | 0.651193 | 82,914 | 0.994328 | 0 | 0 | 707 | 0.008479 | 0 | 0 | 24,008 | 0.287911 |
dfc3450cc6a455bca7329de3130cbc552b8baa62 | 747 | py | Python | 2019/10 October/dp10302019.py | vishrutkmr7/DailyPracticeProblemsDIP | d1bfbc75f2024736c22c05385f753a90ddcfa0f5 | [
"MIT"
] | 5 | 2019-08-06T02:34:41.000Z | 2022-01-08T03:03:16.000Z | 2019/10 October/dp10302019.py | ourangzeb/DailyPracticeProblemsDIP | 66c07af88754e5d59b243e3ee9f02db69f7c0a77 | [
"MIT"
] | 15 | 2021-06-01T14:04:16.000Z | 2022-03-08T21:17:22.000Z | 2019/10 October/dp10302019.py | ourangzeb/DailyPracticeProblemsDIP | 66c07af88754e5d59b243e3ee9f02db69f7c0a77 | [
"MIT"
] | 4 | 2019-09-19T20:00:05.000Z | 2021-08-16T11:31:51.000Z | # This problem was recently asked by LinkedIn:
# Given a non-empty array where each element represents a digit of a non-negative integer, add one to the integer.
# The most significant digit is at the front of the array and each element in the array contains only one digit.
# Furthermore, the integer does not have leading zeros, except in the case of the number '0'.
class Solution:
def plusOne(self, digits):
        # Build the number from its digits, add one, then split back into a digit list.
num = ""
for i in range(0, len(digits)):
num = num + str(digits[i])
sol = int(num) + 1
sol = list(str(sol))
for j in range(0, len(sol)):
sol[j] = int(sol[j])
return sol
num = [2, 9, 9]
print(Solution().plusOne(num))
# [3, 0, 0]
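# A minimal alternative sketch (not part of the original solution): the same
# result via the usual right-to-left carry, without converting through strings.
def plus_one_carry(digits):
    result = list(digits)
    for i in range(len(result) - 1, -1, -1):
        if result[i] < 9:
            result[i] += 1
            return result
        result[i] = 0  # 9 rolls over to 0 and the carry moves left
    return [1] + result  # all digits were 9, e.g. [9, 9] -> [1, 0, 0]
print(plus_one_carry([2, 9, 9]))
# [3, 0, 0]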
| 28.730769 | 114 | 0.619813 | 313 | 0.419009 | 0 | 0 | 0 | 0 | 0 | 0 | 393 | 0.526104 |
dfc40c993839966190091cb6ae4333cb9d7b2cc3 | 1,122 | py | Python | kbr/run_utils.py | brugger/kbr-tools | 95c8f8274e28b986e7fd91c8404026433488c940 | [
"MIT"
] | 1 | 2021-02-02T09:47:40.000Z | 2021-02-02T09:47:40.000Z | kbr/run_utils.py | brugger/kbr-tools | 95c8f8274e28b986e7fd91c8404026433488c940 | [
"MIT"
] | 1 | 2021-08-04T13:00:00.000Z | 2021-08-04T13:00:00.000Z | kbr/run_utils.py | brugger/kbr-tools | 95c8f8274e28b986e7fd91c8404026433488c940 | [
"MIT"
] | null | null | null | import subprocess
import sys
import os
class ExecutionInfo:
def __init__(self, p_status: int, stdout: str, stderr: str):
self.p_status = p_status
self.stdout = stdout
self.stderr = stderr
def exit_fail(msg: str = "") -> None:
print(msg)
sys.exit(-1)
def exit_ok(msg: str = "") -> None:
print(msg)
sys.exit(0)
def launch_cmd(cmd: str, cwd: str = "", use_shell_env:bool=False) -> ExecutionInfo:
effective_command = cmd
d = None
if use_shell_env:
d = dict(os.environ)
if cwd == '':
p = subprocess.Popen(effective_command, stdout=subprocess.PIPE, shell=True, stderr=subprocess.PIPE, env=d)
else:
p = subprocess.Popen(effective_command, stdout=subprocess.PIPE, shell=True, stderr=subprocess.PIPE, cwd=cwd, env=d)
stdout, stderr = p.communicate()
p_status = p.wait()
return ExecutionInfo(p_status, stdout, stderr)
def print_outputs(e:ExecutionInfo) -> None:
if e.stdout != b'':
print(e.stdout.decode('utf-8').rstrip("\n"))
if e.stderr != b'':
print(e.stderr.decode('utf-8').rstrip("\n"))
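# Hedged usage sketch (not part of the original module): run a command and
# surface its output and exit status with the helpers above.
if __name__ == "__main__":
    info = launch_cmd("echo hello", use_shell_env=True)
    print_outputs(info)
    if info.p_status != 0:
        exit_fail("command failed with status %d" % info.p_status)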
| 23.375 | 123 | 0.632799 | 176 | 0.156863 | 0 | 0 | 0 | 0 | 0 | 0 | 36 | 0.032086 |
dfc40d4989f8ef494b36888ba91588827d76ffc5 | 2,614 | py | Python | tests/client/test_files.py | philopon/datapane | d7d69865d4def0cbe6eb334acd9edeb829dd67e6 | [
"Apache-2.0"
] | 481 | 2020-04-25T05:40:21.000Z | 2022-03-30T22:04:35.000Z | tests/client/test_files.py | tig/datapane | defae6776e73b07191c0a5804a50b284ec3c9a63 | [
"Apache-2.0"
] | 74 | 2020-04-28T10:47:35.000Z | 2022-03-14T15:50:55.000Z | tests/client/test_files.py | admariner/datapane | c440eaf07bd1c1f2de3ff952e0fd8c78d636aa8f | [
"Apache-2.0"
] | 41 | 2020-07-21T16:30:21.000Z | 2022-02-21T22:50:27.000Z | from pathlib import Path
import altair as alt
import folium
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import plotly.graph_objects as p_go
import pytest
from bokeh.layouts import column
from bokeh.models import ColumnDataSource
from bokeh.plotting import figure
from pandas.io.formats.style import Styler
from datapane.client.api.files import save
data = pd.DataFrame({"x": np.random.randn(20), "y": np.random.randn(20)})
def test_save_base(tmp_path: Path, monkeypatch):
# absolute filename tests
# test with no filename
save(data)
save(data)
# relative filename tests
monkeypatch.chdir(tmp_path)
save(data)
def test_save_matplotlib(tmp_path: Path):
pd.set_option("plotting.backend", "matplotlib")
fig, ax = plt.subplots()
data.plot.scatter("x", "y", ax=ax)
# test svg default
save(fig)
# test save axes only
save(ax)
# test save ndarray
save(data.hist())
def test_save_bokeh(tmp_path: Path):
source = ColumnDataSource(data)
p = figure()
p.circle(x="x", y="y", source=source)
f = save(p)
assert f.mime == "application/vnd.bokeh.show+json"
def test_save_bokeh_layout(tmp_path: Path):
source = ColumnDataSource(data)
p = figure()
p.circle(x="x", y="y", source=source)
f = save(column(p, p))
assert f.mime == "application/vnd.bokeh.show+json"
def test_save_altair(tmp_path: Path):
plot = alt.Chart(data).mark_bar().encode(y="y", x="x")
save(plot)
def test_save_folium(tmp_path: Path):
map = folium.Map(location=[45.372, -121.6972], zoom_start=12, tiles="Stamen Terrain")
save(map)
def test_save_plotly(tmp_path: Path):
fig = p_go.Figure()
fig.add_trace(p_go.Scatter(x=[0, 1, 2, 3, 4, 5], y=[1.5, 1, 1.3, 0.7, 0.8, 0.9]))
save(fig)
# NOTE - test disabled until pip release of altair_pandas - however should work if altair test passes
@pytest.mark.skip(reason="altair_pandas not yet supported")
def test_save_altair_pandas(tmp_path: Path):
pd.set_option("plotting.backend", "altair") # Installing altair_pandas registers this.
plot = data.plot.scatter("x", "y")
save(plot)
# NOTE - test disabled updated pip release of pdvega that tracks git upstream - however should work if altair test passes
@pytest.mark.skip(reason="pdvega not yet supported")
def test_save_pdvega(tmp_path: Path):
import pdvega # noqa: F401
plot = data.vgplot.scatter("x", "y")
save(plot)
def test_save_table(tmp_path: Path):
# tests saving a DF directly to a html file
save(data)
# save styled table
save(Styler(data))
| 26.948454 | 121 | 0.694721 | 0 | 0 | 0 | 0 | 429 | 0.164116 | 0 | 0 | 708 | 0.270849 |
dfc53823fb3adccd40e9762c665f5bb3deecbf27 | 95 | py | Python | instance/config.py | antomuli/News_Highlight | 9feb33c0a32fa78cd93f5ab2c74942a8ca281701 | [
"Unlicense"
] | 2 | 2020-03-23T23:16:51.000Z | 2020-04-26T21:15:11.000Z | instance/config.py | antomuli/News_Highlight | 9feb33c0a32fa78cd93f5ab2c74942a8ca281701 | [
"Unlicense"
] | null | null | null | instance/config.py | antomuli/News_Highlight | 9feb33c0a32fa78cd93f5ab2c74942a8ca281701 | [
"Unlicense"
] | null | null | null | NEWS_API_KEY= '138b22df68394ecbaa9c9af0d0377adb'
SECRET_KEY= 'f9bf78b9a18ce6d46a0cd2b0b86df9da' | 47.5 | 48 | 0.905263 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 68 | 0.715789 |
dfc5ea1ec35f681b24bc22174c17b45b8de95235 | 1,417 | py | Python | twirp/logging.py | batchar2/twirpy | e5940a2a038926844098def09748953287071747 | [
"Unlicense"
] | 51 | 2020-05-23T22:31:53.000Z | 2022-03-08T19:14:04.000Z | twirp/logging.py | batchar2/twirpy | e5940a2a038926844098def09748953287071747 | [
"Unlicense"
] | 20 | 2020-05-15T10:20:38.000Z | 2022-02-06T23:21:56.000Z | twirp/logging.py | batchar2/twirpy | e5940a2a038926844098def09748953287071747 | [
"Unlicense"
] | 10 | 2020-05-29T09:55:49.000Z | 2021-10-16T00:14:04.000Z | import os
import logging
import sys
import structlog
from structlog.stdlib import LoggerFactory, add_log_level
_configured = False
def configure(force = False):
"""
Configures logging & structlog modules
Keyword Arguments:
force: Force to reconfigure logging.
"""
global _configured
if _configured and not force:
return
# Check whether debug flag is set
debug = os.environ.get('DEBUG_MODE', False)
# Set appropriate log level
if debug:
log_level = logging.DEBUG
else:
log_level = logging.INFO
# Set logging config
logging.basicConfig(
level = log_level,
format = "%(message)s",
)
# Configure structlog
structlog.configure(
logger_factory = LoggerFactory(),
processors = [
add_log_level,
# Add timestamp
structlog.processors.TimeStamper('iso'),
# Add stack information
structlog.processors.StackInfoRenderer(),
# Set exception field using exec info
structlog.processors.format_exc_info,
# Render event_dict as JSON
structlog.processors.JSONRenderer()
]
)
_configured = True
def get_logger(**kwargs):
"""
Get the structlog logger
"""
# Configure logging modules
configure()
# Return structlog
return structlog.get_logger(**kwargs)
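# Hedged usage sketch (not part of the original module): structlog loggers
# returned by get_logger() take an event name plus arbitrary key/value pairs.
if __name__ == "__main__":
    log = get_logger(service="example")
    log.info("request_handled", path="/ping", status=200)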
| 22.140625 | 57 | 0.628088 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 437 | 0.308398 |
dfc68640fe94c25498745f6373d4a8f15e6f9a5f | 878 | py | Python | setup.py | Arkq/pyexec | ec90b0aaff80996155d033bd722ff59c9259460e | [
"MIT"
] | null | null | null | setup.py | Arkq/pyexec | ec90b0aaff80996155d033bd722ff59c9259460e | [
"MIT"
] | null | null | null | setup.py | Arkq/pyexec | ec90b0aaff80996155d033bd722ff59c9259460e | [
"MIT"
] | null | null | null | # setup.py
# Copyright (c) 2015-2017 Arkadiusz Bokowy
#
# This file is a part of pyexec.
#
# This project is licensed under the terms of the MIT license.
from setuptools import setup
import pyexec
with open("README.rst") as f:
long_description = f.read()
setup(
name="pyexec",
version=pyexec.__version__,
author="Arkadiusz Bokowy",
author_email="[email protected]",
url="https://github.com/Arkq/pyexec",
description="Signal-triggered process reloader",
long_description=long_description,
license="MIT",
py_modules=["pyexec"],
classifiers=[
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 3",
"Topic :: Software Development :: Libraries",
"Topic :: Utilities",
],
)
| 25.085714 | 62 | 0.654897 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 505 | 0.575171 |
dfc68f46a49d56c4e9d2e4ea5761354ae3746b5b | 323 | py | Python | tests/func_eqconstr.py | andyjost/Sprite | 7ecd6fc7d48d7f62da644e48c12c7b882e1a2929 | [
"MIT"
] | 1 | 2022-03-16T16:37:11.000Z | 2022-03-16T16:37:11.000Z | tests/func_eqconstr.py | andyjost/Sprite | 7ecd6fc7d48d7f62da644e48c12c7b882e1a2929 | [
"MIT"
] | null | null | null | tests/func_eqconstr.py | andyjost/Sprite | 7ecd6fc7d48d7f62da644e48c12c7b882e1a2929 | [
"MIT"
] | null | null | null | '''Functional tests for the equational constraint.'''
import cytest # from ./lib; must be first
class TestEqConstr(cytest.FunctionalTestCase):
SOURCE_DIR = 'data/curry/eqconstr/'
PRINT_SKIPPED_GOALS = True
# RUN_ONLY = ['a0_.[02468][048]', 'a0b0c0_.[037][048]', 'prog0.'] # A subset for quick checks.
# SKIP = []
| 32.3 | 96 | 0.693498 | 224 | 0.693498 | 0 | 0 | 0 | 0 | 0 | 0 | 207 | 0.640867 |
dfc7144f2268699316911b76b5597b6509452a54 | 4,898 | py | Python | data-sources/kbr/authority-persons-marc-to-csv.py | kbrbe/beltrans-data-integration | 951ae3941b22a6fe0a8d30079bdf6f4f0a55f092 | [
"MIT"
] | null | null | null | data-sources/kbr/authority-persons-marc-to-csv.py | kbrbe/beltrans-data-integration | 951ae3941b22a6fe0a8d30079bdf6f4f0a55f092 | [
"MIT"
] | 21 | 2022-02-14T10:58:52.000Z | 2022-03-28T14:04:40.000Z | data-sources/kbr/authority-persons-marc-to-csv.py | kbrbe/beltrans-data-integration | 951ae3941b22a6fe0a8d30079bdf6f4f0a55f092 | [
"MIT"
] | null | null | null | #
# (c) 2022 Sven Lieber
# KBR Brussels
#
#import xml.etree.ElementTree as ET
import lxml.etree as ET
import os
import json
import itertools
import enchant
import hashlib
import csv
from optparse import OptionParser
import utils
import stdnum
NS_MARCSLIM = 'http://www.loc.gov/MARC21/slim'
ALL_NS = {'marc': NS_MARCSLIM}
# -----------------------------------------------------------------------------
def addAuthorityFieldsToCSV(elem, writer, natWriter, stats):
"""This function extracts authority relevant data from the given XML element 'elem' and writes it to the given CSV file writer."""
#
# extract relevant data from the current record
#
authorityID = utils.getElementValue(elem.find('./marc:controlfield[@tag="001"]', ALL_NS))
namePerson = utils.getElementValue(elem.find('./marc:datafield[@tag="100"]/marc:subfield[@code="a"]', ALL_NS))
nameOrg = utils.getElementValue(elem.find('./marc:datafield[@tag="110"]/marc:subfield[@code="a"]', ALL_NS))
nationalities = utils.getElementValue(elem.findall('./marc:datafield[@tag="370"]/marc:subfield[@code="c"]', ALL_NS))
gender = utils.getElementValue(elem.find('./marc:datafield[@tag="375"]/marc:subfield[@code="a"]', ALL_NS))
birthDateRaw = utils.getElementValue(elem.find('./marc:datafield[@tag="046"]/marc:subfield[@code="f"]', ALL_NS))
deathDateRaw = utils.getElementValue(elem.find('./marc:datafield[@tag="046"]/marc:subfield[@code="g"]', ALL_NS))
isniRaw = utils.getElementValue(elem.xpath('./marc:datafield[@tag="024"]/marc:subfield[@code="2" and (text()="isni" or text()="ISNI")]/../marc:subfield[@code="a"]', namespaces=ALL_NS))
viafRaw = utils.getElementValue(elem.xpath('./marc:datafield[@tag="024"]/marc:subfield[@code="2" and text()="viaf"]/../marc:subfield[@code="a"]', namespaces=ALL_NS))
countryCode = utils.getElementValue(elem.find('./marc:datafield[@tag="043"]/marc:subfield[@code="c"]', ALL_NS))
(familyName, givenName) = utils.extractNameComponents(namePerson)
birthDate = ''
deathDate = ''
datePatterns = ['%Y', '(%Y)', '[%Y]', '%Y-%m-%d', '%d/%m/%Y', '%Y%m%d']
if birthDateRaw:
birthDate = utils.parseDate(birthDateRaw, datePatterns)
if deathDateRaw:
deathDate = utils.parseDate(deathDateRaw, datePatterns)
name = f'{namePerson} {nameOrg}'.strip()
if nationalities:
nationalityURIString = utils.createURIString(nationalities, ';', 'http://id.loc.gov/vocabulary/countries/')
for n in nationalityURIString.split(';'):
natWriter.writerow({'authorityID': authorityID, 'nationality': n})
newRecord = {
'authorityID': authorityID,
'name': name,
'family_name': familyName,
'given_name': givenName,
'gender': gender,
'birth_date': birthDate,
'death_date': deathDate,
'isni_id': utils.extractIdentifier(authorityID, f'ISNI {isniRaw}', pattern='ISNI'),
'viaf_id': utils.extractIdentifier(authorityID, f'VIAF {viafRaw}', pattern='VIAF'),
'country_code': countryCode
}
writer.writerow(newRecord)
# -----------------------------------------------------------------------------
def main():
"""This script reads an XML file in MARC slim format and extracts several fields to create a CSV file."""
parser = OptionParser(usage="usage: %prog [options]")
parser.add_option('-i', '--input-file', action='store', help='The input file containing MARC SLIM XML records')
parser.add_option('-o', '--output-file', action='store', help='The output CSV file containing selected MARC fields')
parser.add_option('-n', '--nationality-csv', action='store', help='The output CSV file containing the IDs of authorities and their nationality')
(options, args) = parser.parse_args()
#
# Check if we got all required arguments
#
if( (not options.input_file) or (not options.output_file) or (not options.nationality_csv) ):
parser.print_help()
exit(1)
#
# Instead of loading everything to main memory, stream over the XML using iterparse
#
with open(options.output_file, 'w') as outFile, \
open(options.nationality_csv, 'w') as natFile:
stats = {}
outputFields = ['authorityID', 'name', 'family_name', 'given_name', 'gender', 'birth_date', 'death_date', 'isni_id', 'viaf_id', 'country_code']
outputWriter = csv.DictWriter(outFile, fieldnames=outputFields, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
outputWriter.writeheader()
nationalityWriter = csv.DictWriter(natFile, fieldnames=['authorityID', 'nationality'], delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
nationalityWriter.writeheader()
for event, elem in ET.iterparse(options.input_file, events=('start', 'end')):
# The parser finished reading one authority record, get information and then discard the record
if event == 'end' and elem.tag == ET.QName(NS_MARCSLIM, 'record'):
addAuthorityFieldsToCSV(elem, outputWriter, nationalityWriter, stats)
main()
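# Hedged example invocation (not part of the original script); the file names
# are placeholders:
#   python authority-persons-marc-to-csv.py \
#       -i authorities-marc.xml -o authorities.csv -n nationalities.csv
# -i is the MARC slim XML input, -o the per-authority CSV and -n the CSV of
# (authorityID, nationality) pairs.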
| 43.732143 | 186 | 0.682115 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,175 | 0.444059 |
dfc7e5a8bbc57e53f20590d631fe2b87c31a1671 | 3,886 | py | Python | promoterz/evaluationPool.py | emillj/gekkoJaponicus | d77c8c7a303b97a3643eb3f3c8b995b8b393f3f7 | [
"MIT"
] | null | null | null | promoterz/evaluationPool.py | emillj/gekkoJaponicus | d77c8c7a303b97a3643eb3f3c8b995b8b393f3f7 | [
"MIT"
] | null | null | null | promoterz/evaluationPool.py | emillj/gekkoJaponicus | d77c8c7a303b97a3643eb3f3c8b995b8b393f3f7 | [
"MIT"
] | 1 | 2021-11-29T20:18:25.000Z | 2021-11-29T20:18:25.000Z | #!/bin/python
import time
import random
from multiprocessing import Pool, Process, Pipe, TimeoutError
from multiprocessing.pool import ThreadPool
class EvaluationPool():
def __init__(self, EvaluationTool, Urls, poolsize):
self.EvaluationTool = EvaluationTool
self.Urls = Urls
self.lasttimes = [0 for x in Urls]
self.lasttimesperind = [0 for x in Urls]
self.poolsizes = [5 for x in Urls]
def ejectURL(self, Index):
self.Urls.pop(Index)
self.lasttimes.pop(Index)
self.lasttimesperind.pop(Index)
self.poolsizes.pop(Index)
def evaluateBackend(self, DateRange, I, inds):
stime = time.time()
Q = [ (DateRange, ind, self.Urls[I]) for ind in inds ]
P = Pool(self.poolsizes[I])
fitnesses = P.starmap(self.EvaluationTool, Q )
P.close()
P.join()
delta_time=time.time()-stime
return fitnesses, delta_time
def evaluatePopulation(self, locale):
individues_to_simulate = [ind for ind in locale.population\
if not ind.fitness.valid]
props=self.distributeIndividuals(individues_to_simulate)
args = [ [locale.DateRange, I, props[I]]\
for I in range(len(self.Urls))]
pool = ThreadPool(len(self.Urls))
results=[]
for A in args:
results.append(pool.apply_async(self.evaluateBackend, A))
pool.close()
TimedOut=[]
for A in range(len(results)):
try:
perindTime = 3 * self.lasttimesperind[A] if self.lasttimesperind[A] else 12
timeout = perindTime*len(props[A]) if A else None # no timeout for local machine;
results[A] = results[A].get(timeout=timeout)
except TimeoutError: # Timeout: remote machine is dead, et al
print("Machine timeouts!")
args[A][1] = 0 # Set to evaluate @ local machine
results[A] = self.evaluateBackend(*args[A])
TimedOut.append(A)
pool.join()
for PoolIndex in range(len(results)):
for i, fit in zip(range(len(results[PoolIndex][0])), results[PoolIndex][0]):
props[PoolIndex][i].fitness.values = fit
self.lasttimes[PoolIndex] = results[PoolIndex][1]
L = len(props[PoolIndex])
self.lasttimesperind[PoolIndex] = self.lasttimes[PoolIndex] / L if L else 5
F = [x.fitness.valid for x in individues_to_simulate]
assert(all(F))
for T in TimedOut:
self.ejectURL(T)
return len(individues_to_simulate)
def distributeIndividuals(self, tosimulation):
nb_simulate = len(tosimulation)
sumtimes = sum(self.lasttimes)
#stdtime = sum(self.lasttimes)/len(self.lasttimes)
std = nb_simulate/len(self.Urls)
#stdTPI = sum(self.lasttimesperind)/len(self.lasttimesperind)
#print(stdTPI)
if sumtimes:
vels = [ 1/x for x in self.lasttimes ]
constant = nb_simulate/sum(vels)
proportions = [ max(1, x*constant) for x in vels ]
else:
proportions = [std for x in self.Urls]
proportions = [int(round(x)) for x in proportions]
pC = lambda x:random.randrange(0,len(x))
pB = lambda x: x.index(min(x))
pM = lambda x: x.index(max(x))
while sum(proportions) < nb_simulate:
proportions[pB(proportions)] +=1
print('+')
while sum(proportions) > nb_simulate:
proportions[pM(proportions)] -=1
print('-')
print(proportions)
assert(sum(proportions) == nb_simulate)
distribution = []
L=0
for P in proportions:
distribution.append(tosimulation[L:L+P])
L=L+P
return distribution
| 33.213675 | 97 | 0.587494 | 3,737 | 0.961657 | 0 | 0 | 0 | 0 | 0 | 0 | 267 | 0.068708 |
dfc9bea7af7becf02c3cd0e4f00d6640fee9f247 | 3,001 | py | Python | website/drawquest/apps/following/models.py | bopopescu/drawquest-web | 8d8f9149b6efeb65202809a5f8916386f58a1b3b | [
"BSD-3-Clause"
] | 19 | 2015-11-10T17:36:20.000Z | 2021-04-12T07:36:00.000Z | website/drawquest/apps/following/models.py | bopopescu/drawquest-web | 8d8f9149b6efeb65202809a5f8916386f58a1b3b | [
"BSD-3-Clause"
] | 1 | 2021-06-09T03:45:34.000Z | 2021-06-09T03:45:34.000Z | website/drawquest/apps/following/models.py | bopopescu/drawquest-web | 8d8f9149b6efeb65202809a5f8916386f58a1b3b | [
"BSD-3-Clause"
] | 6 | 2015-11-11T00:38:38.000Z | 2020-07-25T20:10:08.000Z | from canvas.cache_patterns import CachedCall
from drawquest import knobs
from drawquest.apps.drawquest_auth.models import User
from drawquest.apps.drawquest_auth.details_models import UserDetails
from drawquest.pagination import FakePaginator
def _sorted(users):
return sorted(users, key=lambda user: user.username.lower())
def _for_viewer(users, viewer=None):
if viewer is None or not viewer.is_authenticated():
return users
following = [int(id_) for id_ in viewer.redis.new_following.zrange(0, -1)]
for user in users:
user.viewer_is_following = user.id in following
return users
def _paginate(redis_obj, offset, request=None):
'''
items should already start at the proper offset.
'''
if offset == 'top':
items = redis_obj.zrevrange(0, knobs.FOLLOWERS_PER_PAGE, withscores=True)
else:
items = redis_obj.zrevrangebyscore('({}'.format(offset), '-inf',
start=0,
num=knobs.FOLLOWERS_PER_PAGE,
withscores=True)
try:
next_offset = items[-1][1]
next_offset = next_offset.__repr__()
except IndexError:
next_offset = None
items = [item for item, ts in items]
pagination = FakePaginator(items, offset=offset, next_offset=next_offset)
return items, pagination
def followers(user, viewer=None, offset='top', direction='next', request=None):
""" The users who are following `user`. """
if direction != 'next':
raise ValueError("Follwers only supports 'next' - scrolling in one direction.")
if request is None or (request.idiom == 'iPad' and request.app_version_tuple <= (3, 1)):
user_ids = user.redis.new_followers.zrevrange(0, -1)
pagination = None
else:
user_ids, pagination = _paginate(user.redis.new_followers, offset, request=request)
users = UserDetails.from_ids(user_ids)
if request is None or request.app_version_tuple < (3, 0):
users = _sorted(users)
return _for_viewer(users, viewer=viewer), pagination
def following(user, viewer=None, offset='top', direction='next', request=None):
""" The users that `user` is following. """
if direction != 'next':
raise ValueError("Following only supports 'next' - scrolling in one direction.")
if request is None or (request.idiom == 'iPad' and request.app_version_tuple <= (3, 1)):
user_ids = user.redis.new_following.zrange(0, -1)
pagination = None
else:
user_ids, pagination = _paginate(user.redis.new_following, offset, request=request)
users = UserDetails.from_ids(user_ids)
if request is None or request.app_version_tuple < (3, 0):
users = _sorted(users)
return _for_viewer(users, viewer=viewer), pagination
def counts(user):
return {
'followers': user.redis.new_followers.zcard(),
'following': user.redis.new_following.zcard(),
}
| 34.102273 | 92 | 0.65978 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 357 | 0.11896 |
dfcaf8188821bfe0448579c92b86161cf07a8cb5 | 3,674 | py | Python | Python 3/PyGame/Matrix_based_3D/entities.py | DarkShadow4/python | 4cd94e0cf53ee06c9c31e9272572ca9656697c30 | [
"MIT"
] | null | null | null | Python 3/PyGame/Matrix_based_3D/entities.py | DarkShadow4/python | 4cd94e0cf53ee06c9c31e9272572ca9656697c30 | [
"MIT"
] | null | null | null | Python 3/PyGame/Matrix_based_3D/entities.py | DarkShadow4/python | 4cd94e0cf53ee06c9c31e9272572ca9656697c30 | [
"MIT"
] | 1 | 2020-08-19T17:25:22.000Z | 2020-08-19T17:25:22.000Z | import numpy as np
def translationMatrix(dx=0, dy=0, dz=0):
return np.array([[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 1, 0],
[dx, dy, dz, 1]])
def scalingMatrix(sx=1, sy=1, sz=1):
return np.array([[sx, 0, 0, 0],
[0, sy, 0, 0],
[0, 0, sz, 0],
[0, 0, 0, 1]])
def rotateXmatrix(radians):
c = np.cos(radians)
s = np.sin(radians)
return np.array([[1, 0, 0, 0],
[0, c, -s, 0],
[0, s, c, 0],
[0, 0, 0, 1]])
def rotateYmatrix(radians):
c = np.cos(radians)
s = np.sin(radians)
return np.array([[ c, 0, s, 0],
[ 0, 1, 0, 0],
[-s, 0, c, 0],
[ 0, 0, 0, 1]])
def rotateZmatrix(radians):
c = np.cos(radians)
s = np.sin(radians)
return np.array([[c, -s, 0 ,0],
[s, c, 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1]])
class Entity(object):
"""docstring for Entity."""
def __init__(self, name="", type="", node_color=(0, 0, 0), edge_color=(255, 255, 255), node_radius=4):
super(Entity, self).__init__()
self.name = name
self.type = type
self.nodes = np.zeros((0, 4))
self.node_color = node_color
self.edge_color = edge_color
self.node_radius = node_radius
self.edges = []
####
self.initial_nodes = np.zeros((0, 4))
self.totalTransformations = {
"T":[ [1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1] ],
"RX":[ [1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1] ],
"RY":[ [1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1] ],
"RZ":[ [1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1] ],
"S":[ [1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1] ]
}
####
def addNodes(self, nodes):
ones = np.ones((len(nodes), 1))
nodes = np.hstack((nodes, ones))
####
self.initial_nodes = np.vstack((self.initial_nodes, nodes))
self.nodes = np.dot(self.initial_nodes, self.totalTransformations["RY"])
self.nodes = np.dot(self.nodes, self.totalTransformations["RX"])
self.nodes = np.dot(self.nodes, self.totalTransformations["RZ"])
self.nodes = np.dot(self.nodes, self.totalTransformations["T"])
self.nodes = np.dot(self.nodes, self.totalTransformations["S"])
# centerX = sum(node[0] for node in self.nodes)/len(self.nodes)
# centerY = sum(node[1] for node in self.nodes)/len(self.nodes)
# centerZ = sum(node[2] for node in self.nodes)/len(self.nodes)
# self.center = (centerX, centerY, centerZ)
####
# self.nodes = np.vstack((self.nodes, nodes))
def addEdges(self, edges):
self.edges += edges
def transform(self, matrix, type):
self.totalTransformations[type] = np.dot(self.totalTransformations[type], matrix)
self.nodes = np.dot(self.initial_nodes, self.totalTransformations["RY"])
self.nodes = np.dot(self.nodes, self.totalTransformations["RX"])
self.nodes = np.dot(self.nodes, self.totalTransformations["RZ"])
self.nodes = np.dot(self.nodes, self.totalTransformations["T"])
self.nodes = np.dot(self.nodes, self.totalTransformations["S"])
| 33.099099 | 106 | 0.459717 | 2,604 | 0.708764 | 0 | 0 | 0 | 0 | 0 | 0 | 378 | 0.102885 |
dfcb6f77810c69d9413f60d8f54a4f595fd87395 | 65 | py | Python | pytreearray/multiply.py | PhilipVinc/netket_dynamics | 6e8009098c279271cb0f289ba9e85c039bb284e4 | [
"Apache-2.0"
] | 2 | 2021-10-02T20:29:44.000Z | 2021-10-02T20:38:28.000Z | pytreearray/multiply.py | PhilipVinc/netket_dynamics | 6e8009098c279271cb0f289ba9e85c039bb284e4 | [
"Apache-2.0"
] | 11 | 2021-10-01T09:15:06.000Z | 2022-03-21T09:19:23.000Z | pytreearray/multiply.py | PhilipVinc/netket_dynamics | 6e8009098c279271cb0f289ba9e85c039bb284e4 | [
"Apache-2.0"
] | null | null | null | from ._src.multiply import multiply_outer as outer # noqa: F401
| 32.5 | 64 | 0.784615 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 12 | 0.184615 |
dfcda9e0f1ad0a543490dfbdc63f6f36b102ec00 | 1,258 | py | Python | setup.py | utix/django-json-api | 938f78f664a4ecbabb9e678595926d1a580f9d0c | [
"MIT"
] | 7 | 2021-02-26T14:35:17.000Z | 2021-02-26T21:21:58.000Z | setup.py | utix/django-json-api | 938f78f664a4ecbabb9e678595926d1a580f9d0c | [
"MIT"
] | 7 | 2021-02-26T14:44:30.000Z | 2021-06-02T14:27:17.000Z | setup.py | utix/django-json-api | 938f78f664a4ecbabb9e678595926d1a580f9d0c | [
"MIT"
] | 1 | 2021-02-26T20:10:42.000Z | 2021-02-26T20:10:42.000Z | #!/usr/bin/env python
from os.path import join
from setuptools import find_packages, setup
# DEPENDENCIES
def requirements_from_pip(filename):
with open(filename, "r") as pip:
return [line.strip() for line in pip if not line.startswith("#") and line.strip()]
core_deps = requirements_from_pip("requirements.txt")
dev_deps = requirements_from_pip("requirements_dev.txt")
# DESCRIPTION
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
setup(
author="Sharework",
author_email="[email protected]",
description="JSON API specification for Django services",
extras_require={"all": dev_deps, "dev": dev_deps},
install_requires=core_deps,
long_description=long_description,
long_description_content_type="text/markdown",
name="django-json-api",
package_data={"django_json_api": ["resources/VERSION"]},
packages=find_packages(),
python_requires=">=3.8",
url="https://github.com/share-work/django-json-api",
version=open(join("django_json_api", "resources", "VERSION")).read().strip(),
classifiers=[
"Programming Language :: Python :: 3.8",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
)
| 29.952381 | 90 | 0.692369 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 473 | 0.375994 |
dfcf9bc6b50b9274d2e45ff7e0b6d1af9920cab0 | 1,632 | py | Python | youtube_dl/extractor/businessinsider.py | MOODesign/Youtube-videos-Download | 730c0d12a06f349907481570f1f2890251f7a181 | [
"Unlicense"
] | 16 | 2020-12-01T15:26:58.000Z | 2022-02-24T23:12:14.000Z | youtube_dl/extractor/businessinsider.py | MOODesign/Youtube-videos-Download | 730c0d12a06f349907481570f1f2890251f7a181 | [
"Unlicense"
] | 5 | 2021-02-20T10:30:00.000Z | 2021-06-01T21:12:31.000Z | youtube_dl/extractor/businessinsider.py | MOODesign/Youtube-videos-Download | 730c0d12a06f349907481570f1f2890251f7a181 | [
"Unlicense"
] | 7 | 2020-12-01T15:27:04.000Z | 2022-01-09T23:21:53.000Z | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from .jwplatform import JWPlatformIE
class BusinessInsiderIE(InfoExtractor):
_VALID_URL = r'https?://(?:[^/]+\.)?businessinsider\.(?:com|nl)/(?:[^/]+/)*(?P<id>[^/?#&]+)'
_TESTS = [{
'url': 'http://uk.businessinsider.com/how-much-radiation-youre-exposed-to-in-everyday-life-2016-6',
'md5': 'ca237a53a8eb20b6dc5bd60564d4ab3e',
'info_dict': {
'id': 'hZRllCfw',
'ext': 'mp4',
'title': "Here's how much radiation you're exposed to in everyday life",
'description': 'md5:9a0d6e2c279948aadaa5e84d6d9b99bd',
'upload_date': '20170709',
'timestamp': 1499606400,
},
'params': {
'skip_download': True,
},
}, {
'url': 'https://www.businessinsider.nl/5-scientifically-proven-things-make-you-less-attractive-2017-7/',
'only_matching': True,
}, {
'url': 'http://www.businessinsider.com/excel-index-match-vlookup-video-how-to-2015-2?IR=T',
'only_matching': True,
}]
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
jwplatform_id = self._search_regex(
(r'data-media-id=["\']([a-zA-Z0-9]{8})',
r'id=["\']jwplayer_([a-zA-Z0-9]{8})',
r'id["\']?\s*:\s*["\']?([a-zA-Z0-9]{8})'),
webpage, 'jwplatform id')
return self.url_result(
'jwplatform:%s' % jwplatform_id, ie=JWPlatformIE.ie_key(),
video_id=video_id)
| 37.953488 | 112 | 0.571078 | 1,501 | 0.91973 | 0 | 0 | 0 | 0 | 0 | 0 | 804 | 0.492647 |
dfcfb445e47c75ccbf0dd0f1527b09b9571a8702 | 578 | py | Python | map_house.py | renankalfa/python-0-ao-Data_Scientist | 2f61e1cbb1c5565da53cc1cd9aa5c3f5d1cacc88 | [
"MIT"
] | 1 | 2022-03-27T23:55:37.000Z | 2022-03-27T23:55:37.000Z | map_house.py | renankalfa/python-0-ao-Data_Scientist | 2f61e1cbb1c5565da53cc1cd9aa5c3f5d1cacc88 | [
"MIT"
] | null | null | null | map_house.py | renankalfa/python-0-ao-Data_Scientist | 2f61e1cbb1c5565da53cc1cd9aa5c3f5d1cacc88 | [
"MIT"
] | null | null | null | import plotly.express as px
import pandas as pd
data = pd.read_csv('kc_house_data.csv')
data_mapa = data[['id', 'lat', 'long', 'price']]
grafico1 = px.scatter_mapbox(data_mapa, lat='lat', lon='long',
hover_name='id', hover_data=['price'],
color_discrete_sequence=['fuchsia'],
zoom=3, height=300)
grafico1.update_layout(mapbox_style='open-street-map')
grafico1.update_layout(height=600, margin={'r': 0, 't': 0, 'l': 0, 'b': 0})
grafico1.show()
grafico1.write_html('map_house_rocket.html')
| 36.125 | 75 | 0.610727 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 124 | 0.214533 |
dfcfba95af54686ffe34f16d2ea3725de4ec6aa5 | 1,561 | py | Python | scripts/api-timeboard.py | ryhennessy/hiring-engineers | f151fb593a016b38b92767ce48d217c3d57c492a | [
"Apache-2.0"
] | null | null | null | scripts/api-timeboard.py | ryhennessy/hiring-engineers | f151fb593a016b38b92767ce48d217c3d57c492a | [
"Apache-2.0"
] | null | null | null | scripts/api-timeboard.py | ryhennessy/hiring-engineers | f151fb593a016b38b92767ce48d217c3d57c492a | [
"Apache-2.0"
] | 1 | 2019-02-06T00:09:36.000Z | 2019-02-06T00:09:36.000Z | #!/usr/bin/python
from datadog import initialize, api
options = {
'api_key': '17370fa45ebc4a8184d3dde9f8189c38',
'app_key': 'b0d652bbd1d861656723c1a93bc1a2f22d493d57'
}
initialize(**options)
title = "Ryan Great Timeboard"
description = "My Timeboard that is super awesome"
graphs = [
{
"title": "My Metric over my host",
"definition": {
"requests": [
{
"q": "avg:my_metric{host:secondaryhost.hennvms.net}",
"type": "line",
"style": {
"palette": "dog_classic",
"type": "solid",
"width": "normal"
},
"conditional_formats": [],
"aggregator": "avg"
}
],
"autoscale": "true",
"viz": "timeseries"
}
},
{
"title": "MySQL Anomaly Function Applied",
"definition": {
"viz": "timeseries",
"requests": [
{
"q": "anomalies(avg:mysql.performance.user_time{*}, 'basic', 2)",
"type": "line",
"style": {
"palette": "dog_classic",
"type": "solid",
"width": "normal"
},
"conditional_formats": [],
"aggregator": "avg"
}
],
"autoscale": "true"
}
},
{
"title": "My Metric Rollup Function",
"definition": {
"viz": "query_value",
"requests": [
{
"q": "avg:my_metric{*}.rollup(sum, 60)",
"type": "line",
"style": {
"palette": "dog_classic",
"type": "solid",
"width": "normal"
},
"conditional_formats": [],
"aggregator": "avg"
}
],
"autoscale": "true"
}
}]
api.Timeboard.create(title=title, description=description, graphs=graphs)
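# Hedged follow-up (not in the original script): the create call should return
# the new board as a dict, so it can be captured and inspected, e.g.
#   board = api.Timeboard.create(title=title, description=description, graphs=graphs)
#   print(board)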
| 20.539474 | 73 | 0.547726 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 912 | 0.584241 |
dfd10fcf278a06e3edb0f59aed0bddac1ebc200d | 732 | py | Python | Playground/Spin.py | fountainment/cherry-soda | 3dd0eb7d0b5503ba572ff2104990856ef7a87495 | [
"MIT"
] | 27 | 2020-01-16T08:20:54.000Z | 2022-03-29T20:40:15.000Z | Playground/Spin.py | fountainment/cherry-soda | 3dd0eb7d0b5503ba572ff2104990856ef7a87495 | [
"MIT"
] | 10 | 2022-01-07T14:07:27.000Z | 2022-03-19T18:13:44.000Z | Playground/Spin.py | fountainment/cherry-soda | 3dd0eb7d0b5503ba572ff2104990856ef7a87495 | [
"MIT"
] | 6 | 2019-12-27T10:04:07.000Z | 2021-12-15T17:29:24.000Z | import numpy as np
import math
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
def get_spin(radius_scale, height_scale, rounds):
xs, ys, zs = [], [], []
theta = 0.0
delta = 0.1
twopi = math.pi * 2.0
for i in range(int(rounds * twopi / delta)):
theta += delta
radius = theta / twopi * radius_scale
x = np.cos(theta) * radius
y = np.sin(theta) * radius
xs.append(x)
ys.append(y)
zs.append(theta / twopi * height_scale)
return xs, ys, zs
def main():
fig = plt.figure()
ax = Axes3D(fig)
ax.plot(*get_spin(1.0, 3.0, 5.0))
ax.plot(*get_spin(1.05, 3.15, 5.0))
plt.show()
if __name__ == '__main__':
main()
| 23.612903 | 49 | 0.579235 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 0.013661 |
dfd2a91be88a84783b35bd946e501cc258160953 | 1,911 | py | Python | bin/get_latest_rotation.py | rhots/automation | cfa97656885f4ff91e1c79af5eb8fa38a85c35a8 | [
"0BSD"
] | 1 | 2017-06-06T03:07:01.000Z | 2017-06-06T03:07:01.000Z | bin/get_latest_rotation.py | rhots/automation | cfa97656885f4ff91e1c79af5eb8fa38a85c35a8 | [
"0BSD"
] | 3 | 2016-12-19T21:09:53.000Z | 2017-02-14T03:32:18.000Z | bin/get_latest_rotation.py | rhots/automation | cfa97656885f4ff91e1c79af5eb8fa38a85c35a8 | [
"0BSD"
] | null | null | null | import os.path
from bs4 import BeautifulSoup
import requests
# Location of file to store latest known page number
LAST_KNOWN_PAGE_FILE = "/tmp/rotation_checker_latest"
# URL of forum thread where latest rotations are posted
ROTATION_FORUM_THREAD = "https://us.battle.net/forums/en/heroes/topic/17936383460"
def write_last_known_page(page_num):
with open(LAST_KNOWN_PAGE_FILE, "w") as f:
f.write(str(page_num))
def read_last_known_page():
try:
with open(LAST_KNOWN_PAGE_FILE, "r") as f:
return int(f.read())
except OSError:
return 0
def is_404(html):
return "Page Not Found" in html
def load_page(page_num):
return requests.get(
ROTATION_FORUM_THREAD,
params={"page": page_num}
)
def load_latest_page(last_known_page=0):
if is_404(load_page(last_known_page+1).text):
return load_page(last_known_page)
else:
return load_latest_page(last_known_page+1)
def remove_slot_text(s):
if "Slot unlocked at" in s:
return s
return s.split(" (Slot unlocked at")[0]
def rotation_info_from_source(html):
soup = BeautifulSoup(html, 'html.parser')
latest_post_content = soup.select(".TopicPost-bodyContent")[-1]
header = latest_post_content.span.text
date = header.split("Rotation: ")[-1]
heroes = [remove_slot_text(li.text) for li in latest_post_content.find_all("li")]
return date, heroes
if __name__ == "__main__":
# read last known page number if we have it
last_known = read_last_known_page()
# load latest page, starting from last known page number
resp = load_latest_page(last_known)
# extract date and hero rotation
date, heroes = rotation_info_from_source(resp.text)
# write latest page number for future
page_num = int(resp.url.split("=")[-1])
write_last_known_page(page_num)
print(date)
print(heroes)
| 27.695652 | 85 | 0.697541 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 495 | 0.259027 |
dfd2e5bbf8ec59072195c98d519d767f6b535cb9 | 2,485 | py | Python | fidelis/credentials.py | semperstew/fidelis | 8766b1bfa5bac342faf61bf4302a0e822d0a0ec9 | [
"Apache-2.0"
] | null | null | null | fidelis/credentials.py | semperstew/fidelis | 8766b1bfa5bac342faf61bf4302a0e822d0a0ec9 | [
"Apache-2.0"
] | null | null | null | fidelis/credentials.py | semperstew/fidelis | 8766b1bfa5bac342faf61bf4302a0e822d0a0ec9 | [
"Apache-2.0"
] | null | null | null | # fidelis/credentials.py
import datetime
import requests
import threading
from dateutil.tz import tzlocal
from collections import namedtuple
def _local_now():
return datetime.datetime.now(tzlocal())
class FidelisCredentials(object):
"""Object to hold authentication credentials"""
_default_token_timeout = 10 * 60
def __init__(self, username, password, baseURL, token=None, ignoressl=False):
    self._baseURL = baseURL
self._username = username
self._password = password
self._token = token
self._ignoressl = ignoressl
self._time_fetcher = _local_now
self._expiration = self._time_fetcher()
self._refresh_lock = threading.Lock()
self.refresh()
@property
def token(self):
return self._token
@token.setter
def token(self, value):
self._token = value
  @property
  def baseURL(self):
    return self._baseURL
  @baseURL.setter
  def baseURL(self, value):
    self._baseURL = value
    self._update_expiration()
def _refresh_needed(self, refresh_in=None):
"""Check if a token refresh is needed."""
if self._expiration is None:
return False
if refresh_in is None:
refresh_in = self._default_token_timeout
if self._seconds_remaining() >= refresh_in:
return False
return True
def _is_expired(self):
"""Check if token is expired"""
return self._refresh_needed(refresh_in=0)
def refresh(self, new_token=None):
if new_token is not None:
self._token = new_token
self._update_expiration()
if not self._is_expired():
return
else:
with self._refresh_lock:
self._protected_refresh()
def _protected_refresh(self):
"""Refresh bearer token"""
    url = self.baseURL + 'authenticate'
    body = {'username': self._username, 'password': self._password}
    headers = {'Content-Type': 'application/json'}
    # requests' verify flag is the opposite of "ignore ssl"
    verify = not self._ignoressl
    r = requests.post(url=url, headers=headers, json=body, verify=verify)
    # the bearer token comes back in the JSON response body
    self._token = r.json()['token']
self._update_expiration()
def _seconds_remaining(self):
"""Calculate remaining seconds until token expiration"""
delta = self._expiration - self._time_fetcher()
return delta.total_seconds()
def _update_expiration(self):
delta = datetime.timedelta(seconds=self._default_token_timeout)
self._expiration = self._time_fetcher() + delta
def __call__(self, r):
self.refresh()
r.headers['Authorization'] = "bearer " + self._token
return r
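# Hedged usage sketch (not part of the original module): because __call__()
# injects the bearer token header, an instance can be passed straight to
# requests as a custom auth hook (URLs below are placeholders):
#   creds = FidelisCredentials("user", "secret", "https://fidelis.example/api/")
#   requests.get("https://fidelis.example/api/alerts", auth=creds)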
| 25.10101 | 79 | 0.695775 | 2,277 | 0.916298 | 0 | 0 | 269 | 0.108249 | 0 | 0 | 322 | 0.129577 |
dfd45fc42c8fe07d08d4459f4ff51b022c580213 | 6,254 | py | Python | pos_wechat/tests/test_wechat_order.py | nahualventure/pos-addons | 3c911c28c259967fb74e311ddcc8e6ca032c005d | [
"MIT"
] | null | null | null | pos_wechat/tests/test_wechat_order.py | nahualventure/pos-addons | 3c911c28c259967fb74e311ddcc8e6ca032c005d | [
"MIT"
] | null | null | null | pos_wechat/tests/test_wechat_order.py | nahualventure/pos-addons | 3c911c28c259967fb74e311ddcc8e6ca032c005d | [
"MIT"
] | 3 | 2021-06-15T05:45:42.000Z | 2021-07-27T12:28:53.000Z | # Copyright 2018 Ivan Yelizariev <https://it-projects.info/team/yelizariev>
# License MIT (https://opensource.org/licenses/MIT).
import logging
from odoo.addons.point_of_sale.tests.common import TestPointOfSaleCommon
try:
from unittest.mock import patch
except ImportError:
from mock import patch
_logger = logging.getLogger(__name__)
DUMMY_AUTH_CODE = "134579302432164181"
DUMMY_POS_ID = 1
class TestWeChatOrder(TestPointOfSaleCommon):
at_install = True
post_install = True
def setUp(self):
super(TestWeChatOrder, self).setUp()
# create wechat journals
self.pos_config.init_pos_wechat_journals()
self.Order = self.env["wechat.order"]
self.Refund = self.env["wechat.refund"]
self.product1 = self.env["product.product"].create({"name": "Product1"})
self.product2 = self.env["product.product"].create({"name": "Product2"})
def _patch_post(self, post_result):
def post(url, data):
self.assertIn(url, post_result)
_logger.debug("Request data for %s: %s", url, data)
return post_result[url]
# patch wechat
patcher = patch("wechatpy.pay.base.BaseWeChatPayAPI._post", wraps=post)
patcher.start()
self.addCleanup(patcher.stop)
def _create_pos_order(self):
def compute_tax(product, price, qty=1, taxes=None):
if taxes is None:
taxes = product.taxes_id.filtered(
lambda t: t.company_id.id == self.env.user.id
)
currency = self.pos_config.pricelist_id.currency_id
res = taxes.compute_all(price, currency, qty, product=product)
untax = res["total_excluded"]
return untax, sum(tax.get("amount", 0.0) for tax in res["taxes"])
# I click on create a new session button
self.pos_config.open_session_cb()
# I create a PoS order with 2 units of PCSC234 at 450 EUR
# and 3 units of PCSC349 at 300 EUR.
untax1, atax1 = compute_tax(self.product3, 450, 2)
untax2, atax2 = compute_tax(self.product4, 300, 3)
order = self.PosOrder.create(
{
"company_id": self.company_id,
"pricelist_id": self.partner1.property_product_pricelist.id,
"partner_id": self.partner1.id,
"lines": [
(
0,
0,
{
"name": "OL/0001",
"product_id": self.product3.id,
"price_unit": 450,
"discount": 0.0,
"qty": 2.0,
"tax_ids": [(6, 0, self.product3.taxes_id.ids)],
"price_subtotal": untax1,
"price_subtotal_incl": untax1 + atax1,
},
),
(
0,
0,
{
"name": "OL/0002",
"product_id": self.product4.id,
"price_unit": 300,
"discount": 0.0,
"qty": 3.0,
"tax_ids": [(6, 0, self.product4.taxes_id.ids)],
"price_subtotal": untax2,
"price_subtotal_incl": untax2 + atax2,
},
),
],
"amount_tax": atax1 + atax2,
"amount_total": untax1 + untax2 + atax1 + atax2,
"amount_paid": 0,
"amount_return": 0,
}
)
return order
def _create_wechat_order(self):
post_result = {
"pay/unifiedorder": {
"code_url": "weixin://wxpay/s/An4baqw",
"trade_type": "NATIVE",
"result_code": "SUCCESS",
}
}
self.lines = [
{
"product_id": self.product1.id,
"name": "Product 1 Name",
"quantity": 2,
"price": 450,
"category": "123456",
"description": "翻译服务器错误",
},
{
"product_id": self.product2.id,
"name": "Product 2 Name",
"quantity": 3,
"price": 300,
"category": "123456",
"description": "網路白目哈哈",
},
]
self._patch_post(post_result)
order, code_url = self.Order._create_qr(self.lines, total_fee=300)
self.assertEqual(order.state, "draft", "Just created order has wrong state")
return order
def test_refund(self):
# Order are not really equal because I'm lazy
# Just imagine that they are correspond each other
order = self._create_pos_order()
wechat_order = self._create_wechat_order()
order.wechat_order_id = wechat_order.id
# patch refund api request
post_result = {
"secapi/pay/refund": {"trade_type": "NATIVE", "result_code": "SUCCESS"}
}
self._patch_post(post_result)
# I create a refund
refund_action = order.refund()
refund = self.PosOrder.browse(refund_action["res_id"])
wechat_journal = self.env["account.journal"].search([("wechat", "=", "native")])
payment_context = {"active_ids": refund.ids, "active_id": refund.id}
refund_payment = self.PosMakePayment.with_context(**payment_context).create(
{
"amount": refund.amount_total,
"journal_id": wechat_journal.id,
"wechat_order_id": wechat_order.id,
}
)
# I click on the validate button to register the payment.
refund_payment.with_context(**payment_context).check()
self.assertEqual(refund.state, "paid", "The refund is not marked as paid")
self.assertEqual(
wechat_order.state,
"refunded",
"Wechat Order state is not changed after making refund payment",
)
| 35.942529 | 88 | 0.512312 | 5,874 | 0.93535 | 0 | 0 | 0 | 0 | 0 | 0 | 1,631 | 0.259713 |
dfd6670490ad28a09d2ea2ea84c8564b4b85c4b8 | 582 | py | Python | docker/settings.py | uw-it-aca/course-roster-lti | 599dad70e06bc85d3d862116c00e8ecf0e2e9c8c | [
"Apache-2.0"
] | null | null | null | docker/settings.py | uw-it-aca/course-roster-lti | 599dad70e06bc85d3d862116c00e8ecf0e2e9c8c | [
"Apache-2.0"
] | 53 | 2017-01-28T00:03:57.000Z | 2022-03-23T21:57:13.000Z | docker/settings.py | uw-it-aca/course-roster-lti | 599dad70e06bc85d3d862116c00e8ecf0e2e9c8c | [
"Apache-2.0"
] | null | null | null | from .base_settings import *
import os  # used by os.getenv() below; do not rely on it leaking in via the star import
INSTALLED_APPS += [
'course_roster.apps.CourseRosterConfig',
'compressor',
]
COMPRESS_ROOT = '/static/'
COMPRESS_PRECOMPILERS = (('text/less', 'lessc {infile} {outfile}'),)
COMPRESS_OFFLINE = True
STATICFILES_FINDERS += ('compressor.finders.CompressorFinder',)
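# Use the real IDCard photo cache only when not running in local development.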
if os.getenv('ENV', 'localdev') == 'localdev':
DEBUG = True
RESTCLIENTS_DAO_CACHE_CLASS = None
else:
RESTCLIENTS_DAO_CACHE_CLASS = 'course_roster.cache.IDCardPhotoCache'
COURSE_ROSTER_PER_PAGE = 50
IDCARD_PHOTO_EXPIRES = 60 * 60 * 2
IDCARD_TOKEN_EXPIRES = 60 * 60 * 2
| 26.454545 | 72 | 0.735395 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 198 | 0.340206 |
dfd9ba7258d16727355e98f49f13d4ad3a7818cc | 606 | py | Python | pylons/decorators/util.py | KinSai1975/Menira.py | ca275ce244ee4804444e1827ba60010a55acc07c | [
"BSD-3-Clause"
] | 118 | 2015-01-04T06:55:14.000Z | 2022-01-14T08:32:41.000Z | pylons/decorators/util.py | KinSai1975/Menira.py | ca275ce244ee4804444e1827ba60010a55acc07c | [
"BSD-3-Clause"
] | 21 | 2015-01-03T02:16:28.000Z | 2021-03-24T06:10:57.000Z | pylons/decorators/util.py | KinSai1975/Menira.py | ca275ce244ee4804444e1827ba60010a55acc07c | [
"BSD-3-Clause"
] | 53 | 2015-01-04T03:21:08.000Z | 2021-08-04T20:52:01.000Z | """Decorator internal utilities"""
import pylons
from pylons.controllers import WSGIController
def get_pylons(decorator_args):
    """Return the `pylons` object: either the :mod:`~pylons` module or
    the :attr:`~WSGIController._py_object` equivalent, searching a
    decorator's *args for the latter.
:attr:`~WSGIController._py_object` is more efficient as it provides
direct access to the Pylons global variables.
"""
if decorator_args:
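        # When decorating a controller method, the first positional argument is the controller instance.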
controller = decorator_args[0]
if isinstance(controller, WSGIController):
return controller._py_object
return pylons
| 31.894737 | 71 | 0.722772 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 334 | 0.551155 |
dfdcfb5c19d0b0d911ba4d7178ec8d4e8e195552 | 1,191 | py | Python | blt_net/pcmad/config_pcmad.py | AlexD123123/BLT-net | 97a06795137e0d9fc9332bbb342ad3248db7dc37 | [
"Apache-2.0"
] | 1 | 2022-01-13T07:23:52.000Z | 2022-01-13T07:23:52.000Z | blt_net/pcmad/config_pcmad.py | AlexD123123/BLT-net | 97a06795137e0d9fc9332bbb342ad3248db7dc37 | [
"Apache-2.0"
] | null | null | null | blt_net/pcmad/config_pcmad.py | AlexD123123/BLT-net | 97a06795137e0d9fc9332bbb342ad3248db7dc37 | [
"Apache-2.0"
] | null | null | null | from yacs.config import CfgNode as CN
def get_cfg_defaults():
"""Get a yacs CfgNode object with default values for my_project."""
# Return a clone so that the defaults will not be altered
# This is for the "local variable" use pattern
return _C.clone()
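# Module-level default configuration node; get_cfg_defaults() above returns a clone of it.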
_C = CN()
_C.SYSTEM = CN()
_C.config_name = 'roi_config_default'
_C.img_shape = [1024, 2048]
_C.merge_grid_resolution = [20, 20]
_C.allowed_size_arr = [[[256, 128], [256, 256], [256, 384], [256, 512]],
[[384, 192], [384, 384], [384, 576], [384, 768]],
[[512, 256], [512, 512], [512, 768], [512, 1024]],
[[640, 320], [640, 640], [640, 960], [640, 1280]],
[[768, 384], [768, 768], [768, 1152], [768, 1536]]]
_C.scale_factor_arr = [1, 1.5, 2, 2.5, 3]
#01.01.2020
# _C.inital_padding_arr = [[20, 40], [20, 40], [40, 40], [50, 50], [60, 60]]
# _C.min_required_crop_padding_arr = [[30, 30], [30, 30], [40, 40], [50, 50], [60, 60]]
#28.03.2020
_C.inital_padding_arr = [[20, 40], [20, 40], [60, 60], [60, 60], [80, 80]]
_C.min_required_crop_padding_arr = [[30, 30], [30, 30], [60, 60], [60, 60], [80, 80]]
_C.proposals_min_conf = 0.01
| 29.775 | 87 | 0.565071 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 375 | 0.314861 |
dfdddf5fadff57862aa0a7599f33c1b1aa1825cf | 1,188 | py | Python | src/pylendingclub/wrapper/order.py | bbarney213/PyLendingClub-Wrapper | 7f2ef823a56bc87b7f88abf86498805c2c9ce3d3 | [
"BSD-3-Clause"
] | 2 | 2018-06-06T20:04:04.000Z | 2018-06-13T12:17:37.000Z | src/pylendingclub/wrapper/order.py | bbarney213/PyLendingClub | 7f2ef823a56bc87b7f88abf86498805c2c9ce3d3 | [
"BSD-3-Clause"
] | 4 | 2018-10-18T13:59:14.000Z | 2018-10-23T16:08:51.000Z | src/pylendingclub/wrapper/order.py | bbarney213/PyLendingClub | 7f2ef823a56bc87b7f88abf86498805c2c9ce3d3 | [
"BSD-3-Clause"
] | null | null | null | class Order(object):
def order(self):
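        """Return the order as a dict of loanId, requestedAmount and portfolioId."""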
return {
'loanId': self._loan_id,
'requestedAmount': self._amount,
'portfolioId': self._portfolio
}
def __init__(self, loan_id, amount, portfolio):
self._loan_id = int(loan_id)
self._amount = amount
self._portfolio = portfolio
@classmethod
def from_dict(cls, input_dict):
return cls(input_dict.get('loanId'),
input_dict.get('requestedAmount'),
input_dict.get('portfolioId'))
class ConfirmedOrder():
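    """Wraps the JSON returned for a confirmed order and exposes its fields."""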
# TODO : Surface properties of the notes being purchased
@property
def fulfilled(self):
return self._fulfilled
def data(self):
source = dict(self._source)
source['fulfilled'] = self.fulfilled
return source
def __init__(self, json):
self._source = json
self._loan_id = json.get('loanId')
self._requested_amount = json.get('requestedAmount')
self._invested_amount = json.get('investedAmount')
self._execution_status = json.get('executionStatus')
self._fulfilled = 'ORDER_FULFILLED' in self._execution_status
| 29.7 | 69 | 0.619529 | 1,184 | 0.996633 | 0 | 0 | 262 | 0.220539 | 0 | 0 | 218 | 0.183502 |
dfdf748f02b9b943852a62e3f8521187d01d62ea | 2,175 | py | Python | app/__init__.py | muthash/Weconnect-api | d3434c99b96a911258dfb8e3ff68696a2021a64b | [
"MIT"
] | 1 | 2018-03-15T17:08:11.000Z | 2018-03-15T17:08:11.000Z | app/__init__.py | muthash/Weconnect-api | d3434c99b96a911258dfb8e3ff68696a2021a64b | [
"MIT"
] | 1 | 2018-02-28T21:26:04.000Z | 2018-03-01T07:19:05.000Z | app/__init__.py | muthash/Weconnect-api | d3434c99b96a911258dfb8e3ff68696a2021a64b | [
"MIT"
] | 1 | 2018-03-09T03:45:22.000Z | 2018-03-09T03:45:22.000Z | """ The create_app function wraps the creation of a new Flask object, and
returns it after it's loaded up with configuration settings
using app.config
"""
from flask import jsonify
from flask_api import FlaskAPI
from flask_cors import CORS
from flask_sqlalchemy import SQLAlchemy
from flask_jwt_extended import JWTManager
from flask_mail import Mail
from instance.config import app_config
db = SQLAlchemy()
jwt = JWTManager()
mail = Mail()
def create_app(config_name):
"""Function wraps the creation of a new Flask object, and returns it after it's
loaded up with configuration settings
"""
app = FlaskAPI(__name__, instance_relative_config=True)
cors = CORS(app)
app.config.from_object(app_config[config_name])
app.config.from_pyfile('config.py')
db.init_app(app)
jwt.init_app(app)
mail.init_app(app)
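    # Blueprints are imported inside the factory (a common pattern to avoid circular imports).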
from app.auth.views import auth
from app.business.views import biz
from app.reviews.views import rev
from app.search.views import search
from app.models import BlacklistToken
@app.errorhandler(400)
def bad_request(error):
"""Error handler for a bad request"""
        return jsonify(dict(error='The Server did not understand ' +
                            'the request')), 400
@app.errorhandler(404)
def not_found(error):
"""Error handler for not found page"""
return jsonify(dict(error='The Resource is not available')), 404
@app.errorhandler(405)
def method_not_allowed(error):
"""Error handler for wrong method to an endpoint"""
return jsonify(dict(error='The HTTP request Method' +
' is not allowed')), 405
@jwt.token_in_blacklist_loader
def check_if_token_in_blacklist(decrypted_token):
"""Check if token is blacklisted"""
jti = decrypted_token['jti']
blacklist = BlacklistToken.query.filter_by(token=jti).first()
if blacklist is None:
return False
return blacklist.revoked
app.register_blueprint(auth)
app.register_blueprint(biz)
app.register_blueprint(rev)
app.register_blueprint(search)
return app
| 31.521739 | 83 | 0.686897 | 0 | 0 | 0 | 0 | 948 | 0.435862 | 0 | 0 | 589 | 0.270805 |
dfdfaf2898c9221e7f4f486f5412b4e767f314f2 | 3,821 | py | Python | tests/test_constraints.py | jayvdb/versions | 951bc3fd99b6a675190f11ee0752af1d7ff5b440 | [
"MIT"
] | 3 | 2015-02-20T04:02:25.000Z | 2021-04-06T14:42:21.000Z | tests/test_constraints.py | jayvdb/versions | 951bc3fd99b6a675190f11ee0752af1d7ff5b440 | [
"MIT"
] | 1 | 2019-07-07T06:37:01.000Z | 2019-07-07T06:37:01.000Z | tests/test_constraints.py | jayvdb/versions | 951bc3fd99b6a675190f11ee0752af1d7ff5b440 | [
"MIT"
] | 2 | 2019-07-07T05:40:29.000Z | 2021-04-06T14:42:23.000Z | from unittest import TestCase
from versions.constraints import Constraints, merge, ExclusiveConstraints
from versions.constraint import Constraint
class TestConstraints(TestCase):
def test_match(self):
self.assertTrue(Constraints.parse('>1,<2').match('1.5'))
def test_match_in(self):
self.assertTrue('1.5' in Constraints.parse('>1,<2'))
def test_parse(self):
constraints = Constraints.parse('>1,<2')
def test_add(self):
self.assertEqual(Constraints() + '>1', Constraints.parse('>1'))
self.assertEqual(Constraints() + Constraints.parse('>1'),
Constraints.parse('>1'))
self.assertEqual(Constraints() + Constraint.parse('>1'),
Constraints.parse('>1'))
self.assertRaises(TypeError, Constraints().__add__, 42)
def test_iadd(self):
constraints = Constraints()
constraints += '>1'
self.assertEqual(constraints, Constraints.parse('>1'))
def test_str(self):
self.assertEqual(str(Constraints([Constraint.parse('>1'),
Constraint.parse('<2')])),
'>1.0.0,<2.0.0')
def test_repr(self):
self.assertEqual(repr(Constraints()), "Constraints()")
self.assertEqual(repr(Constraints.parse('==1')),
"Constraints.parse('==1.0.0')")
def test_eq_invalid_constraints_str(self):
self.assertFalse(Constraints() == '#@$!')
class TestMerge(TestCase):
def assertMerge(self, input, output):
self.assertEqual(merge(input), output)
def test_raises_ExclusiveConstraints(self):
self.assertRaises(ExclusiveConstraints, merge,
[Constraint.parse('==1'), Constraint.parse('==2')])
self.assertRaises(ExclusiveConstraints, merge,
[Constraint.parse('>2'), Constraint.parse('<1')])
self.assertRaises(ExclusiveConstraints, merge,
[Constraint.parse('>2'), Constraint.parse('<2')])
self.assertRaises(ExclusiveConstraints, merge,
[Constraint.parse('>2'), Constraint.parse('<=2')])
# the first 2 constraints will be merge into ==2,
# which conflicts with !=2
self.assertRaises(ExclusiveConstraints, merge,
[Constraint.parse('<=2'), Constraint.parse('>=2'),
Constraint.parse('!=2')])
def test(self):
constraints = [Constraint.parse('>1'), Constraint.parse('<2')]
self.assertMerge(constraints, constraints)
self.assertMerge([Constraint.parse('<2'), Constraint.parse('<3')],
[Constraint.parse('<2.0.0')])
self.assertMerge([Constraint.parse('<2'), Constraint.parse('>=1')],
[Constraint.parse('>=1.0.0'), Constraint.parse('<2.0.0')])
self.assertMerge([Constraint.parse('>=2'), Constraint.parse('>2')],
[Constraint.parse('>2.0.0')])
self.assertMerge([Constraint.parse('>1'), Constraint.parse('>=2')],
[Constraint.parse('>=2.0.0')])
self.assertMerge([Constraint.parse('<2'), Constraint.parse('<=1')],
[Constraint.parse('<=1.0.0')])
self.assertMerge([Constraint.parse('<=2'), Constraint.parse('<1')],
[Constraint.parse('<1.0.0')])
self.assertMerge([Constraint.parse('<=2'), Constraint.parse('>=2')],
[Constraint.parse('==2.0.0')])
# Negative constraints should not be omitted!
self.assertMerge([Constraint.parse('!=2'), Constraint.parse('!=1')],
sorted([Constraint.parse('!=1.0.0'),
Constraint.parse('!=2.0.0')]))
| 40.221053 | 83 | 0.559016 | 3,667 | 0.959696 | 0 | 0 | 0 | 0 | 0 | 0 | 479 | 0.12536 |
dfe01ac7f7adb258b0362ce750c15bb90b3ecb5f | 520 | py | Python | run.py | bayusetiawan01/poj | 9c205ce298a2b3ca0d9c00b7d4a3fd05fecf326a | [
"MIT"
] | 25 | 2016-02-26T17:35:19.000Z | 2021-08-17T10:30:14.000Z | run.py | bayusetiawan01/poj | 9c205ce298a2b3ca0d9c00b7d4a3fd05fecf326a | [
"MIT"
] | 5 | 2016-04-27T16:52:46.000Z | 2021-04-24T10:06:16.000Z | run.py | bayusetiawan01/poj | 9c205ce298a2b3ca0d9c00b7d4a3fd05fecf326a | [
"MIT"
] | 6 | 2016-04-27T16:50:13.000Z | 2021-04-03T06:27:41.000Z | import sys
import subprocess
if __name__ == "__main__":
try:
executable = sys.argv[1]
input_filename = sys.argv[2]
output_filename = sys.argv[3]
tl = sys.argv[4]
except IndexError:
sys.exit(-1)
input_file = open(input_filename, "r")
output_file = open(output_filename, "w")
returncode = subprocess.call(["timeout", tl, "./{}".format(executable)], stdin = input_file, stdout = output_file)
print(returncode)
input_file.close()
output_file.close()
| 27.368421 | 118 | 0.634615 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 31 | 0.059615 |
dfe12be6330b0968db19fa6ffe0881fb8fa8099d | 1,224 | py | Python | typefactory/constraints/numeric.py | stevemccartney/typefactory | 75c9eb9eec9a7b9488db9cc0d06352f4fd1de1d9 | [
"Apache-2.0"
] | null | null | null | typefactory/constraints/numeric.py | stevemccartney/typefactory | 75c9eb9eec9a7b9488db9cc0d06352f4fd1de1d9 | [
"Apache-2.0"
] | null | null | null | typefactory/constraints/numeric.py | stevemccartney/typefactory | 75c9eb9eec9a7b9488db9cc0d06352f4fd1de1d9 | [
"Apache-2.0"
] | null | null | null | from dataclasses import dataclass
from numbers import Real
from typing import Optional
@dataclass
class MultipleOf:
param: Real
def __call__(self, value: Real) -> Optional[str]:
if value % self.param != 0:
return f"Value must be a multiple of {self.param}"
else:
return None
@dataclass
class Minimum:
param: Real
def __call__(self, value: Real) -> Optional[str]:
if value < self.param:
return f"Value must be >= {self.param}"
else:
return None
@dataclass
class ExclusiveMinimum:
param: Real
def __call__(self, value: Real) -> Optional[str]:
if value <= self.param:
return f"Value must be > {self.param}"
else:
return None
@dataclass
class Maximum:
param: Real
def __call__(self, value: Real) -> Optional[str]:
if value > self.param:
return f"Value must be <= {self.param}"
else:
return None
@dataclass
class ExclusiveMaximum:
param: Real
def __call__(self, value: Real) -> Optional[str]:
if value >= self.param:
return f"Value must be < {self.param}"
else:
return None
| 20.745763 | 62 | 0.586601 | 1,067 | 0.871732 | 0 | 0 | 1,122 | 0.916667 | 0 | 0 | 169 | 0.138072 |
dfe159b3edd0ee9b633d2a90e3ddecd214d799b8 | 4,580 | py | Python | Version Autonome/main.py | chiudidier/ProjetBloc5 | 214f401e5b35bc5894ecc3d20f338762b689f2ca | [
"CC0-1.0"
] | null | null | null | Version Autonome/main.py | chiudidier/ProjetBloc5 | 214f401e5b35bc5894ecc3d20f338762b689f2ca | [
"CC0-1.0"
] | null | null | null | Version Autonome/main.py | chiudidier/ProjetBloc5 | 214f401e5b35bc5894ecc3d20f338762b689f2ca | [
"CC0-1.0"
] | null | null | null | from taquin import *
from random import *
from math import *
#main
'''
old : ancienne façon de mélanger qui correspond à une manipulation du taquin. Plus à coder pour les élèves et pour faire des essais de profondeur
montxt='012345678'# position initiale = solution
montaquin=Taquin(montxt)# création du taquin
# mélange en realisant 15 coups aléatoires à partir de la position initiale pour garantir que la position obtenu soit bien solutionnable.
while montaquin.gagnant():
montaquin.melanger(15)
'''
continuer=True
while continuer:
'''
#old : ancienne façon de mélanger qui correspond à une manipulation du taquin. Plus à coder pour les élèves et pour faire des essais de profondeur
montxt='012345678'# position initiale = solution
montaquin=Taquin(montxt)# création du taquin
# mélange en realisant 15 coups aléatoires à partir de la position initiale pour garantir que la position obtenu soit bien solutionnable.
while montaquin.estGagnant():
montaquin.melanger(15)
'''
# création aléatoire du taquin initiale, n'utiliser qu'avec IDA
montxt=random_init('012345678')# position initiale créé à partir d'une position aléatoire mais dont la solvabilité est vérifiable
montaquin=Taquin(montxt)# création du taquin
print(montaquin)
# valeur arbitrairement choisie : une valeur plus grande donnera des taquins plus difficiles
if nbcoup(montxt) > 8 :
print('dsl nous ne pouvont pas résoudre se taquin en un temps raisonable')
else:
while not montaquin.estGagnant():# boucle principale du jeu. Sort qaund le taquin est rangé
chemin=[]
'''
#version BFS
# attention ne pas utiliser cette version avec la génération de taquin aléatoire mais utiliser le mélange à base de coup aléatoire depuis la solution.
reste=bfs(montaquin.etat)# calcul la profondeur minimum de la solution
print(reste,' mouvements au moins pour terminer.')# affiche l'aide
#fin version BFS
'''
'''
#version DLS=BFS+DFS
# attention ne pas utuiliser cette version avec la génération de taquin aléatoire mais utiliser le mélange à base de coup aléatoire depuis la solution.
reste=bfs(montaquin.etat)# calcul la profondeur minimum de la solution
dls(reste,montaquin.etat,0,chemin) #version DLS = DFS + BFS # attention ne pas utuiliser cette version avec la génération de taquin aléatoire mais utiliser le mélange à base de coup aléatoire depuis la solution.
#fin version DLS
'''
'''
#version IDS = itération d'IDS
# attention ne pas utiliser cette version avec la génération de taquin aléatoire mais utiliser le mélange à base de coup aléatoire depuis la solution.
#ids(montaquin.etat,chemin)
#fin version IDS
'''
'''
#version IDA calcule la profondeur minimum de la solution, les paramètres ne sont pas indispensables mais améliorent la lisibilité du code
'''
ida(montaquin.etat,chemin)
# cette partie est utilisable pour les version IDS, DFS et IDA
print('solution = ', chemin)#affichage des differents etats de la solution
print('nb coup à la solution',len(chemin))
nextmove=chemin.pop()
nexttaquin=Taquin(nextmove)
print('meilleur coup suivant :')
print(comparetaquins(montaquin,nexttaquin))#affichage du prochain coup
#fin de la partie solution
# enregistrement du coup du joueur
move=input('\n que voulez vous jouer (h,b,d,g): ')# demande le coup à jouer et applique le mouvement
if move=='h':
montaquin.haut()
elif move=='b':
montaquin.bas()
elif move=='d':
montaquin.droite()
elif move=='g':
montaquin.gauche()
print(montaquin)
# fin du coup du joueur
print('Bravo vous avez gagné !')
reponse=input('Voulez vous recommencer ? o/n : ')
if reponse == 'n':
continuer=False
print('Au revoir')
| 24.491979 | 224 | 0.606332 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,372 | 0.726411 |
dfe1a99576a2d37093ebe1a9717fd7144a854d6e | 1,870 | py | Python | bindings/python/test/test_objects.py | open-space-collective/open-space-toolkit-mathematics | 4b97f97f4aaa87bff848381a3519c6f764461378 | [
"Apache-2.0"
] | 5 | 2020-05-11T02:22:05.000Z | 2022-02-02T15:26:35.000Z | bindings/python/test/test_objects.py | open-space-collective/open-space-toolkit-mathematics | 4b97f97f4aaa87bff848381a3519c6f764461378 | [
"Apache-2.0"
] | 6 | 2020-01-05T20:18:18.000Z | 2021-10-14T09:36:44.000Z | bindings/python/test/test_objects.py | open-space-collective/open-space-toolkit-mathematics | 4b97f97f4aaa87bff848381a3519c6f764461378 | [
"Apache-2.0"
] | 2 | 2020-03-05T18:18:13.000Z | 2020-04-07T17:42:24.000Z | ################################################################################################################################################################
# @project Open Space Toolkit ▸ Mathematics
# @file bindings/python/test/test_objects.py
# @author Lucas Brémond <[email protected]>
# @license Apache License 2.0
################################################################################################################################################################
import ostk.mathematics as mathematics
################################################################################################################################################################
Angle = mathematics.geometry.Angle
Point2d = mathematics.geometry.d2.objects.Point
Polygon2d = mathematics.geometry.d2.objects.Polygon
Transformation2d = mathematics.geometry.d2.Transformation
Point3d = mathematics.geometry.d3.objects.Point
PointSet3d = mathematics.geometry.d3.objects.PointSet
Line3d = mathematics.geometry.d3.objects.Line
Ray3d = mathematics.geometry.d3.objects.Ray
Segment3d = mathematics.geometry.d3.objects.Segment
Plane = mathematics.geometry.d3.objects.Plane
Polygon3d = mathematics.geometry.d3.objects.Polygon
Cuboid = mathematics.geometry.d3.objects.Cuboid
Sphere = mathematics.geometry.d3.objects.Sphere
Ellipsoid = mathematics.geometry.d3.objects.Ellipsoid
Transformation3d = mathematics.geometry.d3.Transformation
Quaternion = mathematics.geometry.d3.transformations.rotations.Quaternion
RotationVector = mathematics.geometry.d3.transformations.rotations.RotationVector
RotationMatrix = mathematics.geometry.d3.transformations.rotations.RotationMatrix
################################################################################################################################################################
| 53.428571 | 160 | 0.535294 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 838 | 0.447411 |
dfe2bc23e1d5c4fe435748a54d9f2a2f9b030af5 | 1,005 | py | Python | twemoji/mkqrc.py | ericosur/myqt | e96f77f99442c44e51a1dbe1ee93edfa09b3db0f | [
"MIT"
] | null | null | null | twemoji/mkqrc.py | ericosur/myqt | e96f77f99442c44e51a1dbe1ee93edfa09b3db0f | [
"MIT"
] | null | null | null | twemoji/mkqrc.py | ericosur/myqt | e96f77f99442c44e51a1dbe1ee93edfa09b3db0f | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# coding: utf-8
''' helper to generate emoji.qrc '''
from glob import glob
import sys
import re
def output_qrc(arr, fn='emoji.qrc') -> None:
''' emoji.qrc '''
header = '''<RCC>
<qresource prefix="/">'''
footer = ''' </qresource>
</RCC>'''
with open(fn, 'wt', encoding='utf8') as fobj:
print(header, file=fobj)
for f in arr:
print(f' <file>{f}</file>', file=fobj)
print(footer, file=fobj)
print('output to:', fn)
def output_list(arr, fn='parse_list/list.txt') -> None:
''' output arr '''
with open(fn, 'wt', encoding='utf8') as fobj:
for f in arr:
r = re.sub(r'^72x72\/(.+)\.png', r'\1', f)
print(r, file=fobj)
print('output to:', fn)
def main() -> None:
''' main '''
arr = glob('72x72/*.png')
#print('len:', len(arr))
if not arr:
sys.exit(1)
arr.sort()
output_qrc(arr)
output_list(arr)
if __name__ == '__main__':
main()
| 22.333333 | 57 | 0.528358 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 362 | 0.360199 |
dfe2c5242a263720a913b48fff9c5a2c72756ddd | 1,246 | py | Python | Python3-StandardLibrary/Chapter16_Web03_cgi.py | anliven/Reading-Code-Learning-Python | a814cab207bbaad6b5c69b9feeb8bf2f459baf2b | [
"Apache-2.0"
] | null | null | null | Python3-StandardLibrary/Chapter16_Web03_cgi.py | anliven/Reading-Code-Learning-Python | a814cab207bbaad6b5c69b9feeb8bf2f459baf2b | [
"Apache-2.0"
] | null | null | null | Python3-StandardLibrary/Chapter16_Web03_cgi.py | anliven/Reading-Code-Learning-Python | a814cab207bbaad6b5c69b9feeb8bf2f459baf2b | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import cgi
form = cgi.FieldStorage() # 创建FieldStorage实例(应只创建一个)
name = form.getvalue('name', 'world') # CGI脚本通过getvalue方法获取值,这里默认值为world
print("""Content-type: text/html
<html>
<head>
<title>Greeting Page</title>
</head>
<body>
<h1>Hello, {}!</h1>
<form action='Chapter16_Web03_cgi.py'>
Change Name:<input type='text' name='name'>
<input type='submit' value='Submit'>
</form>
</body>
</html>
""".format(name))
# ### 脚本说明
# 实现包含HTML表单的简单CGI脚本;
# 执行脚本:
# 1-启动支持cgi的Web服务器:在命令行下执行“py -3 -m http.server --cgi”;
# 2-将本CGI脚本放在服务器所在目录的子目录cgi-bin,并设置权限;
# 3-在浏览器打开“http://127.0.0.1:8000/cgi-bin/Chapter16_Web03_cgi.py”;
# 4-填写文本并提交,将显示形如“Hello world”的内容;
#
# ### HTML表单的相关说明
# - HTML表单是一个包含表单元素的区域,允许用户在表单中输入内容,比如文本域、下拉列表、单选框、复选框、提交按钮等;
# - 使用表单标签<form>来设置,属性action设置为脚本的名称,意味着提交表单后将再次运行这个脚本;
# - 输入元素标签<input>:输入类型由类型属性(type)定义;
#
# ### Web框架
# 对于重要或复杂的Web应用,一般不会直接为其编写繁琐的CGI脚本,而是使用Web框架,自动完成很多繁重的环节;
# 更多信息:Python的Web编程指南(https://wiki.python.org/moin/WebProgramming);
#
# ### Web框架Flask
# 简单又实用的Flask,适用于较复杂的服务端Web应用开发;
# A simple framework for building complex web applications.
# Home-page: https://www.palletsprojects.com/p/flask/
# Documentation:http://flask.pocoo.org/docs/
| 28.318182 | 73 | 0.699839 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,788 | 0.941053 |
dfe2c9adf24a8776a39019cdfbf8a0a54e0be58c | 1,809 | py | Python | predefined_values.py | kovvik/bert_reader | 1b3b6a2bc29a026c64d2d7ba53ec5fabebf1f9e5 | [
"MIT"
] | null | null | null | predefined_values.py | kovvik/bert_reader | 1b3b6a2bc29a026c64d2d7ba53ec5fabebf1f9e5 | [
"MIT"
] | null | null | null | predefined_values.py | kovvik/bert_reader | 1b3b6a2bc29a026c64d2d7ba53ec5fabebf1f9e5 | [
"MIT"
] | null | null | null | # https://uefi.org/sites/default/files/resources/UEFI%20Spec%202_6.pdf
# N.2.2 Section Descriptor
section_types = {
"9876ccad47b44bdbb65e16f193c4f3db": {
"name": "Processor Generic",
"error_record_reference": {}
},
"dc3ea0b0a1444797b95b53fa242b6e1d": {
"name": "Processor Specific - IA32/X64",
"error_record_reference": {}
},
"e429faf13cb711d4bca70080c73c8881": {
"name": "Processor Specific - IPF",
"error_record_reference": {}
},
"e19e3d16bc1111e49caac2051d5d46b0": {
"name": "Processor Specific - ARM",
"error_record_reference": {}
},
"a5bc11146f644edeb8633e83ed7c83b1": {
"name": "Platform Memory",
"error_record_reference": {}
},
"d995e954bbc1430fad91b44dcb3c6f35": {
"name": "PCIe",
"error_record_reference": {}
},
"81212a9609ed499694718d729c8e69ed": {
"name": "Firmware Error Record Reference",
"error_record_reference": {
"firmware_error_record_type": (0, 1, "byte"),
"reserved": (1, 7, "hex"),
"record_identifier": (8, 8, "hex")
}
},
"c57539633b844095bf78eddad3f9c9dd": {
"name": "PCI/PCI-X Bus",
"error_record_reference": {}
},
"eb5e4685ca664769b6a226068b001326": {
"name": "DMAr Generic",
"error_record_reference": {}
},
"71761d3732b245cda7d0b0fedd93e8cf": {
"name": "Intel® VT for Directed I/O specific DMAr section",
"error_record_reference": {}
},
"036f84e17f37428ca79e575fdfaa84ec": {
"name": "IOMMU specific DMAr section",
"error_record_reference": {}
}
}
error_severity = [
"Recoverable (also called non-fatal uncorrected)",
"Fatal",
"Corrected",
"Informational"
]
| 29.177419 | 70 | 0.599779 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,222 | 0.675138 |
dfe30fcd6927ef89f0f16539956bef3a4837e607 | 1,336 | py | Python | tests/filestack_helpers_test.py | SanthoshBala18/filestack-python | db55f3a27a4d073e1ba33d3d09a3def8da1a25e4 | [
"Apache-2.0"
] | 47 | 2017-01-28T12:27:18.000Z | 2021-07-02T16:29:04.000Z | tests/filestack_helpers_test.py | malarozi/filestack-python | 7109a9c20225532c95f0204d12649137c0de01a1 | [
"Apache-2.0"
] | 36 | 2017-01-25T23:48:33.000Z | 2022-01-29T22:33:12.000Z | tests/filestack_helpers_test.py | malarozi/filestack-python | 7109a9c20225532c95f0204d12649137c0de01a1 | [
"Apache-2.0"
] | 24 | 2017-01-24T23:57:32.000Z | 2022-01-29T22:34:34.000Z | import pytest
from filestack.helpers import verify_webhook_signature
@pytest.mark.parametrize('signature, expected_result', [
('57cbb25386c3d6ff758a7a75cf52ba02cf2b0a1a2d6d5dfb9c886553ca6011cb', True),
('incorrect-signature', False),
])
def test_webhook_verification(signature, expected_result):
secret = 'webhook-secret'
body = b'{"text": {"filename": "filename.jpg", "key": "kGaeljnga9wkysK6Z_filename.jpg"}}'
headers = {
'FS-Signature': signature,
'FS-Timestamp': 123456789999
}
result, details = verify_webhook_signature(secret, body, headers)
assert result is expected_result
if expected_result is False:
assert 'Signature mismatch' in details['error']
@pytest.mark.parametrize('secret, body, headers, err_msg', [
('hook-secret', b'body', 'should be a dict', 'value is not a dict'),
(1, b'body', {'FS-Signature': 'abc', 'FS-Timestamp': 123}, 'value is not a string'),
('hook-secret', b'', {'FS-Timestamp': 123}, 'fs-signature header is missing'),
('hook-secret', ['incorrect'], {'FS-Signature': 'abc', 'FS-Timestamp': 123}, 'Invalid webhook body'),
])
def test_agrument_validation(secret, body, headers, err_msg):
result, details = verify_webhook_signature(secret, body, headers)
assert result is False
assert err_msg in details['error']
| 40.484848 | 105 | 0.695359 | 0 | 0 | 0 | 0 | 1,261 | 0.943862 | 0 | 0 | 570 | 0.426647 |
dfe42600497b94099e0a72123b092ceef56b943a | 4,558 | py | Python | pattern/check_multiples.py | Lostefra/TranslationCoherence | b7b09c475cc78842d9724161a8cbee372d41da08 | [
"MIT"
] | null | null | null | pattern/check_multiples.py | Lostefra/TranslationCoherence | b7b09c475cc78842d9724161a8cbee372d41da08 | [
"MIT"
] | null | null | null | pattern/check_multiples.py | Lostefra/TranslationCoherence | b7b09c475cc78842d9724161a8cbee372d41da08 | [
"MIT"
] | null | null | null | import rdflib
from rdflib.term import URIRef
from utilities.utility_functions import prefix
from utilities import constants
def has_equivalent(node, graph):
equivalents = list(graph.subjects(predicate=constants.EQUIVALENCE_PREDICATE, object=node)) + \
list(graph.subjects(predicate=constants.SYNONYMY_PREDICATE, object=node)) + \
list(graph.objects(subject=node, predicate=constants.EQUIVALENCE_PREDICATE)) + \
list(graph.objects(subject=node, predicate=constants.SYNONYMY_PREDICATE))
if equivalents:
return True
return False
# def multiple_classified(node1, node2, n, result_graph):
# expressions = result_graph.subject_objects(predicate=n.differentExpression)
# exprs_1, exprs_2 = [], []
# list_exprs = list(map(list, zip(*expressions)))
# if list_exprs:
# exprs_1, exprs_2 = list_exprs[0], list_exprs[1]
# # print(f"{exprs_1}, {exprs_2}")
# return any([(expr_1, n.involves_node, node1) in result_graph for expr_1 in exprs_1 + exprs_2]) or \
# any([(expr_2, n.involves_node, node2) in result_graph for expr_2 in exprs_2 + exprs_1])
# return False
def check_multiples(g1, g2, n, result_graph, indexes, lemmas, frontiers, new_frontiers):
# Check for pattern "several"
# fred:number_1 fred:numberOf | bunchOf | seriesOf, quant:hasQuantifier quant:multiple | quant:some, hasQuality fred:Several
multiples = ['number', 'bunch', 'series', 'array', 'collection', 'group', 'amount']
quantifiers = ['multiple', 'some', 'many']
quant_predicate = URIRef(constants.NAMESPACES['quant'] + 'hasQuantifier')
for node1, node2 in frontiers:
objs = list(g2.objects(subject=node2, predicate=quant_predicate))
if any([q in obj for q in quantifiers for obj in objs]):# and not multiple_classified(node1, node2, n, result_graph):
# print(f"OBJS: {[prefix(o2, g2) for o2 in objs]}")
for s1, p1 in g1.subject_predicates(object=node1):
if not has_equivalent(s1, result_graph):
for m in multiples:
if m in prefix(p1, g1):# and any([q in prefix(o2,g2) for q in quantifiers for o2 in objs]):
# Create a hierarchy relationship
# "multiples_i" is a reification of a N-ary relationship
expr_1 = "expression_" + next(indexes["expressions"])
expr_2 = "expression_" + next(indexes["expressions"])
result_graph.add((n[expr_1], constants.TYPE_PREDICATE,
rdflib.term.URIRef(constants.NAMESPACES["translation_coherence_vocabulary"] + "Expression")))
result_graph.add((n[expr_2], constants.TYPE_PREDICATE,
rdflib.term.URIRef(constants.NAMESPACES["translation_coherence_vocabulary"] + "Expression")))
result_graph.add((n[expr_1], n.involvesNoun, node1))
result_graph.add((n[expr_1], n.involvesMultiple, s1))
result_graph.add((n[expr_2], n.involvesNoun, node2))
for obj in objs:
result_graph.add((n[expr_2], quant_predicate, obj))
result_graph.add((n[expr_1], n.differentExpression, n[expr_2]))
# print("FOUND", prefix(node1, g1), prefix(p1, g1), prefix(node2, g2), [prefix(o2, g2) for o2 in objs])
objs = list(g1.objects(subject=node1, predicate=quant_predicate))
if any([q in obj for q in quantifiers for obj in objs]):# and not multiple_classified(node1, node2, n, result_graph):
# print(f"OBJS: {[prefix(o1, g1) for o1 in objs]}")
for s2,p2 in g2.subject_predicates(object=node2):
if not has_equivalent(s2, result_graph):
for m in multiples:
if m in prefix(p2, g2):# and any([q in prefix(o1,g1) for q in quantifiers for o1 in objs]):
# Create a hierarchy relationship
# "multiples_i" is a reification of a N-ary relationship
expr_1 = "expression_" + next(indexes["expressions"])
expr_2 = "expression_" + next(indexes["expressions"])
result_graph.add((n[expr_1], constants.TYPE_PREDICATE,
rdflib.term.URIRef(constants.NAMESPACES["translation_coherence_vocabulary"] + "Expression")))
result_graph.add((n[expr_2], constants.TYPE_PREDICATE,
rdflib.term.URIRef(constants.NAMESPACES["translation_coherence_vocabulary"] + "Expression")))
result_graph.add((n[expr_1], n.involvesNoun, node1))
for obj in objs:
result_graph.add((n[expr_1], quant_predicate, obj))
result_graph.add((n[expr_2], n.involvesNoun, node2))
result_graph.add((n[expr_2], n.involvesMultiple, s2))
result_graph.add((n[expr_1], n.differentExpression, n[expr_2]))
# print("FOUND", prefix(node2, g2), prefix(p2, g2), prefix(node1, g1), [prefix(o1, g1) for o1 in objs])
| 54.915663 | 125 | 0.698113 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,813 | 0.397762 |
dfe4a5779ee044b60ee7d70e0fc7668e972bffae | 5,875 | py | Python | app/main/views.py | Ammoh-Moringa/pitches | 2551f2c8e323066ebdde3f92046368d7c7759fa6 | [
"MIT"
] | null | null | null | app/main/views.py | Ammoh-Moringa/pitches | 2551f2c8e323066ebdde3f92046368d7c7759fa6 | [
"MIT"
] | null | null | null | app/main/views.py | Ammoh-Moringa/pitches | 2551f2c8e323066ebdde3f92046368d7c7759fa6 | [
"MIT"
] | null | null | null | from flask import render_template, request, redirect, url_for, abort
from flask_login import login_required, current_user
from . forms import PitchForm, CommentForm, CategoryForm
from .import main
from .. import db
from ..models import User, Pitch, Comments, PitchCategory, Votes
#display categories on the landing page
@main.route('/')
def index():
"""
View root page function that returns index page
"""
all_category = PitchCategory.get_categories()
all_pitches = Pitch.query.order_by('id').all()
print(all_pitches)
title = 'Home- Welcome'
return render_template('index.html', title = title, categories=all_category, all_pitches=all_pitches)
#Route for adding a new pitch
@main.route('/pitch/newpitch',methods= ['POST','GET'])
@login_required
def newPitch():
pitch = PitchForm()
if pitch.validate_on_submit():
title = pitch.pitch_title.data
category = pitch.pitch_category.data
yourPitch = pitch.pitch_comment.data
#update pitch instance
newPitch = Pitch(pitch_title = title,pitch_category = category,pitch_comment = yourPitch,user= current_user)
#save pitch
newPitch.save_pitch()
return redirect(url_for('.index'))
title = 'NEW PITCH'
return render_template('new_pitch.html',title = title,pitchform = pitch)
@main.route('/categories/<int:id>')
def category(id):
category = PitchCategory.query.get(id)
if category is None:
abort(404)
pitches=Pitch.get_pitches(id)
return render_template('category.html', pitches=pitches, category=category)
@main.route('/add/category', methods=['GET','POST'])
@login_required
def new_category():
"""
View new group route function that returns a page with a form to create a category
"""
form = CategoryForm()
if form.validate_on_submit():
name = form.name.data
new_category = PitchCategory(name = name)
new_category.save_category()
return redirect(url_for('.index'))
title = 'New category'
return render_template('new_category.html', category_form = form, title = title)
#view single pitch alongside its comments
@main.route('/comment/<int:id>',methods= ['POST','GET'])
@login_required
def viewPitch(id):
onepitch = Pitch.getPitchId(id)
comments = Comments.get_comments(id)
if request.args.get("like"):
onepitch.likes = onepitch.likes + 1
db.session.add(onepitch)
db.session.commit()
return redirect("/comment/{pitch_id}".format(pitch_id=category.id))
elif request.args.get("dislike"):
onepitch.dislikes = onepitch.dislikes + 1
db.session.add(onepitch)
db.session.commit()
return redirect("/comment/{pitch_id}".format(pitch_id=category.id))
commentForm = CommentForm()
if commentForm.validate_on_submit():
opinion = commentForm.opinion.data
newComment = Comments(opinion = opinion,user = current_user,pitches_id= id)
newComment.save_comment()
return render_template('comments.html',commentForm = commentForm,comments = comments,pitch = onepitch)
#adding a comment
@main.route('/write_comment/<int:id>', methods=['GET', 'POST'])
@login_required
def post_comment(id):
"""
Function to post comments
"""
form = CommentForm()
title = 'post comment'
pitches = Pitch.query.filter_by(id=id).first()
if pitches is None:
abort(404)
if form.validate_on_submit():
opinion = form.opinion.data
new_comment = Comments(opinion = opinion, user_id = current_user.id, pitches_id = pitches.id)
new_comment.save_comment()
return redirect(url_for('.view_pitch', id = pitches.id))
return render_template('post_comment.html', comment_form = form, title = title)
@main.route('/category/interview',methods= ['GET'])
def displayInterviewCategory():
interviewPitches = Pitch.get_pitches('interview')
return render_template('interviews.html',interviewPitches = interviewPitches)
@main.route('/category/product',methods= ['POST','GET'])
def displayProductCategory():
productPitches = Pitch.get_pitches('product')
return render_template('product.html',productPitches = productPitches)
@main.route('/category/promotion',methods= ['POST','GET'])
def displayPromotionCategory():
promotionPitches = Pitch.get_pitches('promotion')
return render_template('promotion.html',promotionPitches = promotionPitches)
@main.route('/category/pickup',methods= ['POST','GET'])
def displayPickupCategory():
pickupPitches = Pitch.get_pitches('pickup')
return render_template('pickup.html',pickupPitches = pickupPitches)
#Routes upvoting/downvoting pitches
@main.route('/pitch/upvote/<int:id>&<int:vote_type>')
@login_required
def upvote(id,vote_type):
"""
View function that adds one to the vote_number column in the votes table
"""
# Query for user
votes = Votes.query.filter_by(user_id=current_user.id).all()
print(f'The new vote is {votes}')
to_str=f'{vote_type}:{current_user.id}:{id}'
print(f'The current vote is {to_str}')
if not votes:
new_vote = Votes(vote=vote_type, user_id=current_user.id, pitches_id=id)
new_vote.save_vote()
# print(len(count_likes))
print('YOU HAVE new VOTED')
for vote in votes:
if f'{vote}' == to_str:
print('YOU CANNOT VOTE MORE THAN ONCE')
break
else:
new_vote = Votes(vote=vote_type, user_id=current_user.id, pitches_id=id)
new_vote.save_vote()
print('YOU HAVE VOTED')
break
# count_likes = Votes.query.filter_by(pitches_id=id, vote=1).all()
# upvotes=len(count_likes)
# count_dislikes = Votes.query.filter_by(pitches_id=id, vote=2).all()
return redirect(url_for('.view_pitch', id=id))
| 30.759162 | 116 | 0.683574 | 0 | 0 | 0 | 0 | 5,391 | 0.917617 | 0 | 0 | 1,513 | 0.257532 |
dfe61cbf2bb3b5a52f9141b8b81d778c054609e4 | 10,299 | py | Python | networks/classes/centernet/models/ModelCenterNet.py | ALIENK9/Kuzushiji-recognition | a18c1fbfa72b6bbbcfe4004148cd0e90531acf6b | [
"MIT"
] | 2 | 2019-09-15T08:52:38.000Z | 2019-09-15T08:58:58.000Z | networks/classes/centernet/models/ModelCenterNet.py | MatteoRizzo96/CognitiveServices | a5efeb8f585ae2ee0465ab25e587c4db0e2b32b3 | [
"MIT"
] | null | null | null | networks/classes/centernet/models/ModelCenterNet.py | MatteoRizzo96/CognitiveServices | a5efeb8f585ae2ee0465ab25e587c4db0e2b32b3 | [
"MIT"
] | 2 | 2020-11-06T07:29:56.000Z | 2020-11-06T07:33:27.000Z | import glob
import os
import pandas as pd
from tensorflow.python.keras.preprocessing.image import ImageDataGenerator
from typing import Dict, List, Union, Tuple
import numpy as np
import tensorflow as tf
from tensorflow.python.keras.callbacks import ModelCheckpoint, TensorBoard, LearningRateScheduler
from networks.classes.centernet.datasets.ClassificationDataset import ClassificationDataset
class ModelCenterNet:
def __init__(self, logs: Dict):
self.__logs = logs
self.__input_width: int = None
self.__input_height: int = None
def build_model(self,
model_generator,
input_shape: Tuple[int, int, int], mode: str,
n_category: int = 1) -> tf.keras.Model:
"""
Builds the network.
:param model_generator: a generator for the network
:param input_shape: the shape of the input images
:param mode: the type of model that must be generated
:param n_category: the number of categories (possible classes). Defaults to 1 in order to detect the
presence or absence of an object only (and not its label).
:return: a Keras model
"""
self.__input_width = input_shape[0]
self.__input_height = input_shape[1]
self.__logs['execution'].info('Building {} model...'.format(mode))
return model_generator.generate_model(input_shape, mode, n_category)
@staticmethod
def setup_callbacks(weights_log_path: str, batch_size: int, lr: float) -> List[
tf.keras.callbacks.Callback]:
"""
Sets up the callbacks for the training of the model.
"""
# Setup callback to save the best weights after each epoch
checkpointer = ModelCheckpoint(filepath=os.path.join(weights_log_path,
'weights.{epoch:02d}-{val_loss:.2f}.hdf5'),
verbose=0,
save_best_only=True,
save_weights_only=True,
monitor='val_loss',
mode='min')
tensorboard_log_dir = os.path.join(weights_log_path, 'tensorboard')
# Note that update_freq is set to batch_size * 10,
# because the epoch takes too long and batch size too short
tensorboard = TensorBoard(log_dir=tensorboard_log_dir,
write_graph=True,
histogram_freq=0,
write_grads=True,
write_images=False,
batch_size=batch_size,
update_freq=batch_size * 10)
def lrs(epoch):
if epoch > 10:
return lr / 10
elif epoch > 6:
return lr / 5
else:
return lr
lr_schedule = LearningRateScheduler(lrs, verbose=1)
return [tensorboard, checkpointer, lr_schedule]
def restore_weights(self,
model: tf.keras.Model,
init_epoch: int,
weights_folder_path: str) -> None:
"""
Restores the weights from an existing weights file
:param model:
:param init_epoch:
:param weights_folder_path:
"""
init_epoch_str = '0' + str(init_epoch) if init_epoch < 10 else str(init_epoch)
restore_path_reg = os.path.join(weights_folder_path, 'weights.{}-*.hdf5'.format(init_epoch_str))
list_files = glob.glob(restore_path_reg)
assert len(list_files) > 0, \
'ERR: No weights file match provided name {}'.format(restore_path_reg)
# Take real filename
restore_filename = list_files[0].split('/')[-1]
restore_path = os.path.join(weights_folder_path, restore_filename)
assert os.path.isfile(restore_path), \
'ERR: Weight file in path {} seems not to be a file'.format(restore_path)
self.__logs['execution'].info("Restoring weights in file {}...".format(restore_filename))
model.load_weights(restore_path)
def train(self,
dataset: Union[tf.data.Dataset, ClassificationDataset],
model: tf.keras.Model,
init_epoch: int,
epochs: int,
batch_size: int,
callbacks: List[tf.keras.callbacks.Callback],
class_weights=None,
augmentation: bool = False):
"""
Compiles and trains the model for the specified number of epochs.
"""
self.__logs['training'].info('Training the model...\n')
# Display the architecture of the model
self.__logs['training'].info('Architecture of the model:')
model.summary()
# Train the model
self.__logs['training'].info('Starting the fitting procedure:')
self.__logs['training'].info('* Total number of epochs: ' + str(epochs))
self.__logs['training'].info('* Initial epoch: ' + str(init_epoch) + '\n')
training_set, training_set_size = dataset.get_training_set()
validation_set, validation_set_size = dataset.get_validation_set()
training_steps = training_set_size // batch_size + 1
validation_steps = validation_set_size // batch_size + 1
if augmentation:
x_train, y_train = dataset.get_xy_training()
x_val, y_val = dataset.get_xy_validation()
train_image_data_generator = ImageDataGenerator(brightness_range=[0.7, 1.0],
rotation_range=10,
width_shift_range=0.1,
height_shift_range=0.1,
zoom_range=.1)
val_image_data_generator = ImageDataGenerator()
train_generator = train_image_data_generator.flow_from_dataframe(
dataframe=pd.DataFrame({'image': x_train, 'class': y_train}),
directory='',
x_col='image',
y_col='class',
class_mode='other',
target_size=(self.__input_width, self.__input_height),
batch_size=batch_size)
val_generator = val_image_data_generator.flow_from_dataframe(
dataframe=pd.DataFrame({'image': x_val, 'class': y_val}),
directory='',
x_col='image',
y_col='class',
class_mode='other',
target_size=(self.__input_width, self.__input_height),
batch_size=batch_size)
model.fit_generator(train_generator,
epochs=epochs,
steps_per_epoch=training_steps,
validation_data=val_generator,
validation_steps=validation_steps,
callbacks=callbacks,
initial_epoch=init_epoch,
class_weight=class_weights)
else:
model.fit(training_set,
epochs=epochs,
steps_per_epoch=training_steps,
validation_data=validation_set,
validation_steps=validation_steps,
callbacks=callbacks,
initial_epoch=init_epoch,
class_weight=class_weights)
self.__logs['training'].info('Training procedure performed successfully!\n')
def evaluate(self,
model: tf.keras.Model,
evaluation_set: Union[tf.data.Dataset, ClassificationDataset],
evaluation_steps: Union[int, None] = None,
batch_size: Union[int, None] = None,
augmentation: bool = False) -> Union[float, List[float], None]:
"""
Evaluate the model on provided set.
:return: the loss value if model has no other metrics, otw returns array with loss and metrics
values.
"""
self.__logs['training'].info('Evaluating the model...')
if augmentation:
x_eval, y_eval = evaluation_set.get_xy_evaluation()
data_generator = ImageDataGenerator()
evaluation_set = data_generator.flow_from_dataframe(
dataframe=pd.DataFrame({'image': x_eval, 'class': y_eval}),
directory='',
x_col='image',
y_col='class',
class_mode='other',
target_size=(self.__input_width, self.__input_height),
batch_size=batch_size)
else:
if evaluation_steps is not None and evaluation_steps == 0:
self.__logs['training'].warn('Skipping evaluation since provided set is empty')
return None
return model.evaluate(evaluation_set, verbose=1, steps=evaluation_steps)
def predict(self,
model: tf.keras.Model,
dataset: Union[tf.data.Dataset, List[str]], # List is for submission
verbose: int = 1,
steps: Union[int, None] = None,
batch_size: Union[int, None] = None,
augmentation: bool = False) -> Union[np.ndarray, List[np.ndarray]]:
"""
Performs a prediction on a given dataset
"""
self.__logs['test'].info("Predicting...")
if augmentation:
data_generator = ImageDataGenerator()
generator = data_generator.flow_from_dataframe(
dataframe=pd.DataFrame({'image': dataset}),
directory='',
x_col='image',
class_mode=None,
target_size=(self.__input_width, self.__input_height),
batch_size=batch_size,
shuffle=False)
steps = 1
return model.predict_generator(generator, steps=steps, verbose=verbose)
else:
return model.predict(dataset, verbose=verbose, steps=steps)
| 40.869048 | 108 | 0.556656 | 9,899 | 0.961161 | 0 | 0 | 1,656 | 0.160792 | 0 | 0 | 2,031 | 0.197204 |
dfe9372c929b790c9a52b80b77bdd70cddddba45 | 187 | py | Python | problem1.py | bakwc/PyCodeMonkey | 32ea3a8947133ee9f96bea269a5dfd7a5b264ac1 | [
"MIT"
] | null | null | null | problem1.py | bakwc/PyCodeMonkey | 32ea3a8947133ee9f96bea269a5dfd7a5b264ac1 | [
"MIT"
] | null | null | null | problem1.py | bakwc/PyCodeMonkey | 32ea3a8947133ee9f96bea269a5dfd7a5b264ac1 | [
"MIT"
] | null | null | null |
# find fibonacci number
def myFib(n):
pass
def tests():
assert myFib(1) == 1
assert myFib(2) == 1
assert myFib(3) == 2
assert myFib(4) == 3
assert myFib(5) == 5
| 15.583333 | 24 | 0.561497 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 23 | 0.122995 |
dfea1cd7528525e57a90decbb00e4b3b1963212b | 4,114 | py | Python | tests/web/test_show_image.py | AndrewLorente/catsnap | 57427b8f61ef5185a41e49d55ffd7dd328777834 | [
"MIT"
] | 5 | 2015-11-23T18:40:00.000Z | 2019-03-22T06:54:04.000Z | tests/web/test_show_image.py | AndrewLorente/catsnap | 57427b8f61ef5185a41e49d55ffd7dd328777834 | [
"MIT"
] | 5 | 2016-04-07T15:35:53.000Z | 2019-02-10T23:00:32.000Z | tests/web/test_show_image.py | AndrewLorente/catsnap | 57427b8f61ef5185a41e49d55ffd7dd328777834 | [
"MIT"
] | 2 | 2015-12-02T16:44:05.000Z | 2017-09-29T23:17:33.000Z | from __future__ import unicode_literals
import json
from tests import TestCase, with_settings
from nose.tools import eq_
from catsnap import Client
from catsnap.table.image import Image, ImageResize
from catsnap.table.album import Album
class TestShowImage(TestCase):
@with_settings(aws={'bucket': 'snapcats'})
def test_view_an_image(self):
session = Client().session()
album = Album(name='cow shots')
session.add(album)
session.flush()
prev_image = Image(filename='badcafe',
album_id=album.album_id)
session.add(prev_image)
image = Image(filename='deadbeef',
description='one time I saw a dead cow',
title='dead beef',
album_id=album.album_id)
session.add(image)
next_image = Image(filename='dadface',
album_id=album.album_id)
session.add(next_image)
session.flush()
response = self.app.get('/image/%d' % image.image_id)
assert 'https://s3.amazonaws.com/snapcats/deadbeef' in response.data,\
response.data
assert 'one time I saw a dead cow' in response.data, response.data
assert 'cow shots' in response.data, response.data
assert str(prev_image.image_id) in response.data, response.data
assert str(next_image.image_id) in response.data, response.data
@with_settings(aws={'bucket': 'snapcats'})
def test_view_an_image__defaults_to_medium(self):
session = Client().session()
image = Image(filename='deadbeef',
description='one time I saw a dead cow',
title='dead beef')
session.add(image)
session.flush()
for (size, suffix) in [(100, 'thumbnail'), (320, 'small'), (500, 'medium'), (1600, 'large')]:
session.add(ImageResize(image_id=image.image_id, width=size, height=size, suffix=suffix))
session.flush()
response = self.app.get('/image/%d' % image.image_id)
assert 'https://s3.amazonaws.com/snapcats/deadbeef_medium' in response.data,\
response.data
# if no medium exists, assume it's because the original is smaller than a
# "medium," and thus the original is an appropriate size.
@with_settings(aws={'bucket': 'snapcats'})
def test_view_an_image__defaults_to_original_if_no_medium_exists(self):
session = Client().session()
image = Image(filename='deadbeef',
description='one time I saw a dead cow',
title='dead beef')
session.add(image)
session.flush()
for (size, suffix) in [(100, 'thumbnail'), (320, 'small')]:
session.add(ImageResize(image_id=image.image_id, width=size, height=size, suffix=suffix))
session.flush()
response = self.app.get('/image/%d' % image.image_id)
assert 'src="https://s3.amazonaws.com/snapcats/deadbeef"' in response.data,\
response.data
@with_settings(aws={'bucket': 'snapcats'})
def test_get_image_info_as_json(self):
session = Client().session()
album = Album(name='cow shots')
session.add(album)
session.flush()
image = Image(filename='deadbeef',
description='one time I saw a dead cow',
title='dead beef',
album_id=album.album_id)
session.add(image)
image.add_tags(['cow', 'dead'])
session.flush()
response = self.app.get('/image/%d.json' % image.image_id)
eq_(json.loads(response.data), {
'description': 'one time I saw a dead cow',
'title': 'dead beef',
'album_id': album.album_id,
'tags': [ 'cow', 'dead', ],
'source_url': 'https://s3.amazonaws.com/snapcats/deadbeef',
'camera': None,
'photographed_at': None,
'focal_length': None,
'aperture': None,
'shutter_speed': None,
'iso': None,
})
| 39.557692 | 101 | 0.59018 | 3,874 | 0.941663 | 0 | 0 | 3,681 | 0.89475 | 0 | 0 | 938 | 0.228002 |
dfec725778cb5fb317db1061f7feba9a3d3f7b10 | 554 | py | Python | tests/test_imgs2bw.py | antsfamily/improc | ceab171b0e61187fa2ced7c58540d5ffde79ebac | [
"MIT"
] | 2 | 2019-09-29T08:43:31.000Z | 2022-01-12T09:46:18.000Z | tests/test_imgs2bw.py | antsfamily/improc | ceab171b0e61187fa2ced7c58540d5ffde79ebac | [
"MIT"
] | null | null | null | tests/test_imgs2bw.py | antsfamily/improc | ceab171b0e61187fa2ced7c58540d5ffde79ebac | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2018-07-04 09:43:51
# @Author : Zhi Liu ([email protected])
# @Link : http://iridescent.ink
# @Version : $1.1$
import matplotlib.cm as cm
from matplotlib import pyplot as plt
import improc as imp
datafolder = '/mnt/d/DataSets/oi/nsi/classical/'
imgspathes = [
datafolder + 'BaboonRGB.bmp',
datafolder + 'LenaRGB.bmp',
]
print(imgspathes)
bws = imp.imgs2bw(imgspathes, 50)
print(bws.dtype, bws.shape)
print(bws)
plt.figure()
plt.imshow(bws[:, :, :, 0], cm.gray)
plt.show()
| 19.103448 | 48 | 0.658845 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 235 | 0.424188 |
dfec78daa3bbf2130e5e79b3fbc047fcd7c950b3 | 764 | py | Python | Un4/Un4.py | tonypithony/forktinypythonprojectsscripts | 3dae818c822ee7de6de021e9f46d02bfe05f7355 | [
"MIT"
] | null | null | null | Un4/Un4.py | tonypithony/forktinypythonprojectsscripts | 3dae818c822ee7de6de021e9f46d02bfe05f7355 | [
"MIT"
] | null | null | null | Un4/Un4.py | tonypithony/forktinypythonprojectsscripts | 3dae818c822ee7de6de021e9f46d02bfe05f7355 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""Jump the Five"""
import argparse
# --------------------------------------------------
def get_args():
parser = argparse.ArgumentParser(description='Jump the Five',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('text', metavar='str', help='Input text')
return parser.parse_args()
def main():
args = get_args()
jumper = {'1': '9', '2': '8', '3': '7', '4': '6', '5': '0',
'6': '4', '7': '3', '8': '2', '9': '1', '0': '5'}
for char in args.text:
print(jumper.get(char, char), end='')
print()
# --------------------------------------------------
if __name__ == '__main__':
main()
# $ ./Un4.py 867-5309
# 243-0751
# $ ./Un4.py 'Call 1-800-329-8044 today!'
# Call 9-255-781-2566 today! | 25.466667 | 64 | 0.522251 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 355 | 0.46466 |
dfed59a42f4a11efd34b43e01fd5f7beba8d46b6 | 169 | py | Python | tests/web_platform/css_grid_1/abspos/test_grid_positioned_items_gaps.py | jonboland/colosseum | cbf974be54fd7f6fddbe7285704cfaf7a866c5c5 | [
"BSD-3-Clause"
] | 71 | 2015-04-13T09:44:14.000Z | 2019-03-24T01:03:02.000Z | tests/web_platform/css_grid_1/abspos/test_grid_positioned_items_gaps.py | jonboland/colosseum | cbf974be54fd7f6fddbe7285704cfaf7a866c5c5 | [
"BSD-3-Clause"
] | 35 | 2019-05-06T15:26:09.000Z | 2022-03-28T06:30:33.000Z | tests/web_platform/css_grid_1/abspos/test_grid_positioned_items_gaps.py | jonboland/colosseum | cbf974be54fd7f6fddbe7285704cfaf7a866c5c5 | [
"BSD-3-Clause"
] | 139 | 2015-05-30T18:37:43.000Z | 2019-03-27T17:14:05.000Z | from tests.utils import W3CTestCase
class TestGridPositionedItemsGaps(W3CTestCase):
vars().update(W3CTestCase.find_tests(__file__, 'grid-positioned-items-gaps-'))
| 28.166667 | 82 | 0.804734 | 130 | 0.769231 | 0 | 0 | 0 | 0 | 0 | 0 | 29 | 0.171598 |
dff0a891ee94445188ef897fe40edf7b03e0dcdf | 18 | py | Python | src/__init__.py | PMantovani/pympu6050 | bab4e680d700d9ad62855958cdb93feaaa16060c | [
"MIT"
] | null | null | null | src/__init__.py | PMantovani/pympu6050 | bab4e680d700d9ad62855958cdb93feaaa16060c | [
"MIT"
] | null | null | null | src/__init__.py | PMantovani/pympu6050 | bab4e680d700d9ad62855958cdb93feaaa16060c | [
"MIT"
] | null | null | null | name = "pympu6050" | 18 | 18 | 0.722222 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 11 | 0.611111 |
dff25402be58788805ce4000a620f3bec7823781 | 4,537 | py | Python | iocage/main.py | krcNAS/iocage | 13d87e92f8ba186b6c8b7f64a948f26a05586430 | [
"BSD-2-Clause"
] | null | null | null | iocage/main.py | krcNAS/iocage | 13d87e92f8ba186b6c8b7f64a948f26a05586430 | [
"BSD-2-Clause"
] | null | null | null | iocage/main.py | krcNAS/iocage | 13d87e92f8ba186b6c8b7f64a948f26a05586430 | [
"BSD-2-Clause"
] | null | null | null | # Copyright (c) 2014-2017, iocage
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted providing that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""The main CLI for ioc."""
import locale
import os
import re
import signal
import subprocess as su
import sys
import click
# This prevents it from getting in our way.
from click import core
import iocage.lib.ioc_check as ioc_check
core._verify_python3_env = lambda: None
user_locale = os.environ.get("LANG", "en_US.UTF-8")
locale.setlocale(locale.LC_ALL, user_locale)
# @formatter:off
# Sometimes SIGINT won't be installed.
# http://stackoverflow.com/questions/40775054/capturing-sigint-using-keyboardinterrupt-exception-works-in-terminal-not-in-scr/40785230#40785230
signal.signal(signal.SIGINT, signal.default_int_handler)
# If a utility decides to cut off the pipe, we don't care (IE: head)
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
# @formatter:on
try:
su.check_call(["sysctl", "vfs.zfs.version.spa"],
stdout=su.PIPE, stderr=su.PIPE)
except su.CalledProcessError:
sys.exit("ZFS is required to use iocage.\n"
"Try calling 'kldload zfs' as root.")
def print_version(ctx, param, value):
"""Prints the version and then exits."""
if not value or ctx.resilient_parsing:
return
print("Version\t0.9.9.2 RC")
sys.exit()
cmd_folder = os.path.abspath(os.path.join(os.path.dirname(__file__),
'cli'))
class IOCageCLI(click.MultiCommand):
"""
Iterates in the 'cli' directory and will load any module's cli definition.
"""
def list_commands(self, ctx):
rv = []
for filename in os.listdir(cmd_folder):
if filename.endswith('.py') and \
not filename.startswith('__init__'):
rv.append(re.sub(".py$", "", filename))
rv.sort()
return rv
def get_command(self, ctx, name):
try:
mod = __import__(f"iocage.cli.{name}",
None, None, ["cli"])
mod_name = mod.__name__.replace("iocage.cli.", "")
try:
if mod.__rootcmd__ and "--help" not in sys.argv[1:]:
if len(sys.argv) != 1:
if os.geteuid() != 0:
sys.exit("You need to have root privileges to"
f" run {mod_name}")
except AttributeError:
# It's not a root required command.
pass
return mod.cli
except (ImportError, AttributeError):
return
@click.command(cls=IOCageCLI)
@click.option("--version", "-v", is_flag=True, callback=print_version,
help="Display iocage's version and exit.")
def cli(version):
"""A jail manager."""
skip_check = False
skip_check_cmds = ["--help", "activate", "-v", "--version"]
try:
if "iocage" in sys.argv[0] and len(sys.argv) == 1:
skip_check = True
for arg in sys.argv[1:]:
if arg in skip_check_cmds:
skip_check = True
elif "clean" in arg:
skip_check = True
ioc_check.IOCCheck(silent=True)
if not skip_check:
ioc_check.IOCCheck()
except RuntimeError as err:
exit(err)
if __name__ == '__main__':
cli(prog_name="iocage")
| 33.858209 | 143 | 0.642054 | 1,152 | 0.253912 | 0 | 0 | 720 | 0.158695 | 0 | 0 | 2,191 | 0.482918 |
dff2b9a17cc9997e289067d562ccf28b75fc10b3 | 425 | py | Python | bookinfo/models.py | dustfine/myPlaygroundSite | 02db3321e9959437c588575f9df1079d2d8d1ed9 | [
"MIT"
] | null | null | null | bookinfo/models.py | dustfine/myPlaygroundSite | 02db3321e9959437c588575f9df1079d2d8d1ed9 | [
"MIT"
] | null | null | null | bookinfo/models.py | dustfine/myPlaygroundSite | 02db3321e9959437c588575f9df1079d2d8d1ed9 | [
"MIT"
] | null | null | null | from django.db import models
# Create your models here.
class Publisher(models.Model):
id = models.AutoField(primary_key=True)
name = models.CharField(max_length=64, null=False, unique=True)
class Book(models.Model):
id = models.AutoField(primary_key=True)
name = models.CharField(max_length=64, null=False, unique=True)
publisher = models.ForeignKey(to=Publisher,on_delete=models.CASCADE,default=None) | 35.416667 | 85 | 0.757647 | 365 | 0.858824 | 0 | 0 | 0 | 0 | 0 | 0 | 26 | 0.061176 |
dff4072d877687a20524346adc49201f57ca4cea | 905 | py | Python | svety/tests.py | clemsciences/svety | 44a0c2ab5453e9d01b71b5a3f0e0e959740c2d90 | [
"MIT"
] | null | null | null | svety/tests.py | clemsciences/svety | 44a0c2ab5453e9d01b71b5a3f0e0e959740c2d90 | [
"MIT"
] | null | null | null | svety/tests.py | clemsciences/svety | 44a0c2ab5453e9d01b71b5a3f0e0e959740c2d90 | [
"MIT"
] | null | null | null | """
"""
import os
import unittest
from lxml import etree
from svety import PACKDIR
from svety import reader
from svety import retriever
__author__ = ["Clément Besnier <[email protected]>", ]
class TestMain(unittest.TestCase):
"""
"""
def setUp(self) -> None:
self.filename = "hellqvist.xml"
self.path = os.getcwd()
retriever.retrieve_dictionary()
def test_retrieve_text(self):
result = retriever.retrieve_dictionary()
self.assertTrue(result)
self.assertIn(self.filename, os.listdir(self.path))
def test_root(self):
root = reader.get_xml_root(self.filename, self.path)
self.assertEqual(type(root), etree._Element)
def test_lookup_word(self):
root = reader.get_xml_root(self.filename, self.path)
word = reader.read_entry(root, "enkom")
self.assertEqual(word["faksimilID"], '0208')
| 23.205128 | 60 | 0.667403 | 703 | 0.775938 | 0 | 0 | 0 | 0 | 0 | 0 | 101 | 0.111479 |
dff5479d5d3e3729b12a7cdf8fd0b259fd5d0c88 | 5,424 | py | Python | tests/internal/processes/test_generator.py | clausmichele/openeo-python-client | b20af2b24fcb12d0fce0e2acdb8afeeb881ff454 | [
"Apache-2.0"
] | 1 | 2021-04-01T13:15:35.000Z | 2021-04-01T13:15:35.000Z | tests/internal/processes/test_generator.py | clausmichele/openeo-python-client | b20af2b24fcb12d0fce0e2acdb8afeeb881ff454 | [
"Apache-2.0"
] | null | null | null | tests/internal/processes/test_generator.py | clausmichele/openeo-python-client | b20af2b24fcb12d0fce0e2acdb8afeeb881ff454 | [
"Apache-2.0"
] | null | null | null | from textwrap import dedent
from openeo.internal.processes.generator import PythonRenderer
from openeo.internal.processes.parse import Process
def test_render_basic():
process = Process.from_dict({
"id": "incr",
"description": "Increment a value",
"summary": "Increment a value",
"parameters": [{"name": "x", "description": "value", "schema": {"type": "integer"}}],
"returns": {"description": "incremented value", "schema": {"type": "integer"}}
})
renderer = PythonRenderer()
src = renderer.render_process(process)
assert src == dedent('''\
def incr(x):
"""
Increment a value
:param x: value
:return: incremented value
"""
return process('incr', x=x)''')
def test_render_no_params():
process = Process.from_dict({
"id": "pi",
"description": "Pi",
"summary": "Pi",
"parameters": [],
"returns": {"description": "value of pi", "schema": {"type": "number"}}
})
renderer = PythonRenderer()
src = renderer.render_process(process)
assert src == dedent('''\
def pi():
"""
Pi
:return: value of pi
"""
return process('pi', )''')
def test_render_with_default():
process = Process.from_dict({
"id": "incr",
"description": "Increment a value",
"summary": "Increment a value",
"parameters": [
{"name": "x", "description": "value", "schema": {"type": "integer"}},
{"name": "i", "description": "increment", "schema": {"type": "integer"}, "default": 1},
],
"returns": {"description": "incremented value", "schema": {"type": "integer"}}
})
renderer = PythonRenderer()
src = renderer.render_process(process)
assert src == dedent('''\
def incr(x, i=1):
"""
Increment a value
:param x: value
:param i: increment
:return: incremented value
"""
return process('incr', x=x, i=i)''')
def test_render_with_optional():
process = Process.from_dict({
"id": "foo",
"description": "Foo",
"summary": "Foo",
"parameters": [
{"name": "x", "description": "value", "schema": {"type": "integer"}},
{"name": "y", "description": "something", "schema": {"type": "integer"}, "optional": True, "default": 1},
],
"returns": {"description": "new value", "schema": {"type": "integer"}}
})
renderer = PythonRenderer(optional_default="UNSET")
src = renderer.render_process(process)
assert src == dedent('''\
def foo(x, y=UNSET):
"""
Foo
:param x: value
:param y: something
:return: new value
"""
return process('foo', x=x, y=y)''')
def test_render_return_type_hint():
process = Process.from_dict({
"id": "incr",
"description": "Increment a value",
"summary": "Increment a value",
"parameters": [{"name": "x", "description": "value", "schema": {"type": "integer"}}],
"returns": {"description": "incremented value", "schema": {"type": "integer"}}
})
renderer = PythonRenderer(return_type_hint="FooBar")
src = renderer.render_process(process)
assert src == dedent('''\
def incr(x) -> FooBar:
"""
Increment a value
:param x: value
:return: incremented value
"""
return process('incr', x=x)''')
def test_render_oo_no_params():
process = Process.from_dict({
"id": "pi",
"description": "Pi",
"summary": "Pi",
"parameters": [],
"returns": {"description": "value of pi", "schema": {"type": "number"}}
})
renderer = PythonRenderer(oo_mode=True)
src = "class Consts:\n" + renderer.render_process(process)
assert src == dedent('''\
class Consts:
def pi(self):
"""
Pi
:return: value of pi
"""
return process('pi', )''')
def test_render_keyword():
process = Process.from_dict({
"id": "or",
"description": "Boolean and",
"summary": "Boolean and",
"parameters": [
{"name": "x", "description": "value", "schema": {"type": ["boolean", "null"]}},
{"name": "y", "description": "value", "schema": {"type": ["boolean", "null"]}}
],
"returns": {"description": "result", "schema": {"type": ["boolean", "null"]}},
})
renderer = PythonRenderer()
src = renderer.render_process(process)
assert src == dedent('''\
def or_(x, y):
"""
Boolean and
:param x: value
:param y: value
:return: result
"""
return process('or', x=x, y=y)''')
oo_renderer = PythonRenderer(oo_mode=True, body_template="return {safe_name}({args})", )
src = oo_renderer.render_process(process)
assert dedent(src) == dedent('''\
def or_(self, y):
"""
Boolean and
:param self: value
:param y: value
:return: result
"""
return or_(x=self, y=y)''')
| 28.851064 | 117 | 0.50295 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,069 | 0.565819 |
dff6538ad1295913d7be8979ad8998c9e8d8ebc3 | 4,555 | py | Python | python/tests/range.py | mizuki-nana/coreVM | 1ff863b890329265a86ff46b0fdf7bac8e362f0e | [
"MIT"
] | 2 | 2017-02-12T21:59:54.000Z | 2017-02-13T14:57:48.000Z | python/tests/range.py | mizuki-nana/coreVM | 1ff863b890329265a86ff46b0fdf7bac8e362f0e | [
"MIT"
] | null | null | null | python/tests/range.py | mizuki-nana/coreVM | 1ff863b890329265a86ff46b0fdf7bac8e362f0e | [
"MIT"
] | null | null | null | # The MIT License (MIT)
# Copyright (c) 2015 Yanzheng Li
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the 'Software'), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
## -----------------------------------------------------------------------------
def test_range_with_one_argument():
print range(0)
print range(1)
print range(2)
print range(5)
print range(10)
print range(100)
print range(-1)
print range(-2)
print range(-5)
print range(-10)
print range(-100)
print range(True)
print range(False)
## -----------------------------------------------------------------------------
def test_range_with_one_invalid_argument():
def test_range_with_typeerror(x):
try:
range(x)
except TypeError:
print 'Expected TypeError on calling range() with argument'
test_range_with_typeerror(None)
test_range_with_typeerror(3.14)
test_range_with_typeerror('1')
## -----------------------------------------------------------------------------
def test_range_with_two_arguments():
print range(0, 0)
print range(0, 1)
print range(1, 1)
print range(1, 2)
print range(1, 10)
print range(2, 9)
print range(0, 10)
print range(1, 0)
print range(0, -1)
print range(100, 99)
print range(10, 0)
## -----------------------------------------------------------------------------
def test_range_with_two_invalid_arguments():
def test_range_with_typeerror(x, y):
try:
range(x, y)
except TypeError:
print 'Expected TypeError on calling range() with argument'
test_range_with_typeerror(0, None)
test_range_with_typeerror(None, 1)
test_range_with_typeerror(None, None)
test_range_with_typeerror(0, 3.14)
test_range_with_typeerror(3.14, 0)
test_range_with_typeerror(3, '14')
## -----------------------------------------------------------------------------
def test_range_with_three_arguments():
print range(0, 0, 1)
print range(0, 10, 1)
print range(0, 10, 2)
print range(10, 0, -1)
print range(9, 1, -2)
print range(-1, -2, -1)
print range(0, 0, -1)
print range(0, 10, -1)
print range(0, 10, -2)
print range(0, -10, 2)
print range(9, -1, -2)
print range(-1, -2, -1)
print range(-1, -10, -12)
print range(10, 0, 1)
print range(9, 1, 2)
print range(-1, -2, 1)
## -----------------------------------------------------------------------------
def test_range_with_three_invalid_arguments():
def test_range_with_typeerror(x, y, z):
try:
range(x, y, z)
except TypeError:
print 'Expected TypeError on calling range() with argument'
test_range_with_typeerror(0, 0, None)
test_range_with_typeerror(None, None, 1)
test_range_with_typeerror(None, None, None)
test_range_with_typeerror(0, 0, 3.14)
test_range_with_typeerror(3.14, 0, 1)
test_range_with_typeerror(3, '14', '1')
def test_range_with_valueerror(x, y, z):
try:
range(x, y, z)
except ValueError:
print 'Expected ValueError on calling range() with argument'
test_range_with_valueerror(0, 0, 0)
## -----------------------------------------------------------------------------
test_range_with_one_argument()
test_range_with_one_invalid_argument()
test_range_with_two_arguments()
test_range_with_two_invalid_arguments()
test_range_with_three_arguments()
test_range_with_three_invalid_arguments()
## -----------------------------------------------------------------------------
| 31.413793 | 80 | 0.591438 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,958 | 0.429857 |
dff9aadffba2a29e37c671ac7172c7de73a82cb0 | 14,895 | py | Python | hyperion/generators/adapt_sequence_batch_generator.py | jsalt2019-diadet/hyperion | 14a11436d62f3c15cd9b1f70bcce3eafbea2f753 | [
"Apache-2.0"
] | 9 | 2019-09-22T05:19:59.000Z | 2022-03-05T18:03:37.000Z | hyperion/generators/adapt_sequence_batch_generator.py | jsalt2019-diadet/hyperion | 14a11436d62f3c15cd9b1f70bcce3eafbea2f753 | [
"Apache-2.0"
] | null | null | null | hyperion/generators/adapt_sequence_batch_generator.py | jsalt2019-diadet/hyperion | 14a11436d62f3c15cd9b1f70bcce3eafbea2f753 | [
"Apache-2.0"
] | 4 | 2019-10-10T06:34:05.000Z | 2022-03-05T18:03:56.000Z | """
Copyright 2018 Jesus Villalba (Johns Hopkins University)
Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from six.moves import xrange
import sys
import os
import argparse
import time
import copy
import numpy as np
from sklearn.utils.class_weight import compute_class_weight
from ..hyp_defs import float_cpu
from ..io import RandomAccessDataReaderFactory as RF
from ..utils.scp_list import SCPList
from ..utils.tensors import to3D_by_seq
from ..transforms import TransformList
from .sequence_batch_generator_v1 import SequenceBatchGeneratorV1 as SBG
class AdaptSequenceBatchGenerator(SBG):
def __init__(self, rspecifier,
key_file, key_file_adapt,
r_adapt=1,
class_list = None,
path_prefix=None,
batch_size=1,
iters_per_epoch='auto',
gen_method='random',
min_seq_length=None, max_seq_length=None,
seq_overlap=0,
prune_min_length=0,
return_class = True,
class_weight = None,
seq_weight = 'balanced',
shuffle_seqs=True,
transform=None,
init_epoch=0,
sg_seed=1024, reset_rng=False,
scp_sep=' ',
part_idx=1, num_parts=1):
self.scp_adapt = SCPList.load(key_file_adapt, sep=scp_sep)
if num_parts > 1:
self.scp_adapt = self.scp_adapt.split(part_idx, num_parts, group_by_key=False)
assert r_adapt < batch_size
self.r_adapt = r_adapt
self._init_seq_lengths_adapt = None
self._seq_lengths_adapt = None
self.init_scp_adapt = self.scp_adapt
self.cur_seq_adapt = 0
self.cur_frame_adapt = None
self.cur_subseq = None
self._init_num_subseqs_adapt = None
self.num_subseqs_adapt = None
super(AdaptSequenceBatchGenerator, self).__init__(
rspecifier, key_file, class_list, path_prefix, batch_size,
iters_per_epoch, gen_method, min_seq_length, max_seq_length, seq_overlap,
prune_min_length, return_class, class_weight, seq_weight,
shuffle_seqs, transform, init_epoch, sg_seed, reset_rng, scp_sep,
part_idx,num_parts)
@property
def num_seqs(self):
return len(self.scp)
@property
def num_seqs_adapt(self):
return len(self.scp_adapt)
@property
def seq_lengths(self):
if self._seq_lengths is None:
self._init_seq_lengths = self.r.read_num_rows(self.scp.file_path)
self._seq_lengths = self._init_seq_lengths
return self._seq_lengths
@property
def seq_lengths_adapt(self):
if self._seq_lengths_adapt is None:
self._init_seq_lengths_adapt = self.r.read_num_rows(self.scp_adapt.file_path)
self._seq_lengths_adapt = self._init_seq_lengths_adapt
return self._seq_lengths_adapt
@property
def total_length(self):
return np.sum(self.seq_lengths)
@property
def total_length_adapt(self):
return np.sum(self.seq_lengths_adapt)
@property
def min_seq_length(self):
if self._min_seq_length is None:
self._min_seq_length = min(np.min(self.seq_lengths), np.min(self.seq_lengths_adapt))
return self._min_seq_length
@property
def max_seq_length(self):
if self._max_seq_length is None:
self._max_seq_length = max(np.max(self.seq_lengths), np.max(self.seq_lengths_adapt))
return self._max_seq_length
@property
def steps_per_epoch(self):
if self._steps_per_epoch is None:
if self.gen_method == 'sequential':
if self.seq_weight == 'balanced':
seqs_per_iter = self.num_seqs*np.max(self.num_subseqs)
else:
seqs_per_iter = np.sum(self.num_subseqs)
else:
seqs_per_iter = self.num_seqs
self._steps_per_epoch = int(np.floor(
self.iters_per_epoch * seqs_per_iter/(self.batch_size-self.r_adapt)))
return self._steps_per_epoch
@property
def num_total_subseqs(self):
return self.steps_per_epoch * self.batch_size
def _prune_min_length(self, min_length):
keep_idx = self.seq_lengths >= min_length
self.scp = self.scp.filter_index(keep_idx)
keep_idx = self.seq_lengths_adapt >= min_length
self.scp_adapt = self.scp_adapt.filter_index(keep_idx)
self._seq_lengths = None
self._seq_lengths_adapt = None
def _prepare_class_info(self, class_list):
if class_list is None:
class_dict = {k:i for i, k in enumerate(np.unique(self.scp.key))}
class_dict.update({k:i for i, k in enumerate(np.unique(self.scp_adapt.key))})
else:
with open(class_list) as f:
class_dict={line.rstrip().split()[0]: i for i, line in enumerate(f)}
self.num_classes = len(class_dict)
self.key2class = {p: class_dict[k] for k, p in zip(self.scp.key, self.scp.file_path)}
self.key2class.update({p: class_dict[k] for k, p in zip(self.scp_adapt.key, self.scp_adapt.file_path)})
def _balance_class_weight(self):
super(AdaptSequenceBatchGenerator, self)._balance_class_weight()
classes, class_ids = np.unique(self.scp_adapt.key, return_inverse=True)
idx = self._balance_class_weigth_helper(class_ids)
self.scp_adapt = self.scp_adapt.filter_index(idx)
assert len(self.scp_adapt) == len(num_samples)*max_samples
if self._init_seq_lengths_adapt is not None:
self._init_seq_legths_adapt = self._init_seq_lengths_adapt[idx]
self._seq_lengths_adapt = self._init_seq_legths_adapt
def _prepare_full_seqs(self):
pass
def _prepare_random_subseqs(self):
pass
def _prepare_sequential_subseqs(self):
super(AdaptSequenceBatchGenerator, self)._prepare_sequential_subseqs()
seq_lengths = self.seq_lengths_adapt
avg_length = int((self.max_seq_length + self.min_seq_length)/2)
shift = avg_length - self.seq_overlap
self._init_num_subseqs_adapt = np.ceil(seq_lengths/shift).astype(int)
self.num_subseqs_adapt = self._init_num_subseqs_adapt
self.cur_frame_adapt = np.zeros((self.num_seqs_adapt,), dtype=int)
self.cur_subseq_adapt = np.zeros((self.num_seqs_adapt,), dtype=int)
def reset(self):
super(AdaptSequenceBatchGenerator, self).reset()
self.cur_seq_adapt = 0
if self.shuffle_seqs:
if self._init_seq_lengths_adapt is None:
self.seq_lengths_adapt
self.scp_adapt = self.init_scp_adapt.copy()
index = self.scp_adapt.shuffle(rng=self.rng)
self._seq_lengths_adapt = self._init_seq_lengths_adapt[index]
if self._init_num_subseqs_adapt is not None:
self.num_subseqs_adapt = self._init_num_subseqs_adapt[index]
if self.gen_method == 'sequential':
self.cur_subseq_adapt[:] = 0
self.cur_frame_adapt[:] = 0
def _read_full_seqs(self):
batch_size = self.batch_size - self.r_adapt
keys = list(self.scp.file_path[self.cur_seq:self.cur_seq+batch_size])
self.cur_seq += batch_size
if len(keys) < batch_size:
delta = batch_size - len(keys)
keys += self.scp.file_path[:delta]
self.cur_seq = delta
assert len(keys) == batch_size
batch_size = self.r_adapt
keys_adapt = list(self.scp_adapt.file_path[self.cur_seq_adapt:self.cur_seq_adapt+batch_size])
self.cur_seq_adapt += batch_size
if len(keys_adapt) < batch_size:
delta = batch_size - len(keys)
keys_adapt += self.scp_adapt.file_path[:delta]
self.cur_seq_adapt = delta
assert len(keys_adapt) == batch_size
keys += keys_adapt
return keys, self.r.read(keys)
def _read_random_subseqs(self):
keys = []
seq_lengths =[]
first_frames = []
for i in xrange(self.batch_size-self.r_adapt):
key = self.scp.file_path[self.cur_seq]
full_seq_length = self.seq_lengths[self.cur_seq]
max_seq_length = min(full_seq_length, self.max_seq_length)
min_seq_length = min(full_seq_length, self.min_seq_length)
seq_length = self.rng.randint(low=min_seq_length, high=max_seq_length+1)
first_frame = self.rng.randint(
low=0, high=full_seq_length-seq_length+1)
keys.append(key)
seq_lengths.append(seq_length)
first_frames.append(first_frame)
self.cur_seq = (self.cur_seq + 1) % self.num_seqs
for i in xrange(self.r_adapt):
key = self.scp_adapt.file_path[self.cur_seq_adapt]
full_seq_length = self.seq_lengths_adapt[self.cur_seq_adapt]
max_seq_length = min(full_seq_length, self.max_seq_length)
min_seq_length = min(full_seq_length, self.min_seq_length)
seq_length = self.rng.randint(low=min_seq_length, high=max_seq_length+1)
first_frame = self.rng.randint(
low=0, high=full_seq_length-seq_length+1)
keys.append(key)
seq_lengths.append(seq_length)
first_frames.append(first_frame)
self.cur_seq_adapt = (self.cur_seq_adapt + 1) % self.num_seqs_adapt
return keys, self.r.read(keys, row_offset=first_frames,
num_rows=seq_lengths)
def _read_sequential_subseqs(self):
keys = []
seq_lengths =[]
first_frames = []
count = 0
while count < self.batch_size - self.r_adapt:
key = self.scp.file_path[self.cur_seq]
first_frame = self.cur_frame[self.cur_seq]
full_seq_length = self.seq_lengths[self.cur_seq]
remainder_seq_length = full_seq_length - first_frame
if self.cur_subseq[self.cur_seq] == self.num_subseqs[self.cur_seq]:
self.cur_seq = (self.cur_seq + 1) % self.num_seqs
continue
if self.cur_subseq[self.cur_seq] == self.num_subseqs[self.cur_seq]-1:
seq_length = min(remainder_seq_length, self.max_seq_length)
self.cur_frame[self.cur_seq] = 0
else:
max_seq_length = min(
max(self.min_seq_length,
remainder_seq_length-self.min_seq_length),
self.max_seq_length)
min_seq_length = min(remainder_seq_length, self.min_seq_length)
seq_length = self.rng.randint(low=min_seq_length, high=max_seq_length+1)
self.cur_frame[self.cur_seq] = min(
full_seq_length - self.min_seq_length,
first_frame + seq_length - self.seq_overlap)
keys.append(key)
seq_lengths.append(seq_length)
first_frames.append(first_frame)
self.cur_subseq[self.cur_seq] += 1
if self.seq_weight == 'balanced':
self.cur_subseq[self.cur_seq] %= self.num_subseqs[self.cur_seq]
self.cur_seq = (self.cur_seq + 1) % self.num_seqs
count += 1
while count < self.batch_size:
key = self.scp_adapt.file_path[self.cur_seq_adapt]
first_frame = self.cur_frame_adapt[self.cur_seq_adapt]
full_seq_length = self.seq_lengths_adapt[self.cur_seq_adapt]
remainder_seq_length = full_seq_length - first_frame
if self.cur_subseq_adapt[self.cur_seq_adapt] == self.num_subseqs_adapt[self.cur_seq_adapt]:
self.cur_seq_adapt = (self.cur_seq_adapt + 1) % self.num_seqs_adapt
continue
if self.cur_subseq_adapt[self.cur_seq_adapt] == self.num_subseqs_adapt[self.cur_seq_adapt]-1:
seq_length = min(remainder_seq_length, self.max_seq_length)
self.cur_frame_adapt[self.cur_seq_adapt] = 0
else:
max_seq_length = min(
max(self.min_seq_length,
remainder_seq_length-self.min_seq_length),
self.max_seq_length)
min_seq_length = min(remainder_seq_length, self.min_seq_length)
seq_length = self.rng.randint(low=min_seq_length, high=max_seq_length+1)
self.cur_frame_adapt[self.cur_seq_adapt] = min(
full_seq_length - self.min_seq_length,
first_frame + seq_length - self.seq_overlap)
keys.append(key)
seq_lengths.append(seq_length)
first_frames.append(first_frame)
self.cur_subseq_adapt[self.cur_seq_adapt] += 1
if self.seq_weight == 'balanced':
self.cur_subseq_adapt[self.cur_seq_adapt] %= self.num_subseqs_adapt[self.cur_seq_adapt]
self.cur_seq_adapt = (self.cur_seq_adapt + 1) % self.num_seqs_adapt
count += 1
assert len(keys) == self.batch_size
return keys, self.r.read(keys, row_offset=first_frames,
num_rows=seq_lengths)
@staticmethod
def filter_args(prefix=None, **kwargs):
args = super(AdaptSequenceBatchGenerator,
AdaptSequenceBatchGenerator).filter_args(prefix, **kwargs)
if prefix is None:
p = ''
else:
p = prefix + '_'
valid_args = ('r_adapt',)
new_args = dict((k, kwargs[p+k])
for k in valid_args if p+k in kwargs)
args.update(new_args)
return args
@staticmethod
def add_argparse_args(parser, prefix=None):
args = super(AdaptSequenceBatchGenerator,
AdaptSequenceBatchGenerator).add_argparse_args(parser, prefix)
if prefix is None:
p1 = '--'
p2 = ''
else:
p1 = '--' + prefix + '-'
p2 = prefix + '_'
parser.add_argument(p1+'r-adapt', dest=(p2+'r_adapt'),
default=64, type=int,
help=('batch size of adaptation data.'))
| 34.320276 | 111 | 0.608728 | 14,209 | 0.953944 | 0 | 0 | 2,958 | 0.19859 | 0 | 0 | 284 | 0.019067 |
dffa822e50735b496917f2c8ca75cc5ca8d78488 | 1,113 | py | Python | main.py | Lojlvenom/simple-python-blockchain | b226f81644daa066156aa5b9581c04cf4d47d0dc | [
"MIT"
] | null | null | null | main.py | Lojlvenom/simple-python-blockchain | b226f81644daa066156aa5b9581c04cf4d47d0dc | [
"MIT"
] | null | null | null | main.py | Lojlvenom/simple-python-blockchain | b226f81644daa066156aa5b9581c04cf4d47d0dc | [
"MIT"
] | null | null | null | import fastapi as _fastapi
import blockchain as _blockchain
app_desc = {
'title':'Simple python blockchain API',
'version':'1.0.0',
}
bc = _blockchain.Blockchain()
app = _fastapi.FastAPI(**app_desc)
def validade_blockchain():
if not bc._is_chain_valid():
return _fastapi.HTTPException(
status_code= 400, detail="Blockchain nao e valida"
)
@app.get("/", tags=["Endpoints"])
def hello():
return {
"message":"Bem vindo ao simple python blockchain API, para saber mais acesse /docs"
}
# EP PARA ADICIONAR UM BLOCO
@app.post("/mine_block/", tags=["Endpoints"])
def mine_block(data: str):
validade_blockchain()
block = bc.mine_block(data)
return block
@app.get("/blockchain/", tags=["Endpoints"])
def get_blockchain():
validade_blockchain
chain = bc.chain
return chain
@app.get('/check_is_valid', tags=["Endpoints"])
def check_is_valid():
is_valid = validade_blockchain()
if is_valid:
return {
"message": "Is valid"
}
else:
return {
"message": "Not valid"
}
| 22.26 | 91 | 0.629829 | 0 | 0 | 0 | 0 | 688 | 0.618149 | 0 | 0 | 319 | 0.286613 |
dffa84ab01f78c539667e6f6871367dc2095eb09 | 1,747 | py | Python | setup.py | SanjeevaRDodlapati/Chem-Learn | 2db2e98061ee3dbb00ed20c51ea18b15956e298e | [
"MIT"
] | null | null | null | setup.py | SanjeevaRDodlapati/Chem-Learn | 2db2e98061ee3dbb00ed20c51ea18b15956e298e | [
"MIT"
] | null | null | null | setup.py | SanjeevaRDodlapati/Chem-Learn | 2db2e98061ee3dbb00ed20c51ea18b15956e298e | [
"MIT"
] | null | null | null | from glob import glob
import os
from setuptools import setup, find_packages
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(name='chemlearn',
version='0.0.0',
description='Deep learning for chemistry',
long_description=read('README.rst'),
author='Sanjeeva Reddy Dodlapati',
author_email='[email protected]',
license="MIT",
url='https://github.com/SanjeevaRDodlapati/Chem-Learn',
packages=find_packages(),
scripts=glob('./scripts/*.py'),
install_requires=['h5py',
'argparse',
'pandas',
'numpy',
'pytest',
'torch',
'rdkit-pypi',
],
keywords=['Deep learning',
'Deep neural networks',
'Molecular graphs',
'Drug discovery',
'Drug target interaction'],
classifiers=['Development Status :: 0 - developmet',
'Environment :: Console',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Programming Language :: Python',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Scientific/Engineering :: Chem-Informatics',
]
)
| 36.395833 | 80 | 0.499714 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 799 | 0.457355 |
dffe92bdb0898e53b6acb0b1fcb7c940caeeb1d9 | 49,731 | py | Python | scripts/tests_with_setup.py | rombie/contrail-test | a68c71d6f282142501a7e2e889bbb232fdd82dc3 | [
"Apache-2.0"
] | null | null | null | scripts/tests_with_setup.py | rombie/contrail-test | a68c71d6f282142501a7e2e889bbb232fdd82dc3 | [
"Apache-2.0"
] | null | null | null | scripts/tests_with_setup.py | rombie/contrail-test | a68c71d6f282142501a7e2e889bbb232fdd82dc3 | [
"Apache-2.0"
] | null | null | null | # Need to import path to test/fixtures and test/scripts/
# Ex : export PYTHONPATH='$PATH:/root/test/fixtures/:/root/test/scripts/'
#
# To run tests, you can do 'python -m testtools.run tests'. To run specific tests,
# You can do 'python -m testtools.run -l tests'
# Set the env variable PARAMS_FILE to point to your ini file. Else it will try to pick params.ini in PWD
#
from tests_with_setup_base import *
class TestSanity(TestSanityBase):
def setUp(self):
super(TestSanity, self).setUp()
# end setUp
def cleanUp(self):
super(TestSanity, self).cleanUp()
# end cleanUp
@preposttest_wrapper
def test_diff_proj_same_vn_vm_add_delete(self):
''' Test to validate that a VN and VM with the same name and same subnet can be created in two different projects
'''
vm_name = 'vm_mine'
vn_name = 'vn222'
vn_subnets = ['11.1.1.0/24']
projects = ['project111', 'project222']
user_list = [('gudi', 'gudi123', 'admin'), ('mal', 'mal123', 'admin')]
auth_url = 'http://%s:5000/v2.0' % (self.inputs.openstack_ip)
kc = ksclient.Client(
username=self.inputs.stack_user, password=self.inputs.stack_password,
tenant_name=self.inputs.project_name, auth_url=auth_url)
user_pass = {}
user_role = {}
user_set = set()
role_set = set()
for (n, p, r) in user_list:
user_pass[n] = p
user_role[n] = r
user_set.add(n)
            role_set.add(r)
users = set([user.name for user in kc.users.list()])
roles = set([user.name for user in kc.roles.list()])
tenants = kc.tenants.list()
admin_tenant = [x for x in tenants if x.name == 'admin'][0]
create_user_set = user_set - users
create_role_set = role_set - roles
for new_tenant in projects:
kc.tenants.create(new_tenant)
role_dict = {}
tenant_dict = {}
for role in kc.roles.list():
role_dict[role.name] = role
for tenant in kc.tenants.list():
tenant_dict[tenant.name] = tenant
for name in create_user_set:
user = kc.users.create(
name, user_pass[name], '', tenant_id=admin_tenant.id)
for new_tenant in projects:
kc.roles.add_user_role(
user, role_dict[user_role[name]], tenant_dict[new_tenant])
user_dict = {}
for user in kc.users.list():
user_dict[user.name] = user
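        # Separate test-init/connection handles are built below, one per new project
        # (using that project's user credentials), so that the identically named
        # VN/VM can be created in two independent tenant scopes.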
self.new_proj_inputs1 = self.useFixture(ContrailTestInit(self.ini_file, stack_user=user_list[
0][0], stack_password=user_list[0][1], project_fq_name=['default-domain', projects[0]]))
self.new_proj_connections1 = ContrailConnections(self.new_proj_inputs1)
self.new_proj_inputs2 = self.useFixture(ContrailTestInit(self.ini_file, stack_user=user_list[
1][0], stack_password=user_list[1][1], project_fq_name=['default-domain', projects[1]]))
self.new_proj_connections2 = ContrailConnections(self.new_proj_inputs2)
vn1_fixture = self.useFixture(
VNFixture(
project_name=projects[
0], connections=self.new_proj_connections1,
vn_name=vn_name, inputs=self.new_proj_inputs1, subnets=vn_subnets))
assert vn1_fixture.verify_on_setup()
vn1_obj = vn1_fixture.obj
vn2_fixture = self.useFixture(
VNFixture(
project_name=projects[
1], connections=self.new_proj_connections2,
vn_name=vn_name, inputs=self.new_proj_inputs2, subnets=vn_subnets))
assert vn2_fixture.verify_on_setup()
vn2_obj = vn2_fixture.obj
vm1_fixture = self.useFixture(
VMFixture(connections=self.new_proj_connections1,
vn_obj=vn1_obj, vm_name=vm_name, project_name=projects[0]))
vm2_fixture = self.useFixture(
VMFixture(connections=self.new_proj_connections2,
vn_obj=vn2_obj, vm_name=vm_name, project_name=projects[1]))
assert vm1_fixture.verify_on_setup()
assert vm2_fixture.verify_on_setup()
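        # The two VMs belong to different projects (and hence different VRFs), so the
        # agent is expected to assign them distinct labels even though the VN name
        # and subnet are identical.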
if not vm1_fixture.agent_label == vm2_fixture.agent_label:
self.logger.info("Correct label assigment")
else:
self.logger.error(
"The same label has been assigned for both the VMs")
return False
        testfail = 0
        for new_tenant in projects:
            try:
                kc.tenants.delete(tenant_dict[new_tenant])
            except Exception as e:
                testfail += 1
                self.logger.error(
                    'ClientException: This is because the project info still remains in the API server ==> Bug 744')
        for name in create_user_set:
            try:
                kc.users.delete(user_dict[name])
            except Exception as e:
                testfail += 1
                self.logger.error(
                    'ClientException: This is because the project info still remains in the API server ==> Bug 744')
        assert testfail > 0, "Placeholder till the Bug 744 is fixed"
return True
# end test_diff_proj_same_vn_vm_add_delete
    # start subnet ping
    # verifying that ping to subnet broadcast is responded to by other vms in the same subnet
    # vms from other subnets should not respond
@preposttest_wrapper
def test_ping_on_broadcast_multicast(self):
        ''' Validate ping on subnet broadcast, link-local multicast and network broadcast.
'''
result = True
vn1_name = self.res.vn1_name
vn1_subnets = self.res.vn1_subnets
ping_count = '5'
vn1_vm1_name = self.res.vn1_vm1_name
vn1_vm2_name = self.res.vn1_vm2_name
vn1_vm3_name = self.res.vn1_vm3_name
vn1_vm4_name = self.res.vn1_vm4_name
vn1_fixture = self.res.get_vn1_fixture()
vm1_fixture = self.res.get_vn1_vm1_fixture()
vm2_fixture = self.res.get_vn1_vm2_fixture()
vm3_fixture = self.res.get_vn1_vm3_fixture()
vm4_fixture = self.res.get_vn1_vm4_fixture()
assert vm1_fixture.wait_till_vm_is_up()
assert vm2_fixture.wait_till_vm_is_up()
assert vm3_fixture.wait_till_vm_is_up()
assert vm4_fixture.wait_till_vm_is_up()
        # Getting the VM IPs
vm1_ip = vm1_fixture.vm_ip
vm2_ip = vm2_fixture.vm_ip
vm3_ip = vm3_fixture.vm_ip
vm4_ip = vm4_fixture.vm_ip
ip_list = [vm1_ip, vm2_ip, vm3_ip, vm4_ip]
bcast_ip = str(IPNetwork(vn1_subnets[0]).broadcast)
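        # Destinations exercised below: the subnet-directed broadcast address, the
        # all-hosts link-local multicast group (224.0.0.1) and the limited broadcast
        # address (255.255.255.255).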
list_of_ip_to_ping = [bcast_ip, '224.0.0.1', '255.255.255.255']
# passing command to vms so that they respond to subnet broadcast
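        # Linux guests default to icmp_echo_ignore_broadcasts=1, i.e. they silently
        # drop echo requests addressed to broadcast addresses; writing 0 makes them reply.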
cmd = ['echo 0 > /proc/sys/net/ipv4/icmp_echo_ignore_broadcasts']
vm_fixtures = [vm1_fixture, vm2_fixture, vm3_fixture, vm4_fixture]
for vm in vm_fixtures:
print 'Running cmd for %s' % vm.vm_name
for i in range(3):
try:
self.logger.info("Retry %s" % (i))
ret = vm.run_cmd_on_vm(cmds=cmd, as_sudo=True)
if not ret:
for vn in vm.vn_fq_names:
vm.ping_vm_from_host(vn)
raise Exception
except Exception as e:
time.sleep(5)
self.logger.exception("Got exception as %s" % (e))
else:
break
for dst_ip in list_of_ip_to_ping:
self.logger.info('pinging from %s to %s' % (vm1_ip, dst_ip))
# pinging from Vm1 to subnet broadcast
ping_output = vm1_fixture.ping_to_ip(
dst_ip, return_output=True, count=ping_count, other_opt='-b')
self.logger.info("ping output : \n %s" % (ping_output))
expected_result = ' 0% packet loss'
            if expected_result not in ping_output:
                self.logger.error('Expected 0% packet loss, but packet loss was seen!')
                self.logger.error('Ping result : %s' % (ping_output))
                result = result and False
# getting count of ping response from each vm
string_count_dict = {}
string_count_dict = get_string_match_count(ip_list, ping_output)
self.logger.info("output %s" % (string_count_dict))
            self.logger.info(
                "There should be at least 4 echo replies from each ip")
            for k in ip_list:
                # this is a workaround : the ping utility exits as soon as it gets
                # one response
                # assert (string_count_dict[k] >= (int(ping_count) - 1))
                if not string_count_dict[k] >= (int(ping_count) - 1):
                    self.logger.error('Seen %s replies instead of at least %s' % (
                        string_count_dict[k], (int(ping_count) - 1)))
result = result and False
if not result:
self.logger.error('There were errors. Verifying VM fixtures')
assert vm1_fixture.verify_on_setup()
assert vm2_fixture.verify_on_setup()
assert vm3_fixture.verify_on_setup()
assert vm4_fixture.verify_on_setup()
return True
# end subnet ping
@preposttest_wrapper
def test_ping_within_vn_two_vms_two_different_subnets(self):
        ''' Validate ping between two VMs within a VN - 2 VMs in 2 different subnets.
            Validate that ping to subnet broadcast is not responded to by the other VM.
            Validate that ping to network broadcast (all 255s) is responded to by the other VM.
'''
vn1_name = 'vn030'
vn1_subnets = ['31.1.1.0/30', '31.1.2.0/30']
# vn1_subnets=['30.1.1.0/24']
vn1_vm1_name = 'vm1'
vn1_vm2_name = 'vm2'
vn1_fixture = self.useFixture(
VNFixture(
project_name=self.inputs.project_name, connections=self.connections,
vn_name=vn1_name, inputs=self.inputs, subnets=vn1_subnets))
assert vn1_fixture.verify_on_setup()
vm1_fixture = self.useFixture(
VMFixture(
project_name=self.inputs.project_name, connections=self.connections,
vn_obj=vn1_fixture.obj, vm_name=vn1_vm1_name))
vm2_fixture = self.useFixture(
VMFixture(
project_name=self.inputs.project_name, connections=self.connections,
vn_obj=vn1_fixture.obj, vm_name=vn1_vm2_name))
assert vm1_fixture.verify_on_setup()
assert vm2_fixture.verify_on_setup()
vm1_fixture.wait_till_vm_is_up()
vm2_fixture.wait_till_vm_is_up()
assert vm1_fixture.ping_with_certainty(vm2_fixture.vm_ip)
assert vm2_fixture.ping_with_certainty(vm1_fixture.vm_ip)
        # Getting the VM IPs
vm1_ip = vm1_fixture.vm_ip
vm2_ip = vm2_fixture.vm_ip
ip_list = [vm1_ip, vm2_ip]
        # getting the broadcast ip for vm1_ip
ip_broadcast = ''
ip_broadcast = get_subnet_broadcast_from_ip(vm1_ip, '30')
list_of_ip_to_ping = [ip_broadcast, '224.0.0.1', '255.255.255.255']
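        # Expectation (checked in the asserts below): vm2 is in a different /30 subnet,
        # so it must not answer vm1's subnet-directed broadcast, but it should answer
        # the all-hosts multicast and the limited broadcast pings.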
# passing command to vms so that they respond to subnet broadcast
cmd_list_to_pass_vm = [
'echo 0 > /proc/sys/net/ipv4/icmp_echo_ignore_broadcasts']
vm1_fixture.run_cmd_on_vm(cmds=cmd_list_to_pass_vm, as_sudo=True)
vm2_fixture.run_cmd_on_vm(cmds=cmd_list_to_pass_vm, as_sudo=True)
for dst_ip in list_of_ip_to_ping:
print 'pinging from %s to %s' % (vm1_ip, dst_ip)
# pinging from Vm1 to subnet broadcast
ping_output = vm1_fixture.ping_to_ip(
dst_ip, return_output=True, other_opt='-b')
expected_result = ' 0% packet loss'
assert (expected_result in ping_output)
# getting count of ping response from each vm
string_count_dict = {}
string_count_dict = get_string_match_count(ip_list, ping_output)
print string_count_dict
if (dst_ip == ip_broadcast):
assert (string_count_dict[vm2_ip] == 0)
if (dst_ip == '224.0.0.1' or dst_ip == '255.255.255.255'):
assert (string_count_dict[vm2_ip] > 0)
return True
# end test_ping_within_vn
@preposttest_wrapper
def test_policy_to_deny(self):
''' Test to validate that with policy having rule to disable icmp within the VN, ping between VMs should fail
1. Pick 2 VN from resource pool which have one VM in each
2. Create policy with icmp deny rule
3. Associate policy to both VN
4. Ping from one VM to another. Ping should fail
Pass criteria: Step 2,3 and 4 should pass
'''
vn1_name = self.res.vn1_name
vn1_subnets = self.res.vn1_subnets
policy_name = 'policy1'
rules = [
{
'direction': '<>', 'simple_action': 'deny',
'protocol': 'icmp',
'source_network': vn1_name,
'dest_network': vn1_name,
},
]
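        # A single bidirectional rule denying ICMP within vn1; once the policy is
        # bound to the VN, the ping attempted below is expected to fail.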
policy_fixture = self.useFixture(
PolicyFixture(
policy_name=policy_name, rules_list=rules, inputs=self.inputs,
connections=self.connections))
vn1_fixture = self.res.get_vn1_fixture()
vn1_fixture.bind_policies(
[policy_fixture.policy_fq_name], vn1_fixture.vn_id)
self.addCleanup(vn1_fixture.unbind_policies,
vn1_fixture.vn_id, [policy_fixture.policy_fq_name])
assert vn1_fixture.verify_on_setup()
vn1_vm1_name = self.res.vn1_vm1_name
vn1_vm2_name = self.res.vn1_vm2_name
vm1_fixture = self.res.get_vn1_vm1_fixture()
vm2_fixture = self.res.get_vn1_vm2_fixture()
vm1_fixture.wait_till_vm_is_up()
vm2_fixture.wait_till_vm_is_up()
        result = True
        if vm1_fixture.ping_to_ip(vm2_fixture.vm_ip):
            result = False
            self.logger.error('Ping from %s to %s passed, expected it to fail' % (
                vm1_fixture.vm_name, vm2_fixture.vm_name))
        self.logger.info('Doing verifications on the fixtures now..')
        assert vm1_fixture.verify_on_setup()
        assert vm2_fixture.verify_on_setup()
        assert result, 'Ping should have failed due to the deny policy'
        return True
# end test_policy_to_deny
@preposttest_wrapper
def test_process_restart_in_policy_between_vns(self):
        ''' Test to validate that with policy having rule to check icmp fwding between VMs on different VNs, ping between VMs should pass
with process restarts
1. Pick 2 VN's from resource pool which has one VM each
2. Create policy with icmp allow rule between those VN's and bind it networks
3. Ping from one VM to another VM
4. Restart process 'vrouter' and 'control' on setup
5. Ping again between VM's after process restart
Pass criteria: Step 2,3,4 and 5 should pass
'''
vn1_name = self.res.vn1_name
vn1_subnets = self.res.vn1_subnets
vn2_name = self.res.vn2_name
vn2_subnets = self.res.vn2_subnets
policy1_name = 'policy1'
policy2_name = 'policy2'
rules = [
{
'direction': '<>', 'simple_action': 'pass',
'protocol': 'icmp',
'source_network': vn1_name,
'dest_network': vn2_name,
},
]
rev_rules = [
{
'direction': '<>', 'simple_action': 'pass',
'protocol': 'icmp',
'source_network': vn2_name,
'dest_network': vn1_name,
},
]
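        # Two policies are used so that each VN can be bound to a policy that names
        # the other VN in its rule; the '<>' direction makes each rule bidirectional.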
policy1_fixture = self.useFixture(
PolicyFixture(
policy_name=policy1_name, rules_list=rules, inputs=self.inputs,
connections=self.connections))
policy2_fixture = self.useFixture(
PolicyFixture(
policy_name=policy2_name, rules_list=rev_rules, inputs=self.inputs,
connections=self.connections))
vn1_fixture = self.res.get_vn1_fixture()
assert vn1_fixture.verify_on_setup()
vn1_fixture.bind_policies(
[policy1_fixture.policy_fq_name], vn1_fixture.vn_id)
self.addCleanup(vn1_fixture.unbind_policies,
vn1_fixture.vn_id, [policy1_fixture.policy_fq_name])
vn2_fixture = self.res.get_vn2_fixture()
assert vn2_fixture.verify_on_setup()
vn2_fixture.bind_policies(
[policy2_fixture.policy_fq_name], vn2_fixture.vn_id)
self.addCleanup(vn2_fixture.unbind_policies,
vn2_fixture.vn_id, [policy2_fixture.policy_fq_name])
vn1_vm1_name = self.res.vn1_vm1_name
vn2_vm1_name = self.res.vn2_vm1_name
vm1_fixture = self.res.get_vn1_vm1_fixture()
vm2_fixture = self.res.get_vn2_vm1_fixture()
assert vm1_fixture.wait_till_vm_is_up()
assert vm2_fixture.wait_till_vm_is_up()
assert vm1_fixture.ping_with_certainty(vm2_fixture.vm_ip)
for compute_ip in self.inputs.compute_ips:
self.inputs.restart_service('contrail-vrouter', [compute_ip])
for bgp_ip in self.inputs.bgp_ips:
self.inputs.restart_service('contrail-control', [bgp_ip])
self.logger.info('Sleeping for 10 seconds')
sleep(10)
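        # The pause above gives the agents time to re-establish their XMPP sessions
        # with the restarted control nodes before traffic is verified again.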
vn1_vm2_name = self.res.vn1_vm2_name
vn2_vm2_name = self.res.vn2_vm2_name
vm3_fixture = self.res.get_vn1_vm2_fixture()
assert vm3_fixture.verify_on_setup()
vm4_fixture = self.res.get_vn2_vm2_fixture()
assert vm4_fixture.verify_on_setup()
vm3_fixture.wait_till_vm_is_up()
vm4_fixture.wait_till_vm_is_up()
assert vm3_fixture.ping_with_certainty(vm4_fixture.vm_ip)
return True
# end test_process_restart_in_policy_between_vns
@preposttest_wrapper
def test_policy_between_vns(self):
        ''' Test to validate that with policy having rule to check icmp fwding between VMs on different VNs, ping between VMs should pass
'''
vn1_name = self.res.vn1_name
vn1_subnets = self.res.vn1_subnets
vn2_name = self.res.vn2_name
vn2_subnets = self.res.vn2_subnets
policy1_name = 'policy1'
policy2_name = 'policy2'
rules = [
{
'direction': '<>', 'simple_action': 'pass',
'protocol': 'icmp',
'source_network': vn1_name,
'dest_network': vn2_name,
},
]
rev_rules = [
{
'direction': '<>', 'simple_action': 'pass',
'protocol': 'icmp',
'source_network': vn2_name,
'dest_network': vn1_name,
},
]
policy1_fixture = self.useFixture(
PolicyFixture(
policy_name=policy1_name, rules_list=rules, inputs=self.inputs,
connections=self.connections))
policy2_fixture = self.useFixture(
PolicyFixture(
policy_name=policy2_name, rules_list=rev_rules, inputs=self.inputs,
connections=self.connections))
vn1_fixture = self.res.get_vn1_fixture()
vn1_fixture.bind_policies(
[policy1_fixture.policy_fq_name], vn1_fixture.vn_id)
self.addCleanup(vn1_fixture.unbind_policies,
vn1_fixture.vn_id, [policy1_fixture.policy_fq_name])
assert vn1_fixture.verify_on_setup()
vn2_fixture = self.res.get_vn2_fixture()
vn2_fixture.bind_policies(
[policy2_fixture.policy_fq_name], vn2_fixture.vn_id)
self.addCleanup(vn2_fixture.unbind_policies,
vn2_fixture.vn_id, [policy2_fixture.policy_fq_name])
assert vn2_fixture.verify_on_setup()
vm1_fixture = self.res.get_vn1_vm1_fixture()
assert vm1_fixture.verify_on_setup()
vm2_fixture = self.res.get_vn2_vm1_fixture()
assert vm2_fixture.verify_on_setup()
vm1_fixture.wait_till_vm_is_up()
vm2_fixture.wait_till_vm_is_up()
assert vm1_fixture.ping_to_ip(vm2_fixture.vm_ip)
return True
# end test_policy_between_vns
@preposttest_wrapper
def test_tcp_transfer_from_fip_vm(self):
''' Validate data transfer through floating ip.
'''
fip_pool_name = 'testpool'
fvn_name = self.res.fip_vn_name
fvm_name = self.res.fvn_vm1_name
fvn_subnets = self.res.fip_vn_subnets
vn1_name = self.res.vn1_name
vm1_name = self.res.vn1_vm4_name
vn1_subnets = self.res.vn1_subnets
vn2_name = self.res.vn2_name
vm2_name = self.res.vn2_vm1_name
vn2_subnets = self.res.vn2_subnets
# policy between frontend and backend
policy_name = 'frontend-to-backend-policy'
rules = [
{
'direction': '<>', 'simple_action': 'pass',
'protocol': 'any',
'source_network': vn1_name,
'dest_network': vn2_name,
},
]
policy_fixture = self.useFixture(
PolicyFixture(policy_name=policy_name,
rules_list=rules, inputs=self.inputs,
connections=self.connections))
# frontend VN
vn1_fixture = self.res.get_vn1_fixture()
vn1_fixture.bind_policies(
[policy_fixture.policy_fq_name], vn1_fixture.vn_id)
self.addCleanup(vn1_fixture.unbind_policies,
vn1_fixture.vn_id, [policy_fixture.policy_fq_name])
vn1_fixture.verify_on_setup()
# backend VN
vn2_fixture = self.res.get_vn2_fixture()
vn2_fixture.bind_policies(
[policy_fixture.policy_fq_name], vn2_fixture.vn_id)
self.addCleanup(vn2_fixture.unbind_policies,
vn2_fixture.vn_id, [policy_fixture.policy_fq_name])
vn2_fixture.verify_on_setup()
# public VN
fvn_fixture = self.res.get_fvn_fixture()
fvn_fixture.verify_on_setup()
# frontend VM
vm1_fixture = self.res.get_vn1_vm4_fixture()
assert vm1_fixture.verify_on_setup()
# backend VM
vm2_fixture = self.res.get_vn2_vm1_fixture()
assert vm2_fixture.verify_on_setup()
# public VM
fvm_fixture = self.res.get_fvn_vm1_fixture()
assert fvm_fixture.verify_on_setup()
fip_fixture = self.useFixture(FloatingIPFixture(
project_name=self.inputs.project_name, inputs=self.inputs,
connections=self.connections, pool_name=fip_pool_name,
vn_id=fvn_fixture.vn_id))
assert fip_fixture.verify_on_setup()
fip_id = fip_fixture.create_and_assoc_fip(fvn_fixture.vn_id,
vm1_fixture.vm_id)
fip = vm1_fixture.vnc_lib_h.floating_ip_read(
id=fip_id).get_floating_ip_address()
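        # Read back the floating IP address allocated from the public pool; the
        # public VM then pings and runs a TCP transfer to the private VM through
        # this NATed address.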
assert fvm_fixture.ping_to_ip(fip)
result = fvm_fixture.tcp_data_transfer(vm1_fixture.local_ip, fip)
fip_fixture.disassoc_and_delete_fip(fip_id)
assert result
return result
# end test_tcp_transfer_from_fip_vm
@preposttest_wrapper
def test_multiple_vn_vm(self):
""" Validate creation of multiple VN with multiple subnet and VMs in it.
"""
result = True
# Multiple VN's with multiple subnets
vn_s = {'vn-1': '20.1.1.0/24', 'vn-2':
['10.1.1.0/24', '10.1.2.0/24']}
multi_vn_fixture = self.useFixture(MultipleVNFixture(
connections=self.connections, inputs=self.inputs, subnet_count=2,
vn_name_net=vn_s, project_name=self.inputs.project_name))
assert multi_vn_fixture.verify_on_setup()
vn_objs = multi_vn_fixture.get_all_fixture_obj()
multi_vm_fixture = self.useFixture(MultipleVMFixture(
project_name=self.inputs.project_name, connections=self.connections,
vm_count_per_vn=2, vn_objs=vn_objs))
assert multi_vm_fixture.verify_on_setup()
return True
# end test_multiple_vn_vm
@preposttest_wrapper
def test_process_restart_with_multiple_vn_vm(self):
''' Test to validate that multiple VM creation and deletion passes.
'''
vm1_name = 'vm_mine'
vn_name = 'vn222'
vn_subnets = ['11.1.1.0/24']
vn_count_for_test = 32
if (len(self.inputs.compute_ips) == 1):
vn_count_for_test = 10
vm_fixture = self.useFixture(
create_multiple_vn_and_multiple_vm_fixture(
connections=self.connections,
vn_name=vn_name, vm_name=vm1_name, inputs=self.inputs, project_name=self.inputs.project_name,
subnets=vn_subnets, vn_count=vn_count_for_test, vm_count=1, subnet_count=1))
compute_ip = []
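        # Restart the vrouter agent only on the compute nodes that actually host the
        # test VMs, then re-verify every VM after the restart.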
for vmobj in vm_fixture.vm_obj_dict.values():
vm_host_ip = vmobj.vm_node_ip
if vm_host_ip not in compute_ip:
compute_ip.append(vm_host_ip)
self.inputs.restart_service('contrail-vrouter', compute_ip)
sleep(10)
for vmobj in vm_fixture.vm_obj_dict.values():
assert vmobj.verify_on_setup()
return True
@preposttest_wrapper
def test_control_node_switchover(self):
        ''' Stop the active control node and check that agent peering falls back to the other control node.
1. Pick one VN from respource pool which has 2 VM's in it
2. Verify ping between VM's
3. Find active control node in cluster by agent inspect
4. Stop control service on active control node
5. Verify agents are connected to new active control-node using xmpp connections
6. Bring back control service on previous active node
7. Verify ping between VM's again after bringing up control serveice
Pass criteria: Step 2,5 and 7 should pass
'''
if len(set(self.inputs.bgp_ips)) < 2:
            self.logger.info(
                "Skipping Test. At least 2 control nodes are required to run the test")
            raise self.skipTest(
                "Skipping Test. At least 2 control nodes are required to run the test")
result = True
vn1_name = self.res.vn1_name
vn1_subnets = self.res.vn1_subnets
vn1_vm1_name = self.res.vn1_vm1_name
vn1_vm2_name = self.res.vn1_vm2_name
vn1_fixture = self.res.get_vn1_fixture()
assert vn1_fixture.verify_on_setup()
vm1_fixture = self.res.get_vn1_vm1_fixture()
assert vm1_fixture.verify_on_setup()
vm2_fixture = self.res.get_vn1_vm2_fixture()
assert vm2_fixture.verify_on_setup()
vm1_fixture.wait_till_vm_is_up()
vm2_fixture.wait_till_vm_is_up()
assert vm1_fixture.ping_to_ip(vm2_fixture.vm_ip)
assert vm2_fixture.ping_to_ip(vm1_fixture.vm_ip)
# Figuring the active control node
active_controller = None
self.agent_inspect = self.connections.agent_inspect
inspect_h = self.agent_inspect[vm1_fixture.vm_node_ip]
agent_xmpp_status = inspect_h.get_vna_xmpp_connection_status()
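        # The XMPP status entry flagged with cfg_controller == 'Yes' is the control
        # node the agent is currently using for configuration; that one is treated
        # as the active control node for this test.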
for entry in agent_xmpp_status:
if entry['cfg_controller'] == 'Yes':
active_controller = entry['controller_ip']
active_controller_host_ip = self.inputs.host_data[
active_controller]['host_ip']
self.logger.info('Active control node from the Agent %s is %s' %
(vm1_fixture.vm_node_ip, active_controller_host_ip))
# Stop on Active node
        self.logger.info('Stopping the Control service in %s' %
(active_controller_host_ip))
self.inputs.stop_service(
'contrail-control', [active_controller_host_ip])
sleep(5)
# Check the control node shifted to other control node
new_active_controller = None
new_active_controller_state = None
inspect_h = self.agent_inspect[vm1_fixture.vm_node_ip]
agent_xmpp_status = inspect_h.get_vna_xmpp_connection_status()
for entry in agent_xmpp_status:
if entry['cfg_controller'] == 'Yes':
new_active_controller = entry['controller_ip']
new_active_controller_state = entry['state']
new_active_controller_host_ip = self.inputs.host_data[
new_active_controller]['host_ip']
self.logger.info('Active control node from the Agent %s is %s' %
(vm1_fixture.vm_node_ip, new_active_controller_host_ip))
if new_active_controller_host_ip == active_controller_host_ip:
self.logger.error(
                'Control node switchover failed. The old active control node was %s and the new active control node is %s' %
(active_controller_host_ip, new_active_controller_host_ip))
result = False
if new_active_controller_state != 'Established':
self.logger.error(
'Agent does not have Established XMPP connection with Active control node')
result = result and False
# Start the control node service again
self.logger.info('Starting the Control service in %s' %
(active_controller_host_ip))
self.inputs.start_service(
'contrail-control', [active_controller_host_ip])
# Check the BGP peering status from the currently active control node
sleep(5)
cn_bgp_entry = self.cn_inspect[
new_active_controller_host_ip].get_cn_bgp_neigh_entry()
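        # All peers reported by the new active control node are expected to be back
        # in the Established state once the stopped control service is restarted.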
for entry in cn_bgp_entry:
if entry['state'] != 'Established':
result = result and False
self.logger.error(
'With Peer %s peering is not Established. Current State %s ' %
(entry['peer'], entry['state']))
# Check the ping
self.logger.info('Checking the ping between the VM again')
assert vm1_fixture.ping_to_ip(vm2_fixture.vm_ip)
assert vm2_fixture.ping_to_ip(vm1_fixture.vm_ip)
if not result:
self.logger.error('Switchover of control node failed')
assert result
return True
# end test_control_node_switchover
@preposttest_wrapper
def test_agent_cleanup_with_control_node_stop(self):
        ''' Stop all the control nodes and verify the cleanup process in the agent
'''
if len(set(self.inputs.bgp_ips)) < 2:
raise self.skipTest(
"Skiping Test. At least 2 control node required to run the test")
result = True
vn1_name = self.res.vn1_name
vn1_subnets = self.res.vn1_subnets
vn1_vm1_name = self.res.vn1_vm1_name
vn1_vm2_name = self.res.vn1_vm2_name
vn1_fixture = self.res.get_vn1_fixture()
assert vn1_fixture.verify_on_setup()
vm1_fixture = self.res.get_vn1_vm1_fixture()
assert vm1_fixture.verify_on_setup()
vm2_fixture = self.res.get_vn1_vm2_fixture()
assert vm2_fixture.verify_on_setup()
vm1_fixture.wait_till_vm_is_up()
vm2_fixture.wait_till_vm_is_up()
assert vm2_fixture.ping_to_ip(vm1_fixture.vm_ip)
# Collecting all the control node details
controller_list = []
self.agent_inspect = self.connections.agent_inspect
inspect_h = self.agent_inspect[vm1_fixture.vm_node_ip]
agent_xmpp_status = inspect_h.get_vna_xmpp_connection_status()
for entry in agent_xmpp_status:
controller_list.append(entry['controller_ip'])
list_of_vm = inspect_h.get_vna_vm_list()
# Stop all the control node
for entry in controller_list:
            self.logger.info('Stopping the Control service in %s' % (entry))
self.inputs.stop_service('contrail-control', [entry])
sleep(5)
# It seems that cleanup happens after 2 mins
sleep(120)
# Verify VM entry is removed from the agent introspect
vm_id_list = inspect_h.get_vna_vm_list()
if vm1_fixture.vm_id in vm_id_list:
result = result and False
self.logger.error(
                'VM %s is still present in Agent Introspect. Cleanup not working when all control nodes are shut' %
(vm1_fixture.vm_name))
if vm2_fixture.vm_id in vm_id_list:
result = result and False
self.logger.error(
                'VM %s is still present in Agent Introspect. Cleanup not working when all control nodes are shut' %
(vm2_fixture.vm_name))
# TODO Verify the IF-Map entry
# Start all the control node
for entry in controller_list:
self.logger.info('Starting the Control service in %s' % (entry))
self.inputs.start_service('contrail-control', [entry])
sleep(10)
self.logger.info('Checking the VM came up properly or not')
assert vm2_fixture.verify_on_setup()
assert vm1_fixture.verify_on_setup()
# Check everything came up fine
vm_id_list = inspect_h.get_vna_vm_list()
if vm1_fixture.vm_id not in vm_id_list or vm2_fixture.vm_id not in vm_id_list:
result = result and False
self.logger.error(
                'After starting the services, not all the VM entries came up properly')
if not result:
self.logger.error(
'Test to verify cleanup of agent after control nodes stop Failed')
assert result
return True
# end test_agent_cleanup_with_control_node_stop
@preposttest_wrapper
def test_bring_up_vm_with_control_node_down(self):
        ''' Create VMs when there is no active control node. Verify the VMs come up fine when all control nodes are back
'''
self.agent_inspect = self.connections.agent_inspect
if len(set(self.inputs.bgp_ips)) < 2:
raise self.skipTest(
"Skiping Test. At least 2 control node required to run the test")
result = True
vn1_name = self.res.vn1_name
vn1_subnets = self.res.vn1_subnets
# Collecting all the control node details
controller_list = []
for entry in self.inputs.compute_ips:
inspect_h = self.agent_inspect[entry]
agent_xmpp_status = inspect_h.get_vna_xmpp_connection_status()
for entry in agent_xmpp_status:
controller_list.append(entry['controller_ip'])
controller_list = set(controller_list)
# Stop all the control node
for entry in controller_list:
            self.logger.info('Stopping the Control service in %s' % (entry))
self.inputs.stop_service('contrail-control', [entry])
sleep(10)
vn1_vm1_name = self.res.vn1_vm1_name
vn1_vm2_name = self.res.vn1_vm2_name
vn1_fixture = self.res.get_vn1_fixture()
vm1_fixture = self.res.get_vn1_vm1_fixture()
vm2_fixture = self.res.get_vn1_vm2_fixture()
# Check all the VM got IP when control node is down
        # Verify VM in Agent. This is mostly required to get the TAP interface and metadata IP.
        # TODO Need to check whether verify_vm_in_agent can pass when the
        # control node is down with the new implementation
vm1_fixture.verify_vm_in_agent()
vm2_fixture.verify_vm_in_agent()
vm_ip1 = vm1_fixture.get_vm_ip_from_vm()
vm_ip2 = vm2_fixture.get_vm_ip_from_vm()
if vm_ip1 is None or vm_ip2 is None:
result = result and False
self.logger.error(
'VM does not get an IP when all control nodes are down')
else:
self.logger.info(
'Both VM got required IP when control nodes are down')
# Start all the control node
for entry in controller_list:
self.logger.info('Starting the Control service in %s' % (entry))
self.inputs.start_service('contrail-control', [entry])
sleep(10)
        self.logger.info('Checking whether the VMs came up properly')
assert vn1_fixture.verify_on_setup()
assert vm2_fixture.verify_on_setup()
assert vm1_fixture.verify_on_setup()
# Check ping between VM
assert vm2_fixture.ping_to_ip(vm1_fixture.vm_ip)
if not result:
self.logger.error(
                'Test to verify VM bring-up with control nodes down failed')
assert result
return True
# end test_bring_up_vm_with_control_node_down
# @preposttest_wrapper
# def test_vn_add_delete_no_subnet(self):
# '''Test to validate VN creation even when no subnet is provided. Commented till 811 is fixed.
# '''
# vn_obj=self.useFixture( VNFixture(project_name= self.inputs.project_name, connections= self.connections,
# vn_name='vn007', inputs= self.inputs ))
# assert vn_obj.verify_on_setup()
# assert vn_obj
# return True
# end test_vn_add_delete_no_subnet
# @preposttest_wrapper
# def test_vn_reboot_nodes(self):
# ''' Test to validate persistence of VN across compute/control/cfgm node reboots Commented till 129 is fixed.
# '''
# vn_obj=self.useFixture( VNFixture(project_name= self.inputs.project_name, connections= self.connections,
# vn_name='vn111', inputs= self.inputs, subnets=['100.100.100.0/24']))
# assert vn_obj.verify_on_setup()
# reboot the compute node now and verify the VN persistence
# for compute_ip in self.inputs.compute_ips:
# self.inputs.reboot(compute_ip)
# sleep(120)
# assert vn_obj.verify_on_setup()
# reboot the control nodes now and verify the VN persistence
# for bgp_ip in self.inputs.bgp_ips:
# self.inputs.reboot(bgp_ip)
# sleep(120)
# assert vn_obj.verify_on_setup()
# reboot the cfgm node now and verify the VN persistence
# self.inputs.reboot(self.inputs.cfgm_ip)
# sleep(120)
# assert vn_obj.verify_on_setup()
# assert vn_obj
# return True
# end test_vn_reboot_nodes
# @preposttest_wrapper
# def vn_subnet_tests(self):
# """ Validate various type of subnets associated to VNs.Commented till 762, 801, 802, 803 and 805 are fixed.
# """
#
# result = True
# vn_s = {'vn-1' : '0.0.0.0/0', 'vn-2' : ['10.1.1.0/24', '10.1.1.0/24'], 'vn-3' : '169.254.1.1/24', 'vn-4' : '251.2.2.1/24', 'vn-5' : '127.0.0.1/32', 'vn-6' : '8.8.8.8/32', 'vn-7' : '9.9.9.9/31','vn-8' : ['11.11.11.0/30', '11.11.11.11/29']}
# multi_vn_fixture = self.useFixture(MultipleVNFixture(
# connections=self.connections, inputs=self.inputs, subnet_count=2,
# vn_name_net=vn_s, project_name=self.inputs.project_name))
#
# vn_objs = multi_vn_fixture.get_all_fixture_obj()
# assert not multi_vn_fixture.verify_on_setup()
#
# return True
# end test_subnets_vn
@preposttest_wrapper
def test_uve(self):
'''Test to validate collector uve.
'''
analytics_obj = AnalyticsVerification(
inputs=self.inputs, connections=self.connections)
assert analytics_obj.verify_collector_uve()
return True
# end test_uve
@preposttest_wrapper
def test_multiple_floating_ip_for_single_vm(self):
'''Test to validate floating-ip Assignment to a VM. It creates a VM, assigns a FIP to it and pings to a IP in the FIP VN.
'''
result = True
fip_pool_name = 'some-other-pool1'
fvn_name = self.res.fvn_name
fvm_name = self.res.fvn_vm1_name
fvn_subnets = self.res.fip_vn_subnets
fip_pool_name1 = 'some-pool2'
fvn_name1 = 'fvnn200'
fvm_name1 = 'vm200'
fvn_subnets1 = ['150.1.1.0/24']
vm1_name = self.res.vn1_vm1_name
vn1_name = self.res.vn1_name
vn1_subnets = self.res.vn1_subnets
# VN Fixture
fvn_fixture = self.res.get_fvn_fixture()
assert fvn_fixture.verify_on_setup()
fvn_fixture1 = self.useFixture(
VNFixture(
project_name=self.inputs.project_name, connections=self.connections,
vn_name=fvn_name1, inputs=self.inputs, subnets=fvn_subnets1))
assert fvn_fixture1.verify_on_setup()
vn1_fixture = self.res.get_vn1_fixture()
assert vn1_fixture.verify_on_setup()
# VM Fixture
vm1_fixture = self.res.get_vn1_vm1_fixture()
assert vm1_fixture.verify_on_setup()
fvm_fixture = self.res.get_fvn_vm1_fixture()
assert fvm_fixture.verify_on_setup()
fvm_fixture1 = self.useFixture(
VMFixture(
project_name=self.inputs.project_name, connections=self.connections,
vn_obj=fvn_fixture1.obj, vm_name=fvm_name1))
assert fvm_fixture1.verify_on_setup()
# Floating Ip Fixture
fip_fixture = self.useFixture(
FloatingIPFixture(
project_name=self.inputs.project_name, inputs=self.inputs,
connections=self.connections, pool_name=fip_pool_name, vn_id=fvn_fixture.vn_id))
assert fip_fixture.verify_on_setup()
fip_fixture1 = self.useFixture(
FloatingIPFixture(
project_name=self.inputs.project_name, inputs=self.inputs,
connections=self.connections, pool_name=fip_pool_name1, vn_id=fvn_fixture1.vn_id))
assert fip_fixture1.verify_on_setup()
fip_id = fip_fixture.create_and_assoc_fip(
fvn_fixture.vn_id, vm1_fixture.vm_id)
assert fip_fixture.verify_fip(fip_id, vm1_fixture, fvn_fixture)
fip_id1 = fip_fixture.create_and_assoc_fip(
fvn_fixture1.vn_id, vm1_fixture.vm_id)
assert fip_fixture1.verify_fip(fip_id1, vm1_fixture, fvn_fixture1)
# Check the communication from borrower VM to all 2 networks
if not vm1_fixture.ping_with_certainty(fvm_fixture.vm_ip):
result = result and False
if not vm1_fixture.ping_with_certainty(fvm_fixture1.vm_ip):
result = result and False
        # Check that the floating IP provider VNs should not communicate with
        # each other
        self.logger.info(
            'Ping should fail here. %s and %s should not be able to communicate with each other' %
(fvm_name1, fvm_name))
if fvm_fixture1.ping_to_ip(fvm_fixture.vm_ip):
result = result and False
        # Check that after disassociation of the floating IP, communication
        # stops only from that network
fip_fixture.disassoc_and_delete_fip(fip_id)
self.logger.info(
'Ping should fail here as floating IP pool is already released')
if vm1_fixture.ping_to_ip(fvm_fixture.vm_ip):
result = result and False
if not vm1_fixture.ping_with_certainty(fvm_fixture1.vm_ip):
result = result and False
fip_fixture1.disassoc_and_delete_fip(fip_id1)
if not result:
self.logger.error(
'Test to check multiple floating ip for single VM has failed')
assert result
return True
# end test_floating_ip
@preposttest_wrapper
def test_ipam_add_delete(self):
'''Test to validate IPAM creation, association of a VN and creating VMs in the VN. Ping b/w the VMs should be successful.
1. Create non-default IPAM
2. Create VN with user-created IPAM and verify
        3. Launch 2 VMs within the VN which uses the non-default IPAM
        4. Ping between these 2 VMs
Pass criteria: Step 1,2,3 and 4 should pass
'''
project_obj = self.useFixture(
ProjectFixture(vnc_lib_h=self.vnc_lib, connections=self.connections))
ipam_obj = self.useFixture(
IPAMFixture(project_obj=project_obj, name='my-ipam'))
assert ipam_obj
vn_fixture = self.useFixture(
VNFixture(
project_name=self.inputs.project_name, connections=self.connections,
vn_name='vn22', inputs=self.inputs, subnets=['22.1.1.0/24'], ipam_fq_name=ipam_obj.fq_name))
assert vn_fixture.verify_on_setup()
vm1_fixture = self.useFixture(VMFixture(connections=self.connections,
vn_obj=vn_fixture.obj, vm_name='vm1'))
vm2_fixture = self.useFixture(VMFixture(connections=self.connections,
vn_obj=vn_fixture.obj, vm_name='vm2'))
assert vm1_fixture.verify_on_setup()
assert vm2_fixture.verify_on_setup()
vm1_fixture.wait_till_vm_is_up()
vm2_fixture.wait_till_vm_is_up()
assert vm1_fixture.ping_with_certainty(vm2_fixture.vm_ip)
return True
# end test_ipam_add_delete
@preposttest_wrapper
def test_remove_policy_with_ref(self):
''' This tests the following scenarios.
        1. Test to validate that policy removal will fail when it is referenced by a VN.
        2. Validate vn_policy data in api-s against quantum-vn data when a policy is created and unbound from the VN through quantum APIs.
        3. Validate policy data in api-s against quantum-policy data when a policy is created and deleted through quantum APIs.
'''
# vn1_name='vn4'
# vn1_subnets=['10.1.1.0/24']
vn1_name = self.res.vn1_name
vn1_subnets = self.res.vn1_subnets
policy_name = 'policy1'
rules = [
{
'direction': '<>', 'simple_action': 'pass',
'protocol': 'icmp',
'source_network': vn1_name,
'dest_network': vn1_name,
},
]
project_obj = self.useFixture(
ProjectFixture(vnc_lib_h=self.vnc_lib, connections=self.connections))
policy_fixture = self.useFixture(
PolicyFixture(
policy_name=policy_name, rules_list=rules, inputs=self.inputs,
connections=self.connections))
vn1_fixture = self.res.get_vn1_fixture()
#policy_fq_names = [self.quantum_fixture.get_policy_fq_name(policy_fixture.policy_obj)]
#vn1_fixture.bind_policies( policy_fq_names,vn1_fixture.vn_id)
vn1_fixture.bind_policies(
[policy_fixture.policy_fq_name], vn1_fixture.vn_id)
assert vn1_fixture.verify_on_setup()
# try to remove policy which was referenced with VN.
policy_removal = True
pol_list = self.quantum_fixture.list_policys()
pol_id = None
for policy in pol_list['policys']:
if policy['name'] == policy_name:
pol_id = policy['id']
policy_removal = self.quantum_fixture.delete_policy(
policy['id'])
                # In the failure scenario, clear the policy from the VN for
                # further test cases
if policy_removal:
vn1_fixture.unbind_policies(
vn1_fixture.vn_id, [policy_fixture.policy_fq_name])
break
self.assertFalse(
            policy_removal, 'Policy removal succeeded unexpectedly since the policy is still referenced by a VN')
assert vn1_fixture.verify_on_setup()
policy_fixture.verify_policy_in_api_server()
if vn1_fixture.policy_objs:
policy_fq_names = [
self.quantum_fixture.get_policy_fq_name(x) for x in vn1_fixture.policy_objs]
# unbind the policy from VN
vn1_fixture.unbind_policies(
vn1_fixture.vn_id, [policy_fixture.policy_fq_name])
# Verify policy ref is removed from VN
vn_pol_found = vn1_fixture.verify_vn_policy_not_in_api_server(
policy_name)
self.assertFalse(
vn_pol_found, 'policy not removed from VN after policy unbind from VN')
# remove the policy using quantum API
policy_removal = self.quantum_fixture.delete_policy(pol_id)
# TODO This code is not working because of bug#1056. Need to test once bug is Fixed.
#pol_found = policy_fixture.verify_policy_not_in_api_server()
#self.assertFalse(pol_found,'policy not removed from API server when policy removed from Quantum')
return True
@preposttest_wrapper
def test_verify_generator_collector_connections(self):
'''
Description: Verify generator:module connections to collector
1.Verify all generators connected to collector - fails otherwise
2.Get the xmpp peers in vrouter uve and get the active xmpp peer out of it
3.Verify from agent introspect that active xmpp matches with step 2 - fails otherwise
        4.Get bgp peers from bgp-peer uve and verify from control node introspect that they match - fails otherwise
Maintainer: [email protected]
'''
self.logger.info("START ...")
# check collector-generator connections through uves.
assert self.analytics_obj.verify_collector_uve()
# Verify vrouter uve active xmpp connections
assert self.analytics_obj.verify_active_xmpp_peer_in_vrouter_uve()
# Verify vrouter uve for xmpp connections
assert self.analytics_obj.verify_vrouter_xmpp_connections()
# count of xmpp peer and bgp peer verification in bgp-router uve
assert self.analytics_obj.verify_bgp_router_uve_xmpp_and_bgp_count()
self.logger.info("END...")
return True
    # end test_verify_generator_collector_connections
# end TestSanityFixture
| 43.623684 | 247 | 0.632603 | 49,296 | 0.991253 | 0 | 0 | 45,879 | 0.922543 | 0 | 0 | 14,347 | 0.288492 |
dfff599aef2fa931d79fa84797d0acce9a216f5a | 5,407 | py | Python | murder.py | lgrn/murder | 1e4582cc5fa8c31c35e70997daebd111f1badf4d | [
"Unlicense"
] | null | null | null | murder.py | lgrn/murder | 1e4582cc5fa8c31c35e70997daebd111f1badf4d | [
"Unlicense"
] | null | null | null | murder.py | lgrn/murder | 1e4582cc5fa8c31c35e70997daebd111f1badf4d | [
"Unlicense"
] | null | null | null | #!/usr/bin/env python3
# murder 0.2.3
import sys
if sys.version_info[0] != (3):
sys.stdout.write("Sorry this software requires Python 3. This is Python {}.\n".format(sys.version_info[0]))
sys.exit(1)
import time
import requests
import json
import re
# Your "filename" file should contain one word per row. Don't worry about
# newlines and whitespace, it will be stripped. Any names containing anything
# but A-Z/a-z, underscores and numbers will be skipped and not queried.
filename = "input.txt"
try:
with open(filename) as f:
        lines = [line.strip().strip('\n').lower() for line in f]
lines = list(set(lines))
except FileNotFoundError:
print("For this script to work, {} needs to exist in the working directory. Exiting.".format(filename))
raise SystemExit
except UnicodeDecodeError:
print("Oops! {} isn't UTF-8. Convert it, for example by running iconv. Exiting.".format(filename))
raise SystemExit
unavailable_filename = "unavailable.txt"
try:
with open(unavailable_filename) as f:
        unavailable_lines = [line.strip().strip('\n') for line in f]
except FileNotFoundError:
print("\n{} was not found. That's fine, probably there wasn't a previous run.".format(unavailable_filename))
available_filename = "output.txt"
try:
with open(available_filename) as f:
        available_lines = [line.strip().strip('\n') for line in f]
except FileNotFoundError:
print("\n{} was not found. That's fine, probably there wasn't a previous run.".format(available_filename))
pretty_amount = "{:,}".format(len(lines))
print("\n[>>>>>>>>>] Imported {} words from {}.".format(pretty_amount,filename))
# This regex pattern validates usernames.
pattern = re.compile("^[a-zA-Z0-9]+([._]?[a-zA-Z0-9]+)*$")
sys.stdout.flush()
# This function will check if a name is available:
def is_available(username):
url = ("https://twitter.com/users/username_available"
"?scribeContext%5Bcomponent%5D=form&scribeContext%5B"
"element%5D=screen_name&username=" + str(username.lower()) +
"&value=" + str(username.lower()))
response = requests.get(url)
try:
data = json.loads(response.text)
reason = data.get("reason")
except UnboundLocalError:
print('[ JSON! ] Twitter refused to give us a decent response for this request: ')
print(url)
print('[ JSON! ] Assuming its unavailable and attempting to move on.')
reason = "unavailable"
pass
except ValueError:
        print('[ JSON! ] UH-OH! You\'re probably being rate limited :(')
        print('[ JSON! ] You should stop for now and/or adjust your sleep_seconds')
print('[ JSON! ] ValueError for this request: ')
print(url)
raise SystemExit
if reason == "available":
return True
else:
return False
def write_available(i):
f = open("output.txt", "a")
f.write(i)
f.close()
def write_unavailable(i):
f = open("unavailable.txt", "a")
f.write(i)
f.close()
failed_tries = 0
ok_tries = 0
# Let's clean up our "lines" array first so it only contains stuff we
# actually want to throw at the API.
clean_lines = []
for i in lines:
if pattern.match(i) and len(str(i)) == 5:
clean_lines.append(i)
# NOTE: "Compliant" below is decided by the for loop above.
pretty_amount = "{:,}".format(len(clean_lines))
print("[>>>>>>>>>] Cleaned up import to only include compliant words. We now have {} words.".format(pretty_amount) + "\n")
# Clean the array further by removing already checked names (failed and succeeded).
try:
for i in unavailable_lines:
if i in clean_lines:
clean_lines.remove(i)
print("[ CLEANUP ] '{}' will not be checked, we already know it's taken.".format(i.lower()))
except NameError:
# If there wasn't a previous run, this won't exist. That's fine.
pass
try:
for i in available_lines:
if i in clean_lines:
clean_lines.remove(i)
print("[ CLEANUP ] '{}' will not be checked, we already know it's available.".format(i.lower()))
except NameError:
# If there wasn't a previous run, this won't exist. That's fine.
pass
try:
if unavailable_lines or available_lines:
pretty_amount = "{:,}".format(len(clean_lines))
print("[>>>>>>>>>] Done cross-checking txt files from previous runs, we now have {} words.".format(pretty_amount) + "\n")
except NameError:
pass
# NOTE: time.sleep waits because twitter has a rate limit of 150/15min (?) <- bad guess
print("[>>>>>>>>>] Making API calls now." + "\n")
sleep_seconds = 10
for i in clean_lines:
sys.stdout.flush()
if is_available(i):
print("[AVAILABLE] '{}'! Saving to output.txt, stalling for next API call.".format(i.lower()))
ok_tries += 1
write_available(i.lower() + '\n')
sys.stdout.flush()
time.sleep(sleep_seconds)
else:
print("[ TAKEN ] '{}'. Too bad. Stalling for next API call.".format(i.lower()))
failed_tries += 1
#delete_row(i)
write_unavailable(i.lower() + '\n')
time.sleep(sleep_seconds)
total_tries = failed_tries + ok_tries
print("Script finished. Twitter was hit with "
"{} queries. We found {} available names, saved to output.txt".format(total_tries,ok_tries)) | 32.769697 | 129 | 0.653227 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,448 | 0.452746 |
dfff777451f2b530e80b5323a7284116b77ea627 | 703 | py | Python | cfn_review_bot/merge.py | biochimia/cfn-review-bot | 1c8a84b51f7c398c21725cb888a9ab694ddfbb56 | [
"Apache-2.0"
] | 1 | 2019-04-04T12:09:16.000Z | 2019-04-04T12:09:16.000Z | cfn_review_bot/merge.py | biochimia/cfn-review-bot | 1c8a84b51f7c398c21725cb888a9ab694ddfbb56 | [
"Apache-2.0"
] | null | null | null | cfn_review_bot/merge.py | biochimia/cfn-review-bot | 1c8a84b51f7c398c21725cb888a9ab694ddfbb56 | [
"Apache-2.0"
] | null | null | null | def _deep_merge_mapping(old, new):
merged = {}
merged.update(old)
for k, nv in new.items():
try:
ov = merged[k]
except KeyError:
merged[k] = nv
continue
merged[k] = deep_merge(ov, nv)
return merged
def _deep_merge_sequence(old, new):
return old + new
def deep_merge(old, new):
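  # Mappings are merged key by key (recursing into shared keys), sequences are
  # concatenated, equal values pass through unchanged, and anything else is a
  # conflict that raises.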
if (isinstance(old, dict)
and isinstance(new, dict)):
return _deep_merge_mapping(old, new)
if (isinstance(old, list)
and isinstance(new, list)):
return _deep_merge_sequence(old, new)
if old == new:
return old
raise Exception('Unable to merge {} with {}'.format(old, new))
| 20.676471 | 66 | 0.571835 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 28 | 0.039829 |
5f0133420725ce23664fd5aac6eace5b4be90d9b | 324 | py | Python | 02_module/package_test/module1/my_sum.py | zzz0072/Python_Exercises | 9918aa8197a77ef237e5e60306c7785eca5cb1d3 | [
"BSD-2-Clause"
] | null | null | null | 02_module/package_test/module1/my_sum.py | zzz0072/Python_Exercises | 9918aa8197a77ef237e5e60306c7785eca5cb1d3 | [
"BSD-2-Clause"
] | null | null | null | 02_module/package_test/module1/my_sum.py | zzz0072/Python_Exercises | 9918aa8197a77ef237e5e60306c7785eca5cb1d3 | [
"BSD-2-Clause"
] | null | null | null | #/usr/bin/env python
from ..module2 import my_print
def my_sum(x, y):
result = x + y
my_print.my_print(result)
# To run method alone
if __name__ == "__main__":
import sys
if len(sys.argv) != 3:
print("%s str1 str2" % sys.argv[0])
raise SystemExit(1)
my_sum(sys.argv[1], sys.argv[2])
| 18 | 43 | 0.608025 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 65 | 0.200617 |
5f018a5353d8adb9d68568f7a0b49dde04ed193e | 75 | py | Python | storch/models/__init__.py | STomoya/storch | 47754eecd5fb5404dd345f06fb0f8d3270a9e5b9 | [
"MIT"
] | null | null | null | storch/models/__init__.py | STomoya/storch | 47754eecd5fb5404dd345f06fb0f8d3270a9e5b9 | [
"MIT"
] | null | null | null | storch/models/__init__.py | STomoya/storch | 47754eecd5fb5404dd345f06fb0f8d3270a9e5b9 | [
"MIT"
] | null | null | null |
from multiscale import MultiScale
from patchgan import PatchDiscriminator
| 18.75 | 39 | 0.88 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
5f033434eab634732c27a8827763d152ae9391a1 | 1,054 | py | Python | repos/system_upgrade/el7toel8/actors/preparepythonworkround/tests/test_preparepythonworkaround.py | AloisMahdal/leapp-repository | 9ac2b8005750e8e56e5fde61e8762044d0f16257 | [
"Apache-2.0"
] | null | null | null | repos/system_upgrade/el7toel8/actors/preparepythonworkround/tests/test_preparepythonworkaround.py | AloisMahdal/leapp-repository | 9ac2b8005750e8e56e5fde61e8762044d0f16257 | [
"Apache-2.0"
] | 9 | 2020-01-07T12:48:59.000Z | 2020-01-16T10:44:34.000Z | repos/system_upgrade/el7toel8/actors/preparepythonworkround/tests/test_preparepythonworkaround.py | AloisMahdal/leapp-repository | 9ac2b8005750e8e56e5fde61e8762044d0f16257 | [
"Apache-2.0"
] | null | null | null | from os import symlink, path, access, X_OK
import pytest
from leapp.libraries.actor import workaround
from leapp.libraries.common.utils import makedirs
def fake_symlink(basedir):
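    # Substitute for os.symlink that first creates the (fake) source path under
    # the pytest tmpdir, so links can be made without relying on the real
    # filesystem layout.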
def impl(source, target):
source_path = str(basedir.join(*source.lstrip('/').split('/')))
makedirs(source_path)
symlink(source_path, target)
return impl
def test_apply_python3_workaround(monkeypatch, tmpdir):
leapp_home = tmpdir.mkdir('tmp_leapp_py3')
monkeypatch.setattr(workaround.os, 'symlink', fake_symlink(tmpdir.mkdir('lib')))
monkeypatch.setattr(workaround, 'LEAPP_HOME', str(leapp_home))
# Ensure double invocation doesn't cause a problem
workaround.apply_python3_workaround()
workaround.apply_python3_workaround()
# Ensure creation of all required elements
assert path.islink(str(leapp_home.join('leapp')))
assert path.isfile(str(leapp_home.join('leapp3')))
assert access(str(leapp_home.join('leapp3')), X_OK)
assert str(leapp_home) in leapp_home.join('leapp3').read_text('utf-8')
| 32.9375 | 84 | 0.734345 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 177 | 0.167932 |
5f03df5d79ef568c79e0a3f2f05ab7cc845b03d5 | 707 | py | Python | codility/equi_leader.py | py-in-the-sky/challenges | 4a36095de8cb56b4f9f83c241eafb13dfbeb4065 | [
"MIT"
] | null | null | null | codility/equi_leader.py | py-in-the-sky/challenges | 4a36095de8cb56b4f9f83c241eafb13dfbeb4065 | [
"MIT"
] | null | null | null | codility/equi_leader.py | py-in-the-sky/challenges | 4a36095de8cb56b4f9f83c241eafb13dfbeb4065 | [
"MIT"
] | null | null | null | """
https://codility.com/programmers/task/equi_leader/
"""
from collections import Counter, defaultdict
def solution(A):
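    # Scan split points left to right, keeping running counts of the prefix and
    # the most frequent prefix value ("top"); position i is an equi leader
    # exactly when that value is a strict majority of both the prefix A[0..i]
    # and the suffix A[i+1..].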
def _is_equi_leader(i):
prefix_count_top = running_counts[top]
suffix_count_top = total_counts[top] - prefix_count_top
return (prefix_count_top * 2 > i + 1) and (suffix_count_top * 2 > len(A) - i - 1)
total_counts = Counter(A)
running_counts = defaultdict(int)
top = A[0]
result = 0
for i in xrange(len(A) - 1):
n = A[i]
running_counts[n] += 1
top = top if running_counts[top] >= running_counts[n] else n
if _is_equi_leader(i):
result += 1
return result
| 24.37931 | 89 | 0.595474 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 58 | 0.082037 |
5f049724d72ac2de8c5b11138f1e4b59bdb512ad | 1,744 | py | Python | src/harness/cu_pass/dpa_calculator/helpers/list_distributor/list_distributor.py | NSF-Swift/Spectrum-Access-System | 02cf3490c9fd0cec38074d3bdb3bca63bb7d03bf | [
"Apache-2.0"
] | null | null | null | src/harness/cu_pass/dpa_calculator/helpers/list_distributor/list_distributor.py | NSF-Swift/Spectrum-Access-System | 02cf3490c9fd0cec38074d3bdb3bca63bb7d03bf | [
"Apache-2.0"
] | null | null | null | src/harness/cu_pass/dpa_calculator/helpers/list_distributor/list_distributor.py | NSF-Swift/Spectrum-Access-System | 02cf3490c9fd0cec38074d3bdb3bca63bb7d03bf | [
"Apache-2.0"
] | null | null | null | from abc import ABC, abstractmethod
from typing import Any, List, TypeVar
from cu_pass.dpa_calculator.helpers.list_distributor.fractional_distribution.fractional_distribution import \
FractionalDistribution
RETURN_TYPE = TypeVar('RETURN_TYPE')
class ListDistributor(ABC):
def __init__(self, items_to_distribute: List[Any]):
self._items = items_to_distribute
def distribute(self) -> List[List[RETURN_TYPE]]:
return [self._modify_group(distribution=distribution, group=group)
for distribution, group in zip(self._distributions, self._groups)]
@abstractmethod
def _modify_group(self, distribution: FractionalDistribution, group: List[Any]) -> List[RETURN_TYPE]:
return group
@property
def _groups(self) -> List[List[Any]]:
groups = []
for distribution in self._distributions:
next_index = sum(len(group) for group in groups)
remaining_items = self._items[next_index:]
items_in_group = self._get_items_in_distribution(distribution=distribution, items=remaining_items)
groups.append(items_in_group)
return groups
@property
@abstractmethod
def _distributions(self) -> List[FractionalDistribution]:
raise NotImplementedError
def _get_items_in_distribution(self, distribution: FractionalDistribution, items: List[Any]) -> List[Any]:
number_at_this_distribution = round(self._total_number_of_items * distribution.fraction)
is_last_distribution = distribution == self._distributions[-1]
return items if is_last_distribution else items[:number_at_this_distribution]
@property
def _total_number_of_items(self) -> int:
return len(self._items)
| 38.755556 | 110 | 0.725344 | 1,491 | 0.854931 | 0 | 0 | 764 | 0.438073 | 0 | 0 | 13 | 0.007454 |
5f05166068ffa658a5a11fcc559025940e70a85b | 1,419 | py | Python | downloader.py | Luonic/tf-cnn-lstm-ocr-captcha | 9ac6202d546093d95083a32c71cdccb800dfdea2 | [
"MIT"
] | 10 | 2017-08-08T22:57:32.000Z | 2020-04-07T21:50:20.000Z | downloader.py | Luonic/tf-cnn-lstm-ocr-captcha | 9ac6202d546093d95083a32c71cdccb800dfdea2 | [
"MIT"
] | null | null | null | downloader.py | Luonic/tf-cnn-lstm-ocr-captcha | 9ac6202d546093d95083a32c71cdccb800dfdea2 | [
"MIT"
] | 5 | 2018-07-17T16:47:21.000Z | 2021-11-06T15:03:56.000Z | import urllib
import requests
import multiprocessing.pool
from multiprocessing import Pool
import uuid
import os
images_dir = os.path.join("data", "train")
small_letters = list(map(chr, range(ord('a'), ord('f')+1)))
digits = list(map(chr, range(ord('0'), ord('9')+1)))
base_16 = digits + small_letters  # list() so concatenation also works on Python 3, where map() is lazy
MAX_THREADS = 100
def captcha(code):
try:
r = requests.get("https://local.thedrhax.pw/rucaptcha/?" + code)
filename = code + "_" + str(uuid.uuid1().time) + ".png"
path = os.path.join(images_dir, filename)
with open(path, "wb") as png:
png.write(bytes(r.content))
print("Downloaded " + str(code))
except Exception as e:
print(str(e))
if __name__ == "__main__":
labels = []
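    # Build every 4-character base-16 label (0000 through ffff) as a captcha
    # code to request.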
for i in range(0, len(base_16)):
for j in range(0, len(base_16)):
for m in range(0, len(base_16)):
for n in range(0, len(base_16)):
try:
label = base_16[i] + base_16[j] + base_16[m] + base_16[n]
labels.append(label)
# urllib.urlretrieve("https://local.thedrhax.pw/rucaptcha/?" + str(label), str(label) + ".png")
except Exception as e:
print(str(e))
print(labels)
p = Pool(MAX_THREADS)
while 1:
p.map(captcha, labels)
print("Finished all downloads") | 30.191489 | 143 | 0.547569 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 243 | 0.171247 |
5f05920c4f06c4b47bf5845e7dd08b41ac585c06 | 7,679 | py | Python | code/Models.py | IGLICT/CMIC-Retrieval | d2f452517360f127d0a8175d55ba9f9491c152c2 | [
"MIT"
] | 29 | 2021-10-01T12:05:54.000Z | 2022-03-16T02:40:19.000Z | code/Models.py | IGLICT/CMIC-Retrieval | d2f452517360f127d0a8175d55ba9f9491c152c2 | [
"MIT"
] | 5 | 2021-12-20T12:25:58.000Z | 2022-03-10T19:08:32.000Z | code/Models.py | IGLICT/CMIC-Retrieval | d2f452517360f127d0a8175d55ba9f9491c152c2 | [
"MIT"
] | 1 | 2022-01-04T05:52:49.000Z | 2022-01-04T05:52:49.000Z | import jittor as jt
from jittor import nn, models
if jt.has_cuda:
    jt.flags.use_cuda = 1
class QueryEncoder(nn.Module):
def __init__(self, out_dim=128):
super(QueryEncoder, self).__init__()
self.dim = out_dim
self.resnet = models.resnet50(pretrained=False)
self.resnet.conv1 = nn.Conv2d(4, 64, kernel_size=7, stride=2, padding=3, bias=False)
fc_features = self.resnet.fc.in_features
self.resnet.fc = nn.Sequential(
nn.BatchNorm1d(fc_features*1),
nn.Linear(fc_features*1, self.dim),
)
def execute(self, input):
embeddings = self.resnet(input)
embeddings = jt.normalize(embeddings, p=2, dim=1)
return embeddings
class RenderingEncoder(nn.Module):
def __init__(self, out_dim=128):
super(RenderingEncoder, self).__init__()
self.dim = out_dim
self.resnet = models.resnet18(pretrained=False)
self.resnet.conv1 = nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3, bias=False)
fc_features = self.resnet.fc.in_features
self.resnet.fc = nn.Sequential(
nn.BatchNorm1d(fc_features*1),
nn.Linear(fc_features*1, self.dim),
)
def execute(self, inputs):
embeddings = self.resnet(inputs)
embeddings = jt.normalize(embeddings, p=2, dim=1)
return embeddings
class Attention(nn.Module):
'''
Revised from pytorch version: <https://github.com/IBM/pytorch-seq2seq/blob/master/LICENSE>
'''
""" Applies attention mechanism on the `context` using the `query`.
**Thank you** to IBM for their initial implementation of :class:`Attention`. Here is
their `License
<https://github.com/IBM/pytorch-seq2seq/blob/master/LICENSE>`__.
Args:
dimensions (int): Dimensionality of the query and context.
attention_type (str, optional): How to compute the attention score:
* dot: :math:`score(H_j,q) = H_j^T q`
* general: :math:`score(H_j, q) = H_j^T W_a q`
Example:
>>> attention = Attention(256)
>>> query = torch.randn(5, 1, 256)
>>> context = torch.randn(5, 5, 256)
>>> output, weights = attention(query, context)
>>> output.size()
torch.Size([5, 1, 256])
>>> weights.size()
torch.Size([5, 1, 5])
"""
def __init__(self, dimensions, attention_type='general'):
super(Attention, self).__init__()
if attention_type not in ['dot', 'general']:
raise ValueError('Invalid attention type selected.')
self.attention_type = attention_type
if self.attention_type == 'general':
self.linear_in = nn.Linear(dimensions, dimensions, bias=False)
self.linear_out = nn.Linear(dimensions * 2, dimensions, bias=False)
self.softmax = nn.Softmax(dim=-1)
self.tanh = nn.Tanh()
def execute(self, query, context):
"""
Args:
query (:class:`torch.FloatTensor` [batch size, output length, dimensions]): Sequence of
queries to query the context.
context (:class:`torch.FloatTensor` [batch size, query length, dimensions]): Data
                over which to apply the attention mechanism.
Returns:
:class:`tuple` with `output` and `weights`:
* **output** (:class:`torch.LongTensor` [batch size, output length, dimensions]):
Tensor containing the attended features.
* **weights** (:class:`torch.FloatTensor` [batch size, output length, query length]):
Tensor containing attention weights.
"""
batch_size, output_len, dimensions = query.size()
query_len = context.size(1)
if self.attention_type == "general":
query = query.view(batch_size * output_len, dimensions)
query = self.linear_in(query)
query = query.view(batch_size, output_len, dimensions)
# TODO: Include mask on PADDING_INDEX?
# (batch_size, output_len, dimensions) * (batch_size, query_len, dimensions) ->
# (batch_size, output_len, query_len)
# attention_scores = nn.bmm(query, context.transpose(1, 2).contiguous())
attention_scores = nn.bmm(query, context.transpose(0, 2, 1))
# Compute weights across every context sequence
attention_scores = attention_scores.view(batch_size * output_len, query_len)
attention_weights = self.softmax(attention_scores)
attention_weights = attention_weights.view(batch_size, output_len, query_len)
# (batch_size, output_len, query_len) * (batch_size, query_len, dimensions) ->
# (batch_size, output_len, dimensions)
mix = nn.bmm(attention_weights, context)
# concat -> (batch_size * output_len, 2*dimensions)
combined = jt.concat((mix, query), dim=2)
combined = combined.view(batch_size * output_len, 2 * dimensions)
# Apply linear_out on every 2nd dimension of concat
# output -> (batch_size, output_len, dimensions)
output = self.linear_out(combined).view(batch_size, output_len, dimensions)
output = self.tanh(output)
return output, attention_weights
class RetrievalNet(nn.Module):
'''
QueryEncoder
RenderingEncoder
Attention
'''
def __init__(self, cfg):
super(RetrievalNet, self).__init__()
self.dim = cfg.models.z_dim
self.size = cfg.data.pix_size
self.view_num = cfg.data.view_num
self.query_encoder = QueryEncoder(self.dim)
self.rendering_encoder = RenderingEncoder(self.dim)
self.attention = Attention(self.dim)
def execute(self, query, rendering):
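        # query: (bs, 4, H, W) query images; rendering: (bs, view_num, size, size)
        # multi-view renderings, encoded per view and then attended over by
        # every query embedding.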
query_ebd = self.get_query_ebd(query)
bs = query_ebd.shape[0]
rendering = rendering.view(-1, 1, self.size, self.size)
rendering_ebds = self.get_rendering_ebd(rendering).view(-1, self.view_num, self.dim)
#(shape, image, ebd) -> (bs, bs, 128)
query_ebd = query_ebd.unsqueeze(0).repeat(bs, 1, 1)
# query_ebd: bs, bs, dim
# rendering_ebds: bs, 12, dim
_, weights = self.attention_query(query_ebd, rendering_ebds)
# weights: bxxbsx12
# rendering_ebds: bsx12x128
# queried_rendering_ebd: bsxbsx128 (shape, model, 128)
# reference to https://pytorchnlp.readthedocs.io/en/latest/_modules/torchnlp/nn/attention.html#Attentionl
queried_rendering_ebd = nn.bmm(weights, rendering_ebds)
return query_ebd, queried_rendering_ebd
def get_query_ebd(self, inputs):
return self.query_encoder(inputs)
def get_rendering_ebd(self, inputs):
return self.rendering_encoder(inputs)
def attention_query(self, ebd, pool_ebd):
return self.attention(ebd, pool_ebd)
if __name__ == '__main__':
import yaml
import argparse
with open('./configs/pix3d.yaml', 'r') as f:
        config = yaml.load(f, Loader=yaml.FullLoader)
def dict2namespace(config):
namespace = argparse.Namespace()
for key, value in config.items():
if isinstance(value, dict):
new_value = dict2namespace(value)
else:
new_value = value
setattr(namespace, key, new_value)
return namespace
config = dict2namespace(config)
models = RetrievalNet(config)
img = jt.random([2,4,224,224]).stop_grad()
mask = jt.random([2,12,224,224]).stop_grad()
# mm = models.resnet50(pretrained=False)
# # print(mm)
# a = mm(img)
outputs = models(img, mask) | 36.393365 | 113 | 0.625602 | 6,796 | 0.885011 | 0 | 0 | 0 | 0 | 0 | 0 | 2,775 | 0.361375 |
5f05a35db2bf24e5cd3d450829e44e1d6868265e | 2,348 | py | Python | apps/agendas/tests/unit/selectors/test_doctor_profile_selector.py | victoraguilarc/agendas | 31b24a2d6350605b638b59062f297ef3f58e9879 | [
"MIT"
] | 2 | 2020-06-06T23:10:27.000Z | 2020-10-06T19:12:26.000Z | apps/agendas/tests/unit/selectors/test_doctor_profile_selector.py | victoraguilarc/medical-appointment | 31b24a2d6350605b638b59062f297ef3f58e9879 | [
"MIT"
] | 3 | 2021-04-08T20:44:38.000Z | 2021-09-22T19:04:16.000Z | apps/agendas/tests/unit/selectors/test_doctor_profile_selector.py | victoraguilarc/agendas | 31b24a2d6350605b638b59062f297ef3f58e9879 | [
"MIT"
] | 1 | 2020-10-10T14:07:37.000Z | 2020-10-10T14:07:37.000Z | # -*- coding: utf-8 -*-
import pytest
from django.db.models import QuerySet
from rest_framework.exceptions import NotFound
from apps.accounts.response_codes import INVALID_TOKEN
from apps.accounts.selectors.pending_action_selector import PendingActionSelector
from apps.accounts.tests.factories.pending_action import PendingActionFactory
from apps.accounts.tests.factories.user import UserFactory
from apps.agendas.models import DoctorProfile
from apps.agendas.response_codes import DOCTOR_NOT_FOUND
from apps.agendas.selectors.appointment import AppointmentSelector
from apps.agendas.selectors.doctor_profile import DoctorProfileSelector
from apps.agendas.tests.factories.doctor_profile import DoctorProfileFactory
from apps.contrib.api.exceptions import SimpleValidationError
from faker import Factory
from faker.providers import misc
faker = Factory.create()
faker.add_provider(misc)
@pytest.mark.django_db
class DoctorProfileSelectorTests:
@staticmethod
def test_get_by_uuid():
doctor_profile = DoctorProfileFactory()
selected_doctor_profile = DoctorProfileSelector.get_by_uuid(str(doctor_profile.uuid))
assert isinstance(doctor_profile, DoctorProfile)
assert selected_doctor_profile == doctor_profile
@staticmethod
def test_get_by_uuid_not_found():
with pytest.raises(NotFound) as exec_info:
DoctorProfileSelector.get_by_uuid(faker.uuid4())
assert exec_info.value.detail.code == DOCTOR_NOT_FOUND['code']
@staticmethod
def test_get_enabled_doctors(test_user):
inactive_user = UserFactory(is_active=False)
active_doctor = DoctorProfileFactory(user=test_user)
DoctorProfileFactory(user=inactive_user)
doctors = DoctorProfileSelector.get_enabled_doctors()
assert isinstance(doctors, QuerySet)
assert doctors.count() == 1
assert doctors.first() == active_doctor
@staticmethod
def test_get_enabled_doctors_empty():
inactive_user = UserFactory(is_active=False)
DoctorProfileFactory(user=inactive_user)
doctors = DoctorProfileSelector.get_enabled_doctors()
assert isinstance(doctors, QuerySet)
assert doctors.count() == 0
def test_get_by_username_or_email(self):
pass
def test_get_by_username_or_email_not_found(self):
pass
| 35.044776 | 93 | 0.774702 | 1,429 | 0.608603 | 0 | 0 | 1,452 | 0.618399 | 0 | 0 | 29 | 0.012351 |
5f0603321cb19a9d08d78984f4e1439bd2f1a90c | 121 | py | Python | python/testData/findUsages/GlobalUsages.py | jnthn/intellij-community | 8fa7c8a3ace62400c838e0d5926a7be106aa8557 | [
"Apache-2.0"
] | 2 | 2019-04-28T07:48:50.000Z | 2020-12-11T14:18:08.000Z | python/testData/findUsages/GlobalUsages.py | Cyril-lamirand/intellij-community | 60ab6c61b82fc761dd68363eca7d9d69663cfa39 | [
"Apache-2.0"
] | 173 | 2018-07-05T13:59:39.000Z | 2018-08-09T01:12:03.000Z | python/testData/findUsages/GlobalUsages.py | Cyril-lamirand/intellij-community | 60ab6c61b82fc761dd68363eca7d9d69663cfa39 | [
"Apache-2.0"
] | 2 | 2020-03-15T08:57:37.000Z | 2020-04-07T04:48:14.000Z | <caret>search_variable = 1
def function():
global search_variable
search_variable = 2
print(search_variable) | 20.166667 | 26 | 0.735537 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
5f06913d44d8487508a5267f1e736e022aea0e78 | 379 | py | Python | backend/api/tests/models/location_tests.py | Pachwenko/ember-django-example | cfed8a4519e307ea72a097336f9b07bfa5ee576f | [
"MIT"
] | null | null | null | backend/api/tests/models/location_tests.py | Pachwenko/ember-django-example | cfed8a4519e307ea72a097336f9b07bfa5ee576f | [
"MIT"
] | 1 | 2022-01-17T00:51:15.000Z | 2022-01-17T00:51:15.000Z | backend/api/tests/models/location_tests.py | Pachwenko/ember-django-example | cfed8a4519e307ea72a097336f9b07bfa5ee576f | [
"MIT"
] | null | null | null | from decimal import Decimal
import pytest
from api.tests.factories.location import LocationFactory
# potentially helpful fixtures provided by pytest-django
# https://pytest-django.readthedocs.io/en/latest/helpers.html#fixtures
@pytest.mark.django_db()
def test_create_rental():
LocationFactory(
lat=Decimal(20.123456789),
lng=Decimal(20.123456789),
)
| 23.6875 | 70 | 0.759894 | 0 | 0 | 0 | 0 | 147 | 0.387863 | 0 | 0 | 126 | 0.332454 |
5f073a6e60f359858de246c60af2ab6ba1d0660b | 293 | py | Python | dias/dia.py | GU1LH3RME-LIMA/pythonWebjs | 4786ef3b900f3b45522a0d3f0c4b83e1e68ae25b | [
"MIT"
] | null | null | null | dias/dia.py | GU1LH3RME-LIMA/pythonWebjs | 4786ef3b900f3b45522a0d3f0c4b83e1e68ae25b | [
"MIT"
] | null | null | null | dias/dia.py | GU1LH3RME-LIMA/pythonWebjs | 4786ef3b900f3b45522a0d3f0c4b83e1e68ae25b | [
"MIT"
] | null | null | null | import datetime
from flask import Flask, render_template
'''
simple algorithm to determine whether the day is even or odd
'''
app = Flask(__name__)
@app.route("/")
def index():
    hoje = datetime.datetime.now()  # object receives the current date
return render_template("index.html",dia=hoje.day)
| 22.538462 | 61 | 0.723549 | 0 | 0 | 0 | 0 | 145 | 0.493197 | 0 | 0 | 111 | 0.377551 |
5f0795f4ecbd539f9866bfa75241bdbacd313bed | 1,532 | py | Python | catalog/views/api.py | iraquitan/catalog-app-flask | 563981ddc8d55c62428cd4811bdea73ee8f8a846 | [
"MIT"
] | null | null | null | catalog/views/api.py | iraquitan/catalog-app-flask | 563981ddc8d55c62428cd4811bdea73ee8f8a846 | [
"MIT"
] | null | null | null | catalog/views/api.py | iraquitan/catalog-app-flask | 563981ddc8d55c62428cd4811bdea73ee8f8a846 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
* Created by PyCharm.
* Project: catalog
* Author name: Iraquitan Cordeiro Filho
* Author login: pma007
* File: api
* Date: 2/26/16
* Time: 11:26
* To change this template use File | Settings | File Templates.
"""
from flask import Blueprint, jsonify
from catalog.models import Category, Item
# Define api Blueprint for JSON endpoints
api = Blueprint('api', __name__)
@api.route('/catalog.json')
def catalog_api():
categories = Category.query.all()
all_result = []
for category in categories:
items = Item.query.filter_by(category_id=category.id).all()
result = category.serialize
result['Item'] = [i.serialize for i in items]
all_result.append(result)
return jsonify(Category=all_result)
@api.route('/category/<string:category_slug>.json')
def category_api(category_slug):
category = Category.query.filter_by(slugfield=category_slug).first_or_404()
return jsonify(category=category.serialize)
@api.route('/category/<string:category_slug>/items.json')
def category_items_api(category_slug):
category = Category.query.filter_by(slugfield=category_slug).first_or_404()
items = Item.query.filter_by(category_id=category.id).all()
result = category.serialize
result['item'] = [i.serialize for i in items]
return jsonify(category=result)
@api.route('/item/<string:item_slug>.json')
def item_api(item_slug):
item = Item.query.filter_by(slugfield=item_slug).first_or_404()
return jsonify(item=item.serialize)
| 30.039216 | 79 | 0.719321 | 0 | 0 | 0 | 0 | 1,114 | 0.727154 | 0 | 0 | 436 | 0.284595 |
5f0912cfcdcc52bdc014aa57f2387fcc7c7c1a0f | 1,854 | py | Python | tests/test_debug.py | HazemElAgaty/psycopg2-pgevents | c0952608777052ea2cb90d8c78802ad03f8f3da1 | [
"MIT"
] | 11 | 2019-07-12T17:25:36.000Z | 2021-06-07T12:51:31.000Z | tests/test_debug.py | HazemElAgaty/psycopg2-pgevents | c0952608777052ea2cb90d8c78802ad03f8f3da1 | [
"MIT"
] | 5 | 2020-06-21T14:58:21.000Z | 2021-09-06T09:34:32.000Z | tests/test_debug.py | HazemElAgaty/psycopg2-pgevents | c0952608777052ea2cb90d8c78802ad03f8f3da1 | [
"MIT"
] | 4 | 2019-07-12T17:25:37.000Z | 2021-07-13T13:26:58.000Z | from pytest import raises
from psycopg2_pgevents import debug
from psycopg2_pgevents.debug import log, set_debug
class TestDebug:
def test_set_debug_disabled(self):
debug._DEBUG_ENABLED = True
set_debug(False)
assert not debug._DEBUG_ENABLED
def test_set_debug_enabled(self):
debug._DEBUG_ENABLED = False
set_debug(True)
assert debug._DEBUG_ENABLED
def test_log_invalid_category(self, log_capture):
with raises(ValueError):
log("foo", category="warningwarningwarning")
logs = log_capture.actual()
assert len(logs) == 0
def test_log_debug_disabled(self, log_capture):
set_debug(False)
log("foo")
logs = log_capture.actual()
# Only log should be the one notifying that logging is being disabled
assert len(logs) == 1
def test_log_info(self, log_capture):
log("foo")
logs = log_capture.actual()
assert len(logs) == 1
assert ("pgevents", "INFO", "foo") == logs.pop()
def test_log_error(self, log_capture):
log("foo", category="error")
logs = log_capture.actual()
assert len(logs) == 1
assert ("pgevents", "ERROR", "foo") == logs.pop()
def test_log_args(self, log_capture):
log("foo %s %s %d", "bar", "baz", 1)
log("foo %(word1)s %(word2)s %(num)d", {"word2": "baz", "num": 1, "word1": "bar"})
logs = log_capture.actual()
assert len(logs) == 2
assert ("pgevents", "INFO", "foo bar baz 1") == logs.pop(0)
assert ("pgevents", "INFO", "foo bar baz 1") == logs.pop(0)
def test_log_custom_logger(self, log_capture):
log("foo", logger_name="test")
logs = log_capture.actual()
assert len(logs) == 1
assert ("test", "INFO", "foo") == logs.pop()
| 27.264706 | 90 | 0.600863 | 1,737 | 0.936893 | 0 | 0 | 0 | 0 | 0 | 0 | 338 | 0.182309 |
5f0a52e79ef2f2c527b1cb664f5e0e589f53a413 | 1,148 | py | Python | abstracto-application/installer/src/main/docker/deployment/python/templates_deploy.py | Sheldan/abstracto | cef46737c5f34719c80c71aa9cd68bc53aea9a68 | [
"MIT"
] | 5 | 2020-05-27T14:18:51.000Z | 2021-03-24T09:23:09.000Z | abstracto-application/installer/src/main/docker/deployment/python/templates_deploy.py | Sheldan/abstracto | cef46737c5f34719c80c71aa9cd68bc53aea9a68 | [
"MIT"
] | 5 | 2020-05-29T21:53:53.000Z | 2021-05-26T12:19:16.000Z | abstracto-application/installer/src/main/docker/deployment/python/templates_deploy.py | Sheldan/abstracto | cef46737c5f34719c80c71aa9cd68bc53aea9a68 | [
"MIT"
] | null | null | null | import glob
import os
import sqlalchemy as db
from sqlalchemy.sql import text
def deploy_template_folder(db_config, folder):
engine = db.create_engine('postgresql://%s:%s@%s:%s/%s' % (db_config.user, db_config.password, db_config.host, db_config.port, db_config.database))
if not os.path.isdir(folder):
print("Given path was not a folder. Exiting.")
exit(1)
files = glob.glob(folder + '/**/*.ftl', recursive=True)
templates = []
for file in files:
with open(file) as template_file:
file_content = template_file.read()
template_key = os.path.splitext(os.path.basename(file))[0]
template = {'key': template_key, 'content': file_content}
templates.append(template)
print('Deploying %s templates from folder %s' % (len(templates), folder))
with engine.connect() as con:
with con.begin():
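            # Upsert each template by key: insert new rows, overwrite the
            # stored content when the key already exists.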
statement = text("""INSERT INTO template(key, content, last_modified) VALUES(:key, :content, NOW()) ON CONFLICT (key) DO UPDATE SET content = :content""")
for line in templates:
con.execute(statement, **line)
| 37.032258 | 166 | 0.642857 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 268 | 0.233449 |
5f0a965a14ab29cfb59691e71680ca0613d8037e | 8,466 | py | Python | src/external.py | erick-dsnk/Electric | 7e8aad1f792321d7839717ed97b641bee7a4a64e | [
"Apache-2.0"
] | null | null | null | src/external.py | erick-dsnk/Electric | 7e8aad1f792321d7839717ed97b641bee7a4a64e | [
"Apache-2.0"
] | null | null | null | src/external.py | erick-dsnk/Electric | 7e8aad1f792321d7839717ed97b641bee7a4a64e | [
"Apache-2.0"
] | null | null | null | ######################################################################
# EXTERNAL #
######################################################################
from Classes.Metadata import Metadata
from subprocess import PIPE, Popen
from extension import *
from colorama import *
from utils import *
import mslex
import halo
import sys
def handle_python_package(package_name: str, mode: str, metadata: Metadata):
command = ''
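    # Probe for pip first; any stderr output from `pip --version` means Python
    # (or pip) is unavailable, so bail out with error code 0011.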
valid = Popen(mslex.split('pip --version'), stdin=PIPE, stdout=PIPE, stderr=PIPE)
_, err = valid.communicate()
if err:
click.echo(click.style('Python Is Not Installed. Exit Code [0011]', fg='red'))
disp_error_msg(get_error_message('0011', 'install'))
handle_exit('ERROR', None, metadata)
if mode == 'install':
command = 'python -m pip install --upgrade --no-input'
command += f' {package_name}'
proc = Popen(mslex.split(command), stdin=PIPE,
stdout=PIPE, stderr=PIPE)
py_version = sys.version.split()
for line in proc.stdout:
line = line.decode('utf-8')
if f'Collecting {package_name}' in line:
write(f'Python v{py_version[0]} :: Collecting {package_name}', 'green', metadata)
if 'Downloading' in line and package_name in line:
write(
f'Python v{py_version[0]} :: Downloading {package_name}', 'green', metadata)
if 'Installing collected packages' in line and package_name in line:
write(
f'Python v{py_version[0]} :: Installing {package_name}', 'green', metadata)
if f'Requirement already satisfied: {package_name} ' in line and package_name in line:
write(
f'Python v{py_version[0]} :: {package_name} Is Already Installed And On The Latest Version ==> {line.split()[-1]}', 'yellow', metadata)
if 'Successfully installed' in line and package_name in line:
ver = line.split('-')[1]
write(
f'Python v{py_version[0]} :: Successfully Installed {package_name} {ver}', 'green', metadata)
if 'You should consider upgrading via' in line:
wants = click.confirm(
'Would you like to upgrade your pip version?')
if wants:
write('Updating Pip Version', 'green', metadata)
Popen(mslex.split('python -m pip install --upgrade pip'))
elif mode == 'uninstall':
command = 'python -m pip uninstall --no-input --yes'
command += f' {package_name}'
proc = Popen(mslex.split(command), stdin=PIPE,
stdout=PIPE, stderr=PIPE)
py_version = sys.version.split()
for line in proc.stdout:
line = line.decode('utf-8')
if 'Uninstalling' in line and package_name in line:
write(
f'Python v{py_version[0]} :: Uninstalling {package_name}', 'green', metadata)
if 'Successfully uninstalled' in line and package_name in line:
ver = line.split('-')[1]
write(
f'Python v{py_version[0]} :: Successfully Uninstalled {package_name} {ver}', 'green', metadata)
_, err = proc.communicate()
if err:
err = err.decode('utf-8')
if f'WARNING: Skipping {package_name}' in err:
write(
f'Python v{py_version[0]} :: Could Not Find Any Installations Of {package_name}', 'yellow', metadata)
def handle_node_package(package_name: str, mode: str, metadata: Metadata):
version_proc = Popen(mslex.split('npm --version'), stdin=PIPE, stdout=PIPE, stderr=PIPE, shell=True)
version, err = version_proc.communicate()
version = version.decode().strip()
if err:
click.echo(click.style('npm Or node Is Not Installed. Exit Code [0011]', fg='bright_yellow'))
disp_error_msg(get_error_message('0011', 'install'))
handle_exit('ERROR', None, metadata)
if mode == 'install':
proc = Popen(mslex.split(f'npm i {package_name} -g'), stdin=PIPE, stdout=PIPE, stderr=PIPE, shell=True)
write(f'npm v{version} :: Collecting {package_name}', 'green', metadata)
package_version = None
for line in proc.stdout:
line = line.decode()
if 'node install.js' in line:
write(f'npm v{version} :: Running `node install.js` for {package_name}', 'green', metadata)
if package_name in line and '@' in line and 'install' in line or ' postinstall' in line:
package_version = line.split()[1]
write(f'npm v{version} :: {package_version} Installing To <=> "{line.split()[3]}"', 'green', metadata)
if 'Success' in line and package_name in line or 'added' in line:
write(f'npm v{version} :: Successfully Installed {package_version}', 'green', metadata)
if 'updated' in line:
if package_version:
                    write(f'npm v{version} :: Successfully Updated {package_version}', 'green', metadata)
                else:
                    write(f'npm v{version} :: Successfully Updated {package_name}', 'green', metadata)
else:
proc = Popen(mslex.split(f'npm uninstall -g {package_name}'), stdin=PIPE, stdout=PIPE, stderr=PIPE, shell=True)
for line in proc.stdout:
line = line.decode()
if 'up to date' in line:
write(f'npm v{version} :: Could Not Find Any Existing Installations Of {package_name}', 'yellow', metadata)
if 'removed' in line:
number = line.split(' ')[1].strip()
time = line.split(' ')[4].strip()
                write(f'npm v{version} :: Successfully Uninstalled {package_name} And {number} Other Dependencies in {time}', 'green', metadata)
def handle_vscode_extension(package_name: str, mode: str, metadata: Metadata):
try:
version_proc = Popen(mslex.split('code --version'), stdin=PIPE, stdout=PIPE, stderr=PIPE, shell=True)
except FileNotFoundError:
click.echo(click.style('Visual Studio Code Or vscode Is Not Installed. Exit Code [0111]', fg='bright_yellow'))
disp_error_msg(get_error_message('0111', 'install'))
handle_exit('ERROR', None, metadata)
version, err = version_proc.communicate()
version = version.decode().strip().split('\n')[0]
if err:
click.echo(click.style('Visual Studio Code Or vscode Is Not Installed. Exit Code [0111]', fg='bright_yellow'))
disp_error_msg(get_error_message('0111', 'install'))
handle_exit('ERROR', None, metadata)
if mode == 'install':
command = f'code --install-extension {package_name} --force'
proc = Popen(mslex.split(command), stdin=PIPE, stdout=PIPE, stderr=PIPE, shell=True)
for line in proc.stdout:
line = line.decode()
if 'Installing extensions' in line:
write(f'Code v{version} :: Installing {Fore.MAGENTA}{package_name}{Fore.RESET}', 'green', metadata)
if 'is already installed' in line:
write(f'{Fore.GREEN}Code v{version} :: {Fore.MAGENTA}{package_name}{Fore.YELLOW} is already installed!', 'white', metadata)
if 'was successfully installed' in line:
write(f'{Fore.GREEN}Code v{version} :: Successfully Installed {Fore.MAGENTA}{package_name}{Fore.RESET}', 'green', metadata)
if mode == 'uninstall':
command = f'code --uninstall-extension {package_name} --force'
proc = Popen(mslex.split(command), stdin=PIPE, stdout=PIPE, stderr=PIPE, shell=True)
for line in proc.stdout:
line = line.decode()
if 'Uninstalling' in line:
write(f'Code v{version} :: Uninstalling {Fore.MAGENTA}{package_name}{Fore.RESET}', 'green', metadata)
if 'is not installed' in line:
write(f'{Fore.GREEN}Code v{version} :: {Fore.MAGENTA}{package_name}{Fore.YELLOW} is not installed!', 'white', metadata)
if 'was successfully uninstalled' in line:
write(f'{Fore.GREEN}Code v{version} :: Successfully Uninstalled {Fore.MAGENTA}{package_name}{Fore.RESET}', 'green', metadata)
| 46.516484 | 155 | 0.583038 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,375 | 0.398653 |
5f0b1b6f16ccc30241e064e1c6cda37d2700becb | 3,242 | py | Python | testing/regrid/testGhostedDistArray.py | xylar/cdat | 8a5080cb18febfde365efc96147e25f51494a2bf | [
"BSD-3-Clause"
] | 62 | 2018-03-30T15:46:56.000Z | 2021-12-08T23:30:24.000Z | testing/regrid/testGhostedDistArray.py | xylar/cdat | 8a5080cb18febfde365efc96147e25f51494a2bf | [
"BSD-3-Clause"
] | 114 | 2018-03-21T01:12:43.000Z | 2021-07-05T12:29:54.000Z | testing/regrid/testGhostedDistArray.py | CDAT/uvcdat | 5133560c0c049b5c93ee321ba0af494253b44f91 | [
"BSD-3-Clause"
] | 14 | 2018-06-06T02:42:47.000Z | 2021-11-26T03:27:00.000Z | import distarray
import numpy
import unittest
from mpi4py import MPI
class TestGhostedDistArray(unittest.TestCase):
"""
Test distributed array
"""
def setUp(self):
pass
def test_test0(self):
"""
Test constructors
"""
da = distarray.ghZeros( (2,3), numpy.float64 )
da.free()
da = distarray.ghOnes( (2,3), numpy.float64 )
da.free()
da = distarray.ghArray( [1,2,3] )
da.free()
def test_test1(self):
"""
1D, float64
"""
dtyp = numpy.float64
# MPI stuff
comm = MPI.COMM_WORLD
rk = comm.Get_rank()
sz = comm.Get_size()
# create the dist array
n = 10
da = distarray.ghZeros( (n,), dtyp, ghostWidth=1 )
# set data
da[:] = 100*rk + numpy.array([i for i in range(n)], dtyp)
# access remote data
leftRk = (rk - 1) % sz
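        # Each rank reads its left neighbour's boundary slice through the ghost
        # window (wrapping around at rank 0). Values are 100*rank + index, so
        # the fetched element differs from the local last element by a multiple
        # of 100 determined by the rank difference.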
print 'proc %d tries to access data from %d' % (rk, leftRk)
leftData = da.get(pe=leftRk, winID=(1,))
print 'leftData for rank %d = %s' % (rk, str(leftData))
# check
if leftRk < rk:
self.assertEqual(leftData[0], da[-1] - 100)
else:
self.assertEqual(leftData[0], da[-1] + 100*(sz-1))
# free
da.free()
def test_test2(self):
"""
1D, float32
"""
dtyp = numpy.float64
# MPI stuff
comm = MPI.COMM_WORLD
rk = comm.Get_rank()
sz = comm.Get_size()
# create the dist array
n = 10
da = distarray.ghZeros( (n,), dtyp, ghostWidth=1 )
# set data
da[:] = 100*rk + numpy.array([i for i in range(n)], dtyp)
# access remote data
leftRk = (rk - 1) % sz
print 'proc %d tries to access data from %d' % (rk, leftRk)
leftData = da.get(pe=leftRk, winID=(1,))
print 'leftData for rank %d = %s' % (rk, str(leftData))
# check
if leftRk < rk:
self.assertEqual(leftData[0], da[-1] - 100)
else:
self.assertEqual(leftData[0], da[-1] + 100*(sz-1))
# free
da.free()
def test_test3(self):
"""
1D, int
"""
dtyp = numpy.int64
# MPI stuff
comm = MPI.COMM_WORLD
rk = comm.Get_rank()
sz = comm.Get_size()
# create the dist array
n = 10
da = distarray.ghZeros( (n,), dtyp, ghostWidth=1 )
# set data
da[:] = 100*rk + numpy.array([i for i in range(n)], dtyp)
# access remote data
leftRk = (rk - 1) % sz
print 'proc %d tries to access data from %d' % (rk, leftRk)
leftData = da.get(pe=leftRk, winID=(1,))
print 'leftData for rank %d = %s' % (rk, str(leftData))
# check
if leftRk < rk:
self.assertEqual(leftData[0], da[-1] - 100)
else:
self.assertEqual(leftData[0], da[-1] + 100*(sz-1))
# free
da.free()
if __name__ == '__main__':
print "" # Spacer
suite = unittest.TestLoader().loadTestsFromTestCase(TestGhostedDistArray)
unittest.TextTestRunner(verbosity = 1).run(suite)
MPI.Finalize()
| 26.57377 | 77 | 0.507403 | 2,947 | 0.909007 | 0 | 0 | 0 | 0 | 0 | 0 | 627 | 0.193399 |
5f0b9146ca28c5866c71c4fff522e7ed582731d7 | 3,900 | py | Python | cir/user_views.py | wafield/cir | 123d4bfe3e5bb4b0d605de486a91a0cb7eb34e4c | [
"MIT"
] | null | null | null | cir/user_views.py | wafield/cir | 123d4bfe3e5bb4b0d605de486a91a0cb7eb34e4c | [
"MIT"
] | null | null | null | cir/user_views.py | wafield/cir | 123d4bfe3e5bb4b0d605de486a91a0cb7eb34e4c | [
"MIT"
] | 1 | 2018-06-23T21:11:53.000Z | 2018-06-23T21:11:53.000Z | import json
from django.contrib.auth import authenticate, login, logout
from django.http import HttpResponse
from django.utils import timezone
from django.contrib.auth.signals import user_logged_in
from cir.models import *
VISITOR_ROLE = 'visitor'
def login_view(request):
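    """Authenticate a user by e-mail and password, log them in and return id, name and forum role as JSON."""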
response = {}
email = request.REQUEST.get('email').lower()
password = request.REQUEST.get('password')
users = User.objects.filter(username=email)
if users.count() != 1:
return HttpResponse("Your user name and/or password is incorrect.", status=403)
user = authenticate(username=users[0].username, password=password)
if user:
login(request, user)
# request.session['user_id'] = user.id
response['user_id'] = user.id
response['user_name'] = user.get_full_name()
request.session['role'] = VISITOR_ROLE
try:
forum = Forum.objects.get(id=request.session['forum_id'])
if request.session['forum_id'] != -1:
request.session['role'] = Role.objects.get(user=user, forum=forum).role
except:
pass
response['role'] = request.session['role']
return HttpResponse(json.dumps(response), mimetype='application/json')
else:
return HttpResponse("Your user name and/or password is incorrect.", status=403)
def register(request):
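    """Create a new user account plus its UserInfo record, log the user in and return their details as JSON."""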
response = {}
email = request.REQUEST.get('email').lower()
if User.objects.filter(username=email).count() > 0:
return HttpResponse("This user already exists; please sign in.", status=403)
password = request.POST['password']
description = request.POST['description']
user = User.objects.create_user(email, email, password)
user.first_name = request.POST['first-name']
user.last_name = request.POST['last-name']
user.save()
userinfo = UserInfo(user=user, description=description, last_visited_forum=None)
userinfo.save()
user = authenticate(username=email, password=password)
if user:
login(request, user)
# request.session['user_id'] = user.id
response['user_id'] = user.id
response['user_name'] = user.get_full_name()
response['role'] = VISITOR_ROLE
return HttpResponse(json.dumps(response), mimetype='application/json')
else:
return HttpResponse("Unknown error.", status=403)
def logout_view(request):
forum_id = request.session['forum_id']
logout(request)
# request.session['user_id'] = -1
request.session['forum_id'] = forum_id
return HttpResponse(json.dumps({}), mimetype='application/json')
def change_info(request):
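    """Get or update the signed-in user's profile; supports the 'get', 'set-pw' and 'set-info' actions."""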
if not request.user.is_authenticated():
return HttpResponse("Please log in first.", status=403)
user = request.user
action = request.REQUEST.get('action')
if action == 'get':
response = {}
response['first_name'] = user.first_name
response['last_name'] = user.last_name
response['email'] = user.email
response['description'] = user.info.description
return HttpResponse(json.dumps(response), mimetype='application/json')
if action == 'set-pw':
old_pw = request.REQUEST.get('old_password')
if not user.check_password(old_pw):
return HttpResponse("Password incorrect.", status=403)
new_pw = request.REQUEST.get('new_password')
user.set_password(new_pw)
user.save()
return HttpResponse(json.dumps({}), mimetype='application/json')
if action == 'set-info':
response = {}
user.first_name = request.REQUEST.get('first-name')
user.last_name = request.REQUEST.get('last-name')
user.info.description = request.REQUEST.get('description')
user.info.save()
user.save()
response['user_name'] = user.get_full_name()
return HttpResponse(json.dumps(response), mimetype='application/json')
| 39 | 87 | 0.661282 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 749 | 0.192051 |
5f0bcf77d0e89f7eeb81cfefde1fb86ef9a0fc3f | 2,844 | py | Python | LeetCode/Python3/HashTable/451. Sort Characters By Frequency.py | WatsonWangZh/CodingPractice | dc057dd6ea2fc2034e14fd73e07e73e6364be2ae | [
"MIT"
] | 11 | 2019-09-01T22:36:00.000Z | 2021-11-08T08:57:20.000Z | LeetCode/Python3/HashTable/451. Sort Characters By Frequency.py | WatsonWangZh/LeetCodePractice | dc057dd6ea2fc2034e14fd73e07e73e6364be2ae | [
"MIT"
] | null | null | null | LeetCode/Python3/HashTable/451. Sort Characters By Frequency.py | WatsonWangZh/LeetCodePractice | dc057dd6ea2fc2034e14fd73e07e73e6364be2ae | [
"MIT"
] | 2 | 2020-05-27T14:58:52.000Z | 2020-05-27T15:04:17.000Z | # Given a string, sort it in decreasing order based on the frequency of characters.
# Example 1:
# Input:
# "tree"
# Output:
# "eert"
# Explanation:
# 'e' appears twice while 'r' and 't' both appear once.
# So 'e' must appear before both 'r' and 't'. Therefore "eetr" is also a valid answer.
# Example 2:
# Input:
# "cccaaa"
# Output:
# "cccaaa"
# Explanation:
# Both 'c' and 'a' appear three times, so "aaaccc" is also a valid answer.
# Note that "cacaca" is incorrect, as the same characters must be together.
# Example 3:
# Input:
# "Aabb"
# Output:
# "bbAa"
# Explanation:
# "bbaA" is also a valid answer, but "Aabb" is incorrect.
# Note that 'A' and 'a' are treated as two different characters.
import collections
import heapq
class Solution:
def frequencySort(self, s: str) -> str:
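        # NOTE: three alternative implementations are kept below for reference;
        # only Method 1 actually runs, because it returns before the later blocks.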
        # Method 1: simulation. O(nlogn) time, O(n) space
if not s:
return s
# Convert s to a list.
s = list(s)
# Sort the characters in s.
s.sort()
# Make a list of strings, one for each unique char.
all_strings = []
cur_sb = [s[0]]
for c in s[1:]:
# If the last character on string builder is different...
if cur_sb[-1] != c:
all_strings.append("".join(cur_sb))
cur_sb = []
cur_sb.append(c)
all_strings.append("".join(cur_sb))
# Sort the strings by length from *longest* to shortest.
all_strings.sort(key=lambda string : len(string), reverse=True)
# Convert to a single string to return.
# Converting a list of strings to a string is often done
# using this rather strange looking python idiom.
return "".join(all_strings)
# ====================================
        # Method 2: hash table + sorting. O(nlogn) time, O(n) space
        # Count the occurrence of each character
cnt = collections.defaultdict(int)
for c in s:
cnt[c] += 1
# Sort and Build string
res = []
for k, v in sorted(cnt.items(), key = lambda x: -x[1]):
res += [k] * v
return "".join(res)
# ====================================
        # Variant of Method 2 using Counter.most_common(). O(nlogk) time, O(n) space
        # Count the occurrence of each character
cnt = collections.Counter(s)
# Build string
res = []
for k, v in cnt.most_common():
res += [k] * v
return "".join(res)
# ====================================
        # Method 3: hash table + priority queue (heap). O(nlogk) time, O(n) space
        # Count the occurrence of each character
cnt = collections.Counter(s)
# Build heap
heap = [(-v, k) for k, v in cnt.items()]
heapq.heapify(heap)
# Build string
res = []
while heap:
v, k = heapq.heappop(heap)
res += [k] * -v
return ''.join(res) | 27.61165 | 86 | 0.522504 | 2,137 | 0.743563 | 0 | 0 | 0 | 0 | 0 | 0 | 1,461 | 0.508351 |
5f0bf2755cfa5ea302283c30bc9e0ccfd4f8893d | 1,837 | py | Python | ttkinter_app.py | bombero2020/python_tools | 393092609c4555e47b9789eb3fcb614ea25fdef9 | [
"MIT"
] | null | null | null | ttkinter_app.py | bombero2020/python_tools | 393092609c4555e47b9789eb3fcb614ea25fdef9 | [
"MIT"
] | null | null | null | ttkinter_app.py | bombero2020/python_tools | 393092609c4555e47b9789eb3fcb614ea25fdef9 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import tkinter as tk
from tkinter import ttk
class HashCorpFrame(ttk.Frame):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.name_entry = ttk.Entry(self)
self.name_entry.pack()
self.greet_button = ttk.Button(
self, text="Saludar", command=self.say_hello)
self.greet_button.pack()
self.greet_label = ttk.Label(self)
self.greet_label.pack()
def say_hello(self):
self.greet_label["text"] = \
"¡Hola, {}!".format(self.name_entry.get())
class AboutFrame(ttk.Frame):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.label = ttk.Label(self)
self.label["text"] = ("Visitanos en recursospython.com y "
"foro.recursospython.com.")
self.label.pack()
self.web_button = ttk.Button(self, text="Visitar web")
self.web_button.pack(pady=10)
self.forum_button = ttk.Button(self, text="Visitar foro")
self.forum_button.pack()
class Application(ttk.Frame):
def __init__(self, main_window):
super().__init__(main_window)
main_window.title("Hashing Coorp.")
        main_window.geometry('700x400') # width x height
main_window.configure(bg = 'beige')
self.notebook = ttk.Notebook(self)
self.hashcorp_frame = HashCorpFrame(self.notebook)
self.notebook.add(
self.hashcorp_frame, text="Saludos", padding=10)
self.about_frame = AboutFrame(self.notebook)
self.notebook.add(
self.about_frame, text="Acerca de", padding=10)
self.notebook.pack(padx=10, pady=10)
self.pack()
main_window = tk.Tk()
app = Application(main_window)
app.mainloop() | 27.41791 | 66 | 0.616222 | 1,668 | 0.907508 | 0 | 0 | 0 | 0 | 0 | 0 | 237 | 0.128945 |
5f0dcd6e6a26bb27177e11fcbcba91b603bd720d | 8,513 | py | Python | api/src/dojo.py | mosoriob/dojo | 71bba04c4fdc4224320087b4c400fcba91b6597d | [
"MIT"
] | 1 | 2021-10-08T00:47:58.000Z | 2021-10-08T00:47:58.000Z | api/src/dojo.py | mosoriob/dojo | 71bba04c4fdc4224320087b4c400fcba91b6597d | [
"MIT"
] | null | null | null | api/src/dojo.py | mosoriob/dojo | 71bba04c4fdc4224320087b4c400fcba91b6597d | [
"MIT"
] | null | null | null |
import uuid
from typing import List
from elasticsearch import Elasticsearch
from elasticsearch.exceptions import NotFoundError
from fastapi import APIRouter, Response, status
from validation import DojoSchema
from src.settings import settings
import logging
logger = logging.getLogger(__name__)
router = APIRouter()
es = Elasticsearch([settings.ELASTICSEARCH_URL], port=settings.ELASTICSEARCH_PORT)
def search_by_model(model_id):
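    """Build an Elasticsearch term query that matches documents by their model_id."""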
q = {"query": {"term": {"model_id.keyword": {"value": model_id, "boost": 1.0}}}}
return q
def search_and_scroll(index, query=None, size=10, scroll_id=None):
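    """Run an index search (optionally resuming a scroll) and return the hit count, scroll id and result documents."""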
if query:
q = {
"query": {
"query_string": {
"query": query,
}
},
}
else:
q = {"query": {"match_all": {}}}
if not scroll_id:
# we need to kick off the query
results = es.search(index=index, body=q, scroll="2m", size=size)
else:
# otherwise, we can use the scroll
results = es.scroll(scroll_id=scroll_id, scroll="2m")
# get count
count = es.count(index=index, body=q)
# if results are less than the page size (10) don't return a scroll_id
if len(results["hits"]["hits"]) < size:
scroll_id = None
else:
scroll_id = results.get("_scroll_id", None)
return {
"hits": count["count"],
"scroll_id": scroll_id,
"results": [i["_source"] for i in results["hits"]["hits"]],
}
@router.post("/dojo/directive")
def create_directive(payload: DojoSchema.ModelDirective):
"""
Create a `directive` for a model. This is the command which is used to execute
the model container. The `directive` is templated out using Jinja, where each templated `{{ item }}`
maps directly to the name of a specific `parameter.
"""
try:
es.update(index="directives", body={"doc": payload.dict()}, id=payload.model_id)
return Response(
status_code=status.HTTP_200_OK,
headers={"location": f"/dojo/directive/{payload.model_id}"},
content=f"Created directive for model with id = {payload.model_id}",
)
except NotFoundError:
es.index(index="directives", body=payload.json(), id=payload.model_id)
return Response(
status_code=status.HTTP_201_CREATED,
headers={"location": f"/dojo/directive/{payload.model_id}"},
content=f"Created directive for model with id = {payload.model_id}",
)
@router.get("/dojo/directive/{model_id}")
def get_directive(model_id: str) -> DojoSchema.ModelDirective:
results = es.search(index="directives", body=search_by_model(model_id))
try:
directive = results["hits"]["hits"][-1]["_source"]
return directive
except:
return Response(
status_code=status.HTTP_404_NOT_FOUND,
content=f"Directive for model {model_id} not found.",
)
@router.post("/dojo/config")
def create_configs(payload: List[DojoSchema.ModelConfig]):
"""
Create one or more model `configs`. A `config` is a settings file which is used by the model to
set a specific parameter level. Each `config` is stored to S3, templated out using Jinja, where each templated `{{ item }}`
maps directly to the name of a specific `parameter.
"""
for p in payload:
es.index(index="configs", body=p.json(), id=p.id)
return Response(
status_code=status.HTTP_201_CREATED,
headers={"location": f"/api/dojo/config/{p.id}"},
content=f"Created config(s) for model with id = {p.model_id}",
)
@router.get("/dojo/config/{model_id}")
def get_configs(model_id: str) -> List[DojoSchema.ModelConfig]:
results = es.search(index="configs", body=search_by_model(model_id))
try:
return [i["_source"] for i in results["hits"]["hits"]]
except:
return Response(
status_code=status.HTTP_404_NOT_FOUND,
content=f"Config(s) for model {model_id} not found.",
)
@router.post("/dojo/outputfile")
def create_outputfiles(payload: List[DojoSchema.ModelOutputFile]):
"""
Create an `outputfile` for a model. Each `outputfile` represents a single file that is created upon each model
execution. Here we store key metadata about the `outputfile` which enables us to find it within the container and
normalize it into a CauseMos compliant format.
"""
for p in payload:
es.index(index="outputfiles", body=p.json(), id=p.id)
return Response(
status_code=status.HTTP_201_CREATED,
headers={"location": f"/api/dojo/outputfile/{p.id}"},
content=f"Created outputfile(s) for model with id = {p.model_id}",
)
@router.get("/dojo/outputfile/{model_id}")
def get_outputfiles(model_id: str) -> List[DojoSchema.ModelOutputFile]:
results = es.search(index="outputfiles", body=search_by_model(model_id))
try:
return [i["_source"] for i in results["hits"]["hits"]]
except:
return Response(
status_code=status.HTTP_404_NOT_FOUND,
content=f"Outputfile(s) for model {model_id} not found.",
)
### Accessories Endpoints
@router.get("/dojo/accessories/{model_id}")
def get_accessory_files(model_id: str) -> List[DojoSchema.ModelAccessory]:
"""
Get the `accessory files` for a model.
Each `accessory file` represents a single file that is created to be
associated with the model. Here we store key metadata about the
`accessory file` which enables us to find it within the container and
provide it to Uncharted.
"""
try:
results = es.search(index="accessories", body=search_by_model(model_id))
return [i["_source"] for i in results["hits"]["hits"]]
except:
return Response(
status_code=status.HTTP_404_NOT_FOUND,
content=f"Accessory file(s) for model {model_id} not found.",
)
@router.post("/dojo/accessories")
def create_accessory_file(payload: DojoSchema.ModelAccessory):
"""
Create or update an `accessory file` for a model.
`id` is optional and will be assigned a uuid by the API.
Each `accessory file` represents a single file that is created to be
associated with the model. Here we store key metadata about the
`accessory file` which enables us to find it within the container and
provide it to Uncharted.
"""
try:
payload.id = uuid.uuid4() # update payload with uuid
es.update(index="accessories", body={"doc": payload.dict()}, id=payload.id)
return Response(
status_code=status.HTTP_200_OK,
headers={"location": f"/dojo/accessory/{payload.model_id}"},
content=f"Created accessory for model with id = {payload.model_id}",
)
except NotFoundError:
es.index(index="accessories", body=payload.json(), id=payload.id)
return Response(
status_code=status.HTTP_201_CREATED,
headers={"location": f"/dojo/accessory/{payload.model_id}"},
content=f"Created accessory for model with id = {payload.model_id}",
)
@router.put("/dojo/accessories")
def create_accessory_files(payload: List[DojoSchema.ModelAccessory]):
"""
The PUT would overwrite the entire array with a new array.
For each, create an `accessory file` for a model.
`id` is optional and will be assigned a uuid by the API.
Each `accessory file` represents a single file that is created to be
associated with the model. Here we store key metadata about the
`accessory file` which enables us to find it within the container and
provide it to Uncharted.
"""
if len(payload) == 0:
        return Response(status_code=status.HTTP_400_BAD_REQUEST, content="No payload")
# Delete previous entries.
try:
results = es.search(index="accessories", body=search_by_model(payload[0].model_id))
for i in results["hits"]["hits"]:
es.delete(index="accessories", id=i["_source"]["id"])
except Exception as e:
logger.error(e)
# Add the new entries.
for p in payload:
p.id = uuid.uuid4() # update payload with uuid
es.index(index="accessories", body=p.json(), id=p.id)
return Response(
status_code=status.HTTP_201_CREATED,
headers={"location": f"/api/dojo/accessory/{p.id}"},
content=f"Created accessories(s) for model with id = {p.model_id}",
)
| 35.470833 | 127 | 0.649595 | 0 | 0 | 0 | 0 | 6,966 | 0.818278 | 0 | 0 | 3,808 | 0.447316 |
5f0f752c5211938014f35ccb9166c1413d779264 | 3,313 | py | Python | test/test_prob_models.py | sylar233/de-identification | 44731e9c22de647063bd82a19936b4c5a144ea6e | [
"Apache-2.0"
] | 5 | 2016-11-07T12:54:51.000Z | 2018-12-15T00:20:26.000Z | test/test_prob_models.py | sylar233/de-identification | 44731e9c22de647063bd82a19936b4c5a144ea6e | [
"Apache-2.0"
] | 5 | 2016-07-05T06:06:31.000Z | 2016-07-27T05:21:36.000Z | test/test_prob_models.py | sylar233/de-identification | 44731e9c22de647063bd82a19936b4c5a144ea6e | [
"Apache-2.0"
] | 3 | 2018-07-18T07:32:43.000Z | 2021-11-05T05:25:55.000Z | from django.test import TestCase
from common.data_utilities import DataUtils
from prob_models.dep_graph import DependencyGraph
from prob_models.jtree import JunctionTree
import common.constant as c
TESTING_FILE = c.TEST_DATA_PATH
"""
The test file has four fields, and the dependency graph would be a complete graph.
The junction tree has only one clique.
"""
class DependencyGraphTests(TestCase):
def setUp(self):
self.data = DataUtils(TESTING_FILE)
def test_dep_graph_edges_length_is_6(self):
"""
Test the Dependency graph computation
"""
dep_graph = DependencyGraph(self.data)
edges = dep_graph.get_dep_edges()
#print self.data.get_domain()
self.assertEqual(len(edges) == 3, True)
def test_dep_graph_with_white_list(self):
dep_graph = DependencyGraph(self.data, white_list = [['Age', 'Income', 'TRV'], ['DGF', 'HTN']])
edges = dep_graph.get_dep_edges()
def dep_graph_without_noise(self):
dep_graph = DependencyGraph(self.data, noise_flag = False)
self.assertEqual(
dep_graph.get_dep_edges() == [['Height', 'HTN'], ['Weight', 'HTN'], ['Income', 'TRV']],
True)
def test_dep_graph_contruct_from_edges(self):
edges = [['A','B'], ['B','C'], ['C', 'D'], ['D', 'E']]
dep_graph = DependencyGraph(edges = edges)
self.assertEqual(dep_graph.get_dep_edges() == edges, True)
def test_dep_graph_add_white_list(self):
edges = [['A','B'], ['B','C'], ['C', 'D'], ['D', 'E'], ['F']]
white_list = [['A','B', 'C'], ['C', 'D', 'F']]
dep_graph = DependencyGraph(edges = edges)
self.assertEqual(dep_graph.set_white_list(white_list).get_dep_edges() == [['A', 'B'], ['B', 'C'], ['C', 'D'], ['D', 'E'], ['F'], ('A', 'B'), ('A', 'C'), ('B', 'C'), ('C', 'D'), ('C', 'F'), ('D', 'F')], True)
class JunctionTreeTests(TestCase):
def setUp(self):
self.data = DataUtils(TESTING_FILE)
self.dep_graph = DependencyGraph(self.data)
self.edges = self.dep_graph.get_dep_edges()
self.nodes = self.data.get_nodes_name()
self.jtree_path = c.TEST_JTREE_FILE_PATH
def test_jtree_without_noise(self):
dep_graph = DependencyGraph(self.data, noise_flag = False)
edges = dep_graph.get_dep_edges()
jtree = JunctionTree(edges, self.nodes, self.jtree_path)
cliques = jtree.get_jtree()['cliques']
self.assertEqual(cliques == [['HTN', 'Height'], ['HTN', 'Weight'], ['Income', 'TRV'], ['Age'], ['DGF']], True)
def test_jtree_with_white_list(self):
dep_graph = DependencyGraph(self.data, white_list = [['Age', 'Income', 'TRV'], ['DGF', 'HTN']])
edges = dep_graph.get_dep_edges()
jtree = JunctionTree(edges, self.nodes, self.jtree_path)
cliques = jtree.get_jtree()['cliques']
self.assertEqual(cliques == [['HTN', 'Height'], ['HTN', 'Weight'], ['HTN', 'DGF'], ['Income', 'TRV', 'Age']], True)
def test_build_jtree_then_check_jtree_file(self):
self.TestA()
self.TestB()
def TestA(self):
"""
The dependency graph is a complete graph,
so there is only one clique in the junction tree
"""
jtree = JunctionTree(self.edges, self.nodes, self.jtree_path)
jtreepy = jtree.get_jtree()
#print jtreepy
self.assertEqual(len(jtreepy) == 3, True)
def TestB(self):
import os, time
from stat import *
st = os.stat(self.jtree_path)
now = time.time()
# TODO: Need to know this file is new modified
#self.assertEqual((st.st_mtime - now) < 100000, True) | 35.244681 | 209 | 0.677634 | 2,948 | 0.889828 | 0 | 0 | 0 | 0 | 0 | 0 | 772 | 0.233021 |
5f10c305acc4e613b5656eb25e050b130ecbb7b2 | 631 | py | Python | examples/house_prices_kaggle.py | ChillBoss/ml_automation | 50d42b3cd5a3bb2f7a91e4c53bf3bbfe7a3b1741 | [
"MIT"
] | null | null | null | examples/house_prices_kaggle.py | ChillBoss/ml_automation | 50d42b3cd5a3bb2f7a91e4c53bf3bbfe7a3b1741 | [
"MIT"
] | null | null | null | examples/house_prices_kaggle.py | ChillBoss/ml_automation | 50d42b3cd5a3bb2f7a91e4c53bf3bbfe7a3b1741 | [
"MIT"
] | null | null | null | # Regression Task, assumption is that the data is in the right directory
# data can be taken from https://www.kaggle.com/c/house-prices-advanced-regression-techniques/data
import os
import ml_automation
if __name__ == '__main__':
data_dir = os.path.join(os.path.dirname(__file__), 'data')
f_train = os.path.join(data_dir, 'train.csv')
f_test = os.path.join(data_dir, 'test.csv')
#training
ml_automation.automate(path=f_train,
ignore_cols=['Id'],
out_dir='model')
#predictions
preds = ml_automation.predict(f_test, model_dir='model')
print(preds)
| 30.047619 | 98 | 0.66561 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 246 | 0.389857 |
5f10f60ec6549819aee0320bd7db378dfb94aabf | 3,232 | py | Python | src/bel_commons/explorer_toolbox.py | cthoyt/pybel-web | a27f30617b9209d5531a6b65760597f8d45e9957 | [
"MIT"
] | 2 | 2019-07-17T16:17:44.000Z | 2019-07-18T17:05:36.000Z | src/bel_commons/explorer_toolbox.py | cthoyt/pybel-web | a27f30617b9209d5531a6b65760597f8d45e9957 | [
"MIT"
] | 3 | 2020-04-25T17:30:58.000Z | 2020-04-25T17:32:11.000Z | src/bel_commons/explorer_toolbox.py | cthoyt/pybel-web | a27f30617b9209d5531a6b65760597f8d45e9957 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""Constants for building the biological network explorer's transformations toolbox."""
from typing import List, Tuple
from pybel.struct.pipeline.decorators import mapped
# Default NetworkX explorer toolbox functions (name, button text, description)
_explorer_toolbox = (
('collapse_to_genes', 'Collapse to Genes', 'Collapse proteins and RNAs to genes'),
('collapse_all_variants', 'Collapse Variants', 'Collapse Variants to their Parent Nodes'),
('collapse_to_protein_interactions', 'Protein Interaction Network',
'Reduce the Network to Interactions between Proteins'),
('enrich_protein_and_rna_origins', 'Expand Protein Origins',
'Adds RNAs corresponding to Proteins, then adds Genes corresponding to RNAs and miRNAs'),
('prune_protein_rna_origins', 'Prune Genes/RNAs',
'Delete genes/RNAs that only have transcription/translation edges'),
('expand_periphery', 'Expand Periphery', 'Expand the periphery of the network'),
('expand_internal', 'Expand Internal', 'Adds missing edges between nodes in the network'),
('remove_isolated_nodes', 'Remove Isolated Nodes', 'Remove from the network all isolated nodes'),
('get_largest_component', 'Get Largest Component', 'Retain only the largest component and removes all others'),
('enrich_unqualified', 'Enrich unqualified edges', 'Adds unqualified edges from the universe'),
('remove_associations', 'Remove Associations', 'Remove associative relations'),
('remove_pathologies', 'Remove Pathologies', 'Removes all pathology nodes'),
('remove_biological_processes', 'Remove Biological Processes', 'Removes all biological process nodes'),
)
_bio2bel_functions = (
(
'enrich_rnas',
'Enrich RNA controllers from miRTarBase',
'Adds the miRNA controllers of RNA nodes from miRTarBase'
), (
'enrich_mirnas',
'Enrich miRNA targets',
'Adds the RNA targets of miRNA nodes from miRTarBase'
), (
'enrich_genes_with_families',
'Enrich Genes with Gene Family Membership',
'Adds the parents of HGNC Gene Families'
), (
'enrich_families_with_genes',
'Enrich Gene Family Membership',
        'Adds the children to HGNC gene families'
), (
'enrich_bioprocesses',
'Enrich Biological Process Hierarchy',
'Adds parent biological processes'
), (
'enrich_chemical_hierarchy',
'Enrich Chemical Hierarchy',
'Adds parent chemical entries'
), (
'enrich_proteins_with_enzyme_families',
'Add Enzyme Class Members',
'Adds enzyme classes for each protein'
), (
'enrich_enzymes',
'Enrich Enzyme Classes',
'Adds proteins corresponding to present ExPASy Enzyme codes'
)
)
def _function_is_registered(name: str) -> bool:
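    """Check whether the named function is registered in the pipeline's `mapped` registry."""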
return name in mapped
def get_explorer_toolbox() -> List[Tuple[str, str, str]]:
"""Get the explorer toolbox list."""
explorer_toolbox = list(_explorer_toolbox)
explorer_toolbox.extend(
(func_name, title, description)
for func_name, title, description in _bio2bel_functions
if _function_is_registered(func_name)
)
return explorer_toolbox
| 40.911392 | 115 | 0.697401 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,213 | 0.684715 |
5f140a8d48047d7ddb5cd0c7677d6976d0a7cec0 | 827 | py | Python | threatstack/v1/client.py | giany/threatstack-python-client | c9e0a4bed55685d3a032c6f1a03261d44de64c4a | [
"MIT"
] | 4 | 2018-03-14T21:51:46.000Z | 2020-01-06T17:25:53.000Z | threatstack/v1/client.py | giany/threatstack-python-client | c9e0a4bed55685d3a032c6f1a03261d44de64c4a | [
"MIT"
] | 4 | 2018-01-17T19:58:29.000Z | 2018-04-13T17:03:01.000Z | threatstack/v1/client.py | giany/threatstack-python-client | c9e0a4bed55685d3a032c6f1a03261d44de64c4a | [
"MIT"
] | 6 | 2018-01-15T18:46:25.000Z | 2022-02-17T10:13:35.000Z | """
V1 Client
"""
from threatstack.base import BaseClient
from threatstack.v1 import resources
class Client(BaseClient):
BASE_URL = "https://app.threatstack.com/api/v1/"
def __init__(self, api_key=None, org_id=None, user_id=None, timeout=None):
BaseClient.__init__(self, api_key=api_key, timeout=timeout)
self.org_id = org_id
self.user_id = user_id
self.agents = resources.Agents(self)
self.alerts = resources.Alerts(self)
self.logs = resources.Logs(self)
self.organizations = resources.Organizations(self)
self.policies = resources.Policies(self)
def request_headers(self, _method, _url):
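        """Build the Authorization header (plus Organization, when set) sent with every request."""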
headers = { "Authorization": self.api_key }
if self.org_id:
headers["Organization"] = self.org_id
return headers
| 27.566667 | 78 | 0.665054 | 726 | 0.877872 | 0 | 0 | 0 | 0 | 0 | 0 | 83 | 0.100363 |
5f14372008e665aac666215605a53db7e9d8af9a | 5,930 | py | Python | djangocms_newsletter/admin/mailinglist.py | nephila/djangocms-newsletter | 5ebd8d3e1e2c85b2791d0261a954469f2548c840 | [
"BSD-3-Clause"
] | null | null | null | djangocms_newsletter/admin/mailinglist.py | nephila/djangocms-newsletter | 5ebd8d3e1e2c85b2791d0261a954469f2548c840 | [
"BSD-3-Clause"
] | null | null | null | djangocms_newsletter/admin/mailinglist.py | nephila/djangocms-newsletter | 5ebd8d3e1e2c85b2791d0261a954469f2548c840 | [
"BSD-3-Clause"
] | 2 | 2021-03-15T13:33:53.000Z | 2021-05-18T20:34:47.000Z | """ModelAdmin for MailingList"""
from datetime import datetime
from django.contrib import admin
from django.conf.urls.defaults import url
from django.conf.urls.defaults import patterns
from django.utils.encoding import smart_str
from django.core.urlresolvers import reverse
from django.shortcuts import get_object_or_404
from django.utils.translation import ugettext_lazy as _
from django.http import HttpResponseRedirect
from emencia.django.newsletter.models import Contact
from emencia.django.newsletter.models import MailingList
from emencia.django.newsletter.settings import USE_WORKGROUPS
from emencia.django.newsletter.utils.workgroups import request_workgroups
from emencia.django.newsletter.utils.workgroups import request_workgroups_contacts_pk
from emencia.django.newsletter.utils.workgroups import request_workgroups_mailinglists_pk
from emencia.django.newsletter.utils.vcard import vcard_contacts_export_response
from emencia.django.newsletter.utils.excel import ExcelResponse
class MailingListAdmin(admin.ModelAdmin):
date_hierarchy = 'creation_date'
list_display = ('creation_date', 'name', 'description',
'subscribers_count', 'unsubscribers_count',
'exportation_links')
list_editable = ('name', 'description')
list_filter = ('creation_date', 'modification_date')
search_fields = ('name', 'description',)
filter_horizontal = ['subscribers', 'unsubscribers']
fieldsets = ((None, {'fields': ('name', 'description',)}),
(None, {'fields': ('subscribers',)}),
(None, {'fields': ('unsubscribers',)}),
)
actions = ['merge_mailinglist']
actions_on_top = False
actions_on_bottom = True
def queryset(self, request):
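        """When workgroups are enabled, restrict non-superusers to mailing lists in their workgroups."""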
queryset = super(MailingListAdmin, self).queryset(request)
if not request.user.is_superuser and USE_WORKGROUPS:
mailinglists_pk = request_workgroups_mailinglists_pk(request)
queryset = queryset.filter(pk__in=mailinglists_pk)
return queryset
def save_model(self, request, mailinglist, form, change):
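        """Save the mailing list; new lists saved by non-superusers are also added to their workgroups."""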
workgroups = []
if not mailinglist.pk and not request.user.is_superuser \
and USE_WORKGROUPS:
workgroups = request_workgroups(request)
mailinglist.save()
for workgroup in workgroups:
workgroup.mailinglists.add(mailinglist)
def formfield_for_manytomany(self, db_field, request, **kwargs):
if 'subscribers' in db_field.name and not request.user.is_superuser \
and USE_WORKGROUPS:
contacts_pk = request_workgroups_contacts_pk(request)
kwargs['queryset'] = Contact.objects.filter(pk__in=contacts_pk)
return super(MailingListAdmin, self).formfield_for_manytomany(
db_field, request, **kwargs)
def merge_mailinglist(self, request, queryset):
"""Merge multiple mailing list"""
if queryset.count() == 1:
            self.message_user(request, _('Please select at least 2 mailing lists.'))
return None
subscribers = {}
unsubscribers = {}
for ml in queryset:
for contact in ml.subscribers.all():
subscribers[contact] = ''
for contact in ml.unsubscribers.all():
unsubscribers[contact] = ''
when = str(datetime.now()).split('.')[0]
new_mailing = MailingList(name=_('Merging list at %s') % when,
description=_('Mailing list created by merging at %s') % when)
new_mailing.save()
new_mailing.subscribers = subscribers.keys()
new_mailing.unsubscribers = unsubscribers.keys()
if not request.user.is_superuser and USE_WORKGROUPS:
for workgroup in request_workgroups(request):
workgroup.mailinglists.add(new_mailing)
        self.message_user(request, _('%s successfully created by merging.') % new_mailing)
return HttpResponseRedirect(reverse('admin:newsletter_mailinglist_change',
args=[new_mailing.pk]))
merge_mailinglist.short_description = _('Merge selected mailinglists')
def exportation_links(self, mailinglist):
"""Display links for exportation"""
return u'<a href="%s">%s</a> / <a href="%s">%s</a>' % (
reverse('admin:newsletter_mailinglist_export_excel',
args=[mailinglist.pk]), _('Excel'),
reverse('admin:newsletter_mailinglist_export_vcard',
args=[mailinglist.pk]), _('VCard'))
exportation_links.allow_tags = True
exportation_links.short_description = _('Export')
def exportion_vcard(self, request, mailinglist_id):
"""Export subscribers in the mailing in VCard"""
mailinglist = get_object_or_404(MailingList, pk=mailinglist_id)
name = 'contacts_%s' % smart_str(mailinglist.name)
return vcard_contacts_export_response(mailinglist.subscribers.all(), name)
def exportion_excel(self, request, mailinglist_id):
"""Export subscribers in the mailing in Excel"""
mailinglist = get_object_or_404(MailingList, pk=mailinglist_id)
name = 'contacts_%s' % smart_str(mailinglist.name)
return ExcelResponse(mailinglist.subscribers.all(), name)
def get_urls(self):
urls = super(MailingListAdmin, self).get_urls()
my_urls = patterns('',
url(r'^export/vcard/(?P<mailinglist_id>\d+)/$',
self.admin_site.admin_view(self.exportion_vcard),
name='newsletter_mailinglist_export_vcard'),
url(r'^export/excel/(?P<mailinglist_id>\d+)/$',
self.admin_site.admin_view(self.exportion_excel),
name='newsletter_mailinglist_export_excel'))
return my_urls + urls
| 47.063492 | 96 | 0.663238 | 4,936 | 0.832378 | 0 | 0 | 0 | 0 | 0 | 0 | 1,062 | 0.179089 |
5f18561e37fb0a33a844c99a5051aea7c8863cea | 4,263 | py | Python | lib/train/recorder.py | rurusasu/OrigNet | 3b3384cb3d09b52c7c98bb264901285f006e51c1 | [
"Apache-2.0"
] | null | null | null | lib/train/recorder.py | rurusasu/OrigNet | 3b3384cb3d09b52c7c98bb264901285f006e51c1 | [
"Apache-2.0"
] | null | null | null | lib/train/recorder.py | rurusasu/OrigNet | 3b3384cb3d09b52c7c98bb264901285f006e51c1 | [
"Apache-2.0"
] | 1 | 2021-09-24T01:24:05.000Z | 2021-09-24T01:24:05.000Z | import os
import sys
from collections import deque, defaultdict
from typing import Dict, Union
sys.path.append("../../")
import torch
import torchvision.utils as vutils
from tensorboardX import SummaryWriter
class SmoothedValue(object):
"""Track a series of values and provide access to smoothed values over a
window or the global series average.
"""
def __init__(self, window_size=20):
self.deque = deque(maxlen=window_size)
self.total = 0.0
self.count = 0
def update(self, value):
self.deque.append(value)
self.count += 1
self.total += value
@property
def median(self):
d = torch.tensor(list(self.deque))
return d.median().item()
@property
def avg(self):
d = torch.tensor(list(self.deque))
return d.mean().item()
@property
def global_avg(self):
return self.total / self.count
class Recorder(object):
def __init__(self, cfg):
if "record_dir" not in cfg and "resume" not in cfg:
            raise ValueError("The required parameter is not set.")
# log_dir = os.path.join(pth.DATA_DIR, cfg.task, cfg.record_dir)
log_dir = cfg.record_dir
if not os.path.exists(log_dir):
os.makedirs(log_dir)
if not cfg.resume:
os.system("rm -rf {}".format(log_dir))
self.writer = SummaryWriter(log_dir)
# scalars
self.epoch = 0
self.step = 0
self.loss_stats = defaultdict(SmoothedValue)
self.batch_time = SmoothedValue()
# images
self.image_stats = defaultdict(object)
def VisualizeNetwork(self, network: torch.nn, inputs: torch.Tensor) -> None:
"""TensorBoard 上でネットワークの構造を可視化するためのデータを作成するための関数.
Args:
network (torch.nn): 可視化したいモデル
inputs (torch.Tensor): モデルの構造を定義するための入力データ
"""
self.writer.add_graph(network, inputs)
def update_image_stats(self, image_stats: Dict) -> None:
"""
        Args:
            image_stats(Dict[batch_imgs]):
                Each value stored in the dictionary is either
                * a 4D mini-batch tensor of shape (B x C x H x W), or
                * a list of images that all have the same size.
"""
for k, v in image_stats.items():
self.image_stats[k] = v.detach().cpu()
def update_loss_stats(self, loss_dict: Dict) -> None:
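        """Accumulate each (detached, CPU) loss value into its SmoothedValue tracker."""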
for k, v in loss_dict.items():
self.loss_stats[k].update(v.detach().cpu())
def record(
self,
prefix,
step: int = -1,
loss_stats: Union[Dict, None] = None,
image_stats: Union[Dict, None] = None,
):
pattern = prefix + "/{}"
step = step if step >= 0 else self.step
loss_stats = loss_stats if loss_stats else self.loss_stats
image_stats = image_stats if image_stats else self.image_stats
for k, v in loss_stats.items():
if isinstance(v, SmoothedValue):
self.writer.add_scalar(pattern.format(k), v.median, step)
else:
self.writer.add_scalar(pattern.format(k), v, step)
for k, v in self.image_stats.items():
            # Case of RGB images whose values lie in the [0, 1] range
if len(v.size()) == 3:
b_size, h, w = v.size()[0], v.size()[1], v.size()[2]
v = v.view(b_size, -1, h, w)
v = v.float() if v.dtype != torch.float32 else v
self.writer.add_image(
pattern.format(k), vutils.make_grid(v, value_range=[0, 1]), step
)
del loss_stats
def state_dict(self):
scalar_dict = {}
scalar_dict["step"] = self.step
return scalar_dict
def load_state_dict(self, scalar_dict):
self.step = scalar_dict["step"]
def __str__(self):
loss_state = []
for k, v in self.loss_stats.items():
loss_state.append("{}: {:.4f}".format(k, v.avg))
loss_state = " ".join(loss_state)
recording_state = " ".join(
["epoch: {}", "step: {}", "{}", "batch_time: {:.3f} sec."]
)
return recording_state.format(
self.epoch,
self.step,
loss_state,
# self.data_time.avg,
self.batch_time.avg,
)
def make_recorder(cfg):
return Recorder(cfg)
| 29 | 80 | 0.569317 | 4,228 | 0.940601 | 0 | 0 | 283 | 0.062959 | 0 | 0 | 990 | 0.220245 |
5f1970116641c3a579674b0a3cde7a6940267ce4 | 5,642 | py | Python | scrapytest/spiders.py | coderatchet/scrapy-test | 4f5febfca05d267dc98df94e65a403210ce39d81 | [
"Apache-2.0"
] | null | null | null | scrapytest/spiders.py | coderatchet/scrapy-test | 4f5febfca05d267dc98df94e65a403210ce39d81 | [
"Apache-2.0"
] | null | null | null | scrapytest/spiders.py | coderatchet/scrapy-test | 4f5febfca05d267dc98df94e65a403210ce39d81 | [
"Apache-2.0"
] | null | null | null | import logging
import re
from datetime import datetime
import scrapy
from scrapy.http import Response
# noinspection PyUnresolvedReferences
import scrapytest.db
from scrapytest.config import config
from scrapytest.types import Article
from scrapytest.utils import merge_dict
log = logging.getLogger(__name__)
class GuardianNewsSpider(scrapy.spiders.CrawlSpider):
""" Spider that crawls over the Guardian news website"""
name = "guardian"
_user_config = {}
def __init__(self, **kwargs):
super().__init__(**kwargs)
if hasattr(self, '_user_config'):
self._config = merge_dict(self._user_config, config['guardian_spider'])
else:
self._config = config['guardian_spider']
@classmethod
def update_settings(cls, settings):
super(GuardianNewsSpider, cls).update_settings(settings)
# super impose user cmd line args onto the current spider configuration.
if 'custom_guardian_config' in settings:
cls._user_config = settings.get('custom_guardian_config')
def start_requests(self):
"""
generator for requesting the content from each of the main news collection entry points
"""
urls = ['http://{host}/{path}'.format(host=self._config['host'], path=path) for path in
self._config['collection_paths']]
for url in urls:
max_depth = self._config['max_depth']
yield scrapy.Request(url=url, callback=lambda response: self._parse_news_list(response, max_depth))
def _parse_news_list(self, response: Response, depth=10):
"""
handle the raw html
:param depth: maximum depth we should search for articles
:param response: the top level news response
"""
log.debug("Parsing news list link: {}".format(response.url))
for link in self._article_links(response):
link = response.urljoin(link)
yield scrapy.Request(url=link, callback=self._parse_article_link)
# if next link exists and depth not exceeded, visit next link and yield results.
next_page = response.css(self._config['next_page_selector']).extract_first()
# we keep iterating through until our maximum depth is reached.
if next_page is not None and depth > 0:
next_page = response.urljoin(next_page)
yield scrapy.Request(url=next_page, callback=lambda list_response: self._parse_news_list(list_response,
depth - 1))
def _parse_article_link(self, article: Response):
"""
parses the article's main page
:param Response article: top level article page.
should search for existing article and store if not found.
"""
# some author elements have clickable links with the name and picture of author
author_raw = article.css(self._config['author_selector'])
log.debug("author_raw: {}".format(author_raw.extract_first()))
try:
if author_raw.css('a').extract_first() is not None:
author = author_raw.css('a::text').extract_first()
else:
author = author_raw.css('*::text').extract_first()
author = re.split(r"-", author)[0].strip()
except:
author = "The Guardian"
log.debug("parsed author name: {}".format(author))
# author is in format of "name - email"
date_time_string = article.css(self._config['date_time_selector']).extract_first()
# remove the ':' from the date string as sftptime does not support this
sub = re.sub(r':([0-9]{2})$', r'\1', date_time_string)
date_time = datetime.strptime(sub, self._config['date_time_format'])
# assemble the article object
title = article.css(self._config['title_selector']).extract_first().strip()
data = {
'title': title,
'author': author,
'date_time': date_time,
'content': '\n'.join(article.css(self._config['content_selector']).extract()).strip()
}
# don't save the article if nothing exists in it.
if not data['content'].strip() == '':
# persist it if it doesn't exist yet
log.debug("Searching for existing article with the title '{}' and date_time '{}'".format(title, date_time))
existing_article = Article.objects(title__exact=title, date_time__exact=date_time).first()
if existing_article is None:
log.debug("Article not found for {} - {}, saving new article: {}".format(title, date_time, data))
new_article = Article(**data)
new_article.save()
else:
log.debug("Article found, not saving")
@staticmethod
def _parse_author_tag(author_tag: Response):
"""
parse the author section for the name
:param author_tag: the author/div tag to parse
:return: the name of the author
"""
text = author_tag.css('.story-header__author-name::text').extract_first()
return re.split(r"-", text)[0].strip()
def _article_links(self, news_list_response: Response):
"""
Generator for iterating through articles
:param scrapy.http.Response news_list_response: a top level news list page
:yields: the next article in the news list
"""
for article_link in news_list_response.css(self._config['article_list_item_link_selector']):
yield article_link.extract()
| 40.589928 | 119 | 0.63045 | 5,327 | 0.944169 | 1,927 | 0.341546 | 674 | 0.119461 | 0 | 0 | 2,091 | 0.370613 |
5f19f6344a219107ca416c8c6abd1b139dea3270 | 6,490 | py | Python | src/lib/parsers/parseskipfish.py | Project-Prismatica/Prism-Shell | 006d04fdabbe51c4a3fd642e05ba276251f1bba4 | [
"MIT"
] | null | null | null | src/lib/parsers/parseskipfish.py | Project-Prismatica/Prism-Shell | 006d04fdabbe51c4a3fd642e05ba276251f1bba4 | [
"MIT"
] | null | null | null | src/lib/parsers/parseskipfish.py | Project-Prismatica/Prism-Shell | 006d04fdabbe51c4a3fd642e05ba276251f1bba4 | [
"MIT"
] | 1 | 2018-02-22T02:18:48.000Z | 2018-02-22T02:18:48.000Z | #!/usr/bin/python
# parseskipfish.py
#
# By Adrien de Beaupre [email protected] | [email protected]
# Copyright 2011 Intru-Shun.ca Inc.
# v0.09
# 16 October 2011
#
# The current version of these scripts are at: http://dshield.handers.org/adebeaupre/ossams-parser.tgz
#
# Parses skipfish HTML and JSON output
# http://code.google.com/p/skipfish/
#
# This file is part of the ossams-parser.
#
# The ossams-parser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# The ossams-parser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with the ossams-parser. If not, see <http://www.gnu.org/licenses/>.
#
"""
Mapping of tool field to OSSAMS database field:
tool field: database field:
sf_version tooloutput.version
scan_date tooloutput.tooldate
severity vulnerabilities.severity
type vulnerabilities.vulnerabilityid
name vulnerabilities.vulnerabilityname
request vulnerabilities.httprequest
response vulnerabilities.httpresponse
url vulnerabilities.vulnerabilityuri
extra vulnerabilities.vulnerabilityextra
"""
# parseskipfish function
def parseskipfish(time, os, etree, sys, filetoread, db, dbconnection, projectname, projectid, separator):
parser = etree.HTMLParser()
newsamples = ""
newsummary = ""
counter = 0
descriptions = ""
directory = ""
skipfishfile = filetoread.split(separator)
file = skipfishfile[-1]
filetime = time.ctime(os.path.getmtime(filetoread))
timenow = time.ctime()
skipfishfile.pop()
for item in skipfishfile:
directory = directory + item + separator
samplesfilein = directory + "samples.js"
summaryfilein = directory + "summary.js"
samplesfileout = directory + "skipfishsamples.py"
summaryfileout = directory + "skipfishsummary.py"
descfileout = directory + "skipfishdesc.py"
if os.path.isfile(samplesfilein):
sampleshandle = open(samplesfilein, 'r')
for line in sampleshandle:
newsamples = newsamples + line.replace('var ','')
samplesfilehandle = open(samplesfileout, 'w')
samplesfilehandle.write(newsamples)
samplesfilehandle.close()
else:
print "Could not locate the skipfish samples.js file in: ", directory
return
if os.path.isfile(summaryfilein):
summaryhandle = open(summaryfilein, 'r')
for line in summaryhandle:
newsummary = newsummary + line.replace('var ','')
summaryfilehandle = open(summaryfileout, 'w')
summaryfilehandle.write(newsummary)
summaryfilehandle.close()
else:
print "Could not locate the skipfish summary.js file in: ", directory
return
if os.path.isfile(filetoread):
tree = etree.parse(filetoread, parser)
root = tree.getroot()
# Check to see if the document root is 'html', exit if it is not
if root.tag:
if root.tag != "html":
print filetoread, "is not an skipfish HTML report file"
return
javascripts = root.findall('head/script')
for javascript in javascripts:
if javascript.text:
contents = javascript.text
splitcontents = contents.split('\n')
for iterator in splitcontents:
if 'var issue_desc' in iterator:
firstcounter = counter
if 'Simple HTML' in iterator:
secondcounter = counter
counter+=1
for i in range(firstcounter, secondcounter):
if 'var' in splitcontents[i]:
descriptions = descriptions + splitcontents[i].replace('var ','')
else:
descriptions = descriptions + splitcontents[i]
descfilehandle = open(descfileout, 'w')
descfilehandle.write(descriptions)
descfilehandle.close()
sys.path.append(directory)
from skipfishdesc import issue_desc
from skipfishsamples import issue_samples
import skipfishsummary
db.execute("""
INSERT INTO tooloutput (toolname, filename, OSSAMSVersion, filedate, version, inputtimestamp,
tooldate, projectname, projectid)
VALUES
('skipfish', '%s', 0.09, '%s', '%s', '%s', '%s', '%s', '%s')
""" % (file, filetime, skipfishsummary.sf_version, timenow, skipfishsummary.scan_date, projectname, projectid)
)
tooloutputnumber = int(db.lastrowid)
print "Processed skipfish report number:", tooloutputnumber
db.execute("""
INSERT INTO hosts (tooloutputnumber, recon, hostcriticality)
VALUES
('%s', '1', '0')
""" % (tooloutputnumber)
)
hostnumber = int(db.lastrowid)
for issue in range(len(issue_samples)):
issuedict = issue_samples[issue]
typenum = issuedict['type']
samplelist = issuedict['samples']
for sample in range(len(samplelist)):
sampledict = samplelist[sample]
if sampledict['dir'] != '':
directory = directory + sampledict['dir'].replace('/', separator)
requestfile = directory + '\\request.dat'
if os.path.isfile(requestfile):
requestfilehandle = open(requestfile, 'r')
request = requestfilehandle.read()
requestfilehandle.close()
else:
request = ""
responsefile = directory + separator + 'response.dat'
if os.path.isfile(responsefile):
responsefilehandle = open(responsefile, 'r')
response = responsefilehandle.read()
responsefilehandle.close()
else:
response = ""
db.execute("""
INSERT INTO vulnerabilities (tooloutputnumber, hostnumber, vulnerabilityid, vulnerabilityseverity, vulnerabilityname,
vulnerabilityextra, vulnerabilityuri, httprequest, httpresponse, vulnerabilityvalidation)
VALUES
('%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', 0)
""" % (tooloutputnumber, hostnumber, typenum, issuedict['severity'], dbconnection.escape_string(issue_desc[str(typenum)]),
dbconnection.escape_string(sampledict['extra']), dbconnection.escape_string(sampledict['url']),
dbconnection.escape_string(request), dbconnection.escape_string(response))
)
vulnnumber = int(db.lastrowid)
else:
print "Could not locate the skipfish index.html file"
return
| 38.402367 | 130 | 0.686441 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,774 | 0.427427 |
5f1c1c275c8674c941378ad94c7d52f05e79ddd2 | 14,861 | py | Python | userge/plugins/admin/gban.py | wildyvpn-network/bot | 87459495000bd6004b8f62a9cb933c164da9ef29 | [
"MIT"
] | null | null | null | userge/plugins/admin/gban.py | wildyvpn-network/bot | 87459495000bd6004b8f62a9cb933c164da9ef29 | [
"MIT"
] | null | null | null | userge/plugins/admin/gban.py | wildyvpn-network/bot | 87459495000bd6004b8f62a9cb933c164da9ef29 | [
"MIT"
] | null | null | null | """ setup gban """
# Copyright (C) 2020 by UsergeTeam@Github, < https://github.com/UsergeTeam >.
#
# This file is part of < https://github.com/UsergeTeam/Userge > project,
# and is released under the "GNU v3.0 License Agreement".
# Please see < https://github.com/uaudith/Userge/blob/master/LICENSE >
#
# All rights reserved
import json
import asyncio
from typing import Union
import aiohttp
import spamwatch
from spamwatch.types import Ban
from pyrogram.errors.exceptions.bad_request_400 import (
ChatAdminRequired, UserAdminInvalid, ChannelInvalid)
from userge import userge, Message, Config, get_collection, filters, pool
SAVED_SETTINGS = get_collection("CONFIGS")
GBAN_USER_BASE = get_collection("GBAN_USER")
WHITELIST = get_collection("WHITELIST_USER")
CHANNEL = userge.getCLogger(__name__)
LOG = userge.getLogger(__name__)
async def _init() -> None:
s_o = await SAVED_SETTINGS.find_one({'_id': 'ANTISPAM_ENABLED'})
if s_o:
Config.ANTISPAM_SENTRY = s_o['data']
@userge.on_cmd("antispam", about={
'header': "enable / disable antispam",
'description': "Toggle API Auto Bans"}, allow_channels=False)
async def antispam_(message: Message):
""" enable / disable antispam """
if Config.ANTISPAM_SENTRY:
Config.ANTISPAM_SENTRY = False
await message.edit("`antispam disabled !`", del_in=3)
else:
Config.ANTISPAM_SENTRY = True
await message.edit("`antispam enabled !`", del_in=3)
await SAVED_SETTINGS.update_one(
{'_id': 'ANTISPAM_ENABLED'}, {"$set": {'data': Config.ANTISPAM_SENTRY}}, upsert=True)
@userge.on_cmd("gban", about={
'header': "Globally Ban A User",
'description': "Adds User to your GBan List. "
"Bans a Globally Banned user if they join or message. "
"[NOTE: Works only in groups where you are admin.]",
'examples': "{tr}gban [userid | reply] [reason for gban] (mandatory)"},
allow_channels=False, allow_bots=False)
async def gban_user(message: Message):
""" ban a user globally """
await message.edit("`GBanning...`")
user_id, reason = message.extract_user_and_text
if not user_id:
await message.edit(
"`no valid user_id or message specified,`"
"`don't do .help gban for more info. "
"Coz no one's gonna help ya`(。ŏ_ŏ) ⚠", del_in=0)
return
get_mem = await message.client.get_user_dict(user_id)
firstname = get_mem['fname']
if not reason:
await message.edit(
f"**#Aborted**\n\n**Gbanning** of [{firstname}](tg://user?id={user_id}) "
"Aborted coz No reason of gban provided by banner", del_in=5)
return
user_id = get_mem['id']
if user_id == (await message.client.get_me()).id:
await message.edit(r"LoL. Why would I GBan myself ¯\(°_o)/¯")
return
if user_id in Config.SUDO_USERS:
await message.edit(
"That user is in my Sudo List, Hence I can't ban him.\n\n"
"**Tip:** Remove them from Sudo List and try again. (¬_¬)", del_in=5)
return
found = await GBAN_USER_BASE.find_one({'user_id': user_id})
if found:
await message.edit(
"**#Already_GBanned**\n\nUser Already Exists in My Gban List.\n"
f"**Reason For GBan:** `{found['reason']}`", del_in=5)
return
await message.edit(r"\\**#GBanned_User**//"
f"\n\n**First Name:** [{firstname}](tg://user?id={user_id})\n"
f"**User ID:** `{user_id}`\n**Reason:** `{reason}`")
# TODO: can we add something like "GBanned by {any_sudo_user_fname}"
if message.client.is_bot:
chats = [message.chat]
else:
chats = await message.client.get_common_chats(user_id)
gbanned_chats = []
for chat in chats:
try:
await chat.kick_member(user_id)
gbanned_chats.append(chat.id)
await CHANNEL.log(
r"\\**#Antispam_Log**//"
f"\n**User:** [{firstname}](tg://user?id={user_id})\n"
f"**User ID:** `{user_id}`\n"
f"**Chat:** {chat.title}\n"
f"**Chat ID:** `{chat.id}`\n"
f"**Reason:** `{reason}`\n\n$GBAN #id{user_id}")
except (ChatAdminRequired, UserAdminInvalid, ChannelInvalid):
pass
await GBAN_USER_BASE.insert_one({'firstname': firstname,
'user_id': user_id,
'reason': reason,
'chat_ids': gbanned_chats})
if message.reply_to_message:
await CHANNEL.fwd_msg(message.reply_to_message)
await CHANNEL.log(f'$GBAN #prid{user_id} ⬆️')
LOG.info("G-Banned %s", str(user_id))
@userge.on_cmd("ungban", about={
'header': "Globally Unban an User",
'description': "Removes an user from your Gban List",
'examples': "{tr}ungban [userid | reply]"},
allow_channels=False, allow_bots=False)
async def ungban_user(message: Message):
""" unban a user globally """
await message.edit("`UnGBanning...`")
user_id, _ = message.extract_user_and_text
if not user_id:
await message.err("user-id not found")
return
get_mem = await message.client.get_user_dict(user_id)
firstname = get_mem['fname']
user_id = get_mem['id']
found = await GBAN_USER_BASE.find_one({'user_id': user_id})
if not found:
await message.err("User Not Found in My Gban List")
return
if 'chat_ids' in found:
for chat_id in found['chat_ids']:
try:
await userge.unban_chat_member(chat_id, user_id)
await CHANNEL.log(
r"\\**#Antispam_Log**//"
f"\n**User:** [{firstname}](tg://user?id={user_id})\n"
f"**User ID:** `{user_id}`\n\n"
f"$UNGBAN #id{user_id}")
except (ChatAdminRequired, UserAdminInvalid, ChannelInvalid):
pass
await message.edit(r"\\**#UnGbanned_User**//"
f"\n\n**First Name:** [{firstname}](tg://user?id={user_id})\n"
f"**User ID:** `{user_id}`")
await GBAN_USER_BASE.delete_one({'firstname': firstname, 'user_id': user_id})
LOG.info("UnGbanned %s", str(user_id))
@userge.on_cmd("glist", about={
'header': "Get a List of Gbanned Users",
'description': "Get Up-to-date list of users Gbanned by you.",
'examples': "Lol. Just type {tr}glist"},
allow_channels=False)
async def list_gbanned(message: Message):
""" vies gbanned users """
msg = ''
async for c in GBAN_USER_BASE.find():
msg += ("**User** : " + str(c['firstname']) + "-> with **User ID** -> "
+ str(c['user_id']) + " is **GBanned for** : " + str(c['reason']) + "\n\n")
await message.edit_or_send_as_file(
f"**--Globally Banned Users List--**\n\n{msg}" if msg else "`glist empty!`")
@userge.on_cmd("whitelist", about={
'header': "Whitelist a User",
'description': "Use whitelist to add users to bypass API Bans",
'useage': "{tr}whitelist [userid | reply to user]",
'examples': "{tr}whitelist 5231147869"},
allow_channels=False, allow_bots=False)
async def whitelist(message: Message):
""" add user to whitelist """
user_id, _ = message.extract_user_and_text
if not user_id:
await message.err("user-id not found")
return
get_mem = await message.client.get_user_dict(user_id)
firstname = get_mem['fname']
user_id = get_mem['id']
found = await WHITELIST.find_one({'user_id': user_id})
if found:
await message.err("User Already in My WhiteList")
return
await asyncio.gather(
WHITELIST.insert_one({'firstname': firstname, 'user_id': user_id}),
message.edit(
r"\\**#Whitelisted_User**//"
f"\n\n**First Name:** [{firstname}](tg://user?id={user_id})\n"
f"**User ID:** `{user_id}`"),
CHANNEL.log(
r"\\**#Antispam_Log**//"
f"\n**User:** [{firstname}](tg://user?id={user_id})\n"
f"**User ID:** `{user_id}`\n"
f"**Chat:** {message.chat.title}\n"
f"**Chat ID:** `{message.chat.id}`\n\n$WHITELISTED #id{user_id}")
)
LOG.info("WhiteListed %s", str(user_id))
@userge.on_cmd("rmwhite", about={
'header': "Removes a User from Whitelist",
'description': "Use it to remove users from WhiteList",
'useage': "{tr}rmwhite [userid | reply to user]",
'examples': "{tr}rmwhite 5231147869"},
allow_channels=False, allow_bots=False)
async def rmwhitelist(message: Message):
""" remove a user from whitelist """
user_id, _ = message.extract_user_and_text
if not user_id:
await message.err("user-id not found")
return
get_mem = await message.client.get_user_dict(user_id)
firstname = get_mem['fname']
user_id = get_mem['id']
found = await WHITELIST.find_one({'user_id': user_id})
if not found:
await message.err("User Not Found in My WhiteList")
return
await asyncio.gather(
WHITELIST.delete_one({'firstname': firstname, 'user_id': user_id}),
message.edit(
r"\\**#Removed_Whitelisted_User**//"
f"\n\n**First Name:** [{firstname}](tg://user?id={user_id})\n"
f"**User ID:** `{user_id}`"),
CHANNEL.log(
r"\\**#Antispam_Log**//"
f"\n**User:** [{firstname}](tg://user?id={user_id})\n"
f"**User ID:** `{user_id}`\n"
f"**Chat:** {message.chat.title}\n"
f"**Chat ID:** `{message.chat.id}`\n\n$RMWHITELISTED #id{user_id}")
)
LOG.info("WhiteListed %s", str(user_id))
@userge.on_cmd("listwhite", about={
'header': "Get a List of Whitelisted Users",
'description': "Get Up-to-date list of users WhiteListed by you.",
'examples': "Lol. Just type {tr}listwhite"},
allow_channels=False)
async def list_white(message: Message):
""" list whitelist """
msg = ''
async for c in WHITELIST.find():
msg += ("**User** : " + str(c['firstname']) + "-> with **User ID** -> " +
str(c['user_id']) + "\n\n")
await message.edit_or_send_as_file(
f"**--Whitelisted Users List--**\n\n{msg}" if msg else "`whitelist empty!`")
@userge.on_filters(filters.group & filters.new_chat_members, group=1, check_restrict_perm=True)
async def gban_at_entry(message: Message):
""" handle gbans """
chat_id = message.chat.id
for user in message.new_chat_members:
user_id = user.id
first_name = user.first_name
if await WHITELIST.find_one({'user_id': user_id}):
continue
gbanned = await GBAN_USER_BASE.find_one({'user_id': user_id})
if gbanned:
if 'chat_ids' in gbanned:
chat_ids = gbanned['chat_ids']
chat_ids.append(chat_id)
else:
chat_ids = [chat_id]
await asyncio.gather(
message.client.kick_chat_member(chat_id, user_id),
message.reply(
r"\\**#Userge_Antispam**//"
"\n\nGlobally Banned User Detected in this Chat.\n\n"
f"**User:** [{first_name}](tg://user?id={user_id})\n"
f"**ID:** `{user_id}`\n**Reason:** `{gbanned['reason']}`\n\n"
"**Quick Action:** Banned", del_in=10),
CHANNEL.log(
r"\\**#Antispam_Log**//"
"\n\n**GBanned User $SPOTTED**\n"
f"**User:** [{first_name}](tg://user?id={user_id})\n"
f"**ID:** `{user_id}`\n**Reason:** {gbanned['reason']}\n**Quick Action:** "
f"Banned in {message.chat.title}"),
GBAN_USER_BASE.update_one(
{'user_id': user_id, 'firstname': first_name},
{"$set": {'chat_ids': chat_ids}}, upsert=True)
)
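        # No hit in the local GBan collection: optionally consult the CAS
        # (Combot Anti-Spam) federation API next.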
elif Config.ANTISPAM_SENTRY:
async with aiohttp.ClientSession() as ses:
async with ses.get(f'https://api.cas.chat/check?user_id={user_id}') as resp:
res = json.loads(await resp.text())
if res['ok']:
reason = ' | '.join(
res['result']['messages']) if 'result' in res else None
await asyncio.gather(
message.client.kick_chat_member(chat_id, user_id),
message.reply(
r"\\**#Userge_Antispam**//"
"\n\nGlobally Banned User Detected in this Chat.\n\n"
"**$SENTRY CAS Federation Ban**\n"
f"**User:** [{first_name}](tg://user?id={user_id})\n"
f"**ID:** `{user_id}`\n**Reason:** `{reason}`\n\n"
"**Quick Action:** Banned", del_in=10),
CHANNEL.log(
r"\\**#Antispam_Log**//"
"\n\n**GBanned User $SPOTTED**\n"
"**$SENRTY #CAS BAN**"
f"\n**User:** [{first_name}](tg://user?id={user_id})\n"
f"**ID:** `{user_id}`\n**Reason:** `{reason}`\n**Quick Action:**"
f" Banned in {message.chat.title}\n\n$AUTOBAN #id{user_id}")
)
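        # Last resort: query SpamWatch when an API key is configured.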
elif Config.SPAM_WATCH_API:
intruder = await _get_spamwatch_data(user_id)
if intruder:
await asyncio.gather(
message.client.kick_chat_member(chat_id, user_id),
message.reply(
r"\\**#Userge_Antispam**//"
"\n\nGlobally Banned User Detected in this Chat.\n\n"
"**$SENTRY SpamWatch Federation Ban**\n"
f"**User:** [{first_name}](tg://user?id={user_id})\n"
f"**ID:** `{user_id}`\n**Reason:** `{intruder.reason}`\n\n"
"**Quick Action:** Banned", del_in=10),
CHANNEL.log(
r"\\**#Antispam_Log**//"
"\n\n**GBanned User $SPOTTED**\n"
"**$SENRTY #SPAMWATCH_API BAN**"
f"\n**User:** [{first_name}](tg://user?id={user_id})\n"
f"**ID:** `{user_id}`\n**Reason:** `{intruder.reason}`\n"
f"**Quick Action:** Banned in {message.chat.title}\n\n"
f"$AUTOBAN #id{user_id}")
)
message.continue_propagation()
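# The SpamWatch lookup is a blocking HTTP call, so it is pushed to the worker
# thread pool; get_ban() is assumed to return a Ban object for banned users and
# a falsy value otherwise (per the Union[Ban, bool] annotation below).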
@pool.run_in_thread
def _get_spamwatch_data(user_id: int) -> Union[Ban, bool]:
return spamwatch.Client(Config.SPAM_WATCH_API).get_ban(user_id)
| 43.200581 | 95 | 0.554942 | 0 | 0 | 0 | 0 | 13856 | 0.931433 | 12002 | 0.806803 | 6024 | 0.404948 |
5f203d4af3e16b58b2caf10d5df99a539d4f0417 | 41 | py | Python | user/tests/test_verifycode.py | Hrsn2861/pysat-server | 72224bb0e6af8ef825eaf3259587698b5639b8a5 | [
"MIT"
] | null | null | null | user/tests/test_verifycode.py | Hrsn2861/pysat-server | 72224bb0e6af8ef825eaf3259587698b5639b8a5 | [
"MIT"
] | 7 | 2020-06-06T01:55:39.000Z | 2022-02-10T11:46:31.000Z | user/tests/test_verifycode.py | Hrsnnnn/pysat-server | 72224bb0e6af8ef825eaf3259587698b5639b8a5 | [
"MIT"
] | null | null | null | """pytest for user.models.verifycode
"""
| 13.666667 | 36 | 0.707317 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 40 | 0.97561 |
5f20ed53742e88f3bc18e3804150cf7252de73ee | 7,638 | py | Python | src/python/codebay/common/runcommand.py | nakedible/vpnease-l2tp | 0fcda6a757f2bc5c37f4753b3cd8b1c6d282db5c | [
"WTFPL"
] | 5 | 2015-04-16T08:36:17.000Z | 2017-05-12T17:20:12.000Z | src/python/codebay/common/runcommand.py | nakedible/vpnease-l2tp | 0fcda6a757f2bc5c37f4753b3cd8b1c6d282db5c | [
"WTFPL"
] | null | null | null | src/python/codebay/common/runcommand.py | nakedible/vpnease-l2tp | 0fcda6a757f2bc5c37f4753b3cd8b1c6d282db5c | [
"WTFPL"
] | 4 | 2015-03-19T14:39:51.000Z | 2019-01-23T08:22:55.000Z | """
Codebay process running utils.
@group Running commands: run, call
@group Preexec functions: chroot, cwd
@var PASS:
Specifies that the given file descriptor should be passed directly
to the parent. Given as an argument to run.
@var FAIL:
Specifies that if output is received for the given file descriptor,
an exception should be signalled. Given as an argument to
run.
@var STDOUT:
Specifies that standard error should be redirected to the same file
descriptor as standard out. Given as an argument to run.
"""
__docformat__ = 'epytext en'
import os
from codebay.common import subprocess
PASS = -1
FAIL = -2
STDOUT = -3
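# For example (illustrative only): passing stdout=PASS to run() inherits the
# parent's stdout, stderr=STDOUT merges stderr into the captured stdout, and
# stderr=FAIL raises RunException if the child writes anything to stderr.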
class RunException(Exception):
"""Running command failed.
@ivar rv: Return value of the command.
@ivar stdout: Captured stdout of the command or None.
@ivar stderr: Captured stderr of the command or None.
"""
class chroot:
"""Returns a function that will do a chroot to path when invoked."""
def __init__(self, path):
self.path = path
def __call__(self):
os.chroot(self.path)
def __repr__(self):
return '%s(%s)' % (str(self.__class__), repr(self.path))
class cwd:
"""Returns a function that will do a cwd to path when invoked."""
def __init__(self, path):
self.path = path
def __call__(self):
os.chdir(self.path)
def __repr__(self):
return '%s(%s)' % (str(self.__class__), repr(self.path))
def call(*args, **kw):
"""Convenience wrapper for calling run.
Positional arguments are converted to a list and given to run as
an argument. Keyword arguments are passed as is to run.
>>> call('echo','-n','foo')
[0, 'foo', '']
>>> call('exit 1', shell=True)
[1, '', '']
"""
return run(list(args), **kw)
def run(args, executable=None, cwd=None, env=None, stdin=None, stdout=None, stderr=None, shell=False, preexec=None, retval=None):
"""Wrapper for running commands.
run takes a lot of arguments and they are explained here.
>>> run(['echo','-n','foo'])
[0, 'foo', '']
>>> run('exit 1', shell=True)
[1, '', '']
@param args:
List of strings or a single string specifying the program and
the arguments to execute. It is mandatory.
@param executable:
Name of the executable to be passed in argv[0]. Defaults to the
first value of args.
@param cwd:
Working directory to execute the program in. Defaults to no
change. Executes before preexec.
@param env:
Environment to execute the process with. Defaults to inheriting
the environment of the current process.
@param stdin:
If None, process is executed with a pipe with no data given. If
a string, process is executed with a pipe with the string as
input. If PASS, process stdin is inherited from the current
process. Defaults to None.
@param stdout:
If None, process stdout is captured with a pipe and returned. If
PASS, process stdout is inherited from the current process. If
FAIL, process stdout is captured with a pipe and an exception is
raised if the process prints to stdout. Defaults to None.
@param stderr:
Same as above with one addition. If STDOUT, then stderr is
redirected to the same destination as stdout.
@param shell:
If False, the command is executed directly. If True, the
arguments are passed to the shell for interpretation. Defaults
to False.
@param preexec:
Can be used to specify things to do just before starting the new
child process. The argument should be a list or tuple, all of
the callables in the list are executed just before starting the
child process. Defaults to no function executed.
@param retval:
If None, no checks are performed on the child process' return
value. If FAIL, an exception is raised if the child process
return value is not zero. If a callable, the callable is invoked
with the child process return value as an argument and an
exception is raised if the callable returned False.
@return:
List of retval, stdout output string and stderr output
string. If stdout or stderr is not captured, None is returned
instead.
@raise RunException:
Raised if stdout output, stderr output or return value check
triggered a failure.
@raise ValueError:
Raised if illegal arguments are detected.
@raise OSError:
Raised if starting the child process failed.
"""
if isinstance(args, list):
popen_args = args
elif isinstance(args, str):
popen_args = [args]
else:
raise ValueError('Unknown value %s passed as args.' % repr(args))
if preexec is None:
preexec_fn = None
elif isinstance(preexec, (list, tuple)):
def do_preexec():
for f in preexec:
f()
preexec_fn = do_preexec
else:
raise ValueError('Unknown value %s passed as preexec.' % repr(preexec))
if stdin is None:
popen_stdin = subprocess.PIPE
popen_input = None
elif stdin is PASS:
popen_stdin = None
popen_input = None
elif isinstance(stdin, str):
popen_stdin = subprocess.PIPE
popen_input = stdin
else:
raise ValueError('Unknown value %s passed as stdin.' % repr(stdin))
if stdout is None:
popen_stdout = subprocess.PIPE
elif stdout is PASS:
popen_stdout = None
elif stdout is FAIL:
popen_stdout = subprocess.PIPE
else:
raise ValueError('Unknown value %s passed as stdout.' % repr(stdout))
if stderr is None:
popen_stderr = subprocess.PIPE
elif stderr is PASS:
popen_stderr = None
elif stderr is FAIL:
popen_stderr = subprocess.PIPE
elif stderr is STDOUT:
popen_stderr = subprocess.STDOUT
else:
raise ValueError('Unknown value %s passed as stderr.' % repr(stderr))
if retval is None:
rvcheck = None
elif retval is FAIL:
def do_check(i):
return i == 0
rvcheck = do_check
elif callable(retval):
rvcheck = retval
else:
raise ValueError('Unknown value %s passed as retval.' % repr(retval))
    # Record the FAIL-mode checks now; the names stdout/stderr are rebound to the
    # captured output strings by communicate() below.
    stdout_fail = stdout is FAIL
    stderr_fail = stderr is FAIL
    handle, rv = None, None
try:
handle = subprocess.Popen(popen_args,
executable=executable,
stdin=popen_stdin,
stdout=popen_stdout,
stderr=popen_stderr,
close_fds=True,
cwd=cwd,
env=env,
shell=shell,
preexec_fn=preexec_fn)
stdout, stderr = handle.communicate(input=popen_input)
finally:
if handle is not None:
rv = handle.wait()
    if stdout_fail:
if stdout != '':
e = RunException('Process printed to stdout.')
e.rv = rv
e.stdout = stdout
e.stderr = stderr
raise e
    if stderr_fail:
if stderr != '':
e = RunException('Process printed to stderr.')
e.rv = rv
e.stdout = stdout
e.stderr = stderr
raise e
if rvcheck is not None:
if not rvcheck(rv):
e = RunException('Process return value check failed.')
e.rv = rv
e.stdout = stdout
e.stderr = stderr
raise e
return [rv, stdout, stderr]
| 32.092437 | 129 | 0.615213 | 794 | 0.103954 | 0 | 0 | 0 | 0 | 0 | 0 | 4,086 | 0.534957 |
5f268730f61e34ddeec03c49cb3a27cf05cffa58 | 545 | py | Python | SourceCode/Module2/escape_sequences.py | hackettccp/CIS106SampleCode | 0717fa0f6dc0c48bc51f16ab44e7425b186a35c3 | [
"MIT"
] | 1 | 2019-10-23T03:25:43.000Z | 2019-10-23T03:25:43.000Z | SourceCode/Module2/escape_sequences.py | hackettccp/CIS106 | 0717fa0f6dc0c48bc51f16ab44e7425b186a35c3 | [
"MIT"
] | null | null | null | SourceCode/Module2/escape_sequences.py | hackettccp/CIS106 | 0717fa0f6dc0c48bc51f16ab44e7425b186a35c3 | [
"MIT"
] | null | null | null | """
Demonstrates escape sequences in strings
"""
#Line Feed escape sequence.
output1 = "First part \n Second part"
print(output1)
#********************************#
print()
#Double quotes escape sequence.
output2 = "The book \"War and Peace\" is very long"
print(output2)
#********************************#
print()
#Single quote escape sequence.
output3 = 'That is Tom\'s bike'
print(output3)
#********************************#
print()
#Backslash escape sequence.
output4 = "A single backslash \\ will be inserted"
print(output4)
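#********************************#
print()
#Tab escape sequence (illustrative extra example).
output5 = "Column A \t Column B"
print(output5)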
| 18.793103 | 51 | 0.577982 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 394 | 0.722936 |