Dataset schema (each record below lists these fields in this order, pipe-separated, with the file content inline):

- blob_id: string (length 40)
- directory_id: string (length 40)
- path: string (length 3–616)
- content_id: string (length 40)
- detected_licenses: sequence (length 0–112)
- license_type: string (2 classes)
- repo_name: string (length 5–115)
- snapshot_id: string (length 40)
- revision_id: string (length 40)
- branch_name: string (777 classes)
- visit_date: timestamp[us] (2015-08-06 10:31:46 – 2023-09-06 10:44:38)
- revision_date: timestamp[us] (1970-01-01 02:38:32 – 2037-05-03 13:00:00)
- committer_date: timestamp[us] (1970-01-01 02:38:32 – 2023-09-06 01:08:06)
- github_id: int64 (4.92k–681M, nullable)
- star_events_count: int64 (0–209k)
- fork_events_count: int64 (0–110k)
- gha_license_id: string (22 classes)
- gha_event_created_at: timestamp[us] (2012-06-04 01:52:49 – 2023-09-14 21:59:50, nullable)
- gha_created_at: timestamp[us] (2008-05-22 07:58:19 – 2023-08-21 12:35:19, nullable)
- gha_language: string (149 classes)
- src_encoding: string (26 classes)
- language: string (1 class)
- is_vendor: bool
- is_generated: bool
- length_bytes: int64 (3–10.2M)
- extension: string (188 classes)
- content: string (length 3–10.2M)
- authors: sequence (length 1)
- author_id: string (length 1–132)
a18d6a2dec529d3ec7e607d68fa268b6e10ab14f | fea398a9638acdfa2fb06e7a9695d5894452ded7 | /0x03-python-data_structures/6-print_matrix_integer.py | 94690f8578ca1d676f2b335b5640454178a148b3 | [] | no_license | OscarDRT/holbertonschool-higher_level_programming | d15585aa93ced9bc04464ced9bfd4197e73c42fa | f57ef3344df6350bded78ffce975eea693e67727 | refs/heads/master | 2020-09-30T19:56:30.788311 | 2020-05-14T19:52:10 | 2020-05-14T19:52:10 | 227,360,960 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 319 | py | #!/usr/bin/python3
def print_matrix_integer(matrix=[[]]):
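    # print each row of the matrix on its own line, integers separated by single spaces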
if (matrix):
for i in range(len(matrix)):
for j in range(len(matrix[i])):
print("{:d}".format(matrix[i][j]), end='')
if (j < len(matrix[i]) - 1):
print(' ', end='')
print()
| [
"[email protected]"
] | |
fd7d056dca6eb683dac51fb9acb8977975310b3c | 872cd13f25621825db0c598268ecd21b49cc2c79 | /Lesson_11/unit_tests/test_client.py | 10a7ec604a3caf9f471dcc27973a5e6aa6a5b511 | [] | no_license | ss2576/client_server_applications_Python | c4e9ebe195d23c8ca73211894aa50a74014013d5 | 9b599e37e5dae5af3dca06e197916944f12129d5 | refs/heads/master | 2022-12-15T10:40:22.935880 | 2020-08-12T11:02:21 | 2020-08-12T11:02:21 | 271,764,749 | 0 | 0 | null | 2020-06-12T10:05:00 | 2020-06-12T09:52:03 | Python | UTF-8 | Python | false | false | 1,934 | py | import sys
import os
sys.path.append(os.path.join(os.getcwd(), '..'))
from unittest import TestCase, main
from common.classes import *
from common.variables import *
from common.utils import *
from common.codes import *
class TestJimClasses(TestCase):
def test_request_dict(self):
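        # get_dict() should embed the action, a current timestamp and the (possibly empty) body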
body = 'test'
time = dt.timestamp(dt.now())
request = Request(RequestAction.PRESENCE, body)
self.assertEqual(request.get_dict(), {ACTION: RequestAction.PRESENCE, TIME: time, BODY: body})
request = Request(RequestAction.QUIT)
self.assertEqual(request.get_dict(), {ACTION: RequestAction.QUIT, TIME: time, BODY: ''})
self.assertRaises(TypeError, Request)
def test_response_dict(self):
time = dt.timestamp(dt.now())
response = Response(OK)
self.assertEqual(response.get_dict(), {CODE: 200, TIME: time, MESSAGE: 'OK'})
self.assertRaises(TypeError, Response)
class TestJimFunctions(TestCase):
class TestSocket:
encoded_data = None
request = None
def __init__(self, data):
self.data = data
def send(self, request):
json_str = json.dumps(self.data)
self.encoded_data = json_str.encode(ENCODING)
self.request = request
def recv(self, buf):
json_str = json.dumps(self.data)
return json_str.encode(ENCODING)
def test_send_request(self):
request = Request(RequestAction.MESSAGE)
socket = self.TestSocket(request.get_dict())
send_data(socket, request)
self.assertEqual(socket.encoded_data, socket.request)
def test_get_data(self):
response = Response(BASIC)
socket = self.TestSocket(response.get_dict())
self.assertEqual(get_data(socket), response.get_dict())
self.assertEqual(Response.from_dict(get_data(socket)), response)
if __name__ == '__main__':
main() | [
"[email protected]"
] | |
2f07645844113c62897b33114cef7c03ca4b7b31 | 7d172bc83bc61768a09cc97746715b8ec0e13ced | /facebook/views.py | bc76a115e91554ace708d6e9fc2227bacf2b21cf | [] | no_license | shivam1111/jjuice | a3bcd7ee0ae6647056bdc62ff000ce6e6af27594 | 6a2669795ed4bb4495fda7869eeb221ed6535582 | refs/heads/master | 2020-04-12T05:01:27.981792 | 2018-11-08T13:00:49 | 2018-11-08T13:00:49 | 81,114,622 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 581 | py | from django.shortcuts import render
from django.views import View
from django.http import JsonResponse
import requests
from django.conf import settings
class Ratings(View):
def get(self, request, template_name="index.html"):
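        # proxy the page's ratings from the Facebook Graph API and return the JSON unchanged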
response = {}
payload ={
'access_token':settings.FACEBOOK_ACCESSS_TOKEN,
'fields':"has_rating,has_review,rating,review_text,reviewer"
}
res = requests.get('https://graph.facebook.com/v2.9/vapejjuice/ratings',params=payload)
return JsonResponse(data=res.json(), status=200, safe=False)
| [
"[email protected]"
] | |
1dc802022a2096fe6390e9c8c00491b79e22fd57 | c7a5448821669b2fdebf5c2a4eb0ea70bba545d3 | /creme/optim/adam.py | 3c29444f912d89f1b786e209b735cfb90c961960 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | brp-sara/creme | e5eb44e5d75cea0120c8fd17c20a963a1fe6c153 | 56c3baf6ee160015b72ab8ebedc0e03da32a6eae | refs/heads/master | 2020-09-08T17:10:18.903069 | 2019-11-11T12:14:32 | 2019-11-11T12:14:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,834 | py | import collections
from . import base
__all__ = ['Adam']
class Adam(base.Optimizer):
"""Adam optimizer.
Example:
::
>>> from creme import linear_model
>>> from creme import metrics
>>> from creme import model_selection
>>> from creme import optim
>>> from creme import preprocessing
>>> from creme import stream
>>> from sklearn import datasets
>>> X_y = stream.iter_sklearn_dataset(
... dataset=datasets.load_breast_cancer(),
... shuffle=True,
... random_state=42
... )
>>> optimizer = optim.Adam()
>>> model = (
... preprocessing.StandardScaler() |
... linear_model.LogisticRegression(optimizer)
... )
>>> metric = metrics.F1()
>>> model_selection.online_score(X_y, model, metric)
F1: 0.959554
References:
1. `Adam: A method for stochastic optimization <https://arxiv.org/pdf/1412.6980.pdf>`_
"""
def __init__(self, lr=0.1, beta_1=0.9, beta_2=0.999, eps=1e-8):
super().__init__(lr)
self.beta_1 = beta_1
self.beta_2 = beta_2
self.eps = eps
self.m = collections.defaultdict(float)
self.v = collections.defaultdict(float)
def _update_after_pred(self, w, g):
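        # keep exponentially decayed estimates of the gradient's first moment (m)
        # and second moment (v), correct them for initialization bias, then step
        # each weight by learning_rate * m / (sqrt(v) + eps)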
for i, gi in g.items():
self.m[i] = self.beta_1 * self.m[i] + (1 - self.beta_1) * gi
self.v[i] = self.beta_2 * self.v[i] + (1 - self.beta_2) * gi ** 2
m = self.m[i] / (1 - self.beta_1 ** (self.n_iterations + 1))
v = self.v[i] / (1 - self.beta_2 ** (self.n_iterations + 1))
w[i] -= self.learning_rate * m / (v ** 0.5 + self.eps)
return w
| [
"[email protected]"
] | |
81eff45dface1cc77149b38692253a13f88601ea | 10d98fecb882d4c84595364f715f4e8b8309a66f | /neural_additive_models/nam_train.py | 227023ece7cb38a085e91249181db0bf08cbda5e | [
"CC-BY-4.0",
"Apache-2.0"
] | permissive | afcarl/google-research | 51c7b70d176c0d70a5ee31ea1d87590f3d6c6f42 | 320a49f768cea27200044c0d12f394aa6c795feb | refs/heads/master | 2021-12-02T18:36:03.760434 | 2021-09-30T20:59:01 | 2021-09-30T21:07:02 | 156,725,548 | 1 | 0 | Apache-2.0 | 2018-11-08T15:13:53 | 2018-11-08T15:13:52 | null | UTF-8 | Python | false | false | 14,352 | py | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
r"""Training script for Neural Additive Models.
"""
import operator
import os
from typing import Tuple, Iterator, List, Dict
from absl import app
from absl import flags
import numpy as np
import tensorflow.compat.v1 as tf
from neural_additive_models import data_utils
from neural_additive_models import graph_builder
gfile = tf.io.gfile
DatasetType = data_utils.DatasetType
FLAGS = flags.FLAGS
flags.DEFINE_integer('training_epochs', None,
'The number of epochs to run training for.')
flags.DEFINE_float('learning_rate', 1e-2, 'Hyperparameter: learning rate.')
flags.DEFINE_float('output_regularization', 0.0, 'Hyperparameter: feature reg')
flags.DEFINE_float('l2_regularization', 0.0, 'Hyperparameter: l2 weight decay')
flags.DEFINE_integer('batch_size', 1024, 'Hyperparameter: batch size.')
flags.DEFINE_string('logdir', None, 'Path to dir where to store summaries.')
flags.DEFINE_string('dataset_name', 'Teleco',
'Name of the dataset to load for training.')
flags.DEFINE_float('decay_rate', 0.995, 'Hyperparameter: Optimizer decay rate')
flags.DEFINE_float('dropout', 0.5, 'Hyperparameter: Dropout rate')
flags.DEFINE_integer(
'data_split', 1, 'Dataset split index to use. Possible '
'values are 1 to `FLAGS.num_splits`.')
flags.DEFINE_integer('tf_seed', 1, 'seed for tf.')
flags.DEFINE_float('feature_dropout', 0.0,
'Hyperparameter: Prob. with which features are dropped')
flags.DEFINE_integer(
'num_basis_functions', 1000, 'Number of basis functions '
'to use in a FeatureNN for a real-valued feature.')
flags.DEFINE_integer('units_multiplier', 2, 'Number of basis functions for a '
'categorical feature')
flags.DEFINE_boolean(
'cross_val', False, 'Boolean flag indicating whether to '
'perform cross validation or not.')
flags.DEFINE_integer(
'max_checkpoints_to_keep', 1, 'Indicates the maximum '
'number of recent checkpoint files to keep.')
flags.DEFINE_integer(
'save_checkpoint_every_n_epochs', 10, 'Indicates the '
'number of epochs after which an checkpoint is saved')
flags.DEFINE_integer('n_models', 1, 'the number of models to train.')
flags.DEFINE_integer('num_splits', 3, 'Number of data splits to use')
flags.DEFINE_integer('fold_num', 1, 'Index of the fold to be used')
flags.DEFINE_string(
'activation', 'exu', 'Activation function to used in the '
'hidden layer. Possible options: (1) relu, (2) exu')
flags.DEFINE_boolean(
'regression', False, 'Boolean flag indicating whether we '
'are solving a regression task or a classification task.')
flags.DEFINE_boolean('debug', False, 'Debug mode. Log additional things')
flags.DEFINE_boolean('shallow', False, 'Whether to use shallow or deep NN.')
flags.DEFINE_boolean('use_dnn', False, 'Deep NN baseline.')
flags.DEFINE_integer('early_stopping_epochs', 60, 'Early stopping epochs')
_N_FOLDS = 5
GraphOpsAndTensors = graph_builder.GraphOpsAndTensors
EvaluationMetric = graph_builder.EvaluationMetric
@flags.multi_flags_validator(['data_split', 'cross_val'],
message='Data split should not be used in '
'conjunction with cross validation')
def data_split_with_cross_validation(flags_dict):
return (flags_dict['data_split'] == 1) or (not flags_dict['cross_val'])
def _get_train_and_lr_decay_ops(
graph_tensors_and_ops,
early_stopping):
"""Returns training and learning rate decay ops."""
train_ops = [
g['train_op']
for n, g in enumerate(graph_tensors_and_ops)
if not early_stopping[n]
]
lr_decay_ops = [
g['lr_decay_op']
for n, g in enumerate(graph_tensors_and_ops)
if not early_stopping[n]
]
return train_ops, lr_decay_ops
def _update_latest_checkpoint(checkpoint_dir,
best_checkpoint_dir):
"""Updates the latest checkpoint in `best_checkpoint_dir` from `checkpoint_dir`."""
for filename in gfile.glob(os.path.join(best_checkpoint_dir, 'model.*')):
gfile.remove(filename)
for name in gfile.glob(os.path.join(checkpoint_dir, 'model.*')):
gfile.copy(
name,
os.path.join(best_checkpoint_dir, os.path.basename(name)),
overwrite=True)
def _create_computation_graph(
x_train, y_train, x_validation,
y_validation, batch_size
):
"""Build the computation graph."""
graph_tensors_and_ops = []
metric_scores = []
for n in range(FLAGS.n_models):
graph_tensors_and_ops_n, metric_scores_n = graph_builder.build_graph(
x_train=x_train,
y_train=y_train,
x_test=x_validation,
y_test=y_validation,
activation=FLAGS.activation,
learning_rate=FLAGS.learning_rate,
batch_size=batch_size,
shallow=FLAGS.shallow,
output_regularization=FLAGS.output_regularization,
l2_regularization=FLAGS.l2_regularization,
dropout=FLAGS.dropout,
num_basis_functions=FLAGS.num_basis_functions,
units_multiplier=FLAGS.units_multiplier,
decay_rate=FLAGS.decay_rate,
feature_dropout=FLAGS.feature_dropout,
regression=FLAGS.regression,
use_dnn=FLAGS.use_dnn,
trainable=True,
name_scope=f'model_{n}')
graph_tensors_and_ops.append(graph_tensors_and_ops_n)
metric_scores.append(metric_scores_n)
return graph_tensors_and_ops, metric_scores
def _create_graph_saver(graph_tensors_and_ops,
logdir, num_steps_per_epoch):
"""Create saving hook(s) as well as model and checkpoint directories."""
saver_hooks, model_dirs, best_checkpoint_dirs = [], [], []
save_steps = num_steps_per_epoch * FLAGS.save_checkpoint_every_n_epochs
# The MonitoredTraining Session counter increments by `n_models`
save_steps = save_steps * FLAGS.n_models
for n in range(FLAGS.n_models):
scaffold = tf.train.Scaffold(
saver=tf.train.Saver(
var_list=graph_tensors_and_ops[n]['nn_model'].trainable_variables,
save_relative_paths=True,
max_to_keep=FLAGS.max_checkpoints_to_keep))
model_dirs.append(os.path.join(logdir, 'model_{}').format(n))
best_checkpoint_dirs.append(os.path.join(model_dirs[-1], 'best_checkpoint'))
gfile.makedirs(best_checkpoint_dirs[-1])
saver_hook = tf.train.CheckpointSaverHook(
checkpoint_dir=model_dirs[-1], save_steps=save_steps, scaffold=scaffold)
saver_hooks.append(saver_hook)
return saver_hooks, model_dirs, best_checkpoint_dirs
def _update_metrics_and_checkpoints(sess,
epoch,
metric_scores,
curr_best_epoch,
best_validation_metric,
best_train_metric,
model_dir,
best_checkpoint_dir,
metric_name = 'RMSE'):
"""Update metric scores and latest checkpoint."""
# Minimize RMSE and maximize AUROC
compare_metric = operator.lt if FLAGS.regression else operator.gt
# Calculate the AUROC/RMSE on the validation split
validation_metric = metric_scores['test'](sess)
if FLAGS.debug:
tf.logging.info('Epoch %d %s Val %.4f', epoch, metric_name,
validation_metric)
if compare_metric(validation_metric, best_validation_metric):
curr_best_epoch = epoch
best_validation_metric = validation_metric
best_train_metric = metric_scores['train'](sess)
# copy the checkpoints files *.meta *.index, *.data* each time
# there is a better result
_update_latest_checkpoint(model_dir, best_checkpoint_dir)
return curr_best_epoch, best_validation_metric, best_train_metric
def training(x_train, y_train, x_validation,
y_validation,
logdir):
"""Trains the Neural Additive Model (NAM).
Args:
x_train: Training inputs.
y_train: Training labels.
x_validation: Validation inputs.
y_validation: Validation labels.
logdir: dir to save the checkpoints.
Returns:
Best train and validation evaluation metric obtained during NAM training.
"""
tf.logging.info('Started training with logdir %s', logdir)
batch_size = min(FLAGS.batch_size, x_train.shape[0])
num_steps_per_epoch = x_train.shape[0] // batch_size
# Keep track of the best validation RMSE/AUROC and train AUROC score which
# corresponds to the best validation metric score.
if FLAGS.regression:
best_train_metric = np.inf * np.ones(FLAGS.n_models)
best_validation_metric = np.inf * np.ones(FLAGS.n_models)
else:
best_train_metric = np.zeros(FLAGS.n_models)
best_validation_metric = np.zeros(FLAGS.n_models)
# Set to a large value to avoid early stopping initially during training
curr_best_epoch = np.full(FLAGS.n_models, np.inf)
# Boolean variables to indicate whether the training of a specific model has
# been early stopped.
early_stopping = [False] * FLAGS.n_models
# Classification: AUROC, Regression : RMSE Score
metric_name = 'RMSE' if FLAGS.regression else 'AUROC'
tf.reset_default_graph()
with tf.Graph().as_default():
tf.compat.v1.set_random_seed(FLAGS.tf_seed)
# Setup your training.
graph_tensors_and_ops, metric_scores = _create_computation_graph(
x_train, y_train, x_validation, y_validation, batch_size)
train_ops, lr_decay_ops = _get_train_and_lr_decay_ops(
graph_tensors_and_ops, early_stopping)
global_step = tf.train.get_or_create_global_step()
increment_global_step = tf.assign(global_step, global_step + 1)
saver_hooks, model_dirs, best_checkpoint_dirs = _create_graph_saver(
graph_tensors_and_ops, logdir, num_steps_per_epoch)
if FLAGS.debug:
summary_writer = tf.summary.FileWriter(os.path.join(logdir, 'tb_log'))
with tf.train.MonitoredSession(hooks=saver_hooks) as sess:
for n in range(FLAGS.n_models):
sess.run([
graph_tensors_and_ops[n]['iterator_initializer'],
graph_tensors_and_ops[n]['running_vars_initializer']
])
for epoch in range(1, FLAGS.training_epochs + 1):
if not all(early_stopping):
for _ in range(num_steps_per_epoch):
sess.run(train_ops) # Train the network
# Decay the learning rate by a fixed ratio every epoch
sess.run(lr_decay_ops)
else:
tf.logging.info('All models early stopped at epoch %d', epoch)
break
for n in range(FLAGS.n_models):
if early_stopping[n]:
sess.run(increment_global_step)
continue
# Log summaries
if FLAGS.debug:
global_summary, global_step = sess.run([
graph_tensors_and_ops[n]['summary_op'],
graph_tensors_and_ops[n]['global_step']
])
summary_writer.add_summary(global_summary, global_step)
if epoch % FLAGS.save_checkpoint_every_n_epochs == 0:
(curr_best_epoch[n], best_validation_metric[n],
best_train_metric[n]) = _update_metrics_and_checkpoints(
sess, epoch, metric_scores[n], curr_best_epoch[n],
best_validation_metric[n], best_train_metric[n], model_dirs[n],
best_checkpoint_dirs[n], metric_name)
if curr_best_epoch[n] + FLAGS.early_stopping_epochs < epoch:
tf.logging.info('Early stopping at epoch {}'.format(epoch))
early_stopping[n] = True # Set early stopping for model `n`.
train_ops, lr_decay_ops = _get_train_and_lr_decay_ops(
graph_tensors_and_ops, early_stopping)
# Reset running variable counters
sess.run(graph_tensors_and_ops[n]['running_vars_initializer'])
tf.logging.info('Finished training.')
for n in range(FLAGS.n_models):
tf.logging.info(
'Model %d: Best Epoch %d, Individual %s: Train %.4f, Validation %.4f',
n, curr_best_epoch[n], metric_name, best_train_metric[n],
best_validation_metric[n])
return np.mean(best_train_metric), np.mean(best_validation_metric)
def create_test_train_fold(
fold_num
):
"""Splits the dataset into training and held-out test set."""
data_x, data_y, _ = data_utils.load_dataset(FLAGS.dataset_name)
tf.logging.info('Dataset: %s, Size: %d', FLAGS.dataset_name, data_x.shape[0])
tf.logging.info('Cross-val fold: %d/%d', FLAGS.fold_num, _N_FOLDS)
# Get the training and test set based on the StratifiedKFold split
(x_train_all, y_train_all), test_dataset = data_utils.get_train_test_fold(
data_x,
data_y,
fold_num=fold_num,
num_folds=_N_FOLDS,
stratified=not FLAGS.regression)
data_gen = data_utils.split_training_dataset(
x_train_all,
y_train_all,
FLAGS.num_splits,
stratified=not FLAGS.regression)
return data_gen, test_dataset
def single_split_training(data_gen,
logdir):
"""Uses a specific (training, validation) split for NAM training."""
for _ in range(FLAGS.data_split):
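    # keep advancing the generator; the pair left after the loop is the requested split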
(x_train, y_train), (x_validation, y_validation) = next(data_gen)
curr_logdir = os.path.join(logdir, 'fold_{}',
'split_{}').format(FLAGS.fold_num,
FLAGS.data_split)
training(x_train, y_train, x_validation, y_validation, curr_logdir)
def main(argv):
del argv # Unused
tf.logging.set_verbosity(tf.logging.INFO)
data_gen, _ = create_test_train_fold(FLAGS.fold_num)
single_split_training(data_gen, FLAGS.logdir)
if __name__ == '__main__':
flags.mark_flag_as_required('logdir')
flags.mark_flag_as_required('training_epochs')
app.run(main)
| [
"[email protected]"
] | |
0c84a9d6e3298e137bf520780a4fa47a312b78ad | 2324d8e4544a9b813153ce0ed0f858972ea7f909 | /135-分发糖果.py | fc857a4ba5410fc1316af0a1170fd5c03458002d | [] | no_license | Terry-Ma/Leetcode | af8a4ad8059975f8d12b0351610336f1f5f01097 | cc7f41e2fb3ed5734c2a5af97e49a5bc17afbceb | refs/heads/master | 2021-08-10T16:40:20.482851 | 2021-07-03T08:35:56 | 2021-07-03T08:35:56 | 225,814,239 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 490 | py | class Solution:
def candy(self, ratings: List[int]) -> int:
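        # two-pass greedy: the left-to-right pass enforces the "more than the left
        # neighbour" rule in `left`; the right-to-left pass tracks the right-neighbour
        # requirement in `right`, and each child receives max(left[i], right)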
left = [1] * len(ratings)
right = 1
for i in range(1, len(ratings)):
if ratings[i] > ratings[i - 1]:
left[i] = left[i - 1] + 1
res = left[-1]
for i in range(len(ratings) - 2, -1, -1):
if ratings[i] > ratings[i + 1]:
right += 1
else:
right = 1
res += max(right, left[i])
return res
| [
"[email protected]"
] | |
f61caaf7302bda93ce12e0e98e8ec61ca87ffdfc | cde11aea86ce9e1e370b02fb14553358b4aaab8b | /practice/hard/_51_disk_stacking.py | 69b5dbd133b93eaa83f31887401f81f3562c17be | [] | no_license | pavankumarag/ds_algo_problem_solving_python | 56f9a2bb64dd62f16028c3f49a72542b8588369a | cbd323de31f2f4a4b35334ce3249bb3e9525dbf8 | refs/heads/master | 2023-06-21T20:29:41.317005 | 2023-06-10T18:11:39 | 2023-06-10T18:11:39 | 223,919,558 | 2 | 1 | null | 2023-06-10T18:11:40 | 2019-11-25T10:16:27 | Python | UTF-8 | Python | false | false | 780 | py | """
Tower of Hanoi
We have three rods and n disks. The objective of the puzzle is to move the entire stack to another rod,
obeying the following simple rules:
1) Only one disk can be moved at a time.
2) Each move consists of taking the upper disk from one of the stacks and placing it on top of another stack i.e.
a disk can only be moved if it is the uppermost disk on a stack.
3) No disk may be placed on top of a smaller disk.
"""
def tower_of_honoi(n, from_rod, to_rod, aux_rod):
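    # move the top n-1 disks to the auxiliary rod, move the largest disk to the
    # target rod, then move the n-1 disks from the auxiliary rod onto it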
if n == 1:
print "Move disk 1 from ", from_rod, "to ", to_rod
return
tower_of_honoi(n-1, from_rod, aux_rod, to_rod)
print "Move disk",n, "from ", from_rod, "to ", to_rod
tower_of_honoi(n-1, aux_rod, to_rod, from_rod)
if __name__ == "__main__":
n = 4
tower_of_honoi(n, 'A', 'C', 'B') | [
"[email protected]"
] | |
ec25fbfa0846875e29b7c321050a45e0d6c05ffb | 65e54ca14ac21d2c2572ba35ba351df5903cb667 | /src/petronia/core/layout/binding/bootstrap.py | 7fc0858f4fa68c43fe1e660bdcc50a8a0f177cf0 | [
"MIT"
] | permissive | groboclown/petronia | 29b93e88b82d2732bb529621ad8bff50334d36b9 | 486338023d19cee989e92f0c5692680f1a37811f | refs/heads/master | 2022-07-25T10:08:58.468385 | 2020-01-23T14:59:03 | 2020-01-23T14:59:03 | 71,741,212 | 22 | 3 | NOASSERTION | 2022-07-13T15:27:32 | 2016-10-24T01:30:01 | Python | UTF-8 | Python | false | false | 7,016 | py |
"""
Bootstrap the hotkey bindings for the layout events.
"""
from typing import List
from ....aid.std import i18n as _
from ....aid.std import (
EventBus,
EventId,
ParticipantId,
ErrorReport,
report_error,
create_user_error,
)
from ....aid.bootstrap import (
ANY_VERSION,
create_singleton_identity,
)
from ....aid.lifecycle import create_module_listener_helper
from ....base.internal_.internal_extension import petronia_extension
from ....base.util.simple_type import (
PersistTypeSchemaItem,
PERSISTENT_TYPE_SCHEMA_NAME__DOC,
PERSISTENT_TYPE_SCHEMA_TYPE__BOOL,
PERSISTENT_TYPE_SCHEMA_TYPE__STR,
PERSISTENT_TYPE_SCHEMA_TYPE__FLOAT,
optional_str, optional_int, optional_bool,
collect_errors,
)
from ...hotkeys.api import (
HotkeyEventTriggeredEvent,
BoundServiceActionSchema,
as_hotkey_event_triggered_listener,
)
from ..tile.api import (
RequestMoveResizeFocusedWindowEvent,
send_request_move_resize_focused_window_event,
RequestShiftLayoutFocusEvent,
send_request_shift_layout_focus_event,
RequestSetFocusedWindowVisibilityEvent,
send_request_set_window_visibility_event,
)
from ..window.api import (
)
from ..navigation.api import (
)
TARGET_ID_LAYOUT_HOTKEYS = create_singleton_identity("core.layout.binding")
HOTKEY_ACTION_MOVE_ACTIVE = 'move-active'
HOTKEY_ACTION_SHIFT_FOCUS = 'shift-focus'
HOTKEY_ACTION_SET_VISIBILITY = 'set-visible'
def bootstrap_layout_handlers(bus: EventBus) -> None:
listeners = create_module_listener_helper(bus, TARGET_ID_LAYOUT_HOTKEYS)
def handler(
_event_id: EventId,
_target_id: ParticipantId,
event_obj: HotkeyEventTriggeredEvent
) -> None:
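        # translate the triggered hotkey action and its parameters into the
        # corresponding layout request event, collecting parameter errors for reporting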
errors: List[ErrorReport] = []
# -------------------------------------------------------------------
if event_obj.data.action == HOTKEY_ACTION_MOVE_ACTIVE:
dx = collect_errors(errors, optional_int(
event_obj.data.parameters, 'dx',
lambda: create_user_error(handler, _('"dx" must be a number'))
)) or 0
dy = collect_errors(errors, optional_int(
event_obj.data.parameters, 'dy',
lambda: create_user_error(handler, _('"dy" must be a number'))
)) or 0
dw = collect_errors(errors, optional_int(
event_obj.data.parameters, 'dw',
lambda: create_user_error(handler, _('"dw" must be a number'))
)) or 0
dh = collect_errors(errors, optional_int(
event_obj.data.parameters, 'dh',
lambda: create_user_error(handler, _('"dh" must be a number'))
)) or 0
dz = collect_errors(errors, optional_int(
event_obj.data.parameters, 'dz',
lambda: create_user_error(handler, _('"dz" must be a number'))
)) or 0
send_request_move_resize_focused_window_event(bus, dx, dy, dw, dh, dz)
# -------------------------------------------------------------------
elif event_obj.data.action == HOTKEY_ACTION_SHIFT_FOCUS:
name = collect_errors(errors, optional_str(
event_obj.data.parameters, 'name',
lambda: create_user_error(handler, _('"name" must be a string'))
)) or ''
index = collect_errors(errors, optional_int(
event_obj.data.parameters, 'index',
lambda: create_user_error(handler, _('"index" must be a number'))
)) or 0
print("DEBUG data {0} -> {1}/{2}".format(event_obj.data.parameters, name, index))
send_request_shift_layout_focus_event(bus, name, index)
# -------------------------------------------------------------------
elif event_obj.data.action == HOTKEY_ACTION_SET_VISIBILITY:
visible = collect_errors(errors, optional_bool(
event_obj.data.parameters, 'visible',
lambda: create_user_error(handler, _('"visible" must be true or false'))
)) or False
send_request_set_window_visibility_event(bus, visible)
for error in errors:
report_error(bus, error)
listeners.listen(TARGET_ID_LAYOUT_HOTKEYS, as_hotkey_event_triggered_listener, handler)
listeners.bind_hotkey(
BoundServiceActionSchema(
TARGET_ID_LAYOUT_HOTKEYS, HOTKEY_ACTION_MOVE_ACTIVE, {
PERSISTENT_TYPE_SCHEMA_NAME__DOC: PersistTypeSchemaItem(
RequestMoveResizeFocusedWindowEvent.__doc__ or '',
PERSISTENT_TYPE_SCHEMA_TYPE__STR
),
"dx": PersistTypeSchemaItem(
"Change in window x position (move)", PERSISTENT_TYPE_SCHEMA_TYPE__FLOAT
),
"dy": PersistTypeSchemaItem(
"Change in window y position (move)", PERSISTENT_TYPE_SCHEMA_TYPE__FLOAT
),
"dw": PersistTypeSchemaItem(
"Change in window width (resize)", PERSISTENT_TYPE_SCHEMA_TYPE__FLOAT
),
"dh": PersistTypeSchemaItem(
"Change in window height (resize)", PERSISTENT_TYPE_SCHEMA_TYPE__FLOAT
),
"dz": PersistTypeSchemaItem(
"Change in window z-order (focus)", PERSISTENT_TYPE_SCHEMA_TYPE__FLOAT
),
}
)
)
listeners.bind_hotkey(
BoundServiceActionSchema(
TARGET_ID_LAYOUT_HOTKEYS, HOTKEY_ACTION_SHIFT_FOCUS, {
PERSISTENT_TYPE_SCHEMA_NAME__DOC: PersistTypeSchemaItem(
RequestShiftLayoutFocusEvent.__doc__ or '',
PERSISTENT_TYPE_SCHEMA_TYPE__STR
),
"name": PersistTypeSchemaItem(
"Layout focus shift name", PERSISTENT_TYPE_SCHEMA_TYPE__STR
),
"index": PersistTypeSchemaItem(
"Layout focus shift index", PERSISTENT_TYPE_SCHEMA_TYPE__FLOAT
),
}
)
)
listeners.bind_hotkey(
BoundServiceActionSchema(
TARGET_ID_LAYOUT_HOTKEYS, HOTKEY_ACTION_SET_VISIBILITY, {
PERSISTENT_TYPE_SCHEMA_NAME__DOC: PersistTypeSchemaItem(
RequestSetFocusedWindowVisibilityEvent.__doc__ or '',
PERSISTENT_TYPE_SCHEMA_TYPE__STR
),
"visible": PersistTypeSchemaItem(
"True to make the window visible, False to make it hidden", PERSISTENT_TYPE_SCHEMA_TYPE__BOOL
),
}
)
)
EXTENSION_METADATA = petronia_extension({
"name": "core.layout.binding",
"type": "standalone",
"version": (1, 0, 0,),
"depends": ({
"extension": "core.hotkeys.api",
"minimum": ANY_VERSION,
}, {
"extension": "core.layout.api",
"minimum": ANY_VERSION,
},),
})
| [
"[email protected]"
] | |
c1d39ebc5f1174152c28d88c2a6e92745f8fea7c | 1e35944fcd9a0e2209e069fb0056f23597e3196c | /0x02-python-import_modules/4-hidden_discovery.py | 9f95073b69cc6970c576aeb2f8a13779a4a17885 | [] | no_license | sonnentag/holbertonschool-higher_level_programming | 1496be9390f557cfa7a3e31bb74b208a7dfbb98f | 5992e3c7ff97ab3fefe33bec5632bdca4d3d8a05 | refs/heads/master | 2022-12-23T12:47:02.957781 | 2020-09-25T04:01:27 | 2020-09-25T04:01:27 | 259,382,948 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 148 | py | #!/usr/bin/python3
if __name__ == "__main__":
import hidden_4
for func in dir(hidden_4):
if func[1] != "_":
print(func)
| [
"[email protected]"
] | |
919078d7b56ca845ac4d22fcaa4f1a78a15a1fd6 | 8f4710009ca956bd3780cb423d9f4aa896d61183 | /hsds/servicenode_lib.py | 787f486f609faa18da7934fb8a1bc1f1498e4e69 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | murlock/hsds | b5482ee3f36680728f1b64034c2f6b9c3bd4cad7 | 9f5fc3cdb64017d07e34eb422eee5398553d213c | refs/heads/master | 2020-06-18T06:35:06.817652 | 2019-07-09T02:01:06 | 2019-07-09T02:01:06 | 196,197,570 | 0 | 0 | Apache-2.0 | 2019-07-10T12:02:51 | 2019-07-10T12:02:50 | null | UTF-8 | Python | false | false | 10,606 | py | ##############################################################################
# Copyright by The HDF Group. #
# All rights reserved. #
# #
# This file is part of HSDS (HDF5 Scalable Data Service), Libraries and #
# Utilities. The full HSDS copyright notice, including #
# terms governing use, modification, and redistribution, is contained in #
# the file COPYING, which can be found at the root of the source code #
# distribution tree. If you do not have access to this file, you may #
# request a copy from [email protected]. #
##############################################################################
#
# service node of hsds cluster
#
import os.path as op
from aiohttp.web_exceptions import HTTPBadRequest, HTTPForbidden, HTTPNotFound, HTTPInternalServerError
from util.idUtil import getDataNodeUrl, getCollectionForId, isSchema2Id, getS3Key
from util.s3Util import getS3JSONObj
from util.authUtil import aclCheck
from util.httpUtil import http_get
from util.domainUtil import getBucketForDomain
import hsds_logger as log
async def getDomainJson(app, domain, reload=False):
""" Return domain JSON from cache or fetch from DN if not found
Note: only call from sn!
"""
# TBD - default reload to True because some h5pyd tests fail due to
# cached values being picked up (test case deletes/re-creates domain)
# It would be desirable to use default of False to avoid extra
# round-trips to DN node
log.info(f"getDomainJson({domain}, reload={reload})")
if app["node_type"] != "sn":
log.error("wrong node_type")
raise HTTPInternalServerError()
domain_cache = app["domain_cache"]
if domain in domain_cache:
if reload:
del domain_cache[domain]
else:
log.debug("returning domain_cache value")
return domain_cache[domain]
req = getDataNodeUrl(app, domain)
req += "/domains"
params = { "domain": domain }
log.debug(f"sending dn req: {req}")
domain_json = await http_get(app, req, params=params)
if 'owner' not in domain_json:
log.warn("No owner key found in domain")
raise HTTPInternalServerError()
if 'acls' not in domain_json:
log.warn("No acls key found in domain")
raise HTTPInternalServerError()
domain_cache[domain] = domain_json # add to cache
return domain_json
async def validateAction(app, domain, obj_id, username, action):
""" check that the given object belongs in the domain and that the
    requested action (create, read, update, delete, readACL, updateACL)
is permitted for the requesting user.
"""
meta_cache = app['meta_cache']
log.info(f"validateAction(domain={domain}, obj_id={obj_id}, username={username}, action={action})")
# get domain JSON
domain_json = await getDomainJson(app, domain)
if "root" not in domain_json:
msg = f"Expected root key for domain: {domain}"
log.warn(msg)
raise HTTPBadRequest(reason=msg)
obj_json = None
if obj_id in meta_cache:
obj_json = meta_cache[obj_id]
else:
# fetch from DN
collection = getCollectionForId(obj_id)
req = getDataNodeUrl(app, obj_id)
req += '/' + collection + '/' + obj_id
bucket = getBucketForDomain(domain)
params = {}
if bucket:
params["bucket"] = bucket
obj_json = await http_get(app, req, params=params)
meta_cache[obj_id] = obj_json
log.debug("obj_json[root]: {} domain_json[root]: {}".format(obj_json["root"], domain_json["root"]))
if obj_json["root"] != domain_json["root"]:
log.info("unexpected root, reloading domain")
domain_json = await getDomainJson(app, domain, reload=True)
if "root" not in domain_json or obj_json["root"] != domain_json["root"]:
msg = "Object id is not a member of the given domain"
log.warn(msg)
raise HTTPBadRequest(reason=msg)
if action not in ("create", "read", "update", "delete", "readACL", "updateACL"):
log.error(f"unexpected action: {action}")
raise HTTPInternalServerError()
reload = False
try:
aclCheck(domain_json, action, username) # throws exception if not allowed
except HTTPForbidden:
log.info(f"got HttpProcessing error on validate action for domain: {domain}, reloading...")
# just in case the ACL was recently updated, refetch the domain
reload = True
if reload:
domain_json = await getDomainJson(app, domain, reload=True)
aclCheck(domain_json, action, username)
async def getObjectJson(app, obj_id, bucket=None, refresh=False, include_links=False, include_attrs=False):
""" Return top-level json (i.e. excluding attributes or links by default) for a given obj_id.
If refresh is False, any data present in the meta_cache will be returned. If not
    the DN will be queried, and any resultant data added to the meta_cache.
Note: meta_cache values may be stale, but use of immutable data (e.g. type of a dataset)
is always valid
"""
meta_cache = app['meta_cache']
obj_json = None
if include_links or include_attrs:
# links and attributes are subject to change, so always refresh
refresh = True
log.info(f"getObjectJson {obj_id}")
if obj_id in meta_cache and not refresh:
log.debug(f"found {obj_id} in meta_cache")
obj_json = meta_cache[obj_id]
else:
req = getDataNodeUrl(app, obj_id)
collection = getCollectionForId(obj_id)
params = {}
if include_links:
params["include_links"] = 1
if include_attrs:
params["include_attrs"] = 1
if bucket:
params["bucket"] = bucket
req += '/' + collection + '/' + obj_id
obj_json = await http_get(app, req, params=params) # throws 404 if doesn't exist
meta_cache[obj_id] = obj_json
if obj_json is None:
msg = f"Object: {obj_id} not found"
log.warn(msg)
raise HTTPNotFound()
return obj_json
async def getObjectIdByPath(app, obj_id, h5path, bucket=None, refresh=False):
""" Find the object at the provided h5path location.
If not found raise 404 error.
"""
log.info(f"getObjectIdByPath obj_id: {obj_id} h5path: {h5path} refresh: {refresh}")
if h5path.startswith("./"):
h5path = h5path[2:] # treat as relative path
links = h5path.split('/')
for link in links:
if not link:
continue # skip empty link
log.debug(f"getObjectIdByPath for objid: {obj_id} got link: {link}")
if getCollectionForId(obj_id) != "groups":
# not a group, so won't have links
msg = f"h5path: {h5path} not found"
log.warn(msg)
raise HTTPNotFound()
req = getDataNodeUrl(app, obj_id)
req += "/groups/" + obj_id + "/links/" + link
log.debug("get LINK: " + req)
params = {}
if bucket:
params["bucket"] = bucket
link_json = await http_get(app, req, params=params)
log.debug("got link_json: " + str(link_json))
if link_json["class"] != 'H5L_TYPE_HARD':
# don't follow soft/external links
msg = f"h5path: {h5path} not found"
log.warn(msg)
raise HTTPInternalServerError()
obj_id = link_json["id"]
    # if we get here, we've traversed the entire path and found the object
return obj_id
async def getPathForObjectId(app, parent_id, idpath_map, tgt_id=None, bucket=None):
""" Search the object starting with the given parent_id.
    idpath_map should be a dict with at minimum the key parent_id: <parent_path>.
If tgt_id is not None, returns first path that matches the tgt_id or None if not found.
    If tgt_id is None, returns the idpath_map.
"""
if not parent_id:
log.error("No parent_id passed to getPathForObjectId")
raise HTTPInternalServerError()
if parent_id not in idpath_map:
msg = f"Obj {parent_id} expected to be found in idpath_map"
log.error(msg)
raise HTTPInternalServerError()
parent_path = idpath_map[parent_id]
if parent_id == tgt_id:
return parent_path
req = getDataNodeUrl(app, parent_id)
req += "/groups/" + parent_id + "/links"
params = {}
if bucket:
params["bucket"] = bucket
log.debug("getPathForObjectId LINKS: " + req)
links_json = await http_get(app, req, params=params)
log.debug(f"getPathForObjectId got links json from dn for parent_id: {parent_id}")
links = links_json["links"]
h5path = None
for link in links:
if link["class"] != "H5L_TYPE_HARD":
continue # ignore everything except hard links
link_id = link["id"]
if link_id in idpath_map:
continue # this node has already been visited
title = link["title"]
if tgt_id is not None and link_id == tgt_id:
# found it!
h5path = op.join(parent_path, title)
break
idpath_map[link_id] = op.join(parent_path, title)
if getCollectionForId(link_id) != "groups":
continue
h5path = await getPathForObjectId(app, link_id, idpath_map, tgt_id=tgt_id, bucket=bucket) # recursive call
if tgt_id is not None and h5path:
break
return h5path
async def getRootInfo(app, root_id, bucket=None):
""" Get extra information the root collection. """
# Gather additional info on the domain
log.debug(f"getRootInfo {root_id}")
if not isSchema2Id(root_id):
log.info(f"no dataset details not available for schema v1 id: {root_id} returning null results")
return None
s3_key = getS3Key(root_id)
parts = s3_key.split('/')
# dset_key is in the format db/<root>/d/<dset>/.dataset.json
# get the key for the root info object as: db/<root>/.info.json
if len(parts) != 3:
log.error(f"Unexpected s3key format: {s3_key}")
return None
info_key = f"db/{parts[1]}/.info.json"
try:
info_json = await getS3JSONObj(app, info_key, bucket=bucket)
except HTTPNotFound:
log.warn(f"info.json not found for key: {info_key}")
return None
return info_json
| [
"[email protected]"
] | |
b970cb7a9421a179fb53f5272a8b21908a4e9e7e | 8b81588cea990aca1ecc4ce3fe45847cc46e7d00 | /x11/library/libXScrnSaver/actions.py | 676e427c37687edf597e963446797e167056b929 | [] | no_license | Zaryob/SulinRepository | 67a4a6d15d909422f73d5ec4bbc8bd16f40057a9 | c89c643b9773d191996d721b262dd739e4203bc0 | refs/heads/main | 2021-06-12T19:30:34.281242 | 2019-04-18T17:56:24 | 2019-04-18T17:56:24 | 201,469,580 | 11 | 2 | null | 2021-06-02T16:51:13 | 2019-08-09T13:08:57 | Roff | UTF-8 | Python | false | false | 507 | py | # -*- coding: utf-8 -*-
#
# Licensed under the GNU General Public License, version 3.
# See the file http://www.gnu.org/licenses/gpl.txt
from inary.actionsapi import autotools
from inary.actionsapi import inarytools
from inary.actionsapi import get
def setup():
autotools.autoreconf("-vif")
autotools.configure("--disable-static")
def build():
autotools.make()
def install():
autotools.rawInstall("DESTDIR=%s" % get.installDIR())
inarytools.dodoc("ChangeLog", "COPYING", "README")
| [
"[email protected]"
] | |
2cacc35dad927239826dea74300b3926c7cc1092 | cbca22133ba7c02ba0532bc046d7e6b0524c2f4c | /Matplotlib_With_PYQT/封装toolbar功能/fnag.py | 5b9a1a4d3a75baf71b162e3e9c3e93eb74751638 | [] | no_license | Inc175/ll_crowluya-Matplotlib_With_PYQT-master | a923c195121f5e1d382b702b6a9ea0732c60c204 | dcf1fd6725f4fffd0b7ff6b9298cc3635735b30d | refs/heads/master | 2021-09-24T23:58:02.044255 | 2018-10-05T16:00:11 | 2018-10-05T16:00:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,145 | py | import sys
import matplotlib
import PyQt5.sip
# bring matplotlib's key-press event handling into PyQt5
# from matplotlib.backend_bases import key_press_handler
matplotlib.use("Qt5Agg")
from PyQt5 import QtCore
from PyQt5 import QtWidgets
from PyQt5.QtWidgets import (QApplication, QMainWindow, QVBoxLayout, QSizePolicy, QAction, QLabel,
QWidget,QStackedWidget, QPushButton,QTabWidget, QAction, QMessageBox, QFileDialog, QHBoxLayout)
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar
import matplotlib.pyplot as plt
from MatploWidget import PlotCanvas # Qt canvas class for drawing matplotlib figures
from mainFrom import Ui_MainWindow # main window, shown centred on the screen
from loadlines import load_all_lines # load the line data
# add curves to the canvas
from PyQt5.QtWidgets import QDesktopWidget, QApplication, QMainWindow, QPushButton
from utils import log
# from MatploWidget import PlotCanvas
fig = plt.figure()
ax = fig.add_subplot(111)
lines = load_all_lines()
tab1 = PlotCanvas(width=9, height=6, dpi=100)
tab1.draw_one_line(lines[0])
# fig.add_subplot(tab1)
tab1.draw()
# plt.show() | [
"[email protected]"
] | |
42e748ffc45d9278916009d2483b54f316602368 | 7133de159c5cdc06b92bc5b168fe193caf0bea2a | /packages/grid_control/parameters/psource_data.py | f2f1003f2b7798eb21834106677ade5e27e87a17 | [] | no_license | thomas-mueller/grid-control | fac566c21bb79b0bd4439d36421a0c0b14bc8776 | 36f01d19b71c41c8dd55eddd190181db8849f920 | refs/heads/master | 2020-12-28T23:34:59.983357 | 2016-04-22T06:28:57 | 2016-04-22T06:28:57 | 56,689,010 | 0 | 0 | null | 2016-04-20T13:26:29 | 2016-04-20T13:26:29 | null | UTF-8 | Python | false | false | 4,408 | py | # | Copyright 2009-2016 Karlsruhe Institute of Technology
# |
# | Licensed under the Apache License, Version 2.0 (the "License");
# | you may not use this file except in compliance with the License.
# | You may obtain a copy of the License at
# |
# | http://www.apache.org/licenses/LICENSE-2.0
# |
# | Unless required by applicable law or agreed to in writing, software
# | distributed under the License is distributed on an "AS IS" BASIS,
# | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# | See the License for the specific language governing permissions and
# | limitations under the License.
import os, time
from grid_control import utils
from grid_control.datasets import DataProvider
from grid_control.gc_exceptions import UserError
from grid_control.parameters.psource_base import ParameterSource
from python_compat import md5_hex
class DataParameterSource(ParameterSource):
def __init__(self, dataDir, srcName, dataProvider, dataSplitter, dataProc, keepOld = True):
ParameterSource.__init__(self)
(self._dataDir, self._srcName, self._dataProvider, self._dataSplitter, self._part_proc) = \
(dataDir, srcName, dataProvider, dataSplitter, dataProc)
if not dataProvider:
pass # debug mode - used by scripts - disables resync
		elif os.path.exists(self.getDataPath('cache.dat')) and os.path.exists(self.getDataPath('map.tar')):
self._dataSplitter.importPartitions(self.getDataPath('map.tar'))
else:
DataProvider.saveToFile(self.getDataPath('cache.dat'), self._dataProvider.getBlocks(silent = False))
self._dataSplitter.splitDataset(self.getDataPath('map.tar'), self._dataProvider.getBlocks())
self._maxN = self._dataSplitter.getMaxJobs()
self._keepOld = keepOld
def getNeededDataKeys(self):
return self._part_proc.getNeededKeys(self._dataSplitter)
def getMaxParameters(self):
return self._maxN
def fillParameterKeys(self, result):
result.extend(self._part_proc.getKeys())
def fillParameterInfo(self, pNum, result):
splitInfo = self._dataSplitter.getSplitInfo(pNum)
self._part_proc.process(pNum, splitInfo, result)
def getHash(self):
return md5_hex(str(self._srcName) + str(self._dataSplitter.getMaxJobs()) + str(self.resyncEnabled()))
def show(self):
return ['%s: src = %s' % (self.__class__.__name__, self._srcName)]
def __repr__(self):
return 'data(%s)' % utils.QM(self._srcName == 'data', '', self._srcName)
def getDataPath(self, postfix):
return os.path.join(self._dataDir, self._srcName + postfix)
def resync(self):
(result_redo, result_disable, result_sizeChange) = ParameterSource.resync(self)
if self.resyncEnabled() and self._dataProvider:
# Get old and new dataset information
old = DataProvider.loadFromFile(self.getDataPath('cache.dat')).getBlocks()
self._dataProvider.clearCache()
new = self._dataProvider.getBlocks()
self._dataProvider.saveToFile(self.getDataPath('cache-new.dat'), new)
# Use old splitting information to synchronize with new dataset infos
jobChanges = self._dataSplitter.resyncMapping(self.getDataPath('map-new.tar'), old, new)
if jobChanges:
# Move current splitting to backup and use the new splitting from now on
def backupRename(old, cur, new):
if self._keepOld:
os.rename(self.getDataPath(cur), self.getDataPath(old))
os.rename(self.getDataPath(new), self.getDataPath(cur))
backupRename( 'map-old-%d.tar' % time.time(), 'map.tar', 'map-new.tar')
backupRename('cache-old-%d.dat' % time.time(), 'cache.dat', 'cache-new.dat')
old_maxN = self._dataSplitter.getMaxJobs()
self._dataSplitter.importPartitions(self.getDataPath('map.tar'))
self._maxN = self._dataSplitter.getMaxJobs()
result_redo.update(jobChanges[0])
result_disable.update(jobChanges[1])
result_sizeChange = result_sizeChange or (old_maxN != self._maxN)
self.resyncFinished()
return (result_redo, result_disable, result_sizeChange)
def create(cls, pconfig = None, src = 'data'): # pylint:disable=arguments-differ
if src not in DataParameterSource.datasetsAvailable:
raise UserError('Dataset parameter source "%s" not setup!' % src)
result = DataParameterSource.datasetsAvailable[src]
DataParameterSource.datasetsUsed.append(result)
return result
create = classmethod(create)
DataParameterSource.datasetsAvailable = {}
DataParameterSource.datasetsUsed = []
ParameterSource.managerMap['data'] = 'DataParameterSource'
| [
"[email protected]"
] | |
819f916451d212969a294520210767ee7b4da40d | b3c47795e8b6d95ae5521dcbbb920ab71851a92f | /Leetcode/Algorithm/python/3000/02079-Watering Plants.py | 4e375badaaf013ccb48f4140475ac47e3102f9c7 | [
"LicenseRef-scancode-warranty-disclaimer"
] | no_license | Wizmann/ACM-ICPC | 6afecd0fd09918c53a2a84c4d22c244de0065710 | 7c30454c49485a794dcc4d1c09daf2f755f9ecc1 | refs/heads/master | 2023-07-15T02:46:21.372860 | 2023-07-09T15:30:27 | 2023-07-09T15:30:27 | 3,009,276 | 51 | 23 | null | null | null | null | UTF-8 | Python | false | false | 362 | py | class Solution(object):
def wateringPlants(self, plants, capacity):
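        # walk left to right: if the can cannot cover plant i, refill at the river
        # (i steps back plus i + 1 steps forward, i.e. 2*i + 1); otherwise move 1 step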
cur = capacity
step = 0
for i, plant in enumerate(plants):
if plant > cur:
cur = capacity - plant
step += i * 2 + 1
else:
cur -= plant
step += 1
return step
| [
"[email protected]"
] | |
4045d144a83b1c65582baa5d98f4ceece2698cd4 | 498a2d08c19eaf36945468e11fad1be97d62135b | /yaml_lsp/main.py | 125cdb4ed5c7ac43fcdd5ddc1769dfca7aed8329 | [
"BSD-3-Clause"
] | permissive | martinRenou/yaml-lsp | 94f4dc1744b5e8a4763983725cf482a5ab3f1207 | 79186d50289d172d2dc5a8420f1dc2cad1046ce7 | refs/heads/master | 2023-08-25T08:40:39.172933 | 2021-04-08T14:37:04 | 2021-04-08T14:37:04 | 417,399,907 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 864 | py | import pathlib
import shutil
import subprocess
import sys
NODE_LOCATION = (
shutil.which("node") or
shutil.which("node.exe") or
shutil.which("node.cmd")
)
NODE = str(pathlib.Path(NODE_LOCATION).resolve())
PATH_TO_BIN_JS = str(
(
pathlib.Path(__file__).parent /
'node_modules' / 'yaml-language-server' /
'bin' / 'yaml-language-server'
).resolve()
)
def main():
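    # run the bundled yaml-language-server under Node.js, speaking LSP over this process's stdio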
p = subprocess.Popen(
[NODE, PATH_TO_BIN_JS, '--stdio', *sys.argv[1:]],
stdin=sys.stdin, stdout=sys.stdout
)
sys.exit(p.wait())
def load(app):
return {
"yaml-language-server": {
"version": 2,
"argv": ['yaml-lsp'],
"languages": ["yaml"],
"mime_types": [
"text/x-yaml", "text/yaml"
]
}
}
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
34df80d44954fbb824a9dad7091e6ee2e6eb9a0a | ac235a23f22be0d6f1818bb53902177f9969813a | /tests/datastreams/test_processor.py | d8b3879b0af52c14c14035455326ecafe89c7cd8 | [
"Apache-2.0",
"BSD-3-Clause"
] | permissive | DataDog/dd-trace-py | f09d6d48c4c69aea68f999fc8a458ade5c6150cf | 1e3bd6d4edef5cda5a0831a6a7ec8e4046659d17 | refs/heads/1.x | 2023-09-01T20:25:26.746324 | 2023-09-01T18:54:37 | 2023-09-01T18:54:37 | 61,572,326 | 461 | 426 | NOASSERTION | 2023-09-14T20:38:57 | 2016-06-20T18:52:23 | Python | UTF-8 | Python | false | false | 3,013 | py | import time
from ddtrace.internal.datastreams.processor import ConsumerPartitionKey
from ddtrace.internal.datastreams.processor import DataStreamsProcessor
from ddtrace.internal.datastreams.processor import PartitionKey
def test_data_streams_processor():
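    # checkpoint stats are aggregated into 10-second buckets keyed by the edge tags and pathway hashes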
processor = DataStreamsProcessor("http://localhost:8126")
now = time.time()
processor.on_checkpoint_creation(1, 2, ["direction:out", "topic:topicA", "type:kafka"], now, 1, 1)
processor.on_checkpoint_creation(1, 2, ["direction:out", "topic:topicA", "type:kafka"], now, 1, 2)
processor.on_checkpoint_creation(1, 2, ["direction:out", "topic:topicA", "type:kafka"], now, 1, 4)
processor.on_checkpoint_creation(2, 4, ["direction:in", "topic:topicA", "type:kafka"], now, 1, 2)
now_ns = int(now * 1e9)
bucket_time_ns = int(now_ns - (now_ns % 1e10))
aggr_key_1 = (",".join(["direction:out", "topic:topicA", "type:kafka"]), 1, 2)
aggr_key_2 = (",".join(["direction:in", "topic:topicA", "type:kafka"]), 2, 4)
assert processor._buckets[bucket_time_ns].pathway_stats[aggr_key_1].full_pathway_latency.count == 3
assert processor._buckets[bucket_time_ns].pathway_stats[aggr_key_2].full_pathway_latency.count == 1
assert (
abs(processor._buckets[bucket_time_ns].pathway_stats[aggr_key_1].full_pathway_latency.get_quantile_value(1) - 4)
<= 4 * 0.008
) # relative accuracy of 0.00775
assert (
abs(processor._buckets[bucket_time_ns].pathway_stats[aggr_key_2].full_pathway_latency.get_quantile_value(1) - 2)
<= 2 * 0.008
) # relative accuracy of 0.00775
def test_data_streams_loop_protection():
processor = DataStreamsProcessor("http://localhost:8126")
ctx = processor.set_checkpoint(["direction:in", "topic:topicA", "type:kafka"])
parent_hash = ctx.hash
processor.set_checkpoint(["direction:out", "topic:topicB", "type:kafka"])
# the application sends data downstream to two different places.
# Use the consume checkpoint as the parent
child_hash = processor.set_checkpoint(["direction:out", "topic:topicB", "type:kafka"]).hash
expected_child_hash = ctx._compute_hash(["direction:out", "topic:topicB", "type:kafka"], parent_hash)
assert child_hash == expected_child_hash
def test_kafka_offset_monitoring():
processor = DataStreamsProcessor("http://localhost:8126")
now = time.time()
processor.track_kafka_commit("group1", "topic1", 1, 10, now)
processor.track_kafka_commit("group1", "topic1", 1, 14, now)
processor.track_kafka_produce("topic1", 1, 34, now)
processor.track_kafka_produce("topic1", 2, 10, now)
now_ns = int(now * 1e9)
bucket_time_ns = int(now_ns - (now_ns % 1e10))
assert processor._buckets[bucket_time_ns].latest_produce_offsets[PartitionKey("topic1", 1)] == 34
assert processor._buckets[bucket_time_ns].latest_produce_offsets[PartitionKey("topic1", 2)] == 10
assert processor._buckets[bucket_time_ns].latest_commit_offsets[ConsumerPartitionKey("group1", "topic1", 1)] == 14
| [
"[email protected]"
] | |
a11414020e389e004fa7ba41d64bb7afc662c6ec | 33cc37817d93dd784be2398c904c9b4cacf84c52 | /Week5_Object_Oriented_Programming/Practice_Problems/currencies.py | 1e40cb96d3c767ece8d29f152b283a21e682a68a | [] | no_license | M1c17/ICS_and_Programming_Using_Python | 305e53561af27067998cb767ee5d566dfc02d33d | ee5127a272fbf19289a6a97cbe9b2ada2f7785ca | refs/heads/master | 2020-07-02T00:04:28.574491 | 2019-08-09T00:16:11 | 2019-08-09T00:16:11 | 201,354,816 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,217 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 5 12:09:10 2019
@author: MASTER
"""
'''
The class "Ccy" can be used to define money values in various currencies.
A Ccy instance has the string attributes 'unit' (e.g. 'CHF', 'CAD' od 'EUR'
and the 'value' as a float.
A currency object consists of a value and the corresponding unit.
'''
class Ccy:
currencies = {'CHF': 1.0821202355817312,
'CAD': 1.488609845538393,
'GBP': 0.8916546282920325,
'JPY': 114.38826536281809,
'EUR': 1.0,
'USD': 1.11123458162018}
def __init__(self, value, unit = 'EUR'):
self.value = value
self.unit = unit
def __str__(self):
return "{0:5.2f}".format(self.value) + " " + self.unit
def changeTo(self, new_unit):
"""
        A Ccy object is transformed from the unit "self.unit" to "new_unit"
"""
self.value = (self.value / Ccy.currencies[self.unit] * Ccy.currencies[new_unit])
self.unit = new_unit
def __add__(self, other):
"""
Defines the '+' operator.
If other is a CCy object the currency values
are added and the result will be the unit of
self. If other is an int or a float, other will
be treated as a Euro value.
"""
if type(other) == int or type(other) == float:
x = (other * Ccy.currencies[self.unit])
else:
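            # rates are stored as units per EUR: divide by other's rate to get EUR,
            # then multiply by self's rate to express the value in self.unit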
x = (other.value / Ccy.currencies[other.unit] * Ccy.currencies[self.unit])
return Ccy(x + self.value, self.unit)
def __iadd__(self, other):
"""
Similar to __add__
"""
if type(other) == int or type(other) == float:
x = (other * Ccy.currencies[self.unit])
else:
x = (other.value / Ccy.currencies[other.unit] * Ccy.currencies[self.unit])
self.value += x
return self
    # sum() and "number + Ccy" go through __radd__; normalize the result to EUR
def __radd__(self, other):
res = self + other
if self.unit != "EUR":
res.changeTo("EUR")
return res
# def __radd__(self, other):
# return Ccy.__add__(self,other)
def __mul__(self, other):
"""
Multiplication is only defined as a scalar multiplication,
i.e. a money value can be multiplied by an int or a float.
It is not possible to multiply to money values
"""
if type(other)==int or type(other)==float:
return Ccy(self.value * other, self.unit)
else:
raise TypeError("unsupported operand type(s) for *: 'Ccy' and " + type(other).__name__)
def __rmul__(self, other):
return self.__mul__(other)
def __imul__(self, other):
if type(other)==int or type(other)==float:
self.value *= other
return self
else:
raise TypeError("unsupported operand type(s) for *: 'Ccy' and " + type(other).__name__)
x = Ccy(10,"USD")
y = Ccy(11)
z = Ccy(12.34, "JPY")
z = 7.8 + x + y + 255 + z
print(z)
lst = [Ccy(10,"USD"), Ccy(11), Ccy(12.34, "JPY"), Ccy(12.34, "CAD")]
z = sum(lst)
print(z) | [
"[email protected]"
] | |
f9c126902d927e7a260fb705bce0e1c27552cc30 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/309/usersdata/284/72935/submittedfiles/atm.py | bdf46aa78ba6d32e46748200a85c384c0a0db6f1 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 292 | py | # -*- coding: utf-8 -*-
from __future__ import division
import math
# START YOUR CODE HERE
v=int(input('digite o valor a ser sacado: '))
a=20
b=10
c=5
d=2
e=1
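# greedy note breakdown: count the 20s first, then 10s, 5s, 2s and 1s from each successive remainder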
f=(v%a)
g=(f%10)
h=(g%5)
i=(h%2)
if v//a!=0:
print(v//a)
print(f//10)
print(g//5)
print(h//2)
print(i//1)
| [
"[email protected]"
] | |
1f96f72f233d70286289b429157d02f586e49a0c | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/134/usersdata/228/53506/submittedfiles/escadarolante.py | b21aed3136bcf8b81ed533496221440ebca0f3d7 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 748 | py | # -*- coding: utf-8 -*-
def rolagem(lista):
for i in range (0,len(lista)-2,1):
tempo=10
if lista[i+1]<(lista[i]+10):
tempo=tempo+(lista[i]-lista[i+1])
elif lista[i+1]>=(lista[i]+10):
tempo=tempo+10
for i in range (len(lista)-1,len(lista)-2,1):
        if lista[len(lista)-1]<lista[len(lista)-2]+10:
tempo=tempo+(lista[len(lista)-1]-lista[len(lista)-2])
elif lista[len(lista)-1]>=lista[len(lista)-2]+10:
tempo=tempo+10
return(tempo+10)
n=int(input('digite um valor:'))
lista=[]
for i in range(0,n,1):
tn=int(input('digite um tempo de passagem:'))
lista.append(tn)
print (rolagem(lista))
| [
"[email protected]"
] | |
07ee526ae4cc62f861473e517b4b79b7aba4d6be | 8dc64db8a0d7ddb8778c8eae2dac9075b9a90e2b | /env/Lib/site-packages/pylint/checkers/variables.py | c7fd6318483362dc3ead1935ccf35244b261e505 | [
"MIT"
] | permissive | theXtroyer1221/Cloud-buffer | c3992d1b543a1f11fde180f6f7d988d28b8f9684 | 37eabdd78c15172ea980b59d1aff65d8628cb845 | refs/heads/master | 2022-11-22T22:37:10.453923 | 2022-02-25T01:15:57 | 2022-02-25T01:15:57 | 240,901,269 | 1 | 1 | MIT | 2022-09-04T14:48:02 | 2020-02-16T14:00:32 | HTML | UTF-8 | Python | false | false | 81,382 | py | # Copyright (c) 2006-2014 LOGILAB S.A. (Paris, FRANCE) <[email protected]>
# Copyright (c) 2009 Mads Kiilerich <[email protected]>
# Copyright (c) 2010 Daniel Harding <[email protected]>
# Copyright (c) 2011-2014, 2017 Google, Inc.
# Copyright (c) 2012 FELD Boris <[email protected]>
# Copyright (c) 2013-2020 Claudiu Popa <[email protected]>
# Copyright (c) 2014 Michal Nowikowski <[email protected]>
# Copyright (c) 2014 Brett Cannon <[email protected]>
# Copyright (c) 2014 Ricardo Gemignani <[email protected]>
# Copyright (c) 2014 Arun Persaud <[email protected]>
# Copyright (c) 2015 Dmitry Pribysh <[email protected]>
# Copyright (c) 2015 Radu Ciorba <[email protected]>
# Copyright (c) 2015 Simu Toni <[email protected]>
# Copyright (c) 2015 Ionel Cristian Maries <[email protected]>
# Copyright (c) 2016, 2018-2019 Ashley Whetter <[email protected]>
# Copyright (c) 2016, 2018 Jakub Wilk <[email protected]>
# Copyright (c) 2016-2017 Derek Gustafson <[email protected]>
# Copyright (c) 2016-2017 Łukasz Rogalski <[email protected]>
# Copyright (c) 2016 Grant Welch <[email protected]>
# Copyright (c) 2017-2018, 2020 hippo91 <[email protected]>
# Copyright (c) 2017-2018 Ville Skyttä <[email protected]>
# Copyright (c) 2017 Dan Garrette <[email protected]>
# Copyright (c) 2018-2019 Jim Robertson <[email protected]>
# Copyright (c) 2018 Mike Miller <[email protected]>
# Copyright (c) 2018 Lucas Cimon <[email protected]>
# Copyright (c) 2018 Drew <[email protected]>
# Copyright (c) 2018 Sushobhit <[email protected]>
# Copyright (c) 2018 ssolanki <[email protected]>
# Copyright (c) 2018 Bryce Guinta <[email protected]>
# Copyright (c) 2018 Bryce Guinta <[email protected]>
# Copyright (c) 2018 Mike Frysinger <[email protected]>
# Copyright (c) 2018 Marianna Polatoglou <[email protected]>
# Copyright (c) 2018 mar-chi-pan <[email protected]>
# Copyright (c) 2019-2021 Pierre Sassoulas <[email protected]>
# Copyright (c) 2019 Nick Drozd <[email protected]>
# Copyright (c) 2019 Djailla <[email protected]>
# Copyright (c) 2019 Hugo van Kemenade <[email protected]>
# Copyright (c) 2020 Andrew Simmons <[email protected]>
# Copyright (c) 2020 Andrew Simmons <[email protected]>
# Copyright (c) 2020 Anthony Sottile <[email protected]>
# Copyright (c) 2020 Ashley Whetter <[email protected]>
# Copyright (c) 2021 Marc Mueller <[email protected]>
# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/master/COPYING
"""variables checkers for Python code
"""
import collections
import copy
import itertools
import os
import re
from functools import lru_cache
import astroid
from astroid import decorators, modutils, objects
from astroid.context import InferenceContext
from pylint.checkers import BaseChecker, utils
from pylint.checkers.utils import is_postponed_evaluation_enabled
from pylint.interfaces import HIGH, INFERENCE, INFERENCE_FAILURE, IAstroidChecker
from pylint.utils import get_global_option
SPECIAL_OBJ = re.compile("^_{2}[a-z]+_{2}$")
FUTURE = "__future__"
# regexp for ignored argument name
IGNORED_ARGUMENT_NAMES = re.compile("_.*|^ignored_|^unused_")
# In Python 3.7 abc has a Python implementation which is preferred
# by astroid. Unfortunately this also messes up our explicit checks
# for `abc`
METACLASS_NAME_TRANSFORMS = {"_py_abc": "abc"}
TYPING_TYPE_CHECKS_GUARDS = frozenset({"typing.TYPE_CHECKING", "TYPE_CHECKING"})
BUILTIN_RANGE = "builtins.range"
TYPING_MODULE = "typing"
TYPING_NAMES = frozenset(
{
"Any",
"Callable",
"ClassVar",
"Generic",
"Optional",
"Tuple",
"Type",
"TypeVar",
"Union",
"AbstractSet",
"ByteString",
"Container",
"ContextManager",
"Hashable",
"ItemsView",
"Iterable",
"Iterator",
"KeysView",
"Mapping",
"MappingView",
"MutableMapping",
"MutableSequence",
"MutableSet",
"Sequence",
"Sized",
"ValuesView",
"Awaitable",
"AsyncIterator",
"AsyncIterable",
"Coroutine",
"Collection",
"AsyncGenerator",
"AsyncContextManager",
"Reversible",
"SupportsAbs",
"SupportsBytes",
"SupportsComplex",
"SupportsFloat",
"SupportsInt",
"SupportsRound",
"Counter",
"Deque",
"Dict",
"DefaultDict",
"List",
"Set",
"FrozenSet",
"NamedTuple",
"Generator",
"AnyStr",
"Text",
"Pattern",
"BinaryIO",
}
)
def _is_from_future_import(stmt, name):
"""Check if the name is a future import from another module."""
try:
module = stmt.do_import_module(stmt.modname)
except astroid.AstroidBuildingException:
return None
for local_node in module.locals.get(name, []):
if isinstance(local_node, astroid.ImportFrom) and local_node.modname == FUTURE:
return True
return None
def in_for_else_branch(parent, stmt):
"""Returns True if stmt in inside the else branch for a parent For stmt."""
return isinstance(parent, astroid.For) and any(
else_stmt.parent_of(stmt) or else_stmt == stmt for else_stmt in parent.orelse
)
@lru_cache(maxsize=1000)
def overridden_method(klass, name):
"""get overridden method if any"""
try:
parent = next(klass.local_attr_ancestors(name))
except (StopIteration, KeyError):
return None
try:
meth_node = parent[name]
except KeyError:
# We have found an ancestor defining <name> but it's not in the local
# dictionary. This may happen with astroid built from living objects.
return None
if isinstance(meth_node, astroid.FunctionDef):
return meth_node
return None
def _get_unpacking_extra_info(node, inferred):
"""return extra information to add to the message for unpacking-non-sequence
and unbalanced-tuple-unpacking errors
"""
more = ""
inferred_module = inferred.root().name
if node.root().name == inferred_module:
if node.lineno == inferred.lineno:
more = " %s" % inferred.as_string()
elif inferred.lineno:
more = " defined at line %s" % inferred.lineno
elif inferred.lineno:
more = f" defined at line {inferred.lineno} of {inferred_module}"
return more
def _detect_global_scope(node, frame, defframe):
"""Detect that the given frames shares a global
scope.
    Two frames share a global
    scope when neither
    of them is hidden under a function scope, as well
    as any of their parent scopes, up to the root scope.
    In this case, depending on something defined later on
Example:
class A:
# B has the same global scope as `C`, leading to a NameError.
class B(C): ...
class C: ...
"""
def_scope = scope = None
if frame and frame.parent:
scope = frame.parent.scope()
if defframe and defframe.parent:
def_scope = defframe.parent.scope()
if isinstance(frame, astroid.FunctionDef):
# If the parent of the current node is a
# function, then it can be under its scope
# (defined in, which doesn't concern us) or
# the `->` part of annotations. The same goes
# for annotations of function arguments, they'll have
# their parent the Arguments node.
if not isinstance(node.parent, (astroid.FunctionDef, astroid.Arguments)):
return False
elif any(
not isinstance(f, (astroid.ClassDef, astroid.Module)) for f in (frame, defframe)
):
# Not interested in other frames, since they are already
# not in a global scope.
return False
break_scopes = []
for current_scope in (scope, def_scope):
# Look for parent scopes. If there is anything different
# than a module or a class scope, then they frames don't
# share a global scope.
parent_scope = current_scope
while parent_scope:
if not isinstance(parent_scope, (astroid.ClassDef, astroid.Module)):
break_scopes.append(parent_scope)
break
if parent_scope.parent:
parent_scope = parent_scope.parent.scope()
else:
break
if break_scopes and len(set(break_scopes)) != 1:
# Store different scopes than expected.
# If the stored scopes are, in fact, the very same, then it means
# that the two frames (frame and defframe) shares the same scope,
# and we could apply our lineno analysis over them.
# For instance, this works when they are inside a function, the node
# that uses a definition and the definition itself.
return False
    # At this point, we are certain that frame and defframe share a scope
# and the definition of the first depends on the second.
return frame.lineno < defframe.lineno
def _infer_name_module(node, name):
context = InferenceContext()
context.lookupname = name
return node.infer(context, asname=False)
def _fix_dot_imports(not_consumed):
"""Try to fix imports with multiple dots, by returning a dictionary
with the import names expanded. The function unflattens root imports,
like 'xml' (when we have both 'xml.etree' and 'xml.sax'), to 'xml.etree'
and 'xml.sax' respectively.
"""
names = {}
for name, stmts in not_consumed.items():
if any(
isinstance(stmt, astroid.AssignName)
and isinstance(stmt.assign_type(), astroid.AugAssign)
for stmt in stmts
):
continue
for stmt in stmts:
if not isinstance(stmt, (astroid.ImportFrom, astroid.Import)):
continue
for imports in stmt.names:
second_name = None
import_module_name = imports[0]
if import_module_name == "*":
# In case of wildcard imports,
# pick the name from inside the imported module.
second_name = name
else:
name_matches_dotted_import = False
if (
import_module_name.startswith(name)
and import_module_name.find(".") > -1
):
name_matches_dotted_import = True
if name_matches_dotted_import or name in imports:
# Most likely something like 'xml.etree',
# which will appear in the .locals as 'xml'.
# Only pick the name if it wasn't consumed.
second_name = import_module_name
if second_name and second_name not in names:
names[second_name] = stmt
return sorted(names.items(), key=lambda a: a[1].fromlineno)
def _find_frame_imports(name, frame):
"""
Detect imports in the frame, with the required
*name*. Such imports can be considered assignments.
Returns True if an import for the given name was found.
"""
imports = frame.nodes_of_class((astroid.Import, astroid.ImportFrom))
for import_node in imports:
for import_name, import_alias in import_node.names:
# If the import uses an alias, check only that.
# Otherwise, check only the import name.
if import_alias:
if import_alias == name:
return True
elif import_name and import_name == name:
return True
return None
def _import_name_is_global(stmt, global_names):
for import_name, import_alias in stmt.names:
# If the import uses an alias, check only that.
# Otherwise, check only the import name.
if import_alias:
if import_alias in global_names:
return True
elif import_name in global_names:
return True
return False
def _flattened_scope_names(iterator):
values = (set(stmt.names) for stmt in iterator)
return set(itertools.chain.from_iterable(values))
def _assigned_locally(name_node):
"""
Checks if name_node has corresponding assign statement in same scope
"""
assign_stmts = name_node.scope().nodes_of_class(astroid.AssignName)
return any(a.name == name_node.name for a in assign_stmts)
def _is_type_checking_import(node):
parent = node.parent
if not isinstance(parent, astroid.If):
return False
test = parent.test
return test.as_string() in TYPING_TYPE_CHECKS_GUARDS
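# For illustration, the guard matched above is the common typing pattern
# (hypothetical module and type names):
#
#     from typing import TYPE_CHECKING
#     if TYPE_CHECKING:
#         from mymodule import MyType  # needed only for annotations
#
# Imports living under such a guard should not be reported as unused at runtime.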
def _has_locals_call_after_node(stmt, scope):
skip_nodes = (
astroid.FunctionDef,
astroid.ClassDef,
astroid.Import,
astroid.ImportFrom,
)
for call in scope.nodes_of_class(astroid.Call, skip_klass=skip_nodes):
inferred = utils.safe_infer(call.func)
if (
utils.is_builtin_object(inferred)
and getattr(inferred, "name", None) == "locals"
):
if stmt.lineno < call.lineno:
return True
return False
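# For illustration, this is the situation the scan above detects (hypothetical code):
#
#     def render():
#         title = "Hello"                         # looks unused...
#         return "<h1>%(title)s</h1>" % locals()  # ...but locals() may consume it
#
# When such a call is found after the assignment, the checker reports
# 'possibly-unused-variable' instead of 'unused-variable'.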
MSGS = {
"E0601": (
"Using variable %r before assignment",
"used-before-assignment",
"Used when a local variable is accessed before its assignment.",
),
"E0602": (
"Undefined variable %r",
"undefined-variable",
"Used when an undefined variable is accessed.",
),
"E0603": (
"Undefined variable name %r in __all__",
"undefined-all-variable",
"Used when an undefined variable name is referenced in __all__.",
),
"E0604": (
"Invalid object %r in __all__, must contain only strings",
"invalid-all-object",
"Used when an invalid (non-string) object occurs in __all__.",
),
"E0611": (
"No name %r in module %r",
"no-name-in-module",
"Used when a name cannot be found in a module.",
),
"W0601": (
"Global variable %r undefined at the module level",
"global-variable-undefined",
'Used when a variable is defined through the "global" statement '
"but the variable is not defined in the module scope.",
),
"W0602": (
"Using global for %r but no assignment is done",
"global-variable-not-assigned",
'Used when a variable is defined through the "global" statement '
"but no assignment to this variable is done.",
),
"W0603": (
"Using the global statement", # W0121
"global-statement",
'Used when you use the "global" statement to update a global '
"variable. Pylint just try to discourage this "
"usage. That doesn't mean you cannot use it !",
),
"W0604": (
"Using the global statement at the module level", # W0103
"global-at-module-level",
'Used when you use the "global" statement at the module level '
"since it has no effect",
),
"W0611": (
"Unused %s",
"unused-import",
"Used when an imported module or variable is not used.",
),
"W0612": (
"Unused variable %r",
"unused-variable",
"Used when a variable is defined but not used.",
),
"W0613": (
"Unused argument %r",
"unused-argument",
"Used when a function or method argument is not used.",
),
"W0614": (
"Unused import %s from wildcard import",
"unused-wildcard-import",
"Used when an imported module or variable is not used from a "
"`'from X import *'` style import.",
),
"W0621": (
"Redefining name %r from outer scope (line %s)",
"redefined-outer-name",
"Used when a variable's name hides a name defined in the outer scope.",
),
"W0622": (
"Redefining built-in %r",
"redefined-builtin",
"Used when a variable or function override a built-in.",
),
"W0623": (
"Redefining name %r from %s in exception handler",
"redefine-in-handler",
"Used when an exception handler assigns the exception to an existing name",
),
"W0631": (
"Using possibly undefined loop variable %r",
"undefined-loop-variable",
"Used when a loop variable (i.e. defined by a for loop or "
"a list comprehension or a generator expression) is used outside "
"the loop.",
),
"W0632": (
"Possible unbalanced tuple unpacking with "
"sequence%s: "
"left side has %d label(s), right side has %d value(s)",
"unbalanced-tuple-unpacking",
"Used when there is an unbalanced tuple unpacking in assignment",
{"old_names": [("E0632", "old-unbalanced-tuple-unpacking")]},
),
"E0633": (
"Attempting to unpack a non-sequence%s",
"unpacking-non-sequence",
"Used when something which is not "
"a sequence is used in an unpack assignment",
{"old_names": [("W0633", "old-unpacking-non-sequence")]},
),
"W0640": (
"Cell variable %s defined in loop",
"cell-var-from-loop",
"A variable used in a closure is defined in a loop. "
"This will result in all closures using the same value for "
"the closed-over variable.",
),
"W0641": (
"Possibly unused variable %r",
"possibly-unused-variable",
"Used when a variable is defined but might not be used. "
"The possibility comes from the fact that locals() might be used, "
"which could consume or not the said variable",
),
"W0642": (
"Invalid assignment to %s in method",
"self-cls-assignment",
"Invalid assignment to self or cls in instance or class method "
"respectively.",
),
}
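# For illustration, W0640 (cell-var-from-loop) above covers cases like this
# hypothetical snippet:
#
#     callbacks = []
#     for i in range(3):
#         callbacks.append(lambda: i)       # every closure shares the same 'i' cell
#     results = [cb() for cb in callbacks]  # [2, 2, 2], usually not what was intended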
ScopeConsumer = collections.namedtuple(
"ScopeConsumer", "to_consume consumed scope_type"
)
class NamesConsumer:
"""
A simple class to handle consumed, to consume and scope type info of node locals
"""
def __init__(self, node, scope_type):
self._atomic = ScopeConsumer(copy.copy(node.locals), {}, scope_type)
self.node = node
def __repr__(self):
to_consumes = [f"{k}->{v}" for k, v in self._atomic.to_consume.items()]
consumed = [f"{k}->{v}" for k, v in self._atomic.consumed.items()]
to_consumes = ", ".join(to_consumes)
consumed = ", ".join(consumed)
return f"""
to_consume : {to_consumes}
consumed : {consumed}
scope_type : {self._atomic.scope_type}
"""
def __iter__(self):
return iter(self._atomic)
@property
def to_consume(self):
return self._atomic.to_consume
@property
def consumed(self):
return self._atomic.consumed
@property
def scope_type(self):
return self._atomic.scope_type
def mark_as_consumed(self, name, new_node):
"""
Mark the name as consumed and delete it from
the to_consume dictionary
"""
self.consumed[name] = new_node
del self.to_consume[name]
def get_next_to_consume(self, node):
# Get the definition of `node` from this scope
name = node.name
parent_node = node.parent
found_node = self.to_consume.get(name)
if (
found_node
and isinstance(parent_node, astroid.Assign)
and parent_node == found_node[0].parent
):
lhs = found_node[0].parent.targets[0]
if lhs.name == name: # this name is defined in this very statement
found_node = None
if (
found_node
and isinstance(parent_node, astroid.For)
and parent_node.iter == node
and parent_node.target in found_node
):
found_node = None
return found_node
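    # For illustration, the two guards above discard definitions made by the very
    # statement being checked, e.g. (hypothetical code):
    #
    #     x = x            # the right-hand 'x' cannot come from this assignment
    #     for x in x: ...  # the iterable 'x' is not the loop target bound here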
# pylint: disable=too-many-public-methods
class VariablesChecker(BaseChecker):
"""checks for
* unused variables / imports
* undefined variables
* redefinition of variable from builtins or from an outer scope
* use of variable before assignment
* __all__ consistency
* self/cls assignment
"""
__implements__ = IAstroidChecker
name = "variables"
msgs = MSGS
priority = -1
options = (
(
"init-import",
{
"default": 0,
"type": "yn",
"metavar": "<y_or_n>",
"help": "Tells whether we should check for unused import in "
"__init__ files.",
},
),
(
"dummy-variables-rgx",
{
"default": "_+$|(_[a-zA-Z0-9_]*[a-zA-Z0-9]+?$)|dummy|^ignored_|^unused_",
"type": "regexp",
"metavar": "<regexp>",
"help": "A regular expression matching the name of dummy "
"variables (i.e. expected to not be used).",
},
),
(
"additional-builtins",
{
"default": (),
"type": "csv",
"metavar": "<comma separated list>",
"help": "List of additional names supposed to be defined in "
"builtins. Remember that you should avoid defining new builtins "
"when possible.",
},
),
(
"callbacks",
{
"default": ("cb_", "_cb"),
"type": "csv",
"metavar": "<callbacks>",
"help": "List of strings which can identify a callback "
"function by name. A callback name must start or "
"end with one of those strings.",
},
),
(
"redefining-builtins-modules",
{
"default": (
"six.moves",
"past.builtins",
"future.builtins",
"builtins",
"io",
),
"type": "csv",
"metavar": "<comma separated list>",
"help": "List of qualified module names which can have objects "
"that can redefine builtins.",
},
),
(
"ignored-argument-names",
{
"default": IGNORED_ARGUMENT_NAMES,
"type": "regexp",
"metavar": "<regexp>",
"help": "Argument names that match this expression will be "
"ignored. Default to name with leading underscore.",
},
),
(
"allow-global-unused-variables",
{
"default": True,
"type": "yn",
"metavar": "<y_or_n>",
"help": "Tells whether unused global variables should be treated as a violation.",
},
),
)
def __init__(self, linter=None):
BaseChecker.__init__(self, linter)
self._to_consume = (
None # list of tuples: (to_consume:dict, consumed:dict, scope_type:str)
)
self._checking_mod_attr = None
self._loop_variables = []
self._type_annotation_names = []
self._postponed_evaluation_enabled = False
@utils.check_messages("redefined-outer-name")
def visit_for(self, node):
assigned_to = [
var.name for var in node.target.nodes_of_class(astroid.AssignName)
]
# Only check variables that are used
dummy_rgx = self.config.dummy_variables_rgx
assigned_to = [var for var in assigned_to if not dummy_rgx.match(var)]
for variable in assigned_to:
for outer_for, outer_variables in self._loop_variables:
if variable in outer_variables and not in_for_else_branch(
outer_for, node
):
self.add_message(
"redefined-outer-name",
args=(variable, outer_for.fromlineno),
node=node,
)
break
self._loop_variables.append((node, assigned_to))
@utils.check_messages("redefined-outer-name")
def leave_for(self, node):
self._loop_variables.pop()
self._store_type_annotation_names(node)
def visit_module(self, node):
"""visit module : update consumption analysis variable
        checks that globals don't override builtins
"""
self._to_consume = [NamesConsumer(node, "module")]
self._postponed_evaluation_enabled = is_postponed_evaluation_enabled(node)
for name, stmts in node.locals.items():
if utils.is_builtin(name) and not utils.is_inside_except(stmts[0]):
if self._should_ignore_redefined_builtin(stmts[0]) or name == "__doc__":
continue
self.add_message("redefined-builtin", args=name, node=stmts[0])
@utils.check_messages(
"unused-import",
"unused-wildcard-import",
"redefined-builtin",
"undefined-all-variable",
"invalid-all-object",
"unused-variable",
)
def leave_module(self, node):
"""leave module: check globals"""
assert len(self._to_consume) == 1
self._check_metaclasses(node)
not_consumed = self._to_consume.pop().to_consume
# attempt to check for __all__ if defined
if "__all__" in node.locals:
self._check_all(node, not_consumed)
# check for unused globals
self._check_globals(not_consumed)
# don't check unused imports in __init__ files
if not self.config.init_import and node.package:
return
self._check_imports(not_consumed)
def visit_classdef(self, node):
"""visit class: update consumption analysis variable"""
self._to_consume.append(NamesConsumer(node, "class"))
def leave_classdef(self, _):
"""leave class: update consumption analysis variable"""
# do not check for not used locals here (no sense)
self._to_consume.pop()
def visit_lambda(self, node):
"""visit lambda: update consumption analysis variable"""
self._to_consume.append(NamesConsumer(node, "lambda"))
def leave_lambda(self, _):
"""leave lambda: update consumption analysis variable"""
# do not check for not used locals here
self._to_consume.pop()
def visit_generatorexp(self, node):
"""visit genexpr: update consumption analysis variable"""
self._to_consume.append(NamesConsumer(node, "comprehension"))
def leave_generatorexp(self, _):
"""leave genexpr: update consumption analysis variable"""
# do not check for not used locals here
self._to_consume.pop()
def visit_dictcomp(self, node):
"""visit dictcomp: update consumption analysis variable"""
self._to_consume.append(NamesConsumer(node, "comprehension"))
def leave_dictcomp(self, _):
"""leave dictcomp: update consumption analysis variable"""
# do not check for not used locals here
self._to_consume.pop()
def visit_setcomp(self, node):
"""visit setcomp: update consumption analysis variable"""
self._to_consume.append(NamesConsumer(node, "comprehension"))
def leave_setcomp(self, _):
"""leave setcomp: update consumption analysis variable"""
# do not check for not used locals here
self._to_consume.pop()
def visit_functiondef(self, node):
"""visit function: update consumption analysis variable and check locals"""
self._to_consume.append(NamesConsumer(node, "function"))
if not (
self.linter.is_message_enabled("redefined-outer-name")
or self.linter.is_message_enabled("redefined-builtin")
):
return
globs = node.root().globals
for name, stmt in node.items():
if utils.is_inside_except(stmt):
continue
if name in globs and not isinstance(stmt, astroid.Global):
definition = globs[name][0]
if (
isinstance(definition, astroid.ImportFrom)
and definition.modname == FUTURE
):
# It is a __future__ directive, not a symbol.
continue
# Do not take in account redefined names for the purpose
# of type checking.:
if any(
isinstance(definition.parent, astroid.If)
and definition.parent.test.as_string() in TYPING_TYPE_CHECKS_GUARDS
for definition in globs[name]
):
continue
line = definition.fromlineno
if not self._is_name_ignored(stmt, name):
self.add_message(
"redefined-outer-name", args=(name, line), node=stmt
)
elif utils.is_builtin(name) and not self._should_ignore_redefined_builtin(
stmt
):
# do not print Redefining builtin for additional builtins
self.add_message("redefined-builtin", args=name, node=stmt)
def leave_functiondef(self, node):
"""leave function: check function's locals are consumed"""
self._check_metaclasses(node)
if node.type_comment_returns:
self._store_type_annotation_node(node.type_comment_returns)
if node.type_comment_args:
for argument_annotation in node.type_comment_args:
self._store_type_annotation_node(argument_annotation)
not_consumed = self._to_consume.pop().to_consume
if not (
self.linter.is_message_enabled("unused-variable")
or self.linter.is_message_enabled("possibly-unused-variable")
or self.linter.is_message_enabled("unused-argument")
):
return
# Don't check arguments of function which are only raising an exception.
if utils.is_error(node):
return
# Don't check arguments of abstract methods or within an interface.
is_method = node.is_method()
if is_method and node.is_abstract():
return
global_names = _flattened_scope_names(node.nodes_of_class(astroid.Global))
nonlocal_names = _flattened_scope_names(node.nodes_of_class(astroid.Nonlocal))
for name, stmts in not_consumed.items():
self._check_is_unused(name, node, stmts[0], global_names, nonlocal_names)
visit_asyncfunctiondef = visit_functiondef
leave_asyncfunctiondef = leave_functiondef
@utils.check_messages(
"global-variable-undefined",
"global-variable-not-assigned",
"global-statement",
"global-at-module-level",
"redefined-builtin",
)
def visit_global(self, node):
"""check names imported exists in the global scope"""
frame = node.frame()
if isinstance(frame, astroid.Module):
self.add_message("global-at-module-level", node=node)
return
module = frame.root()
default_message = True
locals_ = node.scope().locals
for name in node.names:
try:
assign_nodes = module.getattr(name)
except astroid.NotFoundError:
# unassigned global, skip
assign_nodes = []
not_defined_locally_by_import = not any(
isinstance(local, astroid.node_classes.Import)
for local in locals_.get(name, ())
)
if not assign_nodes and not_defined_locally_by_import:
self.add_message("global-variable-not-assigned", args=name, node=node)
default_message = False
continue
for anode in assign_nodes:
if (
isinstance(anode, astroid.AssignName)
and anode.name in module.special_attributes
):
self.add_message("redefined-builtin", args=name, node=node)
break
if anode.frame() is module:
# module level assignment
break
else:
if not_defined_locally_by_import:
# global undefined at the module scope
self.add_message("global-variable-undefined", args=name, node=node)
default_message = False
if default_message:
self.add_message("global-statement", node=node)
def visit_assignname(self, node):
if isinstance(node.assign_type(), astroid.AugAssign):
self.visit_name(node)
def visit_delname(self, node):
self.visit_name(node)
def visit_name(self, node):
"""Check that a name is defined in the current scope"""
stmt = node.statement()
if stmt.fromlineno is None:
# name node from an astroid built from live code, skip
assert not stmt.root().file.endswith(".py")
return
name = node.name
frame = stmt.scope()
start_index = len(self._to_consume) - 1
undefined_variable_is_enabled = self.linter.is_message_enabled(
"undefined-variable"
)
used_before_assignment_is_enabled = self.linter.is_message_enabled(
"used-before-assignment"
)
# iterates through parent scopes, from the inner to the outer
base_scope_type = self._to_consume[start_index].scope_type
# pylint: disable=too-many-nested-blocks; refactoring this block is a pain.
for i in range(start_index, -1, -1):
current_consumer = self._to_consume[i]
# The list of base classes in the class definition is not part
# of the class body.
# If the current scope is a class scope but it's not the inner
# scope, ignore it. This prevents to access this scope instead of
# the globals one in function members when there are some common
# names.
if current_consumer.scope_type == "class" and (
utils.is_ancestor_name(current_consumer.node, node)
or (i != start_index and self._ignore_class_scope(node))
):
continue
# if the name node is used as a function default argument's value or as
# a decorator, then start from the parent frame of the function instead
# of the function frame - and thus open an inner class scope
if (
current_consumer.scope_type == "function"
and self._defined_in_function_definition(node, current_consumer.node)
):
                # ignore function scope if it is an annotation/default/decorator, as it is not in the body
continue
if current_consumer.scope_type == "lambda" and utils.is_default_argument(
node, current_consumer.node
):
continue
# the name has already been consumed, only check it's not a loop
# variable used outside the loop
# avoid the case where there are homonyms inside function scope and
# comprehension current scope (avoid bug #1731)
if name in current_consumer.consumed and not (
current_consumer.scope_type == "comprehension"
and self._has_homonym_in_upper_function_scope(node, i)
):
defnode = utils.assign_parent(current_consumer.consumed[name][0])
self._check_late_binding_closure(node, defnode)
self._loopvar_name(node, name)
break
found_node = current_consumer.get_next_to_consume(node)
if found_node is None:
continue
# checks for use before assignment
defnode = utils.assign_parent(current_consumer.to_consume[name][0])
if (
undefined_variable_is_enabled or used_before_assignment_is_enabled
) and defnode is not None:
self._check_late_binding_closure(node, defnode)
defstmt = defnode.statement()
defframe = defstmt.frame()
# The class reuses itself in the class scope.
recursive_klass = (
frame is defframe
and defframe.parent_of(node)
and isinstance(defframe, astroid.ClassDef)
and node.name == defframe.name
)
if (
recursive_klass
and utils.is_inside_lambda(node)
and (
not utils.is_default_argument(node)
or node.scope().parent.scope() is not defframe
)
):
# Self-referential class references are fine in lambda's --
# As long as they are not part of the default argument directly
# under the scope of the parent self-referring class.
# Example of valid default argument:
# class MyName3:
# myattr = 1
# mylambda3 = lambda: lambda a=MyName3: a
# Example of invalid default argument:
# class MyName4:
# myattr = 1
# mylambda4 = lambda a=MyName4: lambda: a
# If the above conditional is True,
# there is no possibility of undefined-variable
# Also do not consume class name
# (since consuming blocks subsequent checks)
# -- quit
break
(
maybee0601,
annotation_return,
use_outer_definition,
) = self._is_variable_violation(
node,
name,
defnode,
stmt,
defstmt,
frame,
defframe,
base_scope_type,
recursive_klass,
)
if use_outer_definition:
continue
if (
maybee0601
and not utils.is_defined_before(node)
and not astroid.are_exclusive(stmt, defstmt, ("NameError",))
):
# Used and defined in the same place, e.g `x += 1` and `del x`
defined_by_stmt = defstmt is stmt and isinstance(
node, (astroid.DelName, astroid.AssignName)
)
if (
recursive_klass
or defined_by_stmt
or annotation_return
or isinstance(defstmt, astroid.Delete)
):
if not utils.node_ignores_exception(node, NameError):
# Handle postponed evaluation of annotations
if not (
self._postponed_evaluation_enabled
and isinstance(
stmt,
(
astroid.AnnAssign,
astroid.FunctionDef,
astroid.Arguments,
),
)
and name in node.root().locals
):
self.add_message(
"undefined-variable", args=name, node=node
)
elif base_scope_type != "lambda":
# E0601 may *not* occurs in lambda scope.
# Handle postponed evaluation of annotations
if not (
self._postponed_evaluation_enabled
and isinstance(
stmt, (astroid.AnnAssign, astroid.FunctionDef)
)
):
self.add_message(
"used-before-assignment", args=name, node=node
)
elif base_scope_type == "lambda":
# E0601 can occur in class-level scope in lambdas, as in
# the following example:
# class A:
# x = lambda attr: f + attr
# f = 42
if isinstance(frame, astroid.ClassDef) and name in frame.locals:
if isinstance(node.parent, astroid.Arguments):
if stmt.fromlineno <= defstmt.fromlineno:
# Doing the following is fine:
# class A:
# x = 42
# y = lambda attr=x: attr
self.add_message(
"used-before-assignment", args=name, node=node
)
else:
self.add_message(
"undefined-variable", args=name, node=node
)
elif current_consumer.scope_type == "lambda":
self.add_message("undefined-variable", node=node, args=name)
current_consumer.mark_as_consumed(name, found_node)
# check it's not a loop variable used outside the loop
self._loopvar_name(node, name)
break
else:
# we have not found the name, if it isn't a builtin, that's an
# undefined name !
if undefined_variable_is_enabled and not (
name in astroid.Module.scope_attrs
or utils.is_builtin(name)
or name in self.config.additional_builtins
or (
name == "__class__"
and isinstance(frame, astroid.FunctionDef)
and frame.is_method()
)
):
if not utils.node_ignores_exception(node, NameError):
self.add_message("undefined-variable", args=name, node=node)
@utils.check_messages("no-name-in-module")
def visit_import(self, node):
"""check modules attribute accesses"""
if not self._analyse_fallback_blocks and utils.is_from_fallback_block(node):
# No need to verify this, since ImportError is already
# handled by the client code.
return
for name, _ in node.names:
parts = name.split(".")
try:
module = next(_infer_name_module(node, parts[0]))
except astroid.ResolveError:
continue
if not isinstance(module, astroid.Module):
continue
self._check_module_attrs(node, module, parts[1:])
@utils.check_messages("no-name-in-module")
def visit_importfrom(self, node):
"""check modules attribute accesses"""
if not self._analyse_fallback_blocks and utils.is_from_fallback_block(node):
# No need to verify this, since ImportError is already
# handled by the client code.
return
name_parts = node.modname.split(".")
try:
module = node.do_import_module(name_parts[0])
except astroid.AstroidBuildingException:
return
module = self._check_module_attrs(node, module, name_parts[1:])
if not module:
return
for name, _ in node.names:
if name == "*":
continue
self._check_module_attrs(node, module, name.split("."))
@utils.check_messages(
"unbalanced-tuple-unpacking", "unpacking-non-sequence", "self-cls-assignment"
)
def visit_assign(self, node):
"""Check unbalanced tuple unpacking for assignments
and unpacking non-sequences as well as in case self/cls
get assigned.
"""
self._check_self_cls_assign(node)
if not isinstance(node.targets[0], (astroid.Tuple, astroid.List)):
return
targets = node.targets[0].itered()
try:
inferred = utils.safe_infer(node.value)
if inferred is not None:
self._check_unpacking(inferred, node, targets)
except astroid.InferenceError:
return
# listcomp have now also their scope
def visit_listcomp(self, node):
"""visit dictcomp: update consumption analysis variable"""
self._to_consume.append(NamesConsumer(node, "comprehension"))
def leave_listcomp(self, _):
"""leave dictcomp: update consumption analysis variable"""
# do not check for not used locals here
self._to_consume.pop()
def leave_assign(self, node):
self._store_type_annotation_names(node)
def leave_with(self, node):
self._store_type_annotation_names(node)
def visit_arguments(self, node):
for annotation in node.type_comment_args:
self._store_type_annotation_node(annotation)
# Relying on other checker's options, which might not have been initialized yet.
@decorators.cachedproperty
def _analyse_fallback_blocks(self):
return get_global_option(self, "analyse-fallback-blocks", default=False)
@decorators.cachedproperty
def _ignored_modules(self):
return get_global_option(self, "ignored-modules", default=[])
@decorators.cachedproperty
def _allow_global_unused_variables(self):
return get_global_option(self, "allow-global-unused-variables", default=True)
@staticmethod
def _defined_in_function_definition(node, frame):
in_annotation_or_default_or_decorator = False
if isinstance(frame, astroid.FunctionDef) and node.statement() is frame:
in_annotation_or_default_or_decorator = (
(
node in frame.args.annotations
or node in frame.args.posonlyargs_annotations
or node in frame.args.kwonlyargs_annotations
or node is frame.args.varargannotation
or node is frame.args.kwargannotation
)
or frame.args.parent_of(node)
or (frame.decorators and frame.decorators.parent_of(node))
or (
frame.returns
and (node is frame.returns or frame.returns.parent_of(node))
)
)
return in_annotation_or_default_or_decorator
@staticmethod
def _in_lambda_or_comprehension_body(
node: astroid.node_classes.NodeNG, frame: astroid.node_classes.NodeNG
) -> bool:
"""return True if node within a lambda/comprehension body (or similar) and thus should not have access to class attributes in frame"""
child = node
parent = node.parent
while parent is not None:
if parent is frame:
return False
if isinstance(parent, astroid.Lambda) and child is not parent.args:
# Body of lambda should not have access to class attributes.
return True
if (
isinstance(parent, astroid.node_classes.Comprehension)
and child is not parent.iter
):
# Only iter of list/set/dict/generator comprehension should have access.
return True
if isinstance(parent, astroid.scoped_nodes.ComprehensionScope) and not (
parent.generators and child is parent.generators[0]
):
# Body of list/set/dict/generator comprehension should not have access to class attributes.
# Furthermore, only the first generator (if multiple) in comprehension should have access.
return True
child = parent
parent = parent.parent
return False
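    # For illustration, the helper above encodes the Python 3 rule that class
    # attributes are visible to a comprehension's first iterable but not to its
    # body, its conditions, or a lambda body (hypothetical code):
    #
    #     class A:
    #         values = [1, 2, 3]
    #         doubled = [v * 2 for v in values]           # ok: first iterable
    #         flags = [v for v in values if v in values]  # NameError on the second 'values'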
@staticmethod
def _is_variable_violation(
node,
name,
defnode,
stmt,
defstmt,
frame,
defframe,
base_scope_type,
recursive_klass,
):
# pylint: disable=too-many-nested-blocks
# node: Node to check for violation
# name: name of node to check violation for
# frame: Scope of statement of node
# base_scope_type: local scope type
maybee0601 = True
annotation_return = False
use_outer_definition = False
if frame is not defframe:
maybee0601 = _detect_global_scope(node, frame, defframe)
elif defframe.parent is None:
# we are at the module level, check the name is not
# defined in builtins
if name in defframe.scope_attrs or astroid.builtin_lookup(name)[1]:
maybee0601 = False
else:
# we are in a local scope, check the name is not
# defined in global or builtin scope
# skip this lookup if name is assigned later in function scope/lambda
# Note: the node.frame() is not the same as the `frame` argument which is
# equivalent to frame.statement().scope()
forbid_lookup = (
isinstance(frame, astroid.FunctionDef)
or isinstance(node.frame(), astroid.Lambda)
) and _assigned_locally(node)
if not forbid_lookup and defframe.root().lookup(name)[1]:
maybee0601 = False
use_outer_definition = stmt == defstmt and not isinstance(
defnode, astroid.node_classes.Comprehension
)
# check if we have a nonlocal
elif name in defframe.locals:
maybee0601 = not any(
isinstance(child, astroid.Nonlocal) and name in child.names
for child in defframe.get_children()
)
if (
base_scope_type == "lambda"
and isinstance(frame, astroid.ClassDef)
and name in frame.locals
):
# This rule verifies that if the definition node of the
# checked name is an Arguments node and if the name
            # is used as a default value in the arguments defaults
# and the actual definition of the variable label
# is happening before the Arguments definition.
#
# bar = None
# foo = lambda bar=bar: bar
#
# In this case, maybee0601 should be False, otherwise
# it should be True.
maybee0601 = not (
isinstance(defnode, astroid.Arguments)
and node in defnode.defaults
and frame.locals[name][0].fromlineno < defstmt.fromlineno
)
elif isinstance(defframe, astroid.ClassDef) and isinstance(
frame, astroid.FunctionDef
):
# Special rule for function return annotations,
# which uses the same name as the class where
# the function lives.
if node is frame.returns and defframe.parent_of(frame.returns):
maybee0601 = annotation_return = True
if (
maybee0601
and defframe.name in defframe.locals
and defframe.locals[name][0].lineno < frame.lineno
):
# Detect class assignments with the same
# name as the class. In this case, no warning
# should be raised.
maybee0601 = False
if isinstance(node.parent, astroid.Arguments):
maybee0601 = stmt.fromlineno <= defstmt.fromlineno
elif recursive_klass:
maybee0601 = True
else:
maybee0601 = maybee0601 and stmt.fromlineno <= defstmt.fromlineno
if maybee0601 and stmt.fromlineno == defstmt.fromlineno:
if (
isinstance(defframe, astroid.FunctionDef)
and frame is defframe
and defframe.parent_of(node)
and stmt is not defstmt
):
# Single statement function, with the statement on the
# same line as the function definition
maybee0601 = False
elif (
isinstance(defstmt, astroid.Assign)
and isinstance(defstmt.value, astroid.IfExp)
and frame is defframe
and defframe.parent_of(node)
and stmt is defstmt
):
                # Single statement if, with an assignment expression on the same
                # line as the assignment
# x = b if (b := True) else False
maybee0601 = False
elif (
isinstance( # pylint: disable=too-many-boolean-expressions
defnode, astroid.NamedExpr
)
and frame is defframe
and defframe.parent_of(stmt)
and stmt is defstmt
and (
(
defnode.lineno == node.lineno
and defnode.col_offset < node.col_offset
)
or (defnode.lineno < node.lineno)
)
):
# Expressions, with assignment expressions
# Use only after assignment
# b = (c := 2) and c
maybee0601 = False
# Look for type checking definitions inside a type checking guard.
if isinstance(defstmt, (astroid.Import, astroid.ImportFrom)):
defstmt_parent = defstmt.parent
if (
isinstance(defstmt_parent, astroid.If)
and defstmt_parent.test.as_string() in TYPING_TYPE_CHECKS_GUARDS
):
# Exempt those definitions that are used inside the type checking
# guard or that are defined in both type checking guard branches.
used_in_branch = defstmt_parent.parent_of(node)
defined_in_or_else = False
for definition in defstmt_parent.orelse:
if isinstance(definition, astroid.Assign):
defined_in_or_else = any(
target.name == name for target in definition.targets
)
if defined_in_or_else:
break
if not used_in_branch and not defined_in_or_else:
maybee0601 = True
return maybee0601, annotation_return, use_outer_definition
def _ignore_class_scope(self, node):
"""
Return True if the node is in a local class scope, as an assignment.
:param node: Node considered
:type node: astroid.Node
:return: True if the node is in a local class scope, as an assignment. False otherwise.
:rtype: bool
"""
# Detect if we are in a local class scope, as an assignment.
# For example, the following is fair game.
#
# class A:
# b = 1
# c = lambda b=b: b * b
#
# class B:
# tp = 1
# def func(self, arg: tp):
# ...
# class C:
# tp = 2
# def func(self, arg=tp):
# ...
# class C:
# class Tp:
# pass
# class D(Tp):
# ...
name = node.name
frame = node.statement().scope()
in_annotation_or_default_or_decorator = self._defined_in_function_definition(
node, frame
)
in_ancestor_list = utils.is_ancestor_name(frame, node)
if in_annotation_or_default_or_decorator or in_ancestor_list:
frame_locals = frame.parent.scope().locals
else:
frame_locals = frame.locals
return not (
(
isinstance(frame, astroid.ClassDef)
or in_annotation_or_default_or_decorator
)
and not self._in_lambda_or_comprehension_body(node, frame)
and name in frame_locals
)
def _loopvar_name(self, node, name):
# filter variables according to node's scope
if not self.linter.is_message_enabled("undefined-loop-variable"):
return
astmts = [stmt for stmt in node.lookup(name)[1] if hasattr(stmt, "assign_type")]
# If this variable usage exists inside a function definition
# that exists in the same loop,
# the usage is safe because the function will not be defined either if
# the variable is not defined.
scope = node.scope()
if isinstance(scope, astroid.FunctionDef) and any(
asmt.statement().parent_of(scope) for asmt in astmts
):
return
        # filter variables according to their respective scope; test is_statement
# and parent to avoid #74747. This is not a total fix, which would
# introduce a mechanism similar to special attribute lookup in
# modules. Also, in order to get correct inference in this case, the
# scope lookup rules would need to be changed to return the initial
# assignment (which does not exist in code per se) as well as any later
# modifications.
if (
not astmts
or (astmts[0].is_statement or astmts[0].parent)
and astmts[0].statement().parent_of(node)
):
_astmts = []
else:
_astmts = astmts[:1]
for i, stmt in enumerate(astmts[1:]):
if astmts[i].statement().parent_of(stmt) and not in_for_else_branch(
astmts[i].statement(), stmt
):
continue
_astmts.append(stmt)
astmts = _astmts
if len(astmts) != 1:
return
assign = astmts[0].assign_type()
if not (
isinstance(
assign, (astroid.For, astroid.Comprehension, astroid.GeneratorExp)
)
and assign.statement() is not node.statement()
):
return
# For functions we can do more by inferring the length of the itered object
if not isinstance(assign, astroid.For):
self.add_message("undefined-loop-variable", args=name, node=node)
return
try:
inferred = next(assign.iter.infer())
except astroid.InferenceError:
self.add_message("undefined-loop-variable", args=name, node=node)
else:
if (
isinstance(inferred, astroid.Instance)
and inferred.qname() == BUILTIN_RANGE
):
# Consider range() objects safe, even if they might not yield any results.
return
# Consider sequences.
sequences = (
astroid.List,
astroid.Tuple,
astroid.Dict,
astroid.Set,
objects.FrozenSet,
)
if not isinstance(inferred, sequences):
self.add_message("undefined-loop-variable", args=name, node=node)
return
elements = getattr(inferred, "elts", getattr(inferred, "items", []))
if not elements:
self.add_message("undefined-loop-variable", args=name, node=node)
def _check_is_unused(self, name, node, stmt, global_names, nonlocal_names):
# pylint: disable=too-many-branches
# Ignore some special names specified by user configuration.
if self._is_name_ignored(stmt, name):
return
# Ignore names that were added dynamically to the Function scope
if (
isinstance(node, astroid.FunctionDef)
and name == "__class__"
and len(node.locals["__class__"]) == 1
and isinstance(node.locals["__class__"][0], astroid.ClassDef)
):
return
# Ignore names imported by the global statement.
if isinstance(stmt, (astroid.Global, astroid.Import, astroid.ImportFrom)):
# Detect imports, assigned to global statements.
if global_names and _import_name_is_global(stmt, global_names):
return
argnames = list(
itertools.chain(node.argnames(), [arg.name for arg in node.args.kwonlyargs])
)
# Care about functions with unknown argument (builtins)
if name in argnames:
self._check_unused_arguments(name, node, stmt, argnames)
else:
if stmt.parent and isinstance(
stmt.parent, (astroid.Assign, astroid.AnnAssign)
):
if name in nonlocal_names:
return
qname = asname = None
if isinstance(stmt, (astroid.Import, astroid.ImportFrom)):
# Need the complete name, which we don't have in .locals.
if len(stmt.names) > 1:
import_names = next(
(names for names in stmt.names if name in names), None
)
else:
import_names = stmt.names[0]
if import_names:
qname, asname = import_names
name = asname or qname
if _has_locals_call_after_node(stmt, node.scope()):
message_name = "possibly-unused-variable"
else:
if isinstance(stmt, astroid.Import):
if asname is not None:
msg = f"{qname} imported as {asname}"
else:
msg = "import %s" % name
self.add_message("unused-import", args=msg, node=stmt)
return
if isinstance(stmt, astroid.ImportFrom):
if asname is not None:
msg = f"{qname} imported from {stmt.modname} as {asname}"
else:
msg = f"{name} imported from {stmt.modname}"
self.add_message("unused-import", args=msg, node=stmt)
return
message_name = "unused-variable"
# Don't check function stubs created only for type information
if utils.is_overload_stub(node):
return
self.add_message(message_name, args=name, node=stmt)
def _is_name_ignored(self, stmt, name):
authorized_rgx = self.config.dummy_variables_rgx
if (
isinstance(stmt, astroid.AssignName)
and isinstance(stmt.parent, astroid.Arguments)
or isinstance(stmt, astroid.Arguments)
):
regex = self.config.ignored_argument_names
else:
regex = authorized_rgx
return regex and regex.match(name)
def _check_unused_arguments(self, name, node, stmt, argnames):
is_method = node.is_method()
klass = node.parent.frame()
if is_method and isinstance(klass, astroid.ClassDef):
confidence = (
INFERENCE if utils.has_known_bases(klass) else INFERENCE_FAILURE
)
else:
confidence = HIGH
if is_method:
# Don't warn for the first argument of a (non static) method
if node.type != "staticmethod" and name == argnames[0]:
return
# Don't warn for argument of an overridden method
overridden = overridden_method(klass, node.name)
if overridden is not None and name in overridden.argnames():
return
if node.name in utils.PYMETHODS and node.name not in (
"__init__",
"__new__",
):
return
# Don't check callback arguments
if any(
node.name.startswith(cb) or node.name.endswith(cb)
for cb in self.config.callbacks
):
return
# Don't check arguments of singledispatch.register function.
if utils.is_registered_in_singledispatch_function(node):
return
# Don't check function stubs created only for type information
if utils.is_overload_stub(node):
return
# Don't check protocol classes
if utils.is_protocol_class(klass):
return
self.add_message("unused-argument", args=name, node=stmt, confidence=confidence)
def _check_late_binding_closure(self, node, assignment_node):
if not self.linter.is_message_enabled("cell-var-from-loop"):
return
def _is_direct_lambda_call():
return (
isinstance(node_scope.parent, astroid.Call)
and node_scope.parent.func is node_scope
)
node_scope = node.scope()
if not isinstance(node_scope, (astroid.Lambda, astroid.FunctionDef)):
return
if isinstance(node.parent, astroid.Arguments):
return
if isinstance(assignment_node, astroid.Comprehension):
if assignment_node.parent.parent_of(node.scope()):
self.add_message("cell-var-from-loop", node=node, args=node.name)
else:
assign_scope = assignment_node.scope()
maybe_for = assignment_node
while maybe_for and not isinstance(maybe_for, astroid.For):
if maybe_for is assign_scope:
break
maybe_for = maybe_for.parent
else:
if (
maybe_for
and maybe_for.parent_of(node_scope)
and not _is_direct_lambda_call()
and not isinstance(node_scope.statement(), astroid.Return)
):
self.add_message("cell-var-from-loop", node=node, args=node.name)
def _should_ignore_redefined_builtin(self, stmt):
if not isinstance(stmt, astroid.ImportFrom):
return False
return stmt.modname in self.config.redefining_builtins_modules
def _has_homonym_in_upper_function_scope(self, node, index):
"""
Return True if there is a node with the same name in the to_consume dict of an upper scope
and if that scope is a function
:param node: node to check for
:type node: astroid.Node
:param index: index of the current consumer inside self._to_consume
:type index: int
:return: True if there is a node with the same name in the to_consume dict of an upper scope
and if that scope is a function
:rtype: bool
"""
for _consumer in self._to_consume[index - 1 :: -1]:
if _consumer.scope_type == "function" and node.name in _consumer.to_consume:
return True
return False
def _store_type_annotation_node(self, type_annotation):
"""Given a type annotation, store all the name nodes it refers to"""
if isinstance(type_annotation, astroid.Name):
self._type_annotation_names.append(type_annotation.name)
return
if not isinstance(type_annotation, astroid.Subscript):
return
if (
isinstance(type_annotation.value, astroid.Attribute)
and isinstance(type_annotation.value.expr, astroid.Name)
and type_annotation.value.expr.name == TYPING_MODULE
):
self._type_annotation_names.append(TYPING_MODULE)
return
self._type_annotation_names.extend(
annotation.name
for annotation in type_annotation.nodes_of_class(astroid.Name)
)
def _store_type_annotation_names(self, node):
type_annotation = node.type_annotation
if not type_annotation:
return
self._store_type_annotation_node(node.type_annotation)
def _check_self_cls_assign(self, node):
"""Check that self/cls don't get assigned"""
assign_names = {
target.name
for target in node.targets
if isinstance(target, astroid.AssignName)
}
scope = node.scope()
nonlocals_with_same_name = any(
child
for child in scope.body
if isinstance(child, astroid.Nonlocal) and assign_names & set(child.names)
)
if nonlocals_with_same_name:
scope = node.scope().parent.scope()
if not (
isinstance(scope, astroid.scoped_nodes.FunctionDef)
and scope.is_method()
and "builtins.staticmethod" not in scope.decoratornames()
):
return
argument_names = scope.argnames()
if not argument_names:
return
self_cls_name = argument_names[0]
target_assign_names = (
target.name
for target in node.targets
if isinstance(target, astroid.node_classes.AssignName)
)
if self_cls_name in target_assign_names:
self.add_message("self-cls-assignment", node=node, args=(self_cls_name,))
def _check_unpacking(self, inferred, node, targets):
"""Check for unbalanced tuple unpacking
and unpacking non sequences.
"""
if utils.is_inside_abstract_class(node):
return
if utils.is_comprehension(node):
return
if inferred is astroid.Uninferable:
return
if (
isinstance(inferred.parent, astroid.Arguments)
and isinstance(node.value, astroid.Name)
and node.value.name == inferred.parent.vararg
):
# Variable-length argument, we can't determine the length.
return
if isinstance(inferred, (astroid.Tuple, astroid.List)):
# attempt to check unpacking is properly balanced
values = inferred.itered()
if len(targets) != len(values):
# Check if we have starred nodes.
if any(isinstance(target, astroid.Starred) for target in targets):
return
self.add_message(
"unbalanced-tuple-unpacking",
node=node,
args=(
_get_unpacking_extra_info(node, inferred),
len(targets),
len(values),
),
)
# attempt to check unpacking may be possible (ie RHS is iterable)
elif not utils.is_iterable(inferred):
self.add_message(
"unpacking-non-sequence",
node=node,
args=(_get_unpacking_extra_info(node, inferred),),
)
def _check_module_attrs(self, node, module, module_names):
"""check that module_names (list of string) are accessible through the
given module
if the latest access name corresponds to a module, return it
"""
while module_names:
name = module_names.pop(0)
if name == "__dict__":
module = None
break
try:
module = next(module.getattr(name)[0].infer())
if module is astroid.Uninferable:
return None
except astroid.NotFoundError:
if module.name in self._ignored_modules:
return None
self.add_message(
"no-name-in-module", args=(name, module.name), node=node
)
return None
except astroid.InferenceError:
return None
if module_names:
modname = module.name if module else "__dict__"
self.add_message(
"no-name-in-module", node=node, args=(".".join(module_names), modname)
)
return None
if isinstance(module, astroid.Module):
return module
return None
def _check_all(self, node, not_consumed):
assigned = next(node.igetattr("__all__"))
if assigned is astroid.Uninferable:
return
for elt in getattr(assigned, "elts", ()):
try:
elt_name = next(elt.infer())
except astroid.InferenceError:
continue
if elt_name is astroid.Uninferable:
continue
if not elt_name.parent:
continue
if not isinstance(elt_name, astroid.Const) or not isinstance(
elt_name.value, str
):
self.add_message("invalid-all-object", args=elt.as_string(), node=elt)
continue
elt_name = elt_name.value
# If elt is in not_consumed, remove it from not_consumed
if elt_name in not_consumed:
del not_consumed[elt_name]
continue
if elt_name not in node.locals:
if not node.package:
self.add_message(
"undefined-all-variable", args=(elt_name,), node=elt
)
else:
basename = os.path.splitext(node.file)[0]
if os.path.basename(basename) == "__init__":
name = node.name + "." + elt_name
try:
modutils.file_from_modpath(name.split("."))
except ImportError:
self.add_message(
"undefined-all-variable", args=(elt_name,), node=elt
)
except SyntaxError:
# don't yield a syntax-error warning,
# because it will be later yielded
# when the file will be checked
pass
def _check_globals(self, not_consumed):
if self._allow_global_unused_variables:
return
for name, nodes in not_consumed.items():
for node in nodes:
self.add_message("unused-variable", args=(name,), node=node)
def _check_imports(self, not_consumed):
local_names = _fix_dot_imports(not_consumed)
checked = set()
for name, stmt in local_names:
for imports in stmt.names:
real_name = imported_name = imports[0]
if imported_name == "*":
real_name = name
as_name = imports[1]
if real_name in checked:
continue
if name not in (real_name, as_name):
continue
checked.add(real_name)
is_type_annotation_import = (
imported_name in self._type_annotation_names
or as_name in self._type_annotation_names
)
if isinstance(stmt, astroid.Import) or (
isinstance(stmt, astroid.ImportFrom) and not stmt.modname
):
if isinstance(stmt, astroid.ImportFrom) and SPECIAL_OBJ.search(
imported_name
):
# Filter special objects (__doc__, __all__) etc.,
# because they can be imported for exporting.
continue
if is_type_annotation_import:
# Most likely a typing import if it wasn't used so far.
continue
if as_name == "_":
continue
if as_name is None:
msg = "import %s" % imported_name
else:
msg = f"{imported_name} imported as {as_name}"
if not _is_type_checking_import(stmt):
self.add_message("unused-import", args=msg, node=stmt)
elif isinstance(stmt, astroid.ImportFrom) and stmt.modname != FUTURE:
if SPECIAL_OBJ.search(imported_name):
# Filter special objects (__doc__, __all__) etc.,
# because they can be imported for exporting.
continue
if _is_from_future_import(stmt, name):
# Check if the name is in fact loaded from a
# __future__ import in another module.
continue
if is_type_annotation_import:
# Most likely a typing import if it wasn't used so far.
continue
if imported_name == "*":
self.add_message("unused-wildcard-import", args=name, node=stmt)
else:
if as_name is None:
msg = f"{imported_name} imported from {stmt.modname}"
else:
fields = (imported_name, stmt.modname, as_name)
msg = "%s imported from %s as %s" % fields
if not _is_type_checking_import(stmt):
self.add_message("unused-import", args=msg, node=stmt)
del self._to_consume
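    # Hypothetical statements that would trigger the messages assembled above,
    # assuming the imported names are never used in the module:
    #   import os                  -> unused-import ("import os")
    #   from os import path as p   -> unused-import ("path imported from os as p")
    #   from os.path import *      -> unused-wildcard-import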
def _check_metaclasses(self, node):
""" Update consumption analysis for metaclasses. """
consumed = [] # [(scope_locals, consumed_key)]
for child_node in node.get_children():
if isinstance(child_node, astroid.ClassDef):
consumed.extend(self._check_classdef_metaclasses(child_node, node))
# Pop the consumed items, in order to avoid having
# unused-import and unused-variable false positives
for scope_locals, name in consumed:
scope_locals.pop(name, None)
def _check_classdef_metaclasses(self, klass, parent_node):
if not klass._metaclass:
            # Skip if this class doesn't explicitly use a metaclass, but inherits it from ancestors
return []
consumed = [] # [(scope_locals, consumed_key)]
metaclass = klass.metaclass()
name = None
if isinstance(klass._metaclass, astroid.Name):
name = klass._metaclass.name
elif isinstance(klass._metaclass, astroid.Attribute) and klass._metaclass.expr:
attr = klass._metaclass.expr
while not isinstance(attr, astroid.Name):
attr = attr.expr
name = attr.name
elif metaclass:
name = metaclass.root().name
found = None
name = METACLASS_NAME_TRANSFORMS.get(name, name)
if name:
# check enclosing scopes starting from most local
for scope_locals, _, _ in self._to_consume[::-1]:
found = scope_locals.get(name)
if found:
consumed.append((scope_locals, name))
break
if found is None and not metaclass:
name = None
if isinstance(klass._metaclass, astroid.Name):
name = klass._metaclass.name
elif (
isinstance(klass._metaclass, astroid.Attribute)
and klass._metaclass.expr
):
name = klass._metaclass.expr.name
if name is not None:
if not (
name in astroid.Module.scope_attrs
or utils.is_builtin(name)
or name in self.config.additional_builtins
or name in parent_node.locals
):
self.add_message("undefined-variable", node=klass, args=(name,))
return consumed
def register(linter):
"""required method to auto register this checker"""
linter.register_checker(VariablesChecker(linter))
| [
"[email protected]"
] | |
97f9717024df32c598a14e0f724cbdbe3bf03874 | 08d316151302f7ba4ae841c15b7adfe4e348ddf1 | /reviewboard/hostingsvcs/tests/test_sourceforge.py | f51901fb883aee02393c4d7033117c7503f6470d | [
"MIT"
] | permissive | LloydFinch/reviewboard | aa8cd21fac359d49b3dfc5a68c42b857c0c04bd8 | 563c1e8d4dfd860f372281dc0f380a0809f6ae15 | refs/heads/master | 2020-08-10T20:02:32.204351 | 2019-10-02T20:46:08 | 2019-10-02T20:46:08 | 214,411,166 | 2 | 0 | MIT | 2019-10-11T10:44:55 | 2019-10-11T10:44:54 | null | UTF-8 | Python | false | false | 2,727 | py | """Unit tests for the SourceForge hosting service."""
from __future__ import unicode_literals
from reviewboard.hostingsvcs.testing import HostingServiceTestCase
class SourceForgeTests(HostingServiceTestCase):
"""Unit tests for the SourceForge hosting service."""
service_name = 'sourceforge'
def test_service_support(self):
"""Testing SourceForge service support capabilities"""
self.assertTrue(self.service_class.supports_bug_trackers)
self.assertTrue(self.service_class.supports_repositories)
def test_get_repository_fields_with_bazaar(self):
"""Testing SourceForge.get_repository_fields for Bazaar"""
self.assertEqual(
self.get_repository_fields(
'Bazaar',
fields={
'sourceforge_project_name': 'myproj',
}
),
{
'path': 'bzr://myproj.bzr.sourceforge.net/bzrroot/myproj',
'mirror_path': ('bzr+ssh://myproj.bzr.sourceforge.net/bzrroot/'
'myproj'),
})
def test_get_repository_fields_with_cvs(self):
"""Testing SourceForge.get_repository_fields for CVS"""
self.assertEqual(
self.get_repository_fields(
'CVS',
fields={
'sourceforge_project_name': 'myproj',
}
),
{
'path': (':pserver:[email protected]:'
'/cvsroot/myproj'),
'mirror_path': 'myproj.cvs.sourceforge.net/cvsroot/myproj',
})
def test_get_repository_fields_with_mercurial(self):
"""Testing SourceForge.get_repository_fields for Mercurial"""
self.assertEqual(
self.get_repository_fields(
'Mercurial',
fields={
'sourceforge_project_name': 'myproj',
}
),
{
'path': 'http://myproj.hg.sourceforge.net:8000/hgroot/myproj',
'mirror_path': 'ssh://myproj.hg.sourceforge.net/hgroot/myproj',
})
def test_get_repository_fields_with_svn(self):
"""Testing SourceForge.get_repository_fields for Subversion"""
self.assertEqual(
self.get_repository_fields(
'Subversion',
fields={
'sourceforge_project_name': 'myproj',
}
),
{
'path': 'http://myproj.svn.sourceforge.net/svnroot/myproj',
'mirror_path': ('https://myproj.svn.sourceforge.net/svnroot/'
'myproj'),
})
| [
"[email protected]"
] | |
b2d53ffd5c0d66ff8ad6d790ce50cfb7e2ec15cd | 04a97dbb2ab510ec64a944dd5aaf834271788a32 | /django2/mysite/city_summary_stats.py | 465ddd22ae68c7689bedb5da94be9e8d77ced5a7 | [] | no_license | emilyding/ACE-cs122project | 98d2b67aa8151b2649c4a980d6daf80fb9817898 | 5941d8de85770f3e1b05e0bdc5a4b1cd6f1d35f5 | refs/heads/master | 2021-01-11T17:19:25.243093 | 2017-03-14T21:25:18 | 2017-03-14T21:25:18 | 79,745,285 | 0 | 0 | null | 2017-03-10T20:16:16 | 2017-01-22T20:59:03 | HTML | UTF-8 | Python | false | false | 6,330 | py | # Summary City Data
'''
Takes as an input a dictionary with the name of a city, and returns interesting
summary statistics. Note that using the adjusted database will lead to errors
identifying universally hated/acclaimed restaurants (as ratings of 1 or 5 will
be adjusted slightly upwards or downwards)
Usage: Call get_summary_info with the city of interest.
Example Call: get_summary_info({'city': 'Los Angeles'})
'''
import sqlite3
import csv
# Maps cities to median min meters between starbucks
# Dictionary readout produced by build_starbucks_dictionary.py
starbucks_mapper = {'albuquerque': '154.15', 'arlington': '83.33', 'atlanta': '352.59',
'austin': '123.41', 'baltimore': '86.41', 'boston': '98.32', 'buffalo': '162.93',
'charlotte': '251.00', 'chicago': '138.73', 'cleveland': '149.90', 'colorado springs': '221.52',
'columbus': '385.16', 'dallas': '517.69', 'denver': '282.46', 'detroit': '486.73',
'el paso': '241.77', 'fort worth': '239.43', 'fresno': '96.81', 'honolulu': '33.39',
'houston': '393.32', 'indianapolis': '406.86', 'jacksonville': '184.75', 'kansas city': '978.47',
'las vegas': '395.43', 'long beach': '112.44', 'los angeles': '187.45', 'louisville': '213.46',
'memphis': '219.27', 'mesa': '411.07', 'miami': '142.43', 'milwaukee': '146.95',
'minneapolis': '317.86', 'nashville': '173.47', 'new orleans': '103.72', 'new york': '105.39',
'oakland': '97.87', 'oklahoma city': '213.86', 'omaha': '228.06', 'philadelphia': '106.38',
'phoenix': '531.17', 'pittsburgh': '272.22', 'portland': '193.92', 'raleigh': '564.58',
'sacramento': '84.44', 'san antonio': '363.24', 'san diego': '110.48', 'san francisco': '67.07',
'san jose': '89.94', 'seattle': '134.22', 'st louis': '635.64', 'st paul': '125.64',
'tampa': '324.66', 'tucson': '135.19', 'tulsa': '327.75', 'virginia beach': '140.52',
'washington dc': '106.63'}
def build_starbucks_dictionary(filepath = 'starbucks_index.csv'):
'''
Given a filepath, constructs a dictionary mapping each city to the
median minimum distance to another Starbucks. Used in get_summary_info.
Inputs:
- filepath: The location of the Starbucks distance csv
Returns:
- Dictionary mapping cities to the median min starbucks distance
'''
starbucks_mapper = {}
with open(filepath) as holding:
reader = csv.reader(holding)
for row in reader:
# Skip headers
if row[2] != "Median Distance":
# Build dictionary, applying rounding
starbucks_mapper.update({row[1]: "{0:.2f}".format(float(row[2]))})
return starbucks_mapper
def get_summary_info(city = {'city': 'Los Angeles'}, database = "yelp_raw.db"):
'''
Takes in a city dictionary and database and returns summary statistics.
Inputs:
- city: City of interest. Format is {'city': 'Chicago'}
- database = Location of unadjusted database
Returns:
- A list of tuples displaying summary information
'''
# Change city input to lowercase, if necessary
city["city"] = city["city"].lower()
# Find necessary information
total_restaurants = find_total_restaurants(city, database)
starbucks_index = starbucks_mapper[city["city"]]
most_reviewed = find_most_reviewed_restaurant(city, database)
most_acclaimed = find_consensus_restaurant(city, database, rating = 5)
most_hated = find_consensus_restaurant(city, database, rating = 1)
# Construct Result List
result_list = []
result_list.append(("Total Restaurants in City:", total_restaurants))
result_list.append(("Starbucks Distance Index:",
"{} Meters".format(starbucks_index)))
result_list.append(("Most Reviewed Restaurant:",
"{}, {} Reviews".format(most_reviewed[0], most_reviewed[1])))
result_list.append(("Most Reviewed 5-Star Restaurant:",
"{}, {} Reviews".format(most_acclaimed[0], most_acclaimed[1])))
result_list.append(("Most Reviewed 1-Star Restaurant:",
"{}, {} Reviews".format(most_hated[0], most_hated[1])))
return result_list
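# Illustrative shape of the list returned above (values are made up):
#   [("Total Restaurants in City:", 1523),
#    ("Starbucks Distance Index:", "187.45 Meters"),
#    ("Most Reviewed Restaurant:", "Some Bistro, 4210 Reviews"),
#    ("Most Reviewed 5-Star Restaurant:", "Tiny Cafe, 310 Reviews"),
#    ("Most Reviewed 1-Star Restaurant:", "Greasy Spoon, 95 Reviews")]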
def find_total_restaurants(city, database):
'''
    Finds total number of restaurants in a city.
Inputs:
- city: City of interest. Format is {'city': 'Chicago'}
- database = Location of unadjusted database
Returns:
        - Integer count of restaurants in the city
'''
connection = sqlite3.connect(database)
c = connection.cursor()
search_string = '''SELECT COUNT(*)
FROM restaurant
WHERE city = ?
COLLATE NOCASE
'''
params = [city["city"]]
result = c.execute(search_string, params)
result = result.fetchone()
connection.commit()
c.connection.close()
return result[0]
def find_most_reviewed_restaurant(city, database):
'''
Finds the most reviewed restaurant and its review count
Inputs:
- city: City of interest. Format is {'city': 'Chicago'}
- database = Location of unadjusted database
Returns:
        - Most reviewed restaurant and review count as a list
'''
connection = sqlite3.connect(database)
c = connection.cursor()
search_string = '''SELECT name, reviews
FROM restaurant
WHERE city = ?
COLLATE NOCASE
'''
params = [city["city"]]
result = c.execute(search_string, params)
results = result.fetchall()
# Sort by review count
results = sorted(results, key=lambda x: x[1], reverse = True)
connection.commit()
c.connection.close()
return results[0]
def find_consensus_restaurant(city, database, rating):
'''
Finds most reviewed restaurant at a given rating level.
Inputs:
- city: City of interest. Format is {'city': 'Chicago'}
        - database = Location of unadjusted database
        - rating: Star rating to filter on (e.g. 5 or 1)
    Returns:
        - Most reviewed restaurant and review count as a list
'''
connection = sqlite3.connect(database)
c = connection.cursor()
search_string = '''SELECT name, reviews, rating
FROM restaurant
WHERE city = ?
COLLATE NOCASE
AND rating = ?;
'''
params = [city["city"], rating]
result = c.execute(search_string, params)
results = result.fetchall()
# Sort by review count
results = sorted(results, key=lambda x: x[1], reverse = True)
connection.commit()
c.connection.close()
return results[0]
| [
"[email protected]"
] | |
1068b7713ec020de5734f28062543b8079b9df6d | d62fbff86f8d4f332e843843bba7d07e2361554f | /Examples/tests/example_2_unittest.py | a4ecd8c88c26e34fb9560b454a023f409b1c0266 | [] | no_license | haisfo/python-environments | e031850fa4e8778eea7c618d1eec74e723e615f1 | 73fb4dbe56f1ebbfba71d440ba3c953556688bf9 | refs/heads/master | 2022-12-27T19:51:13.046530 | 2020-10-16T00:11:28 | 2020-10-16T00:11:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,149 | py | import unittest
from rock_paper_scissors_buggy import determine_winner, game_over, YOU, COMP
class TestWordGame(unittest.TestCase):
def test_determine_winner(self):
self.assertEqual(determine_winner('r', 'r'), None)
self.assertEqual(determine_winner('r', 'p'), COMP)
self.assertEqual(determine_winner('r', 's'), YOU)
self.assertEqual(determine_winner('p', 'r'), YOU)
self.assertEqual(determine_winner('p', 'p'), None)
self.assertEqual(determine_winner('p', 's'), COMP)
self.assertEqual(determine_winner('s', 'r'), COMP)
self.assertEqual(determine_winner('s', 'p'), YOU)
self.assertEqual(determine_winner('s', 's'), None)
def test_game_over(self):
self.assertEqual(game_over(3, [0, 0]), None)
self.assertEqual(game_over(3, [1, 1]), None)
self.assertEqual(game_over(3, [2, 1]), YOU)
self.assertEqual(game_over(3, [1, 2]), COMP)
self.assertEqual(game_over(5, [2, 2]), None)
self.assertEqual(game_over(5, [3, 0]), YOU)
self.assertEqual(game_over(5, [1, 3]), COMP)
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
87000c88ad989e720e8303d6f29d9912748b2d30 | 254d38ad3d455b94170e4ef17045d6c10daee986 | /doc_src/code_sample/case1_admin.py | f34ae1be6a6e05c2cdd7ae730c848ce4f701da26 | [
"Apache-2.0"
] | permissive | giorgil/django-articleappkit | 4d2108b6bb32e3089c61c5b81c55d7d6febf5acb | d301f2d511a65461eedbcc301955dafecba189ca | refs/heads/master | 2019-07-06T06:43:47.256809 | 2017-10-19T18:10:40 | 2017-10-19T18:10:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 487 | py | from django.contrib import admin
from articleappkit.admin import (ArticleBaseAdmin, ARTICLE_BASE_FIELDSET,
SINGLE_AUTHOR_FIELDSET, KEY_IMAGE_FIELDSET,
PUBLISHING_FIELDSET)
from .models import Story
class StoryAdmin(ArticleBaseAdmin):
fieldsets = (
ARTICLE_BASE_FIELDSET,
SINGLE_AUTHOR_FIELDSET,
KEY_IMAGE_FIELDSET,
PUBLISHING_FIELDSET,
)
admin.site.register(Story, StoryAdmin) | [
"[email protected]"
] | |
8d9bbcf792afe8a014df437737fec5c542ae3093 | 259cc507d97bfeff84d21de3a0ab56640676a9eb | /venv1/Lib/site-packages/tensorflow/contrib/eager/python/checkpointable_utils.py | 4a060f0c75f66abe18e471a6b7e2ccdaaba062c3 | [
"MIT",
"Apache-2.0"
] | permissive | Soum-Soum/Tensorflow_Face_Finder | c3ef71b6f718f6720b80f8760d28b6ca6e11e6d2 | fec6c15d2df7012608511ad87f4b55731bf99478 | refs/heads/master | 2020-03-22T20:31:39.606644 | 2018-07-12T13:47:56 | 2018-07-12T13:47:56 | 140,607,068 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 37,665 | py | """Utilities for working with Checkpointable objects."""
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import collections
import weakref
from tensorflow.contrib.eager.proto import checkpointable_object_graph_pb2
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.client import session as session_lib
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.training import checkpointable as core_checkpointable
from tensorflow.python.training import checkpointable_utils as core_checkpointable_utils
from tensorflow.python.training import optimizer as optimizer_lib
from tensorflow.python.training import saver as saver_lib
from tensorflow.python.util import deprecation
_ESCAPE_CHAR = "." # For avoiding conflicts with user-specified names.
# Keyword for identifying that the next bit of a checkpoint variable name is a
# slot name. Checkpoint names for slot variables look like:
#
# <path to variable>/<_OPTIMIZER_SLOTS_NAME>/<path to optimizer>/<slot name>
#
# Where <path to variable> is a full path from the checkpoint root to the
# variable being slotted for.
_OPTIMIZER_SLOTS_NAME = _ESCAPE_CHAR + "OPTIMIZER_SLOT"
# Keyword for separating the path to an object from the name of an
# attribute in checkpoint names. Used like:
# <path to variable>/<_OBJECT_ATTRIBUTES_NAME>/<name of attribute>
_OBJECT_ATTRIBUTES_NAME = _ESCAPE_CHAR + "ATTRIBUTES"
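# Illustrative checkpoint keys produced by this scheme (object names are made
# up for the sketch):
#   <path/from/root/to/object>/.ATTRIBUTES/<attribute name>
#   <path/to/variable>/.OPTIMIZER_SLOT/<path/to/optimizer>/<slot name>
# e.g. roughly  model/dense/kernel/.ATTRIBUTES/VARIABLE_VALUE  and
#               model/dense/kernel/.OPTIMIZER_SLOT/optimizer/m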
# Key where the object graph proto is saved in a TensorBundle
_OBJECT_GRAPH_PROTO_KEY = "_CHECKPOINTABLE_OBJECT_GRAPH"
# TODO(allenl): If this ends up in a public API, consider adding LINT.IfChange
# or consolidating the implementation with get_variable.
def _default_getter(name, shape, dtype, initializer=None,
partition_info=None, **kwargs):
"""A pared-down version of get_variable which does not reuse variables."""
dtype = dtypes.as_dtype(dtype)
shape_object = tensor_shape.as_shape(shape)
with ops.init_scope():
if initializer is None:
initializer, initializing_from_value = (
variable_scope._get_default_variable_store()._get_default_initializer( # pylint: disable=protected-access
name=name, shape=shape_object, dtype=dtype))
else:
initializing_from_value = not callable(initializer)
# Same logic as get_variable
variable_dtype = dtype.base_dtype
if initializing_from_value:
if shape is not None:
raise ValueError("If initializer is a constant, do not specify shape.")
initial_value = initializer
else:
# Instantiate initializer if provided initializer is a type object.
if isinstance(initializer, type(init_ops.Initializer)):
initializer = initializer(dtype=dtype)
def initial_value():
return initializer(
shape_object.as_list(), dtype=dtype, partition_info=partition_info)
return resource_variable_ops.ResourceVariable(
initial_value=initial_value,
name=name,
dtype=variable_dtype,
**kwargs
)
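# Minimal sketch of how the pared-down getter above can be invoked (values are
# illustrative; it mirrors the get_variable-style signature without reuse):
#   v = _default_getter(name="bias", shape=[10], dtype=dtypes.float32,
#                       initializer=init_ops.zeros_initializer())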
def add_variable(checkpointable, name, shape=None, dtype=dtypes.float32,
initializer=None):
"""Add a variable to a Checkpointable with no scope influence."""
return checkpointable._add_variable_with_custom_getter( # pylint: disable=protected-access
name=name, shape=shape, dtype=dtype,
initializer=initializer, getter=_default_getter)
def _breadth_first_checkpointable_traversal(root_checkpointable):
"""Find shortest paths to all variables owned by dependencies of root."""
bfs_sorted = []
to_visit = collections.deque([root_checkpointable])
path_to_root = {root_checkpointable: ()}
while to_visit:
current_checkpointable = to_visit.popleft()
current_checkpointable._maybe_initialize_checkpointable() # pylint: disable=protected-access
bfs_sorted.append(current_checkpointable)
for child_checkpointable in (
current_checkpointable._checkpoint_dependencies): # pylint: disable=protected-access
if child_checkpointable.ref not in path_to_root:
path_to_root[child_checkpointable.ref] = (
path_to_root[current_checkpointable] + (child_checkpointable,))
to_visit.append(child_checkpointable.ref)
return bfs_sorted, path_to_root
def _escape_local_name(name):
# We need to support slashes in local names for compatibility, since this
# naming scheme is being patched in to things like Layer.add_variable where
# slashes were previously accepted. We also want to use slashes to indicate
# edges traversed to reach the variable, so we escape forward slashes in
# names.
return (name.replace(_ESCAPE_CHAR, _ESCAPE_CHAR + _ESCAPE_CHAR)
.replace(r"/", _ESCAPE_CHAR + "S"))
def _object_prefix_from_path(path_to_root):
return "/".join(
(_escape_local_name(checkpointable.name)
for checkpointable in path_to_root))
def _slot_variable_naming_for_optimizer(optimizer_path):
"""Make a function for naming slot variables in an optimizer."""
# Name slot variables:
#
# <variable name>/<_OPTIMIZER_SLOTS_NAME>/<optimizer path>/<slot name>
#
# where <variable name> is exactly the checkpoint name used for the original
# variable, including the path from the checkpoint root and the local name in
# the object which owns it. Note that we only save slot variables if the
# variable it's slotting for is also being saved.
optimizer_identifier = "/%s/%s/" % (_OPTIMIZER_SLOTS_NAME, optimizer_path)
def _name_slot_variable(variable_path, slot_name):
"""With an optimizer specified, name a slot variable."""
return (variable_path
+ optimizer_identifier
+ _escape_local_name(slot_name))
return _name_slot_variable
def _serialize_slot_variables(checkpointable_objects, node_ids, object_names):
"""Gather and name slot variables."""
non_slot_objects = list(checkpointable_objects)
slot_variables = {}
for checkpointable in non_slot_objects:
if isinstance(checkpointable, optimizer_lib.Optimizer):
naming_scheme = _slot_variable_naming_for_optimizer(
optimizer_path=object_names[checkpointable])
slot_names = checkpointable.get_slot_names()
for slot_name in slot_names:
for original_variable_node_id, original_variable in enumerate(
non_slot_objects):
try:
slot_variable = checkpointable.get_slot(
original_variable, slot_name)
except AttributeError:
slot_variable = None
if slot_variable is None:
continue
slot_variable._maybe_initialize_checkpointable() # pylint: disable=protected-access
if slot_variable._checkpoint_dependencies: # pylint: disable=protected-access
# TODO(allenl): Gather dependencies of slot variables.
raise NotImplementedError(
"Currently only variables with no dependencies can be saved as "
"slot variables. File a feature request if this limitation "
"bothers you.")
if slot_variable in node_ids:
raise NotImplementedError(
"A slot variable was re-used as a dependency of a "
"Checkpointable object. This is not currently allowed. File a "
"feature request if this limitation bothers you.")
checkpoint_name = naming_scheme(
variable_path=object_names[original_variable],
slot_name=slot_name)
object_names[slot_variable] = checkpoint_name
slot_variable_node_id = len(checkpointable_objects)
node_ids[slot_variable] = slot_variable_node_id
checkpointable_objects.append(slot_variable)
slot_variable_proto = (
checkpointable_object_graph_pb2.CheckpointableObjectGraph
.Object.SlotVariableReference(
slot_name=slot_name,
original_variable_node_id=original_variable_node_id,
slot_variable_node_id=slot_variable_node_id))
slot_variables.setdefault(checkpointable, []).append(
slot_variable_proto)
return slot_variables
def _serialize_checkpointables(
checkpointable_objects, node_ids, object_names, slot_variables):
"""Name non-slot `Checkpointable`s and add them to `object_graph_proto`."""
object_graph_proto = (
checkpointable_object_graph_pb2.CheckpointableObjectGraph())
named_saveables = {}
for checkpoint_id, checkpointable in enumerate(checkpointable_objects):
assert node_ids[checkpointable] == checkpoint_id
object_proto = object_graph_proto.nodes.add()
object_proto.slot_variables.extend(slot_variables.get(checkpointable, ()))
object_name = object_names[checkpointable]
for name, saveable in (
checkpointable._gather_saveables_for_checkpoint().items()): # pylint: disable=protected-access
attribute = object_proto.attributes.add()
attribute.name = name
attribute.checkpoint_key = "%s/%s/%s" % (
object_name, _OBJECT_ATTRIBUTES_NAME, _escape_local_name(name))
# Figure out the name-based Saver's name for this variable.
saver_dict = saver_lib.BaseSaverBuilder.OpListToDict(
[saveable], convert_variable_to_tensor=False)
attribute.full_name, = saver_dict.keys()
named_saveables[attribute.checkpoint_key] = saveable
for child in checkpointable._checkpoint_dependencies: # pylint: disable=protected-access
child_proto = object_proto.children.add()
child_proto.node_id = node_ids[child.ref]
child_proto.local_name = child.name
return named_saveables, object_graph_proto
def _serialize_object_graph(root_checkpointable):
"""Determine checkpoint keys for variables and build a serialized graph.
Non-slot variables are keyed based on a shortest path from the root saveable
to the object which owns the variable (i.e. the one which called
`Checkpointable._add_variable` to create it).
Slot variables are keyed based on a shortest path to the variable being
slotted for, a shortest path to their optimizer, and the slot name.
Args:
root_checkpointable: A `Checkpointable` object whose variables (including
the variables of dependencies, recursively) should be saved.
Returns:
A tuple of (named_variables, object_graph_proto):
named_variables: A dictionary mapping names to variable objects.
object_graph_proto: A CheckpointableObjectGraph protocol buffer containing
the serialized object graph and variable references.
Raises:
ValueError: If there are invalid characters in an optimizer's slot names.
"""
checkpointable_objects, path_to_root = (
_breadth_first_checkpointable_traversal(root_checkpointable))
object_names = {
obj: _object_prefix_from_path(path)
for obj, path in path_to_root.items()}
node_ids = {node: node_id for node_id, node
in enumerate(checkpointable_objects)}
slot_variables = _serialize_slot_variables(
checkpointable_objects=checkpointable_objects,
node_ids=node_ids,
object_names=object_names)
return _serialize_checkpointables(
checkpointable_objects=checkpointable_objects,
node_ids=node_ids,
object_names=object_names,
slot_variables=slot_variables)
def gather_initializers(root_checkpointable):
"""Traverse the object graph and find initialization ops.
Looks for `Checkpointable` objects which are dependencies of
`root_checkpointable` and which have an `initializer` property. Includes
initializers for slot variables only if the variable they are slotting for and
the optimizer are dependencies of `root_checkpointable` (i.e. if they would be
saved with a checkpoint).
Args:
root_checkpointable: A `Checkpointable` object to gather initializers for.
Returns:
A list of initialization ops.
"""
# TODO(allenl): Extract out gathering logic so the naming logic doesn't have
# to run.
checkpointable_objects, path_to_root = (
_breadth_first_checkpointable_traversal(root_checkpointable))
object_names = {
obj: _object_prefix_from_path(path)
for obj, path in path_to_root.items()}
node_ids = {node: node_id for node_id, node
in enumerate(checkpointable_objects)}
_serialize_slot_variables(
checkpointable_objects=checkpointable_objects,
node_ids=node_ids,
object_names=object_names)
return [c.initializer for c in checkpointable_objects
if hasattr(c, "initializer") and c.initializer is not None]
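# Sketch of typical graph-mode usage (session handling is the caller's choice);
# InitializationOnlyStatus.initialize_or_restore below does essentially this:
#   initializers = gather_initializers(root_checkpointable)
#   session.run(initializers)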
class _NoRestoreSaveable(saver_lib.BaseSaverBuilder.SaveableObject):
def __init__(self, tensor, name):
spec = saver_lib.BaseSaverBuilder.SaveSpec(tensor, "", name)
super(_NoRestoreSaveable, self).__init__(tensor, [spec], name)
def restore(self, restored_tensors, restored_shapes):
return control_flow_ops.no_op()
class _LoadStatus(object):
"""Abstract base for load status callbacks."""
@abc.abstractmethod
def assert_consumed(self):
"""Raises an exception unless a non-trivial restoration has completed."""
pass
@abc.abstractmethod
def run_restore_ops(self, session=None):
"""Runs restore ops from the checkpoint. Requires a valid checkpoint."""
pass
@abc.abstractmethod
def initialize_or_restore(self, session=None):
"""Runs restore ops from the checkpoint, or initializes variables."""
pass
class CheckpointLoadStatus(_LoadStatus):
"""Checks the status of checkpoint loading and manages restore ops.
Returned from `Saver.restore`. Since `restore` may defer the loading of values
in the checkpoint which don't yet have corresponding Python objects,
`CheckpointLoadStatus` provides a callback to verify that checkpoint loading
is complete (`assert_consumed`).
When graph building, `restore` does not run restore ops itself since their
creation may be deferred. The `run_restore_ops` method must be called once all
Python objects with values to restore have been created and added to the
dependency graph (this does not necessarily have to be the whole checkpoint;
calling `run_restore_ops` while `assert_consumed` fails is supported and will
partially restore the checkpoint).
See `Saver.restore` for usage examples.
"""
def __init__(self, checkpoint, feed_dict):
self._checkpoint = checkpoint
self._feed_dict = feed_dict
def assert_consumed(self):
"""Asserts that all objects in the checkpoint have been created/matched.
Returns:
`self` for chaining.
Raises:
AssertionError: If there are any Python objects in the dependency graph
which have not been restored from this checkpoint or a later `restore`,
or if there are any checkpointed values which have not been matched to
Python objects.
"""
for node_id, node in enumerate(self._checkpoint.object_graph_proto.nodes):
checkpointable = self._checkpoint.object_by_proto_id.get(node_id, None)
if checkpointable is None:
raise AssertionError("Unresolved object in checkpoint: %s" % (node,))
if checkpointable._update_uid < self._checkpoint.restore_uid: # pylint: disable=protected-access
raise AssertionError(
"Object not assigned a value from checkpoint: %s" % (node,))
if self._checkpoint.slot_restorations:
# Sanity check; this collection should be clear if everything has been
# restored.
raise AssertionError("Unresolved slot restorations: %s" % (
self._checkpoint.slot_restorations,))
if self._checkpoint.unused_attributes:
raise AssertionError(
("Unused attributes in these objects (the attributes exist in the "
"checkpoint but not in the objects): %s") % (
self._checkpoint.unused_attributes.items(),))
return self
def run_restore_ops(self, session=None):
"""Run operations to restore objects in the dependency graph."""
if context.executing_eagerly():
return # Run eagerly
if session is None:
session = ops.get_default_session()
session.run(self._checkpoint.restore_ops, feed_dict=self._feed_dict)
def initialize_or_restore(self, session=None):
"""Alias for `run_restore_ops`.
This method has a sibling in `InitializationOnlyStatus` which instead
initializes variables. That type is returned if no checkpoint is specified
in `Saver.restore`.
Args:
session: The session to run restore ops in. If `None`, uses the default
session.
"""
self.run_restore_ops(session=session)
class InitializationOnlyStatus(_LoadStatus):
"""Returned from `Saver.restore` when no checkpoint has been specified.
Objects of this type have the same `assert_consumed` method as
`CheckpointLoadStatus`, but it always fails. However,
`initialize_or_restore` works on objects of both types, and will
initialize variables in `InitializationOnlyStatus` objects or restore them
otherwise.
"""
def __init__(self, root_checkpointable):
self._root_checkpointable = root_checkpointable
def assert_consumed(self):
"""Assertion for consistency with `CheckpointLoadStatus`. Always fails."""
raise AssertionError(
"No checkpoint specified (save_path=None); nothing is being restored.")
def run_restore_ops(self, session=None):
"""For consistency with `CheckpointLoadStatus`.
Use `initialize_or_restore` for initializing if no checkpoint was passed
to `Saver.restore` and restoring otherwise.
Args:
session: Not used.
"""
raise AssertionError(
"No checkpoint specified, so no restore ops are available "
"(save_path=None to Saver.restore).")
def initialize_or_restore(self, session=None):
"""Runs initialization ops for variables.
Only objects which would be saved by `Saver.save` will be initialized. See
`gather_initializers` for details.
This method does nothing when executing eagerly (initializers get run
eagerly).
Args:
session: The session to run initialization ops in. If `None`, uses the
default session.
"""
if context.executing_eagerly():
return # run eagerly
if session is None:
session = ops.get_default_session()
session.run(gather_initializers(self._root_checkpointable))
_DEPRECATED_RESTORE_INSTRUCTIONS = (
"Restoring a name-based tf.train.Saver checkpoint using the object-based "
"restore API. This mode uses global names to match variables, and so is "
"somewhat fragile. It also adds new restore ops to the graph each time it "
"is called. Prefer re-encoding training checkpoints in the object-based "
"format: run save() on the object-based saver (the same one this message "
"is coming from) and use that checkpoint in the future.")
class NameBasedSaverStatus(_LoadStatus):
"""Status for loading a name-based training checkpoint."""
def __init__(self, object_saver, save_path):
self._object_saver = object_saver
self._save_path = save_path
def assert_consumed(self):
"""Assertion for consistency with `CheckpointLoadStatus`. Always fails."""
raise AssertionError(
"Restoring a name-based checkpoint. No load status is available.")
@deprecation.deprecated(
date=None, instructions=_DEPRECATED_RESTORE_INSTRUCTIONS)
def run_restore_ops(self, session=None):
"""Load the name-based training checkpoint using a new `tf.train.Saver`."""
if session is None and not context.executing_eagerly():
session = ops.get_default_session()
with ops.device("/cpu:0"):
saver_lib.Saver(self._object_saver._global_variable_names()).restore( # pylint: disable=protected-access
sess=session, save_path=self._save_path)
def initialize_or_restore(self, session=None):
"""Alias for `run_restore_ops`."""
self.run_restore_ops(session=session)
class _SessionWithFeedDictAdditions(session_lib.SessionInterface):
"""Pretends to be a session, inserts extra feeds on run()."""
def __init__(self, session, feed_additions):
self._wrapped_session = session
self._feed_additions = feed_additions
def run(self, fetches, feed_dict=None, **kwargs):
if feed_dict is None:
feed_dict = {}
else:
feed_dict = feed_dict.copy()
feed_dict.update(self._feed_additions)
return self._wrapped_session.run(
fetches=fetches, feed_dict=feed_dict, **kwargs)
class CheckpointableSaver(object):
"""Saves and restores a `Checkpointable` object and its dependencies.
See `Checkpointable` for details of dependency management. `Saver` wraps
`tf.train.Saver` for saving, including extra information about the graph of
dependencies between Python objects. When restoring, it uses this information
about the save-time dependency graph to more robustly match objects with their
checkpointed values. When executing eagerly, it supports restoring variables
on object creation (see `Saver.restore`).
Values in a checkpoint are mapped to `Checkpointable` Python objects
(`Variable`s, `Optimizer`s, `Layer`s) based on the names provided when the
checkpoint was written. To avoid breaking existing checkpoints when modifying
a class, dependency names (the names of attributes to which `Checkpointable`
objects are assigned) may not change. These names are local to objects, in
contrast to the `Variable.name`-based save/restore from `tf.train.Saver`, and
so allow additional program transformations.
"""
def __init__(self, root_checkpointable):
"""Configure saving.
Args:
root_checkpointable: The root of the object graph to save/restore. This
object and all of its dependencies are saved in the checkpoint. When
restoring, objects are matched and restored starting from this root.
"""
# Allow passing in a weak reference to avoid reference cycles when
# `Checkpointable` objects save themselves.
self._root_checkpointable_ref = root_checkpointable
if not context.executing_eagerly():
with ops.device("/cpu:0"):
self._file_prefix_placeholder = constant_op.constant("model")
else:
self._file_prefix_placeholder = None
# Op caching for save
self._object_graph_feed_tensor = None
self._last_save_object_graph = None
self._last_save_saver = None
# Op caching for restore
self._object_graph_restore_tensor = None
self._last_restore_object_graph = None
self._last_restore_checkpoint = None
@property
def _root_checkpointable(self):
if isinstance(self._root_checkpointable_ref, weakref.ref):
derefed = self._root_checkpointable_ref()
assert derefed is not None
return derefed
else:
return self._root_checkpointable_ref
def save(self, file_prefix, checkpoint_number=None, session=None):
"""Save a training checkpoint.
The saved checkpoint includes variables created by this object and any
Checkpointable objects it depends on at the time `Saver.save()` is called.
Args:
file_prefix: A prefix to use for the checkpoint filenames
(/path/to/directory/and_a_prefix). Names are generated based on this
prefix and `checkpoint_number`, if provided.
checkpoint_number: An integer variable or Tensor, used to number
checkpoints. Typically this value is saved along with other variables in
training checkpoints, which will happen automatically if it was created
by `root_checkpointable` or one of its dependencies (via
`Checkpointable._add_variable`).
session: The session to evaluate variables in. Ignored when executing
eagerly. If not provided when graph building, the default session is
used.
Returns:
The full path to the checkpoint.
"""
named_variables, graph_proto = _serialize_object_graph(
self._root_checkpointable)
in_graph_mode = not context.executing_eagerly()
if in_graph_mode:
if session is None:
session = ops.get_default_session()
if self._object_graph_feed_tensor is None:
with ops.device("/cpu:0"):
self._object_graph_feed_tensor = constant_op.constant(
"", dtype=dtypes.string)
object_graph_tensor = self._object_graph_feed_tensor
feed_additions = {object_graph_tensor: graph_proto.SerializeToString()}
else:
session = None
with ops.device("/cpu:0"):
object_graph_tensor = constant_op.constant(
graph_proto.SerializeToString(), dtype=dtypes.string)
feed_additions = None
assert _OBJECT_GRAPH_PROTO_KEY not in named_variables
named_variables[_OBJECT_GRAPH_PROTO_KEY] = _NoRestoreSaveable(
tensor=object_graph_tensor,
name=_OBJECT_GRAPH_PROTO_KEY)
if not in_graph_mode or self._last_save_object_graph != graph_proto:
if self._last_save_object_graph is not None and in_graph_mode:
raise NotImplementedError(
"Using a single Saver to save a mutated object graph is not "
"currently supported when graph building. Use a different Saver "
"when the object graph changes (save ops will be duplicated), or "
"file a feature request if this limitation bothers you.")
saver = saver_lib.Saver(var_list=named_variables)
if in_graph_mode:
self._last_save_saver = saver
self._last_save_object_graph = graph_proto
else:
saver = self._last_save_saver
with ops.device("/cpu:0"):
save_path = saver.save(
sess=_SessionWithFeedDictAdditions(
session=session, feed_additions=feed_additions),
save_path=file_prefix,
write_meta_graph=False,
global_step=checkpoint_number)
return save_path
def _global_variable_names(self):
"""Generate a `tf.train.Saver`-style `var_list` using `variable.name`s."""
named_saveables, graph_proto = _serialize_object_graph(
self._root_checkpointable)
saver_names = {}
for object_proto in graph_proto.nodes:
for attribute_proto in object_proto.attributes:
saver_names[attribute_proto.full_name] = named_saveables[
attribute_proto.checkpoint_key]
return saver_names
def restore(self, save_path, session=None):
"""Restore a training checkpoint.
Restores `root_checkpointable` and any objects that it tracks
(transitive). Either assigns values immediately if variables to restore have
been created already, or defers restoration until the variables are
created. Dependencies added to the `root_checkpointable` passed to the
constructor after this call will be matched if they have a corresponding
object in the checkpoint.
When building a graph, restorations are added to the graph but not run. A
session is required to retrieve checkpoint metadata.
To disallow deferred loading, assert immediately that all checkpointed
variables have been matched to variable objects:
```python
saver = Saver(root)
saver.restore(path).assert_consumed()
```
An exception will be raised unless every object was matched and its
variables already exist.
When graph building, `assert_consumed()` indicates that all of the restore
ops which will be created for this checkpoint have been created. They can be
run via the `run_restore_ops()` function of the status object:
```python
saver.restore(path).assert_consumed().run_restore_ops()
```
If the checkpoint has not been consumed completely, then the list of restore
ops will grow as more objects are added to the dependency graph.
Name-based `tf.train.Saver` checkpoints can be loaded using this
method. There is no deferred loading, and names are used to match
variables. No restore ops are created/run until `run_restore_ops()` or
`initialize_or_restore()` are called on the returned status object, even
when executing eagerly. Re-encode name-based checkpoints using this
object-based `Saver.save` as soon as possible.
Args:
save_path: The path to the checkpoint, as returned by `save` or
`tf.train.latest_checkpoint`. If None (as when there is no latest
checkpoint for `tf.train.latest_checkpoint` to return), returns an
object which may run initializers for objects in the dependency
graph. If the checkpoint was written by the name-based `tf.train.Saver`,
names are used to match variables.
session: The session to retrieve metadata with. Ignored when executing
eagerly. If not provided when graph building, the default session is
used.
Returns:
A load status object, which can be used to make assertions about the
status of checkpoint restoration and run initialization/restore ops
(of type `CheckpointLoadStatus`, or `InitializationOnlyStatus` if
`save_path` is `None`).
If `save_path` points to a name-based checkpoint, a `NameBasedSaverStatus`
object is returned which runs restore ops from a name-based saver.
"""
if save_path is None:
return InitializationOnlyStatus(self._root_checkpointable)
in_graph_mode = not context.executing_eagerly()
if in_graph_mode:
if session is None:
session = ops.get_default_session()
file_prefix_tensor = self._file_prefix_placeholder
file_prefix_feed_dict = {self._file_prefix_placeholder: save_path}
else:
session = None
with ops.device("/cpu:0"):
file_prefix_tensor = constant_op.constant(save_path)
file_prefix_feed_dict = None
try:
if not in_graph_mode or self._object_graph_restore_tensor is None:
with ops.device("/cpu:0"):
object_graph_string, = io_ops.restore_v2(
prefix=file_prefix_tensor,
tensor_names=[_OBJECT_GRAPH_PROTO_KEY],
shape_and_slices=[""],
dtypes=[dtypes.string],
name="object_graph_proto_read")
if in_graph_mode:
self._object_graph_restore_tensor = object_graph_string
if in_graph_mode:
object_graph_string = session.run(
self._object_graph_restore_tensor,
feed_dict=file_prefix_feed_dict)
else:
object_graph_string = object_graph_string.numpy()
except errors_impl.NotFoundError:
# The object graph proto does not exist in this checkpoint. Try again with
# name-based saving.
return NameBasedSaverStatus(self, save_path)
object_graph_proto = (
checkpointable_object_graph_pb2.CheckpointableObjectGraph())
object_graph_proto.ParseFromString(object_graph_string)
if in_graph_mode and object_graph_proto == self._last_restore_object_graph:
checkpoint = self._last_restore_checkpoint
else:
if in_graph_mode:
dtype_map = None
else:
reader = pywrap_tensorflow.NewCheckpointReader(save_path)
dtype_map = reader.get_variable_to_dtype_map()
checkpoint = core_checkpointable_utils._Checkpoint( # pylint: disable=protected-access
object_graph_proto=object_graph_proto,
save_path=file_prefix_tensor,
dtype_map=dtype_map)
if in_graph_mode:
if self._last_restore_object_graph is not None:
raise NotImplementedError(
"Using a single Saver to restore different object graphs is not "
"currently supported when graph building. Use a different Saver "
"for each object graph (restore ops will be duplicated), or "
"file a feature request if this limitation bothers you.")
self._last_restore_checkpoint = checkpoint
self._last_restore_object_graph = object_graph_proto
core_checkpointable._CheckpointPosition( # pylint: disable=protected-access
checkpoint=checkpoint, proto_id=0).restore(self._root_checkpointable)
load_status = CheckpointLoadStatus(
checkpoint, feed_dict=file_prefix_feed_dict)
return load_status
class Checkpoint(core_checkpointable.Checkpointable):
"""A utility class which groups `Checkpointable` objects.
Accepts arbitrary keyword arguments to its constructor and saves those values
with a checkpoint. Maintains a `save_counter` for numbering checkpoints.
Example usage:
```python
import tensorflow as tf
import tensorflow.contrib.eager as tfe
import os
checkpoint_directory = "/tmp/training_checkpoints"
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
root = tfe.Checkpoint(optimizer=optimizer, model=model)
root.restore(tf.train.latest_checkpoint(checkpoint_directory))
for _ in range(num_training_steps):
optimizer.minimize( ... )
root.save(file_prefix=checkpoint_prefix)
```
For more manual control over saving, use `tfe.CheckpointableSaver` directly.
Attributes:
save_counter: Incremented when `save()` is called. Used to number
checkpoints.
"""
def __init__(self, **kwargs):
"""Group objects into a training checkpoint.
Args:
**kwargs: Keyword arguments are set as attributes of this object, and are
saved with the checkpoint. Attribute values must derive from
`CheckpointableBase`.
Raises:
ValueError: If objects in `kwargs` are not Checkpointable.
"""
super(Checkpoint, self).__init__()
for k, v in sorted(kwargs.items(), key=lambda item: item[0]):
if not isinstance(v, core_checkpointable.CheckpointableBase):
raise ValueError(
("`Checkpoint` was expecting an object derived from "
"`CheckpointableBase`, got %s.") % (v,))
setattr(self, k, v)
self._save_counter = None # Created lazily for restore-on-create.
self._saver = CheckpointableSaver(weakref.ref(self))
def _maybe_create_save_counter(self):
"""Create a save counter if it does not yet exist."""
if self._save_counter is None:
# Initialized to 0 and incremented before saving.
with ops.device("/cpu:0"):
self._save_counter = add_variable(
self, name="save_counter", initializer=0, dtype=dtypes.int64)
@property
def save_counter(self):
"""An integer variable which starts at zero and is incremented on save.
Used to number checkpoints.
Returns:
The save counter variable.
"""
self._maybe_create_save_counter()
return self._save_counter
def save(self, file_prefix, session=None):
"""Save a checkpoint. Wraps `tfe.CheckpointableSaver.save`."""
in_graph_mode = not context.executing_eagerly()
if in_graph_mode:
if session is None:
session = ops.get_default_session()
if self._save_counter is None:
# When graph building, if this is a new save counter variable then it
# needs to be initialized before assign_add. This is only an issue if
# restore() has not been called first.
session.run(self.save_counter.initializer)
with ops.colocate_with(self.save_counter):
assign_op = self.save_counter.assign_add(1)
if in_graph_mode:
session.run(assign_op)
return self._saver.save(
file_prefix=file_prefix,
checkpoint_number=self.save_counter,
session=session)
def restore(self, save_path):
"""Restore a checkpoint. Wraps `tfe.CheckpointableSaver.restore`."""
status = self._saver.restore(save_path=save_path)
# Create the save counter now so it gets initialized with other variables
# when graph building. Creating it earlier would lead to double
# initialization when executing eagerly.
self._maybe_create_save_counter()
return status
| [
"[email protected]"
] | |
8f51ba00c95343b9bb716afd8882bf94bf4931e4 | 0c85cba348e9abace4f16dfb70531c70175dac68 | /cloudroast/blockstorage/volumes_api/integration/compute/fixtures.py | caefc012dfe9f485b122f7e89da62af0983f99b6 | [
"Apache-2.0"
] | permissive | RULCSoft/cloudroast | 31157e228d1fa265f981ec82150255d4b7876af2 | 30f0e64672676c3f90b4a582fe90fac6621475b3 | refs/heads/master | 2020-04-04T12:20:59.388355 | 2018-11-02T21:32:27 | 2018-11-02T21:32:27 | 155,923,262 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,788 | py | """
Copyright 2013 Rackspace
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from cloudcafe.common.tools.datagen import random_string
from cloudcafe.compute.composites import ComputeIntegrationComposite
from cloudroast.blockstorage.volumes_api.fixtures import VolumesTestFixture
class ComputeIntegrationTestFixture(VolumesTestFixture):
@classmethod
def setUpClass(cls):
super(ComputeIntegrationTestFixture, cls).setUpClass()
cls.compute = ComputeIntegrationComposite()
cls.servers = cls.compute.servers
cls.flavors = cls.compute.flavors
cls.images = cls.compute.images
cls.volume_attachments = cls.compute.volume_attachments
@classmethod
def random_server_name(cls):
return random_string(prefix="Server_", size=10)
@classmethod
def new_server(
cls, name=None, image=None, flavor=None, add_cleanup=True):
name = name or cls.random_server_name()
image = image or cls.images.config.primary_image
flavor = flavor or cls.flavors.config.primary_flavor
resp = cls.servers.behaviors.create_active_server(
name, image_ref=image, flavor_ref=flavor)
if add_cleanup:
cls.addClassCleanup(
cls.servers.client.delete_server, resp.entity.id)
return resp.entity
@classmethod
def attach_volume_and_get_device_info(
cls, server_connection, server_id, volume_id):
original_details = server_connection.get_all_disk_details()
attachment = \
cls.volume_attachments.behaviors.attach_volume_to_server(
server_id, volume_id)
assert attachment, "Could not attach volume {0} to server {1}".format(
volume_id, server_id)
new_details = server_connection.get_all_disk_details()
volume_details = [d for d in new_details if d not in original_details]
cls.fixture_log.debug(volume_details)
assert len(volume_details) == 1, (
"Could not uniquely identity the attached volume via the OS.")
setattr(attachment, 'os_disk_details', volume_details)
os_disk_device_name = \
volume_details[0].get('Number') or "/dev/{0}".format(
volume_details[0].get('name'))
assert os_disk_device_name, (
"Could not get a unique device name from the OS")
setattr(attachment, 'os_disk_device_name', os_disk_device_name)
return attachment
@classmethod
def format_attached_volume(
cls, server_connection, device_name, fstype=None):
resp = None
if device_name.startswith('/dev'):
resp = server_connection.format_disk(device_name, fstype or 'ext3')
else:
resp = server_connection.format_disk(device_name, fstype or 'ntfs')
assert resp is not None, (
"An error occured while trying to format the attached volume")
return resp
@classmethod
def mount_attached_volume(
cls, server_connection, device_name, mount_point=None):
mount_point = mount_point or server_connection.generate_mountpoint()
if device_name.startswith('/dev'):
server_connection.create_directory(mount_point)
return server_connection.mount_disk(
source_path=device_name, destination_path=mount_point)
@classmethod
def unmount_attached_volume(cls, server_connection, device_name):
return server_connection.unmount_disk(device_name)
@classmethod
def _add_directory_prefix(cls, file_directory_string):
if not file_directory_string.startswith('/'):
if len(file_directory_string) == 1:
file_directory_string = file_directory_string + ":\\"
return file_directory_string
@classmethod
def get_remote_file_md5_hash(
cls, server_connection, file_directory, file_name):
file_directory = cls._add_directory_prefix(file_directory)
return server_connection.get_md5sum_for_remote_file(
file_directory, file_name)
@classmethod
def create_remote_file(
cls, server_connection, file_directory, file_name,
file_content=None):
file_content = file_content or "content"
file_directory = cls._add_directory_prefix(file_directory)
return server_connection.create_file(
file_name, file_content, file_directory)
@classmethod
def _get_remote_client(cls, client_type):
client = None
if client_type == 'windows':
from cloudcafe.compute.common.clients.remote_instance.windows.\
windows_client import WindowsClient
client = WindowsClient
if client_type == 'linux':
from cloudcafe.compute.common.clients.remote_instance.linux.\
linux_client import LinuxClient
client = LinuxClient
if not client:
raise Exception(
"Unrecognized client type: {0}".format(client_type))
return client
@classmethod
def _connect(
cls, remote_client, ip_address=None, username=None,
connection_timeout=None, key=None, password=None):
kwargs = {
'ip_address': ip_address,
'username': username,
'connection_timeout': connection_timeout}
# Key always takes precendence over password if both are provided
auth_strategy = "key" if key else "password"
kwargs[auth_strategy] = key or password
_client = remote_client(**kwargs)
return _client
@classmethod
def connect_to_server(
cls, ip_address, username='root', password=None, key=None,
connection_timeout=None, client_type='linux'):
"""Returns a client for communication with the server"""
remote_client = cls._get_remote_client(client_type)
return cls._connect(
remote_client, ip_address=ip_address, username=username,
            connection_timeout=connection_timeout, key=key,
password=password)
@classmethod
def get_image_os_type(cls, image_id):
# TODO: make this method handle the various versions of the images
# api and image model. This might mean making an images auto composite.
image = cls.images.client.get_image(image_id).entity
return image.metadata.get('os_type', '').lower()
@classmethod
def connect_to_instance(
cls, server_instance_model, key=None, connection_timeout=None,
os_type=None):
"""Special helper method that pulls all neccessary values from a
compute server model, and returns a client for communication with
that server
"""
_usernames = {'windows': 'administrator', 'linux': 'root'}
ip_address = None
if hasattr(server_instance_model, 'accessIPv4'):
ip_address = server_instance_model.accessIPv4
else:
ip_address = server_instance_model.addresses.public.ipv4
if os_type is None:
os_type = cls.get_image_os_type(server_instance_model.image.id)
username = _usernames.get(os_type)
password = server_instance_model.admin_pass
connection_timeout = \
connection_timeout or cls.servers.config.connection_timeout
remote_client = cls._get_remote_client(os_type)
return cls._connect(
remote_client, ip_address=ip_address, username=username,
connection_timeout=connection_timeout, key=key,
password=password)
@classmethod
def setup_server_and_attached_volume_with_data(
cls, server=None, volume=None):
"""
Builds a new server using configured defaults
Attaches, formats and mounts a new volume
Writes data to the volume
Saves the md5sum of the written data as a class attribute
Syncs the filesystem write cache.
"""
# Build new server using configured defaults
cls.test_server = server or cls.new_server()
# Set remote instance client up
cls.server_conn = cls.connect_to_instance(cls.test_server)
cls.volume_mount_point = cls.server_conn.generate_mountpoint()
cls.test_volume = volume or cls.new_volume()
# Attach Volume
cls.test_attachment = cls.attach_volume_and_get_device_info(
cls.server_conn, cls.test_server.id, cls.test_volume.id_)
# Format Volume
cls.format_attached_volume(
cls.server_conn, cls.test_attachment.os_disk_device_name)
# Mount Volume
cls.mount_attached_volume(
cls.server_conn, cls.test_attachment.os_disk_device_name,
mount_point=cls.volume_mount_point)
# Write data to volume
cls.written_data = "a" * 1024
cls.written_filename = "qe_test_datafile"
resp = cls.create_remote_file(
cls.server_conn, cls.volume_mount_point, cls.written_filename,
file_content=cls.written_data)
assert resp is not None, (
"Could not verify writability of attached volume")
# Save written file md5sum
cls.original_md5hash = cls.get_remote_file_md5_hash(
cls.server_conn, cls.volume_mount_point, cls.written_filename)
assert cls.original_md5hash is not None, (
"Unable to hash file on mounted volume")
        # Make the fs write cached data to disk before unmounting.
cls.server_conn.filesystem_sync()
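    # Sketch of how a test typically drives the fixture set up above
    # (illustrative flow; the teardown helper is defined just below):
    #   cls.setup_server_and_attached_volume_with_data()
    #   ... assert against cls.original_md5hash / cls.written_data ...
    #   cls.unmount_and_detach_test_volume()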
@classmethod
def unmount_and_detach_test_volume(cls):
cls.unmount_attached_volume(
cls.server_conn, cls.test_attachment.os_disk_device_name)
cls.volume_attachments.behaviors.delete_volume_attachment(
cls.test_attachment.id_, cls.test_server.id)
def calculate_volume_size_for_image(self, image):
"""Get size from image object if possible, or use configured value
TODO: Move this into a behavior
"""
size = getattr(image, 'min_disk', None)
# Log missing sizes
if not size:
msg = (
"Image {image_id} did not report a meaningful disks size. "
"Falling back to configured min_volume_size_from_image".format(
image_id=image.id))
self.fixture_log.warning(msg)
            # If size is 0 or not reported (None), fall back to the configured
            # value for min_volume_from_image_size
return max(size, self.volumes.config.min_volume_from_image_size)
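    # For the sizing rule above: a reported min_disk of 0 or None falls back to
    # the configured min_volume_from_image_size, while e.g. min_disk=100 wins
    # over a smaller configured minimum (illustrative numbers).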
def _compare_volume_image_metadata(self, image, volume, key_list=None):
key_list = key_list or []
comparable_keys = [
key for key in image.metadata.keys() if key in key_list]
error_messages = []
for key in comparable_keys:
if key not in volume.volume_image_metadata:
error_messages.append(
"Metadata key '{0}' from image {1} not found in volume"
"{2} volume-image-metadata".format(
key, image.id, volume.id_))
elif volume.volume_image_metadata[key] != image.metadata[key]:
error_messages.append(
"Metadata keypair '{0}: {1}' from image {2} did not "
"match the keypair '{3}: {4}' in the "
"volume-image-metadata of volume {5}".format(
key, image.metadata[key], image.id,
key, volume.volume_image_metadata[key], volume.id_))
return error_messages
def assertImageMetadataWasCopiedToVolume(
self, image, volume, key_list=None, msg=None):
errors = self._compare_volume_image_metadata(image, volume, key_list)
if errors:
self.fail(self._formatMessage(msg, "\n".join(errors)))
def assertMinDiskSizeIsSet(self, image, msg=None):
# TODO: This should probably be an images behavior method that I
# wrap here.
if getattr(image, 'min_disk', 0) <= 0:
stdmsg = (
"\nImage {0} '{1}' does not have a min_disk size set, or "
"has a min_disk size of 0".format(image.id, image.name))
self.fail(self._formatMessage(msg, stdmsg))
def check_if_minimum_disk_size_is_set(self, image):
"""Check the image info to make sure the min_disk attribute
is set"""
try:
self.assertMinDiskSizeIsSet(image)
except AssertionError:
return False
return True
def make_server_snapshot(self, server, add_cleanup=True):
server_snapshot_name = random_string(
prefix="cbs_qe_image_of_{0}_".format(server.name), size=10)
create_img_resp = self.servers.client.create_image(
server.id, name=server_snapshot_name)
assert create_img_resp.ok, (
"Create-Server-Image call failed with a {0}".format(
create_img_resp.status_code))
self.images.behaviors.verify_server_snapshotting_progression(server.id)
# Poll for list of all snapshots and find the one that belongs to our
# server.
list_imgs_resp = self.images.client.list_images()
assert list_imgs_resp.ok, (
"list-images call failed with a {0}".format(
list_imgs_resp.status_code))
assert list_imgs_resp.entity is not None, (
"Unable to deserialize list-images response".format(
list_imgs_resp.status_code))
image_list = list_imgs_resp.entity
server_snapshot = None
for img in image_list:
if img.name == server_snapshot_name:
server_snapshot = img
break
assert server_snapshot is not None, "Could not locate image by name."
if add_cleanup is True:
self.addCleanup(
self.images.client.delete_image, server_snapshot.id)
# Wait for the image to become active just in case
self.images.behaviors.wait_for_image_status(
server_snapshot.id, 'ACTIVE', 10, 600)
# get the model for the snapshot in question
resp = self.images.client.get_image(server_snapshot.id)
assert resp.ok, ("Could not get updated snapshot info after create")
assert resp.entity is not None, (
"Could not deserialize snapshot infor response")
return resp.entity
def create_bootable_volume_from_server_snapshot(
self, image, flavor, volume_type):
# Create a server from the given image and flavor
server = self.new_server(
name=None, image=image.id, flavor=flavor.id, add_cleanup=False)
self.addCleanup(self.servers.client.delete_server, server.id)
# Make a snapshot of the server via the images api
server_snapshot = self.make_server_snapshot(server)
# Create a bootable volume from the server snapshot
return self.create_volume_from_image_test(volume_type, server_snapshot)
def create_volume_from_image_test(
self, volume_type, image, add_cleanup=True):
size = self.calculate_volume_size_for_image(image)
volume = self.volumes.behaviors.create_available_volume(
size, volume_type.id_, image_ref=image.id,
timeout=self.volumes.config.volume_create_max_timeout)
if add_cleanup:
try:
self.addCleanup(
self.volumes.behaviors.delete_volume_confirmed, volume.id_)
except:
raise Exception(
"Could not create a volume in setup for "
"create_volume_from_image test")
self.assertEquals(
str(size), str(volume.size),
"Expected volume size {0} did not match actual observed volume"
" size {1}".format(size, volume.size))
        # TODO: Break this out into its own assertion with a progress verifier
# to give the bootable flag time to populate.
self.assertEquals(
'true', volume.bootable, "Volume built from image was not marked "
"as bootable")
self.assertImageMetadataWasCopiedToVolume(image, volume)
return volume
def create_bootable_volume_from_third_snapshot_of_server_test(
self, image, flavor, volume_type):
# Create a server from the given image and flavor
server = self.new_server(
name=None, image=image.id, flavor=flavor.id, add_cleanup=False)
self.addCleanup(self.servers.client.delete_server, server.id)
# Make a snapshot of the server via the images api
self.make_server_snapshot(server)
self.servers.behaviors.wait_for_server_status(
server.id, 'ACTIVE', timeout=300)
self.make_server_snapshot(server)
self.servers.behaviors.wait_for_server_status(
server.id, 'ACTIVE', timeout=300)
server_snapshot_3 = self.make_server_snapshot(server)
self.servers.behaviors.wait_for_server_status(
server.id, 'ACTIVE', timeout=300)
# Create a bootable volume from the server snapshot
self.create_volume_from_image_test(volume_type, server_snapshot_3)
| [
"[email protected]"
] | |
5d95d610ebbca94067bde0d1705a24306610257b | 5a4436884af5341ce855c0e84866b972a0f61c05 | /day2/functions/basics/2.py | 0c74a24791d53e68bd3d08f432369d8dbfa42c55 | [] | no_license | sreejithev/pythoncodes | 74a420c4f025b893e27f17ba85632a4a096f17fd | 70df14871a9687916d1c4ada76c055607f13e8ce | refs/heads/master | 2021-01-21T20:59:47.056167 | 2017-06-19T09:43:17 | 2017-06-19T09:43:17 | 92,292,259 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 42 | py | def sqr(x):
return x*x
print sqr(10,20)
| [
"[email protected]"
] | |
856dc99df1b2a415589cdc169f574672bd782c91 | 2616952e9dcf7a996c691e5410551d89ec735943 | /Python Basic for ML and DL Book3/Ensemble methods Ensemble Error diagram.py | 2ec889de7b8cb72089d30d213d0d2065c1cbc6fa | [] | no_license | BaoBao0406/Machine-Learning | 5c9f00c19422e7fead74d4f441fcc43556b62b78 | c3e1c03301b41220c58a1bbda8f872638dc24104 | refs/heads/master | 2021-07-12T10:25:28.791579 | 2020-08-24T00:17:43 | 2020-08-24T00:17:43 | 197,107,543 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 794 | py | from scipy.misc import comb
import math
def ensemble_error(n_classifier, error):
k_start = int(math.ceil(n_classifier / 2.))
probs = [comb(n_classifier, k) * error**k * (1-error)**(n_classifier - k)
for k in range(k_start, n_classifier + 1)]
return sum(probs)
ensemble_error(n_classifier=11, error=0.25)
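# The call above evaluates the majority-vote error for 11 independent base
# classifiers that each err with probability 0.25: the ensemble errs only when
# at least ceil(11/2) = 6 members err at once, i.e.
#   P(ensemble error) = sum_{k=6}^{11} C(11, k) * 0.25**k * 0.75**(11 - k)
# which comes out to roughly 0.034, well below the 0.25 base error rate.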
import numpy as np
import matplotlib.pyplot as plt
error_range = np.arange(0.0, 1.01, 0.01)
ens_errors = [ensemble_error(n_classifier=11, error=error)
for error in error_range]
plt.plot(error_range, ens_errors, label='Ensemble error', linewidth=2)
plt.plot(error_range, error_range, linestyle='--', label='Base error', linewidth=2)
plt.xlabel('Base error')
plt.ylabel('Base/Ensemble error')
plt.legend(loc='upper left')
plt.grid(alpha=0.5)
plt.show()
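# The plot produced above compares the ensemble error against the base error
# across the whole [0, 1] range: the two curves cross at a base error of 0.5,
# so the majority-vote ensemble only beats its members while each base
# classifier is better than random guessing (base error < 0.5).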
| [
"[email protected]"
] | |
639a60840ad7e8b452e12cb388e417b5a2b16264 | be0f3dfbaa2fa3d8bbe59229aef3212d032e7dd1 | /Gauss_v45r9/Gen/DecFiles/options/12143431.py | e56ad1dfbd9078ce9947efcd957b4a0ee3f23907 | [] | no_license | Sally27/backup_cmtuser_full | 34782102ed23c6335c48650a6eaa901137355d00 | 8924bebb935b96d438ce85b384cfc132d9af90f6 | refs/heads/master | 2020-05-21T09:27:04.370765 | 2018-12-12T14:41:07 | 2018-12-12T14:41:07 | 185,989,173 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,809 | py | # file /home/hep/ss4314/cmtuser/Gauss_v45r9/Gen/DecFiles/options/12143431.py generated: Fri, 27 Mar 2015 16:10:07
#
# Event Type: 12143431
#
# ASCII decay Descriptor: [B+ -> K+ (J/psi(1S) -> mu+ mu- {,gamma} {,gamma}) (eta -> gamma gamma)]cc
#
from Configurables import Generation
Generation().EventType = 12143431
Generation().SampleGenerationTool = "SignalRepeatedHadronization"
from Configurables import SignalRepeatedHadronization
Generation().addTool( SignalRepeatedHadronization )
Generation().SignalRepeatedHadronization.ProductionTool = "PythiaProduction"
from Configurables import ToolSvc
from Configurables import EvtGenDecay
ToolSvc().addTool( EvtGenDecay )
ToolSvc().EvtGenDecay.UserDecayFile = "$DECFILESROOT/dkfiles/Bu_JpsietaK,mm,gg=DecProdCut.dec"
Generation().SignalRepeatedHadronization.CutTool = "DaughtersInLHCb"
Generation().SignalRepeatedHadronization.SignalPIDList = [ 521,-521 ]
# Ad-hoc particle gun code
from Configurables import ParticleGun
pgun = ParticleGun("ParticleGun")
pgun.SignalPdgCode = 521
pgun.DecayTool = "EvtGenDecay"
pgun.GenCutTool = "DaughtersInLHCb"
from Configurables import FlatNParticles
pgun.NumberOfParticlesTool = "FlatNParticles"
pgun.addTool( FlatNParticles , name = "FlatNParticles" )
from Configurables import MomentumSpectrum
pgun.ParticleGunTool = "MomentumSpectrum"
pgun.addTool( MomentumSpectrum , name = "MomentumSpectrum" )
pgun.MomentumSpectrum.PdgCodes = [ 521,-521 ]
pgun.MomentumSpectrum.InputFile = "$PGUNSDATAROOT/data/Ebeam4000GeV/MomentumSpectrum_521.root"
pgun.MomentumSpectrum.BinningVariables = "pteta"
pgun.MomentumSpectrum.HistogramPath = "h_pteta"
from Configurables import BeamSpotSmearVertex
pgun.addTool(BeamSpotSmearVertex, name="BeamSpotSmearVertex")
pgun.VertexSmearingTool = "BeamSpotSmearVertex"
pgun.EventType = 12143431
| [
"[email protected]"
] | |
4c507eda6df4eec5409d9ba0f7c5c58dbe4adc2c | a8637de7c6e38c95cd19b46b45b3e00c42ae8140 | /recruitments/forms.py | b4d18a84d6fec828e87868c360f345c9f5ccb4dd | [] | no_license | nishant57/edc | 9c0d3d363882c44bc08dc4da47024e5e83731077 | 5ab9f6dc5d474b5071c7f027cd287c32a9d43501 | refs/heads/master | 2021-01-12T19:31:49.655509 | 2013-02-17T10:00:36 | 2013-02-17T10:00:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,431 | py | from django.db import models
from django.forms import ModelForm, Textarea
from django import forms
from constants import *
from recruitments.models import Candidate, Setup
from ckeditor.widgets import CKEditorWidget
from django.core.exceptions import ObjectDoesNotExist
class CandidateForm(forms.ModelForm):
class Meta:
model = Candidate
exclude = ('hash_value', 'blocked', 'setup', 'slot')
def clean(self):
cleaned_data = self.cleaned_data
email = cleaned_data.get('email')
try:
s = Setup.objects.get(date_recruitment_ends__gt=datetime.now(), date_recruitment_starts__lt=datetime.now())
Candidate.objects.get(email=email, setup=s)
raise forms.ValidationError('This Email ID has already applied for this recruitment process')
except ObjectDoesNotExist:
pass
return cleaned_data
'''
salutation = forms.CharField(max_length=10,required=True,choices=SALUTATION_CHOICES)
name = forms.CharField(max_length=50,required = True,label='Your name')
email = forms.EmailField(max_length=50,required=True,label='Email Address')
branch = forms.CharField(max_length=50,required=True,choices=BRANCH_CHOICES)
phone = forms.CharField(max_length=15, required=True)
why_edc = forms.TextField(max_length=500,required=True)
other_groups = forms.TextField(max_length=100)
interests = forms.
'''
| [
"[email protected]"
] | |
ea026000b6292aaf81ca75b7bc134d1a849290bd | ee561aa019a80f621007f82bdb21fe6ed8b6278f | /build/turtlebot3-melodic-devel/turtlebot3_navigation/catkin_generated/pkg.installspace.context.pc.py | 2314ec5e0e068618e2ccde34da5357db5a46d171 | [] | no_license | allanwhledu/agv_edu_prj | 4fb5fbf14cf0a14edd57ee9bd87903dc25d4d4f2 | 643a8a96ca7027529332f25208350de78c07e33d | refs/heads/master | 2020-09-23T23:32:54.430035 | 2019-12-04T07:47:55 | 2019-12-04T07:47:55 | 225,613,426 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 388 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "turtlebot3_navigation"
PROJECT_SPACE_DIR = "/home/sjtuwhl/ROBOTLAB_WS/install"
PROJECT_VERSION = "1.2.2"
| [
"[email protected]"
] | |
3892f819ab9827f25746acaf5e7ddb23394850ca | f259ca399ab33b5c2e66ae07921711ea5917ac9e | /pytorch/sphereface.py | d6413d43827cf63587f8d89889c96b608aa81521 | [] | no_license | jizhuoran/HyperTea_Maker | 9a7930e1d6af995c8fdb9a15354eea5fc29f0806 | 2c3f8dfcb699495093165cd986eebedfb17a2433 | refs/heads/master | 2020-04-22T19:32:39.385611 | 2019-04-14T15:12:06 | 2019-04-14T15:12:48 | 170,610,900 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,268 | py | # -*- coding: utf-8 -*-
import torch
from sphere20a import sphere20a as Model
from hypertea_generator.hypertea_generator import HyperteaGenerator
model = Model().train()
precision = 'float'
genetator = HyperteaGenerator(model, torch.ones((1, 3, 112, 96), dtype = torch.float), precision)
output = genetator.get_net_output()
inference_code = f'''
void inference( std::vector<{precision}> &data_from_user, std::vector<{precision}> &data_to_user) {{
auto x = DeviceTensor(data_from_user);
  x = relu1_1(conv1_1(x));
  x = x + relu1_3(conv1_3(relu1_2(conv1_2(x))));
  x = relu2_1(conv2_1(x));
  x = x + relu2_3(conv2_3(relu2_2(conv2_2(x))));
  x = x + relu2_5(conv2_5(relu2_4(conv2_4(x))));
  x = relu3_1(conv3_1(x));
  x = x + relu3_3(conv3_3(relu3_2(conv3_2(x))));
  x = x + relu3_5(conv3_5(relu3_4(conv3_4(x))));
  x = x + relu3_7(conv3_7(relu3_6(conv3_6(x))));
  x = x + relu3_9(conv3_9(relu3_8(conv3_8(x))));
  x = relu4_1(conv4_1(x));
  x = x + relu4_3(conv4_3(relu4_2(conv4_2(x))));
  x = fc5(x);
  x = fc6(x);
x.copy_to_ptr((void*)data_to_user.data());
}}
'''
print(genetator.network_defination(inference_code, 'work_space/new_net'))
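# Reading of the script above (an interpretation, not taken from HyperTea's
# documentation): the f-string is passed to the generator as the body of the
# C++ inference() routine it emits, the repeated "x = x + relu(conv(...))"
# statements mirror sphere20a's residual blocks, and fc5/fc6 are the final
# fully connected layers of the SphereFace network.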
| [
"[email protected]"
] | |
a3309f48dc0f1a5cf170079337921110045939e1 | e823bc36af457f229f6879d6e6a3ef6247c129aa | /virtualenv/Lib/site-packages/twisted/conch/test/test_knownhosts.py | fd2cec16c516734dfc889451660e4fb395e172db | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | William-An/DFB_Final | e772fa979c41f2f83a4bf657cde499456215fb3b | 49a9244c98116574676992ebecd1d9435e1d5b1e | refs/heads/master | 2022-11-07T15:47:36.189057 | 2017-07-22T01:01:37 | 2017-07-22T01:01:43 | 97,426,562 | 1 | 1 | MIT | 2022-10-15T02:45:57 | 2017-07-17T02:21:42 | Python | UTF-8 | Python | false | false | 49,361 | py | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.conch.client.knownhosts}.
"""
from __future__ import absolute_import, division
import os
from binascii import Error as BinasciiError, b2a_base64, a2b_base64
from twisted.python.reflect import requireModule
if requireModule('cryptography') and requireModule('pyasn1'):
from twisted.conch.ssh.keys import Key, BadKeyError
from twisted.conch.client.knownhosts import \
PlainEntry, HashedEntry, KnownHostsFile, UnparsedEntry, ConsoleUI
from twisted.conch.client import default
from twisted.conch.test import keydata
else:
skip = "cryptography and PyASN1 required for twisted.conch.knownhosts."
from zope.interface.verify import verifyObject
from twisted.python.filepath import FilePath
from twisted.python.compat import networkString
from twisted.trial.unittest import TestCase
from twisted.internet.defer import Deferred
from twisted.conch.interfaces import IKnownHostEntry
from twisted.conch.error import HostKeyChanged, UserRejectedKey, InvalidEntry
from twisted.test.testutils import ComparisonTestsMixin
sampleEncodedKey = (
b'AAAAB3NzaC1yc2EAAAABIwAAAQEAsV0VMRbGmzhqxxayLRHmvnFvtyNqgbNKV46dU1bVFB+3y'
b'tNvue4Riqv/SVkPRNwMb7eWH29SviXaBxUhYyzKkDoNUq3rTNnH1Vnif6d6X4JCrUb5d3W+Dm'
b'YClyJrZ5HgD/hUpdSkTRqdbQ2TrvSAxRacj+vHHT4F4dm1bJSewm3B2D8HVOoi/CbVh3dsIiC'
b'dp8VltdZx4qYVfYe2LwVINCbAa3d3tj9ma7RVfw3OH2Mfb+toLd1N5tBQFb7oqTt2nC6I/6Bd'
b'4JwPUld+IEitw/suElq/AIJVQXXujeyiZlea90HE65U2mF1ytr17HTAIT2ySokJWyuBANGACk'
b'6iIaw==')
otherSampleEncodedKey = (
b'AAAAB3NzaC1yc2EAAAABIwAAAIEAwaeCZd3UCuPXhX39+/p9qO028jTF76DMVd9mPvYVDVXuf'
b'WckKZauF7+0b7qm+ChT7kan6BzRVo4++gCVNfAlMzLysSt3ylmOR48tFpAfygg9UCX3DjHz0E'
b'lOOUKh3iifc9aUShD0OPaK3pR5JJ8jfiBfzSYWt/hDi/iZ4igsSs8=')
thirdSampleEncodedKey = (
b'AAAAB3NzaC1yc2EAAAABIwAAAQEAl/TQakPkePlnwCBRPitIVUTg6Z8VzN1en+DGkyo/evkmLw'
b'7o4NWR5qbysk9A9jXW332nxnEuAnbcCam9SHe1su1liVfyIK0+3bdn0YRB0sXIbNEtMs2LtCho'
b'/aV3cXPS+Cf1yut3wvIpaRnAzXxuKPCTXQ7/y0IXa8TwkRBH58OJa3RqfQ/NsSp5SAfdsrHyH2'
b'aitiVKm2jfbTKzSEqOQG/zq4J9GXTkq61gZugory/Tvl5/yPgSnOR6C9jVOMHf27ZPoRtyj9SY'
b'343Hd2QHiIE0KPZJEgCynKeWoKz8v6eTSK8n4rBnaqWdp8MnGZK1WGy05MguXbyCDuTC8AmJXQ'
b'==')
ecdsaSampleEncodedKey = (
b'AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBIFwh3/zBANyPPIE60'
b'SMMfdKMYo3OvfvzGLZphzuKrzSt0q4uF+/iYqtYiHhryAwU/fDWlUQ9kck9f+IlpsNtY4=')
sampleKey = a2b_base64(sampleEncodedKey)
otherSampleKey = a2b_base64(otherSampleEncodedKey)
thirdSampleKey = a2b_base64(thirdSampleEncodedKey)
ecdsaSampleKey = a2b_base64(ecdsaSampleEncodedKey)
samplePlaintextLine = (
b"www.twistedmatrix.com ssh-rsa " + sampleEncodedKey + b"\n")
otherSamplePlaintextLine = (
b"divmod.com ssh-rsa " + otherSampleEncodedKey + b"\n")
sampleHostIPLine = (
b"www.twistedmatrix.com,198.49.126.131 ssh-rsa " + sampleEncodedKey + b"\n")
sampleHashedLine = (
b"|1|gJbSEPBG9ZSBoZpHNtZBD1bHKBA=|bQv+0Xa0dByrwkA1EB0E7Xop/Fo= ssh-rsa " +
sampleEncodedKey + b"\n")
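# The hashed sample line above uses OpenSSH's hashed known_hosts layout, which
# HashedEntry parses below: "|1|" + base64(salt) + "|" + base64 of the
# HMAC-SHA1 of the hostname keyed with that salt, followed by the key type and
# the base64-encoded public key. Hashing hides which hostnames are stored
# while still allowing exact-match lookups.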
class EntryTestsMixin:
"""
Tests for implementations of L{IKnownHostEntry}. Subclasses must set the
'entry' attribute to a provider of that interface, the implementation of
that interface under test.
@ivar entry: a provider of L{IKnownHostEntry} with a hostname of
www.twistedmatrix.com and an RSA key of sampleKey.
"""
def test_providesInterface(self):
"""
The given entry should provide IKnownHostEntry.
"""
verifyObject(IKnownHostEntry, self.entry)
def test_fromString(self):
"""
Constructing a plain text entry from an unhashed known_hosts entry will
result in an L{IKnownHostEntry} provider with 'keyString', 'hostname',
and 'keyType' attributes. While outside the interface in question,
these attributes are held in common by L{PlainEntry} and L{HashedEntry}
implementations; other implementations should override this method in
subclasses.
"""
entry = self.entry
self.assertEqual(entry.publicKey, Key.fromString(sampleKey))
self.assertEqual(entry.keyType, b"ssh-rsa")
def test_matchesKey(self):
"""
L{IKnownHostEntry.matchesKey} checks to see if an entry matches a given
SSH key.
"""
twistedmatrixDotCom = Key.fromString(sampleKey)
divmodDotCom = Key.fromString(otherSampleKey)
self.assertEqual(
True,
self.entry.matchesKey(twistedmatrixDotCom))
self.assertEqual(
False,
self.entry.matchesKey(divmodDotCom))
def test_matchesHost(self):
"""
L{IKnownHostEntry.matchesHost} checks to see if an entry matches a
given hostname.
"""
self.assertTrue(self.entry.matchesHost(b"www.twistedmatrix.com"))
self.assertFalse(self.entry.matchesHost(b"www.divmod.com"))
class PlainEntryTests(EntryTestsMixin, TestCase):
"""
Test cases for L{PlainEntry}.
"""
plaintextLine = samplePlaintextLine
hostIPLine = sampleHostIPLine
def setUp(self):
"""
Set 'entry' to a sample plain-text entry with sampleKey as its key.
"""
self.entry = PlainEntry.fromString(self.plaintextLine)
def test_matchesHostIP(self):
"""
A "hostname,ip" formatted line will match both the host and the IP.
"""
self.entry = PlainEntry.fromString(self.hostIPLine)
self.assertTrue(self.entry.matchesHost(b"198.49.126.131"))
self.test_matchesHost()
def test_toString(self):
"""
L{PlainEntry.toString} generates the serialized OpenSSL format string
for the entry, sans newline.
"""
self.assertEqual(self.entry.toString(), self.plaintextLine.rstrip(b"\n"))
multiHostEntry = PlainEntry.fromString(self.hostIPLine)
self.assertEqual(multiHostEntry.toString(),
self.hostIPLine.rstrip(b"\n"))
class PlainTextWithCommentTests(PlainEntryTests):
"""
Test cases for L{PlainEntry} when parsed from a line with a comment.
"""
plaintextLine = samplePlaintextLine[:-1] + b" plain text comment.\n"
hostIPLine = sampleHostIPLine[:-1] + b" text following host/IP line\n"
class HashedEntryTests(EntryTestsMixin, ComparisonTestsMixin, TestCase):
"""
Tests for L{HashedEntry}.
This suite doesn't include any tests for host/IP pairs because hashed
entries store IP addresses the same way as hostnames and does not support
comma-separated lists. (If you hash the IP and host together you can't
tell if you've got the key already for one or the other.)
"""
hashedLine = sampleHashedLine
def setUp(self):
"""
Set 'entry' to a sample hashed entry for twistedmatrix.com with
sampleKey as its key.
"""
self.entry = HashedEntry.fromString(self.hashedLine)
def test_toString(self):
"""
L{HashedEntry.toString} generates the serialized OpenSSL format string
for the entry, sans the newline.
"""
self.assertEqual(self.entry.toString(), self.hashedLine.rstrip(b"\n"))
def test_equality(self):
"""
Two L{HashedEntry} instances compare equal if and only if they represent
the same host and key in exactly the same way: the host salt, host hash,
public key type, public key, and comment fields must all be equal.
"""
hostSalt = b"gJbSEPBG9ZSBoZpHNtZBD1bHKBA"
hostHash = b"bQv+0Xa0dByrwkA1EB0E7Xop/Fo"
publicKey = Key.fromString(sampleKey)
keyType = networkString(publicKey.type())
comment = b"hello, world"
entry = HashedEntry(
hostSalt, hostHash, keyType, publicKey, comment)
duplicate = HashedEntry(
hostSalt, hostHash, keyType, publicKey, comment)
# Vary the host salt
self.assertNormalEqualityImplementation(
entry, duplicate,
HashedEntry(
hostSalt[::-1], hostHash, keyType, publicKey,
comment))
# Vary the host hash
self.assertNormalEqualityImplementation(
entry, duplicate,
HashedEntry(
hostSalt, hostHash[::-1], keyType, publicKey,
comment))
# Vary the key type
self.assertNormalEqualityImplementation(
entry, duplicate,
HashedEntry(
hostSalt, hostHash, keyType[::-1], publicKey,
comment))
# Vary the key
self.assertNormalEqualityImplementation(
entry, duplicate,
HashedEntry(
hostSalt, hostHash, keyType,
Key.fromString(otherSampleKey), comment))
# Vary the comment
self.assertNormalEqualityImplementation(
entry, duplicate,
HashedEntry(
hostSalt, hostHash, keyType, publicKey,
comment[::-1]))
class HashedEntryWithCommentTests(HashedEntryTests):
"""
Test cases for L{PlainEntry} when parsed from a line with a comment.
"""
hashedLine = sampleHashedLine[:-1] + b" plain text comment.\n"
class UnparsedEntryTests(TestCase, EntryTestsMixin):
"""
Tests for L{UnparsedEntry}
"""
def setUp(self):
"""
Set up the 'entry' to be an unparsed entry for some random text.
"""
self.entry = UnparsedEntry(b" This is a bogus entry. \n")
def test_fromString(self):
"""
Creating an L{UnparsedEntry} should simply record the string it was
passed.
"""
self.assertEqual(b" This is a bogus entry. \n",
self.entry._string)
def test_matchesHost(self):
"""
An unparsed entry can't match any hosts.
"""
self.assertFalse(self.entry.matchesHost(b"www.twistedmatrix.com"))
def test_matchesKey(self):
"""
An unparsed entry can't match any keys.
"""
self.assertFalse(self.entry.matchesKey(Key.fromString(sampleKey)))
def test_toString(self):
"""
L{UnparsedEntry.toString} returns its input string, sans trailing
newline.
"""
self.assertEqual(b" This is a bogus entry. ", self.entry.toString())
class ParseErrorTests(TestCase):
"""
L{HashedEntry.fromString} and L{PlainEntry.fromString} can raise a variety
of errors depending on misformattings of certain strings. These tests make
sure those errors are caught. Since many of the ways that this can go
wrong are in the lower-level APIs being invoked by the parsing logic,
several of these are integration tests with the C{base64} and
L{twisted.conch.ssh.keys} modules.
"""
def invalidEntryTest(self, cls):
"""
If there are fewer than three elements, C{fromString} should raise
L{InvalidEntry}.
"""
self.assertRaises(InvalidEntry, cls.fromString, b"invalid")
def notBase64Test(self, cls):
"""
If the key is not base64, C{fromString} should raise L{BinasciiError}.
"""
self.assertRaises(BinasciiError, cls.fromString, b"x x x")
def badKeyTest(self, cls, prefix):
"""
If the key portion of the entry is valid base64, but is not actually an
SSH key, C{fromString} should raise L{BadKeyError}.
"""
self.assertRaises(BadKeyError, cls.fromString, b' '.join(
[prefix, b"ssh-rsa", b2a_base64(
b"Hey, this isn't an SSH key!").strip()]))
def test_invalidPlainEntry(self):
"""
If there are fewer than three whitespace-separated elements in an
entry, L{PlainEntry.fromString} should raise L{InvalidEntry}.
"""
self.invalidEntryTest(PlainEntry)
def test_invalidHashedEntry(self):
"""
If there are fewer than three whitespace-separated elements in an
entry, or the hostname salt/hash portion has more than two elements,
L{HashedEntry.fromString} should raise L{InvalidEntry}.
"""
self.invalidEntryTest(HashedEntry)
a, b, c = sampleHashedLine.split()
self.assertRaises(InvalidEntry, HashedEntry.fromString, b' '.join(
[a + b"||", b, c]))
def test_plainNotBase64(self):
"""
If the key portion of a plain entry is not decodable as base64,
C{fromString} should raise L{BinasciiError}.
"""
self.notBase64Test(PlainEntry)
def test_hashedNotBase64(self):
"""
If the key, host salt, or host hash portion of a hashed entry is not
encoded, it will raise L{BinasciiError}.
"""
self.notBase64Test(HashedEntry)
a, b, c = sampleHashedLine.split()
# Salt not valid base64.
self.assertRaises(
BinasciiError, HashedEntry.fromString,
b' '.join([b"|1|x|" + b2a_base64(b"stuff").strip(), b, c]))
# Host hash not valid base64.
self.assertRaises(
BinasciiError, HashedEntry.fromString,
b' '.join(
[HashedEntry.MAGIC + b2a_base64(b"stuff").strip() + b"|x",
b, c]))
# Neither salt nor hash valid base64.
self.assertRaises(
BinasciiError, HashedEntry.fromString,
b' '.join([b"|1|x|x", b, c]))
def test_hashedBadKey(self):
"""
If the key portion of the entry is valid base64, but is not actually an
SSH key, C{HashedEntry.fromString} should raise L{BadKeyError}.
"""
a, b, c = sampleHashedLine.split()
self.badKeyTest(HashedEntry, a)
def test_plainBadKey(self):
"""
If the key portion of the entry is valid base64, but is not actually an
SSH key, C{PlainEntry.fromString} should raise L{BadKeyError}.
"""
self.badKeyTest(PlainEntry, b"hostname")
class KnownHostsDatabaseTests(TestCase):
"""
Tests for L{KnownHostsFile}.
"""
def pathWithContent(self, content):
"""
Return a FilePath with the given initial content.
"""
fp = FilePath(self.mktemp())
fp.setContent(content)
return fp
def loadSampleHostsFile(self, content=(
sampleHashedLine + otherSamplePlaintextLine +
b"\n# That was a blank line.\n"
b"This is just unparseable.\n"
b"|1|This also unparseable.\n")):
"""
Return a sample hosts file, with keys for www.twistedmatrix.com and
divmod.com present.
"""
return KnownHostsFile.fromPath(self.pathWithContent(content))
def test_readOnlySavePath(self):
"""
L{KnownHostsFile.savePath} is read-only; if an assignment is made to
it, L{AttributeError} is raised and the value is unchanged.
"""
path = FilePath(self.mktemp())
new = FilePath(self.mktemp())
hostsFile = KnownHostsFile(path)
self.assertRaises(AttributeError, setattr, hostsFile, "savePath", new)
self.assertEqual(path, hostsFile.savePath)
def test_defaultInitializerIgnoresExisting(self):
"""
The default initializer for L{KnownHostsFile} disregards any existing
contents in the save path.
"""
hostsFile = KnownHostsFile(self.pathWithContent(sampleHashedLine))
self.assertEqual([], list(hostsFile.iterentries()))
def test_defaultInitializerClobbersExisting(self):
"""
After using the default initializer for L{KnownHostsFile}, the first use
of L{KnownHostsFile.save} overwrites any existing contents in the save
path.
"""
path = self.pathWithContent(sampleHashedLine)
hostsFile = KnownHostsFile(path)
entry = hostsFile.addHostKey(
b"www.example.com", Key.fromString(otherSampleKey))
hostsFile.save()
# Check KnownHostsFile to see what it thinks the state is
self.assertEqual([entry], list(hostsFile.iterentries()))
# And also directly check the underlying file itself
self.assertEqual(entry.toString() + b"\n", path.getContent())
def test_saveResetsClobberState(self):
"""
After L{KnownHostsFile.save} is used once with an instance initialized
by the default initializer, contents of the save path are respected and
preserved.
"""
hostsFile = KnownHostsFile(self.pathWithContent(sampleHashedLine))
preSave = hostsFile.addHostKey(
b"www.example.com", Key.fromString(otherSampleKey))
hostsFile.save()
postSave = hostsFile.addHostKey(
b"another.example.com", Key.fromString(thirdSampleKey))
hostsFile.save()
self.assertEqual([preSave, postSave], list(hostsFile.iterentries()))
def test_loadFromPath(self):
"""
Loading a L{KnownHostsFile} from a path with six entries in it will
result in a L{KnownHostsFile} object with six L{IKnownHostEntry}
providers in it.
"""
hostsFile = self.loadSampleHostsFile()
self.assertEqual(6, len(list(hostsFile.iterentries())))
def test_iterentriesUnsaved(self):
"""
If the save path for a L{KnownHostsFile} does not exist,
L{KnownHostsFile.iterentries} still returns added but unsaved entries.
"""
hostsFile = KnownHostsFile(FilePath(self.mktemp()))
hostsFile.addHostKey(b"www.example.com", Key.fromString(sampleKey))
self.assertEqual(1, len(list(hostsFile.iterentries())))
def test_verifyHashedEntry(self):
"""
Loading a L{KnownHostsFile} from a path containing a single valid
L{HashedEntry} entry will result in a L{KnownHostsFile} object
with one L{IKnownHostEntry} provider.
"""
hostsFile = self.loadSampleHostsFile((sampleHashedLine))
entries = list(hostsFile.iterentries())
self.assertIsInstance(entries[0], HashedEntry)
self.assertTrue(entries[0].matchesHost(b"www.twistedmatrix.com"))
self.assertEqual(1, len(entries))
def test_verifyPlainEntry(self):
"""
Loading a L{KnownHostsFile} from a path containing a single valid
L{PlainEntry} entry will result in a L{KnownHostsFile} object
with one L{IKnownHostEntry} provider.
"""
hostsFile = self.loadSampleHostsFile((otherSamplePlaintextLine))
entries = list(hostsFile.iterentries())
self.assertIsInstance(entries[0], PlainEntry)
self.assertTrue(entries[0].matchesHost(b"divmod.com"))
self.assertEqual(1, len(entries))
def test_verifyUnparsedEntry(self):
"""
Loading a L{KnownHostsFile} from a path that only contains '\n' will
result in a L{KnownHostsFile} object containing a L{UnparsedEntry}
object.
"""
hostsFile = self.loadSampleHostsFile((b"\n"))
entries = list(hostsFile.iterentries())
self.assertIsInstance(entries[0], UnparsedEntry)
self.assertEqual(entries[0].toString(), b"")
self.assertEqual(1, len(entries))
def test_verifyUnparsedComment(self):
"""
Loading a L{KnownHostsFile} from a path that contains a comment will
result in a L{KnownHostsFile} object containing a L{UnparsedEntry}
object.
"""
hostsFile = self.loadSampleHostsFile((b"# That was a blank line.\n"))
entries = list(hostsFile.iterentries())
self.assertIsInstance(entries[0], UnparsedEntry)
self.assertEqual(entries[0].toString(), b"# That was a blank line.")
def test_verifyUnparsableLine(self):
"""
Loading a L{KnownHostsFile} from a path that contains an unparseable
line will be represented as an L{UnparsedEntry} instance.
"""
hostsFile = self.loadSampleHostsFile((b"This is just unparseable.\n"))
entries = list(hostsFile.iterentries())
self.assertIsInstance(entries[0], UnparsedEntry)
self.assertEqual(entries[0].toString(), b"This is just unparseable.")
self.assertEqual(1, len(entries))
def test_verifyUnparsableEncryptionMarker(self):
"""
Loading a L{KnownHostsFile} from a path containing an unparseable line
that starts with an encryption marker will be represented as an
L{UnparsedEntry} instance.
"""
hostsFile = self.loadSampleHostsFile((b"|1|This is unparseable.\n"))
entries = list(hostsFile.iterentries())
self.assertIsInstance(entries[0], UnparsedEntry)
self.assertEqual(entries[0].toString(), b"|1|This is unparseable.")
self.assertEqual(1, len(entries))
def test_loadNonExistent(self):
"""
Loading a L{KnownHostsFile} from a path that does not exist should
result in an empty L{KnownHostsFile} that will save back to that path.
"""
pn = self.mktemp()
knownHostsFile = KnownHostsFile.fromPath(FilePath(pn))
entries = list(knownHostsFile.iterentries())
self.assertEqual([], entries)
self.assertFalse(FilePath(pn).exists())
knownHostsFile.save()
self.assertTrue(FilePath(pn).exists())
def test_loadNonExistentParent(self):
"""
Loading a L{KnownHostsFile} from a path whose parent directory does not
exist should result in an empty L{KnownHostsFile} that will save back
to that path, creating its parent directory(ies) in the process.
"""
thePath = FilePath(self.mktemp())
knownHostsPath = thePath.child("foo").child(b"known_hosts")
knownHostsFile = KnownHostsFile.fromPath(knownHostsPath)
knownHostsFile.save()
knownHostsPath.restat(False)
self.assertTrue(knownHostsPath.exists())
def test_savingAddsEntry(self):
"""
L{KnownHostsFile.save} will write out a new file with any entries
that have been added.
"""
path = self.pathWithContent(sampleHashedLine +
otherSamplePlaintextLine)
knownHostsFile = KnownHostsFile.fromPath(path)
newEntry = knownHostsFile.addHostKey(b"some.example.com",
Key.fromString(thirdSampleKey))
expectedContent = (
sampleHashedLine +
otherSamplePlaintextLine + HashedEntry.MAGIC +
b2a_base64(newEntry._hostSalt).strip() + b"|" +
b2a_base64(newEntry._hostHash).strip() + b" ssh-rsa " +
thirdSampleEncodedKey + b"\n")
# Sanity check, let's make sure the base64 API being used for the test
# isn't inserting spurious newlines.
self.assertEqual(3, expectedContent.count(b"\n"))
knownHostsFile.save()
self.assertEqual(expectedContent, path.getContent())
def test_savingAvoidsDuplication(self):
"""
L{KnownHostsFile.save} only writes new entries to the save path, not
entries which were added and already written by a previous call to
C{save}.
"""
path = FilePath(self.mktemp())
knownHosts = KnownHostsFile(path)
entry = knownHosts.addHostKey(
b"some.example.com", Key.fromString(sampleKey))
knownHosts.save()
knownHosts.save()
knownHosts = KnownHostsFile.fromPath(path)
self.assertEqual([entry], list(knownHosts.iterentries()))
def test_savingsPreservesExisting(self):
"""
L{KnownHostsFile.save} will not overwrite existing entries in its save
path, even if they were only added after the L{KnownHostsFile} instance
was initialized.
"""
# Start off with one host/key pair in the file
path = self.pathWithContent(sampleHashedLine)
knownHosts = KnownHostsFile.fromPath(path)
# After initializing the KnownHostsFile instance, add a second host/key
# pair to the file directly - without the instance's help or knowledge.
with path.open("a") as hostsFileObj:
hostsFileObj.write(otherSamplePlaintextLine)
# Add a third host/key pair using the KnownHostsFile instance
key = Key.fromString(thirdSampleKey)
knownHosts.addHostKey(b"brandnew.example.com", key)
knownHosts.save()
# Check that all three host/key pairs are present.
knownHosts = KnownHostsFile.fromPath(path)
self.assertEqual([True, True, True], [
knownHosts.hasHostKey(
b"www.twistedmatrix.com", Key.fromString(sampleKey)),
knownHosts.hasHostKey(
b"divmod.com", Key.fromString(otherSampleKey)),
knownHosts.hasHostKey(b"brandnew.example.com", key)])
def test_hasPresentKey(self):
"""
L{KnownHostsFile.hasHostKey} returns C{True} when a key for the given
hostname is present and matches the expected key.
"""
hostsFile = self.loadSampleHostsFile()
self.assertTrue(hostsFile.hasHostKey(
b"www.twistedmatrix.com", Key.fromString(sampleKey)))
def test_notPresentKey(self):
"""
L{KnownHostsFile.hasHostKey} returns C{False} when a key for the given
hostname is not present.
"""
hostsFile = self.loadSampleHostsFile()
self.assertFalse(hostsFile.hasHostKey(
b"non-existent.example.com", Key.fromString(sampleKey)))
self.assertTrue(hostsFile.hasHostKey(
b"www.twistedmatrix.com", Key.fromString(sampleKey)))
self.assertFalse(hostsFile.hasHostKey(
b"www.twistedmatrix.com", Key.fromString(ecdsaSampleKey)))
def test_hasLaterAddedKey(self):
"""
L{KnownHostsFile.hasHostKey} returns C{True} when a key for the given
hostname is present in the file, even if it is only added to the file
after the L{KnownHostsFile} instance is initialized.
"""
key = Key.fromString(sampleKey)
entry = PlainEntry([b"brandnew.example.com"], key.sshType(), key, b"")
hostsFile = self.loadSampleHostsFile()
with hostsFile.savePath.open("a") as hostsFileObj:
hostsFileObj.write(entry.toString() + b"\n")
self.assertEqual(
True, hostsFile.hasHostKey(b"brandnew.example.com", key))
def test_savedEntryHasKeyMismatch(self):
"""
L{KnownHostsFile.hasHostKey} raises L{HostKeyChanged} if the host key is
present in the underlying file, but different from the expected one.
The resulting exception should have an C{offendingEntry} indicating the
given entry.
"""
hostsFile = self.loadSampleHostsFile()
entries = list(hostsFile.iterentries())
exception = self.assertRaises(
HostKeyChanged, hostsFile.hasHostKey,
b"www.twistedmatrix.com", Key.fromString(otherSampleKey))
self.assertEqual(exception.offendingEntry, entries[0])
self.assertEqual(exception.lineno, 1)
self.assertEqual(exception.path, hostsFile.savePath)
def test_savedEntryAfterAddHasKeyMismatch(self):
"""
Even after a new entry has been added in memory but not yet saved, the
L{HostKeyChanged} exception raised by L{KnownHostsFile.hasHostKey} has a
C{lineno} attribute which indicates the 1-based line number of the
offending entry in the underlying file when the given host key does not
match the expected host key.
"""
hostsFile = self.loadSampleHostsFile()
hostsFile.addHostKey(
b"www.example.com", Key.fromString(otherSampleKey))
exception = self.assertRaises(
HostKeyChanged, hostsFile.hasHostKey,
b"www.twistedmatrix.com", Key.fromString(otherSampleKey))
self.assertEqual(exception.lineno, 1)
self.assertEqual(exception.path, hostsFile.savePath)
def test_unsavedEntryHasKeyMismatch(self):
"""
L{KnownHostsFile.hasHostKey} raises L{HostKeyChanged} if the host key is
present in memory (but not yet saved), but different from the expected
one. The resulting exception has a C{offendingEntry} indicating the
given entry, but no filename or line number information (reflecting the
fact that the entry exists only in memory).
"""
hostsFile = KnownHostsFile(FilePath(self.mktemp()))
entry = hostsFile.addHostKey(
b"www.example.com", Key.fromString(otherSampleKey))
exception = self.assertRaises(
HostKeyChanged, hostsFile.hasHostKey,
b"www.example.com", Key.fromString(thirdSampleKey))
self.assertEqual(exception.offendingEntry, entry)
self.assertIsNone(exception.lineno)
self.assertIsNone(exception.path)
def test_addHostKey(self):
"""
L{KnownHostsFile.addHostKey} adds a new L{HashedEntry} to the host
file, and returns it.
"""
hostsFile = self.loadSampleHostsFile()
aKey = Key.fromString(thirdSampleKey)
self.assertEqual(False,
hostsFile.hasHostKey(b"somewhere.example.com", aKey))
newEntry = hostsFile.addHostKey(b"somewhere.example.com", aKey)
# The code in OpenSSH requires host salts to be 20 characters long.
# This is the required length of a SHA-1 HMAC hash, so it's just a
# sanity check.
self.assertEqual(20, len(newEntry._hostSalt))
self.assertEqual(True,
newEntry.matchesHost(b"somewhere.example.com"))
self.assertEqual(newEntry.keyType, b"ssh-rsa")
self.assertEqual(aKey, newEntry.publicKey)
self.assertEqual(True,
hostsFile.hasHostKey(b"somewhere.example.com", aKey))
def test_randomSalts(self):
"""
L{KnownHostsFile.addHostKey} generates a random salt for each new key,
so subsequent salts will be different.
"""
hostsFile = self.loadSampleHostsFile()
aKey = Key.fromString(thirdSampleKey)
self.assertNotEqual(
hostsFile.addHostKey(b"somewhere.example.com", aKey)._hostSalt,
hostsFile.addHostKey(b"somewhere-else.example.com", aKey)._hostSalt)
def test_verifyValidKey(self):
"""
Verifying a valid key should return a L{Deferred} which fires with
True.
"""
hostsFile = self.loadSampleHostsFile()
hostsFile.addHostKey(b"1.2.3.4", Key.fromString(sampleKey))
ui = FakeUI()
d = hostsFile.verifyHostKey(ui, b"www.twistedmatrix.com", b"1.2.3.4",
Key.fromString(sampleKey))
l = []
d.addCallback(l.append)
self.assertEqual(l, [True])
def test_verifyInvalidKey(self):
"""
Verifying an invalid key should return a L{Deferred} which fires with a
L{HostKeyChanged} failure.
"""
hostsFile = self.loadSampleHostsFile()
wrongKey = Key.fromString(thirdSampleKey)
ui = FakeUI()
hostsFile.addHostKey(b"1.2.3.4", Key.fromString(sampleKey))
d = hostsFile.verifyHostKey(
ui, b"www.twistedmatrix.com", b"1.2.3.4", wrongKey)
return self.assertFailure(d, HostKeyChanged)
def verifyNonPresentKey(self):
"""
Set up a test to verify a key that isn't present. Return a 3-tuple of
the UI, a list set up to collect the result of the verifyHostKey call,
and the sample L{KnownHostsFile} being used.
This utility method avoids returning a L{Deferred}, and records results
in the returned list instead, because the events which get generated
here are pre-recorded in the 'ui' object. If the L{Deferred} in
        question does not fire, the test will fail quickly with an empty list.
"""
hostsFile = self.loadSampleHostsFile()
absentKey = Key.fromString(thirdSampleKey)
ui = FakeUI()
l = []
d = hostsFile.verifyHostKey(
ui, b"sample-host.example.com", b"4.3.2.1", absentKey)
d.addBoth(l.append)
self.assertEqual([], l)
self.assertEqual(
ui.promptText,
b"The authenticity of host 'sample-host.example.com (4.3.2.1)' "
b"can't be established.\n"
b"RSA key fingerprint is "
b"SHA256:mS7mDBGhewdzJkaKRkx+wMjUdZb/GzvgcdoYjX5Js9I=.\n"
b"Are you sure you want to continue connecting (yes/no)? ")
return ui, l, hostsFile
def test_verifyNonPresentKey_Yes(self):
"""
Verifying a key where neither the hostname nor the IP are present
should result in the UI being prompted with a message explaining as
much. If the UI says yes, the Deferred should fire with True.
"""
ui, l, knownHostsFile = self.verifyNonPresentKey()
ui.promptDeferred.callback(True)
self.assertEqual([True], l)
reloaded = KnownHostsFile.fromPath(knownHostsFile.savePath)
self.assertEqual(
True,
reloaded.hasHostKey(b"4.3.2.1", Key.fromString(thirdSampleKey)))
self.assertEqual(
True,
reloaded.hasHostKey(b"sample-host.example.com",
Key.fromString(thirdSampleKey)))
def test_verifyNonPresentKey_No(self):
"""
Verifying a key where neither the hostname nor the IP are present
should result in the UI being prompted with a message explaining as
much. If the UI says no, the Deferred should fail with
UserRejectedKey.
"""
ui, l, knownHostsFile = self.verifyNonPresentKey()
ui.promptDeferred.callback(False)
l[0].trap(UserRejectedKey)
def test_verifyNonPresentECKey(self):
"""
Set up a test to verify an ECDSA key that isn't present.
Return a 3-tuple of the UI, a list set up to collect the result
of the verifyHostKey call, and the sample L{KnownHostsFile} being used.
"""
ecObj = Key._fromECComponents(
x=keydata.ECDatanistp256['x'],
y=keydata.ECDatanistp256['y'],
privateValue=keydata.ECDatanistp256['privateValue'],
curve=keydata.ECDatanistp256['curve']
)
hostsFile = self.loadSampleHostsFile()
ui = FakeUI()
l = []
d = hostsFile.verifyHostKey(
ui, b"sample-host.example.com", b"4.3.2.1", ecObj)
d.addBoth(l.append)
self.assertEqual([], l)
self.assertEqual(
ui.promptText,
b"The authenticity of host 'sample-host.example.com (4.3.2.1)' "
b"can't be established.\n"
b"ECDSA key fingerprint is "
b"SHA256:fJnSpgCcYoYYsaBbnWj1YBghGh/QTDgfe4w4U5M5tEo=.\n"
b"Are you sure you want to continue connecting (yes/no)? ")
def test_verifyHostIPMismatch(self):
"""
Verifying a key where the host is present (and correct), but the IP is
present and different, should result the deferred firing in a
HostKeyChanged failure.
"""
hostsFile = self.loadSampleHostsFile()
wrongKey = Key.fromString(thirdSampleKey)
ui = FakeUI()
d = hostsFile.verifyHostKey(
ui, b"www.twistedmatrix.com", b"4.3.2.1", wrongKey)
return self.assertFailure(d, HostKeyChanged)
def test_verifyKeyForHostAndIP(self):
"""
Verifying a key where the hostname is present but the IP is not should
result in the key being added for the IP and the user being warned
about the change.
"""
ui = FakeUI()
hostsFile = self.loadSampleHostsFile()
expectedKey = Key.fromString(sampleKey)
hostsFile.verifyHostKey(
ui, b"www.twistedmatrix.com", b"5.4.3.2", expectedKey)
self.assertEqual(
True, KnownHostsFile.fromPath(hostsFile.savePath).hasHostKey(
b"5.4.3.2", expectedKey))
self.assertEqual(
["Warning: Permanently added the RSA host key for IP address "
"'5.4.3.2' to the list of known hosts."],
ui.userWarnings)
def test_getHostKeyAlgorithms(self):
"""
For a given host, get the host key algorithms for that
host in the known_hosts file.
"""
hostsFile = self.loadSampleHostsFile()
hostsFile.addHostKey(
b"www.twistedmatrix.com", Key.fromString(otherSampleKey))
hostsFile.addHostKey(
b"www.twistedmatrix.com", Key.fromString(ecdsaSampleKey))
hostsFile.save()
options = {}
options['known-hosts'] = hostsFile.savePath.path
algorithms = default.getHostKeyAlgorithms(
b"www.twistedmatrix.com", options)
expectedAlgorithms = [b'ssh-rsa', b'ecdsa-sha2-nistp256']
self.assertEqual(algorithms, expectedAlgorithms)
class FakeFile(object):
"""
A fake file-like object that acts enough like a file for
L{ConsoleUI.prompt}.
"""
def __init__(self):
self.inlines = []
self.outchunks = []
self.closed = False
def readline(self):
"""
Return a line from the 'inlines' list.
"""
return self.inlines.pop(0)
def write(self, chunk):
"""
Append the given item to the 'outchunks' list.
"""
if self.closed:
raise IOError("the file was closed")
self.outchunks.append(chunk)
def close(self):
"""
Set the 'closed' flag to True, explicitly marking that it has been
closed.
"""
self.closed = True
class ConsoleUITests(TestCase):
"""
Test cases for L{ConsoleUI}.
"""
def setUp(self):
"""
Create a L{ConsoleUI} pointed at a L{FakeFile}.
"""
self.fakeFile = FakeFile()
self.ui = ConsoleUI(self.openFile)
def openFile(self):
"""
Return the current fake file.
"""
return self.fakeFile
def newFile(self, lines):
"""
Create a new fake file (the next file that self.ui will open) with the
given list of lines to be returned from readline().
"""
self.fakeFile = FakeFile()
self.fakeFile.inlines = lines
def test_promptYes(self):
"""
L{ConsoleUI.prompt} writes a message to the console, then reads a line.
If that line is 'yes', then it returns a L{Deferred} that fires with
True.
"""
for okYes in [b'yes', b'Yes', b'yes\n']:
self.newFile([okYes])
l = []
self.ui.prompt("Hello, world!").addCallback(l.append)
self.assertEqual(["Hello, world!"], self.fakeFile.outchunks)
self.assertEqual([True], l)
self.assertTrue(self.fakeFile.closed)
def test_promptNo(self):
"""
L{ConsoleUI.prompt} writes a message to the console, then reads a line.
If that line is 'no', then it returns a L{Deferred} that fires with
False.
"""
for okNo in [b'no', b'No', b'no\n']:
self.newFile([okNo])
l = []
self.ui.prompt("Goodbye, world!").addCallback(l.append)
self.assertEqual(["Goodbye, world!"], self.fakeFile.outchunks)
self.assertEqual([False], l)
self.assertTrue(self.fakeFile.closed)
def test_promptRepeatedly(self):
"""
L{ConsoleUI.prompt} writes a message to the console, then reads a line.
If that line is neither 'yes' nor 'no', then it says "Please enter
'yes' or 'no'" until it gets a 'yes' or a 'no', at which point it
returns a Deferred that answers either True or False.
"""
self.newFile([b'what', b'uh', b'okay', b'yes'])
l = []
self.ui.prompt(b"Please say something useful.").addCallback(l.append)
self.assertEqual([True], l)
self.assertEqual(self.fakeFile.outchunks,
[b"Please say something useful."] +
[b"Please type 'yes' or 'no': "] * 3)
self.assertTrue(self.fakeFile.closed)
self.newFile([b'blah', b'stuff', b'feh', b'no'])
l = []
self.ui.prompt(b"Please say something negative.").addCallback(l.append)
self.assertEqual([False], l)
self.assertEqual(self.fakeFile.outchunks,
[b"Please say something negative."] +
[b"Please type 'yes' or 'no': "] * 3)
self.assertTrue(self.fakeFile.closed)
def test_promptOpenFailed(self):
"""
If the C{opener} passed to L{ConsoleUI} raises an exception, that
exception will fail the L{Deferred} returned from L{ConsoleUI.prompt}.
"""
def raiseIt():
raise IOError()
ui = ConsoleUI(raiseIt)
d = ui.prompt("This is a test.")
return self.assertFailure(d, IOError)
def test_warn(self):
"""
L{ConsoleUI.warn} should output a message to the console object.
"""
self.ui.warn("Test message.")
self.assertEqual(["Test message."], self.fakeFile.outchunks)
self.assertTrue(self.fakeFile.closed)
def test_warnOpenFailed(self):
"""
L{ConsoleUI.warn} should log a traceback if the output can't be opened.
"""
def raiseIt():
1 / 0
ui = ConsoleUI(raiseIt)
ui.warn("This message never makes it.")
self.assertEqual(len(self.flushLoggedErrors(ZeroDivisionError)), 1)
class FakeUI(object):
"""
A fake UI object, adhering to the interface expected by
L{KnownHostsFile.verifyHostKey}
@ivar userWarnings: inputs provided to 'warn'.
@ivar promptDeferred: last result returned from 'prompt'.
@ivar promptText: the last input provided to 'prompt'.
"""
def __init__(self):
self.userWarnings = []
self.promptDeferred = None
self.promptText = None
def prompt(self, text):
"""
Issue the user an interactive prompt, which they can accept or deny.
"""
self.promptText = text
self.promptDeferred = Deferred()
return self.promptDeferred
def warn(self, text):
"""
Issue a non-interactive warning to the user.
"""
self.userWarnings.append(text)
class FakeObject(object):
"""
A fake object that can have some attributes. Used to fake
L{SSHClientTransport} and L{SSHClientFactory}.
"""
class DefaultAPITests(TestCase):
"""
The API in L{twisted.conch.client.default.verifyHostKey} is the integration
point between the code in the rest of conch and L{KnownHostsFile}.
"""
def patchedOpen(self, fname, mode):
"""
The patched version of 'open'; this returns a L{FakeFile} that the
instantiated L{ConsoleUI} can use.
"""
self.assertEqual(fname, "/dev/tty")
self.assertEqual(mode, "r+b")
return self.fakeFile
def setUp(self):
"""
Patch 'open' in verifyHostKey.
"""
self.fakeFile = FakeFile()
self.patch(default, "_open", self.patchedOpen)
self.hostsOption = self.mktemp()
self.hashedEntries = {}
knownHostsFile = KnownHostsFile(FilePath(self.hostsOption))
for host in (b"exists.example.com", b"4.3.2.1"):
entry = knownHostsFile.addHostKey(host, Key.fromString(sampleKey))
self.hashedEntries[host] = entry
knownHostsFile.save()
self.fakeTransport = FakeObject()
self.fakeTransport.factory = FakeObject()
self.options = self.fakeTransport.factory.options = {
'host': b"exists.example.com",
'known-hosts': self.hostsOption
}
def test_verifyOKKey(self):
"""
L{default.verifyHostKey} should return a L{Deferred} which fires with
C{1} when passed a host, IP, and key which already match the
known_hosts file it is supposed to check.
"""
l = []
default.verifyHostKey(self.fakeTransport, b"4.3.2.1", sampleKey,
b"I don't care.").addCallback(l.append)
self.assertEqual([1], l)
def replaceHome(self, tempHome):
"""
Replace the HOME environment variable until the end of the current
test, with the given new home-directory, so that L{os.path.expanduser}
will yield controllable, predictable results.
@param tempHome: the pathname to replace the HOME variable with.
@type tempHome: L{str}
"""
oldHome = os.environ.get('HOME')
def cleanupHome():
if oldHome is None:
del os.environ['HOME']
else:
os.environ['HOME'] = oldHome
self.addCleanup(cleanupHome)
os.environ['HOME'] = tempHome
def test_noKnownHostsOption(self):
"""
L{default.verifyHostKey} should find your known_hosts file in
~/.ssh/known_hosts if you don't specify one explicitly on the command
line.
"""
l = []
tmpdir = self.mktemp()
oldHostsOption = self.hostsOption
hostsNonOption = FilePath(tmpdir).child(".ssh").child("known_hosts")
hostsNonOption.parent().makedirs()
FilePath(oldHostsOption).moveTo(hostsNonOption)
self.replaceHome(tmpdir)
self.options['known-hosts'] = None
default.verifyHostKey(self.fakeTransport, b"4.3.2.1", sampleKey,
b"I don't care.").addCallback(l.append)
self.assertEqual([1], l)
def test_verifyHostButNotIP(self):
"""
L{default.verifyHostKey} should return a L{Deferred} which fires with
C{1} when passed a host which matches with an IP is not present in its
known_hosts file, and should also warn the user that it has added the
IP address.
"""
l = []
default.verifyHostKey(self.fakeTransport, b"8.7.6.5", sampleKey,
b"Fingerprint not required.").addCallback(l.append)
self.assertEqual(
["Warning: Permanently added the RSA host key for IP address "
"'8.7.6.5' to the list of known hosts."],
self.fakeFile.outchunks)
self.assertEqual([1], l)
knownHostsFile = KnownHostsFile.fromPath(FilePath(self.hostsOption))
self.assertTrue(knownHostsFile.hasHostKey(b"8.7.6.5",
Key.fromString(sampleKey)))
def test_verifyQuestion(self):
"""
L{default.verifyHostKey} should return a L{Default} which fires with
C{0} when passed an unknown host that the user refuses to acknowledge.
"""
self.fakeTransport.factory.options['host'] = b'fake.example.com'
self.fakeFile.inlines.append(b"no")
d = default.verifyHostKey(
self.fakeTransport, b"9.8.7.6", otherSampleKey,
b"No fingerprint!")
self.assertEqual(
[b"The authenticity of host 'fake.example.com (9.8.7.6)' "
b"can't be established.\n"
b"RSA key fingerprint is "
b"SHA256:vD0YydsNIUYJa7yLZl3tIL8h0vZvQ8G+HPG7JLmQV0s=.\n"
b"Are you sure you want to continue connecting (yes/no)? "],
self.fakeFile.outchunks)
return self.assertFailure(d, UserRejectedKey)
def test_verifyBadKey(self):
"""
L{default.verifyHostKey} should return a L{Deferred} which fails with
L{HostKeyChanged} if the host key is incorrect.
"""
d = default.verifyHostKey(
self.fakeTransport, b"4.3.2.1", otherSampleKey,
"Again, not required.")
return self.assertFailure(d, HostKeyChanged)
def test_inKnownHosts(self):
"""
L{default.isInKnownHosts} should return C{1} when a host with a key
is in the known hosts file.
"""
host = self.hashedEntries[b"4.3.2.1"].toString().split()[0]
r = default.isInKnownHosts(
host, Key.fromString(sampleKey).blob(),
{"known-hosts": FilePath(self.hostsOption).path})
self.assertEqual(1, r)
def test_notInKnownHosts(self):
"""
L{default.isInKnownHosts} should return C{0} when a host with a key
is not in the known hosts file.
"""
r = default.isInKnownHosts(
"not.there", b"irrelevant",
{"known-hosts": FilePath(self.hostsOption).path})
self.assertEqual(0, r)
def test_inKnownHostsKeyChanged(self):
"""
L{default.isInKnownHosts} should return C{2} when a host with a key
other than the given one is in the known hosts file.
"""
host = self.hashedEntries[b"4.3.2.1"].toString().split()[0]
r = default.isInKnownHosts(
host, Key.fromString(otherSampleKey).blob(),
{"known-hosts": FilePath(self.hostsOption).path})
self.assertEqual(2, r)
| [
"[email protected]"
] | |
e9dfb0e3bcc9bd274fa48b51fe6060bd14ae10b0 | a6281073aaddf903d13d903e01ef8f6597e0c366 | /RPWR/lookup/urls.py | 5e482fdb24bd35e8a3820306e06da0aa9fab213c | [] | no_license | pronob1010/D152-Recipe-provider-with-Redis | 9c92be028bef4260a26b876084fde6aa51662ea6 | 970b5f98da7e5e35de9fe8b9642d64e89daff809 | refs/heads/main | 2023-06-23T18:21:42.697646 | 2021-07-25T11:57:01 | 2021-07-25T11:57:01 | 389,307,045 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 161 | py | from django.urls import path
from . views import *
urlpatterns = [
path('', index, name="index"),
path('details/<int:pk>', details, name="details" )
]
| [
"[email protected]"
] | |
d5b6070866f6f4dc00662100e340c931bfb8608c | f6d7c30a7ed343e5fe4859ceaae1cc1965d904b7 | /htdocs/submissions/d5b6070866f6f4dc00662100e340c931bfb8608c.py | a1b97dd53dd2bbaee95485fe137f502923b7d1af | [] | no_license | pycontest/pycontest.github.io | ed365ebafc5be5d610ff9d97001240289de697ad | 606015cad16170014c41e335b1f69dc86250fb24 | refs/heads/master | 2021-01-10T04:47:46.713713 | 2016-02-01T11:03:46 | 2016-02-01T11:03:46 | 50,828,627 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 147 | py | seven_seg=lambda x:"\n".join("".join(' |'[b/4&1]+' _'[b&2]+' |'[b&1]for b in[i>>3*int(e)for e in x])for i in[306775170,1060861645,524130191])+'\n' | [
"[email protected]"
] | |
28ac291d1ae4422fbc31b029ac29566e60bb06d6 | aa01560e68a07033d4b24c4770966771349e2b4f | /src/jobs/migrations/0006_auto_20201209_1527.py | 6892aa2f3a57b13172292e19d88ab9cece02673e | [] | no_license | fluffcoding/solitaireHR | a0a357e1b19b955caae8df11ca92188cad79e217 | b97a29f9accc5b45cd62986b62673a6ba802771b | refs/heads/main | 2023-04-05T11:46:41.855323 | 2021-04-26T04:57:27 | 2021-04-26T04:57:27 | 322,067,817 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 933 | py | # Generated by Django 3.1.2 on 2020-12-09 15:27
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('jobs', '0005_jobapplication'),
]
operations = [
migrations.AddField(
model_name='jobapplication',
name='applied',
field=models.BooleanField(blank=True, default=True, null=True),
),
migrations.AddField(
model_name='jobapplication',
name='interviewed',
field=models.BooleanField(blank=True, null=True),
),
migrations.AddField(
model_name='jobapplication',
name='selected',
field=models.BooleanField(blank=True, null=True),
),
migrations.AddField(
model_name='jobapplication',
name='shortlisted',
field=models.BooleanField(blank=True, null=True),
),
]
| [
"[email protected]"
] | |
be4dc6b82a739c6373f3f76ca9b40558b0e72d4b | f15449e438b0b799a3866ba21243924ce0e4fa2d | /survey/migrations/0026_auto__add_field_paper_step.py | d21b49cfb317dedb70e0a7dafc09a7da47aa375e | [] | no_license | xmduhan/qisite | 46af79d0e4d1af814298862cfaa18c6f7ddf3a74 | 2c9d7513c3e0cd483341dc457a8d289e5e174f20 | refs/heads/master | 2021-01-17T08:44:29.826082 | 2020-02-07T11:22:29 | 2020-02-07T11:22:29 | 14,419,020 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 22,819 | py | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Paper.step'
db.add_column(u'survey_paper', 'step',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Paper.step'
db.delete_column(u'survey_paper', 'step')
models = {
u'account.user': {
'Meta': {'ordering': "['name']", 'object_name': 'User'},
'birthDate': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'code': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'createBy': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'userCreated'", 'null': 'True', 'to': u"orm['account.User']"}),
'createTime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modifyBy': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'userModified'", 'null': 'True', 'to': u"orm['account.User']"}),
'modifyTime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'survey.branch': {
'Meta': {'object_name': 'Branch'},
'createBy': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'branchCreated_set'", 'to': u"orm['account.User']"}),
'createTime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modifyBy': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'branchModified_set'", 'to': u"orm['account.User']"}),
'modifyTime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'nextQuestion': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'fromBranch'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['survey.Question']"}),
'ord': ('django.db.models.fields.IntegerField', [], {}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.Question']"}),
'text': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'survey.custlist': {
'Meta': {'object_name': 'CustList'},
'createBy': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'custListCreated_set'", 'to': u"orm['account.User']"}),
'createTime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'descrition': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modifyBy': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'custListModified_set'", 'to': u"orm['account.User']"}),
'modifyTime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'survey.custlistitem': {
'Meta': {'object_name': 'CustListItem'},
'createBy': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'custListItemCreated_set'", 'to': u"orm['account.User']"}),
'createTime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'custList': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'custListItem_set'", 'to': u"orm['survey.CustList']"}),
'defineInfo_set': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['survey.DefineInfo']", 'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modifyBy': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'custListItemModified_set'", 'to': u"orm['account.User']"}),
'modifyTime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'survey.defineinfo': {
'Meta': {'object_name': 'DefineInfo'},
'createBy': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'defineInfoCreated_set'", 'to': u"orm['account.User']"}),
'createTime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modifyBy': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'defineInfoModified_set'", 'to': u"orm['account.User']"}),
'modifyTime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'ord': ('django.db.models.fields.IntegerField', [], {}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'survey.paper': {
'Meta': {'ordering': "['title']", 'object_name': 'Paper'},
'code': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '100', 'null': 'True', 'blank': 'True'}),
'createBy': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'paperCreated_set'", 'null': 'True', 'to': u"orm['account.User']"}),
'createTime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'inOrder': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'lookBack': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'modifyBy': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'paperModified_set'", 'null': 'True', 'to': u"orm['account.User']"}),
'modifyTime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'paging': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'questionNumStyle': ('django.db.models.fields.CharField', [], {'default': "'123'", 'max_length': '50'}),
'step': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'survey': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'paperReversed_set'", 'null': 'True', 'to': u"orm['survey.Survey']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'type': ('django.db.models.fields.CharField', [], {'default': "'T'", 'max_length': '10'})
},
u'survey.papercatalog': {
'Meta': {'object_name': 'PaperCatalog'},
'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'}),
'createBy': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'paperCatalogCreated_set'", 'to': u"orm['account.User']"}),
'createTime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modifyBy': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'paperCatalogModified_set'", 'to': u"orm['account.User']"}),
'modifyTime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'ord': ('django.db.models.fields.IntegerField', [], {}),
'paper_set': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['survey.Paper']", 'through': u"orm['survey.PaperCatalogPaper']", 'symmetrical': 'False'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.PaperCatalog']", 'null': 'True', 'blank': 'True'})
},
u'survey.papercatalogpaper': {
'Meta': {'object_name': 'PaperCatalogPaper'},
'createBy': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'paperCatalogPaperCreated_set'", 'to': u"orm['account.User']"}),
'createTime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modifyBy': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'paperCatalogPaperModified_set'", 'to': u"orm['account.User']"}),
'modifyTime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'ord': ('django.db.models.fields.IntegerField', [], {}),
'paper': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.Paper']"}),
'paperCatalog': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.PaperCatalog']"})
},
u'survey.question': {
'Meta': {'ordering': "['ord']", 'object_name': 'Question'},
'branchNumStyle': ('django.db.models.fields.CharField', [], {'default': "'ABC'", 'max_length': '50'}),
'confused': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'contentLength': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'createBy': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'questionCreated_set'", 'to': u"orm['account.User']"}),
'createTime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modifyBy': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'questionModified_set'", 'to': u"orm['account.User']"}),
'modifyTime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'nextQuestion': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.Question']", 'null': 'True', 'blank': 'True'}),
'ord': ('django.db.models.fields.IntegerField', [], {}),
'paper': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.Paper']", 'null': 'True', 'blank': 'True'}),
'text': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'valueMax': ('django.db.models.fields.FloatField', [], {'default': '10', 'null': 'True', 'blank': 'True'}),
'valueMin': ('django.db.models.fields.FloatField', [], {'default': '0', 'null': 'True', 'blank': 'True'})
},
u'survey.questioncatalog': {
'Meta': {'object_name': 'QuestionCatalog'},
'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'}),
'createBy': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'questionCatalogCreated_set'", 'to': u"orm['account.User']"}),
'createTime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modifyBy': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'questionCatalogModified_set'", 'to': u"orm['account.User']"}),
'modifyTime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'ord': ('django.db.models.fields.IntegerField', [], {}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.QuestionCatalog']", 'null': 'True', 'blank': 'True'}),
'question_set': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['survey.Question']", 'through': u"orm['survey.QuestionCatalogQuestion']", 'symmetrical': 'False'})
},
u'survey.questioncatalogquestion': {
'Meta': {'object_name': 'QuestionCatalogQuestion'},
'createBy': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'questionCatalogQuestionCreated_set'", 'to': u"orm['account.User']"}),
'createTime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modifyBy': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'questionCatalogQuestionModified_set'", 'to': u"orm['account.User']"}),
'modifyTime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'ord': ('django.db.models.fields.IntegerField', [], {}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.Question']"}),
'questionCatalog': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.QuestionCatalog']"})
},
u'survey.resource': {
'Meta': {'object_name': 'Resource'},
'createBy': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'resourceCreated_set'", 'to': u"orm['account.User']"}),
'createTime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'height': ('django.db.models.fields.FloatField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modifyBy': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'resourceModified_set'", 'to': u"orm['account.User']"}),
'modifyTime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.Question']"}),
'resourceType': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'resourceUrl': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'width': ('django.db.models.fields.FloatField', [], {})
},
u'survey.sample': {
'Meta': {'object_name': 'Sample'},
'createBy': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sampleCreated_set'", 'to': u"orm['account.User']"}),
'createTime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'finished': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ipAddress': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'isValid': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'modifyBy': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sampleModified_set'", 'to': u"orm['account.User']"}),
'modifyTime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'paper': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.Paper']"}),
'targetCust': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.TargetCust']", 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['account.User']", 'null': 'True', 'blank': 'True'})
},
u'survey.sampleitem': {
'Meta': {'object_name': 'SampleItem'},
'branch_set': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['survey.Branch']", 'symmetrical': 'False'}),
'content': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'createBy': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'sampleItemCreated_set'", 'null': 'True', 'to': u"orm['account.User']"}),
'createTime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modifyBy': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'sampleItemModified_set'", 'null': 'True', 'to': u"orm['account.User']"}),
'modifyTime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.Question']"}),
'sample': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.Sample']"}),
'score': ('django.db.models.fields.FloatField', [], {'default': '0'})
},
u'survey.survey': {
'Meta': {'object_name': 'Survey'},
'anonymous': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'bonus': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'code': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '100', 'null': 'True', 'blank': 'True'}),
'createBy': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'surveyCreated_set'", 'to': u"orm['account.User']"}),
'createTime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'custList': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': u"orm['survey.CustList']", 'null': 'True', 'blank': 'True'}),
'endTime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2015, 9, 20, 0, 0)'}),
'fee': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'hardCost': ('django.db.models.fields.FloatField', [], {'default': '0'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ipLimit': ('django.db.models.fields.IntegerField', [], {'default': '5'}),
'lastSmsSendTime': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'macLimit': ('django.db.models.fields.IntegerField', [], {'default': '5'}),
'modifyBy': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'surveyModified_set'", 'to': u"orm['account.User']"}),
'modifyTime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'paper': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'survey_set'", 'null': 'True', 'to': u"orm['survey.Paper']"}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '10', 'blank': 'True'}),
'paused': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'pay': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'publishTime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'resubmit': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'shared': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'state': ('django.db.models.fields.CharField', [], {'default': "'A'", 'max_length': '5'}),
'targetOnly': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'validSampleLimit': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'viewResult': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
u'survey.targetcust': {
'Meta': {'object_name': 'TargetCust'},
'createBy': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'targetCustCreated_set'", 'to': u"orm['account.User']"}),
'createTime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'defineInfo_set': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['survey.DefineInfo']", 'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modifyBy': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'targetCustModified_set'", 'to': u"orm['account.User']"}),
'modifyTime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'survey': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'targetCust_set'", 'to': u"orm['survey.Survey']"}),
'token': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['survey'] | [
"[email protected]"
] | |
0d04bd3854dda5ce09a0ee3aa7f1f60626f35220 | 0d5e4ad0a693492204aa6210c2de470b26732509 | /commands/eztv_mininova.py | f03143d557ebdcff904fff885f927ad0d6d242bd | [] | no_license | enlavin/tvscrap | 7d4ffe16a5af9f1747c021a0cc6bd187a5b0c91e | 28d9baf1a2b2db4321b59747e85f1302f92f3a98 | refs/heads/master | 2020-04-29T10:20:45.150974 | 2015-04-26T18:11:26 | 2015-04-26T18:11:26 | 18,444,784 | 1 | 1 | null | 2015-04-28T20:24:58 | 2014-04-04T16:18:24 | Python | UTF-8 | Python | false | false | 2,225 | py | # -*- coding: utf-8 -*-
# GNU General Public Licence (GPL)
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 59 Temple
# Place, Suite 330, Boston, MA 02111-1307 USA
try:
import feedparser
except ImportError:
print "feedparser support not installed. Try easy_install feedparser."
import sys
sys.exit(1)
import re
from optparse import OptionParser
from db import Show, Episode
from lib.feed_command import FeedCommand
EZTV_MININOVA_RSS="http://www.mininova.org/rss.xml?user=eztv"
class Command(FeedCommand):
def __init__(self, store):
super(Command, self).__init__(store)
self.rx_episode_size = re.compile(u'Size:\s+([0-9.]+)')
def _config_feed(self):
import feedparser
if getattr(self.options, "file"):
self.feed = feedparser.parse(self.options.file)
elif getattr(self.options, "url"):
self.feed = feedparser.parse(self.options.url)
else:
self.feed = feedparser.parse(EZTV_MININOVA_RSS)
if not self.feed["entries"]:
            raise Exception("No entries could be loaded from the feed")
def _iter_feed(self):
for entry in self.feed["entries"]:
try:
size = float(self.rx_episode_size.findall(entry["summary"])[0])
except IndexError:
print "File size not available. Skipping"
continue
except TypeError:
print "File size field corrupt. Skipping"
continue
yield {
"name": entry["title"],
"size": size,
"url_torrent": [entry['enclosures'][0]["href"]],
}
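# Illustrative note (not part of the original module): each dict yielded by
# _iter_feed() is a plain mapping with "name", "size" (the numeric value parsed
# from the entry summary) and "url_torrent" keys, for example:
#   {"name": "Some.Show.S01E01", "size": 350.0,
#    "url_torrent": ["http://www.mininova.org/get/12345"]}
# The concrete values above are assumptions used only for illustration.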
| [
"devnull@localhost"
] | devnull@localhost |
3fcdfddc6d13051a9dca15b880b1b4b6fe496fbc | d88397be1c6a31985bc2283280e743fd3b988dd1 | /nncf/hw_config.py | 1167773db5e48ea9112bf8784a671aa0ad028ed1 | [
"Apache-2.0"
] | permissive | sshyran/openvino-nncf-pytorch | f5e09066a216fa786927937a91a0e6742f347660 | fd02652950cd803a36f5283f5a5df999bb45433b | refs/heads/develop | 2023-04-18T06:58:54.646669 | 2021-03-12T15:41:39 | 2021-03-12T15:41:39 | 347,374,166 | 0 | 0 | Apache-2.0 | 2023-04-03T23:52:21 | 2021-03-13T13:11:32 | null | UTF-8 | Python | false | false | 11,105 | py | """
Copyright (c) 2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from collections import OrderedDict
from enum import Enum
from pathlib import Path
from typing import Any
from typing import Dict
from typing import List
from typing import Optional
from typing import Set
from typing import Type
import addict as ad
import jstyleson as json
import warnings
from nncf.common.os import safe_open
from nncf.config import product_dict
from nncf.definitions import HW_CONFIG_RELATIVE_DIR
from nncf.definitions import NNCF_PACKAGE_ROOT_DIR
from nncf.dynamic_graph.operator_metatypes import OPERATOR_METATYPES
from nncf.hw_config_op_names import HWConfigOpName
from nncf.quantization.layers import AsymmetricQuantizer
from nncf.quantization.layers import QuantizationMode
from nncf.quantization.layers import QuantizerConfig
from nncf.quantization.layers import SymmetricQuantizer
class HWConfigType(Enum):
CPU = 'CPU'
GPU = 'GPU'
VPU = 'VPU'
@staticmethod
def from_str(config_value: str) -> 'HWConfigType':
if config_value == HWConfigType.CPU.value:
return HWConfigType.CPU
if config_value == HWConfigType.GPU.value:
return HWConfigType.GPU
if config_value == HWConfigType.VPU.value:
return HWConfigType.VPU
raise RuntimeError("Unknown HW config type string")
HW_CONFIG_TYPE_TARGET_DEVICE_MAP = {
'ANY': HWConfigType.CPU.value,
'CPU': HWConfigType.CPU.value,
'VPU': HWConfigType.VPU.value,
'GPU': HWConfigType.GPU.value,
'TRIAL': None
}
def get_metatypes_by_hw_config_name(hw_config_name: HWConfigOpName) -> List['OperatorMetatype']:
retval = []
for op_meta in OPERATOR_METATYPES.registry_dict.values(): # type: OperatorMetatype
if hw_config_name in op_meta.hw_config_names:
retval.append(op_meta)
return retval
class HWConfig(list):
QUANTIZATION_ALGORITHM_NAME = "quantization"
ATTRIBUTES_NAME = "attributes"
SCALE_ATTRIBUTE_NAME = "scales"
UNIFIED_TYPE_NAME = "unified"
ADJUST_PADDING_ATTRIBUTE_NAME = "adjust_padding"
TYPE_TO_CONF_NAME_DICT = {
HWConfigType.CPU: "cpu.json",
HWConfigType.VPU: "vpu.json",
HWConfigType.GPU: "gpu.json"
}
def __init__(self):
super().__init__()
self.registered_algorithm_configs = {}
self.target_device = None
@staticmethod
def get_path_to_hw_config(hw_config_type: HWConfigType):
return '/'.join([NNCF_PACKAGE_ROOT_DIR, HW_CONFIG_RELATIVE_DIR,
HWConfig.TYPE_TO_CONF_NAME_DICT[hw_config_type]])
@classmethod
def from_dict(cls, dct: dict):
# pylint:disable=too-many-nested-blocks,too-many-branches
hw_config = cls()
hw_config.target_device = dct['target_device']
for algorithm_name, algorithm_configs in dct.get('config', {}).items():
hw_config.registered_algorithm_configs[algorithm_name] = {}
for algo_config_alias, algo_config in algorithm_configs.items():
for key, val in algo_config.items():
if not isinstance(val, list):
algo_config[key] = [val]
hw_config.registered_algorithm_configs[algorithm_name][algo_config_alias] = list(
product_dict(algo_config))
for op_dict in dct.get('operations', []):
for algorithm_name in op_dict:
if algorithm_name not in hw_config.registered_algorithm_configs:
continue
tmp_config = {}
for algo_and_op_specific_field_name, algorithm_configs in op_dict[algorithm_name].items():
if not isinstance(algorithm_configs, list):
algorithm_configs = [algorithm_configs]
tmp_config[algo_and_op_specific_field_name] = []
for algorithm_config in algorithm_configs:
if isinstance(algorithm_config, str): # Alias was supplied
tmp_config[algo_and_op_specific_field_name].extend(
hw_config.registered_algorithm_configs[algorithm_name][algorithm_config])
else:
for key, val in algorithm_config.items():
if not isinstance(val, list):
algorithm_config[key] = [val]
tmp_config[algo_and_op_specific_field_name].extend(list(product_dict(algorithm_config)))
op_dict[algorithm_name] = tmp_config
hw_config.append(ad.Dict(op_dict))
return hw_config
@classmethod
def from_json(cls, path):
file_path = Path(path).resolve()
with safe_open(file_path) as f:
json_config = json.load(f, object_pairs_hook=OrderedDict)
return HWConfig.from_dict(json_config)
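    # Illustrative sketch (assumed usage, not quoted from project documentation):
    # the bundled CPU preset could be loaded with
    #   path = HWConfig.get_path_to_hw_config(HWConfigType.CPU)
    #   hw_config = HWConfig.from_json(path)
    # which resolves cpu.json under the package's HW config directory and
    # returns an HWConfig instance built by from_dict().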
@staticmethod
def get_quantization_mode_from_config_value(str_val: str):
if str_val == "symmetric":
return QuantizationMode.SYMMETRIC
if str_val == "asymmetric":
return QuantizationMode.ASYMMETRIC
raise RuntimeError("Invalid quantization type specified in HW config")
@staticmethod
def get_is_per_channel_from_config_value(str_val: str):
if str_val == "perchannel":
return True
if str_val == "pertensor":
return False
raise RuntimeError("Invalid quantization granularity specified in HW config")
@staticmethod
def get_qconf_from_hw_config_subdict(quantization_subdict: Dict):
bits = quantization_subdict["bits"]
mode = HWConfig.get_quantization_mode_from_config_value(quantization_subdict["mode"])
is_per_channel = HWConfig.get_is_per_channel_from_config_value(quantization_subdict["granularity"])
signedness_to_force = None
if 'level_low' in quantization_subdict and 'level_high' in quantization_subdict:
signedness_to_force = False
if mode == QuantizationMode.SYMMETRIC:
if quantization_subdict['level_low'] < 0 < quantization_subdict['level_high']:
signedness_to_force = True
true_level_low, true_level_high, _ = SymmetricQuantizer.calculate_level_ranges(bits, True)
else:
signedness_to_force = True
true_level_low, true_level_high, _ = AsymmetricQuantizer.calculate_level_ranges(bits)
assert quantization_subdict['level_low'] == true_level_low, \
"Invalid value of quantizer parameter `level_low`.\
The parameter must be consistent with other parameters!"
assert quantization_subdict['level_high'] == true_level_high, \
"Invalid value of quantizer parameter `level_high`.\
The parameter must be consistent with other parameters!"
return QuantizerConfig(num_bits=bits,
mode=mode,
per_channel=is_per_channel,
signedness_to_force=signedness_to_force)
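    # Illustrative example (the dict below is an assumption, not an entry quoted
    # from a shipped HW config file): a subdict such as
    #   {"bits": 8, "mode": "symmetric", "granularity": "pertensor"}
    # would be turned by get_qconf_from_hw_config_subdict into roughly
    #   QuantizerConfig(num_bits=8, mode=QuantizationMode.SYMMETRIC,
    #                   per_channel=False, signedness_to_force=None)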
@staticmethod
def is_qconf_list_corresponding_to_unspecified_op(qconf_list: Optional[List[QuantizerConfig]]):
return qconf_list is None
@staticmethod
def is_wildcard_quantization(qconf_list: Optional[List[QuantizerConfig]]):
# Corresponds to an op itself being specified in the HW config, but having no associated quantization
# configs specified
return qconf_list is not None and len(qconf_list) == 0
def get_metatype_vs_quantizer_configs_map(self, for_weights=False) -> Dict[Type['OperatorMetatype'],
Optional[List[QuantizerConfig]]]:
# 'None' for ops unspecified in HW config, empty list for wildcard quantization ops
retval = {k: None for k in OPERATOR_METATYPES.registry_dict.values()}
config_key = "weights" if for_weights else "activations"
for op_dict in self:
hw_config_op_name = op_dict.type # type: HWConfigOpName
metatypes = get_metatypes_by_hw_config_name(hw_config_op_name)
if not metatypes:
warnings.warn("Operation name {} in HW config is not registered in NNCF under any supported operation "
"metatype - will be ignored".format(hw_config_op_name))
if self.QUANTIZATION_ALGORITHM_NAME in op_dict:
allowed_qconfs = op_dict[self.QUANTIZATION_ALGORITHM_NAME][config_key]
else:
allowed_qconfs = []
qconf_list_with_possible_duplicates = []
for hw_config_qconf_dict in allowed_qconfs:
qconf_list_with_possible_duplicates.append(
self.get_qconf_from_hw_config_subdict(hw_config_qconf_dict))
qconf_list = list(OrderedDict.fromkeys(qconf_list_with_possible_duplicates))
for meta in metatypes:
retval[meta] = qconf_list
return retval
def _get_operations_with_attribute_values(self, attribute_name_per_its_value: Dict[str, Any]) -> \
Set[Type['OperatorMetatype']]:
result = set()
for op_dict in self:
if self.ATTRIBUTES_NAME not in op_dict:
continue
for attr_name, attr_value in attribute_name_per_its_value.items():
is_value_matched = op_dict[self.ATTRIBUTES_NAME][attr_name] == attr_value
is_attr_set = attr_name in op_dict[self.ATTRIBUTES_NAME]
if is_value_matched and is_attr_set:
hw_config_op_name = op_dict.type # type: HWConfigOpName
metatypes = get_metatypes_by_hw_config_name(hw_config_op_name)
if not metatypes:
warnings.warn(
"Operation name {} in HW config is not registered in NNCF under any supported "
"operation metatype - will be ignored".format(hw_config_op_name))
result.update(metatypes)
return result
def get_operations_with_unified_scales(self) -> Set[Type['OperatorMetatype']]:
return self._get_operations_with_attribute_values({self.SCALE_ATTRIBUTE_NAME: self.UNIFIED_TYPE_NAME})
def get_operations_with_adjusted_paddings(self) -> Set[Type['OperatorMetatype']]:
return self._get_operations_with_attribute_values({self.ADJUST_PADDING_ATTRIBUTE_NAME: True})
| [
"[email protected]"
] | |
a95071156b455721a03968795e2f8b317dfe27a2 | bc9f66258575dd5c8f36f5ad3d9dfdcb3670897d | /lib/surface/compute/instances/set_scheduling.py | 978e800fe7a236c88bdf266e3ed098e87d9fc1bf | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | google-cloud-sdk-unofficial/google-cloud-sdk | 05fbb473d629195f25887fc5bfaa712f2cbc0a24 | 392abf004b16203030e6efd2f0af24db7c8d669e | refs/heads/master | 2023-08-31T05:40:41.317697 | 2023-08-23T18:23:16 | 2023-08-23T18:23:16 | 335,182,594 | 9 | 2 | NOASSERTION | 2022-10-29T20:49:13 | 2021-02-02T05:47:30 | Python | UTF-8 | Python | false | false | 9,785 | py | # -*- coding: utf-8 -*- #
# Copyright 2014 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command for setting scheduling for virtual machine instances."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.compute import base_classes
from googlecloudsdk.api_lib.compute import instance_utils
from googlecloudsdk.calliope import arg_parsers
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.compute.instances import flags
from googlecloudsdk.command_lib.compute.sole_tenancy import flags as sole_tenancy_flags
from googlecloudsdk.command_lib.compute.sole_tenancy import util as sole_tenancy_util
from googlecloudsdk.core.util import times
@base.ReleaseTracks(base.ReleaseTrack.GA)
class SetSchedulingInstances(base.SilentCommand):
"""Set scheduling options for Compute Engine virtual machines.
*${command}* is used to update scheduling options for VM instances.
You can only call this method on a VM instance that is stopped
(a VM instance in a `TERMINATED` state).
"""
detailed_help = {
'EXAMPLES':
"""
To set instance to be terminated during maintenance, run:
$ {command} example-instance --maintenance-policy=TERMINATE --zone=us-central1-b
"""
}
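  # A further illustrative invocation (assumed for this sketch, not quoted from
  # the official examples): automatic restart can be enabled on a stopped VM via
  # the --restart-on-failure flag registered below, e.g.
  #   $ gcloud compute instances set-scheduling example-instance \
  #       --restart-on-failure --zone=us-central1-b
  # "example-instance" and the zone are placeholder values.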
_support_host_error_timeout_seconds = False
_support_local_ssd_recovery_timeout = True
_support_max_run_duration = False
@classmethod
def Args(cls, parser):
parser.add_argument(
'--restart-on-failure',
action=arg_parsers.StoreTrueFalseAction,
help="""\
The instances will be restarted if they are terminated by Compute
Engine. This does not affect terminations performed by the user.
This option is mutually exclusive with --preemptible.
""")
flags.AddPreemptibleVmArgs(parser, is_update=True)
flags.AddProvisioningModelVmArgs(parser)
flags.AddInstanceTerminationActionVmArgs(parser, is_update=True)
flags.AddMaintenancePolicyArgs(parser)
sole_tenancy_flags.AddNodeAffinityFlagToParser(parser, is_update=True)
flags.INSTANCE_ARG.AddArgument(parser)
flags.AddMinNodeCpuArg(parser, is_update=True)
flags.AddLocalSsdRecoveryTimeoutArgs(parser)
def _Run(self, args):
"""Issues request necessary for setting scheduling options."""
holder = base_classes.ComputeApiHolder(self.ReleaseTrack())
client = holder.client
instance_ref = flags.INSTANCE_ARG.ResolveAsResource(
args,
holder.resources,
scope_lister=flags.GetInstanceZoneScopeLister(client))
scheduling_options = client.messages.Scheduling()
scheduling_options.automaticRestart = args.restart_on_failure
if args.IsSpecified('preemptible'):
scheduling_options.preemptible = args.preemptible
if self._support_host_error_timeout_seconds and hasattr(
args, 'host_error_timeout_seconds'):
scheduling_options.hostErrorTimeoutSeconds = args.host_error_timeout_seconds
if self._support_local_ssd_recovery_timeout and hasattr(
args, 'local_ssd_recovery_timeout') and args.IsSpecified(
'local_ssd_recovery_timeout'):
scheduling_options.localSsdRecoveryTimeout = client.messages.Duration(
seconds=args.local_ssd_recovery_timeout)
if (hasattr(args, 'provisioning_model') and
args.IsSpecified('provisioning_model')):
scheduling_options.provisioningModel = (
client.messages.Scheduling.ProvisioningModelValueValuesEnum(
args.provisioning_model))
cleared_fields = []
if (hasattr(args, 'instance_termination_action') and
args.IsSpecified('instance_termination_action')):
flags.ValidateInstanceScheduling(args, self._support_max_run_duration)
scheduling_options.instanceTerminationAction = (
client.messages.Scheduling.InstanceTerminationActionValueValuesEnum(
args.instance_termination_action))
elif args.IsSpecified('clear_instance_termination_action'):
scheduling_options.instanceTerminationAction = None
cleared_fields.append('instanceTerminationAction')
if args.IsSpecified('min_node_cpu'):
scheduling_options.minNodeCpus = int(args.min_node_cpu)
elif args.IsSpecified('clear_min_node_cpu'):
scheduling_options.minNodeCpus = None
cleared_fields.append('minNodeCpus')
if args.IsSpecified('maintenance_policy'):
scheduling_options.onHostMaintenance = (
client.messages.Scheduling.OnHostMaintenanceValueValuesEnum(
args.maintenance_policy))
if hasattr(args, 'max_run_duration') and args.IsSpecified(
'max_run_duration'
):
scheduling_options.maxRunDuration = client.messages.Duration(
seconds=args.max_run_duration
)
elif hasattr(args, 'clear_max_run_duration') and args.IsSpecified(
'clear_max_run_duration'
):
scheduling_options.maxRunDuration = None
cleared_fields.append('maxRunDuration')
if hasattr(args, 'termination_time') and args.IsSpecified(
'termination_time'
):
scheduling_options.terminationTime = times.FormatDateTime(
args.termination_time
)
elif hasattr(args, 'clear_termination_time') and args.IsSpecified(
'clear_termination_time'
):
scheduling_options.terminationTime = None
cleared_fields.append('terminationTime')
if instance_utils.IsAnySpecified(args, 'node', 'node_affinity_file',
'node_group'):
affinities = sole_tenancy_util.GetSchedulingNodeAffinityListFromArgs(
args, client.messages)
scheduling_options.nodeAffinities = affinities
elif args.IsSpecified('clear_node_affinities'):
scheduling_options.nodeAffinities = []
cleared_fields.append('nodeAffinities')
with holder.client.apitools_client.IncludeFields(cleared_fields):
request = client.messages.ComputeInstancesSetSchedulingRequest(
instance=instance_ref.Name(),
project=instance_ref.project,
scheduling=scheduling_options,
zone=instance_ref.zone)
return client.MakeRequests([(client.apitools_client.instances,
'SetScheduling', request)])
def Run(self, args):
return self._Run(args)
@base.ReleaseTracks(base.ReleaseTrack.BETA)
class SetSchedulingInstancesBeta(SetSchedulingInstances):
"""Set scheduling options for Compute Engine virtual machines.
*${command}* is used to update scheduling options for VM instances.
You can only call this method on a VM instance that is stopped
(a VM instance in a `TERMINATED` state).
"""
_support_host_error_timeout_seconds = True
_support_max_run_duration = True
_support_local_ssd_recovery_timeout = True
@classmethod
def Args(cls, parser):
parser.add_argument(
'--restart-on-failure',
action=arg_parsers.StoreTrueFalseAction,
help="""\
The instances will be restarted if they are terminated by Compute
Engine. This does not affect terminations performed by the user.
This option is mutually exclusive with --preemptible.
""")
flags.AddPreemptibleVmArgs(parser, is_update=True)
flags.AddProvisioningModelVmArgs(parser)
flags.AddInstanceTerminationActionVmArgs(parser, is_update=True)
flags.AddMaintenancePolicyArgs(parser)
sole_tenancy_flags.AddNodeAffinityFlagToParser(parser, is_update=True)
flags.INSTANCE_ARG.AddArgument(parser)
flags.AddMinNodeCpuArg(parser, is_update=True)
flags.AddHostErrorTimeoutSecondsArgs(parser)
flags.AddMaxRunDurationVmArgs(parser, is_update=True)
flags.AddLocalSsdRecoveryTimeoutArgs(parser)
def Run(self, args):
return self._Run(args)
@base.ReleaseTracks(base.ReleaseTrack.ALPHA)
class SetSchedulingInstancesAlpha(SetSchedulingInstancesBeta):
"""Set scheduling options for Compute Engine virtual machines.
*${command}* is used to update scheduling options for VM instances.
You can only call this method on a VM instance that is stopped
(a VM instance in a `TERMINATED` state).
"""
_support_host_error_timeout_seconds = True
_support_local_ssd_recovery_timeout = True
_support_max_run_duration = True
@classmethod
def Args(cls, parser):
parser.add_argument(
'--restart-on-failure',
action=arg_parsers.StoreTrueFalseAction,
help="""\
The instances will be restarted if they are terminated by Compute
Engine. This does not affect terminations performed by the user.
This option is mutually exclusive with --preemptible.
""")
flags.AddPreemptibleVmArgs(parser, is_update=True)
flags.AddProvisioningModelVmArgs(parser)
flags.AddInstanceTerminationActionVmArgs(parser, is_update=True)
# Deprecated in Alpha
flags.AddMaintenancePolicyArgs(parser, deprecate=True)
sole_tenancy_flags.AddNodeAffinityFlagToParser(parser, is_update=True)
flags.INSTANCE_ARG.AddArgument(parser)
flags.AddMinNodeCpuArg(parser, is_update=True)
flags.AddHostErrorTimeoutSecondsArgs(parser)
flags.AddLocalSsdRecoveryTimeoutArgs(parser)
flags.AddMaxRunDurationVmArgs(parser, is_update=True)
| [
"[email protected]"
] | |
8ec24f1b1554727d877bc3dc9f4884c8b5a7f4f7 | eacb726dfb05071fa65877f44960826fb4561af0 | /sqlshare_rest/test/api/permissions.py | 94d9a5e8461143e0a4f8c64a1239dc5ff412df2c | [
"Apache-2.0"
] | permissive | uw-it-aca/sqlshare-rest | 4d629cf13d058b2168c07ad69e451584bf63af49 | e441ce9286a915586a68a0bfa3105f122d6ae18f | refs/heads/master | 2020-04-06T06:30:45.900372 | 2019-09-13T17:32:43 | 2019-09-13T17:32:43 | 31,608,784 | 0 | 1 | Apache-2.0 | 2019-09-13T17:32:44 | 2015-03-03T16:33:59 | Python | UTF-8 | Python | false | false | 30,130 | py | from django.test import TestCase
from unittest2 import skipIf
from django.db import connection
from django.core import mail
import json
from testfixtures import LogCapture
from sqlshare_rest.util.db import get_backend
from sqlshare_rest.test import missing_url
from django.test.utils import override_settings
from django.test.client import Client
from django.core.urlresolvers import reverse
from sqlshare_rest.test.api.base import BaseAPITest
from sqlshare_rest.dao.dataset import create_dataset_from_query, add_public_access
from sqlshare_rest.util.query_queue import process_queue
from sqlshare_rest.util.dataset_emails import send_new_emails
from sqlshare_rest.models import Query
from sqlshare_rest.util.db import is_sqlite3, is_mysql
from sqlshare_rest.models import Dataset, DatasetSharingEmail
@skipIf(missing_url("sqlshare_view_dataset_list"), "SQLShare REST URLs not configured")
@override_settings(MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.RemoteUserMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
),
AUTHENTICATION_BACKENDS = ('django.contrib.auth.backends.ModelBackend',),
SQLSHARE_QUERY_CACHE_DB="test_ss_query_db"
)
class DatasetPermissionsAPITest(BaseAPITest):
def setUp(self):
super(DatasetPermissionsAPITest, self).setUp()
# Try to cleanup from any previous test runs...
self.remove_users = []
self.client = Client()
try:
cursor = connection.cursor()
cursor.execute("DROP DATABASE test_ss_query_db")
except Exception as ex:
pass
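    # Reader note (illustrative, inferred from the tests below rather than from
    # separate API documentation): the permissions endpoint is exercised with
    # JSON payloads shaped like
    #   {"accounts": ["other_login"], "emails": ["[email protected]"]}
    # and its GET responses carry "is_public", "is_shared", "accounts", "emails".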
def test_unauthenticated(self):
url = reverse("sqlshare_view_dataset_permissions", kwargs={"owner":"foo", "name":"bar"})
response = self.client.get(url)
self.assertEquals(response.status_code, 403)
def test_accounts(self):
owner = "permissions_user1"
dataset_name = "ds1"
other_user1 = "permissions_user2"
other_user2 = "permissions_user3"
other_user3 = "permissions_user4"
self.remove_users.append(owner)
self.remove_users.append(other_user1)
self.remove_users.append(other_user2)
self.remove_users.append(other_user3)
backend = get_backend()
backend.get_user(other_user1)
backend.get_user(other_user2)
backend.get_user(other_user3)
ds1 = create_dataset_from_query(owner, dataset_name, "SELECT(1)")
url = reverse("sqlshare_view_dataset", kwargs={ 'owner': owner,
'name': dataset_name})
owner_auth_headers = self.get_auth_header_for_username(owner)
user1_auth_headers = self.get_auth_header_for_username(other_user1)
user2_auth_headers = self.get_auth_header_for_username(other_user2)
user3_auth_headers = self.get_auth_header_for_username(other_user3)
# Test the default situation...
response = self.client.get(url, **owner_auth_headers)
self.assertEquals(response.status_code, 200)
response = self.client.get(url, **user1_auth_headers)
self.assertEquals(response.status_code, 403)
response = self.client.get(url, **user2_auth_headers)
self.assertEquals(response.status_code, 403)
response = self.client.get(url, **user3_auth_headers)
self.assertEquals(response.status_code, 403)
# Test the default state of the permissions api...
with LogCapture() as l:
permissions_url = reverse("sqlshare_view_dataset_permissions", kwargs={'owner':owner, 'name':dataset_name})
response = self.client.get(permissions_url, **owner_auth_headers)
self.assertEquals(response.status_code, 200)
data = json.loads(response.content.decode("utf-8"))
self.assertEquals(data["is_public"], False)
self.assertEquals(data["is_shared"], False)
self.assertEquals(data["accounts"], [])
self.assertEquals(data["emails"], [])
self.assertTrue(self._has_log(l, owner, None, 'sqlshare_rest.views.dataset_permissions', 'INFO', 'GET dataset permissions; owner: permissions_user1; name: ds1'))
# Test round 1 of changes...
new_data = { "accounts": [ other_user1, other_user2 ] }
response = self.client.put(permissions_url, data=json.dumps(new_data), **user1_auth_headers)
self.assertEquals(response.status_code, 403)
with LogCapture() as l:
response = self.client.put(permissions_url, data=json.dumps(new_data), **owner_auth_headers)
self.assertEquals(response.status_code, 200)
self.assertEquals(response.content.decode("utf-8"), "")
self.assertTrue(self._has_log(l, owner, None, 'sqlshare_rest.views.dataset_permissions', 'INFO', 'PUT dataset permissions; owner: permissions_user1; name: ds1; set account: permissions_user2'))
self.assertTrue(self._has_log(l, owner, None, 'sqlshare_rest.views.dataset_permissions', 'INFO', 'PUT dataset permissions; owner: permissions_user1; name: ds1; set account: permissions_user3'))
response = self.client.get(permissions_url, **owner_auth_headers)
self.assertEquals(response.status_code, 200)
data = json.loads(response.content.decode("utf-8"))
self.assertEquals(data["is_public"], False)
self.assertEquals(data["is_shared"], True)
self.assertEquals(data["emails"], [])
accounts = data["accounts"]
lookup = {}
for account in accounts:
lookup[account["login"]] = account
self.assertTrue(other_user1 in lookup)
self.assertTrue(other_user2 in lookup)
self.assertFalse(other_user3 in lookup)
self.assertEquals(lookup[other_user1]["login"], other_user1)
self.assertEquals(lookup[other_user2]["login"], other_user2)
# Make sure they can get the dataset...
response = self.client.get(url, **owner_auth_headers)
self.assertEquals(response.status_code, 200)
data = json.loads(response.content.decode("utf-8"))
self.assertEquals(data["is_shared"], True)
response = self.client.get(url, **user1_auth_headers)
self.assertEquals(response.status_code, 200)
response = self.client.get(permissions_url, **user1_auth_headers)
self.assertEquals(response.status_code, 403)
response = self.client.get(url, **user2_auth_headers)
self.assertEquals(response.status_code, 200)
response = self.client.get(url, **user3_auth_headers)
self.assertEquals(response.status_code, 403)
# Test round 2 of changes... add a new user, drop a user
new_data = { "accounts": [ other_user3, other_user2 ] }
response = self.client.put(permissions_url, data=json.dumps(new_data), **user1_auth_headers)
self.assertEquals(response.status_code, 403)
response = self.client.put(permissions_url, data=json.dumps(new_data), **owner_auth_headers)
self.assertEquals(response.status_code, 200)
self.assertEquals(response.content.decode("utf-8"), "")
response = self.client.get(permissions_url, **owner_auth_headers)
self.assertEquals(response.status_code, 200)
data = json.loads(response.content.decode("utf-8"))
self.assertEquals(data["is_public"], False)
self.assertEquals(data["is_shared"], True)
self.assertEquals(data["emails"], [])
accounts = data["accounts"]
lookup = {}
for account in accounts:
lookup[account["login"]] = account
self.assertTrue(other_user3 in lookup)
self.assertTrue(other_user2 in lookup)
self.assertFalse(other_user1 in lookup)
self.assertEquals(lookup[other_user3]["login"], other_user3)
self.assertEquals(lookup[other_user2]["login"], other_user2)
# Make sure they can get the dataset...
response = self.client.get(url, **owner_auth_headers)
self.assertEquals(response.status_code, 200)
data = json.loads(response.content.decode("utf-8"))
self.assertEquals(data["is_shared"], True)
response = self.client.get(url, **user1_auth_headers)
self.assertEquals(response.status_code, 403)
response = self.client.get(url, **user2_auth_headers)
self.assertEquals(response.status_code, 200)
response = self.client.get(url, **user3_auth_headers)
self.assertEquals(response.status_code, 200)
# Test round 3 of changes... remove all acces
new_data = { "accounts": [] }
response = self.client.put(permissions_url, data=json.dumps(new_data), **user1_auth_headers)
self.assertEquals(response.status_code, 403)
response = self.client.put(permissions_url, data=json.dumps(new_data), **owner_auth_headers)
self.assertEquals(response.status_code, 200)
self.assertEquals(response.content.decode("utf-8"), "")
response = self.client.get(permissions_url, **owner_auth_headers)
self.assertEquals(response.status_code, 200)
data = json.loads(response.content.decode("utf-8"))
self.assertEquals(data["is_public"], False)
self.assertEquals(data["is_shared"], False)
self.assertEquals(data["emails"], [])
self.assertEquals(data["accounts"], [])
# Make sure they can get the dataset...
response = self.client.get(url, **owner_auth_headers)
self.assertEquals(response.status_code, 200)
data = json.loads(response.content.decode("utf-8"))
self.assertEquals(data["is_shared"], False)
response = self.client.get(url, **user1_auth_headers)
self.assertEquals(response.status_code, 403)
response = self.client.get(url, **user2_auth_headers)
self.assertEquals(response.status_code, 403)
response = self.client.get(url, **user3_auth_headers)
self.assertEquals(response.status_code, 403)
def test_emails(self):
owner = "email_permissions_user2"
dataset_name = "ds2"
self.remove_users.append(owner)
owner_auth_headers = self.get_auth_header_for_username(owner)
ds1 = create_dataset_from_query(owner, dataset_name, "SELECT(1)")
# Test the default state of the permissions api...
permissions_url = reverse("sqlshare_view_dataset_permissions", kwargs={'owner':owner, 'name':dataset_name})
response = self.client.get(permissions_url, **owner_auth_headers)
self.assertEquals(response.status_code, 200)
data = json.loads(response.content.decode("utf-8"))
self.assertEquals(data["is_public"], False)
self.assertEquals(data["is_shared"], False)
self.assertEquals(data["accounts"], [])
self.assertEquals(data["emails"], [])
# Add 2 emails:
new_data = { "emails": [ "[email protected]", "[email protected]" ] }
with LogCapture() as l:
response = self.client.put(permissions_url, data=json.dumps(new_data), **owner_auth_headers)
self.assertEquals(response.status_code, 200)
self.assertEquals(response.content.decode("utf-8"), "")
self.assertTrue(self._has_log(l, owner, None, 'sqlshare_rest.views.dataset_permissions', 'INFO', 'PUT dataset permissions; owner: email_permissions_user2; name: ds2'))
self.assertTrue(self._has_log(l, owner, None, 'sqlshare_rest.views.dataset_permissions', 'INFO', 'PUT dataset permissions; owner: email_permissions_user2; name: ds2; set email: [email protected]'))
self.assertTrue(self._has_log(l, owner, None, 'sqlshare_rest.views.dataset_permissions', 'INFO', 'PUT dataset permissions; owner: email_permissions_user2; name: ds2; set email: [email protected]'))
self.assertTrue(self._has_log(l, owner, None, 'sqlshare_rest.views.dataset_permissions', 'INFO', 'PUT dataset permissions finished; owner: email_permissions_user2; name: ds2'))
response = self.client.get(permissions_url, **owner_auth_headers)
self.assertEquals(response.status_code, 200)
data = json.loads(response.content.decode("utf-8"))
self.assertEquals(data["is_public"], False)
self.assertEquals(data["is_shared"], True)
self.assertEquals(data["accounts"], [])
emails = data["emails"]
lookup = {}
for email in emails:
lookup[email] = True
self.assertEquals(lookup, { "[email protected]": True, "[email protected]": True })
# Change the 2 emails, keeping 1 the same...
new_data = { "emails": [ "[email protected]", "[email protected]" ] }
response = self.client.put(permissions_url, data=json.dumps(new_data), **owner_auth_headers)
self.assertEquals(response.status_code, 200)
self.assertEquals(response.content.decode("utf-8"), "")
response = self.client.get(permissions_url, **owner_auth_headers)
self.assertEquals(response.status_code, 200)
data = json.loads(response.content.decode("utf-8"))
self.assertEquals(data["is_public"], False)
self.assertEquals(data["is_shared"], True)
self.assertEquals(data["accounts"], [])
emails = data["emails"]
lookup = {}
for email in emails:
lookup[email] = True
self.assertEquals(lookup, { "[email protected]": True, "[email protected]": True })
# Drop all emails...
new_data = { "emails": [] }
response = self.client.put(permissions_url, data=json.dumps(new_data), **owner_auth_headers)
self.assertEquals(response.status_code, 200)
self.assertEquals(response.content.decode("utf-8"), "")
response = self.client.get(permissions_url, **owner_auth_headers)
self.assertEquals(response.status_code, 200)
data = json.loads(response.content.decode("utf-8"))
self.assertEquals(data["is_public"], False)
self.assertEquals(data["is_shared"], False)
self.assertEquals(data["accounts"], [])
self.assertEquals(data["emails"], [])
def test_send_emails(self):
owner = "email_permissions_user3"
dataset_name = "ds3"
self.remove_users.append(owner)
owner_obj = get_backend().get_user(owner)
owner_auth_headers = self.get_auth_header_for_username(owner)
ds1 = create_dataset_from_query(owner, dataset_name, "SELECT(1)")
# Add 2 emails:
permissions_url = reverse("sqlshare_view_dataset_permissions", kwargs={'owner':owner, 'name':dataset_name})
new_data = { "emails": [ "[email protected]"] }
response = self.client.put(permissions_url, data=json.dumps(new_data), **owner_auth_headers)
self.assertEquals(response.status_code, 200)
self.assertEquals(response.content.decode("utf-8"), "")
# empty out the memory outbox:
mail.outbox = []
# Now make sure we send 1 email
send_new_emails()
self.assertEquals(len(mail.outbox), 1)
obj = Dataset.objects.get(owner=owner_obj, name=dataset_name)
sharing = DatasetSharingEmail.objects.filter(dataset=obj)[0]
self.assertEquals(mail.outbox[0].to, ["[email protected]"])
self.assertEquals(mail.outbox[0].from_email, "[email protected]")
self.assertTrue(mail.outbox[0].body.find(sharing.access_token) > 0)
new_data = { "emails": [ "[email protected]"] }
response = self.client.put(permissions_url, data=json.dumps(new_data), **owner_auth_headers)
# Make sure we send a new email
send_new_emails()
self.assertEquals(len(mail.outbox), 2)
new_data = { "emails": [ "[email protected]", "[email protected]"] }
response = self.client.put(permissions_url, data=json.dumps(new_data), **owner_auth_headers)
# Make sure we send a replacement email for user1
send_new_emails()
self.assertEquals(len(mail.outbox), 3)
# Now make sure we don't send any more emails:
send_new_emails()
self.assertEquals(len(mail.outbox), 3)
def test_preview_table_permissions(self):
# We need to process the preview query - purge any existing queries
# to make sure we process ours.
Query.objects.all().delete()
owner = "permissions_preview_user1"
dataset_name = "ds4"
other_user1 = "permissions_preview_user2"
self.remove_users.append(owner)
self.remove_users.append(other_user1)
backend = get_backend()
backend.get_user(other_user1)
ds1 = create_dataset_from_query(owner, dataset_name, "SELECT(1)")
url = reverse("sqlshare_view_dataset", kwargs={ 'owner': owner,
'name': dataset_name})
permissions_url = reverse("sqlshare_view_dataset_permissions", kwargs={'owner':owner, 'name':dataset_name})
owner_auth_headers = self.get_auth_header_for_username(owner)
user1_auth_headers = self.get_auth_header_for_username(other_user1)
query = Query.objects.all()[0]
remove_pk = query.pk
process_queue()
new_data = { "accounts": [ other_user1 ] }
response = self.client.put(permissions_url, data=json.dumps(new_data), **owner_auth_headers)
response = self.client.get(url, **user1_auth_headers)
self.assertEquals(response.status_code, 200)
data = json.loads(response.content.decode("utf-8"))
self.assertEquals(data["sample_data"], [[1]])
def test_preview_table_permissions_pre_process(self):
# We need to process the preview query - purge any existing queries
# to make sure we process ours.
Query.objects.all().delete()
owner = "permissions_preview_user5"
dataset_name = "ds5"
other_user1 = "permissions_preview_user6"
self.remove_users.append(owner)
self.remove_users.append(other_user1)
backend = get_backend()
backend.get_user(other_user1)
ds1 = create_dataset_from_query(owner, dataset_name, "SELECT(1)")
url = reverse("sqlshare_view_dataset", kwargs={ 'owner': owner,
'name': dataset_name})
permissions_url = reverse("sqlshare_view_dataset_permissions", kwargs={'owner':owner, 'name':dataset_name})
owner_auth_headers = self.get_auth_header_for_username(owner)
user1_auth_headers = self.get_auth_header_for_username(other_user1)
new_data = { "accounts": [ other_user1 ] }
response = self.client.put(permissions_url, data=json.dumps(new_data), **owner_auth_headers)
# Test that we get a 200 while the preview is being built
response = self.client.get(url, **user1_auth_headers)
self.assertEquals(response.status_code, 200)
data = json.loads(response.content.decode("utf-8"))
self.assertEquals(data["sample_data_status"], "working")
query = Query.objects.all()[0]
remove_pk = query.pk
process_queue()
# Test that permission was added after the query is finished.
response = self.client.get(url, **user1_auth_headers)
self.assertEquals(response.status_code, 200)
data = json.loads(response.content.decode("utf-8"))
self.assertEquals(data["sample_data"], [[1]])
def test_preview_table_permissions_public(self):
# We need to process the preview query - purge any existing queries
# to make sure we process ours.
Query.objects.all().delete()
owner = "permissions_preview_user7"
dataset_name = "ds6"
other_user1 = "permissions_preview_user8"
self.remove_users.append(owner)
self.remove_users.append(other_user1)
backend = get_backend()
backend.get_user(other_user1)
ds1 = create_dataset_from_query(owner, dataset_name, "SELECT(1)")
url = reverse("sqlshare_view_dataset", kwargs={ 'owner': owner,
'name': dataset_name})
owner_auth_headers = self.get_auth_header_for_username(owner)
user1_auth_headers = self.get_auth_header_for_username(other_user1)
add_public_access(ds1)
# Test that we get a 200 while the preview is being built
response = self.client.get(url, **user1_auth_headers)
self.assertEquals(response.status_code, 200)
data = json.loads(response.content.decode("utf-8"))
self.assertEquals(data["sample_data_status"], "working")
query = Query.objects.all()[0]
remove_pk = query.pk
process_queue()
# Test that permission was added after the query is finished.
response = self.client.get(url, **user1_auth_headers)
self.assertEquals(response.status_code, 200)
data = json.loads(response.content.decode("utf-8"))
self.assertEquals(data["sample_data"], [[1]])
def test_public_to_shared(self):
owner = "permissions_xpublic_user1"
other_user1 = "permissions_xpublic_user2"
dataset_name = "ds7"
self.remove_users.append(owner)
self.remove_users.append(other_user1)
backend = get_backend()
backend.get_user(other_user1)
ds1 = create_dataset_from_query(owner, dataset_name, "SELECT(1)")
permissions_url = reverse("sqlshare_view_dataset_permissions", kwargs={'owner':owner, 'name':dataset_name})
add_public_access(ds1)
owner_auth_headers = self.get_auth_header_for_username(owner)
new_data = { "accounts": [ other_user1 ], "is_public": False }
response = self.client.put(permissions_url, data=json.dumps(new_data), **owner_auth_headers)
self.assertEquals(response.status_code, 200)
response = self.client.get(permissions_url, **owner_auth_headers)
self.assertEquals(response.status_code, 200)
data = json.loads(response.content.decode("utf-8"))
self.assertEquals(data["is_public"], False)
self.assertEquals(data["is_shared"], True)
self.assertEquals(data["emails"], [])
self.assertEquals(data["accounts"], [{'login': 'permissions_xpublic_user2'}])
def test_sharing_tokens(self):
owner = "permissions_token_user1"
other = "permissions_token_taker"
other2 = "permissions_token_taker2"
dataset_name = "ds8"
self.remove_users.append(owner)
self.remove_users.append(other)
self.remove_users.append(other2)
backend = get_backend()
owner_obj = backend.get_user(owner)
backend.get_user(other)
backend.get_user(other2)
ds1 = create_dataset_from_query(owner, dataset_name, "SELECT(1)")
owner_auth_headers = self.get_auth_header_for_username(owner)
other_auth_headers = self.get_auth_header_for_username(other)
other_auth_headers2 = self.get_auth_header_for_username(other2)
permissions_url = reverse("sqlshare_view_dataset_permissions", kwargs={'owner':owner, 'name':dataset_name})
new_data = { "emails": [ "[email protected]" ] }
response = self.client.put(permissions_url, data=json.dumps(new_data), **owner_auth_headers)
self.assertEquals(response.status_code, 200)
obj = Dataset.objects.get(owner=owner_obj, name=dataset_name)
sharing = DatasetSharingEmail.objects.filter(dataset=obj)[0]
email = sharing.email
access_token1 = sharing.access_token
self.assertEquals(email.email, "[email protected]")
# Clear the emails, then put the same one back - make sure we get a
# different token
new_data = { "emails": [] }
response = self.client.put(permissions_url, data=json.dumps(new_data), **owner_auth_headers)
obj = Dataset.objects.get(owner=owner_obj, name=dataset_name)
self.assertEquals(len(DatasetSharingEmail.objects.filter(dataset=obj)), 0)
new_data = { "emails": [ "[email protected]" ] }
response = self.client.put(permissions_url, data=json.dumps(new_data), **owner_auth_headers)
self.assertEquals(response.status_code, 200)
obj = Dataset.objects.get(owner=owner_obj, name=dataset_name)
sharing = DatasetSharingEmail.objects.filter(dataset=obj)[0]
email = sharing.email
self.assertEquals(email.email, "[email protected]")
access_token2 = sharing.access_token
self.assertNotEqual(access_token1, access_token2)
# Make sure that token 1 doesn't give access
token1_url = reverse("sqlshare_token_access", kwargs={"token": access_token1})
response = self.client.post(token1_url, data={}, **other_auth_headers)
self.assertEquals(response.status_code, 404)
# Make sure that token 2 does give access
token2_url = reverse("sqlshare_token_access", kwargs={"token": access_token2})
response = self.client.post(token2_url, data={}, **other_auth_headers)
self.assertEquals(response.status_code, 200)
data = json.loads(response.content.decode("utf-8"))
self.assertEquals(data["owner"], "permissions_token_user1")
self.assertEquals(data["name"], "ds8")
# the token is reusable - if someone emails a mailing list, say:
response = self.client.post(token2_url, data={}, **other_auth_headers2)
self.assertEquals(response.status_code, 200)
# Make sure if we try to add the user a second time, nothing weird happens
token2_url = reverse("sqlshare_token_access", kwargs={"token": access_token2})
response = self.client.post(token2_url, data={}, **other_auth_headers)
self.assertEquals(response.status_code, 200)
# Make sure that if we add the owner this way, they don't end up in the list
token2_url = reverse("sqlshare_token_access", kwargs={"token": access_token2})
response = self.client.post(token2_url, data={}, **owner_auth_headers)
self.assertEquals(response.status_code, 200)
# Now, make sure the email is still in the permissions api document,
# But also the 2 new users.
response = self.client.get(permissions_url, **owner_auth_headers)
data = json.loads(response.content.decode("utf-8"))
accounts = list(map(lambda x: x["login"], data["accounts"]))
self.assertEquals(len(accounts), 2)
self.assertTrue(other in accounts)
self.assertTrue(other2 in accounts)
emails = data["emails"]
self.assertEquals(emails, ["[email protected]"])
def test_flat_auth_list(self):
owner = "permissions_flat_user1"
dataset_name = "ds_flat1"
other_user1 = "permissions_flat_user2"
other_user2 = "permissions_flat_user3"
self.remove_users.append(owner)
self.remove_users.append(other_user1)
self.remove_users.append(other_user2)
backend = get_backend()
backend.get_user(other_user1)
backend.get_user(other_user2)
ds1 = create_dataset_from_query(owner, dataset_name, "SELECT(1)")
permissions_url = reverse("sqlshare_view_dataset_permissions", kwargs={'owner':owner, 'name':dataset_name})
new_data = { "authlist": [ other_user1, other_user2, "[email protected]", "not_email_but_whatever"] }
owner_auth_headers = self.get_auth_header_for_username(owner)
response = self.client.put(permissions_url, data=json.dumps(new_data), **owner_auth_headers)
self.assertEquals(response.status_code, 200)
self.assertEquals(response.content.decode("utf-8"), "")
response = self.client.get(permissions_url, **owner_auth_headers)
self.assertEquals(response.status_code, 200)
data = json.loads(response.content.decode("utf-8"))
self.assertEquals(data["is_public"], False)
self.assertEquals(data["is_shared"], True)
accounts = data["accounts"]
lookup = {}
for account in accounts:
lookup[account["login"]] = True
self.assertEquals(lookup, { "permissions_flat_user2": True, "permissions_flat_user3": True })
lookup = {}
emails = data["emails"]
for email in emails:
lookup[email] = True
self.assertEquals(lookup, { "[email protected]": True, "not_email_but_whatever": True })
# empty out the memory outbox:
mail.outbox = []
# Now make sure we send 1 email
send_new_emails()
# empty out the memory outbox:
mail.outbox = []
@classmethod
def setUpClass(cls):
super(DatasetPermissionsAPITest, cls).setUpClass()
def _run_query(sql):
cursor = connection.cursor()
try:
cursor.execute(sql)
except Exception as ex:
# Hopefully all of these will fail, so ignore the failures
pass
# This is just an embarrassing list of things to cleanup if something fails.
# It gets added to when something like this blocks one of my test runs...
_run_query("drop login permissions_preview_user8")
_run_query("drop login permissions_preview_user2")
_run_query("drop login permissions_preview_user5")
_run_query("drop login permissions_preview_user6")
_run_query("drop login permissions_preview_user7")
_run_query("drop login permissions_token_user1")
_run_query("drop login permissions_xpublic_user1")
_run_query("drop login permissions_user1")
_run_query("drop login email_permissions_user2")
| [
"[email protected]"
] | |
0bcde7ba149facae00a9e3c782315ca4ab3be275 | 637d2b471ab26a683cf67b259c58d2f9318a1bf2 | /McUtils/Coordinerds/CoordinateSystems/ZMatrixToCartesian.py | 790c2e356f7ad998828695adb0844d7da096249b | [
"MIT"
] | permissive | McCoyGroup/McUtils | 9c1c3befcef88d6094961e23a894efb4d97c84b1 | c7c3910e7cb5105c65b01ecb17a6668d126b2063 | refs/heads/master | 2023-08-18T16:24:17.718849 | 2023-08-11T23:10:21 | 2023-08-11T23:10:21 | 188,920,933 | 0 | 2 | MIT | 2022-12-15T19:17:02 | 2019-05-27T23:29:24 | Python | UTF-8 | Python | false | false | 8,885 | py | from .CoordinateSystemConverter import CoordinateSystemConverter
from .CommonCoordinateSystems import CartesianCoordinates3D, ZMatrixCoordinates
from ...Numputils import *
import numpy as np
class ZMatrixToCartesianConverter(CoordinateSystemConverter):
"""
A converter class for going from ZMatrix coordinates to Cartesian coordinates
"""
@property
def types(self):
return (ZMatrixCoordinates, CartesianCoordinates3D)
def default_ordering(self, coordlist):
if coordlist.shape[-1] == 6:
ordering = coordlist[:, :, (0, 2, 4)]
coordlist = coordlist[:, :, (1, 3, 5)]
else:
r = np.arange(len(coordlist[0]))
ordering = np.broadcast_to(
np.array([r, np.roll(r, 1), np.roll(r, 2)]).T[np.newaxis],
coordlist.shape[:2] + (3,)
)
return ordering, coordlist
def convert_many(self,
coordlist,
ordering=None, origins=None, axes=None, use_rad=True,
return_derivs=False,
**kw
):
"""Expects to get a list of configurations
These will look like:
[
[dist, angle, dihedral]
...
]
and ordering will be
[
[pos, point, line, plane]
...
]
**For efficiency it is assumed that all configurations have the same length**
:param coordlist:
:type coordlist:
:param origins:
:type origins:
:param axes:
:type axes:
:param use_rad:
:type use_rad:
:param kw:
:type kw:
:param ordering:
:type ordering:
:param return_derivs:
:type return_derivs:
:return:
:rtype:
"""
# make sure we have the ordering stuff in hand
if ordering is None:
ordering, coordlist = self.default_ordering(coordlist)
else:
ordering = np.array(ordering)
coordlist = np.asarray(coordlist)
if np.min(ordering) > 0:
ordering = ordering - 1
dim_diff = coordlist.ndim - ordering.ndim
if dim_diff > 0:
missing = coordlist.shape[:dim_diff]
ordering = np.broadcast_to(ordering, missing + ordering.shape )
if ordering.shape[-1] > 3:
atom_ordering = ordering[:, :, 0]
ordering = ordering[:, 1:, 1:]
else:
atom_ordering = None
sysnum = len(coordlist)
coordnum = len(coordlist[0])
total_points = np.empty((sysnum, coordnum+1, 3))
if return_derivs is not True and return_derivs is not False and isinstance(return_derivs, int):
return_derivs = True
return_deriv_order = return_derivs
elif return_derivs:
return_deriv_order = 2
if return_derivs:
derivs = [
                None, # no need to store a copy of total_points here...
np.zeros((sysnum, coordnum, 3, coordnum + 1, 3)),
np.zeros((sysnum, coordnum, 3, coordnum, 3, coordnum + 1, 3))
]
        # first we put the origin wherever the origins are specified
if origins is None:
origins = [0, 0, 0]
origins = np.asarray(origins)
if len(origins.shape) < 2:
origins = np.broadcast_to(origins, (sysnum, 3))
total_points[:, 0] = origins
# set up the next points by just setting them along the x-axis by default
if axes is None:
axes = [1, 0, 0]
axes = np.asarray(axes)
if axes.ndim == 1:
axes = np.array([
axes,
[0, 1, 0]
]) # np.concatenate((np.random.uniform(low=.5, high=1, size=(2,)), np.zeros((1,)) ))])
if axes.ndim == 2:
axes = np.broadcast_to(axes[np.newaxis], (sysnum, 2, 3))
x_pts = origins + vec_normalize(axes[:, 0])
y_pts = origins + vec_normalize(axes[:, 1])
dists = coordlist[:, 0, 0]
if return_derivs:
der_stuff = cartesian_from_rad_derivatives(origins,
x_pts, y_pts, dists,
None, None,
0,
np.full((len(dists),), -1, dtype=int),
np.full((len(dists),), -1, dtype=int),
np.full((len(dists),), -1, dtype=int),
derivs,
order=return_deriv_order
)
total_points[:, 1] = der_stuff[0]
if return_deriv_order > 0:
derivs[1][np.arange(sysnum), :1, :, 1, :] = der_stuff[1]
if return_deriv_order > 1:
derivs[2][np.arange(sysnum), :1, :, :1, :, 1, :] = der_stuff[2]
else:
ref_points_1, _ = cartesian_from_rad(origins, x_pts, y_pts, dists, None, None)
total_points[:, 1] = ref_points_1
# print(">> z2c >> ordering", ordering[0])
        # iteratively build the rest of the coords with one special case for n=2
for i in range(1, coordnum):
# Get the distances away
ref_coords1 = ordering[:, i, 0] # reference atom numbers for first coordinate
refs1 = total_points[np.arange(sysnum), ref_coords1.astype(int)] # get the actual reference coordinates
dists = np.reshape(coordlist[:, i, 0], (sysnum, 1)) # pull the requisite distances
ref_coords2 = ordering[:, i, 1] # reference atom numbers for second coordinate
refs2 = total_points[np.arange(sysnum), ref_coords2.astype(int)] # get the actual reference coordinates for the angle
angle = coordlist[:, i, 1] # pull the requisite angle values
if not use_rad:
angle = np.deg2rad(angle)
if i == 1:
refs3 = y_pts
dihed = None
ref_coords3 = np.full((len(dists),), -1, dtype=int)
psi_flag = False
else:
ref_coords3 = ordering[:, i, 2] # reference atom numbers for dihedral ref coordinate
refs3 = total_points[np.arange(sysnum), ref_coords3.astype(int)] # get the actual reference coordinates for the dihed
dihed = coordlist[:, i, 2] # pull proper dihedral values
if not use_rad:
dihed = np.deg2rad(dihed)
if ordering.shape[-1] == 4:
raise ValueError("Unclear if there is a difference between tau and psi")
psi_flag = ordering[:, i, 3] == 1
# dihed[psi_flag] = -dihed[psi_flag]
else:
psi_flag = False
if return_derivs:
if ordering.shape[-1] == 4:
raise NotImplementedError("don't have derivatives for case with psi angles")
der_stuff = cartesian_from_rad_derivatives(
refs1, refs2, refs3,
dists, angle, dihed,
i,
ref_coords1,
ref_coords2,
ref_coords3,
derivs,
order=return_deriv_order
)
# crd, d1, d2 = stuff
total_points[:, i+1] = der_stuff[0]
if return_deriv_order > 0:
derivs[1][np.arange(sysnum), :i+1, :, i+1, :] = der_stuff[1]
if return_deriv_order > 1:
derivs[2][np.arange(sysnum), :i+1, :, :i+1, :, i+1, :] = der_stuff[2]
else:
ref_points_1, _ = cartesian_from_rad(refs1, refs2, refs3, dists, angle, dihed, psi=psi_flag)
total_points[:, i+1] = ref_points_1
if atom_ordering is not None:
rev_ord = atom_ordering#np.argsort(atom_ordering, axis=1)
total_points = total_points[np.arange(len(atom_ordering))[:, np.newaxis], rev_ord] #wat?
converter_opts = dict(use_rad=use_rad, ordering=ordering)
if return_derivs:
if return_deriv_order > 0:
converter_opts['derivs'] = derivs[1:][:return_deriv_order]
return total_points, converter_opts
def convert(self, coords, **kw):
"""dipatches to convert_many but only pulls the first"""
total_points, opts = self.convert_many(coords[np.newaxis], **kw)
return total_points[0], opts
__converters__ = [ ZMatrixToCartesianConverter() ]
| [
"[email protected]"
] | |
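A minimal usage sketch for the converter above, assuming ZMatrixToCartesianConverter is in scope as defined in the file; the distances, angles, ordering indices and expected output shape are illustrative assumptions based only on convert_many's docstring and defaults:
import numpy as np
conv = ZMatrixToCartesianConverter()
zmat = np.array([[[0.96, 0.00, 0.00],    # atom 1: [dist, angle, dihedral]; only dist is used
                  [0.96, 1.82, 0.00],    # atom 2: dist + angle (radians, since use_rad=True)
                  [1.10, 1.82, 3.14]]])  # atom 3: dist + angle + dihedral
ordering = np.array([[[0, 0, 0],
                      [1, 0, 0],
                      [2, 1, 0]]])       # per-row [point, line, plane] reference-atom indices
carts, opts = conv.convert_many(zmat, ordering=ordering)
print(carts.shape)                       # expected: (1, 4, 3) -> one system, four atoms, xyz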
1a254cdc13044408437afdc922e0f764e45c5795 | 0e9fad9c000430a735e10568644dc3e0c6a1de54 | /curriculum_ctvt/input_mistakes.py | f578c6927d70e2f14f213a7c2b373d114132797e | [] | no_license | pedal-edu/curriculum-ctvt | 54d489926f366b486a3e5663de444221a5924f92 | 2f8472627b9adceb90466f206f1131fdecc3a2e5 | refs/heads/master | 2023-04-01T20:17:58.542300 | 2021-03-25T13:54:51 | 2021-03-25T13:54:51 | 276,709,305 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 671 | py | from pedal.core.commands import gently, explain
from pedal.cait.cait_api import *
def unnecessary_cast(needed_casts):
"""
Args:
needed_casts: List of casts that are necessary to this problem
Returns:
"""
message = "Converting to {} is unnecessary in this problem"
code = "ex_cast"
tldr = "Unnecessary Conversion"
known_casts = ["float", "int", "str"]
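    # CAIT pattern below: "_cast_" captures the name of whatever is being called, "___" matches any argument expression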
matches = find_matches("_cast_(___)")
for match in matches:
user_cast = match["_cast_"].id
if user_cast not in needed_casts and user_cast in known_casts:
return explain(message.format(user_cast), label=code, title=tldr)
return False
| [
"[email protected]"
] | |
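A minimal sketch of calling this check from a grading script, assuming pedal has already been set up with the student submission elsewhere:
unnecessary_cast(needed_casts=["int"])   # would flag float(...) or str(...) conversions as unnecessary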
1c459705148d5b935a6f2166345b8b6e897b9b97 | 763774bbcd6aa6adf64bde5fbe9521a937785362 | /tests/test_concise_keras.py | 6203e9afeee825596836ae6f6bce515552396ef0 | [
"MIT"
] | permissive | morphinggen/concise | 969075dfbed99071fae53b0cba637bb5c25e3359 | 12078d75f37fe176bb7d221134b8b14aeb48e11f | refs/heads/master | 2022-04-28T03:11:08.606943 | 2020-04-15T19:19:34 | 2020-04-15T19:19:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,079 | py | """
test_concise_keras
----------------------------------
Tests for `concise_keras` module
"""
import keras
from keras.models import model_from_json
from concise.legacy.models import single_layer_pos_effect as concise_model
from concise.eval_metrics import mse
from sklearn.linear_model import LinearRegression
import pytest
from tests.setup_concise_load_data import load_example_data
import numpy as np
def test_serialization():
c = concise_model(init_motifs=["TAATA", "TGCGAT"],
pooling_layer="sum",
n_splines=10,
)
js = c.to_json()
assert isinstance(model_from_json(js), keras.models.Model)
def test_serialization_disk(tmpdir):
param, X_feat, X_seq, y, id_vec = load_example_data()
dc = concise_model(pooling_layer="sum",
init_motifs=["TGCGAT", "TATTTAT"],
n_splines=10,
n_covariates=X_feat.shape[1],
seq_length=X_seq.shape[1],
**param)
dc.fit([X_seq, X_feat], y, epochs=1,
validation_data=([X_seq, X_feat], y))
fn = tmpdir.mkdir('data').join('test_keras.h5')
dc.save(str(fn))
dc = keras.models.load_model(str(fn))
assert isinstance(dc, keras.models.Model)
class TestKerasConciseBasic(object):
@classmethod
def setup_class(cls):
cls.data = load_example_data()
# pass
def test_no_error(self):
# test the nice print:
param, X_feat, X_seq, y, id_vec = self.data
dc = concise_model(pooling_layer="max",
n_covariates=X_feat.shape[1],
seq_length=X_seq.shape[1],
**param)
dc.fit([X_seq, X_feat], y, epochs=1,
validation_data=([X_seq, X_feat], y))
y_pred = dc.predict([X_seq, X_feat])
y_pred
def test_train_predict_no_X_feat(self):
# test the nice print:
param, X_feat, X_seq, y, id_vec = self.data
dc = concise_model(pooling_layer="max",
n_covariates=0,
seq_length=X_seq.shape[1],
**param)
dc.fit(X_seq, y, epochs=1,
validation_data=(X_seq, y))
y_pred = dc.predict(X_seq)
y_pred
@classmethod
def teardown_class(cls):
pass
class TestMultiTaskLearning(TestKerasConciseBasic):
"""
Test multi-task learning
"""
@classmethod
def setup_class(cls):
cls.data = load_example_data(num_tasks=3)
class TestConcisePrediction(object):
@classmethod
def setup_class(cls):
cls.data = load_example_data(trim_seq_len=1, standardize_features=False)
cls.data[0]["n_motifs"] = 1
cls.data[0]["motif_length"] = 1
cls.data[0]["step_size"] = 0.001
cls.data[0]["early_stop_patience"] = 3
def test_non_std(self):
# test the nice print:
param, X_feat, X_seq, y, id_vec = self.data
dc = concise_model(pooling_layer="max",
n_covariates=X_feat.shape[1],
lambd=0,
seq_length=X_seq.shape[1],
**param)
callback = keras.callbacks.EarlyStopping(patience=param["early_stop_patience"])
dc.fit([X_seq, X_feat], y, epochs=50,
callbacks=[callback],
validation_data=([X_seq, X_feat], y))
dc_coef = dc.layers[-1].get_weights()[0][-X_feat.shape[1]:, 0]
lm = LinearRegression()
lm.fit(X_feat, y)
# np.allclose(lm.coef_, dc_coef, atol=0.02)
# # weights has to be the same as for linear regression
# (dc_coef - lm.coef_) / lm.coef_
# they both have to predict the same
y_pred = dc.predict([X_seq, X_feat])
mse_lm = mse(y, lm.predict(X_feat))
mse_dc = mse(y, y_pred)
print("mse_lm")
print(mse_lm)
print("mse_dc")
print(mse_dc)
assert mse_dc < mse_lm + 0.01
| [
"[email protected]"
] | |
a66f9c41196e481531527bb60b25dca5cff97b40 | 1dacbf90eeb384455ab84a8cf63d16e2c9680a90 | /lib/python2.7/site-packages/numba/tests/test_lists.py | 5e10c7df5ba329a8e6492e7b5fde5844df12789a | [
"Python-2.0",
"Apache-2.0",
"BSD-3-Clause",
"LicenseRef-scancode-unknown"
] | permissive | wangyum/Anaconda | ac7229b21815dd92b0bd1c8b7ec4e85c013b8994 | 2c9002f16bb5c265e0d14f4a2314c86eeaa35cb6 | refs/heads/master | 2022-10-21T15:14:23.464126 | 2022-10-05T12:10:31 | 2022-10-05T12:10:31 | 76,526,728 | 11 | 10 | Apache-2.0 | 2022-10-05T12:10:32 | 2016-12-15T05:26:12 | Python | UTF-8 | Python | false | false | 23,959 | py | from __future__ import print_function
from collections import namedtuple
import contextlib
import itertools
import math
import sys
from numba.compiler import compile_isolated, Flags
from numba import jit, types
import numba.unittest_support as unittest
from numba import testing
from .support import TestCase, MemoryLeakMixin, tag
enable_pyobj_flags = Flags()
enable_pyobj_flags.set("enable_pyobject")
force_pyobj_flags = Flags()
force_pyobj_flags.set("force_pyobject")
Point = namedtuple('Point', ('a', 'b'))
def noop(x):
pass
def unbox_usecase(x):
"""
Expect a list of numbers
"""
res = 0
for v in x:
res += v
return res
def unbox_usecase2(x):
"""
Expect a list of tuples
"""
res = 0
for v in x:
res += len(v)
return res
def unbox_usecase3(x):
"""
Expect a (number, list of numbers) tuple.
"""
a, b = x
res = a
for v in b:
res += v
return res
def unbox_usecase4(x):
"""
Expect a (number, list of tuples) tuple.
"""
a, b = x
res = a
for v in b:
res += len(v)
return res
def create_list(x, y, z):
return [x, y, z]
def create_nested_list(x, y, z, a, b, c):
return [[x, y, z], [a, b, c]]
def list_comprehension1():
return sum([x**2 for x in range(10)])
def list_comprehension2():
return sum([x for x in range(10) if x % 2 == 0])
def list_comprehension3():
return sum([math.pow(x, 2) for x in range(10)])
def list_comprehension4():
return sum([x * y for x in range(10) for y in range(10)])
def list_comprehension5():
return [x * 2 for x in range(10)]
def list_comprehension6():
return [[x for x in range(y)] for y in range(3)]
def list_constructor(n):
return list(range(n))
def list_append(n):
l = []
l.append(42)
for i in range(n):
l.append(i)
return l
def list_append_heterogenous(n):
l = []
l.append(42.0)
for i in range(n):
l.append(i)
return l
def list_extend(n):
l = []
# A non-list iterable and a list
l.extend(range(n))
l.extend(l[:-1])
l.extend(range(n, 0, -1))
return l
def list_extend_heterogenous(n):
l = []
# Extend with various iterables, including lists, with different types
l.extend(range(n))
l.extend(l[:-1])
l.extend((5, 42))
l.extend([123.0])
return l
def list_pop0(n):
l = list(range(n))
res = 0
while len(l) > 0:
res += len(l) * l.pop()
return res
def list_pop1(n, i):
l = list(range(n))
x = l.pop(i)
return x, l
def list_len(n):
l = list(range(n))
return len(l)
def list_getitem(n):
l = list(range(n))
res = 0
# Positive indices
for i in range(len(l)):
res += i * l[i]
# Negative indices
for i in range(-len(l), 0):
res -= i * l[i]
return res
def list_setitem(n):
l = list(range(n))
res = 0
# Positive indices
for i in range(len(l)):
l[i] = i * l[i]
# Negative indices
for i in range(-len(l), 0):
l[i] = i * l[i]
for i in range(len(l)):
res += l[i]
return res
def list_getslice2(n, start, stop):
l = list(range(n))
return l[start:stop]
def list_getslice3(n, start, stop, step):
l = list(range(n))
return l[start:stop:step]
def list_setslice2(n, n_source, start, stop):
# Generic setslice with size change
l = list(range(n))
v = list(range(100, 100 + n_source))
l[start:stop] = v
return l
def list_setslice3(n, start, stop, step):
l = list(range(n))
v = l[start:stop:step]
for i in range(len(v)):
v[i] += 100
l[start:stop:step] = v
return l
def list_setslice3_arbitrary(n, n_src, start, stop, step):
l = list(range(n))
l[start:stop:step] = list(range(100, 100 + n_src))
return l
def list_delslice0(n):
l = list(range(n))
del l[:]
return l
def list_delslice1(n, start, stop):
l = list(range(n))
del l[start:]
del l[:stop]
return l
def list_delslice2(n, start, stop):
l = list(range(n))
del l[start:stop]
return l
def list_clear(n):
l = list(range(n))
l.clear()
return l
def list_copy(n):
l = list(range(n))
ll = l.copy()
l.append(42)
return l, ll
def list_iteration(n):
l = list(range(n))
res = 0
for i, v in enumerate(l):
res += i * v
return res
def list_contains(n):
l = list(range(n))
return (0 in l, 1 in l, n - 1 in l, n in l)
def list_index1(n, v):
l = list(range(n, 0, -1))
return l.index(v)
def list_index2(n, v, start):
l = list(range(n, 0, -1))
return l.index(v, start)
def list_index3(n, v, start, stop):
l = list(range(n, 0, -1))
return l.index(v, start, stop)
def list_remove(n, v):
l = list(range(n - 1, -1, -1))
l.remove(v)
return l
def list_insert(n, pos, v):
l = list(range(0, n))
l.insert(pos, v)
return l
def list_count(n, v):
l = []
for x in range(n):
l.append(x & 3)
return l.count(v)
def list_reverse(n):
l = list(range(n))
l.reverse()
return l
def list_add(m, n):
a = list(range(0, m))
b = list(range(100, 100 + n))
res = a + b
res.append(42) # check result is a copy
return a, b, res
def list_add_heterogenous():
a = [1]
b = [2.0]
c = a + b
d = b + a
# check result is a copy
a.append(3)
b.append(4.0)
return a, b, c, d
def list_add_inplace(m, n):
a = list(range(0, m))
b = list(range(100, 100 + n))
a += b
return a, b
def list_add_inplace_heterogenous():
a = [1]
b = [2.0]
a += b
b += a
return a, b
def list_mul(n, v):
a = list(range(n))
return a * v
def list_mul_inplace(n, v):
a = list(range(n))
a *= v
return a
def list_bool(n):
a = list(range(n))
return bool(a), (True if a else False)
def eq_usecase(a, b):
return list(a) == list(b)
def ne_usecase(a, b):
return list(a) != list(b)
def gt_usecase(a, b):
return list(a) > list(b)
def ge_usecase(a, b):
return list(a) >= list(b)
def lt_usecase(a, b):
return list(a) < list(b)
def le_usecase(a, b):
return list(a) <= list(b)
def identity_usecase(n):
a = list(range(n))
b = a
c = a[:]
return (a is b), (a is not b), (a is c), (a is not c)
def bool_list_usecase():
# Exercise getitem, setitem, iteration with bool values (issue #1373)
l = [False]
l[0] = True
x = False
for v in l:
x = x ^ v
return l, x
def reflect_simple(l, ll):
x = l.pop()
y = l.pop()
l[0] = 42.
l.extend(ll)
return l, x, y
def reflect_conditional(l, ll):
# `l` may or may not actually reflect a Python list
if ll[0]:
l = [11., 22., 33., 44.]
x = l.pop()
y = l.pop()
l[0] = 42.
l.extend(ll)
return l, x, y
def reflect_exception(l):
l.append(42)
raise ZeroDivisionError
def reflect_dual(l, ll):
l.append(ll.pop())
return l is ll
class TestLists(MemoryLeakMixin, TestCase):
def test_create_list(self):
pyfunc = create_list
cr = compile_isolated(pyfunc, (types.int32, types.int32, types.int32))
cfunc = cr.entry_point
self.assertEqual(cfunc(1, 2, 3), pyfunc(1, 2, 3))
def test_create_nested_list(self):
pyfunc = create_nested_list
with self.assertTypingError():
cr = compile_isolated(pyfunc, (types.int32, types.int32, types.int32,
types.int32, types.int32, types.int32))
cfunc = cr.entry_point
self.assertEqual(cfunc(1, 2, 3, 4, 5, 6), pyfunc(1, 2, 3, 4, 5, 6))
@testing.allow_interpreter_mode
def test_list_comprehension(self):
list_tests = [list_comprehension1,
list_comprehension2,
list_comprehension3,
list_comprehension4,
list_comprehension5,
list_comprehension6]
for test in list_tests:
pyfunc = test
cr = compile_isolated(pyfunc, ())
cfunc = cr.entry_point
self.assertEqual(cfunc(), pyfunc())
def check_unary_with_size(self, pyfunc, precise=True):
cfunc = jit(nopython=True)(pyfunc)
# Use various sizes, to stress the allocation algorithm
for n in [0, 3, 16, 70, 400]:
eq = self.assertPreciseEqual if precise else self.assertEqual
eq(cfunc(n), pyfunc(n))
def test_constructor(self):
self.check_unary_with_size(list_constructor)
def test_append(self):
self.check_unary_with_size(list_append)
@tag('important')
def test_append_heterogenous(self):
self.check_unary_with_size(list_append_heterogenous, precise=False)
def test_extend(self):
self.check_unary_with_size(list_extend)
@tag('important')
def test_extend_heterogenous(self):
self.check_unary_with_size(list_extend_heterogenous, precise=False)
def test_pop0(self):
self.check_unary_with_size(list_pop0)
@tag('important')
def test_pop1(self):
pyfunc = list_pop1
cfunc = jit(nopython=True)(pyfunc)
for n in [5, 40]:
for i in [0, 1, n - 2, n - 1, -1, -2, -n + 3, -n + 1]:
expected = pyfunc(n, i)
self.assertPreciseEqual(cfunc(n, i), expected)
def test_pop_errors(self):
# XXX References are leaked when an exception is raised
self.disable_leak_check()
cfunc = jit(nopython=True)(list_pop1)
with self.assertRaises(IndexError) as cm:
cfunc(0, 5)
self.assertEqual(str(cm.exception), "pop from empty list")
with self.assertRaises(IndexError) as cm:
cfunc(1, 5)
self.assertEqual(str(cm.exception), "pop index out of range")
def test_insert(self):
pyfunc = list_insert
cfunc = jit(nopython=True)(pyfunc)
for n in [5, 40]:
indices = [0, 1, n - 2, n - 1, n + 1, -1, -2, -n + 3, -n - 1]
for i in indices:
expected = pyfunc(n, i, 42)
self.assertPreciseEqual(cfunc(n, i, 42), expected)
def test_len(self):
self.check_unary_with_size(list_len)
@tag('important')
def test_getitem(self):
self.check_unary_with_size(list_getitem)
@tag('important')
def test_setitem(self):
self.check_unary_with_size(list_setitem)
def check_slicing2(self, pyfunc):
cfunc = jit(nopython=True)(pyfunc)
sizes = [5, 40]
for n in sizes:
indices = [0, 1, n - 2, -1, -2, -n + 3, -n - 1, -n]
for start, stop in itertools.product(indices, indices):
expected = pyfunc(n, start, stop)
self.assertPreciseEqual(cfunc(n, start, stop), expected)
def test_getslice2(self):
self.check_slicing2(list_getslice2)
def test_setslice2(self):
pyfunc = list_setslice2
cfunc = jit(nopython=True)(pyfunc)
sizes = [5, 40]
for n, n_src in itertools.product(sizes, sizes):
indices = [0, 1, n - 2, -1, -2, -n + 3, -n - 1, -n]
for start, stop in itertools.product(indices, indices):
expected = pyfunc(n, n_src, start, stop)
self.assertPreciseEqual(cfunc(n, n_src, start, stop), expected)
@tag('important')
def test_getslice3(self):
pyfunc = list_getslice3
cfunc = jit(nopython=True)(pyfunc)
for n in [10]:
indices = [0, 1, n - 2, -1, -2, -n + 3, -n - 1, -n]
steps = [4, 1, -1, 2, -3]
for start, stop, step in itertools.product(indices, indices, steps):
expected = pyfunc(n, start, stop, step)
self.assertPreciseEqual(cfunc(n, start, stop, step), expected)
@tag('important')
def test_setslice3(self):
pyfunc = list_setslice3
cfunc = jit(nopython=True)(pyfunc)
for n in [10]:
indices = [0, 1, n - 2, -1, -2, -n + 3, -n - 1, -n]
steps = [4, 1, -1, 2, -3]
for start, stop, step in itertools.product(indices, indices, steps):
expected = pyfunc(n, start, stop, step)
self.assertPreciseEqual(cfunc(n, start, stop, step), expected)
def test_setslice3_resize(self):
# XXX References are leaked when an exception is raised
self.disable_leak_check()
pyfunc = list_setslice3_arbitrary
cfunc = jit(nopython=True)(pyfunc)
# step == 1 => can resize
cfunc(5, 10, 0, 2, 1)
# step != 1 => cannot resize
with self.assertRaises(ValueError) as cm:
cfunc(5, 100, 0, 3, 2)
self.assertIn("cannot resize", str(cm.exception))
def test_delslice0(self):
self.check_unary_with_size(list_delslice0)
def test_delslice1(self):
self.check_slicing2(list_delslice1)
@tag('important')
def test_delslice2(self):
self.check_slicing2(list_delslice2)
def test_invalid_slice(self):
self.disable_leak_check()
pyfunc = list_getslice3
cfunc = jit(nopython=True)(pyfunc)
with self.assertRaises(ValueError) as cm:
cfunc(10, 1, 2, 0)
self.assertEqual(str(cm.exception), "slice step cannot be zero")
def test_iteration(self):
self.check_unary_with_size(list_iteration)
@tag('important')
def test_reverse(self):
self.check_unary_with_size(list_reverse)
def test_contains(self):
self.check_unary_with_size(list_contains)
def check_index_result(self, pyfunc, cfunc, args):
try:
expected = pyfunc(*args)
except ValueError:
with self.assertRaises(ValueError):
cfunc(*args)
else:
self.assertPreciseEqual(cfunc(*args), expected)
def test_index1(self):
self.disable_leak_check()
pyfunc = list_index1
cfunc = jit(nopython=True)(pyfunc)
for v in (0, 1, 5, 10, 99999999):
self.check_index_result(pyfunc, cfunc, (16, v))
def test_index2(self):
self.disable_leak_check()
pyfunc = list_index2
cfunc = jit(nopython=True)(pyfunc)
n = 16
for v in (0, 1, 5, 10, 99999999):
indices = [0, 1, n - 2, n - 1, n + 1, -1, -2, -n + 3, -n - 1]
for start in indices:
self.check_index_result(pyfunc, cfunc, (16, v, start))
def test_index3(self):
self.disable_leak_check()
pyfunc = list_index3
cfunc = jit(nopython=True)(pyfunc)
n = 16
for v in (0, 1, 5, 10, 99999999):
indices = [0, 1, n - 2, n - 1, n + 1, -1, -2, -n + 3, -n - 1]
for start, stop in itertools.product(indices, indices):
self.check_index_result(pyfunc, cfunc, (16, v, start, stop))
def test_remove(self):
pyfunc = list_remove
cfunc = jit(nopython=True)(pyfunc)
n = 16
for v in (0, 1, 5, 15):
expected = pyfunc(n, v)
self.assertPreciseEqual(cfunc(n, v), expected)
def test_remove_error(self):
self.disable_leak_check()
pyfunc = list_remove
cfunc = jit(nopython=True)(pyfunc)
with self.assertRaises(ValueError) as cm:
cfunc(10, 42)
self.assertEqual(str(cm.exception), "list.remove(x): x not in list")
def test_count(self):
pyfunc = list_count
cfunc = jit(nopython=True)(pyfunc)
for v in range(5):
self.assertPreciseEqual(cfunc(18, v), pyfunc(18, v))
@unittest.skipUnless(sys.version_info >= (3, 3),
"list.clear() needs Python 3.3+")
def test_clear(self):
self.check_unary_with_size(list_clear)
@unittest.skipUnless(sys.version_info >= (3, 3),
"list.copy() needs Python 3.3+")
def test_copy(self):
self.check_unary_with_size(list_copy)
def check_add(self, pyfunc):
cfunc = jit(nopython=True)(pyfunc)
sizes = [0, 3, 50, 300]
for m, n in itertools.product(sizes, sizes):
expected = pyfunc(m, n)
self.assertPreciseEqual(cfunc(m, n), expected)
def test_add(self):
self.check_add(list_add)
def test_add_heterogenous(self):
pyfunc = list_add_heterogenous
cfunc = jit(nopython=True)(pyfunc)
expected = pyfunc()
self.assertEqual(cfunc(), expected)
def test_add_inplace(self):
self.check_add(list_add_inplace)
def test_add_inplace_heterogenous(self):
pyfunc = list_add_inplace_heterogenous
cfunc = jit(nopython=True)(pyfunc)
expected = pyfunc()
self.assertEqual(cfunc(), expected)
def check_mul(self, pyfunc):
cfunc = jit(nopython=True)(pyfunc)
for n in [0, 3, 50, 300]:
for v in [1, 2, 3, 0, -1, -42]:
expected = pyfunc(n, v)
self.assertPreciseEqual(cfunc(n, v), expected)
def test_mul(self):
self.check_mul(list_mul)
def test_mul_inplace(self):
self.check_mul(list_mul_inplace)
@unittest.skipUnless(sys.maxsize >= 2**32,
"need a 64-bit system to test for MemoryError")
def test_mul_error(self):
self.disable_leak_check()
pyfunc = list_mul
cfunc = jit(nopython=True)(pyfunc)
# Fail in malloc()
with self.assertRaises(MemoryError):
cfunc(1, 2**58)
# Overflow size computation when multiplying by item size
with self.assertRaises(MemoryError):
cfunc(1, 2**62)
def test_bool(self):
pyfunc = list_bool
cfunc = jit(nopython=True)(pyfunc)
for n in [0, 1, 3]:
expected = pyfunc(n)
self.assertPreciseEqual(cfunc(n), expected)
def test_list_passing(self):
# Check one can pass a list from a Numba function to another
@jit(nopython=True)
def inner(lst):
return len(lst), lst[-1]
@jit(nopython=True)
def outer(n):
l = list(range(n))
return inner(l)
self.assertPreciseEqual(outer(5), (5, 4))
def _test_compare(self, pyfunc):
def eq(args):
self.assertIs(cfunc(*args), pyfunc(*args),
"mismatch for arguments %s" % (args,))
cfunc = jit(nopython=True)(pyfunc)
eq(((1, 2), (1, 2)))
eq(((1, 2, 3), (1, 2)))
eq(((1, 2), (1, 2, 3)))
eq(((1, 2, 4), (1, 2, 3)))
eq(((1.0, 2.0, 3.0), (1, 2, 3)))
eq(((1.0, 2.0, 3.5), (1, 2, 3)))
def test_eq(self):
self._test_compare(eq_usecase)
def test_ne(self):
self._test_compare(ne_usecase)
def test_le(self):
self._test_compare(le_usecase)
def test_lt(self):
self._test_compare(lt_usecase)
def test_ge(self):
self._test_compare(ge_usecase)
def test_gt(self):
self._test_compare(gt_usecase)
def test_identity(self):
pyfunc = identity_usecase
cfunc = jit(nopython=True)(pyfunc)
self.assertPreciseEqual(cfunc(3), pyfunc(3))
def test_bool_list(self):
# Check lists of bools compile and run successfully
pyfunc = bool_list_usecase
cfunc = jit(nopython=True)(pyfunc)
self.assertPreciseEqual(cfunc(), pyfunc())
class TestUnboxing(MemoryLeakMixin, TestCase):
"""
Test unboxing of Python lists into native Numba lists.
"""
@contextlib.contextmanager
def assert_type_error(self, msg):
with self.assertRaises(TypeError) as raises:
yield
if msg is not None:
self.assertRegexpMatches(str(raises.exception), msg)
def check_unary(self, pyfunc):
cfunc = jit(nopython=True)(pyfunc)
def check(arg):
expected = pyfunc(arg)
got = cfunc(arg)
self.assertPreciseEqual(got, expected)
return check
def test_numbers(self):
check = self.check_unary(unbox_usecase)
check([1, 2])
check([1j, 2.5j])
def test_tuples(self):
check = self.check_unary(unbox_usecase2)
check([(1, 2), (3, 4)])
check([(1, 2j), (3, 4j)])
check([(), (), ()])
@tag('important')
def test_list_inside_tuple(self):
check = self.check_unary(unbox_usecase3)
check((1, [2, 3, 4]))
def test_list_of_tuples_inside_tuple(self):
check = self.check_unary(unbox_usecase4)
check((1, [(2,), (3,)]))
def test_errors(self):
# See #1545 and #1594: error checking should ensure the list is
# homogenous
msg = "can't unbox heterogenous list"
pyfunc = noop
cfunc = jit(nopython=True)(pyfunc)
lst = [1, 2.5]
with self.assert_type_error(msg):
cfunc(lst)
# The list hasn't been changed (bogus reflecting)
self.assertEqual(lst, [1, 2.5])
with self.assert_type_error(msg):
cfunc([1, 2j])
# Same when the list is nested in a tuple or namedtuple
with self.assert_type_error(msg):
cfunc((1, [1, 2j]))
with self.assert_type_error(msg):
cfunc(Point(1, [1, 2j]))
# Issue #1638: tuples of different size.
# Note the check is really on the tuple side.
lst = [(1,), (2, 3)]
with self.assertRaises(ValueError) as raises:
cfunc(lst)
self.assertEqual(str(raises.exception),
"size mismatch for tuple, expected 1 element(s) but got 2")
class TestListReflection(MemoryLeakMixin, TestCase):
"""
Test reflection of native Numba lists on Python list objects.
"""
def check_reflection(self, pyfunc):
cfunc = jit(nopython=True)(pyfunc)
samples = [([1., 2., 3., 4.], [0.]),
([1., 2., 3., 4.], [5., 6., 7., 8., 9.]),
]
for dest, src in samples:
expected = list(dest)
got = list(dest)
pyres = pyfunc(expected, src)
with self.assertRefCount(got, src):
cres = cfunc(got, src)
self.assertPreciseEqual(cres, pyres)
self.assertPreciseEqual(expected, got)
self.assertEqual(pyres[0] is expected, cres[0] is got)
del pyres, cres
def test_reflect_simple(self):
self.check_reflection(reflect_simple)
def test_reflect_conditional(self):
self.check_reflection(reflect_conditional)
def test_reflect_exception(self):
"""
When the function exits with an exception, lists should still be
reflected.
"""
pyfunc = reflect_exception
cfunc = jit(nopython=True)(pyfunc)
l = [1, 2, 3]
with self.assertRefCount(l):
with self.assertRaises(ZeroDivisionError):
cfunc(l)
self.assertPreciseEqual(l, [1, 2, 3, 42])
@tag('important')
def test_reflect_same_list(self):
"""
When the same list object is reflected twice, behaviour should
be consistent.
"""
pyfunc = reflect_dual
cfunc = jit(nopython=True)(pyfunc)
pylist = [1, 2, 3]
clist = pylist[:]
expected = pyfunc(pylist, pylist)
got = cfunc(clist, clist)
self.assertPreciseEqual(expected, got)
self.assertPreciseEqual(pylist, clist)
self.assertPreciseEqual(sys.getrefcount(pylist), sys.getrefcount(clist))
def test_reflect_clean(self):
"""
When the list wasn't mutated, no reflection should take place.
"""
cfunc = jit(nopython=True)(noop)
# Use a complex, as Python integers can be cached
l = [12.5j]
ids = [id(x) for x in l]
cfunc(l)
self.assertEqual([id(x) for x in l], ids)
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
92a176149b6adf9cc5a297e10f227ab9d00f6a35 | ceed0a361e951b72c1a0fd8c50f093d66d6bfc31 | /towel/__init__.py | 15d8d2ec20144e03ccef873dbdd4d9c90f102e2e | [
"BSD-2-Clause"
] | permissive | jensneuhaus/towel | 481914ea6c2a71021c890d8c3c74ff7659df4b88 | a91039d71e458c7d59457e472cfe5bd6cad6a492 | refs/heads/master | 2020-12-11T05:44:40.220543 | 2015-09-03T08:56:24 | 2015-09-03T08:56:45 | 49,214,911 | 0 | 0 | null | 2016-01-07T16:10:05 | 2016-01-07T16:06:30 | Python | UTF-8 | Python | false | false | 105 | py | """
Towel - Keeping you DRY since 2010
"""
VERSION = (0, 7, 0)
__version__ = '.'.join(map(str, VERSION))
| [
"[email protected]"
] | |
9a5ec1a187e75627e6dcb81ca8146aa919e1183d | 69b4f343861f6fb366c8fbbe590376a1bdd0c658 | /Tests.py | 055c802f1a0a921b5024e5e83d059629edfe7772 | [] | no_license | freQuensy23-coder/CaptchServiceAPI | 81f8a705193b07892f65cdc05b84a8ac6961b286 | 85a8b3585a4c6e6b98ae5c11375567b9d4b4dbfa | refs/heads/main | 2023-03-13T14:43:45.044766 | 2021-03-02T19:03:22 | 2021-03-02T19:03:22 | 341,452,481 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 537 | py | import unittest
from generator import generate_font_image, generate_random_word, do_image_dim
from PIL import Image
class Tester(unittest.TestCase):
def setUp(self) -> None:
pass
def test_get_font(self):
# TODO
for i in range(5555):
generate_font_image()
def test_generate_random_word(self):
for i in range(50):
print(str(generate_random_word()))
def test_do_image_dim(self):
im = Image.open("background.jpg")
        do_image_dim(im, force=4096).show()
| [
"[email protected]"
] | |
6c2e15fe001ee7f4ada3747278a504be5e557b84 | f4b79529109fbb4055f334d0d9c7c96cb0710447 | /colour/examples/colorimetry/examples_photometry.py | 1a933c8490479fed65be2c0deaf6b89803b4c56e | [
"BSD-3-Clause"
] | permissive | trevorandersen/colour | 167381b3d03e506a270a8d2a519a164808995437 | 02b595b26313c4b4f55adc41d599f90c4c9edbcd | refs/heads/develop | 2021-07-15T04:48:19.585586 | 2021-01-23T23:51:44 | 2021-01-23T23:51:44 | 230,421,054 | 0 | 0 | BSD-3-Clause | 2019-12-28T12:54:20 | 2019-12-27T10:10:30 | null | UTF-8 | Python | false | false | 858 | py | # -*- coding: utf-8 -*-
"""
Showcases *Photometry* computations.
"""
import colour
from colour.utilities import message_box
message_box('"Photometry" Computations')
sd_light_source = colour.SDS_LIGHT_SOURCES['Neodimium Incandescent']
message_box(('Computing "Luminous Flux" for given spectral '
'distribution:\n'
'\n\t{0}'.format(sd_light_source.name)))
print(colour.luminous_flux(sd_light_source))
print('\n')
message_box(('Computing "Luminous Efficiency" for given spectral '
'distribution:\n'
'\n\t{0}'.format(sd_light_source.name)))
print(colour.luminous_efficiency(sd_light_source))
print('\n')
message_box(('Computing "Luminous Efficacy" for given spectral '
'distribution:\n'
'\n\t{0}'.format(sd_light_source.name)))
print(colour.luminous_efficacy(sd_light_source))
| [
"[email protected]"
] | |
395fa81b18711e219bc6cd2cb0dbbacfb2042d17 | 6230dd7501bb504643cb3b8d8d18889f4bc9e292 | /web_frameworks/web_frameworks/settings.py | bbbc17a5d7ddd4c0c58cd449416a3ff2c7e94384 | [
"MIT"
] | permissive | Minkov/python-web-frameworks-2020-11 | f83a8560cbbcd06549bcacaca83de3af4824adc6 | 5857bb626792a9efe1f2d06677fa3779f5e2cc1d | refs/heads/main | 2023-01-21T07:02:46.141981 | 2020-12-01T18:30:20 | 2020-12-01T18:30:20 | 310,352,954 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,445 | py | """
Django settings for web_frameworks project.
Generated by 'django-admin startproject' using Django 3.1.3.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from os.path import join
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '73l^kfu(th-t&nk219%xvlg&29*5khenic!ji$(s-3r5-tc!ww'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework.authtoken',
'templates_advanced',
'resources',
'cbv',
'books',
'books_api',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'web_frameworks.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [BASE_DIR / 'templates']
,
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'web_frameworks.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': [
'rest_framework.authentication.TokenAuthentication',
],
}
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
STATIC_URL = '/static/'
STATICFILES_DIRS = (
join(BASE_DIR, 'static'),
)
STATIC_ROOT = '/tmp/static'
MEDIA_URL = '/media/'
MEDIA_ROOT = join(BASE_DIR, 'media')
| [
"[email protected]"
] | |
12dff4722892f3042a30723dc845bff0321cbf83 | c6f47e7e96c5a9f7f0f24026dffe60fbf5bb034d | /notebooks/pendigits/pendigits_dmkde_adp.py | 5a1d32c2b06c4dcb647eb5bb198c15006267ab63 | [] | no_license | Joaggi/anomaly-detection-density-matrix-kernel-density-estimation | 762b2a944cef2ea06172834e6f445f02a52a7f89 | 34c3eb16fde9f2aad4daaaf233947c362b0f5416 | refs/heads/master | 2023-04-08T06:53:16.742624 | 2022-11-09T21:39:50 | 2022-11-09T21:39:50 | 425,664,863 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,359 | py | current_path = ""
try:
import google.colab
IN_COLAB = True
except:
IN_COLAB = False
if IN_COLAB:
import os
import sys
sys.path.append('submodules/qmc/')
#sys.path.append('../../../../submodules/qmc/')
print(sys.path)
else:
import sys
sys.path.append('submodules/qmc/')
sys.path.append('data/')
#sys.path.append('../../../../submodules/qmc/')
print(sys.path)
# %cd ../../
    import os  # 'os' is only imported in the IN_COLAB branch above; getcwd() needs it here too
    print(os.getcwd())
sys.path.append('scripts/')
import qmc.tf.layers as layers
import qmc.tf.models as models
import tensorflow as tf
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from experiments import experiments
from mlflow_create_experiment import mlflow_create_experiment
setting = {
"z_name_of_experiment": 'dmkde_adp-pendigits',
"z_run_name": "dmkde_adp",
"z_dataset": "pendigits",
"z_rff_components": 1000,
"z_num_samples": 10000,
"z_batch_size": 16,
"z_select_best_experiment": True,
"z_threshold": 0.0
}
prod_settings = {"z_gamma": [2**i for i in range(-9,6)]}
params_int = ["z_rff_components", "z_batch_size", "z_num_samples"]
params_float = ["z_gamma", "z_threshold"]
mlflow = mlflow_create_experiment(setting["z_name_of_experiment"])
experiments(setting, prod_settings, params_int, params_float, mlflow)
| [
"[email protected]"
] | |
9867fe19d328e3fb7a896205afc9498f7e784422 | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /Z8REdTE5P57f4q7dK_20.py | 02025f57265a048945b02e93032e46722f6d5199 | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 148 | py |
def collatz(n, r=[]):
if not r: r = [n]
if n == 1: return (len(r), max(*r))
n = n * 3 + 1 if n & 1 else n // 2
return collatz(n, r + [n])
| [
"[email protected]"
] | |
d8d7c2533a436b336dde94b74fadb5d8c040b775 | d05c946e345baa67e7894ee33ca21e24b8d26028 | /general/data-cleaning-pandas/data_cleaning.py | 7e03b3efd348f785821adfca186f950771cfa799 | [
"MIT"
] | permissive | x4nth055/pythoncode-tutorials | 327255550812f84149841d56f2d13eaa84efd42e | d6ba5d672f7060ba88384db5910efab1768c7230 | refs/heads/master | 2023-09-01T02:36:58.442748 | 2023-08-19T14:04:34 | 2023-08-19T14:04:34 | 199,449,624 | 1,858 | 2,055 | MIT | 2023-08-25T20:41:56 | 2019-07-29T12:35:40 | Jupyter Notebook | UTF-8 | Python | false | false | 202 | py | import pandas as pd
# Config settings
pd.set_option('max_columns', None)
pd.set_option('max_rows', 12)
# Import CSV data
data_frames = pd.read_csv (r'simulated_data.csv')
print(data_frames.head(10))
| [
"[email protected]"
] | |
287ccbc12cf46d63d2972071600fb8ed009446d4 | 2f98aa7e5bfc2fc5ef25e4d5cfa1d7802e3a7fae | /python/python_13227.py | 4515d6bed05050aef381bac0d3fb8fb5bd8c6364 | [] | no_license | AK-1121/code_extraction | cc812b6832b112e3ffcc2bb7eb4237fd85c88c01 | 5297a4a3aab3bb37efa24a89636935da04a1f8b6 | refs/heads/master | 2020-05-23T08:04:11.789141 | 2015-10-22T19:19:40 | 2015-10-22T19:19:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 53 | py | # Scrapy : spider which doesn't work
allowed_domains
| [
"[email protected]"
] | |
e87ab6118cff6287802446efa6e5b0769cb7256f | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/82/usersdata/205/42973/submittedfiles/decimal2bin.py | 1bdf2cb16de1f0f06f74293fae6aa792b5408320 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 124 | py | # -*- coding: utf-8 -*-
soma=0
i=0
while (n>0):
resto=n%10
soma=soma+resto*(2**i)
i=i+1
n=n//10
print(soma)
| [
"[email protected]"
] | |
19336d3be69f6065ae890b7c90e28de999166652 | 564fe9c8409d9ff4ba5f88dd36c0743d417767fa | /opsgenie_swagger/models/contact.py | 8a11eaff8b7fc6180a2fc9809ec2522e53d44ade | [
"Apache-2.0"
] | permissive | criteo-forks/opsgenie-python-sdk | 28cf4b2e5eb5f10df582cfd6393a0e952dee5102 | 2a3924a0bd779eab47937925eb5d42ffbbd751d4 | refs/heads/master | 2020-04-05T23:09:41.002143 | 2019-04-12T13:37:22 | 2019-04-12T13:37:22 | 65,009,459 | 0 | 2 | null | 2016-08-05T10:08:55 | 2016-08-05T10:08:55 | null | UTF-8 | Python | false | false | 4,584 | py | # coding: utf-8
"""
OpsGenie REST API
OpsGenie OpenAPI Specification # noqa: E501
OpenAPI spec version: 2.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from opsgenie_swagger.models.contact_status import ContactStatus # noqa: F401,E501
class Contact(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'id': 'str',
'method': 'str',
'to': 'str',
'status': 'ContactStatus'
}
attribute_map = {
'id': 'id',
'method': 'method',
'to': 'to',
'status': 'status'
}
def __init__(self, id=None, method=None, to=None, status=None): # noqa: E501
"""Contact - a model defined in Swagger""" # noqa: E501
self._id = None
self._method = None
self._to = None
self._status = None
self.discriminator = None
if id is not None:
self.id = id
if method is not None:
self.method = method
if to is not None:
self.to = to
if status is not None:
self.status = status
@property
def id(self):
"""Gets the id of this Contact. # noqa: E501
:return: The id of this Contact. # noqa: E501
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this Contact.
:param id: The id of this Contact. # noqa: E501
:type: str
"""
self._id = id
@property
def method(self):
"""Gets the method of this Contact. # noqa: E501
:return: The method of this Contact. # noqa: E501
:rtype: str
"""
return self._method
@method.setter
def method(self, method):
"""Sets the method of this Contact.
:param method: The method of this Contact. # noqa: E501
:type: str
"""
self._method = method
@property
def to(self):
"""Gets the to of this Contact. # noqa: E501
:return: The to of this Contact. # noqa: E501
:rtype: str
"""
return self._to
@to.setter
def to(self, to):
"""Sets the to of this Contact.
:param to: The to of this Contact. # noqa: E501
:type: str
"""
self._to = to
@property
def status(self):
"""Gets the status of this Contact. # noqa: E501
:return: The status of this Contact. # noqa: E501
:rtype: ContactStatus
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this Contact.
:param status: The status of this Contact. # noqa: E501
:type: ContactStatus
"""
self._status = status
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Contact):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
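# Illustrative use of the generated model (the values below are placeholders, not from the spec):
#   contact = Contact(id='1', method='email', to='[email protected]')
#   contact.to_dict()
#   # -> {'id': '1', 'method': 'email', 'to': '[email protected]', 'status': None}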
| [
"[email protected]"
] | |
ff3457af2885ad007f99f5934781e303b81c3a85 | 7b20e2f86c2bb2145ae9ca5bcd4b9ad1566e79b0 | /ABC/ABC072/B.py | c1e5f4aca531a38a39572c87cb4b7882f3a478d9 | [] | no_license | pto8913/KyoPro | 5f5e769960dfec73af5b0f338f32659ff067094b | 29ebc30a3d45fea273cb9034fba8311673a406dd | refs/heads/master | 2021-06-13T16:43:40.275854 | 2021-03-23T00:02:25 | 2021-03-23T00:02:25 | 174,684,331 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 78 | py | # URL: https://atcoder.jp/contests/abc072/tasks/abc072_b
print(input()[::2])
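# Slicing with step 2 keeps the 1st, 3rd, 5th, ... characters of the input line.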
| [
"[email protected]"
] | |
f81fa202ac030ed1854e85d1d468bada85820ad5 | 4d9e7425ea6902a45eeda81c5cd5ede7d44fd087 | /examples/starwars/tests/test_connections.py | d266df33d782e73fb8019b1abe3fbbc0aa505e77 | [
"MIT"
] | permissive | Salalem/graphene-neo4j | 2258b29093337fd8981b880065b144dc4e6c145b | f2b99fa18b7367cf3a581d1f4a71fda16a1320fc | refs/heads/master | 2023-08-08T06:57:41.749557 | 2019-06-14T16:22:14 | 2019-06-14T16:22:14 | 222,071,239 | 1 | 1 | MIT | 2023-07-22T21:45:41 | 2019-11-16T08:45:32 | null | UTF-8 | Python | false | false | 1,441 | py | import pytest
from ..data import initialize
from ..schema import schema
pytestmark = pytest.mark.django_db
def test_correct_fetch_first_ship_rebels():
initialize()
query = '''
query RebelsShipsQuery {
rebels {
name,
hero {
name
}
ships(first: 1) {
edges {
node {
name
}
}
}
}
}
'''
expected = {
'rebels': {
'name': 'Alliance to Restore the Republic',
'hero': {
'name': 'Human'
},
'ships': {
'edges': [
{
'node': {
'name': 'X-Wing'
}
}
]
}
}
}
result = schema.execute(query)
assert not result.errors
assert result.data == expected
def test_correct_list_characters():
initialize()
query = '''
query RebelsShipsQuery {
node(id: "U2hpcDox") {
... on Ship {
name
characters {
name
}
}
}
}
'''
expected = {
'node': {
'name': 'X-Wing',
'characters': [{
'name': 'Human'
}],
}
}
result = schema.execute(query)
assert not result.errors
assert result.data == expected
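# Note: the Relay node id "U2hpcDox" queried above is simply the base64 encoding of "Ship:1".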
| [
"[email protected]"
] | |
50aa838108041994970b4a245b95fa893a34737f | 66dd570bf5945dcbd183ed3c0cf897c0359cbccd | /python/python语法/pyexercise/Exercise05_09.py | e942db9fe0e15a945d4dac56ce26d7e5c0745b7a | [] | no_license | SamJ2018/LeetCode | 302cc97626220521c8847d30b99858e63fa509f3 | 784bd0b1491050bbd80f5a0e2420467b63152d8f | refs/heads/master | 2021-06-19T10:30:37.381542 | 2021-02-06T16:15:01 | 2021-02-06T16:15:01 | 178,962,481 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 279 | py | tuition = 10000
count = 1
while count <= 10:
    tuition = tuition * 1.05
count += 1
print("Tuition in ten years is", tuition)
sum = tuition
for i in range(2, 5):
tuition = tuition * 1.05
sum += tuition
print("The four-year tuition in ten years is", sum)
| [
"[email protected]"
] | |
017d0a8ffb2b9b577b8ef976168c48995c63e689 | 6df76f8a6fcdf444c3863e3788a2f4b2c539c22c | /django code/p105/p105/settings.py | a76b29db32f2d0ab4cdf3fc1733f20e72b6b2894 | [] | no_license | basantbhandari/DjangoProjectsAsDocs | 068e4a704fade4a97e6c40353edb0a4299bd9678 | 594dbb560391eaf94bb6db6dc07702d127010b88 | refs/heads/master | 2022-12-18T22:33:23.902228 | 2020-09-22T13:11:01 | 2020-09-22T13:11:01 | 297,651,728 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,134 | py | """
Django settings for p105 project.
Generated by 'django-admin startproject' using Django 3.0.3.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '*o3o*nsr8m66()euw(-%s1%0(y@(a$-bypjgao_uqbn1q=elc!'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
#userapp
'myapp',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'p105.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'p105.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME':
'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME':
'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME':
'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME':
'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
| [
"[email protected]"
] | |
ccd9a13878867c64b046d0c2430669e314344e6b | 259cc507d97bfeff84d21de3a0ab56640676a9eb | /venv1/Lib/site-packages/tensorflow/contrib/sparsemax/python/ops/sparsemax.py | ab6bdcb499055455ea400a70cb2c8dbe89ad712d | [
"MIT",
"Apache-2.0"
] | permissive | Soum-Soum/Tensorflow_Face_Finder | c3ef71b6f718f6720b80f8760d28b6ca6e11e6d2 | fec6c15d2df7012608511ad87f4b55731bf99478 | refs/heads/master | 2020-03-22T20:31:39.606644 | 2018-07-12T13:47:56 | 2018-07-12T13:47:56 | 140,607,068 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,667 | py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Sparsemax op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
__all__ = ["sparsemax"]
def sparsemax(logits, name=None):
"""Computes sparsemax activations [1].
For each batch `i` and class `j` we have
sparsemax[i, j] = max(logits[i, j] - tau(logits[i, :]), 0)
[1]: https://arxiv.org/abs/1602.02068
Args:
logits: A `Tensor`. Must be one of the following types: `half`, `float32`,
`float64`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `logits`.
"""
with ops.name_scope(name, "sparsemax", [logits]) as name:
logits = ops.convert_to_tensor(logits, name="logits")
obs = array_ops.shape(logits)[0]
dims = array_ops.shape(logits)[1]
z = logits - math_ops.reduce_mean(logits, axis=1)[:, array_ops.newaxis]
# sort z
z_sorted, _ = nn.top_k(z, k=dims)
# calculate k(z)
z_cumsum = math_ops.cumsum(z_sorted, axis=1)
k = math_ops.range(
1, math_ops.cast(dims, logits.dtype) + 1, dtype=logits.dtype)
z_check = 1 + k * z_sorted > z_cumsum
# because the z_check vector is always [1,1,...1,0,0,...0] finding the
# (index + 1) of the last `1` is the same as just summing the number of 1.
k_z = math_ops.reduce_sum(math_ops.cast(z_check, dtypes.int32), axis=1)
# calculate tau(z)
indices = array_ops.stack([math_ops.range(0, obs), k_z - 1], axis=1)
tau_sum = array_ops.gather_nd(z_cumsum, indices)
tau_z = (tau_sum - 1) / math_ops.cast(k_z, logits.dtype)
# calculate p
return math_ops.maximum(
math_ops.cast(0, logits.dtype), z - tau_z[:, array_ops.newaxis])
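# Illustrative usage sketch (added for clarity; not part of the original module).
# Assumes `import tensorflow as tf` alongside the internal imports above:
#   probs = sparsemax(tf.constant([[0.1, 1.1, 3.0]]))
#   # each row of `probs` sums to 1, with exact zeros for sufficiently small logits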
| [
"[email protected]"
] | |
c883e1a0a408db687bff3e281fefff765a1d8a66 | c6ec292a52ea54499a35a7ec7bc042a9fd56b1aa | /Python/1102.py | 2cae0a34d41306787e668057e921d884cf86347d | [] | no_license | arnabs542/Leetcode-38 | ad585353d569d863613e90edb82ea80097e9ca6c | b75b06fa1551f5e4d8a559ef64e1ac29db79c083 | refs/heads/master | 2023-02-01T01:18:45.851097 | 2020-12-19T03:46:26 | 2020-12-19T03:46:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 804 | py | class Solution:
def maximumMinimumPath(self, A: List[List[int]]) -> int:
if not A or not A[0]:
return 0
m, n = len(A), len(A[0])
visited = [[False] * n for _ in range(m)]
mi = A[0][0]
heap = [(-mi, 0, 0)]
dx = [1, -1, 0, 0]
dy = [0, 0, -1, 1]
while heap:
curMin, x, y = heapq.heappop(heap)
if x == m - 1 and y == n - 1:
return -curMin
for i in range(4):
nx, ny = dx[i] + x, dy[i] + y
if 0 <= nx < m and 0 <= ny < n and not visited[nx][ny]:
visited[nx][ny] = True
newMin = min(-curMin, A[nx][ny])
heapq.heappush(heap, (-newMin, nx, ny))
return -1
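# The method above is a Dijkstra-style best-first search: the max-heap always expands the
# cell whose path minimum is largest so far, so the first time the bottom-right cell is
# popped its score is the answer. (`List` comes from `typing` and `heapq` from the standard
# library; add those imports to run this file on its own.)
# Usage sketch on the standard example grid:
#   Solution().maximumMinimumPath([[5, 4, 5], [1, 2, 6], [7, 4, 6]])  # -> 4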
| [
"[email protected]"
] | |
b2385dc3272c957e8e027af6117d2102403e8702 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03243/s474613072.py | 7218ea9f3043f0cb31d16a785cad563de5b7ff3f | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 126 | py | n = int(input())
mul = lambda x: x * 100 + x * 10 + x
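# mul(d) builds the three-digit repdigit ddd (i.e. d * 111); the answer printed below is
# the smallest such repdigit that is >= n.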
if mul(n // 100) >= n:
    ans = mul(n // 100)
else:
    ans = mul(n // 100 + 1)
print(ans) | [
"[email protected]"
] | |
9fd32d09e3dee1b1d467de5785167d31fbc3ffa7 | edcd74f8f65119bdbe737360c2ca33b4a6da160a | /python/problem-tree/longest_univalue_path.py | 1278c1e329a7ad0dd71f2ab4433e53e2e009049e | [] | no_license | hyunjun/practice | 72e83de6a1d5e04ddcd16526f16110ea2dd00373 | 5376dd48b1cefb4faba9d2ef6a8a497b6b1d6c67 | refs/heads/master | 2023-08-31T07:00:37.320351 | 2023-08-17T07:29:24 | 2023-08-17T07:29:24 | 2,704,126 | 3 | 2 | null | 2022-12-14T20:25:07 | 2011-11-03T18:28:44 | Python | UTF-8 | Python | false | false | 8,459 | py | # https://leetcode.com/problems/longest-univalue-path
# https://leetcode.com/problems/longest-univalue-path/solution
from TreeNode import TreeNode
class Solution:
# Wrong Answer
def longestUnivaluePath0(self, root):
if root is None:
return 0
cur, stack, res = root, [], []
while cur or stack:
if cur:
stack.append(cur)
cur = cur.left
else:
cur = stack.pop()
res.append(cur.val)
cur = cur.right
print(res)
s, e, maxLen = 0, 0, 0
for i, r in enumerate(res):
if 0 == i:
continue
if res[i - 1] != r:
e = i
print('{}[{}]~{}[{}]'.format(res[s], s, res[e], e))
maxLen = max(maxLen, e - 1 - s)
s = i
maxLen = max(maxLen, len(res) - 1 - s)
return maxLen
# Wrong Answer
def longestUnivaluePath1(self, root):
if root is None:
return 0
queue, maxVal = [(root, [])], 0
while queue:
cur, prevVals = queue.pop(0)
prevVals.append(cur.val)
if cur.left is None and cur.right is None:
print(prevVals)
cnt = 0
for i, val in enumerate(prevVals):
if 0 == i:
continue
if prevVals[i - 1] == val:
cnt += 1
maxVal = max(maxVal, cnt)
else:
cnt = 0
else:
if cur.left:
queue.append((cur.left, prevVals[:]))
if cur.right:
queue.append((cur.right, prevVals[:]))
return maxVal
# Wrong Answer
def longestUnivaluePath2(self, root):
if root is None:
return 0
def getCount(node):
if node is None:
return 0
lCount, rCount = 0, 0
if node.left:
if node.left.val == node.val:
lCount = 1 + getCount(node.left)
else:
lCount = getCount(node.left)
if node.right:
if node.right.val == node.val:
rCount = 1 + getCount(node.right)
else:
rCount = getCount(node.right)
if node.left and node.right and node.val == node.left.val == node.right.val:
return lCount + rCount
return max(lCount, rCount)
return getCount(root)
# Wrong Answer
def longestUnivaluePath3(self, root):
def getConnectedCount(node, val):
if node is None:
return 0
lCount, rCount = 0, 0
if node.left:
if node.left.val == node.val == val:
lCount = 1 + getConnectedCount(node.left, val)
else:
lCount = getConnectedCount(node.left, val)
if node.right:
if node.right.val == node.val == val:
rCount = 1 + getConnectedCount(node.right, val)
else:
rCount = getConnectedCount(node.right, val)
if node.left and node.right and val == node.val == node.left.val == node.right.val:
return lCount + rCount
return max(lCount, rCount)
if root is None:
return 0
queue, candidates = [root], set()
while queue:
cur = queue.pop(0)
if cur.left:
if cur.val == cur.left.val:
candidates.add(cur.val)
queue.append(cur.left)
if cur.right:
if cur.val == cur.right.val:
candidates.add(cur.val)
queue.append(cur.right)
print(candidates)
maxLen = 0
for cand in candidates:
maxLen = max(maxLen, getConnectedCount(root, cand))
return maxLen
# Wrong Answer
    def longestUnivaluePath4(self, root):
if root is None:
return 0
def combine(node):
if node is None:
return []
res = []
if node.left and node.right and node.left.val == node.right.val == node.val:
lRes = combine(node.left)
if 0 == len(lRes):
res.append(node.left.val)
else:
res.extend(lRes)
res.append(node.val)
rRes = combine(node.right)
if 0 == len(rRes):
res.append(node.right.val)
else:
res.extend(rRes)
elif node.left and node.left.val == node.val:
lRes = combine(node.left)
if 0 == len(lRes):
res.append(node.left.val)
else:
res.extend(lRes)
res.append(node.val)
elif node.right and node.right.val == node.val:
res.append(node.val)
rRes = combine(node.right)
if 0 == len(rRes):
res.append(node.right.val)
else:
res.extend(rRes)
return res
queue, maxVal = [root], 0
while queue:
cur = queue.pop(0)
maxVal = max(maxVal, len(combine(cur)) - 1)
if cur.left:
queue.append(cur.left)
if cur.right:
queue.append(cur.right)
return maxVal
# 57.52% solution
def longestUnivaluePath(self, root):
self.ans = 0
def getLength(node):
if node is None:
return 0
lLength, rLength = getLength(node.left), getLength(node.right)
lChild, rChild = 0, 0
if node.left and node.left.val == node.val:
lChild = lLength + 1
if node.right and node.right.val == node.val:
rChild = rLength + 1
self.ans = max(self.ans, lChild + rChild)
return max(lChild, rChild)
getLength(root)
return self.ans
s = Solution()
'''
5
/ \
4 5
/ \ \
1 1 5
'''
root = TreeNode(5)
root.left = TreeNode(4)
root.left.left = TreeNode(1)
root.left.right = TreeNode(1)
root.right = TreeNode(5)
root.right.right = TreeNode(5)
print(s.longestUnivaluePath(root))
'''
1
/ \
4 5
/ \ \
4 4 5
'''
root = TreeNode(1)
root.left = TreeNode(4)
root.left.left = TreeNode(4)
root.left.right = TreeNode(4)
root.right = TreeNode(5)
root.right.right = TreeNode(5)
print(s.longestUnivaluePath(root))
'''
1
/
4
/
4
/
1
'''
root = TreeNode(1)
root.left = TreeNode(4)
root.left.left = TreeNode(4)
root.left.left.left = TreeNode(1)
print(s.longestUnivaluePath(root))
'''
1
\
4
\
4
\
1
'''
root = TreeNode(1)
root.right = TreeNode(4)
root.right.right = TreeNode(4)
root.right.right.right = TreeNode(1)
print(s.longestUnivaluePath(root))
'''
1
/ \
2 2
/ \ \
2 2 2
'''
root = TreeNode(1)
root.left = TreeNode(2)
root.left.left = TreeNode(2)
root.left.right = TreeNode(2)
root.right = TreeNode(2)
root.right.left = TreeNode(2)
print(s.longestUnivaluePath(root))
'''
1
/ \
2 2
/ \
2 2
'''
root = TreeNode(1)
root.left = TreeNode(2)
root.left.left = TreeNode(2)
root.left.right = TreeNode(2)
root.right = TreeNode(2)
print(s.longestUnivaluePath(root))
'''
1
/ \
2 3
'''
root = TreeNode(1)
root.left = TreeNode(2)
root.right = TreeNode(3)
print(s.longestUnivaluePath(root))
'''
4
/ \
-7 -3
/ \
-9 -3
/
-4
'''
root = TreeNode(4)
root.left = TreeNode(-7)
root.right = TreeNode(-3)
root.right.left = TreeNode(-9)
root.right.right = TreeNode(-3)
root.right.right.left = TreeNode(-4)
print(s.longestUnivaluePath(root))
| [
"[email protected]"
] | |
77acc0d3cf53b10d4d349208c468bc9079016a6e | 045cb1a5638c3575296f83471758dc09a8065725 | /addons/sale_coupon/wizard/__init__.py | 635af11d6b33d4b83895e11bf4abe859856175f1 | [] | no_license | marionumza/saas | 7236842b0db98d1a0d0c3c88df32d268509629cb | 148dd95d991a348ebbaff9396759a7dd1fe6e101 | refs/heads/main | 2023-03-27T14:08:57.121601 | 2021-03-20T07:59:08 | 2021-03-20T07:59:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 175 | py | # -*- coding: utf-8 -*-
# Part of Harpiya. See LICENSE file for full copyright and licensing details.
from . import sale_coupon_apply_code
from . import sale_coupon_generate
| [
"[email protected]"
] | |
290d56dcec1a58dca055cb026dc4d25f4b012abe | 010aa27c9b532a98acba678ac61cd603b3d9fd2e | /cltk/lemmatize/latin/backoff.py | 46497c4758d48485457d7a8704249d1724d404ec | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | ashwanidv100/cltk | 2e83e394139173a7024e6bb5d21e826986f9ab13 | ddfdb530aa322ac73f7ad57f725c9a80ed1b56ff | refs/heads/master | 2021-04-09T15:25:31.172534 | 2018-03-21T17:16:04 | 2018-03-21T17:16:04 | 125,635,475 | 1 | 0 | MIT | 2018-03-17T14:26:14 | 2018-03-17T14:08:35 | Python | UTF-8 | Python | false | false | 23,168 | py | """Module for lemmatizing Latin—includes several classes for different
lemmatizing approaches--based on training data, regex pattern matching,
etc. These can be chained together using the backoff parameter. Also,
includes a pre-built chain that uses models in latin_models_cltk repo
called BackoffLatinLemmatizer.
The logic behind the backoff lemmatizer is based on backoff POS-tagging in
NLTK and repurposes several of the tagging classes for lemmatization
tasks. See here for more info on sequential backoff tagging in NLTK:
http://www.nltk.org/_modules/nltk/tag/sequential.html
"""
__author__ = ['Patrick J. Burns <[email protected]>']
__license__ = 'MIT License. See LICENSE.'
import os
import re
from nltk.probability import ConditionalFreqDist
from nltk.tag.api import TaggerI
from nltk.tag.sequential import SequentialBackoffTagger, ContextTagger, DefaultTagger, NgramTagger, UnigramTagger, RegexpTagger
from cltk.utils.file_operations import open_pickle
from cltk.lemmatize.latin.latin import latin_sub_patterns, latin_verb_patterns, latin_pps, rn_patterns
# Unused for now
#def backoff_lemmatizer(train_sents, lemmatizer_classes, backoff=None):
# """From Python Text Processing with NLTK Cookbook."""
# for cls in lemmatizer_classes:
# backoff = cls(train_sents, backoff=backoff)
# return backoff
class LemmatizerI(TaggerI):
"""Inherit base tagging class for Latin lemmatizer."""
# def __init__(self):
# TaggerI.__init__(self)
pass
class SequentialBackoffLemmatizer(LemmatizerI, SequentialBackoffTagger):
""""""
def __init__(self, backoff=None):
"""Setup for SequentialBackoffLemmatizer()
:param backoff: Next lemmatizer in backoff chain.
"""
LemmatizerI.__init__(self)
SequentialBackoffTagger.__init__(self, backoff)
def lemmatize(self, tokens):
"""Transform tag method into custom method for lemmatizing tasks. Can
be overwritten by specific instances where list of tokens should
be handled in a different manner. (Cf. IdentityLemmatizer)
:param tokens: List of tokens to be lemmatized
:return: Tuple of the form (TOKEN, LEMMA)
"""
return SequentialBackoffLemmatizer.tag(self, tokens)
def choose_tag(self, tokens, index, history):
"""Override choose_tag with lemmatizer-specific method for various
methods that expect a method with this name.
:param tokens: List of tokens to be lemmatized
:param index: Int with current token
:param history: List with tokens that have already been lemmatized
:return: String with lemma, if found; otherwise NONE
"""
return self.choose_lemma(tokens, index, history)
class DefaultLemmatizer(SequentialBackoffLemmatizer, DefaultTagger):
""""""
def __init__(self, lemma=None):
"""Setup for DefaultLemmatizer().
:param lemma: String with default lemma to be assigned for all tokens;
set to None if no parameter is assigned.
"""
self._lemma = lemma
SequentialBackoffLemmatizer.__init__(self, None)
DefaultTagger.__init__(self, self._lemma)
def choose_lemma(self, tokens, index, history):
return DefaultTagger.choose_tag(self, tokens, index, history)
class IdentityLemmatizer(SequentialBackoffLemmatizer):
""""""
def __init__(self, backoff=None):
"""Setup for IdentityLemmatizer()."""
SequentialBackoffLemmatizer.__init__(self, backoff)
def lemmatize(self, tokens):
"""
Custom lemmatize method for working with identity. No need to
call tagger because token is return as lemma.
:param tokens: List of tokens to be lemmatized
:return: Tuple of the form (TOKEN, LEMMA)
Note: "enumerate" may be better way of handling this loop in general;
compare "range(len(tokens))" in nltk.tag.sequential.
"""
lemmas = []
for i in enumerate(tokens):
lemmas.append(i[1])
return list(zip(tokens, lemmas))
def choose_lemma(self, tokens, index, history):
"""Returns the given token as the lemma.
:param tokens: List of tokens to be lemmatized
:param index: Int with current token
:param history: List with tokens that have already been lemmatized
:return: String, spec. the token found at the current index.
"""
return tokens[index]
class TrainLemmatizer(SequentialBackoffLemmatizer):
"""Standalone version of 'model' function found in UnigramTagger; by
defining as its own class, it is clearer that this lemmatizer is
based on dictionary lookup and does not use training data."""
def __init__(self, model, backoff=None):
"""Setup for TrainLemmatizer().
:param model: Dictionary with form {TOKEN: LEMMA}
:param backoff: Next lemmatizer in backoff chain.
"""
SequentialBackoffLemmatizer.__init__(self, backoff)
self.model = model
def choose_lemma(self, tokens, index, history):
"""Returns the given token as the lemma.
:param tokens: List of tokens to be lemmatized
:param index: Int with current token
:param history: List with tokens that have already been lemmatized; NOT USED
:return: String, spec. the dictionary value found with token as key.
"""
keys = self.model.keys()
if tokens[index] in keys:
return self.model[tokens[index]]
class ContextLemmatizer(SequentialBackoffLemmatizer, ContextTagger):
""""""
def __init__(self, context_to_lemmatize, backoff=None):
"""Setup for ContextLemmatizer().
:param context_to_lemmatize: List of tuples of the form (TOKEN, LEMMA);
this should be 'gold standard' data that can be used to train on a
given context, e.g. unigrams, bigrams, etc.
:param backoff: Next lemmatizer in backoff chain.
"""
SequentialBackoffLemmatizer.__init__(self, backoff)
self._context_to_lemmatize = (context_to_lemmatize if context_to_lemmatize else {})
ContextTagger.__init__(self, self._context_to_lemmatize, backoff)
def choose_lemma(self, tokens, index, history):
return ContextTagger.choose_tag(self, tokens, index, history)
class NgramLemmatizer(ContextLemmatizer, NgramTagger):
""""""
def __init__(self, n, train=None, model=None, backoff=None, cutoff=0):
"""Setup for NgramLemmatizer()
:param n: Int with length of 'n'-gram
:param train: List of tuples of the form (TOKEN, LEMMA)
:param model: Dict; DEPRECATED, use TrainLemmatizer
:param backoff: Next lemmatizer in backoff chain.
:param cutoff: Int with minimum number of matches to choose lemma
"""
self._n = n
self._check_params(train, model)
ContextLemmatizer.__init__(self, model, backoff)
NgramTagger.__init__(self, self._n, train, model, backoff, cutoff)
if train:
# Refactor to remove model? Always train?
self._train(train, cutoff)
def context(self, tokens, index, history):
""""""
return NgramTagger.context(self, tokens, index, history)
class UnigramLemmatizer(NgramLemmatizer, UnigramTagger):
"""Setup for UnigramLemmatizer()"""
def __init__(self, train=None, model=None, backoff=None, cutoff=0):
""""""
NgramLemmatizer.__init__(self, 1, train, model, backoff, cutoff) # Note 1 for unigram
UnigramTagger.__init__(self, train, model, backoff, cutoff)
class RegexpLemmatizer(SequentialBackoffLemmatizer, RegexpTagger):
""""""
def __init__(self, regexps=None, backoff=None):
"""Setup for RegexpLemmatizer()
:param regexps: List of tuples of form (PATTERN, REPLACEMENT)
:param backoff: Next lemmatizer in backoff chain.
"""
SequentialBackoffLemmatizer.__init__(self, backoff)
RegexpTagger.__init__(self, regexps, backoff)
self._regexs = regexps
def choose_lemma(self, tokens, index, history):
"""Use regular expressions for rules-based lemmatizing based on word endings;
        tokens are matched for patterns with the base kept as a group; a word-ending
replacement is added to the (base) group.
:param tokens: List of tokens to be lemmatized
:param index: Int with current token
:param history: List with tokens that have already been lemmatized
:return: Str with concatenated lemma
"""
for pattern, replace in self._regexs:
if re.search(pattern, tokens[index]):
return re.sub(pattern, replace, tokens[index])
break # pragma: no cover
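# Example of the group-keeping substitution above (hypothetical pattern, for illustration
# only -- the real rules come from latin_sub_patterns): with a rule such as
# ('(.)(am|arum|as)$', r'\1a'), a token like 'puellam' is rewritten to 'puella'.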
class PPLemmatizer(RegexpLemmatizer):
"""Customization of the RegexpLemmatizer for Latin. The RegexpLemmatizer is
used as a stemmer; the stem is then applied to a dictionary lookup of
principal parts."""
def __init__(self, regexps=None, pps=None, backoff=None):
"""Setup PPLemmatizer().
:param regexps: List of tuples of form (PATTERN, INT) where INT is
the principal part number needed to lookup the correct stem.
:param backoff: Next lemmatizer in backoff chain.
"""
RegexpLemmatizer.__init__(self, regexps, backoff)
# Note different compile to make use of principal parts dictionary structure; also, note
# that the PP dictionary has been set up so that principal parts match their traditional
# numbering, i.e. present stem is indexed as 1. The 0 index is used for the lemma.
self._regexs = latin_verb_patterns
self.pps = latin_pps
def choose_lemma(self, tokens, index, history):
"""Use regular expressions for rules-based lemmatizing based on
principal parts stems. Tokens are matched for patterns with
the ending kept as a group; the stem is looked up in a dictionary
by PP number (see above) and ending is discarded.
:param tokens: List of tokens to be lemmatized
:param index: Int with current token
:param history: List with tokens that have already been lemmatized
:return: Str with index[0] from the dictionary value, see above about '0 index'
"""
for regexp in self._regexs:
m = re.match(regexp[0], tokens[index])
if m:
root = m.group(1)
match = [lemma for (lemma, pp) in self.pps.items() if root == pp[regexp[1]]]
if not match:
pass
else:
return match[0] # Lemma is indexed at zero in PP dictionary
class RomanNumeralLemmatizer(RegexpLemmatizer):
""""""
def __init__(self, regexps=rn_patterns, default=None, backoff=None):
"""RomanNumeralLemmatizer"""
RegexpLemmatizer.__init__(self, regexps, backoff)
self._regexs = [(re.compile(regexp), pattern,) for regexp, pattern in regexps]
self.default = default
def choose_lemma(self, tokens, index, history):
"""Test case for customized rules-based improvements to lemmatizer using regex; differs
from base RegexpLemmatizer in that it returns the given pattern without stemming,
concatenating, etc.
:param tokens: List of tokens to be lemmatized
:param index: Int with current token
:param history: List with tokens that have already been lemmatized
:return: Str with replacement from pattern
"""
for pattern, replace in self._regexs:
if re.search(pattern, tokens[index]):
if self.default:
return self.default
else:
return replace
break # pragma: no cover
class ContextPOSLemmatizer(ContextLemmatizer):
"""Lemmatizer that combines context with POS-tagging based on
training data. Subclasses define context.
The code for _train closely follows ContextTagger in
https://github.com/nltk/nltk/blob/develop/nltk/tag/sequential.py
This lemmatizer is included here as proof of concept that
lemma disambiguation can be made based on the pattern:
LEMMA & POS of following word.
Should be rewritten to give more flexibility to the kinds
of context that a free word order language demand. I.e. to
study patterns such as:
POS of preceding word & LEMMA
LEMMA & POS of following two words
LEMMA & POS of n-skipgrams
etc.
"""
def __init__(self, context_to_lemmatize, include=None, backoff=None):
"""Setup ContextPOSLemmatizer().
:param context_to_lemmatize: List of tuples of the form (TOKEN, LEMMA);
this should be 'gold standard' data that can be used to train on a
given context, e.g. unigrams, bigrams, etc.
:param include: List of tokens to include, all other tokens return None
from choose_lemma--runs VERY SLOW if no list is given as a parameter
since every token gets POS-tagged. Only tested so far on 'cum'
--also, test data only distinguishes 'cum1'/'cum2'. Further
testing should be done with ambiguous lemmas using Morpheus numbers.
:param backoff: Next lemmatizer in backoff chain.
:param include: List of tokens to consider
"""
# SequentialBackoffLemmatizer.__init__(self, backoff)
ContextLemmatizer.__init__(self, context_to_lemmatize, backoff)
self.include = include
self._context_to_tag = (context_to_lemmatize if context_to_lemmatize else {})
def _get_pos_tags(self, tokens):
"""Iterate through list of tokens and use POS tagger to build
a corresponding list of tags.
:param tokens: List of tokens to be POS-tagged
:return: List with POS-tag for each token
"""
# Import (and define tagger) with other imports?
from cltk.tag.pos import POSTag
tagger = POSTag('latin')
tokens = " ".join(tokens)
tags = tagger.tag_ngram_123_backoff(tokens)
tags = [tag[1][0].lower() if tag[1] else tag[1] for tag in tags]
return tags
def choose_lemma(self, tokens, index, history):
"""Choose lemma based on POS-tag defined by context.
:param tokens: List of tokens to be lemmatized
:param index: Int with current token
:param history: List with POS-tags of tokens that have already
been lemmatized.
:return: String with suggested lemma
"""
if self.include:
if tokens[index] not in self.include:
return None
history = self._get_pos_tags(tokens)
context = self.context(tokens, index, history)
suggested_lemma = self._context_to_tag.get(context)
return suggested_lemma
def _train(self, lemma_pos_corpus, cutoff=0):
"""Override method for _train from ContextTagger in
nltk.tag.sequential. Original _train method expects
tagged corpus of form (TOKEN, LEMMA); this expects in
addition POS-tagging information.
:param lemma_pos_corpus: List of tuples of form (TOKEN, LEMMA, POSTAG)
:param cutoff: Int with minimum number of matches to choose lemma
"""
token_count = hit_count = 0
# A context is considered 'useful' if it's not already lemmatized
# perfectly by the backoff lemmatizer.
useful_contexts = set()
# Count how many times each tag occurs in each context.
fd = ConditionalFreqDist()
for sentence in lemma_pos_corpus:
tokens, lemmas, poss = zip(*sentence)
for index, (token, lemma, pos) in enumerate(sentence):
# Record the event.
token_count += 1
context = self.context(tokens, index, poss)
if context is None: continue
fd[context][lemma] += 1
# If the backoff got it wrong, this context is useful:
if (self.backoff is None or lemma != self.backoff.tag_one(tokens, index, lemmas[:index])): # pylint: disable=line-too-long
useful_contexts.add(context)
# Build the context_to_lemmatize table -- for each context, figure
# out what the most likely lemma is. Only include contexts that
# we've seen at least `cutoff` times.
for context in useful_contexts:
best_lemma = fd[context].max()
hits = fd[context][best_lemma]
if hits > cutoff:
self._context_to_tag[context] = best_lemma
hit_count += hits
class NgramPOSLemmatizer(ContextPOSLemmatizer):
""""""
def __init__(self, n, train=None, model=None, include=None,
backoff=None, cutoff=0):
"""Setup for NgramPOSLemmatizer
:param n: Int with length of 'n'-gram
:param train: List of tuples of the form (TOKEN, LEMMA, POS)
:param model: Dict; DEPRECATED
:param include: List of tokens to consider
:param backoff: Next lemmatizer in backoff chain.
:param cutoff: Int with minimum number of matches to choose lemma
"""
self._n = n
self._check_params(train, model)
ContextPOSLemmatizer.__init__(self, model, include, backoff)
if train:
self._train(train, cutoff)
def context(self, tokens, index, history):
"""Redefines context with look-ahead of length n (not look behind
as in original method).
:param tokens: List of tokens to be lemmatized
:param index: Int with current token
:param history: List with tokens that have already been
tagged/lemmatized
:return: Tuple of the form (TOKEN, (CONTEXT)); CONTEXT will
depend on ngram value, e.g. for bigram ('cum', ('n',)) but
for trigram ('cum', ('n', 'n', ))
"""
lemma_context = tuple(history[index + 1: index + self._n])
return tokens[index], lemma_context
class BigramPOSLemmatizer(NgramPOSLemmatizer):
""""""
def __init__(self, train=None, model=None, include=None,
backoff=None, cutoff=0):
"""Setup for BigramPOSLemmatizer()"""
NgramPOSLemmatizer.__init__(self, 2, train, model,
include, backoff, cutoff)
#class TrigramPOSLemmatizer(NgramPOSLemmatizer):
# """"""
# def __init__(self, train=None, model=None, include=None,
# backoff=None, cutoff=0):
# """Setup for TrigramPOSLemmatizer()"""
# NgramPOSLemmatizer.__init__(self, 3, train, model, include,
# backoff, cutoff)
class BackoffLatinLemmatizer(object):
"""Suggested backoff chain; includes at least on of each
type of major sequential backoff class from backoff.py
### Putting it all together
### BETA Version of the Backoff Lemmatizer AKA BackoffLatinLemmatizer
### For comparison, there is also a TrainLemmatizer that replicates the
### original Latin lemmatizer from cltk.stem
"""
def __init__(self, train, seed=3):
self.train = train
self.seed = seed
rel_path = os.path.join('~/cltk_data/latin/model/latin_models_cltk/lemmata/backoff')
path = os.path.expanduser(rel_path)
# Check for presence of LATIN_OLD_MODEL
file = 'latin_lemmata_cltk.pickle'
old_model_path = os.path.join(path, file)
if os.path.isfile(old_model_path):
self.LATIN_OLD_MODEL = open_pickle(old_model_path)
else:
self.LATIN_OLD_MODEL = {}
print('The file %s is not available in cltk_data' % file)
# Check for presence of LATIN_MODEL
file = 'latin_model.pickle'
model_path = os.path.join(path, file)
if os.path.isfile(model_path):
self.LATIN_MODEL = open_pickle(model_path)
else:
self.LATIN_MODEL = {}
print('The file %s is not available in cltk_data' % file)
# Check for presence of misc_patterns
self.latin_sub_patterns = latin_sub_patterns
# Check for presence of verb_patterns
self.latin_verb_patterns = latin_verb_patterns
# Check for presence of latin_pps
self.latin_pps = latin_pps
def _randomize_data(train, seed):
import random
random.seed(seed)
random.shuffle(train)
pos_train_sents = train[:4000]
lem_train_sents = [[(item[0], item[1]) for item in sent] for sent in train]
train_sents = lem_train_sents[:4000]
test_sents = lem_train_sents[4000:5000]
return pos_train_sents, train_sents, test_sents
self.pos_train_sents, self.train_sents, self.test_sents = _randomize_data(self.train, self.seed)
def _define_lemmatizer(self):
# Suggested backoff chain--should be tested for optimal order
backoff0 = None
backoff1 = IdentityLemmatizer()
backoff2 = TrainLemmatizer(model=self.LATIN_OLD_MODEL, backoff=backoff1)
backoff3 = PPLemmatizer(regexps=self.latin_verb_patterns, pps=self.latin_pps, backoff=backoff2)
backoff4 = RegexpLemmatizer(self.latin_sub_patterns, backoff=backoff3)
backoff5 = UnigramLemmatizer(self.train_sents, backoff=backoff4)
backoff6 = TrainLemmatizer(model=self.LATIN_MODEL, backoff=backoff5)
#backoff7 = BigramPOSLemmatizer(self.pos_train_sents, include=['cum'], backoff=backoff6)
#lemmatizer = backoff7
lemmatizer = backoff6
return lemmatizer
def lemmatize(self, tokens):
lemmatizer = self._define_lemmatizer()
lemmas = lemmatizer.lemmatize(tokens)
return lemmas
def evaluate(self):
lemmatizer = self._define_lemmatizer()
return lemmatizer.evaluate(self.test_sents)
# Accuracy test available below -- keep? delete?
#if __name__ == "__main__":
#
# # Set up training sentences
# rel_path = os.path.join('~/cltk_data/latin/model/latin_models_cltk/lemmata/backoff')
# path = os.path.expanduser(rel_path)
#
# # Check for presence of latin_pos_lemmatized_sents
# file = 'latin_pos_lemmatized_sents.pickle'
#
# latin_pos_lemmatized_sents_path = os.path.join(path, file)
# if os.path.isfile(latin_pos_lemmatized_sents_path):
# latin_pos_lemmatized_sents = open_pickle(latin_pos_lemmatized_sents_path)
# else:
# latin_pos_lemmatized_sents = []
# print('The file %s is not available in cltk_data' % file)
#
#
# RUN = 10
# ACCURACIES = []
#
# for I in range(RUN):
# LEMMATIZER = BackoffLatinLemmatizer(latin_pos_lemmatized_sents)
# ACC = LEMMATIZER.evaluate()
# ACCURACIES.append(ACC)
# print('{:.2%}'.format(ACC))
#
# print('\nTOTAL (Run %d) times' % RUN)
# print('{:.2%}'.format(sum(ACCURACIES) / RUN))
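# Minimal usage sketch (assumes the latin_pos_lemmatized_sents training data from
# latin_models_cltk is available, as in the commented-out test above):
#   lemmatizer = BackoffLatinLemmatizer(latin_pos_lemmatized_sents)
#   lemmatizer.lemmatize('arma virumque cano'.split())  # -> list of (token, lemma) tuples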
| [
"[email protected]"
] | |
efefb440146c23d804a17792e39d091c1a94ae26 | 06476bc4cb7fc3ce378beb357fac7d5aacb87b3b | /Prototype/env/lib/python3.8/site-packages/Xlib/xobject/icccm.py | a328925ed9e58bccae33040d004ca1fabba4d98d | [
"MIT"
] | permissive | marc-ortuno/VOPEC | 44d3a74d3e0686474dd57fcb21e845fd5fd48897 | e7ed1f13cc1868a824f4036dd08ec6bed4266c08 | refs/heads/main | 2023-06-12T19:15:18.060897 | 2021-07-01T17:15:03 | 2021-07-01T17:15:03 | 344,433,646 | 0 | 0 | MIT | 2021-06-14T19:15:47 | 2021-03-04T10:22:05 | Python | UTF-8 | Python | false | false | 3,441 | py | # Xlib.xobject.icccm -- ICCCM structures
#
# Copyright (C) 2000 Peter Liljenberg <[email protected]>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License
# as published by the Free Software Foundation; either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc.,
# 59 Temple Place,
# Suite 330,
# Boston, MA 02111-1307 USA
from Xlib import X, Xutil
from Xlib.protocol import rq
Aspect = rq.Struct( rq.Int32('num'), rq.Int32('denum') )
WMNormalHints = rq.Struct( rq.Card32('flags'),
rq.Pad(16),
rq.Int32('min_width', default = 0),
rq.Int32('min_height', default = 0),
rq.Int32('max_width', default = 0),
rq.Int32('max_height', default = 0),
rq.Int32('width_inc', default = 0),
rq.Int32('height_inc', default = 0),
rq.Object('min_aspect', Aspect, default = (0, 0)),
rq.Object('max_aspect', Aspect, default = (0, 0)),
rq.Int32('base_width', default = 0),
rq.Int32('base_height', default = 0),
rq.Int32('win_gravity', default = 0),
)
WMHints = rq.Struct( rq.Card32('flags'),
rq.Card32('input', default = 0),
rq.Set('initial_state', 4,
# withdrawn is totally bogus according to
# ICCCM, but some window managers seem to
# use this value to identify dockapps.
# Oh well.
( Xutil.WithdrawnState,
Xutil.NormalState,
Xutil.IconicState ),
default = Xutil.NormalState),
rq.Pixmap('icon_pixmap', default = 0),
rq.Window('icon_window', default = 0),
rq.Int32('icon_x', default = 0),
rq.Int32('icon_y', default = 0),
rq.Pixmap('icon_mask', default = 0),
rq.Window('window_group', default = 0),
)
WMState = rq.Struct( rq.Set('state', 4,
( Xutil.WithdrawnState,
Xutil.NormalState,
Xutil.IconicState )),
rq.Window('icon', ( X.NONE, )),
)
WMIconSize = rq.Struct( rq.Card32('min_width'),
rq.Card32('min_height'),
rq.Card32('max_width'),
rq.Card32('max_height'),
rq.Card32('width_inc'),
rq.Card32('height_inc'),
)
| [
"[email protected]"
] | |
a1ad11fe81cbafd2634f7e88da34d940617525ed | 0728a2e165808cfe5651693a6e7f47804bfb085f | /get/2013/site/getmyad/tests/functional/test_private.py | bb8b9931e924edaa1d8c9b4539e9f53d599872dc | [] | no_license | testTemtProj/OLD_PROJECT | 5b026e072017f5135159b0940370fda860241d39 | 9e5b165f4e8acf9003536e05dcefd33a5ae46890 | refs/heads/master | 2020-05-18T15:30:24.543319 | 2013-07-23T15:17:32 | 2013-07-23T15:17:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 203 | py | from getmyad.tests import *
class TestPrivateController(TestController):
def test_index(self):
response = self.app.get(url(controller='private', action='index'))
# Test response...
| [
"[email protected]"
] | |
bb0ce9645fde1dd12f1cdcbc2c425aca062c074a | 981fcfe446a0289752790fd0c5be24020cbaee07 | /python2_Grammer/src/basic/zhengze/rool/字符集和数量/字符集/5_单个字符.py | a5c6ad067a51536c1c61d9cec1f9965226efcb1d | [] | no_license | lijianbo0130/My_Python | 7ba45a631049f6defec3977e680cd9bd75d138d1 | 8bd7548c97d2e6d2982070e949f1433232db9e07 | refs/heads/master | 2020-12-24T18:42:19.103529 | 2016-05-30T03:03:34 | 2016-05-30T03:03:34 | 58,097,799 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 331 | py | #coding=utf-8
'''
Created on Aug 4, 2015
@author: Administrator
'''
from __future__ import division
import sys
reload(sys)
sys.setdefaultencoding('utf-8') # @UndefinedVariable
import re
# Word character: \w matches [A-Za-z0-9_] (note that it includes '_')
# Non-word character: \W
lis = re.findall(r"\w", "_ppa")  # \w matches '_' as well
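# (For contrast, \W picks out the non-word characters; illustrative addition, not in the original:
#  re.findall(r"\W", "_pp a!") returns [' ', '!'].)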
print lis # ['_', 'p', 'p', 'a'] | [
"[email protected]"
] | |
f42bc817dcd318885005c9843c46c7c2fbb6a3a8 | 83934c40b2bd835464732345fa516b2c657a6259 | /Pyrado/scripts/training/qq-su_bayrn_power_sim2sim.py | bb7f330fbe57985f6ca1ae10001237a0591dbaea | [
"BSD-2-Clause",
"BSD-3-Clause"
] | permissive | 1abner1/SimuRLacra | e0427bf4f2459dcb992206d3b2f347beab68a5b4 | d7e9cd191ccb318d5f1e580babc2fc38b5b3675a | refs/heads/master | 2023-05-25T04:52:17.917649 | 2021-06-07T07:26:44 | 2021-06-07T07:26:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,840 | py | # Copyright (c) 2020, Fabio Muratore, Honda Research Institute Europe GmbH, and
# Technical University of Darmstadt.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of Fabio Muratore, Honda Research Institute Europe GmbH,
# or Technical University of Darmstadt, nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL FABIO MURATORE, HONDA RESEARCH INSTITUTE EUROPE GMBH,
# OR TECHNICAL UNIVERSITY OF DARMSTADT BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
# IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
Train an agent to solve the Qube swing-up task using Bayesian Domain Randomization.
"""
import numpy as np
import pyrado
from pyrado.algorithms.episodic.power import PoWER
from pyrado.algorithms.meta.bayrn import BayRn
from pyrado.domain_randomization.default_randomizers import (
create_default_domain_param_map_qq,
create_zero_var_randomizer,
)
from pyrado.domain_randomization.utils import wrap_like_other_env
from pyrado.environment_wrappers.domain_randomization import DomainRandWrapperLive, MetaDomainRandWrapper
from pyrado.environments.pysim.quanser_qube import QQubeSwingUpSim
from pyrado.logger.experiment import save_dicts_to_yaml, setup_experiment
from pyrado.policies.special.environment_specific import QQubeSwingUpAndBalanceCtrl
from pyrado.spaces import BoxSpace
from pyrado.utils.argparser import get_argparser
if __name__ == "__main__":
# Parse command line arguments
args = get_argparser().parse_args()
# Experiment (set seed before creating the modules)
ex_dir = setup_experiment(
QQubeSwingUpSim.name,
f"{BayRn.name}-{PoWER.name}_{QQubeSwingUpAndBalanceCtrl.name}",
f"sim2sim_rand-Mp-Mr_seed-{args.seed}",
)
# Set seed if desired
pyrado.set_seed(args.seed, verbose=True)
# Environments
env_sim_hparams = dict(dt=1 / 100.0, max_steps=600)
env_sim = QQubeSwingUpSim(**env_sim_hparams)
env_sim = DomainRandWrapperLive(env_sim, create_zero_var_randomizer(env_sim))
dp_map = create_default_domain_param_map_qq()
env_sim = MetaDomainRandWrapper(env_sim, dp_map)
env_real = QQubeSwingUpSim(**env_sim_hparams)
env_real.domain_param = dict(
Mp=0.024 * 1.1,
Mr=0.095 * 1.1,
)
env_real_hparams = env_sim_hparams
env_real = wrap_like_other_env(env_real, env_sim)
# PoWER and energy-based controller setup
policy_hparam = dict(energy_gain=0.587, ref_energy=0.827, acc_max=10.0)
policy = QQubeSwingUpAndBalanceCtrl(env_sim.spec, **policy_hparam)
subrtn_hparam = dict(
max_iter=5,
pop_size=50,
num_init_states_per_domain=4,
num_domains=10,
num_is_samples=5,
expl_std_init=2.0,
expl_std_min=0.02,
symm_sampling=False,
num_workers=12,
)
subrtn = PoWER(ex_dir, env_sim, policy, **subrtn_hparam)
# PoWER and linear policy setup
# policy_hparam = dict(
# feats=FeatureStack(identity_feat, sign_feat, abs_feat, squared_feat,
# MultFeat((2, 5)), MultFeat((3, 5)), MultFeat((4, 5)))
# )
# policy = LinearPolicy(spec=env_sim.spec, **policy_hparam)
# subrtn_hparam = dict(
# max_iter=20,
# pop_size=200,
# num_init_states_per_domain=6,
# num_is_samples=10,
# expl_std_init=2.0,
# expl_std_min=0.02,
# symm_sampling=False,
# num_workers=32,
# )
# subrtn = PoWER(ex_dir, env_sim, policy, **subrtn_hparam)
# Set the boundaries for the GP
dp_nom = QQubeSwingUpSim.get_nominal_domain_param()
ddp_space = BoxSpace(
bound_lo=np.array([0.8 * dp_nom["Mp"], 1e-8, 0.8 * dp_nom["Mr"], 1e-8]),
bound_up=np.array([1.2 * dp_nom["Mp"], 1e-7, 1.2 * dp_nom["Mr"], 1e-7]),
)
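    # The search space above spans 0.8x-1.2x the nominal pendulum and rotary-arm masses;
    # the second and fourth dimensions (presumably the randomisation spread mapped by dp_map)
    # are pinned to tiny values between 1e-8 and 1e-7.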
# Algorithm
bayrn_hparam = dict(
max_iter=15,
acq_fc="UCB",
acq_param=dict(beta=0.25),
acq_restarts=500,
acq_samples=1000,
num_init_cand=4,
warmstart=False,
num_eval_rollouts_real=100,
thold_succ_subrtn=300,
)
# Save the environments and the hyper-parameters (do it before the init routine of BayRn)
save_dicts_to_yaml(
dict(env_sim=env_sim_hparams, env_real=env_real_hparams, seed=args.seed),
dict(policy=policy_hparam),
dict(subrtn=subrtn_hparam, subrtn_name=PoWER.name),
dict(algo=bayrn_hparam, algo_name=BayRn.name, dp_map=dp_map),
save_dir=ex_dir,
)
algo = BayRn(ex_dir, env_sim, env_real, subrtn, ddp_space, **bayrn_hparam)
# Jeeeha
algo.train(snapshot_mode="latest", seed=args.seed)
| [
"[email protected]"
] | |
04f3348dcba79ceb132538619203da84de297413 | ad13583673551857615498b9605d9dcab63bb2c3 | /output/instances/nistData/atomic/byte/Schema+Instance/NISTXML-SV-IV-atomic-byte-maxExclusive-3-1.py | 2e778fde1dbab8cd334f90bfc4b9a342ede77976 | [
"MIT"
] | permissive | tefra/xsdata-w3c-tests | 397180205a735b06170aa188f1f39451d2089815 | 081d0908382a0e0b29c8ee9caca6f1c0e36dd6db | refs/heads/main | 2023-08-03T04:25:37.841917 | 2023-07-29T17:10:13 | 2023-07-30T12:11:13 | 239,622,251 | 2 | 0 | MIT | 2023-07-25T14:19:04 | 2020-02-10T21:59:47 | Python | UTF-8 | Python | false | false | 260 | py | from output.models.nist_data.atomic.byte.schema_instance.nistschema_sv_iv_atomic_byte_max_exclusive_3_xsd.nistschema_sv_iv_atomic_byte_max_exclusive_3 import NistschemaSvIvAtomicByteMaxExclusive3
obj = NistschemaSvIvAtomicByteMaxExclusive3(
value=-128
)
| [
"[email protected]"
] | |
849dc7bec027beb9388173ab0f4d7875af17de51 | 09c87fe780df6d1f9eb33799ed516a0bbd7ab1e3 | /src/tests/python-in/testmodule_pynsource.py | 735e0b98628d7ea19d2e62d5bcea3176a8213c7a | [] | no_license | abulka/pynsource | 8ad412b85dc1acaeb83d7d34af8cc033c6baba91 | 979436525c57fdaeaa832e960985e0406e123587 | refs/heads/master | 2023-04-13T12:58:02.911318 | 2023-04-11T09:56:32 | 2023-04-11T09:56:32 | 32,249,425 | 271 | 46 | null | 2022-10-10T04:36:57 | 2015-03-15T07:21:43 | Python | UTF-8 | Python | false | false | 3,827 | py | # pynsource command line tool
import os
# from core_parser import *
from generate_code.gen_asciiart import CmdLinePythonToAsciiArt
from generate_code.gen_yuml import CmdLinePythonToYuml
from generate_code.gen_delphi import CmdLinePythonToDelphi
from generate_code.gen_java import CmdLinePythonToJava
import messages
def test():
# FILE = "..\\tests\\python-in\\testmodule01.py"
FILE = "..\\tests\\python-in\\testmodule66.py"
# p = PySourceAsText()
p = PySourceAsYuml()
# p.optionModuleAsClass = True
p.Parse(FILE)
# print '*'*20, 'parsing', FILE, '*'*20
print(p)
# print 'Done.'
def ParseArgsAndRun():
import sys, glob
import getopt # good doco http://www.doughellmann.com/PyMOTW/getopt/
# should possibly upgrade to using http://docs.python.org/library/argparse.html#module-argparse
SIMPLE = 0
globbed = []
optionVerbose = 0
optionModuleAsClass = 0
optionExportToJava = 0
optionExportToDelphi = 0
optionExportToYuml = False
optionExportTo_outdir = ""
if SIMPLE:
params = sys.argv[1]
globbed = glob.glob(params)
else:
listofoptionvaluepairs, params = getopt.getopt(sys.argv[1:], "amvy:j:d:")
# print listofoptionvaluepairs, params
# print dict(listofoptionvaluepairs) # turn e.g. [('-v', ''), ('-y', 'fred.png')] into nicer? dict e.g. {'-v': '', '-y': 'fred.png'}
def EnsurePathExists(outdir, outlanguagemsg):
assert outdir, "Need to specify output folder for %s output - got %s." % (
outlanguagemsg,
outdir,
)
if not os.path.exists(outdir):
raise RuntimeError(
"Output directory %s for %s file output does not exist."
% (outdir, outlanguagemsg)
)
for optionvaluepair in listofoptionvaluepairs:
if "-a" == optionvaluepair[0]:
pass # default is asciart, so don't need to specify
if "-m" == optionvaluepair[0]:
optionModuleAsClass = 1
if "-v" == optionvaluepair[0]:
optionVerbose = 1
if optionvaluepair[0] in ("-j", "-d"):
if optionvaluepair[0] == "-j":
optionExportToJava = 1
language = "Java"
else:
optionExportToDelphi = 1
language = "Delphi"
optionExportTo_outdir = optionvaluepair[1]
EnsurePathExists(optionExportTo_outdir, language)
if optionvaluepair[0] in ("-y"):
optionExportToYuml = True
optionExportTo_outpng = optionvaluepair[1]
for param in params:
files = glob.glob(param)
globbed += files
if globbed:
if optionExportToJava or optionExportToDelphi:
if optionExportToJava:
u = CmdLinePythonToJava(
globbed, treatmoduleasclass=optionModuleAsClass, verbose=optionVerbose
)
else:
u = CmdLinePythonToDelphi(
globbed, treatmoduleasclass=optionModuleAsClass, verbose=optionVerbose
)
u.ExportTo(optionExportTo_outdir)
elif optionExportToYuml:
u = CmdLinePythonToYuml(
globbed, treatmoduleasclass=optionModuleAsClass, verbose=optionVerbose
)
u.ExportTo(optionExportTo_outpng)
else:
u = CmdLinePythonToAsciiArt(
globbed, treatmoduleasclass=optionModuleAsClass, verbose=optionVerbose
)
u.ExportTo(None)
else:
print(messages.HELP_COMMAND_LINE_USAGE)
if __name__ == "__main__":
# test()
# exit(0)
ParseArgsAndRun()
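# Example invocations (illustrative; the flags mirror the getopt string "amvy:j:d:" parsed above):
#   python testmodule_pynsource.py *.py               # ascii-art UML (default) to stdout
#   python testmodule_pynsource.py -y out.png *.py    # yUML export
#   python testmodule_pynsource.py -j outdir *.py     # Java skeletons into an existing folder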
| [
"[email protected]"
] | |
736177be6e62fa382ac47be5d33fbdc6148042ad | 98c6ea9c884152e8340605a706efefbea6170be5 | /examples/data/Assignment_1/brkluk001/question2.py | 3b83362c9673632335a5e01fda0b228f12c0c017 | [] | no_license | MrHamdulay/csc3-capstone | 479d659e1dcd28040e83ebd9e3374d0ccc0c6817 | 6f0fa0fa1555ceb1b0fb33f25e9694e68b6a53d2 | refs/heads/master | 2021-03-12T21:55:57.781339 | 2014-09-22T02:22:22 | 2014-09-22T02:22:22 | 22,372,174 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 336 | py | validity = 'invalid.'
hours = eval(input('Enter the hours:\n'))
minutes = eval(input('Enter the minutes:\n'))
seconds = eval(input('Enter the seconds:\n'))
if 0 <= hours <= 23:
    if 0 <= minutes <= 59:
        if 0 <= seconds <= 59:
validity = 'valid.'
print('Your time is',validity)
| [
"[email protected]"
] | |
528771585ec232b3bbad088c1ceb97469132ec70 | baaeb8c1d335e258fd49b5ef024ac39790fd660f | /backend/test/test_invite.py | 8701475410a01fb8aadf206d8a29c2720cb644a6 | [] | no_license | ReactARDev/React_Redux_Python | f0b80a9d2a603b38f8e144966bc899c5aa3690e6 | afdb4a55f82fdff86686ad955448a4168d05c739 | refs/heads/master | 2021-10-10T19:28:05.142652 | 2019-01-15T21:24:06 | 2019-01-15T21:24:06 | 159,198,417 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 24,757 | py | import json
import test_app
import factories
import pprint
from app import db_session_users
from schemas.base_users import User, UserAgency, UserFollowedEntity, MarketingCampaign, UserTopic, Subscription, UserFolder, AggregatedAnnotations
num_of_default_agencies_at_signup = 5
class RegisterTest(test_app.AppTest):
def test_invite(self):
emails = ['[email protected]', '[email protected]']
for i, email in enumerate(emails):
num_users = test_app.db_session_users.query(test_app.base_users.User)\
.filter_by(email=email).count()
self.assertEqual(0, num_users)
# N.B. upper case the second example email in the initial invite request to simulate a scenario
# where the user first sent it to us upper cased. the value remains otherwise lower case, so validation
# below should all still work
req_body = json.dumps({'email': email.upper() if i == 1 else email})
resp = self.client.post(
"/invite",
headers={'Authorization': self.admin_user_token},
data=req_body
)
self.assert200(resp)
new_user = db_session_users.query(User).filter_by(email=email).first()
self.assertFalse(new_user.enabled)
reset_token = new_user.reset_token
self.assertIsNotNone(reset_token)
# don't allow a second submission
resp = self.client.post(
"/invite",
headers={'Authorization': self.admin_user_token},
data=req_body
)
self.assert400(resp)
# fails for non-admin user
resp = self.client.post(
"/invite",
headers={'Authorization': self.token},
data=req_body
)
self.assert403(resp)
# ...unless resend is true
req_body = json.dumps({'email': email, 'resend': True})
resp = self.client.post(
"/invite",
headers={'Authorization': self.admin_user_token},
data=req_body
)
self.assert200(resp)
self.assertIn('resent_invite_time', new_user.properties)
self.assertNotEqual(reset_token, new_user.reset_token)
req_body = json.dumps({
"first_name": "First",
"last_name": "Last",
"email": email,
"token": new_user.reset_token,
"new_password": "somethingnew",
"agencies": [80, 188],
"other_agencies": "Something you didn't think of",
"topics": [1, 2, 3],
"other_topics": "Something else",
})
resp = self.client.post('/activate', data=req_body)
self.assert200(resp)
db_session_users.refresh(new_user)
self.assertIsInstance(new_user.properties['activation_time'], unicode)
self.assertTrue(new_user.enabled)
def test_activation(self):
user = factories.UserFactory.build(
first_name=None,
last_name=None,
)
user.reset_token = 'foo'
orig_props = { 'property': 'exists', 'arrayprop': [1,2,3,4]}
user.properties = orig_props
user.enabled = False
db_session_users.add(user)
db_session_users.flush()
db_session_users.refresh(user)
initial_hash = user.password_hash
req_body = json.dumps({
"first_name": "First",
"last_name": "Last",
"email": user.email,
"token": "foo",
"new_password": "somethingnew",
"agencies": [80, 188, 78987958795], # one invalid id
# XXX these aren't really state agencies because they're not in the fixture:
"state_agencies": ["US-CA", "US-NY"],
"other_agencies": "Something you didn't think of",
"other_state_agencies": "California dreams",
"other_topics": "Something else",
'is_contributor': True
})
resp = self.client.post('/activate', data=req_body)
self.assert200(resp)
new_user = db_session_users.query(User).filter_by(email=user.email).first()
self.assertIsNone(new_user.reset_token)
self.assertNotEqual(initial_hash, new_user.password_hash)
self.assertEqual('First', new_user.first_name)
self.assertEqual('Last', new_user.last_name)
self.assertTrue(new_user.enabled)
self.assertDictContainsSubset(orig_props, new_user.properties)
self.assertTrue('contributor' in new_user.roles)
subscription = db_session_users.query(Subscription).filter_by(user_id=user.id).first()
self.assertEqual('free_trial', subscription.stripe_id)
self.assertEqual(True, subscription.latest)
self.assertEqual('active', subscription.status)
folders = db_session_users.query(UserFolder).filter_by(user_id=user.id).all()
bookmarked = filter(lambda folder : folder.name == 'Bookmarked', folders)
read = filter(lambda folder : folder.name == 'Read', folders)
self.assertIsInstance(folders, list)
self.assertEqual(len(folders), 2)
self.assertEqual(len(bookmarked), 1)
self.assertEqual(len(read), 1)
for p in ['other_topics', 'other_agencies', 'other_state_agencies']:
self.assertIn(p, new_user.properties)
self.assertIsInstance(new_user.properties.get(p), unicode)
for p in ['agencies', 'state_agencies']:
self.assertIn(p, new_user.properties)
self.assertIsInstance(new_user.properties.get(p), list)
num_user_agencies = db_session_users.query(UserAgency).filter_by(user_id=user.id).count()
self.assertEqual(num_of_default_agencies_at_signup, num_user_agencies) # should not include invalid selection
num_user_entities = db_session_users.query(UserFollowedEntity).filter_by(user_id=user.id).count()
self.assertEqual(4, num_user_entities)
num_news_entities = db_session_users.query(UserFollowedEntity).filter_by(user_id=user.id, entity_type='news_sources').count()
self.assertEqual(2, num_news_entities)
num_user_topics = db_session_users.query(UserTopic).filter_by(user_id=user.id).count()
self.assertEqual(len(AggregatedAnnotations.topic_id_name_mapping.keys()), num_user_topics)
# dry run should now fail
req_body = json.dumps({
'email': user.email,
'token': 'does not matter',
'dry_run': True,
})
resp = self.client.post('/activate', data=req_body)
self.assert400(resp)
self.assertRegexpMatches(resp.json['error'], r'enabled')
def test_activation_with_edu_email(self):
user = factories.UserFactory.build(
first_name=None,
last_name=None,
)
user.email = '[email protected]'
user.reset_token = 'foo'
orig_props = { 'property': 'exists', 'arrayprop': [1,2,3,4]}
user.properties = orig_props
user.enabled = False
db_session_users.add(user)
db_session_users.flush()
db_session_users.refresh(user)
initial_hash = user.password_hash
req_body = json.dumps({
"first_name": "First",
"last_name": "Last",
"email": user.email,
"token": "foo",
"new_password": "somethingnew",
"agencies": [80, 188, 78987958795], # one invalid id
# XXX these aren't really state agencies because they're not in the fixture:
"state_agencies": ["US-CA", "US-NY"],
"other_agencies": "Something you didn't think of",
"other_state_agencies": "California dreams",
"other_topics": "Something else",
'is_contributor': True
})
resp = self.client.post('/activate', data=req_body)
self.assert200(resp)
subscription = db_session_users.query(Subscription).filter_by(user_id=user.id).first()
self.assertEqual('free_trial_120months', subscription.stripe_id)
self.assertEqual(True, subscription.latest)
self.assertEqual('active', subscription.status)
def test_activation_dry_run(self):
user = factories.UserFactory.build(
first_name=None,
last_name=None,
)
user.reset_token = 'bar'
user.enabled = False
db_session_users.add(user)
db_session_users.flush()
db_session_users.refresh(user)
# try with a valid email/token first
req_body = json.dumps({
'email': user.email,
'token': 'bar',
'dry_run': True,
})
resp = self.client.post('/activate', data=req_body)
self.assert200(resp)
self.assertFalse(resp.json['marketing_campaign'])
# invalid token
req_body = json.dumps({
'email': user.email,
'token': 'baz',
'dry_run': True,
})
resp = self.client.post('/activate', data=req_body)
self.assert400(resp)
# invalid email
req_body = json.dumps({
'email': '[email protected]',
'token': 'bar',
'dry_run': True,
})
resp = self.client.post('/activate', data=req_body)
self.assert400(resp)
# missing email
req_body = json.dumps({
'email': None,
'token': 'bar',
'dry_run': True,
})
resp = self.client.post('/activate', data=req_body)
self.assert400(resp)
def test_activation_marketing_campaign(self):
marketing_campaign = MarketingCampaign(name='foo', start_date="01/01/2017", end_date="01/05/2017", notes='bar', created_by_user_id=self.user.id)
marketing_campaign.gen_token()
db_session_users.add(marketing_campaign)
db_session_users.commit()
token = marketing_campaign.token
# try with a valid email/token first
req_body = json.dumps({
'token': token,
'dry_run': True,
})
resp = self.client.post('/activate', data=req_body)
self.assert200(resp)
self.assertTrue(resp.json['marketing_campaign'])
signup_email = "[email protected]"
req_body = json.dumps({
"first_name": "First",
"last_name": "Last",
"email": signup_email,
"token": token,
"new_password": "somethingnew",
"agencies": [80, 188, 78987958795], # one invalid id
# XXX these aren't really state agencies because they're not in the fixture:
"state_agencies": ["US-CA", "US-NY"],
"other_agencies": "Something you didn't think of",
"other_state_agencies": "California dreams",
"topics": [1, 2, 3],
"other_topics": "Something else",
})
resp = self.client.post('/activate', data=req_body)
self.assert200(resp)
self.assertIsInstance(resp.json['jwt_token'], unicode)
new_user = db_session_users.query(User).filter_by(email=signup_email).first()
self.assertIsInstance(new_user.reset_token, unicode)
self.assertEqual('First', new_user.first_name)
self.assertEqual('Last', new_user.last_name)
self.assertFalse(new_user.enabled)
self.assertIsInstance(new_user.password_hash, unicode)
self.assertEqual(len(new_user.marketing_campaigns), 1)
for p in ['other_topics', 'other_agencies', 'other_state_agencies']:
self.assertIn(p, new_user.properties)
self.assertIsInstance(new_user.properties.get(p), unicode)
for p in ['agencies', 'state_agencies']:
self.assertIn(p, new_user.properties)
self.assertIsInstance(new_user.properties.get(p), list)
num_user_agencies = db_session_users.query(UserAgency).filter_by(user_id=new_user.id).count()
self.assertEqual(num_of_default_agencies_at_signup, num_user_agencies) # should not include invalid selection
num_user_entities = db_session_users.query(UserFollowedEntity).filter_by(user_id=new_user.id).count()
self.assertEqual(4, num_user_entities)
num_news_entities = db_session_users.query(UserFollowedEntity).filter_by(user_id=new_user.id, entity_type='news_sources').count()
self.assertEqual(2, num_news_entities)
num_user_topics = db_session_users.query(UserTopic).filter_by(user_id=new_user.id).count()
self.assertEqual(3, num_user_topics)
# validate access works with temporary token
access_resp = self.client.get("/current_user", headers={'Authorization': resp.json['jwt_token']})
self.assert200(access_resp)
# run an extra api call that should fail on /activate with this email to confirm the token is not overwritten
req_body = json.dumps({
"email": signup_email,
"new_password": "foo"
})
resp = self.client.post('/activate', data=req_body)
self.assert400(resp)
# finally, use the confirm route to enable the user
req_body = json.dumps({
"email": signup_email,
"token": new_user.reset_token
})
        resp = self.client.post('/confirm', data=req_body)
        self.assert200(resp)
new_user = db_session_users.query(User).filter_by(email=signup_email).first()
self.assertTrue(new_user.enabled)
self.assertIn('confirmed_date', new_user.properties)
def test_activation_no_token(self):
# try with a valid email/token first
req_body = json.dumps({
'dry_run': True,
})
resp = self.client.post('/activate', data=req_body)
self.assert200(resp)
self.assertFalse(resp.json['marketing_campaign'])
signup_email = "[email protected]"
req_body = json.dumps({
"first_name": "First",
"last_name": "Last",
"email": signup_email,
"new_password": "somethingnew",
"agencies": [80, 188, 78987958795], # one invalid id
# XXX these aren't really state agencies because they're not in the fixture:
"state_agencies": ["US-CA", "US-NY"],
"other_agencies": "Something you didn't think of",
"other_state_agencies": "California dreams",
"topics": [1, 2, 3],
"other_topics": "Something else",
})
resp = self.client.post('/activate', data=req_body)
self.assert200(resp)
self.assertIsInstance(resp.json['jwt_token'], unicode)
new_user = db_session_users.query(User).filter_by(email=signup_email).first()
self.assertIsInstance(new_user.reset_token, unicode)
self.assertEqual('First', new_user.first_name)
self.assertEqual('Last', new_user.last_name)
self.assertFalse(new_user.enabled)
self.assertIsInstance(new_user.password_hash, unicode)
self.assertEqual(len(new_user.marketing_campaigns), 0)
for p in ['other_topics', 'other_agencies', 'other_state_agencies']:
self.assertIn(p, new_user.properties)
self.assertIsInstance(new_user.properties.get(p), unicode)
for p in ['agencies', 'state_agencies']:
self.assertIn(p, new_user.properties)
self.assertIsInstance(new_user.properties.get(p), list)
num_user_agencies = db_session_users.query(UserAgency).filter_by(user_id=new_user.id).count()
self.assertEqual(num_of_default_agencies_at_signup, num_user_agencies) # should not include invalid selection
num_user_entities = db_session_users.query(UserFollowedEntity).filter_by(user_id=new_user.id).count()
self.assertEqual(4, num_user_entities)
num_news_entities = db_session_users.query(UserFollowedEntity).filter_by(user_id=new_user.id, entity_type='news_sources').count()
self.assertEqual(2, num_news_entities)
num_user_topics = db_session_users.query(UserTopic).filter_by(user_id=new_user.id).count()
self.assertEqual(3, num_user_topics)
# validate access works with temporary token
access_resp = self.client.get("/current_user", headers={'Authorization': resp.json['jwt_token']})
self.assert200(access_resp)
# run an extra api call that should fail on /activate with this email to confirm the token is not overwritten
req_body = json.dumps({
"email": signup_email,
"new_password": "foo"
})
resp = self.client.post('/activate', data=req_body)
self.assert400(resp)
# finally, use the confirm route to enable the user
req_body = json.dumps({
"email": signup_email,
"token": new_user.reset_token
})
resp = self.client.post('/confirm', data=req_body)
self.assert200(resp)
new_user = db_session_users.query(User).filter_by(email=signup_email).first()
self.assertTrue(new_user.enabled)
self.assertIn('confirmed_date', new_user.properties)
resp = self.client.post('/confirm', data=req_body)
self.assert400(resp)
def test_invite_mixed(self):
emails = ['[email protected]', '[email protected]']
for i, email in enumerate(emails):
num_users = test_app.db_session_users.query(test_app.base_users.User)\
.filter_by(email=email).count()
self.assertEqual(0, num_users)
            # N.B. upper-case the second example email in the initial invite request to simulate a
            # scenario where the user originally sent it to us upper-cased. The value stays lower
            # case everywhere else, so the validations below should still pass.
req_body = json.dumps({'email': email.upper() if i == 1 else email})
resp = self.client.post(
"/invite",
headers={'Authorization': self.admin_user_token},
data=req_body
)
self.assert200(resp)
new_user = db_session_users.query(User).filter_by(email=email).first()
self.assertFalse(new_user.enabled)
reset_token = new_user.reset_token
self.assertIsNotNone(reset_token)
# don't allow a second submission
resp = self.client.post(
"/invite",
headers={'Authorization': self.admin_user_token},
data=req_body
)
self.assert400(resp)
# fails for non-admin user
resp = self.client.post(
"/invite",
headers={'Authorization': self.token},
data=req_body
)
self.assert403(resp)
# ...unless resend is true
req_body = json.dumps({'email': email, 'resend': True})
resp = self.client.post(
"/invite",
headers={'Authorization': self.admin_user_token},
data=req_body
)
self.assert200(resp)
self.assertNotEqual(reset_token, new_user.reset_token)
req_body = json.dumps({
"first_name": "First",
"last_name": "Last",
"email": email,
"new_password": "somethingnew",
"agencies": [80, 188],
"other_agencies": "Something you didn't think of",
"other_state_agencies": "California dreams",
"topics": [1, 2, 3],
"other_topics": "Something else",
"state_agencies": ["US-CA", "US-NY"],
})
resp = self.client.post('/activate', data=req_body)
self.assert200(resp)
db_session_users.refresh(new_user)
self.assertIsInstance(new_user.properties['activation_time'], unicode)
self.assertFalse(new_user.enabled)
self.assertIsInstance(resp.json['jwt_token'], unicode)
self.assertIsInstance(new_user.reset_token, unicode)
self.assertEqual('First', new_user.first_name)
self.assertEqual('Last', new_user.last_name)
self.assertFalse(new_user.enabled)
self.assertIsInstance(new_user.password_hash, unicode)
self.assertEqual(len(new_user.marketing_campaigns), 0)
for p in ['other_topics', 'other_agencies', 'other_state_agencies']:
self.assertIn(p, new_user.properties)
self.assertIsInstance(new_user.properties.get(p), unicode)
for p in ['agencies', 'state_agencies']:
self.assertIn(p, new_user.properties)
self.assertIsInstance(new_user.properties.get(p), list)
num_user_agencies = db_session_users.query(UserAgency).filter_by(user_id=new_user.id).count()
self.assertEqual(num_of_default_agencies_at_signup, num_user_agencies) # should not include invalid selection
num_user_entities = db_session_users.query(UserFollowedEntity).filter_by(user_id=new_user.id).count()
self.assertEqual(4, num_user_entities)
num_news_entities = db_session_users.query(UserFollowedEntity).filter_by(user_id=new_user.id, entity_type='news_sources').count()
self.assertEqual(2, num_news_entities)
num_user_topics = db_session_users.query(UserTopic).filter_by(user_id=new_user.id).count()
self.assertEqual(3, num_user_topics)
# validate access works with temporary token
access_resp = self.client.get("/current_user", headers={'Authorization': resp.json['jwt_token']})
self.assert200(access_resp)
# run an extra api call that should fail on /activate with this email to confirm the token is not overwritten
req_body = json.dumps({
"email": email,
"new_password": "foo"
})
resp = self.client.post('/activate', data=req_body)
self.assert400(resp)
# finally, use the confirm route to enable the user
req_body = json.dumps({
"email": email,
"token": new_user.reset_token
})
resp = self.client.post('/confirm', data=req_body)
self.assert200(resp)
new_user = db_session_users.query(User).filter_by(email=email).first()
self.assertTrue(new_user.enabled)
self.assertIn('confirmed_date', new_user.properties)
resp = self.client.post('/confirm', data=req_body)
self.assert400(resp)
def test_check_email(self):
resp = self.client.get("/[email protected]")
self.assert200(resp)
self.assertIn('email_in_use', resp.json)
self.assertIsInstance(resp.json['email_in_use'], bool)
def test_resend_confirmation_email(self):
# first create a user that has signed up (not invited) and requires a confirmation
req_body = json.dumps({
"first_name": "First",
"last_name": "Last",
"email": '[email protected]',
"token": None,
"new_password": "somethingnew",
"agencies": [80, 188],
"other_agencies": "Something you didn't think of",
"topics": [1, 2, 3],
"other_topics": "Something else",
})
resp = self.client.post('/activate', data=req_body)
self.assert200(resp)
user = db_session_users.query(User).filter_by(email='[email protected]').first()
db_session_users.refresh(user)
        # Now that the user is created, let's resend them a confirmation email
req_body = json.dumps({'email': user.email })
resp = self.client.post(
"/send_confirm_email",
headers={'Authorization': self.token},
data=req_body
)
self.assert200(resp)
self.assertIn('confirmation_resent_time', user.properties)
        # Now let's check that we get the expected error when no email is provided
req_body = json.dumps({})
resp = self.client.post(
"/send_confirm_email",
headers={'Authorization': self.token},
data=req_body
)
self.assert400(resp)
        # Now let's send an email that doesn't belong to any user
req_body = json.dumps({'email': '[email protected]'})
resp = self.client.post(
"/send_confirm_email",
headers={'Authorization': self.token},
data=req_body
)
self.assert400(resp)
| [
"[email protected]"
] | |
1484c55af6358e41228214378c276a467a0cf6f7 | b39d72ba5de9d4683041e6b4413f8483c817f821 | /GeneVisualization/ass1/Lib/site-packages/itk/itkLiThresholdCalculatorPython.py | 556f811f79e55e27f6ef3e8cafcd931ef76386cb | [] | no_license | ssalmaan/DataVisualization | d93a0afe1290e4ea46c3be5718d503c71a6f99a7 | eff072f11337f124681ce08742e1a092033680cc | refs/heads/master | 2021-03-13T05:40:23.679095 | 2020-03-11T21:37:45 | 2020-03-11T21:37:45 | 246,642,979 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 29,840 | py | # This file was automatically generated by SWIG (http://www.swig.org).
# Version 3.0.8
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
from sys import version_info
if version_info >= (3, 0, 0):
new_instancemethod = lambda func, inst, cls: _itkLiThresholdCalculatorPython.SWIG_PyInstanceMethod_New(func)
else:
from new import instancemethod as new_instancemethod
if version_info >= (2, 6, 0):
def swig_import_helper():
from os.path import dirname
import imp
fp = None
try:
fp, pathname, description = imp.find_module('_itkLiThresholdCalculatorPython', [dirname(__file__)])
except ImportError:
import _itkLiThresholdCalculatorPython
return _itkLiThresholdCalculatorPython
if fp is not None:
try:
_mod = imp.load_module('_itkLiThresholdCalculatorPython', fp, pathname, description)
finally:
fp.close()
return _mod
_itkLiThresholdCalculatorPython = swig_import_helper()
del swig_import_helper
else:
import _itkLiThresholdCalculatorPython
del version_info
try:
_swig_property = property
except NameError:
pass # Python < 2.2 doesn't have 'property'.
def _swig_setattr_nondynamic(self, class_type, name, value, static=1):
if (name == "thisown"):
return self.this.own(value)
if (name == "this"):
if type(value).__name__ == 'SwigPyObject':
self.__dict__[name] = value
return
method = class_type.__swig_setmethods__.get(name, None)
if method:
return method(self, value)
if (not static):
object.__setattr__(self, name, value)
else:
raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self, class_type, name, value):
return _swig_setattr_nondynamic(self, class_type, name, value, 0)
def _swig_getattr_nondynamic(self, class_type, name, static=1):
if (name == "thisown"):
return self.this.own()
method = class_type.__swig_getmethods__.get(name, None)
if method:
return method(self)
if (not static):
return object.__getattr__(self, name)
else:
raise AttributeError(name)
def _swig_getattr(self, class_type, name):
return _swig_getattr_nondynamic(self, class_type, name, 0)
def _swig_repr(self):
try:
strthis = "proxy of " + self.this.__repr__()
except Exception:
strthis = ""
return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
try:
_object = object
_newclass = 1
except AttributeError:
class _object:
pass
_newclass = 0
def _swig_setattr_nondynamic_method(set):
def set_attr(self, name, value):
if (name == "thisown"):
return self.this.own(value)
if hasattr(self, name) or (name == "this"):
set(self, name, value)
else:
raise AttributeError("You cannot add attributes to %s" % self)
return set_attr
import itkHistogramThresholdCalculatorPython
import itkHistogramPython
import itkArrayPython
import vnl_vectorPython
import vnl_matrixPython
import stdcomplexPython
import pyBasePython
import itkSamplePython
import itkVectorPython
import vnl_vector_refPython
import itkFixedArrayPython
import ITKCommonBasePython
import itkSimpleDataObjectDecoratorPython
import itkRGBAPixelPython
import itkCovariantVectorPython
import itkRGBPixelPython
def itkLiThresholdCalculatorHFF_New():
return itkLiThresholdCalculatorHFF.New()
def itkLiThresholdCalculatorHDF_New():
return itkLiThresholdCalculatorHDF.New()
def itkLiThresholdCalculatorHFUS_New():
return itkLiThresholdCalculatorHFUS.New()
def itkLiThresholdCalculatorHDUS_New():
return itkLiThresholdCalculatorHDUS.New()
def itkLiThresholdCalculatorHFUC_New():
return itkLiThresholdCalculatorHFUC.New()
def itkLiThresholdCalculatorHDUC_New():
return itkLiThresholdCalculatorHDUC.New()
def itkLiThresholdCalculatorHFSS_New():
return itkLiThresholdCalculatorHFSS.New()
def itkLiThresholdCalculatorHDSS_New():
return itkLiThresholdCalculatorHDSS.New()
class itkLiThresholdCalculatorHDF(itkHistogramThresholdCalculatorPython.itkHistogramThresholdCalculatorHDF):
"""Proxy of C++ itkLiThresholdCalculatorHDF class."""
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
def __init__(self, *args, **kwargs):
raise AttributeError("No constructor defined")
__repr__ = _swig_repr
def __New_orig__() -> "itkLiThresholdCalculatorHDF_Pointer":
"""__New_orig__() -> itkLiThresholdCalculatorHDF_Pointer"""
return _itkLiThresholdCalculatorPython.itkLiThresholdCalculatorHDF___New_orig__()
__New_orig__ = staticmethod(__New_orig__)
def Clone(self) -> "itkLiThresholdCalculatorHDF_Pointer":
"""Clone(itkLiThresholdCalculatorHDF self) -> itkLiThresholdCalculatorHDF_Pointer"""
return _itkLiThresholdCalculatorPython.itkLiThresholdCalculatorHDF_Clone(self)
__swig_destroy__ = _itkLiThresholdCalculatorPython.delete_itkLiThresholdCalculatorHDF
def cast(obj: 'itkLightObject') -> "itkLiThresholdCalculatorHDF *":
"""cast(itkLightObject obj) -> itkLiThresholdCalculatorHDF"""
return _itkLiThresholdCalculatorPython.itkLiThresholdCalculatorHDF_cast(obj)
cast = staticmethod(cast)
def New(*args, **kargs):
"""New() -> itkLiThresholdCalculatorHDF
Create a new object of the class itkLiThresholdCalculatorHDF and set the input and the parameters if some
named or non-named arguments are passed to that method.
New() tries to assign all the non named parameters to the input of the new objects - the
first non named parameter in the first input, etc.
The named parameters are used by calling the method with the same name prefixed by 'Set'.
Ex:
itkLiThresholdCalculatorHDF.New( reader, Threshold=10 )
is (most of the time) equivalent to:
obj = itkLiThresholdCalculatorHDF.New()
obj.SetInput( 0, reader.GetOutput() )
obj.SetThreshold( 10 )
"""
obj = itkLiThresholdCalculatorHDF.__New_orig__()
import itkTemplate
itkTemplate.New(obj, *args, **kargs)
return obj
New = staticmethod(New)
itkLiThresholdCalculatorHDF.Clone = new_instancemethod(_itkLiThresholdCalculatorPython.itkLiThresholdCalculatorHDF_Clone, None, itkLiThresholdCalculatorHDF)
itkLiThresholdCalculatorHDF_swigregister = _itkLiThresholdCalculatorPython.itkLiThresholdCalculatorHDF_swigregister
itkLiThresholdCalculatorHDF_swigregister(itkLiThresholdCalculatorHDF)
def itkLiThresholdCalculatorHDF___New_orig__() -> "itkLiThresholdCalculatorHDF_Pointer":
"""itkLiThresholdCalculatorHDF___New_orig__() -> itkLiThresholdCalculatorHDF_Pointer"""
return _itkLiThresholdCalculatorPython.itkLiThresholdCalculatorHDF___New_orig__()
def itkLiThresholdCalculatorHDF_cast(obj: 'itkLightObject') -> "itkLiThresholdCalculatorHDF *":
"""itkLiThresholdCalculatorHDF_cast(itkLightObject obj) -> itkLiThresholdCalculatorHDF"""
return _itkLiThresholdCalculatorPython.itkLiThresholdCalculatorHDF_cast(obj)
class itkLiThresholdCalculatorHDSS(itkHistogramThresholdCalculatorPython.itkHistogramThresholdCalculatorHDSS):
"""Proxy of C++ itkLiThresholdCalculatorHDSS class."""
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
def __init__(self, *args, **kwargs):
raise AttributeError("No constructor defined")
__repr__ = _swig_repr
def __New_orig__() -> "itkLiThresholdCalculatorHDSS_Pointer":
"""__New_orig__() -> itkLiThresholdCalculatorHDSS_Pointer"""
return _itkLiThresholdCalculatorPython.itkLiThresholdCalculatorHDSS___New_orig__()
__New_orig__ = staticmethod(__New_orig__)
def Clone(self) -> "itkLiThresholdCalculatorHDSS_Pointer":
"""Clone(itkLiThresholdCalculatorHDSS self) -> itkLiThresholdCalculatorHDSS_Pointer"""
return _itkLiThresholdCalculatorPython.itkLiThresholdCalculatorHDSS_Clone(self)
__swig_destroy__ = _itkLiThresholdCalculatorPython.delete_itkLiThresholdCalculatorHDSS
def cast(obj: 'itkLightObject') -> "itkLiThresholdCalculatorHDSS *":
"""cast(itkLightObject obj) -> itkLiThresholdCalculatorHDSS"""
return _itkLiThresholdCalculatorPython.itkLiThresholdCalculatorHDSS_cast(obj)
cast = staticmethod(cast)
def New(*args, **kargs):
"""New() -> itkLiThresholdCalculatorHDSS
Create a new object of the class itkLiThresholdCalculatorHDSS and set the input and the parameters if some
named or non-named arguments are passed to that method.
New() tries to assign all the non named parameters to the input of the new objects - the
first non named parameter in the first input, etc.
The named parameters are used by calling the method with the same name prefixed by 'Set'.
Ex:
itkLiThresholdCalculatorHDSS.New( reader, Threshold=10 )
is (most of the time) equivalent to:
obj = itkLiThresholdCalculatorHDSS.New()
obj.SetInput( 0, reader.GetOutput() )
obj.SetThreshold( 10 )
"""
obj = itkLiThresholdCalculatorHDSS.__New_orig__()
import itkTemplate
itkTemplate.New(obj, *args, **kargs)
return obj
New = staticmethod(New)
itkLiThresholdCalculatorHDSS.Clone = new_instancemethod(_itkLiThresholdCalculatorPython.itkLiThresholdCalculatorHDSS_Clone, None, itkLiThresholdCalculatorHDSS)
itkLiThresholdCalculatorHDSS_swigregister = _itkLiThresholdCalculatorPython.itkLiThresholdCalculatorHDSS_swigregister
itkLiThresholdCalculatorHDSS_swigregister(itkLiThresholdCalculatorHDSS)
def itkLiThresholdCalculatorHDSS___New_orig__() -> "itkLiThresholdCalculatorHDSS_Pointer":
"""itkLiThresholdCalculatorHDSS___New_orig__() -> itkLiThresholdCalculatorHDSS_Pointer"""
return _itkLiThresholdCalculatorPython.itkLiThresholdCalculatorHDSS___New_orig__()
def itkLiThresholdCalculatorHDSS_cast(obj: 'itkLightObject') -> "itkLiThresholdCalculatorHDSS *":
"""itkLiThresholdCalculatorHDSS_cast(itkLightObject obj) -> itkLiThresholdCalculatorHDSS"""
return _itkLiThresholdCalculatorPython.itkLiThresholdCalculatorHDSS_cast(obj)
class itkLiThresholdCalculatorHDUC(itkHistogramThresholdCalculatorPython.itkHistogramThresholdCalculatorHDUC):
"""Proxy of C++ itkLiThresholdCalculatorHDUC class."""
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
def __init__(self, *args, **kwargs):
raise AttributeError("No constructor defined")
__repr__ = _swig_repr
def __New_orig__() -> "itkLiThresholdCalculatorHDUC_Pointer":
"""__New_orig__() -> itkLiThresholdCalculatorHDUC_Pointer"""
return _itkLiThresholdCalculatorPython.itkLiThresholdCalculatorHDUC___New_orig__()
__New_orig__ = staticmethod(__New_orig__)
def Clone(self) -> "itkLiThresholdCalculatorHDUC_Pointer":
"""Clone(itkLiThresholdCalculatorHDUC self) -> itkLiThresholdCalculatorHDUC_Pointer"""
return _itkLiThresholdCalculatorPython.itkLiThresholdCalculatorHDUC_Clone(self)
__swig_destroy__ = _itkLiThresholdCalculatorPython.delete_itkLiThresholdCalculatorHDUC
def cast(obj: 'itkLightObject') -> "itkLiThresholdCalculatorHDUC *":
"""cast(itkLightObject obj) -> itkLiThresholdCalculatorHDUC"""
return _itkLiThresholdCalculatorPython.itkLiThresholdCalculatorHDUC_cast(obj)
cast = staticmethod(cast)
def New(*args, **kargs):
"""New() -> itkLiThresholdCalculatorHDUC
Create a new object of the class itkLiThresholdCalculatorHDUC and set the input and the parameters if some
named or non-named arguments are passed to that method.
New() tries to assign all the non named parameters to the input of the new objects - the
first non named parameter in the first input, etc.
The named parameters are used by calling the method with the same name prefixed by 'Set'.
Ex:
itkLiThresholdCalculatorHDUC.New( reader, Threshold=10 )
is (most of the time) equivalent to:
obj = itkLiThresholdCalculatorHDUC.New()
obj.SetInput( 0, reader.GetOutput() )
obj.SetThreshold( 10 )
"""
obj = itkLiThresholdCalculatorHDUC.__New_orig__()
import itkTemplate
itkTemplate.New(obj, *args, **kargs)
return obj
New = staticmethod(New)
itkLiThresholdCalculatorHDUC.Clone = new_instancemethod(_itkLiThresholdCalculatorPython.itkLiThresholdCalculatorHDUC_Clone, None, itkLiThresholdCalculatorHDUC)
itkLiThresholdCalculatorHDUC_swigregister = _itkLiThresholdCalculatorPython.itkLiThresholdCalculatorHDUC_swigregister
itkLiThresholdCalculatorHDUC_swigregister(itkLiThresholdCalculatorHDUC)
def itkLiThresholdCalculatorHDUC___New_orig__() -> "itkLiThresholdCalculatorHDUC_Pointer":
"""itkLiThresholdCalculatorHDUC___New_orig__() -> itkLiThresholdCalculatorHDUC_Pointer"""
return _itkLiThresholdCalculatorPython.itkLiThresholdCalculatorHDUC___New_orig__()
def itkLiThresholdCalculatorHDUC_cast(obj: 'itkLightObject') -> "itkLiThresholdCalculatorHDUC *":
"""itkLiThresholdCalculatorHDUC_cast(itkLightObject obj) -> itkLiThresholdCalculatorHDUC"""
return _itkLiThresholdCalculatorPython.itkLiThresholdCalculatorHDUC_cast(obj)
class itkLiThresholdCalculatorHDUS(itkHistogramThresholdCalculatorPython.itkHistogramThresholdCalculatorHDUS):
"""Proxy of C++ itkLiThresholdCalculatorHDUS class."""
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
def __init__(self, *args, **kwargs):
raise AttributeError("No constructor defined")
__repr__ = _swig_repr
def __New_orig__() -> "itkLiThresholdCalculatorHDUS_Pointer":
"""__New_orig__() -> itkLiThresholdCalculatorHDUS_Pointer"""
return _itkLiThresholdCalculatorPython.itkLiThresholdCalculatorHDUS___New_orig__()
__New_orig__ = staticmethod(__New_orig__)
def Clone(self) -> "itkLiThresholdCalculatorHDUS_Pointer":
"""Clone(itkLiThresholdCalculatorHDUS self) -> itkLiThresholdCalculatorHDUS_Pointer"""
return _itkLiThresholdCalculatorPython.itkLiThresholdCalculatorHDUS_Clone(self)
__swig_destroy__ = _itkLiThresholdCalculatorPython.delete_itkLiThresholdCalculatorHDUS
def cast(obj: 'itkLightObject') -> "itkLiThresholdCalculatorHDUS *":
"""cast(itkLightObject obj) -> itkLiThresholdCalculatorHDUS"""
return _itkLiThresholdCalculatorPython.itkLiThresholdCalculatorHDUS_cast(obj)
cast = staticmethod(cast)
def New(*args, **kargs):
"""New() -> itkLiThresholdCalculatorHDUS
Create a new object of the class itkLiThresholdCalculatorHDUS and set the input and the parameters if some
named or non-named arguments are passed to that method.
New() tries to assign all the non named parameters to the input of the new objects - the
first non named parameter in the first input, etc.
The named parameters are used by calling the method with the same name prefixed by 'Set'.
Ex:
itkLiThresholdCalculatorHDUS.New( reader, Threshold=10 )
is (most of the time) equivalent to:
obj = itkLiThresholdCalculatorHDUS.New()
obj.SetInput( 0, reader.GetOutput() )
obj.SetThreshold( 10 )
"""
obj = itkLiThresholdCalculatorHDUS.__New_orig__()
import itkTemplate
itkTemplate.New(obj, *args, **kargs)
return obj
New = staticmethod(New)
itkLiThresholdCalculatorHDUS.Clone = new_instancemethod(_itkLiThresholdCalculatorPython.itkLiThresholdCalculatorHDUS_Clone, None, itkLiThresholdCalculatorHDUS)
itkLiThresholdCalculatorHDUS_swigregister = _itkLiThresholdCalculatorPython.itkLiThresholdCalculatorHDUS_swigregister
itkLiThresholdCalculatorHDUS_swigregister(itkLiThresholdCalculatorHDUS)
def itkLiThresholdCalculatorHDUS___New_orig__() -> "itkLiThresholdCalculatorHDUS_Pointer":
"""itkLiThresholdCalculatorHDUS___New_orig__() -> itkLiThresholdCalculatorHDUS_Pointer"""
return _itkLiThresholdCalculatorPython.itkLiThresholdCalculatorHDUS___New_orig__()
def itkLiThresholdCalculatorHDUS_cast(obj: 'itkLightObject') -> "itkLiThresholdCalculatorHDUS *":
"""itkLiThresholdCalculatorHDUS_cast(itkLightObject obj) -> itkLiThresholdCalculatorHDUS"""
return _itkLiThresholdCalculatorPython.itkLiThresholdCalculatorHDUS_cast(obj)
class itkLiThresholdCalculatorHFF(itkHistogramThresholdCalculatorPython.itkHistogramThresholdCalculatorHFF):
"""Proxy of C++ itkLiThresholdCalculatorHFF class."""
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
def __init__(self, *args, **kwargs):
raise AttributeError("No constructor defined")
__repr__ = _swig_repr
def __New_orig__() -> "itkLiThresholdCalculatorHFF_Pointer":
"""__New_orig__() -> itkLiThresholdCalculatorHFF_Pointer"""
return _itkLiThresholdCalculatorPython.itkLiThresholdCalculatorHFF___New_orig__()
__New_orig__ = staticmethod(__New_orig__)
def Clone(self) -> "itkLiThresholdCalculatorHFF_Pointer":
"""Clone(itkLiThresholdCalculatorHFF self) -> itkLiThresholdCalculatorHFF_Pointer"""
return _itkLiThresholdCalculatorPython.itkLiThresholdCalculatorHFF_Clone(self)
__swig_destroy__ = _itkLiThresholdCalculatorPython.delete_itkLiThresholdCalculatorHFF
def cast(obj: 'itkLightObject') -> "itkLiThresholdCalculatorHFF *":
"""cast(itkLightObject obj) -> itkLiThresholdCalculatorHFF"""
return _itkLiThresholdCalculatorPython.itkLiThresholdCalculatorHFF_cast(obj)
cast = staticmethod(cast)
def New(*args, **kargs):
"""New() -> itkLiThresholdCalculatorHFF
Create a new object of the class itkLiThresholdCalculatorHFF and set the input and the parameters if some
named or non-named arguments are passed to that method.
New() tries to assign all the non named parameters to the input of the new objects - the
first non named parameter in the first input, etc.
The named parameters are used by calling the method with the same name prefixed by 'Set'.
Ex:
itkLiThresholdCalculatorHFF.New( reader, Threshold=10 )
is (most of the time) equivalent to:
obj = itkLiThresholdCalculatorHFF.New()
obj.SetInput( 0, reader.GetOutput() )
obj.SetThreshold( 10 )
"""
obj = itkLiThresholdCalculatorHFF.__New_orig__()
import itkTemplate
itkTemplate.New(obj, *args, **kargs)
return obj
New = staticmethod(New)
itkLiThresholdCalculatorHFF.Clone = new_instancemethod(_itkLiThresholdCalculatorPython.itkLiThresholdCalculatorHFF_Clone, None, itkLiThresholdCalculatorHFF)
itkLiThresholdCalculatorHFF_swigregister = _itkLiThresholdCalculatorPython.itkLiThresholdCalculatorHFF_swigregister
itkLiThresholdCalculatorHFF_swigregister(itkLiThresholdCalculatorHFF)
def itkLiThresholdCalculatorHFF___New_orig__() -> "itkLiThresholdCalculatorHFF_Pointer":
"""itkLiThresholdCalculatorHFF___New_orig__() -> itkLiThresholdCalculatorHFF_Pointer"""
return _itkLiThresholdCalculatorPython.itkLiThresholdCalculatorHFF___New_orig__()
def itkLiThresholdCalculatorHFF_cast(obj: 'itkLightObject') -> "itkLiThresholdCalculatorHFF *":
"""itkLiThresholdCalculatorHFF_cast(itkLightObject obj) -> itkLiThresholdCalculatorHFF"""
return _itkLiThresholdCalculatorPython.itkLiThresholdCalculatorHFF_cast(obj)
class itkLiThresholdCalculatorHFSS(itkHistogramThresholdCalculatorPython.itkHistogramThresholdCalculatorHFSS):
"""Proxy of C++ itkLiThresholdCalculatorHFSS class."""
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
def __init__(self, *args, **kwargs):
raise AttributeError("No constructor defined")
__repr__ = _swig_repr
def __New_orig__() -> "itkLiThresholdCalculatorHFSS_Pointer":
"""__New_orig__() -> itkLiThresholdCalculatorHFSS_Pointer"""
return _itkLiThresholdCalculatorPython.itkLiThresholdCalculatorHFSS___New_orig__()
__New_orig__ = staticmethod(__New_orig__)
def Clone(self) -> "itkLiThresholdCalculatorHFSS_Pointer":
"""Clone(itkLiThresholdCalculatorHFSS self) -> itkLiThresholdCalculatorHFSS_Pointer"""
return _itkLiThresholdCalculatorPython.itkLiThresholdCalculatorHFSS_Clone(self)
__swig_destroy__ = _itkLiThresholdCalculatorPython.delete_itkLiThresholdCalculatorHFSS
def cast(obj: 'itkLightObject') -> "itkLiThresholdCalculatorHFSS *":
"""cast(itkLightObject obj) -> itkLiThresholdCalculatorHFSS"""
return _itkLiThresholdCalculatorPython.itkLiThresholdCalculatorHFSS_cast(obj)
cast = staticmethod(cast)
def New(*args, **kargs):
"""New() -> itkLiThresholdCalculatorHFSS
Create a new object of the class itkLiThresholdCalculatorHFSS and set the input and the parameters if some
named or non-named arguments are passed to that method.
New() tries to assign all the non named parameters to the input of the new objects - the
first non named parameter in the first input, etc.
The named parameters are used by calling the method with the same name prefixed by 'Set'.
Ex:
itkLiThresholdCalculatorHFSS.New( reader, Threshold=10 )
is (most of the time) equivalent to:
obj = itkLiThresholdCalculatorHFSS.New()
obj.SetInput( 0, reader.GetOutput() )
obj.SetThreshold( 10 )
"""
obj = itkLiThresholdCalculatorHFSS.__New_orig__()
import itkTemplate
itkTemplate.New(obj, *args, **kargs)
return obj
New = staticmethod(New)
itkLiThresholdCalculatorHFSS.Clone = new_instancemethod(_itkLiThresholdCalculatorPython.itkLiThresholdCalculatorHFSS_Clone, None, itkLiThresholdCalculatorHFSS)
itkLiThresholdCalculatorHFSS_swigregister = _itkLiThresholdCalculatorPython.itkLiThresholdCalculatorHFSS_swigregister
itkLiThresholdCalculatorHFSS_swigregister(itkLiThresholdCalculatorHFSS)
def itkLiThresholdCalculatorHFSS___New_orig__() -> "itkLiThresholdCalculatorHFSS_Pointer":
"""itkLiThresholdCalculatorHFSS___New_orig__() -> itkLiThresholdCalculatorHFSS_Pointer"""
return _itkLiThresholdCalculatorPython.itkLiThresholdCalculatorHFSS___New_orig__()
def itkLiThresholdCalculatorHFSS_cast(obj: 'itkLightObject') -> "itkLiThresholdCalculatorHFSS *":
"""itkLiThresholdCalculatorHFSS_cast(itkLightObject obj) -> itkLiThresholdCalculatorHFSS"""
return _itkLiThresholdCalculatorPython.itkLiThresholdCalculatorHFSS_cast(obj)
class itkLiThresholdCalculatorHFUC(itkHistogramThresholdCalculatorPython.itkHistogramThresholdCalculatorHFUC):
"""Proxy of C++ itkLiThresholdCalculatorHFUC class."""
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
def __init__(self, *args, **kwargs):
raise AttributeError("No constructor defined")
__repr__ = _swig_repr
def __New_orig__() -> "itkLiThresholdCalculatorHFUC_Pointer":
"""__New_orig__() -> itkLiThresholdCalculatorHFUC_Pointer"""
return _itkLiThresholdCalculatorPython.itkLiThresholdCalculatorHFUC___New_orig__()
__New_orig__ = staticmethod(__New_orig__)
def Clone(self) -> "itkLiThresholdCalculatorHFUC_Pointer":
"""Clone(itkLiThresholdCalculatorHFUC self) -> itkLiThresholdCalculatorHFUC_Pointer"""
return _itkLiThresholdCalculatorPython.itkLiThresholdCalculatorHFUC_Clone(self)
__swig_destroy__ = _itkLiThresholdCalculatorPython.delete_itkLiThresholdCalculatorHFUC
def cast(obj: 'itkLightObject') -> "itkLiThresholdCalculatorHFUC *":
"""cast(itkLightObject obj) -> itkLiThresholdCalculatorHFUC"""
return _itkLiThresholdCalculatorPython.itkLiThresholdCalculatorHFUC_cast(obj)
cast = staticmethod(cast)
def New(*args, **kargs):
"""New() -> itkLiThresholdCalculatorHFUC
Create a new object of the class itkLiThresholdCalculatorHFUC and set the input and the parameters if some
named or non-named arguments are passed to that method.
New() tries to assign all the non named parameters to the input of the new objects - the
first non named parameter in the first input, etc.
The named parameters are used by calling the method with the same name prefixed by 'Set'.
Ex:
itkLiThresholdCalculatorHFUC.New( reader, Threshold=10 )
is (most of the time) equivalent to:
obj = itkLiThresholdCalculatorHFUC.New()
obj.SetInput( 0, reader.GetOutput() )
obj.SetThreshold( 10 )
"""
obj = itkLiThresholdCalculatorHFUC.__New_orig__()
import itkTemplate
itkTemplate.New(obj, *args, **kargs)
return obj
New = staticmethod(New)
itkLiThresholdCalculatorHFUC.Clone = new_instancemethod(_itkLiThresholdCalculatorPython.itkLiThresholdCalculatorHFUC_Clone, None, itkLiThresholdCalculatorHFUC)
itkLiThresholdCalculatorHFUC_swigregister = _itkLiThresholdCalculatorPython.itkLiThresholdCalculatorHFUC_swigregister
itkLiThresholdCalculatorHFUC_swigregister(itkLiThresholdCalculatorHFUC)
def itkLiThresholdCalculatorHFUC___New_orig__() -> "itkLiThresholdCalculatorHFUC_Pointer":
"""itkLiThresholdCalculatorHFUC___New_orig__() -> itkLiThresholdCalculatorHFUC_Pointer"""
return _itkLiThresholdCalculatorPython.itkLiThresholdCalculatorHFUC___New_orig__()
def itkLiThresholdCalculatorHFUC_cast(obj: 'itkLightObject') -> "itkLiThresholdCalculatorHFUC *":
"""itkLiThresholdCalculatorHFUC_cast(itkLightObject obj) -> itkLiThresholdCalculatorHFUC"""
return _itkLiThresholdCalculatorPython.itkLiThresholdCalculatorHFUC_cast(obj)
class itkLiThresholdCalculatorHFUS(itkHistogramThresholdCalculatorPython.itkHistogramThresholdCalculatorHFUS):
"""Proxy of C++ itkLiThresholdCalculatorHFUS class."""
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
def __init__(self, *args, **kwargs):
raise AttributeError("No constructor defined")
__repr__ = _swig_repr
def __New_orig__() -> "itkLiThresholdCalculatorHFUS_Pointer":
"""__New_orig__() -> itkLiThresholdCalculatorHFUS_Pointer"""
return _itkLiThresholdCalculatorPython.itkLiThresholdCalculatorHFUS___New_orig__()
__New_orig__ = staticmethod(__New_orig__)
def Clone(self) -> "itkLiThresholdCalculatorHFUS_Pointer":
"""Clone(itkLiThresholdCalculatorHFUS self) -> itkLiThresholdCalculatorHFUS_Pointer"""
return _itkLiThresholdCalculatorPython.itkLiThresholdCalculatorHFUS_Clone(self)
__swig_destroy__ = _itkLiThresholdCalculatorPython.delete_itkLiThresholdCalculatorHFUS
def cast(obj: 'itkLightObject') -> "itkLiThresholdCalculatorHFUS *":
"""cast(itkLightObject obj) -> itkLiThresholdCalculatorHFUS"""
return _itkLiThresholdCalculatorPython.itkLiThresholdCalculatorHFUS_cast(obj)
cast = staticmethod(cast)
def New(*args, **kargs):
"""New() -> itkLiThresholdCalculatorHFUS
Create a new object of the class itkLiThresholdCalculatorHFUS and set the input and the parameters if some
named or non-named arguments are passed to that method.
New() tries to assign all the non named parameters to the input of the new objects - the
first non named parameter in the first input, etc.
The named parameters are used by calling the method with the same name prefixed by 'Set'.
Ex:
itkLiThresholdCalculatorHFUS.New( reader, Threshold=10 )
is (most of the time) equivalent to:
obj = itkLiThresholdCalculatorHFUS.New()
obj.SetInput( 0, reader.GetOutput() )
obj.SetThreshold( 10 )
"""
obj = itkLiThresholdCalculatorHFUS.__New_orig__()
import itkTemplate
itkTemplate.New(obj, *args, **kargs)
return obj
New = staticmethod(New)
itkLiThresholdCalculatorHFUS.Clone = new_instancemethod(_itkLiThresholdCalculatorPython.itkLiThresholdCalculatorHFUS_Clone, None, itkLiThresholdCalculatorHFUS)
itkLiThresholdCalculatorHFUS_swigregister = _itkLiThresholdCalculatorPython.itkLiThresholdCalculatorHFUS_swigregister
itkLiThresholdCalculatorHFUS_swigregister(itkLiThresholdCalculatorHFUS)
def itkLiThresholdCalculatorHFUS___New_orig__() -> "itkLiThresholdCalculatorHFUS_Pointer":
"""itkLiThresholdCalculatorHFUS___New_orig__() -> itkLiThresholdCalculatorHFUS_Pointer"""
return _itkLiThresholdCalculatorPython.itkLiThresholdCalculatorHFUS___New_orig__()
def itkLiThresholdCalculatorHFUS_cast(obj: 'itkLightObject') -> "itkLiThresholdCalculatorHFUS *":
"""itkLiThresholdCalculatorHFUS_cast(itkLightObject obj) -> itkLiThresholdCalculatorHFUS"""
return _itkLiThresholdCalculatorPython.itkLiThresholdCalculatorHFUS_cast(obj)
def li_threshold_calculator(*args, **kwargs):
"""Procedural interface for LiThresholdCalculator"""
import itk
instance = itk.LiThresholdCalculator.New(*args, **kwargs)
return instance.__internal_call__()
def li_threshold_calculator_init_docstring():
import itk
import itkTemplate
if isinstance(itk.LiThresholdCalculator, itkTemplate.itkTemplate):
li_threshold_calculator.__doc__ = itk.LiThresholdCalculator.values()[0].__doc__
else:
li_threshold_calculator.__doc__ = itk.LiThresholdCalculator.__doc__
| [
"[email protected]"
] | |
a4edb019c8bb611867382bd41f3e6b01771f81a0 | 1ac2594314c0de24528b171c8f9120566a3cd4be | /tests/test_api/test_application_auth.py | 0a6f5f693cb4951d759daff7ef217d31d039c1b5 | [
"MIT"
] | permissive | Zheaoli/huskar | a4c7e7b02bef301b5283519b1e1608489d79d95b | 395775c59c7da97c46efe9756365cad028b7c95a | refs/heads/master | 2022-07-11T06:54:34.810211 | 2020-01-01T08:00:57 | 2020-01-01T08:00:57 | 218,746,862 | 0 | 0 | MIT | 2019-10-31T11:06:11 | 2019-10-31T11:06:10 | null | UTF-8 | Python | false | false | 8,603 | py | from __future__ import absolute_import
from pytest import fixture, mark
from huskar_api.models.auth import User, ApplicationAuth, Authority
from ..utils import assert_response_ok
@fixture
def add_user(faker):
def factory(names):
for name in names:
if isinstance(name, list):
name, email = name
else:
email = '%[email protected]' % name
User.create_normal(
name, faker.password(), email=email,
is_active=True)
return factory
@fixture
def add_application_auth(db, test_application, test_application_token):
def factory(names):
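        # Each entry is a 'username:authority' string, e.g. 'foo:read'.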
for name in names:
username, authority = name.split(':', 1)
user_id = db.query(User.id).filter_by(username=username).scalar()
authority = Authority(authority)
test_application.ensure_auth(authority, user_id)
return factory
@fixture
def list_application_auth(db, test_application):
def generator():
for auth in db.query(ApplicationAuth).filter_by(
application_id=test_application.id).all():
user = db.query(User).get(auth.user_id)
if not user.is_application:
yield '%s:%s' % (user.username, auth.authority)
return generator
@fixture
def format_values(test_application):
def factory(d):
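        # Substitute the live test application's name into '%(test_application)s'
        # placeholders found in the expected-value dicts from the parametrize data.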
template_vars = {'test_application': test_application.application_name}
r = dict(d)
r.update((k, v % template_vars) for k, v in d.items()
if isinstance(v, basestring))
return r
return factory
@mark.xparametrize
def test_add_application_auth(
last_audit_log, add_user, add_application_auth, list_application_auth,
present_user, present_auth, request_auth, expected_resp, expected_auth,
client, test_application, admin_token):
add_user(present_user)
add_application_auth(present_auth)
username, authority = request_auth.split(':', 1)
r = client.post(
'/api/auth/application/%s' % test_application.application_name,
data={'username': username, 'authority': authority},
headers={'Authorization': admin_token})
assert r.status_code == expected_resp['status_code']
assert r.json == expected_resp['content']
assert set(list_application_auth()) == set(expected_auth)
audit_log = last_audit_log()
if expected_resp['status_code'] == 200:
assert audit_log.action_name == 'GRANT_APPLICATION_AUTH'
assert audit_log.action_json['application_name'] == \
test_application.application_name
assert audit_log.action_json['username'] == username
assert audit_log.action_json['authority'] == authority
else:
assert audit_log is None
def test_add_application_auth_to_invalid_application(
db, client, faker, add_user, admin_token, last_audit_log,
test_application):
add_user(['foo'])
name = faker.uuid4()
test_application.archive()
r = client.post(
'/api/auth/application/%s' % name,
data={'username': 'foo', 'authority': 'read'},
headers={'Authorization': admin_token})
assert r.status_code == 404
assert r.json['status'] == 'NotFound'
assert r.json['message'] == 'application %s does not exist' % name
r = client.post(
'/api/auth/application/%s' % test_application.application_name,
data={'username': 'foo', 'authority': 'read'},
headers={'Authorization': admin_token})
assert r.status_code == 404
assert r.json['status'] == 'NotFound'
assert r.json['message'] == ('application %s does not exist' %
test_application.application_name)
assert last_audit_log() is None
def test_add_application_auth_to_invalid_user(
client, faker, add_user, admin_token, last_audit_log,
test_application):
add_user(['foo'])
user = User.get_by_name('foo')
user.archive()
application_name = test_application.application_name
    unknown_user = faker.uuid4()[:6]
    r = client.post(
        '/api/auth/application/%s' % application_name,
        data={'username': unknown_user, 'authority': 'read'},
        headers={'Authorization': admin_token})
    assert r.status_code == 400
    assert r.json['status'] == 'BadRequest'
    assert r.json['message'] == 'user %s does not exist' % unknown_user
r = client.post(
'/api/auth/application/%s' % application_name,
data={'username': 'foo', 'authority': 'read'},
headers={'Authorization': admin_token})
assert r.status_code == 400
assert r.json['status'] == 'BadRequest'
assert r.json['message'] == 'user foo does not exist'
assert last_audit_log() is None
@mark.xparametrize
def test_delete_application_auth(
add_user, add_application_auth, list_application_auth, format_values,
present_user, present_auth, request_auth, expected_resp, expected_auth,
client, test_application, admin_token, last_audit_log):
add_user(present_user)
add_application_auth(present_auth)
username, authority = request_auth.split(':', 1)
r = client.delete(
'/api/auth/application/%s' % test_application.application_name,
data={'username': username, 'authority': authority},
headers={'Authorization': admin_token})
assert r.status_code == expected_resp['status_code']
assert r.json == format_values(expected_resp['content'])
assert set(list_application_auth()) == set(expected_auth)
audit_log = last_audit_log()
if expected_resp['status_code'] == 200:
assert audit_log.action_name == 'DISMISS_APPLICATION_AUTH'
assert audit_log.action_json['application_name'] == \
test_application.application_name
assert audit_log.action_json['username'] == username
assert audit_log.action_json['authority'] == authority
else:
assert audit_log is None
@mark.xparametrize
def test_list_application_auth(
add_user, add_application_auth, list_application_auth, format_values,
present_user, present_auth, expected_data,
client, test_application, admin_token):
add_user(present_user)
add_application_auth(present_auth)
r = client.get(
'/api/auth/application/%s' % test_application.application_name,
headers={'Authorization': admin_token})
assert_response_ok(r)
for item, expected_item in zip(
reversed(r.json['data']['application_auth']), # order by key desc
expected_data['application_auth']):
ex = format_values(expected_item)
assert item['authority'] == ex['authority']
assert item['user']['username'] == ex['username']
assert item['user']['is_application'] == ex['is_application']
assert item['user']['is_active'] is True
assert item['user']['is_admin'] is False
def test_add_application_without_permission(
client, test_user, test_token, test_application,
list_application_auth, last_audit_log):
r = client.post(
'/api/auth/application/%s' % test_application.application_name,
data={'username': test_user.username, 'authority': 'read'},
headers={'Authorization': test_token})
assert r.status_code == 400
assert r.json['status'] == 'NoAuthError'
assert r.json['data'] is None
assert set(list_application_auth()) == set([])
assert last_audit_log() is None
@mark.parametrize('test_authority', ['unknow'])
def test_add_application_with_unknown_authority(
client, test_user, test_application, test_authority, admin_token,
list_application_auth, last_audit_log):
r = client.post(
'/api/auth/application/%s' % test_application.application_name,
data={'username': test_user.username, 'authority': test_authority},
headers={'Authorization': admin_token})
assert r.status_code == 400
assert r.json['status'] == 'BadRequest'
assert r.json['data'] is None
assert set(list_application_auth()) == set([])
assert last_audit_log() is None
@mark.parametrize('test_authority', ['unknow'])
def test_delete_application_with_unknown_authority(
client, test_user, test_application, test_authority, admin_token):
r = client.delete(
'/api/auth/application/%s' % test_application.application_name,
data={'username': test_user.username, 'authority': test_authority},
headers={'Authorization': admin_token})
assert r.status_code == 400
assert r.json['status'] == 'BadRequest'
assert r.json['data'] is None
| [
"[email protected]"
] | |
d4d08f73436f51abbf9249999f8bd5b6dce1cb2a | ccf94dcb6b1500fcbbd56964ae8c4832a496b8b3 | /python/baiduads-sdk-auto/test/test_add_campaign_feed_response_wrapper_body.py | 2f27de5df0deda8130743c0c31f94739b41f8938 | [
"Apache-2.0"
] | permissive | baidu/baiduads-sdk | 24c36b5cf3da9362ec5c8ecd417ff280421198ff | 176363de5e8a4e98aaca039e4300703c3964c1c7 | refs/heads/main | 2023-06-08T15:40:24.787863 | 2023-05-20T03:40:51 | 2023-05-20T03:40:51 | 446,718,177 | 16 | 11 | Apache-2.0 | 2023-06-02T05:19:40 | 2022-01-11T07:23:17 | Python | UTF-8 | Python | false | false | 928 | py | """
dev2 api schema
'dev2.baidu.com' api schema # noqa: E501
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import baiduads
from baiduads.campaignfeed.model.campaign_feed_type import CampaignFeedType
globals()['CampaignFeedType'] = CampaignFeedType
from baiduads.campaignfeed.model.add_campaign_feed_response_wrapper_body import AddCampaignFeedResponseWrapperBody
class TestAddCampaignFeedResponseWrapperBody(unittest.TestCase):
"""AddCampaignFeedResponseWrapperBody unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testAddCampaignFeedResponseWrapperBody(self):
"""Test AddCampaignFeedResponseWrapperBody"""
# FIXME: construct object with mandatory attributes with example values
# model = AddCampaignFeedResponseWrapperBody() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
5e9f5edc1885013a836356ef125492c6de7d6b52 | 8ddda8fb6e5853126dcdafa3281c75071ada45c1 | /vyperlogix/trees/BinarySearchTree/__init__.py | 9e650390474f68b25b15dfda4a0d59db47884e97 | [
"CC0-1.0"
] | permissive | raychorn/chrome_gui | a48f3f9d931922a018e894f891ccd952476cd1ee | f1fade70b61af12ee43c55c075aa9cfd32caa962 | refs/heads/master | 2022-12-19T19:46:04.656032 | 2020-10-08T14:45:14 | 2020-10-08T14:45:14 | 299,167,534 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,463 | py | __copyright__ = """\
(c). Copyright 2008-2020, Vyper Logix Corp., All Rights Reserved.
Published under Creative Commons License
(http://creativecommons.org/licenses/by-nc/3.0/)
restricted to non-commercial educational use only.,
http://www.VyperLogix.com for details
THE AUTHOR VYPER LOGIX CORP DISCLAIMS ALL WARRANTIES WITH REGARD TO
THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL,
INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
WITH THE USE OR PERFORMANCE OF THIS SOFTWARE !
USE AT YOUR OWN RISK.
"""
from vyperlogix.classes.CooperativeClass import Cooperative
class Node(Cooperative):
def __init__(self, lchild=None, rchild=None, value=-1, data=None):
self.lchild = lchild
self.rchild = rchild
self.value = value
self.data = data
class BinaryTree(Cooperative):
"""Implement Binary Search Tree."""
def __init__(self):
self.l = [] # Nodes
self.root = None
def add(self, key, dt):
"""Add a node in tree."""
if self.root == None:
self.root = Node(value=key, data=dt)
self.l.append(self.root)
return 0
else:
self.p = self.root
while True:
if self.p.value > key:
if self.p.lchild == None:
self.p.lchild = Node(value=key, data=dt)
return 0 # Success
else:
self.p = self.p.lchild
elif self.p.value == key:
return -1 # Value already in tree
else:
if self.p.rchild == None:
self.p.rchild = Node(value=key, data=dt)
return 0 # Success
else:
self.p = self.p.rchild
return -2 # Should never happen
def search(self, key):
"""Search Tree for a key and return data; if not found return None."""
self.p = self.root
if self.p == None:
return None
while True:
# print self.p.value, self.p.data
if self.p.value > key:
if self.p.lchild == None:
return None # Not found
else:
self.p = self.p.lchild
elif self.p.value == key:
return self.p.data
else:
if self.p.rchild == None:
return None # Not found
else:
self.p = self.p.rchild
return None # Should never happen
    def deleteNode(self, key):
        """Delete the node with value == key; return 1 if deleted, 0 if not found."""
        if self.root == None:
            return 0
        if self.root.value == key:
            self.root = self.proceed(None, self.root)
            return 1
self.p = self.root
while True:
if self.p.value > key:
if self.p.lchild == None:
return 0 # Not found anything to delete
elif self.p.lchild.value == key:
self.p.lchild = self.proceed(self.p, self.p.lchild)
return 1
else:
self.p = self.p.lchild
# There's no way for self.p.value to be equal to key:
if self.p.value < key:
if self.p.rchild == None:
return 0 # Not found anything to delete
elif self.p.rchild.value == key:
self.p.rchild = self.proceed(self.p, self.p.rchild)
return 1
else:
self.p = self.p.rchild
return 0
    def proceed(self, parent, delValue):
        """Return the subtree that replaces delValue once it is removed."""
        if delValue.lchild == None and delValue.rchild == None:
            return None
        elif delValue.rchild == None:
            return delValue.lchild
        else:
            # Both subtrees may exist: re-attach the left subtree under the
            # leftmost node of the right subtree before promoting it.
            node = delValue.rchild
            while node.lchild != None:
                node = node.lchild
            node.lchild = delValue.lchild
            return delValue.rchild
def sort(self):
self.__traverse__(self.root, mode=1)
def __traverse__(self, v, mode=0):
"""Traverse in: preorder = 0, inorder = 1, postorder = 2."""
if v == None:
return
if mode == 0:
print (v.value, v.data)
self.__traverse__(v.lchild)
self.__traverse__(v.rchild)
elif mode == 1:
self.__traverse__(v.lchild, 1)
print (v.value, v.data)
self.__traverse__(v.rchild, 1)
else:
self.__traverse__(v.lchild, 2)
self.__traverse__(v.rchild, 2)
print (v.value, v.data)
if (__name__ == "__main__"):
import sys
    print(__copyright__, file=sys.stdout)
    print(__copyright__, file=sys.stderr)
tree = BinaryTree()
tree.add(4, "test1")
tree.add(10, "test2")
tree.add(23, "test3")
tree.add(1, "test4")
tree.add(3, "test5")
tree.add(2, "test6")
tree.sort()
    print(tree.search(3))
    print(tree.deleteNode(10))
    print(tree.deleteNode(23))
    print(tree.deleteNode(4))
    print(tree.search(3))
tree.sort()
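    # Added note (derived from the code above, not in the original file): the first
    # sort() prints the pairs in key order (1 test4, 2 test6, 3 test5, 4 test1,
    # 10 test2, 23 test3), search(3) returns 'test5', each deleteNode() call
    # returns 1, and the final sort() prints the remaining keys 1, 2 and 3.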
| [
"[email protected]"
] | |
47193ec756fca771cdefe437d9cfd48ad786e116 | a9ac3c537fc778b34cb32d4528e2d1190e65e19e | /scripts/quantum_hall/plot_soi_vs_density.py | 1d23715075d28101edc610eb3beca90b6a499c9b | [
"MIT"
] | permissive | wms269/shabanipy | 9f770cfdf113ca8e8af69cd793be2f8bf9b0141a | 1e751631e031c528e18d5e0d8ff4fa1457f4107e | refs/heads/master | 2022-09-23T15:43:43.875608 | 2020-04-09T17:49:24 | 2020-04-09T17:49:24 | 265,638,022 | 1 | 0 | MIT | 2020-05-20T17:25:40 | 2020-05-20T17:25:39 | null | UTF-8 | Python | false | false | 2,360 | py | # -*- coding: utf-8 -*-
"""Plot Rashba and mobility vs density from extracted parameters.
The csv file to read is expected to have been generated by
extract_soi_from_wal.py
"""
# =============================================================================
# --- Parameters --------------------------------------------------------------
# =============================================================================
#: Path to the csv fild holding the data
PATH = ('/Users/mdartiailh/Documents/PostDocNYU/DataAnalysis/WAL/JS124/'
'average_rashba_only/JS138_124HB_BM003_004_wal_analysis_avg.csv')
#: Density column
DENSITY_COLUMN = 'Density (m^-2)'
#: Mobility column
MOBILITY_COLUMN = 'Mobility (m^2V^-1s^-1)'
#: SOI column
SOI_COLUMN = 'Rashba SOI (meV.A)'
#: Densities to mark on the SOI plot (in 1e12 cm^-2)
DENSITIES = [1.3, 1.9, 3.8]
#: Number of points on which to average the SOI strength to compute stderr
STDERR_COMPUTATION = 3
# =============================================================================
# --- Execution ---------------------------------------------------------------
# =============================================================================
import os
import time
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
plt.rcParams.update({'font.size': 14})
plt.rcParams.update({'pdf.fonttype': 42})
data = pd.read_csv(PATH, comment='#')
density = np.array(data[DENSITY_COLUMN][1:])
mobility = np.array(data[MOBILITY_COLUMN][1:])
rashba = np.array(data[SOI_COLUMN][1:])
density = np.mean(density.reshape((-1, STDERR_COMPUTATION)), axis=1)
mean_mob = np.mean(mobility.reshape((-1, STDERR_COMPUTATION)), axis=1)
std_mob = np.std(mobility.reshape((-1, STDERR_COMPUTATION)), axis=1)
mean_soi = np.mean(rashba.reshape((-1, STDERR_COMPUTATION)), axis=1)
std_soi = np.std(rashba.reshape((-1, STDERR_COMPUTATION)), axis=1)
fig, (ax1, ax2) = plt.subplots(1, 2, sharex=True, constrained_layout=True, figsize=(15,4))
ax1.errorbar(density/1e4, mean_mob*1e4, std_mob*1e4, fmt='+', color='C2')
ax1.set_ylabel('Mobility (cm$^{-2}$V${^-1}$s$^{-1}$)')
ax1.set_xlabel('Density (cm$^{-2}$)')
ax2.errorbar(density/1e4, mean_soi, std_soi, fmt='+',)
ax2.set_ylabel('Rashba SOI (meV.A)')
ax2.set_xlabel('Density (cm$^{-2}$)')
for n in DENSITIES:
ax2.axvline(n*1e12, ymin=0.95, color='k')
plt.show()
| [
"[email protected]"
] | |
9c32089e5865258988d73e8474c68a70f34955e7 | 068d271e241d8cdb46dbf4243166e4b8ee7025b2 | /Django/进阶部分/day67课上代码两个项目哦/day67/mysite67/app01/urls.py | 59ca711f596bdc197689aaf3513219e0abe2620d | [] | no_license | caiqinxiong/python | f6e226e76cb62aac970bcfbcb6c8adfc64858b60 | 9029f6c528d2cb742b600af224e803baa74cbe6a | refs/heads/master | 2023-05-26T19:41:34.911885 | 2020-05-15T09:02:08 | 2020-05-15T09:02:08 | 195,261,757 | 1 | 0 | null | 2021-06-10T23:33:33 | 2019-07-04T15:01:42 | JavaScript | UTF-8 | Python | false | false | 338 | py | from django.conf.urls import url
from app01 import views
urlpatterns = [
url(r'^home/', views.home, {"age": 18}, name="home"),
    # Positional parameters
url(r'^book/([0-9]{2,4})/([a-zA-Z]{2})/$', views.book, name="book"),
    # Keyword parameters
# url(r'^book/(?P<year>[0-9]{2,4})/(?P<title>[a-zA-Z]{2})/$', views.book, name="book")
] | [
"[email protected]"
] | |
28630e10caac44c62f98b0f86af906f33d97d559 | b1f801f4f805467491c0b7c2db01c7806c10f4ea | /hockey/oilers.py | e86e659297f1c98fe3f825a975a73fb97d18d29d | [
"MIT"
] | permissive | Obliviatum/Trusty-cogs | 2fd00effade8cb45c139a85aac53b791d1a278f9 | f2297675f92b8cfc25993271b8ad6abccbec7230 | refs/heads/master | 2022-12-16T15:51:05.072770 | 2020-09-10T23:40:16 | 2020-09-10T23:40:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,178 | py | import asyncio
from phue import Bridge
import functools
class Oilers:
def __init__(self, bot):
self.bot = bot
self.bridge = Bridge("192.168.50.123")
self.lights = self.bridge.lights
self.bridge2 = Bridge("192.168.50.163")
self.lights2 = self.bridge2.lights
self.cur_lights = {}
self.cur_lights2 = {}
def goal_lights(self):
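        """Flash every bulb on both bridges five times, then restore the
        on/colour-temperature state captured at the start."""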
async def task():
task = functools.partial(self.get_current_lights_setting)
task = self.bot.loop.run_in_executor(None, task)
try:
await asyncio.wait_for(task, timeout=60)
except asyncio.TimeoutError:
return
for i in range(5):
task = functools.partial(self.oilers_hex_set, x=1.0, y=1.0)
task = self.bot.loop.run_in_executor(None, task)
try:
await asyncio.wait_for(task, timeout=60)
except asyncio.TimeoutError:
pass
# await self.oilers_hex_set(1.0, 1.0)
await asyncio.sleep(0.5)
task = functools.partial(self.oilers_hex_set, x=0, y=0)
task = self.bot.loop.run_in_executor(None, task)
try:
await asyncio.wait_for(task, timeout=60)
except asyncio.TimeoutError:
pass
# await self.oilers_hex_set(0, 0)
await asyncio.sleep(0.5)
task = functools.partial(self.reset_light_setting)
task = self.bot.loop.run_in_executor(None, task)
try:
await asyncio.wait_for(task, timeout=60)
except asyncio.TimeoutError:
return
return self.bot.loop.create_task(task())
def reset_light_setting(self):
for light in self.lights:
old_temp = self.cur_lights[light.name][1]
if old_temp < 154:
old_temp = 154
if old_temp > 500:
old_temp = 499
light.colortemp = old_temp
light.on = self.cur_lights[light.name][0]
for light in self.lights2:
old_temp = self.cur_lights2[light.name][1]
if old_temp < 154:
old_temp = 154
if old_temp > 500:
old_temp = 499
light.colortemp = old_temp
light.on = self.cur_lights2[light.name][0]
return
def get_current_lights_setting(self):
for light in self.lights:
self.cur_lights[light.name] = [light.on, light.colortemp]
for light in self.lights2:
self.cur_lights2[light.name] = [light.on, light.colortemp]
return
def oilers_hex_set(self, x: float, y: float):
"""Sets the colour for Oilers Goals"""
if x > 1.0 or x < 0.0:
x = 1.0
if y > 1.0 or y < 0.0:
y = 1.0
for light in self.lights:
if not light.on:
light.on = True
light.xy = [x, y]
for light in self.lights2:
if not light.on:
light.on = True
light.xy = [x, y]
| [
"[email protected]"
] | |
2b4d92d3292e81047c5230dabf58430a113fa1b0 | 453d2e699d218fdb3bc1e535a707988194ac6717 | /dash/render/renderer.py | 87d2dcebc49cfb1dfa4d0322c90249b714943d80 | [
"MIT"
] | permissive | defgsus/thegame | d54ffcd343c7e1805d2c11e24cd38b02243e73d4 | 38a627d9108f1418b94b08831fd640dd87fbba83 | refs/heads/master | 2023-07-23T06:32:40.297591 | 2022-04-11T12:02:32 | 2022-04-11T12:02:32 | 127,875,178 | 1 | 0 | MIT | 2023-07-06T22:07:07 | 2018-04-03T08:21:31 | Python | UTF-8 | Python | false | false | 3,349 | py | import time
import math
from typing import Optional
from pyglet import gl
import glm
from lib.opengl import *
from lib.math import FollowFilter
from .._path import ASSET_PATH
from ..game import Game
from .rs import GameRenderSettings
from .tilemap_node import TileMapNode
from .objects_node import ObjectsNode
from .object_debug_node import ObjectDebugNode
from .constraint_debug_node import ConstraintDebugNode
class GameRenderer:
def __init__(self, game: Game):
self.game = game
self.graph: Optional[RenderGraph] = None
self.pipeline: Optional[RenderPipeline] = None
self.render_settings = GameRenderSettings(32, 32)
self.debug_node = None
self.frame_number = 0
self.camera_pos = glm.vec2(-1, -5)
self.camera_rotation = 0.
self._target_speed_filter = FollowFilter(follow_up=.03, follow_down=.01)
def update(self, time: float, dt: float):
target = self.game.player
#target_speed = self._target_speed_filter(target.average_speed)
target_pos = glm.vec2(target.position) #+ target.direction_of_movement * target_speed * .5
self.camera_pos += min(1., dt * 3.) * (target_pos - self.camera_pos)
# self.camera_rotation += min(1., dt*.3) * (self.game.player.rotation - self.camera_rotation)
self.pipeline.update(self.render_settings, dt)
def render(self):
self.render_settings.projection.location = self.camera_pos
self.render_settings.projection.rotation_deg = self.camera_rotation
if self.graph is None:
self.graph = self.create_render_graph()
if self.pipeline is None:
self.pipeline = self.graph.create_pipeline()
self.pipeline.dump()
# self.pipeline.verbose = 5
#if self.frame_number % 100 == 0:
# self.tile_render_node.upload_map(self.game.tile_map.get_map(0, 0, 32, 32))
self.pipeline.render(self.render_settings)
self.pipeline.render_to_screen(self.render_settings)
self.frame_number += 1
def create_render_graph(self) -> RenderGraph:
graph = RenderGraph()
tile_tex = Texture2DNode(
ASSET_PATH /
"tileset03.png"
)
graph.add_node(tile_tex)
self.tile_render_node = TileMapNode(
"tilerender", self.game.world.tile_map,
tile_size=(16, 16),
tile_set_size=(10, 6),
)
graph.add_node(self.tile_render_node)
graph.connect(tile_tex, 0, self.tile_render_node, mag_filter=gl.GL_NEAREST)
self.object_node = ObjectsNode(
"objects", self.game.world.objects,
tile_size=(16, 16),
tile_set_size=(10, 6),
)
graph.add_node(self.object_node)
graph.connect(tile_tex, 0, self.object_node, mag_filter=gl.GL_NEAREST)
if 1:
self.debug_node = ConstraintDebugNode(
"debug", self.game.world.objects,
)
graph.add_node(self.debug_node)
mix_node = graph.add_node(postproc.Add("mix", count=3 if self.debug_node else 2))
graph.connect(self.tile_render_node, 0, mix_node, 0)
graph.connect(self.object_node, 0, mix_node, 1)
if self.debug_node:
graph.connect(self.debug_node, 0, mix_node, 2)
return graph
| [
"[email protected]"
] | |
c8db3a8e226cb70ad8c96b08f2330917343112c1 | 58141d7fc37854efad4ad64c74891a12908192ed | /config/coconut/node_272.py | 6b2d09a25a2e728fba4ec6858f853d862492788a | [] | no_license | stanleylio/fishie | b028a93b2093f59a8ceee4f78b55a91bb1f69506 | 0685045c07e4105934d713a0fd58c4bc28821ed6 | refs/heads/master | 2022-08-14T13:08:55.548830 | 2022-07-29T01:32:28 | 2022-07-29T01:32:28 | 30,433,819 | 8 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,993 | py | name = 'controller02'
location = 'Coconut Island'
note = "v0 code: {'neutral':0, 'heating':1, 'cooling':2, 'flush':3}"
latitude = 21.4347
longitude = -157.7990
deployment_status = 'deployed'
conf = [
{
'dbtag':'ts',
'description':'Device clock',
'interval':60,
},
{
'dbtag':'t0',
'unit':'\u00b0C',
'description':'Water Temperature',
'lb':22,
'ub':35,
'interval':60,
'plot_range':14*24,
},
{
'dbtag':'c0',
'unit':'\u00b0C',
'description':'Probe offset',
'lb':-1,
'ub':1,
'interval':60,
'plot_range':14*24,
},
{
'dbtag':'s0',
'unit':'\u00b0C',
'description':'Setpoint',
'lb':0,
'ub':50,
'interval':60,
'plot_range':14*24,
},
{
'dbtag':'v0',
'unit':'-',
'description':'Valve state',
'interval':60,
'plot_range':14*24,
},
{
'dbtag':'k',
'unit':'-',
'description':'Tank number',
'interval':60,
'plot_range':14*24,
},
{
'dbtag':'uptime_second',
'description':'Uptime in seconds',
'lb':24*60*60,
'interval':60,
'plot_range':2*24,
},
{
'dbtag':'freeMB',
'unit':'MB',
'description':'Remaining free disk space',
'lb':800,
'interval':60,
'plot_range':2*24,
},
{
'dbtag':'cpu_temp',
'unit':'\u00b0C',
'description':'CPU Temperature',
'lb':5,
'ub':68,
'interval':60,
'plot_range':2*24,
},
]
if '__main__' == __name__:
for c in conf:
print('- - -')
for k,v in c.items():
print(k,':',v)
import sys
sys.path.append('../..')
from os.path import basename
from storage.storage2 import create_table
create_table(conf, basename(__file__).split('.')[0].replace('_','-'))
| [
"[email protected]"
] | |
c6e59b6836ac31b3775c83db628c1e1a2d0c6413 | ece0d321e48f182832252b23db1df0c21b78f20c | /engine/2.80/scripts/freestyle/styles/split_at_highest_2d_curvatures.py | 68a80d89ea7c7b23302858dd2ddfe88b93707121 | [
"GPL-3.0-only",
"Font-exception-2.0",
"GPL-3.0-or-later",
"Apache-2.0",
"LicenseRef-scancode-public-domain",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-public-domain-disclaimer",
"Bitstream-Vera",
"LicenseRef-scancode-blender-2010",
"LGPL-2.1-or-later",
"GPL-2.0-or-later",
"GPL-2.0-only",
"LGPL-2.0-only",
"PSF-2.0",
"LicenseRef-scancode-free-unknown",
"LicenseRef-scancode-proprietary-license",
"GPL-1.0-or-later",
"BSD-2-Clause",
"Unlicense"
] | permissive | byteinc/Phasor | 47d4e48a52fa562dfa1a2dbe493f8ec9e94625b9 | f7d23a489c2b4bcc3c1961ac955926484ff8b8d9 | refs/heads/master | 2022-10-25T17:05:01.585032 | 2019-03-16T19:24:22 | 2019-03-16T19:24:22 | 175,723,233 | 3 | 1 | Unlicense | 2022-10-21T07:02:37 | 2019-03-15T00:58:08 | Python | UTF-8 | Python | false | false | 1,852 | py | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# Filename : split_at_highest_2d_curvature.py
# Author : Stephane Grabli
# Date : 04/08/2005
# Purpose : Draws the visible lines (chaining follows same nature lines)
# (most basic style module)
from freestyle.chainingiterators import ChainSilhouetteIterator
from freestyle.functions import pyInverseCurvature2DAngleF0D
from freestyle.predicates import (
NotUP1D,
QuantitativeInvisibilityUP1D,
TrueUP1D,
pyHigherLengthUP1D,
pyParameterUP0D,
)
from freestyle.shaders import (
ConstantThicknessShader,
IncreasingColorShader,
)
from freestyle.types import Operators
Operators.select(QuantitativeInvisibilityUP1D(0))
Operators.bidirectional_chain(ChainSilhouetteIterator(), NotUP1D(QuantitativeInvisibilityUP1D(0)))
func = pyInverseCurvature2DAngleF0D()
Operators.recursive_split(func, pyParameterUP0D(0.4, 0.6), NotUP1D(pyHigherLengthUP1D(100)), 2)
shaders_list = [
ConstantThicknessShader(10),
IncreasingColorShader(1, 0, 0, 1, 0, 1, 0, 1),
]
Operators.create(TrueUP1D(), shaders_list)
| [
"[email protected]"
] | |
5d5f19e950b769abbcaddf745393f2ddc66ce44e | e3c8f786d09e311d6ea1cab50edde040bf1ea988 | /Incident-Response/Tools/cyphon/cyphon/responder/couriers/migrations/0001_initial.py | 63f51235b83605b3ed2479a1caecf6f191b6d741 | [
"LicenseRef-scancode-proprietary-license",
"GPL-3.0-only",
"LicenseRef-scancode-unknown-license-reference",
"GPL-1.0-or-later",
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-other-copyleft",
"MIT"
] | permissive | foss2cyber/Incident-Playbook | d1add8aec6e28a19e515754c6ce2e524d67f368e | a379a134c0c5af14df4ed2afa066c1626506b754 | refs/heads/main | 2023-06-07T09:16:27.876561 | 2021-07-07T03:48:54 | 2021-07-07T03:48:54 | 384,988,036 | 1 | 0 | MIT | 2021-07-11T15:45:31 | 2021-07-11T15:45:31 | null | UTF-8 | Python | false | false | 2,005 | py | # -*- coding: utf-8 -*-
# Copyright 2017-2019 ControlScan, Inc.
#
# This file is part of Cyphon Engine.
#
# Cyphon Engine is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# Cyphon Engine is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Cyphon Engine. If not, see <http://www.gnu.org/licenses/>.
#
# Generated by Django 1.10.1 on 2017-03-20 16:23
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('visas', '0001_initial'),
('passports', '0001_initial'),
('actions', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Courier',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=40, unique=True)),
('endpoints', models.ManyToManyField(related_name='emissaries', related_query_name='emissary', to='actions.Action', verbose_name='actions')),
('passport', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='passports.Passport')),
('visa', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='visas.Visa')),
],
options={
'abstract': False,
},
),
migrations.AlterUniqueTogether(
name='courier',
unique_together=set([('passport', 'visa')]),
),
]
| [
"[email protected]"
] | |
fe477ca8839d24ef5c701f59cf5ec4ec9470a23a | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03547/s710097147.py | ae361a95c0c868de2d887122c718078902ffa546 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 128 | py | X,Y = input().split()
S=[X,Y]
S.sort()
if X==Y:
print("=")
else:
if S[0]==X:
print("<")
elif S[0]==Y:
print(">") | [
"[email protected]"
] | |
1194dfd86a3d043f09ac701fb4b1c43643524106 | 4da72085e8b3adc68a6ec967025caf9576a75363 | /tapiriik/services/api.py | 0d61cea0af5e343af7a8d1f00d2a42d8eb991179 | [
"Apache-2.0",
"BSD-3-Clause"
] | permissive | blakearnold/tapiriik | ec9063b3bc234dccc5dc63fcbe2f31bbcabc6e96 | bf2e803cc8825a6c21bf7eae115044683dc98837 | refs/heads/master | 2021-01-20T01:10:52.259265 | 2014-07-21T00:58:58 | 2014-07-21T00:58:58 | 18,734,981 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,895 | py | class ServiceExceptionScope:
Account = "account"
Service = "service"
class ServiceException(Exception):
def __init__(self, message, scope=ServiceExceptionScope.Service, block=False, user_exception=None):
Exception.__init__(self, message)
self.Message = message
self.UserException = user_exception
self.Block = block
self.Scope = scope
def __str__(self):
return self.Message + " (user " + str(self.UserException) + " )"
class ServiceWarning(ServiceException):
pass
class APIException(ServiceException):
pass
class APIWarning(ServiceWarning):
pass
# Theoretically, APIExcludeActivity should actually be a ServiceException with block=True, scope=Activity
# It's on the to-do list.
class APIExcludeActivity(Exception):
def __init__(self, message, activity=None, activityId=None, permanent=True, userException=None):
Exception.__init__(self, message)
self.Message = message
self.Activity = activity
self.ExternalActivityID = activityId
self.Permanent = permanent
self.UserException = userException
def __str__(self):
return self.Message + " (activity " + str(self.ExternalActivityID) + ")"
class UserExceptionType:
# Account-level exceptions (not a hardcoded thing, just to keep these seperate)
Authorization = "auth"
AccountFull = "full"
AccountExpired = "expired"
    AccountUnpaid = "unpaid" # vs. expired, which implies it was at some point functional, via payment or trial or otherwise.
# Activity-level exceptions
FlowException = "flow"
Private = "private"
NotTriggered = "notrigger"
RateLimited = "ratelimited"
MissingCredentials = "credentials_missing" # They forgot to check the "Remember these details" box
NotConfigured = "config_missing" # Don't think this error is even possible any more.
StationaryUnsupported = "stationary"
NonGPSUnsupported = "nongps"
TypeUnsupported = "type_unsupported"
DownloadError = "download"
ListingError = "list" # Cases when a service fails listing, so nothing can be uploaded to it.
UploadError = "upload"
SanityError = "sanity"
Corrupt = "corrupt" # Kind of a scary term for what's generally "some data is missing"
Untagged = "untagged"
LiveTracking = "live"
UnknownTZ = "tz_unknown"
System = "system"
Other = "other"
class UserException:
def __init__(self, type, extra=None, intervention_required=False, clear_group=None):
self.Type = type
self.Extra = extra # Unimplemented - displayed as part of the error message.
self.InterventionRequired = intervention_required # Does the user need to dismiss this error?
self.ClearGroup = clear_group if clear_group else type # Used to group error messages displayed to the user, and let them clear a group that share a common cause.
| [
"[email protected]"
] | |
24fa541ba8035e7771c837154211bd159e7bd92e | 2d2c10ffa7aa5ee35393371e7f8c13b4fab94446 | /projects/ai/reader/read-records.py | ad82b741cf38c8653e3c5b8df2f1402d4a8f7ed8 | [] | no_license | faker2081/pikachu2 | bec83750a5ff3c7b5a26662000517df0f608c1c1 | 4f06d47c7bf79eb4e5a22648e088b3296dad3b2d | refs/heads/main | 2023-09-02T00:28:41.723277 | 2021-11-17T11:15:44 | 2021-11-17T11:15:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,377 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# ==============================================================================
# \file inference.py
# \author chenghuige
# \date 2018-02-05 20:05:25.123740
# \Description
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_string('input', './mount/temp/ai2018/sentiment/tfrecord/valid/*record,', '')
flags.DEFINE_integer('batch_size_', 512, '')
flags.DEFINE_string('type', 'debug', '')
flags.DEFINE_string('base', './mount/temp/ai2018/sentiment/tfrecord/', '')
#flags.DEFINE_integer('fold', None, '')
import tensorflow as tf
tf.compat.v1.enable_eager_execution()
import sys, os
from sklearn import metrics
import pandas as pd
import numpy as np
import gezi
import pickle
from wenzheng.utils import ids2text
import melt
logging = melt.logging
from dataset import Dataset
from tqdm import tqdm
# TODO by default save all ? so do not need to change the code ?
# _asdict() https://stackoverflow.com/questions/26180528/python-named-tuple-to-dictionary
# err... valid and test data share same id...
def deal(dataset, infos):
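    """Walk the batched dataset, convert each tensor to numpy (decoding byte
    strings), and store every field of every example in `infos`, keyed by id."""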
for x, _ in tqdm(dataset, ascii=True):
for key in x:
x[key] = x[key].numpy()
if type(x[key][0]) == bytes:
x[key] = gezi.decode(x[key])
ids = x['id']
for j in range(len(ids)):
infos[ids[j]] = {}
for key in x:
infos[ids[j]][key] = x[key][j]
def main(_):
base = FLAGS.base
logging.set_logging_path('./mount/tmp/')
vocab_path = f'{base}/vocab.txt'
ids2text.init(vocab_path)
FLAGS.vocab = f'{base}/vocab.txt'
# FLAGS.length_index = 2
# FLAGS.buckets = '100,400'
# FLAGS.batch_sizes = '64,64,32'
input_ = FLAGS.input
if FLAGS.type == 'test':
input_ = input_.replace('valid', 'test')
inputs = gezi.list_files(input_)
inputs.sort()
if FLAGS.fold is not None:
inputs = [x for x in inputs if not x.endswith('%d.record' % FLAGS.fold)]
if FLAGS.type == 'debug':
print('type', FLAGS.type, 'inputs', inputs, file=sys.stderr)
dataset = Dataset('valid')
dataset = dataset.make_batch(FLAGS.batch_size_, inputs)
print('dataset', dataset)
timer = gezi.Timer('read record')
for i, (x, y) in enumerate(dataset):
# if i % 10 == 1:
# print(x['id'])
# print(x['content'][0])
# print(ids2text.ids2text(x['content'][0], sep='|'))
# print(x['content'])
# print(type(x['id'].numpy()[0]) == bytes)
# break
x['id'] = gezi.decode(x['id'].numpy())
x['content_str'] = gezi.decode(x['content_str'].numpy())
for j, id in enumerate(x['id']):
if id == '573':
print(id, x['content_str'][j])
elif FLAGS.type == 'dump':
valid_infos = {}
test_infos = {}
inputs = gezi.list_files(f'{base}/train/*record')
dataset = Dataset('valid')
dataset = dataset.make_batch(1, inputs)
deal(dataset, valid_infos)
print('after valid', len(valid_infos))
inputs = gezi.list_files(f'{base}/test/*record')
dataset = Dataset('test')
dataset = dataset.make_batch(1, inputs)
deal(dataset, test_infos)
print('after test', len(test_infos))
for key in valid_infos:
print(valid_infos[key])
print(ids2text.ids2text(valid_infos[key]['content']))
break
ofile = f'{base}/info.pkl'
with open(ofile, 'wb') as out:
pickle.dump(valid_infos, out)
ofile = ofile.replace('.pkl', '.test.pkl')
with open(ofile, 'wb') as out:
pickle.dump(test_infos, out)
elif FLAGS.type == 'show_info':
valid_infos = pickle.load(open(f'{base}/info.pkl', 'rb'))
lens = [len(valid_infos[key]['content']) for key in valid_infos]
unks = [list(valid_infos[key]['content']).count(1) for key in valid_infos]
print('num unks per doc:', sum(unks) / len(unks))
print('num doc with unk ratio:', len([x for x in unks if x != 0]) / len(unks))
print('un unk tokens ratio:', sum(unks) / sum(lens))
print('len max:', np.max(lens))
print('len min:', np.min(lens))
print('len mean:', np.mean(lens))
else:
raise ValueError(FLAGS.type)
if __name__ == '__main__':
tf.compat.v1.app.run()
| [
"[email protected]"
] | |
ab23e48873e7ca764d6bfc1216f93ed33e7f1c28 | 69c185d0dfed894234506a1aa6c6bf863849c589 | /web服务器最初引进/wsgi/ctime.py | ac9c1f737f07361c21f766c78184b8254b451ce7 | [] | no_license | haha479/Socket_CS_project | 19599edc47dda61a60afc55dae16a6b59c78fdd5 | 5b54ef8db0b10d63bf9e6f980a32a45c4055238a | refs/heads/master | 2020-04-08T11:23:16.514181 | 2018-11-30T04:26:08 | 2018-11-30T04:26:08 | 159,304,060 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 362 | py | import time
# The data returned to the browser is decoupled into this script file
def get_time(env,start_response):
statu = "200 OK"
headers = [('Content-Type', 'text/html')]
start_response(statu,headers)
return time.ctime()
def get_love(env,start_response):
statu = "200 OK"
headers = [('Content-Type', 'text/html')]
start_response(statu,headers)
return "Love"
| [
"[email protected]"
] | |
91c211a6e01d7c3e851c89671af6973faa5c1296 | 8bc025f27f451f245bd371b66f3d58253e4587d3 | /src/Foundation/Standard/practice12.py | 9c2c3dd3ebc73f06f3f63c509ae4d052354cdf83 | [
"MIT"
] | permissive | mryyomutga/PracticePython | 8f2c5cdef074091eb8fcf76bd78800b959024e02 | e191d73064248d0983344b137fbe6b69e5eb1d12 | refs/heads/master | 2021-05-15T14:31:16.365211 | 2017-10-29T04:46:51 | 2017-10-29T04:46:51 | 107,212,505 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,680 | py | # -*- coding:utf-8 -*-
# ジェネレータ
# 処理の途中で値を返し、必要に応じて処理を再開できる
def sample_generator():
print("call 1")
yield "1st step"
print("call 2")
yield "2nd step"
print("call 3")
yield "3rd step"
# ジェネレータオブジェクトを作成
gen_func = sample_generator()
text = gen_func.__next__() # yieldまで実行
print(text) # 1st step
text = gen_func.__next__()
print(text) # 2nd step
text = gen_func.__next__()
print(text) # 3rd step
print()
# ループ処理でジェネレータ関数を実行
def sample_generator():
print("call 1")
yield "1st step"
print("call 2")
yield "2nd step"
print("call 3")
yield "3rd step"
gen_func = sample_generator()
for text in gen_func:
print(text)
# A generator that yields the Fibonacci sequence
def fibonacci_generator():
f0, f1 = 0, 1
    while True: # this body is resumed 10 times by the loop below
yield f0
f0, f1 = f1, f0 + f1
gen_func = fibonacci_generator()
for i in range(0, 10):
# 10個取得する
num = next(gen_func)
print(num)
print()
# send()メソッド
# 待機中のジェネレータに値を設定する
def sample_generator():
text = yield "Good Morning"
yield text
yield text
gen_func = sample_generator()
text = next(gen_func)
print(text)
text = gen_func.send("Hello")
print(text)
text = next(gen_func)
print(text)
# The throw() method
# Raises an exception inside a generator that is suspended at a yield
# The close() method
# Terminates a suspended generator cleanly
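# A short added example of throw() and close() (not in the original text):
def sample_generator():
    try:
        while True:
            yield "waiting"
    except ValueError:
        yield "caught ValueError"

gen_func = sample_generator()
print(next(gen_func))              # waiting
print(gen_func.throw(ValueError))  # caught ValueError
gen_func.close()                   # terminates the suspended generator cleanly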
| [
"[email protected]"
] | |
7c69ed213923a672ef47819e263bc2c7a18b0dae | 4f0385a90230c0fe808e8672bb5b8abcceb43783 | /LNH/day4/Common module/06 sys module-FLS-MPB.py | 8b07eaf64b395655b35a8a69cb85bf7d9ab01420 | [] | no_license | lincappu/pycharmlearningproject | 4084dab7adde01db9fa82a12769a67e8b26b3382 | b501523e417b61373688ba12f11b384166baf489 | refs/heads/master | 2023-07-10T05:21:15.163393 | 2023-06-29T14:02:35 | 2023-06-29T14:02:35 | 113,925,289 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 384 | py | # !/usr/bin/env python3
# _*_coding:utf-8_*_
# __author__:FLS
from pprint import pprint
import sys
print(sys.argv[0])
# Redirect standard output:
saveout=sys.stdout
flog=open('t2.log.sh','w',encoding='utf-8')
sys.stdout=flog
print('12345323')
flog.close()
sys.stdout=saveout
print('zhengcheng')
print(sys.builtin_module_names)
pprint(sys.path)
pprint(sys.platform)
| [
"[email protected]"
] | |
661ecdd01b1742556a9e7a99a743c13e13548b0f | 06d3156837abec83be6e038e21ee4bfd0f6c0a23 | /mysite/settings.py | be5097c4338860646302a2ba8e43adb57d314010 | [] | no_license | Igorskie/my-first-blog | 2f4c94380ab61024c009f24f6f7cf3d0ac0df0b3 | 431a35144803cb9768e597d945116c94ced6ea13 | refs/heads/master | 2020-07-06T01:14:07.806438 | 2019-08-17T08:03:57 | 2019-08-17T08:03:57 | 202,833,238 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,181 | py | """
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 2.0.13.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'b&pf-&z59!43(r882u2*k36s4fbtpw##$z1=570m!cjb13+$-a'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['127.0.0.1', '.pythonanywhere.com']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'blog',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static') | [
"[email protected]"
] | |
e72f0caffcab32a6b1d54c4d895be2149304c7d8 | 6b265b404d74b09e1b1e3710e8ea872cd50f4263 | /Python/PyParsing/macro_expander.py | 0c15b33b30ac328bf08b6d947d174a4f430e5943 | [
"CC-BY-4.0"
] | permissive | gjbex/training-material | cdc189469ae2c7d43784ecdcb4bcca10ecbc21ae | e748466a2af9f3388a8b0ed091aa061dbfc752d6 | refs/heads/master | 2023-08-17T11:02:27.322865 | 2023-04-27T14:42:55 | 2023-04-27T14:42:55 | 18,587,808 | 130 | 60 | CC-BY-4.0 | 2023-08-03T07:07:25 | 2014-04-09T06:35:58 | Jupyter Notebook | UTF-8 | Python | false | false | 3,443 | py | #!/usr/bin/env python
from argparse import ArgumentParser, FileType
import imp
import sys
import types
from pyparsing import Regex, Literal, ZeroOrMore, Group
class UndefinedMacroError(Exception):
'''Class encoding an exception for an undefined macro encountered
while parsing a text'''
def __init__(self, function_name):
'''Constructor, takes the unknown macro name as an argument'''
super(UndefinedMacroError, self).__init__()
self._msg = "unknown macro '{0}'".format(function_name.strip('\\'))
def __str__(self):
'''method to stringify the exception'''
return repr(self._msg)
class MacroExpander(object):
'''Macro expansion class, macros are encoded as
\\macro_name{param_1}...{param_n}'''
def __init__(self):
'''Constructor'''
self._macros = {}
text = Regex(r'[^\\]+').leaveWhitespace()
lb = Literal('{').suppress()
rb = Literal('}').suppress()
param_value = Regex(r'[^}\\]+')
param = lb + ZeroOrMore(param_value) + rb
params = Group(ZeroOrMore(param)).setResultsName('params')
macro_name = Regex(r'\\\w+').setResultsName('macro')
macro_call = macro_name + params
text_file = ZeroOrMore(text | macro_call)
def macro_action(toks):
macro_name = toks['macro']
params = toks['params']
if self._has_macro(macro_name):
return self._macros[macro_name](*params)
else:
raise UndefinedMacroError(macro_name)
macro_call.addParseAction(macro_action)
self._grammar = text_file
def add_macro(self, macro_name, macro_impl):
'''method to add a new macro to the macro expander, given
the function name, and its implementation as arguments'''
self._macros['\\' + macro_name] = macro_impl
def _has_macro(self, macro_name):
'''internal method to check whether the parser has a
definition for the given macro name'''
return macro_name in self._macros
def expand(self, text):
'''method to perform the macro expansion on the given text'''
results = self._grammar.parseString(text)
return ''.join(results)
def main():
arg_parser = ArgumentParser(description='macro expansion utility')
arg_parser.add_argument('--file', type=FileType('r'),
action='store', dest='file',
required=True, help='file to expand')
arg_parser.add_argument('--def', type=str, action='store',
default='macro_defs', dest='defs',
help='macro definitions module name')
try:
options = arg_parser.parse_args()
text = ''.join(options.file)
module_info = imp.find_module(options.defs)
macro_module = imp.load_module(options.defs, *module_info)
expander = MacroExpander()
for macro_def in macro_module.__dict__.values():
if isinstance(macro_def, types.FunctionType):
expander.add_macro(macro_def.__name__, macro_def)
print(expander.expand(text))
except UndefinedMacroError as error:
sys.stderr.write('### error: ' + str(error) + '\n')
sys.exit(2)
except Exception as error:
sys.stderr.write('### error: ' + str(error) + '\n')
sys.exit(1)
if __name__ == '__main__':
main()
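# Added usage sketch, kept as a comment so it does not execute; the macro names
# below are hypothetical and not taken from any real macro_defs module:
#
#   expander = MacroExpander()
#   expander.add_macro('upper', lambda text: text.upper())
#   print(expander.expand(r'hello \upper{world}'))   # -> "hello WORLD"
#
# Typical invocation: python macro_expander.py --file input.txt --def macro_defs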
| [
"[email protected]"
] | |
099cc4fda3d3ac97352d0deffc1bdb6a06428dc2 | 6c28060d11ec001b48a091760d0f883b23a72eaa | /notification/context_processor.py | cd66be1f866e7ca6946f8fafb29de4b9f29741eb | [] | no_license | s-soroosh/rose | 8b37904781d382fbac58fbaf9668391dddee2fc7 | 1f7ab356656696de06c56f8a86808ae59474c649 | refs/heads/master | 2021-05-26T18:22:37.349231 | 2014-07-02T07:25:54 | 2014-07-02T07:25:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 216 | py | from assign.models import Assign
__author__ = 'soroosh'
def notification(request):
count = Assign.objects.filter(target__id=request.user.id, status='pending').count()
return {'notifications_count': count}
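# Added note: Django only runs this processor if it is registered under the
# TEMPLATES 'context_processors' option in settings; the dotted path (assumed
# from this file's location) would be 'notification.context_processor.notification'.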
| [
"[email protected]"
] | |
0621261bead3ecfcb35630fd2ffb1926684431d1 | 0e647273cffc1fb6cbd589fa3c7c277b221ba247 | /configs/hpt-pretrain/chexpert-r18/no_basetrain/5000-iters.py | eea6cd8ca1b09c010a379fe19bbf022ffb5a8f90 | [
"Apache-2.0"
] | permissive | Berkeley-Data/OpenSelfSup | e9976bf011b69ebf918506ba184f464b1073ec13 | 221191b88d891de57725b149caf237ffef72e529 | refs/heads/master | 2023-05-12T07:34:52.268476 | 2021-04-08T00:58:37 | 2021-04-08T00:58:37 | 343,654,823 | 0 | 1 | Apache-2.0 | 2021-04-08T00:58:37 | 2021-03-02T05:20:27 | Python | UTF-8 | Python | false | false | 206 | py | _base_="../base-chexpert-r18-config.py"
# this will merge with the parent
# epoch related
total_iters=5000
checkpoint_config = dict(interval=total_iters)
checkpoint_config = dict(interval=total_iters//2)
| [
"[email protected]"
] | |
4b22bc1672d0cb5edf940c5940d9748062ae83bf | 4c117ea3617a576ddd07d8ea8aaab1a925fc402f | /bin/Race/Statistic/StatPlotRace.py | 4c58180d8fb9441a99b11f09fe6b364418a4f2eb | [] | no_license | 452990729/Rep-seq | 7be6058ba3284bea81282f2db7fd3bd7895173ba | e217b115791e0aba064b2426e4502a5c1b032a94 | refs/heads/master | 2021-12-11T14:27:46.912144 | 2019-06-04T03:49:40 | 2019-06-04T03:49:40 | 190,124,555 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 10,436 | py | #!/usr/bin/env python
import re
from glob import glob
import os
import sys
import random
from copy import deepcopy
from collections import Counter
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
def get_cmap(n, name='hsv'):
    '''Return a list of n distinct RGB colors, in shuffled order, sampled from
    the matplotlib colormap given by the keyword argument name.
    '''
cap = plt.cm.get_cmap(name, n+1)
list_tmp = [cap(i) for i in range(n)]
random.shuffle(list_tmp)
return list_tmp
def PlotCDR3(list_path, xlb, tp):
paths = []
if tp == 'n':
for path in list_path:
paths += glob(path+'/*.nCDR3.len.stat')
st = 'Distribution of CDR3 Length(Nucleotide)'
sf = 'CDR3LengthOfNucleotide.png'
ylim = 90
elif tp == 'a':
for path in list_path:
paths += glob(path+'/*.aCDR3.len.stat')
st = 'Distribution of CDR3 Length(Amino)'
sf = 'CDR3LengthOfAmino.png'
ylim = 30
label = []
list_np = []
medians = []
fig, axes = plt.subplots()
for path in paths:
np_tmp = np.loadtxt(path, dtype='S10')
np_in = np_tmp[:, 1].astype('int')
list_np.append(np_in)
medians.append(np.median(np_in))
label.append(re.split('\.', os.path.basename(path))[0])
vplot = axes.violinplot(list_np, showmeans=False,\
showmedians=False, showextrema=False, widths=0.2)
bplot = axes.boxplot(list_np, vert=True, patch_artist=True,\
showfliers=False, widths=0.03, medianprops={'linestyle': 'None'})
inds = np.arange(1, len(medians)+1)
axes.scatter(inds, medians, marker='o', color='white', s=30, zorder=3)
for patch in bplot['boxes']:
patch.set_facecolor('black')
for patch, color in zip(vplot['bodies'], get_cmap(len(label))):
patch.set_color(color)
axes.set_xticks([y+1 for y in range(len(label))], )
axes.set_xlabel(xlb)
axes.set_ylabel('Length(bp)')
axes.set_xticklabels(label)
axes.set_ylim(0, ylim)
axes.spines['right'].set_visible(False)
axes.spines['top'].set_visible(False)
axes.set_title(st)
plt.savefig(sf)
def PlotCDR3Bar(list_path, tp, pathout):
paths = []
if tp == 'n':
for path in list_path:
paths += glob(path+'/*.nCDR3.len.stat')
st = 'Distribution of CDR3 Length(Nucleotide)'
sf = 'CDR3LengthOfNucleotide.png'
xlim = 120
elif tp == 'a':
for path in list_path:
paths += glob(path+'/*.aCDR3.len.stat')
st = 'Distribution of CDR3 Length(Amino)'
sf = 'CDR3LengthOfAmino.png'
xlim = 40
if len(list_path) == 2:
colors = ['g','r']
fig, axes = plt.subplots()
m = 0
labels = []
for path in sorted(paths):
np_tmp = np.loadtxt(path, dtype='S10')
np_in = np_tmp[:, 1].astype('int')
label = re.split('\.', os.path.basename(path))[0]
dict_tmp = dict(Counter(list(np_in)))
keys = sorted(dict_tmp.keys())
x = np.array(keys)
tl = len(np_in)
y = np.array([round(float(dict_tmp[i])*100/tl, 2) for i in keys])
axes.bar(x, y, width=0.8, align='center', color=colors[m], alpha=0.4, label=label)
m += 1
axes.legend(loc='upper right')
axes.set_xlim(0, xlim)
axes.set_xlabel('Length')
        axes.set_ylabel('Percentage (%)')
axes.spines['right'].set_visible(False)
axes.spines['top'].set_visible(False)
axes.set_title(st)
else:
colors = get_cmap(len(list_path))
if len(list_path)%2 == 0:
fig, axes = plt.subplots(nrows=len(list_path)/2, ncols=2, figsize=(10, len(list_path)), dpi=300)
handle = len(paths)/2
else:
fig, axes = plt.subplots(nrows=len(list_path)/2+1, ncols=2, figsize=(10, (len(list_path)/2+1)*2), dpi=300)
axes[-1,-1].spines['right'].set_visible(False)
axes[-1,-1].spines['left'].set_visible(False)
axes[-1,-1].spines['top'].set_visible(False)
axes[-1,-1].spines['bottom'].set_visible(False)
axes[-1,-1].set_xticks([])
axes[-1,-1].set_yticks([])
handle = len(paths)/2+1
m = 0
n = 0
c = 0
for path in sorted(paths):
np_tmp = np.loadtxt(path, dtype='S10')
np_in = np_tmp[:, 1].astype('int')
label = re.split('\.', os.path.basename(path))[0]
dict_tmp = dict(Counter(list(np_in)))
keys = sorted(dict_tmp.keys())
x = np.array(keys)
tl = len(np_in)
y = np.array([round(float(dict_tmp[i])*100/tl, 2) for i in keys])
axes[m, n].bar(x, y, width=0.8, align='center', color=colors[c],\
alpha=0.8, label=label)
axes[m, n].legend(loc='upper right')
axes[m, n].set_xlim(0, xlim)
# axes[m ,n].set_xlabel('Length')
axes[m, n].spines['right'].set_visible(False)
axes[m, n].spines['top'].set_visible(False)
c += 1
if c < handle:
m += 1
d = deepcopy(m)
# axes[m, n].set_ylabel('Pecentage (%)')
else:
m = c-d-1
n = 1
# axes[0, 0].set_ylabel('Pecentage (%)')
fig.subplots_adjust(hspace=0.4)
plt.savefig(os.path.join(pathout, sf))
def PlotVDJ(path_in, xlb):
fig, ax = plt.subplots(2,2)
axs = ax.flatten()
def PlotPie(np_in, ax_in, ns):
nps = np_in[:,1].astype('int')
porcent = 100.*nps/nps.sum()
patches, texts = ax_in.pie(nps, colors=get_cmap(len(np_in[:,0])),\
shadow=True, startangle=90)
labels = ['{0} - {1:1.2f} %'.format(i,j) for i,j in zip(np_in[:,0], porcent)]
if len(labels) <= 6:
ax_in.legend(patches, labels, loc='center left', bbox_to_anchor=(-0.9, 0.5),
fontsize=8)
else:
ax_in.legend(patches[:6], labels[0:], loc='center left', bbox_to_anchor=(-0.9, 0.5),
fontsize=8)
ax_in.set_title('Fraction of {}'.format(ns))
V = glob(path_in+'/*.V.stat')[0]
D = glob(path_in+'/*.D.stat')[0]
J = glob(path_in+'/*.J.stat')[0]
list_tmp = ['V', 'D', 'J']
name = re.split('\.', os.path.basename(V))[0]
dir_s = os.path.dirname(V)
i = 0
for path in [V,D,J]:
np_tmp = np.loadtxt(path, dtype='S10')
PlotPie(np_tmp, axs[i], list_tmp[i])
i += 1
axs[-1].axis('off')
fig.subplots_adjust(wspace=1)
fig.suptitle('Usage of VDJ genes ({})'.format(xlb))
plt.savefig(os.path.join(dir_s, 'FractionOfVDJOf{}.png'.format(xlb)), bbox_inches='tight')
def ReplaceLabel(array_in):
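    '''Map each distinct label in array_in to a small integer index (in first-seen
    order) and return the resulting index array.'''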
dict_tmp = {}
m = 0
n = 0
array_out = np.zeros(array_in.shape)
for i in array_in:
if i not in dict_tmp:
dict_tmp[i] = m
m += 1
array_out[n] = dict_tmp[i]
else:
array_out[n] = dict_tmp[i]
n += 1
return array_out
def PlotVJComb(path_in, xlb):
'''
plot 3d-hist of VJ combination
'''
fig = plt.figure(figsize=(20, 10), dpi=300)
ax = fig.add_subplot(111, projection='3d')
VJ = glob(path_in+'/*.VJCom.stat')[0]
name = re.split('\.', os.path.basename(VJ))[0]
dir_s = os.path.dirname(VJ)
np_tmp = np.loadtxt(VJ, dtype='S10')
list_s = []
ms = 0
for ay in np_tmp:
ms += 1
if ay[0] not in list_s:
list_s.append(ay[0])
if len(list_s) == 30:
break
np_tmp = np_tmp[:ms,:]
x = np_tmp[:,0]
xpos = ReplaceLabel(x)
y = np_tmp[:,1]
ypos = ReplaceLabel(y)
z = np.zeros(x.shape)
dx = 0.5*np.ones_like(z)
dy = dx.copy()
dz = np_tmp[:,2].astype('int')
col = get_cmap(len(set(list(ypos))))
colors = np.array([col[i] for i in ypos.astype('int')])
ax.bar3d(xpos, ypos, z, dx, dy, dz, color=colors, zsort='average',\
alpha=0.5)
ax.w_xaxis.set_ticks(xpos)
ax.w_xaxis.set_ticklabels(x, rotation=20, va='center', ha='right', fontsize=6)
ax.set_xlabel('V Gene')
ax.w_yaxis.set_ticks(ypos)
ax.w_yaxis.set_ticklabels(y)
ax.set_ylabel('J Gene')
ax.set_zlabel('Count')
ax.xaxis.labelpad=15
ax.yaxis.labelpad=15
ax.zaxis.labelpad=15
fig.suptitle('Distribution of VJ combination ({})'.format(xlb))
plt.savefig(os.path.join(dir_s,\
'DistributionOfVJCombinationOf{}.png'.format(xlb)), bbox_inches='tight')
def CountBar(np_in):
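    '''Bin the CONSCOUNT values into fixed ranges (<=5, 5-10, ..., >10000) and
    return the bar tick labels plus the percentage of sequences in each bin.'''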
a,b,c,d,e,f,g,h = 0,0,0,0,0,0,0,0
tl = len(np_in)
for i in np_in:
if i <= 5:
a += 1
elif 5<i<=10:
b += 1
elif 10<i<=30:
c += 1
elif 30<i<=50:
d += 1
elif 50<i<=100:
e += 1
elif 100<i<=1000:
f += 1
elif 1000<i<=10000:
g += 1
elif i>10000:
h += 1
np_c = np.array([int(round(m, 2)*100) for m in [float(n)/tl for n in\
[a,b,c,d,e,f,g,h]]])
return np.array(['0','<5','5-10','10-30','30-50','50-100',\
'100-1000','1000-10000', '>10000']),np_c
def PlotConst(path_in, xlb):
path = glob(path_in+'/*_atleast-2_headers.tab')[0]
fig, axes = plt.subplots()
np_tmp = np.loadtxt(path, dtype='S10')
np_in = np_tmp[1:, 1].astype('int')
label, y = CountBar(np_in)
axes.bar(range(1,9),y)
axes.set_xlabel(xlb)
axes.set_ylabel('Percentage(%)')
axes.spines['right'].set_visible(False)
axes.spines['top'].set_visible(False)
axes.set_xticklabels(label, fontsize=7)
axes.set_title('Distribution of CONSCOUNT')
plt.savefig('DistributionOfCONSCOUNTOf{}.png'.format(xlb))
def main():
if sys.argv[1] == 'PlotCDR3Bar':
list_path = re.split(',', sys.argv[2])
sample = sys.argv[3]
PlotCDR3Bar(list_path, sample, sys.argv[4])
elif sys.argv[1] == 'PlotVDJ':
PlotVDJ(sys.argv[2], sys.argv[3])
elif sys.argv[1] == 'PlotVJComb':
PlotVJComb(sys.argv[2], sys.argv[3])
elif sys.argv[1] == 'PlotConst':
PlotConst(sys.argv[2], sys.argv[3])
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
157d369702bec630b730984870fff4996b38d54e | b28df8f2cd9a4b4fe274eb1688e7410ae19f9da1 | /kwippy/models/login.py | f89b8a0d9b01ef11b043d89d16a026f3e3f39269 | [] | no_license | kwippy-com/kwippycore | ba2d8b584e2171fd5322446df409e6983e23409b | d0647405cf77c4490cb40194b35e385955d56707 | refs/heads/master | 2020-06-14T14:56:35.169865 | 2014-12-08T16:44:20 | 2014-12-08T16:44:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 307 | py | from django.db import models
from django.contrib.auth.models import *
class Login(models.Model):
user = models.ForeignKey(User)
login_at = models.DateTimeField(auto_now_add=True)
def __unicode__(self) :
return '%s' % (self.user)
class Meta:
app_label="kwippy"
| [
"[email protected]"
] | |
cce00240af79b52f5debad52fa91b451574aaca4 | e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f | /indices/primat.py | 30563ebedac47163924a1ceba50956c489a8723a | [] | no_license | psdh/WhatsintheVector | e8aabacc054a88b4cb25303548980af9a10c12a8 | a24168d068d9c69dc7a0fd13f606c080ae82e2a6 | refs/heads/master | 2021-01-25T10:34:22.651619 | 2015-09-23T11:54:06 | 2015-09-23T11:54:06 | 42,749,205 | 2 | 3 | null | 2015-09-23T11:54:07 | 2015-09-18T22:06:38 | Python | UTF-8 | Python | false | false | 508 | py | ii = [('CookGHP3.py', 2), ('CoolWHM2.py', 1), ('SadlMLP.py', 1), ('CookGHP.py', 2), ('ShawHDE.py', 1), ('LeakWTI2.py', 1), ('LeakWTI3.py', 4), ('ClarGE2.py', 3), ('ClarGE.py', 9), ('DaltJMA.py', 47), ('WestJIT2.py', 1), ('DibdTRL2.py', 1), ('WadeJEB.py', 6), ('FerrSDO2.py', 1), ('NewmJLP.py', 2), ('LeakWTI4.py', 9), ('LeakWTI.py', 4), ('SoutRD.py', 1), ('MereHHB3.py', 1), ('MackCNH.py', 1), ('WestJIT.py', 1), ('MackCNH2.py', 2), ('WilbRLW3.py', 1), ('BrewDTO.py', 2), ('ClarGE3.py', 2), ('TaylIF.py', 3)] | [
"[email protected]"
] | |
5566d9fe68f4a8e90970c0c0c27916071980e61a | 2ec14fd1724fc8959e1d3a1b4d3f61d5c0cf6f48 | /test/functional/feature_uacomment.py | e8b6937d62c8a3a6b6640d2715077b8465f8deaf | [
"MIT"
] | permissive | vitae-labs/Vitae | 7ddf8142d1e663f406399ec17de1c7bbba5e32fd | fa301e714cb26e742cfe29164a25961f1ff6d52c | refs/heads/main | 2022-07-28T15:48:24.765770 | 2022-01-29T06:13:19 | 2022-01-29T06:13:19 | 451,559,855 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,769 | py | #!/usr/bin/env python3
# Copyright (c) 2017-2019 The Bitcoin Core developers
# Copyright (c) 2020-2021 The Vitae Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the -uacomment option."""
import re
from test_framework.test_framework import VitaeTestFramework
from test_framework.test_node import ErrorMatch
from test_framework.util import assert_equal
class UacommentTest(VitaeTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.setup_clean_chain = True
def run_test(self):
self.log.info("test multiple -uacomment")
test_uacomment = self.nodes[0].getnetworkinfo()["subversion"][-12:-1]
assert_equal(test_uacomment, "(testnode0)")
self.restart_node(0, ["-uacomment=foo"])
foo_uacomment = self.nodes[0].getnetworkinfo()["subversion"][-17:-1]
assert_equal(foo_uacomment, "(testnode0; foo)")
self.log.info("test -uacomment max length")
self.stop_node(0)
expected = r"Error: Total length of network version string \([0-9]+\) exceeds maximum length \(256\). Reduce the number or size of uacomments."
self.nodes[0].assert_start_raises_init_error(["-uacomment=" + 'a' * 256], expected, match=ErrorMatch.FULL_REGEX)
self.log.info("test -uacomment unsafe characters")
for unsafe_char in ['/', ':', '(', ')', '₿', '🏃']:
expected = r"Error: User Agent comment \(" + re.escape(unsafe_char) + r"\) contains unsafe characters."
self.nodes[0].assert_start_raises_init_error(["-uacomment=" + unsafe_char], expected, match=ErrorMatch.FULL_REGEX)
if __name__ == '__main__':
UacommentTest().main()
| [
"[email protected]"
] | |
d09d651c8b884b3ed825d329a4531ec94b0b54d5 | ad71c89863122dfb4093db0d9f9c40d962d567ff | /Week 10/3-HorizontalHistogram.py | 487bedcb770ac0989049098209474854bb385e10 | [] | no_license | jacktnp/PSIT60 | 8958e7cca278c81d2c5d3af6956728c35425628d | b63c63d8d9c1e97ce66bbb0b884b1f19fecf7b6b | refs/heads/master | 2021-08-16T07:53:33.900161 | 2017-11-19T10:07:02 | 2017-11-19T10:07:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 944 | py | """ PSIT Week 10
Wiput Pootong (60070090)
HorizontalHistogram
"""
def main():
""" Display histogram of alphabets """
text = input()
upper = {}
lower = {}
for char in text:
if char.isupper():
if char not in upper:
upper[char] = 0
upper[char] += 1
else:
if char not in lower:
lower[char] = 0
lower[char] += 1
for char in sorted(lower):
print("%s : " %char, end='')
for count in range(lower[char]):
print("-", end='')
if count % 5 == 4 and count != (lower[char] - 1):
print("|", end='')
print()
for char in sorted(upper):
print("%s : " %char, end='')
for count in range(upper[char]):
print("-", end='')
if count % 5 == 4 and count != (upper[char] - 1):
print("|", end='')
print()
main()
| [
"[email protected]"
] | |
c17ad1ba1dfe17e3fa802c32622852702517642a | 3424161b573d2fe8873905d434d459a28336e87c | /head_soccer_06_3/source/database/mysql.py | fbb8e3d541273717b2f80f718259dc62c29cae0d | [] | no_license | newtonis/Head-Soccer-Network | 412f7717b97bcb2216bc8086ef131e9e9a4f3908 | fd76920c486fb4af903b0e92b0d014a7d254f124 | refs/heads/master | 2023-05-23T23:08:46.952852 | 2021-06-27T00:20:12 | 2021-06-27T00:20:12 | 30,889,769 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,566 | py | __author__ = 'Dylan'
import _mysql
import time
try:
con = _mysql.connect('db4free.net','grandt','1221dylan','headsoccerdb')
except:
    print("[Info] Not able to reach internet")
con = None
class SQLEngine:
def CheckDeadServers(self):
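        '''Drop servers whose Created timestamp is older than 120 seconds (stale heartbeat).'''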
actual = self.GetServers()
for x in actual:
if float(x["Created"]) < time.time() - 120:
self.RemoveServer(x["IP"],x["Name"])
def AddServer(self,name,ip):
### Add server to Servers database ###
con.query("SELECT * FROM Servers WHERE Name = '"+name+"'")
if con.store_result().num_rows() == 0:
con.query("INSERT INTO Servers (Name,IP,Created) VALUES ('"+name+"','"+ip+"',"+str(time.time())+")")
return True
else:
return False
def RemoveServer(self,ip,name):
### Remove server from Servers database by IP
con.query("DELETE FROM Servers WHERE IP = '"+ip+"' AND Name = '"+name+"'")
def GetServers(self):
### Return list of servers ###
if not con:
return []
con.query("SELECT * FROM Servers")
res = con.store_result()
servers = []
for x in range(res.num_rows()):
data = list(res.fetch_row())[0]
servers.append({"Name":data[0],"IP":data[1],"Created":data[2]})
return servers
def UpdateServer(self,ip,name):
try:
con.query("UPDATE Servers SET Created="+str(time.time())+" WHERE IP = '"+ip+"' AND Name = '"+name+"'")
except:
pass
MySQL = SQLEngine() | [
"[email protected]"
] |