code (string, 20 to 1.05M chars) | apis (sequence) | extract_api (string, 75 to 5.24M chars)
---|---|---
#!
# Copyright (c) 2017 Siphon Contributors.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""
Wyoming Upper Air Data Request
==============================
This routine gathers sounding data from the Wyoming upper air archive and generates two text files containing the
sounding data and its metadata, as well as a PNG plot of the sounding.
This example shows how to use siphon's `simplewebservice` support to create a query to
the Wyoming upper air archive.
necessary packages:
conda install -c conda-forge metpy
conda install -c conda-forge siphon
Args:
**date (integer): format YYYYMMDD
**hour (integer): e.g. 12
    **station (string): station identifier, e.g. Punta-Arenas: SCCI
    **folder (string): path to the folder where output is saved; by default, output is saved where the script is executed
Return:
[date]_[hour].png: plot of sounding
[date]_[hour]_metadata.txt: meta data of sounding
[date]_[hour]_sounding.txt: actual data of sounding
Example:
python download_plot_sounding.py date=20181214 hour=12 station=SCCI
"""
import sys
sys.path.append('../')
sys.path.append('.')
from datetime import datetime
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from metpy.units import units
import metpy.calc as mpcalc
from metpy.plots import Hodograph, SkewT
import pyLARDA
import pyLARDA.helpers as h
import pyLARDA.wyoming as w
####################################################
# Create a datetime object for the sounding and string of the station identifier.
# gather arguments
station = 'SCCI'  # Punta Arenas, Chile
station = 'EHDB'  # Netherlands, near Cabauw; possible hours: 00, 12
method_name, args, kwargs = h._method_info_from_argv(sys.argv)
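# Note: _method_info_from_argv is a pyLARDA helper; for an invocation like
# "python download_plot_sounding.py date=20181214 hour=12 station=SCCI" it is
# expected to return the key=value command-line arguments in `kwargs` as strings,
# e.g. {'date': '20181214', 'hour': '12', 'station': 'SCCI'}.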
if 'date' in kwargs:
date_str = str(kwargs['date'])
year, month, day = int(date_str[:4]), int(date_str[4:6]), int(date_str[6:])
else:
year, month, day = 2019, 3, 15
station = kwargs['station'] if 'station' in kwargs else 'SCCI'
hour = int(kwargs['hour']) if 'hour' in kwargs else 12
output_path = kwargs['folder'] if 'folder' in kwargs else ''
date = datetime(year, month, day, hour)
####################################################
# Make the request (a pandas dataframe is returned).
df = w.WyomingUpperAir.request_data(date, station)
# Drop any rows with all NaN values for T, Td, winds
# df.dropna(subset=('temperature', 'dewpoint', 'direction', 'speed',
# 'u_wind', 'v_wind'), how='all').reset_index(drop=True)
##########################################################################
# We will pull the data out of the example dataset into individual variables and
# assign units.
p = df['pressure'].values * units.hPa
T = df['temperature'].values * units.degC
Td = df['dewpoint'].values * units.degC
wind_speed = df['speed'].values * units.knots
wind_dir = df['direction'].values * units.degrees
u, v = mpcalc.wind_components(wind_speed, wind_dir)
u_wind = df['u_wind'].values * units(df.units['u_wind'])
v_wind = df['v_wind'].values * units(df.units['v_wind'])
##########################################################################
# Thermodynamic Calculations
# --------------------------
#
# Often times we will want to calculate some thermodynamic parameters of a
# sounding. The MetPy calc module has many such calculations already implemented!
#
# * **Lifting Condensation Level (LCL)** - The level at which an air parcel's
# relative humidity becomes 100% when lifted along a dry adiabatic path.
# * **Parcel Path** - Path followed by a hypothetical parcel of air, beginning
# at the surface temperature/pressure and rising dry adiabatically until
# reaching the LCL, then rising moist adiabatically.
# Calculate the LCL
lcl_pressure, lcl_temperature = mpcalc.lcl(p[0], T[0], Td[0])
print(lcl_pressure, lcl_temperature)
# Calculate the parcel profile.
parcel_prof = mpcalc.parcel_profile(p, T[0], Td[0]).to('degC')
##########################################################################
# Skew-T Plotting
# ------------------------
#
# Fiducial lines indicating dry adiabats, moist adiabats, and mixing ratio are
# useful when performing further analysis on the Skew-T diagram. Often the
# 0C isotherm is emphasized and areas of CAPE and CIN are shaded.
# Create a new figure. The dimensions here give a good aspect ratio
fig = plt.figure(figsize=(9, 9))
skew = SkewT(fig, rotation=30)
# Plot the data using normal plotting functions, in this case using
# log scaling in Y, as dictated by the typical meteorological plot
skew.plot(p, T, 'r')
skew.plot(p, Td, 'g')
skew.plot_barbs(p, u, v)
skew.ax.set_ylim(1000, 100)
skew.ax.set_xlim(-40, 60)
skew.ax.set_xlabel('Temperature [°C]')
skew.ax.set_ylabel('Pressure [hPa]')
# Plot LCL temperature as black dot
skew.plot(lcl_pressure, lcl_temperature, 'ko', markerfacecolor='black')
# Plot the parcel profile as a black line
skew.plot(p, parcel_prof, 'k', linewidth=2)
# Shade areas of CAPE and CIN
skew.shade_cin(p, T, parcel_prof)
skew.shade_cape(p, T, parcel_prof)
# Plot a zero degree isotherm
skew.ax.axvline(0, color='c', linestyle='--', linewidth=2)
# Add the relevant special lines
skew.plot_dry_adiabats()
skew.plot_moist_adiabats()
skew.plot_mixing_lines()
# Add a title
plt.title(str(date) + ' ' + station + ' Punta Arenas')
# Add Legend
plt.legend(['Temperature', 'Dew Point', 'LCL', 'parcel profile'])
# Save the Figure and the data
filename = str(date.year) + str(date.month).zfill(2) + str(date.day).zfill(2) + '_' + str(date.hour) + '_' + station
file = output_path + filename + '.png'
fig.savefig(file, dpi=100, format='png')
plt.close()
df.to_csv(output_path + filename + '_sounding' + '.txt', sep='\t', index=None)
with open(output_path + filename + '_metadata' + '.txt', 'w') as f:
for item in df._metadata:
for item1, item2 in item.items():
f.write(str(item1) + '\t' + str(item2) + '\n')
print(' Save File :: ' + file)
print(' Save File :: ' + output_path + filename + '_metadata' + '.txt')
print(' Save File :: ' + output_path + filename + '_sounding' + '.txt')
| [
"datetime.datetime",
"metpy.calc.wind_components",
"metpy.plots.SkewT",
"matplotlib.use",
"pyLARDA.helpers._method_info_from_argv",
"metpy.calc.parcel_profile",
"matplotlib.pyplot.close",
"matplotlib.pyplot.figure",
"metpy.units.units",
"metpy.calc.lcl",
"sys.path.append",
"pyLARDA.wyoming.WyomingUpperAir.request_data",
"matplotlib.pyplot.legend"
] | [((1128, 1150), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (1143, 1150), False, 'import sys\n'), ((1151, 1171), 'sys.path.append', 'sys.path.append', (['"""."""'], {}), "('.')\n", (1166, 1171), False, 'import sys\n'), ((1222, 1243), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (1236, 1243), False, 'import matplotlib\n'), ((1742, 1776), 'pyLARDA.helpers._method_info_from_argv', 'h._method_info_from_argv', (['sys.argv'], {}), '(sys.argv)\n', (1766, 1776), True, 'import pyLARDA.helpers as h\n'), ((2162, 2194), 'datetime.datetime', 'datetime', (['year', 'month', 'day', 'hour'], {}), '(year, month, day, hour)\n', (2170, 2194), False, 'from datetime import datetime\n'), ((2307, 2352), 'pyLARDA.wyoming.WyomingUpperAir.request_data', 'w.WyomingUpperAir.request_data', (['date', 'station'], {}), '(date, station)\n', (2337, 2352), True, 'import pyLARDA.wyoming as w\n'), ((2953, 2997), 'metpy.calc.wind_components', 'mpcalc.wind_components', (['wind_speed', 'wind_dir'], {}), '(wind_speed, wind_dir)\n', (2975, 2997), True, 'import metpy.calc as mpcalc\n'), ((3822, 3851), 'metpy.calc.lcl', 'mpcalc.lcl', (['p[0]', 'T[0]', 'Td[0]'], {}), '(p[0], T[0], Td[0])\n', (3832, 3851), True, 'import metpy.calc as mpcalc\n'), ((4405, 4431), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(9, 9)'}), '(figsize=(9, 9))\n', (4415, 4431), True, 'import matplotlib.pyplot as plt\n'), ((4439, 4462), 'metpy.plots.SkewT', 'SkewT', (['fig'], {'rotation': '(30)'}), '(fig, rotation=30)\n', (4444, 4462), False, 'from metpy.plots import Hodograph, SkewT\n'), ((5378, 5443), 'matplotlib.pyplot.legend', 'plt.legend', (["['Temperature', 'Dew Point', 'LCL', 'parcel profile']"], {}), "(['Temperature', 'Dew Point', 'LCL', 'parcel profile'])\n", (5388, 5443), True, 'import matplotlib.pyplot as plt\n'), ((5674, 5685), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (5683, 5685), True, 'import matplotlib.pyplot as plt\n'), ((3029, 3054), 'metpy.units.units', 'units', (["df.units['u_wind']"], {}), "(df.units['u_wind'])\n", (3034, 3054), False, 'from metpy.units import units\n'), ((3086, 3111), 'metpy.units.units', 'units', (["df.units['v_wind']"], {}), "(df.units['v_wind'])\n", (3091, 3111), False, 'from metpy.units import units\n'), ((3937, 3974), 'metpy.calc.parcel_profile', 'mpcalc.parcel_profile', (['p', 'T[0]', 'Td[0]'], {}), '(p, T[0], Td[0])\n', (3958, 3974), True, 'import metpy.calc as mpcalc\n')] |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from singa import tensor
from singa.tensor import Tensor
from singa import autograd
from singa import opt
import numpy as np
from singa import device
import argparse
np_dtype = {"float16": np.float16, "float32": np.float32}
singa_dtype = {"float16": tensor.float16, "float32": tensor.float32}
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-p',
choices=['float32', 'float16'],
default='float32',
dest='precision')
parser.add_argument('-m',
'--max-epoch',
default=1001,
type=int,
help='maximum epochs',
dest='max_epoch')
args = parser.parse_args()
np.random.seed(0)
autograd.training = True
# prepare training data in numpy array
# generate the boundary
f = lambda x: (5 * x + 1)
bd_x = np.linspace(-1.0, 1, 200)
bd_y = f(bd_x)
# generate the training data
x = np.random.uniform(-1, 1, 400)
y = f(x) + 2 * np.random.randn(len(x))
# convert training data to 2d space
label = np.asarray([5 * a + 1 > b for (a, b) in zip(x, y)])
data = np.array([[a, b] for (a, b) in zip(x, y)], dtype=np.float32)
def to_categorical(y, num_classes):
"""
Converts a class vector (integers) to binary class matrix.
Args:
y: class vector to be converted into a matrix
(integers from 0 to num_classes).
num_classes: total number of classes.
Returns:
A binary matrix representation of the input.
"""
y = np.array(y, dtype="int")
n = y.shape[0]
categorical = np.zeros((n, num_classes))
categorical[np.arange(n), y] = 1
return categorical
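    # Illustrative example: to_categorical([0, 1, 1], 2) returns
    # [[1., 0.], [0., 1.], [0., 1.]].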
label = to_categorical(label, 2).astype(np.float32)
print("train_data_shape:", data.shape)
print("train_label_shape:", label.shape)
precision = singa_dtype[args.precision]
np_precision = np_dtype[args.precision]
dev = device.create_cuda_gpu()
inputs = Tensor(data=data, device=dev)
target = Tensor(data=label, device=dev)
inputs = inputs.as_type(precision)
target = target.as_type(tensor.int32)
w0_np = np.random.normal(0, 0.1, (2, 3)).astype(np_precision)
w0 = Tensor(data=w0_np,
device=dev,
dtype=precision,
requires_grad=True,
stores_grad=True)
b0 = Tensor(shape=(3,),
device=dev,
dtype=precision,
requires_grad=True,
stores_grad=True)
b0.set_value(0.0)
w1_np = np.random.normal(0, 0.1, (3, 2)).astype(np_precision)
w1 = Tensor(data=w1_np,
device=dev,
dtype=precision,
requires_grad=True,
stores_grad=True)
b1 = Tensor(shape=(2,),
device=dev,
dtype=precision,
requires_grad=True,
stores_grad=True)
b1.set_value(0.0)
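    # w0/b0 and w1/b1 together define a small 2-3-2 MLP: 2 input features,
    # one hidden layer of 3 ReLU units, and 2 output classes.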
sgd = opt.SGD(0.05, 0.8)
# training process
for i in range(args.max_epoch):
x = autograd.matmul(inputs, w0)
x = autograd.add_bias(x, b0)
x = autograd.relu(x)
x = autograd.matmul(x, w1)
x = autograd.add_bias(x, b1)
loss = autograd.softmax_cross_entropy(x, target)
sgd(loss)
if i % 100 == 0:
print("%d, training loss = " % i, tensor.to_numpy(loss)[0])
| [
"numpy.random.normal",
"singa.device.create_cuda_gpu",
"singa.autograd.matmul",
"singa.autograd.softmax_cross_entropy",
"singa.autograd.relu",
"singa.tensor.to_numpy",
"argparse.ArgumentParser",
"singa.autograd.add_bias",
"singa.tensor.Tensor",
"numpy.array",
"numpy.linspace",
"singa.opt.SGD",
"numpy.zeros",
"numpy.random.seed",
"numpy.random.uniform",
"numpy.arange"
] | [((1126, 1151), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1149, 1151), False, 'import argparse\n'), ((1589, 1606), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (1603, 1606), True, 'import numpy as np\n'), ((1751, 1776), 'numpy.linspace', 'np.linspace', (['(-1.0)', '(1)', '(200)'], {}), '(-1.0, 1, 200)\n', (1762, 1776), True, 'import numpy as np\n'), ((1838, 1867), 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)', '(400)'], {}), '(-1, 1, 400)\n', (1855, 1867), True, 'import numpy as np\n'), ((2890, 2914), 'singa.device.create_cuda_gpu', 'device.create_cuda_gpu', ([], {}), '()\n', (2912, 2914), False, 'from singa import device\n'), ((2929, 2958), 'singa.tensor.Tensor', 'Tensor', ([], {'data': 'data', 'device': 'dev'}), '(data=data, device=dev)\n', (2935, 2958), False, 'from singa.tensor import Tensor\n'), ((2972, 3002), 'singa.tensor.Tensor', 'Tensor', ([], {'data': 'label', 'device': 'dev'}), '(data=label, device=dev)\n', (2978, 3002), False, 'from singa.tensor import Tensor\n'), ((3161, 3250), 'singa.tensor.Tensor', 'Tensor', ([], {'data': 'w0_np', 'device': 'dev', 'dtype': 'precision', 'requires_grad': '(True)', 'stores_grad': '(True)'}), '(data=w0_np, device=dev, dtype=precision, requires_grad=True,\n stores_grad=True)\n', (3167, 3250), False, 'from singa.tensor import Tensor\n'), ((3320, 3409), 'singa.tensor.Tensor', 'Tensor', ([], {'shape': '(3,)', 'device': 'dev', 'dtype': 'precision', 'requires_grad': '(True)', 'stores_grad': '(True)'}), '(shape=(3,), device=dev, dtype=precision, requires_grad=True,\n stores_grad=True)\n', (3326, 3409), False, 'from singa.tensor import Tensor\n'), ((3568, 3657), 'singa.tensor.Tensor', 'Tensor', ([], {'data': 'w1_np', 'device': 'dev', 'dtype': 'precision', 'requires_grad': '(True)', 'stores_grad': '(True)'}), '(data=w1_np, device=dev, dtype=precision, requires_grad=True,\n stores_grad=True)\n', (3574, 3657), False, 'from singa.tensor import Tensor\n'), ((3727, 3816), 'singa.tensor.Tensor', 'Tensor', ([], {'shape': '(2,)', 'device': 'dev', 'dtype': 'precision', 'requires_grad': '(True)', 'stores_grad': '(True)'}), '(shape=(2,), device=dev, dtype=precision, requires_grad=True,\n stores_grad=True)\n', (3733, 3816), False, 'from singa.tensor import Tensor\n'), ((3910, 3928), 'singa.opt.SGD', 'opt.SGD', (['(0.05)', '(0.8)'], {}), '(0.05, 0.8)\n', (3917, 3928), False, 'from singa import opt\n'), ((2480, 2504), 'numpy.array', 'np.array', (['y'], {'dtype': '"""int"""'}), "(y, dtype='int')\n", (2488, 2504), True, 'import numpy as np\n'), ((2550, 2576), 'numpy.zeros', 'np.zeros', (['(n, num_classes)'], {}), '((n, num_classes))\n', (2558, 2576), True, 'import numpy as np\n'), ((4001, 4028), 'singa.autograd.matmul', 'autograd.matmul', (['inputs', 'w0'], {}), '(inputs, w0)\n', (4016, 4028), False, 'from singa import autograd\n'), ((4041, 4065), 'singa.autograd.add_bias', 'autograd.add_bias', (['x', 'b0'], {}), '(x, b0)\n', (4058, 4065), False, 'from singa import autograd\n'), ((4078, 4094), 'singa.autograd.relu', 'autograd.relu', (['x'], {}), '(x)\n', (4091, 4094), False, 'from singa import autograd\n'), ((4107, 4129), 'singa.autograd.matmul', 'autograd.matmul', (['x', 'w1'], {}), '(x, w1)\n', (4122, 4129), False, 'from singa import autograd\n'), ((4142, 4166), 'singa.autograd.add_bias', 'autograd.add_bias', (['x', 'b1'], {}), '(x, b1)\n', (4159, 4166), False, 'from singa import autograd\n'), ((4182, 4223), 'singa.autograd.softmax_cross_entropy', 'autograd.softmax_cross_entropy', (['x', 'target'], 
{}), '(x, target)\n', (4212, 4223), False, 'from singa import autograd\n'), ((3098, 3130), 'numpy.random.normal', 'np.random.normal', (['(0)', '(0.1)', '(2, 3)'], {}), '(0, 0.1, (2, 3))\n', (3114, 3130), True, 'import numpy as np\n'), ((3505, 3537), 'numpy.random.normal', 'np.random.normal', (['(0)', '(0.1)', '(3, 2)'], {}), '(0, 0.1, (3, 2))\n', (3521, 3537), True, 'import numpy as np\n'), ((2597, 2609), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (2606, 2609), True, 'import numpy as np\n'), ((4314, 4335), 'singa.tensor.to_numpy', 'tensor.to_numpy', (['loss'], {}), '(loss)\n', (4329, 4335), False, 'from singa import tensor\n')] |
"""
To get standard out, run nosetests as follows:
$ nosetests -s tests
"""
import os
import sys
sys.path = [os.path.abspath(os.path.dirname(__file__))] + sys.path
import dill
from nose.tools import assert_equal, assert_not_equal, with_setup
import numpy as np
import random
from sklearn.model_selection import train_test_split
import utils_testing as utils
def test_basic_ensemble_classifier():
np.random.seed(0)
df_titanic_train, df_titanic_test = utils.get_titanic_binary_classification_dataset()
ml_predictor = utils.make_titanic_ensemble(df_titanic_train)
test_score = ml_predictor.score(df_titanic_test, df_titanic_test.survived, verbose=0)
# Very rough ensembles don't do as well on this problem as a standard GradientBoostingClassifier does
# Right now we're getting a score of -.22
# Make sure our score is good, but not unreasonably good
assert -0.225 < test_score < -0.17
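    # The bounds are negative because calculate_brier_score_loss in utils_testing
    # appears to return a negated Brier score loss, so values closer to 0 are better.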
def test_saving_basic_ensemble_classifier():
np.random.seed(0)
df_titanic_train, df_titanic_test = utils.get_titanic_binary_classification_dataset()
ml_predictor = utils.make_titanic_ensemble(df_titanic_train)
file_name = ml_predictor.save(str(random.random()))
with open(file_name, 'rb') as read_file:
saved_ml_pipeline = dill.load(read_file)
os.remove(file_name)
probas = saved_ml_pipeline.predict_proba(df_titanic_test)
probas = [proba[1] for proba in probas]
# print(probas)
test_score = utils.calculate_brier_score_loss(df_titanic_test.survived, probas)
print('test_score')
print(test_score)
# Very rough ensembles don't do as well on this problem as a standard GradientBoostingClassifier does
# Right now we're getting a score of -.22
# Make sure our score is good, but not unreasonably good
assert -0.225 < test_score < -0.17
def test_get_basic_ensemble_predictions_one_at_a_time_classifier():
np.random.seed(0)
df_titanic_train, df_titanic_test = utils.get_titanic_binary_classification_dataset()
ml_predictor = utils.make_titanic_ensemble(df_titanic_train)
file_name = ml_predictor.save(str(random.random()))
with open(file_name, 'rb') as read_file:
saved_ml_pipeline = dill.load(read_file)
os.remove(file_name)
df_titanic_test_dictionaries = df_titanic_test.to_dict('records')
# These predictions take a while. So we'll cut out 80% of our data to make this run much faster
df_titanic_test_dictionaries, df_titanic_test_dictionaries_ignored, df_titanic_test, df_titanic_test_ignored = train_test_split(df_titanic_test_dictionaries, df_titanic_test, train_size=0.05, random_state=0)
# 1. make sure the accuracy is the same
predictions = []
for row in df_titanic_test_dictionaries:
prediction = saved_ml_pipeline.predict_proba(row)
predictions.append(prediction)
first_score = utils.calculate_brier_score_loss(df_titanic_test.survived, predictions)
print('first_score')
print(first_score)
# Make sure our score is good, but not unreasonably good
assert -0.235 < first_score < -0.17
# 2. make sure the speed is reasonable (do it a few extra times)
# data_length = len(df_titanic_test_dictionaries)
# start_time = datetime.datetime.now()
# for idx in range(1000):
# row_num = idx % data_length
# saved_ml_pipeline.predict(df_titanic_test_dictionaries[row_num])
# end_time = datetime.datetime.now()
# duration = end_time - start_time
# print('duration.total_seconds()')
# print(duration.total_seconds())
# # It's very difficult to set a benchmark for speed that will work across all machines.
# # On my 2013 bottom of the line 15" MacBook Pro, this runs in about 0.8 seconds for 1000 predictions
# # That's about 1 millisecond per prediction
# # Assuming we might be running on a test box that's pretty weak, multiply by 3
# # Also make sure we're not running unreasonably quickly
# assert 0.4 < duration.total_seconds() < 3
# 3. make sure we're not modifying the dictionaries (the score is the same after running a few experiments as it is the first time)
predictions = []
for row in df_titanic_test_dictionaries:
predictions.append(saved_ml_pipeline.predict_proba(row))
second_score = utils.calculate_brier_score_loss(df_titanic_test.survived, predictions)
# Make sure our score is good, but not unreasonably good
assert -0.235 < second_score < -0.17
# All these tests hang on scikit-learn's GSCV multiprocessing bug
def test_ml_ensemble_classifier():
np.random.seed(0)
df_titanic_train, df_titanic_test = utils.get_titanic_binary_classification_dataset()
ml_predictor = utils.make_titanic_ensemble(df_titanic_train, method='ml')
test_score = ml_predictor.score(df_titanic_test, df_titanic_test.survived, verbose=0)
# Very rough ensembles don't do as well on this problem as a standard GradientBoostingClassifier does
# Right now we're getting a score of -.22
# Make sure our score is good, but not unreasonably good
print('test_score')
print(test_score)
assert -0.225 < test_score < -0.17
def test_saving_ml_ensemble_classifier():
np.random.seed(0)
df_titanic_train, df_titanic_test = utils.get_titanic_binary_classification_dataset()
ml_predictor = utils.make_titanic_ensemble(df_titanic_train, method='ml')
file_name = ml_predictor.save(str(random.random()))
with open(file_name, 'rb') as read_file:
saved_ml_pipeline = dill.load(read_file)
os.remove(file_name)
probas = saved_ml_pipeline.predict_proba(df_titanic_test)
probas = [proba[1] for proba in probas]
# print(probas)
test_score = utils.calculate_brier_score_loss(df_titanic_test.survived, probas)
print('test_score')
print(test_score)
# Very rough ensembles don't do as well on this problem as a standard GradientBoostingClassifier does
# Right now we're getting a score of -.22
# Make sure our score is good, but not unreasonably good
assert -0.225 < test_score < -0.17
# def test_get_ml_ensemble_predictions_one_at_a_time_classifier():
# np.random.seed(0)
# df_titanic_train, df_titanic_test = utils.get_titanic_binary_classification_dataset()
# ml_predictor = utils.make_titanic_ensemble(df_titanic_train, method='ml')
# file_name = ml_predictor.save(str(random.random()))
# with open(file_name, 'rb') as read_file:
# saved_ml_pipeline = dill.load(read_file)
# os.remove(file_name)
# df_titanic_test_dictionaries = df_titanic_test.to_dict('records')
# # These predictions take a while. So we'll cut out 80% of our data to make this run much faster
# df_titanic_test_dictionaries, df_titanic_test_dictionaries_ignored, df_titanic_test, df_titanic_test_ignored = train_test_split(df_titanic_test_dictionaries, df_titanic_test, train_size=0.05, random_state=0)
# # 1. make sure the accuracy is the same
# predictions = []
# for row in df_titanic_test_dictionaries:
# prediction = saved_ml_pipeline.predict_proba(row)
# # print(prediction)
# # print(type(prediction))
# # print(prediction[0])
# # print(type(prediction[0]))
# predictions.append(prediction)
# print('predictions inside our failing test')
# print(predictions)
# first_score = utils.calculate_brier_score_loss(df_titanic_test.survived, predictions)
# print('first_score')
# print(first_score)
# # Make sure our score is good, but not unreasonably good
# assert -0.235 < first_score < -0.17
# # 2. make sure the speed is reasonable (do it a few extra times)
# # data_length = len(df_titanic_test_dictionaries)
# # start_time = datetime.datetime.now()
# # for idx in range(1000):
# # row_num = idx % data_length
# # saved_ml_pipeline.predict(df_titanic_test_dictionaries[row_num])
# # end_time = datetime.datetime.now()
# # duration = end_time - start_time
# # print('duration.total_seconds()')
# # print(duration.total_seconds())
# # # It's very difficult to set a benchmark for speed that will work across all machines.
# # # On my 2013 bottom of the line 15" MacBook Pro, this runs in about 0.8 seconds for 1000 predictions
# # # That's about 1 millisecond per prediction
# # # Assuming we might be running on a test box that's pretty weak, multiply by 3
# # # Also make sure we're not running unreasonably quickly
# # assert 0.4 < duration.total_seconds() < 3
# # 3. make sure we're not modifying the dictionaries (the score is the same after running a few experiments as it is the first time)
# predictions = []
# for row in df_titanic_test_dictionaries:
# predictions.append(saved_ml_pipeline.predict_proba(row))
# second_score = utils.calculate_brier_score_loss(df_titanic_test.survived, predictions)
# # Make sure our score is good, but not unreasonably good
# assert -0.235 < second_score < -0.17
| [
"sklearn.model_selection.train_test_split",
"utils_testing.calculate_brier_score_loss",
"utils_testing.make_titanic_ensemble",
"dill.load",
"os.path.dirname",
"numpy.random.seed",
"random.random",
"utils_testing.get_titanic_binary_classification_dataset",
"os.remove"
] | [((407, 424), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (421, 424), True, 'import numpy as np\n'), ((466, 515), 'utils_testing.get_titanic_binary_classification_dataset', 'utils.get_titanic_binary_classification_dataset', ([], {}), '()\n', (513, 515), True, 'import utils_testing as utils\n'), ((535, 580), 'utils_testing.make_titanic_ensemble', 'utils.make_titanic_ensemble', (['df_titanic_train'], {}), '(df_titanic_train)\n', (562, 580), True, 'import utils_testing as utils\n'), ((975, 992), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (989, 992), True, 'import numpy as np\n'), ((1034, 1083), 'utils_testing.get_titanic_binary_classification_dataset', 'utils.get_titanic_binary_classification_dataset', ([], {}), '()\n', (1081, 1083), True, 'import utils_testing as utils\n'), ((1103, 1148), 'utils_testing.make_titanic_ensemble', 'utils.make_titanic_ensemble', (['df_titanic_train'], {}), '(df_titanic_train)\n', (1130, 1148), True, 'import utils_testing as utils\n'), ((1305, 1325), 'os.remove', 'os.remove', (['file_name'], {}), '(file_name)\n', (1314, 1325), False, 'import os\n'), ((1472, 1538), 'utils_testing.calculate_brier_score_loss', 'utils.calculate_brier_score_loss', (['df_titanic_test.survived', 'probas'], {}), '(df_titanic_test.survived, probas)\n', (1504, 1538), True, 'import utils_testing as utils\n'), ((1912, 1929), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (1926, 1929), True, 'import numpy as np\n'), ((1971, 2020), 'utils_testing.get_titanic_binary_classification_dataset', 'utils.get_titanic_binary_classification_dataset', ([], {}), '()\n', (2018, 2020), True, 'import utils_testing as utils\n'), ((2040, 2085), 'utils_testing.make_titanic_ensemble', 'utils.make_titanic_ensemble', (['df_titanic_train'], {}), '(df_titanic_train)\n', (2067, 2085), True, 'import utils_testing as utils\n'), ((2241, 2261), 'os.remove', 'os.remove', (['file_name'], {}), '(file_name)\n', (2250, 2261), False, 'import os\n'), ((2549, 2650), 'sklearn.model_selection.train_test_split', 'train_test_split', (['df_titanic_test_dictionaries', 'df_titanic_test'], {'train_size': '(0.05)', 'random_state': '(0)'}), '(df_titanic_test_dictionaries, df_titanic_test, train_size=\n 0.05, random_state=0)\n', (2565, 2650), False, 'from sklearn.model_selection import train_test_split\n'), ((2874, 2945), 'utils_testing.calculate_brier_score_loss', 'utils.calculate_brier_score_loss', (['df_titanic_test.survived', 'predictions'], {}), '(df_titanic_test.survived, predictions)\n', (2906, 2945), True, 'import utils_testing as utils\n'), ((4300, 4371), 'utils_testing.calculate_brier_score_loss', 'utils.calculate_brier_score_loss', (['df_titanic_test.survived', 'predictions'], {}), '(df_titanic_test.survived, predictions)\n', (4332, 4371), True, 'import utils_testing as utils\n'), ((4582, 4599), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (4596, 4599), True, 'import numpy as np\n'), ((4641, 4690), 'utils_testing.get_titanic_binary_classification_dataset', 'utils.get_titanic_binary_classification_dataset', ([], {}), '()\n', (4688, 4690), True, 'import utils_testing as utils\n'), ((4710, 4768), 'utils_testing.make_titanic_ensemble', 'utils.make_titanic_ensemble', (['df_titanic_train'], {'method': '"""ml"""'}), "(df_titanic_train, method='ml')\n", (4737, 4768), True, 'import utils_testing as utils\n'), ((5206, 5223), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (5220, 5223), True, 'import numpy as np\n'), ((5265, 5314), 
'utils_testing.get_titanic_binary_classification_dataset', 'utils.get_titanic_binary_classification_dataset', ([], {}), '()\n', (5312, 5314), True, 'import utils_testing as utils\n'), ((5334, 5392), 'utils_testing.make_titanic_ensemble', 'utils.make_titanic_ensemble', (['df_titanic_train'], {'method': '"""ml"""'}), "(df_titanic_train, method='ml')\n", (5361, 5392), True, 'import utils_testing as utils\n'), ((5549, 5569), 'os.remove', 'os.remove', (['file_name'], {}), '(file_name)\n', (5558, 5569), False, 'import os\n'), ((5716, 5782), 'utils_testing.calculate_brier_score_loss', 'utils.calculate_brier_score_loss', (['df_titanic_test.survived', 'probas'], {}), '(df_titanic_test.survived, probas)\n', (5748, 5782), True, 'import utils_testing as utils\n'), ((1280, 1300), 'dill.load', 'dill.load', (['read_file'], {}), '(read_file)\n', (1289, 1300), False, 'import dill\n'), ((2216, 2236), 'dill.load', 'dill.load', (['read_file'], {}), '(read_file)\n', (2225, 2236), False, 'import dill\n'), ((5524, 5544), 'dill.load', 'dill.load', (['read_file'], {}), '(read_file)\n', (5533, 5544), False, 'import dill\n'), ((128, 153), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (143, 153), False, 'import os\n'), ((1188, 1203), 'random.random', 'random.random', ([], {}), '()\n', (1201, 1203), False, 'import random\n'), ((2124, 2139), 'random.random', 'random.random', ([], {}), '()\n', (2137, 2139), False, 'import random\n'), ((5432, 5447), 'random.random', 'random.random', ([], {}), '()\n', (5445, 5447), False, 'import random\n')] |
#plotting parameters
import matplotlib.cm as cm
import numpy as np
import seaborn as sns
import pandas as pd
from matplotlib import pyplot as plt
import scipy
from scipy import stats
from scipy.optimize import curve_fit
from nuc_chain import geometry as ncg
from nuc_chain import linkers as ncl
from MultiPoint import propagator
from nuc_chain import fluctuations as wlc
from nuc_chain.linkers import convert
from mpl_toolkits.axes_grid1 import make_axes_locatable
# These follow Andy's plotting preferences for matplotlib rcParams
params = {'axes.edgecolor': 'black', 'axes.grid': False, 'axes.facecolor': 'white', 'axes.titlesize': 20.0,
'axes.linewidth': 0.75, 'backend': 'pdf','axes.labelsize': 18,'legend.fontsize': 18,
'xtick.labelsize': 18,'ytick.labelsize': 18,'text.usetex': False,'figure.figsize': [7, 5],
'mathtext.fontset': 'stixsans', 'savefig.format': 'pdf', 'xtick.bottom':True, 'xtick.major.pad': 5, 'xtick.major.size': 5, 'xtick.major.width': 0.5,
'ytick.left':True, 'ytick.right':False, 'ytick.major.pad': 5, 'ytick.major.size': 5, 'ytick.major.width': 0.5, 'ytick.minor.right':False, 'lines.linewidth':2}
plt.rcParams.update(params)
#Mouse data
def plot_looping_within_contact_radius(a=10, lp=50, loglog=True):
"""Plot probability that 2 ends will form a loop within contact radius a, in nm,
as a function of dimensionless chain length N=Rmax/2lp, where lp is in nm.
Plots kinked model vs. bare WLC looping probabilities for both Rlinks and Llinks."""
#convert a and lp to basepairs
a_in_bp = a / ncg.dna_params['lpb']
lp_in_bp = lp / ncg.dna_params['lpb']
#load in linker lengths used to simulate nucleosome positions in mice (mu=45bp)
links = np.load('csvs/Bprops/0unwraps/heterogenous/Sarah/mice2/linker_lengths_101nucs.npy')
#Rlinks -- linkers right of TSS, starting with 60bp between -1 and +1 nuc
Rlinks = links[50:]
#Llinks -- linkers left of TSS, starting with 15bp between -2 and -1 nuc
Llinks = links[0:50]
#reverse so linker from -1 to -2 nuc is first
Llinks_rev = Llinks[::-1]
total_links = np.concatenate((Llinks_rev, Rlinks))
    #cumulative chain length including buried basepairs
unwrap = 0
#plot as positive distance from TSS in bp
ldna_Rlinks = convert.genomic_length_from_links_unwraps(Rlinks, unwraps=unwrap) #max WLC chain length in bp
#plot as negative distance from TSS in bp
ldna_Llinks = -1*convert.genomic_length_from_links_unwraps(Llinks_rev, unwraps=unwrap) #max WLC chain length in bp
#load in calculated looping probabilities
loops = pd.read_csv('../../data_for_Sarah/mice2_looping_probs_a=10nm_102nucs.csv')
#50th nucleosome or row 49, corresponds to -2nuc
loops_to_plot = loops['49']
#only interested in plotting looping with nucleation site (-2 nuc)
Lloops = np.concatenate((loops_to_plot[0:49], [loops_to_plot[50]]))
Lloops = Lloops[::-1]
#linkers left of -2 nuc
#ldna_leftnuc = ldna_Llinks[1:]
#linkers right of -2 nuc
Rloops = loops_to_plot[51:]
#ldna_rightnuc = np.concatenate(([ldna_Llinks[0]], ldna_Rlinks))
fig, ax = plt.subplots(figsize=(6.25, 4.89))
colors = sns.color_palette("BrBG", 9)
ax.plot(ldna_Rlinks, Rloops, '-o', lw=2, markersize=4, color=colors[1], label='Right of TSS')
ax.plot(ldna_Llinks, Lloops, '-o', lw=2, markersize=4, color=colors[-2], label='Left of TSS')
plt.xlabel(r'Distance from TSS (bp)')
plt.ylabel(f'Looping probability, a={a}nm')
plt.subplots_adjust(left=0.16, bottom=0.15, top=0.98, right=0.98)
plt.yscale('log')
plt.legend(loc=0)
#plt.ylim([10**-4.5, 10**-1.8])
plt.savefig(f'plots/lp{lp}nm_a{a}nm_left_right_contact_probability_mice2.png')
| [
"matplotlib.pyplot.savefig",
"pandas.read_csv",
"seaborn.color_palette",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.rcParams.update",
"matplotlib.pyplot.yscale",
"numpy.concatenate",
"numpy.load",
"nuc_chain.linkers.convert.genomic_length_from_links_unwraps",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.subplots_adjust"
] | [((1105, 1132), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (['params'], {}), '(params)\n', (1124, 1132), True, 'from matplotlib import pyplot as plt\n'), ((1680, 1768), 'numpy.load', 'np.load', (['"""csvs/Bprops/0unwraps/heterogenous/Sarah/mice2/linker_lengths_101nucs.npy"""'], {}), "(\n 'csvs/Bprops/0unwraps/heterogenous/Sarah/mice2/linker_lengths_101nucs.npy')\n", (1687, 1768), True, 'import numpy as np\n'), ((2066, 2102), 'numpy.concatenate', 'np.concatenate', (['(Llinks_rev, Rlinks)'], {}), '((Llinks_rev, Rlinks))\n', (2080, 2102), True, 'import numpy as np\n'), ((2240, 2305), 'nuc_chain.linkers.convert.genomic_length_from_links_unwraps', 'convert.genomic_length_from_links_unwraps', (['Rlinks'], {'unwraps': 'unwrap'}), '(Rlinks, unwraps=unwrap)\n', (2281, 2305), False, 'from nuc_chain.linkers import convert\n'), ((2558, 2632), 'pandas.read_csv', 'pd.read_csv', (['"""../../data_for_Sarah/mice2_looping_probs_a=10nm_102nucs.csv"""'], {}), "('../../data_for_Sarah/mice2_looping_probs_a=10nm_102nucs.csv')\n", (2569, 2632), True, 'import pandas as pd\n'), ((2803, 2861), 'numpy.concatenate', 'np.concatenate', (['(loops_to_plot[0:49], [loops_to_plot[50]])'], {}), '((loops_to_plot[0:49], [loops_to_plot[50]]))\n', (2817, 2861), True, 'import numpy as np\n'), ((3099, 3133), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(6.25, 4.89)'}), '(figsize=(6.25, 4.89))\n', (3111, 3133), True, 'from matplotlib import pyplot as plt\n'), ((3147, 3175), 'seaborn.color_palette', 'sns.color_palette', (['"""BrBG"""', '(9)'], {}), "('BrBG', 9)\n", (3164, 3175), True, 'import seaborn as sns\n'), ((3376, 3412), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Distance from TSS (bp)"""'], {}), "('Distance from TSS (bp)')\n", (3386, 3412), True, 'from matplotlib import pyplot as plt\n'), ((3418, 3461), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['f"""Looping probability, a={a}nm"""'], {}), "(f'Looping probability, a={a}nm')\n", (3428, 3461), True, 'from matplotlib import pyplot as plt\n'), ((3466, 3531), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'left': '(0.16)', 'bottom': '(0.15)', 'top': '(0.98)', 'right': '(0.98)'}), '(left=0.16, bottom=0.15, top=0.98, right=0.98)\n', (3485, 3531), True, 'from matplotlib import pyplot as plt\n'), ((3536, 3553), 'matplotlib.pyplot.yscale', 'plt.yscale', (['"""log"""'], {}), "('log')\n", (3546, 3553), True, 'from matplotlib import pyplot as plt\n'), ((3558, 3575), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '(0)'}), '(loc=0)\n', (3568, 3575), True, 'from matplotlib import pyplot as plt\n'), ((3616, 3694), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""plots/lp{lp}nm_a{a}nm_left_right_contact_probability_mice2.png"""'], {}), "(f'plots/lp{lp}nm_a{a}nm_left_right_contact_probability_mice2.png')\n", (3627, 3694), True, 'from matplotlib import pyplot as plt\n'), ((2401, 2470), 'nuc_chain.linkers.convert.genomic_length_from_links_unwraps', 'convert.genomic_length_from_links_unwraps', (['Llinks_rev'], {'unwraps': 'unwrap'}), '(Llinks_rev, unwraps=unwrap)\n', (2442, 2470), False, 'from nuc_chain.linkers import convert\n')] |
from enum import Enum
from time import time
from typing import List
from uuid import uuid4
from pydantic import BaseModel, Field
from app.core.models import User, UserRoles
def uuid_str() -> str:
return str(uuid4())
def time_int() -> int:
return int(time())
class TokenType(str, Enum):
ACCESS = "access"
REFRESH = "refresh"
class BaseJWTPayload(BaseModel):
exp: int
jti: str = Field(default_factory=uuid_str)
nbf: int = Field(default_factory=time_int)
token_type: TokenType
@staticmethod
def calc_exp(seconds_from_now: int = 0) -> int:
return int(time()) + seconds_from_now
class AccessTokenPayload(BaseJWTPayload):
roles: List[UserRoles]
sid: str
token_type: TokenType = TokenType.ACCESS
user_id: int
@staticmethod
def from_info(expiration_seconds: int, session_id: str, user: User):
return AccessTokenPayload(
exp=AccessTokenPayload.calc_exp(expiration_seconds),
roles=user.roles,
sid=session_id,
user_id=user.id,
)
class RefreshTokenPayload(BaseJWTPayload):
    jti: str
token_type: TokenType = TokenType.REFRESH
@staticmethod
def from_info(expiration_seconds: int, session_id: str):
return RefreshTokenPayload(
exp=RefreshTokenPayload.calc_exp(expiration_seconds), jti=session_id,
)
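# Illustrative usage (names and values are hypothetical; assumes an existing User
# instance and application-specific expiration settings):
#   access = AccessTokenPayload.from_info(expiration_seconds=900, session_id=sid, user=user)
#   refresh = RefreshTokenPayload.from_info(expiration_seconds=86400, session_id=sid)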
| [
"pydantic.Field",
"time.time",
"uuid.uuid4"
] | [((411, 442), 'pydantic.Field', 'Field', ([], {'default_factory': 'uuid_str'}), '(default_factory=uuid_str)\n', (416, 442), False, 'from pydantic import BaseModel, Field\n'), ((458, 489), 'pydantic.Field', 'Field', ([], {'default_factory': 'time_int'}), '(default_factory=time_int)\n', (463, 489), False, 'from pydantic import BaseModel, Field\n'), ((215, 222), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (220, 222), False, 'from uuid import uuid4\n'), ((264, 270), 'time.time', 'time', ([], {}), '()\n', (268, 270), False, 'from time import time\n'), ((606, 612), 'time.time', 'time', ([], {}), '()\n', (610, 612), False, 'from time import time\n')] |
import pytest
from service.api import create_app, Config
from service.db import create_engine, create_session
engine = create_engine(Config.dsn)
@pytest.fixture(scope='session')
def app():
app = create_app()
app.config['TESTING'] = True
ctx = app.app_context()
ctx.push()
yield app
ctx.pop()
@pytest.fixture(scope='function')
def client(app):
"""
This is a client fixture but it
also sets up the transactional
session so that we don't pollute
the DB in tests.
"""
conn = engine.connect()
transaction = conn.begin()
app.db_session = create_session(conn)
yield app.test_client()
# No need to retain the junk we
# put in the DB for testing.
transaction.rollback()
conn.close()
app.db_session.remove()
| [
"pytest.fixture",
"service.db.create_engine",
"service.db.create_session",
"service.api.create_app"
] | [((122, 147), 'service.db.create_engine', 'create_engine', (['Config.dsn'], {}), '(Config.dsn)\n', (135, 147), False, 'from service.db import create_engine, create_session\n'), ((151, 182), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (165, 182), False, 'import pytest\n'), ((324, 356), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""function"""'}), "(scope='function')\n", (338, 356), False, 'import pytest\n'), ((204, 216), 'service.api.create_app', 'create_app', ([], {}), '()\n', (214, 216), False, 'from service.api import create_app, Config\n'), ((599, 619), 'service.db.create_session', 'create_session', (['conn'], {}), '(conn)\n', (613, 619), False, 'from service.db import create_engine, create_session\n')] |
"""Testing arxiv_script.py"""
from click.testing import CliRunner
from src.arxiv_script import cli
def test_show():
runner = CliRunner()
result = runner.invoke(cli, ["show", "math.GT/0309136"])
result_full = runner.invoke(cli, ["show", "--full", "math.GT/0309136"])
assert result.exit_code == 0
assert result_full.exit_code == 0
assert "Springer fibers" in result.output
assert "Main subject" in result_full.output
def test_settings():
"""Only test if false input gets rejected
(for tests of actual settings, see test_path_control)
"""
runner = CliRunner()
result_dir = runner.invoke(cli, ["--set-directory", "a/fantasy/path/dir"])
result_bib = runner.invoke(cli, ["--set-bib-file", "a/fantasy/bib/file"])
assert result_dir.exit_code == 0
assert result_bib.exit_code == 0
assert "Not a correct path" in result_dir.output
assert "Not a correct path" in result_bib.output
def test_get():
runner = CliRunner()
result = runner.invoke(cli, ["get", "fantasy/ax_id"])
result_false_dir = runner.invoke(cli, ["get", "-d", "fantasy/dir/", "2011.02123"])
assert result.exit_code == 0
assert result_false_dir.exit_code == 0
assert "Not a correct arXiv identifier." in result.output
assert "Please give a valid absolute path" in result_false_dir.output
def test_bib():
"""Just test axs bib without interacting with an actual file."""
runner = CliRunner()
result_false_bib = runner.invoke(
cli, ["bib", "-a", "fantasy.bib", "math.GT/0309136"]
)
result_bib = runner.invoke(cli, ["bib", "math.GT/0309136"])
# to add: give a valid bib file
assert result_false_bib.exit_code == 0
assert "The given path does not point to a bib-file" in result_false_bib.output
assert result_bib.exit_code == 0
assert "Here is the requested BibTeX entry:" in result_bib.output
def test_bib_file(tmp_path):
# create a temporary bib-file
axs_tmp = tmp_path / "axs_tmp"
axs_tmp.mkdir()
tmp_bib = axs_tmp / "tmp.bib"
tmp_bib.touch()
# invoke 'axs bib'
runner = CliRunner()
# without adding the bibtex key to tmp_bib (input = 'n')
result_tmp_bib = runner.invoke(
cli, ["bib", "-a", str(tmp_bib), "math.GT/0309136"], input="n"
)
# adding the to tmp_bib
result_tmp_write = runner.invoke(
cli, ["bib", "-a", str(tmp_bib), "math.GT/0309136"], input="y"
)
# actual tests
assert result_tmp_bib.exit_code == 0
assert result_tmp_write.exit_code == 0
# check output
assert "Do you want to add this BibTeX entry" in result_tmp_bib.output
assert "BibTeX entry successfully added." in result_tmp_write.output
# check if writing to tmp_bib was successful (if so, the arXiv identifier
# is in the first line)
with tmp_bib.open() as bib_file:
first_line = bib_file.readline()
assert "math.GT/0309136" in first_line
| [
"click.testing.CliRunner"
] | [((132, 143), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (141, 143), False, 'from click.testing import CliRunner\n'), ((594, 605), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (603, 605), False, 'from click.testing import CliRunner\n'), ((974, 985), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (983, 985), False, 'from click.testing import CliRunner\n'), ((1443, 1454), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (1452, 1454), False, 'from click.testing import CliRunner\n'), ((2105, 2116), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (2114, 2116), False, 'from click.testing import CliRunner\n')] |
#!/usr/bin/env python
# coding: utf-8
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from scipy.integrate import quad
from numba import jit
import time
plt.style.use('seaborn')
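# The functions below implement the diffusion-approximation (Fokker-Planck)
# stationary density of a Moran process with mutation and selection,
#     P(x) ~ x**(N*v - 1) * (1 - x)**(N*u - 1) * exp(N*s*x)   (up to normalization),
# which is the expression returned by Fokker_Planck_Equation.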
@jit
def Fokker_Planck_Equation(x, N, u, v, s):
"""
    Returns the (unnormalized) stationary density of the Moran process at frequency x.
"""
return np.power(x, N*v - 1)*np.power((1 - x), (N*u - 1))*np.exp(N*s*x)
@jit
def Fokker_Planck_Mean(x, N, u, v, s):
"""
    Returns the integrand x * P(x) used to compute the stationary mean of the Moran process from the Fokker-Planck density.
"""
return x*np.power(x, N*v - 1)*np.power((1 - x), (N*u - 1))*np.exp(N*s*x)
@jit
def Fixation_Prob_Moran(N, w_a, w_A):
"""
Returns an approximation for fixation probability for Moran process.
"""
return ((w_A/w_a) - 1)/(np.power(w_A/w_a, N) - 1)
@jit
def Fixation_Prob_Fisher(N, s):
"""
Returns an approximation for fixation probability for Wright-Fisher process.
"""
return (1 - np.exp(-s))/(1 - np.exp(-N*s))
@jit
def Moran_Process(N, w_a, w_A, u, v,
num_a, num_A, max_steps,
dt, num_sim):
"""
    N -- population size,
    w_a, w_A -- fitness weights of the a and A types,
    u, v -- mutation rates (a -> A and A -> a, respectively),
    num_a, num_A -- initial counts of the a and A types,
    max_steps -- number of simulation steps,
    dt -- time step,
    num_sim -- index of the simulation run (not used inside the function).
    Returns the final count of the a type.
"""
step = 0
a_counts = []
while step < max_steps:
prob_birth = ((w_a * num_a)*(1 - u) / (w_a * num_a + w_A * num_A)
+ (num_A * v) / (w_a * num_a + w_A * num_A)) * num_A / N
prob_death = ((num_A)*(1 - v) / (w_a * num_a + w_A * num_A)
+ (w_a * num_a)*u / (w_a * num_a + w_A * num_A)) * num_a / N
r = np.random.rand(1)
if r <= prob_birth*dt:
num_a += 1
num_A -= 1
elif r <= (prob_birth + prob_death)*dt:
num_a -= 1
num_A += 1
step += 1
a_counts.append(num_a)
return a_counts[-1]
# Plot the Fokker Planck distribution
population_sizes = [10, 100, 1000]
mutation_rates = [0.2, 0.02, 0.002]
s_list = [0.2, 0.02, 0.002]
w_A = 1
max_steps = 500000
dt = 1
num_sims = 1000
frequency = []
Fokker_Planck = []
for i in range(len(population_sizes)):
x = np.linspace(0, 1, population_sizes[i])
P = Fokker_Planck_Equation(x, population_sizes[i], mutation_rates[i],
mutation_rates[i], s_list[i])
print("Population Size: ", population_sizes[i])
simulations = [Moran_Process(population_sizes[i], 1+s_list[i], w_A,
mutation_rates[i], mutation_rates[i],
population_sizes[i]//2,
population_sizes[i]-(population_sizes[i]//2),
max_steps, dt, j) for j in range(num_sims)]
Fokker_Planck.append(P)
a_counts = np.asarray(simulations)
frequency.append(a_counts)
fig, ax = plt.subplots(1,len(population_sizes), figsize=(20,5))
fig.suptitle('Stationary Distribution For Different Population Sizes',
fontsize=18, y=1.12)
fig.text(0.5, -0.02, 'Frequency of a', ha='center', fontsize=16)
fig.text(-0.02, 0.5, 'Density', va='center', rotation='vertical', fontsize=16)
for i in range(len(population_sizes)):
x = np.linspace(0, 1, population_sizes[i])
data = frequency[i]/population_sizes[i]
FP = Fokker_Planck[i]
ax[i].plot(x, 2*FP, label='1', color='r')
sns.kdeplot(data, ax=ax[i], label='2', color='b')
ax[i].axvline(np.mean(data), color='teal', linestyle='dashed',
linewidth=2, alpha=0.75)
ax[i].legend(['Fokker Planck solution', 'Simulations'], fontsize=14)
ax[i].set_title('N = {0}'.format(population_sizes[i]), fontsize=14)
ax[i].set_xlim([-0.01,1.01])
plt.tight_layout()
# Plot the fixation probabilities
population_sizes = [10, 100, 1000, 10000]
mutation_rate = 0
s_list = [0.2, 0.02, 0.002, 0.0002]
w_A = 1
max_steps = 500000
dt = 1
num_sims = 1000
fix_prob_sim = []
fix_prob_Moran = []
fix_prob_wf = []
for i in range(len(population_sizes)):
print("Population Size: ", population_sizes[i])
f_M = Fixation_Prob_Moran(population_sizes[i], 1+s_list[i], w_A)
f_WF = Fixation_Prob_Fisher(population_sizes[i], s_list[i])
simulations = [Moran_Process(population_sizes[i], 1+s_list[i], w_A,
mutation_rate, mutation_rate,
1, population_sizes[i] - 1, max_steps,
dt, j) for j in range(num_sims)]
fix_prob = simulations.count(population_sizes[i])/num_sims
fix_prob_sim.append(fix_prob)
fix_prob_Moran.append(f_M)
fix_prob_wf.append(f_WF)
plt.scatter(population_sizes, fix_prob_sim, color='b', label='simulation')
plt.scatter(population_sizes, fix_prob_Moran, color='r', label='Moran-Analytical')
plt.scatter(population_sizes, fix_prob_wf, color='orange', label='WF-Analytical')
plt.xscale('Log')
plt.xlabel('population size')
plt.ylabel('fixation probability at N')
plt.ylim([-0.01,0.3])
plt.legend()
plt.show()
| [
"numpy.mean",
"numpy.random.rand",
"matplotlib.pyplot.ylabel",
"numpy.power",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.xlabel",
"numpy.asarray",
"matplotlib.pyplot.style.use",
"numpy.exp",
"numpy.linspace",
"seaborn.kdeplot",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.xscale",
"matplotlib.pyplot.show"
] | [((202, 226), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""seaborn"""'], {}), "('seaborn')\n", (215, 226), True, 'import matplotlib.pyplot as plt\n'), ((4645, 4719), 'matplotlib.pyplot.scatter', 'plt.scatter', (['population_sizes', 'fix_prob_sim'], {'color': '"""b"""', 'label': '"""simulation"""'}), "(population_sizes, fix_prob_sim, color='b', label='simulation')\n", (4656, 4719), True, 'import matplotlib.pyplot as plt\n'), ((4720, 4807), 'matplotlib.pyplot.scatter', 'plt.scatter', (['population_sizes', 'fix_prob_Moran'], {'color': '"""r"""', 'label': '"""Moran-Analytical"""'}), "(population_sizes, fix_prob_Moran, color='r', label=\n 'Moran-Analytical')\n", (4731, 4807), True, 'import matplotlib.pyplot as plt\n'), ((4803, 4889), 'matplotlib.pyplot.scatter', 'plt.scatter', (['population_sizes', 'fix_prob_wf'], {'color': '"""orange"""', 'label': '"""WF-Analytical"""'}), "(population_sizes, fix_prob_wf, color='orange', label=\n 'WF-Analytical')\n", (4814, 4889), True, 'import matplotlib.pyplot as plt\n'), ((4885, 4902), 'matplotlib.pyplot.xscale', 'plt.xscale', (['"""Log"""'], {}), "('Log')\n", (4895, 4902), True, 'import matplotlib.pyplot as plt\n'), ((4903, 4932), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""population size"""'], {}), "('population size')\n", (4913, 4932), True, 'import matplotlib.pyplot as plt\n'), ((4933, 4972), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""fixation probability at N"""'], {}), "('fixation probability at N')\n", (4943, 4972), True, 'import matplotlib.pyplot as plt\n'), ((4973, 4995), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[-0.01, 0.3]'], {}), '([-0.01, 0.3])\n', (4981, 4995), True, 'import matplotlib.pyplot as plt\n'), ((4995, 5007), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (5005, 5007), True, 'import matplotlib.pyplot as plt\n'), ((5008, 5018), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5016, 5018), True, 'import matplotlib.pyplot as plt\n'), ((2170, 2208), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'population_sizes[i]'], {}), '(0, 1, population_sizes[i])\n', (2181, 2208), True, 'import numpy as np\n'), ((2798, 2821), 'numpy.asarray', 'np.asarray', (['simulations'], {}), '(simulations)\n', (2808, 2821), True, 'import numpy as np\n'), ((3219, 3257), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'population_sizes[i]'], {}), '(0, 1, population_sizes[i])\n', (3230, 3257), True, 'import numpy as np\n'), ((3378, 3427), 'seaborn.kdeplot', 'sns.kdeplot', (['data'], {'ax': 'ax[i]', 'label': '"""2"""', 'color': '"""b"""'}), "(data, ax=ax[i], label='2', color='b')\n", (3389, 3427), True, 'import seaborn as sns\n'), ((3720, 3738), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (3736, 3738), True, 'import matplotlib.pyplot as plt\n'), ((424, 441), 'numpy.exp', 'np.exp', (['(N * s * x)'], {}), '(N * s * x)\n', (430, 441), True, 'import numpy as np\n'), ((636, 653), 'numpy.exp', 'np.exp', (['(N * s * x)'], {}), '(N * s * x)\n', (642, 653), True, 'import numpy as np\n'), ((1633, 1650), 'numpy.random.rand', 'np.random.rand', (['(1)'], {}), '(1)\n', (1647, 1650), True, 'import numpy as np\n'), ((3446, 3459), 'numpy.mean', 'np.mean', (['data'], {}), '(data)\n', (3453, 3459), True, 'import numpy as np\n'), ((374, 396), 'numpy.power', 'np.power', (['x', '(N * v - 1)'], {}), '(x, N * v - 1)\n', (382, 396), True, 'import numpy as np\n'), ((395, 421), 'numpy.power', 'np.power', (['(1 - x)', '(N * u - 1)'], {}), '(1 - x, N * u - 1)\n', (403, 421), True, 'import numpy as np\n'), ((607, 633), 
'numpy.power', 'np.power', (['(1 - x)', '(N * u - 1)'], {}), '(1 - x, N * u - 1)\n', (615, 633), True, 'import numpy as np\n'), ((812, 834), 'numpy.power', 'np.power', (['(w_A / w_a)', 'N'], {}), '(w_A / w_a, N)\n', (820, 834), True, 'import numpy as np\n'), ((990, 1000), 'numpy.exp', 'np.exp', (['(-s)'], {}), '(-s)\n', (996, 1000), True, 'import numpy as np\n'), ((1007, 1021), 'numpy.exp', 'np.exp', (['(-N * s)'], {}), '(-N * s)\n', (1013, 1021), True, 'import numpy as np\n'), ((586, 608), 'numpy.power', 'np.power', (['x', '(N * v - 1)'], {}), '(x, N * v - 1)\n', (594, 608), True, 'import numpy as np\n')] |
import os
from collections import defaultdict
from typing import Sequence
import pandas as pd
import regex as re
from models.article import Article
from tools.src.annotations import Annotations
from tools.src.annotation import Annotation
class ArticlesLoader:
""" A simple class that loads the article files in a provided directory as
articles.
The articles are provided by the workshop organizers in separate files in a
directory. Each article consists of title and content sentences written
separately on new lines (each). The name of the file contains the id of the
article.
"""
def __init__(self, data_dir,
article_file_id_pattern,
article_label_pattern_slc, article_label_pattern_flc,
labels_dir_slc=None, labels_dir_flc=None):
self.data_dir = data_dir
self.labels_dir_slc = labels_dir_slc
self.labels_dir_flc = labels_dir_flc
self.article_file_id_pattern = article_file_id_pattern
self.article_label_pattern_slc = article_label_pattern_slc
self.article_label_pattern_flc = article_label_pattern_flc
def load_data(self) -> Sequence[Article]:
""" Loads all the articles from the files in the provided directory.
Returns a list of Article objects
"""
article_files = os.listdir(self.data_dir)
articles = [self.__map_to_article(os.path.join(self.data_dir, article))
for article in article_files]
load_slc_labels: bool = self.labels_dir_slc is not None
load_flc_labels: bool = self.labels_dir_flc is not None
if load_slc_labels:
for article in articles:
self.__load_slc_labels(article)
if load_flc_labels:
for article in articles:
self.__load_flc_labels(article)
print("{} articles loaded".format(len(articles)))
return articles
def __map_to_article(self, file_path) -> Article:
"""Helper method that constructs an Article object from an article
file"""
with open(file_path) as file:
article_id = re \
.search(self.article_file_id_pattern, file.name, 0) \
.group(1)
content = file.readlines()
return Article(article_id, content)
def __load_slc_labels(self, article: Article):
file_name = os.path.join(self.labels_dir_slc,
self.article_label_pattern_slc
.format(article.article_id))
with open(file_name, mode="r") as file:
slc_labels = pd.read_csv(file, sep="\t", names=["article_id",
"sentence_id",
"technique"])
article.slc_labels = slc_labels.technique.values
def __load_flc_labels(self, article: Article):
article_id = article.article_id
# print("Loading flc annotations for {}".format(article_id))
file_name = os.path.join(self.labels_dir_flc,
self.article_label_pattern_flc
.format(article_id))
article_annotations = Annotations()
article_annotations.load_annotation_list_from_file(file_name)
if article_annotations.has_article(article_id):
annotations = article_annotations.get_article_annotations(article_id)
spans = annotations.get_article_annotations()
else:
spans = []
# convert the article annotations to sentence annotations
sentence_annotations = self.__convert_annotations(article, spans)
article.set_flc_annotations(sentence_annotations)
@staticmethod
def __convert_annotations(article, spans):
"""
Converts an article-based annotation to an annotation inside a sentence
:param article:
:param spans: list of article-wide spans. E.g. each span start and end
position is based on the article length, across sentences.
:return: list of spans covering the sentences of the article. Each entry
in the list is a bound inside a sentence.
"""
article_text = "".join(article.article_sentences)
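        # Worked example: for sentences ["abc\n", "defg\n"] and a single article-wide
        # span covering characters 2..5, the span starts in sentence 0 (local bounds
        # 2..4) and ends in sentence 1 (local bounds 0..1), so it is split into one
        # annotation per sentence.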
article_annotations = []
for i, sent in enumerate(article.article_sentences):
sent_start = article_text.find(sent)
assert sent_start != -1
sentence_annotations = []
sent_end = sent_start + len(sent)
for span in spans:
span_start = span.get_start_offset()
span_end = span.get_end_offset()
span_starts_in_sentence = sent_start <= span_start < sent_end
span_ends_in_sentence = span_start < sent_start < span_end <= sent_end
if span_starts_in_sentence:
sentence_annotation_start = span_start - sent_start
sentence_annotation_end = min(sent_end, span_end) - sent_start
sentence_annotation = Annotation(span.get_label(),
sentence_annotation_start,
sentence_annotation_end)
sentence_annotations.append(sentence_annotation)
assert sentence_annotation_start <= sentence_annotation_end
elif span_ends_in_sentence:
sentence_annotation_start = 0
sentence_annotation_end = min(sent_end, span_end) - sent_start
sentence_annotation = Annotation(span.get_label(),
sentence_annotation_start,
sentence_annotation_end)
sentence_annotations.append(sentence_annotation)
assert sentence_annotation_start <= sentence_annotation_end
article_annotations.append(sentence_annotations)
return article_annotations
| [
"regex.search",
"os.listdir",
"tools.src.annotations.Annotations",
"models.article.Article",
"pandas.read_csv",
"os.path.join"
] | [((1347, 1372), 'os.listdir', 'os.listdir', (['self.data_dir'], {}), '(self.data_dir)\n', (1357, 1372), False, 'import os\n'), ((3270, 3283), 'tools.src.annotations.Annotations', 'Annotations', ([], {}), '()\n', (3281, 3283), False, 'from tools.src.annotations import Annotations\n'), ((2311, 2339), 'models.article.Article', 'Article', (['article_id', 'content'], {}), '(article_id, content)\n', (2318, 2339), False, 'from models.article import Article\n'), ((2646, 2723), 'pandas.read_csv', 'pd.read_csv', (['file'], {'sep': '"""\t"""', 'names': "['article_id', 'sentence_id', 'technique']"}), "(file, sep='\\t', names=['article_id', 'sentence_id', 'technique'])\n", (2657, 2723), True, 'import pandas as pd\n'), ((1415, 1451), 'os.path.join', 'os.path.join', (['self.data_dir', 'article'], {}), '(self.data_dir, article)\n', (1427, 1451), False, 'import os\n'), ((2152, 2205), 'regex.search', 're.search', (['self.article_file_id_pattern', 'file.name', '(0)'], {}), '(self.article_file_id_pattern, file.name, 0)\n', (2161, 2205), True, 'import regex as re\n')] |
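# ---------------------------------------------------------------------------
# Usage sketch added for illustration (not part of the original module).
# The directory names and filename patterns below are assumptions about the
# corpus layout, not values taken from the project.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    loader = ArticlesLoader(
        data_dir="data/train-articles",                          # hypothetical path
        article_file_id_pattern=r"article(\d+)\.txt",           # group(1) becomes the article id
        article_label_pattern_slc="article{}.task-SLC.labels",  # hypothetical naming scheme
        article_label_pattern_flc="article{}.task-FLC.labels",  # hypothetical naming scheme
        labels_dir_slc="data/train-labels-SLC",                  # optional; None skips SLC labels
        labels_dir_flc="data/train-labels-FLC",                  # optional; None skips FLC labels
    )
    articles = loader.load_data()
    print("first article id:", articles[0].article_id)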
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
class NatGatewayRule(pulumi.CustomResource):
dst_ip: pulumi.Output[str]
"""
    The private IP of the instance bound to the NAT gateway.
"""
dst_port_range: pulumi.Output[str]
"""
The range of port numbers of the private ip, range: 1-65535. (eg: `port` or `port1-port2`).
"""
name: pulumi.Output[str]
nat_gateway_id: pulumi.Output[str]
"""
The ID of the Nat Gateway.
"""
protocol: pulumi.Output[str]
"""
The protocol of the Nat Gateway Rule. Possible values: `tcp`, `udp`.
"""
src_eip_id: pulumi.Output[str]
"""
    The ID of the EIP associated with the NAT Gateway.
"""
src_port_range: pulumi.Output[str]
"""
The range of port numbers of the eip, range: 1-65535. (eg: `port` or `port1-port2`).
"""
def __init__(__self__, resource_name, opts=None, dst_ip=None, dst_port_range=None, name=None, nat_gateway_id=None, protocol=None, src_eip_id=None, src_port_range=None, __props__=None, __name__=None, __opts__=None):
"""
        Provides a Nat Gateway Rule resource.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] dst_ip: The private IP of the instance bound to the NAT gateway.
:param pulumi.Input[str] dst_port_range: The range of port numbers of the private ip, range: 1-65535. (eg: `port` or `port1-port2`).
:param pulumi.Input[str] nat_gateway_id: The ID of the Nat Gateway.
:param pulumi.Input[str] protocol: The protocol of the Nat Gateway Rule. Possible values: `tcp`, `udp`.
        :param pulumi.Input[str] src_eip_id: The ID of the EIP associated with the NAT Gateway.
:param pulumi.Input[str] src_port_range: The range of port numbers of the eip, range: 1-65535. (eg: `port` or `port1-port2`).
> This content is derived from https://github.com/terraform-providers/terraform-provider-ucloud/blob/master/website/docs/r/nat_gateway_rule.html.markdown.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
if dst_ip is None:
raise TypeError("Missing required property 'dst_ip'")
__props__['dst_ip'] = dst_ip
if dst_port_range is None:
raise TypeError("Missing required property 'dst_port_range'")
__props__['dst_port_range'] = dst_port_range
__props__['name'] = name
if nat_gateway_id is None:
raise TypeError("Missing required property 'nat_gateway_id'")
__props__['nat_gateway_id'] = nat_gateway_id
if protocol is None:
raise TypeError("Missing required property 'protocol'")
__props__['protocol'] = protocol
if src_eip_id is None:
raise TypeError("Missing required property 'src_eip_id'")
__props__['src_eip_id'] = src_eip_id
if src_port_range is None:
raise TypeError("Missing required property 'src_port_range'")
__props__['src_port_range'] = src_port_range
super(NatGatewayRule, __self__).__init__(
'ucloud:vpc/natGatewayRule:NatGatewayRule',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name, id, opts=None, dst_ip=None, dst_port_range=None, name=None, nat_gateway_id=None, protocol=None, src_eip_id=None, src_port_range=None):
"""
Get an existing NatGatewayRule resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param str id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] dst_ip: The private IP of the instance bound to the NAT gateway.
:param pulumi.Input[str] dst_port_range: The range of port numbers of the private ip, range: 1-65535. (eg: `port` or `port1-port2`).
:param pulumi.Input[str] nat_gateway_id: The ID of the Nat Gateway.
:param pulumi.Input[str] protocol: The protocol of the Nat Gateway Rule. Possible values: `tcp`, `udp`.
        :param pulumi.Input[str] src_eip_id: The ID of the EIP associated with the NAT Gateway.
:param pulumi.Input[str] src_port_range: The range of port numbers of the eip, range: 1-65535. (eg: `port` or `port1-port2`).
> This content is derived from https://github.com/terraform-providers/terraform-provider-ucloud/blob/master/website/docs/r/nat_gateway_rule.html.markdown.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["dst_ip"] = dst_ip
__props__["dst_port_range"] = dst_port_range
__props__["name"] = name
__props__["nat_gateway_id"] = nat_gateway_id
__props__["protocol"] = protocol
__props__["src_eip_id"] = src_eip_id
__props__["src_port_range"] = src_port_range
return NatGatewayRule(resource_name, opts=opts, __props__=__props__)
def translate_output_property(self, prop):
return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| [
"warnings.warn",
"pulumi.ResourceOptions"
] | [((2370, 2445), 'warnings.warn', 'warnings.warn', (['"""explicit use of __name__ is deprecated"""', 'DeprecationWarning'], {}), "('explicit use of __name__ is deprecated', DeprecationWarning)\n", (2383, 2445), False, 'import warnings\n'), ((2528, 2627), 'warnings.warn', 'warnings.warn', (['"""explicit use of __opts__ is deprecated, use \'opts\' instead"""', 'DeprecationWarning'], {}), '("explicit use of __opts__ is deprecated, use \'opts\' instead",\n DeprecationWarning)\n', (2541, 2627), False, 'import warnings\n'), ((2696, 2720), 'pulumi.ResourceOptions', 'pulumi.ResourceOptions', ([], {}), '()\n', (2718, 2720), False, 'import pulumi\n'), ((5811, 5840), 'pulumi.ResourceOptions', 'pulumi.ResourceOptions', ([], {'id': 'id'}), '(id=id)\n', (5833, 5840), False, 'import pulumi\n')] |
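# Usage sketch added for illustration (not part of the generated module).
# The resource IDs below are placeholders, not real UCloud identifiers.
example_rule = NatGatewayRule(
    "example-rule",
    nat_gateway_id="nat-xxxxxxx",   # placeholder NAT gateway ID
    protocol="tcp",
    src_eip_id="eip-xxxxxxx",       # placeholder EIP ID
    src_port_range="80",
    dst_ip="10.10.1.10",            # private IP of the backend instance
    dst_port_range="80",
)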
import argparse
def decode_Caesar_cipher(s, n):
alpha = " ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz',.?!"
s = s.strip()
text = ''
for c in s:
text += alpha[(alpha.index(c) + n) % len(alpha)]
print(f'Decoded text: "{text}"')
info = "Reads file then decodes it via ceasar cipher."
parser = argparse.ArgumentParser(info)
parser.add_argument("-f", "--file", help="I need a file to decode!")
parser.add_argument("-r", "--rotation", default=-13, type=int,
help="Rotation can be either positive or negative number.")
args = parser.parse_args()
if args.file:
with open(args.file) as f:
encoded_text = f.read()
decode_Caesar_cipher(encoded_text, args.rotation)
else:
print("I need a file to decode!")
| [
"argparse.ArgumentParser"
] | [((330, 359), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['info'], {}), '(info)\n', (353, 359), False, 'import argparse\n')] |
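# Round-trip sketch added for illustration. encode_Caesar_cipher is a
# hypothetical helper, not part of the original script: it shifts by -n so that
# decode_Caesar_cipher(..., n) restores the text (the shifts cancel modulo len(alpha)).
def encode_Caesar_cipher(s, n):
    alpha = " ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz',.?!"
    return "".join(alpha[(alpha.index(c) - n) % len(alpha)] for c in s.strip())
# decode_Caesar_cipher(encode_Caesar_cipher("Attack at dawn!", 13), 13)
# prints: Decoded text: "Attack at dawn!"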
import inspect
import os
import discord
from collections import namedtuple
import importlib
import logging as log
from traceback import format_exc
Command = namedtuple("Command", "name aliases function description ")
loaded_plugins = dict()
client = None
def set_client(c: discord.Client):
global client
client = c
def load_plugins():
for plugin in os.listdir("plugins/"):
name = os.path.splitext(plugin)[0]
if not name.startswith("__"):
load_plugin(name)
def load_plugin(name: str, package: str = "plugins"):
try:
cmd = importlib.import_module("{package}.{plugin}".format(plugin=name, package=package))
except ImportError as e:
log.error("couldn't import {package}/{name}\n{e}".format(name=name, package=package, e=format_exc(e)))
return False
loaded_plugins[name] = cmd
return True
async def call_reload(name: str):
""" Initiates reload of plugin. """
# See if the plugin has an on_reload() function, and call that
if hasattr(loaded_plugins[name], "on_reload"):
if callable(loaded_plugins[name].on_reload):
result = loaded_plugins[name].on_reload(name)
if inspect.isawaitable(result):
await result
else:
await on_reload(name)
async def reload_plugins():
for plugin in loaded_plugins.values():
name = plugin.__name__.rsplit(".")[-1]
if not name.startswith("__"):
await call_reload(name)
def get_plugin(name: str):
if name in loaded_plugins:
return loaded_plugins[name]
return None
def get_command(trigger: str):
for plugin in loaded_plugins.values():
commands = getattr(plugin, "__commands", None)
if not commands:
continue
        for cmd in commands:
            if trigger == cmd.name or trigger in cmd.aliases:
                return cmd
def command(**options):
def decorator(func):
# create command
name = options.get("name", func.__name__)
aliases = options.get("aliases", list())
description = options.get("description", "No description")
cmd = Command(name, aliases, func, description)
# add command to internal list in plugin
plugin = inspect.getmodule(func)
commands = getattr(plugin, "__commands", list())
        # TODO: check whether the attribute is already in use
commands.append(cmd)
setattr(plugin, "__commands", commands)
        # TODO: from pcbot - what does this do?
# Add the cmd attribute to this function, in order to get the command assigned to the function
# setattr(func, "cmd", cmd)
log.info("Created command {} from {}".format(name, plugin.__name__))
return func
return decorator
# Default commands
def format_help(cmd: Command, guild: discord.Guild):
return "help i didnt understand" # TODO
async def on_reload(name: str):
if name in loaded_plugins:
# Remove all registered commands
if hasattr(loaded_plugins[name], "__commands"):
delattr(loaded_plugins[name], "__commands")
loaded_plugins[name] = importlib.reload(loaded_plugins[name])
log.debug("Reloaded plugin {}".format(name))
| [
"traceback.format_exc",
"os.listdir",
"collections.namedtuple",
"inspect.isawaitable",
"inspect.getmodule",
"os.path.splitext",
"importlib.reload"
] | [((158, 217), 'collections.namedtuple', 'namedtuple', (['"""Command"""', '"""name aliases function description """'], {}), "('Command', 'name aliases function description ')\n", (168, 217), False, 'from collections import namedtuple\n'), ((367, 389), 'os.listdir', 'os.listdir', (['"""plugins/"""'], {}), "('plugins/')\n", (377, 389), False, 'import os\n'), ((2304, 2327), 'inspect.getmodule', 'inspect.getmodule', (['func'], {}), '(func)\n', (2321, 2327), False, 'import inspect\n'), ((3179, 3217), 'importlib.reload', 'importlib.reload', (['loaded_plugins[name]'], {}), '(loaded_plugins[name])\n', (3195, 3217), False, 'import importlib\n'), ((406, 430), 'os.path.splitext', 'os.path.splitext', (['plugin'], {}), '(plugin)\n', (422, 430), False, 'import os\n'), ((1193, 1220), 'inspect.isawaitable', 'inspect.isawaitable', (['result'], {}), '(result)\n', (1212, 1220), False, 'import inspect\n'), ((788, 801), 'traceback.format_exc', 'format_exc', (['e'], {}), '(e)\n', (798, 801), False, 'from traceback import format_exc\n')] |
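# Example plugin sketch added for illustration (hypothetical plugins/greet.py).
# The import name "plugin" for the manager module above and the handler
# signature are assumptions; the dispatcher that eventually calls
# Command.function is not shown in this record.
import discord
import plugin  # the plugin-manager module above; actual import path is an assumption
@plugin.command(name="greet", aliases=["hi"], description="Replies with a greeting")
async def greet(message: discord.Message):
    await message.channel.send("Hello, {}!".format(message.author.display_name))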
import requests
import zipfile
import xmltodict
# Replace with your own API key.
api_key = "<KEY>"
url = "https://opendart.fss.or.kr/api/corpCode.xml"
params = {
"crtfc_key": api_key
}
resp = requests.get(url, params=params)
print(resp)
f = open('corp_code.zip', "wb")
f.write(resp.content)
f.close()
zf = zipfile.ZipFile('corp_code.zip')
zf.extractall()
zf.close()
f = open("CORPCODE.xml", encoding="utf-8")
data = f.read()
f.close()
result = xmltodict.parse(data)
for item in result['result']['list'][:5]:
print(item['corp_name'], item['corp_code'])
| [
"zipfile.ZipFile",
"xmltodict.parse",
"requests.get"
] | [((192, 224), 'requests.get', 'requests.get', (['url'], {'params': 'params'}), '(url, params=params)\n', (204, 224), False, 'import requests\n'), ((308, 340), 'zipfile.ZipFile', 'zipfile.ZipFile', (['"""corp_code.zip"""'], {}), "('corp_code.zip')\n", (323, 340), False, 'import zipfile\n'), ((448, 469), 'xmltodict.parse', 'xmltodict.parse', (['data'], {}), '(data)\n', (463, 469), False, 'import xmltodict\n')] |
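# Follow-up sketch added for illustration: turn the parsed corp list into a
# name -> corp_code lookup (e.g. with pandas) for use with other OpenDART endpoints.
import pandas as pd
corp_df = pd.DataFrame(result['result']['list'])
code_by_name = dict(zip(corp_df['corp_name'], corp_df['corp_code']))
# code_by_name.get('삼성전자') would then give the 8-digit corp_code for that company.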
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
from datetime import timedelta
import requests
from django.conf import settings as ontask_settings
from django.contrib import messages
from django.contrib.auth.decorators import user_passes_test
from django.shortcuts import reverse, redirect
from django.utils import timezone
from django.utils.crypto import get_random_string
from django.utils.translation import ugettext_lazy as _, ugettext
from rest_framework import status
from ontask.permissions import is_instructor
from .models import OnTaskOAuthUserTokens
from ontask import get_action_payload
return_url_key = 'oauth_return_url'
oauth_hash_key = 'oauth_hash'
callback_url_key = 'callback_url'
def get_initial_token_step1(request, oauth_info, return_url):
"""
    Obtain an OAuth2 token for the user in this request from the OAuth instance
encoded in the oauth_info
:param request: Received request
:param oauth_info: a dict with the following fields:
# {
# domain_port: VALUE,
# client_id: VALUE,
# client_secret: VALUE ,
# authorize_url: VALUE (format {0} for domain_port),
# access_token_url: VALUE (format {0} for domain_port),
# aux_params: DICT with additional parameters)
# }
:param return_url: URL to store as return URL after obtaining the token
:return: Http response
"""
# Remember the URL from which we are making the request so that we
# can return once the Token has been obtained
request.session[return_url_key] = return_url
# Store in the session a random hash key to make sure the call back goes
# back to the right request
request.session[oauth_hash_key] = get_random_string()
# Store the callback URL in the session
request.session[callback_url_key] = request.build_absolute_uri(
reverse('ontask_oauth:callback')
)
# The parameters for the request are described in:
# https://canvas.instructure.com/doc/api/file.oauth_endpoints.html
domain = oauth_info['domain_port']
return redirect(
requests.Request('GET',
oauth_info['authorize_url'].format(domain),
params={
'client_id': oauth_info['client_id'],
'response_type': 'code',
'redirect_uri': request.session[callback_url_key],
'scopes':'url:POST|/api/v1/conversations',
'state': request.session[oauth_hash_key],
}).prepare().url
)
def refresh_token(user_token, oauth_instance, oauth_info):
"""
    Obtain an OAuth2 token for the user in this request from the OAuth instance
encoded in the oauth_info
:param user_token: User token to be refreshed
:param oauth_instance: Name of the oauth info below
:param oauth_info: a dict with the following fields:
# {
# domain_port: VALUE,
# client_id: VALUE,
# client_secret: VALUE ,
# authorize_url: VALUE (format {0} for domain_port),
# access_token_url: VALUE (format {0} for domain_port),
# aux_params: DICT with additional parameters)
# }
    :return: The refreshed access token (an exception is raised if any anomaly is detected)
"""
# At this point we have the payload, the token and the OAuth configuration
# information.
domain = oauth_info['domain_port']
response = requests.post(
oauth_info['access_token_url'].format(domain),
{'grant_type': 'refresh_token',
'client_id': oauth_info['client_id'],
'client_secret': oauth_info['client_secret'],
'refresh_token': user_token.refresh_token,
'redirect_uri': reverse('ontask_oauth:callback')}
)
if response.status_code != status.HTTP_200_OK:
raise Exception(_('Unable to refresh access token from OAuth'))
# Response is correct. Parse and extract elements
response_data = response.json()
# Get the new token and expire datetime and save the token
user_token.access_token = response_data['access_token']
user_token.valid_until = timezone.now() + \
timedelta(seconds=response_data.get('expires_in',
0))
user_token.save()
return user_token.access_token
@user_passes_test(is_instructor)
def callback(request):
"""
Callback received from the server. This is supposed to contain the token
so it is saved to the database and then redirects to a page previously
stored in the session object.
:param request: Request object
:return: Redirection to the stored page
"""
# Get the payload from the session
payload = get_action_payload(request)
# If there is no payload, something went wrong.
if payload is None:
# Something is wrong with this execution. Return to action table.
messages.error(request,
_('Incorrect Canvas callback invocation.'))
return redirect('action:index')
# Check first if there has been some error
error_string = request.GET.get('error', None)
if error_string:
messages.error(
request,
ugettext('Error in OAuth2 step 1 ({0})').format(error_string)
)
return redirect('action:index')
# Verify if the state is the one expected (stored in the session)
if request.session[oauth_hash_key] != request.GET.get('state'):
# This call back does not match the appropriate request. Something
# went wrong.
messages.error(request,
_('Inconsistent OAuth response. Unable to authorize'))
return redirect('action:index')
# Get the information from the payload
oauth_instance = payload.get('target_url')
if not oauth_instance:
messages.error(request, _('Internal error. Empty OAuth Instance name'))
return redirect('action:index')
oauth_info = ontask_settings.CANVAS_INFO_DICT.get(oauth_instance)
if not oauth_info:
messages.error(request, _('Internal error. Invalid OAuth Dict element'))
return redirect('action:index')
# Correct response from a previous request. Obtain the access token,
# the refresh token, and the expiration date.
domain = oauth_info['domain_port']
response = requests.post(oauth_info['access_token_url'].format(domain),
{'grant_type': 'authorization_code',
'client_id': oauth_info['client_id'],
'client_secret': oauth_info['client_secret'],
'redirect_uri': request.session[callback_url_key],
'code': request.GET.get('code')})
if response.status_code != status.HTTP_200_OK:
# POST request was not successful
messages.error(request,
_('Unable to obtain access token from OAuth'))
return redirect('action:index')
# Response is correct. Parse and extract elements
response_data = response.json()
# Create the new token for the user
utoken = OnTaskOAuthUserTokens(
user=request.user,
instance_name=oauth_instance,
access_token=response_data['access_token'],
refresh_token=response_data.get('refresh_token', None),
valid_until=timezone.now() + \
timedelta(seconds=response_data.get('expires_in', 0))
)
utoken.save()
return redirect(request.session.get(return_url_key,
reverse('action:index')))
| [
"django.utils.translation.ugettext_lazy",
"django.conf.settings.CANVAS_INFO_DICT.get",
"django.utils.crypto.get_random_string",
"django.contrib.auth.decorators.user_passes_test",
"django.utils.timezone.now",
"django.shortcuts.redirect",
"django.shortcuts.reverse",
"django.utils.translation.ugettext",
"ontask.get_action_payload"
] | [((4390, 4421), 'django.contrib.auth.decorators.user_passes_test', 'user_passes_test', (['is_instructor'], {}), '(is_instructor)\n', (4406, 4421), False, 'from django.contrib.auth.decorators import user_passes_test\n'), ((1717, 1736), 'django.utils.crypto.get_random_string', 'get_random_string', ([], {}), '()\n', (1734, 1736), False, 'from django.utils.crypto import get_random_string\n'), ((4780, 4807), 'ontask.get_action_payload', 'get_action_payload', (['request'], {}), '(request)\n', (4798, 4807), False, 'from ontask import get_action_payload\n'), ((6028, 6080), 'django.conf.settings.CANVAS_INFO_DICT.get', 'ontask_settings.CANVAS_INFO_DICT.get', (['oauth_instance'], {}), '(oauth_instance)\n', (6064, 6080), True, 'from django.conf import settings as ontask_settings\n'), ((1858, 1890), 'django.shortcuts.reverse', 'reverse', (['"""ontask_oauth:callback"""'], {}), "('ontask_oauth:callback')\n", (1865, 1890), False, 'from django.shortcuts import reverse, redirect\n'), ((4162, 4176), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (4174, 4176), False, 'from django.utils import timezone\n'), ((5073, 5097), 'django.shortcuts.redirect', 'redirect', (['"""action:index"""'], {}), "('action:index')\n", (5081, 5097), False, 'from django.shortcuts import reverse, redirect\n'), ((5361, 5385), 'django.shortcuts.redirect', 'redirect', (['"""action:index"""'], {}), "('action:index')\n", (5369, 5385), False, 'from django.shortcuts import reverse, redirect\n'), ((5747, 5771), 'django.shortcuts.redirect', 'redirect', (['"""action:index"""'], {}), "('action:index')\n", (5755, 5771), False, 'from django.shortcuts import reverse, redirect\n'), ((5985, 6009), 'django.shortcuts.redirect', 'redirect', (['"""action:index"""'], {}), "('action:index')\n", (5993, 6009), False, 'from django.shortcuts import reverse, redirect\n'), ((6200, 6224), 'django.shortcuts.redirect', 'redirect', (['"""action:index"""'], {}), "('action:index')\n", (6208, 6224), False, 'from django.shortcuts import reverse, redirect\n'), ((7030, 7054), 'django.shortcuts.redirect', 'redirect', (['"""action:index"""'], {}), "('action:index')\n", (7038, 7054), False, 'from django.shortcuts import reverse, redirect\n'), ((3754, 3786), 'django.shortcuts.reverse', 'reverse', (['"""ontask_oauth:callback"""'], {}), "('ontask_oauth:callback')\n", (3761, 3786), False, 'from django.shortcuts import reverse, redirect\n'), ((3870, 3916), 'django.utils.translation.ugettext_lazy', '_', (['"""Unable to refresh access token from OAuth"""'], {}), "('Unable to refresh access token from OAuth')\n", (3871, 3916), True, 'from django.utils.translation import ugettext_lazy as _, ugettext\n'), ((5014, 5056), 'django.utils.translation.ugettext_lazy', '_', (['"""Incorrect Canvas callback invocation."""'], {}), "('Incorrect Canvas callback invocation.')\n", (5015, 5056), True, 'from django.utils.translation import ugettext_lazy as _, ugettext\n'), ((5677, 5730), 'django.utils.translation.ugettext_lazy', '_', (['"""Inconsistent OAuth response. Unable to authorize"""'], {}), "('Inconsistent OAuth response. Unable to authorize')\n", (5678, 5730), True, 'from django.utils.translation import ugettext_lazy as _, ugettext\n'), ((5922, 5968), 'django.utils.translation.ugettext_lazy', '_', (['"""Internal error. Empty OAuth Instance name"""'], {}), "('Internal error. 
Empty OAuth Instance name')\n", (5923, 5968), True, 'from django.utils.translation import ugettext_lazy as _, ugettext\n'), ((6136, 6183), 'django.utils.translation.ugettext_lazy', '_', (['"""Internal error. Invalid OAuth Dict element"""'], {}), "('Internal error. Invalid OAuth Dict element')\n", (6137, 6183), True, 'from django.utils.translation import ugettext_lazy as _, ugettext\n'), ((6968, 7013), 'django.utils.translation.ugettext_lazy', '_', (['"""Unable to obtain access token from OAuth"""'], {}), "('Unable to obtain access token from OAuth')\n", (6969, 7013), True, 'from django.utils.translation import ugettext_lazy as _, ugettext\n'), ((7638, 7661), 'django.shortcuts.reverse', 'reverse', (['"""action:index"""'], {}), "('action:index')\n", (7645, 7661), False, 'from django.shortcuts import reverse, redirect\n'), ((7424, 7438), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (7436, 7438), False, 'from django.utils import timezone\n'), ((5274, 5314), 'django.utils.translation.ugettext', 'ugettext', (['"""Error in OAuth2 step 1 ({0})"""'], {}), "('Error in OAuth2 step 1 ({0})')\n", (5282, 5314), False, 'from django.utils.translation import ugettext_lazy as _, ugettext\n')] |
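# Settings sketch added for illustration: the oauth_info dict documented in the
# docstrings above is looked up from ontask_settings.CANVAS_INFO_DICT by instance
# name. A hypothetical entry could look like this (all values are placeholders;
# the Canvas endpoint paths are assumptions, not taken from this module).
CANVAS_INFO_DICT = {
    "my-canvas-instance": {
        "domain_port": "canvas.example.edu",
        "client_id": "10000000000001",
        "client_secret": "REPLACE_ME",
        "authorize_url": "https://{0}/login/oauth2/auth",
        "access_token_url": "https://{0}/login/oauth2/token",
        "aux_params": {},
    }
}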
import json
from nova_dveri_ru.utils import get_from_yaml
card_list = list(get_from_yaml('output/product_cards.yaml'))
indent = ' ' * 4
with open('output/product_cards.json', 'w', encoding='utf-8') as json_out:
json.dump(card_list, json_out, ensure_ascii=False, indent=indent)
| [
"nova_dveri_ru.utils.get_from_yaml",
"json.dump"
] | [((78, 120), 'nova_dveri_ru.utils.get_from_yaml', 'get_from_yaml', (['"""output/product_cards.yaml"""'], {}), "('output/product_cards.yaml')\n", (91, 120), False, 'from nova_dveri_ru.utils import get_from_yaml\n'), ((218, 283), 'json.dump', 'json.dump', (['card_list', 'json_out'], {'ensure_ascii': '(False)', 'indent': 'indent'}), '(card_list, json_out, ensure_ascii=False, indent=indent)\n', (227, 283), False, 'import json\n')] |
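# Round-trip sketch added for illustration: read the JSON back and confirm the
# record count matches what came out of the YAML source.
with open('output/product_cards.json', encoding='utf-8') as json_in:
    assert len(json.load(json_in)) == len(card_list)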
import pandas as pd
from financialdata.crawler.taiwan_stock_price import (
clear_data,
colname_zh2en,
convert_change,
convert_date,
crawler,
gen_task_paramter_list,
is_weekend,
set_column,
twse_header,
tpex_header,
crawler_twse,
crawler_tpex,
)
from financialdata.schema.dataset import (
check_schema,
)
def test_is_weekend_false():
"""
    Test: not a weekend. Input Monday (1), expect False.
    """
    result = is_weekend(day=1)  # actual result
    expected = False
    # Write the expected value first, so that even without running the code,
    # reading the test alone shows what the function should return.
    assert (
        result == expected
    )  # check: actual result == expected result
def test_is_weekend_true():
"""
    Test: a weekend. Input Sunday (0), expect True.
    """
    result = is_weekend(day=0)  # actual result
    expected = True
    # Write the expected value first, so that even without running the code,
    # reading the test alone shows what the function should return.
    assert (
        result == expected
    )  # check: actual result == expected result
def test_gen_task_paramter_list():
"""
    Build the task parameter list for 2021-01-01 ~ 2021-01-05.
"""
result = gen_task_paramter_list(
start_date="2021-01-01",
end_date="2021-01-05",
    )  # actual result
expected = [
{
"date": "2021-01-01",
"data_source": "twse",
},
{
"date": "2021-01-01",
"data_source": "tpex",
},
{
"date": "2021-01-02",
"data_source": "twse",
},
{
"date": "2021-01-02",
"data_source": "tpex",
},
{
"date": "2021-01-05",
"data_source": "twse",
},
{
"date": "2021-01-05",
"data_source": "tpex",
},
]
    # Expect the task-parameter list covering 2021-01-01 ~ 2021-01-05;
    # these parameters are then sent to RabbitMQ so each worker runs its crawl independently.
assert (
result == expected
    )  # check: actual result == expected result
def test_clear_data():
    # prepare fake input data
df = pd.DataFrame(
[
{
"StockID": "0050",
"TradeVolume": "4,962,514",
"Transaction": "6,179",
"TradeValue": "616,480,760",
"Open": "124.20",
"Max": "124.65",
"Min": "123.75",
"Close": "124.60",
"Change": 0.25,
"Date": "2021-01-05",
},
{
"StockID": "0051",
"TradeVolume": "175,269",
"Transaction": "44",
"TradeValue": "7,827,387",
"Open": "44.60",
"Max": "44.74",
"Min": "44.39",
"Close": "44.64",
"Change": 0.04,
"Date": "2021-01-05",
},
{
"StockID": "0052",
"TradeVolume": "1,536,598",
"Transaction": "673",
"TradeValue": "172,232,526",
"Open": "112.10",
"Max": "112.90",
"Min": "111.15",
"Close": "112.90",
"Change": 0.8,
"Date": "2021-01-05",
},
]
)
result_df = clear_data(
df.copy()
    )  # feed the input to the function and get the result
expected_df = pd.DataFrame(
[
{
"StockID": "0050",
"TradeVolume": "4962514",
"Transaction": "6179",
"TradeValue": "616480760",
"Open": "124.20",
"Max": "124.65",
"Min": "123.75",
"Close": "124.60",
"Change": "0.25",
"Date": "2021-01-05",
},
{
"StockID": "0051",
"TradeVolume": "175269",
"Transaction": "44",
"TradeValue": "7827387",
"Open": "44.60",
"Max": "44.74",
"Min": "44.39",
"Close": "44.64",
"Change": "0.04",
"Date": "2021-01-05",
},
{
"StockID": "0052",
"TradeVolume": "1536598",
"Transaction": "673",
"TradeValue": "172232526",
"Open": "112.10",
"Max": "112.90",
"Min": "111.15",
"Close": "112.90",
"Change": "0.8",
"Date": "2021-01-05",
},
]
)
    # Expected result after data cleaning:
    # accounting-style numbers such as 1,536,598
    # are converted to plain numbers like 1536598.
assert (
pd.testing.assert_frame_equal(
result_df, expected_df
)
is None
    )  # check: actual result == expected result
def test_colname_zh2en():
    # prepare fake input data
result_df = pd.DataFrame(
[
{
0: "0050",
1: "元大台灣50",
2: "4,962,514",
3: "6,179",
4: "616,480,760",
5: "124.20",
6: "124.65",
7: "123.75",
8: "124.60",
9: "<p style= color:red>+</p>",
10: "0.25",
11: "124.55",
12: "123",
13: "124.60",
14: "29",
15: "0.00",
},
{
0: "0051",
1: "元大中型100",
2: "175,269",
3: "44",
4: "7,827,387",
5: "44.60",
6: "44.74",
7: "44.39",
8: "44.64",
9: "<p style= color:red>+</p>",
10: "0.04",
11: "44.64",
12: "20",
13: "44.74",
14: "2",
15: "0.00",
},
]
)
colname = [
"證券代號",
"證券名稱",
"成交股數",
"成交筆數",
"成交金額",
"開盤價",
"最高價",
"最低價",
"收盤價",
"漲跌(+/-)",
"漲跌價差",
"最後揭示買價",
"最後揭示買量",
"最後揭示賣價",
"最後揭示賣量",
"本益比",
]
result_df = colname_zh2en(
result_df.copy(), colname
    )  # feed the input to the function and get the result
expected_df = pd.DataFrame(
[
{
"StockID": "0050",
"TradeVolume": "4,962,514",
"Transaction": "6,179",
"TradeValue": "616,480,760",
"Open": "124.20",
"Max": "124.65",
"Min": "123.75",
"Close": "124.60",
"Dir": "<p style= color:red>+</p>",
"Change": "0.25",
},
{
"StockID": "0051",
"TradeVolume": "175,269",
"Transaction": "44",
"TradeValue": "7,827,387",
"Open": "44.60",
"Max": "44.74",
"Min": "44.39",
"Close": "44.64",
"Dir": "<p style= color:red>+</p>",
"Change": "0.04",
},
]
)
    # Expected result: the raw data, which has Chinese column names,
    # is converted to English column names so it can be stored in the database.
assert (
pd.testing.assert_frame_equal(
result_df, expected_df
)
is None
    )  # check: actual result == expected result
def test_twse_header():
result = twse_header()
expected = {
"Accept": "application/json, text/javascript, */*; q=0.01",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "zh-TW,zh;q=0.9,en-US;q=0.8,en;q=0.7",
"Connection": "keep-alive",
"Host": "www.twse.com.tw",
"Referer": "https://www.twse.com.tw/zh/page/trading/exchange/MI_INDEX.html",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36",
"X-Requested-With": "XMLHttpRequest",
}
assert result == expected
def test_tpex_header():
result = tpex_header()
expected = {
"Accept": "application/json, text/javascript, */*; q=0.01",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "zh-TW,zh;q=0.9,en-US;q=0.8,en;q=0.7",
"Connection": "keep-alive",
"Host": "www.tpex.org.tw",
"Referer": "https://www.tpex.org.tw/web/stock/aftertrading/otc_quotes_no1430/stk_wn1430.php?l=zh-tw",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.103 Safari/537.36",
"X-Requested-With": "XMLHttpRequest",
}
assert result == expected
def test_set_column():
    # prepare fake input data
df = pd.DataFrame(
[
{
0: "00679B",
2: "44.91",
3: "-0.08",
4: "45.00",
5: "45.00",
6: "44.85",
7: "270,000",
8: "12,127,770",
9: "147",
},
{
0: "00687B",
2: "47.03",
3: "-0.09",
4: "47.13",
5: "47.13",
6: "47.00",
7: "429,000",
8: "20,181,570",
9: "39",
},
{
0: "00694B",
2: "37.77",
3: "-0.07",
4: "37.84",
5: "37.84",
6: "37.72",
7: "343,000",
8: "12,943,630",
9: "35",
},
]
)
result_df = set_column(
df
    )  # feed the input to the function and get the result
expected_df = pd.DataFrame(
[
{
"StockID": "00679B",
"Close": "44.91",
"Change": "-0.08",
"Open": "45.00",
"Max": "45.00",
"Min": "44.85",
"TradeVolume": "270,000",
"TradeValue": "12,127,770",
"Transaction": "147",
},
{
"StockID": "00687B",
"Close": "47.03",
"Change": "-0.09",
"Open": "47.13",
"Max": "47.13",
"Min": "47.00",
"TradeVolume": "429,000",
"TradeValue": "20,181,570",
"Transaction": "39",
},
{
"StockID": "00694B",
"Close": "37.77",
"Change": "-0.07",
"Open": "37.84",
"Max": "37.84",
"Min": "37.72",
"TradeVolume": "343,000",
"TradeValue": "12,943,630",
"Transaction": "35",
},
]
)
    # Expected result: column names are assigned according to the position of the data.
assert (
pd.testing.assert_frame_equal(
result_df, expected_df
)
is None
    )  # check: actual result == expected result
def test_crawler_twse_data9():
"""
    Test the TWSE case where data is crawled successfully in 2021;
    the data sits under the key data9 in the response.
    Government sites often change their data format over long periods.
"""
result_df = crawler_twse(
date="2021-01-05"
    )  # actual result
assert (
len(result_df) == 20596
    )  # check: the number of rows is correct
assert list(result_df.columns) == [
"StockID",
"TradeVolume",
"Transaction",
"TradeValue",
"Open",
"Max",
"Min",
"Close",
"Change",
"Date",
    ]  # check: the column names are correct
def test_crawler_twse_data8():
"""
    Test the TWSE case where data is crawled successfully in 2008; a different
    period with a different format -- here the data sits under the key data8.
    Government sites often change their data format over long periods.
"""
result_df = crawler_twse(
date="2008-01-04"
)
assert (
len(result_df) == 2760
    )  # check: the number of rows is correct
assert list(result_df.columns) == [
"StockID",
"TradeVolume",
"Transaction",
"TradeValue",
"Open",
"Max",
"Min",
"Close",
"Change",
"Date",
    ]  # check: the column names are correct
def test_crawler_twse_no_data():
"""
    Test that the crawler behaves correctly on a date with no data.
"""
result_df = crawler_twse(
date="2000-01-04"
)
assert (
len(result_df) == 0
    )  # no data, so 0 rows are returned
    # even with no data, a pd.DataFrame must still be returned
assert isinstance(
result_df, pd.DataFrame
)
def test_crawler_twse_error(mocker):
"""
    Test whether the crawler fails when the remote site returns an exception
    or the IP gets banned.
    A special technique, mocker, is used here: during testing we cannot
    guarantee that the remote site will actually return an error, so mocker
    "replaces" requests with a response we control, as shown below.
"""
    # replace requests under the given import path
mock_requests = mocker.patch(
"financialdata.crawler.taiwan_stock_price.requests"
)
    # Replace the response returned by requests.get with "".
    # This way, when the test runs the crawler,
    # the response obtained from the request is "".
mock_requests.get.return_value = ""
result_df = crawler_twse(
date="2000-01-04"
)
assert (
len(result_df) == 0
    )  # no data, so 0 rows are returned
    # even with no data, a pd.DataFrame must still be returned
assert isinstance(
result_df, pd.DataFrame
)
def test_crawler_tpex_success():
"""
    Test the TPEX (OTC market) crawler in the successful case.
"""
result_df = crawler_tpex(
date="2021-01-05"
    )  # actual result
assert (
len(result_df) == 6609
    )  # check: the number of rows is correct
assert list(result_df.columns) == [
"StockID",
"Close",
"Change",
"Open",
"Max",
"Min",
"TradeVolume",
"TradeValue",
"Transaction",
"Date",
]
def test_crawler_tpex_no_data():
"""
    Test that the crawler behaves correctly on a date with no data.
"""
result_df = crawler_tpex(
date="2021-01-01"
)
assert (
len(result_df) == 0
    )  # no data, so 0 rows are returned
    # even with no data, a pd.DataFrame must still be returned
assert isinstance(
result_df, pd.DataFrame
)
def test_convert_change():
    # prepare fake input data
df = pd.DataFrame(
[
{
"StockID": "0050",
"TradeVolume": "4,680,733",
"Transaction": "5,327",
"TradeValue": "649,025,587",
"Open": "139.00",
"Max": "139.20",
"Min": "138.05",
"Close": "138.30",
"Dir": "<p style= color:green>-</p>",
"Change": "0.65",
"Date": "2021-07-01",
},
{
"StockID": "0051",
"TradeVolume": "175,374",
"Transaction": "120",
"TradeValue": "10,152,802",
"Open": "58.20",
"Max": "59.10",
"Min": "57.40",
"Close": "57.90",
"Dir": "<p style= color:green>-</p>",
"Change": "0.30",
"Date": "2021-07-01",
},
{
"StockID": "0052",
"TradeVolume": "514,042",
"Transaction": "270",
"TradeValue": "64,127,738",
"Open": "125.00",
"Max": "125.20",
"Min": "124.35",
"Close": "124.35",
"Dir": "<p style= color:green>-</p>",
"Change": "0.65",
"Date": "2021-07-01",
},
]
)
result_df = convert_change(
df
    )  # actual result
expected_df = pd.DataFrame(
[
{
"StockID": "0050",
"TradeVolume": "4,680,733",
"Transaction": "5,327",
"TradeValue": "649,025,587",
"Open": "139.00",
"Max": "139.20",
"Min": "138.05",
"Close": "138.30",
"Change": -0.65,
"Date": "2021-07-01",
},
{
"StockID": "0051",
"TradeVolume": "175,374",
"Transaction": "120",
"TradeValue": "10,152,802",
"Open": "58.20",
"Max": "59.10",
"Min": "57.40",
"Close": "57.90",
"Change": -0.3,
"Date": "2021-07-01",
},
{
"StockID": "0052",
"TradeVolume": "514,042",
"Transaction": "270",
"TradeValue": "64,127,738",
"Open": "125.00",
"Max": "125.20",
"Min": "124.35",
"Close": "124.35",
"Change": -0.65,
"Date": "2021-07-01",
},
]
)
    # Expected result:
    # Dir (the +/- sign) is merged into Change (the price change).
assert (
pd.testing.assert_frame_equal(
result_df, expected_df
)
is None
    )  # check: actual result == expected result
def test_convert_date():
    date = "2021-07-01"  # prepare fake input data
    result = convert_date(date)  # actual result
    expected = "110/07/01"  # expected result (ROC calendar date)
assert (
result == expected
    )  # check: actual result == expected result
def test_crawler_twse():
    # end-to-end test of the TWSE crawler
result_df = crawler(
parameter={
"date": "2021-01-05",
"data_source": "twse",
}
)
result_df = check_schema(
result_df, "TaiwanStockPrice"
)
assert len(result_df) > 0
def test_crawler_tpex():
    # end-to-end test of the TPEX crawler
result_df = crawler(
parameter={
"date": "2021-01-05",
"data_source": "tpex",
}
)
result_df = check_schema(
result_df, "TaiwanStockPrice"
)
assert len(result_df) > 0
| [
"financialdata.schema.dataset.check_schema",
"financialdata.crawler.taiwan_stock_price.set_column",
"financialdata.crawler.taiwan_stock_price.is_weekend",
"financialdata.crawler.taiwan_stock_price.convert_change",
"financialdata.crawler.taiwan_stock_price.crawler_twse",
"financialdata.crawler.taiwan_stock_price.crawler_tpex",
"financialdata.crawler.taiwan_stock_price.twse_header",
"financialdata.crawler.taiwan_stock_price.convert_date",
"financialdata.crawler.taiwan_stock_price.crawler",
"pandas.testing.assert_frame_equal",
"pandas.DataFrame",
"financialdata.crawler.taiwan_stock_price.tpex_header",
"financialdata.crawler.taiwan_stock_price.gen_task_paramter_list"
] | [((447, 464), 'financialdata.crawler.taiwan_stock_price.is_weekend', 'is_weekend', ([], {'day': '(1)'}), '(day=1)\n', (457, 464), False, 'from financialdata.crawler.taiwan_stock_price import clear_data, colname_zh2en, convert_change, convert_date, crawler, gen_task_paramter_list, is_weekend, set_column, twse_header, tpex_header, crawler_twse, crawler_tpex\n'), ((702, 719), 'financialdata.crawler.taiwan_stock_price.is_weekend', 'is_weekend', ([], {'day': '(0)'}), '(day=0)\n', (712, 719), False, 'from financialdata.crawler.taiwan_stock_price import clear_data, colname_zh2en, convert_change, convert_date, crawler, gen_task_paramter_list, is_weekend, set_column, twse_header, tpex_header, crawler_twse, crawler_tpex\n'), ((977, 1047), 'financialdata.crawler.taiwan_stock_price.gen_task_paramter_list', 'gen_task_paramter_list', ([], {'start_date': '"""2021-01-01"""', 'end_date': '"""2021-01-05"""'}), "(start_date='2021-01-01', end_date='2021-01-05')\n", (999, 1047), False, 'from financialdata.crawler.taiwan_stock_price import clear_data, colname_zh2en, convert_change, convert_date, crawler, gen_task_paramter_list, is_weekend, set_column, twse_header, tpex_header, crawler_twse, crawler_tpex\n'), ((1850, 2519), 'pandas.DataFrame', 'pd.DataFrame', (["[{'StockID': '0050', 'TradeVolume': '4,962,514', 'Transaction': '6,179',\n 'TradeValue': '616,480,760', 'Open': '124.20', 'Max': '124.65', 'Min':\n '123.75', 'Close': '124.60', 'Change': 0.25, 'Date': '2021-01-05'}, {\n 'StockID': '0051', 'TradeVolume': '175,269', 'Transaction': '44',\n 'TradeValue': '7,827,387', 'Open': '44.60', 'Max': '44.74', 'Min':\n '44.39', 'Close': '44.64', 'Change': 0.04, 'Date': '2021-01-05'}, {\n 'StockID': '0052', 'TradeVolume': '1,536,598', 'Transaction': '673',\n 'TradeValue': '172,232,526', 'Open': '112.10', 'Max': '112.90', 'Min':\n '111.15', 'Close': '112.90', 'Change': 0.8, 'Date': '2021-01-05'}]"], {}), "([{'StockID': '0050', 'TradeVolume': '4,962,514', 'Transaction':\n '6,179', 'TradeValue': '616,480,760', 'Open': '124.20', 'Max': '124.65',\n 'Min': '123.75', 'Close': '124.60', 'Change': 0.25, 'Date':\n '2021-01-05'}, {'StockID': '0051', 'TradeVolume': '175,269',\n 'Transaction': '44', 'TradeValue': '7,827,387', 'Open': '44.60', 'Max':\n '44.74', 'Min': '44.39', 'Close': '44.64', 'Change': 0.04, 'Date':\n '2021-01-05'}, {'StockID': '0052', 'TradeVolume': '1,536,598',\n 'Transaction': '673', 'TradeValue': '172,232,526', 'Open': '112.10',\n 'Max': '112.90', 'Min': '111.15', 'Close': '112.90', 'Change': 0.8,\n 'Date': '2021-01-05'}])\n", (1862, 2519), True, 'import pandas as pd\n'), ((3154, 3817), 'pandas.DataFrame', 'pd.DataFrame', (["[{'StockID': '0050', 'TradeVolume': '4962514', 'Transaction': '6179',\n 'TradeValue': '616480760', 'Open': '124.20', 'Max': '124.65', 'Min':\n '123.75', 'Close': '124.60', 'Change': '0.25', 'Date': '2021-01-05'}, {\n 'StockID': '0051', 'TradeVolume': '175269', 'Transaction': '44',\n 'TradeValue': '7827387', 'Open': '44.60', 'Max': '44.74', 'Min':\n '44.39', 'Close': '44.64', 'Change': '0.04', 'Date': '2021-01-05'}, {\n 'StockID': '0052', 'TradeVolume': '1536598', 'Transaction': '673',\n 'TradeValue': '172232526', 'Open': '112.10', 'Max': '112.90', 'Min':\n '111.15', 'Close': '112.90', 'Change': '0.8', 'Date': '2021-01-05'}]"], {}), "([{'StockID': '0050', 'TradeVolume': '4962514', 'Transaction':\n '6179', 'TradeValue': '616480760', 'Open': '124.20', 'Max': '124.65',\n 'Min': '123.75', 'Close': '124.60', 'Change': '0.25', 'Date':\n '2021-01-05'}, {'StockID': '0051', 'TradeVolume': 
'175269',\n 'Transaction': '44', 'TradeValue': '7827387', 'Open': '44.60', 'Max':\n '44.74', 'Min': '44.39', 'Close': '44.64', 'Change': '0.04', 'Date':\n '2021-01-05'}, {'StockID': '0052', 'TradeVolume': '1536598',\n 'Transaction': '673', 'TradeValue': '172232526', 'Open': '112.10',\n 'Max': '112.90', 'Min': '111.15', 'Close': '112.90', 'Change': '0.8',\n 'Date': '2021-01-05'}])\n", (3166, 3817), True, 'import pandas as pd\n'), ((4642, 5194), 'pandas.DataFrame', 'pd.DataFrame', (["[{(0): '0050', (1): '元大台灣50', (2): '4,962,514', (3): '6,179', (4):\n '616,480,760', (5): '124.20', (6): '124.65', (7): '123.75', (8):\n '124.60', (9): '<p style= color:red>+</p>', (10): '0.25', (11):\n '124.55', (12): '123', (13): '124.60', (14): '29', (15): '0.00'}, {(0):\n '0051', (1): '元大中型100', (2): '175,269', (3): '44', (4): '7,827,387', (5\n ): '44.60', (6): '44.74', (7): '44.39', (8): '44.64', (9):\n '<p style= color:red>+</p>', (10): '0.04', (11): '44.64', (12): '20', (\n 13): '44.74', (14): '2', (15): '0.00'}]"], {}), "([{(0): '0050', (1): '元大台灣50', (2): '4,962,514', (3): '6,179',\n (4): '616,480,760', (5): '124.20', (6): '124.65', (7): '123.75', (8):\n '124.60', (9): '<p style= color:red>+</p>', (10): '0.25', (11):\n '124.55', (12): '123', (13): '124.60', (14): '29', (15): '0.00'}, {(0):\n '0051', (1): '元大中型100', (2): '175,269', (3): '44', (4): '7,827,387', (5\n ): '44.60', (6): '44.74', (7): '44.39', (8): '44.64', (9):\n '<p style= color:red>+</p>', (10): '0.04', (11): '44.64', (12): '20', (\n 13): '44.74', (14): '2', (15): '0.00'}])\n", (4654, 5194), True, 'import pandas as pd\n'), ((6079, 6560), 'pandas.DataFrame', 'pd.DataFrame', (["[{'StockID': '0050', 'TradeVolume': '4,962,514', 'Transaction': '6,179',\n 'TradeValue': '616,480,760', 'Open': '124.20', 'Max': '124.65', 'Min':\n '123.75', 'Close': '124.60', 'Dir': '<p style= color:red>+</p>',\n 'Change': '0.25'}, {'StockID': '0051', 'TradeVolume': '175,269',\n 'Transaction': '44', 'TradeValue': '7,827,387', 'Open': '44.60', 'Max':\n '44.74', 'Min': '44.39', 'Close': '44.64', 'Dir':\n '<p style= color:red>+</p>', 'Change': '0.04'}]"], {}), "([{'StockID': '0050', 'TradeVolume': '4,962,514', 'Transaction':\n '6,179', 'TradeValue': '616,480,760', 'Open': '124.20', 'Max': '124.65',\n 'Min': '123.75', 'Close': '124.60', 'Dir': '<p style= color:red>+</p>',\n 'Change': '0.25'}, {'StockID': '0051', 'TradeVolume': '175,269',\n 'Transaction': '44', 'TradeValue': '7,827,387', 'Open': '44.60', 'Max':\n '44.74', 'Min': '44.39', 'Close': '44.64', 'Dir':\n '<p style= color:red>+</p>', 'Change': '0.04'}])\n", (6091, 6560), True, 'import pandas as pd\n'), ((7170, 7183), 'financialdata.crawler.taiwan_stock_price.twse_header', 'twse_header', ([], {}), '()\n', (7181, 7183), False, 'from financialdata.crawler.taiwan_stock_price import clear_data, colname_zh2en, convert_change, convert_date, crawler, gen_task_paramter_list, is_weekend, set_column, twse_header, tpex_header, crawler_twse, crawler_tpex\n'), ((7796, 7809), 'financialdata.crawler.taiwan_stock_price.tpex_header', 'tpex_header', ([], {}), '()\n', (7807, 7809), False, 'from financialdata.crawler.taiwan_stock_price import clear_data, colname_zh2en, convert_change, convert_date, crawler, gen_task_paramter_list, is_weekend, set_column, twse_header, tpex_header, crawler_twse, crawler_tpex\n'), ((8465, 8899), 'pandas.DataFrame', 'pd.DataFrame', (["[{(0): '00679B', (2): '44.91', (3): '-0.08', (4): '45.00', (5): '45.00', (6\n ): '44.85', (7): '270,000', (8): '12,127,770', (9): '147'}, {(0):\n '00687B', (2): '47.03', 
(3): '-0.09', (4): '47.13', (5): '47.13', (6):\n '47.00', (7): '429,000', (8): '20,181,570', (9): '39'}, {(0): '00694B',\n (2): '37.77', (3): '-0.07', (4): '37.84', (5): '37.84', (6): '37.72', (\n 7): '343,000', (8): '12,943,630', (9): '35'}]"], {}), "([{(0): '00679B', (2): '44.91', (3): '-0.08', (4): '45.00', (5):\n '45.00', (6): '44.85', (7): '270,000', (8): '12,127,770', (9): '147'},\n {(0): '00687B', (2): '47.03', (3): '-0.09', (4): '47.13', (5): '47.13',\n (6): '47.00', (7): '429,000', (8): '20,181,570', (9): '39'}, {(0):\n '00694B', (2): '37.77', (3): '-0.07', (4): '37.84', (5): '37.84', (6):\n '37.72', (7): '343,000', (8): '12,943,630', (9): '35'}])\n", (8477, 8899), True, 'import pandas as pd\n'), ((9380, 9394), 'financialdata.crawler.taiwan_stock_price.set_column', 'set_column', (['df'], {}), '(df)\n', (9390, 9394), False, 'from financialdata.crawler.taiwan_stock_price import clear_data, colname_zh2en, convert_change, convert_date, crawler, gen_task_paramter_list, is_weekend, set_column, twse_header, tpex_header, crawler_twse, crawler_tpex\n'), ((9441, 10041), 'pandas.DataFrame', 'pd.DataFrame', (["[{'StockID': '00679B', 'Close': '44.91', 'Change': '-0.08', 'Open': '45.00',\n 'Max': '45.00', 'Min': '44.85', 'TradeVolume': '270,000', 'TradeValue':\n '12,127,770', 'Transaction': '147'}, {'StockID': '00687B', 'Close':\n '47.03', 'Change': '-0.09', 'Open': '47.13', 'Max': '47.13', 'Min':\n '47.00', 'TradeVolume': '429,000', 'TradeValue': '20,181,570',\n 'Transaction': '39'}, {'StockID': '00694B', 'Close': '37.77', 'Change':\n '-0.07', 'Open': '37.84', 'Max': '37.84', 'Min': '37.72', 'TradeVolume':\n '343,000', 'TradeValue': '12,943,630', 'Transaction': '35'}]"], {}), "([{'StockID': '00679B', 'Close': '44.91', 'Change': '-0.08',\n 'Open': '45.00', 'Max': '45.00', 'Min': '44.85', 'TradeVolume':\n '270,000', 'TradeValue': '12,127,770', 'Transaction': '147'}, {\n 'StockID': '00687B', 'Close': '47.03', 'Change': '-0.09', 'Open':\n '47.13', 'Max': '47.13', 'Min': '47.00', 'TradeVolume': '429,000',\n 'TradeValue': '20,181,570', 'Transaction': '39'}, {'StockID': '00694B',\n 'Close': '37.77', 'Change': '-0.07', 'Open': '37.84', 'Max': '37.84',\n 'Min': '37.72', 'TradeVolume': '343,000', 'TradeValue': '12,943,630',\n 'Transaction': '35'}])\n", (9453, 10041), True, 'import pandas as pd\n'), ((10873, 10904), 'financialdata.crawler.taiwan_stock_price.crawler_twse', 'crawler_twse', ([], {'date': '"""2021-01-05"""'}), "(date='2021-01-05')\n", (10885, 10904), False, 'from financialdata.crawler.taiwan_stock_price import clear_data, colname_zh2en, convert_change, convert_date, crawler, gen_task_paramter_list, is_weekend, set_column, twse_header, tpex_header, crawler_twse, crawler_tpex\n'), ((11408, 11439), 'financialdata.crawler.taiwan_stock_price.crawler_twse', 'crawler_twse', ([], {'date': '"""2008-01-04"""'}), "(date='2008-01-04')\n", (11420, 11439), False, 'from financialdata.crawler.taiwan_stock_price import clear_data, colname_zh2en, convert_change, convert_date, crawler, gen_task_paramter_list, is_weekend, set_column, twse_header, tpex_header, crawler_twse, crawler_tpex\n'), ((11858, 11889), 'financialdata.crawler.taiwan_stock_price.crawler_twse', 'crawler_twse', ([], {'date': '"""2000-01-04"""'}), "(date='2000-01-04')\n", (11870, 11889), False, 'from financialdata.crawler.taiwan_stock_price import clear_data, colname_zh2en, convert_change, convert_date, crawler, gen_task_paramter_list, is_weekend, set_column, twse_header, tpex_header, crawler_twse, crawler_tpex\n'), ((12554, 12585), 
'financialdata.crawler.taiwan_stock_price.crawler_twse', 'crawler_twse', ([], {'date': '"""2000-01-04"""'}), "(date='2000-01-04')\n", (12566, 12585), False, 'from financialdata.crawler.taiwan_stock_price import clear_data, colname_zh2en, convert_change, convert_date, crawler, gen_task_paramter_list, is_weekend, set_column, twse_header, tpex_header, crawler_twse, crawler_tpex\n'), ((12848, 12879), 'financialdata.crawler.taiwan_stock_price.crawler_tpex', 'crawler_tpex', ([], {'date': '"""2021-01-05"""'}), "(date='2021-01-05')\n", (12860, 12879), False, 'from financialdata.crawler.taiwan_stock_price import clear_data, colname_zh2en, convert_change, convert_date, crawler, gen_task_paramter_list, is_weekend, set_column, twse_header, tpex_header, crawler_twse, crawler_tpex\n'), ((13290, 13321), 'financialdata.crawler.taiwan_stock_price.crawler_tpex', 'crawler_tpex', ([], {'date': '"""2021-01-01"""'}), "(date='2021-01-01')\n", (13302, 13321), False, 'from financialdata.crawler.taiwan_stock_price import clear_data, colname_zh2en, convert_change, convert_date, crawler, gen_task_paramter_list, is_weekend, set_column, twse_header, tpex_header, crawler_twse, crawler_tpex\n'), ((13556, 14353), 'pandas.DataFrame', 'pd.DataFrame', (["[{'StockID': '0050', 'TradeVolume': '4,680,733', 'Transaction': '5,327',\n 'TradeValue': '649,025,587', 'Open': '139.00', 'Max': '139.20', 'Min':\n '138.05', 'Close': '138.30', 'Dir': '<p style= color:green>-</p>',\n 'Change': '0.65', 'Date': '2021-07-01'}, {'StockID': '0051',\n 'TradeVolume': '175,374', 'Transaction': '120', 'TradeValue':\n '10,152,802', 'Open': '58.20', 'Max': '59.10', 'Min': '57.40', 'Close':\n '57.90', 'Dir': '<p style= color:green>-</p>', 'Change': '0.30', 'Date':\n '2021-07-01'}, {'StockID': '0052', 'TradeVolume': '514,042',\n 'Transaction': '270', 'TradeValue': '64,127,738', 'Open': '125.00',\n 'Max': '125.20', 'Min': '124.35', 'Close': '124.35', 'Dir':\n '<p style= color:green>-</p>', 'Change': '0.65', 'Date': '2021-07-01'}]"], {}), "([{'StockID': '0050', 'TradeVolume': '4,680,733', 'Transaction':\n '5,327', 'TradeValue': '649,025,587', 'Open': '139.00', 'Max': '139.20',\n 'Min': '138.05', 'Close': '138.30', 'Dir':\n '<p style= color:green>-</p>', 'Change': '0.65', 'Date': '2021-07-01'},\n {'StockID': '0051', 'TradeVolume': '175,374', 'Transaction': '120',\n 'TradeValue': '10,152,802', 'Open': '58.20', 'Max': '59.10', 'Min':\n '57.40', 'Close': '57.90', 'Dir': '<p style= color:green>-</p>',\n 'Change': '0.30', 'Date': '2021-07-01'}, {'StockID': '0052',\n 'TradeVolume': '514,042', 'Transaction': '270', 'TradeValue':\n '64,127,738', 'Open': '125.00', 'Max': '125.20', 'Min': '124.35',\n 'Close': '124.35', 'Dir': '<p style= color:green>-</p>', 'Change':\n '0.65', 'Date': '2021-07-01'}])\n", (13568, 14353), True, 'import pandas as pd\n'), ((14960, 14978), 'financialdata.crawler.taiwan_stock_price.convert_change', 'convert_change', (['df'], {}), '(df)\n', (14974, 14978), False, 'from financialdata.crawler.taiwan_stock_price import clear_data, colname_zh2en, convert_change, convert_date, crawler, gen_task_paramter_list, is_weekend, set_column, twse_header, tpex_header, crawler_twse, crawler_tpex\n'), ((15019, 15690), 'pandas.DataFrame', 'pd.DataFrame', (["[{'StockID': '0050', 'TradeVolume': '4,680,733', 'Transaction': '5,327',\n 'TradeValue': '649,025,587', 'Open': '139.00', 'Max': '139.20', 'Min':\n '138.05', 'Close': '138.30', 'Change': -0.65, 'Date': '2021-07-01'}, {\n 'StockID': '0051', 'TradeVolume': '175,374', 'Transaction': '120',\n 'TradeValue': 
'10,152,802', 'Open': '58.20', 'Max': '59.10', 'Min':\n '57.40', 'Close': '57.90', 'Change': -0.3, 'Date': '2021-07-01'}, {\n 'StockID': '0052', 'TradeVolume': '514,042', 'Transaction': '270',\n 'TradeValue': '64,127,738', 'Open': '125.00', 'Max': '125.20', 'Min':\n '124.35', 'Close': '124.35', 'Change': -0.65, 'Date': '2021-07-01'}]"], {}), "([{'StockID': '0050', 'TradeVolume': '4,680,733', 'Transaction':\n '5,327', 'TradeValue': '649,025,587', 'Open': '139.00', 'Max': '139.20',\n 'Min': '138.05', 'Close': '138.30', 'Change': -0.65, 'Date':\n '2021-07-01'}, {'StockID': '0051', 'TradeVolume': '175,374',\n 'Transaction': '120', 'TradeValue': '10,152,802', 'Open': '58.20',\n 'Max': '59.10', 'Min': '57.40', 'Close': '57.90', 'Change': -0.3,\n 'Date': '2021-07-01'}, {'StockID': '0052', 'TradeVolume': '514,042',\n 'Transaction': '270', 'TradeValue': '64,127,738', 'Open': '125.00',\n 'Max': '125.20', 'Min': '124.35', 'Close': '124.35', 'Change': -0.65,\n 'Date': '2021-07-01'}])\n", (15031, 15690), True, 'import pandas as pd\n'), ((16527, 16545), 'financialdata.crawler.taiwan_stock_price.convert_date', 'convert_date', (['date'], {}), '(date)\n', (16539, 16545), False, 'from financialdata.crawler.taiwan_stock_price import clear_data, colname_zh2en, convert_change, convert_date, crawler, gen_task_paramter_list, is_weekend, set_column, twse_header, tpex_header, crawler_twse, crawler_tpex\n'), ((16729, 16793), 'financialdata.crawler.taiwan_stock_price.crawler', 'crawler', ([], {'parameter': "{'date': '2021-01-05', 'data_source': 'twse'}"}), "(parameter={'date': '2021-01-05', 'data_source': 'twse'})\n", (16736, 16793), False, 'from financialdata.crawler.taiwan_stock_price import clear_data, colname_zh2en, convert_change, convert_date, crawler, gen_task_paramter_list, is_weekend, set_column, twse_header, tpex_header, crawler_twse, crawler_tpex\n'), ((16859, 16902), 'financialdata.schema.dataset.check_schema', 'check_schema', (['result_df', '"""TaiwanStockPrice"""'], {}), "(result_df, 'TaiwanStockPrice')\n", (16871, 16902), False, 'from financialdata.schema.dataset import check_schema\n'), ((17022, 17086), 'financialdata.crawler.taiwan_stock_price.crawler', 'crawler', ([], {'parameter': "{'date': '2021-01-05', 'data_source': 'tpex'}"}), "(parameter={'date': '2021-01-05', 'data_source': 'tpex'})\n", (17029, 17086), False, 'from financialdata.crawler.taiwan_stock_price import clear_data, colname_zh2en, convert_change, convert_date, crawler, gen_task_paramter_list, is_weekend, set_column, twse_header, tpex_header, crawler_twse, crawler_tpex\n'), ((17152, 17195), 'financialdata.schema.dataset.check_schema', 'check_schema', (['result_df', '"""TaiwanStockPrice"""'], {}), "(result_df, 'TaiwanStockPrice')\n", (17164, 17195), False, 'from financialdata.schema.dataset import check_schema\n'), ((4458, 4511), 'pandas.testing.assert_frame_equal', 'pd.testing.assert_frame_equal', (['result_df', 'expected_df'], {}), '(result_df, expected_df)\n', (4487, 4511), True, 'import pandas as pd\n'), ((7013, 7066), 'pandas.testing.assert_frame_equal', 'pd.testing.assert_frame_equal', (['result_df', 'expected_df'], {}), '(result_df, expected_df)\n', (7042, 7066), True, 'import pandas as pd\n'), ((10599, 10652), 'pandas.testing.assert_frame_equal', 'pd.testing.assert_frame_equal', (['result_df', 'expected_df'], {}), '(result_df, expected_df)\n', (10628, 10652), True, 'import pandas as pd\n'), ((16310, 16363), 'pandas.testing.assert_frame_equal', 'pd.testing.assert_frame_equal', (['result_df', 'expected_df'], {}), '(result_df, 
expected_df)\n', (16339, 16363), True, 'import pandas as pd\n')] |
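# How these tests are typically run (sketch; the test-file path is an assumption).
# The `mocker` fixture used in test_crawler_twse_error comes from the pytest-mock
# plugin, so it must be installed alongside pytest.
import pytest
pytest.main(["-v", "tests/crawler/test_taiwan_stock_price.py"])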
from kombu.mixins import ConsumerMixin
import pull_global_vars as gv
import uuid
import time
import os
#from util import *
import json
import traceback
from pull_util import *
from kombu import Connection
from kombu.pools import producers
#from pull_util import trans2json, g_logger
from redis_oper import *
from kombu import Exchange, Queue
JSONRPC_ERROR = 122001
RESULTS_ERROR = 122009
COPYRIGHT = 0
UNCOPYRIGHT = 1
UNDETECT = 2
WORKING = 3
OVERALL_RESULTS = (COPYRIGHT, UNCOPYRIGHT, UNDETECT, WORKING)
class worker_result(ConsumerMixin):
def __init__(self, connection, exchange, queue, routing_key):
self.connection = connection
self.exchange = exchange
self.queue = queue
self.routing_key = routing_key
self.pushresult_connection = Connection(gv.pushresult_url)
self.pushresult_exchange = Exchange(gv.pushresult_exchange)
def get_consumers(self, Consumer, channel):
task_exchange = Exchange(self.exchange)
task_queues = Queue(
self.queue, task_exchange, routing_key=self.routing_key)
return [Consumer(queues=task_queues,
accept=['pickle', 'json'],
callbacks=[self.process_task])]
def on_consume_ready(self, connection, channel, consumers):
for consumer in consumers:
consumer.qos(prefetch_count=1)
    def check_input_params(self, data):
        result = [0, 0]
        if 'jsonrpc' not in data:
            result[0] = JSONRPC_ERROR
            result[1] = "There is no key named jsonrpc"
            g_logger.error(
                trans2json("input params check failed: %s" % result[1]))
        elif 'results' not in data:
            result[0] = RESULTS_ERROR
            result[1] = "There is no key named results"
            g_logger.error(
                trans2json("input results check failed: %s" % result[1]))
        else:
            g_logger.info(trans2json("----Params check done.----"))
        return result
def trans_error_json(self, result, data):
res = {}
res['jsonrpc'] = "2.0"
        if 'id' in data:
res['id'] = data['id']
res['error'] = {}
res['error']['code'] = result[0]
res['error']['message'] = result[1]
error_message = json.dumps(res)
return error_message
def parse_data_pushresult(self, data, set_redis):
for result in data['results']:
message = {}
message['params'] = {}
message['jsonrpc'] = '2.0'
message['id'] = 2
message['params']['client_id'] = result['extra_info']['client_id']
            if 'url' in result['extra_info']:
message['params']['url'] = result['extra_info']['url']
            if 'seed_file' in result['extra_info']:
message['params']['seed_file'] = result[
'extra_info']['seed_file']
message['params']['result'] = []
if len(set_redis) > 0:
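                # redis entries are stringified dicts; eval() restores them
                # (each value holds either a status code or a file path)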
                for entry in set_redis:
                    entry = eval(entry)
                    result_tmp = {}
                    for value in entry.values():
                        if value in OVERALL_RESULTS:
                            result_tmp['status'] = value
                        else:
                            result_tmp['file_path'] = value
                    message['params']['result'].append(result_tmp)
else:
result_tmp = {}
result_tmp['file_path'] = result['extra_info']['file_path']
if len(result['matches']) > 0:
result_tmp['status'] = COPYRIGHT
else:
result_tmp['status'] = UNCOPYRIGHT
message['params']['result'].append(result_tmp)
self.send_task_pushresult(message)
def send_task_pushresult(self, data):
message = json.dumps(data)
g_logger_info.info(
trans2json("send to push result message %s" % (data),"qb_pull_send_result"))
with producers[self.pushresult_connection].acquire(block=True) as producer:
producer.publish(message,
serializer='json',
compression='bzip2',
exchange=self.pushresult_exchange,
declare=[self.pushresult_exchange],
routing_key=gv.pushresult_routing_key)
def process_task(self, body, message):
try:
#data = body
g_logger_info.info(
trans2json("receive vddb query result message %s" % (body),"qb_pull_receive_vddb"))
data = json.loads(body)
result = self.check_input_params(data)
if result[0] != 0:
error_message = self.trans_error_json(result, data)
g_logger.error("response info %s" % error_message)
message.ack()
else:
# check redis
                ret_code, set_redis = checkDnaFromRedis(data)
if ret_code == gv.RESULT_WAIT:
message.ack()
return
# push to result
self.parse_data_pushresult(data, set_redis)
message.ack()
except Exception:
message.ack()
g_logger.error(trans2json(
"fetch_query_result_process errors happend %s" % str(traceback.format_exc())))
| [
"traceback.format_exc",
"kombu.Queue",
"json.loads",
"json.dumps",
"kombu.Connection",
"kombu.Exchange"
] | [((784, 813), 'kombu.Connection', 'Connection', (['gv.pushresult_url'], {}), '(gv.pushresult_url)\n', (794, 813), False, 'from kombu import Connection\n'), ((849, 881), 'kombu.Exchange', 'Exchange', (['gv.pushresult_exchange'], {}), '(gv.pushresult_exchange)\n', (857, 881), False, 'from kombu import Exchange, Queue\n'), ((955, 978), 'kombu.Exchange', 'Exchange', (['self.exchange'], {}), '(self.exchange)\n', (963, 978), False, 'from kombu import Exchange, Queue\n'), ((1001, 1063), 'kombu.Queue', 'Queue', (['self.queue', 'task_exchange'], {'routing_key': 'self.routing_key'}), '(self.queue, task_exchange, routing_key=self.routing_key)\n', (1006, 1063), False, 'from kombu import Exchange, Queue\n'), ((2296, 2311), 'json.dumps', 'json.dumps', (['res'], {}), '(res)\n', (2306, 2311), False, 'import json\n'), ((3885, 3901), 'json.dumps', 'json.dumps', (['data'], {}), '(data)\n', (3895, 3901), False, 'import json\n'), ((4669, 4685), 'json.loads', 'json.loads', (['body'], {}), '(body)\n', (4679, 4685), False, 'import json\n'), ((5435, 5457), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (5455, 5457), False, 'import traceback\n')] |
# Copyright 2018 <NAME><<EMAIL>>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
Created on 2018/01/13
@author: nob0tate14
'''
import argparse
import random
import secrets
import string
class PyPagen(object):
'''
ordinary password generator
'''
SEQ_FUL = string.ascii_lowercase + string.ascii_uppercase + \
string.digits + string.punctuation
SEQ_ALW = "abcdefghijklmnopqrstuvwxyz"
SEQ_AUP = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
SEQ_NUM = "0123456789"
SEQ_SBL = "\"#$%&'()*+,-./:;<=>?@[]^_`{|}~"
def __init__(self, *, seq_alw=SEQ_ALW, seq_aup=SEQ_AUP, seq_num=SEQ_NUM,
seq_sbl=SEQ_SBL, lenmin=8, lenmax=8,
up=True, low=True, num=True, symbol=True, strict=True):
'''
Constructor
'''
self.set_param(seq_alw=seq_alw, seq_aup=seq_aup, seq_num=seq_num,
seq_sbl=seq_sbl, lenmin=lenmin, lenmax=lenmax,
up=up, low=low, num=num, symbol=symbol, strict=strict)
def set_param(self, *, seq_alw=SEQ_ALW, seq_aup=SEQ_AUP, seq_num=SEQ_NUM,
seq_sbl=SEQ_SBL, lenmin=8, lenmax=8,
up=True, low=True, num=True, symbol=True, strict=True):
self.seq_alw = seq_alw
self.seq_aup = seq_aup
self.seq_num = seq_num
self.seq_sbl = seq_sbl
self.lenmin = lenmin
self.lenmax = lenmax
self.up = up
self.low = low
self.num = num
self.symbol = symbol
self.strict = strict
def get_secret(self, *, seq_alw=None, seq_aup=None, seq_num=None,
seq_sbl=None, lenmin=None, lenmax=None,
up=None, low=None, num=None, symbol=None, strict=None):
if seq_alw is None:
seq_alw = self.seq_alw
if seq_aup is None:
seq_aup = self.seq_aup
if seq_num is None:
seq_num = self.seq_num
if seq_sbl is None:
seq_sbl = self.seq_sbl
if lenmin is None:
lenmin = self.lenmin
if lenmax is None:
lenmax = self.lenmax
if up is None:
up = self.up
if low is None:
low = self.low
if num is None:
num = self.num
if symbol is None:
symbol = self.symbol
if strict is None:
strict = self.strict
secrt = ""
loopcount = lenmin
if lenmin >= lenmax:
loopcount = lenmax
if lenmin < lenmax:
            loopcount = random.randrange(lenmin, lenmax + 1)  # make lenmax inclusive
seq_list = []
if low is True:
seq_list.append(seq_alw)
if strict is True:
secrt += secrets.choice(seq_alw)
if up is True:
seq_list.append(seq_aup)
if strict is True:
secrt += secrets.choice(seq_aup)
if num is True:
seq_list.append(seq_num)
if strict is True:
secrt += secrets.choice(seq_num)
if symbol is True:
seq_list.append(seq_sbl)
if strict is True:
secrt += secrets.choice(seq_sbl)
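        # fill the remaining length with characters drawn from the enabled classes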
for dummy in range(len(secrt), loopcount):
ii = secrets.randbelow(len(seq_list))
# print(seq_list[ii - 1])
secrt += secrets.choice(seq_list[ii - 1])
secrt = "".join(random.sample(secrt, len(secrt)))
return secrt
def get_secret_list(self, count=1):
ret_list = []
for dummy in range(count):
ret_list.append(self.get_secret())
return ret_list
def get_secret(lenmax=8):
pg = PyPagen(lenmin=lenmax, lenmax=lenmax)
    return pg.get_secret()
def do_parse_args():
parser = argparse.ArgumentParser(
prog="pa.py",
usage="python3 pa.py 8",
description="ordinary password generator",
add_help=True)
parser.add_argument("length", nargs="?", action="store", type=int,
default=8, help="password length")
args = parser.parse_args()
return args
if __name__ == '__main__':
args = do_parse_args()
pg = PyPagen(lenmax=args.length)
print(pg.get_secret())
''' end of line
'''
| [
"secrets.choice",
"argparse.ArgumentParser",
"random.randrange"
] | [((4243, 4368), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'prog': '"""pa.py"""', 'usage': '"""python3 pa.py 8"""', 'description': '"""ordinary password generator"""', 'add_help': '(True)'}), "(prog='pa.py', usage='python3 pa.py 8', description=\n 'ordinary password generator', add_help=True)\n", (4266, 4368), False, 'import argparse\n'), ((3037, 3072), 'random.randrange', 'random.randrange', (['lenmin', 'lenmax', '(1)'], {}), '(lenmin, lenmax, 1)\n', (3053, 3072), False, 'import random\n'), ((3823, 3855), 'secrets.choice', 'secrets.choice', (['seq_list[ii - 1]'], {}), '(seq_list[ii - 1])\n', (3837, 3855), False, 'import secrets\n'), ((3213, 3236), 'secrets.choice', 'secrets.choice', (['seq_alw'], {}), '(seq_alw)\n', (3227, 3236), False, 'import secrets\n'), ((3353, 3376), 'secrets.choice', 'secrets.choice', (['seq_aup'], {}), '(seq_aup)\n', (3367, 3376), False, 'import secrets\n'), ((3494, 3517), 'secrets.choice', 'secrets.choice', (['seq_num'], {}), '(seq_num)\n', (3508, 3517), False, 'import secrets\n'), ((3638, 3661), 'secrets.choice', 'secrets.choice', (['seq_sbl'], {}), '(seq_sbl)\n', (3652, 3661), False, 'import secrets\n')] |
# encoding=utf-8
"""
@author: <NAME>
@contact: <EMAIL>
@time: 2021-12-30 14:30
"""
import os
from ppgan.apps.wav2lip_predictor import Wav2LipPredictor
from ppgan.apps.first_order_predictor import FirstOrderPredictor
# Hot loading: the Wav2Lip predictor is created once at import time
wav2lip_predictor = Wav2LipPredictor(static=False,
face_enhancement=True)
def wav2lip(input_video, input_audio, output):
wav2lip_predictor.run(input_video, input_audio, output)
return output
def run_predictor(source_image, driving_video, output_path):
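    # FirstOrderPredictor writes <filename> into the <output> directory, so split the requested path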
output, filename = os.path.split(output_path)
first_order_predictor = FirstOrderPredictor(output=output,
filename=filename,
face_enhancement=True,
ratio=0.4,
relative=True,
image_size=512,
adapt_scale=True)
first_order_predictor.run(source_image, driving_video)
return os.path.join(output, filename)
| [
"os.path.split",
"ppgan.apps.first_order_predictor.FirstOrderPredictor",
"os.path.join",
"ppgan.apps.wav2lip_predictor.Wav2LipPredictor"
] | [((243, 296), 'ppgan.apps.wav2lip_predictor.Wav2LipPredictor', 'Wav2LipPredictor', ([], {'static': '(False)', 'face_enhancement': '(True)'}), '(static=False, face_enhancement=True)\n', (259, 296), False, 'from ppgan.apps.wav2lip_predictor import Wav2LipPredictor\n'), ((547, 573), 'os.path.split', 'os.path.split', (['output_path'], {}), '(output_path)\n', (560, 573), False, 'import os\n'), ((602, 742), 'ppgan.apps.first_order_predictor.FirstOrderPredictor', 'FirstOrderPredictor', ([], {'output': 'output', 'filename': 'filename', 'face_enhancement': '(True)', 'ratio': '(0.4)', 'relative': '(True)', 'image_size': '(512)', 'adapt_scale': '(True)'}), '(output=output, filename=filename, face_enhancement=True,\n ratio=0.4, relative=True, image_size=512, adapt_scale=True)\n', (621, 742), False, 'from ppgan.apps.first_order_predictor import FirstOrderPredictor\n'), ((1097, 1127), 'os.path.join', 'os.path.join', (['output', 'filename'], {}), '(output, filename)\n', (1109, 1127), False, 'import os\n')] |
import http
import json
import os
import random
import string
import unittest
import uuid
import boto3
from boto3.dynamodb.conditions import Key
from moto import mock_dynamodb2
from common.constants import Constants
from common.encoders import encode_resource, encode_file_metadata, encode_files, encode_creator, encode_metadata
from common.helpers import remove_none_values
from data.creator import Creator
from data.file import File
from data.file_metadata import FileMetadata
from data.metadata import Metadata
from data.resource import Resource
from data.title import Title
def unittest_lambda_handler(event, context):
unittest.TextTestRunner().run(
unittest.TestLoader().loadTestsFromTestCase(TestHandlerCase))
def remove_mock_database(dynamodb):
dynamodb.Table(os.environ[Constants.ENV_VAR_TABLE_NAME]).delete()
def generate_mock_event(operation, resource):
body = Body(operation, resource)
body_value = json.dumps(body, default=encode_body)
return {
'body': body_value
}
@mock_dynamodb2
class TestHandlerCase(unittest.TestCase):
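    """Tests for the resource RequestHandler against a moto-mocked DynamoDB table."""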
EXISTING_RESOURCE_IDENTIFIER = 'ebf20333-35a5-4a06-9c58-68ea688a9a8b'
EXISTING_RESOURCE_IDENTIFIER_MISSING_CREATED_DATE = 'acf20333-35a5-4a06-9c58-68ea688a9a9c'
def setUp(self):
"""Mocked AWS Credentials for moto."""
os.environ['AWS_ACCESS_KEY_ID'] = 'testing'
os.environ['AWS_SECRET_ACCESS_KEY'] = 'testing'
os.environ['AWS_SECURITY_TOKEN'] = 'testing'
os.environ['AWS_SESSION_TOKEN'] = 'testing'
os.environ[Constants.ENV_VAR_TABLE_NAME] = 'testing'
os.environ[Constants.ENV_VAR_REGION] = 'eu-west-1'
def tearDown(self):
pass
def setup_mock_database(self):
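        # create the mocked table keyed on (resource_identifier, modifiedDate) and seed it with two items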
dynamodb = boto3.resource('dynamodb', region_name=os.environ[Constants.ENV_VAR_REGION])
table_connection = dynamodb.create_table(TableName=os.environ[Constants.ENV_VAR_TABLE_NAME],
KeySchema=[{'AttributeName': 'resource_identifier', 'KeyType': 'HASH'},
{'AttributeName': 'modifiedDate', 'KeyType': 'RANGE'}],
AttributeDefinitions=[
{'AttributeName': 'resource_identifier', 'AttributeType': 'S'},
{'AttributeName': 'modifiedDate', 'AttributeType': 'S'}],
ProvisionedThroughput={'ReadCapacityUnits': 1,
'WriteCapacityUnits': 1})
table_connection.put_item(
Item={
'resource_identifier': self.EXISTING_RESOURCE_IDENTIFIER,
'modifiedDate': '2019-10-24T12:57:02.655994Z',
'createdDate': '2019-10-24T12:57:02.655994Z',
'metadata': {
'titles': {
'no': 'En tittel'
}
},
'files': {},
'owner': '<EMAIL>'
}
)
table_connection.put_item(
Item={
'resource_identifier': self.EXISTING_RESOURCE_IDENTIFIER_MISSING_CREATED_DATE,
'modifiedDate': '2019-10-24T12:57:02.655994Z',
'metadata': {
'titles': {
'no': 'En tittel'
}
},
'files': {},
'owner': '<EMAIL>'
}
)
return dynamodb
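    # NOTE: the uuid defaults in the two generator helpers below are evaluated once
    # at class-definition time, so calls without an explicit uuid reuse the same value.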
def generate_random_resource(self, time_created, time_modified=None, uuid=uuid.uuid4().__str__()):
if time_modified is None:
time_modified = time_created
return {
'resource_identifier': uuid,
'modifiedDate': time_modified,
'createdDate': time_created,
'metadata': {
'titles': {
'no': self.random_word(6)
}
},
'files': {},
'owner': '<EMAIL>'
}
def random_word(self, length):
letters = string.ascii_lowercase
return ''.join(random.choice(letters) for i in range(length))
def generate_mock_resource(self, time_created=None, time_modified=None, uuid=uuid.uuid4().__str__()):
title_1 = Title('no', self.random_word(6))
title_2 = Title('en', self.random_word(6))
titles = {title_1.language_code: title_1.title, title_2.language_code: title_2.title}
creator_one = Creator('AUTHORITY_IDENTIFIER_1')
creator_two = Creator('AUTHORITY_IDENTIFIER_2')
creators = [creator_one, creator_two]
metadata = Metadata(creators, 'https://hdl.handle.net/11250.1/1', 'LICENSE_IDENTIFIER_1', '2019', 'Unit',
titles, 'text')
file_metadata_1 = FileMetadata(self.random_word(6) + '.txt', 'text/plain', '595f44fec1e92a71d3e9e77456ba80d1',
'987654321')
file_metadata_2 = FileMetadata(self.random_word(6) + '.pdf', 'application/pdf',
'71f920fa275127a7b60fa4d4d41432a3', '123456789')
file_1 = File('FILE_IDENTIFIER_1', file_metadata_1)
file_2 = File('FILE_IDENTIFIER_2', file_metadata_2)
files = dict()
files[file_1.identifier] = file_1.file_metadata
files[file_2.identifier] = file_2.file_metadata
return Resource(uuid, time_modified, time_created, metadata, files, '<EMAIL>')
def test_handler_insert_resource(self):
from src.classes.RequestHandler import RequestHandler
dynamodb = self.setup_mock_database()
request_handler = RequestHandler(dynamodb)
resource = self.generate_mock_resource()
event = generate_mock_event(Constants.OPERATION_INSERT, resource)
handler_insert_response = request_handler.handler(event, None)
self.assertEqual(handler_insert_response[Constants.RESPONSE_STATUS_CODE], http.HTTPStatus.CREATED,
'HTTP Status code not 201')
remove_mock_database(dynamodb)
def test_handler_insert_resource_missing_resource_metadata(self):
from src.classes.RequestHandler import RequestHandler
dynamodb = self.setup_mock_database()
request_handler = RequestHandler(dynamodb)
resource = self.generate_mock_resource()
resource.metadata = None
event = generate_mock_event(Constants.OPERATION_INSERT, resource)
handler_insert_response = request_handler.handler(event, None)
self.assertEqual(handler_insert_response[Constants.RESPONSE_STATUS_CODE], http.HTTPStatus.BAD_REQUEST,
'HTTP Status code not 400')
remove_mock_database(dynamodb)
def test_handler_insert_resource_missing_resource_files(self):
from src.classes.RequestHandler import RequestHandler
dynamodb = self.setup_mock_database()
request_handler = RequestHandler(dynamodb)
resource = self.generate_mock_resource()
resource.files = None
event = generate_mock_event(Constants.OPERATION_INSERT, resource)
handler_insert_response = request_handler.handler(event, None)
self.assertEqual(handler_insert_response[Constants.RESPONSE_STATUS_CODE], http.HTTPStatus.BAD_REQUEST,
'HTTP Status code not 400')
remove_mock_database(dynamodb)
def test_handler_insert_resource_invalid_resource_metadata_type_in_event_body(self):
from src.classes.RequestHandler import RequestHandler
dynamodb = self.setup_mock_database()
request_handler = RequestHandler(dynamodb)
event = {
"body": "{\"operation\": \"INSERT\",\"resource\": {\"resource_identifier\": "
"\"fbf20333-35a5-4a06-9c58-68ea688a9a8b\", \"owner\": \"<EMAIL>.no\", \"files\": {}, \"metadata\": \"invalid type\"}}"
}
handler_modify_response = request_handler.handler(event, None)
self.assertEqual(handler_modify_response[Constants.RESPONSE_STATUS_CODE], http.HTTPStatus.BAD_REQUEST,
'HTTP Status code not 400')
remove_mock_database(dynamodb)
def test_handler_insert_resource_invalid_resource_files_type_in_event_body(self):
from src.classes.RequestHandler import RequestHandler
dynamodb = self.setup_mock_database()
request_handler = RequestHandler(dynamodb)
event = {
"body": "{\"operation\": \"INSERT\",\"resource\": {\"resource_identifier\": "
"\"fbf20333-35a5-4a06-9c58-68ea688a9a8b\", \"owner\": \"<EMAIL>\", \"files\": \"invalid type\", \"metadata\": {}}}"
}
handler_modify_response = request_handler.handler(event, None)
self.assertEqual(handler_modify_response[Constants.RESPONSE_STATUS_CODE], http.HTTPStatus.BAD_REQUEST,
'HTTP Status code not 400')
remove_mock_database(dynamodb)
def test_handler_insert_resource_missing_resource_owner_in_event_body(self):
from src.classes.RequestHandler import RequestHandler
dynamodb = self.setup_mock_database()
request_handler = RequestHandler(dynamodb)
resource = self.generate_mock_resource()
resource.owner = None
event = generate_mock_event(Constants.OPERATION_INSERT, resource)
handler_insert_response = request_handler.handler(event, None)
self.assertEqual(handler_insert_response[Constants.RESPONSE_STATUS_CODE], http.HTTPStatus.BAD_REQUEST,
'HTTP Status code not 400')
remove_mock_database(dynamodb)
def test_handler_modify_resource(self):
from src.classes.RequestHandler import RequestHandler
dynamodb = self.setup_mock_database()
request_handler = RequestHandler(dynamodb)
resource = self.generate_mock_resource(None, None, self.EXISTING_RESOURCE_IDENTIFIER)
event = generate_mock_event(Constants.OPERATION_MODIFY, resource)
handler_modify_response = request_handler.handler(event, None)
self.assertEqual(handler_modify_response[Constants.RESPONSE_STATUS_CODE], http.HTTPStatus.OK,
'HTTP Status code not 200')
remove_mock_database(dynamodb)
def test_handler_modify_resource_missing_resource_identifier(self):
from src.classes.RequestHandler import RequestHandler
dynamodb = self.setup_mock_database()
request_handler = RequestHandler(dynamodb)
resource = self.generate_mock_resource(None, None, self.EXISTING_RESOURCE_IDENTIFIER)
resource.resource_identifier = None
event = generate_mock_event(Constants.OPERATION_MODIFY, resource)
handler_modify_response = request_handler.handler(event, None)
self.assertEqual(handler_modify_response[Constants.RESPONSE_STATUS_CODE], http.HTTPStatus.BAD_REQUEST,
'HTTP Status code not 400')
remove_mock_database(dynamodb)
def test_handler_modify_resource_missing_resource_metadata_in_event_body(self):
from src.classes.RequestHandler import RequestHandler
dynamodb = self.setup_mock_database()
request_handler = RequestHandler(dynamodb)
resource = self.generate_mock_resource(None, None, self.EXISTING_RESOURCE_IDENTIFIER)
resource.metadata = None
event = generate_mock_event(Constants.OPERATION_MODIFY, resource)
handler_modify_response = request_handler.handler(event, None)
self.assertEqual(handler_modify_response[Constants.RESPONSE_STATUS_CODE], http.HTTPStatus.BAD_REQUEST,
'HTTP Status code not 400')
remove_mock_database(dynamodb)
def test_handler_modify_resource_missing_resource_owner_in_event_body(self):
from src.classes.RequestHandler import RequestHandler
dynamodb = self.setup_mock_database()
request_handler = RequestHandler(dynamodb)
resource = self.generate_mock_resource(None, None, self.EXISTING_RESOURCE_IDENTIFIER)
resource.owner = None
event = generate_mock_event(Constants.OPERATION_MODIFY, resource)
handler_modify_response = request_handler.handler(event, None)
self.assertEqual(handler_modify_response[Constants.RESPONSE_STATUS_CODE], http.HTTPStatus.BAD_REQUEST,
'HTTP Status code not 400')
remove_mock_database(dynamodb)
def test_handler_modify_resource_missing_resource_files_in_event_body(self):
from src.classes.RequestHandler import RequestHandler
dynamodb = self.setup_mock_database()
request_handler = RequestHandler(dynamodb)
resource = self.generate_mock_resource(None, None, self.EXISTING_RESOURCE_IDENTIFIER)
resource.files = None
event = generate_mock_event(Constants.OPERATION_MODIFY, resource)
handler_modify_response = request_handler.handler(event, None)
self.assertEqual(handler_modify_response[Constants.RESPONSE_STATUS_CODE], http.HTTPStatus.BAD_REQUEST,
'HTTP Status code not 400')
remove_mock_database(dynamodb)
def test_handler_modify_resource_empty_resource_metadata_in_event_body(self):
from src.classes.RequestHandler import RequestHandler
dynamodb = self.setup_mock_database()
request_handler = RequestHandler(dynamodb)
resource = self.generate_mock_resource(None, None, self.EXISTING_RESOURCE_IDENTIFIER)
resource.metadata = Metadata(None, None, None, None, None, None)
event = generate_mock_event(Constants.OPERATION_MODIFY, resource)
handler_modify_response = request_handler.handler(event, None)
self.assertEqual(handler_modify_response[Constants.RESPONSE_STATUS_CODE], http.HTTPStatus.OK,
'HTTP Status code not 200')
remove_mock_database(dynamodb)
def test_handler_modify_resource_invalid_resource_metadata_type_in_event_body(self):
from src.classes.RequestHandler import RequestHandler
dynamodb = self.setup_mock_database()
request_handler = RequestHandler(dynamodb)
event = {
"body": "{\"operation\": \"MODIFY\",\"resource\": {\"resource_identifier\": "
"\"ebf20333-35a5-4a06-9c58-68ea688a9a8b\", \"owner\": \"<EMAIL>\", \"files\": {}, \"metadata\": \"invalid type\"}}"
}
handler_modify_response = request_handler.handler(event, None)
self.assertEqual(handler_modify_response[Constants.RESPONSE_STATUS_CODE], http.HTTPStatus.BAD_REQUEST,
'HTTP Status code not 400')
self.assertEqual(handler_modify_response[Constants.RESPONSE_BODY],
'Resource with identifier ebf20333-35a5-4a06-9c58-68ea688a9a8b has invalid attribute type for metadata',
'HTTP Status code not 400')
remove_mock_database(dynamodb)
def test_handler_modify_resource_invalid_files_type_in_event_body(self):
from src.classes.RequestHandler import RequestHandler
dynamodb = self.setup_mock_database()
request_handler = RequestHandler(dynamodb)
event = {
"body": "{\"operation\": \"MODIFY\",\"resource\": {\"resource_identifier\": "
"\"ebf20333-35a5-4a06-9c58-68ea688a9a8b\", \"owner\": \"<EMAIL>\", \"files\": \"invalid type\", \"metadata\": {}}}"
}
handler_modify_response = request_handler.handler(event, None)
self.assertEqual(handler_modify_response[Constants.RESPONSE_STATUS_CODE], http.HTTPStatus.BAD_REQUEST,
'HTTP Status code not 400')
self.assertEqual(handler_modify_response[Constants.RESPONSE_BODY],
'Resource with identifier ebf20333-35a5-4a06-9c58-68ea688a9a8b has invalid attribute type for files',
'HTTP Status code not 400')
remove_mock_database(dynamodb)
def test_handler_modify_resource_invalid_resource_identifier_field_json_in_event_body(self):
from src.classes.RequestHandler import RequestHandler
dynamodb = self.setup_mock_database()
request_handler = RequestHandler(dynamodb)
event = {
"body": "{\"operation\": \"MODIFY\",\"resource\": {\"identifer\": "
"\"ebf20333-35a5-4a06-9c58-68ea688a9a8b\", \"owner\": \"<EMAIL>\", \"files\": \"{}\", \"metadata\": {}}}"
}
handler_modify_response = request_handler.handler(event, None)
self.assertEqual(handler_modify_response[Constants.RESPONSE_STATUS_CODE], http.HTTPStatus.BAD_REQUEST,
'HTTP Status code not 400')
remove_mock_database(dynamodb)
def test_handler_modify_resource_unexpected_resource_field_in_event_body(self):
from src.classes.RequestHandler import RequestHandler
dynamodb = self.setup_mock_database()
request_handler = RequestHandler(dynamodb)
event = {
"body": "{\"operation\": \"MODIFY\",\"resource\": {\"resource_identifier\": "
"\"ebf20333-35a5-4a06-9c58-68ea688a9a8b\", \"registrator\": \"<EMAIL>\", \"owner\": \"<EMAIL>\", \"files\": \"{}\", \"metadata\": {}}}"
}
handler_modify_response = request_handler.handler(event, None)
self.assertEqual(handler_modify_response[Constants.RESPONSE_STATUS_CODE], http.HTTPStatus.BAD_REQUEST,
'HTTP Status code not 400')
remove_mock_database(dynamodb)
def test_handler_modify_resource_created_date_missing_in_existing_resource(self):
from src.classes.RequestHandler import RequestHandler
dynamodb = self.setup_mock_database()
request_handler = RequestHandler(dynamodb)
resource = self.generate_mock_resource(None, None, self.EXISTING_RESOURCE_IDENTIFIER_MISSING_CREATED_DATE)
event = generate_mock_event(Constants.OPERATION_MODIFY, resource)
handler_modify_response = request_handler.handler(event, None)
self.assertEqual(handler_modify_response[Constants.RESPONSE_STATUS_CODE], http.HTTPStatus.BAD_REQUEST,
'HTTP Status code not 400')
self.assertEqual(handler_modify_response[Constants.RESPONSE_BODY],
'Resource with identifier acf20333-35a5-4a06-9c58-68ea688a9a9c has no ' + Constants.DDB_FIELD_CREATED_DATE + ' in DB',
'HTTP Status code not 400')
remove_mock_database(dynamodb)
def test_handler_modify_resource_unknown_resource_identifier_in_event_body(self):
from src.classes.RequestHandler import RequestHandler
dynamodb = self.setup_mock_database()
request_handler = RequestHandler(dynamodb)
resource = self.generate_mock_resource(None, None, 'xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx')
event = generate_mock_event(Constants.OPERATION_MODIFY, resource)
handler_modify_response = request_handler.handler(event, None)
self.assertEqual(handler_modify_response[Constants.RESPONSE_STATUS_CODE], http.HTTPStatus.BAD_REQUEST,
'HTTP Status code not 400')
self.assertEqual(handler_modify_response['body'],
'Resource with identifier xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx not found',
'Did not get expected error message')
remove_mock_database(dynamodb)
def test_handler_unknown_operation_in_event_body(self):
from src.classes.RequestHandler import RequestHandler
dynamodb = self.setup_mock_database()
request_handler = RequestHandler(dynamodb)
resource = self.generate_mock_resource(None, None, self.EXISTING_RESOURCE_IDENTIFIER)
event = generate_mock_event('UNKNOWN_OPERATION', resource)
handler_modify_response = request_handler.handler(event, None)
self.assertEqual(handler_modify_response[Constants.RESPONSE_STATUS_CODE], http.HTTPStatus.BAD_REQUEST,
'HTTP Status code not 400')
remove_mock_database(dynamodb)
def test_handler_missing_resource_in_event_body(self):
from src.classes.RequestHandler import RequestHandler
dynamodb = self.setup_mock_database()
request_handler = RequestHandler(dynamodb)
event = generate_mock_event(Constants.OPERATION_INSERT, None)
handler_modify_response = request_handler.handler(event, None)
self.assertEqual(handler_modify_response[Constants.RESPONSE_STATUS_CODE], http.HTTPStatus.BAD_REQUEST,
'HTTP Status code not 400')
remove_mock_database(dynamodb)
def test_handler_missing_operation_in_event_body(self):
from src.classes.RequestHandler import RequestHandler
dynamodb = self.setup_mock_database()
request_handler = RequestHandler(dynamodb)
resource = self.generate_mock_resource(None, None, self.EXISTING_RESOURCE_IDENTIFIER)
event = generate_mock_event(None, resource)
handler_modify_response = request_handler.handler(event, None)
self.assertEqual(handler_modify_response[Constants.RESPONSE_STATUS_CODE], http.HTTPStatus.BAD_REQUEST,
'HTTP Status code not 400')
remove_mock_database(dynamodb)
def test_handler_missing_event(self):
from src.classes.RequestHandler import RequestHandler
dynamodb = self.setup_mock_database()
request_handler = RequestHandler(dynamodb)
handler_modify_response = request_handler.handler(None, None)
self.assertEqual(handler_modify_response[Constants.RESPONSE_STATUS_CODE], http.HTTPStatus.BAD_REQUEST,
'HTTP Status code not 400')
remove_mock_database(dynamodb)
def test_insert_resource(self):
from src.classes.RequestHandler import RequestHandler
dynamodb = self.setup_mock_database()
request_handler = RequestHandler(dynamodb)
resource = self.generate_mock_resource(None, None, None)
event = generate_mock_event(Constants.OPERATION_INSERT, resource)
handler_insert_response = request_handler.handler(event, None)
resource_dict_from_json = json.loads(event[Constants.EVENT_BODY]).get(Constants.JSON_ATTRIBUTE_NAME_RESOURCE)
resource_inserted = Resource.from_dict(resource_dict_from_json)
self.assertEqual(handler_insert_response[Constants.RESPONSE_STATUS_CODE], http.HTTPStatus.CREATED,
'HTTP Status code not 201')
resource_identifier = json.loads(handler_insert_response[Constants.RESPONSE_BODY]).get('resource_identifier')
query_results = request_handler.get_table_connection().query(
KeyConditionExpression=Key(Constants.DDB_FIELD_RESOURCE_IDENTIFIER).eq(resource_identifier),
ScanIndexForward=True
)
inserted_resource = query_results[Constants.DDB_RESPONSE_ATTRIBUTE_NAME_ITEMS][0]
self.assertIsNotNone(inserted_resource[Constants.DDB_FIELD_CREATED_DATE], 'Value not persisted as expected')
self.assertIsNotNone(inserted_resource[Constants.DDB_FIELD_MODIFIED_DATE], 'Value not persisted as expected')
self.assertIsNotNone(inserted_resource[Constants.DDB_FIELD_METADATA], 'Value not persisted as expected')
self.assertEqual(inserted_resource[Constants.DDB_FIELD_MODIFIED_DATE],
inserted_resource[Constants.DDB_FIELD_CREATED_DATE],
'Value not persisted as expected')
self.assertEqual(inserted_resource[Constants.DDB_FIELD_METADATA], resource_inserted.metadata,
'Value not persisted as expected')
remove_mock_database(dynamodb)
def test_modify_resource(self):
from src.classes.RequestHandler import RequestHandler
dynamodb = self.setup_mock_database()
request_handler = RequestHandler(dynamodb)
resource = self.generate_mock_resource(None, None, None)
event = generate_mock_event(Constants.OPERATION_INSERT, resource)
handler_insert_response = request_handler.handler(event, None)
created_resource_identifier = json.loads(handler_insert_response[Constants.RESPONSE_BODY]).get(
'resource_identifier')
resource_dict_from_json = json.loads(event[Constants.EVENT_BODY]).get(Constants.JSON_ATTRIBUTE_NAME_RESOURCE)
resource_inserted = Resource.from_dict(resource_dict_from_json)
resource_inserted.resource_identifier = created_resource_identifier
for counter in range(2):
resource = self.generate_mock_resource(None, None, resource_inserted.resource_identifier)
event = generate_mock_event(Constants.OPERATION_MODIFY, resource)
handler_modify_response = request_handler.handler(event, None)
self.assertEqual(handler_modify_response[Constants.RESPONSE_STATUS_CODE], http.HTTPStatus.OK,
'HTTP Status code not 200')
query_results = request_handler.get_table_connection().query(
KeyConditionExpression=Key(Constants.DDB_FIELD_RESOURCE_IDENTIFIER).eq(
resource_inserted.resource_identifier),
ScanIndexForward=True
)
self.assertEqual(len(query_results[Constants.DDB_RESPONSE_ATTRIBUTE_NAME_ITEMS]), 3,
'Value not persisted as expected')
initial_resource = query_results[Constants.DDB_RESPONSE_ATTRIBUTE_NAME_ITEMS][0]
first_modification_resource = query_results[Constants.DDB_RESPONSE_ATTRIBUTE_NAME_ITEMS][1]
second_modification_resource = query_results[Constants.DDB_RESPONSE_ATTRIBUTE_NAME_ITEMS][2]
resource_created_date = initial_resource[Constants.DDB_FIELD_CREATED_DATE]
self.assertEqual(first_modification_resource[Constants.DDB_FIELD_CREATED_DATE],
resource_created_date,
'Value not persisted as expected')
self.assertEqual(second_modification_resource[Constants.DDB_FIELD_CREATED_DATE],
resource_created_date,
'Value not persisted as expected')
self.assertEqual(initial_resource[Constants.DDB_FIELD_MODIFIED_DATE],
resource_created_date,
'Value not persisted as expected')
self.assertNotEqual(first_modification_resource[Constants.DDB_FIELD_MODIFIED_DATE],
resource_created_date,
'Value not persisted as expected')
self.assertNotEqual(second_modification_resource[Constants.DDB_FIELD_MODIFIED_DATE],
resource_created_date,
'Value not persisted as expected')
self.assertNotEqual(first_modification_resource[Constants.DDB_FIELD_MODIFIED_DATE],
second_modification_resource[Constants.DDB_FIELD_MODIFIED_DATE],
'Value not persisted as expected')
self.assertNotEqual(initial_resource[Constants.DDB_FIELD_METADATA],
first_modification_resource[Constants.DDB_FIELD_METADATA],
'Value not persisted as expected')
self.assertNotEqual(initial_resource[Constants.DDB_FIELD_METADATA],
second_modification_resource[Constants.DDB_FIELD_METADATA],
'Value not persisted as expected')
self.assertNotEqual(first_modification_resource[Constants.DDB_FIELD_METADATA],
second_modification_resource[Constants.DDB_FIELD_METADATA],
'Value not persisted as expected')
remove_mock_database(dynamodb)
def test_encoders(self):
self.assertRaises(TypeError, encode_file_metadata, '')
self.assertRaises(TypeError, encode_files, '')
self.assertRaises(TypeError, encode_creator, '')
self.assertRaises(TypeError, encode_metadata, '')
self.assertEqual(encode_metadata(Metadata(None, None, None, None, None, dict(), None)), {},
'Unexpected metadata')
self.assertRaises(TypeError, encode_resource, '')
def test_app(self):
from src import app
self.assertRaises(ValueError, app.handler, None, None)
event = {
"body": "{\"operation\": \"UNKNOWN_OPERATION\"} "
}
handler_response = app.handler(event, None)
self.assertEqual(handler_response[Constants.RESPONSE_STATUS_CODE], http.HTTPStatus.BAD_REQUEST,
'HTTP Status code not 400')
if __name__ == '__main__':
unittest.main()
class Body:
def __init__(self, operation: str, resource: Resource):
self.operation = operation
self.resource = resource
def encode_body(instance):
if isinstance(instance, Body):
temp_value = {
Constants.JSON_ATTRIBUTE_NAME_OPERATION: instance.operation,
Constants.JSON_ATTRIBUTE_NAME_RESOURCE: encode_resource(instance.resource)
}
return remove_none_values(temp_value)
else:
type_name = instance.__class__.__name__
raise TypeError(f"Object of type '{type_name}' is not JSON serializable")
| [
"unittest.TestLoader",
"random.choice",
"data.file.File",
"json.loads",
"data.resource.Resource",
"common.encoders.encode_resource",
"json.dumps",
"data.creator.Creator",
"uuid.uuid4",
"boto3.resource",
"src.classes.RequestHandler.RequestHandler",
"data.metadata.Metadata",
"boto3.dynamodb.conditions.Key",
"unittest.main",
"common.helpers.remove_none_values",
"data.resource.Resource.from_dict",
"unittest.TextTestRunner",
"src.app.handler"
] | [((942, 979), 'json.dumps', 'json.dumps', (['body'], {'default': 'encode_body'}), '(body, default=encode_body)\n', (952, 979), False, 'import json\n'), ((28705, 28720), 'unittest.main', 'unittest.main', ([], {}), '()\n', (28718, 28720), False, 'import unittest\n'), ((1750, 1826), 'boto3.resource', 'boto3.resource', (['"""dynamodb"""'], {'region_name': 'os.environ[Constants.ENV_VAR_REGION]'}), "('dynamodb', region_name=os.environ[Constants.ENV_VAR_REGION])\n", (1764, 1826), False, 'import boto3\n'), ((4608, 4641), 'data.creator.Creator', 'Creator', (['"""AUTHORITY_IDENTIFIER_1"""'], {}), "('AUTHORITY_IDENTIFIER_1')\n", (4615, 4641), False, 'from data.creator import Creator\n'), ((4664, 4697), 'data.creator.Creator', 'Creator', (['"""AUTHORITY_IDENTIFIER_2"""'], {}), "('AUTHORITY_IDENTIFIER_2')\n", (4671, 4697), False, 'from data.creator import Creator\n'), ((4763, 4877), 'data.metadata.Metadata', 'Metadata', (['creators', '"""https://hdl.handle.net/11250.1/1"""', '"""LICENSE_IDENTIFIER_1"""', '"""2019"""', '"""Unit"""', 'titles', '"""text"""'], {}), "(creators, 'https://hdl.handle.net/11250.1/1',\n 'LICENSE_IDENTIFIER_1', '2019', 'Unit', titles, 'text')\n", (4771, 4877), False, 'from data.metadata import Metadata\n'), ((5266, 5308), 'data.file.File', 'File', (['"""FILE_IDENTIFIER_1"""', 'file_metadata_1'], {}), "('FILE_IDENTIFIER_1', file_metadata_1)\n", (5270, 5308), False, 'from data.file import File\n'), ((5326, 5368), 'data.file.File', 'File', (['"""FILE_IDENTIFIER_2"""', 'file_metadata_2'], {}), "('FILE_IDENTIFIER_2', file_metadata_2)\n", (5330, 5368), False, 'from data.file import File\n'), ((5519, 5590), 'data.resource.Resource', 'Resource', (['uuid', 'time_modified', 'time_created', 'metadata', 'files', '"""<EMAIL>"""'], {}), "(uuid, time_modified, time_created, metadata, files, '<EMAIL>')\n", (5527, 5590), False, 'from data.resource import Resource\n'), ((5770, 5794), 'src.classes.RequestHandler.RequestHandler', 'RequestHandler', (['dynamodb'], {}), '(dynamodb)\n', (5784, 5794), False, 'from src.classes.RequestHandler import RequestHandler\n'), ((6393, 6417), 'src.classes.RequestHandler.RequestHandler', 'RequestHandler', (['dynamodb'], {}), '(dynamodb)\n', (6407, 6417), False, 'from src.classes.RequestHandler import RequestHandler\n'), ((7050, 7074), 'src.classes.RequestHandler.RequestHandler', 'RequestHandler', (['dynamodb'], {}), '(dynamodb)\n', (7064, 7074), False, 'from src.classes.RequestHandler import RequestHandler\n'), ((7726, 7750), 'src.classes.RequestHandler.RequestHandler', 'RequestHandler', (['dynamodb'], {}), '(dynamodb)\n', (7740, 7750), False, 'from src.classes.RequestHandler import RequestHandler\n'), ((8503, 8527), 'src.classes.RequestHandler.RequestHandler', 'RequestHandler', (['dynamodb'], {}), '(dynamodb)\n', (8517, 8527), False, 'from src.classes.RequestHandler import RequestHandler\n'), ((9272, 9296), 'src.classes.RequestHandler.RequestHandler', 'RequestHandler', (['dynamodb'], {}), '(dynamodb)\n', (9286, 9296), False, 'from src.classes.RequestHandler import RequestHandler\n'), ((9903, 9927), 'src.classes.RequestHandler.RequestHandler', 'RequestHandler', (['dynamodb'], {}), '(dynamodb)\n', (9917, 9927), False, 'from src.classes.RequestHandler import RequestHandler\n'), ((10568, 10592), 'src.classes.RequestHandler.RequestHandler', 'RequestHandler', (['dynamodb'], {}), '(dynamodb)\n', (10582, 10592), False, 'from src.classes.RequestHandler import RequestHandler\n'), ((11298, 11322), 'src.classes.RequestHandler.RequestHandler', 'RequestHandler', 
(['dynamodb'], {}), '(dynamodb)\n', (11312, 11322), False, 'from src.classes.RequestHandler import RequestHandler\n'), ((12014, 12038), 'src.classes.RequestHandler.RequestHandler', 'RequestHandler', (['dynamodb'], {}), '(dynamodb)\n', (12028, 12038), False, 'from src.classes.RequestHandler import RequestHandler\n'), ((12727, 12751), 'src.classes.RequestHandler.RequestHandler', 'RequestHandler', (['dynamodb'], {}), '(dynamodb)\n', (12741, 12751), False, 'from src.classes.RequestHandler import RequestHandler\n'), ((13441, 13465), 'src.classes.RequestHandler.RequestHandler', 'RequestHandler', (['dynamodb'], {}), '(dynamodb)\n', (13455, 13465), False, 'from src.classes.RequestHandler import RequestHandler\n'), ((13588, 13632), 'data.metadata.Metadata', 'Metadata', (['None', 'None', 'None', 'None', 'None', 'None'], {}), '(None, None, None, None, None, None)\n', (13596, 13632), False, 'from data.metadata import Metadata\n'), ((14196, 14220), 'src.classes.RequestHandler.RequestHandler', 'RequestHandler', (['dynamodb'], {}), '(dynamodb)\n', (14210, 14220), False, 'from src.classes.RequestHandler import RequestHandler\n'), ((15219, 15243), 'src.classes.RequestHandler.RequestHandler', 'RequestHandler', (['dynamodb'], {}), '(dynamodb)\n', (15233, 15243), False, 'from src.classes.RequestHandler import RequestHandler\n'), ((16259, 16283), 'src.classes.RequestHandler.RequestHandler', 'RequestHandler', (['dynamodb'], {}), '(dynamodb)\n', (16273, 16283), False, 'from src.classes.RequestHandler import RequestHandler\n'), ((17011, 17035), 'src.classes.RequestHandler.RequestHandler', 'RequestHandler', (['dynamodb'], {}), '(dynamodb)\n', (17025, 17035), False, 'from src.classes.RequestHandler import RequestHandler\n'), ((17805, 17829), 'src.classes.RequestHandler.RequestHandler', 'RequestHandler', (['dynamodb'], {}), '(dynamodb)\n', (17819, 17829), False, 'from src.classes.RequestHandler import RequestHandler\n'), ((18786, 18810), 'src.classes.RequestHandler.RequestHandler', 'RequestHandler', (['dynamodb'], {}), '(dynamodb)\n', (18800, 18810), False, 'from src.classes.RequestHandler import RequestHandler\n'), ((19674, 19698), 'src.classes.RequestHandler.RequestHandler', 'RequestHandler', (['dynamodb'], {}), '(dynamodb)\n', (19688, 19698), False, 'from src.classes.RequestHandler import RequestHandler\n'), ((20328, 20352), 'src.classes.RequestHandler.RequestHandler', 'RequestHandler', (['dynamodb'], {}), '(dynamodb)\n', (20342, 20352), False, 'from src.classes.RequestHandler import RequestHandler\n'), ((20892, 20916), 'src.classes.RequestHandler.RequestHandler', 'RequestHandler', (['dynamodb'], {}), '(dynamodb)\n', (20906, 20916), False, 'from src.classes.RequestHandler import RequestHandler\n'), ((21514, 21538), 'src.classes.RequestHandler.RequestHandler', 'RequestHandler', (['dynamodb'], {}), '(dynamodb)\n', (21528, 21538), False, 'from src.classes.RequestHandler import RequestHandler\n'), ((21983, 22007), 'src.classes.RequestHandler.RequestHandler', 'RequestHandler', (['dynamodb'], {}), '(dynamodb)\n', (21997, 22007), False, 'from src.classes.RequestHandler import RequestHandler\n'), ((22366, 22409), 'data.resource.Resource.from_dict', 'Resource.from_dict', (['resource_dict_from_json'], {}), '(resource_dict_from_json)\n', (22384, 22409), False, 'from data.resource import Resource\n'), ((23938, 23962), 'src.classes.RequestHandler.RequestHandler', 'RequestHandler', (['dynamodb'], {}), '(dynamodb)\n', (23952, 23962), False, 'from src.classes.RequestHandler import RequestHandler\n'), ((24460, 24503), 
'data.resource.Resource.from_dict', 'Resource.from_dict', (['resource_dict_from_json'], {}), '(resource_dict_from_json)\n', (24478, 24503), False, 'from data.resource import Resource\n'), ((28490, 28514), 'src.app.handler', 'app.handler', (['event', 'None'], {}), '(event, None)\n', (28501, 28514), False, 'from src import app\n'), ((29136, 29166), 'common.helpers.remove_none_values', 'remove_none_values', (['temp_value'], {}), '(temp_value)\n', (29154, 29166), False, 'from common.helpers import remove_none_values\n'), ((631, 656), 'unittest.TextTestRunner', 'unittest.TextTestRunner', ([], {}), '()\n', (654, 656), False, 'import unittest\n'), ((29076, 29110), 'common.encoders.encode_resource', 'encode_resource', (['instance.resource'], {}), '(instance.resource)\n', (29091, 29110), False, 'from common.encoders import encode_resource, encode_file_metadata, encode_files, encode_creator, encode_metadata\n'), ((670, 691), 'unittest.TestLoader', 'unittest.TestLoader', ([], {}), '()\n', (689, 691), False, 'import unittest\n'), ((3695, 3707), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (3705, 3707), False, 'import uuid\n'), ((4236, 4258), 'random.choice', 'random.choice', (['letters'], {}), '(letters)\n', (4249, 4258), False, 'import random\n'), ((4365, 4377), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (4375, 4377), False, 'import uuid\n'), ((22254, 22293), 'json.loads', 'json.loads', (['event[Constants.EVENT_BODY]'], {}), '(event[Constants.EVENT_BODY])\n', (22264, 22293), False, 'import json\n'), ((22602, 22662), 'json.loads', 'json.loads', (['handler_insert_response[Constants.RESPONSE_BODY]'], {}), '(handler_insert_response[Constants.RESPONSE_BODY])\n', (22612, 22662), False, 'import json\n'), ((24213, 24273), 'json.loads', 'json.loads', (['handler_insert_response[Constants.RESPONSE_BODY]'], {}), '(handler_insert_response[Constants.RESPONSE_BODY])\n', (24223, 24273), False, 'import json\n'), ((24348, 24387), 'json.loads', 'json.loads', (['event[Constants.EVENT_BODY]'], {}), '(event[Constants.EVENT_BODY])\n', (24358, 24387), False, 'import json\n'), ((22796, 22840), 'boto3.dynamodb.conditions.Key', 'Key', (['Constants.DDB_FIELD_RESOURCE_IDENTIFIER'], {}), '(Constants.DDB_FIELD_RESOURCE_IDENTIFIER)\n', (22799, 22840), False, 'from boto3.dynamodb.conditions import Key\n'), ((25139, 25183), 'boto3.dynamodb.conditions.Key', 'Key', (['Constants.DDB_FIELD_RESOURCE_IDENTIFIER'], {}), '(Constants.DDB_FIELD_RESOURCE_IDENTIFIER)\n', (25142, 25183), False, 'from boto3.dynamodb.conditions import Key\n')] |
import abc
import sys
from uqbar.cli.CLI import CLI
class CLIAggregator(CLI):
"""
Aggregates CLI scripts.
::
>>> import uqbar.cli
>>> class ExampleAggregator(uqbar.cli.CLIAggregator):
... @property
... def cli_classes(self):
... return []
...
>>> script = ExampleAggregator()
>>> try:
... script('--help')
... except SystemExit:
... pass
...
usage: example-aggregator [-h] [--version] {help,list} ...
<BLANKLINE>
optional arguments:
-h, --help show this help message and exit
--version show program's version number and exit
<BLANKLINE>
subcommands:
{help,list}
help print subcommand help
list list subcommands
"""
### CLASS VARIABLES ###
__slots__ = ()
### SPECIAL METHODS ###
def __call__(self, arguments=None):
if arguments is None:
arguments = self.argument_parser.parse_known_args()
else:
if isinstance(arguments, str):
arguments = arguments.split()
elif not isinstance(arguments, (list, tuple)):
message = "must be str, list, tuple or none: {!r}."
message = message.format(arguments)
raise ValueError(message)
arguments = self.argument_parser.parse_known_args(arguments)
self._process_args(arguments)
sys.exit(0)
### PRIVATE METHODS ###
def _handle_help_command(self, unknown_args):
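        # resolve "help <group> <alias>", "help <alias>" or "help <program name>" to a CLI class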
aliases = self.cli_aliases
program_names = self.cli_program_names
cli_class = None
if (
len(unknown_args) == 2
and unknown_args[0] in aliases
and unknown_args[1] in aliases[unknown_args[0]]
):
cli_class = aliases[unknown_args[0]][unknown_args[1]]
elif (
len(unknown_args) == 1
and unknown_args[0] in aliases
and not isinstance(aliases[unknown_args[0]], dict)
):
cli_class = aliases[unknown_args[0]]
elif len(unknown_args) == 1 and unknown_args[0] in program_names:
cli_class = program_names[unknown_args[0]]
elif not len(unknown_args):
self(["--help"])
return
if cli_class:
instance = cli_class()
print(instance.formatted_help)
else:
print("Cannot resolve {} to subcommand.".format(unknown_args))
def _handle_list_command(self):
by_scripting_group = {}
for cli_class in self.cli_classes:
instance = cli_class()
scripting_group = getattr(instance, "scripting_group", None)
group = by_scripting_group.setdefault(scripting_group, [])
group.append(instance)
print()
if None in by_scripting_group:
group = by_scripting_group.pop(None)
for instance in sorted(group, key=lambda x: x.alias):
message = "{}: {}".format(instance.alias, instance.short_description)
print(message)
print()
for group, instances in sorted(by_scripting_group.items()):
print("[{}]".format(group))
for instance in sorted(instances, key=lambda x: x.alias):
message = " {}: {}".format(
instance.alias, instance.short_description
)
print(message)
print()
def _process_args(self, arguments):
arguments, unknown_args = arguments
if arguments.subparser_name == "help":
self._handle_help_command(unknown_args)
elif arguments.subparser_name == "list":
self._handle_list_command()
else:
if hasattr(arguments, "subsubparser_name"):
cli_class = self.cli_aliases[arguments.subparser_name][
arguments.subsubparser_name
]
elif getattr(arguments, "subparser_name"):
cli_class = self.cli_aliases[arguments.subparser_name]
elif getattr(arguments, "subparser_name") is None:
self(["--help"])
return
instance = cli_class()
instance(unknown_args)
def _setup_argument_parser(self, parser):
subparsers = parser.add_subparsers(dest="subparser_name", title="subcommands")
subparsers.add_parser("help", add_help=False, help="print subcommand help")
subparsers.add_parser("list", add_help=False, help="list subcommands")
alias_map = self.cli_aliases
for key in sorted(alias_map):
if not isinstance(alias_map[key], dict):
cli_class = alias_map[key]
instance = cli_class()
subparsers.add_parser(
key, add_help=False, help=instance.short_description
)
else:
subkeys = sorted(alias_map[key])
group_subparser = subparsers.add_parser(
key, help="{{{}}} subcommand(s)".format(", ".join(subkeys))
)
group_subparsers = group_subparser.add_subparsers(
dest="subsubparser_name", title="{} subcommands".format(key)
)
for subkey in subkeys:
cli_class = alias_map[key][subkey]
instance = cli_class()
group_subparsers.add_parser(
subkey, add_help=False, help=instance.short_description
)
### PUBLIC PROPERTIES ###
@property
def cli_aliases(self):
"""
Developer script aliases.
"""
scripting_groups = []
aliases = {}
for cli_class in self.cli_classes:
instance = cli_class()
if getattr(instance, "alias", None):
scripting_group = getattr(instance, "scripting_group", None)
if scripting_group:
scripting_groups.append(scripting_group)
entry = (scripting_group, instance.alias)
if (scripting_group,) in aliases:
message = "alias conflict between scripting group"
message += " {!r} and {}"
message = message.format(
scripting_group, aliases[(scripting_group,)].__name__
)
raise Exception(message)
if entry in aliases:
message = "alias conflict between {} and {}"
message = message.format(
aliases[entry].__name__, cli_class.__name__
)
raise Exception(message)
aliases[entry] = cli_class
else:
entry = (instance.alias,)
if entry in scripting_groups:
message = "alias conflict between {}"
message += " and scripting group {!r}"
message = message.format(cli_class.__name__, instance.alias)
raise Exception(message)
if entry in aliases:
message = "alias conflict be {} and {}"
message = message.format(cli_class.__name__, aliases[entry])
raise Exception(message)
aliases[(instance.alias,)] = cli_class
else:
if instance.program_name in scripting_groups:
message = "Alias conflict between {}"
message += " and scripting group {!r}"
message = message.format(cli_class.__name__, instance.program_name)
raise Exception(message)
aliases[(instance.program_name,)] = cli_class
alias_map = {}
for key, value in aliases.items():
if len(key) == 1:
alias_map[key[0]] = value
else:
if key[0] not in alias_map:
alias_map[key[0]] = {}
alias_map[key[0]][key[1]] = value
return alias_map
    @property
    @abc.abstractmethod
def cli_classes(self):
"""
Developer scripts classes.
"""
return []
@property
def cli_program_names(self):
"""
Developer script program names.
"""
program_names = {}
for cli_class in self.cli_classes:
instance = cli_class()
program_names[instance.program_name] = cli_class
return program_names
| [
"sys.exit"
] | [((1524, 1535), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (1532, 1535), False, 'import sys\n')] |
# -*- coding: UTF-8 -*-
import time
def fibonacci():
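    # keep a sliding window of the last two Fibonacci numbers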
numbers = []
while True:
if len(numbers) < 2:
numbers.append(1)
else:
numbers.append(sum(numbers))
numbers.pop(0)
yield numbers[-1]
if __name__ == '__main__':
for i in fibonacci():
        print(i)
time.sleep(0.5)
| [
"time.sleep"
] | [((335, 350), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (345, 350), False, 'import time\n')] |
import copy
import torch
import torch.nn as nn
import torch.nn.functional as F
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(torch.cuda.is_available(), torch.backends.cudnn.enabled)
# Implementation of Twin Delayed Deep Deterministic Policy Gradients (TD3)
# Paper: https://arxiv.org/abs/1802.09477
''' Actor model '''
class Actor(nn.Module):
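    """Deterministic policy network with an optional LSTM layer; output is
    sigmoid-scaled into [0, max_action]."""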
def __init__(
self, state_dim, action_dim, hidden_dim, max_action, is_recurrent=True, init_w=3e-3
):
super(Actor, self).__init__()
self.recurrent = is_recurrent
if self.recurrent:
self.l1 = nn.LSTM(state_dim, hidden_dim, batch_first=True)
else:
self.l1 = nn.Linear(state_dim, hidden_dim)
self.l2 = nn.Linear(hidden_dim, hidden_dim)
self.l3 = nn.Linear(hidden_dim, action_dim)
self.max_action = max_action
self.l3.weight.data.uniform_(-init_w, init_w)
self.l3.bias.data.uniform_(-init_w, init_w)
def forward(self, state, hidden):
if self.recurrent:
self.l1.flatten_parameters()
a, h = self.l1(state, hidden)
else:
a, h = F.relu(self.l1(state)), None
a = F.relu(self.l2(a))
a = torch.sigmoid(self.l3(a))
return self.max_action * a, h
""" Critic model """
class Critic(nn.Module):
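    """Twin Q-value networks (Q1 and Q2) with optional LSTM feature extractors,
    used for clipped double-Q learning in TD3."""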
def __init__(
self, state_dim, action_dim, hidden_dim, is_recurrent=True, init_w=3e-3
):
super(Critic, self).__init__()
self.recurrent = is_recurrent
if self.recurrent:
self.l1 = nn.LSTM(
state_dim + action_dim, hidden_dim, batch_first=True)
self.l4 = nn.LSTM(
state_dim + action_dim, hidden_dim, batch_first=True)
else:
self.l1 = nn.Linear(state_dim + action_dim, hidden_dim)
self.l4 = nn.Linear(state_dim + action_dim, hidden_dim)
# Q1 architecture
self.l2 = nn.Linear(hidden_dim, hidden_dim)
self.l3 = nn.Linear(hidden_dim, 1)
# Q2 architecture
self.l5 = nn.Linear(hidden_dim, hidden_dim)
self.l6 = nn.Linear(hidden_dim, 1)
self.l3.weight.data.uniform_(-init_w, init_w)
self.l3.bias.data.uniform_(-init_w, init_w)
self.l6.weight.data.uniform_(-init_w, init_w)
self.l6.bias.data.uniform_(-init_w, init_w)
def forward(self, state, action, hidden1, hidden2):
sa = torch.cat([state, action], -1)
if self.recurrent:
self.l1.flatten_parameters()
self.l4.flatten_parameters()
q1, hidden1 = self.l1(sa, hidden1)
q2, hidden2 = self.l4(sa, hidden2)
else:
q1, hidden1 = F.relu(self.l1(sa)), None
q2, hidden2 = F.relu(self.l4(sa)), None
q1 = F.relu(self.l2(q1))
q1 = self.l3(q1)
q2 = F.relu(self.l5(q2))
q2 = self.l6(q2)
return q1, q2
def Q1(self, state, action, hidden1):
sa = torch.cat([state, action], -1)
if self.recurrent:
self.l1.flatten_parameters()
q1, hidden1 = self.l1(sa, hidden1)
else:
q1, hidden1 = F.relu(self.l1(sa)), None
q1 = F.relu(self.l2(q1))
q1 = self.l3(q1)
return q1
''' TD3 Class '''
class TD3(object):
def __init__(
self,state_dim,action_dim, max_action, hidden_dim,
discount=0.99,
tau=0.005,
policy_noise=0.2,
noise_clip=0.5,
policy_freq=2,
lr=3e-4,
recurrent_actor=True,
recurrent_critic=True,
):
self.on_policy = False
self.recurrent = recurrent_actor
self.actor = Actor( state_dim, action_dim, hidden_dim, max_action, is_recurrent=recurrent_actor).to(device)
self.actor_target = copy.deepcopy(self.actor)
self.actor_optimizer = torch.optim.Adam(
self.actor.parameters(), lr=lr)
self.critic = Critic( state_dim, action_dim, hidden_dim, is_recurrent=recurrent_critic).to(device)
self.critic_target = copy.deepcopy(self.critic)
self.critic_optimizer = torch.optim.Adam(
self.critic.parameters(), lr=lr)
self.max_action = max_action
self.discount = discount
self.tau = tau
self.policy_noise = policy_noise
self.noise_clip = noise_clip
self.policy_freq = policy_freq
self.total_it = 0
def get_initial_states(self):
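        # zero-initialised LSTM hidden and cell states (h_0, c_0) for a batch of one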
h_0, c_0 = None, None
if self.actor.recurrent:
h_0 = torch.zeros((
self.actor.l1.num_layers,
1,
self.actor.l1.hidden_size),
dtype=torch.float)
h_0 = h_0.to(device=device)
c_0 = torch.zeros((
self.actor.l1.num_layers,
1,
self.actor.l1.hidden_size),
dtype=torch.float)
c_0 = c_0.to(device=device)
return (h_0, c_0)
def select_action(self, state, hidden, test=True):
if self.recurrent:
state = torch.FloatTensor(
state.reshape(1, -1)).to(device)[:, None, :]
else:
state = torch.FloatTensor(state.reshape(1, -1)).to(device)
action, hidden = self.actor(state, hidden)
return action.cpu().data.numpy().flatten(), hidden
def train(self, replay_buffer, batch_size=256):
self.total_it += 1
# Sample replay buffer
state, action, next_state, reward, not_done, hidden, next_hidden = \
replay_buffer.sample(batch_size)
with torch.no_grad():
# Select action according to policy and add clipped noise
noise = (
torch.randn_like(action) * self.policy_noise
).clamp(-self.noise_clip, self.noise_clip)
next_action = (
self.actor_target(next_state, next_hidden)[0] + noise
).clamp(-self.max_action, self.max_action)
# Compute the target Q value
target_Q1, target_Q2 = self.critic_target(
next_state, next_action, next_hidden, next_hidden)
target_Q = torch.min(target_Q1, target_Q2)
target_Q = reward + not_done * self.discount * target_Q
# Get current Q estimates
current_Q1, current_Q2 = self.critic(state, action, hidden, hidden)
        # Compute critic loss (smooth-L1 on both heads; the MSE variant is kept
        # below, commented out, as the alternative formulation)
        # critic_loss = F.mse_loss(current_Q1, target_Q) + \
        #     F.mse_loss(current_Q2, target_Q)
        critic_loss = F.smooth_l1_loss(current_Q1, target_Q) + \
            F.smooth_l1_loss(current_Q2, target_Q)
# Optimize the critic
self.critic_optimizer.zero_grad()
critic_loss.backward()
self.critic_optimizer.step()
# Delayed policy updates
if self.total_it % self.policy_freq == 0:
            # Compute actor loss
actor_loss = -self.critic.Q1(
state, self.actor(state, hidden)[0], hidden).mean()
# Optimize the actor
self.actor_optimizer.zero_grad()
actor_loss.backward()
self.actor_optimizer.step()
# Update the frozen target models
for param, target_param in zip(
self.critic.parameters(), self.critic_target.parameters()
):
target_param.data.copy_(
self.tau * param.data + (1 - self.tau) * target_param.data)
for param, target_param in zip(
self.actor.parameters(), self.actor_target.parameters()
):
target_param.data.copy_(
self.tau * param.data + (1 - self.tau) * target_param.data)
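            # The two loops above apply a Polyak (soft) update to the frozen targets:
            # theta_target <- tau * theta + (1 - tau) * theta_target, with tau = self.tau.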
def save(self, filename):
torch.save(self.critic.state_dict(), filename + "_critic")
torch.save(self.critic_optimizer.state_dict(),
filename + "_critic_optimizer")
torch.save(self.actor.state_dict(), filename + "_actor")
torch.save(self.actor_optimizer.state_dict(),
filename + "_actor_optimizer")
def load(self, filename):
self.critic.load_state_dict(torch.load(filename + "_critic"))
self.critic_optimizer.load_state_dict(
torch.load(filename + "_critic_optimizer"))
self.actor.load_state_dict(torch.load(filename + "_actor"))
self.actor_optimizer.load_state_dict(
torch.load(filename + "_actor_optimizer"))
def eval_mode(self):
self.actor.eval()
self.critic.eval()
def train_mode(self):
self.actor.train()
self.critic.train()
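# A minimal usage sketch (hypothetical names; assumes a replay buffer whose
# sample() returns the (state, action, next_state, reward, not_done, hidden,
# next_hidden) tuple unpacked in train() above):
#
#   policy = TD3(state_dim, action_dim, max_action, hidden_dim)
#   hidden = policy.get_initial_states()
#   action, hidden = policy.select_action(obs, hidden)
#   policy.train(replay_buffer, batch_size=256)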
| [
"torch.nn.LSTM",
"torch.load",
"torch.min",
"torch.nn.functional.smooth_l1_loss",
"torch.randn_like",
"torch.cuda.is_available",
"torch.nn.Linear",
"copy.deepcopy",
"torch.no_grad",
"torch.zeros",
"torch.cat"
] | [((157, 182), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (180, 182), False, 'import torch\n'), ((113, 138), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (136, 138), False, 'import torch\n'), ((761, 794), 'torch.nn.Linear', 'nn.Linear', (['hidden_dim', 'hidden_dim'], {}), '(hidden_dim, hidden_dim)\n', (770, 794), True, 'import torch.nn as nn\n'), ((813, 846), 'torch.nn.Linear', 'nn.Linear', (['hidden_dim', 'action_dim'], {}), '(hidden_dim, action_dim)\n', (822, 846), True, 'import torch.nn as nn\n'), ((1969, 2002), 'torch.nn.Linear', 'nn.Linear', (['hidden_dim', 'hidden_dim'], {}), '(hidden_dim, hidden_dim)\n', (1978, 2002), True, 'import torch.nn as nn\n'), ((2021, 2045), 'torch.nn.Linear', 'nn.Linear', (['hidden_dim', '(1)'], {}), '(hidden_dim, 1)\n', (2030, 2045), True, 'import torch.nn as nn\n'), ((2091, 2124), 'torch.nn.Linear', 'nn.Linear', (['hidden_dim', 'hidden_dim'], {}), '(hidden_dim, hidden_dim)\n', (2100, 2124), True, 'import torch.nn as nn\n'), ((2143, 2167), 'torch.nn.Linear', 'nn.Linear', (['hidden_dim', '(1)'], {}), '(hidden_dim, 1)\n', (2152, 2167), True, 'import torch.nn as nn\n'), ((2454, 2484), 'torch.cat', 'torch.cat', (['[state, action]', '(-1)'], {}), '([state, action], -1)\n', (2463, 2484), False, 'import torch\n'), ((3005, 3035), 'torch.cat', 'torch.cat', (['[state, action]', '(-1)'], {}), '([state, action], -1)\n', (3014, 3035), False, 'import torch\n'), ((3865, 3890), 'copy.deepcopy', 'copy.deepcopy', (['self.actor'], {}), '(self.actor)\n', (3878, 3890), False, 'import copy\n'), ((4121, 4147), 'copy.deepcopy', 'copy.deepcopy', (['self.critic'], {}), '(self.critic)\n', (4134, 4147), False, 'import copy\n'), ((624, 672), 'torch.nn.LSTM', 'nn.LSTM', (['state_dim', 'hidden_dim'], {'batch_first': '(True)'}), '(state_dim, hidden_dim, batch_first=True)\n', (631, 672), True, 'import torch.nn as nn\n'), ((709, 741), 'torch.nn.Linear', 'nn.Linear', (['state_dim', 'hidden_dim'], {}), '(state_dim, hidden_dim)\n', (718, 741), True, 'import torch.nn as nn\n'), ((1593, 1654), 'torch.nn.LSTM', 'nn.LSTM', (['(state_dim + action_dim)', 'hidden_dim'], {'batch_first': '(True)'}), '(state_dim + action_dim, hidden_dim, batch_first=True)\n', (1600, 1654), True, 'import torch.nn as nn\n'), ((1694, 1755), 'torch.nn.LSTM', 'nn.LSTM', (['(state_dim + action_dim)', 'hidden_dim'], {'batch_first': '(True)'}), '(state_dim + action_dim, hidden_dim, batch_first=True)\n', (1701, 1755), True, 'import torch.nn as nn\n'), ((1810, 1855), 'torch.nn.Linear', 'nn.Linear', (['(state_dim + action_dim)', 'hidden_dim'], {}), '(state_dim + action_dim, hidden_dim)\n', (1819, 1855), True, 'import torch.nn as nn\n'), ((1878, 1923), 'torch.nn.Linear', 'nn.Linear', (['(state_dim + action_dim)', 'hidden_dim'], {}), '(state_dim + action_dim, hidden_dim)\n', (1887, 1923), True, 'import torch.nn as nn\n'), ((4598, 4691), 'torch.zeros', 'torch.zeros', (['(self.actor.l1.num_layers, 1, self.actor.l1.hidden_size)'], {'dtype': 'torch.float'}), '((self.actor.l1.num_layers, 1, self.actor.l1.hidden_size), dtype\n =torch.float)\n', (4609, 4691), False, 'import torch\n'), ((4811, 4904), 'torch.zeros', 'torch.zeros', (['(self.actor.l1.num_layers, 1, self.actor.l1.hidden_size)'], {'dtype': 'torch.float'}), '((self.actor.l1.num_layers, 1, self.actor.l1.hidden_size), dtype\n =torch.float)\n', (4822, 4904), False, 'import torch\n'), ((5657, 5672), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5670, 5672), False, 'import torch\n'), ((6223, 6254), 'torch.min', 
'torch.min', (['target_Q1', 'target_Q2'], {}), '(target_Q1, target_Q2)\n', (6232, 6254), False, 'import torch\n'), ((6665, 6703), 'torch.nn.functional.smooth_l1_loss', 'F.smooth_l1_loss', (['current_Q1', 'target_Q'], {}), '(current_Q1, target_Q)\n', (6681, 6703), True, 'import torch.nn.functional as F\n'), ((6720, 6758), 'torch.nn.functional.smooth_l1_loss', 'F.smooth_l1_loss', (['current_Q2', 'target_Q'], {}), '(current_Q2, target_Q)\n', (6736, 6758), True, 'import torch.nn.functional as F\n'), ((8301, 8333), 'torch.load', 'torch.load', (["(filename + '_critic')"], {}), "(filename + '_critic')\n", (8311, 8333), False, 'import torch\n'), ((8394, 8436), 'torch.load', 'torch.load', (["(filename + '_critic_optimizer')"], {}), "(filename + '_critic_optimizer')\n", (8404, 8436), False, 'import torch\n'), ((8473, 8504), 'torch.load', 'torch.load', (["(filename + '_actor')"], {}), "(filename + '_actor')\n", (8483, 8504), False, 'import torch\n'), ((8564, 8605), 'torch.load', 'torch.load', (["(filename + '_actor_optimizer')"], {}), "(filename + '_actor_optimizer')\n", (8574, 8605), False, 'import torch\n'), ((5782, 5806), 'torch.randn_like', 'torch.randn_like', (['action'], {}), '(action)\n', (5798, 5806), False, 'import torch\n')] |
from utils import inv_tra
from .base_trainer import BaseTrainer
from loss import ReconstructCriterion
import torch
from torchvision.utils import make_grid, save_image
import os
class AutoEncocderTrainer(BaseTrainer):
def __init__(self, model, optimizer, cfg, train_loader, val_loader):
super(AutoEncocderTrainer, self).__init__(model, optimizer, cfg)
self.train_loader = train_loader
self.val_loader = val_loader
self.main_metric = 'MSE'
self.device = cfg.device
self.args = cfg
self.logger.info(cfg)
self.criterion = ReconstructCriterion()
self.loss_monitor.add_loss(self.criterion)
def step(self, epoch):
self.model.train()
for batch_idx, (img, target) in enumerate(self.train_loader):
self.optimizer.zero_grad()
img, target = img.to(self.device), target.to(self.device)
_, _, emb = self.model(img)
recon = self.model.decoder(emb)
loss = self.criterion(recon, target / 255.)
self.criterion.update(loss.item(), img.size(0))
loss.backward()
# self.scheduler.step()
self.optimizer.step()
if batch_idx % 20 == 0:
print('Epoch: [{}][{}/{}]\t'.format(epoch, batch_idx, len(self.train_loader)), end='')
print(self.loss_monitor.summary())
return self.loss_monitor.results
def eval(self, epoch):
self.model.eval()
def eval_mean_square(input, target):
return ((input - target) ** 2).mean()
metric = []
for batch_idx, (img, target) in enumerate(self.val_loader):
img, target = img.to(self.device), target.to(self.device)
with torch.no_grad():
_, _, emb = self.model(img)
recon = self.model.decoder(emb) * 255
score = eval_mean_square(recon.detach().cpu(), target.detach().cpu())
metric.append(score)
if batch_idx == 0:
recon = recon.detach().cpu()
for i in range(recon.size(0)):
recon[i] = inv_tra(recon[i])
grid = make_grid(recon, nrow=10)
save_image(grid, os.path.join(self.save_dir, 'recon_epoch_{}.jpg'.format(epoch)))
return {'MSE': -sum(metric) / len(metric)}
def get_state_for_save(self, epoch):
state = {
'encoder': self.model.state_dict(),
'epoch': epoch,
'optimizer': self.optimizer.state_dict(),
}
return state
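# Rough usage sketch (model, optimizer, cfg and the two DataLoaders are assumed
# to come from the surrounding project; cfg.epochs is illustrative only):
#
#   trainer = AutoEncocderTrainer(model, optimizer, cfg, train_loader, val_loader)
#   for epoch in range(cfg.epochs):
#       trainer.step(epoch)
#       scores = trainer.eval(epoch)   # {'MSE': ...}, the negated mean squared error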
| [
"torch.no_grad",
"torchvision.utils.make_grid",
"utils.inv_tra",
"loss.ReconstructCriterion"
] | [((588, 610), 'loss.ReconstructCriterion', 'ReconstructCriterion', ([], {}), '()\n', (608, 610), False, 'from loss import ReconstructCriterion\n'), ((1750, 1765), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1763, 1765), False, 'import torch\n'), ((2184, 2209), 'torchvision.utils.make_grid', 'make_grid', (['recon'], {'nrow': '(10)'}), '(recon, nrow=10)\n', (2193, 2209), False, 'from torchvision.utils import make_grid, save_image\n'), ((2143, 2160), 'utils.inv_tra', 'inv_tra', (['recon[i]'], {}), '(recon[i])\n', (2150, 2160), False, 'from utils import inv_tra\n')] |
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 16 15:57:30 2020
@author: <NAME>
"""
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats as stats
from scipy.stats.stats import linregress
import datetime
def read_ERA_csv(fpath, filename):
'''
Function to read ERA5 data from selected file in csv format.
Inputs:
fpath - string for file path
filename - string for data file name
Outputs:
ncols - number of columns found on first row of data file
ndates - number of rows, each with data for a different date
header - top row of file contains header for data
datarr - data below the top row containing dates only in string format
data - data below the top row (each row is a separate time-point)
'''
fnam = fpath+filename
header = np.genfromtxt(fnam, delimiter=',', dtype='str', max_rows=1)
ncols = len(header)
datarr = np.genfromtxt(fnam, delimiter=',', skip_header=1, usecols=0, dtype='str')
data = np.genfromtxt(fnam, delimiter=',', skip_header=1)
ndates = len(data[:,0])
return ncols, ndates, header, datarr, data
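# Example call of read_ERA_csv (sketch; the path and file name match those used
# in read_allvariables below):
# ncols, ndates, header, datarr, data = read_ERA_csv(
#     '../data/ERA5_reanalysis_models/demand_model_outputs/', 'ERA5_T2m_1979_2018.csv')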
def read_ENTSOE_csv(fpath, filename):
'''
Function to read ENTSO-E daily data from selected file in csv format.
Note that the units of national electricity load are in GW.
Inputs:
fpath - string for file path
filename - string for data file name
Outputs:
ncols - number of columns found on first row of data file
ndates - number of rows, each with data for a different date
header - top row of file contains header for data
data - data below the top row (each row is a separate time-point)
'''
fnam = fpath+filename
header = np.genfromtxt(fnam, delimiter=',', dtype='str', max_rows=1)
ncols = len(header)
data = np.genfromtxt(fnam, delimiter=',', skip_header=1)
ndates = len(data[:,0])
return ncols, ndates, header, data
def read_allvariables(fpath):
'''
Function to read all the relevant data files and to arrange the data into
suitable arrays.
Inputs:
fpath - string for file path
Outputs:
datarr - array containing dates in the datetime format
t2m_uk - temperature (at 2m) data
demand_uk - simulated demand data using ERA5
entso - metered demand data from the ENTSOE database
'''
#
# Set path for data files and file names.
#
erapath = fpath+'ERA5_reanalysis_models/demand_model_outputs/'
t2m_file = 'ERA5_T2m_1979_2018.csv'
demand_file = 'ERA5_weather_dependent_demand_1979_2018.csv'
entpath = fpath+'ENTSOE/'
ENTSO_file = 'GB-daily-load-values_2010-2015.csv'
#
# Read the ERA5 2m temperature and modelled demand data
#
tncols, ntdates, theader, ttim, tdata = read_ERA_csv(erapath, t2m_file)
dncols, nddates, dheader, dtim, demand = read_ERA_csv(erapath, demand_file)
#
# Convert the dates read in as strings into the datetime format.
# This can be used in calculations as a real time variable and plotting.
#
datlist = [datetime.datetime.strptime(dtimelement, "%Y-%m-%d") for dtimelement in dtim]
datarr = np.asarray(datlist)
#
# Check for consistency between the datasets
#
assert nddates == ntdates, 'Error: number of time points in T and demand do not match'
assert dncols == tncols, 'Error: number of country columns in T and demand do not match'
#
# Find UK data, knowing that it is in the last column of demand array
#
print(dheader[dncols-1])
demand_uk = demand[:,dncols-1]
#
# Note that the date format for the T2m file is not the same, but the date rows correspond.
# This means that there is an extra column in the tdata array (compared with theader).
#
print(theader[tncols-1])
tukpos = len(tdata[0,:])
t2m_uk = tdata[:,tukpos-1]
#
# Read the daily ENTSO-E data for the UK.
# Note that both ENTSO-E data and modelled demand are in Giga-Watts.
#
encols, nedates, eheader, entso = read_ENTSOE_csv(entpath, ENTSO_file)
print()
print('header for ENTSO-E data')
print(eheader)
return datarr, t2m_uk, demand_uk, entso
if __name__ == '__main__':
#Run the main program
'''
Main script for Lab 1.
Calibrating electricity demand versus temperature.
Using linear regression to create demand time series.
'''
#
# Set your own filepath pointing to the data in your directories
#
fpath = '../data/'
#
# Read in daily data for T2m and modelled demand from ERA5 and
# also the measured UK electricity demand from ENTSO-E.
#
datarr, t2m_uk, demand_uk, entso = read_allvariables(fpath)
#
ndates = len(datarr)
print(datarr[0],' is the first date in the ERA files')
print(datarr[ndates-1], ' is the last date')
print('')
print('Now over to you from here on!!')
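    #
    # One possible next step (an illustrative sketch only, not part of the lab code):
    # fit modelled demand against 2m temperature with linregress, as a template for
    # calibrating against the ENTSO-E metered demand, which would first need to be
    # aligned onto the same dates as datarr.
    #
    # fit = linregress(t2m_uk, demand_uk)
    # demand_fit = fit.intercept + fit.slope * t2m_uk
    # print('slope = %.3f GW per degree, r = %.3f' % (fit.slope, fit.rvalue))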
| [
"datetime.datetime.strptime",
"numpy.asarray",
"numpy.genfromtxt"
] | [((856, 915), 'numpy.genfromtxt', 'np.genfromtxt', (['fnam'], {'delimiter': '""","""', 'dtype': '"""str"""', 'max_rows': '(1)'}), "(fnam, delimiter=',', dtype='str', max_rows=1)\n", (869, 915), True, 'import numpy as np\n'), ((955, 1028), 'numpy.genfromtxt', 'np.genfromtxt', (['fnam'], {'delimiter': '""","""', 'skip_header': '(1)', 'usecols': '(0)', 'dtype': '"""str"""'}), "(fnam, delimiter=',', skip_header=1, usecols=0, dtype='str')\n", (968, 1028), True, 'import numpy as np\n'), ((1041, 1090), 'numpy.genfromtxt', 'np.genfromtxt', (['fnam'], {'delimiter': '""","""', 'skip_header': '(1)'}), "(fnam, delimiter=',', skip_header=1)\n", (1054, 1090), True, 'import numpy as np\n'), ((1792, 1851), 'numpy.genfromtxt', 'np.genfromtxt', (['fnam'], {'delimiter': '""","""', 'dtype': '"""str"""', 'max_rows': '(1)'}), "(fnam, delimiter=',', dtype='str', max_rows=1)\n", (1805, 1851), True, 'import numpy as np\n'), ((1889, 1938), 'numpy.genfromtxt', 'np.genfromtxt', (['fnam'], {'delimiter': '""","""', 'skip_header': '(1)'}), "(fnam, delimiter=',', skip_header=1)\n", (1902, 1938), True, 'import numpy as np\n'), ((3273, 3292), 'numpy.asarray', 'np.asarray', (['datlist'], {}), '(datlist)\n', (3283, 3292), True, 'import numpy as np\n'), ((3182, 3233), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['dtimelement', '"""%Y-%m-%d"""'], {}), "(dtimelement, '%Y-%m-%d')\n", (3208, 3233), False, 'import datetime\n')] |
#!flask/bin/python3
import string
import os
import random
import flask
from werkzeug import utils as wz_utils
from prometheus_flask_exporter import PrometheusMetrics
from prometheus_flask_exporter.multiprocess import GunicornPrometheusMetrics
from . import alphabot_exceptions
from . import dna
DEFAULT_IP_ADDR = '0.0.0.0'
DEFAULT_PORT = 8000
IP_ADDR = os.getenv('EDGE_SERVER_IP_ADDR')
if IP_ADDR is None:
print("Environmemnt variable 'EDGE_SERVER_IP_ADDR' is not set, using "
"default ip: %s" % DEFAULT_IP_ADDR)
IP_ADDR = DEFAULT_IP_ADDR
PORT = os.getenv('EDGE_SERVER_PORT')
if PORT is None:
print("Environmemnt variable 'EDGE_SERVER_PORT' is not set, using "
"default port: %d" % DEFAULT_PORT)
PORT = DEFAULT_PORT
else:
PORT = int(PORT)
D = dna.Dna()
app = flask.Flask(__name__)
metrics = GunicornPrometheusMetrics(app)
metrics = PrometheusMetrics(app)
@app.route('/', methods=['GET', 'POST'])
def post_image():
if flask.request.method != 'POST':
return "%s \n" % flask.request.method
file = flask.request.files['file']
filename = wz_utils.secure_filename(file.filename)
random_string = ''.join(
random.choice(string.ascii_uppercase +
string.digits) for _ in range(15))
final_image_name = '{}_{}'.format(filename, random_string)
file.save(final_image_name)
dirr = os.getcwd()
osname = os.path.join(dirr, '')
dest_img = osname + final_image_name
try:
        results = D.find_distance_and_angle(dest_img)  # takes the image path
os.remove(dest_img)
return flask.jsonify(results)
except alphabot_exceptions.BeaconNotFoundError:
os.remove(dest_img)
return flask.abort(404)
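# Example request against the endpoint above (sketch; any JPEG will do):
#   curl -F "file=@frame.jpg" http://<EDGE_SERVER_IP_ADDR>:<EDGE_SERVER_PORT>/
# The response is the jsonified result of find_distance_and_angle, or HTTP 404
# when a BeaconNotFoundError is raised.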
def main():
app.run(host=IP_ADDR, port=PORT)#, threaded=True)
if __name__ == '__main__':
main()
| [
"random.choice",
"prometheus_flask_exporter.multiprocess.GunicornPrometheusMetrics",
"os.getenv",
"flask.Flask",
"flask.jsonify",
"os.path.join",
"os.getcwd",
"werkzeug.utils.secure_filename",
"flask.abort",
"prometheus_flask_exporter.PrometheusMetrics",
"os.remove"
] | [((357, 389), 'os.getenv', 'os.getenv', (['"""EDGE_SERVER_IP_ADDR"""'], {}), "('EDGE_SERVER_IP_ADDR')\n", (366, 389), False, 'import os\n'), ((569, 598), 'os.getenv', 'os.getenv', (['"""EDGE_SERVER_PORT"""'], {}), "('EDGE_SERVER_PORT')\n", (578, 598), False, 'import os\n'), ((805, 826), 'flask.Flask', 'flask.Flask', (['__name__'], {}), '(__name__)\n', (816, 826), False, 'import flask\n'), ((837, 867), 'prometheus_flask_exporter.multiprocess.GunicornPrometheusMetrics', 'GunicornPrometheusMetrics', (['app'], {}), '(app)\n', (862, 867), False, 'from prometheus_flask_exporter.multiprocess import GunicornPrometheusMetrics\n'), ((878, 900), 'prometheus_flask_exporter.PrometheusMetrics', 'PrometheusMetrics', (['app'], {}), '(app)\n', (895, 900), False, 'from prometheus_flask_exporter import PrometheusMetrics\n'), ((1101, 1140), 'werkzeug.utils.secure_filename', 'wz_utils.secure_filename', (['file.filename'], {}), '(file.filename)\n', (1125, 1140), True, 'from werkzeug import utils as wz_utils\n'), ((1380, 1391), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1389, 1391), False, 'import os\n'), ((1405, 1427), 'os.path.join', 'os.path.join', (['dirr', '""""""'], {}), "(dirr, '')\n", (1417, 1427), False, 'import os\n'), ((1558, 1577), 'os.remove', 'os.remove', (['dest_img'], {}), '(dest_img)\n', (1567, 1577), False, 'import os\n'), ((1593, 1615), 'flask.jsonify', 'flask.jsonify', (['results'], {}), '(results)\n', (1606, 1615), False, 'import flask\n'), ((1178, 1231), 'random.choice', 'random.choice', (['(string.ascii_uppercase + string.digits)'], {}), '(string.ascii_uppercase + string.digits)\n', (1191, 1231), False, 'import random\n'), ((1676, 1695), 'os.remove', 'os.remove', (['dest_img'], {}), '(dest_img)\n', (1685, 1695), False, 'import os\n'), ((1711, 1727), 'flask.abort', 'flask.abort', (['(404)'], {}), '(404)\n', (1722, 1727), False, 'import flask\n')] |
from multiprocessing.connection import Listener
from multiprocessing.reduction import send_handle
import socket
import os
def server():
work_serv = Listener('\0singe', authkey=b'peekaboo')
worker = work_serv.accept()
print ('Got a worker')
worker_pid = worker.recv()
print (worker_pid)
client = os.open("/", os.O_RDONLY)
send_handle(worker, client, worker_pid)
if __name__ == '__main__':
server()
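# A matching worker process (sketch, not part of this file) connects to the same
# abstract socket, sends its PID, and receives the descriptor with recv_handle:
#
#   from multiprocessing.connection import Client
#   from multiprocessing.reduction import recv_handle
#   import os
#
#   serv = Client('\0singe', authkey=b'peekaboo')
#   serv.send(os.getpid())
#   fd = recv_handle(serv)
#   print('Worker got fd', fd)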
| [
"multiprocessing.reduction.send_handle",
"multiprocessing.connection.Listener",
"os.open"
] | [((151, 193), 'multiprocessing.connection.Listener', 'Listener', (["'\\x00singe'"], {'authkey': "b'peekaboo'"}), "('\\x00singe', authkey=b'peekaboo')\n", (159, 193), False, 'from multiprocessing.connection import Listener\n'), ((309, 334), 'os.open', 'os.open', (['"""/"""', 'os.O_RDONLY'], {}), "('/', os.O_RDONLY)\n", (316, 334), False, 'import os\n'), ((337, 376), 'multiprocessing.reduction.send_handle', 'send_handle', (['worker', 'client', 'worker_pid'], {}), '(worker, client, worker_pid)\n', (348, 376), False, 'from multiprocessing.reduction import send_handle\n')] |
import hashlib
import random
import logging.config
import threading
import time
from ast import literal_eval
from core.data.block_of_beings import EmptyBlock, BlockListOfBeings
from core.data.block_of_garbage import BodyOfGarbageBlock
from core.data.block_of_times import BodyOfTimesBlock
from core.data.node_info import NodeInfo
from core.data.network_message import NetworkMessageType, NetworkMessage, SubscribeTopics
from core.data.genesis_block import GenesisBlock
from core.storage.storage_of_garbage import StorageOfGarbage
from core.user.user import User
from core.node.main_node import MainNode
from core.network.net import PUB, SUB, Server, Client
from core.consensus.node_management import NodeManager
from core.consensus.block_generate import CurrentMainNode, NewBlockOfBeings, NewBlockOfTimes, NewBlockOfGarbage
from core.consensus.vote_compute import VoteCount
from core.consensus.data import ApplicationForm, ReplyApplicationForm, VoteMessage, LongTermVoteMessage, \
ApplicationFormActiveDelete, ReplyApplicationFormActiveDelete
from core.consensus.data import NodeDelApplicationForm
from core.consensus.block_verify import BlockVerify
from core.storage.storage_of_beings import StorageOfBeings
from core.storage.storage_of_temp import StorageOfTemp
from core.storage.storage_of_galaxy import StorageOfGalaxy
from core.utils.ciphersuites import CipherSuites
from core.utils.server_sdk import SDK, ChainAsset
from core.utils.serialization import SerializationBeings, SerializationApplicationForm, \
SerializationReplyApplicationForm, SerializationNetworkMessage, SerializationVoteMessage, SerializationTimes, \
SerializationLongTermVoteMessage, SerializationGarbage, SerializationApplicationFormActiveDelete, \
SerializationReplyApplicationFormActiveDelete
from core.utils.network_request import MainNodeIp
from core.utils.system_time import STime
from core.utils.download import RemoteChainAsset
from core.config.cycle_Info import ElectionPeriodValue
logger = logging.getLogger("main")
class APP:
def __init__(self, sk_string, pk_string, server_url):
        self.currentEpoch = 0  # current epoch
        self.storageOfBeings = StorageOfBeings()  # storage for blocks of beings
        self.storageOfTemp = StorageOfTemp()  # temporary-area storage
        self.storageOfGalaxy = StorageOfGalaxy()  # storage for blocks of times
        self.storageOfGarbage = StorageOfGarbage()  # storage for garbage blocks
        self.user = User()  # user
        self.user.login(sk_string, pk_string)
        self.mainNode = MainNode(self.user, server_url, self.storageOfTemp)  # main node
        self.nodeManager = NodeManager(user=self.user, main_node=self.mainNode,
                                       storage_of_temp=self.storageOfTemp)  # node management
        self.voteCount = VoteCount(storage_of_beings=self.storageOfBeings, storage_of_temp=self.storageOfTemp,
                                   main_node=self.mainNode, storage_of_times=self.storageOfGalaxy,
                                   storage_of_garbage=self.storageOfGarbage)  # vote counting
        self.blockVerify = BlockVerify(storage_of_beings=self.storageOfBeings)
        self.pub = PUB()  # publisher
        self.pub.start()
        self.subList = []  # subscription list
        self.client = Client(main_node_list=self.mainNode.mainNodeList)  # client
        self.server = Server(user=self.user, node_manager=self.nodeManager, pub=self.pub, main_node=self.mainNode,
                             storage_of_temp=self.storageOfTemp, vote_count=self.voteCount, getEpoch=self.getEpoch,
                             getElectionPeriod=self.getElectionPeriod)  # server
        self.server.start()
        # back-end SDK
        self.webServerSDK = SDK()
        # block assets served on this node's server side
        self.chainAsset = ChainAsset()
        # block assets of other main nodes
        self.remoteChainAsset = RemoteChainAsset()
def addEpoch(self):
self.currentEpoch += 1
self.storageOfTemp.setEpoch(self.currentEpoch)
def getEpoch(self):
return self.currentEpoch
def setEpoch(self, epoch):
self.currentEpoch = epoch
self.storageOfTemp.setEpoch(epoch)
def getElectionPeriod(self):
current_election_period = int(self.getEpoch() / ElectionPeriodValue)
return current_election_period
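        # Example: if ElectionPeriodValue were 100, epochs 0-99 would map to
        # election period 0 and epoch 250 to period 2 (integer division above).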
    # Periodic events, handled in a separate thread
def dealPeriodicEvents(self_out):
class PeriodicEvents(threading.Thread):
def __init__(self):
super().__init__()
self.name = "periodic_events"
logger.info("周期事件处理器初始化完成")
def run(self) -> None:
logger.info("周期事件处理器启动完成")
while True:
try:
if self_out.getEpoch() % ElectionPeriodValue == 0 and self_out.getEpoch() != 0:
logger.info("周期事件处理器暂停半小时")
time.sleep(60)
else:
time.sleep(30)
logger.info("周期事件开始处理")
# 读取待发布的众生区块
if self_out.storageOfTemp.getDataCount() < 5:
logger.info("检测server是否有待发布的区块")
webserver_beings_list = self_out.webServerSDK.getBeings()
self_out.storageOfTemp.saveBatchData(webserver_beings_list)
# 检测有无已经审核通过的,提交在本节点的申请书
logger.info("检测有无已经审核通过的,提交在本节点的申请书")
self_out.applyNewNodeJoin()
# 检测有无已经审核通过的,从其他主节点接受到的申请书
logger.info("检测有无已经审核通过的,从其他主节点接受到的申请书")
self_out.replyNewNodeJoin()
# 检测是否有投票完成确认加入或被拒绝加入主节点的申请书
logger.info("检测是否有投票完成确认加入或被拒绝加入主节点的申请书")
self_out.checkNewNodeJoin()
# 检测有无已经审核通过的,提交在本节点的申请书(主动删除节点)
logger.info("检测有无待广播的该主节点提交的申请书(主动删除节点)")
self_out.applyNodeDelete()
# 检测有无已经审核通过的,从其他主节点接受到的申请书(主动删除节点)
logger.info("检测有无已经审核通过的,从其他主节点接受到的申请书(主动删除节点)")
self_out.replyNodeActiveDelete()
# 检测是否有投票完成,同意删除节点或不同意删除
logger.info("检测是否有投票完成,同意删除节点或不同意删除")
self_out.checkNodeActiveDelete()
logger.info("检测是否有待广播的短期票投票消息")
# 检测是否有待广播的短期票投票消息
self_out.broadcastVotingInfo()
# 检测是否有待广播的长期票消息
logger.info("检测是否有待广播的长期票消息")
self_out.broadcastLongTermVotingInfo()
# 检测是否有待生成的时代区块
logger.info("检测是否有待生成的时代区块")
self_out.checkAndGenerateBlockOfTimes()
# 检测是否有待生成的垃圾区块
logger.info("检测是否有待生成的垃圾区块")
self_out.checkAndGenerateBlockOfGarbage()
logger.info("周期事件处理完成")
except Exception as err:
logger.error("周期事件出现错误")
logger.error(err, exc_info=True)
periodic_events = PeriodicEvents()
periodic_events.start()
    # Add a subscription
def addSub(self, ip):
sub = SUB(ip=ip, pub=self.pub, blockListOfBeings=self.mainNode.currentBlockList,
web_server_sdk=self.webServerSDK, storage_of_garbage=self.storageOfGarbage,
user=self.user, vote_count=self.voteCount, node_manager=self.nodeManager,
main_node=self.mainNode, reSubscribe=self.reSubscribe, storage_of_temp=self.storageOfTemp,
getEpoch=self.getEpoch, getElectionPeriod=self.getElectionPeriod,
storage_of_galaxy=self.storageOfGalaxy, storage_of_beings=self.storageOfBeings,
node_del_application_form_list=self.mainNode.nodeDelApplicationFormList, client=self.client)
sub.start()
self.subList.append(sub)
    # Remove a subscription
def delSub(self, ip: str):
count = len(self.subList)
for i in range(count):
if ip == self.subList[i].name:
self.subList[i].stop()
del self.subList[i]
break
    # Remove all previous subscriptions
def stopAllSub(self, last_sub):
lastSub = last_sub
for sub_i in lastSub:
ip = sub_i.name
self.delSub(ip)
logger.info("已删除之前订阅,当前订阅数量为" + str(self.mainNode.mainNodeList.getNodeCount()))
# 重新订阅32个链接
def reSubscribe(self):
logger.info("开始重新订阅")
lastSub = self.subList.copy()
node = self.mainNode.mainNodeList.getNodeCount()
NUMBER_OF_SUBSCRIPTION = 32
count = NUMBER_OF_SUBSCRIPTION
if node < NUMBER_OF_SUBSCRIPTION:
count = node
node_list = random.sample(self.mainNode.mainNodeList.getNodeList(), count)
for node_i in node_list:
ip = node_i["node_info"]["node_ip"]
self.addSub(ip)
logger.info("订阅完成,当前订阅数量为" + str(self.mainNode.mainNodeList.getNodeCount()))
# 删除之前订阅
self.stopAllSub(lastSub)
logger.info("重新订阅完成")
# 读入主节点列表,通过配置文件提供的种子IP
def loadMainNodeListBySeed(self):
ip_list = MainNodeIp().getIpList()
logger.info("已经获得主节点列表")
logger.info(ip_list)
data = NetworkMessage(mess_type=NetworkMessageType.Get_Main_Node_List, message=None)
serial_data = SerializationNetworkMessage.serialization(data)
is_get = False
for ip in ip_list:
logger.info("连接主节点IP:" + str(ip))
try:
res = self.client.sendMessageByIP(ip=ip, data=str(serial_data).encode("utf-8"))
self.mainNode.mainNodeList.setNodeList(literal_eval(bytes(res).decode("utf-8")))
logger.info("已经连接主节点,IP:" + str(ip))
is_get = True
break
except Exception as err:
logger.error(err, exc_info=True)
return is_get
    # Get the current epoch from the other main nodes
def getCurrentEpochByOtherMainNode(self):
node_ip_list = []
for main_node in self.mainNode.mainNodeList.getNodeList():
node_ip_list.append(main_node["node_info"]["node_ip"])
random.shuffle(node_ip_list)
serial_data = SerializationNetworkMessage.serialization(
NetworkMessage(NetworkMessageType.Get_Current_Epoch, message=None))
while True:
ip = random.choice(node_ip_list)
try:
res = self.client.sendMessageByIP(ip=ip, data=str(serial_data).encode("utf-8"))
if self.getEpoch() == int(res):
return True
else:
self.setEpoch(int(res))
return False
except Exception as err:
time.sleep(1)
logger.error(err, exc_info=True)
    # Synchronize the blocks of beings
def synchronizedBlockOfBeings(self):
server_url_list = []
for main_node in self.mainNode.mainNodeList.getNodeList():
server_url_list.append(main_node["node_info"]["server_url"])
logger.info("众生区块开始同步")
# 检测已经存储的区块
storage_epoch = self.storageOfBeings.getMaxEpoch()
logger.info("目前已经存储的区块的epoch为:" + str(storage_epoch))
verify_epoch = self.blockVerify.verifyBlockOfBeings(storage_epoch)
logger.info("经过验证的存储区块的epoch为:" + str(verify_epoch))
self.storageOfBeings.delBlocksByEpoch(verify_epoch, storage_epoch)
if self.getEpoch() > 0:
start_epoch = verify_epoch
while True:
server_url = random.choice(server_url_list)
try:
if start_epoch + 1024 < self.getEpoch():
end_epoch = start_epoch + 1024
else:
end_epoch = self.getEpoch()
epoch_list = self.remoteChainAsset.getEpochListOfBeingsChain(server_url, start_epoch,
end_epoch - start_epoch)
if epoch_list == "500":
logger.warning("获取epoch列表失败,server_url:" + server_url)
time.sleep(1)
continue
for epoch_i in epoch_list:
logger.info("众生区块同步中,epoch:" + str(epoch_i))
server_url = random.choice(server_url_list)
block_list_of_beings = self.remoteChainAsset.getChainOfBeings(url=server_url, epoch=epoch_i)
i = 0
while block_list_of_beings == "500":
i += 1
logger.warning("第" + str(i) + "次尝试,epoch:" + str(epoch_i) + "server_url:" + server_url)
server_url = random.choice(server_url_list)
block_list_of_beings = self.remoteChainAsset.getChainOfBeings(url=server_url, epoch=epoch_i)
self.chainAsset.saveBatchBlockOfBeings(block_list_of_beings)
self.storageOfBeings.saveBatchBlock(block_list_of_beings)
logger.info("区块Epoch已经同步至:" + str(end_epoch))
if end_epoch == self.getEpoch():
logger.info("众生区块同步完成")
break
start_epoch = end_epoch
except Exception as err:
logger.warning("众生区块同步获取失败,远程主节点url:" + server_url)
logger.error(err, exc_info=True)
time.sleep(1)
    # Synchronize the blocks of times
def synchronizedBlockOfTimes(self):
server_url_list = []
for main_node in self.mainNode.mainNodeList.getNodeList():
server_url_list.append(main_node["node_info"]["server_url"])
logger.info("时代区块开始同步")
# 检测已经同步至
start_election_period = self.storageOfGalaxy.getMaxElectionPeriod()
if self.getEpoch() > 0:
while True:
server_url = random.choice(server_url_list)
try:
res = self.remoteChainAsset.getChainOfTimes(url=server_url, election_period=start_election_period)
if res != "500":
if res == "404":
start_election_period += 1
else:
self.storageOfGalaxy.addBatchBlockOfGalaxy(block_list_of_galaxy=res)
logger.info("已经保存到数据库")
self.chainAsset.saveBlockOfTimes(res)
logger.info("已经保存为静态文件")
start_election_period += 1
else:
logger.warning("时代区块同步获取失败,status_code:500,远程主节点url:" + server_url)
time.sleep(1)
if start_election_period >= self.getElectionPeriod():
logger.info("时代区块同步完成")
break
logger.info("时代区块同步至,election_period:" + str(start_election_period))
except Exception as err:
logger.warning("时代区块同步获取失败,远程主节点url:" + server_url)
logger.error(err, exc_info=True)
time.sleep(1)
    # Synchronize the garbage blocks
def synchronizedBlockOfGarbage(self):
server_url_list = []
for main_node in self.mainNode.mainNodeList.getNodeList():
server_url_list.append(main_node["node_info"]["server_url"])
logger.info("时代区块开始同步")
start_election_period = self.storageOfGarbage.getMaxElectionPeriod()
if self.getEpoch() > 0:
while True:
server_url = random.choice(server_url_list)
try:
res = self.remoteChainAsset.getChainOfGarbage(url=server_url, election_period=start_election_period)
if res != "500":
if res == "404":
start_election_period += 1
else:
self.storageOfGarbage.addBatchBlockOfGarbage(block_list_of_garbage=res)
self.chainAsset.saveBlockOfGarbage(res)
start_election_period += 1
else:
logger.warning("垃圾区块同步获取失败,远程主节点url:" + server_url)
time.sleep(1)
if start_election_period >= self.getElectionPeriod():
logger.info("垃圾区块同步完成")
break
logger.info("垃圾区块同步至,election_period:" + str(start_election_period))
except Exception as err:
logger.warning("垃圾区块同步获取失败,远程主节点url:" + server_url)
logger.error(err, exc_info=True)
time.sleep(1)
    # Data recovery
def blockRecoveryOfBeings(self):
logger.info("开始恢复众生区块")
info_list = []
for main_node in self.mainNode.mainNodeList.getNodeList():
info_list.append([main_node["node_info"]["server_url"], main_node["node_info"]["node_ip"]])
info = random.choice(info_list)
logger.info("获取最新期次")
i = 0
is_sync = self.remoteChainAsset.getCurrentEpoch(self.getEpoch, self.client, info[1])
while is_sync <= 0:
i += 1
logger.warning("获取最新期次失败,第" + str(i) + "次尝试")
info = random.choice(info_list)
is_sync = self.remoteChainAsset.getCurrentEpoch(self.getEpoch, self.client, info[1])
epoch_list = self.remoteChainAsset.getEpochListOfBeingsChain(url=info[0], offset=self.getEpoch(), count=is_sync)
logger.info("获取未同步的期次列表")
i = 0
while epoch_list == "500":
i += 1
logger.warning("获取未同步的期次列表失败,第" + str(i) + "次尝试")
epoch_list = self.remoteChainAsset.getEpochListOfBeingsChain(url=info[0], offset=self.getEpoch(),
count=is_sync)
for epoch_i in epoch_list:
info = random.choice(info_list)
block_list_of_beings = self.remoteChainAsset.getChainOfBeings(url=info[0], epoch=epoch_i)
while block_list_of_beings == "500":
info = random.choice(info_list)
block_list_of_beings = self.remoteChainAsset.getChainOfBeings(url=info[0], epoch=epoch_i)
# 此时该期次的区块已经同步完成
self.storageOfBeings.saveBatchBlock(block_list_of_beings)
self.chainAsset.saveBatchBlockOfBeings(block_list_of_beings)
logger.info("epoch:" + str(epoch_i) + ",众生区块恢复完成")
self.mainNode.currentBlockList.setFinish()
logger.info("众生区块全部恢复完成")
    # Store the genesis blocks
def storageGenesisBlock(self):
genesis_block = GenesisBlock()
block_list_of_beings = BlockListOfBeings()
block_list_of_beings.addBlock(block=genesis_block.getBlockOfBeings())
self.storageOfBeings.saveCurrentBlockOfBeings(blockListOfBeings=block_list_of_beings)
self.chainAsset.saveBlockOfBeings(block_list_of_beings=block_list_of_beings)
logger.info("众生创世区块存储完成")
block_of_times = genesis_block.getBlockOfTimes()
self.storageOfGalaxy.addBlockOfGalaxy(block_of_galaxy=block_of_times)
self.chainAsset.saveBlockOfTimes([block_of_times])
logger.info("时代创世区块存储完成")
block_of_garbage = genesis_block.getBlockOfGarbage()
self.storageOfGarbage.addBlockOfGarbage(block_of_garbage=block_of_garbage)
self.chainAsset.saveBlockOfGarbage([block_of_garbage])
logger.info("垃圾创世区块存储完成")
    # Watch the node_join_other table in the database; rows with is_audit = 1 or 2 have a reply pending
    # Reply to a new node's join application, agreeing or refusing
def replyNewNodeJoin(self):
application_form_list = self.storageOfTemp.getListOfFinishAuditApplicationForm()
for info in application_form_list:
reply_application_form = ReplyApplicationForm(new_node_id=info["node_id"], new_node_user_pk=info["user_pk"],
start_time=info["node_create_time"],
is_agree=info["is_audit"])
reply_signature = self.user.sign(str(reply_application_form.getInfo()).encode("utf-8"))
reply_application_form.setSignature(reply_signature)
reply_application_form.setUserPk(self.user.getUserPKString())
serial_reply_application_form = SerializationReplyApplicationForm.serialization(reply_application_form)
# 消息签名
network_message = NetworkMessage(mess_type=NetworkMessageType.ReplayNewNodeApplicationJoin,
message=serial_reply_application_form)
network_message.setClientInfo(user_pk=info["main_node_user_pk"])
client_signature = self.user.sign(network_message.getClientAndMessageDigest())
network_message.setSignature(client_signature)
serial_network_message = SerializationNetworkMessage.serialization(network_message)
self.client.sendMessageByMainNodeUserPk(user_pk=info["main_node_user_pk"],
data=str(serial_network_message).encode("utf-8"))
    # Watch the node_delete_other_active table in the database; rows with is_audit = 1 or 2 have a reply pending
    # Reply to a main-node deletion application
def replyNodeActiveDelete(self):
application_form_active_delete = self.storageOfTemp.getFinishApplicationFormActiveDelete()
if application_form_active_delete is None:
return
del_node_id = application_form_active_delete["del_node_id"]
application_content = application_form_active_delete["application_content"]
application_time = application_form_active_delete["application_time"]
is_audit = application_form_active_delete["is_audit"]
if is_audit == 1:
logger.info("发送同意删除消息")
else:
logger.info("发送拒绝删除消息")
main_node_user_pk = application_form_active_delete["main_node_user_pk"]
main_node_signature = application_form_active_delete["main_node_signature"]
reply_application_form_active_delete = ReplyApplicationFormActiveDelete(del_node_id=del_node_id,
start_time=application_time,
is_agree=is_audit,
apply_user_pk=main_node_user_pk)
reply_signature = self.user.sign(str(reply_application_form_active_delete.getInfo()).encode("utf-8"))
reply_application_form_active_delete.setSignature(reply_signature)
reply_application_form_active_delete.setUserPk(self.user.getUserPKString())
serial_reply_application_form_active_delete = SerializationReplyApplicationFormActiveDelete.serialization(
reply_application_form_active_delete)
# 消息签名
network_message = NetworkMessage(mess_type=NetworkMessageType.ReplyNodeActiveDeleteApplication,
message=serial_reply_application_form_active_delete)
network_message.setClientInfo(user_pk=self.user.getUserPKString())
client_signature = self.user.sign(network_message.getClientAndMessageDigest())
network_message.setSignature(client_signature)
serial_network_message = SerializationNetworkMessage.serialization(network_message)
self.client.sendMessageByMainNodeUserPk(user_pk=main_node_user_pk,
data=str(serial_network_message).encode("utf-8"))
logger.info("消息发送完成")
    # Broadcast a new node's join application to the whole network
    # At this point the current main node has already approved it
def applyNewNodeJoin(self):
# 调用SDK读取审核通过,但是待广播的主节点加入申请书
application_form_dict_list = self.webServerSDK.getApplicationForm()
for application_form_dict in application_form_dict_list:
node_id = application_form_dict["node_id"]
user_pk = application_form_dict["user_pk"]
node_ip = application_form_dict["node_ip"]
server_url = application_form_dict["server_url"]
node_create_time = int(application_form_dict["node_create_time"])
node_signature = application_form_dict["node_signature"]
application = application_form_dict["application"]
application_time = STime.getTimestamp()
application_signature = application_form_dict["application_signature"]
node_info = NodeInfo(node_id=node_id, user_pk=user_pk, node_ip=node_ip, create_time=node_create_time,
server_url=server_url)
node_info.nodeSignature = node_signature
application_form = ApplicationForm(node_info=node_info, start_time=application_time, content=application,
application_signature_by_new_node=application_signature)
# 验证新节点信息和签名
if not CipherSuites.verify(pk=user_pk, signature=node_signature,
message=str(node_info.getInfo()).encode("utf-8")):
# 新节点信息与签名不匹配
logger.warning("新节点信息与签名不匹配")
continue
# 验证申请书和签名
if not CipherSuites.verify(pk=user_pk, signature=application_signature,
message=str(application).encode("utf-8")):
# 申请书与新节点签名不匹配
logger.warning("申请书与新节点签名不匹配")
continue
# 增加当前主节点签名
main_node_signature = self.user.sign(str(application_form.application).encode("utf-8"))
application_form.setMainNodeSignature(main_node_signature)
application_form.setMainNodeUserPk(self.user.getUserPKString())
# 添加数据库数据,准备接受其他主节点的意见
self.storageOfTemp.insertApplicationForm(node_id=node_id, user_pk=user_pk, node_ip=node_ip,
server_url=server_url,
node_create_time=node_create_time, node_signature=node_signature,
application=application, application_time=application_time,
application_signature=application_signature,
agree_count=1, main_node_user_pk=self.user.getUserPKString(),
main_node_signature=main_node_signature)
serial_application_form = SerializationApplicationForm.serialization(application_form)
# 全网广播
self.pub.sendMessage(topic=SubscribeTopics.getNodeTopicOfApplyJoin(), message=serial_application_form)
    # Broadcast an active node-deletion application to the whole network
def applyNodeDelete(self):
# 调用SDK读取,待广播的主动删除某主节点的申请书
application_form_active_delete_dict_list = self.webServerSDK.getApplicationFormActiveDelete()
for application_form_active_delete_dict in application_form_active_delete_dict_list:
node_id = application_form_active_delete_dict["node_id"]
application_content = application_form_active_delete_dict["application_content"]
application_time = STime.getTimestamp()
application_form_active_delete = ApplicationFormActiveDelete(del_node_id=node_id,
start_time=application_time,
content=application_content)
signature = self.user.sign(str(application_form_active_delete.getInfo()).encode("utf-8"))
application_form_active_delete.setMainNodeSignature(signature)
application_form_active_delete.setMainNodeUserPk(self.user.getUserPKString())
# 验证申请书和签名
if not CipherSuites.verify(pk=application_form_active_delete.getMainNodeUserPk(),
signature=application_form_active_delete.getMainNodeSignature(),
message=str(application_form_active_delete.getInfo()).encode("utf-8")):
# 申请书与新节点签名不匹配
logger.warning("申请书签名不匹配")
continue
# 增加当前主节点签名
# 添加数据库数据,准备接受其他主节点的意见
self.storageOfTemp.insertApplicationFormActiveDelete(node_id=node_id,
application_content=application_content,
application_time=application_time,
main_node_signature=signature,
main_node_user_pk=self.user.getUserPKString())
serial_application_form_active_delete = SerializationApplicationFormActiveDelete.serialization(
application_form_active_delete)
# 广播
self.pub.sendMessage(topic=SubscribeTopics.getNodeTopicOfActiveApplyDelete(),
message=serial_application_form_active_delete)
    # Watch the node_join table in the database
    # Once agree_count reaches the required threshold, broadcast the node-join confirmation immediately
    # If no confirmation arrives within the allowed time, delete the application
def checkNewNodeJoin(self):
# 获取node_join表中所有is_audit=0的申请表
for node_id in self.storageOfTemp.getNodeIdListOfWaitingAuditApplicationForm():
# 检测是否超过有效时间,若超过删除该申请书
if not self.nodeManager.isTimeReplyApplicationForm(node_id):
logger.info("该申请书已经超过有效时间,申请书新节点ID为:" + node_id)
continue
# 检测是否达到成为新节点的条件
res = self.nodeManager.isSuccessReplyApplicationForm(node_id)
if res[0]:
list_of_serial_reply_application_form = res[1]
application_form = self.storageOfTemp.getApplicationFormByNodeId(new_node_id=node_id)
serial_application_form = SerializationApplicationForm.serialization(application_form)
# 全网广播节点加入确认消息
self.pub.sendMessage(topic=SubscribeTopics.getNodeTopicOfJoin(),
message=[serial_application_form, list_of_serial_reply_application_form])
# 将该节点加入主节点列表
application_form = self.storageOfTemp.getApplicationFormByNodeId(new_node_id=node_id)
node_info = NodeInfo(node_id=application_form.newNodeInfo["node_id"],
user_pk=application_form.newNodeInfo["user_pk"],
node_ip=application_form.newNodeInfo["node_ip"],
create_time=application_form.newNodeInfo["create_time"],
server_url=application_form.newNodeInfo["server_url"])
node_info.setNodeSignature(application_form.newNodeSignature)
# 检测主节点列表中是否已经有该节点
if not self.mainNode.mainNodeList.userPKisExit(user_pk=node_info.userPk):
self.mainNode.mainNodeList.addMainNode(node_info=node_info)
logger.info("新节点已加入,节点信息为:")
logger.info(node_info.getInfo())
# 将该申请书设置为已经完成申请
self.storageOfTemp.finishApplicationFormByNodeId(node_id)
# 重新计算订阅列表,重新创建32个订阅链接
self.reSubscribe()
else:
logger.warning("节点已经存在,节点ID为:" + node_info.nodeId)
    # Once agree_count reaches the required threshold, broadcast the message immediately
    # If no confirmation arrives within the allowed time, delete the application
def checkNodeActiveDelete(self):
# 获取node_active_delete表中所有is_audit=0的申请表
for node_id in self.storageOfTemp.getNodeIdListOfApplicationFormActiveDeleteInProgress():
# 检测是否超过有效时间,若超过删除该申请书
if not self.nodeManager.isTimeReplyApplicationFormActiveDelete(node_id):
logger.info("该申请书已经超过有效时间,申请删除的节点ID为:" + node_id)
continue
# 检测是否达到删除节点的条件
res = self.nodeManager.isSuccessReplyApplicationFormActiveDelete(node_id)
if res[0]:
list_of_serial_reply_application_form_active_delete = res[1]
application_form_active_delete = self.storageOfTemp.getApplicationFormActiveDeleteByNodeId(
del_node_id=node_id,
is_audit=1)
serial_application_form_active_delete = SerializationApplicationFormActiveDelete.serialization(
application_form_active_delete)
# 删除主节点
self.mainNode.mainNodeList.delMainNodeById(node_id=application_form_active_delete.delNodeId)
# 重新订阅
self.reSubscribe()
# 全网广播节点加入确认消息
self.pub.sendMessage(topic=SubscribeTopics.getNodeTopicOfActiveConfirmDelete(),
message=[serial_application_form_active_delete,
list_of_serial_reply_application_form_active_delete])
    # Block-of-beings generation period
    # 0-30 s
def startNewEpoch(self):
logger.info("众生区块生成周期开始,Epoch:" + str(self.getEpoch()) + ",ElectionPeriod:" + str(self.getElectionPeriod()))
# 计算本次产生区块的节点列表
self.mainNode.currentMainNode = CurrentMainNode(self.mainNode.mainNodeList,
self.storageOfBeings.getLastBlockByCache(),
self.getEpoch).getNodeListOfGenerateBlock()
#
# 若本次主节点被选中产生区块,则检查暂存区数据数量,若大于0,则直接产生区块,若等于0,则调用后端sdk获取数据。只有在没获得数据的情况,
# 才广播不产生区块的消息。
# 若本次主节点没有被选中产生区块,则检查暂存区数据数量,若数量大于5,则不进行任何操作,若小于5,则调用后端SDK获取数据。
#
for node in self.mainNode.currentMainNode.getNodeList():
# 当前节点是否生成区块
if node["node_info"]["node_id"] == self.mainNode.nodeInfo.nodeId:
logger.info("当前节点已被共识机制选中")
# 判断临时存储区是否有数据,若有数据,则生成区块,否则发送不生成区块的消息
temp_beings_count = self.storageOfTemp.getDataCount()
if (temp_beings_count > 0) or (self.webServerSDK.getBeingsCount() > 0):
logger.info("当前节点生成区块")
# 生成区块
if temp_beings_count <= 0:
logger.info("调用web server SDK 读入数据")
webserver_beings_list = self.webServerSDK.getBeings()
self.storageOfTemp.saveBatchData(webserver_beings_list)
data = self.storageOfTemp.getTopData()
body = data["body"]
user_pk = [data["user_pk"], self.user.getUserPKString()]
main_node_user_signature = self.user.sign(body)
body_signature = [data["body_signature"], main_node_user_signature]
prev_block_header = []
pre_block = []
for block in self.storageOfBeings.getLastBlockList().getListOfOrthogonalOrder():
prev_block_header.append(block.getBlockHeaderSHA256())
pre_block.append(block.getBlockSHA256())
epoch = self.getEpoch()
try:
new_block = NewBlockOfBeings(user_pk=user_pk, body_signature=body_signature, body=body,
epoch=epoch, pre_block=pre_block,
prev_block_header=prev_block_header).getBlock()
serialization_block = SerializationBeings.serialization(block_of_beings=new_block)
# 广播消息
serial_block_mess = SerializationNetworkMessage.serialization(
NetworkMessage(mess_type=NetworkMessageType.NEW_BLOCK, message=serialization_block))
self.pub.sendMessage(topic=SubscribeTopics.getBlockTopicOfBeings(), message=serial_block_mess)
# 保存至当前区块列表
self.mainNode.currentBlockList.addBlock(block=new_block)
except Exception as err:
# 产生错误(如签名验证错误)后,发送不产生区块消息
logger.error(err, exc_info=True)
# 广播无区块产生的消息
logger.info("当前节点不生成区块")
empty_block = EmptyBlock(user_pk=self.user.getUserPKString(), epoch=self.getEpoch())
signature = self.user.sign(str(empty_block.getInfo()).encode("utf-8"))
empty_block.setSignature(signature)
mess = NetworkMessage(mess_type=NetworkMessageType.NO_BLOCK, message=empty_block.getMessage())
serial_mess = SerializationNetworkMessage.serialization(mess)
# 保存至当前区块列表
self.mainNode.currentBlockList.addMessageOfNoBlock(empty_block=empty_block)
self.pub.sendMessage(topic=SubscribeTopics.getBlockTopicOfBeings(), message=serial_mess)
else:
# 广播无区块产生的消息
logger.info("当前节点不生成区块")
empty_block = EmptyBlock(user_pk=self.user.getUserPKString(), epoch=self.getEpoch())
signature = self.user.sign(str(empty_block.getInfo()).encode("utf-8"))
empty_block.setSignature(signature)
mess = NetworkMessage(mess_type=NetworkMessageType.NO_BLOCK, message=empty_block.getMessage())
serial_mess = SerializationNetworkMessage.serialization(mess)
# 保存至当前区块列表
self.mainNode.currentBlockList.addMessageOfNoBlock(empty_block=empty_block)
self.pub.sendMessage(topic=SubscribeTopics.getBlockTopicOfBeings(), message=serial_mess)
break
    # 30 seconds after a new period starts, run this check
def startCheckAndApplyDeleteNode(self):
logger.info("众生区块生成周期开始30秒,Epoch:" + str(self.getEpoch()) + ",ElectionPeriod:" + str(self.getElectionPeriod()))
node_of_check_node = CurrentMainNode(self.mainNode.mainNodeList, self.storageOfBeings.getLastBlockByCache(),
self.getEpoch).getNodeListOfCheckNode()
# 检测当前节点是否为本次发布节点的其中之一
# 检测当前节点是否为有权限发送广播
# 满足上述一项即可
if self.mainNode.currentMainNode.userPKisExit(
user_pk=self.user.getUserPKString()) or \
node_of_check_node.userPKisExit(user_pk=self.user.getUserPKString()):
for node in self.mainNode.currentMainNode.getNodeList():
user_pk = node["node_info"]["user_pk"]
node_id = node["node_info"]["node_id"]
# 检查是否存在应该收到,但是未收到的区块
if not self.mainNode.currentBlockList.userPkIsExit(user_pk=user_pk):
logger.info("存在应该产生区块,但是未收到信息的节点")
logger.info("节点ID为:" + node_id)
# 没有收到该节点产生的区块或消息
# 制作申请书,删除该节点
node_del_application_form = NodeDelApplicationForm(del_node_id=node_id, del_user_pk=user_pk,
current_epoch=self.getEpoch())
signature = self.user.sign(str(node_del_application_form.getInfo()).encode("utf-8"))
node_del_application_form.setApplySignature(signature)
node_del_application_form.setApplyUserPk(self.user.getUserPKString())
# 广播申请删除该节点的消息
self.pub.sendMessage(topic=SubscribeTopics.getNodeTopicOfApplyDelete(),
message=node_del_application_form.getMessage())
# 暂存该申请书
# 在遇到其他节点申请时直接同意或收到区块后取消
self.mainNode.nodeDelApplicationFormList.append(node_del_application_form)
    # Check whether all blocks have been collected; once complete, save them to the database
    # Checked once per second
def startCheckAndSave(self) -> bool:
logger.info("众生区块生成周期开始40秒后,Epoch:" + str(self.getEpoch()) + ",ElectionPeriod:" + str(self.getElectionPeriod()))
is_finish = True
if not self.mainNode.currentBlockList.isFinish:
logger.debug("收集到的空区块消息")
for emptyBlock in self.mainNode.currentBlockList.listOfNoBlock:
logger.debug(emptyBlock.getMessage())
logger.debug("收集到的区块消息")
for block in self.mainNode.currentBlockList.list:
logger.debug(block.getBlockHeader())
for node in self.mainNode.currentMainNode.getNodeList():
user_pk = node["node_info"]["user_pk"]
node_id = node["node_info"]["node_id"]
# 检查是否存在应该收到,但是未收到的区块
# 区块消息
if not self.chainAsset.beingsIsExitByEpoch(self.getEpoch()):
if (not self.mainNode.currentBlockList.userPkIsBlock(
user_pk) and (not self.mainNode.currentBlockList.userPkIsEmptyBlock(user_pk))):
is_finish = False
logger.info("存在未收到的区块,应产生该区块的节点ID为:" + str(node_id))
if is_finish:
if not self.chainAsset.beingsIsExitByEpoch(self.getEpoch()):
self.storageOfBeings.saveCurrentBlockOfBeings(blockListOfBeings=self.mainNode.currentBlockList)
self.chainAsset.saveBlockOfBeings(block_list_of_beings=self.mainNode.currentBlockList)
# 存储完成,重置当前区块列表,准备下一个epoch收集
self.mainNode.currentBlockList.reset()
return is_finish
    # Before each election period starts
    # Initialize the vote information of every main node
    # Initialize the vote counts of every ordinary user holding long-term tickets
def initVote(self_out):
class InitVoteOfMainNode(threading.Thread):
def __init__(self):
super().__init__()
self.name = "init_vote"
self.current_election_period = self_out.getElectionPeriod()
logger.info("init_vote初始化完成")
def run(self) -> None:
logger.info("开始计算所有主节点的票数信息")
# 主节点用户
# 初始化本次的票数数据
self_out.voteCount.initVotesOfMainNode(current_election_cycle=self.current_election_period)
# 普通用户的长期票
self_out.voteCount.initPermanentVotesOfSimpleUser(current_election_cycle=self.current_election_period)
logger.info("计算完成")
init_vote_of_main_node = InitVoteOfMainNode()
init_vote_of_main_node.start()
def saveAssetOfTimesAndGarbage(self_out):
class SaveAsset(threading.Thread):
def __init__(self):
super().__init__()
self.name = "save_asset"
self.current_election_period = self_out.getElectionPeriod()
self.chainAsset = self_out.chainAsset
self.storageOfGalaxy = self_out.storageOfGalaxy
self.storageOfGarbage = self_out.storageOfGarbage
logger.info("save_asset初始化完成")
def run(self) -> None:
logger.info("开始保存时代区块列表和垃圾区块列表,选举周期为:" + str(self.current_election_period))
list_of_times = self.storageOfGalaxy.getListOfGalaxyBlockByElectionPeriod(
start=self.current_election_period - 1,
end=self.current_election_period)
self.chainAsset.saveBlockOfTimes(block_list_of_times=list_of_times)
list_of_garbage = self.storageOfGarbage.getListOfGarbageBlockByElectionPeriod(
start=self.current_election_period - 1,
end=self.current_election_period)
self.chainAsset.saveBlockOfGarbage(block_list_of_garbage=list_of_garbage)
logger.info("保存完成")
save_asset = SaveAsset()
save_asset.start()
    # Broadcast short-term-ticket vote messages
def broadcastVotingInfo(self):
logger.info("广播短期票投票消息")
wait_vote_list = self.storageOfTemp.getVoteMessage(status=0)
for wait_vote in wait_vote_list:
logger.debug("待广播短期票投票消息")
logger.debug(wait_vote.getMessage())
# 验证投票信息签名
if not CipherSuites.verify(pk=wait_vote.simpleUserPk, signature=wait_vote.getSignature(),
message=str(wait_vote.getInfoOfSignature()).encode("utf-8")):
logger.warning("签名验证失败,短期票投票信息为:")
logger.warning(wait_vote.getInfo())
# 将待广播短期票投票信息状态设为2
self.storageOfTemp.modifyStatusOfWaitVote(status=2, wait_vote=wait_vote)
continue
# 封装短期票投票消息
# 将toNodeId转为toNodeUserPk
# 将普通用户公钥转为主节点用户公钥
vote_message = VoteMessage()
to_main_node_info = self.mainNode.mainNodeList.getMainNodeByNodeId(node_id=wait_vote.toNodeId)
vote_message.setVoteInfo(to_main_node_user_pk=to_main_node_info["node_info"]["user_pk"],
block_id=wait_vote.blockId, vote_type=wait_vote.voteType,
election_period=wait_vote.electionPeriod, number_of_vote=wait_vote.vote,
main_user_pk=self.user.getUserPKString())
main_node_signature = self.user.sign(str(vote_message.getVoteInfo()).encode("utf-8"))
vote_message.setSignature(main_node_signature)
# 增加普通用户和主节点用户已使用的票数
self.webServerSDK.addUsedVoteOfSimpleUser(user_pk=wait_vote.simpleUserPk, used_vote=wait_vote.vote,
election_period=self.getElectionPeriod())
self.storageOfTemp.addUsedVoteByNodeUserPk(vote=wait_vote.vote,
main_node_user_pk=self.user.getUserPKString())
# 暂存短期票投票消息摘要
self.storageOfTemp.addVoteDigest(election_period=vote_message.electionPeriod, block_id=vote_message.blockId,
vote_message_digest=hashlib.md5(
str(vote_message.getVoteMessage()).encode("utf-8")).hexdigest())
# 该短期票投票是否是针对当前主节点推荐的区块
if self.webServerSDK.isExitTimesBlockQueueByBlockId(
vote_message.blockId) and self.user.getUserPKString() == vote_message.toMainNodeUserPk and int(
vote_message.voteType) == 1:
self.webServerSDK.addVoteOfTimesBlockQueue(beings_block_id=vote_message.blockId,
vote_message=vote_message)
if self.webServerSDK.isExitGarbageBlockQueueByBlockId(
vote_message.blockId) and self.user.getUserPKString() == vote_message.toMainNodeUserPk and int(
vote_message.voteType) == 2:
self.webServerSDK.addVoteOfGarbageBlockQueue(beings_block_id=vote_message.blockId,
vote_message=vote_message)
# 修改读取到的短期票投票信息状态
self.storageOfTemp.modifyStatusOfWaitVote(status=1, wait_vote=wait_vote)
# 广播
self.pub.sendMessage(topic=SubscribeTopics.getVoteMessage(),
message=SerializationVoteMessage.serialization(vote_message=vote_message))
logger.debug("短期票广播完成")
    # Broadcast long-term-ticket vote messages
def broadcastLongTermVotingInfo(self):
logger.info("广播长期票投票消息")
long_term_wait_vote_list = self.storageOfTemp.getLongTermVoteMessage(status=0)
for long_term_wait_vote in long_term_wait_vote_list:
logger.debug("待广播长期票投票消息")
logger.debug(long_term_wait_vote.getMessage())
# 验证投票信息签名
if not CipherSuites.verify(pk=long_term_wait_vote.simpleUserPk,
signature=long_term_wait_vote.getSignature(),
message=str(long_term_wait_vote.getInfoOfSignature()).encode("utf-8")):
logger.warning("签名验证失败,长期投票信息为:")
logger.warning(long_term_wait_vote.getInfo())
# 将待广播短期票投票信息状态设为2
self.storageOfTemp.modifyStatusOfLongTermWaitVote(status=2, wait_vote=long_term_wait_vote)
continue
# 增加普通用户已使用的长期票票数
self.storageOfTemp.addUsedPermanentVoteOfSimpleUser(vote=long_term_wait_vote.vote,
simple_user_pk=long_term_wait_vote.simpleUserPk)
# 封装长期票消息
long_term_vote_message = LongTermVoteMessage()
long_term_vote_message.setVoteInfo(to_main_node_id=long_term_wait_vote.toNodeId,
block_id=long_term_wait_vote.blockId,
vote_type=long_term_wait_vote.voteType,
election_period=long_term_wait_vote.electionPeriod,
number_of_vote=long_term_wait_vote.vote,
simple_user_pk=long_term_wait_vote.simpleUserPk)
long_term_vote_message.setSignature(signature=long_term_wait_vote.getSignature())
# 暂存短期票投票消息摘要
self.storageOfTemp.addVoteDigest(election_period=long_term_vote_message.electionPeriod,
block_id=long_term_vote_message.blockId,
vote_message_digest=hashlib.md5(
str(long_term_vote_message.getVoteMessage()).encode(
"utf-8")).hexdigest())
# 该长期票投票是否是针对当前主节点推荐的区块
to_main_node_info = self.mainNode.mainNodeList.getMainNodeByNodeId(node_id=long_term_wait_vote.toNodeId)
to_main_node_user_pk = to_main_node_info["node_info"]["user_pk"]
if self.webServerSDK.isExitTimesBlockQueueByBlockId(
long_term_vote_message.blockId) and self.user.getUserPKString() == to_main_node_user_pk and long_term_vote_message.voteType == 1:
self.webServerSDK.addPermanentVoteOfTimesBlockQueue(beings_block_id=long_term_vote_message.blockId,
long_term_vote_message=long_term_vote_message)
if self.webServerSDK.isExitGarbageBlockQueueByBlockId(
long_term_vote_message.blockId) and self.user.getUserPKString() == to_main_node_user_pk and long_term_vote_message.voteType == 2:
self.webServerSDK.addPermanentVoteOfGarbageBlockQueue(beings_block_id=long_term_vote_message.blockId,
long_term_vote_message=long_term_vote_message)
# 修改读取到的长期票投票信息状态
self.storageOfTemp.modifyStatusOfLongTermWaitVote(status=1, wait_vote=long_term_wait_vote)
# 广播
self.pub.sendMessage(topic=SubscribeTopics.getLongTermVoteMessage(),
message=SerializationLongTermVoteMessage.serialization(
long_term_vote_message=long_term_vote_message))
logger.debug("长期票广播完成")
    # Check for and generate a times (era) block
def checkAndGenerateBlockOfTimes(self):
logger.info("检测并且生成时代区块")
        # Check whether any times block has reached the required number of votes
vote_of_times_block = self.voteCount.getVotesOfTimesBlockGenerate()
res = self.webServerSDK.getTimesBlockQueueByVotes(votes=vote_of_times_block)
if res is not None:
            # Verify the votes again
beings_block_id = res["beings_block_id"]
serial_vote_list = res["vote_list"]
vote_message_list = []
for vote_i in serial_vote_list:
                # Determine whether this is a long-term or a short-term vote
if "main_user_pk" in vote_i:
                    # Short-term vote
vote_message_list.append(SerializationVoteMessage.deserialization(str(vote_i).encode("utf-8")))
else:
                    # Long-term vote
vote_message_list.append(
SerializationLongTermVoteMessage.deserialization(str(vote_i).encode("utf-8")))
if self.voteCount.checkVotesOfGenerateTimesBlock(beings_block_id=beings_block_id,
vote_message_list=vote_message_list):
logger.info("生在生成时代区块,原众生区块ID为:" + beings_block_id)
                # Update status
self.webServerSDK.modifyStatusOfTimesBlockQueue(beings_block_id=beings_block_id, status=2)
beings_users_pk = self.storageOfBeings.getUserPkByBlockId(block_id=beings_block_id)
body_of_times_block = BodyOfTimesBlock(users_pk=beings_users_pk, block_id=beings_block_id)
body_signature = self.user.sign(str(body_of_times_block.getBody()).encode("utf-8"))
current_election_period = self.getElectionPeriod() - 1
prev_block_header, prev_block = self.storageOfGalaxy.getBlockAbstractByElectionPeriod(
election_period=current_election_period)
while prev_block_header is None:
                    # No times block was generated in the previous election period; keep searching further back
logger.info("current_election_period:" + str(current_election_period) + "没有时代区块产生,继续向前寻找")
current_election_period -= 1
prev_block_header, prev_block = self.storageOfGalaxy.getBlockAbstractByElectionPeriod(
election_period=current_election_period)
logger.debug("上一区块的区块头部哈希和区块哈希")
logger.debug(prev_block_header)
logger.debug(prev_block)
new_block_of_times = NewBlockOfTimes(user_pk=[self.user.getUserPKString()],
election_period=self.getElectionPeriod(),
body_signature=[body_signature], body=body_of_times_block,
pre_block=prev_block,
prev_block_header=prev_block_header).getBlock()
                # Save the times block
logger.info("保存时代区块,时代区块ID:" + new_block_of_times.getBlockID())
self.storageOfGalaxy.addBlockOfGalaxy(block_of_galaxy=new_block_of_times)
                # Broadcast the votes that generated the times block together with the new times block
logger.info("广播生成时代区块的投票信息和生成的时代区块")
serial_block_of_times = SerializationTimes.serialization(new_block_of_times)
self.pub.sendMessage(topic=SubscribeTopics.getBlockTopicOfTimes(),
message=[serial_vote_list, serial_block_of_times])
else:
                # Update status
self.webServerSDK.modifyStatusOfTimesBlockQueue(beings_block_id=beings_block_id, status=4)
logger.warning("再次验证投票发现投票数量未达到生成时代区块标准")
    # Check for and generate a garbage block
def checkAndGenerateBlockOfGarbage(self):
logger.info("检测并且生成垃圾区块")
        # Check whether any garbage block has reached the required number of votes
vote_of_garbage_block = self.voteCount.getVotesOfGarbageBlockGenerate()
res = self.webServerSDK.getGarbageBlockQueueByVotes(votes=vote_of_garbage_block)
if res is not None:
            # Verify the votes again
beings_block_id = res["beings_block_id"]
serial_vote_list = res["vote_list"]
vote_message_list = []
for vote_i in serial_vote_list:
                # Determine whether this is a long-term or a short-term vote
if "main_user_pk" in vote_i:
                    # Short-term vote
vote_message_list.append(SerializationVoteMessage.deserialization(str(vote_i).encode("utf-8")))
else:
                    # Long-term vote
vote_message_list.append(
SerializationLongTermVoteMessage.deserialization(str(vote_i).encode("utf-8")))
if self.voteCount.checkVotesOfGenerateGarbageBlock(beings_block_id=beings_block_id,
vote_message_list=vote_message_list):
logger.info("生在生成垃圾区块,原众生区块ID为:" + beings_block_id)
                # Update status
self.webServerSDK.modifyStatusOfGarbageBlockQueue(beings_block_id=beings_block_id, status=2)
beings_users_pk = self.storageOfBeings.getUserPkByBlockId(block_id=beings_block_id)
body_of_garbage_block = BodyOfGarbageBlock(users_pk=beings_users_pk, block_id=beings_block_id)
body_signature = self.user.sign(str(body_of_garbage_block.getBody()).encode("utf-8"))
current_election_period = self.getElectionPeriod() - 1
prev_block_header, prev_block = self.storageOfGarbage.getBlockAbstractByElectionPeriod(
election_period=current_election_period)
while prev_block_header is None:
                    # No garbage block was generated in the previous election period; keep searching further back
logger.info("current_election_period:" + str(current_election_period) + "没有垃圾区块产生,继续向前寻找")
current_election_period -= 1
prev_block_header, prev_block = self.storageOfGarbage.getBlockAbstractByElectionPeriod(
election_period=current_election_period)
logger.debug("上一区块的区块头部哈希和区块哈希")
logger.debug(prev_block_header)
logger.debug(prev_block)
new_block_of_garbage = NewBlockOfGarbage(user_pk=[self.user.getUserPKString()],
election_period=self.getElectionPeriod(),
body_signature=[body_signature], body=body_of_garbage_block,
pre_block=prev_block,
prev_block_header=prev_block_header).getBlock()
                # Save the garbage block
logger.info("保存垃圾区块,垃圾区块ID:" + new_block_of_garbage.getBlockID())
self.storageOfGarbage.addBlockOfGarbage(block_of_garbage=new_block_of_garbage)
                # Broadcast the votes that generated the garbage block together with the new garbage block
logger.info("广播生成时代区块的投票信息和生成的垃圾区块")
serial_block_of_garbage = SerializationGarbage.serialization(new_block_of_garbage)
self.pub.sendMessage(topic=SubscribeTopics.getBlockTopicOfGarbage(),
message=[serial_vote_list, serial_block_of_garbage])
else:
                # Update status
self.webServerSDK.modifyStatusOfGarbageBlockQueue(beings_block_id=beings_block_id, status=4)
logger.warning("再次验证投票发现投票数量未达到生成垃圾区块标准")
| [
"core.consensus.block_generate.NewBlockOfBeings",
"core.utils.network_request.MainNodeIp",
"core.utils.serialization.SerializationApplicationFormActiveDelete.serialization",
"core.storage.storage_of_beings.StorageOfBeings",
"core.data.genesis_block.GenesisBlock",
"core.storage.storage_of_galaxy.StorageOfGalaxy",
"core.data.node_info.NodeInfo",
"core.consensus.data.ReplyApplicationFormActiveDelete",
"core.consensus.data.ReplyApplicationForm",
"time.sleep",
"core.data.network_message.SubscribeTopics.getBlockTopicOfBeings",
"core.utils.server_sdk.SDK",
"core.data.network_message.SubscribeTopics.getLongTermVoteMessage",
"core.data.block_of_times.BodyOfTimesBlock",
"core.consensus.block_verify.BlockVerify",
"core.network.net.Server",
"core.storage.storage_of_temp.StorageOfTemp",
"core.utils.serialization.SerializationTimes.serialization",
"core.utils.system_time.STime.getTimestamp",
"core.utils.serialization.SerializationLongTermVoteMessage.serialization",
"core.data.block_of_garbage.BodyOfGarbageBlock",
"core.utils.serialization.SerializationApplicationForm.serialization",
"core.consensus.data.VoteMessage",
"core.utils.serialization.SerializationReplyApplicationForm.serialization",
"core.consensus.data.LongTermVoteMessage",
"core.utils.serialization.SerializationGarbage.serialization",
"core.data.network_message.NetworkMessage",
"core.utils.download.RemoteChainAsset",
"core.data.network_message.SubscribeTopics.getBlockTopicOfTimes",
"random.choice",
"core.network.net.SUB",
"random.shuffle",
"core.utils.serialization.SerializationReplyApplicationFormActiveDelete.serialization",
"core.data.network_message.SubscribeTopics.getVoteMessage",
"core.consensus.data.ApplicationForm",
"core.data.network_message.SubscribeTopics.getNodeTopicOfApplyDelete",
"core.utils.server_sdk.ChainAsset",
"core.data.network_message.SubscribeTopics.getNodeTopicOfActiveConfirmDelete",
"core.network.net.Client",
"core.utils.serialization.SerializationVoteMessage.serialization",
"core.consensus.vote_compute.VoteCount",
"core.storage.storage_of_garbage.StorageOfGarbage",
"core.data.block_of_beings.BlockListOfBeings",
"core.node.main_node.MainNode",
"core.consensus.node_management.NodeManager",
"core.data.network_message.SubscribeTopics.getNodeTopicOfJoin",
"core.utils.serialization.SerializationBeings.serialization",
"core.data.network_message.SubscribeTopics.getNodeTopicOfActiveApplyDelete",
"core.network.net.PUB",
"core.consensus.data.ApplicationFormActiveDelete",
"core.data.network_message.SubscribeTopics.getBlockTopicOfGarbage",
"core.data.network_message.SubscribeTopics.getNodeTopicOfApplyJoin",
"core.utils.serialization.SerializationNetworkMessage.serialization",
"core.user.user.User"
] | [((2162, 2179), 'core.storage.storage_of_beings.StorageOfBeings', 'StorageOfBeings', ([], {}), '()\n', (2177, 2179), False, 'from core.storage.storage_of_beings import StorageOfBeings\n'), ((2220, 2235), 'core.storage.storage_of_temp.StorageOfTemp', 'StorageOfTemp', ([], {}), '()\n', (2233, 2235), False, 'from core.storage.storage_of_temp import StorageOfTemp\n'), ((2277, 2294), 'core.storage.storage_of_galaxy.StorageOfGalaxy', 'StorageOfGalaxy', ([], {}), '()\n', (2292, 2294), False, 'from core.storage.storage_of_galaxy import StorageOfGalaxy\n'), ((2338, 2356), 'core.storage.storage_of_garbage.StorageOfGarbage', 'StorageOfGarbage', ([], {}), '()\n', (2354, 2356), False, 'from core.storage.storage_of_garbage import StorageOfGarbage\n'), ((2389, 2395), 'core.user.user.User', 'User', ([], {}), '()\n', (2393, 2395), False, 'from core.user.user import User\n'), ((2472, 2523), 'core.node.main_node.MainNode', 'MainNode', (['self.user', 'server_url', 'self.storageOfTemp'], {}), '(self.user, server_url, self.storageOfTemp)\n', (2480, 2523), False, 'from core.node.main_node import MainNode\n'), ((2558, 2651), 'core.consensus.node_management.NodeManager', 'NodeManager', ([], {'user': 'self.user', 'main_node': 'self.mainNode', 'storage_of_temp': 'self.storageOfTemp'}), '(user=self.user, main_node=self.mainNode, storage_of_temp=self.\n storageOfTemp)\n', (2569, 2651), False, 'from core.consensus.node_management import NodeManager\n'), ((2719, 2920), 'core.consensus.vote_compute.VoteCount', 'VoteCount', ([], {'storage_of_beings': 'self.storageOfBeings', 'storage_of_temp': 'self.storageOfTemp', 'main_node': 'self.mainNode', 'storage_of_times': 'self.storageOfGalaxy', 'storage_of_garbage': 'self.storageOfGarbage'}), '(storage_of_beings=self.storageOfBeings, storage_of_temp=self.\n storageOfTemp, main_node=self.mainNode, storage_of_times=self.\n storageOfGalaxy, storage_of_garbage=self.storageOfGarbage)\n', (2728, 2920), False, 'from core.consensus.vote_compute import VoteCount\n'), ((3016, 3067), 'core.consensus.block_verify.BlockVerify', 'BlockVerify', ([], {'storage_of_beings': 'self.storageOfBeings'}), '(storage_of_beings=self.storageOfBeings)\n', (3027, 3067), False, 'from core.consensus.block_verify import BlockVerify\n'), ((3087, 3092), 'core.network.net.PUB', 'PUB', ([], {}), '()\n', (3090, 3092), False, 'from core.network.net import PUB, SUB, Server, Client\n'), ((3181, 3230), 'core.network.net.Client', 'Client', ([], {'main_node_list': 'self.mainNode.mainNodeList'}), '(main_node_list=self.mainNode.mainNodeList)\n', (3187, 3230), False, 'from core.network.net import PUB, SUB, Server, Client\n'), ((3260, 3495), 'core.network.net.Server', 'Server', ([], {'user': 'self.user', 'node_manager': 'self.nodeManager', 'pub': 'self.pub', 'main_node': 'self.mainNode', 'storage_of_temp': 'self.storageOfTemp', 'vote_count': 'self.voteCount', 'getEpoch': 'self.getEpoch', 'getElectionPeriod': 'self.getElectionPeriod'}), '(user=self.user, node_manager=self.nodeManager, pub=self.pub,\n main_node=self.mainNode, storage_of_temp=self.storageOfTemp, vote_count\n =self.voteCount, getEpoch=self.getEpoch, getElectionPeriod=self.\n getElectionPeriod)\n', (3266, 3495), False, 'from core.network.net import PUB, SUB, Server, Client\n'), ((3619, 3624), 'core.utils.server_sdk.SDK', 'SDK', ([], {}), '()\n', (3622, 3624), False, 'from core.utils.server_sdk import SDK, ChainAsset\n'), ((3675, 3687), 'core.utils.server_sdk.ChainAsset', 'ChainAsset', ([], {}), '()\n', (3685, 3687), False, 'from core.utils.server_sdk import 
SDK, ChainAsset\n'), ((3741, 3759), 'core.utils.download.RemoteChainAsset', 'RemoteChainAsset', ([], {}), '()\n', (3757, 3759), False, 'from core.utils.download import RemoteChainAsset\n'), ((7250, 7840), 'core.network.net.SUB', 'SUB', ([], {'ip': 'ip', 'pub': 'self.pub', 'blockListOfBeings': 'self.mainNode.currentBlockList', 'web_server_sdk': 'self.webServerSDK', 'storage_of_garbage': 'self.storageOfGarbage', 'user': 'self.user', 'vote_count': 'self.voteCount', 'node_manager': 'self.nodeManager', 'main_node': 'self.mainNode', 'reSubscribe': 'self.reSubscribe', 'storage_of_temp': 'self.storageOfTemp', 'getEpoch': 'self.getEpoch', 'getElectionPeriod': 'self.getElectionPeriod', 'storage_of_galaxy': 'self.storageOfGalaxy', 'storage_of_beings': 'self.storageOfBeings', 'node_del_application_form_list': 'self.mainNode.nodeDelApplicationFormList', 'client': 'self.client'}), '(ip=ip, pub=self.pub, blockListOfBeings=self.mainNode.currentBlockList,\n web_server_sdk=self.webServerSDK, storage_of_garbage=self.\n storageOfGarbage, user=self.user, vote_count=self.voteCount,\n node_manager=self.nodeManager, main_node=self.mainNode, reSubscribe=\n self.reSubscribe, storage_of_temp=self.storageOfTemp, getEpoch=self.\n getEpoch, getElectionPeriod=self.getElectionPeriod, storage_of_galaxy=\n self.storageOfGalaxy, storage_of_beings=self.storageOfBeings,\n node_del_application_form_list=self.mainNode.nodeDelApplicationFormList,\n client=self.client)\n', (7253, 7840), False, 'from core.network.net import PUB, SUB, Server, Client\n'), ((9320, 9397), 'core.data.network_message.NetworkMessage', 'NetworkMessage', ([], {'mess_type': 'NetworkMessageType.Get_Main_Node_List', 'message': 'None'}), '(mess_type=NetworkMessageType.Get_Main_Node_List, message=None)\n', (9334, 9397), False, 'from core.data.network_message import NetworkMessageType, NetworkMessage, SubscribeTopics\n'), ((9420, 9467), 'core.utils.serialization.SerializationNetworkMessage.serialization', 'SerializationNetworkMessage.serialization', (['data'], {}), '(data)\n', (9461, 9467), False, 'from core.utils.serialization import SerializationBeings, SerializationApplicationForm, SerializationReplyApplicationForm, SerializationNetworkMessage, SerializationVoteMessage, SerializationTimes, SerializationLongTermVoteMessage, SerializationGarbage, SerializationApplicationFormActiveDelete, SerializationReplyApplicationFormActiveDelete\n'), ((10225, 10253), 'random.shuffle', 'random.shuffle', (['node_ip_list'], {}), '(node_ip_list)\n', (10239, 10253), False, 'import random\n'), ((17139, 17163), 'random.choice', 'random.choice', (['info_list'], {}), '(info_list)\n', (17152, 17163), False, 'import random\n'), ((18810, 18824), 'core.data.genesis_block.GenesisBlock', 'GenesisBlock', ([], {}), '()\n', (18822, 18824), False, 'from core.data.genesis_block import GenesisBlock\n'), ((18856, 18875), 'core.data.block_of_beings.BlockListOfBeings', 'BlockListOfBeings', ([], {}), '()\n', (18873, 18875), False, 'from core.data.block_of_beings import EmptyBlock, BlockListOfBeings\n'), ((22160, 22303), 'core.consensus.data.ReplyApplicationFormActiveDelete', 'ReplyApplicationFormActiveDelete', ([], {'del_node_id': 'del_node_id', 'start_time': 'application_time', 'is_agree': 'is_audit', 'apply_user_pk': 'main_node_user_pk'}), '(del_node_id=del_node_id, start_time=\n application_time, is_agree=is_audit, apply_user_pk=main_node_user_pk)\n', (22192, 22303), False, 'from core.consensus.data import ApplicationForm, ReplyApplicationForm, VoteMessage, LongTermVoteMessage, 
ApplicationFormActiveDelete, ReplyApplicationFormActiveDelete\n'), ((22862, 22964), 'core.utils.serialization.SerializationReplyApplicationFormActiveDelete.serialization', 'SerializationReplyApplicationFormActiveDelete.serialization', (['reply_application_form_active_delete'], {}), '(\n reply_application_form_active_delete)\n', (22921, 22964), False, 'from core.utils.serialization import SerializationBeings, SerializationApplicationForm, SerializationReplyApplicationForm, SerializationNetworkMessage, SerializationVoteMessage, SerializationTimes, SerializationLongTermVoteMessage, SerializationGarbage, SerializationApplicationFormActiveDelete, SerializationReplyApplicationFormActiveDelete\n'), ((23014, 23154), 'core.data.network_message.NetworkMessage', 'NetworkMessage', ([], {'mess_type': 'NetworkMessageType.ReplyNodeActiveDeleteApplication', 'message': 'serial_reply_application_form_active_delete'}), '(mess_type=NetworkMessageType.\n ReplyNodeActiveDeleteApplication, message=\n serial_reply_application_form_active_delete)\n', (23028, 23154), False, 'from core.data.network_message import NetworkMessageType, NetworkMessage, SubscribeTopics\n'), ((23436, 23494), 'core.utils.serialization.SerializationNetworkMessage.serialization', 'SerializationNetworkMessage.serialization', (['network_message'], {}), '(network_message)\n', (23477, 23494), False, 'from core.utils.serialization import SerializationBeings, SerializationApplicationForm, SerializationReplyApplicationForm, SerializationNetworkMessage, SerializationVoteMessage, SerializationTimes, SerializationLongTermVoteMessage, SerializationGarbage, SerializationApplicationFormActiveDelete, SerializationReplyApplicationFormActiveDelete\n'), ((10331, 10397), 'core.data.network_message.NetworkMessage', 'NetworkMessage', (['NetworkMessageType.Get_Current_Epoch'], {'message': 'None'}), '(NetworkMessageType.Get_Current_Epoch, message=None)\n', (10345, 10397), False, 'from core.data.network_message import NetworkMessageType, NetworkMessage, SubscribeTopics\n'), ((10436, 10463), 'random.choice', 'random.choice', (['node_ip_list'], {}), '(node_ip_list)\n', (10449, 10463), False, 'import random\n'), ((17425, 17449), 'random.choice', 'random.choice', (['info_list'], {}), '(info_list)\n', (17438, 17449), False, 'import random\n'), ((18085, 18109), 'random.choice', 'random.choice', (['info_list'], {}), '(info_list)\n', (18098, 18109), False, 'import random\n'), ((19917, 20069), 'core.consensus.data.ReplyApplicationForm', 'ReplyApplicationForm', ([], {'new_node_id': "info['node_id']", 'new_node_user_pk': "info['user_pk']", 'start_time': "info['node_create_time']", 'is_agree': "info['is_audit']"}), "(new_node_id=info['node_id'], new_node_user_pk=info[\n 'user_pk'], start_time=info['node_create_time'], is_agree=info['is_audit'])\n", (19937, 20069), False, 'from core.consensus.data import ApplicationForm, ReplyApplicationForm, VoteMessage, LongTermVoteMessage, ApplicationFormActiveDelete, ReplyApplicationFormActiveDelete\n'), ((20464, 20535), 'core.utils.serialization.SerializationReplyApplicationForm.serialization', 'SerializationReplyApplicationForm.serialization', (['reply_application_form'], {}), '(reply_application_form)\n', (20511, 20535), False, 'from core.utils.serialization import SerializationBeings, SerializationApplicationForm, SerializationReplyApplicationForm, SerializationNetworkMessage, SerializationVoteMessage, SerializationTimes, SerializationLongTermVoteMessage, SerializationGarbage, SerializationApplicationFormActiveDelete, 
SerializationReplyApplicationFormActiveDelete\n'), ((20585, 20701), 'core.data.network_message.NetworkMessage', 'NetworkMessage', ([], {'mess_type': 'NetworkMessageType.ReplayNewNodeApplicationJoin', 'message': 'serial_reply_application_form'}), '(mess_type=NetworkMessageType.ReplayNewNodeApplicationJoin,\n message=serial_reply_application_form)\n', (20599, 20701), False, 'from core.data.network_message import NetworkMessageType, NetworkMessage, SubscribeTopics\n'), ((21007, 21065), 'core.utils.serialization.SerializationNetworkMessage.serialization', 'SerializationNetworkMessage.serialization', (['network_message'], {}), '(network_message)\n', (21048, 21065), False, 'from core.utils.serialization import SerializationBeings, SerializationApplicationForm, SerializationReplyApplicationForm, SerializationNetworkMessage, SerializationVoteMessage, SerializationTimes, SerializationLongTermVoteMessage, SerializationGarbage, SerializationApplicationFormActiveDelete, SerializationReplyApplicationFormActiveDelete\n'), ((24416, 24436), 'core.utils.system_time.STime.getTimestamp', 'STime.getTimestamp', ([], {}), '()\n', (24434, 24436), False, 'from core.utils.system_time import STime\n'), ((24544, 24661), 'core.data.node_info.NodeInfo', 'NodeInfo', ([], {'node_id': 'node_id', 'user_pk': 'user_pk', 'node_ip': 'node_ip', 'create_time': 'node_create_time', 'server_url': 'server_url'}), '(node_id=node_id, user_pk=user_pk, node_ip=node_ip, create_time=\n node_create_time, server_url=server_url)\n', (24552, 24661), False, 'from core.data.node_info import NodeInfo\n'), ((24774, 24922), 'core.consensus.data.ApplicationForm', 'ApplicationForm', ([], {'node_info': 'node_info', 'start_time': 'application_time', 'content': 'application', 'application_signature_by_new_node': 'application_signature'}), '(node_info=node_info, start_time=application_time, content=\n application, application_signature_by_new_node=application_signature)\n', (24789, 24922), False, 'from core.consensus.data import ApplicationForm, ReplyApplicationForm, VoteMessage, LongTermVoteMessage, ApplicationFormActiveDelete, ReplyApplicationFormActiveDelete\n'), ((26614, 26674), 'core.utils.serialization.SerializationApplicationForm.serialization', 'SerializationApplicationForm.serialization', (['application_form'], {}), '(application_form)\n', (26656, 26674), False, 'from core.utils.serialization import SerializationBeings, SerializationApplicationForm, SerializationReplyApplicationForm, SerializationNetworkMessage, SerializationVoteMessage, SerializationTimes, SerializationLongTermVoteMessage, SerializationGarbage, SerializationApplicationFormActiveDelete, SerializationReplyApplicationFormActiveDelete\n'), ((27286, 27306), 'core.utils.system_time.STime.getTimestamp', 'STime.getTimestamp', ([], {}), '()\n', (27304, 27306), False, 'from core.utils.system_time import STime\n'), ((27352, 27463), 'core.consensus.data.ApplicationFormActiveDelete', 'ApplicationFormActiveDelete', ([], {'del_node_id': 'node_id', 'start_time': 'application_time', 'content': 'application_content'}), '(del_node_id=node_id, start_time=\n application_time, content=application_content)\n', (27379, 27463), False, 'from core.consensus.data import ApplicationForm, ReplyApplicationForm, VoteMessage, LongTermVoteMessage, ApplicationFormActiveDelete, ReplyApplicationFormActiveDelete\n'), ((28911, 29002), 'core.utils.serialization.SerializationApplicationFormActiveDelete.serialization', 'SerializationApplicationFormActiveDelete.serialization', (['application_form_active_delete'], {}), 
'(\n application_form_active_delete)\n', (28965, 29002), False, 'from core.utils.serialization import SerializationBeings, SerializationApplicationForm, SerializationReplyApplicationForm, SerializationNetworkMessage, SerializationVoteMessage, SerializationTimes, SerializationLongTermVoteMessage, SerializationGarbage, SerializationApplicationFormActiveDelete, SerializationReplyApplicationFormActiveDelete\n'), ((44618, 44631), 'core.consensus.data.VoteMessage', 'VoteMessage', ([], {}), '()\n', (44629, 44631), False, 'from core.consensus.data import ApplicationForm, ReplyApplicationForm, VoteMessage, LongTermVoteMessage, ApplicationFormActiveDelete, ReplyApplicationFormActiveDelete\n'), ((48459, 48480), 'core.consensus.data.LongTermVoteMessage', 'LongTermVoteMessage', ([], {}), '()\n', (48478, 48480), False, 'from core.consensus.data import ApplicationForm, ReplyApplicationForm, VoteMessage, LongTermVoteMessage, ApplicationFormActiveDelete, ReplyApplicationFormActiveDelete\n'), ((9218, 9230), 'core.utils.network_request.MainNodeIp', 'MainNodeIp', ([], {}), '()\n', (9228, 9230), False, 'from core.utils.network_request import MainNodeIp\n'), ((11604, 11634), 'random.choice', 'random.choice', (['server_url_list'], {}), '(server_url_list)\n', (11617, 11634), False, 'import random\n'), ((14036, 14066), 'random.choice', 'random.choice', (['server_url_list'], {}), '(server_url_list)\n', (14049, 14066), False, 'import random\n'), ((15709, 15739), 'random.choice', 'random.choice', (['server_url_list'], {}), '(server_url_list)\n', (15722, 15739), False, 'import random\n'), ((18285, 18309), 'random.choice', 'random.choice', (['info_list'], {}), '(info_list)\n', (18298, 18309), False, 'import random\n'), ((29992, 30052), 'core.utils.serialization.SerializationApplicationForm.serialization', 'SerializationApplicationForm.serialization', (['application_form'], {}), '(application_form)\n', (30034, 30052), False, 'from core.utils.serialization import SerializationBeings, SerializationApplicationForm, SerializationReplyApplicationForm, SerializationNetworkMessage, SerializationVoteMessage, SerializationTimes, SerializationLongTermVoteMessage, SerializationGarbage, SerializationApplicationFormActiveDelete, SerializationReplyApplicationFormActiveDelete\n'), ((30436, 30718), 'core.data.node_info.NodeInfo', 'NodeInfo', ([], {'node_id': "application_form.newNodeInfo['node_id']", 'user_pk': "application_form.newNodeInfo['user_pk']", 'node_ip': "application_form.newNodeInfo['node_ip']", 'create_time': "application_form.newNodeInfo['create_time']", 'server_url': "application_form.newNodeInfo['server_url']"}), "(node_id=application_form.newNodeInfo['node_id'], user_pk=\n application_form.newNodeInfo['user_pk'], node_ip=application_form.\n newNodeInfo['node_ip'], create_time=application_form.newNodeInfo[\n 'create_time'], server_url=application_form.newNodeInfo['server_url'])\n", (30444, 30718), False, 'from core.data.node_info import NodeInfo\n'), ((32440, 32531), 'core.utils.serialization.SerializationApplicationFormActiveDelete.serialization', 'SerializationApplicationFormActiveDelete.serialization', (['application_form_active_delete'], {}), '(\n application_form_active_delete)\n', (32494, 32531), False, 'from core.utils.serialization import SerializationBeings, SerializationApplicationForm, SerializationReplyApplicationForm, SerializationNetworkMessage, SerializationVoteMessage, SerializationTimes, SerializationLongTermVoteMessage, SerializationGarbage, SerializationApplicationFormActiveDelete, 
SerializationReplyApplicationFormActiveDelete\n'), ((52619, 52687), 'core.data.block_of_times.BodyOfTimesBlock', 'BodyOfTimesBlock', ([], {'users_pk': 'beings_users_pk', 'block_id': 'beings_block_id'}), '(users_pk=beings_users_pk, block_id=beings_block_id)\n', (52635, 52687), False, 'from core.data.block_of_times import BodyOfTimesBlock\n'), ((54393, 54445), 'core.utils.serialization.SerializationTimes.serialization', 'SerializationTimes.serialization', (['new_block_of_times'], {}), '(new_block_of_times)\n', (54425, 54445), False, 'from core.utils.serialization import SerializationBeings, SerializationApplicationForm, SerializationReplyApplicationForm, SerializationNetworkMessage, SerializationVoteMessage, SerializationTimes, SerializationLongTermVoteMessage, SerializationGarbage, SerializationApplicationFormActiveDelete, SerializationReplyApplicationFormActiveDelete\n'), ((56299, 56369), 'core.data.block_of_garbage.BodyOfGarbageBlock', 'BodyOfGarbageBlock', ([], {'users_pk': 'beings_users_pk', 'block_id': 'beings_block_id'}), '(users_pk=beings_users_pk, block_id=beings_block_id)\n', (56317, 56369), False, 'from core.data.block_of_garbage import BodyOfGarbageBlock\n'), ((58110, 58166), 'core.utils.serialization.SerializationGarbage.serialization', 'SerializationGarbage.serialization', (['new_block_of_garbage'], {}), '(new_block_of_garbage)\n', (58144, 58166), False, 'from core.utils.serialization import SerializationBeings, SerializationApplicationForm, SerializationReplyApplicationForm, SerializationNetworkMessage, SerializationVoteMessage, SerializationTimes, SerializationLongTermVoteMessage, SerializationGarbage, SerializationApplicationFormActiveDelete, SerializationReplyApplicationFormActiveDelete\n'), ((10809, 10822), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (10819, 10822), False, 'import time\n'), ((26733, 26774), 'core.data.network_message.SubscribeTopics.getNodeTopicOfApplyJoin', 'SubscribeTopics.getNodeTopicOfApplyJoin', ([], {}), '()\n', (26772, 26774), False, 'from core.data.network_message import NetworkMessageType, NetworkMessage, SubscribeTopics\n'), ((29071, 29120), 'core.data.network_message.SubscribeTopics.getNodeTopicOfActiveApplyDelete', 'SubscribeTopics.getNodeTopicOfActiveApplyDelete', ([], {}), '()\n', (29118, 29120), False, 'from core.data.network_message import NetworkMessageType, NetworkMessage, SubscribeTopics\n'), ((37547, 37594), 'core.utils.serialization.SerializationNetworkMessage.serialization', 'SerializationNetworkMessage.serialization', (['mess'], {}), '(mess)\n', (37588, 37594), False, 'from core.utils.serialization import SerializationBeings, SerializationApplicationForm, SerializationReplyApplicationForm, SerializationNetworkMessage, SerializationVoteMessage, SerializationTimes, SerializationLongTermVoteMessage, SerializationGarbage, SerializationApplicationFormActiveDelete, SerializationReplyApplicationFormActiveDelete\n'), ((47071, 47103), 'core.data.network_message.SubscribeTopics.getVoteMessage', 'SubscribeTopics.getVoteMessage', ([], {}), '()\n', (47101, 47103), False, 'from core.data.network_message import NetworkMessageType, NetworkMessage, SubscribeTopics\n'), ((47146, 47211), 'core.utils.serialization.SerializationVoteMessage.serialization', 'SerializationVoteMessage.serialization', ([], {'vote_message': 'vote_message'}), '(vote_message=vote_message)\n', (47184, 47211), False, 'from core.utils.serialization import SerializationBeings, SerializationApplicationForm, SerializationReplyApplicationForm, SerializationNetworkMessage, 
SerializationVoteMessage, SerializationTimes, SerializationLongTermVoteMessage, SerializationGarbage, SerializationApplicationFormActiveDelete, SerializationReplyApplicationFormActiveDelete\n'), ((50909, 50949), 'core.data.network_message.SubscribeTopics.getLongTermVoteMessage', 'SubscribeTopics.getLongTermVoteMessage', ([], {}), '()\n', (50947, 50949), False, 'from core.data.network_message import NetworkMessageType, NetworkMessage, SubscribeTopics\n'), ((50992, 51090), 'core.utils.serialization.SerializationLongTermVoteMessage.serialization', 'SerializationLongTermVoteMessage.serialization', ([], {'long_term_vote_message': 'long_term_vote_message'}), '(long_term_vote_message=\n long_term_vote_message)\n', (51038, 51090), False, 'from core.utils.serialization import SerializationBeings, SerializationApplicationForm, SerializationReplyApplicationForm, SerializationNetworkMessage, SerializationVoteMessage, SerializationTimes, SerializationLongTermVoteMessage, SerializationGarbage, SerializationApplicationFormActiveDelete, SerializationReplyApplicationFormActiveDelete\n'), ((12209, 12222), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (12219, 12222), False, 'import time\n'), ((12409, 12439), 'random.choice', 'random.choice', (['server_url_list'], {}), '(server_url_list)\n', (12422, 12439), False, 'import random\n'), ((13588, 13601), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (13598, 13601), False, 'import time\n'), ((14835, 14848), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (14845, 14848), False, 'import time\n'), ((15276, 15289), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (15286, 15289), False, 'import time\n'), ((16394, 16407), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (16404, 16407), False, 'import time\n'), ((16835, 16848), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (16845, 16848), False, 'import time\n'), ((30127, 30163), 'core.data.network_message.SubscribeTopics.getNodeTopicOfJoin', 'SubscribeTopics.getNodeTopicOfJoin', ([], {}), '()\n', (30161, 30163), False, 'from core.data.network_message import NetworkMessageType, NetworkMessage, SubscribeTopics\n'), ((32813, 32864), 'core.data.network_message.SubscribeTopics.getNodeTopicOfActiveConfirmDelete', 'SubscribeTopics.getNodeTopicOfActiveConfirmDelete', ([], {}), '()\n', (32862, 32864), False, 'from core.data.network_message import NetworkMessageType, NetworkMessage, SubscribeTopics\n'), ((35561, 35621), 'core.utils.serialization.SerializationBeings.serialization', 'SerializationBeings.serialization', ([], {'block_of_beings': 'new_block'}), '(block_of_beings=new_block)\n', (35594, 35621), False, 'from core.utils.serialization import SerializationBeings, SerializationApplicationForm, SerializationReplyApplicationForm, SerializationNetworkMessage, SerializationVoteMessage, SerializationTimes, SerializationLongTermVoteMessage, SerializationGarbage, SerializationApplicationFormActiveDelete, SerializationReplyApplicationFormActiveDelete\n'), ((54489, 54527), 'core.data.network_message.SubscribeTopics.getBlockTopicOfTimes', 'SubscribeTopics.getBlockTopicOfTimes', ([], {}), '()\n', (54525, 54527), False, 'from core.data.network_message import NetworkMessageType, NetworkMessage, SubscribeTopics\n'), ((58210, 58250), 'core.data.network_message.SubscribeTopics.getBlockTopicOfGarbage', 'SubscribeTopics.getBlockTopicOfGarbage', ([], {}), '()\n', (58248, 58250), False, 'from core.data.network_message import NetworkMessageType, NetworkMessage, SubscribeTopics\n'), ((4773, 4787), 'time.sleep', 
'time.sleep', (['(60)'], {}), '(60)\n', (4783, 4787), False, 'import time\n'), ((4846, 4860), 'time.sleep', 'time.sleep', (['(30)'], {}), '(30)\n', (4856, 4860), False, 'import time\n'), ((12840, 12870), 'random.choice', 'random.choice', (['server_url_list'], {}), '(server_url_list)\n', (12853, 12870), False, 'import random\n'), ((35768, 35856), 'core.data.network_message.NetworkMessage', 'NetworkMessage', ([], {'mess_type': 'NetworkMessageType.NEW_BLOCK', 'message': 'serialization_block'}), '(mess_type=NetworkMessageType.NEW_BLOCK, message=\n serialization_block)\n', (35782, 35856), False, 'from core.data.network_message import NetworkMessageType, NetworkMessage, SubscribeTopics\n'), ((36749, 36796), 'core.utils.serialization.SerializationNetworkMessage.serialization', 'SerializationNetworkMessage.serialization', (['mess'], {}), '(mess)\n', (36790, 36796), False, 'from core.utils.serialization import SerializationBeings, SerializationApplicationForm, SerializationReplyApplicationForm, SerializationNetworkMessage, SerializationVoteMessage, SerializationTimes, SerializationLongTermVoteMessage, SerializationGarbage, SerializationApplicationFormActiveDelete, SerializationReplyApplicationFormActiveDelete\n'), ((37770, 37809), 'core.data.network_message.SubscribeTopics.getBlockTopicOfBeings', 'SubscribeTopics.getBlockTopicOfBeings', ([], {}), '()\n', (37807, 37809), False, 'from core.data.network_message import NetworkMessageType, NetworkMessage, SubscribeTopics\n'), ((39567, 39610), 'core.data.network_message.SubscribeTopics.getNodeTopicOfApplyDelete', 'SubscribeTopics.getNodeTopicOfApplyDelete', ([], {}), '()\n', (39608, 39610), False, 'from core.data.network_message import NetworkMessageType, NetworkMessage, SubscribeTopics\n'), ((35251, 35401), 'core.consensus.block_generate.NewBlockOfBeings', 'NewBlockOfBeings', ([], {'user_pk': 'user_pk', 'body_signature': 'body_signature', 'body': 'body', 'epoch': 'epoch', 'pre_block': 'pre_block', 'prev_block_header': 'prev_block_header'}), '(user_pk=user_pk, body_signature=body_signature, body=body,\n epoch=epoch, pre_block=pre_block, prev_block_header=prev_block_header)\n', (35267, 35401), False, 'from core.consensus.block_generate import CurrentMainNode, NewBlockOfBeings, NewBlockOfTimes, NewBlockOfGarbage\n'), ((35904, 35943), 'core.data.network_message.SubscribeTopics.getBlockTopicOfBeings', 'SubscribeTopics.getBlockTopicOfBeings', ([], {}), '()\n', (35941, 35943), False, 'from core.data.network_message import NetworkMessageType, NetworkMessage, SubscribeTopics\n'), ((36984, 37023), 'core.data.network_message.SubscribeTopics.getBlockTopicOfBeings', 'SubscribeTopics.getBlockTopicOfBeings', ([], {}), '()\n', (37021, 37023), False, 'from core.data.network_message import NetworkMessageType, NetworkMessage, SubscribeTopics\n')] |
from flask_wtf import FlaskForm
import wtforms as form
import wtforms.validators as validator


class PostForm(FlaskForm):
file = form.FileField(label='Файл', validators=[validator.DataRequired()])
type_mutator = form.SelectField(label='Тип мутации', validators=[validator.DataRequired()], choices=[
(0, "Выберите типы мутации"),
(1, "Склейщик"),
(2, "Криптор"),
(3, "RAR")
])
virus = form.SelectField(label="Вирус", validators=[validator.DataRequired()],
choices=[
(0, "Выберите вирус"),
(1, "Стиллер"),
(2, "Троян"),
(3, "DarkComet"),
(4, "Винлокер"),
(5, "Червь"),
])
submit = form.SubmitField('Отправить')
| [
"wtforms.validators.DataRequired",
"wtforms.SubmitField"
] | [((890, 919), 'wtforms.SubmitField', 'form.SubmitField', (['"""Отправить"""'], {}), "('Отправить')\n", (906, 919), True, 'import wtforms as form\n'), ((179, 203), 'wtforms.validators.DataRequired', 'validator.DataRequired', ([], {}), '()\n', (201, 203), True, 'import wtforms.validators as validator\n'), ((281, 305), 'wtforms.validators.DataRequired', 'validator.DataRequired', ([], {}), '()\n', (303, 305), True, 'import wtforms.validators as validator\n'), ((483, 507), 'wtforms.validators.DataRequired', 'validator.DataRequired', ([], {}), '()\n', (505, 507), True, 'import wtforms.validators as validator\n')] |
from random import randint
numComputador = randint(0, 10)
numJogador = -1
tentativas = 0
print('-=-' *20)
print('\033[34mPENSEI EM UM NÚMERO INTEIRO ENTRE 0 e 10. TENTE ADIVINHAR\033[m')
print('-=-'*20)
while numJogador != numComputador:
numJogador = int(input('Advinhe o número: '))
tentativas += 1
if numJogador > numComputador:
print('\033[31mMenos... Tente novamente.\033[m')
elif numJogador < numComputador:
print('\033[31mMais... Tente novamente.\033[m')
print('\033[32mParabéns você acertou!\033[m')
print('Foram necessários {} tentativas até você acertar.'.format(tentativas))
| [
"random.randint"
] | [((43, 57), 'random.randint', 'randint', (['(0)', '(10)'], {}), '(0, 10)\n', (50, 57), False, 'from random import randint\n')] |
from gym_aero.envs import random_waypoint_nh_env
from gym_aero.pid_envs import pid_env_base
from math import sin, cos, acos
import gym
import numpy as np
class PIDRandomWaypointNHEnv(random_waypoint_nh_env.RandomWaypointNHEnv, pid_env_base.PIDEnv):
def __init__(self):
super(PIDRandomWaypointNHEnv, self).__init__()
self.name = "PIDRandomWaypointNH-v0"
def step(self, errors):
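        # `errors` are externally supplied PID error terms; pid_step (presumably provided
        # by pid_env_base.PIDEnv) converts them into an action and advances the simulation,
        # returning the updated state used below.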
xyz, zeta, uvw, pqr = super(PIDRandomWaypointNHEnv, self).pid_step(errors)
xyz_dot = self.get_xyz_dot()
sin_zeta = [sin(z) for z in zeta]
cos_zeta = [cos(z) for z in zeta]
curr_rpm = self.get_rpm()
normalized_rpm = [rpm/self.max_rpm for rpm in curr_rpm]
self.set_current_dists((xyz, sin_zeta, cos_zeta, xyz_dot, pqr), errors)
reward, info = self.reward((xyz, sin_zeta, cos_zeta, xyz_dot, pqr), errors)
self.t += 1
done = self.terminal((xyz, zeta, xyz_dot, pqr))
obs = self.get_state_obs((xyz, sin_zeta, cos_zeta, xyz_dot, pqr), errors, normalized_rpm)
self.set_prev_dists((xyz, sin_zeta, cos_zeta, xyz_dot, pqr), errors)
return obs, reward, done, info
def render(self, mode='human', video=False, close=False):
super(pid_env_base.PIDEnv, self).render(mode=mode, close=close)
self.ani.draw_goal(self.goal_xyz)
self.ani.draw_vector(self.goal_xyz, self.goal_zeta)
self.ani.draw()
if video: self.ani.save_frame(self.name)
if close:
self.ani.close_window()
self.init_rendering = False | [
"math.cos",
"math.sin"
] | [((551, 557), 'math.sin', 'sin', (['z'], {}), '(z)\n', (554, 557), False, 'from math import sin, cos, acos\n'), ((593, 599), 'math.cos', 'cos', (['z'], {}), '(z)\n', (596, 599), False, 'from math import sin, cos, acos\n')] |
#!/usr/bin/env python
# encoding: utf-8
# File : StopPlot.py
# Author : <NAME>
# Contact : <EMAIL>
# Date : 2015 Jul 23
#
# Description : A main code for making plots for the stop analysis
# class StopPlot():
# def __init__(self):
# pass
from rootpy.interactive import wait
from PyAna import PyAna
from PyDraw import PyDraw
Lumi = 10 * 1000
if __name__ == "__main__":
test = PyAna("./Files/", Lumi)
Draw = PyDraw(Lumi)
test.FormProcesses()
out = Draw.DrawLineComparison(test.GetHist([], "Top_G3Top20", "CutFlow"))
# out = Draw.DrawLineComparison(test.GetHist("TTbar", "Top_G3Top20", "MissTopTagPT"))
# out = Draw.DrawLineComparison(test.GetHist("Top_G3Top20", "MissTopTagEta"))
out.SaveAs("test.root")
out.SaveAs("test.png")
wait()
| [
"rootpy.interactive.wait",
"PyDraw.PyDraw",
"PyAna.PyAna"
] | [((422, 445), 'PyAna.PyAna', 'PyAna', (['"""./Files/"""', 'Lumi'], {}), "('./Files/', Lumi)\n", (427, 445), False, 'from PyAna import PyAna\n'), ((457, 469), 'PyDraw.PyDraw', 'PyDraw', (['Lumi'], {}), '(Lumi)\n', (463, 469), False, 'from PyDraw import PyDraw\n'), ((806, 812), 'rootpy.interactive.wait', 'wait', ([], {}), '()\n', (810, 812), False, 'from rootpy.interactive import wait\n')] |
"""
http://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_setup/py_setup_in_windows/py_setup_in_windows.html#install-opencv-python-in-windows
http://www.lfd.uci.edu/~gohlke/pythonlibs/#opencv
http://www.lfd.uci.edu/~gohlke/pythonlibs/#numpy
pip install D:\Installation\opencv_python-3.3.0-cp36-cp36m-win_amd64.whl
opencv_python‑3.3.0‑cp36‑cp36m‑win_amd64.whl
D:\Installation\numpy-1.13.1+mkl-cp36-cp36m-win_amd64.whl
http://www.learnopencv.com/non-photorealistic-rendering-using-opencv-python-c/
http://docs.opencv.org/trunk/df/dac/group__photo__render.html
"""
import os
import numpy as np
import cv2
from PIL import Image
fld = r"C:\MapServer\apps\test\images"
os.chdir(fld)
fn = os.path.join(fld, "test.png")
ofn = os.path.join(fld, "test2.png")
img = cv2.imread(fn, 1)
# https://stackoverflow.com/questions/30506126/open-cv-error-215-scn-3-scn-4-in-function-cvtcolor
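# Parameter meanings (per OpenCV's non-photorealistic rendering docs): sigma_s sets the
# size of the spatial neighbourhood, sigma_r controls how dissimilar colours are averaged,
# and shade_factor scales the pencil-sketch intensity; pencilSketch returns both a
# grayscale and a colour rendering.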
dst_gray, dst_color = cv2.pencilSketch(img, sigma_s=60, sigma_r=0.07, shade_factor=0.04) # shade_factor=0.05
cv2.imwrite(ofn, dst_gray)
stylize = cv2.stylization(img, sigma_s=60, sigma_r=0.07)
cv2.imwrite("style.png", stylize)
print("Done!") | [
"cv2.imwrite",
"cv2.pencilSketch",
"os.path.join",
"os.chdir",
"cv2.stylization",
"cv2.imread"
] | [((684, 697), 'os.chdir', 'os.chdir', (['fld'], {}), '(fld)\n', (692, 697), False, 'import os\n'), ((703, 732), 'os.path.join', 'os.path.join', (['fld', '"""test.png"""'], {}), "(fld, 'test.png')\n", (715, 732), False, 'import os\n'), ((739, 769), 'os.path.join', 'os.path.join', (['fld', '"""test2.png"""'], {}), "(fld, 'test2.png')\n", (751, 769), False, 'import os\n'), ((777, 794), 'cv2.imread', 'cv2.imread', (['fn', '(1)'], {}), '(fn, 1)\n', (787, 794), False, 'import cv2\n'), ((917, 983), 'cv2.pencilSketch', 'cv2.pencilSketch', (['img'], {'sigma_s': '(60)', 'sigma_r': '(0.07)', 'shade_factor': '(0.04)'}), '(img, sigma_s=60, sigma_r=0.07, shade_factor=0.04)\n', (933, 983), False, 'import cv2\n'), ((1005, 1031), 'cv2.imwrite', 'cv2.imwrite', (['ofn', 'dst_gray'], {}), '(ofn, dst_gray)\n', (1016, 1031), False, 'import cv2\n'), ((1043, 1089), 'cv2.stylization', 'cv2.stylization', (['img'], {'sigma_s': '(60)', 'sigma_r': '(0.07)'}), '(img, sigma_s=60, sigma_r=0.07)\n', (1058, 1089), False, 'import cv2\n'), ((1091, 1124), 'cv2.imwrite', 'cv2.imwrite', (['"""style.png"""', 'stylize'], {}), "('style.png', stylize)\n", (1102, 1124), False, 'import cv2\n')] |
#! /usr/bin/python3
# -*- coding: utf-8 -*-
#
# update/sqlite_delete.py
#
# Sep/06/2016
#
# --------------------------------------------------------
import sys
import sqlite3
#
sys.path.append ('/var/www/data_base/common/python_common')
from sql_manipulate import sql_delete_proc
#
# --------------------------------------------------------
sys.stderr.write ("*** 開始 ***\n")
#
file_in = sys.argv[1]
key_in = sys.argv[2]
print ("%s" % key_in)
#
conn = sqlite3.connect (file_in)
conn.text_factory=str
#
cursor = conn.cursor ()
#
sql_delete_proc (cursor,key_in)
#
conn.commit ()
#
cursor.close ()
conn.close ()
#
sys.stderr.write ("*** 終了 ***\n")
#
# --------------------------------------------------------
| [
"sys.stderr.write",
"sys.path.append",
"sqlite3.connect",
"sql_manipulate.sql_delete_proc"
] | [((181, 239), 'sys.path.append', 'sys.path.append', (['"""/var/www/data_base/common/python_common"""'], {}), "('/var/www/data_base/common/python_common')\n", (196, 239), False, 'import sys\n'), ((345, 377), 'sys.stderr.write', 'sys.stderr.write', (['"""*** 開始 ***\n"""'], {}), "('*** 開始 ***\\n')\n", (361, 377), False, 'import sys\n'), ((455, 479), 'sqlite3.connect', 'sqlite3.connect', (['file_in'], {}), '(file_in)\n', (470, 479), False, 'import sqlite3\n'), ((531, 562), 'sql_manipulate.sql_delete_proc', 'sql_delete_proc', (['cursor', 'key_in'], {}), '(cursor, key_in)\n', (546, 562), False, 'from sql_manipulate import sql_delete_proc\n'), ((614, 646), 'sys.stderr.write', 'sys.stderr.write', (['"""*** 終了 ***\n"""'], {}), "('*** 終了 ***\\n')\n", (630, 646), False, 'import sys\n')] |
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit test of pose estimation using MoveNet Multipose."""
import logging
from typing import List
import unittest
import cv2
from data import BodyPart
from data import KeyPoint
from .movenet_multipose import MoveNetMultiPose
import numpy as np
import pandas as pd
_MODEL_MOVENET_MULTIPOSE = 'movenet_multipose'
_IMAGE_TEST1 = 'test_data/image1.png'
_IMAGE_TEST2 = 'test_data/image2.jpeg'
_GROUND_TRUTH_CSV = 'test_data/pose_landmark_truth.csv'
_ALLOWED_DISTANCE = 41
class MovenetMultiPoseTest(unittest.TestCase):
def setUp(self):
super().setUp()
image_1 = cv2.imread(_IMAGE_TEST1)
image_2 = cv2.imread(_IMAGE_TEST2)
# Merge image_1 and image_2 into a single image for testing MultiPose model.
image = cv2.hconcat([image_1, image_2])
# Initialize the MultiPose model.
    detector = MoveNetMultiPose(_MODEL_MOVENET_MULTIPOSE)
# Run detection on the merged image
self.list_persons = detector.detect(image)
# Sort the results so that the person on the right side come first.
self.list_persons.sort(key=lambda person: person.bounding_box.start_point.x)
# Load the pose landmarks ground truth.
pose_landmarks_truth = pd.read_csv(_GROUND_TRUTH_CSV)
keypoints_truth_1 = pose_landmarks_truth.iloc[0].to_numpy().reshape((17, 2))
keypoints_truth_2 = pose_landmarks_truth.iloc[1].to_numpy().reshape((17, 2))
# Shift keypoints_truth_2 to the right to account for the space occupied by
# image1.
for idx in range(keypoints_truth_2.shape[0]):
keypoints_truth_2[idx][0] += image_1.shape[1]
self.keypoints_truth = [keypoints_truth_1, keypoints_truth_2]
def _assert(self, keypoints: List[KeyPoint],
keypoints_truth: np.ndarray) -> None:
"""Assert if the detection result is close to ground truth.
Args:
keypoints: List Keypoint detected by from the Movenet Multipose model.
keypoints_truth: Ground truth keypoints.
"""
for idx in range(len(BodyPart)):
kpt_estimate = np.array(
[keypoints[idx].coordinate.x, keypoints[idx].coordinate.y])
kpt_truth = keypoints_truth[idx]
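      # np.inf norm = Chebyshev distance: the larger of |dx| and |dy| in pixels.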
distance = np.linalg.norm(kpt_estimate - kpt_truth, np.inf)
self.assertGreaterEqual(
_ALLOWED_DISTANCE, distance,
'{0} is too far away ({1}) from ground truth data.'.format(
BodyPart(idx).name, int(distance)))
logging.debug('Detected %s close to expected result (%d)',
BodyPart(idx).name, int(distance))
def test_pose_estimation_image1_multipose(self):
"""Test if MoveNet Multipose's detection is close to ground truth of image1."""
keypoints = self.list_persons[0].keypoints
self._assert(keypoints, self.keypoints_truth[0])
def test_pose_estimation_image2_multipose(self):
"""Test if MoveNet Multipose's detection is close to ground truth of image2."""
keypoints = self.list_persons[1].keypoints
self._assert(keypoints, self.keypoints_truth[1])
if __name__ == '__main__':
unittest.main()
| [
"data.BodyPart",
"pandas.read_csv",
"numpy.array",
"numpy.linalg.norm",
"unittest.main",
"cv2.imread",
"cv2.hconcat"
] | [((3606, 3621), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3619, 3621), False, 'import unittest\n'), ((1183, 1207), 'cv2.imread', 'cv2.imread', (['_IMAGE_TEST1'], {}), '(_IMAGE_TEST1)\n', (1193, 1207), False, 'import cv2\n'), ((1222, 1246), 'cv2.imread', 'cv2.imread', (['_IMAGE_TEST2'], {}), '(_IMAGE_TEST2)\n', (1232, 1246), False, 'import cv2\n'), ((1341, 1372), 'cv2.hconcat', 'cv2.hconcat', (['[image_1, image_2]'], {}), '([image_1, image_2])\n', (1352, 1372), False, 'import cv2\n'), ((1785, 1815), 'pandas.read_csv', 'pd.read_csv', (['_GROUND_TRUTH_CSV'], {}), '(_GROUND_TRUTH_CSV)\n', (1796, 1815), True, 'import pandas as pd\n'), ((2607, 2675), 'numpy.array', 'np.array', (['[keypoints[idx].coordinate.x, keypoints[idx].coordinate.y]'], {}), '([keypoints[idx].coordinate.x, keypoints[idx].coordinate.y])\n', (2615, 2675), True, 'import numpy as np\n'), ((2743, 2791), 'numpy.linalg.norm', 'np.linalg.norm', (['(kpt_estimate - kpt_truth)', 'np.inf'], {}), '(kpt_estimate - kpt_truth, np.inf)\n', (2757, 2791), True, 'import numpy as np\n'), ((3068, 3081), 'data.BodyPart', 'BodyPart', (['idx'], {}), '(idx)\n', (3076, 3081), False, 'from data import BodyPart\n'), ((2947, 2960), 'data.BodyPart', 'BodyPart', (['idx'], {}), '(idx)\n', (2955, 2960), False, 'from data import BodyPart\n')] |
# solve eq x=cos(x)
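# Fixed-point iteration: repeatedly apply x_{n+1} = cos(x_n). It converges because
# |d/dx cos(x)| = |sin(x)| < 1 near the fixed point (the Dottie number, ~0.739085).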
from math import cos
def main():
x = 0.0
eps = 1e-7
    iteration = 1000000
is_convergence = False
    for i in range(iteration):
xnext = cos(x)
if(abs(xnext-x) < eps):
is_convergence = True
break
x = xnext
if is_convergence:
print("X=", xnext, ", iter=", i+1)
print("X-cos(X)=", xnext-cos(xnext))
else:
print("No Convergence")
if __name__ == '__main__':
main()
| [
"math.cos"
] | [((179, 185), 'math.cos', 'cos', (['x'], {}), '(x)\n', (182, 185), False, 'from math import cos\n'), ((388, 398), 'math.cos', 'cos', (['xnext'], {}), '(xnext)\n', (391, 398), False, 'from math import cos\n')] |
#!/usr/bin/env python
import os
import math
import numpy as np
import cst.sord
# FIXME: prestress not correct
dx = 100.0
dt = dx / 12500.0
nx = int(16500.0 / dx + 21.5)
ny = int(16500.0 / dx + 21.5)
nz = int(12000.0 / dx + 120.5)
nt = int(8.0 / dt + 1.5)
alpha = math.sin(math.pi / 3.0)
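# sin(60 deg): the grid skew applied in the 'affine' transform below presumably reflects
# a 60-degree dipping fault geometry (an assumption, inferred from the TPV12 benchmark
# name used for the output directory at the end of this script).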
prm = {
'shape': [nx, ny, nz, nt],
'delta': [dx, dx, dx, dt],
'nproc3': [1, 1, 2],
'bc1': ['-node', 'free', 'free'],
'bc2': ['pml', 'pml', 'free'],
'n1expand': [0, 0, 50],
'n2expand': [0, 0, 50],
'affine': [
[1.0, 0.0, 0.0],
[0.0, alpha, 0.0],
[0.0, 0.5, 1.0],
],
'hourglass': [1.0, 2.0],
'rho': [2700.0],
'vp': [5716.0],
'vs': [3300.0],
'faultnormal': '+z',
'co': [200000.0],
'dc': [0.5],
'mud': [0.1],
'sxx': [([0, []], '=>', 'sxx.bin')],
'syy': [([0, []], '=>', 'syy.bin')],
'szz': [([0, []], '=>', 'szz.bin')],
}
# hypocenter
y = 12000.0 / dx
z = nz // 2 - 0.5
prm['hypocenter'] = hypo = [0.0, y, z]
# near-fault volume
i = int(15000.0 / dx + 0.5)
l0 = int(z - 3000.0 / dx + 0.5)
l1 = int(z + 3000.0 / dx + 0.5)
prm['gam'] = [0.2, ([[i], [i], [l0, l1]], '=', 0.02)]
prm['mus'] = [10000.0, ([[i+1], [i+1]], '=', 0.7)]
prm['trup'] = [([[i+1], [i+1], -1], '=>', 'trup.bin')]
# nucleation
k = int(hypo[1])
m = int(1500.0 / dx + 0.5)
n = int(1500.0 / dx + 1.5)
prm['mus'] += [
([[n], [k-n, k+n+1]], '=', 0.66),
([[n], [k-m, k+m+1]], '=', 0.62),
([[m], [k-n, k+n+1]], '=', 0.62),
([[m], [k-m, k+m+1]], '=', 0.54),
]
# slip, slip velocity, and shear traction time histories
for j, k in [
[0, 0],
[45, 0],
[120, 0],
[0, 15],
[0, 30],
[0, 45],
[0, 75],
[45, 75],
[120, 75],
[0, 120],
]:
x = j * 100.0 / dx
y = k * 100.0 / dx
for f in (
'sux', 'suy', 'suz',
'svx', 'svy', 'svz',
'tsx', 'tsy', 'tsz', 'tnm'
):
s = 'faultst%03ddp%03d-%s.bin' % (j, k, f)
if f not in prm:
prm[f] = []
prm[f] += [([x, y, []], '.>', s)]
# displacement and velocity time histories
for j, k, l in [
[0, 0, -30],
[0, 0, -20],
[0, 0, -10],
[0, 0, 10],
[0, 0, 20],
[0, 0, 30],
[0, 3, -10],
[0, 3, -5],
[0, 3, 5],
[0, 3, 10],
[120, 0, -30],
[120, 0, 30],
]:
x = j * 100.0 / dx
y = k * 100.0 / dx / alpha
z = l * 100.0 / dx + hypo[2]
for f in 'ux', 'uy', 'uz', 'vx', 'vy', 'vz':
s = 'body%03dst%03ddp%03d-%s.bin' % (j, k, l, f)
s = s.replace('body-', 'body-0')
if f not in prm:
prm[f] = []
prm[f] += [([x, y, z, []], '.>', s)]
# pre-stress
d = np.arange(ny) * alpha * dx
x = d * 9.8 * -1147.16
y = d * 9.8 * -1700.0
z = d * 9.8 * -594.32
k = int(13800.0 / dx + 1.5)
x[k:] = y[k:]
z[k:] = y[k:]
d = 'repo/TPV12'
os.mkdir(d)
os.chdir(d)
x.astype('f').tofile('sxx.bin')
y.astype('f').tofile('syy.bin')
z.astype('f').tofile('szz.bin')
cst.sord.run(prm)
| [
"os.chdir",
"math.sin",
"os.mkdir",
"numpy.arange"
] | [((267, 290), 'math.sin', 'math.sin', (['(math.pi / 3.0)'], {}), '(math.pi / 3.0)\n', (275, 290), False, 'import math\n'), ((2819, 2830), 'os.mkdir', 'os.mkdir', (['d'], {}), '(d)\n', (2827, 2830), False, 'import os\n'), ((2831, 2842), 'os.chdir', 'os.chdir', (['d'], {}), '(d)\n', (2839, 2842), False, 'import os\n'), ((2651, 2664), 'numpy.arange', 'np.arange', (['ny'], {}), '(ny)\n', (2660, 2664), True, 'import numpy as np\n')] |
# LICENSE
# Copyright (c) 2013-2016, <NAME> (<EMAIL>)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
# csv.py
#
# Defines classes for working with .csv format data files. You can parse such
# a file by creating an instance of CsvDataset with a file object as the
# parameter.
#
# Known issues:
#
# History:
# [2010/11/05:hostetje] Created by copying code from arff.py
# [2012/02/10:hostetje] Added option to CsvDataset constructor to specify
# that the data has no column headers.
# [2013/01/28:hostetje] Copied from 'bwposthoc' source tree.
# [2015/01/28:hostetje] Fixed erroneous type check in 'attribute_index()'
# ----------------------------------------------------------------------------
import re
import sys
class _ParserState:
HEADER = "header"
BODY = "body"
def __init__( self, init=HEADER ):
self.section = init
class ParseError(RuntimeError):
"""Indicates an error parsing a .csv file.
"""
def __init__( self, value ):
self.value = value
def __str__( self ):
return repr( self.value )
class CsvAttribute:
"""Represents a single attribute of a .csv dataset.
.name = The name of the attribute.
"""
def __init__( self, name ):
self.name = name
def __copy__( self ):
return CsvAttribute( self.name )
def __repr__( self ):
return self.name
def _dequote( name ):
if len(name) < 2:
return name
if name[0] == "\"" and name[-1] == "\"":
return name[1:-1]
return name
class CsvDataset:
@staticmethod
def from_arff_dataset( arff_dataset ):
headers = [CsvAttribute( _dequote( attr.name ) ) for attr in arff_dataset.attributes]
return CsvDataset( attributes=headers, feature_vectors=[[str(e) for e in fv] for fv in arff_dataset.feature_vectors] )
"""Represents a .csv dataset, including the column names and the feature
vectors.
.attributes = An array of CsvAttribute objects, in the same order as they
appear in the input file.
.feature_vectors = An array of arrays representing the feature vectors,
in the same order as they appear in the input file.
"""
def __init__( self, *args, **kwargs ):
"""Creates an object to represent the dataset stored in 'csv_file'.
"""
self.attributes = []
self.feature_vectors = []
if len(args) == 1:
if isinstance(args[0], CsvDataset):
# Copy constructor
that = args[0]
self.attributes = that.attributes[:]
self.feature_vectors = that.feature_vectors[:]
else:
# Construct from iterable (ie. file)
try: headers = kwargs["headers"]
except KeyError: headers = True
if headers:
state = _ParserState( init=_ParserState.HEADER )
else:
state = _ParserState( init=_ParserState.BODY )
for line in args[0]:
self._process_line( line, state )
elif len(args) == 0:
self.attributes = kwargs["attributes"]
self.feature_vectors = kwargs["feature_vectors"]
else:
raise Exception( "Bad call to CsvDataset constructor" )
def __copy__( self ):
return CsvDataset( self )
def attribute( self, name ):
"""Retrieve the CsvAttribute with name 'name'.
Raises a KeyError if an attribute with the specified name is not in
the dataset.
"""
for attr in self.attributes:
if attr.name == name:
return attr
raise KeyError( name )
def attribute_index( self, arg ):
"""Get the index of an attribute.
  If 'arg' is a CsvAttribute, its 'name' property is compared to the
names of the attributes in the CsvDataset. Otherwise, it is assumed
that 'arg' is a string containing the name of an attribute.
Raises a KeyError if an attribute with the specified name is not in
the dataset.
"""
if isinstance(arg, CsvAttribute):
name = arg.name
else:
assert( type(arg) is str )
name = arg
  index = 0
for attr in self.attributes:
if attr.name == name:
return index
index += 1
raise KeyError( arg )
def _process_line( self, line, state ):
"""Processes one line of input.
  Skips blank lines, delegates to _process_header() while the parser is in
  the header state, and to _process_feature_vector() otherwise.
"""
line = line.strip()
if line == "":
return
if state.section == _ParserState.HEADER:
self._process_header( line, state )
else:
self._process_feature_vector( line, state )
def _process_header( self, line, state ):
"""Processes the header, which just defines the column names.
"""
for name in re.split( ",", line ):
self.attributes.append( CsvAttribute( name ) )
state.section = _ParserState.BODY
def _process_feature_vector( self, line, state ):
"""Processes a 'feature vector', which is any non-empty line that is not
the column header line.
"""
self.feature_vectors.append( re.split( ",", line ) )
def __repr__( self ):
result = []
result.append( ",".join( [repr(attr) for attr in self.attributes] ) )
for v in self.feature_vectors:
result.append( ",".join( map(str, v) ) )
return "\n".join( result )
| [
"re.split"
] | [((5646, 5665), 're.split', 're.split', (['""","""', 'line'], {}), "(',', line)\n", (5654, 5665), False, 'import re\n'), ((5945, 5964), 're.split', 're.split', (['""","""', 'line'], {}), "(',', line)\n", (5953, 5964), False, 'import re\n')] |
'''
Problem 23 (Non-abundant sums)
A perfect number is a number for which the sum of its proper divisors is
exactly equal to the number. For example, the sum of the proper divisors of 28
would be 1 + 2 + 4 + 7 + 14 = 28, which means that 28 is a perfect number.
A number n is called deficient if the sum of its proper divisors is less
than n and it is called abundant if this sum exceeds n.
As 12 is the smallest abundant number, 1 + 2 + 3 + 4 + 6 = 16, the smallest
number that can be written as the sum of two abundant numbers is 24.
By mathematical analysis, it can be shown that all integers greater than 28123
can be written as the sum of two abundant numbers. However, this upper limit
cannot be reduced any further by analysis even though it is known that the
greatest number that cannot be expressed as the sum of two abundant
numbers is less than this limit.
Find the sum of all the positive integers which cannot be written as the sum of
two abundant numbers.
'''
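# Worked example from the statement above: the proper divisors of 12 are
# 1, 2, 3, 4 and 6, which sum to 16 > 12, so 12 is abundant; divisors(12)
# below returns exactly that set (excluding 12 itself).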
def divisors(n):
result = set((1,))
for i in range(2, round(n**.5)+1):
if n%i == 0:
result.add(i)
result.add(n//i)
return result
def gen_abundant(limit):
i = 1
while i <= limit:
if sum(divisors(i)) > i:
yield i
i += 1
if __name__ == "__main__":
import time
start = time.time()
abuns = tuple(gen_abundant(28123+1))
alen = len(abuns)
abun_sieve = [False]*28124
for i,k in enumerate(abuns):
for j in range(i, alen):
cur_s = k + abuns[j]
if cur_s > 28123:
break
abun_sieve[cur_s] = cur_s
 print(28123*28124//2 - sum(abun_sieve))
print(time.time() - start)
| [
"time.time"
] | [((1334, 1345), 'time.time', 'time.time', ([], {}), '()\n', (1343, 1345), False, 'import time\n'), ((1683, 1694), 'time.time', 'time.time', ([], {}), '()\n', (1692, 1694), False, 'import time\n')] |
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import proj3d
from scipy.stats import dirichlet
import os
grain = 250 #how many points along each axis to plot
edgedist = 0.008 #How close to an extreme value of say [1,0,0] are we willing to plot.
weight = np.linspace(0, 1, grain)
#Dirichlet parameter
alpha = 0.1
alphavec = np.array([1, 1, 1])*alpha
#Most extreme corners of the sample space
Corner1 = np.array([1.0 - edgedist*2, edgedist, edgedist])
Corner2 = np.array([edgedist, 1.0 - edgedist*2, edgedist])
Corner3 = np.array([edgedist, edgedist, 1.0 - edgedist*2])
#Probability density function that accepts 2D coordinates
def dpdf(v1,v2):
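 # (v1, v2) act as barycentric weights on the three corner points, so the
 # rectangular grid is mapped onto the 2-simplex; points with v1 + v2 > 1
 # fall outside the triangle and are returned as NaN so they are not drawn.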
if (v1 + v2)>1:
out = np.nan
else:
vec = v1 * Corner1 + v2 * Corner2 + (1.0 - v1 - v2)*Corner3
out = dirichlet.pdf(vec, alphavec)
return(out)
probs = np.array([dpdf(v1, v2) for v1 in weight for v2 in weight]).reshape(-1,grain)
fig = plt.figure(figsize=(20,15))
ax = fig.add_subplot(111, projection='3d')
X,Y = np.meshgrid(weight, weight)
ax.plot_surface(Y, X, probs, cmap = 'jet', vmin=0, vmax=3,rstride=1,cstride=1, linewidth=0)
ax.view_init(elev=25, azim=230)
#ax.view_init(elev=25, azim=20)
ax.set_zlabel('p')
ax.set_title(r'$\alpha$'+'='+str(alpha))
# Save before show() so the written file is not blank (show() can close the figure)
plt.savefig(os.path.join('figures', 'DirSimplex%d.pdf' % (alpha*10)))
plt.show()
| [
"os.path.join",
"numpy.array",
"numpy.linspace",
"matplotlib.pyplot.figure",
"scipy.stats.dirichlet.pdf",
"numpy.meshgrid",
"matplotlib.pyplot.show"
] | [((285, 309), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'grain'], {}), '(0, 1, grain)\n', (296, 309), True, 'import numpy as np\n'), ((434, 484), 'numpy.array', 'np.array', (['[1.0 - edgedist * 2, edgedist, edgedist]'], {}), '([1.0 - edgedist * 2, edgedist, edgedist])\n', (442, 484), True, 'import numpy as np\n'), ((493, 543), 'numpy.array', 'np.array', (['[edgedist, 1.0 - edgedist * 2, edgedist]'], {}), '([edgedist, 1.0 - edgedist * 2, edgedist])\n', (501, 543), True, 'import numpy as np\n'), ((552, 602), 'numpy.array', 'np.array', (['[edgedist, edgedist, 1.0 - edgedist * 2]'], {}), '([edgedist, edgedist, 1.0 - edgedist * 2])\n', (560, 602), True, 'import numpy as np\n'), ((948, 976), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20, 15)'}), '(figsize=(20, 15))\n', (958, 976), True, 'import matplotlib.pyplot as plt\n'), ((1025, 1052), 'numpy.meshgrid', 'np.meshgrid', (['weight', 'weight'], {}), '(weight, weight)\n', (1036, 1052), True, 'import numpy as np\n'), ((1269, 1279), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1277, 1279), True, 'import matplotlib.pyplot as plt\n'), ((355, 374), 'numpy.array', 'np.array', (['[1, 1, 1]'], {}), '([1, 1, 1])\n', (363, 374), True, 'import numpy as np\n'), ((1292, 1350), 'os.path.join', 'os.path.join', (['"""figures"""', "('DirSimplex%d.pdf' % (alpha * 10))"], {}), "('figures', 'DirSimplex%d.pdf' % (alpha * 10))\n", (1304, 1350), False, 'import os\n'), ((810, 838), 'scipy.stats.dirichlet.pdf', 'dirichlet.pdf', (['vec', 'alphavec'], {}), '(vec, alphavec)\n', (823, 838), False, 'from scipy.stats import dirichlet\n')] |
import os
import sys
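# Usage sketch (invoke with whatever name this script is saved under):
#   python <this_script>.py <relative_path> [prefix]
# <relative_path> should end with a path separator, because it is concatenated
# directly with the file names below; every non-hidden file in that folder is
# renamed to <prefix><counter>.<original extension>.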
def main():
# requirement check
try:
relative_path = sys.argv[1]
files = os.listdir(relative_path).copy()
except IndexError as e:
print("IndexError: please provide relative_path")
exit()
except FileNotFoundError as e:
print("FileNotFoundError: no such relative_path")
exit()
try:
prefix = sys.argv[2]
except IndexError as e:
prefix = ""
count = 0
for i in range(len(files)):
file_name = files[i]
if not file_name.startswith("."):
file_extension = file_name.split(".")[-1]
src = f"{relative_path}{file_name}"
dst = f"{relative_path}{prefix}{count}.{file_extension}"
os.rename(src, dst)
count += 1
if __name__ == '__main__':
main()
| [
"os.rename",
"os.listdir"
] | [((748, 767), 'os.rename', 'os.rename', (['src', 'dst'], {}), '(src, dst)\n', (757, 767), False, 'import os\n'), ((119, 144), 'os.listdir', 'os.listdir', (['relative_path'], {}), '(relative_path)\n', (129, 144), False, 'import os\n')] |
# autovc mel spectrogram shared by autovc/dvector/wavenet_vocoder
import os
import pickle
import numpy as np
import soundfile as sf
from scipy import signal
from scipy.signal import get_window
from librosa.filters import mel
from numpy.random import RandomState
def butter_highpass(cutoff, fs, order=5):
nyq = 0.5 * fs
normal_cutoff = cutoff / nyq
b, a = signal.butter(order, normal_cutoff, btype='high', analog=False)
return b, a
def pySTFT(x, fft_length, hop_length):
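 # Frame the reflect-padded signal into overlapping windows of fft_length
 # samples (hop of hop_length) via a zero-copy strided view, apply a Hann
 # window and a real FFT, and return the magnitude spectrum with shape
 # (fft_length//2 + 1, n_frames).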
x = np.pad(x, int(fft_length//2), mode='reflect')
noverlap = fft_length - hop_length
shape = x.shape[:-1]+((x.shape[-1]-noverlap)//hop_length, fft_length)
strides = x.strides[:-1]+(hop_length*x.strides[-1], x.strides[-1])
result = np.lib.stride_tricks.as_strided(x, shape=shape,
strides=strides)
fft_window = get_window('hann', fft_length, fftbins=True)
result = np.fft.rfft(fft_window * result, n=fft_length).T
return np.abs(result)
def log_melsp_01(x,
sr=16000,
n_fft=1024,
hop_length=256,
n_mels=80,
fmin=80,
fmax=8000):
 '''Compute an AutoVC-style normalized log-mel spectrogram in [0, 1]:
 high-pass filter the waveform, add a small dither, take the magnitude STFT,
 project it onto a mel filterbank, convert to dB and rescale to the 0-1 range.
 '''
mel_basis = mel(sr, n_fft, fmin=fmin, fmax=fmax, n_mels=n_mels).T
min_level = np.exp(-100 / 20 * np.log(10))
b, a = butter_highpass(30, 16000, order=5)
#
# Remove drifting noise
y = signal.filtfilt(b, a, x)
 # Add a little random noise for model robustness
prng = RandomState()
wav = y * 0.96 + (prng.rand(y.shape[0])-0.5)*1e-06
 # Compute the magnitude spectrogram
D = pySTFT(wav, fft_length=n_fft, hop_length=hop_length).T
# Convert to mel and normalize
D_mel = np.dot(D, mel_basis)
D_db = 20 * np.log10(np.maximum(min_level, D_mel)) - 16
S = np.clip((D_db + 100) / 100, 0, 1)
return S.astype(np.float32)
| [
"numpy.clip",
"numpy.abs",
"scipy.signal.filtfilt",
"numpy.log",
"scipy.signal.butter",
"numpy.lib.stride_tricks.as_strided",
"numpy.fft.rfft",
"numpy.dot",
"librosa.filters.mel",
"numpy.maximum",
"scipy.signal.get_window",
"numpy.random.RandomState"
] | [((370, 433), 'scipy.signal.butter', 'signal.butter', (['order', 'normal_cutoff'], {'btype': '"""high"""', 'analog': '(False)'}), "(order, normal_cutoff, btype='high', analog=False)\n", (383, 433), False, 'from scipy import signal\n'), ((760, 824), 'numpy.lib.stride_tricks.as_strided', 'np.lib.stride_tricks.as_strided', (['x'], {'shape': 'shape', 'strides': 'strides'}), '(x, shape=shape, strides=strides)\n', (791, 824), True, 'import numpy as np\n'), ((892, 936), 'scipy.signal.get_window', 'get_window', (['"""hann"""', 'fft_length'], {'fftbins': '(True)'}), "('hann', fft_length, fftbins=True)\n", (902, 936), False, 'from scipy.signal import get_window\n'), ((1015, 1029), 'numpy.abs', 'np.abs', (['result'], {}), '(result)\n', (1021, 1029), True, 'import numpy as np\n'), ((1377, 1401), 'scipy.signal.filtfilt', 'signal.filtfilt', (['b', 'a', 'x'], {}), '(b, a, x)\n', (1392, 1401), False, 'from scipy import signal\n'), ((1466, 1479), 'numpy.random.RandomState', 'RandomState', ([], {}), '()\n', (1477, 1479), False, 'from numpy.random import RandomState\n'), ((1665, 1685), 'numpy.dot', 'np.dot', (['D', 'mel_basis'], {}), '(D, mel_basis)\n', (1671, 1685), True, 'import numpy as np\n'), ((1754, 1787), 'numpy.clip', 'np.clip', (['((D_db + 100) / 100)', '(0)', '(1)'], {}), '((D_db + 100) / 100, 0, 1)\n', (1761, 1787), True, 'import numpy as np\n'), ((950, 996), 'numpy.fft.rfft', 'np.fft.rfft', (['(fft_window * result)'], {'n': 'fft_length'}), '(fft_window * result, n=fft_length)\n', (961, 996), True, 'import numpy as np\n'), ((1185, 1236), 'librosa.filters.mel', 'mel', (['sr', 'n_fft'], {'fmin': 'fmin', 'fmax': 'fmax', 'n_mels': 'n_mels'}), '(sr, n_fft, fmin=fmin, fmax=fmax, n_mels=n_mels)\n', (1188, 1236), False, 'from librosa.filters import mel\n'), ((1274, 1284), 'numpy.log', 'np.log', (['(10)'], {}), '(10)\n', (1280, 1284), True, 'import numpy as np\n'), ((1711, 1739), 'numpy.maximum', 'np.maximum', (['min_level', 'D_mel'], {}), '(min_level, D_mel)\n', (1721, 1739), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
# Copyright (c) 2021 The Project Cargamos Authors
from flask_restful import reqparse
from app.stock.models import StockMove as StockMoveModel, \
next_move, stock_product_warehouse
from app.product.models import Product as ProductModel
from app.warehouse.models import Warehouse as WarehouseModel
from app import db
from app.commun import rp, BaseResource, is_int
def stock_move_reqparse():
post_parse = reqparse.RequestParser()
post_parse.add_argument('qty', type=int, dest='qty', required=True,
help="The quantity")
post_parse.add_argument('product_id', type=int, dest='product_id', required=True,
help="The product id")
post_parse.add_argument('warehouse_id', type=int, dest='warehouse_id', required=True,
help="The warehouse id")
return post_parse.parse_args()
def stock_move_transfer(ttype):
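 # Illustrative request body (field names taken from the parser above):
 #   {"qty": 5, "product_id": 1, "warehouse_id": 2}
 # ttype == 'sale' creates an outgoing move after checking available stock;
 # any other ttype (e.g. 'purchase') creates an incoming move.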
args = stock_move_reqparse()
if args.qty <= 0:
return rp(message="Quantity must be an integer and greater to zero"), 404
if args.product_id <= 0:
return rp(message="Product ID must be an integer. Check /VERSION/product"), 404
if args.warehouse_id <= 0:
return rp(message="Warehouse ID must be an integer. Check /VERSION/warehouse"), 404
product_id = ProductModel.by(id=args.product_id)
warehouse_id = WarehouseModel.by(id=args.warehouse_id)
if ttype == 'sale':
stock = stock_product_warehouse(
product_id=product_id.id,
warehouse_id=warehouse_id.id)
if isinstance(stock, Exception):
return rp(message=str(stock)), 400
product_stock = stock[0].get('qty')
if int(args.qty) > product_stock:
return rp(
message="The product %s only has %s stock" % (product_id.id, product_stock),
), 400
type_move = 'out'
 else: # default is purchase
type_move = 'in'
name = next_move(ttype)
stock_move = StockMoveModel(name, type_move, args.qty,
product_id=product_id.id, warehouse_id=warehouse_id.id)
 err = stock_move.save()
 if err is not None:
return rp(message=str(err)), 500
return rp(success=True, payload=stock_move.serialize()), 201
class Sale(BaseResource):
def post(self):
return stock_move_transfer('sale')
class Purchase(BaseResource):
def post(self):
return stock_move_transfer('purchase')
| [
"flask_restful.reqparse.RequestParser",
"app.stock.models.next_move",
"app.stock.models.stock_product_warehouse",
"app.product.models.Product.by",
"app.commun.rp",
"app.stock.models.StockMove",
"app.warehouse.models.Warehouse.by"
] | [((437, 461), 'flask_restful.reqparse.RequestParser', 'reqparse.RequestParser', ([], {}), '()\n', (459, 461), False, 'from flask_restful import reqparse\n'), ((1269, 1304), 'app.product.models.Product.by', 'ProductModel.by', ([], {'id': 'args.product_id'}), '(id=args.product_id)\n', (1284, 1304), True, 'from app.product.models import Product as ProductModel\n'), ((1324, 1363), 'app.warehouse.models.Warehouse.by', 'WarehouseModel.by', ([], {'id': 'args.warehouse_id'}), '(id=args.warehouse_id)\n', (1341, 1363), True, 'from app.warehouse.models import Warehouse as WarehouseModel\n'), ((1918, 1934), 'app.stock.models.next_move', 'next_move', (['ttype'], {}), '(ttype)\n', (1927, 1934), False, 'from app.stock.models import StockMove as StockMoveModel, next_move, stock_product_warehouse\n'), ((1953, 2054), 'app.stock.models.StockMove', 'StockMoveModel', (['name', 'type_move', 'args.qty'], {'product_id': 'product_id.id', 'warehouse_id': 'warehouse_id.id'}), '(name, type_move, args.qty, product_id=product_id.id,\n warehouse_id=warehouse_id.id)\n', (1967, 2054), True, 'from app.stock.models import StockMove as StockMoveModel, next_move, stock_product_warehouse\n'), ((1405, 1484), 'app.stock.models.stock_product_warehouse', 'stock_product_warehouse', ([], {'product_id': 'product_id.id', 'warehouse_id': 'warehouse_id.id'}), '(product_id=product_id.id, warehouse_id=warehouse_id.id)\n', (1428, 1484), False, 'from app.stock.models import StockMove as StockMoveModel, next_move, stock_product_warehouse\n'), ((944, 1005), 'app.commun.rp', 'rp', ([], {'message': '"""Quantity must be an integer and greater to zero"""'}), "(message='Quantity must be an integer and greater to zero')\n", (946, 1005), False, 'from app.commun import rp, BaseResource, is_int\n'), ((1055, 1122), 'app.commun.rp', 'rp', ([], {'message': '"""Product ID must be an integer. Check /VERSION/product"""'}), "(message='Product ID must be an integer. Check /VERSION/product')\n", (1057, 1122), False, 'from app.commun import rp, BaseResource, is_int\n'), ((1174, 1245), 'app.commun.rp', 'rp', ([], {'message': '"""Warehouse ID must be an integer. Check /VERSION/warehouse"""'}), "(message='Warehouse ID must be an integer. Check /VERSION/warehouse')\n", (1176, 1245), False, 'from app.commun import rp, BaseResource, is_int\n'), ((1703, 1782), 'app.commun.rp', 'rp', ([], {'message': "('The product %s only has %s stock' % (product_id.id, product_stock))"}), "(message='The product %s only has %s stock' % (product_id.id, product_stock))\n", (1705, 1782), False, 'from app.commun import rp, BaseResource, is_int\n')] |
#!/usr/bin/env python
# Copyright 2015-2016 Arbor Technologies, Inc. https://arbor.io
#
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import argparse
import getpass
import gzip
import json
import logging
import os
import os.path
import re
import requests
import sys
import urlparse
class StreamDownloader(object):
"""StreamDownloader fetches new data from streams, producing a gzip'd batch
file in the output directory with all stream content produced since the
last invocation. An additional JSON METADATA file is stored in the
directory, which captures metadata used to perform incremental downloads
across invocations."""
# Headers returned by Arbor, which are parsed by the client and allow for
# direct retrieval of content from a cloud-storage provider.
CONTENT_RANGE_HEADER = 'Content-Range'
 CONTENT_RANGE_REGEXP = r'bytes\s+(\d+)-\d+/\d+'
FRAGMENT_LOCATION_HEADER = 'X-Fragment-Location'
FRAGMENT_NAME_HEADER = 'X-Fragment-Name'
FRAGMENT_WRITE_HEAD_HEADER = 'X-Write-Head'
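 # Illustrative (hypothetical) header values, matching the parsing logic below:
 #   Content-Range:   "bytes 4096-8191/65536"            -> resume offset 4096
 #   X-Fragment-Name: "000000001000-000000002000-<sha>"  -> hex begin/end offsets plus content sum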
# Buffer size for bulk copy.
BUFFER_SIZE = 1 << 15 # 32768
# Timeout between received bytes. Aborts a download if no data is received
# on the connection for more than the given number of seconds. The timeout
# resets any time more data is received on the socket.
SOCKET_TIMEOUT_SECONDS = 120
def __init__(self, output_dir, metadata_path, session):
self.output_dir = output_dir
self.metadata_path = metadata_path
self.session = session
self._metadata = self._load_metadata()
def fetch_some(self, stream_url):
"""Retrieves new content from the last-processed & stored offset."""
offset = self._metadata['offsets'].get(stream_url, 0)
# Perform a HEAD request to check for a directly fetch-able fragment.
full_url = "%s?offset=%d&block=false" % (stream_url, offset)
response = self.session.head(full_url, verify=True,
timeout=self.SOCKET_TIMEOUT_SECONDS)
logging.debug("HEAD %s (%s)\n\t%s", full_url, response.status_code,
response.headers)
if response.status_code == requests.codes.range_not_satisfiable:
   # No further content is available. We're done.
return False
else:
# Expect a 20X response.
response.raise_for_status()
offset = self._parse_response_offset(response.headers)
fragment = self._parse_fragment_name(response.headers)
location = response.headers.get(self.FRAGMENT_LOCATION_HEADER)
write_head = int(response.headers[self.FRAGMENT_WRITE_HEAD_HEADER])
basename = stream_url.split('/')[-1]
path_tmp = os.path.join(self.output_dir,
".%s.%016x.CURRENT" % (basename, offset))
with open(path_tmp, 'w') as output:
# Check if the fragment is available to be directly downloaded (eg,
# from cloud storage. Omit file:// URLs (returned in some Arbor
# test environments).
if location and not location.startswith('file://'):
# Transmission from cloud storage is always gzipped. Request
# the raw gzip so we don't have to do anything.
delta = self._transfer_from_location(offset, fragment, location,
output)
else:
delta = self._transfer_from_broker(stream_url, offset, output)
# Close and move to final location.
path_final = os.path.join(self.output_dir, "%s.%016x.%016x.gz" % (
basename, offset, offset+delta))
self._rename(path_tmp, path_final)
self._metadata['offsets'][stream_url] = offset + delta
self._store_metadata()
logging.info('wrote %s (%d bytes at offset %d)',
path_final, delta, offset)
# If we've read through the write head (at the time of the response),
# don't attempt another read. Otherwise we can get into loops reading
# small amounts of newly-written content.
return offset + delta < write_head
def _transfer_from_location(self, offset, fragment, location, stream_out):
"""Transfers to |stream_out| starting at |offset| from the named
|location| and |fragment|."""
skip_delta = offset - fragment[0]
if skip_delta < 0:
raise RuntimeError("Unexpected offset: %d (%r)", offset, fragment)
response = self.session.get(location, stream=True, verify=True)
response.raise_for_status()
# This code assumes from here on out that GCS always returns compressed
# fragments.
assert(response.headers['Content-Encoding'] == 'gzip')
if skip_delta > 0:
# As |skip_delta| refers to a skip offset of the uncompressed data,
# we must decompress to skip, then recompress afterward.
with gzip.GzipFile(fileobj=stream_out) as gzipped:
return self._transfer(response.raw, gzipped, skip_delta)
else:
# Alternatively, if we are reading the entire gzipped fragment,
# no conversion required. As the delta, return the size of this
# fragment, as we will have transferred fewer actual bytes.
self._transfer(response.raw, stream_out, skip_delta)
return fragment[1] - fragment[0]
def _transfer_from_broker(self, stream_url, offset, stream_out):
full_url = "%s?offset=%d&block=false" % (stream_url, offset)
response = self.session.get(full_url,
timeout=self.SOCKET_TIMEOUT_SECONDS,
stream=True, verify=True)
logging.debug("GET %s (%s)\n\t%s", full_url, response.status_code,
response.headers)
# Expect a 20X response.
response.raise_for_status()
# Compress the uncompressed stream data from the broker.
with gzip.GzipFile(fileobj=stream_out) as gzipped:
return self._transfer(response.raw, gzipped)
def _transfer(self, stream_in, stream_out, skip_delta=0):
"""Transfers from |stream_in| to |stream_out|, skipping |skip_delta|
leading decompressed bytes. The number of bytes transferred *after*
|skip_delta| is returned."""
delta = 0
decode_content = skip_delta > 0
while True:
buf = stream_in.read(self.BUFFER_SIZE,
decode_content=decode_content)
if not buf:
return delta
if skip_delta > len(buf):
skip_delta -= len(buf)
continue
elif skip_delta > 0:
buf = buf[skip_delta:]
skip_delta = 0
stream_out.write(buf)
delta += len(buf)
return delta
def _parse_fragment_name(self, headers):
"""Parses a stream fragment name (as begin-offset, end-offset,
content-sum)."""
first, last, sha_sum = headers[self.FRAGMENT_NAME_HEADER].split('-')
first, last = int(first, 16), int(last, 16)
return (first, last, sha_sum)
def _parse_response_offset(self, headers):
content_range = headers[self.CONTENT_RANGE_HEADER]
m = re.match(self.CONTENT_RANGE_REGEXP, content_range)
if m is None:
raise RuntimeError("invalid range %s" % content_range)
return int(m.group(1), 10)
def _load_metadata(self):
"""Reads and returns a metadata bundle, or returns a newly-initialized
bundle if none exists."""
if not os.path.isfile(self.metadata_path):
logging.debug("%s not a file: returning empty metadata",
self.metadata_path)
return {'offsets': {}}
return json.load(open(self.metadata_path))
def _store_metadata(self):
"""Atomically writes the current metadata bundle."""
path_tmp = self.metadata_path + '.TMP'
out = open(path_tmp, 'w')
json.dump(self._metadata, out)
out.close()
self._rename(path_tmp, self.metadata_path)
logging.debug("wrote metadata: %s", self.metadata_path)
def _rename(self, src_path, dst_path):
# Windows does not support atomic file operations, so we must remove
# |dst_path| before attempting a rename.
if os.name == 'nt' and os.path.exists(dst_path):
os.remove(dst_path)
os.rename(src_path, dst_path)
def new_authenticated_session(auth_url, user, password):
"""
Constructs a requests Session pre-configured with authentication
tokens for |user| and |password|. If no password is set, one is
read via stdin.
"""
session = requests.Session()
# If credentials are provided, obtain a signed authentication cookie.
if user is not None:
# Support optionally reading password directly from stdin.
if password is None:
password = getpass.getpass("%r password: " % user)
payload = {'username': user, 'password': password}
response = session.post(auth_url, data=json.dumps(payload))
response.raise_for_status()
return session
def main(argv):
parser = argparse.ArgumentParser(description='Provides batch record '
'download from an Arbor stream.')
parser.add_argument('--url', required=True, help='Stream URL to '
'download (ex, https://pippio.com/api/stream/records)')
parser.add_argument('--user', help='Username to authenticate as')
parser.add_argument('--password', help='Optional password to authenticate '
'with. %s will prompt for a password if one is not '
'provided' % os.path.basename(argv[0]))
parser.add_argument('--output-dir',
help='Optional output directory for downloads. '
'Defaults to the current directory',
default='.')
parser.add_argument('--metadata',
help='Optional path for storing download metadata '
'between invocations. Defaults to METADATA in '
'--output-dir if not set')
parser.add_argument('--verbose', action='store_true',
help='Enable verbose logging')
args = parser.parse_args(argv[1:])
if args.verbose:
logging.basicConfig(level=logging.DEBUG)
else:
logging.basicConfig(level=logging.INFO)
if not args.metadata:
args.metadata = os.path.join(args.output_dir, 'METADATA')
# Obtain an authenticated session.
parsed_url = urlparse.urlparse(args.url)
auth_url = "%s://%s/api/auth" % (parsed_url.scheme, parsed_url.netloc)
session = new_authenticated_session(auth_url, args.user, args.password)
# Download while content remains.
downloader = StreamDownloader(args.output_dir, args.metadata, session)
while downloader.fetch_some(args.url):
pass
if __name__ == '__main__':
sys.exit(main(sys.argv))
| [
"logging.basicConfig",
"os.path.exists",
"logging.debug",
"requests.Session",
"argparse.ArgumentParser",
"os.rename",
"json.dumps",
"os.path.join",
"re.match",
"getpass.getpass",
"os.path.isfile",
"gzip.GzipFile",
"os.remove",
"os.path.basename",
"logging.info",
"json.dump",
"urlparse.urlparse"
] | [((9903, 9921), 'requests.Session', 'requests.Session', ([], {}), '()\n', (9919, 9921), False, 'import requests\n'), ((10397, 10493), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Provides batch record download from an Arbor stream."""'}), "(description=\n 'Provides batch record download from an Arbor stream.')\n", (10420, 10493), False, 'import argparse\n'), ((11850, 11877), 'urlparse.urlparse', 'urlparse.urlparse', (['args.url'], {}), '(args.url)\n', (11867, 11877), False, 'import urlparse\n'), ((3080, 3171), 'logging.debug', 'logging.debug', (['"""HEAD %s (%s)\n\t%s"""', 'full_url', 'response.status_code', 'response.headers'], {}), '("""HEAD %s (%s)\n\t%s""", full_url, response.status_code,\n response.headers)\n', (3093, 3171), False, 'import logging\n'), ((3775, 3846), 'os.path.join', 'os.path.join', (['self.output_dir', "('.%s.%016x.CURRENT' % (basename, offset))"], {}), "(self.output_dir, '.%s.%016x.CURRENT' % (basename, offset))\n", (3787, 3846), False, 'import os\n'), ((4624, 4716), 'os.path.join', 'os.path.join', (['self.output_dir', "('%s.%016x.%016x.gz' % (basename, offset, offset + delta))"], {}), "(self.output_dir, '%s.%016x.%016x.gz' % (basename, offset, \n offset + delta))\n", (4636, 4716), False, 'import os\n'), ((4892, 4967), 'logging.info', 'logging.info', (['"""wrote %s (%d bytes at offset %d)"""', 'path_final', 'delta', 'offset'], {}), "('wrote %s (%d bytes at offset %d)', path_final, delta, offset)\n", (4904, 4967), False, 'import logging\n'), ((6843, 6932), 'logging.debug', 'logging.debug', (['"""GET %s (%s)\n\t%s"""', 'full_url', 'response.status_code', 'response.headers'], {}), "('GET %s (%s)\\n\\t%s', full_url, response.status_code, response\n .headers)\n", (6856, 6932), False, 'import logging\n'), ((8425, 8475), 're.match', 're.match', (['self.CONTENT_RANGE_REGEXP', 'content_range'], {}), '(self.CONTENT_RANGE_REGEXP, content_range)\n', (8433, 8475), False, 'import re\n'), ((9181, 9211), 'json.dump', 'json.dump', (['self._metadata', 'out'], {}), '(self._metadata, out)\n', (9190, 9211), False, 'import json\n'), ((9292, 9347), 'logging.debug', 'logging.debug', (['"""wrote metadata: %s"""', 'self.metadata_path'], {}), "('wrote metadata: %s', self.metadata_path)\n", (9305, 9347), False, 'import logging\n'), ((9615, 9644), 'os.rename', 'os.rename', (['src_path', 'dst_path'], {}), '(src_path, dst_path)\n', (9624, 9644), False, 'import os\n'), ((11601, 11641), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG'}), '(level=logging.DEBUG)\n', (11620, 11641), False, 'import logging\n'), ((11660, 11699), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (11679, 11699), False, 'import logging\n'), ((11751, 11792), 'os.path.join', 'os.path.join', (['args.output_dir', '"""METADATA"""'], {}), "(args.output_dir, 'METADATA')\n", (11763, 11792), False, 'import os\n'), ((7099, 7132), 'gzip.GzipFile', 'gzip.GzipFile', ([], {'fileobj': 'stream_out'}), '(fileobj=stream_out)\n', (7112, 7132), False, 'import gzip\n'), ((8760, 8794), 'os.path.isfile', 'os.path.isfile', (['self.metadata_path'], {}), '(self.metadata_path)\n', (8774, 8794), False, 'import os\n'), ((8808, 8884), 'logging.debug', 'logging.debug', (['"""%s not a file: returning empty metadata"""', 'self.metadata_path'], {}), "('%s not a file: returning empty metadata', self.metadata_path)\n", (8821, 8884), False, 'import logging\n'), ((9549, 9573), 'os.path.exists', 'os.path.exists', (['dst_path'], {}), 
'(dst_path)\n', (9563, 9573), False, 'import os\n'), ((9587, 9606), 'os.remove', 'os.remove', (['dst_path'], {}), '(dst_path)\n', (9596, 9606), False, 'import os\n'), ((10141, 10180), 'getpass.getpass', 'getpass.getpass', (["('%r password: ' % user)"], {}), "('%r password: ' % user)\n", (10156, 10180), False, 'import getpass\n'), ((6047, 6080), 'gzip.GzipFile', 'gzip.GzipFile', ([], {'fileobj': 'stream_out'}), '(fileobj=stream_out)\n', (6060, 6080), False, 'import gzip\n'), ((10288, 10307), 'json.dumps', 'json.dumps', (['payload'], {}), '(payload)\n', (10298, 10307), False, 'import json\n'), ((10943, 10968), 'os.path.basename', 'os.path.basename', (['argv[0]'], {}), '(argv[0])\n', (10959, 10968), False, 'import os\n')] |
from visions.core.model.relations import InferenceRelation
from visions.core.implementations.types.visions_ordinal import to_ordinal
def check_consecutive(l) -> bool:
return sorted(l) == list(range(min(l), max(l) + 1))
def is_ordinal_cat(c) -> bool:
s = c.astype(str)
if s.str.len().max() == 1:
unique_values = list(s[s.notna()].str.lower().unique())
return "a" in unique_values and check_consecutive(list(map(ord, unique_values)))
else:
return False
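# For example, a categorical column whose values are the single characters
# ['a', 'b', 'c'] contains 'a' and has consecutive character codes, so
# is_ordinal_cat returns True and the column can be inferred as ordinal.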
def categorical_to_ordinal(cls) -> InferenceRelation:
from visions.core.implementations.types import visions_categorical
return InferenceRelation(
cls, visions_categorical, relationship=is_ordinal_cat, transformer=to_ordinal
)
| [
"visions.core.model.relations.InferenceRelation"
] | [((634, 734), 'visions.core.model.relations.InferenceRelation', 'InferenceRelation', (['cls', 'visions_categorical'], {'relationship': 'is_ordinal_cat', 'transformer': 'to_ordinal'}), '(cls, visions_categorical, relationship=is_ordinal_cat,\n transformer=to_ordinal)\n', (651, 734), False, 'from visions.core.model.relations import InferenceRelation\n')] |
from pathlib import Path
from fhir.resources.valueset import ValueSet as _ValueSet
from oops_fhir.utils import ValueSet
from oops_fhir.r4.code_system.provenance_participant_role import (
ProvenanceParticipantRole as ProvenanceParticipantRole_,
)
__all__ = ["ProvenanceParticipantRole"]
_resource = _ValueSet.parse_file(Path(__file__).with_suffix(".json"))
class ProvenanceParticipantRole(ProvenanceParticipantRole_):
"""
Provenance participant role
The role that a provenance participant played
Status: draft - Version: 4.0.1
http://hl7.org/fhir/ValueSet/provenance-agent-role
"""
class Meta:
resource = _resource
| [
"pathlib.Path"
] | [((330, 344), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (334, 344), False, 'from pathlib import Path\n')] |
# This code has been licensed by the
# DO WHAT THE F*CK YOU WANT TO PUBLIC LICENSE
# Version 2
import sys
import argparse
import googletrans
from googletrans import Translator
parser = argparse.ArgumentParser(description="Translates a piece of text in the worst way possible.", epilog="This program has the WTFPL license. DO WHAT THE F*CK YOU WANT TO.")
parser.add_argument('--phrase', '-p', type=str, help="The phrase you want to translate. Make sure it's spelt correctly!", default="The quick brown fox jumps over the lazy dog.")
parser.add_argument('--source_language', '-s', type=str, help="The source text language. Use two letter codes or else it won't work. If you don't know what the language is, you can type 'detect'.", default="en")
parser.add_argument('--destination_language', '-d', type=str, help="The destination (target) language. Use two letter codes, same as the source language!", default="es")
parser.add_argument('--check_languages', help="Checks for languages possible on googletrans.", action='store_true')
parser.add_argument('--clean', help="Makes the translation output the only thing to appear.", action='store_true')
parser.add_argument('--boomerang', '-b', help="Translates it back to source language after translation. If --clean is on it will only display the boomeranged translation.", action='store_true')
parser.add_argument('--version', action='version', version='%(prog)s 0.1.0')
args, unknown = parser.parse_known_args()
# Checks if you are using the check_languages argument; if so it will display languages and close
if args.check_languages == True:
print("The languages possible are:")
print(googletrans.LANGUAGES)
print("The program will now close.")
sys.exit()
# Activates the translator
translator = Translator()
detection = "detection" # so the program won't error out when checking for a logographic language
if args.source_language == "detect":
detection = translator.detect(args.phrase).lang
if args.clean == False:
print("\n")
print("Phrase was detected as", detection + ".")
print("\n")
# Checks whether the detected/source language is logographic; if so, splits the phrase into characters
if detection == "ar" or args.source_language == "ar":
words = [i for i in args.phrase]
elif detection == "zh-TW" or args.source_language == "zh-TW":
words = [i for i in args.phrase]
elif detection == "zh-CN" or args.source_language == "zh-CN":
words = [i for i in args.phrase]
elif detection == "jw" or args.source_language == "jw":
words = [i for i in args.phrase]
elif detection == "ja" or args.source_language == "ja":
words = [i for i in args.phrase]
elif detection == "ko" or args.source_language == "ko":
words = [i for i in args.phrase]
else:
words = args.phrase.split(" ") # For other languages it just splits the phrase into words
if args.source_language == "detect":
translations = translator.translate(words, src=detection, dest=args.destination_language) # If the argument source_language uses detect it will use this
else:
translations = translator.translate(words, src=args.source_language, dest=args.destination_language)
# Prints out the result
if args.clean == False: # If false it will display the original and the arrows
print(args.phrase)
print("\n")
print("↓↓↓↓↓ (" + args.destination_language + ")")
print("\n")
if args.clean == False:
for translation in translations:
print(translation.text, end=" ")
print("\n")
elif args.boomerang == False:
for translation in translations:
print(translation.text, end=" ")
print("\n")
# Next part is for the boomerang argument
if args.boomerang == True:
 # Boomerang from the forward translation's output, not the original phrase
 boomerphrase = " ".join(translation.text for translation in translations)
 # Checks whether the destination language is logographic; if so, splits the phrase into characters
 if args.destination_language == "ar":
  boomerwords = [i for i in boomerphrase]
 elif args.destination_language == "zh-TW":
  boomerwords = [i for i in boomerphrase]
 elif args.destination_language == "zh-CN":
  boomerwords = [i for i in boomerphrase]
 elif args.destination_language == "jw":
  boomerwords = [i for i in boomerphrase]
 elif args.destination_language == "ja":
  boomerwords = [i for i in boomerphrase]
 elif args.destination_language == "ko":
  boomerwords = [i for i in boomerphrase]
 else:
  boomerwords = boomerphrase.split(" ") # For other languages it just splits the phrase into words
if args.source_language == "detect":
boomertranslations = translator.translate(boomerwords, src=args.destination_language, dest=detection) # If the argument source_language uses detect it will use this
else:
boomertranslations = translator.translate(boomerwords, src=args.destination_language, dest=args.source_language)
if args.clean == False: # If false it will display the original and the arrows
print("\n")
if args.source_language == "detect":
print("↓↓↓↓↓ (" + detection + ")")
else:
print("↓↓↓↓↓ (" + args.source_language + ")")
print("\n")
for translation in boomertranslations:
print(translation.text, end=" ")
print("\n") | [
"googletrans.Translator",
"argparse.ArgumentParser",
"sys.exit"
] | [((240, 418), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Translates a piece of text in the worst way possible."""', 'epilog': '"""This program has the WTFPL license. DO WHAT THE F*CK YOU WANT TO."""'}), "(description=\n 'Translates a piece of text in the worst way possible.', epilog=\n 'This program has the WTFPL license. DO WHAT THE F*CK YOU WANT TO.')\n", (263, 418), False, 'import argparse\n'), ((1806, 1818), 'googletrans.Translator', 'Translator', ([], {}), '()\n', (1816, 1818), False, 'from googletrans import Translator\n'), ((1754, 1764), 'sys.exit', 'sys.exit', ([], {}), '()\n', (1762, 1764), False, 'import sys\n')] |
"""Common serializers."""
import logging
from rest_framework import serializers
from ..models import Facility, System, UserFacilityAllotment
from .base_serializers import BaseSerializer
LOGGER = logging.getLogger(__name__)
class FacilitySerializer(BaseSerializer):
class Meta(BaseSerializer.Meta):
model = Facility
fields = "__all__"
class SystemSerializer(BaseSerializer):
class Meta(BaseSerializer.Meta):
model = System
fields = "__all__"
class UserFacilityAllotmentSerializer(BaseSerializer):
user_name = serializers.ReadOnlyField(source="user.__str__")
allotment_type_name = serializers.ReadOnlyField(source="get_allotment_type_display")
class Meta(BaseSerializer.Meta):
model = UserFacilityAllotment
fields = "__all__"
| [
"logging.getLogger",
"rest_framework.serializers.ReadOnlyField"
] | [((198, 225), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (215, 225), False, 'import logging\n'), ((562, 610), 'rest_framework.serializers.ReadOnlyField', 'serializers.ReadOnlyField', ([], {'source': '"""user.__str__"""'}), "(source='user.__str__')\n", (587, 610), False, 'from rest_framework import serializers\n'), ((637, 699), 'rest_framework.serializers.ReadOnlyField', 'serializers.ReadOnlyField', ([], {'source': '"""get_allotment_type_display"""'}), "(source='get_allotment_type_display')\n", (662, 699), False, 'from rest_framework import serializers\n')] |
import sys
from chr_order import category_chr_order
import pandas as pd
if len(sys.argv) < 5: # first, 0th arg is the name of this script
print("ERROR: you should specify args:")
print(" #1 GWAS summary statistics file, ")
print(" #2 output file name, i.e. GWAS SS file, sorted by Chr and BP")
print(" #3 Chr column index")
print(" #4 BP column index")
exit(1)
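# Example invocation (file names are illustrative):
#   python sort_gwas_ss.py gwas_ss.tsv gwas_ss.sorted.tsv 0 1
# where column 0 holds the chromosome and column 1 the base-pair position.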
# GWAS_FILE has to be in a tabular tab-sep format
GWAS_FILE = sys.argv[1]
SORTED_FILE = sys.argv[2]
Chr_col_i = int(sys.argv[3])
BP_col_i = int(sys.argv[4])
GWAS_df = pd.read_csv(GWAS_FILE, sep="\t")
# set order for the data type of Chr column
GWAS_df[GWAS_df.columns[Chr_col_i]] = GWAS_df[GWAS_df.columns[Chr_col_i]].astype(str).astype(category_chr_order) # type: ignore
GWAS_df[GWAS_df.columns[BP_col_i]] = GWAS_df[GWAS_df.columns[BP_col_i]].astype('Int64')
GWAS_df.sort_values(
by=[GWAS_df.columns[Chr_col_i], GWAS_df.columns[BP_col_i]],
ascending=[True, True],
inplace=True
) # type: ignore
GWAS_df.to_csv(SORTED_FILE, index=False, sep="\t")
| [
"pandas.read_csv"
] | [((563, 595), 'pandas.read_csv', 'pd.read_csv', (['GWAS_FILE'], {'sep': '"""\t"""'}), "(GWAS_FILE, sep='\\t')\n", (574, 595), True, 'import pandas as pd\n')] |
import torch
import nibabel as nib
import numpy as np
from nilearn.image import resample_img
from torchvision import transforms
import scipy.misc
from .options import Options
from .model import Model
from PIL import Image
def _toTensor(nibImg):
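 # A single 2-D slice becomes a (1, 3, H, W) tensor normalized to [-1, 1],
 # the format this script feeds to the model via set_input().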
img = Image.fromarray(nibImg).convert('RGB')
img = transforms.ToTensor()(img)
img = transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))(img)
img = img.view(1, img.shape[0], img.shape[1], img.shape[2])
return img
def _RGBtoGray(A):
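 # Weighted sum of the R, G, B channels with the standard ITU-R BT.601 luma
 # coefficients (0.299, 0.587, 0.114); input (N, 3, H, W) -> output (N, H, W).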
gray = A[:,0, ...] * 0.299 + A[:,1, ...] * 0.587 + A[:,2, ...] * 0.114
return gray
def main():
opt = Options().parse()
assert(opt.input.endswith('nii.gz'))
inputVolume = nib.load(opt.input)
N = inputVolume.shape[2]
target_shape = (opt.fineSize, opt.fineSize, N)
data = resample_img(inputVolume, inputVolume.affine, target_shape=target_shape).get_data()
model = Model()
model.initialize(opt)
output = torch.FloatTensor(N, 3, opt.fineSize, opt.fineSize)
for i in range(N):
if opt.verbose:
print('process slice %d' % i)
model.set_input({'A': _toTensor(data[:,:,i])})
model.forward()
output[i] = model.fake_B.detach().cpu()
output = _RGBtoGray(output)
outputImg = nib.Nifti1Image(output.permute(1,2,0).numpy(), inputVolume.affine)
outputfile = opt.output
if not outputfile.endswith("nii.gz"):
outputfile = "%s.nii.gz" % (outputfile)
print('save output as %s' % outputfile)
nib.save(outputImg, outputfile)
if __name__ == "__main__":
main()
| [
"PIL.Image.fromarray",
"nibabel.save",
"nibabel.load",
"torchvision.transforms.Normalize",
"torchvision.transforms.ToTensor",
"torch.FloatTensor",
"nilearn.image.resample_img"
] | [((698, 717), 'nibabel.load', 'nib.load', (['opt.input'], {}), '(opt.input)\n', (706, 717), True, 'import nibabel as nib\n'), ((953, 1004), 'torch.FloatTensor', 'torch.FloatTensor', (['N', '(3)', 'opt.fineSize', 'opt.fineSize'], {}), '(N, 3, opt.fineSize, opt.fineSize)\n', (970, 1004), False, 'import torch\n'), ((1503, 1534), 'nibabel.save', 'nib.save', (['outputImg', 'outputfile'], {}), '(outputImg, outputfile)\n', (1511, 1534), True, 'import nibabel as nib\n'), ((311, 332), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (330, 332), False, 'from torchvision import transforms\n'), ((348, 402), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.5, 0.5, 0.5)', '(0.5, 0.5, 0.5)'], {}), '((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n', (368, 402), False, 'from torchvision import transforms\n'), ((257, 280), 'PIL.Image.fromarray', 'Image.fromarray', (['nibImg'], {}), '(nibImg)\n', (272, 280), False, 'from PIL import Image\n'), ((809, 881), 'nilearn.image.resample_img', 'resample_img', (['inputVolume', 'inputVolume.affine'], {'target_shape': 'target_shape'}), '(inputVolume, inputVolume.affine, target_shape=target_shape)\n', (821, 881), False, 'from nilearn.image import resample_img\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright 2018 Open Energy Efficiency, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from datetime import datetime, timedelta
from pkg_resources import resource_stream
import pandas as pd
import pytest
from eemeter.transform import (
as_freq,
day_counts,
get_baseline_data,
get_reporting_data,
remove_duplicates,
NoBaselineDataError,
NoReportingDataError,
)
def test_as_freq_not_series(il_electricity_cdd_hdd_billing_monthly):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
assert meter_data.shape == (27, 1)
with pytest.raises(ValueError):
as_freq(meter_data, freq="H")
def test_as_freq_hourly(il_electricity_cdd_hdd_billing_monthly):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
assert meter_data.shape == (27, 1)
as_hourly = as_freq(meter_data.value, freq="H")
assert as_hourly.shape == (18961,)
assert round(meter_data.value.sum(), 1) == round(as_hourly.sum(), 1) == 21290.2
def test_as_freq_daily(il_electricity_cdd_hdd_billing_monthly):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
assert meter_data.shape == (27, 1)
as_daily = as_freq(meter_data.value, freq="D")
assert as_daily.shape == (791,)
assert round(meter_data.value.sum(), 1) == round(as_daily.sum(), 1) == 21290.2
def test_as_freq_month_start(il_electricity_cdd_hdd_billing_monthly):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
assert meter_data.shape == (27, 1)
as_month_start = as_freq(meter_data.value, freq="MS")
assert as_month_start.shape == (27,)
assert round(meter_data.value.sum(), 1) == round(as_month_start.sum(), 1) == 21290.2
def test_as_freq_hourly_temperature(il_electricity_cdd_hdd_billing_monthly):
temperature_data = il_electricity_cdd_hdd_billing_monthly["temperature_data"]
assert temperature_data.shape == (19417, )
as_hourly = as_freq(temperature_data, freq="H", series_type='instantaneous')
assert as_hourly.shape == (19417,)
assert round(temperature_data.mean(), 1) == round(as_hourly.mean(), 1) == 54.6
def test_as_freq_daily_temperature(il_electricity_cdd_hdd_billing_monthly):
temperature_data = il_electricity_cdd_hdd_billing_monthly["temperature_data"]
assert temperature_data.shape == (19417, )
as_daily = as_freq(temperature_data, freq="D", series_type='instantaneous')
assert as_daily.shape == (810,)
assert abs(temperature_data.mean() - as_daily.mean()) <= 0.1
def test_as_freq_month_start_temperature(il_electricity_cdd_hdd_billing_monthly):
temperature_data = il_electricity_cdd_hdd_billing_monthly["temperature_data"]
assert temperature_data.shape == (19417, )
as_month_start = as_freq(temperature_data, freq="MS", series_type='instantaneous')
assert as_month_start.shape == (28,)
assert round(as_month_start.mean(), 1) == 53.4
def test_as_freq_daily_temperature_monthly(il_electricity_cdd_hdd_billing_monthly):
temperature_data = il_electricity_cdd_hdd_billing_monthly["temperature_data"]
temperature_data = temperature_data.groupby(pd.Grouper(freq='MS')).mean()
assert temperature_data.shape == (28, )
as_daily = as_freq(temperature_data, freq="D", series_type='instantaneous')
assert as_daily.shape == (824,)
assert round(as_daily.mean(), 1) == 54.5
def test_as_freq_empty():
meter_data = pd.DataFrame({"value": []})
empty_meter_data = as_freq(meter_data.value, freq="H")
assert empty_meter_data.empty
def test_day_counts(il_electricity_cdd_hdd_billing_monthly):
data = il_electricity_cdd_hdd_billing_monthly["meter_data"].value
counts = day_counts(data.index)
assert counts.shape == (27,)
assert counts.iloc[0] == 29.0
assert pd.isnull(counts.iloc[-1])
assert counts.sum() == 790.0
def test_day_counts_empty_series():
index = pd.DatetimeIndex([])
index.freq = None
data = pd.Series([], index=index)
counts = day_counts(data.index)
assert counts.shape == (0,)
def test_get_baseline_data(il_electricity_cdd_hdd_hourly):
meter_data = il_electricity_cdd_hdd_hourly["meter_data"]
baseline_data, warnings = get_baseline_data(meter_data)
assert meter_data.shape == baseline_data.shape == (19417, 1)
assert len(warnings) == 0
def test_get_baseline_data_with_end(il_electricity_cdd_hdd_hourly):
meter_data = il_electricity_cdd_hdd_hourly["meter_data"]
blackout_start_date = il_electricity_cdd_hdd_hourly["blackout_start_date"]
baseline_data, warnings = get_baseline_data(meter_data, end=blackout_start_date)
assert meter_data.shape != baseline_data.shape == (8761, 1)
assert len(warnings) == 0
def test_get_baseline_data_with_end_no_max_days(il_electricity_cdd_hdd_hourly):
meter_data = il_electricity_cdd_hdd_hourly["meter_data"]
blackout_start_date = il_electricity_cdd_hdd_hourly["blackout_start_date"]
baseline_data, warnings = get_baseline_data(
meter_data, end=blackout_start_date, max_days=None
)
assert meter_data.shape != baseline_data.shape == (9595, 1)
assert len(warnings) == 0
def test_get_baseline_data_empty(il_electricity_cdd_hdd_hourly):
meter_data = il_electricity_cdd_hdd_hourly["meter_data"]
blackout_start_date = il_electricity_cdd_hdd_hourly["blackout_start_date"]
with pytest.raises(NoBaselineDataError):
get_baseline_data(meter_data, end=pd.Timestamp("2000").tz_localize("UTC"))
def test_get_baseline_data_start_gap(il_electricity_cdd_hdd_hourly):
meter_data = il_electricity_cdd_hdd_hourly["meter_data"]
start = meter_data.index.min() - timedelta(days=1)
baseline_data, warnings = get_baseline_data(meter_data, start=start)
assert meter_data.shape == baseline_data.shape == (19417, 1)
assert len(warnings) == 1
warning = warnings[0]
assert warning.qualified_name == "eemeter.get_baseline_data.gap_at_baseline_start"
assert (
warning.description
== "Data does not have coverage at requested baseline start date."
)
assert warning.data == {
"data_start": "2015-11-22T06:00:00+00:00",
"requested_start": "2015-11-21T06:00:00+00:00",
}
def test_get_baseline_data_end_gap(il_electricity_cdd_hdd_hourly):
meter_data = il_electricity_cdd_hdd_hourly["meter_data"]
end = meter_data.index.max() + timedelta(days=1)
baseline_data, warnings = get_baseline_data(meter_data, end=end, max_days=None)
assert meter_data.shape == baseline_data.shape == (19417, 1)
assert len(warnings) == 1
warning = warnings[0]
assert warning.qualified_name == "eemeter.get_baseline_data.gap_at_baseline_end"
assert (
warning.description
== "Data does not have coverage at requested baseline end date."
)
assert warning.data == {
"data_end": "2018-02-08T06:00:00+00:00",
"requested_end": "2018-02-09T06:00:00+00:00",
}
def test_get_reporting_data(il_electricity_cdd_hdd_hourly):
meter_data = il_electricity_cdd_hdd_hourly["meter_data"]
reporting_data, warnings = get_reporting_data(meter_data)
assert meter_data.shape == reporting_data.shape == (19417, 1)
assert len(warnings) == 0
def test_get_reporting_data_with_start(il_electricity_cdd_hdd_hourly):
meter_data = il_electricity_cdd_hdd_hourly["meter_data"]
blackout_end_date = il_electricity_cdd_hdd_hourly["blackout_end_date"]
reporting_data, warnings = get_reporting_data(meter_data, start=blackout_end_date)
assert meter_data.shape != reporting_data.shape == (8761, 1)
assert len(warnings) == 0
def test_get_reporting_data_with_start_no_max_days(il_electricity_cdd_hdd_hourly):
meter_data = il_electricity_cdd_hdd_hourly["meter_data"]
blackout_end_date = il_electricity_cdd_hdd_hourly["blackout_end_date"]
reporting_data, warnings = get_reporting_data(
meter_data, start=blackout_end_date, max_days=None
)
assert meter_data.shape != reporting_data.shape == (9607, 1)
assert len(warnings) == 0
def test_get_reporting_data_empty(il_electricity_cdd_hdd_hourly):
meter_data = il_electricity_cdd_hdd_hourly["meter_data"]
blackout_end_date = il_electricity_cdd_hdd_hourly["blackout_end_date"]
with pytest.raises(NoReportingDataError):
get_reporting_data(meter_data, start=pd.Timestamp("2030").tz_localize("UTC"))
def test_get_reporting_data_start_gap(il_electricity_cdd_hdd_hourly):
meter_data = il_electricity_cdd_hdd_hourly["meter_data"]
start = meter_data.index.min() - timedelta(days=1)
reporting_data, warnings = get_reporting_data(
meter_data, start=start, max_days=None
)
assert meter_data.shape == reporting_data.shape == (19417, 1)
assert len(warnings) == 1
warning = warnings[0]
assert warning.qualified_name == "eemeter.get_reporting_data.gap_at_reporting_start"
assert (
warning.description
== "Data does not have coverage at requested reporting start date."
)
assert warning.data == {
"data_start": "2015-11-22T06:00:00+00:00",
"requested_start": "2015-11-21T06:00:00+00:00",
}
def test_get_reporting_data_end_gap(il_electricity_cdd_hdd_hourly):
meter_data = il_electricity_cdd_hdd_hourly["meter_data"]
end = meter_data.index.max() + timedelta(days=1)
reporting_data, warnings = get_reporting_data(meter_data, end=end)
assert meter_data.shape == reporting_data.shape == (19417, 1)
assert len(warnings) == 1
warning = warnings[0]
assert warning.qualified_name == "eemeter.get_reporting_data.gap_at_reporting_end"
assert (
warning.description
== "Data does not have coverage at requested reporting end date."
)
assert warning.data == {
"data_end": "2018-02-08T06:00:00+00:00",
"requested_end": "2018-02-09T06:00:00+00:00",
}
def test_remove_duplicates_df():
index = pd.DatetimeIndex(["2017-01-01", "2017-01-02", "2017-01-02"])
df = pd.DataFrame({"value": [1, 2, 3]}, index=index)
assert df.shape == (3, 1)
df_dedupe = remove_duplicates(df)
assert df_dedupe.shape == (2, 1)
assert list(df_dedupe.value) == [1, 2]
def test_remove_duplicates_series():
index = pd.DatetimeIndex(["2017-01-01", "2017-01-02", "2017-01-02"])
series = pd.Series([1, 2, 3], index=index)
assert series.shape == (3,)
series_dedupe = remove_duplicates(series)
assert series_dedupe.shape == (2,)
assert list(series_dedupe) == [1, 2]
| [
"pandas.Series",
"pandas.isnull",
"eemeter.transform.get_baseline_data",
"pandas.DatetimeIndex",
"pandas.Grouper",
"pandas.Timestamp",
"eemeter.transform.day_counts",
"eemeter.transform.as_freq",
"pytest.raises",
"eemeter.transform.get_reporting_data",
"pandas.DataFrame",
"datetime.timedelta",
"eemeter.transform.remove_duplicates"
] | [((1404, 1439), 'eemeter.transform.as_freq', 'as_freq', (['meter_data.value'], {'freq': '"""H"""'}), "(meter_data.value, freq='H')\n", (1411, 1439), False, 'from eemeter.transform import as_freq, day_counts, get_baseline_data, get_reporting_data, remove_duplicates, NoBaselineDataError, NoReportingDataError\n'), ((1753, 1788), 'eemeter.transform.as_freq', 'as_freq', (['meter_data.value'], {'freq': '"""D"""'}), "(meter_data.value, freq='D')\n", (1760, 1788), False, 'from eemeter.transform import as_freq, day_counts, get_baseline_data, get_reporting_data, remove_duplicates, NoBaselineDataError, NoReportingDataError\n'), ((2110, 2146), 'eemeter.transform.as_freq', 'as_freq', (['meter_data.value'], {'freq': '"""MS"""'}), "(meter_data.value, freq='MS')\n", (2117, 2146), False, 'from eemeter.transform import as_freq, day_counts, get_baseline_data, get_reporting_data, remove_duplicates, NoBaselineDataError, NoReportingDataError\n'), ((2501, 2565), 'eemeter.transform.as_freq', 'as_freq', (['temperature_data'], {'freq': '"""H"""', 'series_type': '"""instantaneous"""'}), "(temperature_data, freq='H', series_type='instantaneous')\n", (2508, 2565), False, 'from eemeter.transform import as_freq, day_counts, get_baseline_data, get_reporting_data, remove_duplicates, NoBaselineDataError, NoReportingDataError\n'), ((2910, 2974), 'eemeter.transform.as_freq', 'as_freq', (['temperature_data'], {'freq': '"""D"""', 'series_type': '"""instantaneous"""'}), "(temperature_data, freq='D', series_type='instantaneous')\n", (2917, 2974), False, 'from eemeter.transform import as_freq, day_counts, get_baseline_data, get_reporting_data, remove_duplicates, NoBaselineDataError, NoReportingDataError\n'), ((3310, 3375), 'eemeter.transform.as_freq', 'as_freq', (['temperature_data'], {'freq': '"""MS"""', 'series_type': '"""instantaneous"""'}), "(temperature_data, freq='MS', series_type='instantaneous')\n", (3317, 3375), False, 'from eemeter.transform import as_freq, day_counts, get_baseline_data, get_reporting_data, remove_duplicates, NoBaselineDataError, NoReportingDataError\n'), ((3773, 3837), 'eemeter.transform.as_freq', 'as_freq', (['temperature_data'], {'freq': '"""D"""', 'series_type': '"""instantaneous"""'}), "(temperature_data, freq='D', series_type='instantaneous')\n", (3780, 3837), False, 'from eemeter.transform import as_freq, day_counts, get_baseline_data, get_reporting_data, remove_duplicates, NoBaselineDataError, NoReportingDataError\n'), ((3964, 3991), 'pandas.DataFrame', 'pd.DataFrame', (["{'value': []}"], {}), "({'value': []})\n", (3976, 3991), True, 'import pandas as pd\n'), ((4015, 4050), 'eemeter.transform.as_freq', 'as_freq', (['meter_data.value'], {'freq': '"""H"""'}), "(meter_data.value, freq='H')\n", (4022, 4050), False, 'from eemeter.transform import as_freq, day_counts, get_baseline_data, get_reporting_data, remove_duplicates, NoBaselineDataError, NoReportingDataError\n'), ((4231, 4253), 'eemeter.transform.day_counts', 'day_counts', (['data.index'], {}), '(data.index)\n', (4241, 4253), False, 'from eemeter.transform import as_freq, day_counts, get_baseline_data, get_reporting_data, remove_duplicates, NoBaselineDataError, NoReportingDataError\n'), ((4332, 4358), 'pandas.isnull', 'pd.isnull', (['counts.iloc[-1]'], {}), '(counts.iloc[-1])\n', (4341, 4358), True, 'import pandas as pd\n'), ((4442, 4462), 'pandas.DatetimeIndex', 'pd.DatetimeIndex', (['[]'], {}), '([])\n', (4458, 4462), True, 'import pandas as pd\n'), ((4496, 4522), 'pandas.Series', 'pd.Series', (['[]'], {'index': 'index'}), '([], 
index=index)\n', (4505, 4522), True, 'import pandas as pd\n'), ((4536, 4558), 'eemeter.transform.day_counts', 'day_counts', (['data.index'], {}), '(data.index)\n', (4546, 4558), False, 'from eemeter.transform import as_freq, day_counts, get_baseline_data, get_reporting_data, remove_duplicates, NoBaselineDataError, NoReportingDataError\n'), ((4743, 4772), 'eemeter.transform.get_baseline_data', 'get_baseline_data', (['meter_data'], {}), '(meter_data)\n', (4760, 4772), False, 'from eemeter.transform import as_freq, day_counts, get_baseline_data, get_reporting_data, remove_duplicates, NoBaselineDataError, NoReportingDataError\n'), ((5108, 5162), 'eemeter.transform.get_baseline_data', 'get_baseline_data', (['meter_data'], {'end': 'blackout_start_date'}), '(meter_data, end=blackout_start_date)\n', (5125, 5162), False, 'from eemeter.transform import as_freq, day_counts, get_baseline_data, get_reporting_data, remove_duplicates, NoBaselineDataError, NoReportingDataError\n'), ((5509, 5578), 'eemeter.transform.get_baseline_data', 'get_baseline_data', (['meter_data'], {'end': 'blackout_start_date', 'max_days': 'None'}), '(meter_data, end=blackout_start_date, max_days=None)\n', (5526, 5578), False, 'from eemeter.transform import as_freq, day_counts, get_baseline_data, get_reporting_data, remove_duplicates, NoBaselineDataError, NoReportingDataError\n'), ((6239, 6281), 'eemeter.transform.get_baseline_data', 'get_baseline_data', (['meter_data'], {'start': 'start'}), '(meter_data, start=start)\n', (6256, 6281), False, 'from eemeter.transform import as_freq, day_counts, get_baseline_data, get_reporting_data, remove_duplicates, NoBaselineDataError, NoReportingDataError\n'), ((6967, 7020), 'eemeter.transform.get_baseline_data', 'get_baseline_data', (['meter_data'], {'end': 'end', 'max_days': 'None'}), '(meter_data, end=end, max_days=None)\n', (6984, 7020), False, 'from eemeter.transform import as_freq, day_counts, get_baseline_data, get_reporting_data, remove_duplicates, NoBaselineDataError, NoReportingDataError\n'), ((7639, 7669), 'eemeter.transform.get_reporting_data', 'get_reporting_data', (['meter_data'], {}), '(meter_data)\n', (7657, 7669), False, 'from eemeter.transform import as_freq, day_counts, get_baseline_data, get_reporting_data, remove_duplicates, NoBaselineDataError, NoReportingDataError\n'), ((8006, 8061), 'eemeter.transform.get_reporting_data', 'get_reporting_data', (['meter_data'], {'start': 'blackout_end_date'}), '(meter_data, start=blackout_end_date)\n', (8024, 8061), False, 'from eemeter.transform import as_freq, day_counts, get_baseline_data, get_reporting_data, remove_duplicates, NoBaselineDataError, NoReportingDataError\n'), ((8409, 8479), 'eemeter.transform.get_reporting_data', 'get_reporting_data', (['meter_data'], {'start': 'blackout_end_date', 'max_days': 'None'}), '(meter_data, start=blackout_end_date, max_days=None)\n', (8427, 8479), False, 'from eemeter.transform import as_freq, day_counts, get_baseline_data, get_reporting_data, remove_duplicates, NoBaselineDataError, NoReportingDataError\n'), ((9144, 9202), 'eemeter.transform.get_reporting_data', 'get_reporting_data', (['meter_data'], {'start': 'start', 'max_days': 'None'}), '(meter_data, start=start, max_days=None)\n', (9162, 9202), False, 'from eemeter.transform import as_freq, day_counts, get_baseline_data, get_reporting_data, remove_duplicates, NoBaselineDataError, NoReportingDataError\n'), ((9908, 9947), 'eemeter.transform.get_reporting_data', 'get_reporting_data', (['meter_data'], {'end': 'end'}), '(meter_data, 
end=end)\n', (9926, 9947), False, 'from eemeter.transform import as_freq, day_counts, get_baseline_data, get_reporting_data, remove_duplicates, NoBaselineDataError, NoReportingDataError\n'), ((10463, 10523), 'pandas.DatetimeIndex', 'pd.DatetimeIndex', (["['2017-01-01', '2017-01-02', '2017-01-02']"], {}), "(['2017-01-01', '2017-01-02', '2017-01-02'])\n", (10479, 10523), True, 'import pandas as pd\n'), ((10533, 10580), 'pandas.DataFrame', 'pd.DataFrame', (["{'value': [1, 2, 3]}"], {'index': 'index'}), "({'value': [1, 2, 3]}, index=index)\n", (10545, 10580), True, 'import pandas as pd\n'), ((10627, 10648), 'eemeter.transform.remove_duplicates', 'remove_duplicates', (['df'], {}), '(df)\n', (10644, 10648), False, 'from eemeter.transform import as_freq, day_counts, get_baseline_data, get_reporting_data, remove_duplicates, NoBaselineDataError, NoReportingDataError\n'), ((10780, 10840), 'pandas.DatetimeIndex', 'pd.DatetimeIndex', (["['2017-01-01', '2017-01-02', '2017-01-02']"], {}), "(['2017-01-01', '2017-01-02', '2017-01-02'])\n", (10796, 10840), True, 'import pandas as pd\n'), ((10854, 10887), 'pandas.Series', 'pd.Series', (['[1, 2, 3]'], {'index': 'index'}), '([1, 2, 3], index=index)\n', (10863, 10887), True, 'import pandas as pd\n'), ((10940, 10965), 'eemeter.transform.remove_duplicates', 'remove_duplicates', (['series'], {}), '(series)\n', (10957, 10965), False, 'from eemeter.transform import as_freq, day_counts, get_baseline_data, get_reporting_data, remove_duplicates, NoBaselineDataError, NoReportingDataError\n'), ((1147, 1172), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1160, 1172), False, 'import pytest\n'), ((1182, 1211), 'eemeter.transform.as_freq', 'as_freq', (['meter_data'], {'freq': '"""H"""'}), "(meter_data, freq='H')\n", (1189, 1211), False, 'from eemeter.transform import as_freq, day_counts, get_baseline_data, get_reporting_data, remove_duplicates, NoBaselineDataError, NoReportingDataError\n'), ((5903, 5937), 'pytest.raises', 'pytest.raises', (['NoBaselineDataError'], {}), '(NoBaselineDataError)\n', (5916, 5937), False, 'import pytest\n'), ((6191, 6208), 'datetime.timedelta', 'timedelta', ([], {'days': '(1)'}), '(days=1)\n', (6200, 6208), False, 'from datetime import datetime, timedelta\n'), ((6919, 6936), 'datetime.timedelta', 'timedelta', ([], {'days': '(1)'}), '(days=1)\n', (6928, 6936), False, 'from datetime import datetime, timedelta\n'), ((8802, 8837), 'pytest.raises', 'pytest.raises', (['NoReportingDataError'], {}), '(NoReportingDataError)\n', (8815, 8837), False, 'import pytest\n'), ((9095, 9112), 'datetime.timedelta', 'timedelta', ([], {'days': '(1)'}), '(days=1)\n', (9104, 9112), False, 'from datetime import datetime, timedelta\n'), ((9859, 9876), 'datetime.timedelta', 'timedelta', ([], {'days': '(1)'}), '(days=1)\n', (9868, 9876), False, 'from datetime import datetime, timedelta\n'), ((3684, 3705), 'pandas.Grouper', 'pd.Grouper', ([], {'freq': '"""MS"""'}), "(freq='MS')\n", (3694, 3705), True, 'import pandas as pd\n'), ((5981, 6001), 'pandas.Timestamp', 'pd.Timestamp', (['"""2000"""'], {}), "('2000')\n", (5993, 6001), True, 'import pandas as pd\n'), ((8884, 8904), 'pandas.Timestamp', 'pd.Timestamp', (['"""2030"""'], {}), "('2030')\n", (8896, 8904), True, 'import pandas as pd\n')] |
#!/usr/bin/env python
# coding: utf-8
# sudo python3 Python/fuck_machine/main.py
import atexit
from flask import Flask, render_template, url_for, request
import json
import config
from speed_control import Speed_Control
# Logging
import logging
log = logging.getLogger('werkzeug')
log.setLevel(logging.ERROR)
# globals
app = Flask(__name__)
speed_control = Speed_Control(
speed_limit=config.SPEED_LIMIT,
invert=config.INVERT,
	res_max=config.RES_MAX
)
# base html page
@app.route('/')
@app.route('/index')
def index():
return render_template('index.html')
# This is the receiver route, where the page's JavaScript POSTs update events
# from the various HTML widgets
""" json keys and values
action: [statusChk|sendSpeed],
"""
@app.route('/receiver', methods = ['POST'])
def worker():
	data = request.form
	if not data:  # request.form is never None; treat an empty form as a plain status check
		return sendStatus()
	if data['action'] == "statusChk":
		return sendStatus()
	if data['action'] == "sendSpeed":
		print("Setting Speed to " + data['speed'])
		setSpeed(int(data['speed']))
		return sendStatus()
	return "Invalid Request"
# returns a json string with status values
def sendStatus():
status = {
'status' : 'OK',
'online' : True,
'speed' : speed_control.speed
}
return json.dumps(status)
def setSpeed(inputSpeed):
speed_control.speed = inputSpeed
def cleanUp():
speed_control.cleanup()
if __name__ == "__main__":
	# register cleanup before the blocking run() call so it is invoked on exit
	atexit.register(cleanUp)
	app.run(host='0.0.0.0', port=config.PORT, debug=False)
| [
"logging.getLogger",
"flask.render_template",
"flask.Flask",
"json.dumps",
"speed_control.Speed_Control",
"atexit.register"
] | [((263, 292), 'logging.getLogger', 'logging.getLogger', (['"""werkzeug"""'], {}), "('werkzeug')\n", (280, 292), False, 'import logging\n'), ((344, 359), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (349, 359), False, 'from flask import Flask, render_template, url_for, request\n'), ((377, 473), 'speed_control.Speed_Control', 'Speed_Control', ([], {'speed_limit': 'config.SPEED_LIMIT', 'invert': 'config.INVERT', 'res_max': 'config.RES_MAX'}), '(speed_limit=config.SPEED_LIMIT, invert=config.INVERT, res_max\n =config.RES_MAX)\n', (390, 473), False, 'from speed_control import Speed_Control\n'), ((581, 610), 'flask.render_template', 'render_template', (['"""index.html"""'], {}), "('index.html')\n", (596, 610), False, 'from flask import Flask, render_template, url_for, request\n'), ((1302, 1320), 'json.dumps', 'json.dumps', (['status'], {}), '(status)\n', (1312, 1320), False, 'import json\n'), ((1530, 1554), 'atexit.register', 'atexit.register', (['cleanUp'], {}), '(cleanUp)\n', (1545, 1554), False, 'import atexit\n')] |
import os
import json
import argparse
from tabular_benchmarks import FCNetProteinStructureBenchmark, FCNetSliceLocalizationBenchmark,\
FCNetNavalPropulsionBenchmark, FCNetParkinsonsTelemonitoringBenchmark
from tabular_benchmarks import NASCifar10A, NASCifar10B, NASCifar10C
parser = argparse.ArgumentParser()
parser.add_argument('--run_id', default=0, type=int, nargs='?', help='unique number to identify this run')
parser.add_argument('--benchmark', default="protein_structure", type=str, nargs='?', help='specifies the benchmark')
parser.add_argument('--n_iters', default=100, type=int, nargs='?', help='number of iterations for optimization method')
parser.add_argument('--output_path', default="./", type=str, nargs='?',
help='specifies the path where the results will be saved')
parser.add_argument('--data_dir', default="./", type=str, nargs='?', help='specifies the path to the tabular data')
args = parser.parse_args()
if args.benchmark == "nas_cifar10a":
b = NASCifar10A(data_dir=args.data_dir)
elif args.benchmark == "nas_cifar10b":
b = NASCifar10B(data_dir=args.data_dir)
elif args.benchmark == "nas_cifar10c":
b = NASCifar10C(data_dir=args.data_dir)
elif args.benchmark == "protein_structure":
b = FCNetProteinStructureBenchmark(data_dir=args.data_dir)
elif args.benchmark == "slice_localization":
b = FCNetSliceLocalizationBenchmark(data_dir=args.data_dir)
elif args.benchmark == "naval_propulsion":
b = FCNetNavalPropulsionBenchmark(data_dir=args.data_dir)
elif args.benchmark == "parkinsons_telemonitoring":
b = FCNetParkinsonsTelemonitoringBenchmark(data_dir=args.data_dir)
output_path = os.path.join(args.output_path, "random_search")
os.makedirs(output_path, exist_ok=True)
cs = b.get_configuration_space()
runtime = []
regret = []
curr_incumbent = None
curr_inc_value = None
rt = 0
X = []
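# Pure random search: sample a configuration from the search space at random each
# iteration, evaluate it on the benchmark, and dump the benchmark's accumulated
# results to run_<run_id>.json after every evaluation.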
for i in range(args.n_iters):
config = cs.sample_configuration()
b.objective_function(config)
if args.benchmark == "nas_cifar10a" or args.benchmark == "nas_cifar10b" or args.benchmark == "nas_cifar10c":
res = b.get_results(ignore_invalid_configs=True)
else:
res = b.get_results()
fh = open(os.path.join(output_path, 'run_%d.json' % args.run_id), 'w')
json.dump(res, fh)
fh.close()
| [
"tabular_benchmarks.FCNetProteinStructureBenchmark",
"tabular_benchmarks.FCNetParkinsonsTelemonitoringBenchmark",
"argparse.ArgumentParser",
"os.path.join",
"tabular_benchmarks.FCNetSliceLocalizationBenchmark",
"tabular_benchmarks.FCNetNavalPropulsionBenchmark",
"tabular_benchmarks.NASCifar10A",
"tabular_benchmarks.NASCifar10C",
"tabular_benchmarks.NASCifar10B",
"json.dump"
] | [((289, 314), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (312, 314), False, 'import argparse\n'), ((1667, 1714), 'os.path.join', 'os.path.join', (['args.output_path', '"""random_search"""'], {}), "(args.output_path, 'random_search')\n", (1679, 1714), False, 'import os\n'), ((2258, 2276), 'json.dump', 'json.dump', (['res', 'fh'], {}), '(res, fh)\n', (2267, 2276), False, 'import json\n'), ((1000, 1035), 'tabular_benchmarks.NASCifar10A', 'NASCifar10A', ([], {'data_dir': 'args.data_dir'}), '(data_dir=args.data_dir)\n', (1011, 1035), False, 'from tabular_benchmarks import NASCifar10A, NASCifar10B, NASCifar10C\n'), ((1727, 1752), 'os.path.join', 'os.path.join', (['output_path'], {}), '(output_path)\n', (1739, 1752), False, 'import os\n'), ((2197, 2251), 'os.path.join', 'os.path.join', (['output_path', "('run_%d.json' % args.run_id)"], {}), "(output_path, 'run_%d.json' % args.run_id)\n", (2209, 2251), False, 'import os\n'), ((1084, 1119), 'tabular_benchmarks.NASCifar10B', 'NASCifar10B', ([], {'data_dir': 'args.data_dir'}), '(data_dir=args.data_dir)\n', (1095, 1119), False, 'from tabular_benchmarks import NASCifar10A, NASCifar10B, NASCifar10C\n'), ((1168, 1203), 'tabular_benchmarks.NASCifar10C', 'NASCifar10C', ([], {'data_dir': 'args.data_dir'}), '(data_dir=args.data_dir)\n', (1179, 1203), False, 'from tabular_benchmarks import NASCifar10A, NASCifar10B, NASCifar10C\n'), ((1257, 1311), 'tabular_benchmarks.FCNetProteinStructureBenchmark', 'FCNetProteinStructureBenchmark', ([], {'data_dir': 'args.data_dir'}), '(data_dir=args.data_dir)\n', (1287, 1311), False, 'from tabular_benchmarks import FCNetProteinStructureBenchmark, FCNetSliceLocalizationBenchmark, FCNetNavalPropulsionBenchmark, FCNetParkinsonsTelemonitoringBenchmark\n'), ((1366, 1421), 'tabular_benchmarks.FCNetSliceLocalizationBenchmark', 'FCNetSliceLocalizationBenchmark', ([], {'data_dir': 'args.data_dir'}), '(data_dir=args.data_dir)\n', (1397, 1421), False, 'from tabular_benchmarks import FCNetProteinStructureBenchmark, FCNetSliceLocalizationBenchmark, FCNetNavalPropulsionBenchmark, FCNetParkinsonsTelemonitoringBenchmark\n'), ((1474, 1527), 'tabular_benchmarks.FCNetNavalPropulsionBenchmark', 'FCNetNavalPropulsionBenchmark', ([], {'data_dir': 'args.data_dir'}), '(data_dir=args.data_dir)\n', (1503, 1527), False, 'from tabular_benchmarks import FCNetProteinStructureBenchmark, FCNetSliceLocalizationBenchmark, FCNetNavalPropulsionBenchmark, FCNetParkinsonsTelemonitoringBenchmark\n'), ((1589, 1651), 'tabular_benchmarks.FCNetParkinsonsTelemonitoringBenchmark', 'FCNetParkinsonsTelemonitoringBenchmark', ([], {'data_dir': 'args.data_dir'}), '(data_dir=args.data_dir)\n', (1627, 1651), False, 'from tabular_benchmarks import FCNetProteinStructureBenchmark, FCNetSliceLocalizationBenchmark, FCNetNavalPropulsionBenchmark, FCNetParkinsonsTelemonitoringBenchmark\n')] |
import os
import buildbot
import buildbot.process.factory
from buildbot.steps.source import SVN
from buildbot.steps.shell import Configure, ShellCommand
from buildbot.steps.shell import WarningCountingShellCommand
from buildbot.process.properties import WithProperties
from zorg.buildbot.commands.LitTestCommand import LitTestCommand
from Util import getConfigArgs
def getLLVMCMakeBuildFactory(
clean = True, # "clean-llvm" step is requested if true.
test = True, # "test-llvm" step is requested if true.
jobs = '%(jobs)s', # Number of concurrent jobs.
timeout = 20, # Timeout if no activity seen (minutes).
make = 'make', # Make command.
enable_shared = False, # Enable shared (-DBUILD_SHARED_LIBS=ON configure parameters added) if true.
defaultBranch = 'trunk', # Branch to build.
config_name = 'Debug', # Configuration name.
env = None, # Environmental variables for all steps.
extra_cmake_args = []): # Extra args for the cmake step.
    # Prepare environment variables. Set here everything we want applied to every step.
merged_env = {
'TERM' : 'dumb' # Make sure Clang doesn't use color escape sequences.
}
if env is not None:
merged_env.update(env) # Overwrite pre-set items with the given ones, so user can set anything.
llvm_srcdir = "llvm.src"
llvm_objdir = "llvm.obj"
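    # Factory steps: record the build dir, SVN checkout, CMake configure,
    # optional clean, parallel compile, and an optional lit "check-all" run.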
f = buildbot.process.factory.BuildFactory()
# Determine the build directory.
f.addStep(
buildbot.steps.shell.SetProperty(
name = "get_builddir",
command = ["pwd"],
property = "builddir",
description = "set build dir",
workdir = ".",
env = merged_env))
# Checkout sources.
f.addStep(
SVN(
name = 'svn-llvm',
mode = 'update', baseURL='http://llvm.org/svn/llvm-project/llvm/',
defaultBranch = defaultBranch,
workdir = llvm_srcdir))
cmake_args = ['cmake']
cmake_args += ["-DCMAKE_BUILD_TYPE="+config_name]
if enable_shared:
cmake_args.append('-DBUILD_SHARED_LIBS=ON')
cmake_args.extend(extra_cmake_args)
cmake_args += ['../' + llvm_srcdir]
f.addStep(
Configure(
command = cmake_args,
description = ['configuring', config_name],
descriptionDone = ['configure', config_name],
workdir = llvm_objdir,
env = merged_env))
if clean:
f.addStep(
WarningCountingShellCommand(
name = "clean-llvm",
command = [make, 'clean'],
haltOnFailure = True,
description = "cleaning llvm",
descriptionDone = "clean llvm",
workdir = llvm_objdir,
env = merged_env))
f.addStep(
WarningCountingShellCommand(
name = "compile",
command = ['nice', '-n', '10',
make, WithProperties("-j%s" % jobs)],
haltOnFailure = True,
description = "compiling llvm",
descriptionDone = "compile llvm",
workdir = llvm_objdir,
env = merged_env,
timeout = timeout * 60))
if test:
litTestArgs = '-v -j %s' % jobs
f.addStep(
LitTestCommand(
name = 'test-llvm',
command = [make, "check-all", "VERBOSE=1",
WithProperties("-j%s" % jobs),
WithProperties("LIT_ARGS=%s" % litTestArgs)],
description = ["testing", "llvm"],
descriptionDone = ["test", "llvm"],
workdir = llvm_objdir,
env = merged_env))
return f
| [
"buildbot.steps.source.SVN",
"buildbot.steps.shell.Configure",
"buildbot.process.properties.WithProperties",
"buildbot.process.factory.BuildFactory",
"buildbot.steps.shell.WarningCountingShellCommand",
"buildbot.steps.shell.SetProperty"
] | [((1754, 1793), 'buildbot.process.factory.BuildFactory', 'buildbot.process.factory.BuildFactory', ([], {}), '()\n', (1791, 1793), False, 'import buildbot\n'), ((1855, 2013), 'buildbot.steps.shell.SetProperty', 'buildbot.steps.shell.SetProperty', ([], {'name': '"""get_builddir"""', 'command': "['pwd']", 'property': '"""builddir"""', 'description': '"""set build dir"""', 'workdir': '"""."""', 'env': 'merged_env'}), "(name='get_builddir', command=['pwd'],\n property='builddir', description='set build dir', workdir='.', env=\n merged_env)\n", (1887, 2013), False, 'import buildbot\n'), ((2165, 2309), 'buildbot.steps.source.SVN', 'SVN', ([], {'name': '"""svn-llvm"""', 'mode': '"""update"""', 'baseURL': '"""http://llvm.org/svn/llvm-project/llvm/"""', 'defaultBranch': 'defaultBranch', 'workdir': 'llvm_srcdir'}), "(name='svn-llvm', mode='update', baseURL=\n 'http://llvm.org/svn/llvm-project/llvm/', defaultBranch=defaultBranch,\n workdir=llvm_srcdir)\n", (2168, 2309), False, 'from buildbot.steps.source import SVN\n'), ((2642, 2803), 'buildbot.steps.shell.Configure', 'Configure', ([], {'command': 'cmake_args', 'description': "['configuring', config_name]", 'descriptionDone': "['configure', config_name]", 'workdir': 'llvm_objdir', 'env': 'merged_env'}), "(command=cmake_args, description=['configuring', config_name],\n descriptionDone=['configure', config_name], workdir=llvm_objdir, env=\n merged_env)\n", (2651, 2803), False, 'from buildbot.steps.shell import Configure, ShellCommand\n'), ((2946, 3142), 'buildbot.steps.shell.WarningCountingShellCommand', 'WarningCountingShellCommand', ([], {'name': '"""clean-llvm"""', 'command': "[make, 'clean']", 'haltOnFailure': '(True)', 'description': '"""cleaning llvm"""', 'descriptionDone': '"""clean llvm"""', 'workdir': 'llvm_objdir', 'env': 'merged_env'}), "(name='clean-llvm', command=[make, 'clean'],\n haltOnFailure=True, description='cleaning llvm', descriptionDone=\n 'clean llvm', workdir=llvm_objdir, env=merged_env)\n", (2973, 3142), False, 'from buildbot.steps.shell import WarningCountingShellCommand\n'), ((3488, 3517), 'buildbot.process.properties.WithProperties', 'WithProperties', (["('-j%s' % jobs)"], {}), "('-j%s' % jobs)\n", (3502, 3517), False, 'from buildbot.process.properties import WithProperties\n'), ((4029, 4058), 'buildbot.process.properties.WithProperties', 'WithProperties', (["('-j%s' % jobs)"], {}), "('-j%s' % jobs)\n", (4043, 4058), False, 'from buildbot.process.properties import WithProperties\n'), ((4095, 4138), 'buildbot.process.properties.WithProperties', 'WithProperties', (["('LIT_ARGS=%s' % litTestArgs)"], {}), "('LIT_ARGS=%s' % litTestArgs)\n", (4109, 4138), False, 'from buildbot.process.properties import WithProperties\n')] |
from rest_framework.decorators import api_view, permission_classes
from rest_framework.permissions import IsAdminUser
from apps.core.views import poster
from . import tasks
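# Two admin-only POST endpoints: one fetches and publishes posts immediately,
# the other ("blank" publish) only saves them to the database without publishing.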
@api_view(['POST'])
@permission_classes([IsAdminUser])
def publish_view(request):
return poster(tasks.fetch_and_publish)
@api_view(['POST'])
@permission_classes([IsAdminUser])
def blank_publish_view(request):
"""Save posts into db w/o publishing."""
return poster(lambda: tasks.fetch_and_publish(blank=True, force=True))
| [
"rest_framework.decorators.permission_classes",
"rest_framework.decorators.api_view",
"apps.core.views.poster"
] | [((177, 195), 'rest_framework.decorators.api_view', 'api_view', (["['POST']"], {}), "(['POST'])\n", (185, 195), False, 'from rest_framework.decorators import api_view, permission_classes\n'), ((197, 230), 'rest_framework.decorators.permission_classes', 'permission_classes', (['[IsAdminUser]'], {}), '([IsAdminUser])\n', (215, 230), False, 'from rest_framework.decorators import api_view, permission_classes\n'), ((304, 322), 'rest_framework.decorators.api_view', 'api_view', (["['POST']"], {}), "(['POST'])\n", (312, 322), False, 'from rest_framework.decorators import api_view, permission_classes\n'), ((324, 357), 'rest_framework.decorators.permission_classes', 'permission_classes', (['[IsAdminUser]'], {}), '([IsAdminUser])\n', (342, 357), False, 'from rest_framework.decorators import api_view, permission_classes\n'), ((269, 300), 'apps.core.views.poster', 'poster', (['tasks.fetch_and_publish'], {}), '(tasks.fetch_and_publish)\n', (275, 300), False, 'from apps.core.views import poster\n')] |
import asyncio
import functools
import pathlib
import threading
from http import server
import pytest
from digslash import sites
def get_dir(dirname):
return pathlib.os.path.join(
pathlib.os.path.dirname(__file__),
dirname
)
@pytest.fixture
def website1():
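    # Serve the static 'website-1' directory over HTTP in a daemon thread,
    # yield a Site crawler pointed at it, then shut the server down cleanly.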
web_dir = get_dir('website-1')
httpd = server.HTTPServer(
('127.0.0.1', 8000),
functools.partial(server.SimpleHTTPRequestHandler, directory=web_dir)
)
httpd_thread = threading.Thread(target=httpd.serve_forever)
httpd_thread.daemon = True
httpd_thread.start()
site = sites.Site('http://127.0.0.1:8000/')
yield site
httpd.server_close()
httpd.shutdown()
httpd_thread.join()
@pytest.fixture
def website1_with_duplicates():
web_dir = get_dir('website-1')
httpd = server.HTTPServer(
('127.0.0.1', 8000),
functools.partial(server.SimpleHTTPRequestHandler, directory=web_dir)
)
httpd_thread = threading.Thread(target=httpd.serve_forever)
httpd_thread.daemon = True
httpd_thread.start()
site = sites.Site('http://127.0.0.1:8000/', deduplicate=False)
yield site
httpd.server_close()
httpd.shutdown()
httpd_thread.join()
@pytest.fixture
def website2():
web_dir = get_dir('website-2')
httpd = server.HTTPServer(
('127.0.0.1', 8000),
functools.partial(server.SimpleHTTPRequestHandler, directory=web_dir)
)
httpd_thread = threading.Thread(target=httpd.serve_forever)
httpd_thread.daemon = True
httpd_thread.start()
site = sites.Site('http://127.0.0.1:8000/')
yield site
httpd.server_close()
httpd.shutdown()
httpd_thread.join()
def test_handle_duplicates(website1):
asyncio.run(website1.crawl())
assert set(website1.results.keys()) == {
'http://127.0.0.1:8000/',
'http://127.0.0.1:8000/pages/contact.html',
'http://127.0.0.1:8000/pages/about.html',
'http://127.0.0.1:8000/pages/feedback.html',
'http://127.0.0.1:8000/js/script.js',
'http://127.0.0.1:8000/scripts/feedback.html',
}
def test_keep_duplicates(website1_with_duplicates):
asyncio.run(website1_with_duplicates.crawl())
assert set(website1_with_duplicates.results.keys()) == {
'http://127.0.0.1:8000/',
'http://127.0.0.1:8000/pages/contact.html',
'http://127.0.0.1:8000/pages/about.html',
'http://127.0.0.1:8000/pages/feedback.html',
'http://127.0.0.1:8000/js/script.js',
'http://127.0.0.1:8000/scripts/feedback.html',
'http://127.0.0.1:8000/index.html',
}
def test_site_response_content_type(website2):
asyncio.run(website2.crawl())
assert website2.results == {
'http://127.0.0.1:8000/': {
'checksum': '4d651f294542b8829a46d8dc191838bd',
'content_type': 'text/html',
'encoding': 'utf-8',
'source': '',
},
'http://127.0.0.1:8000/code.js': {
'checksum': 'b4577eafb339aab8076a1e069e62d2c5',
'content_type': 'application/javascript',
'encoding': 'ascii',
'source': 'http://127.0.0.1:8000/page.html',
},
'http://127.0.0.1:8000/page.html': {
'checksum': '091ee4d646a8e62a6bb4092b439b07a1',
'content_type': 'text/html',
'encoding': 'latin_1',
'source': 'http://127.0.0.1:8000/',
}
}
| [
"threading.Thread",
"digslash.sites.Site",
"functools.partial",
"pathlib.os.path.dirname"
] | [((485, 529), 'threading.Thread', 'threading.Thread', ([], {'target': 'httpd.serve_forever'}), '(target=httpd.serve_forever)\n', (501, 529), False, 'import threading\n'), ((597, 633), 'digslash.sites.Site', 'sites.Site', (['"""http://127.0.0.1:8000/"""'], {}), "('http://127.0.0.1:8000/')\n", (607, 633), False, 'from digslash import sites\n'), ((967, 1011), 'threading.Thread', 'threading.Thread', ([], {'target': 'httpd.serve_forever'}), '(target=httpd.serve_forever)\n', (983, 1011), False, 'import threading\n'), ((1079, 1134), 'digslash.sites.Site', 'sites.Site', (['"""http://127.0.0.1:8000/"""'], {'deduplicate': '(False)'}), "('http://127.0.0.1:8000/', deduplicate=False)\n", (1089, 1134), False, 'from digslash import sites\n'), ((1452, 1496), 'threading.Thread', 'threading.Thread', ([], {'target': 'httpd.serve_forever'}), '(target=httpd.serve_forever)\n', (1468, 1496), False, 'import threading\n'), ((1564, 1600), 'digslash.sites.Site', 'sites.Site', (['"""http://127.0.0.1:8000/"""'], {}), "('http://127.0.0.1:8000/')\n", (1574, 1600), False, 'from digslash import sites\n'), ((196, 229), 'pathlib.os.path.dirname', 'pathlib.os.path.dirname', (['__file__'], {}), '(__file__)\n', (219, 229), False, 'import pathlib\n'), ((390, 459), 'functools.partial', 'functools.partial', (['server.SimpleHTTPRequestHandler'], {'directory': 'web_dir'}), '(server.SimpleHTTPRequestHandler, directory=web_dir)\n', (407, 459), False, 'import functools\n'), ((872, 941), 'functools.partial', 'functools.partial', (['server.SimpleHTTPRequestHandler'], {'directory': 'web_dir'}), '(server.SimpleHTTPRequestHandler, directory=web_dir)\n', (889, 941), False, 'import functools\n'), ((1357, 1426), 'functools.partial', 'functools.partial', (['server.SimpleHTTPRequestHandler'], {'directory': 'web_dir'}), '(server.SimpleHTTPRequestHandler, directory=web_dir)\n', (1374, 1426), False, 'import functools\n')] |
"""
Logic for the set ground speed API endpoint
"""
from flask_restful import reqparse
from flask_restful import Resource
import bluebird.api.resources.utils.utils as utils
from bluebird.api.resources.utils.responses import checked_resp
from bluebird.utils.types import Callsign
from bluebird.utils.types import GroundSpeed
_PARSER = reqparse.RequestParser()
_PARSER.add_argument(
utils.CALLSIGN_LABEL, type=Callsign, location="json", required=True
)
_PARSER.add_argument("gspd", type=GroundSpeed, location="json", required=True)
class Gspd(Resource):
"""Contains logic for the SPD endpoint"""
@staticmethod
def post():
"""
Logic for POST events. If the request contains an existing aircraft ID, then a
request is sent to alter its ground speed
"""
req_args = utils.parse_args(_PARSER)
callsign = req_args[utils.CALLSIGN_LABEL]
resp = utils.check_exists(utils.sim_proxy(), callsign)
if resp:
return resp
err = utils.sim_proxy().aircraft.set_ground_speed(callsign, req_args["gspd"])
return checked_resp(err)
| [
"bluebird.api.resources.utils.utils.sim_proxy",
"flask_restful.reqparse.RequestParser",
"bluebird.api.resources.utils.utils.parse_args",
"bluebird.api.resources.utils.responses.checked_resp"
] | [((337, 361), 'flask_restful.reqparse.RequestParser', 'reqparse.RequestParser', ([], {}), '()\n', (359, 361), False, 'from flask_restful import reqparse\n'), ((823, 848), 'bluebird.api.resources.utils.utils.parse_args', 'utils.parse_args', (['_PARSER'], {}), '(_PARSER)\n', (839, 848), True, 'import bluebird.api.resources.utils.utils as utils\n'), ((1107, 1124), 'bluebird.api.resources.utils.responses.checked_resp', 'checked_resp', (['err'], {}), '(err)\n', (1119, 1124), False, 'from bluebird.api.resources.utils.responses import checked_resp\n'), ((934, 951), 'bluebird.api.resources.utils.utils.sim_proxy', 'utils.sim_proxy', ([], {}), '()\n', (949, 951), True, 'import bluebird.api.resources.utils.utils as utils\n'), ((1019, 1036), 'bluebird.api.resources.utils.utils.sim_proxy', 'utils.sim_proxy', ([], {}), '()\n', (1034, 1036), True, 'import bluebird.api.resources.utils.utils as utils\n')] |
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
###############################################################################
#Non-Standard Imports
###############################################################################
import addpath
import dunlin as dn
import dunlin._utils_model.events as uev
import dunlin._utils_model.dun_file_reader as dfr
import dunlin._utils_model.ivp as ivp
import dunlin._utils_model.ode_coder as odc
if __name__ == '__main__':
#Some overhead for testing
plt.close('all')
###############################################################################
#Part 1: Manual Instantiation
###############################################################################
def plot(t, y, AX, label='_nolabel'):
for i, ax in enumerate(AX):
ax.plot(t, y[i], label=label)
top = np.max(y[i])
top = top*1.2 if top else 1
top = np.maximum(top, ax.get_ylim()[1])
bottom = -top*.05
ax.set_ylim(bottom=bottom, top=top)
if label != '_nolabel':
ax.legend()
def execute1(t, y, p):
new_y = y.copy()
new_p = p.copy()
new_y[0] = 3
return new_y, new_p
def execute2(t, y, p):
new_y = y.copy()
new_p = p.copy()
new_y[0] = 0.5 + new_y[0]
return new_y, new_p
def execute3(t, y, p):
new_y = y.copy()
new_p = p.copy()
new_y[0] = 1
return new_y, new_p
def execute4(t, y, p):
new_y = y.copy()
new_p = p.copy()
new_p[0] = 0.03
return new_y, new_p
def trigger_func1(t, y, p):
return y[1] - 0.2
def trigger_func2(t, y, p):
return y[2] - 2.5
def trigger_func3(t, y, p):
return 0.2 - y[0]
def func(t, y, p):
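        # Two-step linear cascade x0 -> x1 -> x2 with first-order rate
        # constants p0 and p1; used as the test ODE for every event case.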
x0 = y[0]
x1 = y[1]
x2 = y[2]
p0 = p[0]
p1 = p[1]
r0 = p0*x0
r1 = p1*x1
d_x0 = -r0
d_x1 = +r0 -r1
d_x2 = r1
return np.array([d_x0, d_x1, d_x2])
y0 = np.array([1, 0, 0])
p = np.array([0.01, 0.01])
tspan = np.linspace(0, 1000, 101)
fig = plt.figure()
AX = [fig.add_subplot(1, 3, i+1) for i in range(3)]
rtol = 1e-3
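    # Each case integrates the ODE with its events and compares [t, y] against
    # a previously saved reference CSV within the relative tolerance above.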
###For saving###
#df = pd.DataFrame(np.concatenate(([t], y), axis=0))
#df.to_csv('event_test_files/simulate_event_.csv', index=False)
################
#Case 1: Timer
print('Case 1: Timer')
#Expect: Spike at 209
event0 = uev.Event(name='E0', execute=execute1, trigger_func=lambda t, *args: t-209)
# event0.remove = True
events = [event0]
t, y = ivp.integrate(func, tspan, y0, p, events=events, overlap=True, include_events=True)
plot(t, y, AX, 'Case 1: Timer')
df = pd.read_csv('event_test_files/simulate_event_1.csv')
answer = df.values
values = np.concatenate(([t], y), axis=0)
assert np.all( np.isclose(answer, values, rtol=rtol))
#Case 2: Event
print('Case 2: Event')
#Expect: Spike at 25
event0 = uev.Event(name='E0', trigger_func=trigger_func1, execute=execute1)
events = [event0]
t, y = ivp.integrate(func, tspan, y0, p, events=events, overlap=True, include_events=True)
plot(t, y, AX, 'Case 2: Event')
df = pd.read_csv('event_test_files/simulate_event_2.csv')
answer = df.values
values = np.concatenate(([t], y), axis=0)
assert np.all( np.isclose(answer, values, rtol=rtol))
#Case 3: Event with delay
print('Case 3: Event with delay')
#Expect: Spike at 425 and 830
event0 = uev.Event(delay=400, persistent=True, name='E0', trigger_func=trigger_func1, execute=execute1)
events = [event0]
t, y = ivp.integrate(func, tspan, y0, p, events=events, overlap=True, include_events=True)
plot(t, y, AX, 'Case 3: Event with delay')
df = pd.read_csv('event_test_files/simulate_event_3.csv')
answer = df.values
values = np.concatenate(([t], y), axis=0)
assert np.all( np.isclose(answer, values, rtol=rtol))
#Case 4: Event with delay and not persistent
print('Case 4: Event with delay and not persistent')
#Expect: No spike
event0 = uev.Event(delay=400, persistent=False, name='E0', trigger_func=trigger_func1, execute=execute1)
events = [event0]
t, y = ivp.integrate(func, tspan, y0, p, events=events)
plot(t, y, AX, 'Case 4: Event with delay and not persistent')
df = pd.read_csv('event_test_files/simulate_event_4.csv')
answer = df.values
values = np.concatenate(([t], y), axis=0)
assert np.all( np.isclose(answer, values, rtol=rtol))
#Case 5: Trigger at start
print('Case 5: Trigger at start')
#Expect: Spike at 10
flipped = lambda *args: -trigger_func1(*args)
event0 = uev.Event(delay=10, persistent=True, name='E0', trigger_func=flipped, execute=execute1)
events = [event0]
t, y = ivp.integrate(func, tspan, y0, p, events=events)
plot(t, y, AX, 'Case 5: Trigger at start')
df = pd.read_csv('event_test_files/simulate_event_5.csv')
answer = df.values
values = np.concatenate(([t], y), axis=0)
assert np.all( np.isclose(answer, values, rtol=rtol))
#Case 6: Multiple events
print('Case 6: Multiple events')
#Expect: Spike at 25 and 300
event0 = uev.Event(delay=0, name='E0', trigger_func=trigger_func1, execute=execute1)
event1 = uev.Event(delay=0, name='E1', trigger_func=trigger_func2, execute=execute1)
events = [event0, event1]
t, y = ivp.integrate(func, tspan, y0, p, events=events)
plot(t, y, AX, 'Case 6: Multiple events')
df = pd.read_csv('event_test_files/simulate_event_6.csv')
answer = df.values
values = np.concatenate(([t], y), axis=0)
assert np.all( np.isclose(answer, values, rtol=rtol))
#Case 7: Prioritized events
print('Case 7: Prioritized events')
#Expect: Spike at 800 with new value of 3.5
event0 = uev.Event(name='E0', execute=execute2, priority=0, trigger_func=lambda t, *args: t-800)
event1 = uev.Event(name='E1', execute=execute1, priority=1, trigger_func=lambda t, *args: t-800)
events = [event0, event1]
t, y = ivp.integrate(func, tspan, y0, p, events=events, overlap=True, include_events=True)
plot(t, y, AX, 'Case 7: Prioritized events')
df = pd.read_csv('event_test_files/simulate_event_7.csv')
answer = df.values
values = np.concatenate(([t], y), axis=0)
assert np.all( np.isclose(answer, values, rtol=rtol))
# ###############################################################################
# #Part 1A: Other Events
# ###############################################################################
# y0 = np.array([1, 0, 0])
# p = np.array([0.01, 0.01])
# tspan = np.linspace(0, 1000, 101)
# fig = plt.figure()
# AX = [fig.add_subplot(1, 3, i+1) for i in range(3)]
# #Case 1a: Change in parameter
# print('Case 1a: Change in parameter')
# #Expect: Change in rate at 200
# event0 = uev.Event(name='E0', trigger_func=lambda t, *args: t-200, execute=execute4)
# events = [event0]
# t, y = ivp.integrate(func, tspan, y0, p, events=events, overlap=True, include_events=True)
# plot(t, y, AX, 'Case 1a: Change in parameter')
# df = pd.read_csv('event_test_files/simulate_event_1a.csv')
# answer = df.values
# values = np.concatenate(([t], y), axis=0)
# assert np.all( np.isclose(answer, values, rtol=rtol))
# #Case 2a: Assignment affects r
# print('Case 2a: Assignment affects r')
# #Expect: x0 is reset cyclically
# event0 = uev.Event(name='E0', trigger_func=trigger_func3, execute=execute3)
# events = [event0]
# t, y = ivp.integrate(func, tspan, y0, p, events=events, overlap=True, include_events=True)
# plot(t, y, AX, 'Case 2a: Assignment affects r ')
# df = pd.read_csv('event_test_files/simulate_event_2a.csv')
# answer = df.values
# values = np.concatenate(([t], y), axis=0)
# assert np.all( np.isclose(answer, values, rtol=rtol))
# #Case 3: Test reset
# print('Case 3: Test reset')
# #Expect: x0 is reset cyclically
# t, y = ivp.integrate(func, tspan, y0, p, events=events, overlap=True, include_events=True)
# plot(t, y, AX, 'Case 3: Test reset')
# df = pd.read_csv('event_test_files/simulate_event_2a.csv')
# answer = df.values
# values = np.concatenate(([t], y), axis=0)
# assert np.all( np.isclose(answer, values, rtol=rtol))
# ###############################################################################
# #Part 2: Integration and Formatting Options
# ###############################################################################
# y0 = np.array([1, 0, 0])
# p = np.array([0.01, 0.01])
# tspan = np.linspace(0, 1000, 101)
# fig = plt.figure()
# AX = [fig.add_subplot(1, 3, i+1) for i in range(3)]
# #Case 8: Unprioritized events
# print('Case 8: Unprioritized events')
# #Expect: Spike at 900 with new value of 3
# event0 = uev.Event(name='E0', execute=execute2, priority=0, trigger_func=lambda t, *args: t-900)
# event1 = uev.Event(name='E1', execute=execute1, priority=1, trigger_func=lambda t, *args: t-900)
# events = [event0, event1]
# t, y = ivp.integrate(func, tspan, y0, p, events=events, overlap=True, include_events=True, _sort=False)
# plot(t, y, AX, 'Case 8: Unprioritized events')
# df = pd.read_csv('event_test_files/simulate_event_8.csv')
# answer = df.values
# values = np.concatenate(([t], y), axis=0)
# assert np.all( np.isclose(answer, values, rtol=rtol))
# #Case 9: Exclude events
# print('Case 9: Exclude events')
# #Expect: Spike at 450 with new value of 3.5. 455 is not in t.
# event0 = uev.Event(name='E0', execute=execute2, priority=0, trigger_func=lambda t, *args: t-455)
# event1 = uev.Event(name='E1', execute=execute1, priority=1, trigger_func=lambda t, *args: t-455)
# events = [event0, event1]
# # events = [event0]
# t, y = ivp.integrate(func, tspan, y0, p, events=events, overlap=True, include_events=False)
# plot(t, y, AX, 'Case 9: Exclude events')
# t_, y_ = t, y
# df = pd.read_csv('event_test_files/simulate_event_9.csv')
# answer = df.values
# values = np.concatenate(([t], y), axis=0)
# assert np.all( np.isclose(answer, values, rtol=rtol))
# #Case 10: Exclude events and overlap
# print('Case 10: Exclude events and overlap')
# #Expect: Spike at 450 but slants instead of going to new value of 3.5. 455 is not in t.
# event0 = uev.Event(name='E0', execute=execute2, priority=0, trigger_func=lambda t, *args: t-455)
# event1 = uev.Event(name='E1', execute=execute1, priority=1, trigger_func=lambda t, *args: t-455)
# events = [event0, event1]
# # events = [event0]
# t, y = ivp.integrate(func, tspan, y0, p, events=events, overlap=False, include_events=False)
# plot(t, y, AX, 'Case 10: Exclude events and overlap')
# df = pd.read_csv('event_test_files/simulate_event_10.csv')
# answer = df.values
# values = np.concatenate(([t], y), axis=0)
# assert np.all( np.isclose(answer, values, rtol=rtol))
# #Case 11: Exclude overlap
# print('Case 11: Exclude overlap')
# #Expect: Spike at 455. 455 appears once in t.
# event0 = uev.Event(name='E0', execute=execute2, priority=0, trigger_func=lambda t, *args: t-455)
# event1 = uev.Event(name='E1', execute=execute1, priority=1, trigger_func=lambda t, *args: t-455)
# events = [event0, event1]
# t, y = ivp.integrate(func, tspan, y0, p, events=events, overlap=False, include_events=True)
# plot(t, y, AX, 'Case 11: Exclude overlap')
# df = pd.read_csv('event_test_files/simulate_event_11.csv')
# answer = df.values
# values = np.concatenate(([t], y), axis=0)
# assert np.all( np.isclose(answer, values, rtol=rtol))
###############################################################################
#Part 3: Dynamic Instantiation
###############################################################################
#Set up
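    # Build the ODE right-hand side and Event objects directly from the .dun
    # model file, then rerun the earlier cases against the generated events.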
dun_data = dfr.read_file('event_test_files/M1.dun')
model_data = dun_data['M1']
func_data = odc.make_ode_data(model_data)
event_objs = uev.make_events(func_data, model_data)
func_ = func_data['rhs'][1]
y0 = np.array([1, 0, 0])
p = np.array([0.01, 0.01])
tspan = np.linspace(0, 1000, 101)
fig = plt.figure()
AX = [fig.add_subplot(1, 3, i+1) for i in range(3)]
#Case 21: Dynamically generated Timer
print('Case 21: Dynamically generated Timer')
#Expect: Spike at 200
events_ = event_objs[:1]
t, y = ivp.integrate(func_, tspan, y0, p, events=events_, overlap=True, include_events=True)
plot(t, y, AX, 'Case 21: Dynamically generated Timer')
df = pd.read_csv('event_test_files/simulate_event_1.csv')
answer = df.values
values = np.concatenate(([t], y), axis=0)
assert np.all( np.isclose(answer, values, rtol=rtol))
#Case 22: Dynamically generated Event
print('Case 22: Dynamically generated Event')
#Expect: Spike at 160
events_ = event_objs[1:2]
t, y = ivp.integrate(func_, tspan, y0, p, events=events_, overlap=True, include_events=True)
plot(t, y, AX, 'Case 22: Dynamically generated Event')
df = pd.read_csv('event_test_files/simulate_event_2.csv')
answer = df.values
values = np.concatenate(([t], y), axis=0)
assert np.all( np.isclose(answer, values, rtol=rtol))
#Case 23: Dynamically generated Event with delay
print('Case 23: Dynamically generated Event with delay')
#Expect: Spike at 425 and 830
events_ = event_objs[2:3]
t, y = ivp.integrate(func_, tspan, y0, p, events=events_, overlap=True, include_events=True)
plot(t, y, AX, 'Case 23: Dynamically generated Event with delay')
df = pd.read_csv('event_test_files/simulate_event_3.csv')
answer = df.values
values = np.concatenate(([t], y), axis=0)
assert np.all( np.isclose(answer, values, rtol=rtol))
#Case 24: Dynamically generated Event with delay and not persistent
print('Case 24: Dynamically generated Event with delay and not persistent')
#Expect: No spike
events_ = event_objs[3:4]
t, y = ivp.integrate(func_, tspan, y0, p, events=events_, overlap=True, include_events=True)
plot(t, y, AX, 'Case 24: Dynamically generated Event with delay and not persistent')
df = pd.read_csv('event_test_files/simulate_event_4.csv')
answer = df.values
values = np.concatenate(([t], y), axis=0)
assert np.all( np.isclose(answer, values, rtol=rtol))
    #Case 25: Dynamically generated Event with trigger at start
print('Case 25: Dynamically generated Event with trigger at start')
#Expect: Spike at 10
events_ = event_objs[4:5]
t, y = ivp.integrate(func_, tspan, y0, p, events=events_, overlap=True, include_events=True)
plot(t, y, AX, 'Case 25: Dynamically generated Event with trigger at start')
df = pd.read_csv('event_test_files/simulate_event_5.csv')
answer = df.values
values = np.concatenate(([t], y), axis=0)
assert np.all( np.isclose(answer, values, rtol=rtol))
#Case 26: Dynamically generated Event with multiple events
print('Case 26: Dynamically generated Event with multiple events')
#Expect: Spike at 25 and 300
events_ = [event_objs[1], event_objs[5]]
t, y = ivp.integrate(func_, tspan, y0, p, events=events_, overlap=True, include_events=True)
plot(t, y, AX, 'Case 26: Dynamically generated Event with multiple events')
df = pd.read_csv('event_test_files/simulate_event_6.csv')
answer = df.values
values = np.concatenate(([t], y), axis=0)
assert np.all( np.isclose(answer, values, rtol=rtol))
#Case 27: Dynamically generated Event with prioritized events
print('Case 27: Dynamically generated Event with prioritized events')
#Expect: Spike at 800 with new value of 3.5
events_ = [event_objs[6], event_objs[7]]
t, y = ivp.integrate(func_, tspan, y0, p, events=events_, overlap=True, include_events=True)
plot(t, y, AX, 'Case 27: Dynamically generated Event with prioritized events')
df = pd.read_csv('event_test_files/simulate_event_7.csv')
answer = df.values
values = np.concatenate(([t], y), axis=0)
assert np.all( np.isclose(answer, values, rtol=rtol))
| [
"dunlin._utils_model.dun_file_reader.read_file",
"dunlin._utils_model.events.Event",
"numpy.isclose",
"pandas.read_csv",
"numpy.max",
"matplotlib.pyplot.close",
"numpy.array",
"numpy.linspace",
"matplotlib.pyplot.figure",
"dunlin._utils_model.ode_coder.make_ode_data",
"numpy.concatenate",
"dunlin._utils_model.ivp.integrate",
"dunlin._utils_model.events.make_events"
] | [((605, 621), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (614, 621), True, 'import matplotlib.pyplot as plt\n'), ((2325, 2344), 'numpy.array', 'np.array', (['[1, 0, 0]'], {}), '([1, 0, 0])\n', (2333, 2344), True, 'import numpy as np\n'), ((2357, 2379), 'numpy.array', 'np.array', (['[0.01, 0.01]'], {}), '([0.01, 0.01])\n', (2365, 2379), True, 'import numpy as np\n'), ((2392, 2417), 'numpy.linspace', 'np.linspace', (['(0)', '(1000)', '(101)'], {}), '(0, 1000, 101)\n', (2403, 2417), True, 'import numpy as np\n'), ((2434, 2446), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2444, 2446), True, 'import matplotlib.pyplot as plt\n'), ((2789, 2866), 'dunlin._utils_model.events.Event', 'uev.Event', ([], {'name': '"""E0"""', 'execute': 'execute1', 'trigger_func': '(lambda t, *args: t - 209)'}), "(name='E0', execute=execute1, trigger_func=lambda t, *args: t - 209)\n", (2798, 2866), True, 'import dunlin._utils_model.events as uev\n'), ((2926, 3013), 'dunlin._utils_model.ivp.integrate', 'ivp.integrate', (['func', 'tspan', 'y0', 'p'], {'events': 'events', 'overlap': '(True)', 'include_events': '(True)'}), '(func, tspan, y0, p, events=events, overlap=True,\n include_events=True)\n', (2939, 3013), True, 'import dunlin._utils_model.ivp as ivp\n'), ((3060, 3112), 'pandas.read_csv', 'pd.read_csv', (['"""event_test_files/simulate_event_1.csv"""'], {}), "('event_test_files/simulate_event_1.csv')\n", (3071, 3112), True, 'import pandas as pd\n'), ((3149, 3181), 'numpy.concatenate', 'np.concatenate', (['([t], y)'], {'axis': '(0)'}), '(([t], y), axis=0)\n', (3163, 3181), True, 'import numpy as np\n'), ((3330, 3396), 'dunlin._utils_model.events.Event', 'uev.Event', ([], {'name': '"""E0"""', 'trigger_func': 'trigger_func1', 'execute': 'execute1'}), "(name='E0', trigger_func=trigger_func1, execute=execute1)\n", (3339, 3396), True, 'import dunlin._utils_model.events as uev\n'), ((3431, 3518), 'dunlin._utils_model.ivp.integrate', 'ivp.integrate', (['func', 'tspan', 'y0', 'p'], {'events': 'events', 'overlap': '(True)', 'include_events': '(True)'}), '(func, tspan, y0, p, events=events, overlap=True,\n include_events=True)\n', (3444, 3518), True, 'import dunlin._utils_model.ivp as ivp\n'), ((3565, 3617), 'pandas.read_csv', 'pd.read_csv', (['"""event_test_files/simulate_event_2.csv"""'], {}), "('event_test_files/simulate_event_2.csv')\n", (3576, 3617), True, 'import pandas as pd\n'), ((3654, 3686), 'numpy.concatenate', 'np.concatenate', (['([t], y)'], {'axis': '(0)'}), '(([t], y), axis=0)\n', (3668, 3686), True, 'import numpy as np\n'), ((3866, 3964), 'dunlin._utils_model.events.Event', 'uev.Event', ([], {'delay': '(400)', 'persistent': '(True)', 'name': '"""E0"""', 'trigger_func': 'trigger_func1', 'execute': 'execute1'}), "(delay=400, persistent=True, name='E0', trigger_func=trigger_func1,\n execute=execute1)\n", (3875, 3964), True, 'import dunlin._utils_model.events as uev\n'), ((3995, 4082), 'dunlin._utils_model.ivp.integrate', 'ivp.integrate', (['func', 'tspan', 'y0', 'p'], {'events': 'events', 'overlap': '(True)', 'include_events': '(True)'}), '(func, tspan, y0, p, events=events, overlap=True,\n include_events=True)\n', (4008, 4082), True, 'import dunlin._utils_model.ivp as ivp\n'), ((4140, 4192), 'pandas.read_csv', 'pd.read_csv', (['"""event_test_files/simulate_event_3.csv"""'], {}), "('event_test_files/simulate_event_3.csv')\n", (4151, 4192), True, 'import pandas as pd\n'), ((4229, 4261), 'numpy.concatenate', 'np.concatenate', (['([t], y)'], {'axis': '(0)'}), '(([t], 
y), axis=0)\n', (4243, 4261), True, 'import numpy as np\n'), ((4467, 4567), 'dunlin._utils_model.events.Event', 'uev.Event', ([], {'delay': '(400)', 'persistent': '(False)', 'name': '"""E0"""', 'trigger_func': 'trigger_func1', 'execute': 'execute1'}), "(delay=400, persistent=False, name='E0', trigger_func=\n trigger_func1, execute=execute1)\n", (4476, 4567), True, 'import dunlin._utils_model.events as uev\n'), ((4597, 4645), 'dunlin._utils_model.ivp.integrate', 'ivp.integrate', (['func', 'tspan', 'y0', 'p'], {'events': 'events'}), '(func, tspan, y0, p, events=events)\n', (4610, 4645), True, 'import dunlin._utils_model.ivp as ivp\n'), ((4726, 4778), 'pandas.read_csv', 'pd.read_csv', (['"""event_test_files/simulate_event_4.csv"""'], {}), "('event_test_files/simulate_event_4.csv')\n", (4737, 4778), True, 'import pandas as pd\n'), ((4815, 4847), 'numpy.concatenate', 'np.concatenate', (['([t], y)'], {'axis': '(0)'}), '(([t], y), axis=0)\n', (4829, 4847), True, 'import numpy as np\n'), ((5068, 5159), 'dunlin._utils_model.events.Event', 'uev.Event', ([], {'delay': '(10)', 'persistent': '(True)', 'name': '"""E0"""', 'trigger_func': 'flipped', 'execute': 'execute1'}), "(delay=10, persistent=True, name='E0', trigger_func=flipped,\n execute=execute1)\n", (5077, 5159), True, 'import dunlin._utils_model.events as uev\n'), ((5190, 5238), 'dunlin._utils_model.ivp.integrate', 'ivp.integrate', (['func', 'tspan', 'y0', 'p'], {'events': 'events'}), '(func, tspan, y0, p, events=events)\n', (5203, 5238), True, 'import dunlin._utils_model.ivp as ivp\n'), ((5300, 5352), 'pandas.read_csv', 'pd.read_csv', (['"""event_test_files/simulate_event_5.csv"""'], {}), "('event_test_files/simulate_event_5.csv')\n", (5311, 5352), True, 'import pandas as pd\n'), ((5389, 5421), 'numpy.concatenate', 'np.concatenate', (['([t], y)'], {'axis': '(0)'}), '(([t], y), axis=0)\n', (5403, 5421), True, 'import numpy as np\n'), ((5598, 5673), 'dunlin._utils_model.events.Event', 'uev.Event', ([], {'delay': '(0)', 'name': '"""E0"""', 'trigger_func': 'trigger_func1', 'execute': 'execute1'}), "(delay=0, name='E0', trigger_func=trigger_func1, execute=execute1)\n", (5607, 5673), True, 'import dunlin._utils_model.events as uev\n'), ((5688, 5763), 'dunlin._utils_model.events.Event', 'uev.Event', ([], {'delay': '(0)', 'name': '"""E1"""', 'trigger_func': 'trigger_func2', 'execute': 'execute1'}), "(delay=0, name='E1', trigger_func=trigger_func2, execute=execute1)\n", (5697, 5763), True, 'import dunlin._utils_model.events as uev\n'), ((5806, 5854), 'dunlin._utils_model.ivp.integrate', 'ivp.integrate', (['func', 'tspan', 'y0', 'p'], {'events': 'events'}), '(func, tspan, y0, p, events=events)\n', (5819, 5854), True, 'import dunlin._utils_model.ivp as ivp\n'), ((5915, 5967), 'pandas.read_csv', 'pd.read_csv', (['"""event_test_files/simulate_event_6.csv"""'], {}), "('event_test_files/simulate_event_6.csv')\n", (5926, 5967), True, 'import pandas as pd\n'), ((6004, 6036), 'numpy.concatenate', 'np.concatenate', (['([t], y)'], {'axis': '(0)'}), '(([t], y), axis=0)\n', (6018, 6036), True, 'import numpy as np\n'), ((6234, 6328), 'dunlin._utils_model.events.Event', 'uev.Event', ([], {'name': '"""E0"""', 'execute': 'execute2', 'priority': '(0)', 'trigger_func': '(lambda t, *args: t - 800)'}), "(name='E0', execute=execute2, priority=0, trigger_func=lambda t, *\n args: t - 800)\n", (6243, 6328), True, 'import dunlin._utils_model.events as uev\n'), ((6336, 6430), 'dunlin._utils_model.events.Event', 'uev.Event', ([], {'name': '"""E1"""', 'execute': 'execute1', 
'priority': '(1)', 'trigger_func': '(lambda t, *args: t - 800)'}), "(name='E1', execute=execute1, priority=1, trigger_func=lambda t, *\n args: t - 800)\n", (6345, 6430), True, 'import dunlin._utils_model.events as uev\n'), ((6466, 6553), 'dunlin._utils_model.ivp.integrate', 'ivp.integrate', (['func', 'tspan', 'y0', 'p'], {'events': 'events', 'overlap': '(True)', 'include_events': '(True)'}), '(func, tspan, y0, p, events=events, overlap=True,\n include_events=True)\n', (6479, 6553), True, 'import dunlin._utils_model.ivp as ivp\n'), ((6613, 6665), 'pandas.read_csv', 'pd.read_csv', (['"""event_test_files/simulate_event_7.csv"""'], {}), "('event_test_files/simulate_event_7.csv')\n", (6624, 6665), True, 'import pandas as pd\n'), ((6702, 6734), 'numpy.concatenate', 'np.concatenate', (['([t], y)'], {'axis': '(0)'}), '(([t], y), axis=0)\n', (6716, 6734), True, 'import numpy as np\n'), ((12544, 12584), 'dunlin._utils_model.dun_file_reader.read_file', 'dfr.read_file', (['"""event_test_files/M1.dun"""'], {}), "('event_test_files/M1.dun')\n", (12557, 12584), True, 'import dunlin._utils_model.dun_file_reader as dfr\n'), ((12634, 12663), 'dunlin._utils_model.ode_coder.make_ode_data', 'odc.make_ode_data', (['model_data'], {}), '(model_data)\n', (12651, 12663), True, 'import dunlin._utils_model.ode_coder as odc\n'), ((12681, 12719), 'dunlin._utils_model.events.make_events', 'uev.make_events', (['func_data', 'model_data'], {}), '(func_data, model_data)\n', (12696, 12719), True, 'import dunlin._utils_model.events as uev\n'), ((12769, 12788), 'numpy.array', 'np.array', (['[1, 0, 0]'], {}), '([1, 0, 0])\n', (12777, 12788), True, 'import numpy as np\n'), ((12801, 12823), 'numpy.array', 'np.array', (['[0.01, 0.01]'], {}), '([0.01, 0.01])\n', (12809, 12823), True, 'import numpy as np\n'), ((12836, 12861), 'numpy.linspace', 'np.linspace', (['(0)', '(1000)', '(101)'], {}), '(0, 1000, 101)\n', (12847, 12861), True, 'import numpy as np\n'), ((12878, 12890), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (12888, 12890), True, 'import matplotlib.pyplot as plt\n'), ((13112, 13201), 'dunlin._utils_model.ivp.integrate', 'ivp.integrate', (['func_', 'tspan', 'y0', 'p'], {'events': 'events_', 'overlap': '(True)', 'include_events': '(True)'}), '(func_, tspan, y0, p, events=events_, overlap=True,\n include_events=True)\n', (13125, 13201), True, 'import dunlin._utils_model.ivp as ivp\n'), ((13271, 13323), 'pandas.read_csv', 'pd.read_csv', (['"""event_test_files/simulate_event_1.csv"""'], {}), "('event_test_files/simulate_event_1.csv')\n", (13282, 13323), True, 'import pandas as pd\n'), ((13360, 13392), 'numpy.concatenate', 'np.concatenate', (['([t], y)'], {'axis': '(0)'}), '(([t], y), axis=0)\n', (13374, 13392), True, 'import numpy as np\n'), ((13615, 13704), 'dunlin._utils_model.ivp.integrate', 'ivp.integrate', (['func_', 'tspan', 'y0', 'p'], {'events': 'events_', 'overlap': '(True)', 'include_events': '(True)'}), '(func_, tspan, y0, p, events=events_, overlap=True,\n include_events=True)\n', (13628, 13704), True, 'import dunlin._utils_model.ivp as ivp\n'), ((13774, 13826), 'pandas.read_csv', 'pd.read_csv', (['"""event_test_files/simulate_event_2.csv"""'], {}), "('event_test_files/simulate_event_2.csv')\n", (13785, 13826), True, 'import pandas as pd\n'), ((13863, 13895), 'numpy.concatenate', 'np.concatenate', (['([t], y)'], {'axis': '(0)'}), '(([t], y), axis=0)\n', (13877, 13895), True, 'import numpy as np\n'), ((14148, 14237), 'dunlin._utils_model.ivp.integrate', 'ivp.integrate', (['func_', 'tspan', 'y0', 'p'], 
{'events': 'events_', 'overlap': '(True)', 'include_events': '(True)'}), '(func_, tspan, y0, p, events=events_, overlap=True,\n include_events=True)\n', (14161, 14237), True, 'import dunlin._utils_model.ivp as ivp\n'), ((14318, 14370), 'pandas.read_csv', 'pd.read_csv', (['"""event_test_files/simulate_event_3.csv"""'], {}), "('event_test_files/simulate_event_3.csv')\n", (14329, 14370), True, 'import pandas as pd\n'), ((14407, 14439), 'numpy.concatenate', 'np.concatenate', (['([t], y)'], {'axis': '(0)'}), '(([t], y), axis=0)\n', (14421, 14439), True, 'import numpy as np\n'), ((14718, 14807), 'dunlin._utils_model.ivp.integrate', 'ivp.integrate', (['func_', 'tspan', 'y0', 'p'], {'events': 'events_', 'overlap': '(True)', 'include_events': '(True)'}), '(func_, tspan, y0, p, events=events_, overlap=True,\n include_events=True)\n', (14731, 14807), True, 'import dunlin._utils_model.ivp as ivp\n'), ((14907, 14959), 'pandas.read_csv', 'pd.read_csv', (['"""event_test_files/simulate_event_4.csv"""'], {}), "('event_test_files/simulate_event_4.csv')\n", (14918, 14959), True, 'import pandas as pd\n'), ((14996, 15028), 'numpy.concatenate', 'np.concatenate', (['([t], y)'], {'axis': '(0)'}), '(([t], y), axis=0)\n', (15010, 15028), True, 'import numpy as np\n'), ((15302, 15391), 'dunlin._utils_model.ivp.integrate', 'ivp.integrate', (['func_', 'tspan', 'y0', 'p'], {'events': 'events_', 'overlap': '(True)', 'include_events': '(True)'}), '(func_, tspan, y0, p, events=events_, overlap=True,\n include_events=True)\n', (15315, 15391), True, 'import dunlin._utils_model.ivp as ivp\n'), ((15483, 15535), 'pandas.read_csv', 'pd.read_csv', (['"""event_test_files/simulate_event_5.csv"""'], {}), "('event_test_files/simulate_event_5.csv')\n", (15494, 15535), True, 'import pandas as pd\n'), ((15572, 15604), 'numpy.concatenate', 'np.concatenate', (['([t], y)'], {'axis': '(0)'}), '(([t], y), axis=0)\n', (15586, 15604), True, 'import numpy as np\n'), ((15891, 15980), 'dunlin._utils_model.ivp.integrate', 'ivp.integrate', (['func_', 'tspan', 'y0', 'p'], {'events': 'events_', 'overlap': '(True)', 'include_events': '(True)'}), '(func_, tspan, y0, p, events=events_, overlap=True,\n include_events=True)\n', (15904, 15980), True, 'import dunlin._utils_model.ivp as ivp\n'), ((16071, 16123), 'pandas.read_csv', 'pd.read_csv', (['"""event_test_files/simulate_event_6.csv"""'], {}), "('event_test_files/simulate_event_6.csv')\n", (16082, 16123), True, 'import pandas as pd\n'), ((16160, 16192), 'numpy.concatenate', 'np.concatenate', (['([t], y)'], {'axis': '(0)'}), '(([t], y), axis=0)\n', (16174, 16192), True, 'import numpy as np\n'), ((16500, 16589), 'dunlin._utils_model.ivp.integrate', 'ivp.integrate', (['func_', 'tspan', 'y0', 'p'], {'events': 'events_', 'overlap': '(True)', 'include_events': '(True)'}), '(func_, tspan, y0, p, events=events_, overlap=True,\n include_events=True)\n', (16513, 16589), True, 'import dunlin._utils_model.ivp as ivp\n'), ((16683, 16735), 'pandas.read_csv', 'pd.read_csv', (['"""event_test_files/simulate_event_7.csv"""'], {}), "('event_test_files/simulate_event_7.csv')\n", (16694, 16735), True, 'import pandas as pd\n'), ((16772, 16804), 'numpy.concatenate', 'np.concatenate', (['([t], y)'], {'axis': '(0)'}), '(([t], y), axis=0)\n', (16786, 16804), True, 'import numpy as np\n'), ((2279, 2307), 'numpy.array', 'np.array', (['[d_x0, d_x1, d_x2]'], {}), '([d_x0, d_x1, d_x2])\n', (2287, 2307), True, 'import numpy as np\n'), ((3201, 3238), 'numpy.isclose', 'np.isclose', (['answer', 'values'], {'rtol': 'rtol'}), '(answer, 
values, rtol=rtol)\n', (3211, 3238), True, 'import numpy as np\n'), ((3706, 3743), 'numpy.isclose', 'np.isclose', (['answer', 'values'], {'rtol': 'rtol'}), '(answer, values, rtol=rtol)\n', (3716, 3743), True, 'import numpy as np\n'), ((4281, 4318), 'numpy.isclose', 'np.isclose', (['answer', 'values'], {'rtol': 'rtol'}), '(answer, values, rtol=rtol)\n', (4291, 4318), True, 'import numpy as np\n'), ((4867, 4904), 'numpy.isclose', 'np.isclose', (['answer', 'values'], {'rtol': 'rtol'}), '(answer, values, rtol=rtol)\n', (4877, 4904), True, 'import numpy as np\n'), ((5441, 5478), 'numpy.isclose', 'np.isclose', (['answer', 'values'], {'rtol': 'rtol'}), '(answer, values, rtol=rtol)\n', (5451, 5478), True, 'import numpy as np\n'), ((6056, 6093), 'numpy.isclose', 'np.isclose', (['answer', 'values'], {'rtol': 'rtol'}), '(answer, values, rtol=rtol)\n', (6066, 6093), True, 'import numpy as np\n'), ((6754, 6791), 'numpy.isclose', 'np.isclose', (['answer', 'values'], {'rtol': 'rtol'}), '(answer, values, rtol=rtol)\n', (6764, 6791), True, 'import numpy as np\n'), ((13412, 13449), 'numpy.isclose', 'np.isclose', (['answer', 'values'], {'rtol': 'rtol'}), '(answer, values, rtol=rtol)\n', (13422, 13449), True, 'import numpy as np\n'), ((13915, 13952), 'numpy.isclose', 'np.isclose', (['answer', 'values'], {'rtol': 'rtol'}), '(answer, values, rtol=rtol)\n', (13925, 13952), True, 'import numpy as np\n'), ((14459, 14496), 'numpy.isclose', 'np.isclose', (['answer', 'values'], {'rtol': 'rtol'}), '(answer, values, rtol=rtol)\n', (14469, 14496), True, 'import numpy as np\n'), ((15048, 15085), 'numpy.isclose', 'np.isclose', (['answer', 'values'], {'rtol': 'rtol'}), '(answer, values, rtol=rtol)\n', (15058, 15085), True, 'import numpy as np\n'), ((15624, 15661), 'numpy.isclose', 'np.isclose', (['answer', 'values'], {'rtol': 'rtol'}), '(answer, values, rtol=rtol)\n', (15634, 15661), True, 'import numpy as np\n'), ((16212, 16249), 'numpy.isclose', 'np.isclose', (['answer', 'values'], {'rtol': 'rtol'}), '(answer, values, rtol=rtol)\n', (16222, 16249), True, 'import numpy as np\n'), ((16824, 16861), 'numpy.isclose', 'np.isclose', (['answer', 'values'], {'rtol': 'rtol'}), '(answer, values, rtol=rtol)\n', (16834, 16861), True, 'import numpy as np\n'), ((967, 979), 'numpy.max', 'np.max', (['y[i]'], {}), '(y[i])\n', (973, 979), True, 'import numpy as np\n')] |
from flask import render_template
from . import app, globals
@app.route("/")
def index() -> str:
collections = [{"id": key, "name": value["display_name"]}
for key, value in globals.workspace.config.get("collections", {}).items()]
scanner_state = globals.factory.new_scanner_state()
scanned_files = scanner_state.get_all_scanned()
return render_template(
'index.html',
collections=collections,
scanned_files=scanned_files,
)
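
# Illustrative sketch (not part of the original): the workspace config shape this view
# assumes -- each collection id maps to a dict with a "display_name" entry, e.g.
#
#   {"collections": {"invoices": {"display_name": "Invoices"},
#                    "letters": {"display_name": "Letters"}}}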
| [
"flask.render_template"
] | [((369, 457), 'flask.render_template', 'render_template', (['"""index.html"""'], {'collections': 'collections', 'scanned_files': 'scanned_files'}), "('index.html', collections=collections, scanned_files=\n scanned_files)\n", (384, 457), False, 'from flask import render_template\n')] |
import numpy as np
import tensorflow.keras.backend as K
# Use custom metrics for training and evaluation
# Recall
def micro_recall(y_true, y_pred):
y_true_and_pred = K.cast(y_true * y_pred, 'float')
true_pos = K.sum(y_true_and_pred, axis=0)
possible_pos = K.sum(y_true, axis=0)
rec = K.sum(true_pos) / (K.sum(possible_pos) + K.epsilon())
return rec
def macro_recall(y_true, y_pred):
y_true_and_pred = K.cast(y_true * y_pred, 'float')
true_pos = K.sum(y_true_and_pred, axis=0)
possible_pos = K.sum(y_true, axis=0)
rec = K.mean(true_pos / (possible_pos + K.epsilon()))
return rec
def weighted_recall(y_true, y_pred):
y_true_and_pred = K.cast(y_true * y_pred, 'float')
true_pos = K.sum(y_true_and_pred, axis=0)
possible_pos = K.sum(y_true, axis=0)
wts = K.sum(y_true, axis=0) / K.sum(y_true)
    # weighted average: the support weights already sum to 1, so sum (not mean) over classes
    rec = K.sum(true_pos * wts / (possible_pos + K.epsilon()))
return rec
# Precision
def micro_precision(y_true, y_pred):
y_true_and_pred = K.cast(y_true * y_pred, 'float')
true_pos = K.sum(y_true_and_pred, axis=0)
pred_pos = K.sum(y_pred, axis=0)
prec = K.sum(true_pos) / (K.sum(pred_pos) + K.epsilon())
return prec
def macro_precision(y_true, y_pred):
y_true_and_pred = K.cast(y_true * y_pred, 'float')
true_pos = K.sum(y_true_and_pred, axis=0)
pred_pos = K.sum(y_pred, axis=0)
prec = K.mean(true_pos / (pred_pos + K.epsilon()))
return prec
def weighted_precision(y_true, y_pred):
y_true_and_pred = K.cast(y_true * y_pred, 'float')
true_pos = K.sum(y_true_and_pred, axis=0)
pred_pos = K.sum(y_pred, axis=0)
wts = K.sum(y_true, axis=0) / K.sum(y_true)
    # weighted average: the support weights already sum to 1, so sum (not mean) over classes
    prec = K.sum(true_pos * wts / (pred_pos + K.epsilon()))
    return prec
#F1 score
# get one-hot rep of given tensor
def get_one_hot(y):
return K.one_hot(K.argmax(y,axis=1), y.shape[1])
def fbeta(prec, rec, beta=1.0):
    # standard F-beta weights recall by beta squared; for beta=1 this reduces to the usual F1
    f_score = ((1 + beta**2) * prec * rec) / (beta**2 * prec + rec + K.epsilon())
return f_score
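# illustrative check (numbers not from the original): fbeta(0.5, 1.0, beta=1) -> 2*0.5*1.0 / (0.5 + 1.0), roughly 0.667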
def micro_f1(y_true, y_pred):
y_pred = get_one_hot(y_pred)
prec = micro_precision(y_true, y_pred)
rec = micro_recall(y_true, y_pred)
f1 = fbeta(prec, rec)
return f1
def macro_f1(y_true, y_pred):
y_pred = get_one_hot(y_pred)
prec = macro_precision(y_true, y_pred)
rec = macro_recall(y_true, y_pred)
f1 = fbeta(prec, rec)
return f1
def weighted_f1(y_true, y_pred):
y_pred = get_one_hot(y_pred)
prec = weighted_precision(y_true, y_pred)
rec = weighted_recall(y_true, y_pred)
f1 = fbeta(prec, rec)
return f1
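
# Hedged usage sketch (not part of the original module): custom metric callables such as
# the ones above can be handed to Keras via model.compile(metrics=[...]). The tiny model
# below is a hypothetical placeholder used only to show the wiring.
if __name__ == "__main__":
    from tensorflow.keras import layers, models

    demo_model = models.Sequential([
        layers.Dense(4, activation="softmax", input_shape=(16,)),
    ])
    demo_model.compile(optimizer="adam",
                       loss="categorical_crossentropy",
                       metrics=[macro_f1, micro_precision, weighted_recall])
    demo_model.summary()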
| [
"tensorflow.keras.backend.argmax",
"tensorflow.keras.backend.epsilon",
"tensorflow.keras.backend.cast",
"tensorflow.keras.backend.sum"
] | [((169, 201), 'tensorflow.keras.backend.cast', 'K.cast', (['(y_true * y_pred)', '"""float"""'], {}), "(y_true * y_pred, 'float')\n", (175, 201), True, 'import tensorflow.keras.backend as K\n'), ((215, 245), 'tensorflow.keras.backend.sum', 'K.sum', (['y_true_and_pred'], {'axis': '(0)'}), '(y_true_and_pred, axis=0)\n', (220, 245), True, 'import tensorflow.keras.backend as K\n'), ((263, 284), 'tensorflow.keras.backend.sum', 'K.sum', (['y_true'], {'axis': '(0)'}), '(y_true, axis=0)\n', (268, 284), True, 'import tensorflow.keras.backend as K\n'), ((415, 447), 'tensorflow.keras.backend.cast', 'K.cast', (['(y_true * y_pred)', '"""float"""'], {}), "(y_true * y_pred, 'float')\n", (421, 447), True, 'import tensorflow.keras.backend as K\n'), ((461, 491), 'tensorflow.keras.backend.sum', 'K.sum', (['y_true_and_pred'], {'axis': '(0)'}), '(y_true_and_pred, axis=0)\n', (466, 491), True, 'import tensorflow.keras.backend as K\n'), ((509, 530), 'tensorflow.keras.backend.sum', 'K.sum', (['y_true'], {'axis': '(0)'}), '(y_true, axis=0)\n', (514, 530), True, 'import tensorflow.keras.backend as K\n'), ((658, 690), 'tensorflow.keras.backend.cast', 'K.cast', (['(y_true * y_pred)', '"""float"""'], {}), "(y_true * y_pred, 'float')\n", (664, 690), True, 'import tensorflow.keras.backend as K\n'), ((704, 734), 'tensorflow.keras.backend.sum', 'K.sum', (['y_true_and_pred'], {'axis': '(0)'}), '(y_true_and_pred, axis=0)\n', (709, 734), True, 'import tensorflow.keras.backend as K\n'), ((752, 773), 'tensorflow.keras.backend.sum', 'K.sum', (['y_true'], {'axis': '(0)'}), '(y_true, axis=0)\n', (757, 773), True, 'import tensorflow.keras.backend as K\n'), ((967, 999), 'tensorflow.keras.backend.cast', 'K.cast', (['(y_true * y_pred)', '"""float"""'], {}), "(y_true * y_pred, 'float')\n", (973, 999), True, 'import tensorflow.keras.backend as K\n'), ((1013, 1043), 'tensorflow.keras.backend.sum', 'K.sum', (['y_true_and_pred'], {'axis': '(0)'}), '(y_true_and_pred, axis=0)\n', (1018, 1043), True, 'import tensorflow.keras.backend as K\n'), ((1057, 1078), 'tensorflow.keras.backend.sum', 'K.sum', (['y_pred'], {'axis': '(0)'}), '(y_pred, axis=0)\n', (1062, 1078), True, 'import tensorflow.keras.backend as K\n'), ((1210, 1242), 'tensorflow.keras.backend.cast', 'K.cast', (['(y_true * y_pred)', '"""float"""'], {}), "(y_true * y_pred, 'float')\n", (1216, 1242), True, 'import tensorflow.keras.backend as K\n'), ((1256, 1286), 'tensorflow.keras.backend.sum', 'K.sum', (['y_true_and_pred'], {'axis': '(0)'}), '(y_true_and_pred, axis=0)\n', (1261, 1286), True, 'import tensorflow.keras.backend as K\n'), ((1300, 1321), 'tensorflow.keras.backend.sum', 'K.sum', (['y_pred'], {'axis': '(0)'}), '(y_pred, axis=0)\n', (1305, 1321), True, 'import tensorflow.keras.backend as K\n'), ((1450, 1482), 'tensorflow.keras.backend.cast', 'K.cast', (['(y_true * y_pred)', '"""float"""'], {}), "(y_true * y_pred, 'float')\n", (1456, 1482), True, 'import tensorflow.keras.backend as K\n'), ((1496, 1526), 'tensorflow.keras.backend.sum', 'K.sum', (['y_true_and_pred'], {'axis': '(0)'}), '(y_true_and_pred, axis=0)\n', (1501, 1526), True, 'import tensorflow.keras.backend as K\n'), ((1540, 1561), 'tensorflow.keras.backend.sum', 'K.sum', (['y_pred'], {'axis': '(0)'}), '(y_pred, axis=0)\n', (1545, 1561), True, 'import tensorflow.keras.backend as K\n'), ((293, 308), 'tensorflow.keras.backend.sum', 'K.sum', (['true_pos'], {}), '(true_pos)\n', (298, 308), True, 'import tensorflow.keras.backend as K\n'), ((782, 803), 'tensorflow.keras.backend.sum', 'K.sum', (['y_true'], {'axis': '(0)'}), 
'(y_true, axis=0)\n', (787, 803), True, 'import tensorflow.keras.backend as K\n'), ((806, 819), 'tensorflow.keras.backend.sum', 'K.sum', (['y_true'], {}), '(y_true)\n', (811, 819), True, 'import tensorflow.keras.backend as K\n'), ((1088, 1103), 'tensorflow.keras.backend.sum', 'K.sum', (['true_pos'], {}), '(true_pos)\n', (1093, 1103), True, 'import tensorflow.keras.backend as K\n'), ((1570, 1591), 'tensorflow.keras.backend.sum', 'K.sum', (['y_true'], {'axis': '(0)'}), '(y_true, axis=0)\n', (1575, 1591), True, 'import tensorflow.keras.backend as K\n'), ((1594, 1607), 'tensorflow.keras.backend.sum', 'K.sum', (['y_true'], {}), '(y_true)\n', (1599, 1607), True, 'import tensorflow.keras.backend as K\n'), ((1766, 1785), 'tensorflow.keras.backend.argmax', 'K.argmax', (['y'], {'axis': '(1)'}), '(y, axis=1)\n', (1774, 1785), True, 'import tensorflow.keras.backend as K\n'), ((312, 331), 'tensorflow.keras.backend.sum', 'K.sum', (['possible_pos'], {}), '(possible_pos)\n', (317, 331), True, 'import tensorflow.keras.backend as K\n'), ((334, 345), 'tensorflow.keras.backend.epsilon', 'K.epsilon', ([], {}), '()\n', (343, 345), True, 'import tensorflow.keras.backend as K\n'), ((1107, 1122), 'tensorflow.keras.backend.sum', 'K.sum', (['pred_pos'], {}), '(pred_pos)\n', (1112, 1122), True, 'import tensorflow.keras.backend as K\n'), ((1125, 1136), 'tensorflow.keras.backend.epsilon', 'K.epsilon', ([], {}), '()\n', (1134, 1136), True, 'import tensorflow.keras.backend as K\n'), ((1886, 1897), 'tensorflow.keras.backend.epsilon', 'K.epsilon', ([], {}), '()\n', (1895, 1897), True, 'import tensorflow.keras.backend as K\n'), ((573, 584), 'tensorflow.keras.backend.epsilon', 'K.epsilon', ([], {}), '()\n', (582, 584), True, 'import tensorflow.keras.backend as K\n'), ((870, 881), 'tensorflow.keras.backend.epsilon', 'K.epsilon', ([], {}), '()\n', (879, 881), True, 'import tensorflow.keras.backend as K\n'), ((1361, 1372), 'tensorflow.keras.backend.epsilon', 'K.epsilon', ([], {}), '()\n', (1370, 1372), True, 'import tensorflow.keras.backend as K\n'), ((1654, 1665), 'tensorflow.keras.backend.epsilon', 'K.epsilon', ([], {}), '()\n', (1663, 1665), True, 'import tensorflow.keras.backend as K\n')] |
# Create your models here.
import os

from django.db import models
from django.db.models import Count, Sum, Q
from simple_history.models import HistoricalRecords
from offices.models import *
from employees.models import *
from accounts.models import *
class STDocument(models.Model):
branch = models.ForeignKey(Branch, null=False, blank=False, related_name='branch_stock_document')
file_name = models.CharField(max_length=255, blank=True)
file = models.FileField(upload_to='uploads/property')
obj_id = models.CharField(max_length=20, default=None, null=True, blank=True)
obj_type = models.CharField(max_length=50, default=None, null=True, blank=True)
date_uploaded = models.DateTimeField(auto_now_add=True)
created_by = models.ForeignKey('accounts.ElopsysUser', null=True, blank=True, related_name='user_stock_document')
history = HistoricalRecords()
def filename(self):
return os.path.basename(self.file.name)
class STComment(models.Model):
comments = models.CharField(max_length=2000, default=None, null=False, blank=False)
commented = models.DateTimeField(auto_now_add=True)
comment_type = models.CharField(max_length=120, default=None, null=True, blank=True)
obj_id = models.IntegerField(null=True, blank=True,)
branch = models.ForeignKey(Branch, null=True, blank=True, related_name='branch_stock_comments')
created_by = models.ForeignKey('accounts.ElopsysUser', null=True, blank=True, related_name='user_stock_comments')
history = HistoricalRecords()
class STRequisition(models.Model):
quote_number = models.CharField(max_length=120, default=None, null=False, blank=False)
obj_id = models.IntegerField(null=True, blank=True,)
requisition_type = models.CharField(max_length=120, default=None, null=False, blank=False)
description = models.CharField(max_length=2000, default=None, null=True, blank=True)
branch = models.ForeignKey(Branch, null=False, blank=False, related_name='branches_stock_requisitions')
requested_by = models.ForeignKey('employees.Employee', null=False, blank=False, related_name='employee_stock_requisitions')
requested = models.DateTimeField(auto_now_add=True)
supplier = models.CharField(max_length=120, default=None, null=False, blank=False)
vat_included = models.BooleanField(blank=True, default=True)
created_by = models.ForeignKey('accounts.ElopsysUser', null=True, blank=True, related_name='user_stock_requisitions')
modified_by = models.ForeignKey('accounts.ElopsysUser', null=True, blank=True, related_name='user_modified_stock_rerequisitions')
history = HistoricalRecords()
def get_sub_total(self):
sub_total = STRequisitionItem.objects.filter(requisition_no=self).aggregate(sub_total=Sum('line_total'))["sub_total"]
if sub_total:
return sub_total
else:
return 0
def get_total_tax(self):
VAT = self.vat_included
if not VAT:
return 0.14*self.get_sub_total()
else:
return 0
def get_total(self):
return self.get_sub_total() + self.get_total_tax()
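	# Illustrative numbers (not from the original): with item line totals of 60 and 40 and
	# vat_included=False, get_sub_total() -> 100, get_total_tax() -> 14.0 and get_total() -> 114.0;
	# with vat_included=True no tax is added, so get_total() -> 100.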
class STRequisitionItem(models.Model):
requisition_no = models.ForeignKey(STRequisition, null=False, blank=False, related_name='requisition_stock_requistionItems')
item_code = models.CharField(max_length=200, default=None, null=True, blank=True)
line_item = models.CharField(max_length=200, default=None, null=True, blank=True)
qty = models.IntegerField(null=True, blank=True,)
unit_price = models.FloatField(null=True, blank=True, max_length=20, default=0)
line_total = models.FloatField(null=True, blank=True, max_length=20, default=0)
created_by = models.ForeignKey('accounts.ElopsysUser', null=True, blank=True, related_name='user_stock_requisitionitems')
modified_by = models.ForeignKey('accounts.ElopsysUser', null=True, blank=True, related_name='user_modified_stock_rerequisitionitems')
history = HistoricalRecords()
class StockItem(models.Model):
item_name = models.CharField(max_length=200, default=None, null=True, blank=True)
item_code = models.CharField(max_length=200, default=None, null=True, blank=True)
category = models.CharField(max_length=20, default=None, null=True, blank=True,
choices=(
('grocery', 'Grocery'),
('sanitation', 'Sanitation'),
('stationary', 'Stationary'),
))
created_by = models.ForeignKey('accounts.ElopsysUser', null=True, blank=True, related_name='user_stockitems')
modified_by = models.ForeignKey('accounts.ElopsysUser', null=True, blank=True, related_name='user_modified_stockitems')
history = HistoricalRecords()
@property
def item_description(self):
return "%s-%s"%(self.item_code, self.item_name)
def __unicode__(self):
return self.item_description
class BranchStock(models.Model):
branch = models.ForeignKey(Branch, null=False, blank=False, related_name='branch_branchstockitems')
item = models.ForeignKey(StockItem, null=False, blank=False, related_name='item_branchstockitems')
quantity = models.FloatField(null=True, blank=True, max_length=20, default=0)
reorder_quantity = models.FloatField(null=True, blank=True, max_length=20, default=0)
created_by = models.ForeignKey('accounts.ElopsysUser', null=True, blank=True, related_name='user_branchstocktems')
modified_by = models.ForeignKey('accounts.ElopsysUser', null=True, blank=True, related_name='user_modified_branchstocktems')
history = HistoricalRecords()
class StockTake(models.Model):
branch = models.ForeignKey(Branch, null=False, blank=False, related_name='branch_stocktakes')
floor = models.ForeignKey(Floor, null=False, blank=False, related_name='floor_stocktakes')
item = models.ForeignKey(StockItem, null=False, blank=False, related_name='item_stocktakes')
quantity = models.FloatField(null=True, blank=True, max_length=20, default=0)
checker = models.ForeignKey('employees.Employee', null=False, blank=False, related_name='checker_stocktakes')
date_checked = models.DateField(null=True, blank=True, auto_now=False, editable=True)
time_checked = models.TimeField(null=True, blank=True, auto_now=False, editable=True)
created_by = models.ForeignKey('accounts.ElopsysUser', null=True, blank=True, related_name='user_stocktakes')
modified_by = models.ForeignKey('accounts.ElopsysUser', null=True, blank=True, related_name='user_modified_stocktakes')
history = HistoricalRecords()
class StockReplenishment(models.Model):
branch = models.ForeignKey(Branch, null=False, blank=False, related_name='branch_stockreplenishments')
item = models.ForeignKey(StockItem, null=False, blank=False, related_name='item_stockreplenishmennts')
quantity = models.FloatField(null=True, blank=True, max_length=20, default=0)
replenisher = models.ForeignKey('employees.Employee', null=False, blank=False, related_name='checker_stockreplenishments')
date_replenished = models.DateField(null=True, blank=True, auto_now=False, editable=True)
created_by = models.ForeignKey('accounts.ElopsysUser', null=True, blank=True, related_name='user_stockreplenishments')
modified_by = models.ForeignKey('accounts.ElopsysUser', null=True, blank=True, related_name='user_modified_stockreplenishments')
history = HistoricalRecords()
class Book(models.Model):
book_type = models.CharField(max_length=120, null=True, blank=True)
written_by = models.CharField(max_length=20,default=None, null=True, blank=True,
choices=(
('sales', 'Sales Department'),
							('marketing', 'Marketing Department')
))
quantity = models.FloatField(null=True, blank=True, max_length=20, default=0)
def __unicode__(self):
return self.book_type
class BookReplenishment(models.Model):
book = models.ForeignKey(Book, null=False, blank=False, related_name='book_bookreplenishment')
range_from = models.CharField(max_length=120, default='Operational', null=True, blank=True,)
range_to = models.CharField(max_length=120, default='Operational', null=True, blank=True,)
in_stock = models.FloatField(null=True, blank=True, max_length=20, default=0)
quantity = models.FloatField(null=True, blank=True, max_length=20, default=0)
date_ordered = models.DateField(null=True, blank=True, auto_now=False, editable=True)
date_recieved = models.DateField(null=True, blank=True, auto_now=False, editable=True)
recieved = models.BooleanField(blank=True, default=False)
accept = models.BooleanField(blank=True, default=False)
authorize = models.CharField(max_length=20, default='Pending', null=True, blank=True,
choices=(
('Pending', 'Pending'),
('Aproved', 'Authorize'),
('Declined', 'Decline')
))
created_by = models.ForeignKey('accounts.ElopsysUser', null=True, blank=True, related_name='user_bookreplenishment')
modified_by = models.ForeignKey('accounts.ElopsysUser', null=True, blank=True, related_name='user_modified_bookreplenishment')
history = HistoricalRecords()
class BookAllocation(models.Model):
book = models.ForeignKey(Book, null=False, blank=False, related_name='book_bookallocations')
range_from = models.CharField(max_length=120, null=True, blank=True,)
range_to = models.CharField(max_length=120, null=True, blank=True,)
quantity = models.FloatField(null=True, blank=True, max_length=20, default=0)
regional_admin_manager = models.ForeignKey('employees.Employee', null=False, blank=False, related_name='ram_bookallocations')
region = models.ForeignKey(Region, null=False, blank=False, related_name='region_bookallocations')
date_allocated = models.DateField(null=True, blank=True, auto_now=False, editable=True)
accept = models.BooleanField(blank=True, default=False)
authorize = models.CharField(max_length=20, default='Pending', null=True, blank=True,
choices=(
('Pending', 'Pending'),
('Aproved', 'Authorize'),
('Declined', 'Decline')
))
created_by = models.ForeignKey('accounts.ElopsysUser', null=True, blank=True, related_name='user_bookallocations')
modified_by = models.ForeignKey('accounts.ElopsysUser', null=True, blank=True, related_name='user_modified_bookallocations')
history = HistoricalRecords()
| [
"django.db.models.Sum",
"django.db.models.FloatField",
"django.db.models.DateField",
"django.db.models.TimeField",
"django.db.models.ForeignKey",
"django.db.models.IntegerField",
"simple_history.models.HistoricalRecords",
"django.db.models.FileField",
"django.db.models.BooleanField",
"django.db.models.DateTimeField",
"django.db.models.CharField"
] | [((288, 381), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Branch'], {'null': '(False)', 'blank': '(False)', 'related_name': '"""branch_stock_document"""'}), "(Branch, null=False, blank=False, related_name=\n 'branch_stock_document')\n", (305, 381), False, 'from django.db import models\n'), ((390, 434), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)', 'blank': '(True)'}), '(max_length=255, blank=True)\n', (406, 434), False, 'from django.db import models\n'), ((443, 489), 'django.db.models.FileField', 'models.FileField', ([], {'upload_to': '"""uploads/property"""'}), "(upload_to='uploads/property')\n", (459, 489), False, 'from django.db import models\n'), ((500, 568), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20)', 'default': 'None', 'null': '(True)', 'blank': '(True)'}), '(max_length=20, default=None, null=True, blank=True)\n', (516, 568), False, 'from django.db import models\n'), ((581, 649), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)', 'default': 'None', 'null': '(True)', 'blank': '(True)'}), '(max_length=50, default=None, null=True, blank=True)\n', (597, 649), False, 'from django.db import models\n'), ((667, 706), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (687, 706), False, 'from django.db import models\n'), ((721, 825), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""accounts.ElopsysUser"""'], {'null': '(True)', 'blank': '(True)', 'related_name': '"""user_stock_document"""'}), "('accounts.ElopsysUser', null=True, blank=True,\n related_name='user_stock_document')\n", (738, 825), False, 'from django.db import models\n'), ((833, 852), 'simple_history.models.HistoricalRecords', 'HistoricalRecords', ([], {}), '()\n', (850, 852), False, 'from simple_history.models import HistoricalRecords\n'), ((962, 1034), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(2000)', 'default': 'None', 'null': '(False)', 'blank': '(False)'}), '(max_length=2000, default=None, null=False, blank=False)\n', (978, 1034), False, 'from django.db import models\n'), ((1048, 1087), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (1068, 1087), False, 'from django.db import models\n'), ((1104, 1173), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(120)', 'default': 'None', 'null': '(True)', 'blank': '(True)'}), '(max_length=120, default=None, null=True, blank=True)\n', (1120, 1173), False, 'from django.db import models\n'), ((1184, 1226), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'null': '(True)', 'blank': '(True)'}), '(null=True, blank=True)\n', (1203, 1226), False, 'from django.db import models\n'), ((1238, 1329), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Branch'], {'null': '(True)', 'blank': '(True)', 'related_name': '"""branch_stock_comments"""'}), "(Branch, null=True, blank=True, related_name=\n 'branch_stock_comments')\n", (1255, 1329), False, 'from django.db import models\n'), ((1339, 1443), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""accounts.ElopsysUser"""'], {'null': '(True)', 'blank': '(True)', 'related_name': '"""user_stock_comments"""'}), "('accounts.ElopsysUser', null=True, blank=True,\n related_name='user_stock_comments')\n", (1356, 1443), False, 'from django.db import models\n'), ((1451, 1470), 'simple_history.models.HistoricalRecords', 'HistoricalRecords', 
([], {}), '()\n', (1468, 1470), False, 'from simple_history.models import HistoricalRecords\n'), ((1524, 1595), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(120)', 'default': 'None', 'null': '(False)', 'blank': '(False)'}), '(max_length=120, default=None, null=False, blank=False)\n', (1540, 1595), False, 'from django.db import models\n'), ((1607, 1649), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'null': '(True)', 'blank': '(True)'}), '(null=True, blank=True)\n', (1626, 1649), False, 'from django.db import models\n'), ((1671, 1742), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(120)', 'default': 'None', 'null': '(False)', 'blank': '(False)'}), '(max_length=120, default=None, null=False, blank=False)\n', (1687, 1742), False, 'from django.db import models\n'), ((1758, 1828), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(2000)', 'default': 'None', 'null': '(True)', 'blank': '(True)'}), '(max_length=2000, default=None, null=True, blank=True)\n', (1774, 1828), False, 'from django.db import models\n'), ((1839, 1938), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Branch'], {'null': '(False)', 'blank': '(False)', 'related_name': '"""branches_stock_requisitions"""'}), "(Branch, null=False, blank=False, related_name=\n 'branches_stock_requisitions')\n", (1856, 1938), False, 'from django.db import models\n'), ((1950, 2062), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""employees.Employee"""'], {'null': '(False)', 'blank': '(False)', 'related_name': '"""employee_stock_requisitions"""'}), "('employees.Employee', null=False, blank=False,\n related_name='employee_stock_requisitions')\n", (1967, 2062), False, 'from django.db import models\n'), ((2072, 2111), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (2092, 2111), False, 'from django.db import models\n'), ((2124, 2195), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(120)', 'default': 'None', 'null': '(False)', 'blank': '(False)'}), '(max_length=120, default=None, null=False, blank=False)\n', (2140, 2195), False, 'from django.db import models\n'), ((2212, 2257), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'blank': '(True)', 'default': '(True)'}), '(blank=True, default=True)\n', (2231, 2257), False, 'from django.db import models\n'), ((2272, 2380), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""accounts.ElopsysUser"""'], {'null': '(True)', 'blank': '(True)', 'related_name': '"""user_stock_requisitions"""'}), "('accounts.ElopsysUser', null=True, blank=True,\n related_name='user_stock_requisitions')\n", (2289, 2380), False, 'from django.db import models\n'), ((2393, 2512), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""accounts.ElopsysUser"""'], {'null': '(True)', 'blank': '(True)', 'related_name': '"""user_modified_stock_rerequisitions"""'}), "('accounts.ElopsysUser', null=True, blank=True,\n related_name='user_modified_stock_rerequisitions')\n", (2410, 2512), False, 'from django.db import models\n'), ((2520, 2539), 'simple_history.models.HistoricalRecords', 'HistoricalRecords', ([], {}), '()\n', (2537, 2539), False, 'from simple_history.models import HistoricalRecords\n'), ((3009, 3121), 'django.db.models.ForeignKey', 'models.ForeignKey', (['STRequisition'], {'null': '(False)', 'blank': '(False)', 'related_name': '"""requisition_stock_requistionItems"""'}), "(STRequisition, null=False, blank=False, 
related_name=\n 'requisition_stock_requistionItems')\n", (3026, 3121), False, 'from django.db import models\n'), ((3130, 3199), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)', 'default': 'None', 'null': '(True)', 'blank': '(True)'}), '(max_length=200, default=None, null=True, blank=True)\n', (3146, 3199), False, 'from django.db import models\n'), ((3213, 3282), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)', 'default': 'None', 'null': '(True)', 'blank': '(True)'}), '(max_length=200, default=None, null=True, blank=True)\n', (3229, 3282), False, 'from django.db import models\n'), ((3290, 3332), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'null': '(True)', 'blank': '(True)'}), '(null=True, blank=True)\n', (3309, 3332), False, 'from django.db import models\n'), ((3348, 3414), 'django.db.models.FloatField', 'models.FloatField', ([], {'null': '(True)', 'blank': '(True)', 'max_length': '(20)', 'default': '(0)'}), '(null=True, blank=True, max_length=20, default=0)\n', (3365, 3414), False, 'from django.db import models\n'), ((3429, 3495), 'django.db.models.FloatField', 'models.FloatField', ([], {'null': '(True)', 'blank': '(True)', 'max_length': '(20)', 'default': '(0)'}), '(null=True, blank=True, max_length=20, default=0)\n', (3446, 3495), False, 'from django.db import models\n'), ((3510, 3622), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""accounts.ElopsysUser"""'], {'null': '(True)', 'blank': '(True)', 'related_name': '"""user_stock_requisitionitems"""'}), "('accounts.ElopsysUser', null=True, blank=True,\n related_name='user_stock_requisitionitems')\n", (3527, 3622), False, 'from django.db import models\n'), ((3635, 3758), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""accounts.ElopsysUser"""'], {'null': '(True)', 'blank': '(True)', 'related_name': '"""user_modified_stock_rerequisitionitems"""'}), "('accounts.ElopsysUser', null=True, blank=True,\n related_name='user_modified_stock_rerequisitionitems')\n", (3652, 3758), False, 'from django.db import models\n'), ((3766, 3785), 'simple_history.models.HistoricalRecords', 'HistoricalRecords', ([], {}), '()\n', (3783, 3785), False, 'from simple_history.models import HistoricalRecords\n'), ((3833, 3902), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)', 'default': 'None', 'null': '(True)', 'blank': '(True)'}), '(max_length=200, default=None, null=True, blank=True)\n', (3849, 3902), False, 'from django.db import models\n'), ((3916, 3985), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)', 'default': 'None', 'null': '(True)', 'blank': '(True)'}), '(max_length=200, default=None, null=True, blank=True)\n', (3932, 3985), False, 'from django.db import models\n'), ((3998, 4169), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20)', 'default': 'None', 'null': '(True)', 'blank': '(True)', 'choices': "(('grocery', 'Grocery'), ('sanitation', 'Sanitation'), ('stationary',\n 'Stationary'))"}), "(max_length=20, default=None, null=True, blank=True,\n choices=(('grocery', 'Grocery'), ('sanitation', 'Sanitation'), (\n 'stationary', 'Stationary')))\n", (4014, 4169), False, 'from django.db import models\n'), ((4249, 4349), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""accounts.ElopsysUser"""'], {'null': '(True)', 'blank': '(True)', 'related_name': '"""user_stockitems"""'}), "('accounts.ElopsysUser', null=True, blank=True,\n related_name='user_stockitems')\n", (4266, 4349), 
False, 'from django.db import models\n'), ((4362, 4471), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""accounts.ElopsysUser"""'], {'null': '(True)', 'blank': '(True)', 'related_name': '"""user_modified_stockitems"""'}), "('accounts.ElopsysUser', null=True, blank=True,\n related_name='user_modified_stockitems')\n", (4379, 4471), False, 'from django.db import models\n'), ((4479, 4498), 'simple_history.models.HistoricalRecords', 'HistoricalRecords', ([], {}), '()\n', (4496, 4498), False, 'from simple_history.models import HistoricalRecords\n'), ((4690, 4785), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Branch'], {'null': '(False)', 'blank': '(False)', 'related_name': '"""branch_branchstockitems"""'}), "(Branch, null=False, blank=False, related_name=\n 'branch_branchstockitems')\n", (4707, 4785), False, 'from django.db import models\n'), ((4789, 4885), 'django.db.models.ForeignKey', 'models.ForeignKey', (['StockItem'], {'null': '(False)', 'blank': '(False)', 'related_name': '"""item_branchstockitems"""'}), "(StockItem, null=False, blank=False, related_name=\n 'item_branchstockitems')\n", (4806, 4885), False, 'from django.db import models\n'), ((4893, 4959), 'django.db.models.FloatField', 'models.FloatField', ([], {'null': '(True)', 'blank': '(True)', 'max_length': '(20)', 'default': '(0)'}), '(null=True, blank=True, max_length=20, default=0)\n', (4910, 4959), False, 'from django.db import models\n'), ((4980, 5046), 'django.db.models.FloatField', 'models.FloatField', ([], {'null': '(True)', 'blank': '(True)', 'max_length': '(20)', 'default': '(0)'}), '(null=True, blank=True, max_length=20, default=0)\n', (4997, 5046), False, 'from django.db import models\n'), ((5061, 5166), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""accounts.ElopsysUser"""'], {'null': '(True)', 'blank': '(True)', 'related_name': '"""user_branchstocktems"""'}), "('accounts.ElopsysUser', null=True, blank=True,\n related_name='user_branchstocktems')\n", (5078, 5166), False, 'from django.db import models\n'), ((5179, 5293), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""accounts.ElopsysUser"""'], {'null': '(True)', 'blank': '(True)', 'related_name': '"""user_modified_branchstocktems"""'}), "('accounts.ElopsysUser', null=True, blank=True,\n related_name='user_modified_branchstocktems')\n", (5196, 5293), False, 'from django.db import models\n'), ((5301, 5320), 'simple_history.models.HistoricalRecords', 'HistoricalRecords', ([], {}), '()\n', (5318, 5320), False, 'from simple_history.models import HistoricalRecords\n'), ((5364, 5453), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Branch'], {'null': '(False)', 'blank': '(False)', 'related_name': '"""branch_stocktakes"""'}), "(Branch, null=False, blank=False, related_name=\n 'branch_stocktakes')\n", (5381, 5453), False, 'from django.db import models\n'), ((5458, 5545), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Floor'], {'null': '(False)', 'blank': '(False)', 'related_name': '"""floor_stocktakes"""'}), "(Floor, null=False, blank=False, related_name=\n 'floor_stocktakes')\n", (5475, 5545), False, 'from django.db import models\n'), ((5549, 5639), 'django.db.models.ForeignKey', 'models.ForeignKey', (['StockItem'], {'null': '(False)', 'blank': '(False)', 'related_name': '"""item_stocktakes"""'}), "(StockItem, null=False, blank=False, related_name=\n 'item_stocktakes')\n", (5566, 5639), False, 'from django.db import models\n'), ((5647, 5713), 'django.db.models.FloatField', 'models.FloatField', ([], {'null': '(True)', 
'blank': '(True)', 'max_length': '(20)', 'default': '(0)'}), '(null=True, blank=True, max_length=20, default=0)\n', (5664, 5713), False, 'from django.db import models\n'), ((5726, 5829), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""employees.Employee"""'], {'null': '(False)', 'blank': '(False)', 'related_name': '"""checker_stocktakes"""'}), "('employees.Employee', null=False, blank=False,\n related_name='checker_stocktakes')\n", (5743, 5829), False, 'from django.db import models\n'), ((5842, 5912), 'django.db.models.DateField', 'models.DateField', ([], {'null': '(True)', 'blank': '(True)', 'auto_now': '(False)', 'editable': '(True)'}), '(null=True, blank=True, auto_now=False, editable=True)\n', (5858, 5912), False, 'from django.db import models\n'), ((5930, 6000), 'django.db.models.TimeField', 'models.TimeField', ([], {'null': '(True)', 'blank': '(True)', 'auto_now': '(False)', 'editable': '(True)'}), '(null=True, blank=True, auto_now=False, editable=True)\n', (5946, 6000), False, 'from django.db import models\n'), ((6015, 6115), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""accounts.ElopsysUser"""'], {'null': '(True)', 'blank': '(True)', 'related_name': '"""user_stocktakes"""'}), "('accounts.ElopsysUser', null=True, blank=True,\n related_name='user_stocktakes')\n", (6032, 6115), False, 'from django.db import models\n'), ((6128, 6237), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""accounts.ElopsysUser"""'], {'null': '(True)', 'blank': '(True)', 'related_name': '"""user_modified_stocktakes"""'}), "('accounts.ElopsysUser', null=True, blank=True,\n related_name='user_modified_stocktakes')\n", (6145, 6237), False, 'from django.db import models\n'), ((6245, 6264), 'simple_history.models.HistoricalRecords', 'HistoricalRecords', ([], {}), '()\n', (6262, 6264), False, 'from simple_history.models import HistoricalRecords\n'), ((6317, 6415), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Branch'], {'null': '(False)', 'blank': '(False)', 'related_name': '"""branch_stockreplenishments"""'}), "(Branch, null=False, blank=False, related_name=\n 'branch_stockreplenishments')\n", (6334, 6415), False, 'from django.db import models\n'), ((6419, 6519), 'django.db.models.ForeignKey', 'models.ForeignKey', (['StockItem'], {'null': '(False)', 'blank': '(False)', 'related_name': '"""item_stockreplenishmennts"""'}), "(StockItem, null=False, blank=False, related_name=\n 'item_stockreplenishmennts')\n", (6436, 6519), False, 'from django.db import models\n'), ((6527, 6593), 'django.db.models.FloatField', 'models.FloatField', ([], {'null': '(True)', 'blank': '(True)', 'max_length': '(20)', 'default': '(0)'}), '(null=True, blank=True, max_length=20, default=0)\n', (6544, 6593), False, 'from django.db import models\n'), ((6610, 6722), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""employees.Employee"""'], {'null': '(False)', 'blank': '(False)', 'related_name': '"""checker_stockreplenishments"""'}), "('employees.Employee', null=False, blank=False,\n related_name='checker_stockreplenishments')\n", (6627, 6722), False, 'from django.db import models\n'), ((6739, 6809), 'django.db.models.DateField', 'models.DateField', ([], {'null': '(True)', 'blank': '(True)', 'auto_now': '(False)', 'editable': '(True)'}), '(null=True, blank=True, auto_now=False, editable=True)\n', (6755, 6809), False, 'from django.db import models\n'), ((6824, 6933), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""accounts.ElopsysUser"""'], {'null': '(True)', 'blank': '(True)', 
'related_name': '"""user_stockreplenishments"""'}), "('accounts.ElopsysUser', null=True, blank=True,\n related_name='user_stockreplenishments')\n", (6841, 6933), False, 'from django.db import models\n'), ((6946, 7064), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""accounts.ElopsysUser"""'], {'null': '(True)', 'blank': '(True)', 'related_name': '"""user_modified_stockreplenishments"""'}), "('accounts.ElopsysUser', null=True, blank=True,\n related_name='user_modified_stockreplenishments')\n", (6963, 7064), False, 'from django.db import models\n'), ((7072, 7091), 'simple_history.models.HistoricalRecords', 'HistoricalRecords', ([], {}), '()\n', (7089, 7091), False, 'from simple_history.models import HistoricalRecords\n'), ((7133, 7188), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(120)', 'null': '(True)', 'blank': '(True)'}), '(max_length=120, null=True, blank=True)\n', (7149, 7188), False, 'from django.db import models\n'), ((7203, 7360), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20)', 'default': 'None', 'null': '(True)', 'blank': '(True)', 'choices': "(('sales', 'Sales Department'), ('marketing', 'Marketing Departrment'))"}), "(max_length=20, default=None, null=True, blank=True,\n choices=(('sales', 'Sales Department'), ('marketing',\n 'Marketing Departrment')))\n", (7219, 7360), False, 'from django.db import models\n'), ((7426, 7492), 'django.db.models.FloatField', 'models.FloatField', ([], {'null': '(True)', 'blank': '(True)', 'max_length': '(20)', 'default': '(0)'}), '(null=True, blank=True, max_length=20, default=0)\n', (7443, 7492), False, 'from django.db import models\n'), ((7589, 7681), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Book'], {'null': '(False)', 'blank': '(False)', 'related_name': '"""book_bookreplenishment"""'}), "(Book, null=False, blank=False, related_name=\n 'book_bookreplenishment')\n", (7606, 7681), False, 'from django.db import models\n'), ((7691, 7769), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(120)', 'default': '"""Operational"""', 'null': '(True)', 'blank': '(True)'}), "(max_length=120, default='Operational', null=True, blank=True)\n", (7707, 7769), False, 'from django.db import models\n'), ((7783, 7861), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(120)', 'default': '"""Operational"""', 'null': '(True)', 'blank': '(True)'}), "(max_length=120, default='Operational', null=True, blank=True)\n", (7799, 7861), False, 'from django.db import models\n'), ((7875, 7941), 'django.db.models.FloatField', 'models.FloatField', ([], {'null': '(True)', 'blank': '(True)', 'max_length': '(20)', 'default': '(0)'}), '(null=True, blank=True, max_length=20, default=0)\n', (7892, 7941), False, 'from django.db import models\n'), ((7954, 8020), 'django.db.models.FloatField', 'models.FloatField', ([], {'null': '(True)', 'blank': '(True)', 'max_length': '(20)', 'default': '(0)'}), '(null=True, blank=True, max_length=20, default=0)\n', (7971, 8020), False, 'from django.db import models\n'), ((8037, 8107), 'django.db.models.DateField', 'models.DateField', ([], {'null': '(True)', 'blank': '(True)', 'auto_now': '(False)', 'editable': '(True)'}), '(null=True, blank=True, auto_now=False, editable=True)\n', (8053, 8107), False, 'from django.db import models\n'), ((8125, 8195), 'django.db.models.DateField', 'models.DateField', ([], {'null': '(True)', 'blank': '(True)', 'auto_now': '(False)', 'editable': '(True)'}), '(null=True, blank=True, auto_now=False, 
editable=True)\n', (8141, 8195), False, 'from django.db import models\n'), ((8208, 8254), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'blank': '(True)', 'default': '(False)'}), '(blank=True, default=False)\n', (8227, 8254), False, 'from django.db import models\n'), ((8265, 8311), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'blank': '(True)', 'default': '(False)'}), '(blank=True, default=False)\n', (8284, 8311), False, 'from django.db import models\n'), ((8326, 8492), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20)', 'default': '"""Pending"""', 'null': '(True)', 'blank': '(True)', 'choices': "(('Pending', 'Pending'), ('Aproved', 'Authorize'), ('Declined', 'Decline'))"}), "(max_length=20, default='Pending', null=True, blank=True,\n choices=(('Pending', 'Pending'), ('Aproved', 'Authorize'), ('Declined',\n 'Decline')))\n", (8342, 8492), False, 'from django.db import models\n'), ((8563, 8670), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""accounts.ElopsysUser"""'], {'null': '(True)', 'blank': '(True)', 'related_name': '"""user_bookreplenishment"""'}), "('accounts.ElopsysUser', null=True, blank=True,\n related_name='user_bookreplenishment')\n", (8580, 8670), False, 'from django.db import models\n'), ((8683, 8799), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""accounts.ElopsysUser"""'], {'null': '(True)', 'blank': '(True)', 'related_name': '"""user_modified_bookreplenishment"""'}), "('accounts.ElopsysUser', null=True, blank=True,\n related_name='user_modified_bookreplenishment')\n", (8700, 8799), False, 'from django.db import models\n'), ((8807, 8826), 'simple_history.models.HistoricalRecords', 'HistoricalRecords', ([], {}), '()\n', (8824, 8826), False, 'from simple_history.models import HistoricalRecords\n'), ((8873, 8963), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Book'], {'null': '(False)', 'blank': '(False)', 'related_name': '"""book_bookallocations"""'}), "(Book, null=False, blank=False, related_name=\n 'book_bookallocations')\n", (8890, 8963), False, 'from django.db import models\n'), ((8973, 9028), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(120)', 'null': '(True)', 'blank': '(True)'}), '(max_length=120, null=True, blank=True)\n', (8989, 9028), False, 'from django.db import models\n'), ((9042, 9097), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(120)', 'null': '(True)', 'blank': '(True)'}), '(max_length=120, null=True, blank=True)\n', (9058, 9097), False, 'from django.db import models\n'), ((9111, 9177), 'django.db.models.FloatField', 'models.FloatField', ([], {'null': '(True)', 'blank': '(True)', 'max_length': '(20)', 'default': '(0)'}), '(null=True, blank=True, max_length=20, default=0)\n', (9128, 9177), False, 'from django.db import models\n'), ((9205, 9309), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""employees.Employee"""'], {'null': '(False)', 'blank': '(False)', 'related_name': '"""ram_bookallocations"""'}), "('employees.Employee', null=False, blank=False,\n related_name='ram_bookallocations')\n", (9222, 9309), False, 'from django.db import models\n'), ((9316, 9410), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Region'], {'null': '(False)', 'blank': '(False)', 'related_name': '"""region_bookallocations"""'}), "(Region, null=False, blank=False, related_name=\n 'region_bookallocations')\n", (9333, 9410), False, 'from django.db import models\n'), ((9424, 9494), 'django.db.models.DateField', 'models.DateField', 
([], {'null': '(True)', 'blank': '(True)', 'auto_now': '(False)', 'editable': '(True)'}), '(null=True, blank=True, auto_now=False, editable=True)\n', (9440, 9494), False, 'from django.db import models\n'), ((9505, 9551), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'blank': '(True)', 'default': '(False)'}), '(blank=True, default=False)\n', (9524, 9551), False, 'from django.db import models\n'), ((9566, 9732), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20)', 'default': '"""Pending"""', 'null': '(True)', 'blank': '(True)', 'choices': "(('Pending', 'Pending'), ('Aproved', 'Authorize'), ('Declined', 'Decline'))"}), "(max_length=20, default='Pending', null=True, blank=True,\n choices=(('Pending', 'Pending'), ('Aproved', 'Authorize'), ('Declined',\n 'Decline')))\n", (9582, 9732), False, 'from django.db import models\n'), ((9803, 9908), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""accounts.ElopsysUser"""'], {'null': '(True)', 'blank': '(True)', 'related_name': '"""user_bookallocations"""'}), "('accounts.ElopsysUser', null=True, blank=True,\n related_name='user_bookallocations')\n", (9820, 9908), False, 'from django.db import models\n'), ((9921, 10035), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""accounts.ElopsysUser"""'], {'null': '(True)', 'blank': '(True)', 'related_name': '"""user_modified_bookallocations"""'}), "('accounts.ElopsysUser', null=True, blank=True,\n related_name='user_modified_bookallocations')\n", (9938, 10035), False, 'from django.db import models\n'), ((10043, 10062), 'simple_history.models.HistoricalRecords', 'HistoricalRecords', ([], {}), '()\n', (10060, 10062), False, 'from simple_history.models import HistoricalRecords\n'), ((2655, 2672), 'django.db.models.Sum', 'Sum', (['"""line_total"""'], {}), "('line_total')\n", (2658, 2672), False, 'from django.db.models import Count, Sum, Q\n')] |
import os
import json
from typing import Tuple
import pickle as pkl
import torch
import torchaudio
from torch import Tensor
import torch.nn as nn
from torch.nn.utils.rnn import pad_sequence
from torch.utils.data import Dataset, DataLoader
from torchaudio.datasets.utils import (
download_url,
extract_archive,
walk_files,
)
from data_help import LetterTransform, WordTransform
import pdb
PRE_TRAINED_MODEL = "bert-base-uncased"
MAX_LEN = 128
DO_LOWER_CASE = True
URL = "train-clean-100"
FOLDER_IN_ARCHIVE = "LibriSpeech"
_CHECKSUMS = {
"http://www.openslr.org/resources/12/dev-clean.tar.gz":
"76f87d090650617fca0cac8f88b9416e0ebf80350acb97b343a85fa903728ab3",
"http://www.openslr.org/resources/12/dev-other.tar.gz":
"12661c48e8c3fe1de2c1caa4c3e135193bfb1811584f11f569dd12645aa84365",
"http://www.openslr.org/resources/12/test-clean.tar.gz":
"39fde525e59672dc6d1551919b1478f724438a95aa55f874b576be21967e6c23",
"http://www.openslr.org/resources/12/test-other.tar.gz":
"d09c181bba5cf717b3dee7d4d592af11a3ee3a09e08ae025c5506f6ebe961c29",
"http://www.openslr.org/resources/12/train-clean-100.tar.gz":
"d4ddd1d5a6ab303066f14971d768ee43278a5f2a0aa43dc716b0e64ecbbbf6e2",
"http://www.openslr.org/resources/12/train-clean-360.tar.gz":
"146a56496217e96c14334a160df97fffedd6e0a04e66b9c5af0d40be3c792ecf",
"http://www.openslr.org/resources/12/train-other-500.tar.gz":
"ddb22f27f96ec163645d53215559df6aa36515f26e01dd70798188350adcb6d2"
}
def data_processing(data, data_type = "train"):
if data_type == 'train':
audio_transforms = nn.Sequential(
torchaudio.transforms.FrequencyMasking(freq_mask_param=15),
torchaudio.transforms.TimeMasking(time_mask_param=35)
)
else:
audio_transforms = torchaudio.transforms.MelSpectrogram()
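    # the loop below works on the precomputed melspectrum shipped with each sample;
    # the audio_transforms defined above are not applied inside this function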
spectrograms = []
labels = []
words = []
input_lengths = []
label_lengths = []
word_lengths = []
for (waveform, melspectrum, _, utterance, letter_list, word_list) in data:
spec = melspectrum.squeeze(0).transpose(0,1)
spectrograms.append(spec)
labels.append(letter_list)
words.append(word_list)
input_lengths.append(spec.shape[0])
label_lengths.append(len(letter_list))
word_lengths.append(len(word_list))
spectrograms = pad_sequence(spectrograms, batch_first=True).unsqueeze(1).transpose(2, 3)
labels = pad_sequence(labels, batch_first=True)
words = pad_sequence(words, batch_first=True)
return spectrograms, labels, words, input_lengths, label_lengths, word_lengths
def load_librispeech_item(fileid: str,
path: str,
ext_wav: str,
ext_mel: str,
ext_txt: str) -> Tuple[Tensor, Tensor, int, str, Tensor, Tensor]:
speaker_id, chapter_id, utterance_id = fileid.split("-")
file_text = speaker_id + "-" + chapter_id + ext_txt
file_text = os.path.join(path, speaker_id, chapter_id, file_text)
fileid_audio = speaker_id + "-" + chapter_id + "-" + utterance_id
file_mel = fileid_audio + ext_mel
file_mel = os.path.join(path, speaker_id, chapter_id, file_mel)
file_wav = fileid_audio + ext_wav
file_wav = os.path.join(path, speaker_id, chapter_id, file_wav)
# Load audio
waveform, sample_rate = torchaudio.load(file_wav)
melspectrum = torch.load(file_mel)
letter_tranform = LetterTransform()
word_transform = WordTransform(PRE_TRAINED_MODEL, MAX_LEN, DO_LOWER_CASE)
# Load text
with open(file_text) as ft:
for line in ft:
fileid_text, utterance = line.strip().split(" ", 1)
utterance_letter = torch.tensor(letter_tranform.text_to_int(utterance.lower()))
utterance_word = torch.tensor(word_transform.text_to_int(utterance.lower()))
if fileid_audio == fileid_text:
break
else:
# Translation not found
raise FileNotFoundError("Translation not found for " + fileid_audio)
return (
waveform,
melspectrum,
sample_rate,
utterance,
utterance_letter,
utterance_word
)
class LIBRISPEECH(Dataset):
"""
    Create a Dataset for LibriSpeech. Each item is a tuple of the form:
    waveform, melspectrum, sample_rate, utterance, utterance_letter, utterance_word
"""
_ext_txt = ".trans.txt"
_ext_wav = ".flac"
_ext_mel = ".pt"
def __init__(self,
root: str,
url: str,
folder_in_archive: str = FOLDER_IN_ARCHIVE,
download: bool = False) -> None:
if url in [
"dev-clean",
"dev-other",
"test-clean",
"test-other",
"train-clean-100",
"train-clean-360",
"train-other-500",
]:
ext_archive = ".tar.gz"
base_url = "http://www.openslr.org/resources/12/"
url = os.path.join(base_url, url + ext_archive)
basename = os.path.basename(url)
archive = os.path.join(root, basename)
basename = basename.split(".")[0]
folder_in_archive = os.path.join(folder_in_archive, basename)
self._path = os.path.join(root, folder_in_archive)
if download:
if not os.path.isdir(self._path):
if not os.path.isfile(archive):
checksum = _CHECKSUMS.get(url, None)
download_url(url, root, hash_value=checksum)
extract_archive(archive)
audio_transforms = torchaudio.transforms.MelSpectrogram(sample_rate=16000, n_mels=128)
for root, dirs, files in os.walk(self._path):
if len(files) != 0:
for file in files:
if file.split('.')[-1]==self._ext_wav.split('.')[-1]:
file_audio = os.path.join(root, file)
waveform, _ = torchaudio.load(file_audio)
spec = audio_transforms(waveform)
                                # save the precomputed mel spectrogram with the extension the walker expects
                                file_spec = os.path.join(root, file.split('.')[0] + self._ext_mel)
torch.save(spec, file_spec)
walker = walk_files(
self._path, suffix=self._ext_mel, prefix=False, remove_suffix=True
)
self._walker = list(walker)
def __getitem__(self, n: int) -> Tuple[Tensor, Tensor, int, str, Tensor, Tensor]:
fileid = self._walker[n]
return load_librispeech_item(fileid, self._path, self._ext_wav, self._ext_mel, self._ext_txt)
def __len__(self) -> int:
return len(self._walker)
if __name__ == "__main__":
with open('../params.json') as json_file:
params = json.load(json_file)
data_params = params['data']
train_params = params['train']
print('hi')
train_dataset = LIBRISPEECH("../../data/audio_data/", url=data_params['train_url'], download=False)
print(train_dataset.__getitem__(10))
use_cuda = torch.cuda.is_available()
kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
train_loader = DataLoader(dataset=train_dataset,
batch_size=train_params['batch_size'],
shuffle=True,
collate_fn=lambda x: data_processing(x, data_type='train'),
**kwargs)
print(next(iter(train_loader)))
# test_dataset = LIBRISPEECH("../../data/audio_data/", url=data_params['test_url'], download=False)
| [
"torchaudio.load",
"torch.nn.utils.rnn.pad_sequence",
"torch.cuda.is_available",
"torchaudio.datasets.utils.download_url",
"torchaudio.datasets.utils.walk_files",
"os.walk",
"data_help.LetterTransform",
"torchaudio.datasets.utils.extract_archive",
"os.path.isdir",
"data_help.WordTransform",
"torchaudio.transforms.FrequencyMasking",
"os.path.isfile",
"torch.save",
"torchaudio.transforms.MelSpectrogram",
"torch.load",
"os.path.join",
"os.path.basename",
"json.load",
"torchaudio.transforms.TimeMasking"
] | [((2446, 2484), 'torch.nn.utils.rnn.pad_sequence', 'pad_sequence', (['labels'], {'batch_first': '(True)'}), '(labels, batch_first=True)\n', (2458, 2484), False, 'from torch.nn.utils.rnn import pad_sequence\n'), ((2497, 2534), 'torch.nn.utils.rnn.pad_sequence', 'pad_sequence', (['words'], {'batch_first': '(True)'}), '(words, batch_first=True)\n', (2509, 2534), False, 'from torch.nn.utils.rnn import pad_sequence\n'), ((3007, 3060), 'os.path.join', 'os.path.join', (['path', 'speaker_id', 'chapter_id', 'file_text'], {}), '(path, speaker_id, chapter_id, file_text)\n', (3019, 3060), False, 'import os\n'), ((3185, 3237), 'os.path.join', 'os.path.join', (['path', 'speaker_id', 'chapter_id', 'file_mel'], {}), '(path, speaker_id, chapter_id, file_mel)\n', (3197, 3237), False, 'import os\n'), ((3292, 3344), 'os.path.join', 'os.path.join', (['path', 'speaker_id', 'chapter_id', 'file_wav'], {}), '(path, speaker_id, chapter_id, file_wav)\n', (3304, 3344), False, 'import os\n'), ((3390, 3415), 'torchaudio.load', 'torchaudio.load', (['file_wav'], {}), '(file_wav)\n', (3405, 3415), False, 'import torchaudio\n'), ((3434, 3454), 'torch.load', 'torch.load', (['file_mel'], {}), '(file_mel)\n', (3444, 3454), False, 'import torch\n'), ((3478, 3495), 'data_help.LetterTransform', 'LetterTransform', ([], {}), '()\n', (3493, 3495), False, 'from data_help import LetterTransform, WordTransform\n'), ((3517, 3573), 'data_help.WordTransform', 'WordTransform', (['PRE_TRAINED_MODEL', 'MAX_LEN', 'DO_LOWER_CASE'], {}), '(PRE_TRAINED_MODEL, MAX_LEN, DO_LOWER_CASE)\n', (3530, 3573), False, 'from data_help import LetterTransform, WordTransform\n'), ((7094, 7119), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (7117, 7119), False, 'import torch\n'), ((1803, 1841), 'torchaudio.transforms.MelSpectrogram', 'torchaudio.transforms.MelSpectrogram', ([], {}), '()\n', (1839, 1841), False, 'import torchaudio\n'), ((5104, 5125), 'os.path.basename', 'os.path.basename', (['url'], {}), '(url)\n', (5120, 5125), False, 'import os\n'), ((5144, 5172), 'os.path.join', 'os.path.join', (['root', 'basename'], {}), '(root, basename)\n', (5156, 5172), False, 'import os\n'), ((5244, 5285), 'os.path.join', 'os.path.join', (['folder_in_archive', 'basename'], {}), '(folder_in_archive, basename)\n', (5256, 5285), False, 'import os\n'), ((5308, 5345), 'os.path.join', 'os.path.join', (['root', 'folder_in_archive'], {}), '(root, folder_in_archive)\n', (5320, 5345), False, 'import os\n'), ((6302, 6380), 'torchaudio.datasets.utils.walk_files', 'walk_files', (['self._path'], {'suffix': 'self._ext_mel', 'prefix': '(False)', 'remove_suffix': '(True)'}), '(self._path, suffix=self._ext_mel, prefix=False, remove_suffix=True)\n', (6312, 6380), False, 'from torchaudio.datasets.utils import download_url, extract_archive, walk_files\n'), ((6827, 6847), 'json.load', 'json.load', (['json_file'], {}), '(json_file)\n', (6836, 6847), False, 'import json\n'), ((1630, 1688), 'torchaudio.transforms.FrequencyMasking', 'torchaudio.transforms.FrequencyMasking', ([], {'freq_mask_param': '(15)'}), '(freq_mask_param=15)\n', (1668, 1688), False, 'import torchaudio\n'), ((1702, 1755), 'torchaudio.transforms.TimeMasking', 'torchaudio.transforms.TimeMasking', ([], {'time_mask_param': '(35)'}), '(time_mask_param=35)\n', (1735, 1755), False, 'import torchaudio\n'), ((5042, 5083), 'os.path.join', 'os.path.join', (['base_url', '(url + ext_archive)'], {}), '(base_url, url + ext_archive)\n', (5054, 5083), False, 'import os\n'), ((5657, 5724), 
'torchaudio.transforms.MelSpectrogram', 'torchaudio.transforms.MelSpectrogram', ([], {'sample_rate': '(16000)', 'n_mels': '(128)'}), '(sample_rate=16000, n_mels=128)\n', (5693, 5724), False, 'import torchaudio\n'), ((5762, 5781), 'os.walk', 'os.walk', (['self._path'], {}), '(self._path)\n', (5769, 5781), False, 'import os\n'), ((5387, 5412), 'os.path.isdir', 'os.path.isdir', (['self._path'], {}), '(self._path)\n', (5400, 5412), False, 'import os\n'), ((5600, 5624), 'torchaudio.datasets.utils.extract_archive', 'extract_archive', (['archive'], {}), '(archive)\n', (5615, 5624), False, 'from torchaudio.datasets.utils import download_url, extract_archive, walk_files\n'), ((2359, 2403), 'torch.nn.utils.rnn.pad_sequence', 'pad_sequence', (['spectrograms'], {'batch_first': '(True)'}), '(spectrograms, batch_first=True)\n', (2371, 2403), False, 'from torch.nn.utils.rnn import pad_sequence\n'), ((5437, 5460), 'os.path.isfile', 'os.path.isfile', (['archive'], {}), '(archive)\n', (5451, 5460), False, 'import os\n'), ((5539, 5583), 'torchaudio.datasets.utils.download_url', 'download_url', (['url', 'root'], {'hash_value': 'checksum'}), '(url, root, hash_value=checksum)\n', (5551, 5583), False, 'from torchaudio.datasets.utils import download_url, extract_archive, walk_files\n'), ((5977, 6001), 'os.path.join', 'os.path.join', (['root', 'file'], {}), '(root, file)\n', (5989, 6001), False, 'import os\n'), ((6044, 6071), 'torchaudio.load', 'torchaudio.load', (['file_audio'], {}), '(file_audio)\n', (6059, 6071), False, 'import torchaudio\n'), ((6256, 6283), 'torch.save', 'torch.save', (['spec', 'file_spec'], {}), '(spec, file_spec)\n', (6266, 6283), False, 'import torch\n')] |
import os,sys
from pathlib import Path
from os.path import realpath
import numpy as np
import argparse
import logging
for handler in logging.root.handlers[:]:
logging.root.removeHandler(handler)
logging.basicConfig(format='%(asctime)s %(message)s')
logging.warning('is the time swaty simulation started.')
from swaty.auxiliary.text_reader_string import text_reader_string
from swaty.classes.swatpara import swatpara
from swaty.swaty_read_model_configuration_file import swaty_read_model_configuration_file
parser = argparse.ArgumentParser()
iFlag_standalone=1
iCase_index = 1
sDate='20220504'
sPath = realpath(str( Path().resolve() ))
#this is the temp path which has auxiliray data, not the SWAT input
sWorkspace_data = ( sPath + '/data/arw' )
#the actual path to input data
sWorkspace_input = str(Path(sWorkspace_data) / 'input')
#to extract the parameter, we need to know the name of parameter for watershed, subbasin and hru, soil
aParameter=list()
aPara_in=dict()
aParemeter_watershed = np.array(['esco','ai0', 'sftmp','smtmp','timp','epco'])
nParameter_watershed = len(aParemeter_watershed)
for j in np.arange(1, nParameter_watershed+1):
aPara_in['iParameter_type'] = 1
aPara_in['iIndex_subbasin'] = j
aPara_in['sName']= aParemeter_watershed[j-1]
aPara_in['dValue_init']=0.0
aPara_in['dValue_current']=0.01* j +0.01
aPara_in['dValue_lower']=-1
aPara_in['dValue_upper']=5
pParameter = swatpara(aPara_in)
aParameter.append( pParameter )
aParemeter_subbasin = np.array(['ch_n2','ch_k2','plaps','tlaps'])
nParameter_subbasin = len(aParemeter_subbasin)
for j in np.arange(1, nParameter_subbasin+1):
aPara_in['iParameter_type'] = 2
aPara_in['iIndex_subbasin'] = j
aPara_in['sName']= aParemeter_subbasin[j-1]
aPara_in['dValue_init']=0.0
aPara_in['dValue_current']=0.01* j +0.01
aPara_in['dValue_lower']=-1
aPara_in['dValue_upper']=5
pParameter = swatpara(aPara_in)
aParameter.append(pParameter)
aParemeter_hru = np.array(['cn2','rchrg_dp','gwqmn','gw_revap','revapmn','gw_delay','alpha_bf','ov_n'])
nParameter_hru = len(aParemeter_hru)
for j in np.arange(1, nParameter_hru+1):
aPara_in['iParameter_type'] = 3
aPara_in['iIndex_hru'] = j
aPara_in['sName']= aParemeter_hru[j-1]
aPara_in['dValue_init']=0.0
aPara_in['dValue_current']=0.01* j +0.01
aPara_in['dValue_lower']=-1
aPara_in['dValue_upper']=5
pParameter = swatpara(aPara_in)
aParameter.append(pParameter)
aParemeter_soil = np.array(['sol_k','sol_awc','sol_alb','sol_bd'])
nParameter_soil = len(aParemeter_soil)
for j in np.arange(1, nParameter_soil+1):
aPara_in['iParameter_type'] = 4
aPara_in['lIndex_soil_layer'] = j
aPara_in['sName']= aParemeter_soil[j-1]
aPara_in['dValue_init']=0.0
aPara_in['dValue_current']=0.01* j +0.01
aPara_in['dValue_lower']=-1
aPara_in['dValue_upper']=5
pParameter = swatpara(aPara_in)
aParameter.append(pParameter)
#the desired output workspace
sWorkspace_output = '/global/cscratch1/sd/liao313/04model/swat/arw/simulation'
#where is the swat binary is stored
sPath_bin = str(Path(sPath) / 'bin')
sFilename_configuration_in = sPath + '/tests/configurations/template.json'
oSwat = swaty_read_model_configuration_file(sFilename_configuration_in, \
iFlag_read_discretization_in=1,\
iFlag_standalone_in=iFlag_standalone,\
iCase_index_in=iCase_index,\
sDate_in=sDate, \
sWorkspace_input_in=sWorkspace_input, \
sWorkspace_output_in=sWorkspace_output)
#can change some members
print(oSwat.tojson())
#oSwat.extract_default_parameter_value(aParameter)
oSwat.generate_parameter_bounds()
print('Finished') | [
"logging.basicConfig",
"argparse.ArgumentParser",
"pathlib.Path",
"swaty.swaty_read_model_configuration_file.swaty_read_model_configuration_file",
"logging.warning",
"swaty.classes.swatpara.swatpara",
"numpy.array",
"logging.root.removeHandler",
"numpy.arange"
] | [((200, 253), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s %(message)s"""'}), "(format='%(asctime)s %(message)s')\n", (219, 253), False, 'import logging\n'), ((254, 310), 'logging.warning', 'logging.warning', (['"""is the time swaty simulation started."""'], {}), "('is the time swaty simulation started.')\n", (269, 310), False, 'import logging\n'), ((522, 547), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (545, 547), False, 'import argparse\n'), ((1011, 1070), 'numpy.array', 'np.array', (["['esco', 'ai0', 'sftmp', 'smtmp', 'timp', 'epco']"], {}), "(['esco', 'ai0', 'sftmp', 'smtmp', 'timp', 'epco'])\n", (1019, 1070), True, 'import numpy as np\n'), ((1127, 1165), 'numpy.arange', 'np.arange', (['(1)', '(nParameter_watershed + 1)'], {}), '(1, nParameter_watershed + 1)\n', (1136, 1165), True, 'import numpy as np\n'), ((1525, 1571), 'numpy.array', 'np.array', (["['ch_n2', 'ch_k2', 'plaps', 'tlaps']"], {}), "(['ch_n2', 'ch_k2', 'plaps', 'tlaps'])\n", (1533, 1571), True, 'import numpy as np\n'), ((1625, 1662), 'numpy.arange', 'np.arange', (['(1)', '(nParameter_subbasin + 1)'], {}), '(1, nParameter_subbasin + 1)\n', (1634, 1662), True, 'import numpy as np\n'), ((2010, 2107), 'numpy.array', 'np.array', (["['cn2', 'rchrg_dp', 'gwqmn', 'gw_revap', 'revapmn', 'gw_delay', 'alpha_bf',\n 'ov_n']"], {}), "(['cn2', 'rchrg_dp', 'gwqmn', 'gw_revap', 'revapmn', 'gw_delay',\n 'alpha_bf', 'ov_n'])\n", (2018, 2107), True, 'import numpy as np\n'), ((2143, 2175), 'numpy.arange', 'np.arange', (['(1)', '(nParameter_hru + 1)'], {}), '(1, nParameter_hru + 1)\n', (2152, 2175), True, 'import numpy as np\n'), ((2516, 2567), 'numpy.array', 'np.array', (["['sol_k', 'sol_awc', 'sol_alb', 'sol_bd']"], {}), "(['sol_k', 'sol_awc', 'sol_alb', 'sol_bd'])\n", (2524, 2567), True, 'import numpy as np\n'), ((2613, 2646), 'numpy.arange', 'np.arange', (['(1)', '(nParameter_soil + 1)'], {}), '(1, nParameter_soil + 1)\n', (2622, 2646), True, 'import numpy as np\n'), ((3246, 3514), 'swaty.swaty_read_model_configuration_file.swaty_read_model_configuration_file', 'swaty_read_model_configuration_file', (['sFilename_configuration_in'], {'iFlag_read_discretization_in': '(1)', 'iFlag_standalone_in': 'iFlag_standalone', 'iCase_index_in': 'iCase_index', 'sDate_in': 'sDate', 'sWorkspace_input_in': 'sWorkspace_input', 'sWorkspace_output_in': 'sWorkspace_output'}), '(sFilename_configuration_in,\n iFlag_read_discretization_in=1, iFlag_standalone_in=iFlag_standalone,\n iCase_index_in=iCase_index, sDate_in=sDate, sWorkspace_input_in=\n sWorkspace_input, sWorkspace_output_in=sWorkspace_output)\n', (3281, 3514), False, 'from swaty.swaty_read_model_configuration_file import swaty_read_model_configuration_file\n'), ((163, 198), 'logging.root.removeHandler', 'logging.root.removeHandler', (['handler'], {}), '(handler)\n', (189, 198), False, 'import logging\n'), ((1443, 1461), 'swaty.classes.swatpara.swatpara', 'swatpara', (['aPara_in'], {}), '(aPara_in)\n', (1451, 1461), False, 'from swaty.classes.swatpara import swatpara\n'), ((1939, 1957), 'swaty.classes.swatpara.swatpara', 'swatpara', (['aPara_in'], {}), '(aPara_in)\n', (1947, 1957), False, 'from swaty.classes.swatpara import swatpara\n'), ((2442, 2460), 'swaty.classes.swatpara.swatpara', 'swatpara', (['aPara_in'], {}), '(aPara_in)\n', (2450, 2460), False, 'from swaty.classes.swatpara import swatpara\n'), ((2921, 2939), 'swaty.classes.swatpara.swatpara', 'swatpara', (['aPara_in'], {}), '(aPara_in)\n', (2929, 2939), False, 'from 
swaty.classes.swatpara import swatpara\n'), ((814, 835), 'pathlib.Path', 'Path', (['sWorkspace_data'], {}), '(sWorkspace_data)\n', (818, 835), False, 'from pathlib import Path\n'), ((3138, 3149), 'pathlib.Path', 'Path', (['sPath'], {}), '(sPath)\n', (3142, 3149), False, 'from pathlib import Path\n'), ((626, 632), 'pathlib.Path', 'Path', ([], {}), '()\n', (630, 632), False, 'from pathlib import Path\n')] |
import random
import re
import json
from datetime import datetime, timedelta, timezone
from django.urls import reverse
from django.shortcuts import render, redirect, get_object_or_404, render_to_response
from django.http import HttpResponse, HttpResponseRedirect, Http404, JsonResponse
from django.views import View
from django.views.generic.detail import DetailView
from django.conf import settings
from django.utils import timezone
from django.utils.datastructures import MultiValueDictKeyError
from django.core.mail import send_mail
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.contrib.auth.models import User, AnonymousUser
from django.contrib.auth.decorators import login_required
from django.contrib.auth import authenticate, login, logout
from django.conf import settings
from .forms import FeadbackForm, ReviewForm#, FeadbackForm1
from .models import Portfolio, Review, ServiceCategory, Service, DetailedService#, Client, Feadback
from dgcrm.models import Client, Feadback
from viberbot import Api
from viberbot.api.bot_configuration import BotConfiguration
bot_configuration = BotConfiguration(
name='PythonSampleBot',
avatar='http://localhost:8000/avatar.jpg',
auth_token='<PASSWORD>'
)
viber = Api(bot_configuration)
class Main(View):
def __init__(self):
View.__init__(self)
portfolio = Portfolio.objects.all()
reviews = Review.objects.all().order_by("-id")
service_categories = ServiceCategory.objects.all().order_by("-id")
services = Service.objects.all()
self.context = {"portfolio": portfolio,
"reviews": reviews[:3],
"reviews_num": reviews.count(),
"service_categories": service_categories,
"services": services,
}
if service_categories.count() > 0:
col_size = 12//service_categories.count()
self.context["col_size"] = "s%s"%col_size
self.num_photo = 3
def make_pagination(self, request, some_list=None, num_items=2):
paginator = Paginator(some_list, num_items)
request = getattr(request, request.method)
page = request.get('page')
pages_list = [p for p in paginator.page_range]
print(pages_list)
try:
pags = paginator.page(page)
except PageNotAnInteger:
# If page is not an integer, deliver first page.
pags = paginator.page(1)
except EmptyPage:
# If page is out of range (e.g. 9999), deliver last page of results.
            pags = paginator.page(paginator.num_pages)
return pags, pages_list
def get(self, request):
self.context['portfolio_pags'], self.context['portfolio_pages_list'] = Main.make_pagination(self, request, self.context["portfolio"], self.num_photo)
self.context['feadback_form'] = FeadbackForm()
self.context['review_form'] = ReviewForm()
return render(request, "main.html", self.context)
def post(self, request):
feadback_form = FeadbackForm(request.POST)
review_form = ReviewForm(request.POST)
# check whether it's valid:
try:
submit_flag = request.POST["submit"]
if submit_flag == "feadback" and feadback_form.is_valid():
# first = feadback_form.cleaned_data['first']
# last = feadback_form.cleaned_data['last']
# tel = feadback_form.cleaned_data['tel']
# wish = feadback_form.cleaned_data['wish']
# email = feadback_form.cleaned_data['email']
# form.save()
feadback_data = feadback_form.cleaned_data
# return HttpResponse(feadback_data, client_data)
Feadback.objects.create(**feadback_data)
# print(form.cleaned_data.values())
# send_mail(
            #     'New appointment',
            #     'Client: %s %s\nPhone number: %s\nInformation: %s\nEmail: %s\n'%feadback_data.values(),
# settings.EMAIL_HOST_USER,
# ['<EMAIL>', '<EMAIL>'],
# fail_silently=False,
# )
return HttpResponse('/feadback_form/')
elif submit_flag == "review" and review_form.is_valid():
Review.objects.create(**review_form.cleaned_data)
return HttpResponse('/review_form/')
except MultiValueDictKeyError:
pass
if request.is_ajax():
portfolio_pags, portfolio_pages_list = Main.make_pagination(self, request, self.context["portfolio"], self.num_photo)
return render(request, "portfolio.html", {"portfolio_pags": portfolio_pags, "portfolio_pages_list": portfolio_pages_list})
# return render_to_response("portfolio.html", {"portfolio_pags": portfolio_pags})
return HttpResponse(review_form)
def more_reviews(request):
if request.is_ajax():
num = request.POST['num']
print(num)
reviews = Review.objects.all().order_by("-id")[:int(num)]
return render(request, "reviews.html", {"reviews": reviews})
else:
return Http404
class Price(View):
def __init__(self):
View.__init__(self)
service_categories = ServiceCategory.objects.all().order_by("-id")
self.context = {"service_categories": service_categories,
}
Price.cat_size(self, service_categories)
def cat_size(self, service_categories_quary):
if service_categories_quary.count() > 0:
cat_size = 100//service_categories_quary.count()
self.context["cat_size"] = "%s"%cat_size
def serv_size(self, services_quary, service_id):
if services_quary.count() > 0:
serv_size = 100//services_quary.count()
self.context["serv_size"] = "%s"%serv_size
def get(self, request):
category_id = request.GET.get("category")
service_id = request.GET.get("service")
self.context['services'] = Service.objects.filter(service_category=category_id)
Price.serv_size(self, self.context['services'], service_id)
if category_id:
self.context['category_id'] = int(category_id)
if self.context['services'].count() > 0:
if service_id == None:
service_id = self.context['services'][0].id
self.context['service_id'] = int(service_id)
self.context['detailed_services'] = DetailedService.objects.filter(service=service_id)
return render(request, "price.html", self.context)
def post(self, request):
category_id = request.POST.get("cat_id")
service_id = request.POST.get("serv_id")
print(category_id,service_id)
if request.is_ajax() and category_id:
self.context['services'] = Service.objects.filter(service_category=category_id)
self.context['category_id'] = int(category_id)
if self.context['services'].count() > 0:
if service_id == None:
service_id = self.context['services'][0].id
self.context['service_id'] = int(service_id)
print(service_id)
self.context['detailed_services'] = DetailedService.objects.filter(service=service_id)
print(self.context['detailed_services'])
Price.serv_size(self, self.context['services'], service_id)
return render(request, "services_block.html", self.context)
if request.is_ajax() and service_id:
self.context['detailed_services'] = DetailedService.objects.filter(service=service_id)
self.context['service_id'] = int(service_id)
return render(request, "detailed_services_block.html", self.context)
else:
self.context['detailed_services'] = []
self.context['service_id'] = int(0)
return render(request, "detailed_services_block.html", self.context)
# def ajax_price(request):
# print(request.POST["cat_id"])
# category_id = request.POST["cat_id"]
# if request.is_ajax() and request.POST["cat_id"]:
# services = Service.objects.filter(service_category=int(request.POST["cat_id"]))
# if services.count() > 0:
# serv_size = 100//services.count()
# return render(request, "services_block.html", {"services": services, "serv_size": serv_size, "category_id": cat_id})
| [
"django.shortcuts.render",
"dgcrm.models.Feadback.objects.create",
"django.http.HttpResponse",
"viberbot.Api",
"django.views.View.__init__",
"viberbot.api.bot_configuration.BotConfiguration",
"django.core.paginator.Paginator"
] | [((1131, 1244), 'viberbot.api.bot_configuration.BotConfiguration', 'BotConfiguration', ([], {'name': '"""PythonSampleBot"""', 'avatar': '"""http://localhost:8000/avatar.jpg"""', 'auth_token': '"""<PASSWORD>"""'}), "(name='PythonSampleBot', avatar=\n 'http://localhost:8000/avatar.jpg', auth_token='<PASSWORD>')\n", (1147, 1244), False, 'from viberbot.api.bot_configuration import BotConfiguration\n'), ((1262, 1284), 'viberbot.Api', 'Api', (['bot_configuration'], {}), '(bot_configuration)\n', (1265, 1284), False, 'from viberbot import Api\n'), ((1338, 1357), 'django.views.View.__init__', 'View.__init__', (['self'], {}), '(self)\n', (1351, 1357), False, 'from django.views import View\n'), ((2135, 2166), 'django.core.paginator.Paginator', 'Paginator', (['some_list', 'num_items'], {}), '(some_list, num_items)\n', (2144, 2166), False, 'from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\n'), ((3030, 3072), 'django.shortcuts.render', 'render', (['request', '"""main.html"""', 'self.context'], {}), "(request, 'main.html', self.context)\n", (3036, 3072), False, 'from django.shortcuts import render, redirect, get_object_or_404, render_to_response\n'), ((5013, 5038), 'django.http.HttpResponse', 'HttpResponse', (['review_form'], {}), '(review_form)\n', (5025, 5038), False, 'from django.http import HttpResponse, HttpResponseRedirect, Http404, JsonResponse\n'), ((5227, 5280), 'django.shortcuts.render', 'render', (['request', '"""reviews.html"""', "{'reviews': reviews}"], {}), "(request, 'reviews.html', {'reviews': reviews})\n", (5233, 5280), False, 'from django.shortcuts import render, redirect, get_object_or_404, render_to_response\n'), ((5372, 5391), 'django.views.View.__init__', 'View.__init__', (['self'], {}), '(self)\n', (5385, 5391), False, 'from django.views import View\n'), ((6717, 6760), 'django.shortcuts.render', 'render', (['request', '"""price.html"""', 'self.context'], {}), "(request, 'price.html', self.context)\n", (6723, 6760), False, 'from django.shortcuts import render, redirect, get_object_or_404, render_to_response\n'), ((4774, 4893), 'django.shortcuts.render', 'render', (['request', '"""portfolio.html"""', "{'portfolio_pags': portfolio_pags, 'portfolio_pages_list': portfolio_pages_list\n }"], {}), "(request, 'portfolio.html', {'portfolio_pags': portfolio_pags,\n 'portfolio_pages_list': portfolio_pages_list})\n", (4780, 4893), False, 'from django.shortcuts import render, redirect, get_object_or_404, render_to_response\n'), ((7652, 7704), 'django.shortcuts.render', 'render', (['request', '"""services_block.html"""', 'self.context'], {}), "(request, 'services_block.html', self.context)\n", (7658, 7704), False, 'from django.shortcuts import render, redirect, get_object_or_404, render_to_response\n'), ((7928, 7989), 'django.shortcuts.render', 'render', (['request', '"""detailed_services_block.html"""', 'self.context'], {}), "(request, 'detailed_services_block.html', self.context)\n", (7934, 7989), False, 'from django.shortcuts import render, redirect, get_object_or_404, render_to_response\n'), ((8124, 8185), 'django.shortcuts.render', 'render', (['request', '"""detailed_services_block.html"""', 'self.context'], {}), "(request, 'detailed_services_block.html', self.context)\n", (8130, 8185), False, 'from django.shortcuts import render, redirect, get_object_or_404, render_to_response\n'), ((3864, 3904), 'dgcrm.models.Feadback.objects.create', 'Feadback.objects.create', ([], {}), '(**feadback_data)\n', (3887, 3904), False, 'from dgcrm.models import Client, Feadback\n'), 
((4316, 4347), 'django.http.HttpResponse', 'HttpResponse', (['"""/feadback_form/"""'], {}), "('/feadback_form/')\n", (4328, 4347), False, 'from django.http import HttpResponse, HttpResponseRedirect, Http404, JsonResponse\n'), ((4507, 4536), 'django.http.HttpResponse', 'HttpResponse', (['"""/review_form/"""'], {}), "('/review_form/')\n", (4519, 4536), False, 'from django.http import HttpResponse, HttpResponseRedirect, Http404, JsonResponse\n')] |
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
from pants.fs.archive import ZIP
from pants.util.contextutil import temporary_dir
def contains_exact_files(directory, expected_files, ignore_links=False):
"""Check if the only files which directory contains are expected_files.
:param str directory: Path to directory to search.
:param set expected_files: Set of filepaths relative to directory to search for.
:param bool ignore_links: Indicates to ignore any file links.
"""
found = []
for root, _, files in os.walk(directory, followlinks=not ignore_links):
for f in files:
p = os.path.join(root, f)
if ignore_links and os.path.islink(p):
continue
found.append(os.path.relpath(p, directory))
return sorted(expected_files) == sorted(found)
def check_file_content(path, expected_content):
"""Check file has expected content.
:param str path: Path to file.
:param str expected_content: Expected file content.
"""
with open(path) as input:
return expected_content == input.read()
def check_symlinks(directory, symlinks=True):
"""Check files under directory are symlinks.
:param str directory: Path to directory to search.
:param bool symlinks: If true, verify files are symlinks, if false, verify files are actual files.
"""
for root, _, files in os.walk(directory):
for f in files:
p = os.path.join(root, f)
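      # XOR is true when the file's link status differs from what `symlinks` expects, so the check fails.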
if symlinks ^ os.path.islink(p):
return False
return True
def check_zip_file_content(zip_file, expected_files):
"""Check zip file contains expected files as well as verify their contents are as expected.
:param zip_file: Path to the zip file.
:param expected_files: A map from file path included in the zip to its content. Set content
to `None` to skip checking.
:return:
"""
with temporary_dir() as workdir:
ZIP.extract(zip_file, workdir)
if not contains_exact_files(workdir, expected_files.keys()):
return False
for rel_path in expected_files:
path = os.path.join(workdir, rel_path)
if expected_files[rel_path] and not check_file_content(path, expected_files[rel_path]):
return False
return True
| [
"pants.fs.archive.ZIP.extract",
"os.path.islink",
"os.path.join",
"pants.util.contextutil.temporary_dir",
"os.walk",
"os.path.relpath"
] | [((786, 834), 'os.walk', 'os.walk', (['directory'], {'followlinks': '(not ignore_links)'}), '(directory, followlinks=not ignore_links)\n', (793, 834), False, 'import os\n'), ((1584, 1602), 'os.walk', 'os.walk', (['directory'], {}), '(directory)\n', (1591, 1602), False, 'import os\n'), ((2072, 2087), 'pants.util.contextutil.temporary_dir', 'temporary_dir', ([], {}), '()\n', (2085, 2087), False, 'from pants.util.contextutil import temporary_dir\n'), ((2104, 2134), 'pants.fs.archive.ZIP.extract', 'ZIP.extract', (['zip_file', 'workdir'], {}), '(zip_file, workdir)\n', (2115, 2134), False, 'from pants.fs.archive import ZIP\n'), ((866, 887), 'os.path.join', 'os.path.join', (['root', 'f'], {}), '(root, f)\n', (878, 887), False, 'import os\n'), ((1634, 1655), 'os.path.join', 'os.path.join', (['root', 'f'], {}), '(root, f)\n', (1646, 1655), False, 'import os\n'), ((2269, 2300), 'os.path.join', 'os.path.join', (['workdir', 'rel_path'], {}), '(workdir, rel_path)\n', (2281, 2300), False, 'import os\n'), ((914, 931), 'os.path.islink', 'os.path.islink', (['p'], {}), '(p)\n', (928, 931), False, 'import os\n'), ((969, 998), 'os.path.relpath', 'os.path.relpath', (['p', 'directory'], {}), '(p, directory)\n', (984, 998), False, 'import os\n'), ((1676, 1693), 'os.path.islink', 'os.path.islink', (['p'], {}), '(p)\n', (1690, 1693), False, 'import os\n')] |
import xml.etree.ElementTree as et
from svg.path import parse_path
from sympathor.path import SymbolicPath
import os.path
import re
class ParsePaths():
def __init__(self, input):
raw_input = True
file_input = False
# check if input is valid file
if os.path.exists(input):
file_input = True
try:
xml = et.parse(input)
raw_input = False
except et.ParseError:
# raise ValueError("The file referred to does not seem to be in XML format.") from exc
pass
if not raw_input:
root = xml.getroot()
# check if file is an SVG
if root.tag == '{http://www.w3.org/2000/svg}svg':
xml_paths = root.findall(".//{http://www.w3.org/2000/svg}path")
self.__parse_paths_from_xml(xml_paths=xml_paths)
# or try to parse path element
elif root.tag == 'path':
self.__parse_paths_from_xml(xml_paths=[root])
else:
raise ValueError("The content of the file referred to could not be interpreted.")
# check if input is a path element
if raw_input:
if file_input:
input_ = open(input).read()
else:
input_ = input
xml_paths = self.__xml_from_raw(input_)
self.__parse_paths_from_xml(xml_paths=xml_paths)
def __xml_from_raw(self, input_):
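        # Wrap the raw path data in a minimal SVG document so it can be parsed as regular XML.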
input = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>" \
+ "<svg xmlns=\"http://www.w3.org/2000/svg\"><path d=\"" + input_ + "\" /></svg>"
try:
root = et.fromstring(input)
except et.ParseError as exc:
raise ValueError("The input provided could not be interpreted.") from exc
return root.findall(".//{http://www.w3.org/2000/svg}path")
def __parse_paths_from_xml(self, xml_paths):
self.paths = []
for xml in xml_paths:
desc = parse_path(xml.get('d'))
path = SymbolicPath(desc)
transform = self.__parse_transform_from_xml(xml)
for t in reversed(transform):
getattr(path, t['type'])(**t['kwargs'])
self.paths.append(path)
def __parse_transform_from_xml(self, xml):
str = xml.get('transform')
res = []
if not str:
return []
for match in re.finditer("([a-zA-Z]*)\\((.*?)\\)", str):
transform = match.groups()[0]
argstr = match.groups()[1]
if transform == 'matrix':
vals = re.findall("(-?\\d+\\.?\\d*)", argstr)
res.append({
'type': 'matrix',
'kwargs': {
'a': float(vals[0]), 'b': float(vals[1]), 'c': float(vals[2]),
'd': float(vals[3]), 'e': float(vals[4]), 'f': float(vals[5])
}
})
elif transform == 'translate':
vals = re.findall("(-?\\d+\\.?\\d*)", argstr)
if len(vals) == 2:
res.append({'type': 'translate', 'kwargs': {'dx': float(vals[0]), 'dy': float(vals[1])}})
else:
res.append({'type': 'translate', 'kwargs': {'dx': float(vals[0])}})
elif transform == 'rotate':
vals = re.findall("(-?\\d+\\.?\\d*)", argstr)
if len(vals) == 3:
res.append({
'type': 'rotate',
'kwargs': {'theta': float(vals[0]), 'x': float(vals[1]), 'y': float(vals[2])}
})
else:
res.append({'type': 'rotate', 'kwargs': {'theta': float(vals[0])}})
elif transform == 'scale':
vals = re.findall("(-?\\d+\\.?\\d*)", argstr)
if len(vals) == 2:
res.append({'type': 'scale', 'kwargs': {'x': float(vals[0]), 'y': float(vals[1])}})
else:
res.append({'type': 'scale', 'kwargs': {'x': float(vals[0])}})
elif transform == 'skewX':
vals = re.findall("(-?\\d+\\.?\\d*)", argstr)
res.append({'type': 'skewX', 'kwargs': {'theta': float(vals[0])}})
elif transform == 'skewY':
vals = re.findall("(-?\\d+\\.?\\d*)", argstr)
res.append({'type': 'skewY', 'kwargs': {'theta': float(vals[0])}})
else:
raise NotImplementedError
return res
def __getitem__(self, index):
return self.paths[index]
def __len__(self):
return len(self.paths)
| [
"sympathor.path.SymbolicPath",
"xml.etree.ElementTree.fromstring",
"xml.etree.ElementTree.parse",
"re.finditer",
"re.findall"
] | [((2482, 2524), 're.finditer', 're.finditer', (['"""([a-zA-Z]*)\\\\((.*?)\\\\)"""', 'str'], {}), "('([a-zA-Z]*)\\\\((.*?)\\\\)', str)\n", (2493, 2524), False, 'import re\n'), ((1725, 1745), 'xml.etree.ElementTree.fromstring', 'et.fromstring', (['input'], {}), '(input)\n', (1738, 1745), True, 'import xml.etree.ElementTree as et\n'), ((2104, 2122), 'sympathor.path.SymbolicPath', 'SymbolicPath', (['desc'], {}), '(desc)\n', (2116, 2122), False, 'from sympathor.path import SymbolicPath\n'), ((379, 394), 'xml.etree.ElementTree.parse', 'et.parse', (['input'], {}), '(input)\n', (387, 394), True, 'import xml.etree.ElementTree as et\n'), ((2669, 2707), 're.findall', 're.findall', (['"""(-?\\\\d+\\\\.?\\\\d*)"""', 'argstr'], {}), "('(-?\\\\d+\\\\.?\\\\d*)', argstr)\n", (2679, 2707), False, 'import re\n'), ((3088, 3126), 're.findall', 're.findall', (['"""(-?\\\\d+\\\\.?\\\\d*)"""', 'argstr'], {}), "('(-?\\\\d+\\\\.?\\\\d*)', argstr)\n", (3098, 3126), False, 'import re\n'), ((3446, 3484), 're.findall', 're.findall', (['"""(-?\\\\d+\\\\.?\\\\d*)"""', 'argstr'], {}), "('(-?\\\\d+\\\\.?\\\\d*)', argstr)\n", (3456, 3484), False, 'import re\n'), ((3893, 3931), 're.findall', 're.findall', (['"""(-?\\\\d+\\\\.?\\\\d*)"""', 'argstr'], {}), "('(-?\\\\d+\\\\.?\\\\d*)', argstr)\n", (3903, 3931), False, 'import re\n'), ((4239, 4277), 're.findall', 're.findall', (['"""(-?\\\\d+\\\\.?\\\\d*)"""', 'argstr'], {}), "('(-?\\\\d+\\\\.?\\\\d*)', argstr)\n", (4249, 4277), False, 'import re\n'), ((4424, 4462), 're.findall', 're.findall', (['"""(-?\\\\d+\\\\.?\\\\d*)"""', 'argstr'], {}), "('(-?\\\\d+\\\\.?\\\\d*)', argstr)\n", (4434, 4462), False, 'import re\n')] |
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.offsetbox import AnchoredOffsetbox, TextArea, HPacker, VPacker
years = 501
eaten = np.loadtxt(f'./data/eaten-{years}.txt')
cattle = eaten[:,0]
sheep = eaten[:,1]
print(cattle.shape)
print(sheep.shape)
# kg per animal
MASS = [
[753, 87.5, 3.94625],
[0, 0, 0],
[0, 0, 0]
]
# calories/kg
# all energy values are computed in millions of calories
ENERGY_PER_MASS = np.array([
[1250000, 1180000, 1020000],
[0, 0, 0],
[0, 0, 0]
]) / 1e6
# animals per km^2
DENSITY = [
[3.4130, 9.4514, 0],
[0, 0, 0],
[0, 0, 0]
]
fig, axes = plt.subplots(
nrows=1, ncols=1,
figsize=(12, 8)
)
axes.set_xlabel('Age/year')
# axes.set_ylabel('Energy/million calories', color='g')
axes.set_title('Energy expenditures of a dragon and number of preys eaten in two days changes with age')
axes.plot(np.arange(0, years), eaten[:,0] * ENERGY_PER_MASS[0][0] * MASS[0][0] + eaten[:,1] * ENERGY_PER_MASS[0][1] * MASS[0][1], 'r')
ybox1 = TextArea("Energy/million calories", textprops=dict(color="r", size=15,rotation=90,ha='left',va='bottom'))
ybox = VPacker(children=[ybox1],align="bottom", pad=0, sep=5)
anchored_ybox = AnchoredOffsetbox(loc=8, child=ybox, pad=0., frameon=False, bbox_to_anchor=(-0.08, 0.4),
bbox_transform=axes.transAxes, borderpad=0.)
axes.add_artist(anchored_ybox)
# Draw preys
ax_number = axes.twinx()
# ax_number.set_ylabel(r'Number of cattles \n \{textcolor}{}', color='g')
ax_number.plot(np.arange(0, years), eaten[:,1], label='Sheep', color='b')
ax_number.plot(np.arange(0, years), eaten[:,0], label='Cattle', color='g')
ax_number.legend(loc='upper left')
ybox2 = TextArea("Number of cattles", textprops=dict(color="g", size=15,rotation=90,ha='left',va='bottom'))
ybox = VPacker(children=[ybox2],align="bottom", pad=0, sep=5)
anchored_ybox = AnchoredOffsetbox(loc=8, child=ybox, pad=0., frameon=False, bbox_to_anchor=(1.10, 0.4),
bbox_transform=axes.transAxes, borderpad=0.)
ax_number.add_artist(anchored_ybox)
ax_number.set_ylim(0, 600)
ybox2 = TextArea("Number of sheep", textprops=dict(color="b", size=15,rotation=90,ha='left',va='bottom'))
ybox = VPacker(children=[ybox2],align="bottom", pad=0, sep=5)
anchored_ybox_sheep = AnchoredOffsetbox(loc=8, child=ybox, pad=0., frameon=False, bbox_to_anchor=(1.08, 0.4),
bbox_transform=axes.transAxes, borderpad=0.)
ax_number.add_artist(anchored_ybox_sheep)
# plt.show()
import save_fig as sf
sf.save_to_file(f'Prey intake-age={years}') | [
"matplotlib.offsetbox.AnchoredOffsetbox",
"matplotlib.offsetbox.VPacker",
"numpy.array",
"save_fig.save_to_file",
"numpy.loadtxt",
"matplotlib.pyplot.subplots",
"numpy.arange"
] | [((151, 190), 'numpy.loadtxt', 'np.loadtxt', (['f"""./data/eaten-{years}.txt"""'], {}), "(f'./data/eaten-{years}.txt')\n", (161, 190), True, 'import numpy as np\n'), ((570, 617), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(1)', 'ncols': '(1)', 'figsize': '(12, 8)'}), '(nrows=1, ncols=1, figsize=(12, 8))\n', (582, 617), True, 'import matplotlib.pyplot as plt\n'), ((1077, 1132), 'matplotlib.offsetbox.VPacker', 'VPacker', ([], {'children': '[ybox1]', 'align': '"""bottom"""', 'pad': '(0)', 'sep': '(5)'}), "(children=[ybox1], align='bottom', pad=0, sep=5)\n", (1084, 1132), False, 'from matplotlib.offsetbox import AnchoredOffsetbox, TextArea, HPacker, VPacker\n'), ((1149, 1289), 'matplotlib.offsetbox.AnchoredOffsetbox', 'AnchoredOffsetbox', ([], {'loc': '(8)', 'child': 'ybox', 'pad': '(0.0)', 'frameon': '(False)', 'bbox_to_anchor': '(-0.08, 0.4)', 'bbox_transform': 'axes.transAxes', 'borderpad': '(0.0)'}), '(loc=8, child=ybox, pad=0.0, frameon=False, bbox_to_anchor\n =(-0.08, 0.4), bbox_transform=axes.transAxes, borderpad=0.0)\n', (1166, 1289), False, 'from matplotlib.offsetbox import AnchoredOffsetbox, TextArea, HPacker, VPacker\n'), ((1764, 1819), 'matplotlib.offsetbox.VPacker', 'VPacker', ([], {'children': '[ybox2]', 'align': '"""bottom"""', 'pad': '(0)', 'sep': '(5)'}), "(children=[ybox2], align='bottom', pad=0, sep=5)\n", (1771, 1819), False, 'from matplotlib.offsetbox import AnchoredOffsetbox, TextArea, HPacker, VPacker\n'), ((1836, 1974), 'matplotlib.offsetbox.AnchoredOffsetbox', 'AnchoredOffsetbox', ([], {'loc': '(8)', 'child': 'ybox', 'pad': '(0.0)', 'frameon': '(False)', 'bbox_to_anchor': '(1.1, 0.4)', 'bbox_transform': 'axes.transAxes', 'borderpad': '(0.0)'}), '(loc=8, child=ybox, pad=0.0, frameon=False, bbox_to_anchor\n =(1.1, 0.4), bbox_transform=axes.transAxes, borderpad=0.0)\n', (1853, 1974), False, 'from matplotlib.offsetbox import AnchoredOffsetbox, TextArea, HPacker, VPacker\n'), ((2181, 2236), 'matplotlib.offsetbox.VPacker', 'VPacker', ([], {'children': '[ybox2]', 'align': '"""bottom"""', 'pad': '(0)', 'sep': '(5)'}), "(children=[ybox2], align='bottom', pad=0, sep=5)\n", (2188, 2236), False, 'from matplotlib.offsetbox import AnchoredOffsetbox, TextArea, HPacker, VPacker\n'), ((2259, 2398), 'matplotlib.offsetbox.AnchoredOffsetbox', 'AnchoredOffsetbox', ([], {'loc': '(8)', 'child': 'ybox', 'pad': '(0.0)', 'frameon': '(False)', 'bbox_to_anchor': '(1.08, 0.4)', 'bbox_transform': 'axes.transAxes', 'borderpad': '(0.0)'}), '(loc=8, child=ybox, pad=0.0, frameon=False, bbox_to_anchor\n =(1.08, 0.4), bbox_transform=axes.transAxes, borderpad=0.0)\n', (2276, 2398), False, 'from matplotlib.offsetbox import AnchoredOffsetbox, TextArea, HPacker, VPacker\n'), ((2505, 2548), 'save_fig.save_to_file', 'sf.save_to_file', (['f"""Prey intake-age={years}"""'], {}), "(f'Prey intake-age={years}')\n", (2520, 2548), True, 'import save_fig as sf\n'), ((396, 457), 'numpy.array', 'np.array', (['[[1250000, 1180000, 1020000], [0, 0, 0], [0, 0, 0]]'], {}), '([[1250000, 1180000, 1020000], [0, 0, 0], [0, 0, 0]])\n', (404, 457), True, 'import numpy as np\n'), ((827, 846), 'numpy.arange', 'np.arange', (['(0)', 'years'], {}), '(0, years)\n', (836, 846), True, 'import numpy as np\n'), ((1479, 1498), 'numpy.arange', 'np.arange', (['(0)', 'years'], {}), '(0, years)\n', (1488, 1498), True, 'import numpy as np\n'), ((1553, 1572), 'numpy.arange', 'np.arange', (['(0)', 'years'], {}), '(0, years)\n', (1562, 1572), True, 'import numpy as np\n')] |
import re
import sys
from collections import defaultdict
from itertools import permutations
def main():
filename = "input.txt"
if len(sys.argv) > 1:
filename = sys.argv[1]
with open(filename) as f:
input_text = f.read().strip()
print(f"Answer: {calculate(input_text)}")
def calculate(input_text):
instructions = []
lights = defaultdict(int)
for line in input_text.splitlines():
onoff, _ = line.split()
if onoff == "on":
onoff = 1
elif onoff == "off":
onoff = 0
else:
raise ValueError(f"Unknown instructions {onoff} in line {line}")
nums = get_all_ints(line)
assert nums[1] >= nums[0]
assert nums[3] >= nums[2]
assert nums[5] >= nums[4]
if (
nums[0] <= 50
and nums[1] >= -50
and nums[2] <= 50
and nums[3] >= -50
and nums[4] <= 50
and nums[5] >= -50
):
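            # Clamp the cuboid to the -50..50 region.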
nums[0] = max(nums[0], -50)
nums[1] = min(nums[1], 50)
nums[2] = max(nums[2], -50)
nums[3] = min(nums[3], 50)
nums[4] = max(nums[4], -50)
nums[5] = min(nums[5], 50)
instructions.append((onoff, nums))
for onoff, nums in instructions:
for x in range(nums[0], nums[1] + 1):
for y in range(nums[2], nums[3] + 1):
for z in range(nums[4], nums[5] + 1):
lights[x, y, z] = onoff
answer = sum(lights.values())
return answer
def get_all_ints(s):
return [int(i) for i in re.findall(r"(-?\d+)", s)]
example = """\
on x=10..12,y=10..12,z=10..12
on x=11..13,y=11..13,z=11..13
off x=9..11,y=9..11,z=9..11
on x=10..10,y=10..10,z=10..10"""
example_answer = 39
def test_example():
assert calculate(example) == example_answer
if __name__ == "__main__":
exit(main())
| [
"re.findall",
"collections.defaultdict"
] | [((368, 384), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (379, 384), False, 'from collections import defaultdict\n'), ((1609, 1634), 're.findall', 're.findall', (['"""(-?\\\\d+)"""', 's'], {}), "('(-?\\\\d+)', s)\n", (1619, 1634), False, 'import re\n')] |
### This python script discusses the cross-entropy loss function
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.python.framework import ops
ops.reset_default_graph()
# Start a session
sess = tf.Session()
###### Numerical Predictions for x-values######
x_nums = tf.linspace(-1., 1.3, 500) #start, stop, number of points
x_true = tf.constant(0.8)
# Define the graph model of calculate cross entropy loss: Loss = -x_true * (log(x_predict)) - (1-x_true)(log(1-x_predict))
cross_entropy_y = - tf.multiply(x_true, tf.log(x_nums)) - tf.multiply((1. - x_true), tf.log(1. - x_nums))
##obtain the cross_entropy values
cross_entropy_yo = sess.run(cross_entropy_y)
# Plot the cross_entropy loss vs x_vals
x_vals = sess.run(x_nums)
plt.plot(x_vals, cross_entropy_yo, 'b--', label='Cross Entropy Loss')
plt.ylim(-0.5, 4)
plt.xlim(-0.1, 1.1)
plt.grid()
plt.legend(loc='upper center', prop={'size': 14})
plt.show()
| [
"tensorflow.linspace",
"matplotlib.pyplot.grid",
"tensorflow.python.framework.ops.reset_default_graph",
"tensorflow.Session",
"matplotlib.pyplot.plot",
"tensorflow.constant",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.xlim",
"tensorflow.log",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] | [((168, 193), 'tensorflow.python.framework.ops.reset_default_graph', 'ops.reset_default_graph', ([], {}), '()\n', (191, 193), False, 'from tensorflow.python.framework import ops\n'), ((221, 233), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (231, 233), True, 'import tensorflow as tf\n'), ((292, 319), 'tensorflow.linspace', 'tf.linspace', (['(-1.0)', '(1.3)', '(500)'], {}), '(-1.0, 1.3, 500)\n', (303, 319), True, 'import tensorflow as tf\n'), ((359, 375), 'tensorflow.constant', 'tf.constant', (['(0.8)'], {}), '(0.8)\n', (370, 375), True, 'import tensorflow as tf\n'), ((753, 822), 'matplotlib.pyplot.plot', 'plt.plot', (['x_vals', 'cross_entropy_yo', '"""b--"""'], {'label': '"""Cross Entropy Loss"""'}), "(x_vals, cross_entropy_yo, 'b--', label='Cross Entropy Loss')\n", (761, 822), True, 'import matplotlib.pyplot as plt\n'), ((823, 840), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-0.5)', '(4)'], {}), '(-0.5, 4)\n', (831, 840), True, 'import matplotlib.pyplot as plt\n'), ((841, 860), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(-0.1)', '(1.1)'], {}), '(-0.1, 1.1)\n', (849, 860), True, 'import matplotlib.pyplot as plt\n'), ((861, 871), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (869, 871), True, 'import matplotlib.pyplot as plt\n'), ((872, 921), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper center"""', 'prop': "{'size': 14}"}), "(loc='upper center', prop={'size': 14})\n", (882, 921), True, 'import matplotlib.pyplot as plt\n'), ((922, 932), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (930, 932), True, 'import matplotlib.pyplot as plt\n'), ((585, 605), 'tensorflow.log', 'tf.log', (['(1.0 - x_nums)'], {}), '(1.0 - x_nums)\n', (591, 605), True, 'import tensorflow as tf\n'), ((540, 554), 'tensorflow.log', 'tf.log', (['x_nums'], {}), '(x_nums)\n', (546, 554), True, 'import tensorflow as tf\n')] |
import tensorflow as tf
from hamiltonian import Hamiltonian
import itertools
import numpy as np
import scipy
import scipy.sparse.linalg
class IsingJ1J2(Hamiltonian):
"""
This class is used to define Transverse Field Ising Hamiltonian.
Nearest neighbor interaction along z-axis with magnitude J_1,
next nearest neighbor interaction along z-axis with magntitude J_2,
external magnetic field along x-axis with magnitude h
$H_{IJ} = -J_1 \sum_{<i,j>} \sigma^z_i \sigma^z_j -J_2 \sum_{<<i,j>>} \sigma^z_i \sigma^z_j - h \sum_{i} \sigma^x_i $
"""
def __init__(self, graph, j1=1.0, h=1.0, j2=1.0):
"""
Construct an Ising J1-J2 model.
Args:
j1: magnitude of the nearest neighbor interaction along z-axis
h: magnitude of external magnetic field along x-axis
j2: magnitude of the next nearest neighbor interaction along z-axis
"""
Hamiltonian.__init__(self, graph)
self.j1 = j1
self.h = h
self.j2 = j2
def calculate_hamiltonian_matrix(self, samples, num_samples):
"""
Calculate the Hamiltonian matrix $H_{x,x'}$ from a given samples x.
Only non-zero elements are returned.
Args:
samples: The samples
num_samples: number of samples
Return:
            The Hamiltonian where the first column contains the diagonal, which is $-J_1 \sum_{<i,j>} x_i x_j - J_2 \sum_{<<i,j>>} x_i x_j$.
The rest of the column contains the off-diagonal, which is -h for every spin flip.
Therefore, the number of column equals the number of particles + 1 and the number of rows = num_samples
"""
# Diagonal element of the hamiltonian
# $-J \sum_{i,j} x_i x_j$
diagonal = tf.zeros((num_samples,))
for (s, s_2) in self.graph.bonds:
diagonal += -self.j1 * samples[:,s] * samples[:,s_2]
for (s, s_2) in self.graph.bonds_next:
diagonal += -self.j2 * samples[:,s] * samples[:,s_2]
diagonal = tf.reshape(diagonal, (num_samples, 1))
## Off-diagonal element of the hamiltonian
## $-h$ for every spin flip
off_diagonal = tf.fill((num_samples, self.graph.num_points), -self.h)
hamiltonian = tf.concat([diagonal, off_diagonal], axis=1)
return hamiltonian
def calculate_ratio(self, samples, model, num_samples):
"""
Calculate the ratio of \Psi(x') and \Psi(x) from a given x
as log(\Psi(x')) - log(\Psi(x))
\Psi is defined in the model.
However, the Hamiltonian determines which x' gives non-zero.
Args:
samples: the samples x
model: the model used to define \Psi
num_samples: the number of samples
Return:
The ratio where the first column contains \Psi(x) / \Psi(x).
The rest of the column contains the non-zero \Psi(x') / \Psi(x).
In the Ising model, this corresponds x' where exactly one of spin x is flipped.
Therefore, the number of column equals the number of particles + 1 and the number of rows = num_samples
"""
## Calculate log(\Psi(x)) - log(\Psi(x))
lvd = model.log_val_diff(samples, samples)
## Calculate log(\Psi(x')) - log(\Psi(x)) where x' is non-zero when x is flipped at one position.
for pos in range(self.graph.num_points):
## Flip spin as position pos
new_config = tf.identity(samples)
flipped = tf.reshape(new_config[:,pos] * -1, (num_samples, 1))
if pos == 0:
new_config = tf.concat((flipped, samples[:,pos+1:]), axis = 1)
elif pos == self.graph.num_points - 1:
new_config = tf.concat((samples[:, :pos], flipped), axis = 1)
else:
new_config = tf.concat((samples[:, :pos], flipped, samples[:,pos+1:]), axis = 1)
lvd = tf.concat((lvd, model.log_val_diff(new_config, samples)), axis=1)
return lvd
def diagonalize(self):
"""
Diagonalize hamiltonian with exact diagonalization.
Only works for small systems (<= 10)!
"""
num_particles = self.graph.num_points
## Initialize zeroes hamiltonian
H = np.zeros((2 ** num_particles, 2 ** num_particles))
## Calculate self energy
for i in range(num_particles):
togg_vect = np.zeros(num_particles)
togg_vect[i] = 1
temp = 1
for j in togg_vect:
if j == 1:
temp = np.kron(temp, self.SIGMA_X)
else:
temp = np.kron(temp, np.identity(2))
H -= self.h * temp
## Calculate interaction energy
for i, a in self.graph.bonds:
togg_vect = np.zeros(num_particles)
togg_vect[i] = 1
togg_vect[a] = 1
temp = 1
for j in togg_vect:
if j == 1:
temp = np.kron(temp, self.SIGMA_Z)
else:
temp = np.kron(temp, np.identity(2))
H -= self.j1 * temp
## Calculate interaction next nearest energy
for i, a in self.graph.bonds_next:
togg_vect = np.zeros(num_particles)
togg_vect[i] = 1
togg_vect[a] = 1
temp = 1
for j in togg_vect:
if j == 1:
temp = np.kron(temp, self.SIGMA_Z)
else:
temp = np.kron(temp, np.identity(2))
H -= self.j2 * temp
## Calculate the eigen value
self.eigen_values, self.eigen_vectors = np.linalg.eig(H)
self.hamiltonian = H
def diagonalize_sparse(self):
"""
Diagonalize hamiltonian with exact diagonalization with sparse matrix.
Only works for small (<= 20) systems!
"""
num_particles = self.graph.num_points
num_confs = 2 ** num_particles
## Constructing the COO sparse matrix
row_ind = []
col_ind = []
data = []
for row in range(num_confs):
# print row, num_confs
## configuration in binary 0 1
conf_bin = format(row, '#0%db' % (num_particles + 2))
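            ## the '#' flag adds a '0b' prefix that counts toward the width, hence num_particles + 2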
## configuration in binary -1 1
conf = [1 if c == '1' else -1 for c in conf_bin[2:]]
## Diagonal = -J1 \sum SiSj -J2 \sum SiSj
row_ind.append(row)
col_ind.append(row)
total_j1 = 0
for (i,j) in self.graph.bonds:
total_j1 += conf[i] * conf[j]
total_j1 *= -self.j1
total_j2 = 0
for (i,j) in self.graph.bonds_next:
total_j2 += conf[i] * conf[j]
total_j2 *= -self.j2
data.append(total_j1 + total_j2)
## Flip one by one
xor = 1
for ii in range(num_particles):
## flipped the configuration
conf_flipped_bin = format(row ^ xor, '#0%db' % num_particles)
row_ind.append(row)
col_ind.append(row ^ xor)
data.append(-self.h)
# shift left to flip other bit locations
xor = xor << 1
row_ind = np.array(row_ind)
col_ind = np.array(col_ind)
data = np.array(data, dtype=float)
mat_coo = scipy.sparse.coo_matrix((data, (row_ind, col_ind)))
self.eigen_values, self.eigen_vectors = scipy.sparse.linalg.eigs(mat_coo, k=1, which='SR')
self.hamiltonian = mat_coo
def get_name(self):
"""
Get the name of the Hamiltonian
"""
if self.graph.pbc:
bc = 'pbc'
else:
bc = 'obc'
return 'isingj1j2_%dd_%d_%.3f_%.3f_%.3f_%s' % (
self.graph.dimension, self.graph.length, self.h,
self.j1, self.j2, bc)
def __str__(self):
return "Ising J1-J2 %dD, h=%.2f, j1=%.2f, j2=%.2f" % (self.graph.dimension, self.h, self.j1, self.j2)
def to_xml(self):
str = ""
str += "<hamiltonian>\n"
str += "\t<type>ising j1-j2</type>\n"
str += "\t<params>\n"
str += "\t\t<j1>%.2f</j1>\n" % self.j1
str += "\t\t<j2>%.2f</j2>\n" % self.j2
str += "\t\t<h>%.2f</h>\n" % self.h
str += "\t</params>\n"
str += "</hamiltonian>\n"
return str
| [
"numpy.identity",
"numpy.linalg.eig",
"tensorflow.fill",
"hamiltonian.Hamiltonian.__init__",
"numpy.kron",
"tensorflow.concat",
"numpy.zeros",
"numpy.array",
"scipy.sparse.coo_matrix",
"tensorflow.reshape",
"tensorflow.identity",
"scipy.sparse.linalg.eigs",
"tensorflow.zeros"
] | [((952, 985), 'hamiltonian.Hamiltonian.__init__', 'Hamiltonian.__init__', (['self', 'graph'], {}), '(self, graph)\n', (972, 985), False, 'from hamiltonian import Hamiltonian\n'), ((1822, 1846), 'tensorflow.zeros', 'tf.zeros', (['(num_samples,)'], {}), '((num_samples,))\n', (1830, 1846), True, 'import tensorflow as tf\n'), ((2087, 2125), 'tensorflow.reshape', 'tf.reshape', (['diagonal', '(num_samples, 1)'], {}), '(diagonal, (num_samples, 1))\n', (2097, 2125), True, 'import tensorflow as tf\n'), ((2237, 2291), 'tensorflow.fill', 'tf.fill', (['(num_samples, self.graph.num_points)', '(-self.h)'], {}), '((num_samples, self.graph.num_points), -self.h)\n', (2244, 2291), True, 'import tensorflow as tf\n'), ((2314, 2357), 'tensorflow.concat', 'tf.concat', (['[diagonal, off_diagonal]'], {'axis': '(1)'}), '([diagonal, off_diagonal], axis=1)\n', (2323, 2357), True, 'import tensorflow as tf\n'), ((4368, 4418), 'numpy.zeros', 'np.zeros', (['(2 ** num_particles, 2 ** num_particles)'], {}), '((2 ** num_particles, 2 ** num_particles))\n', (4376, 4418), True, 'import numpy as np\n'), ((5784, 5800), 'numpy.linalg.eig', 'np.linalg.eig', (['H'], {}), '(H)\n', (5797, 5800), True, 'import numpy as np\n'), ((7422, 7439), 'numpy.array', 'np.array', (['row_ind'], {}), '(row_ind)\n', (7430, 7439), True, 'import numpy as np\n'), ((7458, 7475), 'numpy.array', 'np.array', (['col_ind'], {}), '(col_ind)\n', (7466, 7475), True, 'import numpy as np\n'), ((7492, 7519), 'numpy.array', 'np.array', (['data'], {'dtype': 'float'}), '(data, dtype=float)\n', (7500, 7519), True, 'import numpy as np\n'), ((7539, 7590), 'scipy.sparse.coo_matrix', 'scipy.sparse.coo_matrix', (['(data, (row_ind, col_ind))'], {}), '((data, (row_ind, col_ind)))\n', (7562, 7590), False, 'import scipy\n'), ((7640, 7690), 'scipy.sparse.linalg.eigs', 'scipy.sparse.linalg.eigs', (['mat_coo'], {'k': '(1)', 'which': '"""SR"""'}), "(mat_coo, k=1, which='SR')\n", (7664, 7690), False, 'import scipy\n'), ((3547, 3567), 'tensorflow.identity', 'tf.identity', (['samples'], {}), '(samples)\n', (3558, 3567), True, 'import tensorflow as tf\n'), ((3590, 3643), 'tensorflow.reshape', 'tf.reshape', (['(new_config[:, pos] * -1)', '(num_samples, 1)'], {}), '(new_config[:, pos] * -1, (num_samples, 1))\n', (3600, 3643), True, 'import tensorflow as tf\n'), ((4516, 4539), 'numpy.zeros', 'np.zeros', (['num_particles'], {}), '(num_particles)\n', (4524, 4539), True, 'import numpy as np\n'), ((4917, 4940), 'numpy.zeros', 'np.zeros', (['num_particles'], {}), '(num_particles)\n', (4925, 4940), True, 'import numpy as np\n'), ((5368, 5391), 'numpy.zeros', 'np.zeros', (['num_particles'], {}), '(num_particles)\n', (5376, 5391), True, 'import numpy as np\n'), ((3697, 3747), 'tensorflow.concat', 'tf.concat', (['(flipped, samples[:, pos + 1:])'], {'axis': '(1)'}), '((flipped, samples[:, pos + 1:]), axis=1)\n', (3706, 3747), True, 'import tensorflow as tf\n'), ((3827, 3873), 'tensorflow.concat', 'tf.concat', (['(samples[:, :pos], flipped)'], {'axis': '(1)'}), '((samples[:, :pos], flipped), axis=1)\n', (3836, 3873), True, 'import tensorflow as tf\n'), ((3923, 3991), 'tensorflow.concat', 'tf.concat', (['(samples[:, :pos], flipped, samples[:, pos + 1:])'], {'axis': '(1)'}), '((samples[:, :pos], flipped, samples[:, pos + 1:]), axis=1)\n', (3932, 3991), True, 'import tensorflow as tf\n'), ((4676, 4703), 'numpy.kron', 'np.kron', (['temp', 'self.SIGMA_X'], {}), '(temp, self.SIGMA_X)\n', (4683, 4703), True, 'import numpy as np\n'), ((5107, 5134), 'numpy.kron', 'np.kron', (['temp', 'self.SIGMA_Z'], 
{}), '(temp, self.SIGMA_Z)\n', (5114, 5134), True, 'import numpy as np\n'), ((5558, 5585), 'numpy.kron', 'np.kron', (['temp', 'self.SIGMA_Z'], {}), '(temp, self.SIGMA_Z)\n', (5565, 5585), True, 'import numpy as np\n'), ((4767, 4781), 'numpy.identity', 'np.identity', (['(2)'], {}), '(2)\n', (4778, 4781), True, 'import numpy as np\n'), ((5198, 5212), 'numpy.identity', 'np.identity', (['(2)'], {}), '(2)\n', (5209, 5212), True, 'import numpy as np\n'), ((5649, 5663), 'numpy.identity', 'np.identity', (['(2)'], {}), '(2)\n', (5660, 5663), True, 'import numpy as np\n')] |
#!/usr/bin/python
import lldb
import re
def pof(debugger, command, result, internal_dict):
usage = "usage: %prog <regex> -- <expression>"
'''This command is meant to be used as po but with a regular expression to filter out lines of output.
'''
lastIndexOfDashDash = command.rfind('--')
regexString = command[:lastIndexOfDashDash].strip(" ")
exprString = command[lastIndexOfDashDash+2:].strip(" ")
print('Filtering out lines that match %(regexString)s from the output of %(exprString)s' % {"regexString" : regexString, "exprString" : exprString})
# re.search("^\[*\w*\s*\w*\]*", command)
result = lldb.SBCommandReturnObject()
commandInterpreter = debugger.GetCommandInterpreter()
commandInterpreter.HandleCommand('expr -O -- %(exprString)s' % {"exprString" : exprString}, result)
# TODO: Check return status from HandleCommand.
# re.search("^foo", searchText, re.M)
regex = re.compile(regexString)
output = result.GetOutput()
lines = output.split('\n')
# filteredOutput = filter(regex.search, lines) #[line for line in lines if regex.search(line)]
# filteredOutput = []
for line in lines:
# print 'Looking for %(regexString)s in "%(line)s"...' % {"regexString" : regexString, "line" : line}
if regex.search(line):
print('%(line)s\n' % {"line" : line})
# return 0
# print filteredOutput
# print 'Output contains %(count)s lines' % {"count" : len(lines)}
# # print output
# for line in lines[:]:
# print line
def __lldb_init_module(debugger, internal_dict):
debugger.HandleCommand('command script add -f pof.pof pof')
print('The "pof" python command has been installed and is ready for use.')
| [
"lldb.SBCommandReturnObject",
"re.compile"
] | [((616, 644), 'lldb.SBCommandReturnObject', 'lldb.SBCommandReturnObject', ([], {}), '()\n', (642, 644), False, 'import lldb\n'), ((902, 925), 're.compile', 're.compile', (['regexString'], {}), '(regexString)\n', (912, 925), False, 'import re\n')] |
import time
import json
import hmac
import base64
import random
import hashlib
import datetime
# Authentication
class Auth:
def utf8(self, data):
return str(data.decode('utf8').replace('=', ''))
# Hash a string
def hash(self, string):
return hashlib.md5(string.encode()).hexdigest()
# Base64 Encode
def encode(self, data):
return self.utf8(base64.b64encode(bytes(str(data), 'utf8')))
# Base64 Decode
def decode(self, data):
try:
return self.utf8(base64.b64decode(data +'=='))
except Exception:
return False
# Signature with HMACSHA256
def sign(self, key, payload):
return self.encode(hmac.new(bytes(key, 'utf8'), bytes(payload, 'utf8'), hashlib.sha256).hexdigest())
# Random String
def random(self, length, type = False):
o = ''
n = '0123456789'
s = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
if type:
s = n
else:
s += n
for i in range(length):
o += random.choice(s)
return str(o)
# Parse token
def parse(self, token):
# Parsing
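		# Token layout: 86-char random prefix + base64(payload) + 86-char signature.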
encoded = token[86:]
decoded = self.decode(encoded[:len(encoded)-86])
if decoded:
try:
return {
'token': token,
'signature': encoded[len(encoded)-86:],
'payload': json.loads(decoded.replace("\'", "\""))
}
except Exception:
return None
# Verify Token
def verify(self, key, payload, signature):
sign = self.sign(key, str(payload))
if 'exp' in payload and payload['exp'] < time.time():
return None
else:
if sign and hmac.compare_digest(signature, sign):
return True
else:
return False
# Token Expiration
def expiration(self, exp = False):
today = datetime.datetime.now()
if 'days' not in exp:
exp['days'] = 0
if 'hours' not in exp:
exp['hours'] = 0
if 'minutes' not in exp:
exp['minutes'] = 0
return time.mktime((today + datetime.timedelta(minutes=exp['minutes'], hours=exp['hours'], days=exp['days'])).timetuple())
# Create Token
def createtoken(self, payload, key):
if 'exp' in payload:
payload['exp'] = self.expiration(payload['exp'])
return self.random(86) + self.encode(str(payload)) + self.sign(key, str(payload))
else:
raise Exception('Missing required field "exp"') | [
"random.choice",
"hmac.compare_digest",
"base64.b64decode",
"datetime.datetime.now",
"datetime.timedelta",
"time.time"
] | [((1911, 1934), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1932, 1934), False, 'import datetime\n'), ((1070, 1086), 'random.choice', 'random.choice', (['s'], {}), '(s)\n', (1083, 1086), False, 'import random\n'), ((538, 567), 'base64.b64decode', 'base64.b64decode', (["(data + '==')"], {}), "(data + '==')\n", (554, 567), False, 'import base64\n'), ((1668, 1679), 'time.time', 'time.time', ([], {}), '()\n', (1677, 1679), False, 'import time\n'), ((1730, 1766), 'hmac.compare_digest', 'hmac.compare_digest', (['signature', 'sign'], {}), '(signature, sign)\n', (1749, 1766), False, 'import hmac\n'), ((2138, 2223), 'datetime.timedelta', 'datetime.timedelta', ([], {'minutes': "exp['minutes']", 'hours': "exp['hours']", 'days': "exp['days']"}), "(minutes=exp['minutes'], hours=exp['hours'], days=exp['days']\n )\n", (2156, 2223), False, 'import datetime\n')] |
from urlextract import URLExtract
import unicodedata
import re
extractor = URLExtract()
class TextFilter:
def __init__(self, skip_word=None, skip_line="", skip_prefix="",
remove_punctuation=True, remove_number=True, remove_link=True):
if skip_word is None:
skip_word = []
self.text = None
self.keyword_skip_word = skip_word
self.keyword_skip_line = skip_line
self.keyword_skip_prefix = skip_prefix
self._remove_punctuation = remove_punctuation
self._remove_number = remove_number
self._remove_link = remove_link
@classmethod
def remove_link(cls, text) -> str:
urls = extractor.find_urls(text)
if len(urls) > 0:
for link in urls:
text = text.replace(link, "[链接]")
return text
@classmethod
def remove_numbers(cls, text) -> str:
nums = re.findall(r'-?\d+\.?\d*', text)
nums = sorted(nums, key=len, reverse=True)
if len(nums) > 0:
for num in nums:
text = text.replace(num, "[数字]")
return text
@classmethod
def skip_word(cls, text, words) -> str:
for word in words:
if type(word) is list:
assert len(word) > 1, "Too few keywords in list."
assert len(word) == 2, "Too many keywords in one skip."
if word[0] in text and word[1] in text:
text = text.split(word[0])[0] + text.split(word[0])[1].split(word[1])[1]
elif type(word) is str:
assert len(word) > 0, "Cannot skip empty string."
if word in text:
segments = text.split(word)
text = ''
for segment in segments:
text += segment
else:
raise Exception('Unsupported skip words.')
return text
def fit_transform(self, text):
cur = unicodedata.normalize('NFKC', text)
        # Drop the text entirely when a configured skip-line keyword is present
        if self.keyword_skip_line != "" and self.keyword_skip_line in cur:
            return ""
if self._remove_link:
cur = self.remove_link(cur)
if self._remove_number:
cur = self.remove_numbers(cur)
if len(self.keyword_skip_word) > 0:
cur = self.skip_word(cur, self.keyword_skip_word)
cur = re.sub(r'\[.*?\]', " ", cur)
if self.keyword_skip_prefix != "" and self.keyword_skip_prefix in cur:
cur = cur.split(self.keyword_skip_prefix)[1]
if self._remove_punctuation:
cur = re.sub(r'''[][【】“”‘’"'、,.。:;@#?!&$/()%~`-―〈〉「」・@+_*=《》^…¥-]+\ *''',
" ", cur, flags=re.VERBOSE)
return cur
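

# Minimal usage sketch: the sample sentence, skip words, and prefix below are
# arbitrary illustrative values, not taken from any real data set.
if __name__ == "__main__":
    tf = TextFilter(skip_word=["广告"], skip_prefix="回复:")
    print(tf.fit_transform("回复:访问 https://example.com 查看 2023 年度报告!"))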
| [
"re.sub",
"re.findall",
"urlextract.URLExtract",
"unicodedata.normalize"
] | [((76, 88), 'urlextract.URLExtract', 'URLExtract', ([], {}), '()\n', (86, 88), False, 'from urlextract import URLExtract\n'), ((912, 946), 're.findall', 're.findall', (['"""-?\\\\d+\\\\.?\\\\d*"""', 'text'], {}), "('-?\\\\d+\\\\.?\\\\d*', text)\n", (922, 946), False, 'import re\n'), ((1976, 2011), 'unicodedata.normalize', 'unicodedata.normalize', (['"""NFKC"""', 'text'], {}), "('NFKC', text)\n", (1997, 2011), False, 'import unicodedata\n'), ((2378, 2407), 're.sub', 're.sub', (['"""\\\\[.*?\\\\]"""', '""" """', 'cur'], {}), "('\\\\[.*?\\\\]', ' ', cur)\n", (2384, 2407), False, 'import re\n'), ((2598, 2694), 're.sub', 're.sub', (['"""[][【】“”‘’"\'、,.。:;@#?!&$/()%~`-―〈〉「」・@+_*=《》^…¥-]+\\\\ *"""', '""" """', 'cur'], {'flags': 're.VERBOSE'}), '(\'[][【】“”‘’"\\\'、,.。:;@#?!&$/()%~`-―〈〉「」・@+_*=《》^…¥-]+\\\\ *\', \' \', cur,\n flags=re.VERBOSE)\n', (2604, 2694), False, 'import re\n')] |
from pathlib import Path
from typing import Union
class Provider:
def __init__(self, project_root_dir: Union[str, Path]):
"""Creates the provider instance
Args:
project_root_dir: the root directory of the project
"""
self.project_root_dir = Path(project_root_dir)
def build(self, *args, **kwargs):
raise NotImplementedError("Child classes must implement the build method.")
def run(self, *args, **kwargs):
raise NotImplementedError("Child classes must implement the run method.")
class CloudMixin:
def deploy(self, *args, **kwargs):
raise NotImplementedError("Child classes must implement the deploy method.")
| [
"pathlib.Path"
] | [((294, 316), 'pathlib.Path', 'Path', (['project_root_dir'], {}), '(project_root_dir)\n', (298, 316), False, 'from pathlib import Path\n')] |
# -*- coding: utf-8 -*-
# <EMAIL>
from colorama import init
from termcolor import colored
from print_tree import print_tree
init()
class Node(object):
def __init__(self, value, parent):
self.value = value
self.children = []
if parent is not None:
parent.children.append(self)
class print_custom_tree(print_tree):
def get_children(self, node):
return node.children
def get_node_str(self, node):
return str(node.value)
if __name__ == "__main__":
data_structure = Node(colored("Data Stucture", 'blue', 'on_white'), None)
vector = Node(colored("Vector", 'cyan'), data_structure)
list_ = Node(colored("List", 'cyan'), data_structure)
tree = Node(colored("Tree", 'cyan'), data_structure)
graph = Node(colored("Graph", 'cyan'), data_structure)
dag = Node(colored("DAG", 'magenta'), graph)
avl = Node(colored("AVL", 'magenta', attrs=['bold']), tree)
splay = Node(colored("Splay", 'magenta', attrs=['underline']), tree)
b = Node(colored("B", 'magenta', attrs=['dark']), tree)
quad = Node(colored("Quand", 'magenta', attrs=['blink']), tree)
kd = Node(colored("kd", attrs=['concealed']), tree)
print_custom_tree(data_structure)
| [
"termcolor.colored",
"colorama.init"
] | [((127, 133), 'colorama.init', 'init', ([], {}), '()\n', (131, 133), False, 'from colorama import init\n'), ((543, 587), 'termcolor.colored', 'colored', (['"""Data Stucture"""', '"""blue"""', '"""on_white"""'], {}), "('Data Stucture', 'blue', 'on_white')\n", (550, 587), False, 'from termcolor import colored\n'), ((614, 639), 'termcolor.colored', 'colored', (['"""Vector"""', '"""cyan"""'], {}), "('Vector', 'cyan')\n", (621, 639), False, 'from termcolor import colored\n'), ((674, 697), 'termcolor.colored', 'colored', (['"""List"""', '"""cyan"""'], {}), "('List', 'cyan')\n", (681, 697), False, 'from termcolor import colored\n'), ((731, 754), 'termcolor.colored', 'colored', (['"""Tree"""', '"""cyan"""'], {}), "('Tree', 'cyan')\n", (738, 754), False, 'from termcolor import colored\n'), ((789, 813), 'termcolor.colored', 'colored', (['"""Graph"""', '"""cyan"""'], {}), "('Graph', 'cyan')\n", (796, 813), False, 'from termcolor import colored\n'), ((847, 872), 'termcolor.colored', 'colored', (['"""DAG"""', '"""magenta"""'], {}), "('DAG', 'magenta')\n", (854, 872), False, 'from termcolor import colored\n'), ((896, 937), 'termcolor.colored', 'colored', (['"""AVL"""', '"""magenta"""'], {'attrs': "['bold']"}), "('AVL', 'magenta', attrs=['bold'])\n", (903, 937), False, 'from termcolor import colored\n'), ((962, 1010), 'termcolor.colored', 'colored', (['"""Splay"""', '"""magenta"""'], {'attrs': "['underline']"}), "('Splay', 'magenta', attrs=['underline'])\n", (969, 1010), False, 'from termcolor import colored\n'), ((1031, 1070), 'termcolor.colored', 'colored', (['"""B"""', '"""magenta"""'], {'attrs': "['dark']"}), "('B', 'magenta', attrs=['dark'])\n", (1038, 1070), False, 'from termcolor import colored\n'), ((1094, 1138), 'termcolor.colored', 'colored', (['"""Quand"""', '"""magenta"""'], {'attrs': "['blink']"}), "('Quand', 'magenta', attrs=['blink'])\n", (1101, 1138), False, 'from termcolor import colored\n'), ((1160, 1194), 'termcolor.colored', 'colored', (['"""kd"""'], {'attrs': "['concealed']"}), "('kd', attrs=['concealed'])\n", (1167, 1194), False, 'from termcolor import colored\n')] |
from pgreaper._globals import SQLIFY_PATH
import copy
import functools
import os
import warnings
import configparser
# Store configuration file in pgreaper's base directory
SQLIFY_CONF = configparser.ConfigParser()
SQLIFY_CONF_PATH = os.path.join(SQLIFY_PATH, 'config.ini')
SQLIFY_CONF.read(SQLIFY_CONF_PATH)
class DefaultSettings(dict):
'''
Sort of like a default dict
* A factory for producing new dicts
* If a key isn't specified or is None, it produces a dict with the
default value
'''
def __init__(self, section):
try:
# {k:SQLIFY_CONF[section][k] for k in SQLIFY_CONF[section]})
super(DefaultSettings, self).__init__()
for k in SQLIFY_CONF[section]:
if SQLIFY_CONF[section][k]:
self[k] = SQLIFY_CONF[section][k]
else:
self[k] = None
except KeyError:
raise KeyError('There is no section named {} in the settings.'.format(
section))
def __call__(self, **kwargs):
new_dict = copy.deepcopy(self)
for k in kwargs:
if (kwargs[k] is not None) and (k in self.keys()):
new_dict[k] = kwargs[k]
return new_dict
def to_string(self, dbname=None):
''' Produce a SQLAlchemy style connection string '''
if not dbname:
dbname = self['dbname']
return 'postgres+psycopg2://{user}:{password}@{host}/{dbname}'.format(
user=self['user'],
password=self['password'],
host=self['host'],
dbname=dbname
)
# def __setitem__(self, key, value):
# return self.__call__(key=value)
try:
PG_DEFAULTS = DefaultSettings('postgres_default')
except KeyError:
SQLIFY_CONF['postgres_default'] = {}
SQLIFY_CONF['postgres_default']['user'] = 'postgres'
SQLIFY_CONF['postgres_default']['password'] = ''
SQLIFY_CONF['postgres_default']['host'] = 'localhost'
SQLIFY_CONF['postgres_default']['dbname'] = 'postgres'
PG_DEFAULTS = DefaultSettings('postgres_default')
warnings.warn("No default Postgres settings found. Use"
"pgreaper.settings(user='', password='', dbname='', host='') to set them.")
def settings(hide=True, *args, **kwargs):
'''
    Read, write, and modify configuration settings. Currently,
the only settings are for the default PostgreSQL database.
**Arguments**
* hide: Obfuscate password with asterisks
**To view existing settings**
>>> import pgreaper
>>> pgreaper.settings()
**To set new settings, or modify existing ones**
    * If creating settings for the first time, the `dbname`, `user`,
      and `password` arguments should be used
    * `host` will default to `localhost` if not specified
* To modify existing settings, you only need to specify the setting
you are changing.
>>> import pgreaper
>>> pgreaper.settings(dbname='postgres',
username='peytonmanning',
password='<PASSWORD>',
hostname='localhost')
.. note:: This stores your username and password in a plain-text INI file.
'''
# List of keywords suggesting user wants to modify Postgres settings
pg_kwargs = set(['user', 'password', 'host', 'dbname'])
# No arguments --> Print settings
if (not args) and (not kwargs):
print_settings(hide)
# Modify Postgres settings
elif pg_kwargs.intersection( set( kwargs.keys() ) ):
if 'postgres_default' not in SQLIFY_CONF.sections():
first_time = True
SQLIFY_CONF['postgres_default'] = {}
else:
first_time = False
# Record values of arguments that aren't nonsense
for key in pg_kwargs:
try:
SQLIFY_CONF['postgres_default'][key] = kwargs[key]
except KeyError:
                if first_time and key != 'host':
                    # Require user to provide values for all keys first time
                    raise KeyError("Please specify a 'dbname', 'user' and "
                        "'password'. Optionally, you may also specify a 'host'"
                        " (default: 'localhost').")
pass
        # If the 'host' argument is missing, default to 'localhost'
        if 'host' in kwargs:
            SQLIFY_CONF['postgres_default']['host'] = kwargs['host']
else:
SQLIFY_CONF['postgres_default']['host'] = 'localhost'
with open(SQLIFY_CONF_PATH, 'w') as conf_file:
SQLIFY_CONF.write(conf_file)
print_settings(hide)
else:
raise ValueError('Invalid argument. Valid keyword arguments are {}.'.format(pg_kwargs))
def print_settings(hide):
''' Print current user settings '''
if SQLIFY_CONF.sections():
for section in SQLIFY_CONF.sections():
print('[{}]'.format(section))
for key in SQLIFY_CONF[section]:
val = SQLIFY_CONF[section][key]
if key == 'password' and hide:
val = '{0} (Type pgreaper.settings(hide=False) to show)'.format('*' * 15)
print('{0}: {space} {1}'.format(
key, val,
space=' ' * (12 - len(key))))
else:
print("No settings found.") | [
"warnings.warn",
"os.path.join",
"configparser.ConfigParser",
"copy.deepcopy"
] | [((189, 216), 'configparser.ConfigParser', 'configparser.ConfigParser', ([], {}), '()\n', (214, 216), False, 'import configparser\n'), ((236, 275), 'os.path.join', 'os.path.join', (['SQLIFY_PATH', '"""config.ini"""'], {}), "(SQLIFY_PATH, 'config.ini')\n", (248, 275), False, 'import os\n'), ((1098, 1117), 'copy.deepcopy', 'copy.deepcopy', (['self'], {}), '(self)\n', (1111, 1117), False, 'import copy\n'), ((2189, 2327), 'warnings.warn', 'warnings.warn', (['"""No default Postgres settings found. Usepgreaper.settings(user=\'\', password=\'\', dbname=\'\', host=\'\') to set them."""'], {}), '(\n "No default Postgres settings found. Usepgreaper.settings(user=\'\', password=\'\', dbname=\'\', host=\'\') to set them."\n )\n', (2202, 2327), False, 'import warnings\n')] |
# -*- coding: utf-8 -*-
import datetime
import pytest
def test_setup_and_info(store):
# GIVEN a store which is already setup
assert len(store.engine.table_names()) > 0
# THEN it should contain an Info entry with a default "created_at" date
info_obj = store.info()
assert isinstance(info_obj.created_at, datetime.datetime)
def test_track_update(store):
# GIVEN a store which is empty apart from the initialized info entry
assert store.info().updated_at is None
# WHEN updating the last updated date
store.set_latest_update_date()
# THEN it should update info entry with the current date
assert isinstance(store.info().updated_at, datetime.datetime)
def test_add_user(store):
# GIVEN an empty database
assert store.User.query.first() is None
name, email = "<NAME>", "<EMAIL>"
# WHEN adding a new user
new_user = store.add_user(name, email)
# THEN it should be stored in the database
assert store.User.query.filter_by(email=email).first() == new_user
def test_user(store):
# GIVEN a database with a user
name, email = "<NAME>", "<EMAIL>"
store.add_user(name, email)
assert store.User.query.filter_by(email=email).first().email == email
# WHEN querying for a user
user_obj = store.user(email)
# THEN it should be returned
assert user_obj.email == email
# WHEN querying for a user that doesn't exist
user_obj = store.user("<EMAIL>")
# THEN it should return as None
assert user_obj is None
def test_analysis(sample_store):
# GIVEN a store with an analysis
existing_analysis = sample_store.analyses().first()
# WHEN accessing it by ID
analysis_obj = sample_store.analysis(existing_analysis.id)
# THEN it should return the same analysis
assert analysis_obj == existing_analysis
# GIVEN an id that doesn't exist
missing_analysis_id = 12312423534
# WHEN accessing the analysis
analysis_obj = sample_store.analysis(missing_analysis_id)
# THEN it should return None
assert analysis_obj is None
@pytest.mark.parametrize(
"family, expected_bool",
[
("blazinginsect", True), # running
("nicemice", False), # completed
("lateraligator", False), # failed
("escapedgoat", True), # pending
],
)
def test_is_latest_analysis_ongoing(sample_store, family: str, expected_bool: bool):
# GIVEN an analysis
sample_store.update_ongoing_analyses()
analysis_objs = sample_store.analyses(case_id=family).first()
assert analysis_objs is not None
# WHEN checking if the family has an ongoing analysis status
is_ongoing = sample_store.is_latest_analysis_ongoing(case_id=family)
# THEN it should return the expected result
assert is_ongoing is expected_bool
@pytest.mark.parametrize(
"family, expected_bool",
[
("blazinginsect", False), # running
("nicemice", False), # completed
("lateraligator", True), # failed
("escapedgoat", False), # pending
],
)
def test_is_latest_analysis_failed(sample_store, family: str, expected_bool: bool):
# GIVEN an analysis
sample_store.update_ongoing_analyses()
analysis_objs = sample_store.analyses(case_id=family).first()
assert analysis_objs is not None
# WHEN checking if the family has a failed analysis status
is_failed = sample_store.is_latest_analysis_failed(case_id=family)
# THEN it should return the expected result
assert is_failed is expected_bool
@pytest.mark.parametrize(
"family, expected_bool",
[
("blazinginsect", False), # running
("nicemice", True), # completed
("lateraligator", False), # failed
("escapedgoat", False), # pending
],
)
def test_is_latest_analysis_completed(sample_store, family: str, expected_bool: bool):
# GIVEN an analysis
sample_store.update_ongoing_analyses()
analysis_objs = sample_store.analyses(case_id=family).first()
assert analysis_objs is not None
# WHEN checking if the family has a failed analysis status
is_failed = sample_store.is_latest_analysis_completed(case_id=family)
# THEN it should return the expected result
assert is_failed is expected_bool
@pytest.mark.parametrize(
"family, expected_status",
[
("blazinginsect", "running"), # running
("nicemice", "completed"), # completed
("lateraligator", "failed"), # failed
("escapedgoat", "pending"), # pending
],
)
def test_get_latest_analysis_status(sample_store, family: str, expected_status: str):
# GIVEN an analysis
sample_store.update_ongoing_analyses()
analysis_objs = sample_store.analyses(case_id=family).first()
assert analysis_objs is not None
# WHEN checking if the family has an analysis status
status = sample_store.get_latest_analysis_status(case_id=family)
# THEN it should return the expected result
    assert status == expected_status
@pytest.mark.parametrize(
"case_id, status",
[
("blazinginsect", "running"),
("crackpanda", "failed"),
("daringpidgeon", "error"),
("emptydinosaur", "error"),
("escapedgoat", "pending"),
("fancymole", "completed"),
("happycow", "pending"),
("lateraligator", "failed"),
("liberatedunicorn", "error"),
("nicemice", "completed"),
("rarekitten", "canceled"),
("trueferret", "running"),
],
)
def test_update(sample_store, case_id, status):
# GIVEN an analysis
analysis_obj = sample_store.get_latest_analysis(case_id)
# WHEN database is updated once
sample_store.update_run_status(analysis_obj.id)
# THEN analysis status is changed to what is expected
assert analysis_obj.status == status
# WHEN database is updated a second time
sample_store.update_run_status(analysis_obj.id)
# THEN the status is still what is expected, and no database errors were raised
assert analysis_obj.status == status
def test_mark_analyses_deleted(sample_store):
# GIVEN case_id for a case that is not deleted
case_id = "liberatedunicorn"
analysis_obj = sample_store.get_latest_analysis(case_id)
assert not analysis_obj.is_deleted
# WHEN running command
sample_store.mark_analyses_deleted(case_id=case_id)
analysis_obj = sample_store.get_latest_analysis(case_id)
# THEN analysis is marked deleted
assert analysis_obj.is_deleted
| [
"pytest.mark.parametrize"
] | [((2081, 2230), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""family, expected_bool"""', "[('blazinginsect', True), ('nicemice', False), ('lateraligator', False), (\n 'escapedgoat', True)]"], {}), "('family, expected_bool', [('blazinginsect', True),\n ('nicemice', False), ('lateraligator', False), ('escapedgoat', True)])\n", (2104, 2230), False, 'import pytest\n'), ((2808, 2958), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""family, expected_bool"""', "[('blazinginsect', False), ('nicemice', False), ('lateraligator', True), (\n 'escapedgoat', False)]"], {}), "('family, expected_bool', [('blazinginsect', False),\n ('nicemice', False), ('lateraligator', True), ('escapedgoat', False)])\n", (2831, 2958), False, 'import pytest\n'), ((3530, 3680), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""family, expected_bool"""', "[('blazinginsect', False), ('nicemice', True), ('lateraligator', False), (\n 'escapedgoat', False)]"], {}), "('family, expected_bool', [('blazinginsect', False),\n ('nicemice', True), ('lateraligator', False), ('escapedgoat', False)])\n", (3553, 3680), False, 'import pytest\n'), ((4257, 4432), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""family, expected_status"""', "[('blazinginsect', 'running'), ('nicemice', 'completed'), ('lateraligator',\n 'failed'), ('escapedgoat', 'pending')]"], {}), "('family, expected_status', [('blazinginsect',\n 'running'), ('nicemice', 'completed'), ('lateraligator', 'failed'), (\n 'escapedgoat', 'pending')])\n", (4280, 4432), False, 'import pytest\n'), ((4992, 5394), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""case_id, status"""', "[('blazinginsect', 'running'), ('crackpanda', 'failed'), ('daringpidgeon',\n 'error'), ('emptydinosaur', 'error'), ('escapedgoat', 'pending'), (\n 'fancymole', 'completed'), ('happycow', 'pending'), ('lateraligator',\n 'failed'), ('liberatedunicorn', 'error'), ('nicemice', 'completed'), (\n 'rarekitten', 'canceled'), ('trueferret', 'running')]"], {}), "('case_id, status', [('blazinginsect', 'running'), (\n 'crackpanda', 'failed'), ('daringpidgeon', 'error'), ('emptydinosaur',\n 'error'), ('escapedgoat', 'pending'), ('fancymole', 'completed'), (\n 'happycow', 'pending'), ('lateraligator', 'failed'), (\n 'liberatedunicorn', 'error'), ('nicemice', 'completed'), ('rarekitten',\n 'canceled'), ('trueferret', 'running')])\n", (5015, 5394), False, 'import pytest\n')] |
import os
# Formats and creates dictionary based on mercator results
mer_file = open("/path/to/mercator.results.txt", "r")
mer_file.readline()
mer_dict = {}
for i in mer_file:
if "pwa" in i.split("\t")[2]:
gene = i.split("\t")[2][1:-1].upper()
desc = [i.strip("\n").split("\t")[-2][1:-1].split("(original description: ")[1][:-1], i.strip("\n").split("\t")[-2][1:-1].split(" (original description:")[0], i.split("\t")[0][1:-1], i.split("\t")[1][1:-1]]
mer_dict[gene] = desc
mer_file.close()
#For annotating a single file
file_in = "/path/to/nbh.txt"
nbh_file = open(file_in, "r")
out = open(file_in.split(".txt")[0] + "_anno.txt", "w+")
for x in nbh_file:
gene2 = x.split("\t")[1].split("cds_")[1].split("_")[0]
out.write(x.split("\t")[0] + "\t" + x.split("\t")[1] + "\t" + x.strip("\n").split("\t")[-1]+ "\t" + mer_dict[gene2][0] + "\t" + mer_dict[gene2][2] + "\t" + mer_dict[gene2][3]+ "\t" + mer_dict[gene2][1] + "\n")
nbh_file.close()
out.close()
# For multiple files stored in a directory.
curdir = "/path/to/dir/"
for i in os.listdir(curdir):
if "_nbh.txt" in i:
if "anno" not in i:
nbh_file = open(curdir + i, "r")
out = open(curdir + i.split(".txt")[0] + "_anno.txt", "w+")
for x in nbh_file:
gene2 = x.split("\t")[1].split("cds_")[1].split("_")[0]
out.write(x.split("\t")[0] + "\t" + x.split("\t")[1] + "\t" + x.strip("\n").split("\t")[-1]+ "\t" + mer_dict[gene2][0] + "\t" + mer_dict[gene2][2] + "\t" + mer_dict[gene2][3]+ "\t" + mer_dict[gene2][1] + "\n")
nbh_file.close()
out.close()
| [
"os.listdir"
] | [((1078, 1096), 'os.listdir', 'os.listdir', (['curdir'], {}), '(curdir)\n', (1088, 1096), False, 'import os\n')] |
import requests
import json
from xlwt import *
response = requests.get('https://api.github.com')
print(response.status_code)
if response:
print('Success!')
else:
print('An error has occurred.')
apiKey = '<KEY>'
url = 'https://api.github.com/user/repos'
response = requests.get(url, auth=('token',apiKey))
repoJSON = response.json()
print (response.status_code)
newFile = open("SMclRepos.json", "w")
json.dump(repoJSON, newFile, indent=4)
# url = https://github.com/SimonMcLain/datarepresentationstudent-aPrivateOne
# token = <KEY> | [
"json.dump",
"requests.get"
] | [((59, 97), 'requests.get', 'requests.get', (['"""https://api.github.com"""'], {}), "('https://api.github.com')\n", (71, 97), False, 'import requests\n'), ((275, 316), 'requests.get', 'requests.get', (['url'], {'auth': "('token', apiKey)"}), "(url, auth=('token', apiKey))\n", (287, 316), False, 'import requests\n'), ((412, 450), 'json.dump', 'json.dump', (['repoJSON', 'newFile'], {'indent': '(4)'}), '(repoJSON, newFile, indent=4)\n', (421, 450), False, 'import json\n')] |
from Relium import calcurate, parser
print(len(parser.parsefile(r"").TimingPoints))
| [
"Relium.parser.parsefile"
] | [((48, 68), 'Relium.parser.parsefile', 'parser.parsefile', (['""""""'], {}), "('')\n", (64, 68), False, 'from Relium import calcurate, parser\n')] |
#!/usr/local/bin/python3
import datetime
import more_itertools
def bits_iter(time=None):
fract = fraction_of_day(time)
while True:
fract *= 2
if fract >= 1:
yield True
fract -= 1
else:
yield False
def decode_bits(bits):
fract = 0.0
exp = 1
for bit in bits:
if bit:
fract += 1 / 2 ** exp
exp += 1
return (datetime.datetime(1970, 1, 1) + datetime.timedelta(days=fract)).time()
def decode_hex(hex_string):
for hex_digit in hex_string:
nybble = int(hex_digit, 16)
yield bool(nybble & 0b1000)
yield bool(nybble & 0b0100)
yield bool(nybble & 0b0010)
yield bool(nybble & 0b0001)
def fraction_of_day(time=None):
if time is None:
time = datetime.datetime.now().time()
return time.hour / 24 + time.minute / 1440 + time.second / 86400 + time.microsecond / 86400000000
def hex(bits):
for nybble in more_itertools.chunked(bits, 4):
while len(nybble) < 4:
nybble.append(False)
yield 8 * nybble[0] + 4 * nybble[1] + 2 * nybble[2] + 1 * nybble[3]
if __name__ == '__main__':
now = datetime.datetime.now()
print(''.join('{:X}'.format(nybble) for nybble in hex(more_itertools.take(12, bits_iter(now.time())))))
print('---')
print('{:%H:%M:%S}'.format(now.time()))
print('w{0[1]}.{0[2]}: {1:%Y-%m-%d}'.format(now.date().isocalendar(), now.date()))
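    # Round-trip sketch: re-encode the same instant and decode it back; with 12 hex
    # digits (48 bits of the day fraction) the decoded time matches now.time() to
    # sub-second accuracy.
    hex_digits = ''.join('{:X}'.format(nybble) for nybble in hex(more_itertools.take(12, bits_iter(now.time()))))
    print(decode_bits(decode_hex(hex_digits)))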
| [
"datetime.datetime",
"datetime.datetime.now",
"datetime.timedelta",
"more_itertools.chunked"
] | [((968, 999), 'more_itertools.chunked', 'more_itertools.chunked', (['bits', '(4)'], {}), '(bits, 4)\n', (990, 999), False, 'import more_itertools\n'), ((1179, 1202), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1200, 1202), False, 'import datetime\n'), ((419, 448), 'datetime.datetime', 'datetime.datetime', (['(1970)', '(1)', '(1)'], {}), '(1970, 1, 1)\n', (436, 448), False, 'import datetime\n'), ((451, 481), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': 'fract'}), '(days=fract)\n', (469, 481), False, 'import datetime\n'), ((801, 824), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (822, 824), False, 'import datetime\n')] |
"""
Sync example of receiving Crownstone SSE events.
Created by <NAME>.
Last update on 06-05-2021.
"""
import logging
from crownstone_sse import CrownstoneSSE
from crownstone_sse.events import (
SwitchStateUpdateEvent,
SystemEvent,
PresenceEvent,
AbilityChangeEvent,
DataChangeEvent,
)
from crownstone_sse import (
EVENT_ABILITY_CHANGE,
EVENT_DATA_CHANGE,
EVENT_PRESENCE,
EVENT_PRESENCE_ENTER_LOCATION,
EVENT_SWITCH_STATE_UPDATE,
EVENT_SWITCH_STATE_UPDATE_CROWNSTONE,
EVENT_SYSTEM,
OPERATION_CREATE,
OPERATION_DELETE,
OPERATION_UPDATE,
)
# enable logging.
logging.basicConfig(format='%(levelname)s :%(message)s', level=logging.DEBUG)
def switch_update(event: SwitchStateUpdateEvent):
if event.sub_type == EVENT_SWITCH_STATE_UPDATE_CROWNSTONE:
print("Crownstone {} switch state changed to {}".format(event.cloud_id, event.switch_state))
def notify_stream_start(event: SystemEvent):
print(event.message)
def notify_presence_changed(event: PresenceEvent):
if event.sub_type == EVENT_PRESENCE_ENTER_LOCATION:
print("User {} has entered location {}".format(event.user_id, event.location_id))
def notify_ability_changed(event: AbilityChangeEvent):
print("Ability {} changed to {}".format(event.ability_type, event.ability_enabled))
def notify_data_changed(event: DataChangeEvent):
if event.operation == OPERATION_CREATE:
print("New data is available: {}".format(event.changed_item_name))
if event.operation == OPERATION_UPDATE:
print("Name of id {} has been updated to {}".format(event.changed_item_id, event.changed_item_name))
if event.operation == OPERATION_DELETE:
print("Data {} has been deleted".format(event.changed_item_name))
# Create a new instance of Crownstone SSE client.
# email (string): your Crownstone account email.
# password (string): your Crownstone account password.
# access_token (string) [optional]: Access token from a previous login to skip the login step.
# reconnection_time (int): time to wait before reconnection on connection loss.
sse_client = CrownstoneSSE(
email="<EMAIL>",
password="<PASSWORD>"
)
# Add listeners for event types of your liking, and the desired callback to be executed. see above.
sse_client.add_event_listener(EVENT_SYSTEM, notify_stream_start)
sse_client.add_event_listener(EVENT_SWITCH_STATE_UPDATE, switch_update)
sse_client.add_event_listener(EVENT_PRESENCE, notify_presence_changed)
sse_client.add_event_listener(EVENT_ABILITY_CHANGE, notify_ability_changed)
sse_client.add_event_listener(EVENT_DATA_CHANGE, notify_data_changed)
# Wait until the thread finishes.
# You can terminate the thread by using SIGINT (ctrl + c or stop button in IDE).
try:
sse_client.join()
except KeyboardInterrupt:
sse_client.stop()
| [
"logging.basicConfig",
"crownstone_sse.CrownstoneSSE"
] | [((618, 695), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(levelname)s :%(message)s"""', 'level': 'logging.DEBUG'}), "(format='%(levelname)s :%(message)s', level=logging.DEBUG)\n", (637, 695), False, 'import logging\n'), ((2119, 2172), 'crownstone_sse.CrownstoneSSE', 'CrownstoneSSE', ([], {'email': '"""<EMAIL>"""', 'password': '"""<PASSWORD>"""'}), "(email='<EMAIL>', password='<PASSWORD>')\n", (2132, 2172), False, 'from crownstone_sse import CrownstoneSSE\n')] |
import numpy as np
import matplotlib.pyplot as plt
def gaussian(x, mu, sig):
return np.exp(-np.power(x - mu, 2.) / (2 * np.power(sig, 2.)))
c = 342 # speed of sound
lx = 342/2 # length in meters
t = 2 # time in seconds
# TIME
Fs_t = 2000 # samples/second time is dependent of space
# SPACE
Fs_x = 2 # samples/meter
num_div_x = int(lx*Fs_x) # divisions of all the space
# Simulation steps in Time
num_div_t = int(Fs_t*t)
delta_t = t / num_div_t
t_axis = np.arange(0, t, delta_t)
# number of divisions in x axis
delta_x = lx / num_div_x
x_axis = np.arange(0, lx, delta_x)
# force signal
t_values = np.arange(0, num_div_t, 1)
x_values = np.arange(0, num_div_x, 1)
x_n = np.zeros([num_div_t, num_div_x])
k_x = 15
# x_n[:, 0] = np.cos((np.pi * k_x / num_div_x) * x_values)
k_t = 1 / ((2 * lx) / (k_x*c))
A = -100
# pos_x = int(num_div_x/2)
# pos_x = int(8*num_div_x/20)
pos_x = 0
x_n[:, pos_x] = A * np.sin((2*np.pi * k_t / Fs_t) * t_values)
# offset = 10
# x_n[:, pos_x] = A*gaussian(t_values, 38 + offset, 9) - A*gaussian(t_values, 74 + offset, 9)
# x_n[:, pos_x + 100] = gaussian(t_values, 5, 1) - gaussian(t_values, 10, 1)
# plt.figure()
# plt.imshow(x_n, cmap='hot')
plt.figure()
plt.plot(x_n[:, pos_x])
print("num_div_t %i " % num_div_t)
print("num_div_x %i " % num_div_x)
print("delta t: %f" % delta_t)
print("CFL Condition %f" % (delta_x/((3**0.5)*c)))
# Init Simulation time-stepping scheme----
p_n_minus1 = np.zeros(shape=[num_div_x, 1])
p_n = np.zeros(shape=[num_div_x, 1])
p_n_plus1 = np.zeros(shape=[num_div_x, 1])
k_matrix_global = np.zeros(shape=[num_div_x, num_div_x])
fdtd_kernel_6 = np.array([2, -27, 270, -490, 270, -27, 2])*(1/180)
# fdtd_kernel_6 = np.array([2, -27, 270, -490, 270, -27, 2])
# Creating K Laplace operator matrix
k_matrix_temp = np.zeros(shape=[num_div_x, num_div_x + 6])
for i in range(num_div_x):
k_matrix_temp[i, i:i+7] = fdtd_kernel_6.copy()
k_matrix_global = k_matrix_temp[:, 3:-3].copy()
# Rigid walls Nuemann boundary condition (partial_p/partial_x)=0 when x=0 and x=l_x
k_matrix_global[:, 0:3] = k_matrix_global[:, 0:3] + np.fliplr(k_matrix_temp[:, 0:3])
k_matrix_global[:, -3:num_div_x] = k_matrix_global[:, -3:num_div_x] + np.fliplr(k_matrix_temp[:, -3:num_div_x + 6])
# Two Partitions of equal size
k_matrix_local = k_matrix_global.copy()
p_1_k_matrix = k_matrix_local[0:int(num_div_x/2), :]
p_2_k_matrix = k_matrix_local[int(num_div_x/2):num_div_x, :]
# Both with boundaries conditions (partial_p/partial_x)=0 when x=0 and x=l_x
p_1_k_matrix[:, int(num_div_x/2)-3:int(num_div_x/2)] = p_1_k_matrix[:, int(num_div_x/2)-3:int(num_div_x/2)] \
+ np.fliplr(p_1_k_matrix[:, int(num_div_x/2):int(num_div_x/2)+3])
p_1_k_matrix[:, int(num_div_x/2):int(num_div_x/2)+3] = p_1_k_matrix[:, int(num_div_x/2):int(num_div_x/2)+3] \
- p_1_k_matrix[:, int(num_div_x/2):int(num_div_x/2)+3]
p_2_k_matrix[:, int(num_div_x/2):int(num_div_x/2)+3] = p_2_k_matrix[:, int(num_div_x/2):int(num_div_x/2)+3] \
+ np.fliplr(p_2_k_matrix[:, int(num_div_x/2)-3:int(num_div_x/2)])
p_2_k_matrix[:, int(num_div_x/2)-3:int(num_div_x/2)] = p_2_k_matrix[:, int(num_div_x/2)-3:int(num_div_x/2)] \
- p_2_k_matrix[:, int(num_div_x/2)-3:int(num_div_x/2)]
# Laplace operator Residual = global - local
k_matrix_res = k_matrix_global - k_matrix_local
k_mini_matrix_res = k_matrix_res[int(num_div_x/2)-3:int(num_div_x/2)+3,int(num_div_x/2)-3:int(num_div_x/2)+3]
# Terms
lambda_2 = (c * delta_t / delta_x) ** 2
k_matrix_local = k_matrix_local*lambda_2
k_matrix_res = k_matrix_res*lambda_2
k_mini_matrix_res = k_mini_matrix_res*lambda_2
# Force init update
# f = (x_n[1, :].reshape([num_div_x, 1])).copy() + k_matrix_res.dot(p_n)
p_n_mini = p_n[int(num_div_x/2)-3:int(num_div_x/2)+3, :]
f = (delta_t ** 2) * (x_n[1, :].reshape([num_div_x, 1])).copy()
f[int(num_div_x/2)-3:int(num_div_x/2)+3, :] = f[int(num_div_x/2)-3:int(num_div_x/2)+3, :] + k_mini_matrix_res.dot(p_n_mini)
f_bound_list = []
plt.figure()
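# Time-stepping note: the loop below is the standard second-order leapfrog update
# p[n+1] = 2*p[n] - p[n-1] + (c*dt/dx)^2 * K @ p[n] + f, where K is the 6th-order
# finite-difference Laplacian stencil (k_matrix_local is pre-multiplied by that factor
# above) and f combines the excitation term with the residual coupling between the
# two partitions.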
for time_step in range(2, int(num_div_t/10)):
# Local update
p_n_plus1 = 2 * p_n - p_n_minus1 + (k_matrix_local.dot(p_n)) + f
# Update Force
# f = (delta_t ** 2) * ((x_n[time_step, :].reshape([num_div_x, 1])).copy()) + k_matrix_res.dot(p_n_plus1)
p_n_mini = p_n_plus1[int(num_div_x / 2) - 3:int(num_div_x / 2) + 3, :]
f = (delta_t ** 2) * (x_n[time_step, :].reshape([num_div_x, 1])).copy()
f[int(num_div_x / 2) - 3:int(num_div_x / 2) + 3, :] = f[int(num_div_x / 2) - 3:int(num_div_x / 2) + 3,
:] + k_mini_matrix_res.dot(p_n_mini)
f_boundaries = f[int(num_div_x / 2) - 3:int(num_div_x / 2) + 3, :]
# f_bound_list.append(f_boundaries.copy())
# Update last temporal terms
p_n_minus1 = p_n.copy()
p_n = p_n_plus1.copy()
# Plot
plt.clf()
plt.subplot(2, 1, 1)
plt.plot(p_n_plus1)
plt.axis([0, num_div_x, -0.005, +0.005])
plt.subplot(2, 1, 2)
plt.plot([0, 1, 2, 3, 4, 5], f_boundaries.reshape([6]), 'o', color='r')
plt.axis([0, 5, -1.3873669866559941e-05, 1.3873669866559924e-05])
plt.pause(0.00001)
| [
"numpy.power",
"numpy.fliplr",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.clf",
"numpy.array",
"numpy.zeros",
"matplotlib.pyplot.figure",
"numpy.sin",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.pause",
"matplotlib.pyplot.subplot",
"numpy.arange"
] | [((486, 510), 'numpy.arange', 'np.arange', (['(0)', 't', 'delta_t'], {}), '(0, t, delta_t)\n', (495, 510), True, 'import numpy as np\n'), ((580, 605), 'numpy.arange', 'np.arange', (['(0)', 'lx', 'delta_x'], {}), '(0, lx, delta_x)\n', (589, 605), True, 'import numpy as np\n'), ((635, 661), 'numpy.arange', 'np.arange', (['(0)', 'num_div_t', '(1)'], {}), '(0, num_div_t, 1)\n', (644, 661), True, 'import numpy as np\n'), ((673, 699), 'numpy.arange', 'np.arange', (['(0)', 'num_div_x', '(1)'], {}), '(0, num_div_x, 1)\n', (682, 699), True, 'import numpy as np\n'), ((707, 739), 'numpy.zeros', 'np.zeros', (['[num_div_t, num_div_x]'], {}), '([num_div_t, num_div_x])\n', (715, 739), True, 'import numpy as np\n'), ((1212, 1224), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1222, 1224), True, 'import matplotlib.pyplot as plt\n'), ((1225, 1248), 'matplotlib.pyplot.plot', 'plt.plot', (['x_n[:, pos_x]'], {}), '(x_n[:, pos_x])\n', (1233, 1248), True, 'import matplotlib.pyplot as plt\n'), ((1464, 1494), 'numpy.zeros', 'np.zeros', ([], {'shape': '[num_div_x, 1]'}), '(shape=[num_div_x, 1])\n', (1472, 1494), True, 'import numpy as np\n'), ((1501, 1531), 'numpy.zeros', 'np.zeros', ([], {'shape': '[num_div_x, 1]'}), '(shape=[num_div_x, 1])\n', (1509, 1531), True, 'import numpy as np\n'), ((1544, 1574), 'numpy.zeros', 'np.zeros', ([], {'shape': '[num_div_x, 1]'}), '(shape=[num_div_x, 1])\n', (1552, 1574), True, 'import numpy as np\n'), ((1594, 1632), 'numpy.zeros', 'np.zeros', ([], {'shape': '[num_div_x, num_div_x]'}), '(shape=[num_div_x, num_div_x])\n', (1602, 1632), True, 'import numpy as np\n'), ((1816, 1858), 'numpy.zeros', 'np.zeros', ([], {'shape': '[num_div_x, num_div_x + 6]'}), '(shape=[num_div_x, num_div_x + 6])\n', (1824, 1858), True, 'import numpy as np\n'), ((4185, 4197), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (4195, 4197), True, 'import matplotlib.pyplot as plt\n'), ((938, 979), 'numpy.sin', 'np.sin', (['(2 * np.pi * k_t / Fs_t * t_values)'], {}), '(2 * np.pi * k_t / Fs_t * t_values)\n', (944, 979), True, 'import numpy as np\n'), ((1650, 1692), 'numpy.array', 'np.array', (['[2, -27, 270, -490, 270, -27, 2]'], {}), '([2, -27, 270, -490, 270, -27, 2])\n', (1658, 1692), True, 'import numpy as np\n'), ((2124, 2156), 'numpy.fliplr', 'np.fliplr', (['k_matrix_temp[:, 0:3]'], {}), '(k_matrix_temp[:, 0:3])\n', (2133, 2156), True, 'import numpy as np\n'), ((2227, 2272), 'numpy.fliplr', 'np.fliplr', (['k_matrix_temp[:, -3:num_div_x + 6]'], {}), '(k_matrix_temp[:, -3:num_div_x + 6])\n', (2236, 2272), True, 'import numpy as np\n'), ((5044, 5053), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (5051, 5053), True, 'import matplotlib.pyplot as plt\n'), ((5059, 5079), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(1)'], {}), '(2, 1, 1)\n', (5070, 5079), True, 'import matplotlib.pyplot as plt\n'), ((5084, 5103), 'matplotlib.pyplot.plot', 'plt.plot', (['p_n_plus1'], {}), '(p_n_plus1)\n', (5092, 5103), True, 'import matplotlib.pyplot as plt\n'), ((5108, 5148), 'matplotlib.pyplot.axis', 'plt.axis', (['[0, num_div_x, -0.005, +0.005]'], {}), '([0, num_div_x, -0.005, +0.005])\n', (5116, 5148), True, 'import matplotlib.pyplot as plt\n'), ((5154, 5174), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(2)'], {}), '(2, 1, 2)\n', (5165, 5174), True, 'import matplotlib.pyplot as plt\n'), ((5255, 5320), 'matplotlib.pyplot.axis', 'plt.axis', (['[0, 5, -1.3873669866559941e-05, 1.3873669866559924e-05]'], {}), '([0, 5, -1.3873669866559941e-05, 
1.3873669866559924e-05])\n', (5263, 5320), True, 'import matplotlib.pyplot as plt\n'), ((5326, 5342), 'matplotlib.pyplot.pause', 'plt.pause', (['(1e-05)'], {}), '(1e-05)\n', (5335, 5342), True, 'import matplotlib.pyplot as plt\n'), ((99, 120), 'numpy.power', 'np.power', (['(x - mu)', '(2.0)'], {}), '(x - mu, 2.0)\n', (107, 120), True, 'import numpy as np\n'), ((127, 145), 'numpy.power', 'np.power', (['sig', '(2.0)'], {}), '(sig, 2.0)\n', (135, 145), True, 'import numpy as np\n')] |
#!/usr/bin/python
import argparse
import os
import csv
import json
from pathlib import Path
import subprocess
import yaml
import re
import allantools
import numpy as np
import matplotlib.pyplot
import math
parser = argparse.ArgumentParser()
parser.add_argument("folder", help="Folder containing JSONL and video file")
parser.add_argument("-output", help="Output folder, if not current directory")
parser.add_argument("-nthframes", help="Every Nth frame, default 4", default=4)
parser.add_argument("-stationary", help="Process data as stationary calibration set, skips first&last 15s", action="store_true")
args = parser.parse_args()
SECONDS_TO_NS = 1000 * 1000 * 1000
# kalibr_bagcreater doesn't support timestamps smaller than 1 second, add an offset
TIME_OFFSET_NS = 1 * SECONDS_TO_NS
def log10(x):
return math.log(x, 10)
def getRandomWalkSegment(tau, sigma):
M = -0.5 # slope of random walk
i = 1
idx = 1
mindiff = 999
logTau = -999
while (logTau < 0):
logTau = log10(tau[i])
slope = (log10(sigma[i]) - log10(sigma[i - 1])) / (logTau - log10(tau[i - 1]))
diff = abs(slope - M)
if (diff < mindiff):
mindiff = diff
idx = i
i = i + 1
x1 = log10(tau[idx])
y1 = log10(sigma[idx])
x2 = 0
y2 = M * (x2 - x1) + y1
return (pow(10, x1), pow(10, y1), pow(10, x2), pow(10, y2))
def getBiasInstabilityPoint(tau, sigma):
i = 1
while (i < tau.size):
if (tau[i] > 1) and ((sigma[i] - sigma[i - 1]) > 0): # only check for tau > 10^0
break
i = i + 1
return (tau[i], sigma[i])
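# Reading the Allan deviation curve: random-walk (white) noise appears as a slope of
# -1/2 on the log-log plot, so getRandomWalkSegment() fits that slope and reads its
# value at tau = 1 s; bias instability is taken by getBiasInstabilityPoint() as the
# first point (tau > 1 s) where the curve stops decreasing, i.e. its flat bottom.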
# Allan variance computed as described in https://github.com/GAVLab/allan_variance/blob/master/scripts/allan.py
def computeNoiseRandomWalk(imu, outputFolder):
firstTimeStamp = imu[0][0] / SECONDS_TO_NS
lastTimeStamp = imu[len(imu) - 1][0] / SECONDS_TO_NS
sampleRate = len(imu) / (lastTimeStamp - firstTimeStamp)
print("Computed sample rate: {}".format(sampleRate))
isDeltaType = False
numTau = 1000 # number of lags
# Form Tau Array
taus = [None]*numTau
cnt = 0
for i in np.linspace(-2.0, 5.0, num=numTau): # lags will span from 10^-2 to 10^5, log spaced
taus[cnt] = pow(10, i)
cnt = cnt + 1
N = len(imu) # number of measurement samples
data = np.zeros( (6, N) ) # preallocate vector of measurements
if isDeltaType:
scale = sampleRate
else:
scale = 1.0
cnt = 0
for imuSample in imu:
data[0,cnt] = imuSample[4] * scale
data[1,cnt] = imuSample[5] * scale
data[2,cnt] = imuSample[6] * scale
data[3,cnt] = imuSample[1] * scale
data[4,cnt] = imuSample[2] * scale
data[5,cnt] = imuSample[3] * scale
cnt = cnt + 1
# Allan Variance
results = []
figure, subplots = matplotlib.pyplot.subplots(2, 3, figsize=(10,10))
subplots = np.ravel(subplots)
for index in range(6):
(taus_used, adev, adev_err, adev_n) = allantools.oadev(data[index], data_type='freq', rate=float(sampleRate), taus=np.array(taus))
randomWalkSegment = getRandomWalkSegment(taus_used,adev)
biasInstabilityPoint = getBiasInstabilityPoint(taus_used,adev)
randomWalk = randomWalkSegment[3]
biasInstability = biasInstabilityPoint[1]
if (index == 0):
name = 'accelerometer_x'
elif (index == 1):
name = 'accelerometer_y'
elif (index == 2):
name = 'accelerometer_z'
elif (index == 3):
name = 'gyroscope_x'
elif (index == 4):
name = 'gyroscope_y'
elif (index == 5):
name = 'gyroscope_z'
with open(outputFolder + "/summary.txt", 'a') as f:
summary = "{}, randomWalk: {}, biasInstability: {}".format(name, randomWalk, biasInstability)
f.write(summary + "\n")
print(summary)
results.append([randomWalk, biasInstability])
# Plot Result
plt = subplots[index]
plt.set_yscale('log')
plt.set_xscale('log')
plt.plot(taus_used,adev)
plt.plot([randomWalkSegment[0], randomWalkSegment[2]],
[randomWalkSegment[1], randomWalkSegment[3]], 'k--')
plt.plot(1, randomWalk, 'rx', markeredgewidth=2.5, markersize=14.0)
plt.plot(biasInstabilityPoint[0], biasInstabilityPoint[1], 'ro')
plt.grid(True, which="both")
plt.title.set_text(name)
plt.set_xlabel('Tau (s)')
plt.set_ylabel('ADEV')
figure.savefig(outputFolder + "/plots.png")
with open(outputFolder + "/imu.yaml", 'wt') as f:
acc_random_walk = (results[0][0] + results[1][0] + results[2][0]) / 3
acc_bias = (results[0][1] + results[1][1] + results[2][1]) / 3
gyro_random_walk = (results[3][0] + results[4][0] + results[5][0]) / 3
gyro_bias = (results[3][1] + results[4][1] + results[5][1]) / 3
f.write("accelerometer_noise_density: {}\n".format(acc_random_walk))
f.write("accelerometer_random_walk: {}\n".format(acc_bias))
f.write("gyroscope_noise_density: {}\n".format(gyro_random_walk))
f.write("gyroscope_random_walk: {}\n".format(gyro_bias))
f.write("rostopic: {}\n".format("/imu0"))
f.write("update_rate: {}\n".format(sampleRate))
def getNanoseconds(seconds):
return int(seconds * SECONDS_TO_NS + TIME_OFFSET_NS)
def getVideoFile(folder, name):
for f in os.listdir(folder):
if re.search("{}\\.[avi|mp4|mov]".format(name), f):
return f
# Export given frame numbers from video into PNG files and rename them to timestamps
def exportFrames(videoFile, outputFolder, nthframes, timestamps):
os.makedirs(outputFolder, exist_ok=True)
cmd = "ffmpeg -i {} -vf select='not(mod(n\\,{}))' -vsync 0 {}/frame_%05d.png" \
.format(videoFile, nthframes, outputFolder)
subprocess.run(cmd, shell=True)
files = [f for f in os.listdir(outputFolder)]
anyExtra = False
for f in sorted(files):
index = int(f.split("_")[1].split(".")[0])
index = (index - 1) * nthframes
fpath = os.path.join(outputFolder, f)
if index in timestamps:
newFilename = str(timestamps[index]) + ".png"
os.rename(fpath, os.path.join(outputFolder, newFilename))
assert(not anyExtra) # extra frames at the end of the recording are OK
else:
anyExtra = True
print('WARNING: extra frame removed %s' % fpath)
os.remove(fpath)
# Read acc+gyro and frame timestamps, convert time to nanoseconds
def readJsonl(folder):
gyro = []
acc = []
frames = {}
with open(folder + "/data.jsonl") as f:
for line in f.readlines():
try:
entry = json.loads(line)
except:
print("Ignoring bad JSONL line:", line)
continue
if entry.get("sensor"):
values = entry["sensor"]["values"]
arr = [entry["time"], values[0], values[1], values[2]]
if entry["sensor"]["type"] == "gyroscope":
gyro.append(arr)
if entry["sensor"]["type"] == "accelerometer":
acc.append(arr)
elif entry.get("frames"):
if "number" in entry:
number = entry["number"]
else:
number = entry["frames"][0]["number"]
frames[number] = getNanoseconds(entry["time"])
# fix dropped frames
mapping = {}
for num in sorted(frames.keys()):
if num not in mapping:
mapping[num] = len(mapping)
mapped = { mapping[num]: frames[num] for num in frames.keys() }
frames = mapped
accStartIndex = 0
synced = []
for gyroSample in gyro:
closestAccSample = acc[0]
i = accStartIndex
while i < len(acc):
accSample = acc[i]
if abs(closestAccSample[0] - gyroSample[0]) > abs(accSample[0] - gyroSample[0]):
closestAccSample = accSample
accStartIndex = i # Always start finding match for next sample where we left off
if closestAccSample[0] > gyroSample[0]:
break
i += 1
try:
synced.append([getNanoseconds(gyroSample[0]), gyroSample[1], gyroSample[2], gyroSample[3], closestAccSample[1], closestAccSample[2], closestAccSample[3]])
except:
print("Failed {}".format(gyroSample[0]))
return synced, frames # synced gyro + nearest acc, frmae timestamps
def main(args):
imuData, frameTimestamps = readJsonl(args.folder)
outputFolder = args.output if args.output else "."
os.makedirs(outputFolder, exist_ok=True)
if args.stationary:
# For stationary calibration remove first and last 15 seconds when device was probably disturbed
firstTimestamp = imuData[0][0] + 15 * SECONDS_TO_NS
lastTimestamp = imuData[len(imuData) - 1][0] - 15 * SECONDS_TO_NS
clipped = []
for imuSample in imuData:
if imuSample[0] > firstTimestamp and imuSample[0] < lastTimestamp:
clipped.append(imuSample)
computeNoiseRandomWalk(clipped, outputFolder)
return
with open(outputFolder + "/imu0.csv", "w") as csvfile:
csvfile.write("timestamp,omega_x,omega_y,omega_z,alpha_x,alpha_y,alpha_z\n")
for imuSample in imuData:
csvfile.write(",".join([str(x) for x in imuSample]) + "\n")
video0 = getVideoFile(args.folder, "data")
exportFrames(args.folder + "/" + video0, outputFolder + "/cam0", args.nthframes, frameTimestamps)
video1 = getVideoFile(args.folder, "data2")
if video1:
exportFrames(args.folder + "/" + video1, outputFolder + "/cam1", args.nthframes, frameTimestamps)
if __name__ == "__main__":
main(args)
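
# Example invocations (illustrative; the script name and paths are placeholders):
#   python convert_recording.py /path/to/recording -output ./kalibr_input -nthframes 4
#   python convert_recording.py /path/to/stationary_recording -stationary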
| [
"json.loads",
"os.listdir",
"argparse.ArgumentParser",
"os.makedirs",
"subprocess.run",
"os.path.join",
"math.log",
"numpy.array",
"numpy.linspace",
"numpy.zeros",
"numpy.ravel",
"os.remove"
] | [((217, 242), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (240, 242), False, 'import argparse\n'), ((818, 833), 'math.log', 'math.log', (['x', '(10)'], {}), '(x, 10)\n', (826, 833), False, 'import math\n'), ((2139, 2173), 'numpy.linspace', 'np.linspace', (['(-2.0)', '(5.0)'], {'num': 'numTau'}), '(-2.0, 5.0, num=numTau)\n', (2150, 2173), True, 'import numpy as np\n'), ((2337, 2353), 'numpy.zeros', 'np.zeros', (['(6, N)'], {}), '((6, N))\n', (2345, 2353), True, 'import numpy as np\n'), ((2916, 2934), 'numpy.ravel', 'np.ravel', (['subplots'], {}), '(subplots)\n', (2924, 2934), True, 'import numpy as np\n'), ((5481, 5499), 'os.listdir', 'os.listdir', (['folder'], {}), '(folder)\n', (5491, 5499), False, 'import os\n'), ((5739, 5779), 'os.makedirs', 'os.makedirs', (['outputFolder'], {'exist_ok': '(True)'}), '(outputFolder, exist_ok=True)\n', (5750, 5779), False, 'import os\n'), ((5920, 5951), 'subprocess.run', 'subprocess.run', (['cmd'], {'shell': '(True)'}), '(cmd, shell=True)\n', (5934, 5951), False, 'import subprocess\n'), ((8760, 8800), 'os.makedirs', 'os.makedirs', (['outputFolder'], {'exist_ok': '(True)'}), '(outputFolder, exist_ok=True)\n', (8771, 8800), False, 'import os\n'), ((6158, 6187), 'os.path.join', 'os.path.join', (['outputFolder', 'f'], {}), '(outputFolder, f)\n', (6170, 6187), False, 'import os\n'), ((5976, 6000), 'os.listdir', 'os.listdir', (['outputFolder'], {}), '(outputFolder)\n', (5986, 6000), False, 'import os\n'), ((6546, 6562), 'os.remove', 'os.remove', (['fpath'], {}), '(fpath)\n', (6555, 6562), False, 'import os\n'), ((3085, 3099), 'numpy.array', 'np.array', (['taus'], {}), '(taus)\n', (3093, 3099), True, 'import numpy as np\n'), ((6307, 6346), 'os.path.join', 'os.path.join', (['outputFolder', 'newFilename'], {}), '(outputFolder, newFilename)\n', (6319, 6346), False, 'import os\n'), ((6817, 6833), 'json.loads', 'json.loads', (['line'], {}), '(line)\n', (6827, 6833), False, 'import json\n')] |
# -*- coding: utf-8 -*-
import re
class DocumentoIdentificacao(object):
"""docstring for DocumentoIdentificacao"""
__valor = str()
def __new__(cls, *args, **kwargs):
if cls == DocumentoIdentificacao:
raise Exception('Esta classe não pode ser instanciada diretamente!')
else:
#return super(DocumentoIdentificacao, cls).__new__(cls, *args, **kwargs)
return super().__new__(cls)
def __init__(self, arg):
self.__valor = self.__sieve(arg)
def __repr__(self):
return "<{0}.{1}({2!r})>".format(self.__class__.__module__, self.__class__.__name__, self.rawValue)
def __str__(self):
pass
@property
def rawValue(self):
return self.__valor
def isValid(self):
        return False
def __sieve(self, input):
"""
Filters out CNPJ formatting symbols. Symbols that are not used in the CNPJ formatting are left
unfiltered on purpose so that if fails other tests, because their presence indicate that the
input was somehow corrupted.
"""
p = re.compile('[ ./-]')
return p.sub('', str(input)) if input != None else None
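

# Minimal usage sketch: CPF is a hypothetical concrete subclass illustrating how
# DocumentoIdentificacao is meant to be extended; the length check below stands in
# for a real verification-digit algorithm.
class CPF(DocumentoIdentificacao):
    def __str__(self):
        return self.rawValue or ''

    def isValid(self):
        return self.rawValue is not None and len(self.rawValue) == 11


if __name__ == '__main__':
    print(repr(CPF('123.456.789-09')))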
| [
"re.compile"
] | [((970, 990), 're.compile', 're.compile', (['"""[ ./-]"""'], {}), "('[ ./-]')\n", (980, 990), False, 'import re\n')] |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-07-14 23:48
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Ballot',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('submission_date', models.DateTimeField(blank=True, null=True)),
('poll_type', models.CharField(blank=True, max_length=10, null=True)),
('overall_rationale', models.TextField()),
],
),
migrations.CreateModel(
name='BallotEntry',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('rank', models.IntegerField()),
('rationale', models.TextField()),
('ballot', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='poll.Ballot')),
],
),
migrations.CreateModel(
name='Poll',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('year', models.IntegerField()),
('week', models.CharField(max_length=20)),
('open_date', models.DateTimeField()),
('close_date', models.DateTimeField()),
],
),
migrations.CreateModel(
name='Team',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('handle', models.CharField(max_length=40)),
('name', models.CharField(max_length=80)),
('flair', models.FilePathField(path='E:\\workspace\\rcfbpoll\\rcfbpoll\\staticfiles\\images/full60')),
('logo', models.FilePathField(path='E:\\workspace\\rcfbpoll\\rcfbpoll\\staticfiles\\images/fullorig')),
('header', models.FilePathField(path='E:\\workspace\\rcfbpoll\\rcfbpoll\\staticfiles\\images/header240')),
('conference', models.CharField(max_length=10)),
('division', models.CharField(max_length=4)),
],
),
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('username', models.CharField(max_length=32)),
('primary_affiliation', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='poll.Team')),
],
),
migrations.CreateModel(
name='UserSecondaryAffiliations',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('team', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='poll.Team')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='poll.User')),
],
),
migrations.AddField(
model_name='ballotentry',
name='team',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='poll.Team'),
),
migrations.AddField(
model_name='ballot',
name='poll',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='poll.Poll'),
),
migrations.AddField(
model_name='ballot',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='poll.User'),
),
]
| [
"django.db.models.TextField",
"django.db.models.ForeignKey",
"django.db.models.IntegerField",
"django.db.models.AutoField",
"django.db.models.DateTimeField",
"django.db.models.FilePathField",
"django.db.models.CharField"
] | [((3412, 3490), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""poll.Team"""'}), "(on_delete=django.db.models.deletion.CASCADE, to='poll.Team')\n", (3429, 3490), False, 'from django.db import migrations, models\n'), ((3608, 3686), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""poll.Poll"""'}), "(on_delete=django.db.models.deletion.CASCADE, to='poll.Poll')\n", (3625, 3686), False, 'from django.db import migrations, models\n'), ((3804, 3882), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""poll.User"""'}), "(on_delete=django.db.models.deletion.CASCADE, to='poll.User')\n", (3821, 3882), False, 'from django.db import migrations, models\n'), ((399, 492), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (415, 492), False, 'from django.db import migrations, models\n'), ((527, 570), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (547, 570), False, 'from django.db import migrations, models\n'), ((603, 657), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(10)', 'null': '(True)'}), '(blank=True, max_length=10, null=True)\n', (619, 657), False, 'from django.db import migrations, models\n'), ((698, 716), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (714, 716), False, 'from django.db import migrations, models\n'), ((853, 946), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (869, 946), False, 'from django.db import migrations, models\n'), ((970, 991), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (989, 991), False, 'from django.db import migrations, models\n'), ((1024, 1042), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (1040, 1042), False, 'from django.db import migrations, models\n'), ((1072, 1157), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""poll.Ballot"""'}), "(on_delete=django.db.models.deletion.CASCADE, to='poll.Ballot'\n )\n", (1089, 1157), False, 'from django.db import migrations, models\n'), ((1282, 1375), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (1298, 1375), False, 'from django.db import migrations, models\n'), ((1399, 1420), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (1418, 1420), False, 'from django.db import migrations, models\n'), ((1448, 1479), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20)'}), '(max_length=20)\n', (1464, 1479), False, 'from django.db import migrations, models\n'), ((1512, 1534), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {}), '()\n', (1532, 1534), False, 'from django.db import migrations, models\n'), ((1568, 1590), 
'django.db.models.DateTimeField', 'models.DateTimeField', ([], {}), '()\n', (1588, 1590), False, 'from django.db import migrations, models\n'), ((1720, 1813), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (1736, 1813), False, 'from django.db import migrations, models\n'), ((1839, 1870), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(40)'}), '(max_length=40)\n', (1855, 1870), False, 'from django.db import migrations, models\n'), ((1898, 1929), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(80)'}), '(max_length=80)\n', (1914, 1929), False, 'from django.db import migrations, models\n'), ((1958, 2053), 'django.db.models.FilePathField', 'models.FilePathField', ([], {'path': '"""E:\\\\workspace\\\\rcfbpoll\\\\rcfbpoll\\\\staticfiles\\\\images/full60"""'}), "(path=\n 'E:\\\\workspace\\\\rcfbpoll\\\\rcfbpoll\\\\staticfiles\\\\images/full60')\n", (1978, 2053), False, 'from django.db import migrations, models\n'), ((2076, 2173), 'django.db.models.FilePathField', 'models.FilePathField', ([], {'path': '"""E:\\\\workspace\\\\rcfbpoll\\\\rcfbpoll\\\\staticfiles\\\\images/fullorig"""'}), "(path=\n 'E:\\\\workspace\\\\rcfbpoll\\\\rcfbpoll\\\\staticfiles\\\\images/fullorig')\n", (2096, 2173), False, 'from django.db import migrations, models\n'), ((2198, 2296), 'django.db.models.FilePathField', 'models.FilePathField', ([], {'path': '"""E:\\\\workspace\\\\rcfbpoll\\\\rcfbpoll\\\\staticfiles\\\\images/header240"""'}), "(path=\n 'E:\\\\workspace\\\\rcfbpoll\\\\rcfbpoll\\\\staticfiles\\\\images/header240')\n", (2218, 2296), False, 'from django.db import migrations, models\n'), ((2325, 2356), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(10)'}), '(max_length=10)\n', (2341, 2356), False, 'from django.db import migrations, models\n'), ((2388, 2418), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(4)'}), '(max_length=4)\n', (2404, 2418), False, 'from django.db import migrations, models\n'), ((2548, 2641), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (2564, 2641), False, 'from django.db import migrations, models\n'), ((2669, 2700), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(32)'}), '(max_length=32)\n', (2685, 2700), False, 'from django.db import migrations, models\n'), ((2743, 2821), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""poll.Team"""'}), "(on_delete=django.db.models.deletion.CASCADE, to='poll.Team')\n", (2760, 2821), False, 'from django.db import migrations, models\n'), ((2972, 3065), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (2988, 3065), False, 'from django.db import migrations, models\n'), ((3089, 3167), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""poll.Team"""'}), "(on_delete=django.db.models.deletion.CASCADE, to='poll.Team')\n", (3106, 3167), False, 
'from django.db import migrations, models\n'), ((3195, 3273), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""poll.User"""'}), "(on_delete=django.db.models.deletion.CASCADE, to='poll.User')\n", (3212, 3273), False, 'from django.db import migrations, models\n')] |
import os
from clisops import CONFIG
def test_local_config_loads():
assert "clisops:read" in CONFIG
assert "file_size_limit" in CONFIG["clisops:write"]
def test_dask_env_variables():
assert os.getenv("MKL_NUM_THREADS") == "1"
assert os.getenv("OPENBLAS_NUM_THREADS") == "1"
assert os.getenv("OMP_NUM_THREADS") == "1"
| [
"os.getenv"
] | [((207, 235), 'os.getenv', 'os.getenv', (['"""MKL_NUM_THREADS"""'], {}), "('MKL_NUM_THREADS')\n", (216, 235), False, 'import os\n'), ((254, 287), 'os.getenv', 'os.getenv', (['"""OPENBLAS_NUM_THREADS"""'], {}), "('OPENBLAS_NUM_THREADS')\n", (263, 287), False, 'import os\n'), ((306, 334), 'os.getenv', 'os.getenv', (['"""OMP_NUM_THREADS"""'], {}), "('OMP_NUM_THREADS')\n", (315, 334), False, 'import os\n')] |
import torch
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--plm', type=str, default='bert-base-cased')
parser.add_argument('--device', type=str, default='cuda:0')
args = parser.parse_args()
device = args.device
from datasets import load_dataset, concatenate_datasets
data_set = load_dataset("nlu_evaluation_data",split='train')
data_set = data_set.shuffle(seed=42)
all_labels = data_set.features['label'].names
all_scens = list(map(lambda x: x.split('_')[0], all_labels))
scenarios = set(all_scens)
scenarios = set(filter(lambda x : all_scens.count(x) > 1, scenarios))
# scenarios = {'alarm', 'audio'}
metadata = {}
raw_datasets = {}
for scenario in scenarios:
data_set_scenario = data_set.filter(lambda ex: ex['scenario'] == scenario)
classes = list(filter(lambda x: x.split('_')[0] == scenario, all_labels))
min_label = list(map(lambda x: x.split('_')[0] == scenario, all_labels)).index(True)
def change_labels(ex):
ex['label'] = ex['label'] - min_label
return ex
data_set_scenario = data_set_scenario.map(change_labels)
data_set_scenario = data_set_scenario.rename_column("label", "labels")
metadata[scenario] = { 'classes': classes }
dataset_train_all=[]
dataset_valid_all=[]
dataset_test_all=[]
for label in classes:
data_set_scenario_label = data_set_scenario.filter(lambda ex: all_labels[min_label + ex['labels']] == label)
trainvalid_test_dataset = data_set_scenario_label.train_test_split(test_size=0.3)
train_valid_dataset =trainvalid_test_dataset["train"].train_test_split(test_size=0.3)
dataset_train_all.append(train_valid_dataset["train"])
dataset_test_all.append(trainvalid_test_dataset["test"])
dataset_valid_all.append(train_valid_dataset["test"])
dataset_train_all = concatenate_datasets(dataset_train_all)
dataset_valid_all = concatenate_datasets(dataset_valid_all)
dataset_test_all = concatenate_datasets(dataset_test_all)
raw_datasets[scenario] = {'train': dataset_train_all, 'valid': dataset_valid_all, 'test': dataset_test_all }
from transformers import AutoTokenizer, AutoModelForSequenceClassification
tokenizer = AutoTokenizer.from_pretrained(args.plm)
def tokenize_function(examples):
return tokenizer(examples["text"], padding="max_length", max_length=64, truncation=True, return_tensors="pt")
import numpy as np
from datasets import load_metric
from transformers import AdamW
from transformers import get_scheduler
from tqdm.auto import tqdm
metric = load_metric("accuracy")
def compute_metrics(eval_pred):
logits, labels = eval_pred
predictions = np.argmax(logits, axis=-1)
return metric.compute(predictions=predictions, references=labels)
def evaluate(dataloader):
metric = load_metric("accuracy")
model.eval()
for batch in dataloader:
gbatch = {}
for k, v in batch.items():
if k in ["input_ids", "attention_mask"]:
gbatch[k] = v[0].to(device)
elif k == "labels":
gbatch[k] = v.to(device)
with torch.no_grad():
outputs = model(**gbatch)
logits = outputs.logits
predictions = torch.argmax(logits, dim=-1)
metric.add_batch(predictions=predictions, references=batch["labels"])
return metric.compute()
valid_accs = []
test_accs = []
num_params = 0
for scenario in metadata.keys():
tokenized_dataset = {}
for split in ['train', 'valid', 'test']:
data_set = raw_datasets[scenario][split].map(tokenize_function)
# data_set = data_set.rename_column("label", "labels")
data_set.set_format('torch', columns=["input_ids", "attention_mask", "labels"])
tokenized_dataset[split] = data_set
model = AutoModelForSequenceClassification.from_pretrained(args.plm, num_labels=len(metadata[scenario]["classes"])).to(device)
optimizer = AdamW(model.parameters(), lr=2e-5)
num_epochs = 20
batch_size = 32
num_training_steps = num_epochs * int( len(tokenized_dataset["train"]) / batch_size )
lr_scheduler = get_scheduler(
"linear",
optimizer=optimizer,
num_warmup_steps=0,
num_training_steps=num_training_steps,
)
train_dataloader = torch.utils.data.DataLoader(tokenized_dataset["train"], batch_size=batch_size)
eval_dataloader = torch.utils.data.DataLoader(tokenized_dataset["valid"], batch_size=batch_size)
test_dataloader = torch.utils.data.DataLoader(tokenized_dataset["test"], batch_size=batch_size)
num_params += sum([p.numel() for p in model.parameters() if p.requires_grad])
progress_bar = tqdm(range(num_training_steps))
best_yet = 0.0
model.train()
for epoch in range(num_epochs):
for batch in train_dataloader:
gbatch = {}
for k, v in batch.items():
if k in ["input_ids", "attention_mask"]:
gbatch[k] = v[0].to(device)
elif k == "labels":
gbatch[k] = v.to(device)
outputs = model(**gbatch)
loss = outputs.loss
loss.backward()
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
progress_bar.update(1)
val_acc = evaluate(eval_dataloader)['accuracy']
# print(f"scenario {scenario} epoch {epoch} val acc {val_acc}")
if val_acc > best_yet:
test_acc = evaluate(test_dataloader)['accuracy']
# print(f"scenario {scenario} test acc: {test_acc}")
valid_accs.append(val_acc)
test_accs.append(test_acc)
valid_accs = np.array(valid_accs)
test_accs = np.array(test_accs)
print(f"Number of parameters {num_params}")
val_acc_mean, val_acc_std = np.mean(valid_accs), np.std(valid_accs)
test_acc_mean, test_acc_std = np.mean(test_accs), np.std(test_accs)
print(f"Mean/std accuracies for valid data is {val_acc_mean}/{val_acc_std}")
print(f"Mean/std accuracies for test data is {test_acc_mean}/{test_acc_std}")
| [
"numpy.mean",
"datasets.load_metric",
"argparse.ArgumentParser",
"transformers.get_scheduler",
"numpy.std",
"numpy.argmax",
"numpy.array",
"datasets.load_dataset",
"transformers.AutoTokenizer.from_pretrained",
"torch.utils.data.DataLoader",
"torch.no_grad",
"datasets.concatenate_datasets",
"torch.argmax"
] | [((39, 64), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (62, 64), False, 'import argparse\n'), ((310, 360), 'datasets.load_dataset', 'load_dataset', (['"""nlu_evaluation_data"""'], {'split': '"""train"""'}), "('nlu_evaluation_data', split='train')\n", (322, 360), False, 'from datasets import load_dataset, concatenate_datasets\n'), ((2228, 2267), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (['args.plm'], {}), '(args.plm)\n', (2257, 2267), False, 'from transformers import AutoTokenizer, AutoModelForSequenceClassification\n'), ((2575, 2598), 'datasets.load_metric', 'load_metric', (['"""accuracy"""'], {}), "('accuracy')\n", (2586, 2598), False, 'from datasets import load_metric\n'), ((5688, 5708), 'numpy.array', 'np.array', (['valid_accs'], {}), '(valid_accs)\n', (5696, 5708), True, 'import numpy as np\n'), ((5721, 5740), 'numpy.array', 'np.array', (['test_accs'], {}), '(test_accs)\n', (5729, 5740), True, 'import numpy as np\n'), ((1858, 1897), 'datasets.concatenate_datasets', 'concatenate_datasets', (['dataset_train_all'], {}), '(dataset_train_all)\n', (1878, 1897), False, 'from datasets import load_dataset, concatenate_datasets\n'), ((1922, 1961), 'datasets.concatenate_datasets', 'concatenate_datasets', (['dataset_valid_all'], {}), '(dataset_valid_all)\n', (1942, 1961), False, 'from datasets import load_dataset, concatenate_datasets\n'), ((1985, 2023), 'datasets.concatenate_datasets', 'concatenate_datasets', (['dataset_test_all'], {}), '(dataset_test_all)\n', (2005, 2023), False, 'from datasets import load_dataset, concatenate_datasets\n'), ((2682, 2708), 'numpy.argmax', 'np.argmax', (['logits'], {'axis': '(-1)'}), '(logits, axis=-1)\n', (2691, 2708), True, 'import numpy as np\n'), ((2821, 2844), 'datasets.load_metric', 'load_metric', (['"""accuracy"""'], {}), "('accuracy')\n", (2832, 2844), False, 'from datasets import load_metric\n'), ((4138, 4245), 'transformers.get_scheduler', 'get_scheduler', (['"""linear"""'], {'optimizer': 'optimizer', 'num_warmup_steps': '(0)', 'num_training_steps': 'num_training_steps'}), "('linear', optimizer=optimizer, num_warmup_steps=0,\n num_training_steps=num_training_steps)\n", (4151, 4245), False, 'from transformers import get_scheduler\n'), ((4304, 4382), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (["tokenized_dataset['train']"], {'batch_size': 'batch_size'}), "(tokenized_dataset['train'], batch_size=batch_size)\n", (4331, 4382), False, 'import torch\n'), ((4405, 4483), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (["tokenized_dataset['valid']"], {'batch_size': 'batch_size'}), "(tokenized_dataset['valid'], batch_size=batch_size)\n", (4432, 4483), False, 'import torch\n'), ((4506, 4583), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (["tokenized_dataset['test']"], {'batch_size': 'batch_size'}), "(tokenized_dataset['test'], batch_size=batch_size)\n", (4533, 4583), False, 'import torch\n'), ((5815, 5834), 'numpy.mean', 'np.mean', (['valid_accs'], {}), '(valid_accs)\n', (5822, 5834), True, 'import numpy as np\n'), ((5836, 5854), 'numpy.std', 'np.std', (['valid_accs'], {}), '(valid_accs)\n', (5842, 5854), True, 'import numpy as np\n'), ((5885, 5903), 'numpy.mean', 'np.mean', (['test_accs'], {}), '(test_accs)\n', (5892, 5903), True, 'import numpy as np\n'), ((5905, 5922), 'numpy.std', 'np.std', (['test_accs'], {}), '(test_accs)\n', (5911, 5922), True, 'import numpy as np\n'), ((3250, 3278), 'torch.argmax', 'torch.argmax', (['logits'], {'dim': 
'(-1)'}), '(logits, dim=-1)\n', (3262, 3278), False, 'import torch\n'), ((3140, 3155), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3153, 3155), False, 'import torch\n')] |
import base64
import hashlib
import hmac
import json
import logging
import re
import requests
import threading
from django.conf import settings
from django.core.exceptions import SuspiciousOperation
from django.http import HttpResponse
from django.utils.encoding import force_bytes, force_str
from django.views.decorators.csrf import csrf_exempt
from main.helpers import get_message_history, message_for_ts, send_state
logger = logging.getLogger(__name__)
### helpers ###
def verify_slack_request(request):
""" Raise SuspiciousOperation if request was not signed by Slack. """
basestring = b":".join([
b"v0",
force_bytes(request.META.get("HTTP_X_SLACK_REQUEST_TIMESTAMP", b"")),
request.body
])
expected_signature = 'v0=' + hmac.new(settings.SLACK['signing_secret'], basestring, hashlib.sha256).hexdigest()
if not hmac.compare_digest(expected_signature, force_str(request.META.get("HTTP_X_SLACK_SIGNATURE", ""))):
raise SuspiciousOperation("Slack signature verification failed")
colors = ['black', 'red', 'orange', 'yellow', 'green', 'blue', 'purple']
def handle_reactions(message, is_most_recent):
old_color = message['color']
new_color = None
for reaction in message['reactions']:
if 'night' in reaction:
new_color = "#000"
else:
new_color = next((
color
for color in colors
if color in reaction
), None)
if new_color:
break
if old_color != new_color:
message["color"] = new_color or '#fff'
if is_most_recent:
send_state({"color": message["color"]})
def store_image(id, file_response):
""" Add requested file to message_history """
encoded_image = "<img src='data:%s;base64,%s'>" % (
file_response.headers['Content-Type'],
base64.b64encode(file_response.content).decode())
store_message(id, encoded_image)
def store_message(id, html):
with get_message_history() as message_history:
message_history.append({
"id": id,
"html": html,
"reactions": [],
"color": "#fff",
})
send_state(message_history[-1])
def delete_message(id):
with get_message_history() as message_history:
message, is_most_recent = message_for_ts(message_history, id)
if message:
message_history.remove(message)
if is_most_recent and message_history:
send_state(message_history[-1])
### views ###
@csrf_exempt
def slack_event(request):
""" Handle message from Slack. """
verify_slack_request(request)
# handle event in a background thread so Slack doesn't resend if it takes too long
threading.Thread(target=handle_slack_event, args=(request,)).start()
# 200 to tell Slack not to resend
return HttpResponse()
def handle_slack_event(request):
event = json.loads(request.body.decode("utf-8"))
logger.info(event)
# url verification
if event["type"] == "url_verification":
return HttpResponse(event["challenge"], content_type='text/plain')
event = event["event"]
# message in channel
if event["type"] == "message":
message_type = event.get("subtype")
# handle uploaded image
if message_type == "file_share":
# {
# 'type': 'message',
# 'files': [{
# 'filetype': 'png',
# 'url_private': 'https://files.slack.com/files-pri/T02RW19TT-FBY895N1Z/image.png'
# }],
# 'ts': '1532713362.000505',
# 'subtype': 'file_share',
# }
file_info = event["files"][0]
if file_info["filetype"] in ("jpg", "gif", "png"):
# if image, fetch file and send to listeners
file_response = requests.get(file_info["url_private"], headers={"Authorization": "Bearer %s" % settings.SLACK["bot_access_token"]})
if file_response.headers['Content-Type'].startswith('text/html'):
logger.error("Failed to fetch image; check bot_access_token")
else:
store_image(event['ts'], file_response)
# handle pasted URL
elif message_type == "message_changed":
# this is what we get when slack unfurls an image URL -- a nested message with attachments
message = event['message']
if message.get('attachments'):
attachment = message['attachments'][0]
# video URL
if 'video_html' in attachment:
# {
# 'type': 'message',
# 'subtype': 'message_changed',
# 'message': {
# 'attachments': [{
# 'video_html': '<iframe width="400" height="225" ...></iframe>'
# }],
# 'ts': '1532713362.000505',
# },
# }
html = attachment['video_html']
html = re.sub(r'width="\d+" height="\d+" ', '', html)
store_message(message['ts'], html)
# image URL
elif 'image_url' in attachment:
# {
# 'type': 'message',
# 'subtype': 'message_changed',
# 'message': {
# 'attachments': [{
# 'image_url': 'some external url'
# }],
# 'ts': '1532713362.000505',
# },
# }
try:
file_response = requests.get(attachment['image_url'])
assert file_response.ok
assert any(file_response.headers['Content-Type'].startswith(prefix) for prefix in ('image/jpeg', 'image/gif', 'image/png'))
except (requests.RequestException, AssertionError) as e:
logger.error("Failed to fetch URL: %s" % e)
else:
store_image(message['ts'], file_response)
elif event['previous_message'].get('attachments'):
# if edited message doesn't have attachment but previous_message did, attachment was hidden -- delete
delete_message(event['previous_message']['ts'])
# handle message deleted
elif message_type == "message_deleted" and event.get('previous_message'):
delete_message(event['previous_message']['ts'])
# handle reactions
elif event["type"] == "reaction_added":
# {
# 'type': 'reaction_added',
# 'user': 'U02RXC5JN',
# 'item': {'type': 'message', 'channel': 'CBU9W589K', 'ts': '1532713362.000505'},
# 'reaction': 'rage',
# 'item_user': 'U02RXC5JN',
# 'event_ts': '1532713400.000429'
# }
with get_message_history() as message_history:
message, is_most_recent = message_for_ts(message_history, event['item']['ts'])
if message:
message['reactions'].insert(0, event["reaction"])
handle_reactions(message, is_most_recent)
elif event["type"] == "reaction_removed":
with get_message_history() as message_history:
message, is_most_recent = message_for_ts(message_history, event['item']['ts'])
if message:
try:
message['reactions'].remove(event["reaction"])
except ValueError:
pass
else:
handle_reactions(message, is_most_recent)
| [
"logging.getLogger",
"hmac.new",
"main.helpers.message_for_ts",
"django.http.HttpResponse",
"main.helpers.get_message_history",
"base64.b64encode",
"requests.get",
"re.sub",
"threading.Thread",
"main.helpers.send_state",
"django.core.exceptions.SuspiciousOperation"
] | [((430, 457), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (447, 457), False, 'import logging\n'), ((2878, 2892), 'django.http.HttpResponse', 'HttpResponse', ([], {}), '()\n', (2890, 2892), False, 'from django.http import HttpResponse\n'), ((977, 1035), 'django.core.exceptions.SuspiciousOperation', 'SuspiciousOperation', (['"""Slack signature verification failed"""'], {}), "('Slack signature verification failed')\n", (996, 1035), False, 'from django.core.exceptions import SuspiciousOperation\n'), ((1998, 2019), 'main.helpers.get_message_history', 'get_message_history', ([], {}), '()\n', (2017, 2019), False, 'from main.helpers import get_message_history, message_for_ts, send_state\n'), ((2198, 2229), 'main.helpers.send_state', 'send_state', (['message_history[-1]'], {}), '(message_history[-1])\n', (2208, 2229), False, 'from main.helpers import get_message_history, message_for_ts, send_state\n'), ((2264, 2285), 'main.helpers.get_message_history', 'get_message_history', ([], {}), '()\n', (2283, 2285), False, 'from main.helpers import get_message_history, message_for_ts, send_state\n'), ((2340, 2375), 'main.helpers.message_for_ts', 'message_for_ts', (['message_history', 'id'], {}), '(message_history, id)\n', (2354, 2375), False, 'from main.helpers import get_message_history, message_for_ts, send_state\n'), ((3086, 3145), 'django.http.HttpResponse', 'HttpResponse', (["event['challenge']"], {'content_type': '"""text/plain"""'}), "(event['challenge'], content_type='text/plain')\n", (3098, 3145), False, 'from django.http import HttpResponse\n'), ((1634, 1673), 'main.helpers.send_state', 'send_state', (["{'color': message['color']}"], {}), "({'color': message['color']})\n", (1644, 1673), False, 'from main.helpers import get_message_history, message_for_ts, send_state\n'), ((2759, 2819), 'threading.Thread', 'threading.Thread', ([], {'target': 'handle_slack_event', 'args': '(request,)'}), '(target=handle_slack_event, args=(request,))\n', (2775, 2819), False, 'import threading\n'), ((769, 839), 'hmac.new', 'hmac.new', (["settings.SLACK['signing_secret']", 'basestring', 'hashlib.sha256'], {}), "(settings.SLACK['signing_secret'], basestring, hashlib.sha256)\n", (777, 839), False, 'import hmac\n'), ((2507, 2538), 'main.helpers.send_state', 'send_state', (['message_history[-1]'], {}), '(message_history[-1])\n', (2517, 2538), False, 'from main.helpers import get_message_history, message_for_ts, send_state\n'), ((3891, 4011), 'requests.get', 'requests.get', (["file_info['url_private']"], {'headers': "{'Authorization': 'Bearer %s' % settings.SLACK['bot_access_token']}"}), "(file_info['url_private'], headers={'Authorization': \n 'Bearer %s' % settings.SLACK['bot_access_token']})\n", (3903, 4011), False, 'import requests\n'), ((7087, 7108), 'main.helpers.get_message_history', 'get_message_history', ([], {}), '()\n', (7106, 7108), False, 'from main.helpers import get_message_history, message_for_ts, send_state\n'), ((7167, 7219), 'main.helpers.message_for_ts', 'message_for_ts', (['message_history', "event['item']['ts']"], {}), "(message_history, event['item']['ts'])\n", (7181, 7219), False, 'from main.helpers import get_message_history, message_for_ts, send_state\n'), ((1872, 1911), 'base64.b64encode', 'base64.b64encode', (['file_response.content'], {}), '(file_response.content)\n', (1888, 1911), False, 'import base64\n'), ((7428, 7449), 'main.helpers.get_message_history', 'get_message_history', ([], {}), '()\n', (7447, 7449), False, 'from main.helpers import 
get_message_history, message_for_ts, send_state\n'), ((7508, 7560), 'main.helpers.message_for_ts', 'message_for_ts', (['message_history', "event['item']['ts']"], {}), "(message_history, event['item']['ts'])\n", (7522, 7560), False, 'from main.helpers import get_message_history, message_for_ts, send_state\n'), ((5162, 5209), 're.sub', 're.sub', (['"""width="\\\\d+" height="\\\\d+" """', '""""""', 'html'], {}), '(\'width="\\\\d+" height="\\\\d+" \', \'\', html)\n', (5168, 5209), False, 'import re\n'), ((5812, 5849), 'requests.get', 'requests.get', (["attachment['image_url']"], {}), "(attachment['image_url'])\n", (5824, 5849), False, 'import requests\n')] |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-05-25 14:36
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('organisations', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Project',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True,
serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=2000)),
('created_date', models.DateTimeField(auto_now_add=True,
verbose_name='DateCreated')),
('organisation', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE,
related_name='projects',
to='organisations.Organisation')),
],
options={
'ordering': ['id'],
},
),
]
| [
"django.db.models.DateTimeField",
"django.db.models.AutoField",
"django.db.models.CharField",
"django.db.models.ForeignKey"
] | [((445, 538), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (461, 538), False, 'from django.db import migrations, models\n'), ((602, 635), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(2000)'}), '(max_length=2000)\n', (618, 635), False, 'from django.db import migrations, models\n'), ((671, 738), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)', 'verbose_name': '"""DateCreated"""'}), "(auto_now_add=True, verbose_name='DateCreated')\n", (691, 738), False, 'from django.db import migrations, models\n'), ((828, 953), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""projects"""', 'to': '"""organisations.Organisation"""'}), "(on_delete=django.db.models.deletion.CASCADE, related_name\n ='projects', to='organisations.Organisation')\n", (845, 953), False, 'from django.db import migrations, models\n')] |
import os
import stat
from tempfile import TemporaryDirectory
from unittest import TestCase
from pyshrimp import glob_ls, write_to_file, read_file, read_file_bin, chmod_set, chmod_unset
from pyshrimp.exception import IllegalArgumentException
from pyshrimp.utils.filesystem import ls
def _make_file(dir_path, file_name):
path = os.path.join(dir_path, file_name)
with open(path, 'w'):
pass
return path
def _get_file_mode(file_path):
return os.stat(file_path).st_mode & 0o7777
class Test(TestCase):
def setUp(self) -> None:
# {temp_dir}/a/b/
# -> file1.py
# -> file2.txt
# -> c
# -> file3.py
self._temp_dir_obj = TemporaryDirectory('_pyshrimp_filesystem_test')
self.temp_dir = self._temp_dir_obj.name
self.dir_path_a = os.path.join(self.temp_dir, 'a')
self.dir_path_b = os.path.join(self.temp_dir, 'a', 'b')
self.dir_path_c = os.path.join(self.temp_dir, 'a', 'b', 'c')
os.makedirs(self.dir_path_c)
self.file_path_1_b_f1_py = _make_file(self.dir_path_b, 'f1.py')
self.file_path_2_b_f2_txt = _make_file(self.dir_path_b, 'f2.txt')
self.file_path_3_c_f3_py = _make_file(self.dir_path_c, 'f3.py')
def tearDown(self) -> None:
self._temp_dir_obj.cleanup()
def assertEqualAfterSort(self, a, b, msg=None):
self.assertEqual(
list(sorted(a)),
list(sorted(b)),
msg=msg
)
def test_ls_should_list_files(self):
self.assertEqualAfterSort(
[self.file_path_1_b_f1_py, self.file_path_2_b_f2_txt],
ls(
self.dir_path_b,
files_only=True
)
)
def test_ls_should_list_directories(self):
self.assertEqualAfterSort(
[self.dir_path_c],
ls(
self.dir_path_b,
dirs_only=True
)
)
def test_ls_should_accept_path_segments(self):
self.assertEqualAfterSort(
[self.dir_path_c],
ls(
self.dir_path_a, 'b',
dirs_only=True
)
)
def test_ls_should_raise_when_both_files_only_and_dirs_only_were_requested(self):
with self.assertRaisesRegex(
IllegalArgumentException,
r'Illegal arguments - dirs_only and files_only cannot be true together'
):
ls(
self.dir_path_a,
dirs_only=True,
files_only=True
)
def test_glob_ls_should_raise_when_both_files_only_and_dirs_only_were_requested(self):
with self.assertRaisesRegex(
IllegalArgumentException,
r'Illegal arguments - dirs_only and files_only cannot be true together'
):
glob_ls(
self.dir_path_a,
dirs_only=True,
files_only=True
)
def test_glob_ls_should_use_glob_matching_for_files(self):
self.assertEqualAfterSort(
[self.file_path_1_b_f1_py, self.file_path_3_c_f3_py],
glob_ls(
self.dir_path_b, '**', '*.py',
files_only=True,
recursive=True
)
)
def test_glob_ls_should_use_glob_matching_for_directories(self):
self.assertEqualAfterSort(
[self.dir_path_b, self.dir_path_c],
glob_ls(
self.dir_path_a, '**', '*',
dirs_only=True,
recursive=True
)
)
self.assertEqualAfterSort(
[self.dir_path_b],
glob_ls(
self.dir_path_a, '**', 'b',
dirs_only=True,
recursive=True
)
)
def test_glob_ls_should_remove_trailing_separator_from_results(self):
self.assertEqualAfterSort(
[self.dir_path_a, self.dir_path_b, self.dir_path_c],
glob_ls(
# due to ** usage the self.dir_path_a would normally be included with / at the end
# the trailing separator removal deals with this
self.dir_path_a, '**',
dirs_only=True,
recursive=True
)
)
def test_glob_ls_should_not_use_recursion_by_default(self):
self.assertEqualAfterSort(
[self.file_path_1_b_f1_py],
glob_ls(
# with recursive=False the ** acts as *
# so this is the same as a/*/*.py
self.dir_path_a, '**', '*.py',
files_only=True
)
)
def test_glob_ls_should_return_empty_list_when_nothing_matched(self):
self.assertEqualAfterSort(
[],
glob_ls(
os.path.join(self.dir_path_a, '**', 'no-such-file'),
files_only=True
)
)
def test_write_to_file_should_write_to_file_and_read_file_should_read_it_back(self):
file_path = os.path.join(self.dir_path_a, 'test_file_1.txt')
content = 'a\nb\nc'
write_to_file(file_path, content)
content_read = read_file(file_path)
self.assertEqual(content, content_read)
def test_write_and_read_file_should_work_fine_with_utf8_characters(self):
file_path = os.path.join(self.dir_path_a, 'test_file_1.txt')
content = '☕❤️'
write_to_file(file_path, content)
content_read = read_file(file_path)
self.assertEqual(content, content_read)
def test_write_to_file_should_write_to_file_with_binary_mode_and_read_file_bin_should_read_it_back(self):
file_path = os.path.join(self.dir_path_a, 'test_file_1.txt')
content = b'a\nb\nc'
write_to_file(file_path, content, open_mode='wb')
content_read = read_file_bin(file_path)
self.assertEqual(content, content_read)
def test_chmod_set_should_set_requested_flags(self):
# reset mode
os.chmod(self.file_path_1_b_f1_py, 0)
self.assertEqual(0, _get_file_mode(self.file_path_1_b_f1_py))
# set flag
chmod_set(self.file_path_1_b_f1_py, stat.S_IXUSR)
self.assertEqual(stat.S_IXUSR, _get_file_mode(self.file_path_1_b_f1_py))
# set another flag
chmod_set(self.file_path_1_b_f1_py, stat.S_IWGRP)
self.assertEqual(stat.S_IXUSR | stat.S_IWGRP, _get_file_mode(self.file_path_1_b_f1_py))
def test_chmod_unset_should_unset_requested_flags(self):
# set some flags
os.chmod(
self.file_path_1_b_f1_py,
stat.S_IRUSR | stat.S_IWUSR # user: 6
| stat.S_IXGRP # group: 1
| stat.S_IROTH # other: 4
)
self.assertEqual(0o614, _get_file_mode(self.file_path_1_b_f1_py))
# remove flag
chmod_unset(self.file_path_1_b_f1_py, stat.S_IRUSR)
self.assertEqual(
stat.S_IWUSR # user: 2
| stat.S_IXGRP # group: 1
| stat.S_IROTH, # other: 4
_get_file_mode(self.file_path_1_b_f1_py)
)
# remove another flag
chmod_unset(self.file_path_1_b_f1_py, stat.S_IROTH)
self.assertEqual(
stat.S_IWUSR # user: 2
| stat.S_IXGRP, # group: 1
_get_file_mode(self.file_path_1_b_f1_py)
)
| [
"pyshrimp.read_file",
"tempfile.TemporaryDirectory",
"os.makedirs",
"pyshrimp.read_file_bin",
"os.path.join",
"pyshrimp.chmod_unset",
"os.chmod",
"pyshrimp.write_to_file",
"pyshrimp.glob_ls",
"os.stat",
"pyshrimp.chmod_set",
"pyshrimp.utils.filesystem.ls"
] | [((334, 367), 'os.path.join', 'os.path.join', (['dir_path', 'file_name'], {}), '(dir_path, file_name)\n', (346, 367), False, 'import os\n'), ((706, 753), 'tempfile.TemporaryDirectory', 'TemporaryDirectory', (['"""_pyshrimp_filesystem_test"""'], {}), "('_pyshrimp_filesystem_test')\n", (724, 753), False, 'from tempfile import TemporaryDirectory\n'), ((828, 860), 'os.path.join', 'os.path.join', (['self.temp_dir', '"""a"""'], {}), "(self.temp_dir, 'a')\n", (840, 860), False, 'import os\n'), ((887, 924), 'os.path.join', 'os.path.join', (['self.temp_dir', '"""a"""', '"""b"""'], {}), "(self.temp_dir, 'a', 'b')\n", (899, 924), False, 'import os\n'), ((951, 993), 'os.path.join', 'os.path.join', (['self.temp_dir', '"""a"""', '"""b"""', '"""c"""'], {}), "(self.temp_dir, 'a', 'b', 'c')\n", (963, 993), False, 'import os\n'), ((1002, 1030), 'os.makedirs', 'os.makedirs', (['self.dir_path_c'], {}), '(self.dir_path_c)\n', (1013, 1030), False, 'import os\n'), ((5041, 5089), 'os.path.join', 'os.path.join', (['self.dir_path_a', '"""test_file_1.txt"""'], {}), "(self.dir_path_a, 'test_file_1.txt')\n", (5053, 5089), False, 'import os\n'), ((5126, 5159), 'pyshrimp.write_to_file', 'write_to_file', (['file_path', 'content'], {}), '(file_path, content)\n', (5139, 5159), False, 'from pyshrimp import glob_ls, write_to_file, read_file, read_file_bin, chmod_set, chmod_unset\n'), ((5183, 5203), 'pyshrimp.read_file', 'read_file', (['file_path'], {}), '(file_path)\n', (5192, 5203), False, 'from pyshrimp import glob_ls, write_to_file, read_file, read_file_bin, chmod_set, chmod_unset\n'), ((5351, 5399), 'os.path.join', 'os.path.join', (['self.dir_path_a', '"""test_file_1.txt"""'], {}), "(self.dir_path_a, 'test_file_1.txt')\n", (5363, 5399), False, 'import os\n'), ((5432, 5465), 'pyshrimp.write_to_file', 'write_to_file', (['file_path', 'content'], {}), '(file_path, content)\n', (5445, 5465), False, 'from pyshrimp import glob_ls, write_to_file, read_file, read_file_bin, chmod_set, chmod_unset\n'), ((5489, 5509), 'pyshrimp.read_file', 'read_file', (['file_path'], {}), '(file_path)\n', (5498, 5509), False, 'from pyshrimp import glob_ls, write_to_file, read_file, read_file_bin, chmod_set, chmod_unset\n'), ((5689, 5737), 'os.path.join', 'os.path.join', (['self.dir_path_a', '"""test_file_1.txt"""'], {}), "(self.dir_path_a, 'test_file_1.txt')\n", (5701, 5737), False, 'import os\n'), ((5775, 5824), 'pyshrimp.write_to_file', 'write_to_file', (['file_path', 'content'], {'open_mode': '"""wb"""'}), "(file_path, content, open_mode='wb')\n", (5788, 5824), False, 'from pyshrimp import glob_ls, write_to_file, read_file, read_file_bin, chmod_set, chmod_unset\n'), ((5848, 5872), 'pyshrimp.read_file_bin', 'read_file_bin', (['file_path'], {}), '(file_path)\n', (5861, 5872), False, 'from pyshrimp import glob_ls, write_to_file, read_file, read_file_bin, chmod_set, chmod_unset\n'), ((6008, 6045), 'os.chmod', 'os.chmod', (['self.file_path_1_b_f1_py', '(0)'], {}), '(self.file_path_1_b_f1_py, 0)\n', (6016, 6045), False, 'import os\n'), ((6144, 6193), 'pyshrimp.chmod_set', 'chmod_set', (['self.file_path_1_b_f1_py', 'stat.S_IXUSR'], {}), '(self.file_path_1_b_f1_py, stat.S_IXUSR)\n', (6153, 6193), False, 'from pyshrimp import glob_ls, write_to_file, read_file, read_file_bin, chmod_set, chmod_unset\n'), ((6311, 6360), 'pyshrimp.chmod_set', 'chmod_set', (['self.file_path_1_b_f1_py', 'stat.S_IWGRP'], {}), '(self.file_path_1_b_f1_py, stat.S_IWGRP)\n', (6320, 6360), False, 'from pyshrimp import glob_ls, write_to_file, read_file, read_file_bin, chmod_set, 
chmod_unset\n'), ((6552, 6650), 'os.chmod', 'os.chmod', (['self.file_path_1_b_f1_py', '(stat.S_IRUSR | stat.S_IWUSR | stat.S_IXGRP | stat.S_IROTH)'], {}), '(self.file_path_1_b_f1_py, stat.S_IRUSR | stat.S_IWUSR | stat.\n S_IXGRP | stat.S_IROTH)\n', (6560, 6650), False, 'import os\n'), ((6844, 6895), 'pyshrimp.chmod_unset', 'chmod_unset', (['self.file_path_1_b_f1_py', 'stat.S_IRUSR'], {}), '(self.file_path_1_b_f1_py, stat.S_IRUSR)\n', (6855, 6895), False, 'from pyshrimp import glob_ls, write_to_file, read_file, read_file_bin, chmod_set, chmod_unset\n'), ((7139, 7190), 'pyshrimp.chmod_unset', 'chmod_unset', (['self.file_path_1_b_f1_py', 'stat.S_IROTH'], {}), '(self.file_path_1_b_f1_py, stat.S_IROTH)\n', (7150, 7190), False, 'from pyshrimp import glob_ls, write_to_file, read_file, read_file_bin, chmod_set, chmod_unset\n'), ((468, 486), 'os.stat', 'os.stat', (['file_path'], {}), '(file_path)\n', (475, 486), False, 'import os\n'), ((1642, 1678), 'pyshrimp.utils.filesystem.ls', 'ls', (['self.dir_path_b'], {'files_only': '(True)'}), '(self.dir_path_b, files_only=True)\n', (1644, 1678), False, 'from pyshrimp.utils.filesystem import ls\n'), ((1861, 1896), 'pyshrimp.utils.filesystem.ls', 'ls', (['self.dir_path_b'], {'dirs_only': '(True)'}), '(self.dir_path_b, dirs_only=True)\n', (1863, 1896), False, 'from pyshrimp.utils.filesystem import ls\n'), ((2083, 2123), 'pyshrimp.utils.filesystem.ls', 'ls', (['self.dir_path_a', '"""b"""'], {'dirs_only': '(True)'}), "(self.dir_path_a, 'b', dirs_only=True)\n", (2085, 2123), False, 'from pyshrimp.utils.filesystem import ls\n'), ((2449, 2501), 'pyshrimp.utils.filesystem.ls', 'ls', (['self.dir_path_a'], {'dirs_only': '(True)', 'files_only': '(True)'}), '(self.dir_path_a, dirs_only=True, files_only=True)\n', (2451, 2501), False, 'from pyshrimp.utils.filesystem import ls\n'), ((2838, 2895), 'pyshrimp.glob_ls', 'glob_ls', (['self.dir_path_a'], {'dirs_only': '(True)', 'files_only': '(True)'}), '(self.dir_path_a, dirs_only=True, files_only=True)\n', (2845, 2895), False, 'from pyshrimp import glob_ls, write_to_file, read_file, read_file_bin, chmod_set, chmod_unset\n'), ((3135, 3206), 'pyshrimp.glob_ls', 'glob_ls', (['self.dir_path_b', '"""**"""', '"""*.py"""'], {'files_only': '(True)', 'recursive': '(True)'}), "(self.dir_path_b, '**', '*.py', files_only=True, recursive=True)\n", (3142, 3206), False, 'from pyshrimp import glob_ls, write_to_file, read_file, read_file_bin, chmod_set, chmod_unset\n'), ((3444, 3511), 'pyshrimp.glob_ls', 'glob_ls', (['self.dir_path_a', '"""**"""', '"""*"""'], {'dirs_only': '(True)', 'recursive': '(True)'}), "(self.dir_path_a, '**', '*', dirs_only=True, recursive=True)\n", (3451, 3511), False, 'from pyshrimp import glob_ls, write_to_file, read_file, read_file_bin, chmod_set, chmod_unset\n'), ((3663, 3730), 'pyshrimp.glob_ls', 'glob_ls', (['self.dir_path_a', '"""**"""', '"""b"""'], {'dirs_only': '(True)', 'recursive': '(True)'}), "(self.dir_path_a, '**', 'b', dirs_only=True, recursive=True)\n", (3670, 3730), False, 'from pyshrimp import glob_ls, write_to_file, read_file, read_file_bin, chmod_set, chmod_unset\n'), ((3990, 4052), 'pyshrimp.glob_ls', 'glob_ls', (['self.dir_path_a', '"""**"""'], {'dirs_only': '(True)', 'recursive': '(True)'}), "(self.dir_path_a, '**', dirs_only=True, recursive=True)\n", (3997, 4052), False, 'from pyshrimp import glob_ls, write_to_file, read_file, read_file_bin, chmod_set, chmod_unset\n'), ((4441, 4496), 'pyshrimp.glob_ls', 'glob_ls', (['self.dir_path_a', '"""**"""', '"""*.py"""'], {'files_only': '(True)'}), 
"(self.dir_path_a, '**', '*.py', files_only=True)\n", (4448, 4496), False, 'from pyshrimp import glob_ls, write_to_file, read_file, read_file_bin, chmod_set, chmod_unset\n'), ((4822, 4873), 'os.path.join', 'os.path.join', (['self.dir_path_a', '"""**"""', '"""no-such-file"""'], {}), "(self.dir_path_a, '**', 'no-such-file')\n", (4834, 4873), False, 'import os\n')] |
import threading
'''
An abstract class for scanning data from physical media to program
'''
class Scanner(threading.Thread):
def __init__(self, parent):
threading.Thread.__init__(self)
self.parent = parent
def run(self):
pass
def pause(self):
pass
def resume(self):
pass | [
"threading.Thread.__init__"
] | [((166, 197), 'threading.Thread.__init__', 'threading.Thread.__init__', (['self'], {}), '(self)\n', (191, 197), False, 'import threading\n')] |
# Copyright 2019 PrivateStorage.io, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tests for our custom Hypothesis strategies.
"""
from allmydata.client import config_from_string
from fixtures import TempDir
from hypothesis import given, note
from hypothesis.strategies import data, just, one_of
from testtools import TestCase
from .strategies import share_parameters, tahoe_config_texts
class TahoeConfigsTests(TestCase):
"""
Tests for ``tahoe_configs``.
"""
@given(data())
def test_parses(self, data):
"""
Configurations built by the strategy can be parsed.
"""
tempdir = self.useFixture(TempDir())
config_text = data.draw(
tahoe_config_texts(
storage_client_plugins={},
shares=one_of(
just((None, None, None)),
share_parameters(),
),
),
)
note(config_text)
config_from_string(
tempdir.join("tahoe.ini"),
"tub.port",
config_text.encode("utf-8"),
)
| [
"hypothesis.strategies.just",
"hypothesis.note",
"hypothesis.strategies.data",
"fixtures.TempDir"
] | [((1443, 1460), 'hypothesis.note', 'note', (['config_text'], {}), '(config_text)\n', (1447, 1460), False, 'from hypothesis import given, note\n'), ((996, 1002), 'hypothesis.strategies.data', 'data', ([], {}), '()\n', (1000, 1002), False, 'from hypothesis.strategies import data, just, one_of\n'), ((1155, 1164), 'fixtures.TempDir', 'TempDir', ([], {}), '()\n', (1162, 1164), False, 'from fixtures import TempDir\n'), ((1325, 1349), 'hypothesis.strategies.just', 'just', (['(None, None, None)'], {}), '((None, None, None))\n', (1329, 1349), False, 'from hypothesis.strategies import data, just, one_of\n')] |
#!/usr/bin/env python3
import os
#if can read file:
if os.path.isfile('/Users/m_lves/Downloads/toys-datasets/diabetes.data'):
print('I have a file to process')
file=open('/Users/m_lves/Downloads/toys-datasets/diabetes.data')
for line in file.readlines():
print(line)
else:
print('Boo, no file for me.')
| [
"os.path.isfile"
] | [((56, 125), 'os.path.isfile', 'os.path.isfile', (['"""/Users/m_lves/Downloads/toys-datasets/diabetes.data"""'], {}), "('/Users/m_lves/Downloads/toys-datasets/diabetes.data')\n", (70, 125), False, 'import os\n')] |
from lxml import etree
from span_data import Cspan
class Ccoreference:
def __init__(self,node=None,type='NAF'):
self.type = type
if node is None:
self.node = etree.Element('coref')
else:
self.node = node
def get_id(self):
if self.type == 'NAF':
return self.node.get('id')
elif self.type == 'KAF':
return self.node.get('coid')
def get_spans(self):
for node_span in self.node.findall('span'):
yield Cspan(node_span)
class Ccoreferences:
def __init__(self,node=None, type='NAF'):
self.type = type
if node is None:
self.node = etree.Element('coreferences')
else:
self.node = node
def __get_corefs_nodes(self):
for coref_node in self.node.findall('coref'):
yield coref_node
def get_corefs(self):
for coref_node in self.__get_corefs_nodes():
yield Ccoreference(coref_node,self.type)
def to_kaf(self):
if self.type == 'NAF':
for node_coref in self.__get_corefs_nodes():
node_coref.set('coid',node_coref.get('id'))
del node_coref.attrib['id']
def to_naf(self):
if self.type == 'KAF':
for node_coref in self.__get_corefs_nodes():
node_coref.set('id',node_coref.get('coid'))
del node_coref.attrib['coid']
| [
"lxml.etree.Element",
"span_data.Cspan"
] | [((191, 213), 'lxml.etree.Element', 'etree.Element', (['"""coref"""'], {}), "('coref')\n", (204, 213), False, 'from lxml import etree\n'), ((713, 742), 'lxml.etree.Element', 'etree.Element', (['"""coreferences"""'], {}), "('coreferences')\n", (726, 742), False, 'from lxml import etree\n'), ((536, 552), 'span_data.Cspan', 'Cspan', (['node_span'], {}), '(node_span)\n', (541, 552), False, 'from span_data import Cspan\n')] |
import pygame
import os
import random
pygame.init()
Width, Height = 770,770
Win = pygame.display.set_mode((Width, Height))
Bg = (0,0,0)
Cross = pygame.transform.scale(pygame.image.load(os.path.join("Assets", "cross.png")), (Width//3, Height//3))
Circle = pygame.transform.scale(pygame.image.load(os.path.join("Assets", "circle.png")), (Width//3, Height//3))
clock = pygame.time.Clock()
def fill(surface, color):
w,h = surface.get_size()
r,g,b,_ = color
for x in range(w):
for y in range(h):
a = surface.get_at((x,y))[3]
surface.set_at((x,y), pygame.Color(r,g,b,a))
def check_game(board, player):
for row in board:
if row[0] == row[1] == row[2] == player:
print(player, "gagne")
return True
for col in range(len(board)):
check = []
for row in board:
check.append(row[col])
if check.count(player) == len(check) and check[0] != 0:
print(player, "gagne")
return True
diags = []
for indx in range(len(board)):
diags.append(board[indx][indx])
if diags.count(player) == len(diags) and diags[0] != 0:
print(player, "gagne")
return True
diags_2 = []
for indx, rev_indx in enumerate(reversed(range(len(board)))):
print(indx, rev_indx)
diags_2.append(board[indx][rev_indx])
if diags_2.count(player) == len(diags_2) and diags_2[0] != 0:
print(player, "gagne")
return True
if len(empty_cells(board)) == 0:
print("personne ne gagne")
return True
def create_board():
new_board = [[0 for i in range(3)] for j in range(3)]
return new_board
def empty_cells(board):
empty_cells = []
for y, row in enumerate(board):
for x,case in enumerate(row):
if case == 0:
empty_cells.append([x,y])
return empty_cells
def valid_locations(board,x,y):
if [x,y] in empty_cells(board):
return True
else:
return False
def set_locations(game_board,x,y,player):
if valid_locations(game_board,x,y):
game_board[y][x] = player
return True
else:
return False
def draw_board(Win):
for i in range(1,3):
pygame.draw.line(Win, (255,255,255), (Width*(i/3), 0), (Width*(i/3), Height), 1)
for j in range(1,3):
pygame.draw.line(Win, (255,255,255), (0, Width*(j/3)), (Width, Width*(j/3)), 1)
def draw_pieces(Win, board):
for x in range(len(board)):
for y in range(len(board)):
if board[y][x] == -1:
Win.blit(Circle, (x*(Width//3), y*(Width//3)))
elif board[y][x] == 1:
Win.blit(Cross, (x*(Width//3), y*(Width//3)))
def reset_board(game_board):
for x,row in enumerate(game_board):
for y in range(len(row)):
game_board[y][x] = 0
def redraw_window(Win, board):
Win.fill(Bg)
draw_board(Win)
draw_pieces(Win, board)
pygame.display.update()
def main():
start = [-1,1]
player = random.choice(start)
run = True
game_over = False
game_board = create_board()
FPS = 120
green = (0,255,0,0)
while run:
fill(Circle, green)
clock.tick(FPS)
redraw_window(Win, game_board)
for event in pygame.event.get():
if event.type == pygame.QUIT:
quit()
run = False
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_SPACE and game_over:
reset_board(game_board)
game_over = False
if event.type == pygame.MOUSEBUTTONDOWN and not game_over:
pos = pygame.mouse.get_pos()
if set_locations(game_board, pos[0]//(Width//3), pos[1]//(Width//3), player):
if check_game(game_board, player):
print("over")
game_over = True
if player == -1:
player = 1
else:
player = -1
main()
| [
"random.choice",
"pygame.init",
"pygame.draw.line",
"pygame.event.get",
"pygame.display.set_mode",
"os.path.join",
"pygame.mouse.get_pos",
"pygame.time.Clock",
"pygame.Color",
"pygame.display.update"
] | [((39, 52), 'pygame.init', 'pygame.init', ([], {}), '()\n', (50, 52), False, 'import pygame\n'), ((85, 125), 'pygame.display.set_mode', 'pygame.display.set_mode', (['(Width, Height)'], {}), '((Width, Height))\n', (108, 125), False, 'import pygame\n'), ((372, 391), 'pygame.time.Clock', 'pygame.time.Clock', ([], {}), '()\n', (389, 391), False, 'import pygame\n'), ((2972, 2995), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (2993, 2995), False, 'import pygame\n'), ((3041, 3061), 'random.choice', 'random.choice', (['start'], {}), '(start)\n', (3054, 3061), False, 'import random\n'), ((190, 225), 'os.path.join', 'os.path.join', (['"""Assets"""', '"""cross.png"""'], {}), "('Assets', 'cross.png')\n", (202, 225), False, 'import os\n'), ((301, 337), 'os.path.join', 'os.path.join', (['"""Assets"""', '"""circle.png"""'], {}), "('Assets', 'circle.png')\n", (313, 337), False, 'import os\n'), ((2245, 2340), 'pygame.draw.line', 'pygame.draw.line', (['Win', '(255, 255, 255)', '(Width * (i / 3), 0)', '(Width * (i / 3), Height)', '(1)'], {}), '(Win, (255, 255, 255), (Width * (i / 3), 0), (Width * (i / \n 3), Height), 1)\n', (2261, 2340), False, 'import pygame\n'), ((2360, 2453), 'pygame.draw.line', 'pygame.draw.line', (['Win', '(255, 255, 255)', '(0, Width * (j / 3))', '(Width, Width * (j / 3))', '(1)'], {}), '(Win, (255, 255, 255), (0, Width * (j / 3)), (Width, Width *\n (j / 3)), 1)\n', (2376, 2453), False, 'import pygame\n'), ((3297, 3315), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (3313, 3315), False, 'import pygame\n'), ((593, 617), 'pygame.Color', 'pygame.Color', (['r', 'g', 'b', 'a'], {}), '(r, g, b, a)\n', (605, 617), False, 'import pygame\n'), ((3694, 3716), 'pygame.mouse.get_pos', 'pygame.mouse.get_pos', ([], {}), '()\n', (3714, 3716), False, 'import pygame\n')] |
from django.test import TestCase
from django.urls import reverse
class TestAutomaticLoginMiddleware(TestCase):
def setUp(self):
self.admin_url = reverse('admin:index')
def test_automatic_login_middleware_does_not_require_login_to_access_admin(self):
ret = self.client.get(self.admin_url)
# Would be 302 redirect to the login page without the middleware
self.assertEqual(ret.status_code, 200)
| [
"django.urls.reverse"
] | [((159, 181), 'django.urls.reverse', 'reverse', (['"""admin:index"""'], {}), "('admin:index')\n", (166, 181), False, 'from django.urls import reverse\n')] |
# -*- coding: utf-8 -*-
import Gradient
import pygame
class GradientMode:
def __init__(self):
self.isActive = False
self.surface = None
self.gradientWidth = 1000
self.grad1 = Gradient.polylinear_gradient(
["#5522aa", "#ff0000", "#11aaff", "#0f0f00"], self.gradientWidth)
self.grad2 = Gradient.polylinear_gradient(
["#ffff00", "#ffffff", "#11aaff", "#0000ff"], self.gradientWidth)
self.grad1Alpha = 255
self.grad2Alpha = 0
self.way = 1
def run(self):
if self.surface is not None and self.isActive is True:
mod = 0
self.grad2Alpha += self.way
if self.grad2Alpha > 255:
self.way = -1;
if self.grad2Alpha <= 0:
self.way = 1
surface1 = pygame.Surface(self.surface.get_size())
surface1.set_alpha(self.grad1Alpha)
surface2 = pygame.Surface(self.surface.get_size())
surface2.set_alpha(self.grad2Alpha)
for y in range(self.surface.get_height()):
if mod >= len(self.grad1.get('g')):
mod = 0
pygame.draw.rect(surface1,
(self.grad1.get('r')[mod],self.grad1.get('g')[mod],self.grad1.get('b')[mod]),
(0,y,self.surface.get_width(),2)
)
pygame.draw.rect(surface2,
(self.grad2.get('r')[mod],self.grad2.get('g')[mod],self.grad2.get('b')[mod]),
(0,y,self.surface.get_width(),2)
)
mod +=1
self.surface.blit(surface1, (0, 0))
self.surface.blit(surface2, (0, 0))
| [
"Gradient.polylinear_gradient"
] | [((214, 312), 'Gradient.polylinear_gradient', 'Gradient.polylinear_gradient', (["['#5522aa', '#ff0000', '#11aaff', '#0f0f00']", 'self.gradientWidth'], {}), "(['#5522aa', '#ff0000', '#11aaff', '#0f0f00'],\n self.gradientWidth)\n", (242, 312), False, 'import Gradient\n'), ((343, 441), 'Gradient.polylinear_gradient', 'Gradient.polylinear_gradient', (["['#ffff00', '#ffffff', '#11aaff', '#0000ff']", 'self.gradientWidth'], {}), "(['#ffff00', '#ffffff', '#11aaff', '#0000ff'],\n self.gradientWidth)\n", (371, 441), False, 'import Gradient\n')] |
#!-*- coding:utf-8 -*-
#!/usr/bin/env python
#MOPERプレイヤー
import os
import template_select
from google.appengine.ext import webapp
from google.appengine.ext import db
from myapp.MesThread import MesThread
from myapp.Bbs import Bbs
from myapp.MappingId import MappingId
from myapp.Alert import Alert
from myapp.CssDesign import CssDesign
class MoperPlayer(webapp.RequestHandler):
def get(self):
host_url=MappingId.mapping_host_with_scheme(self.request)+"/"
bbs_key=self.request.get("bbs_key")
thread_key=self.request.get("thread_key")
try:
bbs = db.get(self.request.get("bbs_key"))
thread = db.get(self.request.get("thread_key"))
except:
bbs = None
thread = None
if(bbs==None or thread==None):
self.response.out.write(Alert.alert_msg("スレッドが見つかりません。",self.request.host))
return
image=thread.image_key#db.get(thread.image);
width=400
height=400
if(image.width):
width=image.width
if(image.height):
height=image.height
if(self.request.get("width")):
width=self.request.get("width")
if(self.request.get("height")):
height=self.request.get("height")
if(CssDesign.is_iphone(self)==1):
if(width>=300):
height=300*height/width
width=300
template_values = {
'host': host_url,
'bbs': bbs,
'bbs_key': bbs_key,
'thread': thread,
'thread_key': thread_key,
'width': width,
'height': height
}
path = '/html/moper/moper_embedded.htm'
self.response.out.write(template_select.render(path, template_values))
| [
"myapp.Alert.Alert.alert_msg",
"myapp.MappingId.MappingId.mapping_host_with_scheme",
"template_select.render",
"myapp.CssDesign.CssDesign.is_iphone"
] | [((409, 457), 'myapp.MappingId.MappingId.mapping_host_with_scheme', 'MappingId.mapping_host_with_scheme', (['self.request'], {}), '(self.request)\n', (443, 457), False, 'from myapp.MappingId import MappingId\n'), ((1119, 1144), 'myapp.CssDesign.CssDesign.is_iphone', 'CssDesign.is_iphone', (['self'], {}), '(self)\n', (1138, 1144), False, 'from myapp.CssDesign import CssDesign\n'), ((1453, 1498), 'template_select.render', 'template_select.render', (['path', 'template_values'], {}), '(path, template_values)\n', (1475, 1498), False, 'import template_select\n'), ((752, 803), 'myapp.Alert.Alert.alert_msg', 'Alert.alert_msg', (['"""スレッドが見つかりません。"""', 'self.request.host'], {}), "('スレッドが見つかりません。', self.request.host)\n", (767, 803), False, 'from myapp.Alert import Alert\n')] |
#!/usr/bin/env python3
#-------------------------------------------------------------------+
#
# Advent of Code - Day 1 - Part 2
#
#-------------------------------------------------------------------+
#-------------------------------------------------------------------+
# dependencier
#-------------------------------------------------------------------+
import os
from typing import List
#-------------------------------------------------------------------+
# main algorithm
#-------------------------------------------------------------------+
class Algorithm:
def __init__(self):
pass
# loads data from input file
def _load_data(self, file:str) -> List[int]:
try:
entries = [int(data.rstrip()) for data in open(file)]
except Exception as e:
print("Exception: {}".format(e))
return [-1]
else:
return entries
# finds a matching pair
def _find_match(self, entries:List[int]) -> List[int]:
pos_x = -1
pos_y = -1
found = False
max_entry = len(entries)
for rx in range(max_entry):
for ry in range(max_entry):
for rz in range(max_entry):
sum_entries = entries[rx] + entries[ry] + entries[rz]
if sum_entries != 2020: # no match? go to next entry combo
continue
# else do calculations
found = True
pos_x = rx
pos_y = ry
pos_z = rz
break
#endfor
if found == True:
break
#endfor
return [pos_x, pos_y, pos_z]
# call this "public" function for execute algorithm
def execute(self, file: str) -> int:
entries = self._load_data(file)
[found_x, found_y, found_z] = self._find_match(entries)
x = entries[found_x]
y = entries[found_y]
z = entries[found_z]
prod_lines = x * y * z
print("{} + {} + {} = 2020".format(x, y, z))
return prod_lines
#-------------------------------------------------------------------+
# startup
#-------------------------------------------------------------------+
if __name__ == "__main__":
separator = "\r\n+----------------------------+\r\n"
filename = "{}/input.txt".format(os.path.dirname(__file__))
print(separator)
alg = Algorithm()
data = alg.execute(filename)
print("\nProduct total: {}".format(data))
print(separator) | [
"os.path.dirname"
] | [((2035, 2060), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (2050, 2060), False, 'import os\n')] |