id (stringlengths 1-265) | text (stringlengths 6-5.19M) | dataset_id (stringclasses 7 values)
---|---|---|
3327850 | <filename>src/api/views/countdown.py
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework.status import HTTP_400_BAD_REQUEST, HTTP_200_OK
from api.serializers.countdown_seralizers import (
CountdownSerializer,
CountdownDeleteSerializer,
)
from covid19.models import Countdown
class CountdownGetAPI(APIView):
def get(self, request):
countdowns = Countdown.objects.order_by("target_date").values(
"id", "target", "target_date"
)
return Response(status=HTTP_200_OK, data=countdowns)
class CountdownCreateAPI(APIView):
def post(self, request):
serializer = CountdownSerializer(data=request.data)
if not serializer.is_valid(raise_exception=True):
return Response(status=HTTP_400_BAD_REQUEST)
countdown = serializer.save()
return Response(status=HTTP_200_OK, data={"id": countdown.id})
class CountdownDeleteAPI(APIView):
def post(self, request):
serializer = CountdownDeleteSerializer(data=request.data)
if not serializer.is_valid(raise_exception=True):
return Response(status=HTTP_400_BAD_REQUEST)
id = serializer.validated_data["id"]
countdown = Countdown.objects.get(id=id)
countdown.delete()
return Response(status=HTTP_200_OK)
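
# --- Hedged usage sketch (not part of the original file) ---
# How these views might be routed in a separate urls.py; the paths and names
# below are hypothetical and the project's real routing may differ.
from django.urls import path
from api.views.countdown import (
    CountdownGetAPI,
    CountdownCreateAPI,
    CountdownDeleteAPI,
)

urlpatterns = [
    path("countdowns/", CountdownGetAPI.as_view(), name="countdown-list"),
    path("countdowns/create/", CountdownCreateAPI.as_view(), name="countdown-create"),
    path("countdowns/delete/", CountdownDeleteAPI.as_view(), name="countdown-delete"),
]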
| StarcoderdataPython |
150123 | <filename>DICOMOFFIS/admin.py<gh_stars>0
from django.contrib import admin
from .models import eintrag
# Register your models here.
admin.site.register(eintrag) | StarcoderdataPython |
118711 | <reponame>jamesjh-lee/xor
import os, sys
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'  # must be set before tensorflow is imported to take effect
import tensorflow as tf
sys.setrecursionlimit(1500)
import argparse
import numpy as np
from tensorflow.keras.optimizers import Adam, SGD
from tensorflow.keras.layers import Input, Dense
from tensorflow.keras.models import Model
from tensorflow.compat.v1.keras.callbacks import EarlyStopping, LearningRateScheduler
from tensorflow.compat.v1.keras import backend as K
from tensorflow.compat.v1 import Session, ConfigProto, GPUOptions, get_default_graph, global_variables_initializer
from bcolors import *
from utils import *
from sympy import *
import sympy.plotting as plt
HOME = os.path.split(os.path.abspath(__file__))[0]
def parse_args():
desc = 'Plot the output of XOR trained by DNN'
parser = argparse.ArgumentParser(description=desc)
parser.add_argument('--hidden_layers', type=int, default=1, help='number of hidden layers, default 1')
parser.add_argument('--units', type=int, default=2, help='the unit size of a Dense layer, default 2')
parser.add_argument('--activation', type=str, default='sigmoid', help='activation function (sigmoid, tanh or linear), default sigmoid')
parser.add_argument('--optimizer', type=str, default='adam', help='optimizer, default Adam')
parser.add_argument('--loss', type=str, default='mse', help='loss function, default mse')
parser.add_argument('--metrics', action='store_true', help='compile the model with an accuracy metric')
parser.add_argument('--learning_rate', type=float, default=0.001, help='learning rate')
parser.add_argument('--scheduler', action='store_true', help='use a learning rate scheduler')
parser.add_argument('--epochs', type=int, default=1000, help='number of training epochs')
parser.add_argument('--patience', type=int, default=50, help='early-stopping patience in epochs')
parser.add_argument('--save_filename', type=str, default='xor.png', help='image filename')
return check_args(parser.parse_args())
def check_args(args):
# check hidden layers
try:
assert args.hidden_layers >= 1
except:
print(WARNING + 'WARNING the number of hidden_layers must be greater than 0, set hidden_layers = 1' + ENDC)
args.hidden_layers = 1
# check activation
try:
# args.activation = args.activation.lower()
assert args.activation in ('sigmoid', 'tanh', 'linear')
except:
print('activation function must be one of [sigmoid, tanh, linear], set activation = sigmoid')
args.activation = 'sigmoid'
# check optimizer
try:
args.optimizer = args.optimizer.lower()
assert args.optimizer in ('adam', 'sgd', 'rmsprop', 'adadelta', 'adamax', 'adagrad', 'nadam')
except:
args.optimizer = 'adam'
# check loss
try:
args.loss = args.loss.lower()
assert args.loss in ('mse', 'mae', 'binary_crossentropy')
except:
args.loss = 'mse'
return args
def build(args):
# input layer
inputs = Input(shape=(2,))
hidden = None
for i in range(args.hidden_layers):
if i == 0:
hidden = Dense(args.units, activation=args.activation)(inputs)
else:
hidden = Dense(args.units, activation=args.activation)(hidden)
if args.activation == 'linear':
outputs = Dense(1, activation='linear')(hidden)
else:
outputs = Dense(1, activation='sigmoid')(hidden)
return Model(inputs,outputs)
def get_optimizer(optimizer):
if optimizer == 'sgd':
return SGD
return Adam
def build_output(args, weights):
output = None
# set input variables
x1, x2 = symbols('x1, x2')
# create symbols
variables = {}
ws = {}
bs = {}
w = 1
b = 1
W = []
B = []
for i, weight in enumerate(weights):
layer = int(np.ceil((i+1) / 2))
tmp_w = []
tmp_b = []
xx = []
yy = []
if len(weight.shape) >= 2:
for c in weight:
for d in c:
if i % 2 == 0:
# set weight
tmp_w.append(d)
ww = 'w' + str(w)
tmp = symbols(ww)
xx.append(tmp)
variables[ww] = None
w += 1
else:
# set bias
tmp_b.append(d)
bb = 'b' + str(b)
tmp = symbols(bb)
yy.append(tmp)
variables[bb] = None
b += 1
else:
for d in weight:
if i % 2 == 0:
# set weight
tmp_w.append(d)
ww = 'w' + str(w)
tmp = symbols(ww)
xx.append(tmp)
variables[ww] = None
w += 1
else:
# set bias
tmp_b.append(d)
bb = 'b' + str(b)
tmp = symbols(bb)
yy.append(tmp)
variables[bb] = None
b += 1
if tmp_w:
ws[layer] = tmp_w
else:
bs[layer] = tmp_b
if xx:
W.append(xx)
if yy:
B.append(yy)
# set weights and bias
bias = 1
weight = 1
for l in range(1, args.hidden_layers+1):
for k in bs[l]:
variables['b'+str(bias)] = k
bias += 1
tmp = []
for c in range(args.units):
tmp.append([x for idx, x in enumerate(ws[l]) if idx % args.units == c ])
tmp = np.array(tmp).flatten()
for k in tmp:
variables['w'+str(weight)] = k
weight += 1
ll = list(bs.keys())[-1]
for x in ws[ll]:
variables['w'+str(weight)] = x
weight += 1
variables['b'+str(bias)] = bs[ll][0]
# set formular
s = symbols('s')
l, a = symbols('lambda, alpha')
f = None
if args.activation == 'sigmoid':
f = 1/(1+exp(-s))
elif args.activation == 'tanh':
f = (exp(2*s)-1)/(exp(2*s)+1)
else:
# 'linear': identity activation, so the symbolic output is the pre-activation itself
f = s
X = Matrix([[x1, x2]])
for i, v in enumerate(zip(W,B)):
w, b = v
if i == 0:
tmp = []
for c in range(2):
tmp.append([x for idx, x in enumerate(w) if idx % 2 == c ])
W[i] = Matrix(tmp)
B[i] = Matrix([b])
l = X*W[i] + B[i]
L = Matrix([[f.subs(s, l[x]) for x in range(args.units)]])
elif i == args.hidden_layers:
W[i] = Matrix(w)
B[i] = Matrix(b)
l = L * W[i] + B[i]
L = f.subs(s, l[0])
else:
tmp = []
for c in range(args.units):
tmp.append([x for idx, x in enumerate(w) if idx % args.units == c ])
W[i] = Matrix(tmp)
B[i] = Matrix([b])
l = L * W[i] + B[i]
L = Matrix([[f.subs(s, l[x]) for x in range(args.units)]])
output = L.subs(variables)
print(output)
return output
def main(args):
# set xor dataset
x_train = np.array([[0,0],[0,1],[1,0], [1,1]], dtype=np.float32)
y_train = np.array([[0],[1],[1],[0]], dtype=np.float32)
print('Start building a DNN model')
# fitting model
sched, early = None, None
callbacks = []
early = EarlyStopping(monitor='loss', patience=args.patience)
callbacks.append(early)
if args.scheduler:
sched = LearningRateScheduler(scheduler, verbose=1)
callbacks.append(sched)
y_pred = None
cnt = 1
while acc(y_train, y_pred) != 1.0:
# build model
model = build(args)
print(cnt, 'built a model')
# model.summary()
# compile model
optimizer = get_optimizer(args.optimizer)
if args.metrics:
model.compile(optimizer=optimizer(args.learning_rate), loss=args.loss, metrics=[acc])
else:
model.compile(optimizer=optimizer(args.learning_rate), loss=args.loss)
print(cnt, 'compiled a model')
print(cnt, 'start fitting a model')
hist = model.fit(x_train, y_train, epochs=args.epochs, callbacks=callbacks, verbose=0)
y_pred = np.where(model.predict(x_train) > 0.5, 1, 0)
print(cnt, 'finished training, train loss:', hist.history['loss'][-1])
cnt += 1
# check output
output = build_output(args, model.get_weights())
# plot output
x1, x2 = symbols('x1, x2')
print('plotting output of a model')
p = plt.plot3d(output, (x1, -2, 3), (x2, -2, 3), show=False)
p.save(HOME+'/results/'+args.save_filename)
p.show()
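
# --- Hedged sketch (not part of the original script) ---
# A stripped-down version of the same XOR fit, using the imports above but none
# of the argparse/sympy machinery; hyper-parameters are assumed, and with only
# two sigmoid hidden units a single run may not converge (main() retries for
# exactly that reason).
def quick_xor_demo():
    x = np.array([[0, 0], [0, 1], [1, 0], [1, 1]], dtype=np.float32)
    y = np.array([[0], [1], [1], [0]], dtype=np.float32)
    inputs = Input(shape=(2,))
    hidden = Dense(2, activation='sigmoid')(inputs)
    outputs = Dense(1, activation='sigmoid')(hidden)
    model = Model(inputs, outputs)
    model.compile(optimizer=Adam(0.01), loss='mse')
    model.fit(x, y, epochs=2000, verbose=0)
    print(np.where(model.predict(x) > 0.5, 1, 0))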
if __name__ == '__main__':
# parse arguments
args = parse_args()
if args is None:
sys.exit()
# main
print(args)
main(args)
| StarcoderdataPython |
194735 | #!/usr/bin/env python
###########################################################################################
# Implementation of illustrating results. (Average reward for each episode)
# Author for codes: <NAME>(<EMAIL>)
# Reference: https://github.com/Kchu/LifelongRL
###########################################################################################
# Python imports.
import os
from simple_rl.utils import chart_utils
def _get_MDP_name(data_dir):
'''
Args:
data_dir (str)
Returns:
(list)
'''
try:
params_file = open(os.path.join(data_dir, "parameters.txt"), "r")
except IOError:
# No param file.
return [agent_file.replace(".csv", "") for agent_file in os.listdir(data_dir) if os.path.isfile(os.path.join(data_dir, agent_file)) and ".csv" in agent_file]
MDP_name = []
for line in params_file.readlines():
if "lifelong-" in line:
MDP_name = line.split(" ")[0].strip()
break
return MDP_name
def main():
'''
Summary:
For manual plotting.
'''
# Parameter
data_dir = ["D:\\MyPapers\\Results_vs_Episodes\\Q-FourRoom\\", "D:\\MyPapers\\Results_vs_Episodes\\Q-Lava\\",
"D:\\MyPapers\\Results_vs_Episodes\\Q-Maze\\", "D:\\MyPapers\\Results_vs_Episodes\\DelayedQ-FourRoom\\",
"D:\\MyPapers\\Results_vs_Episodes\\DelayedQ-Lava\\",
"D:\\MyPapers\\Results_vs_Episodes\\DelayedQ-Maze\\"]
output_dir = "D:\\MyPapers\\Plots\\"
for index in range(len(data_dir)):
cumulative = False
# Format data dir
# data_dir[index] = ''.join(data_dir[index])
# print(data_dir[index])
if data_dir[index][-1] != "\\":
data_dir[index] = data_dir[index] + "\\"
# Set output file name
exp_dir_split_list = data_dir[index].split("\\")
file_name = output_dir + exp_dir_split_list[-2] + '-Episode.pdf'
# Grab agents.
agent_names = chart_utils._get_agent_names(data_dir[index])
if len(agent_names) == 0:
raise ValueError("Error: no csv files found.")
# Grab experiment settings
episodic = chart_utils._is_episodic(data_dir[index])
track_disc_reward = chart_utils._is_disc_reward(data_dir[index])
mdp_name = _get_MDP_name(data_dir[index])
# Plot.
chart_utils.make_plots(data_dir[index], agent_names, cumulative=cumulative, episodic=episodic, track_disc_reward=track_disc_reward, figure_title=mdp_name, plot_file_name=file_name)
if __name__ == "__main__":
main() | StarcoderdataPython |
1667511 | <reponame>MStarmans91/WORC
#!/usr/bin/env python
# Copyright 2016-2020 Biomedical Imaging Group Rotterdam, Departments of
# Medical Informatics and Radiology, Erasmus MC, Rotterdam, The Netherlands
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
import os
import numpy as np
from WORC.IOparser.file_io import load_features
import pandas as pd
import WORC.addexceptions as ae
import tikzplotlib
def plot_errors(featurefiles, patientinfo, label_type, featurenames,
posteriors_csv=None, agesex=True, output_png=None,
output_tex=None):
"""Scatterplot of all objects with marking of errors."""
# check some input
if len(featurenames) != 2:
raise ae.WORCValueError(f'Featurenames should be list with two strings, got {featurenames}.')
# Read the features and classification data
print("Reading features and label data.")
label_data, image_features =\
load_features(featurefiles, patientinfo, label_type)
# Read in the scores and labels
if posteriors_csv is not None:
data = pd.read_csv(posteriors_csv)
PIDs = data['PatientID'].values
labels = data['TrueLabel'].values.tolist()
probabilities = data['Probability'].values
# Convert probabilities to correct / not
correct = list()
for label, prob in zip(labels, probabilities):
if prob >= 0.5 and label == 1.0:
correct.append(True)
elif prob < 0.5 and label == 0.0:
correct.append(True)
else:
# incorrect prediction
correct.append(False)
# Select indices of features we need
feature_labels = image_features[0][1]
if featurenames[0] not in feature_labels:
raise ae.WORCKeyError(f'Feature {featurenames[0]} not in feature names.')
else:
feature_1_index = feature_labels.index(featurenames[0])
if featurenames[1] not in feature_labels:
raise ae.WORCKeyError(f'Feature {featurenames[1]} not in feature names.')
else:
feature_2_index = feature_labels.index(featurenames[1])
# Match probabilities to features
feature_1 = list()
feature_2 = list()
for pid, prob in zip(PIDs, probabilities):
if pid not in label_data['patient_IDs']:
raise ae.WORCKeyError(f'Features for {pid} not provided.')
else:
index = label_data['patient_IDs'].tolist().index(pid)
feature_1.append(image_features[index][0][feature_1_index])
feature_2.append(image_features[index][0][feature_2_index])
# Resort based on PID
order = np.argsort(PIDs)
feature_1 = [feature_1[index] for index in order]
feature_2 = [feature_2[index] for index in order]
correct = [correct[index] for index in order]
labels = [labels[index] for index in order]
# Actual plotting
f = plt.figure(figsize=(20, 15))
ax = plt.subplot(1, 1, 1)
coordinates = list()
for index, label in enumerate(labels):
# Check if coordinate has already been plotted
coordinate = (feature_1[index], feature_2[index])
addoffset = 1
sign = 1
while coordinate in coordinates:
# Coordinate plotted, add an x-offset
offset = sign * 0.01 * addoffset
coordinate = (feature_1[index] + offset, feature_2[index])
if sign == 1:
sign = -1
else:
sign = 1
addoffset += 1
coordinates.append(coordinate)
# Red border if classification is incorrect
if not correct[index]:
ax.scatter(coordinate[0], coordinate[1], s=80, marker='o', color='red')
s = 30
else:
s = 50
# Plot point in feature space
if label == 1.0:
ax.scatter(coordinate[0], coordinate[1], s=s, marker='o', color='#7dcfe2')
else:
ax.scatter(coordinate[0], coordinate[1], s=s, marker='o', color='blue')
# Add some labelling etc to the plot
if agesex:
plt.xlabel('Sex', size=12)
plt.ylabel('Age', size=12)
plt.xticks([0, 1], ['Female', 'Male'], size=8)
else:
plt.xlabel(featurenames[0], size=12)
plt.ylabel(featurenames[1], size=12)
# Save output
if output_png is not None:
plt.savefig(output_png, bbox_inches='tight', pad_inches=0)
print(f"Plot saved as {output_png}!")
if output_tex is not None:
tikzplotlib.save(output_tex)
print(f"Plot saved as {output_tex}!")
| StarcoderdataPython |
18723 | #B
def average(As: list) -> float:
return float(sum(As)/len(As))
def main():
# input
As = list(map(int, input().split()))
# compute
# output
print(average(As))
if __name__ == '__main__':
main()
| StarcoderdataPython |
3203677 | <reponame>MiiRaGe/miilibrary
import pytest
import responses
import mock
from celery.exceptions import Retry
from django.core.cache import cache
from django.test import SimpleTestCase
from django.test import TestCase, override_settings
from pyfakefs.fake_filesystem_unittest import TestCase as FakeFsTestCase
from mii_rss.factories import FeedEntriesFactory, FeedFilterFactory
from mii_rss.logic import already_exists, match, get_or_create_downloading_object, get_dict_from_feeds
from mii_rss.models import FeedDownloaded, FeedEntries
from mii_rss.tasks import check_feed_and_download_torrents, recheck_feed_and_download_torrents, \
add_torrent_to_transmission, get_hashed_link
from mii_sorter.models import Season, Episode
from mii_sorter.models import Serie
class TestRSS(TestCase):
def test_match(self):
entry = {
'title': 'homeland s04e09 theres something else going on 1080i hdtv dd5 1 mpeg2-topkek [no rar]'
}
filters = {
'^homeland.*720p',
'^star.wars.rebels.*720p',
'^better.call.saul.*720p'
}
assert not match(entry, filters)[0]
entry = {
'title': 'homeland s04e09 theres something else going on 720p hdtv dd5 1 mpeg2-topkek [no rar]'
}
assert match(entry, filters)[0]
entry = {
'title': 'better call saul s01e01 720p hdtv x264-killers [no rar]'
}
assert match(entry, filters)[0]
def test_episode_does_not_already_exist(self):
db_name = 'Saitama'
title = 'Saitama.S01E01.mkv'
assert not already_exists(db_name, title)
def test_episode_already_exists(self):
serie = Serie.objects.create(name='Saitama')
season = Season.objects.create(number=1, serie=serie)
Episode.objects.create(number=1, season=season, file_size=100, file_path='')
db_name = 'Saitama'
title = 'Saitama.S01E01.mkv'
assert already_exists(db_name, title)
def test_season_does_not_exist(self):
db_name = 'Saitama'
title = 'Saitama.S01.rar'
assert already_exists(db_name, title)
def test_get_or_create_downloading_object_episode_create(self):
db_name = 'Saitama'
title = 'Saitama.S01E01.mkv'
assert get_or_create_downloading_object(db_name, title)
assert not get_or_create_downloading_object(db_name, title)
def test_get_or_create_downloading_object_episode_get(self):
db_name = 'Saitama'
title = 'Saitama.S01E01.mkv'
FeedDownloaded.objects.create(re_filter=db_name, episode=1, season=1)
assert not get_or_create_downloading_object(db_name, title)
def test_get_or_create_downloading_object_season_create(self):
db_name = 'Saitama'
title = 'Saitama.S01'
assert get_or_create_downloading_object(db_name, title)
assert not get_or_create_downloading_object(db_name, title)
def test_get_or_create_downloading_object_season_get(self):
db_name = 'Saitama'
title = 'Saitama.S01'
FeedDownloaded.objects.create(re_filter=db_name, season=1)
assert not get_or_create_downloading_object(db_name, title)
def test_get_or_create_downloading_object_season_get_blocks_episode(self):
db_name = 'Saitama'
title = 'Saitama.S01E01'
FeedDownloaded.objects.create(re_filter=db_name, season=1)
assert not get_or_create_downloading_object(db_name, title)
def test_get_entry_from_feed(self):
class Feed(object):
def __getitem__(self, item):
return item
list_of_feed = [Feed() for x in range(0, 5)]
resulting_dict = get_dict_from_feeds(list_of_feed)
assert resulting_dict == {'entries': [{'title': 'title', 'link': 'link'} for x in range(0, 5)]}
@override_settings(TORRENT_WATCHED_FOLDER='/')
class TestTask(FakeFsTestCase, TestCase):
def setUp(self):
self.setUpPyfakefs()
FeedFilterFactory.create(regex='non_matching', name='test_entry')
@mock.patch('mii_rss.tasks.logger')
@mock.patch('mii_rss.tasks.feedparser')
def test_task_feed_error(self, feedparser, logger):
feedparser.parse.return_value = {'status': 500}
check_feed_and_download_torrents()
assert logger.error.called
@mock.patch('mii_rss.tasks.feedparser')
def test_task_feed_dumping_entries(self, feedparser):
feedparser.parse.return_value = {'status': 200, 'entries': []}
check_feed_and_download_torrents()
assert FeedEntries.objects.all()
@mock.patch('mii_rss.tasks.feedparser')
def test_task_feed(self, feedparser):
feedparser.parse.return_value = {'status': 200, 'entries': [{'title': 'arrow', 'link': None}]}
check_feed_and_download_torrents()
@mock.patch('mii_rss.tasks.add_torrent_to_transmission')
@mock.patch('mii_rss.tasks.feedparser')
def test_task_feed_matching_already_exist(self, feedparser, add_torrent_to_transmission):
self.fs.CreateFile('/test.torrent')
feedparser.parse.return_value = {'status': 200,
'entries': [{'title': 'non_matching', 'link': '/test.torrent?'}]
}
check_feed_and_download_torrents()
assert not add_torrent_to_transmission.delay.called
@mock.patch('mii_rss.tasks.add_torrent_to_transmission')
@mock.patch('mii_rss.tasks.feedparser')
def test_task_feed_matching_downloading(self, feedparser, add_torrent_to_transmission):
feedparser.parse.return_value = {'status': 200,
'entries': [{'title': 'non_matching', 'link': '/test.torrent?'}]
}
check_feed_and_download_torrents()
assert add_torrent_to_transmission.delay.called
@mock.patch('mii_rss.tasks.add_torrent_to_transmission')
@mock.patch('mii_rss.tasks.feedparser')
@mock.patch('mii_rss.tasks.get_or_create_downloading_object')
def test_task_feed_matching_already_downloading(self, get_or_create, feedparser, add_torrent_to_transmission):
get_or_create.return_value = False
feedparser.parse.return_value = {'status': 200,
'entries': [{'title': 'non_matching', 'link': '/test.torrent?'}]
}
check_feed_and_download_torrents()
assert not add_torrent_to_transmission.delay.called
@mock.patch('mii_rss.tasks.process_feeds')
def test_recheck_feeds(self, process_feeds):
FeedEntriesFactory.create_batch(10)
recheck_feed_and_download_torrents()
assert process_feeds.called
@responses.activate
@override_settings(TRANSMISSION_RPC_URL='http://url/')
class TestTaskTransmission(SimpleTestCase):
def test_add_t_to_transmission_retry(self):
url_link = 'http://t_link'
responses.add(responses.GET, url_link,
body='base64,dummy_test', status=200,
content_type='application/text')
responses.add(responses.POST, 'http://url/',
status=409,
headers={'X-Transmission-Session-Id': 'special_key'})
res = add_torrent_to_transmission(url_link)
assert isinstance(res, Retry)
assert cache.get('X-Transmission-Session-Id') == 'special_key'
def test_with_header_and_content_success(self):
url_link = 'http://t_link'
cache.set(get_hashed_link(url_link), 'dummy')
cache.set('X-Transmission-Session-Id', 'special_key')
responses.add(responses.POST, 'http://url/',
status=200,
json={'result': 'success'},
headers={'X-Transmission-Session-Id': 'special_key'})
# exercise the happy path: the call should complete without raising
add_torrent_to_transmission(url_link)
def test_with_header_and_content_almost_success(self):
url_link = 'http://t_link'
cache.set(get_hashed_link(url_link), 'dummy')
cache.set('X-Transmission-Session-Id', 'special_key')
responses.add(responses.POST, 'http://url/',
status=200,
json={'result': 'not a success'},
headers={'X-Transmission-Session-Id': 'special_key'})
with pytest.raises(Exception):
add_torrent_to_transmission(url_link)
def test_with_header_and_content_500(self):
url_link = 'http://t_link'
cache.set(get_hashed_link(url_link), 'dummy')
cache.set('X-Transmission-Session-Id', 'special_key')
responses.add(responses.POST, 'http://url/',
status=500,
body='FAILURE',
headers={'X-Transmission-Session-Id': 'special_key'})
with pytest.raises(Exception):
add_torrent_to_transmission(url_link)
def test_with_header_and_content_400(self):
url_link = 'http://t_link'
cache.set(get_hashed_link(url_link), 'dummy')
cache.set('X-Transmission-Session-Id', 'special_key')
responses.add(responses.POST, 'http://url/',
status=400,
body='FAILURE',
headers={'X-Transmission-Session-Id': 'special_key'})
with pytest.raises(Exception):
add_torrent_to_transmission(url_link)
| StarcoderdataPython |
1764826 | <filename>rover/py_rf_serial/mesh.py
import os
import datetime
import time
#Local imports
import cfg
import rf_uart as ser
import json
import logging as log
on_broadcast = None
on_message = None
on_cmd_response = None
on_log = None
nodes_config = os.getenv('ROVER_NODES_CONFIG','D:\\Dev\\nRF52_Mesh\\applications\\nodes.json')
log.info("using ROVER_NODES_CONFIG : %s",nodes_config)
nodes = cfg.get_local_nodes(nodes_config)
pid = {
"exec_cmd" : 0xEC,
"ping" : 0x01,
"request_pid" : 0x02,
"chan_switch" : 0x03,
"reset" : 0x04,
"alive" : 0x05,
"button" : 0x06,
"light" : 0x07,
"temperature" : 0x08,
"heat" : 0x09,
"bme280" : 0x0A,
"rgb" : 0x0B,
"magnet" : 0x0C,
"dimmer" : 0x0D,
"light_rgb" : 0x0E,
"gesture" : 0x0F,
"proximity" : 0x10,
"humidity" : 0x11,
"pressure" : 0x12,
"acceleration" : 0x13,
"light_n" : 0x14,
"battery" : 0x15,
"text" : 0x16,
"bldc" : 0x17,
"json" : 0x18,
"test_rf_resp" : 0x30,
"sync-prepare" : 0x40,
"sync" : 0x41
}
inv_pid = {v: k for k, v in pid.items()}
exec_cmd = {
"set_node_id" : 0x01,
"get_node_id" : 0x02,
"set_channel" : 0x03,
"get_channel" : 0x04,
"set_tx_power" : 0x05,
"get_tx_power" : 0x06,
"set_param" : 0x07,
"get_param" : 0x08,
}
set_rx = {
"sniff" : 0x00,
"bcast" : 0x01,
"msg" : 0x02,
"resp" : 0x03
}
mode = {
"power_down" : 0x01,
"standby" : 0x02,
"tx_tdby2" : 0x03,
"rx" : 0x04
}
inv_mode = {v: k for k, v in mode.items()}
msg = {
"size":0,
"payload":[]
}
def is_broadcast(hex_byte):
return hex_byte.startswith("0x8")
def parse_pid(byte):
return inv_pid[byte]
def parse_payload(data):
res = ""
if(data[2] == pid["light_n"]):
light = int.from_bytes(bytearray(data[4:6]),'little',signed=False)
res = "%d" %(light)
elif(data[2] == pid["temperature"]):
val = float(int.from_bytes(bytearray(data[4:8]),'big',signed=True)) / 100
res = '{:02.2f}'.format(val)
elif(data[2] == pid["humidity"]):
val = float(int.from_bytes(bytearray(data[4:8]),'big',signed=True)) / 1024
res = '{:02.2f}'.format(val)
elif(data[2] == pid["pressure"]):
val = float(int.from_bytes(bytearray(data[4:8]),'big',signed=True)) / (256*100)
res = '{:02.2f}'.format(val)
elif(data[2] == pid["acceleration"]):
accel_x = float(int.from_bytes(bytearray(data[4:6]),'big',signed=True)) / 16384
accel_y = float(int.from_bytes(bytearray(data[6:8]),'big',signed=True)) / 16384
accel_z = float(int.from_bytes(bytearray(data[8:10]),'big',signed=True)) / 16384
res = '(g) X {:02.2f} ; Y {:02.2f} ; Z {:02.2f}'.format(accel_x,accel_y,accel_z)
elif(data[2] == pid["battery"]):
bat_v = float(int.from_bytes(bytearray(data[4:6]),'big',signed=True)) / 1000
res = 'battery {:02.3f} V'.format(bat_v)
elif(data[2] == pid["button"]):
if(data[4] == 0):
res = 'release'
else:
res = 'press'
if(data[2] == pid["light_rgb"]):
light = int.from_bytes(bytearray(data[4:6]),'big',signed=False)
red = int.from_bytes(bytearray(data[6:8]),'big',signed=False)
green = int.from_bytes(bytearray(data[8:10]),'big',signed=False)
blue = int.from_bytes(bytearray(data[10:12]),'big',signed=False)
res = "light:%d , red:%d , green:%d , blue:%d" % (light,red,green,blue)
return res
def parse_is_broadcast(byte):
return (byte & 0x80)
def node_name(byte):
res ="Unknown"
if(str(byte) in nodes):
res = nodes[str(byte)]["name"]
return res
def publish(msg):
pub = {}
topic = "mesh/"+msg["id"]+"/"+msg["topic"]
del msg["topic"]
del msg["id"]
pub[topic] = json.dumps(msg)
return pub
def line2dict(line):
res = {}
topic_payload = line.split('>')
id_topic = topic_payload[0].split('/')
res["id"] = id_topic[0]
res["topic"] = id_topic[1]
entries = topic_payload[1].split(';')
for entry in entries:
kv = entry.split(':')
if(len(kv)==2):
res[kv[0]] = kv[1]
return res
def command(cmd,params=[]):
cmd_list = [exec_cmd[cmd]]+params
text_cmd = "cmd:0x" + ''.join('%02X' % b for b in cmd_list)+"\r\n"
ser.send(text_cmd)
return
def send_rf(payload):
#print("payload:",payload)
text_msg = "msg:0x"+''.join('%02X' % b for b in payload)+"\r\n"
ser.send(text_msg)
return
def send(payload):
ser.send(payload)
return
def serial_on_line(line):
ldict = line2dict(line)
if(line.endswith(">")):
log.error(f"Error> text size limit with: '{line}'")
if(line.startswith("cmd")):
on_cmd_response(ldict,False)
log.info("cmd resp > "+line)
if("ctrl" in ldict):
if(is_broadcast(ldict["ctrl"])):
on_broadcast(ldict)
else:
if("cmd" in ldict):
on_cmd_response(ldict,True)
log.info("remote cmd resp > "+line)
else:
on_message(ldict)
#log.info("msg > "+line)
else:
on_message(ldict)
return
def run():
ser.run()
return
def start(config,mesh_on_broadcast,mesh_on_message,mesh_on_cmd_response,node_log):
global on_broadcast
global on_message
global on_cmd_response
global on_log
on_broadcast = mesh_on_broadcast
on_cmd_response = mesh_on_cmd_response
on_message = mesh_on_message
on_log = node_log
ser.serial_start(config,serial_on_line)
return
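
# --- Hedged example (not part of the original module) ---
# line2dict() parses the "<id>/<topic>>key:val;key:val" frames received on the
# serial link; the frame below is made up purely for illustration, and running
# this block still requires the rf_uart/cfg dependencies imported at the top.
if __name__ == '__main__':
    sample = "12/msg>ctrl:0x42;pid:0x08;data:0x1234"
    print(line2dict(sample))  # {'id': '12', 'topic': 'msg', 'ctrl': '0x42', 'pid': '0x08', 'data': '0x1234'}
    print(parse_pid(0x08))    # 'temperature'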
| StarcoderdataPython |
3390924 | <gh_stars>1-10
import sys
import csv
from types import GeneratorType
from text_studio.data_loader import DataLoader
csv.field_size_limit(sys.maxsize)
class CsvLoader(DataLoader):
@staticmethod
def load(file, delimiter=","):
# TO DO: Add basic error checking to validate that the file exists
instances = []
reader = csv.DictReader(file, delimiter=delimiter)
for row in reader:
instances.append(row)
return instances
@staticmethod
def save(instances, file, delimiter=","):
if instances:
is_generator = isinstance(instances, GeneratorType)
if is_generator:
first_instance = next(instances)
else:
first_instance = instances[0]
keys = first_instance.keys()
writer = csv.DictWriter(file, delimiter=delimiter, fieldnames=keys)
writer.writeheader()
if is_generator:
writer.writerow(first_instance)
for instance in instances:
writer.writerow(instance)
else:
print("Empty dataset. Skipping file writing.")
| StarcoderdataPython |
3365610 | <gh_stars>0
from django.apps import AppConfig
class AccountConfig(AppConfig):
name = 'nest_box_helper'
verbose_name = 'Nest Box Helper'
| StarcoderdataPython |
4803075 | import sqlite3
db = sqlite3.connect('nba.db')
db.execute("drop table IF EXISTS team")
db.execute("drop table IF EXISTS player")
db.execute("drop table IF EXISTS contract")
db.execute("drop table IF EXISTS hall_of_fame")
db.execute("drop table IF EXISTS position")
db.execute("drop table IF EXISTS all_star_team")
db.execute("drop table IF EXISTS voted_for")
db.execute("drop table IF EXISTS sponsor")
db.execute("drop table IF EXISTS previous_team")
db.execute("drop table IF EXISTS games")
db.execute("drop table IF EXISTS performances")
db.execute("drop table IF EXISTS take_part_in")
db.execute("drop table IF EXISTS coach")
db.execute("drop table IF EXISTS management")
db.execute("create table team(teamname varchar(25),homecourt varchar(30),t_id varchar(3) primary key)")
db.execute("create table player(p_id varchar(4) primary key,fname varchar(15),lname varchar(15),DOB date,height varchar(4),weight numeric(3),t_id varchar(3),foreign key(t_id) references team(t_id))");
db.execute("create table contract(year numeric(1),amount decimal(10,2),p_id varchar(4), foreign key(p_id) references player(p_id))");
db.execute("create table hall_of_fame(year numeric(4),group_h numeric(1),votes numeric(3),p_id varchar(4), foreign key(p_id) references player(p_id))");
db.execute("create table position(p_name varchar(2),p_id varchar(4),foreign key(p_id) references player(p_id))");
db.execute("create table all_star_team(year numeric(4) primary key,team varchar(4),winner varchar(25))");
db.execute("create table voted_for(p_id varchar(4),year numeric(4),primary key(p_id,year),foreign key(p_id) references player(p_id),foreign key(year) references all_star_team(year))");
db.execute("create table sponsor(name varchar(20) primary key,media varchar(20),shoe varchar(20),jersey varchar(20),equipment varchar(20),t_id varchar(3),foreign key(t_id) references team(t_id))");
db.execute("create table previous_team(prev_team_id varchar(3),t_id varchar(3),p_id varchar(4) primary key,from_p date,to_p date,foreign key(prev_team_id) references team(t_id),foreign key(t_id) references team(t_id),foreign key(p_id) references player(p_id))");
db.execute("create table games(game_id varchar(3) primary key,game_date date,location varchar(30))");
db.execute("create table performances(p_id varchar(4),game_id varchar(3),twos numeric(2),threes numeric(2),fgp decimal(3,1),turnovers numeric(2),fouls numeric(1),primary key(p_id,game_id) ,foreign key(p_id) references player(p_id),foreign key(game_id) references games(game_id))");
db.execute("create table take_part_in(t1_id varchar(3),t2_id varchar(3),game_id varchar(3),primary key(t1_id,t2_id,game_id),foreign key(t1_id) references team(t_id),foreign key(t2_id) references team(t_id),foreign key(game_id) references games(game_id))");
db.execute("create table coach(c_id varchar(3) primary key,fname varchar(10),lname varchar(15),t_id varchar(3),foreign key(t_id) references team(t_id))");
db.execute("create table management(manager varchar(20),physio varchar(20),president varchar(20),coach_fname varchar(10),coach_lname varchar(15),t_id varchar(3),foreign key(t_id) references team(t_id))");
db.execute("insert into team values('Los Angeles Lakers', 'Staples Center (A)', 'w01')");
db.execute("insert into team values('Los Angeles Clippers', 'Staples Center (B)', 'w02')");
db.execute("insert into team values('Houston Rockets', 'Toyota Center', 'w03')");
db.execute("insert into team values('San Antonio Spurs', 'At&t Center', 'w04')");
db.execute("insert into team values('Utah Jazz', 'Energy Solutions Arena', 'w05')");
db.execute("insert into team values('<NAME>', 'Pepsi Center', 'w06')");
db.execute("insert into team values('Golden State Warriors', 'Oracle Arena', 'w07')");
db.execute("insert into team values('Oklahoma City Thunder', 'Arco Arena', 'w08')");
db.execute("insert into team values('Portland Trailblazers', 'Moda Center', 'w09')");
db.execute("insert into team values('Minnesota Timberwolves', 'Target Center', 'w10')");
db.execute("insert into team values('Cleveland Cavaliers', 'Quicken Loans Arena', 'e01')");
db.execute("insert into team values('Boston Celtics', 'TD Center', 'e02')");
db.execute("insert into team values('Miami Heat', 'AA Arena', 'e03')");
db.execute("insert into team values('Orlando Magic', 'Amway Center', 'e04')");
db.execute("insert into team values('Chicago Bulls', 'United Center', 'e05')");
db.execute("insert into team values('Milwaukee Bucks', 'Bradley Center', 'e06')");
db.execute("insert into team values('New York Knicks', 'Madison Square Garden', 'e07')");
db.execute("insert into team values('Indiana Pacers', 'Smoothie Center', 'e08')");
db.execute("insert into team values('Philadelphia 76ers', 'Bell Arena', 'e09')");
db.execute("insert into team values('Detroit Pistons', 'The Palace', 'e10')");
db.execute("insert into player values('p001', 'Kobe', 'Bryant' , '1972-07-19','6`6', 225, 'w01')");
db.execute("insert into player values('p002', 'Derek', 'Fisher' , '1974-06-17','6`3', 205, 'w01')");
db.execute("insert into player values('p003', 'Pau', 'Gasol' , '1970-01-09','7`0', 255, 'w01')");
db.execute("insert into player values('p004', 'Ron', 'Artest' , '1975-01-24','6`7', 235, 'w01')");
db.execute("insert into player values('p005', 'Andrew', 'Bynum' , '1972-09-12','7`0', 260, 'w01')");
db.execute("insert into player values('p006', 'JJ', 'Reddick' , '1967-03-11','6`1', 190, 'w02')");
db.execute("insert into player values('p007', 'Blake', 'Griffin' , '1973-10-17','6`11', 220, 'w02')");
db.execute("insert into player values('p008', 'Chris', 'Paul' , '1982-07-19','6`0', 182, 'w02')");
db.execute("insert into player values('p009', 'Jamal', 'Crawford' , '1986-09-17','6`6', 213, 'w02')");
db.execute("insert into player values('p010', 'Austin', 'Rivers' , '1987-02-20','6`4', 198, 'w02')");
db.execute("insert into player values('p011', 'James', 'Harden' , '1989-07-19','6`6', 207, 'w03')");
db.execute("insert into player values('p012', 'Trevor', 'Ariza' , '1990-02-12','6`6', 231, 'w03')");
db.execute("insert into player values('p013', 'Clint', 'Capella' , '1984-06-16','6`11', 250, 'w03')");
db.execute("insert into player values('p014', 'Aaron', 'Brooks' , '1988-08-18','6`4', 222, 'w03')");
db.execute("insert into player values('p015', 'Metta', 'Peace' , '1991-03-19','6`8', 240, 'w03')");
db.execute("insert into player values('p016', 'Tim', 'Duncan' , '1974-11-23','7`0', 246, 'w04')");
db.execute("insert into player values('p017', 'Manu', 'Ginobili' , '1979-07-24','6`6', 205, 'w04')");
db.execute("insert into player values('p018', 'Matt', 'Bonner' , '1978-07-25','6`9', 232, 'w04')");
db.execute("insert into player values('p019', 'Tony', 'Parker' , '1990-05-16','6`8', 206, 'w04')");
db.execute("insert into player values('p020', 'Kawhi', 'Leonard' , '1985-06-18','6`10', 228, 'w04')");
db.execute("insert into player values('p021', 'John', 'Stockton' , '1965-07-12','6`3', 208, 'w05')");
db.execute("insert into player values('p022', 'Karl', 'Malone' , '1962-02-03','6`10', 258, 'w05')");
db.execute("insert into player values('p023', 'Carlos', 'Boozer' , '1981-12-03','6`11', 248, 'w05')");
db.execute("insert into player values('p024', 'Deron', 'Williams' , '1989-04-11','6`6', 201, 'w05')");
db.execute("insert into player values('p025', 'Gordan', 'Hayward' , '1974-011-12','6`7', 223, 'w05')");
db.execute("insert into player values('p026', 'Lebron', 'James' , '1984-01-13','6`8', 235, 'e01')");
db.execute("insert into player values('p027', 'Kyrie', 'Irving' , '1987-04-07','6`3', 202, 'e01')");
db.execute("insert into player values('p028', 'Kevin', 'Love' , '1985-04-12','6`10', 239, 'e01')");
db.execute("insert into player values('p029', 'JR', 'Smith' , '1983-03-14','6`8', 231, 'e01')");
db.execute("insert into player values('p030', 'Mo', 'Williams' , '1973-06-19','6`2', 192, 'e01')");
db.execute("insert into player values('p031', 'Rajon', 'Rondo' , '1988-08-18','6`4', 197, 'e02')");
db.execute("insert into player values('p032', 'Paul', 'Pierce' , '1971-04-01','6`8', 231, 'e02')");
db.execute("insert into player values('p033', 'Ray', 'Allen' , '1976-01-26','6`6', 228, 'e02')");
db.execute("insert into player values('p034', 'Kevin', 'Garnett' , '1974-12-10','6`11', 249, 'e02')");
db.execute("insert into player values('p035', 'Isaiah', 'Thomas' , '1989-02-07','5`9', 190, 'e02')");
db.execute("insert into player values('p036', 'Lebron', 'James' , '1988-07-31','6`8', 245, 'e03')");
db.execute("insert into player values('p037', 'Dwayne', 'Wade' , '1976-07-18','6`6', 229, 'e03')");
db.execute("insert into player values('p038', 'Chris', 'Bosh' , '1980-06-10','6`11', 251, 'e03')");
db.execute("insert into player values('p039', 'Chris', 'Anderson' , '1980-05-14','6`11', 243, 'e03')");
db.execute("insert into player values('p040', 'Ray', 'Allen' , '1976-01-26','6`6', 228, 'e03')");
db.execute("insert into player values('p041', 'Shaq', 'Oneal' , '1970-03-29','7`1', 275, 'e04')");
db.execute("insert into player values('p042', 'Dwight', 'Howard' , '1987-06-30','6`10', 252, 'e04')");
db.execute("insert into player values('p043', 'Rashard', 'Lewis' , '1986-05-31','6`10', 223, 'e04')");
db.execute("insert into player values('p044', 'Hedo', 'Turkogulu' , '1981-08-25','6`11', 238, 'e04')");
db.execute("insert into player values('p045', 'Chris', 'Leonard' , '1980-07-18','6`4', 214, 'e04')");
db.execute("insert into player values('p046', 'Michael', 'Jordan' , '1962-02-14','6`6', 235, 'e05')");
db.execute("insert into player values('p047', 'Steve', 'Kerr' , '1965-11-21','6`8', 231, 'e05')");
db.execute("insert into player values('p048', 'Ron', 'Harper' , '1968-01-02','6`11', 254, 'e05')");
db.execute("insert into player values('p049', 'Scottie', 'Pippen' , '1967-01-18','6`8', 239, 'e05')");
db.execute("insert into player values('p050', 'Dennis', 'Rodman' , '1965-08-26','6`11', 250, 'e05')");
db.execute("insert into player values('p051', 'Stephen', 'Curry' , '1988-03-14','6`3', 238, 'w07')");
db.execute("insert into player values('p052', 'Kevin', 'Durant' , '1988-09-29','6`9', 258, 'w07')");
db.execute("insert into player values('p053', 'Rasheed', 'Wallace' , '1978-10-29','6`10', 277, 'w07')");
db.execute("insert into player values('p054', 'Courtney', 'Lee' , '1986-08-21','6`11', 272, 'w07')");
db.execute("insert into player values('p055', 'Kendrick', 'Purkins' , '1977-09-29','6`2', 218, 'w07')");
db.execute("insert into player values('p056', 'Gerald', 'Wallace' , '1988-01-30','7`0', 288, 'w06')");
db.execute("insert into player values('p057', 'Kwame', 'Brown' , '1989-11-20','6`6', 234, 'w06')");
db.execute("insert into player values('p058', 'Smush', 'Parker' , '1973-02-27','6`1', 208, 'w06')");
db.execute("insert into player values('p059', 'Hakeem', 'Olijuawan' , '1983-02-26','6`11', 265, 'w06')");
db.execute("insert into player values('p060', 'Oscar', 'Robertson' , '1982-04-07','6`5', 231, 'w06')");
db.execute("insert into player values('p061', 'Kevin', 'Hart' , '1981-02-29','6`0', 192, 'w08')");
db.execute("insert into player values('p062', 'Chris', 'Douglas' , '1983-01-11','6`1', 218, 'w08')");
db.execute("insert into player values('p063', 'Domininic', 'Ngyuen' , '1988-09-19','6`9', 258, 'w08')");
db.execute("insert into player values('p064', 'Jonathan', 'Lam' , '1980-09-23','6`8', 254, 'w08')");
db.execute("insert into player values('p065', 'Jonathan', 'Tran' , '1981-02-23','6`10', 268, 'w08')");
db.execute("insert into player values('p066', 'Chris', 'Murdok' , '1983-02-11','6`0', 201, 'w09')");
db.execute("insert into player values('p067', 'Michael', 'Landis' , '1978-09-21','6`0', 199, 'w09')");
db.execute("insert into player values('p068', 'Jordan', 'Valle' , '1975-10-19','5`9', 186, 'w09')");
db.execute("insert into player values('p069', 'Hugo', 'Durant' , '1987-05-14','6`9', 258, 'w09')");
db.execute("insert into player values('p070', 'Aaron', 'Rodgers' , '1988-01-03','6`2', 208, 'w09')");
db.execute("insert into player values('p071', 'Jordy', 'Nelson' , '1978-12-26','5`11', 195, 'w10')");
db.execute("insert into player values('p072', 'Eddie', 'Lacy' , '1971-11-05','7`1', 298, 'w10')");
db.execute("insert into player values('p073', 'Brook', 'Durant' , '1978-01-02','7`1', 301, 'w10')");
db.execute("insert into player values('p074', 'Akhilesh', 'Nirnayya' , '1985-01-02','6`5', 241, 'w10')");
db.execute("insert into player values('p075', 'Skip', 'Bayless' , '1988-03-18','7`2', 302, 'w10')");
db.execute("insert into player values('p076', 'Stephen', 'Smith' , '1984-09-29','6`9', 255, 'e06')");
db.execute("insert into player values('p077', 'DB', 'Saha' , '1982-09-25','6`0', 202, 'e06')");
db.execute("insert into player values('p078', 'Tom', 'Brady' , '1979-10-19','6`3', 253, 'e06')");
db.execute("insert into player values('p079', 'Sam', 'Wilson' , '1988-11-21','6`1', 231, 'e06')");
db.execute("insert into player values('p080', 'Larry', 'Durant' , '1988-04-24','6`9', 258, 'e06')");
db.execute("insert into player values('p081', 'Russel', 'Wilson' , '1984-03-29','6`9', 245, 'e07')");
db.execute("insert into player values('p082', 'Corey', 'Seager' , '1988-05-23','6`8', 248, 'e07')");
db.execute("insert into player values('p083', 'Orlando', 'Bloo' , '1990-09-19','6`2', 228, 'e07')");
db.execute("insert into player values('p084', 'Mack', 'Douglas' , '1988-05-07','6`4', 219, 'e07')");
db.execute("insert into player values('p085', 'Frank', '<NAME>' , '1987-12-09','6`0', 208, 'e07')");
db.execute("insert into player values('p086', 'Holden', 'Tran' , '1988-02-29','6`9', 257, 'e08')");
db.execute("insert into player values('p087', 'Reggie', 'Miller' , '1988-01-29','6`9', 268, 'e08')");
db.execute("insert into player values('p088', 'Reggie', 'Jackson' , '1988-07-28','6`2', 214, 'e08')");
db.execute("insert into player values('p089', 'Phil', 'Durant' , '1986-06-24','6`3', 228, 'e08')");
db.execute("insert into player values('p090', 'Kevin', 'Jackson' , '1987-06-25','6`4', 238, 'e08')");
db.execute("insert into player values('p091', 'Eddie', 'Douglas' , '1988-09-30','6`3', 221, 'e09')");
db.execute("insert into player values('p092', 'Dominic', 'Brady' , '1988-07-29','6`9', 264, 'e09')");
db.execute("insert into player values('p093', 'Brady', 'Aaaron' , '1985-11-09','7`0', 276, 'e09')");
db.execute("insert into player values('p094', 'Wison', 'Smith' , '1981-06-07','6`11', 275, 'e09')");
db.execute("insert into player values('p095', 'Mikey', 'Douglas' , '1982-03-24','6`5', 246, 'e09')");
db.execute("insert into player values('p096', 'Carl', 'Louis' , '1982-01-20','6`4', 223, 'e10')");
db.execute("insert into player values('p097', 'Jimmy', 'Neutron' , '1983-04-25','6`6', 258, 'e10')");
db.execute("insert into player values('p098', 'Tom', 'Smith' , '1983-04-21','6`3', 220, 'e10')");
db.execute("insert into player values('p099', 'Bob', 'Joe' , '1978-03-13','6`10', 258, 'e10')");
db.execute("insert into player values('p100', 'Matt', 'LeBlanc' , '1983-04-05','6`11', 277, 'e10')");
db.execute("insert into contract values(6,82837194.56, 'p001')");
db.execute("insert into contract values(3,6756142.92, 'p002')");
db.execute("insert into contract values(2,7562980.13, 'p003')");
db.execute("insert into contract values(5,40856194.45, 'p004')");
db.execute("insert into contract values(1,2834164.67, 'p005')");
db.execute("insert into contract values(2,5892143.22, 'p006')");
db.execute("insert into contract values(3,12783547.98, 'p007')");
db.execute("insert into contract values(6,76923123.65, 'p008')");
db.execute("insert into contract values(6,65897122.34, 'p009')");
db.execute("insert into contract values(3,13987652.78, 'p010')");
db.execute("insert into contract values(4,24678234.65, 'p011')");
db.execute("insert into contract values(2,458211.98, 'p012')");
db.execute("insert into contract values(3,12875123.67, 'p013')");
db.execute("insert into contract values(4,24132987.54, 'p014')");
db.execute("insert into contract values(6,60982341.43, 'p015')");
db.execute("insert into contract values(1,3567982.12, 'p016')");
db.execute("insert into contract values(3,13986342.87, 'p017')");
db.execute("insert into contract values(2,3456432.11, 'p018')");
db.execute("insert into contract values(1,1543678.42, 'p019')");
db.execute("insert into contract values(4,28767895.93, 'p020')");
db.execute("insert into contract values(1,1872343.34, 'p021')");
db.execute("insert into contract values(2,6234984.54, 'p022')");
db.execute("insert into contract values(4,3874351.73, 'p023')");
db.execute("insert into contract values(3,2873115.91, 'p024')");
db.execute("insert into contract values(1,3893478.22, 'p025')");
db.execute("insert into contract values(2,12457834.98, 'p026')");
db.execute("insert into contract values(4,28335910.43, 'p027')");
db.execute("insert into contract values(1,2384973.11, 'p028')");
db.execute("insert into contract values(3,21847384.44, 'p029')");
db.execute("insert into contract values(5,50327842.31, 'p030')");
db.execute("insert into contract values(5,45348729.48, 'p031')");
db.execute("insert into contract values(3,28761945.52, 'p032')");
db.execute("insert into contract values(4,38737895.56, 'p033')");
db.execute("insert into contract values(2,18767534.33, 'p034')");
db.execute("insert into contract values(1,6239841.87, 'p035')");
db.execute("insert into contract values(2,15437854.88, 'p036')");
db.execute("insert into contract values(3,30184766.24, 'p037')");
db.execute("insert into contract values(1,8767658.44, 'p038')");
db.execute("insert into contract values(4,29769895.01, 'p039')");
db.execute("insert into contract values(3,28767887.56, 'p040')");
db.execute("insert into contract values(6,72555895.04, 'p041')");
db.execute("insert into contract values(2,16884139.55, 'p042')");
db.execute("insert into contract values(6,60456339.75, 'p043')");
db.execute("insert into contract values(1,2443985.81, 'p044')");
db.execute("insert into contract values(4,29223895.23, 'p045')");
db.execute("insert into contract values(6,92769985.08, 'p046')");
db.execute("insert into contract values(1,2983728.44, 'p047')");
db.execute("insert into contract values(2,16887320.87, 'p048')");
db.execute("insert into contract values(3,24887992.90, 'p049')");
db.execute("insert into contract values(4,36784637.43, 'p050')");
db.execute("insert into contract values(6,82833194.56, 'p051')");
db.execute("insert into contract values(3,6756942.92, 'p052')");
db.execute("insert into contract values(2,7562880.13, 'p053')");
db.execute("insert into contract values(5,40857194.45, 'p054')");
db.execute("insert into contract values(1,2834664.67, 'p055')");
db.execute("insert into contract values(2,5892543.22, 'p056')");
db.execute("insert into contract values(3,12783547.98, 'p057')");
db.execute("insert into contract values(6,76922123.65, 'p058')");
db.execute("insert into contract values(6,65891122.34, 'p059')");
db.execute("insert into contract values(3,13982652.78, 'p060')");
db.execute("insert into contract values(4,24673234.65, 'p061')");
db.execute("insert into contract values(2,458261.98, 'p062')");
db.execute("insert into contract values(3,12876123.67, 'p063')");
db.execute("insert into contract values(4,24135987.54, 'p064')");
db.execute("insert into contract values(6,60984341.43, 'p065')");
db.execute("insert into contract values(1,3567982.12, 'p066')");
db.execute("insert into contract values(3,13981342.87, 'p067')");
db.execute("insert into contract values(2,3456432.11, 'p068')");
db.execute("insert into contract values(1,1543378.42, 'p069')");
db.execute("insert into contract values(4,28767895.93, 'p070')");
db.execute("insert into contract values(1,1872343.34, 'p071')");
db.execute("insert into contract values(2,6234984.54, 'p072')");
db.execute("insert into contract values(4,3874351.73, 'p073')");
db.execute("insert into contract values(3,2873115.91, 'p074')");
db.execute("insert into contract values(1,3893478.22, 'p075')");
db.execute("insert into contract values(2,12457834.98, 'p076')");
db.execute("insert into contract values(4,28335910.43, 'p077')");
db.execute("insert into contract values(1,2384973.11, 'p078')");
db.execute("insert into contract values(5,50337842.31, 'p080')");
db.execute("insert into contract values(5,45328729.48, 'p081')");
db.execute("insert into contract values(3,28713945.52, 'p082')");
db.execute("insert into contract values(4,38757895.56, 'p083')");
db.execute("insert into contract values(2,18737534.33, 'p084')");
db.execute("insert into contract values(1,6237841.87, 'p085')");
db.execute("insert into contract values(2,15432854.88, 'p086')");
db.execute("insert into contract values(3,30124766.24, 'p087')");
db.execute("insert into contract values(1,8267658.44, 'p088')");
db.execute("insert into contract values(4,22769895.01, 'p089')");
db.execute("insert into contract values(3,22767887.56, 'p090')");
db.execute("insert into contract values(6,72155895.04, 'p091')");
db.execute("insert into contract values(2,16864139.55, 'p092')");
db.execute("insert into contract values(6,60476339.75, 'p093')");
db.execute("insert into contract values(1,2448985.81, 'p094')");
db.execute("insert into contract values(4,29293895.23, 'p095')");
db.execute("insert into contract values(6,92719985.08, 'p096')");
db.execute("insert into contract values(1,2982728.44, 'p097')");
db.execute("insert into contract values(2,16837320.87, 'p098')");
db.execute("insert into contract values(3,24857992.90, 'p099')");
db.execute("insert into contract values(4,36764637.43, 'p100')");
db.execute("insert into hall_of_fame values(1996, 2, 648, 'p046')");
db.execute("insert into hall_of_fame values(1996, 2, 211, 'p047')");
db.execute("insert into hall_of_fame values(1997, 1, 315, 'p050')");
db.execute("insert into hall_of_fame values(1998, 1, 367, 'p048')");
db.execute("insert into hall_of_fame values(1999, 3, 312, 'p049')");
db.execute("insert into hall_of_fame values(1999, 3, 293, 'p035')");
db.execute("insert into hall_of_fame values(1999, 3, 215, 'p041')");
db.execute("insert into hall_of_fame values(2000, 1, 196, 'p039')");
db.execute("insert into hall_of_fame values(2001, 2, 204, 'p003')");
db.execute("insert into hall_of_fame values(2001, 2, 209, 'p029')");
db.execute("insert into hall_of_fame values(2002, 1, 394, 'p026')");
db.execute("insert into hall_of_fame values(2003, 1, 495, 'p009')");
db.execute("insert into hall_of_fame values(2004, 2, 531, 'p042')");
db.execute("insert into hall_of_fame values(2004, 2, 458, 'p025')");
db.execute("insert into hall_of_fame values(2005, 1, 286, 'p007')");
db.execute("insert into hall_of_fame values(2006, 3, 476, 'p014')");
db.execute("insert into hall_of_fame values(2006, 3, 541, 'p034')");
db.execute("insert into hall_of_fame values(2006, 3, 477, 'p016')");
db.execute("insert into hall_of_fame values(2007, 2, 593, 'p001')");
db.execute("insert into hall_of_fame values(2007, 2, 308, 'p037')");
db.execute("insert into position values('SG','p001')");
db.execute("insert into position values('PG','p002')");
db.execute("insert into position values('PF','p003')");
db.execute("insert into position values('SF','p004')");
db.execute("insert into position values('C','p005')");
db.execute("insert into position values('SG','p006')");
db.execute("insert into position values('PF','p007')");
db.execute("insert into position values('PG','p008')");
db.execute("insert into position values('SG','p009')");
db.execute("insert into position values('PG','p010')");
db.execute("insert into position values('SG','p011')");
db.execute("insert into position values('SF','p012')");
db.execute("insert into position values('C','p013')");
db.execute("insert into position values('PG','p014')");
db.execute("insert into position values('SF','p015')");
db.execute("insert into position values('PF','p016')");
db.execute("insert into position values('SG','p017')");
db.execute("insert into position values('C','p018')");
db.execute("insert into position values('PG','p019')");
db.execute("insert into position values('SG','p020')");
db.execute("insert into all_star_team values('1997','East','<NAME>')");
db.execute("insert into all_star_team values('1998','West','<NAME>')");
db.execute("insert into all_star_team values('1999','West','Shaq O Neal')");
db.execute("insert into all_star_team values('2000','West','<NAME>')");
db.execute("insert into all_star_team values('2001','West','<NAME>')");
db.execute("insert into all_star_team values('2002','West','<NAME>')");
db.execute("insert into all_star_team values('2003','West','<NAME>')");
db.execute("insert into all_star_team values('2004','West','Shaq O Neal')");
db.execute("insert into all_star_team values('2005','East','<NAME>')");
db.execute("insert into all_star_team values('2006','West','<NAME>')");
db.execute("insert into all_star_team values('2007','East','<NAME>')");
db.execute("insert into all_star_team values('2008','East','<NAME>')");
db.execute("insert into all_star_team values('2009','East','<NAME>')");
db.execute("insert into all_star_team values('2010','West','Kobe Bryant')");
db.execute("insert into all_star_team values('2011','West','<NAME>')");
db.execute("insert into all_star_team values('2012','West','<NAME>')");
db.execute("insert into all_star_team values('2013','East','<NAME>')");
db.execute("insert into all_star_team values('2014','West','<NAME>')");
db.execute("insert into all_star_team values('2015','East','<NAME>')");
db.execute("insert into all_star_team values('2016','West','<NAME>')");
db.execute("insert into all_star_team values('2017','East','<NAME>')");
db.execute("insert into voted_for values('p046','1997')");
db.execute("insert into voted_for values('p046','1998')");
db.execute("insert into voted_for values('p001','1998')");
db.execute("insert into voted_for values('p001','1999')");
db.execute("insert into voted_for values('p001','2000')");
db.execute("insert into voted_for values('p001','2001')");
db.execute("insert into voted_for values('p001','2002')");
db.execute("insert into voted_for values('p001','2003')");
db.execute("insert into voted_for values('p001','2004')");
db.execute("insert into voted_for values('p001','2005')");
db.execute("insert into voted_for values('p001','2006')");
db.execute("insert into voted_for values('p001','2007')");
db.execute("insert into voted_for values('p001','2008')");
db.execute("insert into voted_for values('p001','2009')");
db.execute("insert into voted_for values('p001','2010')");
db.execute("insert into voted_for values('p001','2011')");
db.execute("insert into voted_for values('p001','2012')");
db.execute("insert into voted_for values('p001','2013')");
db.execute("insert into voted_for values('p001','2014')");
db.execute("insert into voted_for values('p001','2015')");
db.execute("insert into voted_for values('p001','2016')");
db.execute("insert into voted_for values('p016','1998')");
db.execute("insert into voted_for values('p016','1999')");
db.execute("insert into voted_for values('p016','2000')");
db.execute("insert into voted_for values('p016','2001')");
db.execute("insert into voted_for values('p016','2002')");
db.execute("insert into voted_for values('p016','2003')");
db.execute("insert into voted_for values('p016','2004')");
db.execute("insert into voted_for values('p016','2005')");
db.execute("insert into voted_for values('p016','2006')");
db.execute("insert into voted_for values('p016','2007')");
db.execute("insert into voted_for values('p016','2008')");
db.execute("insert into voted_for values('p016','2009')");
db.execute("insert into voted_for values('p016','2010')");
db.execute("insert into voted_for values('p016','2011')");
db.execute("insert into voted_for values('p016','2012')");
db.execute("insert into voted_for values('p016','2013')");
db.execute("insert into voted_for values('p016','2014')");
db.execute("insert into voted_for values('p016','2015')");
db.execute("insert into voted_for values('p016','2016')");
db.execute("insert into voted_for values('p020','2017')");
db.execute("insert into voted_for values('p011','2017')");
db.execute("insert into voted_for values('p027','2017')");
db.execute("insert into voted_for values('p028','2017')");
db.execute("insert into voted_for values('p026','2017')");
db.execute("insert into voted_for values('p007','2017')");
db.execute("insert into sponsor values('Nike','Nike Media','Air Jordan','Air Fit','Nike Equipment','e01')");
db.execute("insert into sponsor values('Adidas','Adidas Media','P90x','Clima cool','Adi Train','e02')");
db.execute("insert into sponsor values('Puma','Puma Media','Blaze','Drifit','Puma Tech','e03')");
db.execute("insert into sponsor values('Under Armor','Armor Media','Ziko 20','Body Armor','Pro Tech','e04')");
db.execute("insert into sponsor values('New Balance','Balance Media','Gel Cumulus','Perspire','Balancequip','e05')");
db.execute("insert into sponsor values('Bwin','Bw Media','Nimbus','Clean','Win Training','e06')");
db.execute("insert into sponsor values('Qatar Airlines','Qatar Media','Inferno','Sense','Training Pro','e07')");
db.execute("insert into sponsor values('American Insurance','American Media','Cloud','Dew','TechX','e08')");
db.execute("insert into sponsor values('Slazenger','Slaz Media','Venom','Skin Z','SlazTech','e09')");
db.execute("insert into sponsor values('Bank of America','North Media','Kraken','Layer','American Equipment','e10')");
db.execute("insert into sponsor values('Shell','Shell Media','Power','Boost','Power Tech','w01')");
db.execute("insert into sponsor values('Canondale','CD Media','Hydro','Gravitas','Canon Equipment','w02')");
db.execute("insert into sponsor values('The North Face','Public Face','Float','Levitate','NorTech','w03')");
db.execute("insert into sponsor values('Walmart','Wal Media','Chi','X Touch','Equipment Z','w04')");
db.execute("insert into sponsor values('Target','Target Media','Energy','Mutate','Sense Equip','w05')");
db.execute("insert into sponsor values('Wells Fargo','Fargo Media','Chimera','Spear','Wellness Tech','w06')");
db.execute("insert into sponsor values('Mervyns','Merv Media','Katana','Blade','Merv Tech','w07')");
db.execute("insert into sponsor values('Best Buy','BB Media','Claw','Fang','Health Equipment','w08')");
db.execute("insert into sponsor values('CBS','CBS Media','GenX','Protect X','ProTrain','w09')");
db.execute("insert into sponsor values('KIA','KIA Media','Scimitar','Tyranitar','Train Max','w10')");
db.execute("insert into previous_team values('e03','e01','p001','2003-11-21','2007-11-21')");
db.execute("insert into previous_team values('e01','e01','p002','2007-11-21','2010-11-21')");
db.execute("insert into previous_team values('e02','e01','p003','2010-11-21','2014-11-21')");
db.execute("insert into previous_team values('e03','w01','p004','2003-11-23','2004-11-21')");
db.execute("insert into previous_team values('e01','w01','p005','2004-11-21','2005-11-21')");
db.execute("insert into previous_team values('e05','w01','p006','2005-11-21','2015-11-21')");
db.execute("insert into previous_team values('w01','w01','p007','2015-11-21','2016-11-21')");
db.execute("insert into previous_team values('e03','w01','p008','2003-11-21','2007-11-21')");
db.execute("insert into previous_team values('e01','w01','p009','2007-11-21','2008-11-21')");
db.execute("insert into previous_team values('w05','w01','p010','2008-11-21','2014-11-21')");
db.execute("insert into previous_team values('e07','w01','p011','2003-11-21','2007-11-21')");
db.execute("insert into previous_team values('e02','w01','p012','2007-11-21','2008-11-21')");
db.execute("insert into previous_team values('e05','w01','p013','2008-11-21','2009-11-21')");
db.execute("insert into previous_team values('w03','w01','p014','2009-11-21','2010-11-21')");
db.execute("insert into previous_team values('w07','w05','p015','2001-11-21','2003-11-21')");
db.execute("insert into previous_team values('e04','w05','p016','2003-11-21','2007-11-21')");
db.execute("insert into previous_team values('e05','w05','p017','2007-11-21','2013-11-21')");
db.execute("insert into previous_team values('w01','w05','p018','2013-11-21','2015-11-21')");
db.execute("insert into previous_team values('e01','w05','p019','2003-11-21','2007-11-21')");
db.execute("insert into previous_team values('e03','e05','p037','2003-11-21','2016-11-21')");
db.execute("insert into games values('g01','2016-03-16','At&t Center')");
db.execute("insert into games values('g02','2016-03-21','Oracle Arena')");
db.execute("insert into games values('g03','2016-04-07','Quicken Loans Arena')");
db.execute("insert into games values('g04','2016-04-19','Staples Center (A)')");
db.execute("insert into games values('g05','2016-05-23','Staples Center (B)')");
db.execute("insert into games values('g06','2016-05-27','Toyota Center')");
db.execute("insert into games values('g07','2016-06-12','Amway Center')");
db.execute("insert into games values('g08','2016-06-17','United Center')");
db.execute("insert into games values('g09','2016-07-01','Energy Solutions Arena')");
db.execute("insert into games values('g10','2016-07-31','Quicken Loans Arena')");
db.execute("insert into games values('g11','2016-08-22','United Center')");
db.execute("insert into games values('g12','2016-08-29','United Center')");
db.execute("insert into games values('g13','2016-09-08','Oracle Arena')");
db.execute("insert into games values('g14','2016-09-29','At&t Center')");
db.execute("insert into games values('g15','2016-10-16','Staples Center (B)')");
db.execute("insert into games values('g16','2016-10-24','TD Center')");
db.execute("insert into games values('g17','2016-11-04','United Center')");
db.execute("insert into games values('g18','2016-11-14','TD Center')");
db.execute("insert into games values('g19','2016-12-20','Staples Center (A)')");
db.execute("insert into games values('g20','2016-12-25','Toyota Center')");
db.execute("insert into performances values('p020','g01',12,9,55.3,7,3)");
db.execute("insert into performances values('p016','g01',14,4,79.7,9,5)");
db.execute("insert into performances values('p051','g01',6,8,67.2,3,0)");
db.execute("insert into performances values('p052','g01',9,7,71.4,4,1)");
db.execute("insert into performances values('p051','g02',7,10,68.6,5,1)");
db.execute("insert into performances values('p052','g02',10,7,53.9,4,2)");
db.execute("insert into performances values('p026','g02',12,6,75.2,7,1)");
db.execute("insert into performances values('p027','g02',11,8,57.9,6,1)");
db.execute("insert into performances values('p027','g03',8,5,69.6,6,4)");
db.execute("insert into performances values('p026','g03',6,4,51.4,5,6)");
db.execute("insert into performances values('p020','g04',9,10,63.8,9,2)");
db.execute("insert into performances values('p008','g05',13,9,72.7,6,1)");
db.execute("insert into performances values('p035','g06',14,6,78.1,4,5)");
db.execute("insert into performances values('p011','g07',12,5,60.0,8,3)");
db.execute("insert into performances values('p046','g08',8,10,64.6,5,1)");
db.execute("insert into performances values('p001','g09',6,9,59.2,7,6)");
db.execute("insert into performances values('p051','g10',7,4,65.6,8,4)");
db.execute("insert into performances values('p046','g11',10,7,53.6,9,2)");
db.execute("insert into performances values('p049','g12',9,5,76.1,4,6)");
db.execute("insert into performances values('p011','g13',14,6,68.8,4,5)");
db.execute("insert into take_part_in values('w07','w04','g01')");
db.execute("insert into take_part_in values('e01','w07','g02')");
db.execute("insert into take_part_in values('w01','e01','g03')");
db.execute("insert into take_part_in values('w04','w01','g04')");
db.execute("insert into take_part_in values('e03','w02','g05')");
db.execute("insert into take_part_in values('e02','w03','g06')");
db.execute("insert into take_part_in values('w03','e04','g07')");
db.execute("insert into take_part_in values('e04','e05','g08')");
db.execute("insert into take_part_in values('w01','w05','g09')");
db.execute("insert into take_part_in values('w07','e03','g10')");
db.execute("insert into take_part_in values('w01','e05','g11')");
db.execute("insert into take_part_in values('e03','e05','g12')");
db.execute("insert into take_part_in values('w03','w07','g13')");
db.execute("insert into take_part_in values('e03','w04','g14')");
db.execute("insert into take_part_in values('e01','w02','g15')");
db.execute("insert into take_part_in values('w03','e02','g16')");
db.execute("insert into take_part_in values('w07','e05','g17')");
db.execute("insert into take_part_in values('e04','e02','g18')");
db.execute("insert into take_part_in values('e05','w01','g19')");
db.execute("insert into take_part_in values('e01','w03','g20')");
db.execute("insert into coach values('c01','Tyronn','Lue','e01')");
db.execute("insert into coach values('c02','Brad','Stevens','e02')");
db.execute("insert into coach values('c03','Eric','Spoelstra','e03')");
db.execute("insert into coach values('c04','Frank','Vogel','e04')");
db.execute("insert into coach values('c05','Fred','Hoiberg','e05')");
db.execute("insert into coach values('c06','Jason','Kidd','e06')");
db.execute("insert into coach values('c07','Jeff','Hornacek','e07')");
db.execute("insert into coach values('c08','Nate','McMillan','e08')");
db.execute("insert into coach values('c09','Brett','Brown','e09')");
db.execute("insert into coach values('c10','Stan','<NAME>','e10')");
db.execute("insert into coach values('c11','Luke','Walton','w01')");
db.execute("insert into coach values('c12','Doc','Rivers','w02')");
db.execute("insert into coach values('c13','Mike','D`Antoni','w03')");
db.execute("insert into coach values('c14','Gregg','Popovich','w04')");
db.execute("insert into coach values('c15','Quin','Snyder','w05')");
db.execute("insert into coach values('c16','Michael','Malone','w06')");
db.execute("insert into coach values('c17','Steve','Kerr','w07')");
db.execute("insert into coach values('c18','Billy','Donovan','w08')");
db.execute("insert into coach values('c19','Terry','Stotts','w09')");
db.execute("insert into coach values('c20','Tom','Thibodeau','w10')");
db.execute("insert into management values('<NAME>','<NAME>','<NAME>','Luke','Walton','w01')");
db.execute("insert into management values('<NAME>','<NAME>','<NAME>','Doc','Rivers', 'w02')");
db.execute("insert into management values('<NAME>','<NAME>','<NAME>','Mike' ,'D`Antoni', 'w03')");
db.execute("insert into management values('<NAME>','<NAME>','<NAME>','Gregg','Popovich', 'w04')");
db.execute("insert into management values('<NAME>','<NAME>','<NAME>','Quin','Snyder' , 'w05')");
db.execute("insert into management values('<NAME>','<NAME>','<NAME>','Michael' ,'Malone' , 'w06')");
db.execute("insert into management values('<NAME>','<NAME>','<NAME>','Steve','Kerr', 'w07')");
db.execute("insert into management values('<NAME>','K.Larry','<NAME>','Billy' ,'Donovan', 'w08')");
db.execute("insert into management values('<NAME>','<NAME>.','<NAME>','Terry','Stotts', 'w09')");
db.execute("insert into management values('<NAME>','T.Lewis','<NAME>','Tom' ,'Thibodeau' , 'w10')");
db.execute("insert into management values('<NAME>','<NAME>','<NAME>' ,'Tyronn' ,'Lue', 'e01')");
db.execute("insert into management values('<NAME>','<NAME>','<NAME>', 'Brad' ,'Stevens','e02')");
db.execute("insert into management values('<NAME>','<NAME>.','<NAME>','Erik' ,'Spoelstra' , 'e03')");
db.execute("insert into management values('<NAME>','S.Judy','<NAME>','Frank','Vogel' , 'e04')");
db.execute("insert into management values('<NAME>','L.Brown','<NAME>' ,'Fred','Hoiberg', 'e05')");
db.execute("insert into management values('<NAME>','<NAME>','<NAME>','Jason', 'Kidd' , 'e06')");
db.execute("insert into management values('<NAME>','<NAME>.','<NAME>','Jeff', 'Hornacek', 'e07')");
db.execute("insert into management values('<NAME>','<NAME>','<NAME>','Nate','McMillan','e08')");
db.execute("insert into management values('<NAME>','<NAME>','<NAME>','Brett' ,'Brown', 'e09')");
db.execute("insert into management values('<NAME>','<NAME>','<NAME>','Stan','<NAME>', 'e10')");
db.commit()
| StarcoderdataPython |
3266961 | """
copyright <NAME>
10.06.2021
"""
from numpy.fft import fft, ifft, fftfreq
from numpy import real, imag
import csv
from matplotlib import pyplot as plt
#
# weekly_prices = []
# dates = []
#
# plt.plot(range(len(weekly_prices)), weekly_prices, '-b')
# plt.xlabel('Week #')
# plt.ylabel('Crude Oil Future Price')
# # here we have computed the fft of the weekly_prices
# fft_data = fft(weekly_prices)
# N = len(fft_data)
# assert (N == len(weekly_prices))
#
#
# # TODO: first fill in the frequencies call this list
# # fft_frequencies -- it must have length N
# # it must store the frequencies of each element in the fft_data
# # ensure that the frequencies of the second half are negative.
# # your code here
#
# # This function will be useful for you. Please go through the code.
#
#
# def select_all_items_in_freq_range(lo, hi):
# # TODO: go through the fft_data and select only those frequencies in the range lo/hi
# new_fft_data = [] # make sure we have the 0 frequency component
# fft_frequencies = []
# for (fft_val, fft_freq) in zip(fft_data, fft_frequencies):
# if lo <= fft_freq and fft_freq < hi:
# new_fft_data.append(fft_val)
# elif -hi < fft_freq and fft_freq <= -lo:
# new_fft_data.append(fft_val)
# else:
# new_fft_data.append(0.0)
# filtered_data = ifft(new_fft_data)
# assert all(abs(imag(x)) <= 1E-10 for x in filtered_data)
# return [real(x) for x in filtered_data]
#
#
# upto_1_year = [] # All signal components with frequency < 1/52
# one_year_to_1_quarter = [] # All signal components with frequency between 1/52 (inclusive) and 1/13 weeks (not inclusive)
# less_than_1_quarter = [] # All signal components with frequency >= 1/13
#
#
# # TODO: Redefine the three lists using the select_all_items function
# # your code here
#
#
# # inputs sets a, b, c
# # return True if there exist n1 in a, n2 in B such that n1+n2 in C
# # return False otherwise
# # number n which signifies the maximum number in a, b, c
# # here is a useful reference to set data structure in python
# # https://docs.python.org/3/tutorial/datastructures.html#sets
def check_sum_exists(a, b, c, n):
a_coeffs = [0] * n
b_coeffs = [0] * n
# convert sets a, b into polynomials as provided in the hint
# a_coeffs and b_coeffs should contain the result
# your code here
for coeff in a:
a_coeffs[coeff] = 1
for coeff in b:
b_coeffs[coeff] = 1
# multiply them together
c_coeffs = polynomial_multiply(a_coeffs, b_coeffs)
# use the result to solve the problem at hand
# your code here
for coeff in c:
if c_coeffs[coeff] >= .5:
return True
return False
# return True/False
def polynomial_multiply(a_coeff_list, b_coeff_list):
# Return the coefficient list of the multiplication
# of the two polynomials
# Returned list must be a list of floating point numbers.
# Please convert list from complex to reals by using the
# real function in numpy.
    # Capture the original lengths first so both lists are padded to the product
    # length (len_a + len_b - 1); measuring b after it has grown would over-pad a.
    orig_len_a = len(a_coeff_list)
    orig_len_b = len(b_coeff_list)
    for i in range(orig_len_a - 1):
        b_coeff_list.append(0)
    for i in range(orig_len_b - 1):
        a_coeff_list.append(0)
a_fft = fft(a_coeff_list)
b_fft = fft(b_coeff_list)
c = []
for i in range(len(a_fft)):
c.append(a_fft[i] * b_fft[i])
return real(ifft(c))
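# Hedged usage note (added): polynomial_multiply expects coefficient lists in
# increasing order of degree, e.g. [1, 2] stands for 1 + 2x, so
# polynomial_multiply([1, 2], [3, 1]) comes out to roughly [3.0, 7.0, 2.0],
# i.e. 3 + 7x + 2x^2. check_sum_exists above relies on this: a nonzero
# coefficient at index k of the product means k = n1 + n2 for some n1 in a and
# n2 in b, so check_sum_exists({1, 2}, {3, 4}, {5, 9}, 10) should be True (1 + 4 = 5).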
def maxSubArray(a):
n = len(a)
if n == 1:
return 0
# your code here
# left, right, sum_x = find_max_subarray(a, 0, n-1)
# return sum_x
return find_min_and_max_elements(a, 0, n - 1)
def simple_sub_array(a, low, high):
    # Unused alternative to find_min_and_max_elements below; fixed so it no
    # longer tries to unpack the single gain value that helper returns.
    if low == high:
        return 0
    elif low + 1 == high:
        return max((a[high] - a[low]), 0)
    else:
        mid = (low + high) // 2
        left_gain = find_min_and_max_elements(a, low, mid)
        right_gain = find_min_and_max_elements(a, mid + 1, high)
        cross_gain = max_element(a, mid + 1, high) - min_element(a, low, mid)
        return max(left_gain, right_gain, cross_gain)
def find_min_and_max_elements(a, low, high):
    if low == high:
        return 0
    elif low + 1 == high:
        return max(a[high] - a[low], 0)
else:
mid = (low + high) // 2
m1 = find_min_and_max_elements(a, low, mid)
m2 = find_min_and_max_elements(a, mid + 1, high)
y1 = max_element(a, mid + 1, high)
x1 = min_element(a, low, mid)
return max(m1, m2, y1 - x1)
def min_element(a, low, high):
min_ele = float('inf')
for i in range(low, high + 1):
min_ele = min(min_ele, a[i])
return min_ele
def max_element(a, low, high):
max_ele = float('-inf')
for i in range(low, high + 1):
max_ele = max(max_ele, a[i])
return max_ele
def find_max_subarray(a, low, high):
if high == low:
return low, high, a[low]
mid = (low + high) // 2
left_low, left_high, left_sum = find_max_subarray(a, low, mid)
right_low, right_high, right_sum = find_max_subarray(a, mid + 1, high)
cross_low, cross_high, cross_sum = find_max_crossing_subarray(a, low, mid, high)
if left_sum >= right_sum and left_sum >= cross_sum:
return left_low, left_high, left_sum
elif right_sum >= left_sum and right_sum >= cross_sum:
return right_low, right_high, right_sum
else:
return cross_low, cross_high, cross_sum
def find_max_crossing_subarray(a, low, mid, high):
left_sum = float('-inf')
sum_x = 0
max_left = None
max_right = None
for i in range(mid, low, -1):
sum_x += a[i]
if sum_x > left_sum:
left_sum = sum_x
max_left = i
right_sum = float('-inf')
sum_x = 0
for j in range(mid + 1, high):
sum_x += a[j]
if sum_x > right_sum:
right_sum = sum_x
max_right = j
return max_left, max_right, left_sum + right_sum
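# Hedged demo (added): small sanity check for maxSubArray; the function returns
# the best gain a[j] - a[i] with j > i, which for this series is 110 - 90 = 20.
if __name__ == "__main__":
    print(maxSubArray([100, 93, 95, 97, 90, 110]))  # expected 20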
| StarcoderdataPython |
171099 | <reponame>sonalimahajan12/Automation-scripts
import requests
import sys
def get_prices():
# Checking there is a coin passed
if len(sys.argv) > 1:
coins = sys.argv[1:]
else:
# Default coins
coins = ["BTC", "ETH", "XRP", "LTC", "BCH",
"ADA", "DOT", "LINK", "BNB", "XLM"]
crypto_data = requests.get(
"https://min-api.cryptocompare.com/"
"data/pricemultifull?fsyms={}&tsyms=USD"
.format(",".join(coins))).json()["RAW"]
data = {}
for i in crypto_data:
data[i] = {
"coin": i,
"price": crypto_data[i]["USD"]["PRICE"],
"change_day": crypto_data[i]["USD"]["CHANGEPCT24HOUR"],
"change_hour": crypto_data[i]["USD"]["CHANGEPCTHOUR"]
}
return data
if __name__ == "__main__":
crypto_data = get_prices()
message = ""
for i in crypto_data:
coin = crypto_data[i]["coin"]
price = crypto_data[i]["price"]
change_day = crypto_data[i]["change_day"]
change_hour = crypto_data[i]["change_hour"]
message += f"\nCoin: {coin}"
message += f"\nPrice: ${price:,.2f}"
message += f"\nHour Change: {change_hour:.3f}%"
message += f"\nDay Change: {change_day:.3f}%\n"
print(message)
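# Usage sketch (added, hedged): symbols can be passed on the command line,
# e.g. `python crypto_prices.py BTC ETH`; otherwise the default list above is
# used. The script filename here is illustrative, not part of the original.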
| StarcoderdataPython |
4805538 | from django.shortcuts import render
from django.http import HttpResponse, Http404
from django.views import View
def test_response(request):
return HttpResponse('this is a test')
def test_view(request):
return render(request, 'test.html')
def test_404(request):
raise Http404()
class TestView(View):
def get(self, request):
return HttpResponse('this is a response of CBV')
class CallableTestView(object):
def __init__(self, response_text='this is a test'):
self._response_text = response_text
def __call__(self, request):
return HttpResponse(self._response_text)
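# Hedged wiring sketch (added): these test views would typically be mapped in a
# urls.py along the following lines; the URL patterns below are assumptions,
# not part of the original module.
#
#   from django.urls import path
#   from . import views
#
#   urlpatterns = [
#       path('response/', views.test_response),
#       path('view/', views.test_view),
#       path('missing/', views.test_404),
#       path('cbv/', views.TestView.as_view()),
#       path('callable/', views.CallableTestView('custom text')),
#   ]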
| StarcoderdataPython |
1703968 | <filename>led_cube.py
from machine import Pin
import utime
class LedCube:
def __init__(self, level_pins, led_pins):
        if len(level_pins) % len(led_pins) != 0:
            # CubeException takes a single message, so format everything into one string.
            raise CubeException(
                "led_pins array length does not divide level_pins array length. "
                "len(level_pins) = {}, len(led_pins) = {}".format(len(level_pins), len(led_pins)))
        # Materialise the Pin objects up front; map() would only give a one-shot iterator.
        self.level_pins = [Pin(p, Pin.OUT) for p in level_pins]
        self.led_pins = [Pin(p, Pin.OUT) for p in led_pins]
class CubeException(Exception):
def __init__(self, message):
self.message = message | StarcoderdataPython |
3389431 | <filename>LeetCode/Python/1197. Minimum Knight Moves.py
"""
See the problem description at: https://leetcode.com/problems/minimum-knight-moves/
"""
class Solution:
def minKnightMoves(self, x: int, y: int) -> int:
from collections import deque
if x == 0 and y == 0: return 0
x, y = abs(x), abs(y)
        queue = deque([(0, 0)])  # x and y coordinates; use the imported deque (collections itself was never imported)
moves_to_xy = dict()
moves_to_xy[0, 0] = 0
xy_moves = [(i, j) for i in {-1, 1, 2, -2} for j in {-1, 1, 2, -2} \
if abs(i) != abs(j)]
while queue:
px, py = queue.popleft()
n_moves = moves_to_xy[px, py]
for xm, ym in xy_moves:
x1, y1 = px + xm, py + ym
nei = x1, y1
if nei == (x, y):
return n_moves + 1
xx, yy = nei
# cond1 = abs(xx) + abs(yy) <= 300 # May not pass the time limit condition
cond2 = xx <= 300 and xx >= -5 and yy <= 300 and yy >= -5
if nei not in moves_to_xy and cond2:
queue.append(nei)
moves_to_xy[nei] = n_moves + 1 | StarcoderdataPython |
3215869 | <filename>joommf/mesh.py
import textwrap
class Mesh(object):
"""class Mesh(lengths, mesh_spacing, scale=1e-9)
lengths: list
List of 3 lengths which make up the global atlas
mesh_spacing:
List of 3 discretisations.
Example usage:
For a rectangular block of 30x30x50nm, with mesh nodes
every 5nm, create a Mesh object with
mesh = Mesh([30, 30, 50], [5, 5, 5])
"""
def __init__(self, lengths, mesh_spacing, scale=1e-9):
assert len(lengths) == 3, "Lengths must contain three values"
assert len(mesh_spacing) == 3, "mesh_spacing must contain a value" + \
"for each coordinate direction"
components = ['x', 'y', 'z']
for vals, component in zip(lengths, components):
assert vals > 0, 'L component {} must be a positive value'.format(
component)
self.lx = lengths[0]
self.ly = lengths[1]
self.lz = lengths[2]
self.dx = mesh_spacing[0]
self.dy = mesh_spacing[1]
self.dz = mesh_spacing[2]
self.scale = scale
def mesh_info(self):
return (self.lx, self.ly, self.lz, self.dx, self.dy, self.dz)
def _atlas_mif(self):
atlas_mif = textwrap.dedent("""\
Specify Oxs_BoxAtlas:atlas {{
xrange {{0 {}}}
yrange {{0 {}}}
zrange {{0 {}}}
}}
""").format(self.lx, self.ly, self.lz)
return atlas_mif
def _mesh_mif(self):
mesh_mif = 'Specify Oxs_RectangularMesh:mesh {\n'
mesh_mif += '\tcellsize {%2e %2e %2e}\n' % (self.dx, self.dy, self.dz)
mesh_mif += '\tatlas :atlas\n'
mesh_mif += '}\n\n'
return mesh_mif
def get_mif(self):
return self._atlas_mif() + self._mesh_mif()
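
# Hedged usage sketch (added): mirrors the docstring example and prints the
# generated MIF atlas/mesh sections.
if __name__ == "__main__":
    demo_mesh = Mesh([30, 30, 50], [5, 5, 5])
    print(demo_mesh.get_mif())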
| StarcoderdataPython |
1756708 | <gh_stars>0
"""brainfuck interpreter adapted from (public domain) code at
http://brainfuck.sourceforge.net/brain.py"""
import asyncio
import random
import re
from cloudbot import hook
BUFFER_SIZE = 5000
MAX_STEPS = 1000000
@asyncio.coroutine
@hook.command("brainfuck", "bf")
def bf(text):
"""<prog> - executes <prog> as Brainfuck code
:type text: str
"""
program = re.sub('[^][<>+-.,]', '', text)
# create a dict of brackets pairs, for speed later on
brackets = {}
open_brackets = []
for pos in range(len(program)):
if program[pos] == '[':
open_brackets.append(pos)
elif program[pos] == ']':
if len(open_brackets) > 0:
brackets[pos] = open_brackets[-1]
brackets[open_brackets[-1]] = pos
open_brackets.pop()
else:
return "Unbalanced brackets"
if len(open_brackets) != 0:
return "Unbalanced brackets"
# now we can start interpreting
ip = 0 # instruction pointer
mp = 0 # memory pointer
steps = 0
memory = [0] * BUFFER_SIZE # initial memory area
rightmost = 0
output = "" # we'll save the output here
# the main program loop:
while ip < len(program):
c = program[ip]
if c == '+':
memory[mp] = (memory[mp] + 1) % 256
elif c == '-':
memory[mp] = (memory[mp] - 1) % 256
elif c == '>':
mp += 1
if mp > rightmost:
rightmost = mp
if mp >= len(memory):
# no restriction on memory growth!
memory.extend([0] * BUFFER_SIZE)
elif c == '<':
            mp = (mp - 1) % len(memory)  # wrap around the tape instead of just decrementing
elif c == '.':
output += chr(memory[mp])
if len(output) > 500:
break
elif c == ',':
memory[mp] = random.randint(1, 255)
elif c == '[':
if memory[mp] == 0:
ip = brackets[ip]
elif c == ']':
if memory[mp] != 0:
ip = brackets[ip]
ip += 1
steps += 1
if steps > MAX_STEPS:
if not output:
output = "(no output)"
output += "(exceeded {} iterations)".format(MAX_STEPS)
break
stripped_output = re.sub(r'[\x00-\x1F]', '', output)
if not stripped_output:
if output:
return "No printable output"
return "No output"
return stripped_output[:430]
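
# Hedged example (added): a tiny program this interpreter should handle.
# Eight passes of the loop add 8 to the second cell (64), one more '+' makes 65,
# and '.' emits chr(65), i.e. "A". Note that bf() is wrapped by the hook and
# coroutine decorators above, so calling it directly outside cloudbot is only a sketch:
#
#   bf("++++++++[>++++++++<-]>+.")   # -> "A"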
| StarcoderdataPython |
4836328 | # Copyright 2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------------
import unittest
from rpc_client import RpcClient
from mock_validator import MockValidator
from sawtooth_sdk.protobuf.validator_pb2 import Message
from sawtooth_sdk.protobuf.client_block_pb2 import ClientBlockListRequest
from sawtooth_sdk.protobuf.client_block_pb2 import ClientBlockListResponse
from sawtooth_sdk.protobuf.client_block_pb2 import ClientBlockGetByIdRequest
from sawtooth_sdk.protobuf.client_block_pb2 import ClientBlockGetByNumRequest
from sawtooth_sdk.protobuf.client_block_pb2 import \
ClientBlockGetByTransactionIdRequest
from sawtooth_sdk.protobuf.client_block_pb2 import ClientBlockGetResponse
from sawtooth_sdk.protobuf.client_peers_pb2 import ClientPeersGetResponse
from sawtooth_sdk.protobuf.client_state_pb2 import ClientStateGetRequest
from sawtooth_sdk.protobuf.client_state_pb2 import ClientStateGetResponse
from sawtooth_sdk.protobuf.client_transaction_pb2 import \
ClientTransactionGetRequest
from sawtooth_sdk.protobuf.client_transaction_pb2 import \
ClientTransactionGetResponse
from sawtooth_sdk.protobuf.client_batch_submit_pb2 import \
ClientBatchSubmitRequest
from sawtooth_sdk.protobuf.client_batch_submit_pb2 import \
ClientBatchSubmitResponse
from sawtooth_sdk.protobuf.block_pb2 import Block
from sawtooth_sdk.protobuf.block_pb2 import BlockHeader
from sawtooth_sdk.protobuf.batch_pb2 import Batch
from sawtooth_sdk.protobuf.batch_pb2 import BatchHeader
from sawtooth_sdk.protobuf.transaction_pb2 import Transaction
from sawtooth_sdk.protobuf.transaction_pb2 import TransactionHeader
from sawtooth_sdk.protobuf.client_receipt_pb2 import ClientReceiptGetRequest
from sawtooth_sdk.protobuf.client_receipt_pb2 import ClientReceiptGetResponse
from sawtooth_sdk.protobuf.transaction_receipt_pb2 import TransactionReceipt
from sawtooth_sdk.protobuf.events_pb2 import Event
from protobuf.seth_pb2 import SethTransactionReceipt
from protobuf.seth_pb2 import EvmEntry
from protobuf.seth_pb2 import EvmStateAccount
from protobuf.seth_pb2 import EvmStorage
from protobuf.seth_pb2 import SethTransaction
from protobuf.seth_pb2 import CreateExternalAccountTxn
from protobuf.seth_pb2 import CreateContractAccountTxn
from protobuf.seth_pb2 import MessageCallTxn
from protobuf.seth_pb2 import SetPermissionsTxn
import logging
LOGGER = logging.getLogger(__name__)
class SethRpcTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.validator = MockValidator()
cls.validator.listen("tcp://eth0:4004")
cls.url = 'http://seth-rpc:3030/'
cls.rpc = RpcClient(cls.url)
cls.rpc.wait_for_service()
# block values
cls.block_id = "f" * 128
cls.block_num = 123
cls.prev_block_id = "e" * 128
cls.state_root = "d" * 64
cls.txn_id = "c" * 64
cls.gas = 456
# account values
cls.public_key = "036d7bb6ca0fd581eb037e91042320af97508003264f08545a9db134df215f373e"
cls.account_address = "434d46456b6973a678b77382fca0252629f4389f"
cls.contract_address = "f" * 20 * 2
cls.contract_address_b = bytes([0xff] * 20)
cls.contract_init_s = "0" * 60 * 2
cls.contract_init_b = bytes([0x0] * 60)
cls.contract_init_txn_id = "de0a7299e732f04cdf18f098f44d70574512a5f4ef037105b028223195c781c424a48cadaf1a7de26f41b085d57cf40e15f0ebe24cca2bc36114abd679a95d4a"
cls.contract_call_s = "0" * 30 * 2
cls.contract_call_b = bytes([0x0] * 30)
cls.contract_call_txn_id = "057be5cc3860362022178b0d05012c7a8f1073c75d7f1f695d8091e8a18112b07fdcd3403dfc3728dabaf04ea0000e3d7a212abf3b60dbff44155bf8ed237e43"
cls.balance = 123
cls.nonce = 456
cls.code_b = bytes([0xab, 0xcd, 0xef])
cls.code_s = "abcdef"
cls.position_b = bytes([0x01, 0x23, 0x45])
cls.position_s = "012345"
cls.stored_b = bytes([0x67, 0x89])
cls.stored_s = "6789"
cls.topic1_s = "ff" * 32
cls.topic1_b = bytes([0xff] * 32)
cls.topic2_s = "cc" * 32
cls.topic2_b = bytes([0xcc] * 32)
cls.log_data_s = "8888"
cls.log_data_b = bytes([0x88, 0x88])
cls.return_value_s = "2a"
cls.return_value_b = bytes([0x2a])
# -- Network tests -- #
def test_net_version(self):
"""Test that the network id 19 is returned."""
self.assertEqual("19", self.rpc.call("net_version"))
def test_net_peerCount(self):
"""Test that 1 is returned as hex."""
self.rpc.acall("net_peerCount")
msg = self.validator.receive()
self.assertEqual(msg.message_type, Message.CLIENT_PEERS_GET_REQUEST)
self.validator.respond(
Message.CLIENT_PEERS_GET_RESPONSE,
ClientPeersGetResponse(
status=ClientPeersGetResponse.OK,
peers=["test_peer"]),
msg)
self.assertEqual("0x1", self.rpc.get_result())
def test_net_listening(self):
"""Test that the True is returned."""
self.assertEqual(True, self.rpc.call("net_listening"))
# -- Block tests -- #
def test_block_number(self):
"""Test that the block number is extracted correctly and returned as
hex."""
self.rpc.acall("eth_blockNumber")
msg = self.validator.receive()
self.assertEqual(msg.message_type, Message.CLIENT_BLOCK_LIST_REQUEST)
self.validator.respond(
Message.CLIENT_BLOCK_LIST_RESPONSE,
ClientBlockListResponse(
status=ClientBlockListResponse.OK,
blocks=[Block(
header=BlockHeader(block_num=15).SerializeToString(),
)]),
msg)
self.assertEqual("0xf", self.rpc.get_result())
def test_get_block_transaction_count_by_hash(self):
"""Test that a block transaction count is retrieved correctly, given a
block id."""
self.rpc.acall(
"eth_getBlockTransactionCountByHash", ["0x" + self.block_id])
msg, request = self._receive_block_request_id()
self.assertEqual(request.block_id, self.block_id)
self._send_block_back(msg)
result = self.rpc.get_result()
self.assertEqual(result, "0x1")
def test_get_block_transaction_count_by_hash_wrong_input(self):
"""Test that the correct error message is returned if no input is given
to eth_getBlockTransactionCountByHash.
"""
self.rpc.acall(
"eth_getBlockTransactionCountByHash", )
result = self.rpc.get_result()
self.assertEqual(result["error"]["message"],
"Takes [blockHash: DATA(64)]")
def test_get_block_transaction_count_by_hash_no_block(self):
"""Test that None is returned if no block is found for
eth_getBlockTransactionCountByHash.
"""
bad_id = "1" * 128
self.rpc.acall(
"eth_getBlockTransactionCountByHash", ["0x" + bad_id])
msg, request = self._receive_block_request_id()
self.assertEqual(request.block_id, bad_id)
self._send_block_no_resource(msg)
result = self.rpc.get_result()
self.assertIsNone(result)
def test_get_block_transaction_count_by_number(self):
"""Test that a block transaction count is retrieved correctly, given a
block number."""
self.rpc.acall(
"eth_getBlockTransactionCountByNumber", [hex(self.block_num)])
msg, request = self._receive_block_request_num()
self.assertEqual(request.block_num, self.block_num)
self._send_block_back(msg)
result = self.rpc.get_result()
self.assertEqual(result, "0x1")
def test_get_block_transaction_count_by_number_bad_input(self):
"""Test that the correct error message is returned if no input is given
to eth_getBlockTransactionCountByNumber.
"""
self.rpc.acall(
"eth_getBlockTransactionCountByNumber", )
result = self.rpc.get_result()
self.assertEqual(result["error"]["message"],
"Takes [blockNum: QUANTITY|TAG]")
def test_get_block_transaction_count_by_number_no_block(self):
"""Test that None is returned if no block is found for
eth_getBlockTransactionCountByNumber.
"""
bad_num = 2
self.rpc.acall(
"eth_getBlockTransactionCountByNumber", [hex(bad_num)])
msg, request = self._receive_block_request_num()
self.assertEqual(request.block_num, bad_num)
self._send_block_no_resource(msg)
result = self.rpc.get_result()
self.assertIsNone(result)
def test_get_block_by_hash(self):
"""Test that a block is retrieved correctly, given a block hash."""
self.rpc.acall("eth_getBlockByHash", ["0x" + self.block_id, False])
msg, request = self._receive_block_request_id()
self.assertEqual(request.block_id, self.block_id)
self._send_block_back(msg)
msg, request = self._receive_receipt_request()
self.assertEqual(request.transaction_ids[0], self.txn_id)
self._send_receipts_back(msg)
result = self.rpc.get_result()
self.assertEqual(result["number"], hex(self.block_num))
self.assertEqual(result["hash"], "0x" + self.block_id)
self.assertEqual(result["parentHash"], "0x" + self.prev_block_id)
self.assertEqual(result["stateRoot"], "0x" + self.state_root)
self.assertEqual(result["gasUsed"], hex(self.gas))
self.assertEqual(result["transactions"][0], "0x" + self.txn_id)
def test_get_block_by_hash_bad_input(self):
"""Test that the correct error message is returned if no input is given
to eth_getBlockByHash.
"""
self.rpc.acall("eth_getBlockByHash", )
result = self.rpc.get_result()
self.assertEqual(result["error"]["message"],
"Takes [blockHash: DATA(64), full: BOOL]")
def test_get_block_by_bad_hash(self):
"""Test that None is returned if no block is found for
eth_getBlockByHash.
"""
bad_id = "1" * 128
self.rpc.acall("eth_getBlockByHash", ["0x" + bad_id, False])
msg, request = self._receive_block_request_id()
self.assertEqual(request.block_id, bad_id)
self._send_block_no_resource(msg)
result = self.rpc.get_result()
self.assertIsNone(result)
def test_get_block_by_number(self):
"""Test that a block is retrieved correctly, given a block number."""
self.rpc.acall("eth_getBlockByNumber", [hex(self.block_num), False])
msg, request = self._receive_block_request_num()
self.assertEqual(request.block_num, self.block_num)
self._send_block_back(msg)
msg, request = self._receive_receipt_request()
self.assertEqual(request.transaction_ids[0], self.txn_id)
self._send_receipts_back(msg)
result = self.rpc.get_result()
self.assertEqual(result["number"], hex(self.block_num))
self.assertEqual(result["hash"], "0x" + self.block_id)
self.assertEqual(result["parentHash"], "0x" + self.prev_block_id)
self.assertEqual(result["stateRoot"], "0x" + self.state_root)
self.assertEqual(result["gasUsed"], hex(self.gas))
self.assertEqual(result["transactions"][0], "0x" + self.txn_id)
def test_get_block_by_number_bad_input(self):
"""Test that the correct error message is returned if no input is given
to eth_getBlockByNumber.
"""
self.rpc.acall("eth_getBlockByNumber", )
result = self.rpc.get_result()
self.assertEqual(result["error"]["message"],
"Takes [blockNum: QUANTITY|TAG, full: BOOL]")
def test_get_block_by_bad_number(self):
"""Test that None is returned if no block is found for
eth_getBlockByNumber.
"""
bad_num = 2
self.rpc.acall("eth_getBlockByNumber", [hex(bad_num), False])
msg, request = self._receive_block_request_num()
self.assertEqual(request.block_num, bad_num)
self._send_block_no_resource(msg)
result = self.rpc.get_result()
self.assertIsNone(result)
# -- Account tests -- #
def test_get_balance(self):
"""Test that an account balance is retrieved correctly."""
# self._test_get_account("balance")
self.rpc.acall(
"eth_getBalance", ["0x" + self.account_address, "latest"])
msg, request = self._receive_state_request()
self.assertEqual(request.address,
"a68b06" + self.account_address + "0" * 24)
self._send_state_response(msg)
result = self.rpc.get_result()
self.assertEqual(hex(self.balance), result)
def test_get_balance_bad_input(self):
"""Test that the correct error message is returned if no input is given
to eth_getBalance
"""
self.rpc.acall("eth_getBalance",)
result = self.rpc.get_result()
self.assertEqual(result["error"]["message"],
"Takes [address: DATA(20), block: QUANTITY|TAG]")
def test_get_balance_no_block(self):
"""Test that None is returned if no block is found for
eth_getBalance.
"""
bad_account_address = "a" * 20 * 2
self.rpc.acall(
"eth_getBalance", ["0x" + bad_account_address, "latest"])
msg, request = self._receive_state_request()
self.assertEqual(request.address,
"a68b06" + bad_account_address + "0" * 24)
self._send_state_no_resource(msg)
result = self.rpc.get_result()
self.assertIsNone(result)
def test_get_code(self):
"""Test that an account's code is retrieved correctly."""
# self._test_get_account("balance")
self.rpc.acall(
"eth_getCode", ["0x" + self.account_address, "latest"])
msg, request = self._receive_state_request()
self.assertEqual(request.address,
"a68b06" + self.account_address + "0" * 24)
self._send_state_response(msg)
result = self.rpc.get_result()
self.assertEqual("0x" + self.code_s, result)
def test_get_code_bad_input(self):
"""Test that the correct error message is returned if no input is given
to eth_getCode.
"""
self.rpc.acall("eth_getCode", )
result = self.rpc.get_result()
self.assertEqual(result["error"]["message"],
"Takes [address: DATA(20), block: QUANTITY|TAG]")
def test_get_code_no_block(self):
"""Test that None is returned if no block is found for
eth_getCode.
"""
bad_account_address = "a" * 20 * 2
self.rpc.acall(
"eth_getCode", ["0x" + bad_account_address, "latest"])
msg, request = self._receive_state_request()
self.assertEqual(request.address,
"a68b06" + bad_account_address + "0" * 24)
self._send_state_no_resource(msg)
result = self.rpc.get_result()
self.assertIsNone(result)
def test_get_storage_at(self):
"""Test that an account's storage is retrieved correctly."""
# self._test_get_account("balance")
self.rpc.acall(
"eth_getStorageAt",
["0x" + self.account_address, "0x" + self.position_s, "latest"])
msg, request = self._receive_state_request()
self.assertEqual(request.address,
"a68b06" + self.account_address + "0" * 24)
self._send_state_response(msg)
result = self.rpc.get_result()
self.assertEqual("0x" + self.stored_s, result)
def test_get_storage_at_bad_input(self):
"""Test that the correct error message is returned if no input is given
to eth_getStorageAt.
"""
self.rpc.acall("eth_getStorageAt",)
result = self.rpc.get_result()
self.assertEqual(
result["error"]["message"],
"Takes [address: DATA(20), position: QUANTITY, block: "
"QUANTITY|TAG]")
def test_get_storage_at_no_address(self):
"""Test that None is returned if no address is found for
eth_getStorageAt.
"""
bad_account_address = "a" * 20 * 2
self.rpc.acall(
"eth_getStorageAt",
["0x" + bad_account_address, "0x" + self.position_s, "latest"])
msg, request = self._receive_state_request()
self.assertEqual(request.address,
"a68b06" + bad_account_address + "0" * 24)
self._send_state_no_resource(msg)
result = self.rpc.get_result()
self.assertIsNone(result)
def test_get_account_by_block_num(self):
"""Tests that account info is retrieved correctly when a block number
is used as the block key.
This requires an extra exchange with the validator to translate the
block number into a block id, since it isn't possible to look up state
based on a block number.
"""
account_address = "f" * 20 * 2
balance = 123
block_num = 321
block_id = "f" * 128
state_root = "b" * 64
self.rpc.acall(
"eth_getBalance", ["0x" + account_address, hex(block_num)])
msg = self.validator.receive()
self.assertEqual(msg.message_type,
Message.CLIENT_BLOCK_GET_BY_NUM_REQUEST)
request = ClientBlockGetByNumRequest()
request.ParseFromString(msg.content)
self.assertEqual(request.block_num, block_num)
self.validator.respond(
Message.CLIENT_BLOCK_GET_RESPONSE,
ClientBlockGetResponse(
status=ClientBlockGetResponse.OK,
block=Block(
header_signature=block_id,
header=BlockHeader(
state_root_hash=state_root,
).SerializeToString(),
)
),
msg)
msg = self.validator.receive()
self.assertEqual(msg.message_type, Message.CLIENT_STATE_GET_REQUEST)
request = ClientStateGetRequest()
request.ParseFromString(msg.content)
self.assertEqual(request.state_root, state_root)
self.assertEqual(request.address,
"a68b06" + account_address + "0" * 24)
self.validator.respond(
Message.CLIENT_STATE_GET_RESPONSE,
ClientStateGetResponse(
status=ClientStateGetResponse.OK,
value=EvmEntry(
account=EvmStateAccount(balance=balance),
).SerializeToString()),
msg)
result = self.rpc.get_result()
self.assertEqual(hex(balance), result)
def test_get_account_by_block_num_bad_input(self):
"""Test that the correct error message is returned if no input is given
to eth_getBalance.
"""
self.rpc.acall("eth_getBalance",)
result = self.rpc.get_result()
self.assertEqual(result["error"]["message"],
"Takes [address: DATA(20), block: QUANTITY|TAG]")
def test_accounts(self):
"""Tests that account list is retrieved correctly."""
address = "434d46456b6973a678b77382fca0252629f4389f"
self.assertEqual(["0x" + address], self.rpc.call("eth_accounts"))
# -- Transaction tests -- #
def test_get_transaction_by_hash(self):
"""Tests that a transaction is retrieved correctly given its hash."""
txn_ids = [
"0" * 64,
"1" * 64,
"2" * 64,
"3" * 64,
]
txn_idx = 2
self.rpc.acall(
"eth_getTransactionByHash",
["0x" + txn_ids[txn_idx]])
msg, request = self._receive_transaction_request()
self.assertEqual(request.transaction_id, txn_ids[txn_idx])
block = self._make_multi_txn_block(txn_ids)
self._send_transaction_response(msg, block.batches[1].transactions[1])
msg, request = self._receive_block_request_transaction()
self._send_block_back(msg, block)
result = self.rpc.get_result()
self.assertEqual(result["hash"], "0x" + txn_ids[txn_idx])
self.assertEqual(result["nonce"], hex(self.nonce))
self.assertEqual(result["blockHash"], "0x" + self.block_id)
self.assertEqual(result["blockNumber"], hex(self.block_num))
self.assertEqual(result["transactionIndex"], hex(txn_idx))
self.assertEqual(result["from"], "0x" + self.account_address)
self.assertEqual(result["to"], "0x" + self.contract_address)
self.assertEqual(result["value"], "0x0")
self.assertEqual(result["gasPrice"], "0x0")
self.assertEqual(result["gas"], hex(self.gas))
self.assertEqual(result["input"], "0x" + self.contract_call_s)
def test_get_transaction_by_hash_bad_input(self):
"""Test that the correct error message is returned if no input is given
to eth_getTransactionByHash.
"""
self.rpc.acall("eth_getTransactionByHash",)
result = self.rpc.get_result()
self.assertEqual(result["error"]["message"],
"Takes [txnHash: DATA(64)]")
def test_get_transaction_by_block_hash_and_index(self):
"""Tests that a transaction is retrieved correctly given a block
signature and transaction index."""
txn_ids = [
"0" * 64,
"1" * 64,
"2" * 64,
"3" * 64,
]
txn_idx = 2
self.rpc.acall(
"eth_getTransactionByBlockHashAndIndex",
["0x" + self.block_id, hex(txn_idx)])
msg, request = self._receive_block_request_id()
self.assertEqual(request.block_id, self.block_id)
block = self._make_multi_txn_block(txn_ids)
self._send_block_back(msg, block)
result = self.rpc.get_result()
self.assertEqual(result["hash"], "0x" + txn_ids[txn_idx])
self.assertEqual(result["nonce"], hex(self.nonce))
self.assertEqual(result["blockHash"], "0x" + self.block_id)
self.assertEqual(result["blockNumber"], hex(self.block_num))
self.assertEqual(result["transactionIndex"], hex(txn_idx))
self.assertEqual(result["from"], "0x" + self.account_address)
self.assertEqual(result["to"], "0x" + self.contract_address)
self.assertEqual(result["value"], "0x0")
self.assertEqual(result["gasPrice"], "0x0")
self.assertEqual(result["gas"], hex(self.gas))
self.assertEqual(result["input"], "0x" + self.contract_call_s)
def test_get_transaction_by_block_hash_and_index_bad_input(self):
"""Test that the correct error message is returned if no input is given
to eth_getTransactionByBlockHashAndIndex.
"""
self.rpc.acall("eth_getTransactionByBlockHashAndIndex",)
result = self.rpc.get_result()
self.assertEqual(result["error"]["message"],
"Takes [blockHash: DATA(64), index: QUANTITY]")
def test_get_transaction_by_block_number_and_index(self):
"""Tests that a transaction is retrieved correctly given a block
number and transaction index."""
txn_ids = [
"0" * 64,
"1" * 64,
"2" * 64,
"3" * 64,
]
txn_idx = 2
self.rpc.acall(
"eth_getTransactionByBlockNumberAndIndex",
[hex(self.block_num), hex(txn_idx)])
msg, request = self._receive_block_request_num()
self.assertEqual(request.block_num, self.block_num)
block = self._make_multi_txn_block(txn_ids)
self._send_block_back(msg, block)
result = self.rpc.get_result()
self.assertEqual(result["hash"], "0x" + txn_ids[txn_idx])
self.assertEqual(result["nonce"], hex(self.nonce))
self.assertEqual(result["blockHash"], "0x" + self.block_id)
self.assertEqual(result["blockNumber"], hex(self.block_num))
self.assertEqual(result["transactionIndex"], hex(txn_idx))
self.assertEqual(result["from"], "0x" + self.account_address)
self.assertEqual(result["to"], "0x" + self.contract_address)
self.assertEqual(result["value"], "0x0")
self.assertEqual(result["gasPrice"], "0x0")
self.assertEqual(result["gas"], hex(self.gas))
self.assertEqual(result["input"], "0x" + self.contract_call_s)
def test_get_transaction_by_block_number_and_index_bad_input(self):
"""Test that the correct error message is returned if no input is given
to eth_getTransactionByBlockHashAndIndex.
"""
self.rpc.acall("eth_getTransactionByBlockNumberAndIndex",)
result = self.rpc.get_result()
self.assertEqual(result["error"]["message"],
"Takes [blockNum: QUANTITY|TAG, index: QUANTITY]")
def test_get_transaction_no_block(self):
block_id = "a" * 128
block_num = 678
txn_ids = [
"0" * 64,
"1" * 64,
"2" * 64,
"3" * 64,
]
txn_idx = 2
nonce = 4
pub_key = "035e1de3048a62f9f478440a22fd7655b" + \
"<KEY>"
addr = "b4d09ca3c0bc538340e904b689016bbb4248136c"
gas = 100
to_b = bytes([0xab, 0xcd, 0xef])
to_s = "abcdef"
data_b = bytes([0x67, 0x89])
data_s = "6789"
self.rpc.acall(
"eth_getTransactionByBlockNumberAndIndex",
[hex(block_num), hex(txn_idx)])
msg, request = self._receive_block_request_num()
self.assertEqual(request.block_num, block_num)
self._send_block_no_resource(msg)
result = self.rpc.get_result()
self.assertIsNone(result)
def test_get_transaction_by_block_hash_and_index_no_block(self):
"""Tests that a transaction is retrieved correctly given a block
signature and transaction index, where the block doesn't exist but the
transaction does."""
block_id = "a" * 128
txn_idx = 2
self.rpc.acall(
"eth_getTransactionByBlockHashAndIndex",
["0x" + block_id, hex(txn_idx)])
msg = self.validator.receive()
self.assertEqual(msg.message_type,
Message.CLIENT_BLOCK_GET_BY_ID_REQUEST)
request = ClientBlockGetByIdRequest()
request.ParseFromString(msg.content)
self.assertEqual(request.block_id, block_id)
self.validator.respond(
Message.CLIENT_BLOCK_GET_RESPONSE,
ClientBlockGetResponse(status=ClientBlockGetResponse.NO_RESOURCE),
msg)
result = self.rpc.get_result()
self.assertEqual(result, None)
def test_send_transaction_contract_creation(self):
"""Tests that a contract creation txn is submitted correctly."""
self.rpc.acall(
"eth_sendTransaction", [{
"from": "0x" + self.account_address,
"data": "0x" + self.contract_init_s
}])
msg, txn = self._receive_state_request()
self._send_state_response(msg)
msg, txn = self._receive_submit_request()
seth_txn = SethTransaction()
seth_txn.ParseFromString(txn.payload)
self.assertEqual(
seth_txn.transaction_type, SethTransaction.CREATE_CONTRACT_ACCOUNT)
create = seth_txn.create_contract_account
self.assertEqual(create.init, self.contract_init_b)
self.assertEqual(create.gas_limit, 90000)
self.assertEqual(create.gas_price, 10000000000000)
self.assertEqual(create.value, 0)
self._send_submit_response(msg)
self.assertEqual(
"0x" + self.contract_init_txn_id, self.rpc.get_result())
def test_send_transaction_message_call(self):
"""Tests that a message call txn is submitted correctly."""
self.rpc.acall(
"eth_sendTransaction", [{
"from": "0x" + self.account_address,
"data": "0x" + self.contract_call_s,
"to": "0x" + self.contract_address,
}])
msg, txn = self._receive_state_request()
self._send_state_response(msg)
msg, txn = self._receive_submit_request()
seth_txn = SethTransaction()
seth_txn.ParseFromString(txn.payload)
self.assertEqual(
seth_txn.transaction_type, SethTransaction.MESSAGE_CALL)
call = seth_txn.message_call
self.assertEqual(call.data, self.contract_call_b)
self.assertEqual(call.gas_limit, 90000)
self.assertEqual(call.gas_price, 10000000000000)
self.assertEqual(call.value, 0)
self._send_submit_response(msg)
self.assertEqual(
"0x" + self.contract_call_txn_id, self.rpc.get_result())
def test_get_transaction_receipt(self):
"""Tests that a transaction receipt is retrieved correctly."""
self.rpc.acall(
"eth_getTransactionReceipt", ["0x" + self.txn_id])
msg, request = self._receive_receipt_request()
self.assertEqual(request.transaction_ids[0], self.txn_id)
self._send_receipts_back(msg)
msg, request = self._receive_transaction_request()
self._send_transaction_response(msg)
msg, request = self._receive_block_request_transaction()
block = Block(
header=BlockHeader(block_num=self.block_num).SerializeToString(),
header_signature=self.block_id,
batches=[Batch(transactions=[
Transaction(header_signature=self.txn_id)])])
self._send_block_back(msg, block)
result = self.rpc.get_result()
self.assertEqual(result["transactionHash"], "0x" + self.txn_id)
self.assertEqual(result["transactionIndex"], hex(0))
self.assertEqual(result["blockHash"], "0x" + self.block_id)
self.assertEqual(result["blockNumber"], hex(self.block_num))
self.assertEqual(result["cumulativeGasUsed"], hex(self.gas))
self.assertEqual(result["gasUsed"], hex(self.gas))
self.assertEqual(result["returnValue"], "0x" + self.return_value_s)
self.assertEqual(
result["contractAddress"], "0x" + self.contract_address)
log = result["logs"][0]
self.assertEqual(log["removed"], False)
self.assertEqual(log["logIndex"], hex(0))
self.assertEqual(log["transactionIndex"], hex(0))
self.assertEqual(log["transactionHash"], "0x" + self.txn_id)
self.assertEqual(log["blockHash"], "0x" + self.block_id)
self.assertEqual(log["blockNumber"], hex(self.block_num))
self.assertEqual(log["address"], "0x" + self.contract_address)
self.assertEqual(log["data"], "0x" + self.log_data_s)
topic1, topic2 = log["topics"]
self.assertEqual(topic1, "0x" + self.topic1_s)
self.assertEqual(topic2, "0x" + self.topic2_s)
def test_gas_price(self):
"""Tests that the gas price is returned correctly."""
self.assertEqual("0x0", self.rpc.call("eth_gasPrice"))
def test_sign(self):
"""Tests that a payload is signed correctly."""
msg = b"test"
signature = self.rpc.call(
"eth_sign", ["0x" + self.account_address, "0x" + msg.hex()])
self.assertEqual(signature,
"0x4bd3560fcabbe7c13d8829dcb82b381fe3882db14aeb6d22b8b0ea069e60" +\
"28a02d85497c9b26203c31f028f31fa0ae9b944aa219ae6ecf7655b2e2428d" +\
"d6904f")
# -- Log tests -- #
def test_new_filter(self):
"""Test that new log filters are created sequentially and that nothing
breaks while creating them."""
self.rpc.acall("eth_newFilter", [{
"fromBlock": "0x1",
"toBlock": "0x2",
"address": "0x" + self.contract_address,
"topics": [
"0x000000000000000000000000a94f5374fce5edbc8e2a8697c15331677e6ebf0b",
None,
[
"0x000000000000000000000000a94f5374fce5edbc8e2a8697c15331677e6ebf0b",
"0x0000000000000000000000000aff3454fce5edbc8cca8697c15331677e6ebccc"
,]
]
}])
self._block_list_exchange()
result = self.rpc.get_result()
n = int(result, 16)
self.rpc.acall("eth_newFilter", [{
"address": [
"0x" + self.contract_address,
"0x" + self.account_address,
],
"topics": [],
}])
self._block_list_exchange()
result = self.rpc.get_result()
n_plus_1 = int(result, 16)
self.assertEqual(n + 1, n_plus_1)
def test_new_block_filter(self):
"""Test that new block filters are created sequentially and that
nothing breaks while creating them."""
self.rpc.acall("eth_newBlockFilter")
self._block_list_exchange()
result = self.rpc.get_result()
n = int(result, 16)
self.rpc.acall("eth_newBlockFilter")
self._block_list_exchange()
result = self.rpc.get_result()
n_plus_1 = int(result, 16)
self.assertEqual(n + 1, n_plus_1)
def test_new_transaction_filter(self):
"""Test that new transaction filters are created sequentially and that
nothing breaks while creating them."""
self.rpc.acall("eth_newPendingTransactionFilter")
self._block_list_exchange()
result = self.rpc.get_result()
n = int(result, 16)
self.rpc.acall("eth_newPendingTransactionFilter")
self._block_list_exchange()
result = self.rpc.get_result()
n_plus_1 = int(result, 16)
self.assertEqual(n + 1, n_plus_1)
def test_uninstall_filter(self):
"""Test that uninstalling a filter works"""
self.rpc.acall("eth_newBlockFilter")
self._block_list_exchange()
filter_id = self.rpc.get_result()
self.assertEqual(
True, self.rpc.call("eth_uninstallFilter", [filter_id]))
def test_get_logs(self):
"""Test that getting logs works."""
log_filter = {
"fromBlock": hex(self.block_num),
"address": "0x" + self.contract_address,
"topics": [
"0x" + self.topic1_s,
["0x" + self.topic1_s, "0x" + self.topic2_s]
],
}
self.rpc.acall("eth_getLogs", [log_filter])
msg, request = self._receive_block_request_num()
self.assertEqual(request.block_num, self.block_num)
self._send_block_back(msg)
msg, request = self._receive_receipt_request()
self._send_receipts_back(msg)
msg, request = self._receive_block_request_num()
self.assertEqual(request.block_num, self.block_num + 1)
self._send_block_no_resource(msg)
result = self.rpc.get_result()
log = result[0]
self.assertEqual(log["removed"], False)
self.assertEqual(log["logIndex"], hex(0))
self.assertEqual(log["transactionIndex"], hex(0))
self.assertEqual(log["transactionHash"], "0x" + self.txn_id)
self.assertEqual(log["blockHash"], "0x" + self.block_id)
self.assertEqual(log["blockNumber"], hex(self.block_num))
self.assertEqual(log["address"], "0x" + self.contract_address)
self.assertEqual(log["data"], "0x" + self.log_data_s)
topic1, topic2 = log["topics"]
self.assertEqual(topic1, "0x" + self.topic1_s)
self.assertEqual(topic2, "0x" + self.topic2_s)
def test_get_filter_logs(self):
"""Test that getting logs from a filter works."""
log_filter = {
"fromBlock": hex(self.block_num),
"address": "0x" + self.contract_address,
"topics": [
"0x" + self.topic1_s,
["0x" + self.topic1_s, "0x" + self.topic2_s]
],
}
self.rpc.acall("eth_newFilter", [log_filter])
self._block_list_exchange()
filter_id = self.rpc.get_result()
self.rpc.acall("eth_getFilterLogs", [filter_id])
msg, request = self._receive_block_request_num()
self.assertEqual(request.block_num, self.block_num)
self._send_block_back(msg)
msg, request = self._receive_receipt_request()
self._send_receipts_back(msg)
msg, request = self._receive_block_request_num()
self.assertEqual(request.block_num, self.block_num + 1)
self._send_block_no_resource(msg)
result = self.rpc.get_result()
log = result[0]
self.assertEqual(log["removed"], False)
self.assertEqual(log["logIndex"], hex(0))
self.assertEqual(log["transactionIndex"], hex(0))
self.assertEqual(log["transactionHash"], "0x" + self.txn_id)
self.assertEqual(log["blockHash"], "0x" + self.block_id)
self.assertEqual(log["blockNumber"], hex(self.block_num))
self.assertEqual(log["address"], "0x" + self.contract_address)
self.assertEqual(log["data"], "0x" + self.log_data_s)
topic1, topic2 = log["topics"]
self.assertEqual(topic1, "0x" + self.topic1_s)
self.assertEqual(topic2, "0x" + self.topic2_s)
def test_get_block_filter_changes(self):
"""Tests that getting block filter changes works."""
self.rpc.acall("eth_newBlockFilter")
self._block_list_exchange()
filter_id = self.rpc.get_result()
block_id_plus_1 = "e" * 128
block_id_plus_2 = "d" * 128
self.rpc.acall("eth_getFilterChanges", [filter_id])
self._block_list_exchange(blocks=[Block(
header=BlockHeader(
block_num=self.block_num+2,
).SerializeToString(),
header_signature=block_id_plus_2,
)])
self._block_get_exchange(block=Block(
header=BlockHeader(
block_num=self.block_num+1,
).SerializeToString(),
header_signature=block_id_plus_1,
))
result = self.rpc.get_result()
self.assertEqual(len(result), 2)
self.assertEqual(result[0], "0x" + block_id_plus_1)
self.assertEqual(result[1], "0x" + block_id_plus_2)
def test_get_transaction_filter_changes(self):
"""Tests that getting transaction filter changes works."""
self.rpc.acall("eth_newPendingTransactionFilter")
self._block_list_exchange()
filter_id = self.rpc.get_result()
txn_id_1 = "e" * 128
txn_id_2 = "d" * 128
self.rpc.acall("eth_getFilterChanges", [filter_id])
self._block_list_exchange(blocks=[Block(
header=BlockHeader(
block_num=self.block_num+2,
).SerializeToString(),
batches=[Batch(transactions=[Transaction(
header=TransactionHeader(
family_name="seth",
).SerializeToString(),
header_signature=txn_id_2,
)])],
)])
self._block_get_exchange(block=Block(
header=BlockHeader(
block_num=self.block_num+1,
).SerializeToString(),
batches=[Batch(transactions=[Transaction(
header=TransactionHeader(
family_name="seth",
).SerializeToString(),
header_signature=txn_id_1,
)])],
))
result = self.rpc.get_result()
self.assertEqual(len(result), 2)
self.assertEqual(result[0], "0x" + txn_id_1)
self.assertEqual(result[1], "0x" + txn_id_2)
def test_get_log_filter_changes(self):
"""Tests that getting log filter changes works."""
txn_ids = [
"d" * 128,
"e" * 128,
]
topics = [
self.topic1_s,
self.topic2_s,
]
# Create the filter
self.rpc.acall("eth_newFilter", [{
"address": "0x" + self.contract_address,
"topics": [
["0x" + t for t in topics],
]
}])
self._block_list_exchange()
filter_id = self.rpc.get_result()
# Request changes
self.rpc.acall("eth_getFilterChanges", [filter_id])
# Exchange blocks
self._block_list_exchange(blocks=[Block(
header=BlockHeader(
block_num=self.block_num+2,
).SerializeToString(),
header_signature=self.block_id,
batches=[Batch(transactions=[Transaction(
header=TransactionHeader(
family_name="seth",
).SerializeToString(),
header_signature=txn_ids[1],
)])],
)])
self._block_get_exchange(block=Block(
header=BlockHeader(
block_num=self.block_num+1,
).SerializeToString(),
header_signature=self.block_id,
batches=[Batch(transactions=[Transaction(
header=TransactionHeader(
family_name="seth",
).SerializeToString(),
header_signature=txn_ids[0],
)])],
))
receipts = [
TransactionReceipt(
data=[SethTransactionReceipt(
gas_used=self.gas,
return_value=self.return_value_b,
contract_address=self.contract_address_b,
).SerializeToString(),
],
events=[Event(
event_type="seth_log_event",
attributes=[
Event.Attribute(key="address", value=self.contract_address),
Event.Attribute(key="topic1", value=topics[0]),
],
data=self.log_data_b,
)],
transaction_id=txn_ids[0],
),
TransactionReceipt(
data=[SethTransactionReceipt(
gas_used=self.gas,
return_value=self.return_value_b,
contract_address=self.contract_address_b,
).SerializeToString(),
],
events=[Event(
event_type="seth_log_event",
attributes=[
Event.Attribute(key="address", value=self.contract_address),
Event.Attribute(key="topic1", value=topics[1]),
],
data=self.log_data_b,
)],
transaction_id=txn_ids[1],
),
]
# Exchange receipts for block 1
msg, request = self._receive_receipt_request()
self.assertEqual(request.transaction_ids[0], txn_ids[0])
self._send_receipts_back(msg, [receipts[0]])
# Exchange receipts for block 2
msg, request = self._receive_receipt_request()
self.assertEqual(request.transaction_ids[0], txn_ids[1])
self._send_receipts_back(msg, [receipts[1]])
result = self.rpc.get_result()
self.assertEqual(len(result), 2)
for i, log in enumerate(result):
self.assertEqual(log["removed"], False)
self.assertEqual(log["logIndex"], hex(0))
self.assertEqual(log["transactionIndex"], hex(0))
self.assertEqual(log["transactionHash"], "0x" + txn_ids[i])
self.assertEqual(log["blockHash"], "0x" + self.block_id)
self.assertEqual(log["blockNumber"], hex(self.block_num + i + 1))
self.assertEqual(log["address"], "0x" + self.contract_address)
self.assertEqual(log["data"], "0x" + self.log_data_s)
topic1 = log["topics"][0]
self.assertEqual(topic1, "0x" + topics[i])
# -- Utilities -- #
def _send_block_back(self, msg, block=None):
if block is None:
block = Block(
header=BlockHeader(
block_num=self.block_num,
previous_block_id=self.prev_block_id,
state_root_hash=self.state_root
).SerializeToString(),
header_signature=self.block_id,
batches=[Batch(transactions=[Transaction(
header=TransactionHeader(
family_name="seth",
).SerializeToString(),
header_signature=self.txn_id,
)])],
)
self.validator.respond(
Message.CLIENT_BLOCK_GET_RESPONSE,
ClientBlockGetResponse(
status=ClientBlockGetResponse.OK,
block=block
),
msg)
def _send_block_no_resource(self, msg):
self.validator.respond(
Message.CLIENT_BLOCK_GET_RESPONSE,
ClientBlockGetResponse(
status=ClientBlockGetResponse.NO_RESOURCE
),
msg)
def _send_block_list_back(self, msg, blocks=None):
if blocks is None:
blocks = [Block(
header=BlockHeader(
block_num=self.block_num,
previous_block_id=self.prev_block_id,
state_root_hash=self.state_root
).SerializeToString(),
header_signature=self.block_id,
batches=[Batch(transactions=[Transaction(
header=TransactionHeader(
family_name="seth",
).SerializeToString(),
header_signature=self.txn_id,
)])],
)]
self.validator.respond(
Message.CLIENT_BLOCK_LIST_RESPONSE,
ClientBlockListResponse(
status=ClientBlockListResponse.OK,
blocks=blocks,
),
msg)
def _send_state_no_resource(self, msg):
self.validator.respond(
Message.CLIENT_STATE_GET_RESPONSE,
ClientStateGetResponse(
status=ClientStateGetResponse.NO_RESOURCE,
),
msg)
def _send_receipts_back(self, msg, receipts=None):
if receipts is None:
receipts = [TransactionReceipt(
data=[SethTransactionReceipt(
gas_used=self.gas,
return_value=self.return_value_b,
contract_address=self.contract_address_b,
).SerializeToString(),
],
events=[Event(
event_type="seth_log_event",
attributes=[
Event.Attribute(key="address", value=self.contract_address),
Event.Attribute(key="topic1", value=self.topic1_s),
Event.Attribute(key="topic2", value=self.topic2_s),
],
data=self.log_data_b,
)],
transaction_id=self.txn_id,
)]
self.validator.respond(
Message.CLIENT_RECEIPT_GET_RESPONSE,
ClientReceiptGetResponse(
status=ClientReceiptGetResponse.OK,
receipts=receipts),
msg)
def _send_state_response(self, msg):
self.validator.respond(
Message.CLIENT_STATE_GET_RESPONSE,
ClientStateGetResponse(
status=ClientStateGetResponse.OK,
value=EvmEntry(
account=EvmStateAccount(
balance=self.balance,
nonce=self.nonce,
code=self.code_b),
storage=[EvmStorage(key=self.position_b,
value=self.stored_b)],
).SerializeToString()),
msg)
def _send_transaction_response(self, msg, transaction=None):
if transaction is None:
transaction = Transaction(
header=TransactionHeader(
family_name="seth",
signer_public_key=self.public_key,
).SerializeToString(),
header_signature=self.txn_id,
payload=SethTransaction(
transaction_type=SethTransaction.MESSAGE_CALL
).SerializeToString())
self.validator.respond(
Message.CLIENT_TRANSACTION_GET_RESPONSE,
ClientTransactionGetResponse(
status=ClientBlockGetResponse.OK,
transaction=transaction),
msg)
def _send_submit_response(self, msg):
self.validator.respond(
Message.CLIENT_BATCH_SUBMIT_RESPONSE,
ClientBatchSubmitResponse(status=ClientBatchSubmitResponse.OK),
msg)
def _receive_receipt_request(self):
# Verify receipt get request
msg = self.validator.receive()
self.assertEqual(msg.message_type, Message.CLIENT_RECEIPT_GET_REQUEST)
request = ClientReceiptGetRequest()
request.ParseFromString(msg.content)
return msg, request
def _receive_block_request_transaction(self):
msg = self.validator.receive()
self.assertEqual(msg.message_type,
Message.CLIENT_BLOCK_GET_BY_TRANSACTION_ID_REQUEST)
request = ClientBlockGetByTransactionIdRequest()
request.ParseFromString(msg.content)
return msg, request
def _receive_block_request_id(self):
msg = self.validator.receive()
self.assertEqual(msg.message_type,
Message.CLIENT_BLOCK_GET_BY_ID_REQUEST)
request = ClientBlockGetByIdRequest()
request.ParseFromString(msg.content)
return msg, request
def _receive_block_request_num(self):
msg = self.validator.receive()
self.assertEqual(msg.message_type,
Message.CLIENT_BLOCK_GET_BY_NUM_REQUEST)
request = ClientBlockGetByNumRequest()
request.ParseFromString(msg.content)
return msg, request
def _receive_block_list_request(self):
msg = self.validator.receive()
self.assertEqual(msg.message_type, Message.CLIENT_BLOCK_LIST_REQUEST)
request = ClientBlockListRequest()
request.ParseFromString(msg.content)
return msg, request
def _receive_state_request(self):
msg = self.validator.receive()
self.assertEqual(msg.message_type, Message.CLIENT_STATE_GET_REQUEST)
request = ClientStateGetRequest()
request.ParseFromString(msg.content)
return msg, request
def _receive_transaction_request(self):
msg = self.validator.receive()
self.assertEqual(
msg.message_type, Message.CLIENT_TRANSACTION_GET_REQUEST)
request = ClientTransactionGetRequest()
request.ParseFromString(msg.content)
return msg, request
def _receive_submit_request(self):
msg = self.validator.receive()
self.assertEqual(msg.message_type, Message.CLIENT_BATCH_SUBMIT_REQUEST)
request = ClientBatchSubmitRequest()
request.ParseFromString(msg.content)
batch = request.batches[0]
batch_header = BatchHeader()
batch_header.ParseFromString(batch.header)
self.assertEqual(batch_header.signer_public_key, self.public_key)
txn = batch.transactions[0]
txn_header = TransactionHeader()
txn_header.ParseFromString(txn.header)
self.assertEqual(txn_header.signer_public_key, self.public_key)
self.assertEqual(txn_header.family_name, "seth")
self.assertEqual(txn_header.family_version, "1.0")
return msg, txn
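    # Request/response helpers: receive one request from the validator mock and
    # immediately answer it with a canned response.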
def _block_get_exchange(self, block=None):
msg, _ = self._receive_block_request_num()
self._send_block_back(msg, block)
def _block_list_exchange(self, blocks=None):
msg, _ = self._receive_block_list_request()
self._send_block_list_back(msg, blocks)
def _make_multi_txn_block(self, txn_ids):
gas = self.gas
nonce = self.nonce
block_id = self.block_id
block_num = self.block_num
pub_key = self.public_key
to = self.contract_address_b
init = self.contract_init_b
data = self.contract_call_b
txns = [
Transaction(
header=TransactionHeader(
family_name="seth",
signer_public_key=pub_key,
).SerializeToString(),
header_signature=txn_ids[i],
payload=txn.SerializeToString())
for i, txn in enumerate([
SethTransaction(
transaction_type=SethTransaction.SET_PERMISSIONS,
set_permissions=SetPermissionsTxn()),
SethTransaction(
transaction_type=SethTransaction.CREATE_EXTERNAL_ACCOUNT,
create_external_account=CreateExternalAccountTxn()),
SethTransaction(
transaction_type=SethTransaction.MESSAGE_CALL,
message_call=MessageCallTxn(
nonce=nonce,
gas_limit=gas,
to=to,
data=data,
)),
SethTransaction(
transaction_type=SethTransaction.CREATE_CONTRACT_ACCOUNT,
create_contract_account=CreateContractAccountTxn(
init=init,
)),
])
]
return Block(
header=BlockHeader(
block_num=block_num,
).SerializeToString(),
header_signature=block_id,
batches=[
Batch(transactions=txns[0:1]),
Batch(transactions=txns[1:3]),
Batch(transactions=txns[3:4]),
])
| StarcoderdataPython |
3297926 | #!/usr/bin/env python3
# coding: utf-8
from prompt_toolkit import prompt
from prompt_toolkit.history import InMemoryHistory
from prompt_toolkit.formatted_text import ANSI
import re
p_xor = re.compile(r'([^\s]+)\s*\^\s*([^\s]+)')
p_quotes = re.compile(r'^([^"\']+):.*')
# Anchored so a candidate must match in full; otherwise "0xff" would be
# classified as 'int' because its leading digit matches the int pattern.
p_b64 = re.compile(r'^[A-Za-z0-9+/]+={0,2}$')
p_int = re.compile(r'^[0-9]+$')
p_hex = re.compile(r'^(\\x|0x)?[0-9a-fA-F]+$')
def detectType(arg):
availables_type = ['str', 'int', 'float', 'b64', 'b32', 'hex']
# Manual type
m = p_quotes.match(arg)
if m:
arg_type = m.group(1)
if arg_type in availables_type:
return arg_type
# Else: continue to automatic detection
# Automatic type detection
if p_int.match(arg):
return 'int'
if p_hex.match(arg):
return 'hex'
# Default
return 'str'
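# Illustrative behaviour (assuming the anchored patterns above):
#   detectType("int:42")     -> 'int'  (explicit "type:" prefix)
#   detectType("1234")       -> 'int'
#   detectType("0xdeadbeef") -> 'hex'
#   detectType("hello!")     -> 'str'  (fallback)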
def parse(cmd):
m = p_xor.match(cmd)
if m:
arg1 = m.group(1)
arg2 = m.group(2)
print("%s: %s" % (arg1, detectType(arg1)))
print("%s: %s" % (arg2, detectType(arg2)))
return 1
# Default = error
return 0
if __name__ == '__main__':
history = InMemoryHistory()
status_color = "\033[92m"
while True:
try:
cmd = prompt(ANSI(status_color + ">> "), history=history)
except KeyboardInterrupt:
continue # Control-C pressed. Try again.
except EOFError:
break # Control-D pressed.
if parse(cmd):
# Success
status_color = "\033[92m"
else:
# Error
status_color = "\033[91m"
| StarcoderdataPython |
1715735 | import sys
import io
import unittest
from unittest.mock import patch
from fzfaws.s3.helper.s3progress import S3Progress
import boto3
from botocore.stub import Stubber
class TestS3Progress(unittest.TestCase):
def setUp(self):
self.capturedOutput = io.StringIO()
sys.stdout = self.capturedOutput
def tearDown(self):
sys.stdout = sys.__stdout__
@patch("os.path.getsize")
def test_constructor(self, mocked_size):
mocked_size.return_value = 10
client = boto3.client("s3")
stubber = Stubber(client)
stubber.add_response("head_object", {"ContentLength": 100})
stubber.activate()
progress = S3Progress(filename=__file__)
self.assertEqual(progress._filename, __file__)
self.assertEqual(progress._seen_so_far, 0)
self.assertEqual(progress._size, 10)
progress = S3Progress(filename=__file__, client=client, bucket="hello")
self.assertEqual(progress._filename, __file__)
self.assertEqual(progress._seen_so_far, 0)
self.assertEqual(progress._size, 100)
@patch("os.path.getsize")
def test_call(self, mocked_size):
mocked_size.return_value = 1000
self.capturedOutput.truncate(0)
self.capturedOutput.seek(0)
progress = S3Progress(filename=__file__)
progress(bytes_amount=20)
self.assertRegex(
self.capturedOutput.getvalue(), r"test_s3progress.py 20 Bytes / 1000 Bytes"
)
def test_human_readable_size(self):
progress = S3Progress(filename=__file__)
result = progress.human_readable_size(1000)
self.assertEqual(result, "1000 Bytes")
result = progress.human_readable_size(1024)
self.assertEqual(result, "1.0 KiB")
result = progress.human_readable_size(1048576)
self.assertEqual(result, "1.0 MiB")
result = progress.human_readable_size(1073741824)
self.assertEqual(result, "1.0 GiB")
result = progress.human_readable_size(10737418991)
self.assertEqual(result, "10.0 GiB")
| StarcoderdataPython |
1768609 | from .changelog import Changelog
from .release import Release, Unreleased
| StarcoderdataPython |
1738373 | <filename>fitparse/base.py
#!/usr/bin/env python
import io
import os
import struct
import warnings
# Python 2 compat
try:
num_types = (int, float, long)
except NameError:
num_types = (int, float)
from fitparse.processors import FitFileDataProcessor
from fitparse.profile import FIELD_TYPE_TIMESTAMP, MESSAGE_TYPES
from fitparse.records import (
Crc, DevField, DataMessage, FieldData, FieldDefinition, DevFieldDefinition, DefinitionMessage,
MessageHeader, BASE_TYPES, BASE_TYPE_BYTE,
)
from fitparse.utils import fileish_open, is_iterable, FitParseError, FitEOFError, FitCRCError, FitHeaderError
class DeveloperDataMixin(object):
def __init__(self, *args, check_developer_data=True, **kwargs):
self.check_developer_data = check_developer_data
self.dev_types = {}
super(DeveloperDataMixin, self).__init__(*args, **kwargs)
def _append_dev_data_id(self, dev_data_index, application_id=None, fields=None):
if fields is None:
fields = {}
# Note that nothing in the spec says overwriting an existing type is invalid
self.dev_types[dev_data_index] = {
'dev_data_index': dev_data_index,
'application_id': application_id,
'fields': fields
}
def add_dev_data_id(self, message):
dev_data_index = message.get_raw_value('developer_data_index')
application_id = message.get_raw_value('application_id')
self._append_dev_data_id(dev_data_index, application_id)
def _append_dev_field_description(self, dev_data_index, field_def_num, type=BASE_TYPE_BYTE, name=None,
units=None, native_field_num=None):
if dev_data_index not in self.dev_types:
if self.check_developer_data:
raise FitParseError("No such dev_data_index=%s found" % (dev_data_index))
warnings.warn(
"Dev type for dev_data_index=%s missing. Adding dummy dev type." % (dev_data_index)
)
self._append_dev_data_id(dev_data_index)
self.dev_types[dev_data_index]["fields"][field_def_num] = DevField(
dev_data_index=dev_data_index,
def_num=field_def_num,
type=type,
name=name,
units=units,
native_field_num=native_field_num
)
def add_dev_field_description(self, message):
dev_data_index = message.get_raw_value('developer_data_index')
field_def_num = message.get_raw_value('field_definition_number')
base_type_id = message.get_raw_value('fit_base_type_id')
field_name = message.get_raw_value('field_name') or "unnamed_dev_field_%s" % field_def_num
units = message.get_raw_value("units")
native_field_num = message.get_raw_value('native_field_num')
if dev_data_index not in self.dev_types:
if self.check_developer_data:
raise FitParseError("No such dev_data_index=%s found" % (dev_data_index))
warnings.warn(
"Dev type for dev_data_index=%s missing. Adding dummy dev type." % (dev_data_index)
)
self._append_dev_data_id(dev_data_index)
fields = self.dev_types[int(dev_data_index)]['fields']
# Note that nothing in the spec says overwriting an existing field is invalid
fields[field_def_num] = DevField(
dev_data_index=dev_data_index,
def_num=field_def_num,
type=BASE_TYPES[base_type_id],
name=field_name,
units=units,
native_field_num=native_field_num
)
def get_dev_type(self, dev_data_index, field_def_num):
if dev_data_index not in self.dev_types:
if self.check_developer_data:
raise FitParseError(
"No such dev_data_index=%s found when looking up field %s" % (dev_data_index, field_def_num)
)
warnings.warn(
"Dev type for dev_data_index=%s missing. Adding dummy dev type." % (dev_data_index)
)
self._append_dev_data_id(dev_data_index)
dev_type = self.dev_types[dev_data_index]
if field_def_num not in dev_type['fields']:
if self.check_developer_data:
raise FitParseError(
"No such field %s for dev_data_index %s" % (field_def_num, dev_data_index)
)
warnings.warn(
"Field %s for dev_data_index %s missing. Adding dummy field." % (field_def_num, dev_data_index)
)
self._append_dev_field_description(
dev_data_index=dev_data_index,
field_def_num=field_def_num
)
return dev_type['fields'][field_def_num]
class FitFileDecoder(DeveloperDataMixin):
"""Basic decoder for fit files"""
def __init__(self, fileish, *args, check_crc=True, data_processor=None, **kwargs):
self._file = fileish_open(fileish, 'rb')
self.check_crc = check_crc
self._crc = None
# Get total filesize
self._file.seek(0, os.SEEK_END)
self._filesize = self._file.tell()
self._file.seek(0, os.SEEK_SET)
# Start off by parsing the file header (sets initial attribute values)
self._parse_file_header()
super(FitFileDecoder, self).__init__(*args, **kwargs)
def __del__(self):
self.close()
def close(self):
if hasattr(self, "_file") and self._file and hasattr(self._file, "close"):
self._file.close()
self._file = None
def __enter__(self):
return self
def __exit__(self, *_):
self.close()
##########
# Private low-level utility methods for reading of fit file
def _read(self, size):
if size <= 0:
return None
data = self._file.read(size)
if size != len(data):
raise FitEOFError("Tried to read %d bytes from .FIT file but got %d" % (size, len(data)))
if self.check_crc:
self._crc.update(data)
self._bytes_left -= len(data)
return data
def _read_struct(self, fmt, endian='<', data=None, always_tuple=False):
fmt_with_endian = endian + fmt
size = struct.calcsize(fmt_with_endian)
if size <= 0:
raise FitParseError("Invalid struct format: %s" % fmt_with_endian)
if data is None:
data = self._read(size)
unpacked = struct.unpack(fmt_with_endian, data)
# Flatten tuple if it's got only one value
return unpacked if (len(unpacked) > 1) or always_tuple else unpacked[0]
def _read_and_assert_crc(self, allow_zero=False):
# CRC Calculation is little endian from SDK
# TODO - How to handle the case of unterminated file? Error out and have user retry with check_crc=false?
crc_computed, crc_read = self._crc.value, self._read_struct(Crc.FMT)
if not self.check_crc:
return
if crc_computed == crc_read or (allow_zero and crc_read == 0):
return
raise FitCRCError('CRC Mismatch [computed: %s, read: %s]' % (
Crc.format(crc_computed), Crc.format(crc_read)))
##########
# Private Data Parsing Methods
def _parse_file_header(self):
# Initialize data
self._accumulators = {}
self._bytes_left = -1
self._complete = False
self._compressed_ts_accumulator = 0
self._crc = Crc()
self._local_mesgs = {}
header_data = self._read(12)
if header_data[8:12] != b'.FIT':
raise FitHeaderError("Invalid .FIT File Header")
# Larger fields are explicitly little endian from SDK
header_size, protocol_ver_enc, profile_ver_enc, data_size = self._read_struct('2BHI4x', data=header_data)
# Decode the same way the SDK does
self.protocol_version = float("%d.%d" % (protocol_ver_enc >> 4, protocol_ver_enc & ((1 << 4) - 1)))
self.profile_version = float("%d.%d" % (profile_ver_enc / 100, profile_ver_enc % 100))
# Consume extra header information
extra_header_size = header_size - 12
if extra_header_size > 0:
# Make sure extra field in header is at least 2 bytes to calculate CRC
if extra_header_size < 2:
raise FitHeaderError('Irregular File Header Size')
# Consume extra two bytes of header and check CRC
self._read_and_assert_crc(allow_zero=True)
# Consume any extra bytes, since header size "may be increased in
# "future to add additional optional information" (from SDK)
self._read(extra_header_size - 2)
# After we've consumed the header, set the bytes left to be read
self._bytes_left = data_size
def _parse_message(self):
# When done, calculate the CRC and return None
if self._bytes_left <= 0:
# Don't assert CRC if requested not
if not self._complete and self.check_crc:
self._read_and_assert_crc()
if self._file.tell() >= self._filesize:
self._complete = True
self.close()
return None
# Still have data left in the file - assuming chained fit files
self._parse_file_header()
return self._parse_message()
header = self._parse_message_header()
if header.is_definition:
message = self._parse_definition_message(header)
else:
message = self._parse_data_message(header)
if message.mesg_type is not None:
if message.mesg_type.name == 'developer_data_id':
self.add_dev_data_id(message)
elif message.mesg_type.name == 'field_description':
self.add_dev_field_description(message)
return message
def _parse_message_header(self):
header = self._read_struct('B')
if header & 0x80: # bit 7: Is this record a compressed timestamp?
return MessageHeader(
is_definition=False,
is_developer_data=False,
local_mesg_num=(header >> 5) & 0x3, # bits 5-6
time_offset=header & 0x1F, # bits 0-4
)
else:
return MessageHeader(
is_definition=bool(header & 0x40), # bit 6
is_developer_data=bool(header & 0x20), # bit 5
local_mesg_num=header & 0xF, # bits 0-3
time_offset=None,
)
def _parse_definition_message(self, header):
# Read reserved byte and architecture byte to resolve endian
endian = '>' if self._read_struct('xB') else '<'
# Read rest of header with endian awareness
global_mesg_num, num_fields = self._read_struct('HB', endian=endian)
mesg_type = MESSAGE_TYPES.get(global_mesg_num)
field_defs = []
for n in range(num_fields):
field_def_num, field_size, base_type_num = self._read_struct('3B', endian=endian)
# Try to get field from message type (None if unknown)
field = mesg_type.fields.get(field_def_num) if mesg_type else None
base_type = BASE_TYPES.get(base_type_num, BASE_TYPE_BYTE)
            if (field_size % base_type.size) != 0:
                # field can be None for fields not in the profile, so fall back
                # to the definition number when naming it in the warning.
                field_label = field.name if field else "unknown_%d" % field_def_num
                warnings.warn(
                    "Invalid field size %d for field '%s' of type '%s' (expected a multiple of %d); falling back to byte encoding." % (
                        field_size, field_label, base_type.name, base_type.size)
                )
base_type = BASE_TYPE_BYTE
# If the field has components that are accumulators
# start recording their accumulation at 0
if field and field.components:
for component in field.components:
if component.accumulate:
accumulators = self._accumulators.setdefault(global_mesg_num, {})
accumulators[component.def_num] = 0
field_defs.append(FieldDefinition(
field=field,
def_num=field_def_num,
base_type=base_type,
size=field_size,
))
dev_field_defs = []
if header.is_developer_data:
num_dev_fields = self._read_struct('B', endian=endian)
for n in range(num_dev_fields):
field_def_num, field_size, dev_data_index = self._read_struct('3B', endian=endian)
field = self.get_dev_type(dev_data_index, field_def_num)
dev_field_defs.append(DevFieldDefinition(
field=field,
dev_data_index=dev_data_index,
def_num=field_def_num,
size=field_size
))
def_mesg = DefinitionMessage(
header=header,
endian=endian,
mesg_type=mesg_type,
mesg_num=global_mesg_num,
field_defs=field_defs,
dev_field_defs=dev_field_defs,
)
self._local_mesgs[header.local_mesg_num] = def_mesg
return def_mesg
def _parse_raw_values_from_data_message(self, def_mesg):
# Go through mesg's field defs and read them
raw_values = []
for field_def in def_mesg.field_defs + def_mesg.dev_field_defs:
base_type = field_def.base_type
is_byte = base_type.name == 'byte'
# Struct to read n base types (field def size / base type size)
struct_fmt = str(int(field_def.size / base_type.size)) + base_type.fmt
# Extract the raw value, ask for a tuple if it's a byte type
try:
raw_value = self._read_struct(
struct_fmt, endian=def_mesg.endian, always_tuple=is_byte,
)
except FitEOFError:
# file was suddenly terminated
warnings.warn("File was terminated unexpectedly, some data will not be loaded.")
break
# If the field returns with a tuple of values it's definitely an
# oddball, but we'll parse it on a per-value basis it.
# If it's a byte type, treat the tuple as a single value
if isinstance(raw_value, tuple) and not is_byte:
raw_value = tuple(base_type.parse(rv) for rv in raw_value)
else:
# Otherwise, just scrub the singular value
raw_value = base_type.parse(raw_value)
raw_values.append(raw_value)
return raw_values
@staticmethod
def _resolve_subfield(field, def_mesg, raw_values):
# Resolve into (field, parent) ie (subfield, field) or (field, None)
if field.subfields:
for sub_field in field.subfields:
# Go through reference fields for this sub field
for ref_field in sub_field.ref_fields:
# Go through field defs AND their raw values
for field_def, raw_value in zip(def_mesg.field_defs, raw_values):
# If there's a definition number AND raw value match on the
# reference field, then we return this subfield
if (field_def.def_num == ref_field.def_num) and (ref_field.raw_value == raw_value):
return sub_field, field
return field, None
def _apply_scale_offset(self, field, raw_value):
# Apply numeric transformations (scale+offset)
if isinstance(raw_value, tuple):
# Contains multiple values, apply transformations to all of them
return tuple(self._apply_scale_offset(field, x) for x in raw_value)
elif isinstance(raw_value, num_types):
if field.scale:
raw_value = float(raw_value) / field.scale
if field.offset:
raw_value = raw_value - field.offset
return raw_value
@staticmethod
def _apply_compressed_accumulation(raw_value, accumulation, num_bits):
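        # raw_value carries only the low num_bits of the true value; rebuild the
        # full value from the previous accumulation and add one period if the
        # low bits wrapped around since the last message.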
max_value = (1 << num_bits)
max_mask = max_value - 1
base_value = raw_value + (accumulation & ~max_mask)
if raw_value < (accumulation & max_mask):
base_value += max_value
return base_value
def _parse_data_message_components(self, header):
def_mesg = self._local_mesgs.get(header.local_mesg_num)
if not def_mesg:
raise FitParseError('Got data message with invalid local message type %d' % (
header.local_mesg_num))
raw_values = self._parse_raw_values_from_data_message(def_mesg)
field_datas = [] # TODO: I don't love this name, update on DataMessage too
# TODO: Maybe refactor this and make it simpler (or at least broken
# up into sub-functions)
for field_def, raw_value in zip(def_mesg.field_defs + def_mesg.dev_field_defs, raw_values):
field, parent_field = field_def.field, None
if field:
field, parent_field = self._resolve_subfield(field, def_mesg, raw_values)
# Resolve component fields
if field.components:
for component in field.components:
# Render its raw value
try:
cmp_raw_value = component.render(raw_value)
except ValueError:
continue
# Apply accumulated value
if component.accumulate and cmp_raw_value is not None:
accumulator = self._accumulators[def_mesg.mesg_num]
cmp_raw_value = self._apply_compressed_accumulation(
cmp_raw_value, accumulator[component.def_num], component.bits,
)
accumulator[component.def_num] = cmp_raw_value
# Apply scale and offset from component, not from the dynamic field
# as they may differ
cmp_raw_value = self._apply_scale_offset(component, cmp_raw_value)
# Extract the component's dynamic field from def_mesg
cmp_field = def_mesg.mesg_type.fields[component.def_num]
# Resolve a possible subfield
cmp_field, cmp_parent_field = self._resolve_subfield(cmp_field, def_mesg, raw_values)
cmp_value = cmp_field.render(cmp_raw_value)
# Plop it on field_datas
field_datas.append(
FieldData(
field_def=None,
field=cmp_field,
parent_field=cmp_parent_field,
value=cmp_value,
raw_value=cmp_raw_value,
)
)
# TODO: Do we care about a base_type and a resolved field mismatch?
# My hunch is we don't
value = self._apply_scale_offset(field, field.render(raw_value))
else:
value = raw_value
# Update compressed timestamp field
if (field_def.def_num == FIELD_TYPE_TIMESTAMP.def_num) and (raw_value is not None):
self._compressed_ts_accumulator = raw_value
field_datas.append(
FieldData(
field_def=field_def,
field=field,
parent_field=parent_field,
value=value,
raw_value=raw_value,
)
)
# Apply timestamp field if we got a header
if header.time_offset is not None:
ts_value = self._compressed_ts_accumulator = self._apply_compressed_accumulation(
header.time_offset, self._compressed_ts_accumulator, 5,
)
field_datas.append(
FieldData(
field_def=None,
field=FIELD_TYPE_TIMESTAMP,
parent_field=None,
value=FIELD_TYPE_TIMESTAMP.render(ts_value),
raw_value=ts_value,
)
)
return header, def_mesg, field_datas
def _parse_data_message(self, header):
header, def_mesg, field_datas = self._parse_data_message_components(header)
return DataMessage(header=header, def_mesg=def_mesg, fields=field_datas)
@staticmethod
def _should_yield(message, with_definitions, names):
if not message:
return False
if with_definitions or message.type == 'data':
# name arg is None we return all
if names is None:
return True
elif (message.name in names) or (message.mesg_num in names):
return True
return False
@staticmethod
def _make_set(obj):
if obj is None:
return None
if is_iterable(obj):
return set(obj)
else:
return set((obj,))
##########
# Public API
def get_messages(self, name=None, with_definitions=False, as_dict=False):
if with_definitions: # with_definitions implies as_dict=False
as_dict = False
names = self._make_set(name)
while not self._complete:
message = self._parse_message()
if self._should_yield(message, with_definitions, names):
yield message.as_dict() if as_dict else message
def __iter__(self):
return self.get_messages()
class CacheMixin(object):
"""Add message caching to the FitFileDecoder"""
def __init__(self, *args, **kwargs):
super(CacheMixin, self).__init__(*args, **kwargs)
self._messages = []
def _parse_message(self):
self._messages.append(super(CacheMixin, self)._parse_message())
return self._messages[-1]
def get_messages(self, name=None, with_definitions=False, as_dict=False):
if with_definitions: # with_definitions implies as_dict=False
as_dict = False
names = self._make_set(name)
# Yield all parsed messages first
for message in self._messages:
if self._should_yield(message, with_definitions, names):
yield message.as_dict() if as_dict else message
for message in super(CacheMixin, self).get_messages(names, with_definitions, as_dict):
yield message
@property
def messages(self):
return list(self.get_messages())
def parse(self):
while self._parse_message():
pass
class DataProcessorMixin(object):
"""Add data processing to the FitFileDecoder"""
def __init__(self, *args, **kwargs):
self._processor = kwargs.pop("data_processor", None) or FitFileDataProcessor()
super(DataProcessorMixin, self).__init__(*args, **kwargs)
def _parse_data_message(self, header):
header, def_mesg, field_datas = self._parse_data_message_components(header)
# Apply data processors
for field_data in field_datas:
# Apply type name processor
self._processor.run_type_processor(field_data)
self._processor.run_field_processor(field_data)
self._processor.run_unit_processor(field_data)
data_message = DataMessage(header=header, def_mesg=def_mesg, fields=field_datas)
self._processor.run_message_processor(data_message)
return data_message
class UncachedFitFile(DataProcessorMixin, FitFileDecoder):
"""FitFileDecoder with data processing"""
def __init__(self, fileish, *args, check_crc=True, data_processor=None, **kwargs):
# Ensure all optional params are passed as kwargs
super(UncachedFitFile, self).__init__(
fileish,
*args,
check_crc=check_crc,
data_processor=data_processor,
**kwargs
)
class FitFile(CacheMixin, UncachedFitFile):
"""FitFileDecoder with caching and data processing"""
pass
# TODO: Create subclasses like Activity and do per-value monkey patching
# for example local_timestamp to adjust timestamp on a per-file basis
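# Minimal usage sketch (hypothetical file path; available message names depend
# on the file being parsed):
#
#     fitfile = FitFile("activity.fit")
#     for record in fitfile.get_messages(name="record", as_dict=True):
#         print(record)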
| StarcoderdataPython |
1662400 | from typing import Any
from allauth.account.adapter import DefaultAccountAdapter
from allauth.socialaccount.adapter import DefaultSocialAccountAdapter
from django.conf import settings
from django.http import HttpRequest
class AccountAdapter(DefaultAccountAdapter):
def is_open_for_signup(self, request: HttpRequest):
return getattr(settings, "ACCOUNT_ALLOW_REGISTRATION", True)
class SocialAccountAdapter(DefaultSocialAccountAdapter):
def is_open_for_signup(self, request: HttpRequest, sociallogin: Any):
return getattr(settings, "ACCOUNT_ALLOW_REGISTRATION", True)
def save_user(self, request, user, form):
"""
This is called when saving user via allauth registration.
We override this to set additional data on user object.
"""
        # The parent adapter populates and saves the user; we then attach the
        # extra fields collected on the signup form and save again.
user = super(SocialAccountAdapter, self).save_user(request, user, form)
user.name = form.cleaned_data.get('name')
user.agree = form.cleaned_data.get('agree')
user.save() | StarcoderdataPython |
195518 | <filename>mockfirestore/__init__.py
from .main import DocumentSnapshot, DocumentReference, Query, CollectionReference, MockFirestore | StarcoderdataPython |
3332527 | import numpy as np
import tensorflow as tf
img_shape = (2,2,2,1)
img = tf.placeholder(tf.float32, img_shape)
ksize = [1,2,2,1]
stride = [1,2,2,1]
pool, argmax = tf.nn.max_pool_with_argmax(img, ksize, stride, padding='SAME', name='pool')
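# argmax holds, for each pooled output element, the flattened index of the input
# element that produced the maximum; such indices are typically fed to a
# scatter-style op (cf. the commented-out tf.scatter_nd example below) to
# implement max-unpooling.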
img_np = np.zeros(img_shape)
img_np[0,0,0] = 1
img_np[1,1,1] = 2
with tf.Session() as sess:
ops = [argmax]
feed = {img:img_np}
out = sess.run(ops, feed_dict=feed)
print(img_np)
print(out)
#indices = tf.constant([[4], [3], [1], [7]], tf.int64)
#updates = tf.constant([9.1, 10.2, 11, 12])
#shape = tf.constant([8], tf.int64)
#print(indices)
#print(updates)
#print(shape)
#scatter = tf.scatter_nd(indices, updates, shape)
#oshape = tf.constant([2,2,2], tf.int32)
#scatter = tf.reshape(scatter, oshape)
#with tf.Session() as sess:
# print(sess.run(scatter))
| StarcoderdataPython |
1603562 | import os
import torch
import logging
from model import DeepSpeech
class Observer(object):
'''
Train Observer base class.
'''
def __init__(self, logger):
self.logger = logger
def on_epoch_start(self, model, epoch): pass
def on_epoch_end(self, model, optimizer, epoch, loss_results, wer_results, cer_results): pass
def on_batch_start(self, model, epoch, batch_no): pass
def on_batch_end(self, model, optimizer, epoch, batch_no, loss_results, wer_results, cer_results, avg_loss): pass
def to_np(x):
return x.data.cpu().numpy()
class TensorboardWriter(Observer):
"""
Update Tensorboard at the end of each epoch
"""
def __init__(self, id, log_dir, log_params):
super().__init__(logging.getLogger('TensorboardWriter'))
os.makedirs(log_dir, exist_ok=True)
from tensorboardX import SummaryWriter
self.id = id
self.log_params = log_params
self.tensorboard_writer = SummaryWriter(log_dir)
def on_epoch_end(self, model, optimizer, epoch, loss_results, wer_results, cer_results):
self.logger.debug("Updating tensorboard for epoch {} {}".format(epoch + 1, loss_results))
values = {
'Avg Train Loss': loss_results[epoch],
'Avg WER': wer_results[epoch],
'Avg CER': cer_results[epoch],
}
self.tensorboard_writer.add_scalars(self.id, values, epoch + 1)
if self.log_params:
for tag, value in model.named_parameters():
tag = tag.replace('.', '/')
self.tensorboard_writer.add_histogram(tag, to_np(value), epoch + 1)
if value.grad is not None:
self.tensorboard_writer.add_histogram(tag + '/grad', to_np(value.grad), epoch + 1)
class CheckpointWriter(Observer):
"""
Save model checkpoint at the end of epoch
"""
def __init__(self, save_folder):
super().__init__(logging.getLogger('CheckpointWriter'))
self.logger.debug("CheckpointWriter")
self.save_folder = save_folder
os.makedirs(save_folder, exist_ok=True)
def on_epoch_end(self, model, optimizer, epoch, loss_results, wer_results, cer_results):
self.logger.debug("Saving checkpoint {}".format(epoch + 1))
file_path = '%s/deepspeech_%d.pth' % (self.save_folder, epoch + 1)
torch.save(DeepSpeech.serialize(model, optimizer=optimizer, epoch=epoch, loss_results=loss_results,
wer_results=wer_results, cer_results=cer_results),
file_path)
class CheckpointBatchWriter(Observer):
"""
Save model checkpoint every number of mini-batches
"""
def __init__(self, save_folder, checkpoint_per_batch):
super().__init__(logging.getLogger('CheckpointBatchWriter'))
self.logger.debug("CheckpointBatchWriter")
self.save_folder = save_folder
self.checkpoint_per_batch = checkpoint_per_batch
os.makedirs(save_folder, exist_ok=True)
def on_batch_end(self, model, optimizer, epoch, batch_no, loss_results, wer_results, cer_results, avg_loss):
if batch_no > 0 and (batch_no + 1) % self.checkpoint_per_batch == 0:
file_path = '%s/deepspeech_checkpoint_epoch_%d_iter_%d.pth' % (self.save_folder, epoch + 1, batch_no + 1)
self.logger.debug("Saving checkpoint model to %s" % file_path)
torch.save(DeepSpeech.serialize(model, optimizer=optimizer, epoch=epoch, iteration=batch_no,
loss_results=loss_results,
wer_results=wer_results, cer_results=cer_results, avg_loss=avg_loss),
file_path)
class VisdomWriter(Observer):
def __init__(self, id, epochs):
super().__init__(logging.getLogger('VisdomWriter'))
from visdom import Visdom
self.viz = Visdom()
self.opts = dict(title=id, ylabel='', xlabel='Epoch', legend=['Loss', 'WER', 'CER'])
self.viz_window = None
self.epochs = torch.arange(1, epochs + 1)
def on_epoch_end(self, model, optimizer, epoch, loss_results, wer_results, cer_results):
self.logger.debug('Updating Visdom')
x_axis = self.epochs[0:epoch + 1]
y_axis = torch.stack(
(loss_results[0:epoch + 1], wer_results[0:epoch + 1], cer_results[0:epoch + 1]), dim=1)
if self.viz_window is None:
self.viz_window = self.viz.line(
X=x_axis,
Y=y_axis,
opts=self.opts,
)
else:
self.viz.line(
X=x_axis.unsqueeze(0).expand(y_axis.size(1), x_axis.size(0)).transpose(0, 1), # Visdom fix
Y=y_axis,
win=self.viz_window,
update='replace',
)
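# Sketch of how these observers might be driven by a training loop (the trainer
# names below are hypothetical):
#
#     observers = [CheckpointWriter('./models'), VisdomWriter('run-1', epochs)]
#     for epoch in range(epochs):
#         for obs in observers:
#             obs.on_epoch_start(model, epoch)
#         # ... train one epoch ...
#         for obs in observers:
#             obs.on_epoch_end(model, optimizer, epoch,
#                              loss_results, wer_results, cer_results)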
| StarcoderdataPython |
1722101 | from unittest import TestCase
from mock import Mock, patch, PropertyMock
from mangrove.datastore.database import DatabaseManager
from mangrove.form_model.field import PhotoField, TextField, FieldSet
from mangrove.form_model.form_model import FormModel
from mangrove.transport.services.MediaSubmissionService import MediaSubmissionService
class TestMediaSubmissionService(TestCase):
def setUp(self):
dbm = Mock(spec=DatabaseManager)
self.image = Mock()
self.image.size = 1000000
media = {"image.png": self.image}
form_code = "form_code"
self.form_model = Mock(spec=FormModel)
self.form_model.form_code = PropertyMock(return_value=form_code)
self.form_model.is_media_type_fields_present = PropertyMock(return_value=True)
with patch("mangrove.transport.services.MediaSubmissionService.get_form_model_by_code") as get_form_model:
get_form_model.return_value = self.form_model
self.media_submission_service = MediaSubmissionService(dbm, media, form_code)
def test_get_media_fields_and_update_values(self):
values = [{u'image': u'image.png'}]
counter = count_generator()
photo_field = PhotoField('image', 'image', 'image')
with patch(
"mangrove.transport.services.MediaSubmissionService.MediaSubmissionService.create_media_details_document") as document_created:
media_files = self.media_submission_service._get_media_fields_and_update_values([photo_field], values,
counter)
expected_files = {"1-image.png": self.image}
self.assertDictEqual(expected_files, media_files)
def test_get_media_fields_in_a_group_and_update_values(self):
values = [{"group": [{"image": "image.png", "name": "something"}]}]
counter = count_generator()
field1 = PhotoField('image', 'image', 'image')
field2 = TextField(name='name', code='name', label='wat is ur name')
field_set = FieldSet('group', 'group', 'group', field_set=[field1, field2])
with patch(
"mangrove.transport.services.MediaSubmissionService.MediaSubmissionService.create_media_details_document") as document_created:
media_files = self.media_submission_service._get_media_fields_and_update_values([field_set], values,
counter)
expected_files = {"1-image.png": self.image}
self.assertDictEqual(expected_files, media_files)
def test_get_media_fields_in_a_repeat_and_update_values(self):
values = [{"group": [{"image": "image.png", "name": "something"}, {"image": "image.png", "name": "something2"},
{"image": "image.png", "name": "something3"}]}]
counter = count_generator()
field1 = PhotoField('image', 'image', 'image')
field2 = TextField(name='name', code='name', label='wat is ur name')
field_set = FieldSet('group', 'group', 'group', field_set=[field1, field2])
with patch(
"mangrove.transport.services.MediaSubmissionService.MediaSubmissionService.create_media_details_document") as document_created:
media_files = self.media_submission_service._get_media_fields_and_update_values([field_set], values,
counter)
expected_files = {"1-image.png": self.image, "2-image.png": self.image, "3-image.png": self.image}
self.assertDictEqual(expected_files, media_files)
def count_generator():
count = 0
while True:
count += 1
yield count | StarcoderdataPython |
3379076 | <filename>eggbox_potential_sampler/eggbox_pes_data_source/tests/test_factory.py<gh_stars>0
# (C) Copyright 2010-2020 Enthought, Inc., Austin, TX
# All rights reserved.
import unittest
from eggbox_potential_sampler.eggbox_pes_data_source.data_source\
import EggboxPESDataSource
from eggbox_potential_sampler.eggbox_pes_data_source.model\
import EggboxPESDataSourceModel
from eggbox_potential_sampler.eggbox_plugin import EggboxPlugin
class DataSourceFactoryTestMixin(unittest.TestCase):
def setUp(self):
self.plugin = EggboxPlugin()
self.factory = self.plugin.data_source_factories[0]
def test_initialization(self):
self.assertNotEqual(self.factory.id, "")
self.assertEqual(self.factory.plugin_id, self.plugin.id)
def test_create_model(self):
model = self.factory.create_model({})
self.assertIsInstance(model, EggboxPESDataSourceModel)
model = self.factory.create_model()
self.assertIsInstance(model, EggboxPESDataSourceModel)
def test_create_data_source(self):
ds = self.factory.create_data_source()
self.assertIsInstance(ds, EggboxPESDataSource)
| StarcoderdataPython |
4836809 | <reponame>mariusgheorghies/python
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: v1.20.7
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes.client.configuration import Configuration
class ComCoreosMonitoringV1alpha1AlertmanagerConfigSpecEmailConfigs(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'auth_identity': 'str',
        'auth_password': 'ComCoreosMonitoringV1alpha1AlertmanagerConfigSpecAuthPassword',
'auth_secret': 'ComCoreosMonitoringV1alpha1AlertmanagerConfigSpecAuthSecret',
'auth_username': 'str',
'_from': 'str',
'headers': 'list[ComCoreosMonitoringV1alpha1AlertmanagerConfigSpecHeaders]',
'hello': 'str',
'html': 'str',
'require_tls': 'bool',
'send_resolved': 'bool',
'smarthost': 'str',
'text': 'str',
'tls_config': 'ComCoreosMonitoringV1alpha1AlertmanagerConfigSpecTlsConfig',
'to': 'str'
}
attribute_map = {
'auth_identity': 'authIdentity',
        'auth_password': 'authPassword',
'auth_secret': 'authSecret',
'auth_username': 'authUsername',
'_from': 'from',
'headers': 'headers',
'hello': 'hello',
'html': 'html',
'require_tls': 'requireTLS',
'send_resolved': 'sendResolved',
'smarthost': 'smarthost',
'text': 'text',
'tls_config': 'tlsConfig',
'to': 'to'
}
    def __init__(self, auth_identity=None, auth_password=None, auth_secret=None, auth_username=None, _from=None, headers=None, hello=None, html=None, require_tls=None, send_resolved=None, smarthost=None, text=None, tls_config=None, to=None, local_vars_configuration=None):  # noqa: E501
"""ComCoreosMonitoringV1alpha1AlertmanagerConfigSpecEmailConfigs - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._auth_identity = None
        self._auth_password = None
self._auth_secret = None
self._auth_username = None
self.__from = None
self._headers = None
self._hello = None
self._html = None
self._require_tls = None
self._send_resolved = None
self._smarthost = None
self._text = None
self._tls_config = None
self._to = None
self.discriminator = None
if auth_identity is not None:
self.auth_identity = auth_identity
if auth_password is not None:
            self.auth_password = auth_password
if auth_secret is not None:
self.auth_secret = auth_secret
if auth_username is not None:
self.auth_username = auth_username
if _from is not None:
self._from = _from
if headers is not None:
self.headers = headers
if hello is not None:
self.hello = hello
if html is not None:
self.html = html
if require_tls is not None:
self.require_tls = require_tls
if send_resolved is not None:
self.send_resolved = send_resolved
if smarthost is not None:
self.smarthost = smarthost
if text is not None:
self.text = text
if tls_config is not None:
self.tls_config = tls_config
if to is not None:
self.to = to
@property
def auth_identity(self):
"""Gets the auth_identity of this ComCoreosMonitoringV1alpha1AlertmanagerConfigSpecEmailConfigs. # noqa: E501
The identity to use for authentication. # noqa: E501
:return: The auth_identity of this ComCoreosMonitoringV1alpha1AlertmanagerConfigSpecEmailConfigs. # noqa: E501
:rtype: str
"""
return self._auth_identity
@auth_identity.setter
def auth_identity(self, auth_identity):
"""Sets the auth_identity of this ComCoreosMonitoringV1alpha1AlertmanagerConfigSpecEmailConfigs.
The identity to use for authentication. # noqa: E501
:param auth_identity: The auth_identity of this ComCoreosMonitoringV1alpha1AlertmanagerConfigSpecEmailConfigs. # noqa: E501
:type: str
"""
self._auth_identity = auth_identity
@property
def auth_password(self):
"""Gets the auth_password of this ComCoreosMonitoringV1alpha1AlertmanagerConfigSpecEmailConfigs. # noqa: E501
:return: The auth_password of this ComCoreosMonitoringV1alpha1AlertmanagerConfigSpecEmailConfigs. # noqa: E501
:rtype: ComCoreosMonitoringV1alpha1AlertmanagerConfigSpecAuthPassword
"""
return self._auth_password
@auth_password.setter
def auth_password(self, auth_password):
"""Sets the auth_password of this ComCoreosMonitoringV1alpha1AlertmanagerConfigSpecEmailConfigs.
:param auth_password: The auth_password of this ComCoreosMonitoringV1alpha1AlertmanagerConfigSpecEmailConfigs. # noqa: E501
:type: ComCoreosMonitoringV1alpha1AlertmanagerConfigSpecAuthPassword
"""
self._auth_password = auth_password
@property
def auth_secret(self):
"""Gets the auth_secret of this ComCoreosMonitoringV1alpha1AlertmanagerConfigSpecEmailConfigs. # noqa: E501
:return: The auth_secret of this ComCoreosMonitoringV1alpha1AlertmanagerConfigSpecEmailConfigs. # noqa: E501
:rtype: ComCoreosMonitoringV1alpha1AlertmanagerConfigSpecAuthSecret
"""
return self._auth_secret
@auth_secret.setter
def auth_secret(self, auth_secret):
"""Sets the auth_secret of this ComCoreosMonitoringV1alpha1AlertmanagerConfigSpecEmailConfigs.
:param auth_secret: The auth_secret of this ComCoreosMonitoringV1alpha1AlertmanagerConfigSpecEmailConfigs. # noqa: E501
:type: ComCoreosMonitoringV1alpha1AlertmanagerConfigSpecAuthSecret
"""
self._auth_secret = auth_secret
@property
def auth_username(self):
"""Gets the auth_username of this ComCoreosMonitoringV1alpha1AlertmanagerConfigSpecEmailConfigs. # noqa: E501
The username to use for authentication. # noqa: E501
:return: The auth_username of this ComCoreosMonitoringV1alpha1AlertmanagerConfigSpecEmailConfigs. # noqa: E501
:rtype: str
"""
return self._auth_username
@auth_username.setter
def auth_username(self, auth_username):
"""Sets the auth_username of this ComCoreosMonitoringV1alpha1AlertmanagerConfigSpecEmailConfigs.
The username to use for authentication. # noqa: E501
:param auth_username: The auth_username of this ComCoreosMonitoringV1alpha1AlertmanagerConfigSpecEmailConfigs. # noqa: E501
:type: str
"""
self._auth_username = auth_username
@property
def _from(self):
"""Gets the _from of this ComCoreosMonitoringV1alpha1AlertmanagerConfigSpecEmailConfigs. # noqa: E501
The sender address. # noqa: E501
:return: The _from of this ComCoreosMonitoringV1alpha1AlertmanagerConfigSpecEmailConfigs. # noqa: E501
:rtype: str
"""
return self.__from
@_from.setter
def _from(self, _from):
"""Sets the _from of this ComCoreosMonitoringV1alpha1AlertmanagerConfigSpecEmailConfigs.
The sender address. # noqa: E501
:param _from: The _from of this ComCoreosMonitoringV1alpha1AlertmanagerConfigSpecEmailConfigs. # noqa: E501
:type: str
"""
self.__from = _from
@property
def headers(self):
"""Gets the headers of this ComCoreosMonitoringV1alpha1AlertmanagerConfigSpecEmailConfigs. # noqa: E501
Further headers email header key/value pairs. Overrides any headers previously set by the notification implementation. # noqa: E501
:return: The headers of this ComCoreosMonitoringV1alpha1AlertmanagerConfigSpecEmailConfigs. # noqa: E501
:rtype: list[ComCoreosMonitoringV1alpha1AlertmanagerConfigSpecHeaders]
"""
return self._headers
@headers.setter
def headers(self, headers):
"""Sets the headers of this ComCoreosMonitoringV1alpha1AlertmanagerConfigSpecEmailConfigs.
Further headers email header key/value pairs. Overrides any headers previously set by the notification implementation. # noqa: E501
:param headers: The headers of this ComCoreosMonitoringV1alpha1AlertmanagerConfigSpecEmailConfigs. # noqa: E501
:type: list[ComCoreosMonitoringV1alpha1AlertmanagerConfigSpecHeaders]
"""
self._headers = headers
@property
def hello(self):
"""Gets the hello of this ComCoreosMonitoringV1alpha1AlertmanagerConfigSpecEmailConfigs. # noqa: E501
The hostname to identify to the SMTP server. # noqa: E501
:return: The hello of this ComCoreosMonitoringV1alpha1AlertmanagerConfigSpecEmailConfigs. # noqa: E501
:rtype: str
"""
return self._hello
@hello.setter
def hello(self, hello):
"""Sets the hello of this ComCoreosMonitoringV1alpha1AlertmanagerConfigSpecEmailConfigs.
The hostname to identify to the SMTP server. # noqa: E501
:param hello: The hello of this ComCoreosMonitoringV1alpha1AlertmanagerConfigSpecEmailConfigs. # noqa: E501
:type: str
"""
self._hello = hello
@property
def html(self):
"""Gets the html of this ComCoreosMonitoringV1alpha1AlertmanagerConfigSpecEmailConfigs. # noqa: E501
The HTML body of the email notification. # noqa: E501
:return: The html of this ComCoreosMonitoringV1alpha1AlertmanagerConfigSpecEmailConfigs. # noqa: E501
:rtype: str
"""
return self._html
@html.setter
def html(self, html):
"""Sets the html of this ComCoreosMonitoringV1alpha1AlertmanagerConfigSpecEmailConfigs.
The HTML body of the email notification. # noqa: E501
:param html: The html of this ComCoreosMonitoringV1alpha1AlertmanagerConfigSpecEmailConfigs. # noqa: E501
:type: str
"""
self._html = html
@property
def require_tls(self):
"""Gets the require_tls of this ComCoreosMonitoringV1alpha1AlertmanagerConfigSpecEmailConfigs. # noqa: E501
The SMTP TLS requirement. Note that Go does not support unencrypted connections to remote SMTP endpoints. # noqa: E501
:return: The require_tls of this ComCoreosMonitoringV1alpha1AlertmanagerConfigSpecEmailConfigs. # noqa: E501
:rtype: bool
"""
return self._require_tls
@require_tls.setter
def require_tls(self, require_tls):
"""Sets the require_tls of this ComCoreosMonitoringV1alpha1AlertmanagerConfigSpecEmailConfigs.
The SMTP TLS requirement. Note that Go does not support unencrypted connections to remote SMTP endpoints. # noqa: E501
:param require_tls: The require_tls of this ComCoreosMonitoringV1alpha1AlertmanagerConfigSpecEmailConfigs. # noqa: E501
:type: bool
"""
self._require_tls = require_tls
@property
def send_resolved(self):
"""Gets the send_resolved of this ComCoreosMonitoringV1alpha1AlertmanagerConfigSpecEmailConfigs. # noqa: E501
Whether or not to notify about resolved alerts. # noqa: E501
:return: The send_resolved of this ComCoreosMonitoringV1alpha1AlertmanagerConfigSpecEmailConfigs. # noqa: E501
:rtype: bool
"""
return self._send_resolved
@send_resolved.setter
def send_resolved(self, send_resolved):
"""Sets the send_resolved of this ComCoreosMonitoringV1alpha1AlertmanagerConfigSpecEmailConfigs.
Whether or not to notify about resolved alerts. # noqa: E501
:param send_resolved: The send_resolved of this ComCoreosMonitoringV1alpha1AlertmanagerConfigSpecEmailConfigs. # noqa: E501
:type: bool
"""
self._send_resolved = send_resolved
@property
def smarthost(self):
"""Gets the smarthost of this ComCoreosMonitoringV1alpha1AlertmanagerConfigSpecEmailConfigs. # noqa: E501
The SMTP host through which emails are sent. # noqa: E501
:return: The smarthost of this ComCoreosMonitoringV1alpha1AlertmanagerConfigSpecEmailConfigs. # noqa: E501
:rtype: str
"""
return self._smarthost
@smarthost.setter
def smarthost(self, smarthost):
"""Sets the smarthost of this ComCoreosMonitoringV1alpha1AlertmanagerConfigSpecEmailConfigs.
The SMTP host through which emails are sent. # noqa: E501
:param smarthost: The smarthost of this ComCoreosMonitoringV1alpha1AlertmanagerConfigSpecEmailConfigs. # noqa: E501
:type: str
"""
self._smarthost = smarthost
@property
def text(self):
"""Gets the text of this ComCoreosMonitoringV1alpha1AlertmanagerConfigSpecEmailConfigs. # noqa: E501
The text body of the email notification. # noqa: E501
:return: The text of this ComCoreosMonitoringV1alpha1AlertmanagerConfigSpecEmailConfigs. # noqa: E501
:rtype: str
"""
return self._text
@text.setter
def text(self, text):
"""Sets the text of this ComCoreosMonitoringV1alpha1AlertmanagerConfigSpecEmailConfigs.
The text body of the email notification. # noqa: E501
:param text: The text of this ComCoreosMonitoringV1alpha1AlertmanagerConfigSpecEmailConfigs. # noqa: E501
:type: str
"""
self._text = text
@property
def tls_config(self):
"""Gets the tls_config of this ComCoreosMonitoringV1alpha1AlertmanagerConfigSpecEmailConfigs. # noqa: E501
:return: The tls_config of this ComCoreosMonitoringV1alpha1AlertmanagerConfigSpecEmailConfigs. # noqa: E501
:rtype: ComCoreosMonitoringV1alpha1AlertmanagerConfigSpecTlsConfig
"""
return self._tls_config
@tls_config.setter
def tls_config(self, tls_config):
"""Sets the tls_config of this ComCoreosMonitoringV1alpha1AlertmanagerConfigSpecEmailConfigs.
:param tls_config: The tls_config of this ComCoreosMonitoringV1alpha1AlertmanagerConfigSpecEmailConfigs. # noqa: E501
:type: ComCoreosMonitoringV1alpha1AlertmanagerConfigSpecTlsConfig
"""
self._tls_config = tls_config
@property
def to(self):
"""Gets the to of this ComCoreosMonitoringV1alpha1AlertmanagerConfigSpecEmailConfigs. # noqa: E501
The email address to send notifications to. # noqa: E501
:return: The to of this ComCoreosMonitoringV1alpha1AlertmanagerConfigSpecEmailConfigs. # noqa: E501
:rtype: str
"""
return self._to
@to.setter
def to(self, to):
"""Sets the to of this ComCoreosMonitoringV1alpha1AlertmanagerConfigSpecEmailConfigs.
The email address to send notifications to. # noqa: E501
:param to: The to of this ComCoreosMonitoringV1alpha1AlertmanagerConfigSpecEmailConfigs. # noqa: E501
:type: str
"""
self._to = to
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ComCoreosMonitoringV1alpha1AlertmanagerConfigSpecEmailConfigs):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, ComCoreosMonitoringV1alpha1AlertmanagerConfigSpecEmailConfigs):
return True
return self.to_dict() != other.to_dict()
| StarcoderdataPython |
6737 | <gh_stars>0
from gtts import gTTS as ttos
from pydub import AudioSegment
import os
def generate_mp3 (segments, fade_ms, speech_gain, comment_fade_ms, language = "en", output_file_name = "generated_program_sound") :
def apply_comments (exercise_audio, segment) :
new_exercise_audio = exercise_audio
for comment in segment.comments :
comment_audio = comment["comment_audio"]
comment_time_ms = comment["second"]*1000 + comment["minute"]*60000
part_01 = new_exercise_audio[comment_time_ms:comment_time_ms+len(comment_audio)+comment_fade_ms*2]
part_02 = part_01.fade(to_gain=-speech_gain, start=0, end=comment_fade_ms)
part_02 = part_02.fade(to_gain= speech_gain, start=comment_fade_ms+len(comment_audio), end=len(part_02))
part_02 = part_02.overlay(comment_audio, position=comment_fade_ms)
new_exercise_audio = new_exercise_audio[:comment_time_ms] + part_02 + new_exercise_audio[comment_time_ms+len(part_02):]
return new_exercise_audio
def append_segment (current_audio, next_segment, future_segment) :
segment_audio = next_segment.song_audio
segment_audio_faded = segment_audio - speech_gain
segment_text_audio = next_segment.text_audio
part_01 = segment_audio_faded[:len(segment_text_audio)] # First part of next segment
        part_01 = current_audio[-len(segment_text_audio):].append(part_01, crossfade=len(segment_text_audio)).overlay(segment_text_audio) # Cross-fade out of the current audio and overlay the spoken intro
part_02 = part_01 + segment_audio_faded[len(part_01):len(part_01)+fade_ms].fade(to_gain=speech_gain, start=0, end=fade_ms) # Faded up to exercise gain
part_03 = apply_comments(segment_audio[len(part_02):len(part_02)+next_segment.get_exercise_duration_ms()+fade_ms], next_segment) # Apply comments to exercise
part_03 = part_02 + part_03.fade(to_gain=-speech_gain, start=len(part_03)-fade_ms, end=len(part_03))
part_04 = current_audio[:-len(segment_text_audio)] + part_03
if not future_segment :
part_05 = part_04.fade_out(fade_ms)
ttos(text="Program finished", lang=language, slow=False).save("output.mp3")
finish_voice = AudioSegment.from_file("output.mp3")
print("Cleaning up output.mp3")
os.remove("output.mp3")
return part_05 + finish_voice
else :
part_05 = part_04 + segment_audio_faded[len(part_03):len(part_03)+len(future_segment.text_audio)]
return part_05
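    # Build the opening segment first (fade the music in, play the intro speech, then the
    # exercise), and afterwards chain every remaining segment onto the end of the program.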
print("Generating MP3 for segment 1 of " + str(len(segments)))
intro_segment_audio = segments[0].song_audio
intro_segment_text_audio = segments[0].text_audio
intro_segment_audio_faded = intro_segment_audio - speech_gain
part_01 = intro_segment_audio_faded[:fade_ms].fade_in(fade_ms)
part_02 = part_01 + intro_segment_audio_faded[len(part_01):len(part_01)+len(intro_segment_text_audio)].overlay(intro_segment_text_audio)
part_03 = part_02 + intro_segment_audio_faded[len(part_02):len(part_02)+fade_ms].fade(to_gain=speech_gain, start=0, end=fade_ms)
part_04 = apply_comments(intro_segment_audio[len(part_03):len(part_03)+segments[0].get_exercise_duration_ms()+fade_ms], segments[0])
part_04 = part_03 + part_04.fade(to_gain=-speech_gain, start=len(part_04)-fade_ms, end=len(part_04))
part_05 = part_04 + intro_segment_audio_faded[len(part_04):len(part_04)+len(segments[1].text_audio)]
program_audio = part_05
for i in range(1, len(segments)) :
print("Generating MP3 for segment " + str(i+1) + " of " + str(len(segments)))
if i+1 >= len(segments) :
program_audio = append_segment(program_audio, segments[i], None)
else :
program_audio = append_segment(program_audio, segments[i], segments[i+1])
if not os.path.exists("./output") :
os.mkdir("./output")
print("Exporting final mp3 ...")
file_path = "./output/"+output_file_name+".mp3"
program_audio.export(file_path, format="mp3")
print("Done! Exported mp3 to "+ file_path)
<reponame>chrisjbillington/parpde
# Example file that finds the groundstate of a condensate in a rotating frame.
# Takes quite some time to run so you can just stop it when you run out of
# patience and run the plotting script.
# Run with 'mpirun -n <N CPUs> python run.py'
from __future__ import division, print_function
import sys
sys.path.insert(0, '../../..') # The location of the modules we need to import
import numpy as np
import h5py
from parPDE import Simulator2D, LAPLACIAN, GRADX, GRADY
from BEC2D import BEC2D
TIMESTEP_FACTOR = int(sys.argv[1])
METHOD = sys.argv[2]
def get_number_and_trap(rhomax, R):
"""Gives the 2D normalisation constant and trap frequency required for the
specified maximum density and radius of a single-component condensate in
the Thomas-Fermi approximation"""
N = pi * rhomax * R**2 / 2
omega = np.sqrt(2 * g * rhomax / (m * R**2))
return N, omega
# Constants:
pi = np.pi
hbar = 1.054571726e-34 # Reduced Planck's constant
a_0 = 5.29177209e-11 # Bohr radius
u = 1.660539e-27 # unified atomic mass unit
m = 86.909180*u # 87Rb atomic mass
a = 98.98*a_0 # 87Rb |2,2> scattering length
g = 4*pi*hbar**2*a/m # 87Rb self interaction constant
rhomax = 2.5e14 * 1e6 # Desired peak condensate density
R = 7.5e-6 # Desired condensate radius
mu = g * rhomax # Approximate chemical potential for desired max density
# (assuming all population is in mF=+1 or mF=-1)
N_2D, omega = get_number_and_trap(rhomax, R) # 2D normalisation constant and trap frequency
# required for specified radius and peak density
# Rotation rate:
Omega = 2*omega
# Space:
nx_global = ny_global = 256
x_max_global = y_max_global = 10e-6
simulator = Simulator2D(-x_max_global, x_max_global, -y_max_global, y_max_global, nx_global, ny_global,
periodic_x=False, periodic_y=False, operator_order=6)
bec2d = BEC2D(simulator, natural_units=False, use_ffts=False)
x = simulator.x
y = simulator.y
dx = simulator.dx
dy = simulator.dy
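# Characteristic timescales: the dispersion timescale bounds the integration step used
# below, and the chemical-potential timescale sets the length of the short
# imaginary-time smoothing stage.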
dispersion_timescale = dx**2 * m / (pi * hbar)
chemical_potential_timescale = 2*pi*hbar/mu
r2 = x**2.0 + y**2.0
r = np.sqrt(r2)
# A harmonic trap to exactly cancel out the centrifugal force:
alpha = 2
V = 0.5 * m * Omega**2 * R**2.0 * (r/R)**alpha
# A high order polynomial trap as a hard wall potential:
alpha = 16
V += 0.5 * m * omega**2 * R**2.0 * (r/R)**alpha
# The kinetic and rotation terms of the Hamiltonian:
K = -hbar**2/(2*m)*LAPLACIAN - 1j*hbar*Omega*(y * GRADX - x * GRADY)
def H(t, psi):
"""The Hamiltonian for single-component wavefunction psi. Returns the
kinetic term as an OperatorSum instance, and the local terms separately."""
H_local_lin = V
H_local_nonlin = g * abs(psi)**2
return K, H_local_lin, H_local_nonlin
if __name__ == '__main__':
# The initial Thomas-Fermi guess:
psi = rhomax * (1 - (x**2 + y**2) / R**2)
psi[psi < 0] = 0
psi = np.sqrt(psi)
with h5py.File('initial_smaller.h5', 'r') as f:
psi = f['psi'][:]
    # Imprint some vortices, seeding the pseudorandom number generator so that
    # MPI processes all agree on where the vortices are:
np.random.seed(42)
for i in range(30):
sign = np.sign(np.random.normal())
x_vortex = np.random.normal(0, scale=R)
y_vortex = np.random.normal(0, scale=R)
psi[:] *= np.exp(sign * 1j*np.arctan2(x - y_vortex, y - x_vortex))
# Smooth it a bit in imaginary time:
psi = bec2d.evolve(dt=dispersion_timescale/2, t_final=0.1*chemical_potential_timescale,
H=H, psi=psi, mu=mu, method='rk4ilip', imaginary_time=True,
output_interval=100, output_directory='smoothing')
# And evolve it in time for 20ms:
psi = bec2d.evolve(dt=dispersion_timescale/TIMESTEP_FACTOR, t_final=20e-3,
H=H, psi=psi, mu=mu, method=METHOD, imaginary_time=False,
output_interval=100, output_directory='evolution')
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
# Name: realizer.py
# Purpose: music21 class to define a figured bass line, consisting of notes
# and figures in a given key.
# Authors: <NAME>
#
# Copyright: Copyright © 2011 <NAME> and the music21 Project
# License: BSD, see license.txt
# ------------------------------------------------------------------------------
'''
This module, the heart of fbRealizer, is all about realizing
a bass line of (bassNote, notationString)
pairs. All it takes to create well-formed realizations of a
bass line is a few lines of music21 code,
from start to finish. See :class:`~music21.figuredBass.realizer.FiguredBassLine` for more details.
>>> from music21.figuredBass import realizer
>>> from music21 import note
>>> fbLine = realizer.FiguredBassLine()
>>> fbLine.addElement(note.Note('C3'))
>>> fbLine.addElement(note.Note('D3'), '4,3')
>>> fbLine.addElement(note.Note('C3', quarterLength = 2.0))
>>> allSols = fbLine.realize()
>>> allSols.getNumSolutions()
30
>>> #_DOCS_SHOW allSols.generateRandomRealizations(14).show()
.. image:: images/figuredBass/fbRealizer_intro.*
:width: 500
The same can be accomplished by taking the notes and notations
from a :class:`~music21.stream.Stream`.
See :meth:`~music21.figuredBass.realizer.figuredBassFromStream` for more details.
>>> s = converter.parse('tinynotation: C4 D4_4,3 C2', makeNotation=False)
>>> fbLine = realizer.figuredBassFromStream(s)
>>> allSols2 = fbLine.realize()
>>> allSols2.getNumSolutions()
30
'''
import collections
import copy
import random
import unittest
from music21 import chord
from music21 import clef
from music21 import exceptions21
from music21 import key
from music21 import meter
from music21 import note
from music21 import pitch
from music21 import stream
from music21.figuredBass import checker
from music21.figuredBass import notation
from music21.figuredBass import realizerScale
from music21.figuredBass import rules
from music21.figuredBass import segment
_MOD = 'figuredBass.realizer'
def figuredBassFromStream(streamPart):
'''
Takes a :class:`~music21.stream.Part` (or another :class:`~music21.stream.Stream` subclass)
and returns a :class:`~music21.figuredBass.realizer.FiguredBassLine` object whose bass notes
have notations taken from the lyrics in the source stream. This method along with the
:meth:`~music21.figuredBass.realizer.FiguredBassLine.realize` method provide the easiest
way of converting from a notated version of a figured bass (such as in a MusicXML file) to
a realized version of the same line.
>>> s = converter.parse('tinynotation: 4/4 C4 D8_6 E8_6 F4 G4_7 c1', makeNotation=False)
>>> fb = figuredBass.realizer.figuredBassFromStream(s)
>>> fb
<music21.figuredBass.realizer.FiguredBassLine object at 0x...>
>>> fbRules = figuredBass.rules.Rules()
>>> fbRules.partMovementLimits = [(1, 2), (2, 12), (3, 12)]
>>> fbRealization = fb.realize(fbRules)
>>> fbRealization.getNumSolutions()
13
>>> #_DOCS_SHOW fbRealization.generateRandomRealizations(8).show()
.. image:: images/figuredBass/fbRealizer_fbStreamPart.*
:width: 500
'''
sf = streamPart.flat
sfn = sf.notes
keyList = sf.getElementsByClass(key.Key)
myKey = None
if not keyList:
keyList = sf.getElementsByClass(key.KeySignature)
if not keyList:
myKey = key.Key('C')
else:
myKey = keyList[0].asKey('major')
else:
myKey = keyList[0]
tsList = sf.getElementsByClass(meter.TimeSignature)
if not tsList:
ts = meter.TimeSignature('4/4')
else:
ts = tsList[0]
fb = FiguredBassLine(myKey, ts)
if streamPart.hasMeasures():
paddingLeft = streamPart.measure(0).paddingLeft
if paddingLeft != 0.0:
fb._paddingLeft = paddingLeft
for n in sfn:
if n.lyrics:
annotationString = ', '.join([x.text for x in n.lyrics])
fb.addElement(n, annotationString)
else:
fb.addElement(n)
return fb
def addLyricsToBassNote(bassNote, notationString=None):
'''
Takes in a bassNote and a corresponding notationString as arguments.
Adds the parsed notationString as lyrics to the bassNote, which is
useful when displaying the figured bass in external software.
>>> from music21.figuredBass import realizer
>>> from music21 import note
>>> n1 = note.Note('G3')
>>> realizer.addLyricsToBassNote(n1, '6,4')
>>> n1.lyrics[0].text
'6'
>>> n1.lyrics[1].text
'4'
>>> #_DOCS_SHOW n1.show()
.. image:: images/figuredBass/fbRealizer_lyrics.*
:width: 100
'''
bassNote.lyrics = []
n = notation.Notation(notationString)
if not n.figureStrings:
return
maxLength = 0
for fs in n.figureStrings:
if len(fs) > maxLength:
maxLength = len(fs)
for fs in n.figureStrings:
spacesInFront = ''
for i in range(maxLength - len(fs)):
spacesInFront += ' '
bassNote.addLyric(spacesInFront + fs, applyRaw=True)
class FiguredBassLine:
'''
A FiguredBassLine is an interface for realization of a line of (bassNote, notationString) pairs.
Currently, only 1:1 realization is supported, meaning that every bassNote is realized and the
:attr:`~music21.note.GeneralNote.quarterLength` or duration of a realization above a bassNote
is identical to that of the bassNote.
`inKey` defaults to C major.
`inTime` defaults to 4/4.
>>> from music21.figuredBass import realizer
>>> from music21 import key
>>> from music21 import meter
>>> fbLine = realizer.FiguredBassLine(key.Key('B'), meter.TimeSignature('3/4'))
>>> fbLine.inKey
<music21.key.Key of B major>
>>> fbLine.inTime
<music21.meter.TimeSignature 3/4>
'''
_DOC_ORDER = ['addElement', 'generateBassLine', 'realize']
_DOC_ATTR = {'inKey': 'A :class:`~music21.key.Key` which implies a scale value, '
'scale mode, and key signature for a '
':class:`~music21.figuredBass.realizerScale.FiguredBassScale`.',
'inTime': 'A :class:`~music21.meter.TimeSignature` which specifies the '
'time signature of realizations outputted to a '
':class:`~music21.stream.Score`.'}
def __init__(self, inKey=None, inTime=None):
if inKey is None:
inKey = key.Key('C')
if inTime is None:
inTime = meter.TimeSignature('4/4')
self.inKey = inKey
self.inTime = inTime
self._paddingLeft = 0.0
self._overlaidParts = stream.Part()
self._fbScale = realizerScale.FiguredBassScale(inKey.pitchFromDegree(1), inKey.mode)
self._fbList = []
def addElement(self, bassObject, notationString=None):
'''
Use this method to add (bassNote, notationString) pairs to the bass line. Elements
are realized in the order they are added.
>>> from music21.figuredBass import realizer
>>> from music21 import key
>>> from music21 import meter
>>> from music21 import note
>>> fbLine = realizer.FiguredBassLine(key.Key('B'), meter.TimeSignature('3/4'))
>>> fbLine.addElement(note.Note('B2'))
>>> fbLine.addElement(note.Note('C#3'), '6')
>>> fbLine.addElement(note.Note('D#3'), '6')
>>> #_DOCS_SHOW fbLine.generateBassLine().show()
.. image:: images/figuredBass/fbRealizer_bassLine.*
:width: 200
OMIT_FROM_DOCS
>>> fbLine = realizer.FiguredBassLine(key.Key('C'), meter.TimeSignature('4/4'))
>>> fbLine.addElement(harmony.ChordSymbol('C'))
>>> fbLine.addElement(harmony.ChordSymbol('G'))
>>> fbLine = realizer.FiguredBassLine(key.Key('C'), meter.TimeSignature('4/4'))
>>> fbLine.addElement(roman.RomanNumeral('I'))
>>> fbLine.addElement(roman.RomanNumeral('V'))
'''
bassObject.notationString = notationString
c = bassObject.classes
if 'Note' in c:
self._fbList.append((bassObject, notationString)) # a bass note, and a notationString
addLyricsToBassNote(bassObject, notationString)
# ---------- Added to accommodate harmony.ChordSymbol and roman.RomanNumeral objects ---
elif 'RomanNumeral' in c or 'ChordSymbol' in c:
self._fbList.append(bassObject) # a roman Numeral object
else:
raise FiguredBassLineException(
'Not a valid bassObject (only note.Note, '
+ f'harmony.ChordSymbol, and roman.RomanNumeral supported) was {bassObject!r}')
def generateBassLine(self):
'''
Generates the bass line as a :class:`~music21.stream.Score`.
>>> from music21.figuredBass import realizer
>>> from music21 import key
>>> from music21 import meter
>>> from music21 import note
>>> fbLine = realizer.FiguredBassLine(key.Key('B'), meter.TimeSignature('3/4'))
>>> fbLine.addElement(note.Note('B2'))
>>> fbLine.addElement(note.Note('C#3'), '6')
>>> fbLine.addElement(note.Note('D#3'), '6')
>>> #_DOCS_SHOW fbLine.generateBassLine().show()
.. image:: images/figuredBass/fbRealizer_bassLine.*
:width: 200
>>> from music21 import corpus
>>> sBach = corpus.parse('bach/bwv307')
>>> sBach['bass'].measure(0).show('text')
{0.0} ...
{0.0} <music21.clef.BassClef>
{0.0} <music21.key.Key of B- major>
{0.0} <music21.meter.TimeSignature 4/4>
{0.0} <music21.note.Note B->
{0.5} <music21.note.Note C>
>>> fbLine = realizer.figuredBassFromStream(sBach['bass'])
>>> fbLine.generateBassLine().measure(1).show('text')
{0.0} <music21.clef.BassClef>
{0.0} <music21.key.KeySignature of 2 flats>
{0.0} <music21.meter.TimeSignature 4/4>
{3.0} <music21.note.Note B->
{3.5} <music21.note.Note C>
'''
bassLine = stream.Part()
bassLine.append(clef.BassClef())
bassLine.append(key.KeySignature(self.inKey.sharps))
bassLine.append(copy.deepcopy(self.inTime))
r = None
if self._paddingLeft != 0.0:
r = note.Rest(quarterLength=self._paddingLeft)
bassLine.append(r)
for (bassNote, unused_notationString) in self._fbList:
bassLine.append(bassNote)
bl2 = bassLine.makeNotation(inPlace=False, cautionaryNotImmediateRepeat=False)
if r is not None:
m0 = bl2.getElementsByClass('Measure')[0]
m0.remove(m0.getElementsByClass('Rest')[0])
m0.padAsAnacrusis()
return bl2
def retrieveSegments(self, fbRules=None, numParts=4, maxPitch=None):
'''
generates the segmentList from an fbList, including any overlaid Segments
if fbRules is None, creates a new rules.Rules() object
if maxPitch is None, uses pitch.Pitch('B5')
'''
if fbRules is None:
fbRules = rules.Rules()
if maxPitch is None:
maxPitch = pitch.Pitch('B5')
segmentList = []
bassLine = self.generateBassLine()
if len(self._overlaidParts) >= 1:
self._overlaidParts.append(bassLine)
currentMapping = checker.extractHarmonies(self._overlaidParts)
else:
currentMapping = checker.createOffsetMapping(bassLine)
allKeys = sorted(currentMapping.keys())
bassLine = bassLine.flat.notes
bassNoteIndex = 0
previousBassNote = bassLine[bassNoteIndex]
bassNote = currentMapping[allKeys[0]][-1]
previousSegment = segment.OverlaidSegment(bassNote, bassNote.notationString,
self._fbScale,
fbRules, numParts, maxPitch)
previousSegment.quarterLength = previousBassNote.quarterLength
segmentList.append(previousSegment)
for k in allKeys[1:]:
(startTime, unused_endTime) = k
bassNote = currentMapping[k][-1]
currentSegment = segment.OverlaidSegment(bassNote, bassNote.notationString,
self._fbScale,
fbRules, numParts, maxPitch)
for partNumber in range(1, len(currentMapping[k])):
upperPitch = currentMapping[k][partNumber - 1]
currentSegment.fbRules._partPitchLimits.append((partNumber, upperPitch))
if startTime == previousBassNote.offset + previousBassNote.quarterLength:
bassNoteIndex += 1
previousBassNote = bassLine[bassNoteIndex]
currentSegment.quarterLength = previousBassNote.quarterLength
else:
for partNumber in range(len(currentMapping[k]), numParts + 1):
previousSegment.fbRules._partsToCheck.append(partNumber)
# Fictitious, representative only for harmonies preserved
# with addition of melody or melodies
currentSegment.quarterLength = 0.0
segmentList.append(currentSegment)
previousSegment = currentSegment
return segmentList
def overlayPart(self, music21Part):
self._overlaidParts.append(music21Part)
def realize(self, fbRules=None, numParts=4, maxPitch=None):
'''
Creates a :class:`~music21.figuredBass.segment.Segment`
for each (bassNote, notationString) pair
added using :meth:`~music21.figuredBass.realizer.FiguredBassLine.addElement`.
Each Segment is associated
with the :class:`~music21.figuredBass.rules.Rules` object provided, meaning that rules are
universally applied across all Segments. The number of parts in a realization
(including the bass) can be controlled through numParts, and the maximum pitch can
likewise be controlled through maxPitch.
Returns a :class:`~music21.figuredBass.realizer.Realization`.
        If this method is called without having provided any (bassNote, notationString) pairs,
a FiguredBassLineException is raised. If only one pair is provided, the Realization will
contain :meth:`~music21.figuredBass.segment.Segment.allCorrectConsecutivePossibilities`
for the one note.
if `fbRules` is None, creates a new rules.Rules() object
if `maxPitch` is None, uses pitch.Pitch('B5')
>>> from music21.figuredBass import realizer
>>> from music21.figuredBass import rules
>>> from music21 import key
>>> from music21 import meter
>>> from music21 import note
>>> fbLine = realizer.FiguredBassLine(key.Key('B'), meter.TimeSignature('3/4'))
>>> fbLine.addElement(note.Note('B2'))
>>> fbLine.addElement(note.Note('C#3'), '6')
>>> fbLine.addElement(note.Note('D#3'), '6')
>>> fbRules = rules.Rules()
>>> r1 = fbLine.realize(fbRules)
>>> r1.getNumSolutions()
208
>>> fbRules.forbidVoiceOverlap = False
>>> r2 = fbLine.realize(fbRules)
>>> r2.getNumSolutions()
7908
OMIT_FROM_DOCS
>>> fbLine3 = realizer.FiguredBassLine(key.Key('C'), meter.TimeSignature('2/4'))
>>> h1 = harmony.ChordSymbol('C')
>>> h1.bass().octave = 4
>>> fbLine3.addElement(h1)
>>> h2 = harmony.ChordSymbol('G')
>>> h2.bass().octave = 4
>>> fbLine3.addElement(h2)
>>> r3 = fbLine3.realize()
>>> r3.getNumSolutions()
13
>>> fbLine4 = realizer.FiguredBassLine(key.Key('C'), meter.TimeSignature('2/4'))
>>> fbLine4.addElement(roman.RomanNumeral('I'))
>>> fbLine4.addElement(roman.RomanNumeral('IV'))
>>> r4 = fbLine4.realize()
>>> r4.getNumSolutions()
13
'''
if fbRules is None:
fbRules = rules.Rules()
if maxPitch is None:
maxPitch = pitch.Pitch('B5')
segmentList = []
listOfHarmonyObjects = False
for item in self._fbList:
try:
c = item.classes
except AttributeError:
continue
if 'Note' in c:
break
# Added to accommodate harmony.ChordSymbol and roman.RomanNumeral objects
if 'RomanNumeral' in c or 'ChordSymbol' in c:
listOfHarmonyObjects = True
break
if listOfHarmonyObjects:
for harmonyObject in self._fbList:
listOfPitchesJustNames = []
for thisPitch in harmonyObject.pitches:
listOfPitchesJustNames.append(thisPitch.name)
# remove duplicates just in case...
d = {}
for x in listOfPitchesJustNames:
d[x] = x
outputList = d.values()
def g(y):
return y if y != 0.0 else 1.0
passedNote = note.Note(harmonyObject.bass().nameWithOctave,
quarterLength=g(harmonyObject.duration.quarterLength))
correspondingSegment = segment.Segment(bassNote=passedNote,
fbScale=self._fbScale,
fbRules=fbRules,
numParts=numParts,
maxPitch=maxPitch,
listOfPitches=outputList)
correspondingSegment.quarterLength = g(harmonyObject.duration.quarterLength)
segmentList.append(correspondingSegment)
# ---------- Original code - Accommodates a tuple (figured bass) --------
else:
segmentList = self.retrieveSegments(fbRules, numParts, maxPitch)
if len(segmentList) >= 2:
for segmentIndex in range(len(segmentList) - 1):
segmentA = segmentList[segmentIndex]
segmentB = segmentList[segmentIndex + 1]
correctAB = segmentA.allCorrectConsecutivePossibilities(segmentB)
segmentA.movements = collections.defaultdict(list)
listAB = list(correctAB)
for (possibA, possibB) in listAB:
segmentA.movements[possibA].append(possibB)
self._trimAllMovements(segmentList)
elif len(segmentList) == 1:
segmentA = segmentList[0]
segmentA.correctA = list(segmentA.allCorrectSinglePossibilities())
elif not segmentList:
raise FiguredBassLineException('No (bassNote, notationString) pairs to realize.')
return Realization(realizedSegmentList=segmentList, inKey=self.inKey,
inTime=self.inTime, overlaidParts=self._overlaidParts[0:-1],
paddingLeft=self._paddingLeft)
def _trimAllMovements(self, segmentList):
'''
Each :class:`~music21.figuredBass.segment.Segment` which resolves to another
defines a list of movements, nextMovements. Keys for nextMovements are correct
single possibilities of the current Segment. For a given key, a value is a list
of correct single possibilities in the subsequent Segment representing acceptable
movements between the two. There may be movements in a string of Segments which
directly or indirectly lead nowhere. This method is designed to be called on
a list of Segments **after** movements are found, as happens in
:meth:`~music21.figuredBass.realizer.FiguredBassLine.realize`.
'''
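        # Illustrative shape only (built in realize()): each Segment's movements attribute is a
        # defaultdict(list) mapping a possibility in that Segment to the list of possibilities
        # it may move to in the next Segment, e.g. {possibA1: [possibB1, possibB2], ...}.
        # Trimming deletes keys whose lists become empty so that no dead-end paths survive.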
if len(segmentList) == 1 or len(segmentList) == 2:
return True
elif len(segmentList) >= 3:
segmentList.reverse()
# gets this wrong... # pylint: disable=cell-var-from-loop
movementsAB = None
for segmentIndex in range(1, len(segmentList) - 1):
movementsAB = segmentList[segmentIndex + 1].movements
movementsBC = segmentList[segmentIndex].movements
# eliminated = []
for (possibB, possibCList) in list(movementsBC.items()):
if not possibCList:
del movementsBC[possibB]
for (possibA, possibBList) in list(movementsAB.items()):
movementsAB[possibA] = list(
filter(lambda possibBB: (possibBB in movementsBC), possibBList))
for (possibA, possibBList) in list(movementsAB.items()):
if not possibBList:
del movementsAB[possibA]
segmentList.reverse()
return True
class Realization:
'''
Returned by :class:`~music21.figuredBass.realizer.FiguredBassLine` after calling
:meth:`~music21.figuredBass.realizer.FiguredBassLine.realize`. Allows for the
generation of realizations as a :class:`~music21.stream.Score`.
* See the :mod:`~music21.figuredBass.examples` module for examples on the generation
of realizations.
* A possibility progression is a valid progression through a string of
:class:`~music21.figuredBass.segment.Segment` instances.
See :mod:`~music21.figuredBass.possibility` for more details on possibilities.
'''
_DOC_ORDER = ['getNumSolutions', 'generateRandomRealization',
'generateRandomRealizations', 'generateAllRealizations',
'getAllPossibilityProgressions', 'getRandomPossibilityProgression',
'generateRealizationFromPossibilityProgression']
_DOC_ATTR = {'keyboardStyleOutput': '''True by default. If True, generated realizations
are represented in keyboard style, with two staves. If False,
realizations are represented in chorale style with n staves,
where n is the number of parts. SATB if n = 4.'''}
def __init__(self, **fbLineOutputs):
# fbLineOutputs always will have three elements, checks are for sphinx documentation only.
if 'realizedSegmentList' in fbLineOutputs:
self._segmentList = fbLineOutputs['realizedSegmentList']
if 'inKey' in fbLineOutputs:
self._inKey = fbLineOutputs['inKey']
self._keySig = key.KeySignature(self._inKey.sharps)
if 'inTime' in fbLineOutputs:
self._inTime = fbLineOutputs['inTime']
if 'overlaidParts' in fbLineOutputs:
self._overlaidParts = fbLineOutputs['overlaidParts']
if 'paddingLeft' in fbLineOutputs:
self._paddingLeft = fbLineOutputs['paddingLeft']
self.keyboardStyleOutput = True
def getNumSolutions(self):
'''
Returns the number of solutions (unique realizations) to a Realization by calculating
the total number of paths through a string of :class:`~music21.figuredBass.segment.Segment`
movements. This is faster and more efficient than compiling each unique realization into a
list, adding it to a master list, and then taking the length of the master list.
>>> from music21.figuredBass import examples
>>> fbLine = examples.exampleB()
>>> fbRealization = fbLine.realize()
>>> fbRealization.getNumSolutions()
422
>>> fbLine2 = examples.exampleC()
>>> fbRealization2 = fbLine2.realize()
>>> fbRealization2.getNumSolutions()
833
'''
        # What if there's only one (bassNote, notationString)?
        if len(self._segmentList) == 1:
            return len(self._segmentList[0].correctA)
self._segmentList.reverse()
pathList = {}
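        # Count paths by dynamic programming over the reversed segment list: the count for a
        # possibility is the sum of the counts of the possibilities it can move to.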
for segmentIndex in range(1, len(self._segmentList)):
segmentA = self._segmentList[segmentIndex]
newPathList = {}
if not pathList:
for possibA in segmentA.movements:
newPathList[possibA] = len(segmentA.movements[possibA])
else:
for possibA in segmentA.movements:
prevValue = 0
for possibB in segmentA.movements[possibA]:
prevValue += pathList[possibB]
newPathList[possibA] = prevValue
pathList = newPathList
numSolutions = 0
for possibA in pathList:
numSolutions += pathList[possibA]
self._segmentList.reverse()
return numSolutions
def getAllPossibilityProgressions(self):
'''
Compiles each unique possibility progression, adding
it to a master list. Returns the master list.
.. warning:: This method is unoptimized, and may take a prohibitive amount
of time for a Realization which has more than 200,000 solutions.
'''
progressions = []
if len(self._segmentList) == 1:
for possibA in self._segmentList[0].correctA:
progressions.append([possibA])
return progressions
currMovements = self._segmentList[0].movements
for possibA in currMovements:
possibBList = currMovements[possibA]
for possibB in possibBList:
progressions.append([possibA, possibB])
for segmentIndex in range(1, len(self._segmentList) - 1):
currMovements = self._segmentList[segmentIndex].movements
for unused_progressionIndex in range(len(progressions)):
progression = progressions.pop(0)
possibB = progression[-1]
for possibC in currMovements[possibB]:
newProgression = copy.copy(progression)
newProgression.append(possibC)
progressions.append(newProgression)
return progressions
def getRandomPossibilityProgression(self):
'''
Returns a random unique possibility progression.
'''
progression = []
if len(self._segmentList) == 1:
possibA = random.sample(self._segmentList[0].correctA, 1)[0]
progression.append(possibA)
return progression
currMovements = self._segmentList[0].movements
if self.getNumSolutions() == 0:
raise FiguredBassLineException('Zero solutions')
prevPossib = random.sample(currMovements.keys(), 1)[0]
progression.append(prevPossib)
for segmentIndex in range(len(self._segmentList) - 1):
currMovements = self._segmentList[segmentIndex].movements
nextPossib = random.sample(currMovements[prevPossib], 1)[0]
progression.append(nextPossib)
prevPossib = nextPossib
return progression
def generateRealizationFromPossibilityProgression(self, possibilityProgression):
'''
Generates a realization as a :class:`~music21.stream.Score` given a possibility progression.
'''
sol = stream.Score()
bassLine = stream.Part()
bassLine.append([copy.deepcopy(self._keySig), copy.deepcopy(self._inTime)])
r = None
if self._paddingLeft != 0.0:
r = note.Rest(quarterLength=self._paddingLeft)
bassLine.append(copy.deepcopy(r))
if self.keyboardStyleOutput:
rightHand = stream.Part()
sol.insert(0.0, rightHand)
rightHand.append([copy.deepcopy(self._keySig), copy.deepcopy(self._inTime)])
if r is not None:
rightHand.append(copy.deepcopy(r))
for segmentIndex in range(len(self._segmentList)):
possibA = possibilityProgression[segmentIndex]
bassNote = self._segmentList[segmentIndex].bassNote
bassLine.append(copy.deepcopy(bassNote))
rhPitches = possibA[0:-1]
rhChord = chord.Chord(rhPitches)
rhChord.quarterLength = self._segmentList[segmentIndex].quarterLength
rightHand.append(rhChord)
rightHand.insert(0.0, clef.TrebleClef())
rightHand.makeNotation(inPlace=True, cautionaryNotImmediateRepeat=False)
if r is not None:
rightHand[0].pop(3)
rightHand[0].padAsAnacrusis()
else: # Chorale-style output
upperParts = []
for partNumber in range(len(possibilityProgression[0]) - 1):
fbPart = stream.Part()
sol.insert(0.0, fbPart)
fbPart.append([copy.deepcopy(self._keySig), copy.deepcopy(self._inTime)])
if r is not None:
fbPart.append(copy.deepcopy(r))
upperParts.append(fbPart)
for segmentIndex in range(len(self._segmentList)):
possibA = possibilityProgression[segmentIndex]
bassNote = self._segmentList[segmentIndex].bassNote
bassLine.append(copy.deepcopy(bassNote))
for partNumber in range(len(possibA) - 1):
n1 = note.Note(possibA[partNumber])
n1.quarterLength = self._segmentList[segmentIndex].quarterLength
upperParts[partNumber].append(n1)
for upperPart in upperParts:
c = clef.bestClef(upperPart, allowTreble8vb=True, recurse=True)
upperPart.insert(0.0, c)
upperPart.makeNotation(inPlace=True, cautionaryNotImmediateRepeat=False)
if r is not None:
upperPart[0].pop(3)
upperPart[0].padAsAnacrusis()
bassLine.insert(0.0, clef.BassClef())
bassLine.makeNotation(inPlace=True, cautionaryNotImmediateRepeat=False)
if r is not None:
bassLine[0].pop(3)
bassLine[0].padAsAnacrusis()
sol.insert(0.0, bassLine)
return sol
def generateAllRealizations(self):
'''
Generates all unique realizations as a :class:`~music21.stream.Score`.
.. warning:: This method is unoptimized, and may take a prohibitive amount
of time for a Realization which has more than 100 solutions.
'''
allSols = stream.Score()
possibilityProgressions = self.getAllPossibilityProgressions()
if not possibilityProgressions:
raise FiguredBassLineException('Zero solutions')
sol0 = self.generateRealizationFromPossibilityProgression(possibilityProgressions[0])
for music21Part in sol0:
allSols.append(music21Part)
for possibIndex in range(1, len(possibilityProgressions)):
solX = self.generateRealizationFromPossibilityProgression(
possibilityProgressions[possibIndex])
for partIndex in range(len(solX)):
for music21Measure in solX[partIndex]:
allSols[partIndex].append(music21Measure)
return allSols
def generateRandomRealization(self):
'''
Generates a random unique realization as a :class:`~music21.stream.Score`.
'''
possibilityProgression = self.getRandomPossibilityProgression()
return self.generateRealizationFromPossibilityProgression(possibilityProgression)
def generateRandomRealizations(self, amountToGenerate=20):
'''
Generates *amountToGenerate* unique realizations as a :class:`~music21.stream.Score`.
.. warning:: This method is unoptimized, and may take a prohibitive amount
of time if amountToGenerate is more than 100.
'''
if amountToGenerate > self.getNumSolutions():
return self.generateAllRealizations()
allSols = stream.Score()
sol0 = self.generateRandomRealization()
for music21Part in sol0:
allSols.append(music21Part)
for unused_counter_solution in range(1, amountToGenerate):
solX = self.generateRandomRealization()
for partIndex in range(len(solX)):
for music21Measure in solX[partIndex]:
allSols[partIndex].append(music21Measure)
return allSols
_DOC_ORDER = [figuredBassFromStream, addLyricsToBassNote,
FiguredBassLine, Realization]
class FiguredBassLineException(exceptions21.Music21Exception):
pass
# ------------------------------------------------------------------------------
class Test(unittest.TestCase):
pass
if __name__ == '__main__':
import music21
music21.mainTest(Test)
<reponame>bopopescu/phyG
import logging
import os
import threading
from galaxy.util import asbool
from galaxy.web.framework.helpers import time_ago
from tool_shed.util import readme_util
import tool_shed.util.shed_util_common as suc
log = logging.getLogger( __name__ )
# String separator
STRSEP = '__ESEP__'
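# STRSEP separates the parts of a container key; Folder.to_repository_dependency() below
# splits such keys to rebuild the (toolshed, name, owner, changeset_revision,
# prior_installation_required) tuple.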
class Folder( object ):
"""Container object."""
def __init__( self, id=None, key=None, label=None, parent=None ):
self.id = id
self.key = key
self.label = label
self.parent = parent
self.description = None
self.datatypes = []
self.folders = []
self.invalid_repository_dependencies = []
self.invalid_tool_dependencies = []
self.invalid_tools = []
self.installation_errors = []
self.current_repository_installation_errors = []
self.repository_installation_errors = []
self.tool_dependency_installation_errors = []
self.valid_tools = []
self.valid_data_managers = []
self.invalid_data_managers = []
self.tool_dependencies = []
self.failed_tests = []
self.missing_test_components = []
self.not_tested = []
self.passed_tests = []
self.test_environments = []
self.repository_dependencies = []
self.readme_files = []
self.workflows = []
def contains_folder( self, folder ):
for index, contained_folder in enumerate( self.folders ):
if folder == contained_folder:
return index, contained_folder
return 0, None
def contains_repository_dependency( self, repository_dependency ):
listified_repository_dependency = repository_dependency.listify
for contained_repository_dependency in self.repository_dependencies:
if contained_repository_dependency.listify == listified_repository_dependency:
return True
return False
def remove_repository_dependency( self, repository_dependency ):
listified_repository_dependency = repository_dependency.listify
for contained_repository_dependency in self.repository_dependencies:
if contained_repository_dependency.listify == listified_repository_dependency:
self.repository_dependencies.remove( contained_repository_dependency )
def to_repository_dependency( self, repository_dependency_id ):
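        # Rebuild a RepositoryDependency object from this folder's STRSEP-joined key.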
toolshed, name, owner, changeset_revision, prior_installation_required = suc.parse_repository_dependency_tuple( self.key.split( STRSEP ) )
return RepositoryDependency( id=repository_dependency_id,
toolshed=toolshed,
repository_name=name,
repository_owner=owner,
changeset_revision=changeset_revision,
prior_installation_required=asbool( prior_installation_required ) )
class DataManager( object ):
"""Data Manager object"""
def __init__( self, id=None, name=None, version=None, data_tables=None ):
self.id = id
self.name = name
self.version = version
self.data_tables = data_tables
class Datatype( object ):
"""Datatype object"""
def __init__( self, id=None, extension=None, type=None, mimetype=None, subclass=None, converters=None, display_app_containers=None ):
self.id = id
self.extension = extension
self.type = type
self.mimetype = mimetype
self.subclass = subclass
self.converters = converters
self.display_app_containers = display_app_containers
class FailedTest( object ):
"""Failed tool tests object"""
def __init__( self, id=None, stderr=None, test_id=None, tool_id=None, tool_version=None, traceback=None ):
self.id = id
self.stderr = stderr
self.test_id = test_id
self.tool_id = tool_id
self.tool_version = tool_version
self.traceback = traceback
class InvalidDataManager( object ):
"""Invalid data Manager object"""
def __init__( self, id=None, index=None, error=None ):
self.id = id
self.index = index
self.error = error
class InvalidRepositoryDependency( object ):
"""Invalid repository dependency definition object"""
def __init__( self, id=None, toolshed=None, repository_name=None, repository_owner=None, changeset_revision=None, prior_installation_required=False, error=None ):
self.id = id
self.toolshed = toolshed
self.repository_name = repository_name
self.repository_owner = repository_owner
self.changeset_revision = changeset_revision
self.prior_installation_required = prior_installation_required
self.error = error
class InvalidTool( object ):
"""Invalid tool object"""
def __init__( self, id=None, tool_config=None, repository_id=None, changeset_revision=None, repository_installation_status=None ):
self.id = id
self.tool_config = tool_config
self.repository_id = repository_id
self.changeset_revision = changeset_revision
self.repository_installation_status = repository_installation_status
class InvalidToolDependency( object ):
"""Invalid tool dependency definition object"""
def __init__( self, id=None, name=None, version=None, type=None, error=None ):
self.id = id
self.name = name
self.version = version
self.type = type
self.error = error
class MissingTestComponent( object ):
"""Missing tool test components object"""
def __init__( self, id=None, missing_components=None, tool_guid=None, tool_id=None, tool_version=None ):
self.id = id
self.missing_components = missing_components
self.tool_guid = tool_guid
self.tool_id = tool_id
self.tool_version = tool_version
class NotTested( object ):
"""NotTested object"""
def __init__( self, id=None, reason=None ):
self.id = id
self.reason = reason
class PassedTest( object ):
"""Passed tool tests object"""
def __init__( self, id=None, test_id=None, tool_id=None, tool_version=None ):
self.id = id
self.test_id = test_id
self.tool_id = tool_id
self.tool_version = tool_version
class ReadMe( object ):
"""Readme text object"""
def __init__( self, id=None, name=None, text=None ):
self.id = id
self.name = name
self.text = text
class RepositoryDependency( object ):
"""Repository dependency object"""
def __init__( self, id=None, toolshed=None, repository_name=None, repository_owner=None, changeset_revision=None, prior_installation_required=False,
installation_status=None, tool_shed_repository_id=None ):
self.id = id
self.toolshed = toolshed
self.repository_name = repository_name
self.repository_owner = repository_owner
self.changeset_revision = changeset_revision
self.prior_installation_required = prior_installation_required
self.installation_status = installation_status
self.tool_shed_repository_id = tool_shed_repository_id
@property
def listify( self ):
return [ self.toolshed, self.repository_name, self.repository_owner, self.changeset_revision, asbool( str( self.prior_installation_required ) ) ]
class RepositoryInstallationError( object ):
"""Repository installation error object"""
def __init__( self, id=None, tool_shed=None, name=None, owner=None, changeset_revision=None, error_message=None ):
self.id = id
self.tool_shed = tool_shed
self.name = name
self.owner = owner
self.changeset_revision = changeset_revision
self.error_message = error_message
class TestEnvironment( object ):
"""Tool test environment object"""
def __init__( self, id=None, architecture=None, galaxy_database_version=None, galaxy_revision=None, python_version=None, system=None, time_last_tested=None,
tool_shed_database_version=None, tool_shed_mercurial_version=None, tool_shed_revision=None ):
self.id = id
self.architecture = architecture
self.galaxy_database_version = galaxy_database_version
self.galaxy_revision = galaxy_revision
self.python_version = python_version
self.system = system
self.time_last_tested = time_last_tested
self.tool_shed_database_version = tool_shed_database_version
self.tool_shed_mercurial_version = tool_shed_mercurial_version
self.tool_shed_revision = tool_shed_revision
class Tool( object ):
"""Tool object"""
def __init__( self, id=None, tool_config=None, tool_id=None, name=None, description=None, version=None, requirements=None,
repository_id=None, changeset_revision=None, repository_installation_status=None ):
self.id = id
self.tool_config = tool_config
self.tool_id = tool_id
self.name = name
self.description = description
self.version = version
self.requirements = requirements
self.repository_id = repository_id
self.changeset_revision = changeset_revision
self.repository_installation_status = repository_installation_status
class ToolDependency( object ):
"""Tool dependency object"""
def __init__( self, id=None, name=None, version=None, type=None, install_dir=None, readme=None, installation_status=None, repository_id=None,
tool_dependency_id=None, is_orphan=None ):
self.id = id
self.name = name
self.version = version
self.type = type
self.install_dir = install_dir
self.readme = readme
self.installation_status = installation_status
self.repository_id = repository_id
self.tool_dependency_id = tool_dependency_id
self.is_orphan = is_orphan
@property
def listify( self ):
return [ self.name, self.version, self.type ]
class ToolDependencyInstallationError( object ):
"""Tool dependency installation error object"""
def __init__( self, id=None, type=None, name=None, version=None, error_message=None ):
self.id = id
self.type = type
self.name = name
self.version = version
self.error_message = error_message
class Workflow( object ):
"""Workflow object."""
def __init__( self, id=None, workflow_name=None, steps=None, format_version=None, annotation=None, repository_metadata_id=None, repository_id=None ):
# When rendered in the tool shed, repository_metadata_id will have a value and repository_id will be None. When rendered in Galaxy, repository_id
# will have a value and repository_metadata_id will be None.
self.id = id
self.workflow_name = workflow_name
self.steps = steps
self.format_version = format_version
self.annotation = annotation
self.repository_metadata_id = repository_metadata_id
self.repository_id = repository_id
def add_orphan_settings_to_tool_dependencies( tool_dependencies, orphan_tool_dependencies ):
"""Inspect all received tool dependencies and label those that are orphans within the repository."""
orphan_env_dependencies = orphan_tool_dependencies.get( 'set_environment', None )
new_tool_dependencies = {}
if tool_dependencies:
for td_key, requirements_dict in tool_dependencies.items():
if td_key in [ 'set_environment' ]:
# "set_environment": [{"name": "R_SCRIPT_PATH", "type": "set_environment"}]
if orphan_env_dependencies:
new_set_environment_dict_list = []
for set_environment_dict in requirements_dict:
if set_environment_dict in orphan_env_dependencies:
set_environment_dict[ 'is_orphan' ] = True
else:
set_environment_dict[ 'is_orphan' ] = False
new_set_environment_dict_list.append( set_environment_dict )
new_tool_dependencies[ td_key ] = new_set_environment_dict_list
else:
new_tool_dependencies[ td_key ] = requirements_dict
else:
# {"R/2.15.1": {"name": "R", "readme": "some string", "type": "package", "version": "2.15.1"}
if td_key in orphan_tool_dependencies:
requirements_dict[ 'is_orphan' ] = True
else:
requirements_dict[ 'is_orphan' ] = False
new_tool_dependencies[ td_key ] = requirements_dict
return new_tool_dependencies
def build_data_managers_folder( trans, folder_id, data_managers, label=None ):
"""Return a folder hierarchy containing Data Managers."""
if data_managers:
if label is None:
label = "Data Managers"
data_manager_id = 0
folder_id += 1
data_managers_root_folder = Folder( id=folder_id, key='root', label='root', parent=None )
folder_id += 1
key = "valid_data_managers"
folder = Folder( id=folder_id, key=key, label=label, parent=data_managers_root_folder )
data_managers_root_folder.folders.append( folder )
# Insert a header row.
data_manager_id += 1
data_manager = DataManager( id=data_manager_id,
name='Name',
version='Version',
data_tables='Data Tables' )
folder.valid_data_managers.append( data_manager )
for data_manager_dict in data_managers.itervalues():
data_manager_id += 1
data_manager = DataManager( id=data_manager_id,
name=data_manager_dict.get( 'name', '' ),
version=data_manager_dict.get( 'version', '' ),
data_tables=", ".join( data_manager_dict.get( 'data_tables', '' ) ) )
folder.valid_data_managers.append( data_manager )
else:
data_managers_root_folder = None
return folder_id, data_managers_root_folder
def build_datatypes_folder( trans, folder_id, datatypes, label='Datatypes' ):
"""Return a folder hierarchy containing datatypes."""
if datatypes:
datatype_id = 0
folder_id += 1
datatypes_root_folder = Folder( id=folder_id, key='root', label='root', parent=None )
folder_id += 1
folder = Folder( id=folder_id, key='datatypes', label=label, parent=datatypes_root_folder )
datatypes_root_folder.folders.append( folder )
# Insert a header row.
datatype_id += 1
datatype = Datatype( id=datatype_id,
extension='extension',
type='type',
mimetype='mimetype',
subclass='subclass' )
folder.datatypes.append( datatype )
for datatypes_dict in datatypes:
# {"converters":
# [{"target_datatype": "gff",
# "tool_config": "bed_to_gff_converter.xml",
# "guid": "localhost:9009/repos/test/bed_to_gff_converter/CONVERTER_bed_to_gff_0/2.0.0"}],
# "display_in_upload": "true",
# "dtype": "galaxy.datatypes.interval:Bed",
# "extension": "bed"}
# TODO: converters and display_app information is not currently rendered. Should it be?
# Handle defined converters, if any.
converters = datatypes_dict.get( 'converters', None )
if converters:
num_converters = len( converters )
else:
num_converters = 0
# Handle defined display applications, if any.
display_app_containers = datatypes_dict.get( 'display_app_containers', None )
if display_app_containers:
num_display_app_containers = len( display_app_containers )
else:
num_display_app_containers = 0
datatype_id += 1
datatype = Datatype( id=datatype_id,
extension=datatypes_dict.get( 'extension', '' ),
type=datatypes_dict.get( 'dtype', '' ),
mimetype=datatypes_dict.get( 'mimetype', '' ),
subclass=datatypes_dict.get( 'subclass', '' ),
converters=num_converters,
display_app_containers=num_display_app_containers )
folder.datatypes.append( datatype )
else:
datatypes_root_folder = None
return folder_id, datatypes_root_folder
def build_invalid_data_managers_folder( trans, folder_id, data_managers, error_messages=None, label=None ):
"""Return a folder hierarchy containing invalid Data Managers."""
if data_managers or error_messages:
if label is None:
label = "Invalid Data Managers"
data_manager_id = 0
folder_id += 1
data_managers_root_folder = Folder( id=folder_id, key='root', label='root', parent=None )
folder_id += 1
key = "invalid_data_managers"
folder = Folder( id=folder_id, key=key, label=label, parent=data_managers_root_folder )
data_managers_root_folder.folders.append( folder )
# Insert a header row.
data_manager_id += 1
data_manager = InvalidDataManager( id=data_manager_id,
index='Element Index',
error='Error' )
folder.invalid_data_managers.append( data_manager )
if error_messages:
for error_message in error_messages:
data_manager_id += 1
data_manager = InvalidDataManager( id=data_manager_id,
index=0,
error=error_message )
folder.invalid_data_managers.append( data_manager )
has_errors = True
for data_manager_dict in data_managers:
data_manager_id += 1
data_manager = InvalidDataManager( id=data_manager_id,
index=data_manager_dict.get( 'index', 0 ) + 1,
error=data_manager_dict.get( 'error_message', '' ) )
folder.invalid_data_managers.append( data_manager )
has_errors = True
else:
data_managers_root_folder = None
return folder_id, data_managers_root_folder
def build_invalid_repository_dependencies_root_folder( trans, folder_id, invalid_repository_dependencies_dict ):
"""Return a folder hierarchy containing invalid repository dependencies."""
label = 'Invalid repository dependencies'
if invalid_repository_dependencies_dict:
invalid_repository_dependency_id = 0
folder_id += 1
invalid_repository_dependencies_root_folder = Folder( id=folder_id, key='root', label='root', parent=None )
folder_id += 1
invalid_repository_dependencies_folder = Folder( id=folder_id,
key='invalid_repository_dependencies',
label=label,
parent=invalid_repository_dependencies_root_folder )
invalid_repository_dependencies_root_folder.folders.append( invalid_repository_dependencies_folder )
invalid_repository_dependencies = invalid_repository_dependencies_dict[ 'repository_dependencies' ]
for invalid_repository_dependency in invalid_repository_dependencies:
folder_id += 1
invalid_repository_dependency_id += 1
toolshed, name, owner, changeset_revision, prior_installation_required, error = \
suc.parse_repository_dependency_tuple( invalid_repository_dependency, contains_error=True )
key = generate_repository_dependencies_key_for_repository( toolshed, name, owner, changeset_revision, prior_installation_required )
label = "Repository <b>%s</b> revision <b>%s</b> owned by <b>%s</b>" % ( name, changeset_revision, owner )
folder = Folder( id=folder_id,
key=key,
label=label,
parent=invalid_repository_dependencies_folder )
ird = InvalidRepositoryDependency( id=invalid_repository_dependency_id,
toolshed=toolshed,
repository_name=name,
repository_owner=owner,
changeset_revision=changeset_revision,
prior_installation_required=asbool( prior_installation_required ),
error=error )
folder.invalid_repository_dependencies.append( ird )
invalid_repository_dependencies_folder.folders.append( folder )
else:
invalid_repository_dependencies_root_folder = None
return folder_id, invalid_repository_dependencies_root_folder
def build_invalid_tool_dependencies_root_folder( trans, folder_id, invalid_tool_dependencies_dict ):
"""Return a folder hierarchy containing invalid tool dependencies."""
    # Invalid tool dependencies are always packages like:
    # {"R/2.15.1": {"name": "R", "readme": "some string", "type": "package", "version": "2.15.1", "error": "some string"}}
label = 'Invalid tool dependencies'
if invalid_tool_dependencies_dict:
invalid_tool_dependency_id = 0
folder_id += 1
invalid_tool_dependencies_root_folder = Folder( id=folder_id, key='root', label='root', parent=None )
folder_id += 1
invalid_tool_dependencies_folder = Folder( id=folder_id,
key='invalid_tool_dependencies',
label=label,
parent=invalid_tool_dependencies_root_folder )
invalid_tool_dependencies_root_folder.folders.append( invalid_tool_dependencies_folder )
for td_key, requirements_dict in invalid_tool_dependencies_dict.items():
folder_id += 1
invalid_tool_dependency_id += 1
name = requirements_dict[ 'name' ]
type = requirements_dict[ 'type' ]
version = requirements_dict[ 'version' ]
error = requirements_dict[ 'error' ]
key = generate_tool_dependencies_key( name, version, type )
label = "Version <b>%s</b> of the <b>%s</b> <b>%s</b>" % ( version, name, type )
folder = Folder( id=folder_id,
key=key,
label=label,
parent=invalid_tool_dependencies_folder )
itd = InvalidToolDependency( id=invalid_tool_dependency_id,
name=name,
version=version,
type=type,
error=error )
folder.invalid_tool_dependencies.append( itd )
invalid_tool_dependencies_folder.folders.append( folder )
else:
invalid_tool_dependencies_root_folder = None
return folder_id, invalid_tool_dependencies_root_folder
def build_invalid_tools_folder( trans, folder_id, invalid_tool_configs, changeset_revision, repository=None, label='Invalid tools' ):
"""Return a folder hierarchy containing invalid tools."""
# TODO: Should we display invalid tools on the tool panel selection page when installing the repository into Galaxy?
if invalid_tool_configs:
invalid_tool_id = 0
folder_id += 1
invalid_tools_root_folder = Folder( id=folder_id, key='root', label='root', parent=None )
folder_id += 1
folder = Folder( id=folder_id, key='invalid_tools', label=label, parent=invalid_tools_root_folder )
invalid_tools_root_folder.folders.append( folder )
for invalid_tool_config in invalid_tool_configs:
invalid_tool_id += 1
if repository:
repository_id = repository.id
if trans.webapp.name == 'galaxy':
repository_installation_status = repository.status
else:
repository_installation_status = None
else:
repository_id = None
repository_installation_status = None
invalid_tool = InvalidTool( id=invalid_tool_id,
tool_config=invalid_tool_config,
repository_id=repository_id,
changeset_revision=changeset_revision,
repository_installation_status=repository_installation_status )
folder.invalid_tools.append( invalid_tool )
else:
invalid_tools_root_folder = None
return folder_id, invalid_tools_root_folder
def build_readme_files_folder( trans, folder_id, readme_files_dict, label='Readme files' ):
"""Return a folder hierarchy containing readme text files."""
if readme_files_dict:
multiple_readme_files = len( readme_files_dict ) > 1
readme_id = 0
folder_id += 1
readme_files_root_folder = Folder( id=folder_id, key='root', label='root', parent=None )
if multiple_readme_files:
folder_id += 1
readme_files_folder = Folder( id=folder_id, key='readme_files', label=label, parent=readme_files_root_folder )
readme_files_root_folder.folders.append( readme_files_folder )
for readme_file_name, readme_file_text in readme_files_dict.items():
readme_id += 1
readme = ReadMe( id=readme_id, name=readme_file_name, text=readme_file_text )
if multiple_readme_files:
folder_id += 1
folder = Folder( id=folder_id, key=readme.name, label=readme.name, parent=readme_files_folder )
folder.readme_files.append( readme )
readme_files_folder.folders.append( folder )
else:
folder_id += 1
readme_files_folder = Folder( id=folder_id, key='readme_files', label=readme.name, parent=readme_files_root_folder )
readme_files_folder.readme_files.append( readme )
readme_files_root_folder.folders.append( readme_files_folder )
else:
readme_files_root_folder = None
return folder_id, readme_files_root_folder
def build_repository_containers_for_galaxy( trans, repository, datatypes, invalid_tools, missing_repository_dependencies, missing_tool_dependencies,
readme_files_dict, repository_dependencies, tool_dependencies, valid_tools, workflows, valid_data_managers,
invalid_data_managers, data_managers_errors, new_install=False, reinstalling=False ):
"""Return a dictionary of containers for the received repository's dependencies and readme files for display during installation to Galaxy."""
containers_dict = dict( datatypes=None,
invalid_tools=None,
missing_tool_dependencies=None,
readme_files=None,
repository_dependencies=None,
missing_repository_dependencies=None,
tool_dependencies=None,
valid_tools=None,
workflows=None,
valid_data_managers=None,
invalid_data_managers=None )
# Some of the tool dependency folders will include links to display tool dependency information, and some of these links require the repository
# id. However we need to be careful because sometimes the repository object is None.
if repository:
repository_id = repository.id
changeset_revision = repository.changeset_revision
else:
repository_id = None
changeset_revision = None
lock = threading.Lock()
lock.acquire( True )
try:
folder_id = 0
# Datatypes container.
if datatypes:
folder_id, datatypes_root_folder = build_datatypes_folder( trans, folder_id, datatypes )
containers_dict[ 'datatypes' ] = datatypes_root_folder
# Invalid tools container.
if invalid_tools:
folder_id, invalid_tools_root_folder = build_invalid_tools_folder( trans,
folder_id,
invalid_tools,
changeset_revision,
repository=repository,
label='Invalid tools' )
containers_dict[ 'invalid_tools' ] = invalid_tools_root_folder
# Readme files container.
if readme_files_dict:
folder_id, readme_files_root_folder = build_readme_files_folder( trans, folder_id, readme_files_dict )
containers_dict[ 'readme_files' ] = readme_files_root_folder
# Installed repository dependencies container.
if repository_dependencies:
if new_install:
label = 'Repository dependencies'
else:
label = 'Installed repository dependencies'
folder_id, repository_dependencies_root_folder = build_repository_dependencies_folder( trans=trans,
folder_id=folder_id,
repository_dependencies=repository_dependencies,
label=label,
installed=True )
containers_dict[ 'repository_dependencies' ] = repository_dependencies_root_folder
# Missing repository dependencies container.
if missing_repository_dependencies:
folder_id, missing_repository_dependencies_root_folder = \
build_repository_dependencies_folder( trans=trans,
folder_id=folder_id,
repository_dependencies=missing_repository_dependencies,
label='Missing repository dependencies',
installed=False )
containers_dict[ 'missing_repository_dependencies' ] = missing_repository_dependencies_root_folder
# Installed tool dependencies container.
if tool_dependencies:
if new_install:
label = 'Tool dependencies'
else:
label = 'Installed tool dependencies'
# We only want to display the Status column if the tool_dependency is missing.
folder_id, tool_dependencies_root_folder = build_tool_dependencies_folder( trans,
folder_id,
tool_dependencies,
label=label,
missing=False,
new_install=new_install,
reinstalling=reinstalling )
containers_dict[ 'tool_dependencies' ] = tool_dependencies_root_folder
# Missing tool dependencies container.
if missing_tool_dependencies:
# We only want to display the Status column if the tool_dependency is missing.
folder_id, missing_tool_dependencies_root_folder = build_tool_dependencies_folder( trans,
folder_id,
missing_tool_dependencies,
label='Missing tool dependencies',
missing=True,
new_install=new_install,
reinstalling=reinstalling )
containers_dict[ 'missing_tool_dependencies' ] = missing_tool_dependencies_root_folder
# Valid tools container.
if valid_tools:
folder_id, valid_tools_root_folder = build_tools_folder( trans,
folder_id,
valid_tools,
repository,
changeset_revision,
label='Valid tools' )
containers_dict[ 'valid_tools' ] = valid_tools_root_folder
# Workflows container.
if workflows:
folder_id, workflows_root_folder = build_workflows_folder( trans=trans,
folder_id=folder_id,
workflows=workflows,
repository_metadata_id=None,
repository_id=repository_id,
label='Workflows' )
containers_dict[ 'workflows' ] = workflows_root_folder
if valid_data_managers:
folder_id, valid_data_managers_root_folder = build_data_managers_folder( trans=trans,
folder_id=folder_id,
data_managers=valid_data_managers,
label='Valid Data Managers' )
containers_dict[ 'valid_data_managers' ] = valid_data_managers_root_folder
if invalid_data_managers or data_managers_errors:
folder_id, invalid_data_managers_root_folder = build_invalid_data_managers_folder( trans=trans,
folder_id=folder_id,
data_managers=invalid_data_managers,
error_messages=data_managers_errors,
label='Invalid Data Managers' )
containers_dict[ 'invalid_data_managers' ] = invalid_data_managers_root_folder
except Exception, e:
log.debug( "Exception in build_repository_containers_for_galaxy: %s" % str( e ) )
finally:
lock.release()
return containers_dict
def build_repository_containers_for_tool_shed( trans, repository, changeset_revision, repository_dependencies, repository_metadata ):
"""Return a dictionary of containers for the received repository's dependencies and contents for display in the tool shed."""
containers_dict = dict( datatypes=None,
invalid_tools=None,
readme_files=None,
repository_dependencies=None,
tool_dependencies=None,
valid_tools=None,
workflows=None,
valid_data_managers=None
)
if repository_metadata:
metadata = repository_metadata.metadata
tool_test_results = repository_metadata.tool_test_results
try:
time_last_tested = time_ago( repository_metadata.time_last_tested )
except:
time_last_tested = None
lock = threading.Lock()
lock.acquire( True )
try:
folder_id = 0
# Datatypes container.
if metadata:
if 'datatypes' in metadata:
datatypes = metadata[ 'datatypes' ]
folder_id, datatypes_root_folder = build_datatypes_folder( trans, folder_id, datatypes )
containers_dict[ 'datatypes' ] = datatypes_root_folder
# Invalid repository dependencies container.
if metadata:
if 'invalid_repository_dependencies' in metadata:
invalid_repository_dependencies = metadata[ 'invalid_repository_dependencies' ]
folder_id, invalid_repository_dependencies_root_folder = \
build_invalid_repository_dependencies_root_folder( trans,
folder_id,
invalid_repository_dependencies )
containers_dict[ 'invalid_repository_dependencies' ] = invalid_repository_dependencies_root_folder
# Invalid tool dependencies container.
if metadata:
if 'invalid_tool_dependencies' in metadata:
invalid_tool_dependencies = metadata[ 'invalid_tool_dependencies' ]
folder_id, invalid_tool_dependencies_root_folder = \
build_invalid_tool_dependencies_root_folder( trans,
folder_id,
invalid_tool_dependencies )
containers_dict[ 'invalid_tool_dependencies' ] = invalid_tool_dependencies_root_folder
# Invalid tools container.
if metadata:
if 'invalid_tools' in metadata:
invalid_tool_configs = metadata[ 'invalid_tools' ]
folder_id, invalid_tools_root_folder = build_invalid_tools_folder( trans,
folder_id,
invalid_tool_configs,
changeset_revision,
repository=repository,
label='Invalid tools' )
containers_dict[ 'invalid_tools' ] = invalid_tools_root_folder
# Readme files container.
if metadata:
if 'readme_files' in metadata:
readme_files_dict = readme_util.build_readme_files_dict( metadata )
folder_id, readme_files_root_folder = build_readme_files_folder( trans, folder_id, readme_files_dict )
containers_dict[ 'readme_files' ] = readme_files_root_folder
# Repository dependencies container.
folder_id, repository_dependencies_root_folder = build_repository_dependencies_folder( trans=trans,
folder_id=folder_id,
repository_dependencies=repository_dependencies,
label='Repository dependencies',
installed=False )
if repository_dependencies_root_folder:
containers_dict[ 'repository_dependencies' ] = repository_dependencies_root_folder
# Tool dependencies container.
if metadata:
if 'tool_dependencies' in metadata:
tool_dependencies = metadata[ 'tool_dependencies' ]
if trans.webapp.name == 'tool_shed':
if 'orphan_tool_dependencies' in metadata:
orphan_tool_dependencies = metadata[ 'orphan_tool_dependencies' ]
tool_dependencies = add_orphan_settings_to_tool_dependencies( tool_dependencies, orphan_tool_dependencies )
folder_id, tool_dependencies_root_folder = build_tool_dependencies_folder( trans,
folder_id,
tool_dependencies,
missing=False,
new_install=False )
containers_dict[ 'tool_dependencies' ] = tool_dependencies_root_folder
# Valid tools container.
if metadata:
if 'tools' in metadata:
valid_tools = metadata[ 'tools' ]
folder_id, valid_tools_root_folder = build_tools_folder( trans,
folder_id,
valid_tools,
repository,
changeset_revision,
label='Valid tools' )
containers_dict[ 'valid_tools' ] = valid_tools_root_folder
# Tool test results container.
if tool_test_results and len( tool_test_results ) > 1:
# Only create and populate this folder if there are actual tool test results to display, since the display of the 'Test environment'
# folder by itself can be misleading. We check for more than a single entry in the tool_test_results dictionary because it may have
# only the "test_environment" entry, but we want at least 1 of "passed_tests", "failed_tests", "installation_errors", "missing_test_components"
# "skipped_tests", "not_tested" or any other entry that may be added in the future.
folder_id, tool_test_results_root_folder = build_tool_test_results_folder( trans, folder_id, tool_test_results, time_last_tested=time_last_tested )
containers_dict[ 'tool_test_results' ] = tool_test_results_root_folder
# Workflows container.
if metadata:
if 'workflows' in metadata:
workflows = metadata[ 'workflows' ]
folder_id, workflows_root_folder = build_workflows_folder( trans=trans,
folder_id=folder_id,
workflows=workflows,
repository_metadata_id=repository_metadata.id,
repository_id=None,
label='Workflows' )
containers_dict[ 'workflows' ] = workflows_root_folder
# Valid Data Managers container
if metadata:
if 'data_manager' in metadata:
data_managers = metadata['data_manager'].get( 'data_managers', None )
folder_id, data_managers_root_folder = build_data_managers_folder( trans, folder_id, data_managers, label="Data Managers" )
containers_dict[ 'valid_data_managers' ] = data_managers_root_folder
error_messages = metadata['data_manager'].get( 'error_messages', None )
data_managers = metadata['data_manager'].get( 'invalid_data_managers', None )
folder_id, data_managers_root_folder = build_invalid_data_managers_folder( trans, folder_id, data_managers, error_messages, label="Invalid Data Managers" )
containers_dict[ 'invalid_data_managers' ] = data_managers_root_folder
except Exception, e:
log.debug( "Exception in build_repository_containers_for_tool_shed: %s" % str( e ) )
finally:
lock.release()
return containers_dict
def build_repository_dependencies_folder( trans, folder_id, repository_dependencies, label='Repository dependencies', installed=False ):
"""Return a folder hierarchy containing repository dependencies."""
if repository_dependencies:
repository_dependency_id = 0
folder_id += 1
# Create the root folder.
repository_dependencies_root_folder = Folder( id=folder_id, key='root', label='root', parent=None )
folder_id += 1
# Create the Repository dependencies folder and add it to the root folder.
repository_dependencies_folder_key = repository_dependencies[ 'root_key' ]
repository_dependencies_folder = Folder( id=folder_id, key=repository_dependencies_folder_key, label=label, parent=repository_dependencies_root_folder )
del repository_dependencies[ 'root_key' ]
# The received repository_dependencies is a dictionary with keys: 'root_key', 'description', and one or more repository_dependency keys.
# We want the description value associated with the repository_dependencies_folder.
repository_dependencies_folder.description = repository_dependencies.get( 'description', None )
repository_dependencies_root_folder.folders.append( repository_dependencies_folder )
del repository_dependencies[ 'description' ]
repository_dependencies_folder, folder_id, repository_dependency_id = \
populate_repository_dependencies_container( trans, repository_dependencies_folder, repository_dependencies, folder_id, repository_dependency_id )
repository_dependencies_folder = prune_repository_dependencies( repository_dependencies_folder )
else:
repository_dependencies_root_folder = None
return folder_id, repository_dependencies_root_folder
def build_tools_folder( trans, folder_id, tool_dicts, repository, changeset_revision, valid=True, label='Valid tools' ):
"""Return a folder hierarchy containing valid tools."""
if tool_dicts:
tool_id = 0
folder_id += 1
tools_root_folder = Folder( id=folder_id, key='root', label='root', parent=None )
folder_id += 1
folder = Folder( id=folder_id, key='tools', label=label, parent=tools_root_folder )
if trans.webapp.name == 'galaxy':
folder.description = 'click the name to inspect the tool metadata'
tools_root_folder.folders.append( folder )
# Insert a header row.
tool_id += 1
tool = Tool( id=tool_id,
tool_config='',
tool_id='',
name='Name',
description='Description',
version='Version',
requirements='',
repository_id='',
changeset_revision='' )
folder.valid_tools.append( tool )
if repository:
repository_id = repository.id
if trans.webapp.name == 'galaxy':
repository_installation_status = repository.status
else:
repository_installation_status = None
else:
repository_id = None
repository_installation_status = None
for tool_dict in tool_dicts:
tool_id += 1
if 'requirements' in tool_dict:
requirements = tool_dict[ 'requirements' ]
requirements_str = ''
for requirement_dict in requirements:
requirements_str += '%s (%s), ' % ( requirement_dict[ 'name' ], requirement_dict[ 'type' ] )
requirements_str = requirements_str.rstrip( ', ' )
else:
requirements_str = 'none'
tool = Tool( id=tool_id,
tool_config=tool_dict[ 'tool_config' ],
tool_id=tool_dict[ 'id' ],
name=tool_dict[ 'name' ],
description=tool_dict[ 'description' ],
version=tool_dict[ 'version' ],
requirements=requirements_str,
repository_id=repository_id,
changeset_revision=changeset_revision,
repository_installation_status=repository_installation_status )
folder.valid_tools.append( tool )
else:
tools_root_folder = None
return folder_id, tools_root_folder
def build_tool_dependencies_folder( trans, folder_id, tool_dependencies, label='Tool dependencies', missing=False, new_install=False, reinstalling=False ):
"""Return a folder hierarchy containing tool dependencies."""
# When we're in Galaxy (not the tool shed) and the tool dependencies are not installed or are in an error state, they are considered missing. The tool
# dependency status will be displayed only if a record exists for the tool dependency in the Galaxy database, but the tool dependency is not installed.
    # The value for new_install will be True only if the associated repository is being installed for the first time. This value is used in setting the
# container description.
if tool_dependencies:
tool_dependency_id = 0
folder_id += 1
tool_dependencies_root_folder = Folder( id=folder_id, key='root', label='root', parent=None )
folder_id += 1
folder = Folder( id=folder_id, key='tool_dependencies', label=label, parent=tool_dependencies_root_folder )
if trans.webapp.name == 'galaxy':
if new_install or reinstalling:
folder.description = "repository tools require handling of these dependencies"
elif missing and not new_install and not reinstalling:
folder.description = 'click the name to install the missing dependency'
else:
folder.description = 'click the name to browse the dependency installation directory'
tool_dependencies_root_folder.folders.append( folder )
# Insert a header row.
tool_dependency_id += 1
if trans.webapp.name == 'galaxy':
# Include the installation directory.
tool_dependency = ToolDependency( id=tool_dependency_id,
name='Name',
version='Version',
type='Type',
install_dir='Install directory',
readme=None,
installation_status='Installation status',
repository_id=None,
tool_dependency_id=None,
is_orphan=None )
else:
tool_dependency = ToolDependency( id=tool_dependency_id,
name='Name',
version='Version',
type='Type',
install_dir=None,
readme=None,
installation_status=None,
repository_id=None,
tool_dependency_id=None,
is_orphan='Orphan' )
folder.tool_dependencies.append( tool_dependency )
is_orphan_description = "these dependencies may not be required by tools in this repository"
for dependency_key, requirements_dict in tool_dependencies.items():
tool_dependency_id += 1
if dependency_key in [ 'set_environment' ]:
for set_environment_dict in requirements_dict:
if trans.webapp.name == 'tool_shed':
is_orphan = set_environment_dict.get( 'is_orphan', False )
else:
                        # TODO: handle this in Galaxy
is_orphan = False
if is_orphan:
folder.description = is_orphan_description
name = set_environment_dict.get( 'name', None )
type = set_environment_dict[ 'type' ]
repository_id = set_environment_dict.get( 'repository_id', None )
td_id = set_environment_dict.get( 'tool_dependency_id', None )
if trans.webapp.name == 'galaxy':
installation_status = set_environment_dict.get( 'status', 'Never installed' )
else:
installation_status = None
tool_dependency = ToolDependency( id=tool_dependency_id,
name=name,
version=None,
type=type,
install_dir=None,
readme=None,
installation_status=installation_status,
repository_id=repository_id,
tool_dependency_id=td_id,
is_orphan=is_orphan )
folder.tool_dependencies.append( tool_dependency )
else:
if trans.webapp.name == 'tool_shed':
is_orphan = requirements_dict.get( 'is_orphan', False )
else:
                    # TODO: handle this in Galaxy
is_orphan = False
if is_orphan:
folder.description = is_orphan_description
name = requirements_dict[ 'name' ]
version = requirements_dict[ 'version' ]
type = requirements_dict[ 'type' ]
install_dir = requirements_dict.get( 'install_dir', None )
repository_id = requirements_dict.get( 'repository_id', None )
td_id = requirements_dict.get( 'tool_dependency_id', None )
if trans.webapp.name == 'galaxy':
installation_status = requirements_dict.get( 'status', 'Never installed' )
else:
installation_status = None
tool_dependency = ToolDependency( id=tool_dependency_id,
name=name,
version=version,
type=type,
install_dir=install_dir,
readme=None,
installation_status=installation_status,
repository_id=repository_id,
tool_dependency_id=td_id,
is_orphan=is_orphan )
folder.tool_dependencies.append( tool_dependency )
else:
tool_dependencies_root_folder = None
return folder_id, tool_dependencies_root_folder
def build_tool_test_results_folder( trans, folder_id, tool_test_results_dict, label='Tool test results', time_last_tested=None ):
"""Return a folder hierarchy containing tool dependencies."""
# This container is displayed only in the tool shed.
if tool_test_results_dict:
folder_id += 1
tool_test_results_root_folder = Folder( id=folder_id, key='root', label='root', parent=None )
test_environment_dict = tool_test_results_dict.get( 'test_environment', None )
if test_environment_dict:
folder_id += 1
test_results_folder = Folder( id=folder_id, key='test_results', label=label, parent=tool_test_results_root_folder )
tool_test_results_root_folder.folders.append( test_results_folder )
folder_id += 1
folder = Folder( id=folder_id, key='test_environment', label='Automated test environment', parent=test_results_folder )
test_results_folder.folders.append( folder )
test_environment = TestEnvironment( id=1,
architecture=test_environment_dict.get( 'architecture', '' ),
galaxy_database_version=test_environment_dict.get( 'galaxy_database_version', '' ),
galaxy_revision=test_environment_dict.get( 'galaxy_revision', '' ),
python_version=test_environment_dict.get( 'python_version', '' ),
system=test_environment_dict.get( 'system', '' ),
time_last_tested=time_last_tested,
tool_shed_database_version=test_environment_dict.get( 'tool_shed_database_version', '' ),
tool_shed_mercurial_version=test_environment_dict.get( 'tool_shed_mercurial_version', '' ),
tool_shed_revision=test_environment_dict.get( 'tool_shed_revision', '' ) )
folder.test_environments.append( test_environment )
not_tested_dict = tool_test_results_dict.get( 'not_tested', {} )
if not_tested_dict:
folder_id += 1
folder = Folder( id=folder_id, key='not_tested', label='Not tested', parent=test_results_folder )
test_results_folder.folders.append( folder )
not_tested_id = 0
not_tested = NotTested( id=not_tested_id,
reason=not_tested_dict.get( 'reason', '' ) )
folder.not_tested.append( not_tested )
passed_tests_dicts = tool_test_results_dict.get( 'passed_tests', [] )
if passed_tests_dicts:
folder_id += 1
folder = Folder( id=folder_id, key='passed_tests', label='Tests that passed successfully', parent=test_results_folder )
test_results_folder.folders.append( folder )
passed_test_id = 0
for passed_tests_dict in passed_tests_dicts:
passed_test_id += 1
passed_test = PassedTest( id=passed_test_id,
                                          test_id=passed_tests_dict.get( 'test_id', '' ),
tool_id=passed_tests_dict.get( 'tool_id', '' ),
tool_version=passed_tests_dict.get( 'tool_version', '' ) )
folder.passed_tests.append( passed_test )
failed_tests_dicts = tool_test_results_dict.get( 'failed_tests', [] )
if failed_tests_dicts:
folder_id += 1
folder = Folder( id=folder_id, key='failed_tests', label='Tests that failed', parent=test_results_folder )
test_results_folder.folders.append( folder )
failed_test_id = 0
for failed_tests_dict in failed_tests_dicts:
failed_test_id += 1
failed_test = FailedTest( id=failed_test_id,
stderr=failed_tests_dict.get( 'stderr', '' ),
test_id=failed_tests_dict.get( 'test_id', '' ),
tool_id=failed_tests_dict.get( 'tool_id', '' ),
tool_version=failed_tests_dict.get( 'tool_version', '' ),
traceback=failed_tests_dict.get( 'traceback', '' ) )
folder.failed_tests.append( failed_test )
missing_test_components_dicts = tool_test_results_dict.get( 'missing_test_components', [] )
if missing_test_components_dicts:
folder_id += 1
folder = Folder( id=folder_id, key='missing_test_components', label='Tools missing tests or test data', parent=test_results_folder )
test_results_folder.folders.append( folder )
missing_test_component_id = 0
for missing_test_components_dict in missing_test_components_dicts:
missing_test_component_id += 1
missing_test_component = MissingTestComponent( id=missing_test_component_id,
missing_components=missing_test_components_dict.get( 'missing_components', '' ),
tool_guid=missing_test_components_dict.get( 'tool_guid', '' ),
tool_id=missing_test_components_dict.get( 'tool_id', '' ),
tool_version=missing_test_components_dict.get( 'tool_version', '' ) )
folder.missing_test_components.append( missing_test_component )
installation_error_dicts = tool_test_results_dict.get( 'installation_errors', {} )
if installation_error_dicts:
current_repository_errors = installation_error_dicts.get( 'current_repository', [] )
repository_dependency_errors = installation_error_dicts.get( 'repository_dependencies', [] )
tool_dependency_errors = installation_error_dicts.get( 'tool_dependencies', [] )
if current_repository_errors or repository_dependency_errors or tool_dependency_errors:
folder_id += 1
installation_error_base_folder = Folder( id=folder_id,
key='installation_errors',
label='Installation errors',
parent=test_results_folder )
if current_repository_errors:
folder_id += 1
subfolder = Folder( id=folder_id,
key='current_repository_errors',
label='This repository',
parent=installation_error_base_folder )
repository_error_id = 0
for repository_error_dict in current_repository_errors:
repository_error_id += 1
repository_installation_error = RepositoryInstallationError( id=repository_error_id,
tool_shed=repository_error_dict.get( 'tool_shed', '' ),
name=repository_error_dict.get( 'name', '' ),
owner=repository_error_dict.get( 'owner', '' ),
changeset_revision=repository_error_dict.get( 'changeset_revision', '' ),
error_message=repository_error_dict.get( 'error_message', '' ) )
subfolder.current_repository_installation_errors.append( repository_installation_error )
installation_error_base_folder.folders.append( subfolder )
if repository_dependency_errors:
folder_id += 1
subfolder = Folder( id=folder_id,
key='repository_dependency_errors',
label='Repository dependencies',
parent=installation_error_base_folder )
repository_error_id = 0
for repository_error_dict in repository_dependency_errors:
repository_error_id += 1
repository_installation_error = RepositoryInstallationError( id=repository_error_id,
tool_shed=repository_error_dict.get( 'tool_shed', '' ),
name=repository_error_dict.get( 'name', '' ),
owner=repository_error_dict.get( 'owner', '' ),
changeset_revision=repository_error_dict.get( 'changeset_revision', '' ),
error_message=repository_error_dict.get( 'error_message', '' ) )
subfolder.repository_installation_errors.append( repository_installation_error )
installation_error_base_folder.folders.append( subfolder )
if tool_dependency_errors:
folder_id += 1
subfolder = Folder( id=folder_id,
key='tool_dependency_errors',
label='Tool dependencies',
parent=installation_error_base_folder )
tool_dependency_error_id = 0
for tool_dependency_error_dict in tool_dependency_errors:
tool_dependency_error_id += 1
tool_dependency_installation_error = ToolDependencyInstallationError( id=tool_dependency_error_id,
type=tool_dependency_error_dict.get( 'type', '' ),
name=tool_dependency_error_dict.get( 'name', '' ),
version=tool_dependency_error_dict.get( 'version', '' ),
error_message=tool_dependency_error_dict.get( 'error_message', '' ) )
subfolder.tool_dependency_installation_errors.append( tool_dependency_installation_error )
installation_error_base_folder.folders.append( subfolder )
test_results_folder.installation_errors.append( installation_error_base_folder )
else:
tool_test_results_root_folder = None
return folder_id, tool_test_results_root_folder
def build_workflows_folder( trans, folder_id, workflows, repository_metadata_id=None, repository_id=None, label='Workflows' ):
"""
Return a folder hierarchy containing workflow objects for each workflow dictionary in the received workflows list. When
this method is called from the tool shed, repository_metadata_id will have a value and repository_id will be None. When
    this method is called from Galaxy, repository_id will have a value only if the repository is not currently being installed
and repository_metadata_id will be None.
"""
if workflows:
workflow_id = 0
folder_id += 1
workflows_root_folder = Folder( id=folder_id, key='root', label='root', parent=None )
folder_id += 1
folder = Folder( id=folder_id, key='workflows', label=label, parent=workflows_root_folder )
workflows_root_folder.folders.append( folder )
# Insert a header row.
workflow_id += 1
workflow = Workflow( id=workflow_id,
workflow_name='Name',
steps='steps',
format_version='format-version',
annotation='annotation',
repository_metadata_id=repository_metadata_id,
repository_id=repository_id )
folder.workflows.append( workflow )
for workflow_tup in workflows:
            workflow_dict = workflow_tup[ 1 ]
steps = workflow_dict.get( 'steps', [] )
if steps:
steps = str( len( steps ) )
else:
steps = 'unknown'
workflow_id += 1
workflow = Workflow( id=workflow_id,
workflow_name=workflow_dict.get( 'name', '' ),
steps=steps,
format_version=workflow_dict.get( 'format-version', '' ),
annotation=workflow_dict.get( 'annotation', '' ),
repository_metadata_id=repository_metadata_id,
repository_id=repository_id )
folder.workflows.append( workflow )
else:
workflows_root_folder = None
return folder_id, workflows_root_folder
def cast_empty_repository_dependency_folders( folder, repository_dependency_id ):
"""
Change any empty folders contained within the repository dependencies container into a repository dependency since it has no repository dependencies
    of its own. This method is not used (and may not be needed), but here it is just in case.
"""
if not folder.folders and not folder.repository_dependencies:
repository_dependency_id += 1
repository_dependency = folder.to_repository_dependency( repository_dependency_id )
if not folder.parent.contains_repository_dependency( repository_dependency ):
folder.parent.repository_dependencies.append( repository_dependency )
folder.parent.folders.remove( folder )
for sub_folder in folder.folders:
return cast_empty_repository_dependency_folders( sub_folder, repository_dependency_id )
return folder, repository_dependency_id
def generate_repository_dependencies_folder_label_from_key( repository_name, repository_owner, changeset_revision, prior_installation_required, key ):
"""Return a repository dependency label based on the repository dependency key."""
if key_is_current_repositorys_key( repository_name, repository_owner, changeset_revision, prior_installation_required, key ):
label = 'Repository dependencies'
else:
if prior_installation_required:
prior_installation_required_str = " <i>(prior install required)</i>"
else:
prior_installation_required_str = ""
label = "Repository <b>%s</b> revision <b>%s</b> owned by <b>%s</b>%s" % \
( repository_name, changeset_revision, repository_owner, prior_installation_required_str )
return label
def generate_repository_dependencies_key_for_repository( toolshed_base_url, repository_name, repository_owner, changeset_revision, prior_installation_required ):
# FIXME: assumes tool shed is current tool shed since repository dependencies across tool sheds is not yet supported.
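    # The generated key is simply the five components joined by STRSEP, in this order:
    # toolshed_base_url, repository_name, repository_owner, changeset_revision, prior_installation_required.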
return '%s%s%s%s%s%s%s%s%s' % ( str( toolshed_base_url ).rstrip( '/' ),
STRSEP,
str( repository_name ),
STRSEP,
str( repository_owner ),
STRSEP,
str( changeset_revision ),
STRSEP,
str( prior_installation_required ) )
def generate_tool_dependencies_key( name, version, type ):
return '%s%s%s%s%s' % ( str( name ), STRSEP, str( version ), STRSEP, str( type ) )
def get_folder( folder, key ):
if folder.key == key:
return folder
    for sub_folder in folder.folders:
        # Search every sub-folder, not only the first one.
        found = get_folder( sub_folder, key )
        if found:
            return found
    return None
def get_components_from_key( key ):
# FIXME: assumes tool shed is current tool shed since repository dependencies across tool sheds is not yet supported.
items = key.split( STRSEP )
toolshed_base_url = items[ 0 ]
repository_name = items[ 1 ]
repository_owner = items[ 2 ]
changeset_revision = items[ 3 ]
if len( items ) == 5:
prior_installation_required = asbool( str( items[ 4 ] ) )
return toolshed_base_url, repository_name, repository_owner, changeset_revision, prior_installation_required
else:
# For backward compatibility to the 12/20/12 Galaxy release we have to return the following, and callers must handle exceptions.
return toolshed_base_url, repository_name, repository_owner, changeset_revision
def handle_repository_dependencies_container_entry( trans, repository_dependencies_folder, rd_key, rd_value, folder_id, repository_dependency_id, folder_keys ):
try:
toolshed, repository_name, repository_owner, changeset_revision, prior_installation_required = get_components_from_key( rd_key )
except ValueError:
# For backward compatibility to the 12/20/12 Galaxy release, default prior_installation_required to False.
toolshed, repository_name, repository_owner, changeset_revision = get_components_from_key( rd_key )
prior_installation_required = False
folder = get_folder( repository_dependencies_folder, rd_key )
label = generate_repository_dependencies_folder_label_from_key( repository_name,
repository_owner,
changeset_revision,
prior_installation_required,
repository_dependencies_folder.key )
if folder:
if rd_key not in folder_keys:
folder_id += 1
sub_folder = Folder( id=folder_id, key=rd_key, label=label, parent=folder )
folder.folders.append( sub_folder )
else:
sub_folder = folder
else:
folder_id += 1
sub_folder = Folder( id=folder_id, key=rd_key, label=label, parent=repository_dependencies_folder )
repository_dependencies_folder.folders.append( sub_folder )
if trans.webapp.name == 'galaxy':
# Insert a header row.
repository_dependency_id += 1
repository_dependency = RepositoryDependency( id=repository_dependency_id,
repository_name='Name',
changeset_revision='Revision',
repository_owner='Owner',
installation_status='Installation status' )
# Insert the header row into the folder.
sub_folder.repository_dependencies.append( repository_dependency )
for repository_dependency in rd_value:
if trans.webapp.name == 'galaxy':
if len( repository_dependency ) == 6:
# Metadata should have been reset on this installed repository, but it wasn't.
tool_shed_repository_id = repository_dependency[ 4 ]
installation_status = repository_dependency[ 5 ]
tool_shed, name, owner, changeset_revision = repository_dependency[ 0:4 ]
# Default prior_installation_required to False.
prior_installation_required = False
repository_dependency = [ tool_shed, name, owner, changeset_revision, prior_installation_required ]
elif len( repository_dependency ) == 7:
# We have a repository dependency tuple that includes a prior_installation_required value.
tool_shed_repository_id = repository_dependency[ 5 ]
installation_status = repository_dependency[ 6 ]
repository_dependency = repository_dependency[ 0:5 ]
else:
tool_shed_repository_id = None
installation_status = 'unknown'
else:
tool_shed_repository_id = None
installation_status = None
can_create_dependency = not is_subfolder_of( sub_folder, repository_dependency )
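            # Do not add a plain dependency row when this dependency is already represented as a sub-folder of sub_folder.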
if can_create_dependency:
toolshed, repository_name, repository_owner, changeset_revision, prior_installation_required = \
suc.parse_repository_dependency_tuple( repository_dependency )
repository_dependency_id += 1
repository_dependency = RepositoryDependency( id=repository_dependency_id,
toolshed=toolshed,
repository_name=repository_name,
repository_owner=repository_owner,
changeset_revision=changeset_revision,
prior_installation_required=asbool( prior_installation_required ),
installation_status=installation_status,
tool_shed_repository_id=tool_shed_repository_id )
# Insert the repository_dependency into the folder.
sub_folder.repository_dependencies.append( repository_dependency )
return repository_dependencies_folder, folder_id, repository_dependency_id
def is_subfolder_of( folder, repository_dependency ):
toolshed, repository_name, repository_owner, changeset_revision, prior_installation_required = \
suc.parse_repository_dependency_tuple( repository_dependency )
key = generate_repository_dependencies_key_for_repository( toolshed, repository_name, repository_owner, changeset_revision, asbool( prior_installation_required ) )
for sub_folder in folder.folders:
if key == sub_folder.key:
return True
return False
def key_is_current_repositorys_key( repository_name, repository_owner, changeset_revision, prior_installation_required, key ):
try:
toolshed_base_url, key_name, key_owner, key_changeset_revision, key_prior_installation_required = get_components_from_key( key )
except ValueError:
# For backward compatibility to the 12/20/12 Galaxy release, default key_prior_installation_required to False.
toolshed_base_url, key_name, key_owner, key_changeset_revision = get_components_from_key( key )
key_prior_installation_required = False
return repository_name == key_name and \
repository_owner == key_owner and \
changeset_revision == key_changeset_revision and \
prior_installation_required == key_prior_installation_required
def populate_repository_dependencies_container( trans, repository_dependencies_folder, repository_dependencies, folder_id, repository_dependency_id ):
folder_keys = repository_dependencies.keys()
for key, value in repository_dependencies.items():
repository_dependencies_folder, folder_id, repository_dependency_id = \
handle_repository_dependencies_container_entry( trans, repository_dependencies_folder, key, value, folder_id, repository_dependency_id, folder_keys )
return repository_dependencies_folder, folder_id, repository_dependency_id
def print_folders( pad, folder ):
# For debugging...
pad_str = ''
for i in range( 1, pad ):
pad_str += ' '
print '%sid: %s key: %s' % ( pad_str, str( folder.id ), folder.key )
for repository_dependency in folder.repository_dependencies:
print ' %s%s' % ( pad_str, repository_dependency.listify )
for sub_folder in folder.folders:
print_folders( pad+5, sub_folder )
def prune_repository_dependencies( folder ):
"""
Since the object used to generate a repository dependencies container is a dictionary and not an odict() (it must be json-serialize-able), the
order in which the dictionary is processed to create the container sometimes results in repository dependency entries in a folder that also
    includes the repository dependency as a sub-folder (if the repository dependency has its own repository dependency). This method will remove
all repository dependencies from folder that are also sub-folders of folder.
"""
repository_dependencies = [ rd for rd in folder.repository_dependencies ]
for repository_dependency in repository_dependencies:
listified_repository_dependency = repository_dependency.listify
if is_subfolder_of( folder, listified_repository_dependency ):
repository_dependencies.remove( repository_dependency )
folder.repository_dependencies = repository_dependencies
for sub_folder in folder.folders:
return prune_repository_dependencies( sub_folder )
return folder
| StarcoderdataPython |
1795222 | <reponame>xUndero/noc
# -*- coding: utf-8 -*-
# ----------------------------------------------------------------------
# BeefCLI
# ----------------------------------------------------------------------
# Copyright (C) 2007-2018 The NOC Project
# See LICENSE for details
# ----------------------------------------------------------------------
# Python modules
from __future__ import absolute_import
import socket
# Third-party modules
import tornado.gen
import tornado.iostream
from tornado.concurrent import TracebackFuture
# NOC modules
from .base import CLI
from .telnet import TelnetIOStream
class BeefCLI(CLI):
name = "beef_cli"
default_port = 23
def create_iostream(self):
self.state = "notconnected"
sender, receiver = socket.socketpair()
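        # One end of the socket pair (wrapped in self.sender) is written with canned beef replies;
        # the other end (receiver) is read by the CLI machinery as if it were the real device connection.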
self.sender = tornado.iostream.IOStream(sender)
return BeefIOStream(receiver, self)
@tornado.gen.coroutine
def send(self, cmd):
# @todo: Apply encoding
cmd = str(cmd)
self.logger.debug("Send: %r", cmd)
if self.state != "prompt":
raise tornado.gen.Return() # Will be replied via reply_state
beef = self.script.request_beef()
gen = beef.iter_cli_reply(cmd[: -len(self.profile.command_submit)])
self.ioloop.add_callback(self.streamer, gen)
@tornado.gen.coroutine
def streamer(self, gen):
"""
Stream gen to sender
:param gen:
:return:
"""
try:
for reply in gen:
yield self.sender.write(reply)
yield
except KeyError:
# Propagate exception
self.sender.write(self.SYNTAX_ERROR_CODE)
yield
def set_state(self, state):
changed = self.state != state
super(BeefCLI, self).set_state(state)
# Force state enter reply
if changed:
self.ioloop.add_callback(self.reply_state, state)
@tornado.gen.coroutine
def reply_state(self, state):
"""
Spool state entry sequence
:param state:
:return:
"""
self.logger.debug("Replying '%s' state", state)
beef = self.script.request_beef()
for reply in beef.iter_fsm_state_reply(state):
self.sender.write(reply)
yield
def close(self):
self.sender.close()
self.sender = None
super(BeefCLI, self).close()
def send_pager_reply(self, data, match):
"""
Beef need no pagers
"""
self.collected_data += [data]
class BeefIOStream(TelnetIOStream):
def connect(self, *args, **kwargs):
"""
Always connected
:param args:
:param kwargs:
:return:
"""
future = self._connect_future = TracebackFuture()
# Force beef downloading
beef = self.cli.script.request_beef()
if not beef:
# Connection refused
self.close(exc_info=True)
return future
future.set_result(True)
# Start replying start state
self.cli.set_state("start")
self._add_io_state(self.io_loop.WRITE)
return future
def close(self):
self.socket.close()
self.socket = None
| StarcoderdataPython |
1681793 | <filename>scanpipe/tasks.py
# SPDX-License-Identifier: Apache-2.0
#
# http://nexb.com and https://github.com/nexB/scancode.io
# The ScanCode.io software is licensed under the Apache License version 2.0.
# Data generated with ScanCode.io is provided as-is without warranties.
# ScanCode is a trademark of nexB Inc.
#
# You may not use this software except in compliance with the License.
# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# Data Generated with ScanCode.io is provided on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. No content created from
# ScanCode.io should be considered or used as legal advice. Consult an Attorney
# for any legal advice.
#
# ScanCode.io is a free software code scanning tool from nexB Inc. and others.
# Visit https://github.com/nexB/scancode.io for support and download.
import logging
from django.apps import apps
logger = logging.getLogger(__name__)
def info(message, pk):
logger.info(f"Run[{pk}] {message}")
def get_run_instance(run_pk):
"""
Returns the run instance using the `run_pk`.
"""
run_model = apps.get_model("scanpipe", "Run")
return run_model.objects.get(pk=run_pk)
def report_failure(job, connection, type, value, traceback):
"""
This callback will be called when an exception is raised during the Job
execution but was not caught by the task itself.
"""
run = get_run_instance(run_pk=job.id)
run.set_task_ended(exitcode=1, output=f"value={value} trace={traceback}")
def execute_pipeline_task(run_pk):
info(f"Enter `execute_pipeline_task` Run.pk/Task.id={run_pk}", run_pk)
run = get_run_instance(run_pk)
project = run.project
run.reset_task_values()
run.set_scancodeio_version()
run.set_task_started(run_pk)
info(f'Run pipeline: "{run.pipeline_name}" on project: "{project.name}"', run_pk)
pipeline = run.make_pipeline_instance()
exitcode, output = pipeline.execute()
info("Update Run instance with exitcode, output, and end_date", run_pk)
run.set_task_ended(exitcode, output, refresh_first=True)
run.send_project_subscriptions()
if run.task_succeeded:
# We keep the temporary files available for debugging in case of error
project.clear_tmp_directory()
| StarcoderdataPython |
1794338 | <gh_stars>1-10
# Problem : https://www.hackerrank.com/challenges/py-set-add/problem
# Score : 10 points(MAX)
loops = input() # number of values that will be entered
grupo = [] # list used to collect those values
[grupo.append(input()) for i in range(int(loops))] # on each iteration, append the input word to grupo
print(len(set(grupo))) # turn the list into a set to drop duplicates, then print the set's size
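# The last three lines are equivalent to the more compact form:
#   print(len({input() for _ in range(int(loops))}))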
| StarcoderdataPython |
3259841 | <filename>scripts/lasso_1-regularisation_path.py
import os.path
import numpy as np
from numpy import linalg as la
import matplotlib.pyplot as plt
import sys
sys.path.append('..')
import invprob.sparse as sparse
from invprob.optim import fb_lasso
#########################################
# This is for production only
import importlib
from invprob import optim
importlib.reload(sparse)
importlib.reload(optim)
from invprob.optim import fb_lasso  # re-bind fb_lasso after reloading its module
#########################################
np.random.seed(seed=78) # Seed for np.random
dpi = 100 # Resolution for plotting (230 for small screen, 100 for large one)
plt.ion()
folder = "scripts/../output/L1_reg/"
# We start by defining the characteristics of the problem
data_size = 100
data_number = round(data_size / 2)
sparsity_level = 10
noise_level = 1e-2 * 0
# We define the main components of our problem
Phi = np.random.randn(data_number, data_size)
x0 = np.sign(sparse.randn(data_size, 1, sparsity_level))
noise = noise_level * np.random.randn(data_number, 1)
y = Phi@x0 + noise
# Let's compare the ground truth with the pseudo inverse solution
x_pinv = la.lstsq(Phi, y, rcond=None)[0]
_ = plt.figure(dpi=dpi)
sparse.stem(x0, "C0", "ground truth")
sparse.stem(x_pinv, "C1", "pinv solution")
plt.show()
# Let's compare the ground truth with the solution of the LASSO
# (computed with the Forward-Backward algorithm)
reg_param = 0.01
iter_nb = 40000
x_reg = fb_lasso(Phi, y, reg_param, iter_nb)
_ = plt.figure(dpi=dpi)
sparse.stem(x0, "C0", "ground truth")
sparse.stem(x_reg, "C1", "reg solution")
plt.show()
# We look at what happens during the iterations of the algorithm
x_reg, details = fb_lasso(Phi, y, reg_param, iter_nb, verbose=True)
plt.figure(dpi=dpi)
plt.title(r"Evolution of $f(x_n)$")
plt.plot(details.get("function_value"))
plt.figure(dpi=dpi)
plt.title(r"Evolution of supp$(x_n)$")
plt.plot(details.get("iterate_support"))
plt.show()
# Now we generate the regularization path
# Quite expensive in time depending on the parameters!
def compute_reg_path(Phi, y, reg_param_grid):
print("Computing the regularization path")
reg_path = np.empty((Phi.shape[1], 0), int)
x_ini = np.zeros((Phi.shape[1], 1))
for reg_param in reg_param_grid:
''' We choose the number of iterations to do depending on the reg_param.
This is a completely custom choice, it seems to work quite well
on random problems.
'''
if reg_param < 0.1:
iter_nb = 40000
elif reg_param < 1:
iter_nb = 4000
elif reg_param < 10:
iter_nb = 1000
else:
iter_nb = 200
# We use a warm restart approach:
# for each problem we use the solution of the previous problem
# as a starting point
x_reg = fb_lasso(Phi, y, reg_param, iter_nb, x_ini=x_ini)
x_ini = x_reg
reg_path = np.concatenate((reg_path, x_reg), axis=1)
return reg_path
regp_min = -2
regp_max = 2
regp_number = 200
reg_param_grid = np.round(np.logspace(regp_min, regp_max, regp_number), 3)[::-1]
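# Log-spaced grid from 10**regp_min to 10**regp_max, reversed so the path is computed from the
# most regularised problem down to the least regularised one (matches the warm-start scheme below).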
if os.path.isfile(folder + 'reg_path_noiseless.npy'):
reg_path = np.load(folder + 'reg_path_noiseless.npy')
if reg_path.shape[1] != regp_number: # Previous but different experiment
reg_path = compute_reg_path(Phi, y, reg_param_grid)
np.save(folder + 'reg_path_noiseless.npy', reg_path)
else:
reg_path = compute_reg_path(Phi, y, reg_param_grid)
np.save(folder + 'reg_path_noiseless.npy', reg_path)
# We save the reg path as many image files and as an animated gif
# This is the name under which we save the data
file_name = folder + 'reg_path_noiseless'
# We concatenate conveniently x0 and reg_path in such a way that for every frame
# we plot two signals: x0 and reg_path[param]
paths = np.stack((np.repeat(x0, regp_number, axis=1), reg_path),
axis=2)
# We chose a title for every frame we'll plot
title_grid = [r"Ground truth $x_0$ vs regularised solution $x_\lambda$ " +
"for $\lambda$=" + str(param) for param in reg_param_grid]
plt.ioff()
plt.figure(dpi=dpi)
options = {"animation": False, # What we wanna save and how
"frames": False,
"interval": 100,
"file_name": file_name}
sparse.save_stem_gif(paths, reg_param_grid, title_grid, options)
| StarcoderdataPython |
1678147 | <reponame>DataDog/datadog-sync-cli<gh_stars>1-10
# Unless explicitly stated otherwise all files in this repository are licensed
# under the 3-clause BSD style license (see LICENSE).
# This product includes software developed at Datadog (https://www.datadoghq.com/).
# Copyright 2019 Datadog, Inc.
from typing import Optional, List, Dict
from datadog_sync.utils.base_resource import BaseResource, ResourceConfig
from datadog_sync.utils.custom_client import CustomClient
class Dashboards(BaseResource):
resource_type = "dashboards"
resource_config = ResourceConfig(
resource_connections={
"monitors": ["widgets.definition.alert_id", "widgets.definition.widgets.definition.alert_id"],
"service_level_objectives": ["widgets.definition.slo_id", "widgets.definition.widgets.definition.slo_id"],
"roles": ["restricted_roles"],
},
base_path="/api/v1/dashboard",
excluded_attributes=["id", "author_handle", "author_name", "url", "created_at", "modified_at"],
)
# Additional Dashboards specific attributes
def get_resources(self, client: CustomClient) -> List[Dict]:
resp = client.get(self.resource_config.base_path).json()
return resp["dashboards"]
def import_resource(self, resource: Dict) -> None:
source_client = self.config.source_client
dashboard = source_client.get(self.resource_config.base_path + f"/{resource['id']}").json()
self.resource_config.source_resources[resource["id"]] = dashboard
def pre_resource_action_hook(self, _id, resource: Dict) -> None:
pass
def pre_apply_hook(self, resources: Dict[str, Dict]) -> Optional[list]:
pass
def create_resource(self, _id: str, resource: Dict) -> None:
destination_client = self.config.destination_client
resp = destination_client.post(self.resource_config.base_path, resource).json()
self.resource_config.destination_resources[_id] = resp
def update_resource(self, _id: str, resource: Dict) -> None:
destination_client = self.config.destination_client
resp = destination_client.put(
self.resource_config.base_path + f"/{self.resource_config.destination_resources[_id]['id']}", resource
).json()
self.resource_config.destination_resources[_id] = resp
def connect_id(self, key: str, r_obj: Dict, resource_to_connect: str) -> None:
super(Dashboards, self).connect_id(key, r_obj, resource_to_connect)
| StarcoderdataPython |
1725112 | """
Bio ontology to be used in the enrichment of SAA indices by Golden Agents.
"""
from rdflib import Dataset, Graph, Namespace
from rdflib import XSD, RDF, RDFS, OWL
from rdflib import URIRef, BNode, Literal
from rdfalchemy.rdfSubject import rdfSubject
from rdfalchemy.rdfsSubject import rdfsSubject
from rdfalchemy import rdfSingle, rdfMultiple, rdfContainer
bio = Namespace("http://purl.org/vocab/bio/0.1/")
sem = Namespace("http://semanticweb.cs.vu.nl/2009/11/sem/")
saa = Namespace("http://goldenagents.org/uva/SAA/ontology/")
#######
# BIO #
#######
class Event(rdfsSubject):
rdf_type = bio.Event, sem.Event
label = rdfMultiple(RDFS.label)
date = rdfSingle(bio.date)
followingEvent = rdfSingle(bio.followingEvent)
precedingEvent = rdfSingle(bio.precedingEvent)
hasTimeStamp = rdfSingle(sem.hasTimeStamp)
hasBeginTimeStamp = rdfSingle(sem.hasBeginTimeStamp)
hasEndTimeStamp = rdfSingle(sem.hasEndTimeStamp)
hasEarliestBeginTimeStamp = rdfSingle(sem.hasEarliestBeginTimeStamp)
hasLatestBeginTimeStamp = rdfSingle(sem.hasLatestBeginTimeStamp)
hasEarliestEndTimeStamp = rdfSingle(sem.hasEarliestEndTimeStamp)
hasLatestEndTimeStamp = rdfSingle(sem.hasLatestEndTimeStamp)
place = rdfSingle(bio.place) # multi-predicates?
witness = rdfMultiple(bio.witness)
spectator = rdfMultiple(bio.spectator)
parent = rdfMultiple(bio.parent)
hasActor = rdfMultiple(sem.hasActor, range_type=sem.Role)
comment = rdfSingle(RDFS.comment)
class IndividualEvent(Event):
rdf_type = bio.IndividualEvent, sem.Event
principal = rdfSingle(bio.principal)
label = rdfMultiple(RDFS.label)
class GroupEvent(Event):
rdf_type = bio.GroupEvent, sem.Event
partner = rdfMultiple(bio.partner)
label = rdfMultiple(RDFS.label)
class Birth(IndividualEvent):
rdf_type = bio.Birth, sem.Event
class Baptism(IndividualEvent):
rdf_type = bio.Baptism, sem.Event
class Burial(IndividualEvent):
rdf_type = bio.Burial, sem.Event
class Death(IndividualEvent):
rdf_type = bio.Death, sem.Event
class Marriage(GroupEvent):
rdf_type = bio.Marriage, sem.Event
class IntendedMarriage(GroupEvent):
rdf_type = saa.IntendedMarriage
hasDocument = rdfMultiple(saa.hasDocument)
class PrenuptialAgreement(GroupEvent):
rdf_type = saa.PrenuptialAgreement
#######
# SEM #
#######
class Role(rdfsSubject):
rdf_type = sem.Role
value = rdfSingle(RDF.value)
label = rdfMultiple(RDFS.label)
roleType = rdfSingle(sem.roleType)
class RoleType(rdfsSubject):
rdf_type = sem.RoleType
label = rdfMultiple(RDFS.label)
| StarcoderdataPython |
3394393 | # coding=utf-8
from .eth import ADDRESS as COINBASE_ADDRESS
from .eth import MAX_TX_TRY
from .eth import MIN_GAS
from .eth import PRIVATE_KEY as COINBASE_PRIVATE_KEY
from .mixer import ADDRESS as MIXER_ADDRESS
from .mixer import PRIVATE_KEY as MIXER_PRIVATE_KEY
from .referral import REFERRAL_DUMMY
from .referral import REFERRAL_URL
from .services import VPN_SERVICE
from .swaps import ADDRESS as SWAP_ADDRESS
from .swaps import BTC_BASED_COINS
from .swaps import ETHEREUM_BASED_COINS
from .swaps import FEE_PERCENTAGE
from .swaps import PRIVATE_KEY as SWAP_PRIVATE_KEY
from .swaps import TOKENS as SWAP_TOKENS
from .tokens import MAIN_TOKENS
from .tokens import RINKEBY_TOKENS
from .vars import DECIMALS
from .vars import LIMIT_100MB
from .vars import LIMIT_10MB
from .vars import SESSIONS_SALT
| StarcoderdataPython |
1718233 | <filename>a4/bert.py
import transformers
from transformers import BertModel, BertTokenizer, AdamW, get_linear_schedule_with_warmup
import torch
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix, classification_report
from torch import nn, optim
from torch.utils.data import Dataset, DataLoader
from bert_data_loader import create_data_loader
from train_eval import train_epoch, eval_model
if __name__ == "__main__":
df_train = pd.read_csv('/Users/wendyyyy/Cornell/CDS/IntSys-Education-master/a4/data/train.csv')
df_val = pd.read_csv('/Users/wendyyyy/Cornell/CDS/IntSys-Education-master/a4/data/val.csv')
df_test = pd.read_csv('/Users/wendyyyy/Cornell/CDS/IntSys-Education-master/a4/data/test.csv')
tokenizer = BertTokenizer.from_pretrained('bert-base-cased')
train_data_loader = create_data_loader(df_train[:10000], tokenizer, 160, 16)
val_data_loader = create_data_loader(df_val, tokenizer, 160, 16)
test_data_loader = create_data_loader(df_test, tokenizer, 160, 16)
class SentimentClassifier(nn.Module):
def __init__(self, n_classes):
super(SentimentClassifier, self).__init__()
self.bert = BertModel.from_pretrained('bert-base-cased')
self.drop = nn.Dropout(p=0.3)
self.out = nn.Linear(self.bert.config.hidden_size, n_classes)
def forward(self, input_ids, attention_mask):
_, pooled_output = self.bert(
input_ids=input_ids,
attention_mask=attention_mask,
return_dict=False
)
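            # With return_dict=False, BertModel returns (sequence_output, pooled_output);
            # pooled_output is the pooler-transformed [CLS] representation used for classification here.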
output = self.drop(pooled_output)
return self.out(output)
EPOCHS = 10
model = SentimentClassifier(5)
optimizer = AdamW(model.parameters(), lr=2e-5, correct_bias=False)
total_steps = len(train_data_loader) * EPOCHS
scheduler = get_linear_schedule_with_warmup(
optimizer,
num_warmup_steps=0,
num_training_steps=total_steps
)
loss_fn = nn.CrossEntropyLoss()
for epoch in range(EPOCHS):
train_acc, train_loss = train_epoch(
model,
train_data_loader,
loss_fn,
optimizer,
scheduler,
len(df_train)
)
val_acc, val_loss = eval_model(
model,
val_data_loader,
loss_fn,
len(df_val)
) | StarcoderdataPython |
4803024 | <filename>movie/forms.py
from dal import autocomplete
from django import forms
from movie.models import Movie
class MovieForm(forms.ModelForm):
title = autocomplete.Select2ListCreateChoiceField(widget=autocomplete.ListSelect2(url='movie:movie_title_autocomplete'))
class Meta:
model = Movie
fields = ('__all__')
| StarcoderdataPython |
3270026 | <filename>nails_project/core/mixins.py<gh_stars>0
class BootstrapFormMixin:
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._init_bootstrap_fields()
def _init_bootstrap_fields(self):
        for (field_name, field) in self.fields.items():
            if 'class' not in field.widget.attrs:
                field.widget.attrs['class'] = ''
            if field_name == "type" or field_name == "feedback":
field.widget.attrs['class'] = 'form-select'
else:
field.widget.attrs['class'] = 'form-control'
| StarcoderdataPython |
4833115 | import functools
import os
import sys
from datetime import timedelta
import dotenv
from loguru import logger
import copy
from novelsave.settings import config, console_formatter
def app() -> dict:
"""Initialize and return the configuration used by the base application"""
return copy.deepcopy(config)
def logger_config() -> dict:
return {
"handlers": [
{
"sink": sys.stdout,
"level": "TRACE",
"format": console_formatter,
"backtrace": True,
"diagnose": True,
},
{
"sink": config["config"]["dir"] / "logs" / "{time}.log",
"level": "TRACE",
"retention": "3 days",
"encoding": "utf-8",
},
]
}
def intenv(key: str, default: int) -> int:
try:
return int(os.getenv(key))
except (TypeError, ValueError):
return default
@functools.lru_cache()
def discord() -> dict:
"""Initialize and return discord configurations as a dict
    The bot token is read from the 'DISCORD_TOKEN' environment variable and returned under the 'key' entry.
"""
dotenv.load_dotenv()
discord_token = os.getenv("DISCORD_TOKEN")
if not discord_token:
logger.error("Required environment variable 'DISCORD_TOKEN' is not set.")
return {
"key": discord_token,
"session": {
"retain": timedelta(minutes=intenv("DISCORD_SESSION_TIMEOUT", 10)),
"threads": intenv("DISCORD_SESSION_THREADS", 5),
},
"search": {
"limit": intenv("DISCORD_SEARCH_LIMIT", 20),
"disabled": os.getenv("DISCORD_SEARCH_DISABLED", "no").lower(),
},
"cloud": {
"filehost": os.getenv("DISCORD_EXTERNAL_FILEHOST", "none").lower(),
},
}
| StarcoderdataPython |
52386 | <filename>ntc_rosetta/yang/__init__.py
import pathlib
from yangson.datamodel import DataModel
_DATAMODELS = {"openconfig": None, "ntc": None}
BASEPATH = pathlib.Path(__file__).parent
OPENCONFIG_LIB = f"{BASEPATH}/openconfig.json"
OPENCONFIG_PATH = [
BASEPATH.joinpath("YangModels/standard/ietf/RFC"),
BASEPATH.joinpath("openconfig/release/models"),
] + [
fname
for fname in BASEPATH.joinpath("openconfig/release/models").iterdir()
if fname.is_dir()
]
def _get_openconfig_data_model() -> DataModel:
return DataModel.from_file(OPENCONFIG_LIB, OPENCONFIG_PATH)
def _get_ntc_data_model() -> DataModel:
base = pathlib.Path(__file__).parent
lib = f"{base}/ntc-yang-models/models/ntc-models-library.json"
path = [
base.joinpath("ntc-yang-models/models/arp"),
base.joinpath("ntc-yang-models/models/ietf"),
base.joinpath("ntc-yang-models/models/system"),
base.joinpath("ntc-yang-models/models/types"),
base.joinpath("ntc-yang-models/models/vlan"),
base.joinpath("ntc-yang-models/models/vrf"),
]
return DataModel.from_file(lib, path)
def get_data_model(model: str = "openconfig") -> DataModel:
"""
Returns an instantiated data model.
"""
if model == "openconfig":
if _DATAMODELS["openconfig"] is None:
_DATAMODELS["openconfig"] = _get_openconfig_data_model()
return _DATAMODELS["openconfig"]
elif model == "ntc":
if _DATAMODELS["ntc"] is None:
_DATAMODELS["ntc"] = _get_ntc_data_model()
return _DATAMODELS["ntc"]
else:
raise ValueError(f"model {model} not recognized")
__all__ = ("get_data_model",)
| StarcoderdataPython |
42359 | from django.shortcuts import render, redirect
from django.http import HttpResponse
from .models import BoardMember
# Additions for Post
from django.core import serializers
from rest_framework.decorators import api_view, permission_classes, authentication_classes
from rest_framework.permissions import IsAuthenticated # check whether the user is logged in
from rest_framework_jwt.authentication import JSONWebTokenAuthentication # verify JWT authentication
from .models import Post
def register(request):
if request.method == 'GET':
return render(request, 'register.html')
elif request.method == 'POST':
username = request.POST.get('username', None)
password = request.POST['password']
email = request.POST.get('email', None)
res_data = {}
if not (username and email):
res_data['error'] = '모든 값을 입력해주세요.'
else:
member = BoardMember(
username = username,
password = password,
email = email,
)
            member.save() # save to the database
print("#####회원가입#####\nid: ", member.username, "\npw: ", member.password, "\nemail: ", member.email)
        return redirect('/') # redirect to another page
@api_view(['GET']) # accept only GET requests and return a JSON response
@permission_classes((IsAuthenticated, )) # check permission (only whether the user is logged in)
@authentication_classes((JSONWebTokenAuthentication,)) # verify the JWT token; return a JSON error if the token is invalid
def posts(request):
posts = Post.objects.filter(published_at__isnull=False).order_by('-published_at')
post_list = serializers.serialize('json', posts)
return HttpResponse(post_list, content_type="text/json-comment-filtered")
# Next task (pending)
# https://velog.io/@teddybearjung/Django-%EB%A1%9C-%EA%B2%8C%EC%8B%9C%ED%8C%90-%EB%A7%8C%EB%93%A4%EA%B8%B010.-Login-%ED%99%94%EB%A9%B4-templates-%EB%A7%8C%EB%93%A4%EA%B8%B0-login-%ED%95%A8%EC%88%98-%EC%9E%91%EC%84%B1 | StarcoderdataPython |
4807174 | <reponame>YanrongXu/Leetcode<filename>Binary Search/162. Find Peak Element.py
from typing import List

class Solution:
def findPeakElement(self, nums: List[int]) -> int:
        if not nums:
            return -1
start, end = 0, len(nums) - 1
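        # A peak is guaranteed inside [start, end]; step toward the rising side each iteration.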
while start + 1 < end:
mid = (start + end) // 2
if nums[mid] < nums[mid + 1]:
start = mid
else:
end = mid
if nums[start] > nums[end]:
return start
return end
| StarcoderdataPython |
3235463 | from scipy.io.wavfile import read
import os
import torch
import numpy as np
MAX_WAV_VALUE = 32768.0
def load_wav_to_torch(full_path):
"""
Loads wavdata into torch array
"""
sampling_rate, data = read(full_path)
return torch.FloatTensor(data.astype(np.float32)) / MAX_WAV_VALUE, sampling_rate
def files_to_list(filename):
"""
Takes a text file of filenames and makes a list of filenames
"""
with open(filename, encoding='utf-8') as f:
files = f.readlines()
files = [f.rstrip() for f in files]
return files
def to_gpu(x):
x = x.contiguous()
if torch.cuda.is_available():
x = x.cuda(non_blocking=True)
return torch.autograd.Variable(x)
class LossMeter():
def __init__(self, name, writer, log_per_step, init_step=0, auto_log=True):
self.name = name
self.writer = writer
self.log_per_step = log_per_step
self.step = init_step
self.auto_log = auto_log
self.loss = []
def add(self, loss):
self.step += 1
assert isinstance(loss, float), 'Loss must be float type'
self.loss.append(loss)
if self.auto_log and self.step % self.log_per_step == 0:
self.writer.add_scalar(self.name, self.mean(), self.step)
self.reset()
def reset(self):
self.loss = []
def mean(self):
return self.sum() / len(self.loss)
def sum(self):
return sum(self.loss)
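# Hypothetical usage sketch for LossMeter (not part of the original module). The
# writer only needs an add_scalar(name, value, step) method, e.g.
# torch.utils.tensorboard.SummaryWriter; a tiny stand-in keeps this self-contained.
if __name__ == "__main__":
    class _PrintWriter:
        def add_scalar(self, name, value, step):
            print("{} @ step {}: {:.4f}".format(name, step, value))
    meter = LossMeter("train/loss", _PrintWriter(), log_per_step=5)
    for i in range(10):
        meter.add(i * 0.1)  # the running mean is flushed every 5 steps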
| StarcoderdataPython |
104888 | <filename>test/transform/test_rotate.py<gh_stars>1-10
# -*- coding: utf-8 -*-
import os, sys
import numpy as np
from skimage import io
from os.path import dirname as opd
from os.path import abspath as opa
from os.path import join as opj
TEST_PATH = opa(opd(opd(__file__)))
PRJ_PATH = opd(TEST_PATH)
sys.path.insert(0, PRJ_PATH)
sys.path.insert(0, opj(PRJ_PATH, "pycontour"))
from pycontour.img import cnt_mask_img
from pycontour.transform import rotate_cnt
def test_mask_img():
img_path = os.path.join(TEST_PATH, "data/Imgs/20181218042607.jpg")
img = io.imread(img_path)
np_arr = np.array([[300, 600, 700, 500, 400], [300, 400, 500, 600, 500]])
rot_cnt = rotate_cnt(np_arr, 20)
masked_img1 = cnt_mask_img(img, np_arr)
masked_img2 = cnt_mask_img(img, rot_cnt)
import matplotlib.pylab as plt
plt.figure(1)
plt.subplot(211)
plt.imshow(masked_img1)
plt.subplot(212)
plt.imshow(masked_img2)
# plt.show()
| StarcoderdataPython |
162306 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Hive Appier Framework
# Copyright (c) 2008-2019 Hive Solutions Lda.
#
# This file is part of Hive Appier Framework.
#
# Hive Appier Framework is free software: you can redistribute it and/or modify
# it under the terms of the Apache License as published by the Apache
# Foundation, either version 2.0 of the License, or (at your option) any
# later version.
#
# Hive Appier Framework is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Apache License for more details.
#
# You should have received a copy of the Apache License along with
# Hive Appier Framework. If not, see <http://www.apache.org/licenses/>.
__version__ = "1.0.0"
""" The version of the module """
__revision__ = "$LastChangedRevision$"
""" The revision number of the module """
__date__ = "$LastChangedDate$"
""" The last change date of the module """
__copyright__ = "Copyright (c) 2008-2019 Hive Solutions Lda."
""" The copyright for the module """
__license__ = "Apache License, Version 2.0"
""" The license for the module """
from . import amqp
from . import api
from . import asynchronous
from . import base
from . import bus
from . import cache
from . import component
from . import compress
from . import config
from . import controller
from . import crypt
from . import data
from . import defines
from . import exceptions
from . import execution
from . import export
from . import extra
from . import geo
from . import git
from . import http
from . import legacy
from . import log
from . import meta
from . import mock
from . import model
from . import mongo
from . import observer
from . import part
from . import preferences
from . import queuing
from . import redisdb
from . import request
from . import scheduler
from . import serialize
from . import session
from . import settings
from . import smtp
from . import storage
from . import structures
from . import typesf
from . import util
from . import validation
from .amqp import AMQP
from .api import API, OAuthAPI, OAuth1API, OAuth2API
from .asynchronous import ASYNC_HEADER, AsyncManager, SimpleManager, QueueManager, AwaitWrapper, CoroutineWrapper,\
AyncgenWrapper, await_wrap, await_yield, ensure_generator, is_coroutine, is_coroutine_object, is_coroutine_native,\
to_coroutine, wrap_silent, unavailable, is_neo, Future, coroutine, wakeup, sleep, wait, notify, build_future,\
ensure_async, header_a, ensure_a
from .base import APP, LEVEL, NAME, VERSION, PLATFORM, IDENTIFIER_SHORT, IDENTIFIER_LONG, IDENTIFIER,\
API_VERSION, BUFFER_SIZE, MAX_LOG_SIZE, MAX_LOG_COUNT, App, APIApp, WebApp, Template, get_app, get_name,\
get_base_path, get_cache, get_preferences, get_bus, get_request, get_session, get_model, get_controller, get_part,\
get_adapter, get_manager, get_logger, get_level, is_loaded, is_devel, is_safe, to_locale, on_exit
from .bus import Bus, MemoryBus, RedisBus
from .cache import Cache, MemoryCache, FileCache, RedisCache, SerializedCache
from .component import Component
from .compress import Compress
from .config import conf, conf_prefix, conf_suffix, conf_s, conf_r, conf_d, conf_ctx
from .controller import Controller
from .crypt import Cipher, RC4, Spritz
from .data import DataAdapter, MongoAdapter, TinyAdapter, Collection, MongoCollection, TinyCollection
from .defines import ITERABLES, MOBILE_REGEX, TABLET_REGEX, MOBILE_PREFIX_REGEX, BODY_REGEX, TAG_REGEX,\
EMAIL_REGEX, BROWSER_INFO, OS_INFO, WINDOWS_LOCALE, SLUG_PERMUTATIONS
from .exceptions import AppierException, OperationalError, SecurityError, AssertionError,\
ValidationError, NotFoundError, NotImplementedError, BaseInternalError, ValidationInternalError,\
ValidationMultipleError, HTTPError, APIError, APIAccessError, OAuthAccessError
from .execution import ExecutionThread, background, insert_work, interval_work, seconds_work,\
minutes_work, hourly_work, daily_work, weekly_work, monthly_work, seconds_eval, minutes_eval,\
hourly_eval, daily_eval, weekly_eval, monthly_eval
from .export import ExportManager
from .extra import get_a, post_a, put_a, delete_a, patch_a, get_w, post_w, put_w, delete_w, patch_w
from .geo import GeoResolver
from .git import Git
from .http import file_g, get_f, get, post, put, delete, HTTPResponse
from .log import MemoryHandler, BaseFormatter, ThreadFormatter, DummyLogger, reload_format, rotating_handler,\
smtp_handler, in_signature
from .meta import Ordered, Indexed
from .mock import MockObject, MockResponse, MockApp
from .model import Model, LocalModel, Field, link, operation, view, field, type_d, is_unset
from .mongo import Mongo, MongoEncoder, get_connection, reset_connection, get_db, drop_db, object_id, dumps
from .observer import Observable
from .part import Part
from .preferences import Preferences, MemoryPreferences, FilePreferences, RedisPreferences
from .queuing import Queue, MemoryQueue, MultiprocessQueue, AMQPQueue
from .redisdb import Redis
from .request import CODE_STRINGS, Request, MockRequest
from .scheduler import Scheduler
from .serialize import serialize_csv, serialize_ics, build_encoder
from .session import Session, MockSession, MemorySession, FileSession, RedisSession, ClientSession
from .settings import DEBUG, USERNAME, PASSWORD
from .smtp import message, message_base, message_netius, smtp_engine, multipart, plain,\
html, header
from .storage import StorageEngine, BaseEngine, FsEngine
from .structures import OrderedDict, LazyDict, LazyValue, GeneratorFile, lazy_dict, lazy
from .typesf import AbstractType, Type, File, Files, ImageFile, ImageFiles, image, images, Reference,\
reference, References, references, Encrypted, encrypted, secure
from .util import is_iterable, is_mobile, is_tablet, is_browser, is_bot, browser_info, email_parts, email_mime,\
email_name, email_base, date_to_timestamp, obfuscate, import_pip, ensure_pip, install_pip, install_pip_s,\
request_json, get_context, get_object, resolve_alias, page_types, find_types, norm_object, set_object, leafs,\
gather_errors, gen_token, html_to_text, camel_to_underscore, camel_to_readable, underscore_to_readable, quote,\
unquote, escape, unescape, split_unescape, call_safe, base_name, base_name_m, is_content_type, parse_content_type,\
parse_cookie, parse_multipart, decode_params, load_form, check_login, check_user, check_token, check_tokens,\
ensure_login, get_tokens_m, to_tokens_m, dict_merge, deprecated, cached, private, ensure, delayed, route, error_handler,\
exception_handler, before_request, after_request, is_detached, sanitize, verify, verify_equal, verify_not_equal,\
verify_many, execute, ctx_locale, ctx_request, FileTuple, BaseThread, JSONEncoder
from .validation import validate, validate_b, validate_e, safe, eq, gt, gte, lt, lte, not_null, not_empty, not_false,\
is_in, is_upper, is_lower, is_simple, is_email, is_url, is_regex, field_eq, field_gt, field_gte, field_lt,\
field_lte, string_gt, string_lt, string_eq, equals, not_past, not_duplicate, all_different, no_self
from .amqp import get_connection as get_amqp
from .amqp import properties as properties_amqp
from .mongo import get_connection as get_mongo
from .mongo import get_db as get_mongo_db
from .mongo import drop_db as drop_mongo_db
from .mongo import object_id as object_id_mongo
from .mongo import dumps as dumps_mongo
from .mongo import serialize as serialize_mongo
from .mongo import directions as directions_mongo
from .redisdb import get_connection as get_redis
from .redisdb import dumps as dumps_redis
HTTPError = exceptions.HTTPError
| StarcoderdataPython |
34656 | <reponame>MatthiasValvekens/certvalidator
# coding: utf-8
import inspect
def type_name(value):
"""
Returns a user-readable name for the type of an object
:param value:
A value to get the type name of
:return:
A unicode string of the object's type name
"""
if inspect.isclass(value):
cls = value
else:
cls = value.__class__
if cls.__module__ in {'builtins', '__builtin__'}:
return cls.__name__
return '%s.%s' % (cls.__module__, cls.__name__)
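# Illustrative checks (not part of the original module): builtins are reported
# bare, everything else is qualified with its module.
if __name__ == "__main__":
    assert type_name(0) == 'int'
    assert type_name(int) == 'int'
    assert type_name(inspect.Signature()) == 'inspect.Signature'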
| StarcoderdataPython |
1758490 | from setuptools import find_packages, setup
setup(
name='IdobataPlugin', version='0.5',
packages=find_packages(exclude=['*.tests*']),
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/kompiro/trac-idobata-plugin',
description='Trac - Idobata integration',
platforms='all',
license='Apache License v2',
install_requires = ['Trac >= 1.0'],
entry_points = {
'trac.plugins': [
'idobata = idobata.notification',
],
},
)
| StarcoderdataPython |
54095 | <reponame>FabricExile/Kraken
"""Kraken - maths.euler module.
Classes:
Euler -- Euler rotation.
"""
import math
from kraken.core.kraken_system import ks
from kraken.core.maths.math_object import MathObject
from kraken.core.maths.mat33 import Mat33
from kraken.core.maths.rotation_order import RotationOrder
rotationOrderStrToIntMapping = {
'xyz': 0,
'XYZ': 0,
'yzx': 1,
'YZX': 1,
'zxy': 2,
'ZXY': 2,
'xzy': 3,
'XZY': 3,
'zyx': 4,
'ZYX': 4,
'yxz': 5,
'YXZ': 5
}
rotationOrderIntToStrMapping = [
'XYZ',
'YZX',
'ZXY',
'XZY',
'ZYX',
'YXZ'
]
class Euler(MathObject):
"""Euler rotation object."""
def __init__(self, x=None, y=None, z=None, ro=None):
"""Initialize values for x,y,z, and rotation order values."""
super(Euler, self).__init__()
if ks.getRTValTypeName(x) == 'Euler':
self._rtval = x
else:
if x is not None and not isinstance(x, (int, float)) and not isinstance(x, Euler):
raise TypeError("Euler: Invalid type for 'x' argument. \
Must be an int or float.")
if y is not None and not isinstance(y, (int, float)):
raise TypeError("Euler: Invalid type for 'y' argument. Must be \
an int or float.")
if z is not None and not isinstance(z, (int, float)):
raise TypeError("Euler: Invalid type for 'z' argument. Must be \
an int or float.")
if ro is not None:
if isinstance(ro, basestring) or isinstance(ro, (int)):
ro = RotationOrder(order=ro)
self._rtval = ks.rtVal('Euler')
if isinstance(x, Euler):
self.set(x=x.x, y=x.y, z=x.z, ro=x.ro)
elif x is not None and y is not None and z is not None:
if ro is not None:
self.set(x=x, y=y, z=z, ro=ro)
else:
self.set(x=x, y=y, z=z)
def __str__(self):
"""String representation of Euler object."""
return "Euler(x=" + str(self.x) + ", y=" + str(self.y) + ", z=" + str(self.z) + ", ro= '" + str(self.ro) + "')"
@property
def x(self):
"""X parameter property.
Returns:
float: Value of the X property.
"""
return self._rtval.x
@x.setter
def x(self, value):
"""X parameter setter.
Args:
value (float): X value of the Euler Angles.
"""
self._rtval.x = ks.rtVal('Scalar', value)
@property
def y(self):
"""Y parameter property.
Returns:
float: Value of the Y property.
"""
return self._rtval.y
@y.setter
def y(self, value):
"""Y parameter setter.
Args:
value (float): Y value of the Euler Angles.
"""
self._rtval.y = ks.rtVal('Scalar', value)
@property
def z(self):
"""Z parameter property.
Returns:
float: Value of the Z property.
"""
return self._rtval.z
@z.setter
def z(self, value):
"""Z parameter setter.
Args:
value (float): Z value of the Euler Angles.
"""
self._rtval.z = ks.rtVal('Scalar', value)
@property
def ro(self):
"""Rotation Order parameter property.
Returns:
object: Rotation Order of this Euler.
"""
return RotationOrder(self._rtval.ro)
@ro.setter
def ro(self, value):
"""Rotation Order setter.
Args:
value (int): Rotation Order(ro) value of the Euler Angles.
"""
self._rtval.ro = ks.rtVal('RotationOrder', value)
def clone(self):
"""Returns a clone of the Euler.
Returns:
Euler: The cloned Euler
"""
        euler = Euler()
        euler.x = self.x
        euler.y = self.y
        euler.z = self.z
        euler.ro = self.ro
        return euler
# Setter from scalar components
def set(self, x, y, z, ro=None):
"""Scalar component setter.
Args:
x (float): x angle in radians.
y (float): y angle in radians.
z (float): z angle in radians.
ro (int): the rotation order to use in the euler angles.
Returns:
bool: True if successful.
"""
if ro is None:
self._rtval.set('', ks.rtVal('Scalar', x), ks.rtVal('Scalar', y), ks.rtVal('Scalar', z))
else:
self._rtval.set('', ks.rtVal('Scalar', x), ks.rtVal('Scalar', y), ks.rtVal('Scalar', z), ks.rtVal('RotationOrder', ro))
return True
def equal(self, other):
"""Checks equality of this Euler with another.
Args:
other (Euler): Other value to check equality with.
Returns:
bool: True if equal.
"""
return self._rtval.equal('Boolean', ks.rtVal('Euler', other))
def almostEqual(self, other, precision):
"""Checks almost equality of this Euler with another.
Args:
other (Euler): Other value to check equality with.
precision (float): precision value.
Returns:
bool: True if almost equal.
"""
return self._rtval.almostEqual('Boolean', ks.rtVal('Euler', other), ks.rtVal('Scalar', precision))
def toMat33(self):
"""Converts the Euler angles value to a Mat33.
Returns:
Mat33: The Mat33 object representing this Euler.
"""
return Mat33(self._rtval.toMat33('Mat33'))
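# Hypothetical usage (requires a Fabric Engine / Kraken runtime, since every
# Euler wraps an RTVal created through ks.rtVal):
#   euler = Euler(0.0, 1.5708, 0.0, ro='ZYX')
#   copy = euler.clone()
#   assert copy.almostEqual(euler, 0.0001)
#   mat = euler.toMat33()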
| StarcoderdataPython |
141838 | <reponame>dopplershift/siphon
# Copyright (c) 2016 University Corporation for Atmospheric Research/Unidata.
# Distributed under the terms of the MIT License.
# SPDX-License-Identifier: MIT
"""Test Coverage Dataset."""
import warnings
from siphon.cdmr.coveragedataset import CoverageDataset
from siphon.testing import get_recorder
recorder = get_recorder(__file__)
# Ignore warnings about CoverageDataset
warnings.simplefilter('ignore')
@recorder.use_cassette('hrrr_cdmremotefeature')
def test_simple_cdmremotefeature():
"""Smoke test for CDMRemoteFeature."""
cd = CoverageDataset('http://localhost:8080/thredds/cdmrfeature/grid/'
'test/HRRR_CONUS_2p5km_20160309_1600.grib2')
assert cd.grids
@recorder.use_cassette('hrrr_cdmremotefeature')
def test_simple_cdmremotefeature_str():
"""Smoke test for converting CoverageDataset to str."""
cd = CoverageDataset('http://localhost:8080/thredds/cdmrfeature/grid/'
'test/HRRR_CONUS_2p5km_20160309_1600.grib2')
assert str(cd)
| StarcoderdataPython |
1673764 | from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^$', views.ingredient_manager),
url(r'^recipe$', views.recipe_manager)
] | StarcoderdataPython |
1671195 | import discord
from discord.ext import commands
import os
from tabulate import tabulate
from cogs.database.db import Base, Configs
from cogs.database.database import session
present_configs = os.listdir("cogs/configs")
class download(commands.Cog):
def __init__(self,bot):
self.bot = bot
def channel():
def predicate(ctx):
return ctx.channel.id == <Channel ID>
return commands.check(predicate)
@channel()
@commands.command(pass_context=True)
@commands.guild_only()
@commands.has_role("Config Permission Role")
async def download(self,ctx,arg):
lwrcase = arg.lower()
list = str(lwrcase).split(",")
for elem in list:
if ".loli" in elem:
lwrarg = elem
else:
lwrarg = str(elem)+".loli"
try:
configs = session.query(Configs).filter(Configs.name == lwrarg).first()
if lwrarg not in present_configs:
await ctx.send("Config not found, please request it !")
return
user = ctx.message.author
info = [['Name',configs.name],['Captcha',configs.captcha],['Capture',configs.capture],['Proxies',configs.proxies],['Author',configs.author],['Uploaded by',configs.uploaded_by],["Wordlists",configs.wordlist1+"|"+configs.wordlist2]]
file = discord.File(f"cogs/configs/{lwrarg}")
await user.send("```\n"+tabulate(info)+"```",file=file)
except Exception as e:
await ctx.send("Could not complete your command.")
print(e)
await ctx.send("Sent you a DM containing the config.")
def setup(bot):
bot.add_cog(download(bot))
| StarcoderdataPython |
3299740 | <gh_stars>1-10
import time
from pprint import pprint
from flask_login import login_required
from flask import Blueprint, session, request
from sqlalchemy import create_engine
import dateutil.parser
import logging
from datetime import datetime, timedelta, timezone
import pytz
from utils.app import jsonify, app, get_nested
from utils.tasks import create_task
from utils.common import now
from data.api_credentials import get_calendar_api_client, get_user_api_client
from models.base import db
from models.app_main import User
from models.app_sync import Sync, SyncTask
from models.user_calendar import CalendarEvents, CalendarEventAttendees, CalendarUser, CalendarUserAlias
from sqlalchemy.orm import sessionmaker
# TODO do 31 days for now, to make sure things go fast and we don't get into trouble with gcal limits
HISTORIC_DAYS_TO_PULL = 31
FUTURE_DAYS_TO_PULL = 31
CALENDAR_SYNC_HANDLER_URL = '/api/syncs/tasks/calendar_sync'
def update_task_status():
pass
def store_calendar_event_attendees(event, attendee, email, database_session, is_organizer):
event_id = event.get('id')
attendee_primary_key = (event_id, email)
calendar_user_alias = database_session.query(CalendarUserAlias).get(email)
attendee_values = {
'event_id': event_id,
'invited_email': email,
'is_organizer': is_organizer
}
if attendee:
attendee_values['is_optional'] = bool(attendee.get('optional'))
if attendee.get('displayName'):
attendee_values['display_name'] = attendee.get('displayName')
elif calendar_user_alias:
attendee_values['display_name'] = calendar_user_alias.calendar_user.full_name
attendee_values['response_status'] = attendee.get('responseStatus')
attendee_values['is_organizer'] = bool(attendee.get('organizer'))
attendee_values['comment'] = attendee.get('comment')
else:
#TODO assume that the user is the organizer of the event and the only attendee, so not optional
attendee_values['is_optional'] = False
attendee_values['is_organizer'] = True
if calendar_user_alias:
attendee_values['display_name'] = calendar_user_alias.calendar_user.full_name
else:
attendee_values['display_name'] = email
attendee_values['response_status'] = 'accepted'
attendee_values['comment'] = ''
if calendar_user_alias:
attendee_values['calendar_user_id'] = calendar_user_alias.calendar_user.id
calendar_event_attendee = insert_or_update(CalendarEventAttendees, attendee_primary_key, attendee_values, database_session)
def store_calendar_events(calendar_id, data, database_session, sync_id):
    tasks = SyncTask.query.filter_by(
sync_id=sync_id,
class_name=CalendarEvents.__name__,
commit_id=calendar_id
)
tasks.update({
'store_start': now(),
'status': 'storing'
}, synchronize_session=False)
db.session.commit()
try:
for event in data:
primary_key = event.get('id')
values = {
"event_id": primary_key,
"organizer_email": get_nested(event, 'organizer', 'email'),
"creator_email": get_nested(event, 'creator', 'email'),
"status": event.get('status'),
"is_recurring": not not event.get('recurringEventId'),
"recurrence_id": event.get('recurringEventId'),
"title": event.get('summary'),
"location": event.get('location'),
"description": event.get('description')
}
if get_nested(event, 'start', 'dateTime'):
values['start_time'] = dateutil.parser.parse(get_nested(event, 'start', 'dateTime'))
if get_nested(event, 'end', 'dateTime'):
values["end_time"] = dateutil.parser.parse(get_nested(event, 'end', 'dateTime'))
            if event.get('created'):
                values["created_at"] = dateutil.parser.parse(event.get('created'))
            if event.get('updated'):
                values["updated_at"] = dateutil.parser.parse(event.get('updated'))
is_organizer = get_nested(event, 'organizer', 'self')
calendar_event = insert_or_update(CalendarEvents, primary_key, values, database_session)
if event.get('attendees') is None and is_organizer:
store_calendar_event_attendees(
event,
None,
calendar_id,
database_session,
is_organizer
)
else:
for attendee in event.get('attendees') or []:
attendee_primary_key = (event.get('id'), attendee.get('email'))
store_calendar_event_attendees(
event,
attendee,
attendee.get('email'),
database_session,
is_organizer
)
tasks.update({
'status': 'success'
}, synchronize_session=False)
except Exception as err:
tasks.update({
'status': 'error',
'errors': str(err)
}, synchronize_session=False)
raise err
finally:
tasks.update({
'store_end': now()
}, synchronize_session=False)
db.session.commit()
database_session.commit()
def get_calendar_events(cal_client, cal, sync_id):
    tasks = SyncTask.query.filter_by(
sync_id=sync_id,
class_name=CalendarEvents.__name__,
commit_id=cal
)
tasks.update({
'pull_start': now(),
'status': 'pulling'
}, synchronize_session=False)
db.session.commit()
# Loop through all the calendars we need to fetch
start_datetime = datetime.utcnow() - timedelta(days=HISTORIC_DAYS_TO_PULL)
start_datetime = start_datetime.replace(tzinfo=pytz.UTC)
end_datetime = datetime.utcnow() + timedelta(days=FUTURE_DAYS_TO_PULL)
end_datetime = end_datetime.replace(tzinfo=pytz.UTC)
logging.info(f'Getting calendar data for {cal} from {start_datetime} - {end_datetime}')
try:
all_events = []
response = cal_client.events().list(calendarId=cal,
maxResults=100,
singleEvents=True,
orderBy='startTime',
timeMin=start_datetime.isoformat(),
timeMax=end_datetime.isoformat(),
timeZone='UTC').execute()
nextPageToken = response.get('nextPageToken')
events = response.get('items', [])
all_events = events
while nextPageToken:
# Fetch this series of results
response = cal_client.events().list(
calendarId=cal,
maxResults=100,
singleEvents=True,
orderBy='startTime',
timeMin=start_datetime.isoformat(),
timeMax=end_datetime.isoformat(),
pageToken=nextPageToken).execute()
nextPageToken = response.get('nextPageToken')
events = response.get('items', [])
all_events = all_events + events
tasks.update({
'status': 'success'
}, synchronize_session=False)
except Exception as err:
tasks.update({
'status': 'error',
'errors': str(err)
}, synchronize_session=False)
raise err
finally:
tasks.update({
'pull_end': now()
}, synchronize_session=False)
db.session.commit()
return all_events
def set_object_values(obj, values):
for key, val in values.items():
setattr(obj, key, val)
def insert_or_update(obj_type, primary_key, values, session):
obj = session.query(obj_type).get(primary_key)
if obj:
set_object_values(obj, values)
else:
obj = obj_type()
set_object_values(obj, values)
session.add(obj)
return obj
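# Illustrative (hypothetical) call showing how the helpers below use it: fetch
# or create a CalendarUser by primary key, then update its columns.
#   user = insert_or_update(CalendarUser, '123',
#                           {'id': '123', 'full_name': 'Ada Lovelace'},
#                           database_session)
#   database_session.commit()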
# TODO I think this can be done more efficient but didn't get it to work fast enough. See https://docs.sqlalchemy.org/en/13/orm/session_api.html#sqlalchemy.orm.session.Session.bulk_save_objects
def store_calendar_users(users, database_session, sync_id):
    tasks = SyncTask.query.filter_by(
sync_id=sync_id,
class_name=CalendarUser.__name__,
commit_id='get_pull_users'
)
tasks.update({
'store_start': now(),
'status': 'storing'
}, synchronize_session=False)
db.session.commit()
obs = []
try:
for user in users:
primary_key = user.get('id')
values = {
'id': user.get('id'),
'primary_alias': user.get('primaryEmail'),
'given_name': get_nested(user, 'name', 'givenName'),
'family_name': get_nested(user, 'name', 'familyName'),
'full_name': get_nested(user, 'name', 'fullName'),
'current_calendar_timezone': user.get('timezone')
}
calendar_user = insert_or_update(CalendarUser, primary_key, values, database_session)
if user.get('emails'):
for email in user.get('emails'):
primary_key = email.get('address')
values = {
'alias': primary_key,
'calendar_user': calendar_user,
'calendar_user_id': calendar_user.id
}
insert_or_update(CalendarUserAlias, primary_key, values, database_session)
else:
primary_key = user.get('primaryEmail')
values = {
'alias': user.get('primaryEmail'),
'calendar_user': calendar_user,
'calendar_user_id': calendar_user.id
}
insert_or_update(CalendarUserAlias, primary_key, values, database_session)
tasks.update({
'status': 'success'
}, synchronize_session=False)
except Exception as e:
tasks.update({
'status': 'error',
'errors': str(e)
}, synchronize_session=False)
raise
finally:
tasks.update({
'store_end': now()
}, synchronize_session=False)
db.session.commit()
database_session.commit()
def get_calendar_users(user, calendars, cal_client, sync_id):
user_client = get_user_api_client(user.id)
tasks = SyncTask.query.filter_by(
sync_id=sync_id,
class_name=CalendarUser.__name__,
commit_id='get_pull_users'
)
tasks.update({
'pull_start': now(),
'status': 'pulling'
}, synchronize_session=False)
db.session.commit()
# TODO is this the best way to get the domain or customer ID for a user?
domain = user.email[user.email.find("@") + 1:]
page_token = None
users = []
i = 0
try:
while True:
if domain == 'gmail.com':
user_list = None
page_users = [{
'id': user.sub,
'primaryEmail': user.email,
'name': {
'givenName': user.given_name,
'familyName': user.family_name,
'fullName': user.name
}
}]
else:
user_list = user_client.users().list(
pageToken=page_token,
viewType='domain_public', domain=domain).execute()
page_users = user_list.get('users', [])
for entry in page_users:
if entry['primaryEmail'] in calendars:
request = cal_client.calendars().get(calendarId=entry['primaryEmail']).execute()
if i == 0:
i = 1
entry['timezone'] = request.get('timeZone')
users.append(entry)
if user_list:
page_token = user_list.get('nextPageToken')
if not page_token:
break
tasks.update({
'status': 'success'
}, synchronize_session=False)
except Exception as e:
tasks.update({
'status': 'error',
'errors': str(e)
}, synchronize_session=False)
raise
finally:
tasks.update({
'pull_end': now()
}, synchronize_session=False)
db.session.commit()
return users
def calendar_sync_main(user_id, calendars):
# Get the API client
user = User.query.get(int(user_id))
sync = Sync(
status='pending',
user=user,
start=now()
)
db.session.add(sync)
task = SyncTask(
sync=sync,
class_name=CalendarUser.__name__,
commit_id='get_pull_users',
status='pending'
)
db.session.add(task)
for cal in calendars:
task = SyncTask(
sync=sync,
class_name=CalendarEvents.__name__,
commit_id=cal,
status='pending'
)
db.session.add(task)
db.session.commit()
sync_id = sync.id
database_session = None
try:
# TODO this is assuming the user always has one database, which is true for now
database = user.databases[0]
database_url = database.get_url()
engine = create_engine(database_url)
database_session = sessionmaker(bind=engine)()
cal_client = get_calendar_api_client(user.id)
users = get_calendar_users(user, calendars, cal_client, sync_id)
store_calendar_users(users, database_session, sync_id)
for cal in calendars:
cal_events = get_calendar_events(cal_client, cal, sync_id)
store_calendar_events(cal, cal_events, database_session, sync_id)
sync.status = 'success'
except Exception as e:
sync.status = 'failed'
sync.message = str(e)
raise
finally:
sync.end = now()
if database_session:
database_session.close()
db.session.commit()
return {
"DONE": True
}
def start_calendar_sync_task(req, calendar_sync_settings):
user_id = calendar_sync_settings.user_id
calendars = calendar_sync_settings.synced_calendars
create_task(req, CALENDAR_SYNC_HANDLER_URL, {
"user_id": user_id,
"calendars": calendars
})
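# Hypothetical trigger from a request handler (the settings object is expected
# to expose .user_id and .synced_calendars, as used above):
#   start_calendar_sync_task(request, calendar_sync_settings)
#   # ...which enqueues a task posting back to CALENDAR_SYNC_HANDLER_URL and
#   # ultimately running calendar_sync_main(user_id, ["alice@example.com"]).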
| StarcoderdataPython |
3349486 | def safe_compare_dataframes(first, second):
"""Compare two dataframes even if they have NaN values.
Args:
first (pandas.DataFrame): DataFrame to compare
second (pandas.DataFrame): DataFrame to compare
Returns:
bool
"""
if first.isnull().all().all():
return first.equals(second)
else:
nulls = (first.isnull() == second.isnull()).all().all()
values = (first[~first.isnull()] == second[~second.isnull()]).all().all()
return nulls and values
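# Small illustration (pandas/numpy are assumed to be importable; they are not
# imported by the module above).
if __name__ == "__main__":
    import numpy as np
    import pandas as pd
    a = pd.DataFrame({'x': [np.nan, np.nan]})
    b = pd.DataFrame({'x': [np.nan, np.nan]})
    c = pd.DataFrame({'x': [1.0, 2.0]})
    assert safe_compare_dataframes(a, b)         # all-NaN frames compare equal
    assert safe_compare_dataframes(c, c.copy())  # plain frames compare element-wise
    assert not safe_compare_dataframes(a, c)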
| StarcoderdataPython |
3313300 | <filename>notebooks/unfolding/PyUnfold/make_counts.py
#!/usr/bin/env python
import numpy as np
import pandas as pd
import ROOT
from ROOT import TH1F, TH2F, TNamed
from ROOT import gROOT, gSystem
import itertools
import os
import re
if __name__ == "__main__":
formatted_df_outfile = os.path.join('/data/user/jbourbeau/composition/unfolding',
'unfolding-dataframe-PyUnfold-formatted.csv')
df_flux = pd.read_csv(formatted_df_outfile, index_col='log_energy_bin_idx')
counts = df_flux['counts'].values
ebins = len(counts)+1
earray = np.arange(ebins, dtype=float)
print('earray = {}'.format(earray))
ebins -= 1
# ROOT Output
binname = 'bin0'
OutFile = 'counts.root'
fout = ROOT.TFile(OutFile, "RECREATE")
# Check if bin directory exists, quit if so, otherwise create it!
if ( not fout.GetDirectory(binname) ):
pdir = fout.mkdir(binname,"Bin number 0")
else:
fout.Close()
print("\n=========================\n")
errormessage = "Directory %s already exists!\nEither try another bin number or delete %s and start again. Exiting...\n"%(binname,OutFile)
raise ValueError(errormessage)
# Go to home of ROOT file
fout.cd(binname)
# Prepare Combined Weighted Histograms - To be Normalized by Model After Filling
# Isotropic Weights of Causes - For Calculating Combined Species Efficiency
Eff = TH1F('Compositions', 'Non-Normed Combined Efficiency', ebins, earray)
Eff.GetXaxis().SetTitle('Effects')
Eff.GetYaxis().SetTitle('Counts')
Eff.SetStats(0)
Eff.Sumw2()
for ci in xrange(0,ebins):
print('counts[{}] = {}'.format(ci, counts[ci]))
Eff.SetBinContent(ci+1, counts[ci])
Eff.SetBinError(ci+1, np.sqrt(counts[ci]))
# Write the weighted histograms to file
Eff.Write()
fout.Write()
fout.Close()
print("Saving output file %s\n"%OutFile)
print("\n=========================\n")
print("Finished here! Exiting...")
| StarcoderdataPython |
48857 | <reponame>the-scouts/incognita
import time
import geopandas as gpd
import pandas as pd
from incognita.data.scout_census import load_census_data
from incognita.geographies import district_boundaries
from incognita.logger import logger
from incognita.utility import config
from incognita.utility import filter
from incognita.utility import timing
if __name__ == "__main__":
start_time = time.time()
logger.info(f"Starting at {time.strftime('%H:%M:%S', time.localtime(start_time))}")
census_data = load_census_data()
census_data = filter.filter_records(census_data, "Census_ID", {20})
# Remove Jersey, Guernsey, and Isle of Man as they have invalid lat/long coordinates for their postcodes
census_data = filter.filter_records(census_data, "C_name", {"<NAME>", "Isle of Man", "Jersey"}, exclude_matching=True)
# low resolution shape data
world_low_res = gpd.read_file(gpd.datasets.get_path("naturalearth_lowres"))
uk_shape = world_low_res.loc[world_low_res.name == "United Kingdom", "geometry"].array.data[0]
# # high resolution shape data
# uk_shape = gpd.read_file(r"S:\Development\incognita\data\UK Shape\GBR_adm0.shp")["geometry"].array.data[0]
logger.info("UK outline shapefile loaded.")
district_polygons = district_boundaries.create_district_boundaries(census_data, clip_to=uk_shape)
logger.info("District boundaries estimated!")
location_ids = census_data[["D_ID", "C_ID", "R_ID", "X_ID"]].dropna(subset=["D_ID"]).drop_duplicates().astype("Int64")
district_polygons = pd.merge(district_polygons, location_ids, how="left", on="D_ID")
logger.info("Added County, Region & Country location codes.")
district_polygons.to_file(config.SETTINGS.folders.boundaries / "districts-borders-uk.geojson", driver="GeoJSON")
logger.info("District boundaries saved.")
timing.close(start_time)
| StarcoderdataPython |
3281661 | #!/usr/bin/env python
# Copyright 2017 Google, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from google.cloud import kms_v1
from google.cloud.kms_v1 import enums
# [START kms_create_keyring]
def create_key_ring(project_id, location_id, key_ring_id):
"""Creates a KeyRing in the given location (e.g. global)."""
# Creates an API client for the KMS API.
client = kms_v1.KeyManagementServiceClient()
# The resource name of the location associated with the KeyRing.
parent = client.location_path(project_id, location_id)
# The keyring object template
keyring_name = client.key_ring_path(project_id, location_id, key_ring_id)
keyring = {'name': keyring_name}
# Create a KeyRing
response = client.create_key_ring(parent, key_ring_id, keyring)
print('Created KeyRing {}.'.format(response.name))
return response
# [END kms_create_keyring]
# [START kms_create_cryptokey]
def create_crypto_key(project_id, location_id, key_ring_id, crypto_key_id):
"""Creates a CryptoKey within a KeyRing in the given location."""
# Creates an API client for the KMS API.
client = kms_v1.KeyManagementServiceClient()
# The resource name of the KeyRing associated with the CryptoKey.
parent = client.key_ring_path(project_id, location_id, key_ring_id)
# Create the CryptoKey object template
purpose = enums.CryptoKey.CryptoKeyPurpose.ENCRYPT_DECRYPT
crypto_key = {'purpose': purpose}
# Create a CryptoKey for the given KeyRing.
response = client.create_crypto_key(parent, crypto_key_id, crypto_key)
print('Created CryptoKey {}.'.format(response.name))
return response
# [END kms_create_cryptokey]
# [START kms_encrypt]
def encrypt_symmetric(project_id, location_id, key_ring_id, crypto_key_id,
plaintext):
"""Encrypts input plaintext data using the provided symmetric CryptoKey."""
# Creates an API client for the KMS API.
client = kms_v1.KeyManagementServiceClient()
# The resource name of the CryptoKey.
name = client.crypto_key_path_path(project_id, location_id, key_ring_id,
crypto_key_id)
# Use the KMS API to encrypt the data.
response = client.encrypt(name, plaintext)
return response.ciphertext
# [END kms_encrypt]
# [START kms_decrypt]
def decrypt_symmetric(project_id, location_id, key_ring_id, crypto_key_id,
ciphertext):
"""Decrypts input ciphertext using the provided symmetric CryptoKey."""
# Creates an API client for the KMS API.
client = kms_v1.KeyManagementServiceClient()
# The resource name of the CryptoKey.
name = client.crypto_key_path_path(project_id, location_id, key_ring_id,
crypto_key_id)
# Use the KMS API to decrypt the data.
response = client.decrypt(name, ciphertext)
return response.plaintext
# [END kms_decrypt]
# [START kms_disable_cryptokey_version]
def disable_crypto_key_version(project_id, location_id, key_ring_id,
crypto_key_id, version_id):
"""Disables a CryptoKeyVersion associated with a given CryptoKey and
KeyRing."""
# Creates an API client for the KMS API.
client = kms_v1.KeyManagementServiceClient()
# Construct the resource name of the CryptoKeyVersion.
name = client.crypto_key_version_path(project_id, location_id, key_ring_id,
crypto_key_id, version_id)
# Use the KMS API to disable the CryptoKeyVersion.
new_state = enums.CryptoKeyVersion.CryptoKeyVersionState.DISABLED
version = {'name': name, 'state': new_state}
update_mask = {'paths': ["state"]}
# Print results
response = client.update_crypto_key_version(version, update_mask)
print('CryptoKeyVersion {}\'s state has been set to {}.'.format(
name, response.state))
# [END kms_disable_cryptokey_version]
# [START kms_enable_cryptokey_version]
def enable_crypto_key_version(project_id, location_id, key_ring_id,
crypto_key_id, version_id):
"""Enables a CryptoKeyVersion associated with a given CryptoKey and
KeyRing."""
# Creates an API client for the KMS API.
client = kms_v1.KeyManagementServiceClient()
# Construct the resource name of the CryptoKeyVersion.
name = client.crypto_key_version_path(project_id, location_id, key_ring_id,
crypto_key_id, version_id)
# Use the KMS API to enable the CryptoKeyVersion.
new_state = enums.CryptoKeyVersion.CryptoKeyVersionState.ENABLED
version = {'name': name, 'state': new_state}
update_mask = {'paths': ["state"]}
# Print results
response = client.update_crypto_key_version(version, update_mask)
print('CryptoKeyVersion {}\'s state has been set to {}.'.format(
name, response.state))
# [END kms_enable_cryptokey_version]
# [START kms_destroy_cryptokey_version]
def destroy_crypto_key_version(
project_id, location_id, key_ring_id, crypto_key_id, version_id):
"""Schedules a CryptoKeyVersion associated with a given CryptoKey and
KeyRing for destruction 24 hours in the future."""
# Creates an API client for the KMS API.
client = kms_v1.KeyManagementServiceClient()
# Construct the resource name of the CryptoKeyVersion.
name = client.crypto_key_version_path(project_id, location_id, key_ring_id,
crypto_key_id, version_id)
# Use the KMS API to mark the CryptoKeyVersion for destruction.
response = client.destroy_crypto_key_version(name)
# Print results
print('CryptoKeyVersion {}\'s state has been set to {}.'.format(
name, response.state))
# [END kms_destroy_cryptokey_version]
# [START kms_restore_cryptokey_version]
def restore_crypto_key_version(
project_id, location_id, key_ring_id, crypto_key_id, version_id):
"""Restores a CryptoKeyVersion that is scheduled for destruction."""
# Creates an API client for the KMS API.
client = kms_v1.KeyManagementServiceClient()
# Construct the resource name of the CryptoKeyVersion.
name = client.crypto_key_version_path(project_id, location_id, key_ring_id,
crypto_key_id, version_id)
# Use the KMS API to restore the CryptoKeyVersion.
response = client.restore_crypto_key_version(name)
# Print results
print('CryptoKeyVersion {}\'s state has been set to {}.'.format(
name, response.state))
# [END kms_restore_cryptokey_version]
# [START kms_add_member_to_cryptokey_policy]
def add_member_to_crypto_key_policy(
project_id, location_id, key_ring_id, crypto_key_id, member, role):
"""Adds a member with a given role to the Identity and Access Management
(IAM) policy for a given CryptoKey associated with a KeyRing."""
# Creates an API client for the KMS API.
client = kms_v1.KeyManagementServiceClient()
# The resource name of the CryptoKey.
resource = client.crypto_key_path_path(project_id, location_id,
key_ring_id, crypto_key_id)
# Get the current IAM policy.
policy = client.get_iam_policy(resource)
# Add member
policy.bindings.add(
role=role,
members=[member])
# Update the IAM Policy.
client.set_iam_policy(resource, policy)
# Print results
print('Member {} added with role {} to policy for CryptoKey {} \
in KeyRing {}'.format(member, role, crypto_key_id, key_ring_id))
# [END kms_add_member_to_cryptokey_policy]
# [START kms_add_member_to_keyring_policy]
def add_member_to_key_ring_policy(
project_id, location_id, key_ring_id, member, role):
"""Adds a member with a given role to the Identity and Access Management
(IAM) policy for a given KeyRing."""
# Creates an API client for the KMS API.
client = kms_v1.KeyManagementServiceClient()
# The resource name of the KeyRing.
resource = client.key_ring_path(project_id, location_id, key_ring_id)
# Get the current IAM policy.
policy = client.get_iam_policy(resource)
# Add member
policy.bindings.add(
role=role,
members=[member])
# Update the IAM Policy.
client.set_iam_policy(resource, policy)
# Print results
print('Member {} added with role {} to policy in KeyRing {}'
.format(member, role, key_ring_id))
# [END kms_add_member_to_keyring_policy]
# [START kms_remove_member_from_cryptokey_policy]
def remove_member_from_crypto_key_policy(
project_id, location_id, key_ring_id, crypto_key_id, member, role):
"""Removes a member with a given role from the Identity and Access
Management (IAM) policy for a given CryptoKey associated with a KeyRing."""
# Creates an API client for the KMS API.
client = kms_v1.KeyManagementServiceClient()
# The resource name of the CryptoKey.
resource = client.crypto_key_path_path(project_id, location_id,
key_ring_id, crypto_key_id)
# Get the current IAM policy.
policy = client.get_iam_policy(resource)
# Remove member
for b in list(policy.bindings):
if b.role == role and member in b.members:
b.members.remove(member)
# Update the IAM Policy.
client.set_iam_policy(resource, policy)
# Print results
print('Member {} removed from role {} for CryptoKey in KeyRing {}'
.format(member, role, crypto_key_id, key_ring_id))
# [END kms_remove_member_from_cryptokey_policy]
def remove_member_from_key_ring_policy(project_id, location_id, key_ring_id,
member, role):
"""Removes a member with a given role from the Identity and Access
Management (IAM) policy for a given KeyRing."""
# Creates an API client for the KMS API.
client = kms_v1.KeyManagementServiceClient()
# The resource name of the KeyRing.
resource = client.key_ring_path(project_id, location_id, key_ring_id)
# Get the current IAM policy.
policy = client.get_iam_policy(resource)
# Remove member
for b in list(policy.bindings):
if b.role == role and member in b.members:
b.members.remove(member)
# Update the IAM Policy.
client.set_iam_policy(resource, policy)
# Print results
print('Member {} removed from role {} for KeyRing {}'
.format(member, role, key_ring_id))
# [START kms_get_keyring_policy]
def get_key_ring_policy(project_id, location_id, key_ring_id):
"""Gets the Identity and Access Management (IAM) policy for a given KeyRing
and prints out roles and the members assigned to those roles."""
# Creates an API client for the KMS API.
client = kms_v1.KeyManagementServiceClient()
# The resource name of the KeyRing.
resource = client.key_ring_path(project_id, location_id, key_ring_id)
# Get the current IAM policy.
policy = client.get_iam_policy(resource)
# Print results
print('Printing IAM policy for resource {}:'.format(resource))
for b in policy.bindings:
for m in b.members:
print('Role: {} Member: {}'.format(b.role, m))
return policy
# [END kms_get_keyring_policy]
def get_crypto_key_policy(project_id, location_id, key_ring_id, crypto_key_id):
"""Gets the Identity and Access Management (IAM) policy for a given KeyRing
and prints out roles and the members assigned to those roles."""
# Creates an API client for the KMS API.
client = kms_v1.KeyManagementServiceClient()
# The resource name of the CryptoKey.
resource = client.crypto_key_path_path(project_id, location_id,
key_ring_id, crypto_key_id)
# Get the current IAM policy.
policy = client.get_iam_policy(resource)
# Print results
print('Printing IAM policy for resource {}:'.format(resource))
for b in policy.bindings:
for m in b.members:
print('Role: {} Member: {}'.format(b.role, m))
return policy
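# Hypothetical end-to-end sketch using the helpers above (requires valid GCP
# credentials and an existing project; all resource names are placeholders):
#   create_key_ring('my-project', 'global', 'my-key-ring')
#   create_crypto_key('my-project', 'global', 'my-key-ring', 'my-key')
#   ciphertext = encrypt_symmetric('my-project', 'global', 'my-key-ring',
#                                  'my-key', b'secret payload')
#   assert decrypt_symmetric('my-project', 'global', 'my-key-ring',
#                            'my-key', ciphertext) == b'secret payload'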
| StarcoderdataPython |
3277495 | import uuid
from django.db import models
from django.db.models import Sum
from django.conf import settings
from django.contrib.auth.models import User
from django_countries.fields import CountryField
from shop.models import Product
from members.models import Member
from user_profiles.models import StoreUser
from decimal import Decimal
class ShopOrder(models.Model):
order_number = models.CharField(max_length=32, null=False, editable=False)
store_user = models.ForeignKey(StoreUser, on_delete=models.SET_NULL,
null=True, blank=True,
related_name='shop_orders')
date = models.DateTimeField(auto_now_add=True)
full_name = models.CharField(max_length=60, null=False, blank=False)
email_address = models.EmailField(max_length=254, null=False, blank=False)
phone_number = models.CharField(max_length=20, null=True, blank=True)
address_line1 = models.CharField(max_length=80, null=False, blank=False)
address_line2 = models.CharField(max_length=80, null=True, blank=True)
town_or_city = models.CharField(max_length=50, null=False, blank=False)
county_or_region = models.CharField(max_length=50, null=True, blank=True)
postcode = models.CharField(max_length=10, null=False, blank=False)
country = CountryField(blank_label='Country *', null=False, blank=False)
delivery_charge = models.DecimalField(max_digits=6, decimal_places=2,
null=False, default=0)
order_total = models.DecimalField(max_digits=8, decimal_places=2,
null=False, default=0)
grand_total = models.DecimalField(max_digits=8, decimal_places=2,
null=False, default=0)
shopping_cart = models.TextField(null=False, blank=False, default='')
stripe_pid = models.CharField(max_length=254, null=False, blank=False,
default='')
def _generate_order_number(self):
return uuid.uuid4().hex.upper()
def update_cart_total(self):
self.order_total = (self.lineitems.aggregate
(Sum('lineitem_total'))['lineitem_total__sum'] or
0)
vip = Member.objects.filter(is_vip=True)
user = User.objects.filter(username=self.store_user)
if vip == user:
self.order_total = (Decimal(self.order_total *
settings.VIP_DISCOUNT_PERCENTAGE / 100))
if self.order_total < settings.FREE_DELIVERY_THRESHOLD:
if vip == user:
self.delivery_charge = 0
else:
self.delivery_charge = (Decimal(
settings.STANDARD_DELIVERY_CHARGE))
else:
self.delivery_charge = 0
self.grand_total = self.order_total + self.delivery_charge
self.save()
def save(self, *args, **kwargs):
if not self.order_number:
self.order_number = self._generate_order_number()
super().save(*args, **kwargs)
def __str__(self):
return self.order_number
class OrderLineItem(models.Model):
shop_order = models.ForeignKey(ShopOrder, null=False, blank=False,
on_delete=models.CASCADE,
related_name='lineitems')
item = models.ForeignKey(Product, null=False, blank=False,
on_delete=models.CASCADE)
item_size = models.CharField(max_length=2, null=True, blank=True)
quantity = models.IntegerField(null=False, blank=False, default=0)
lineitem_total = models.DecimalField(max_digits=6, decimal_places=2,
null=False, blank=False)
def save(self, *args, **kwargs):
self.lineitem_total = self.item.price * self.quantity
super().save(*args, **kwargs)
def __str__(self):
return f'{self.item.name} on order {self.shop_order.order_number}'
| StarcoderdataPython |
13130 | <filename>hydropy/__init__.py
"""
Hydropy
=======
Provides functions to work with hydrological processes and equations
"""
| StarcoderdataPython |
1758541 | # (c) 2017 <NAME>
"""common interface for all pore geometries
should behave as follows:
-) defines default parameters for the geometry
-) provides function that takes geoname, geo params (including dim) plus
small additional number of non-geo (solver) params
(h, reconstruct, subs), and returns geo.
-) optionally, provide functions that returns underlying geometrical object before
building
-) allows easy, modular addition of new geometries"""
import numpy as np
def lazy_import():
global Params, any_params, polygons, MultiPore, Pore, pughpore
global curved, alphahempoly
from nanopores.tools.utilities import Params, any_params
import nanopores.tools.polygons as polygons
from nanopores.geometries.cylpore import MultiPore, Pore
import nanopores.geometries.pughpore as pughpore
import nanopores.geometries.curved as curved
from nanopores.geometries.alphahempoly import poly as alphahempoly
def get_geo(geoname=None, **params):
params["geoname"] = geoname
geoclass = geometries[geoname](**params)
return geoclass.get_geo()
def get_pore(geoname=None, **params):
params["geoname"] = geoname
geoclass = geometries[geoname](**params)
return geoclass.get_pore()
class BasePore(object):
default = dict(
dim = 2,
subs = None,
)
def __init__(self, h=1., reconstruct=False, **params):
lazy_import()
self.params = Params(self.default, **params)
self.h = h
self.reconstruct = reconstruct
def get_geo(self):
return self.build()
def get_pore(self):
pore = self.pore()
pore.build_nogeo()
return pore
class PughPore(BasePore):
@property
def default(self):
return dict(pughpore.params,
geoname = "pugh",
diamPore = 6., # will override l0,.. if set
diamDNA = 2.5, # will override l0,.. if diamPore set
dim = 3,
)
def build(self):
params = self.params
h = self.h
if params.diamPore is not None:
diamPore = params.diamPore # inner (effective) pore diameter
diamDNA = params.diamDNA # dna diameter of outer dna layers
l0 = diamPore + 6.*diamDNA
l1 = diamPore + 4.*diamDNA
l2 = diamPore + 2.*diamDNA
l3 = diamPore
l4 = l1
params.update(l0=l0, l1=l1, l2=l2, l3=l3, l4=l4)
if params.dim == 3:
geo = pughpore.get_geo(h, **params)
if geo.params["x0"] is not None:
molec = curved.Sphere(geo.params["rMolecule"],
geo.params["x0"])
geo.curved = dict(moleculeb = molec.snap)
elif params.dim == 2:
geo = pughpore.get_geo_cyl(h, **params)
if geo.params["x0"] is not None:
molec = curved.Circle(geo.params["rMolecule"],
geo.params["x0"])
geo.curved = dict(moleculeb = molec.snap)
elif params.dim == 1:
geo = pughpore.get_geo1D(h, **params)
return geo
class PughPoreCyl(BasePore):
default = dict(
# specify pore
l0 = 18., #22.5,
l1 = 14., #17.5,
l2 = 10., #12.5,
l3 = 6., #7.5,
l4 = 14., #17.5,
hpore = 46.,
h2 = 46.-35., # 11.
h1 = 46.-35.-2.5, # 8.5
h4 = 10.,
diamPore = 6., # will override l0,.. if set
diamDNA = 2.5, # will override l0,.. if diamPore set
dim = 2,
R = 20.,
H = 70.,
H0 = 60.,
R0 = None,
rMolecule = 2.0779, # molecular radius of protein trypsin
x0 = None,
lcMolecule = 0.2, # relative to global mesh size
lcCenter = 0.5,
hmem = 2.2,
zmem = -46./2. + 2.2/2.,
poreregion = True,
subs = None,
)
def polygon(self):
params = self.params
if params.diamPore is not None:
diamPore = params.diamPore # inner (effective) pore diameter
diamDNA = params.diamDNA # dna diameter of outer dna layers
l0 = diamPore + 6.*diamDNA
l1 = diamPore + 4.*diamDNA
l2 = diamPore + 2.*diamDNA
l3 = diamPore
l4 = l1
params.update(l0=l0, l1=l1, l2=l2, l3=l3, l4=l4)
r = [0.5*params.l3, 0.5*params.l2, 0.5*params.l1, 0.5*params.l0,
0.5*params.l4]
ztop = params.hpore/2.
zbot = -ztop
z = [zbot, ztop - params.h2, ztop - params.h1, ztop, zbot + params.h4]
# indices: [(0,0), (0,1), (1,1), (1,2), ..., (4,4), (4,0)]
n = len(r)
        return [(r[i // 2 % n], z[(i+1) // 2 % n]) for i in range(2*n)]
def pore(self):
params = self.params
dna = self.polygon()
pore = MultiPore(**params)
pore.add_polygons(dna=dna)
pore.synonymes = dict(chargeddnab="dnab")
return pore
def build(self):
pore = self.pore()
geo = pore.build(self.h, self.params.subs, self.reconstruct)
return geo
class AlphaHem(BasePore):
default = dict(
dim = 2,
Htop = 7.,
Hbot = 15.,
R = 10.,
cs = [-3, -6],
zmem = -7.625,
proteincs = [-2.3, -4.6, -7.2],
subs = None,
)
def pore(self):
return Pore(alphahempoly, porename="alphahem", **self.params)
def build(self):
pore = self.pore()
geo = pore.build(self.h, self.params.subs, self.reconstruct)
return geo
class WeiPore(BasePore):
default = dict(
R = 120.,
R0 = 100.,
H = 240.,
#H0 = 70.,
x0 = None, #[0, 0, 46],
rMolecule = 6.,
dim = 3,
no_membrane = True,
dp = 45, # (small) pore diameter as used in paper
angle = 40, # aperture angle in degrees
lcCenter = 0.3,
lcMolecule = 0.1,
h = 10.,
subs = None,
reconstruct = False,
poreregion = True,
receptor = None, #[40., 0., -30.],
rReceptor = 1.25,
reverse = True, # if True, narrow opening is at the top, as in paper
)
def polygons(self, params):
lsin = 50. # SiN membrane thickness (in vertical direction)
lau = 40. # Au membrane thickness (in vertical direction)
rlau = 10. # Au thickness in radial direction
lsam = 3. # SAM layer thickness (in vertical direction)
l0 = lau + lsin + lsam
angle2 = params.angle/2. * np.pi/180.
tan = np.tan(angle2)
sin = np.sin(angle2)
cos = np.cos(angle2)
l = l0/2.
r0 = params.dp/2. - lsam
r1 = r0 + l0*tan
rsam = r0 + lsam/cos
rsin = r0 + lsam/cos + rlau
R = params.R
split = 0.7
Rsplit = split*R + (1.-split)*r1
if not params.reverse:
sam = [[r0, -l], [r1, l], [R, l], [R, l - lsam],
[rsam - tan*(lsam - l0), l - lsam], [rsam, -l]]
au = [sam[5], sam[4], sam[3], [R, -l + lsin],
[rsin + tan*lsin, -l + lsin],
[rsin, -l]]
sin = [au[5], au[4], au[3], [R, -l]]
else:
l = -l
sam = [[r0, -l], [r1, l], [R, l], [R, l + lsam],
[rsam - tan*(lsam - l0), l + lsam], [rsam, -l]][::-1]
au = [sam[-6], sam[-5], sam[-4], [R, -l - lsin],
[rsin + tan*lsin, -l - lsin],
[rsin, -l]][::-1]
sin = [au[-6], au[-5], au[-4], [R, -l]][::-1]
return sam, au, sin, Rsplit
def pore(self):
params = self.params
sam, au, sin, Rsplit = self.polygons(params)
sam, unchargedsam = polygons.Polygon(sam).split(Rsplit)
au, unchargedau = polygons.Polygon(au).split(Rsplit)
sin, unchargedsin = polygons.Polygon(sin).split(Rsplit)
pore = MultiPore(**params)
pore.add_polygons(chargedsam=sam, chargedau=au, chargedsin=sin,
unchargedsam=unchargedsam, unchargedau=unchargedau,
unchargedsin=unchargedsin)
pore.synonymes = dict(
sam={"chargedsam", "unchargedsam"},
au={"chargedau", "unchargedau"},
sin={"chargedsin", "unchargedsin"},)
if params.receptor is not None:
receptor = polygons.Ball(params.receptor, params.rReceptor, lc=0.1)
pore.add_balls(receptor=receptor)
return pore
def build(self):
pore = self.pore()
geo = pore.build(self.h, self.params.subs, self.reconstruct)
return geo
geometries = dict(
wei = WeiPore,
pugh = PughPore,
pughcyl = PughPoreCyl,
alphahem = AlphaHem,
)
if __name__ == "__main__":
lazy_import()
params = any_params(geoname="wei", h=10.)
geo = get_geo(**params)
print('geo')
#print(geo)
#print geo.mesh.coordinates()
geo.plot_subdomains()
geo.plot_boundaries(interactive=True)
| StarcoderdataPython |
1698610 | <reponame>Verizon/YANG-validator
#
# Copyright Verizon Inc.
# Licensed under the terms of the Apache License 2.0 license. See LICENSE file in project root for terms.
#
import sys
import threading
import asyncio
import jsonToYang
import json
import traceback
import time
import pprint
topic = None
jsonStr = None
def convertToYang(topic, jsonStr):
jsonObj = json.loads(jsonStr)
mappingDetails = jsonToYang.getMappingDetails(topic)
yangStr = jsonToYang.convertToYangJsonStr(mappingDetails, jsonObj)
#pprint.pprint(yangStr)
return yangStr
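# Hypothetical one-off invocation (topic and payload are made-up values; the
# actual field mapping for a topic comes from jsonToYang.getMappingDetails):
#   yang = convertToYang("device-interfaces", '{"name": "eth0", "enabled": true}')
#   print(yang)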
#
# Main code
#
#if len(sys.argv) < 1:
# print('Must pass a topic and input json string')
# exit()
#else:
while True:
topic = input("Topic")
print('Using topic: ' + topic)
sys.stdout.flush()
jsonStr = input("Enter Json")
print('Using json string: ' + jsonStr)
sys.stdout.flush()
yangStr = convertToYang(topic, jsonStr)
print('YANG_RESPONSE=' + yangStr)
print('END PROCESSING')
sys.stdout.flush()
| StarcoderdataPython |
1691429 | #!/usr/bin/env python3
"""
Assumptions:
- The sequence residue lines are in upper case. I've never seen FASTQ files otherwise,
but if they aren't this will report no reads being filtered. A check would slow the
script too much.
Author: <NAME> (jorvis AT gmail)
"""
import argparse
import re
import sys
def main():
parser = argparse.ArgumentParser( description='Filter FASTQ file by N content')
## output file to be written
parser.add_argument('-l', '--left', type=str, required=False, help='FASTQ: Left (R1) mate pair file' )
parser.add_argument('-r', '--right', type=str, required=False, help='FASTQ: Right (R2) mate pair file' )
parser.add_argument('-s', '--singletons', type=str, required=False, help='FASTQ: singleton reads file' )
parser.add_argument('-lo', '--left_out', type=str, required=False, help='Output file of Left reads kept' )
parser.add_argument('-ro', '--right_out', type=str, required=False, help='Output file of Right reads kept')
parser.add_argument('-so', '--singletons_out', type=str, required=False, help='Output file of singleton reads kept')
parser.add_argument('-p', '--percent_n_cutoff', type=int, required=True, help='1-100: Percentage of Ns in read >= this will cause removal')
parser.add_argument('-or', '--output_report', type=str, required=False, help='Optional report of read filtering statistics')
args = parser.parse_args()
l_in_fh = None
r_in_fh = None
s_in_fh = None
l_out_fh = None
r_out_fh = None
s_out_fh = None
# Let the option checking and file handle bonanza begin!
# One of these must be passed: [left, right], [left, right, singletons], [singletons]
if args.left is not None and args.right is not None:
# singletons is optional if L/R is passed
if args.singletons is not None:
s_in_fh = open(args.singletons)
if args.left_out is None:
raise Exception("ERROR: If you specify --left you must also specify --left_out")
else:
l_out_fh = open(args.left_out, 'wt')
if args.right_out is None:
raise Exception("ERROR: If you specify --right you must also specify --right_out")
else:
r_out_fh = open(args.right_out, 'wt')
if args.singletons_out is None:
raise Exception("ERROR: The --singletons_out option must be passed if --left and --right are specified, even " \
"if --singletons isn't. This is so that orphaned mate pairs can still be exported.")
else:
s_out_fh = open(args.singletons_out, 'wt')
elif args.singletons is not None:
s_in_fh = open(args.singletons)
if args.singletons_out is None:
raise Exception("ERROR: If you pass --singletons you must also pass --singletons_out")
else:
s_out_fh = open(args.singletons_out, 'wt')
else:
raise Exception("ERROR. One of these must be passed: [left, right], [left, right, singletons], [singletons]")
##############################################################
# now the actual functioning code
##############################################################
line_count = 0
record_count = 0
last_lheader = None
last_rheader = None
#counts = {'left_kept':0, 'left_discarded':0, 'right_kept':0, 'right_discarded':0, 'singlets_kept':0, 'singlets_discarded':0}
counts = {'total_reads':0, 'pairs_kept':0, 'pairs_discarded':0, \
'left_only_kept':0, 'left_only_discarded':0, \
'right_only_kept':0, 'right_only_discarded':0, \
'singletons_kept':0, 'singletons_discarded':0
}
# for debugging purposes on large files only
line_limit = None
if args.left is not None and args.right is not None:
l_keep = False
r_keep = False
# Read left and right files at once.
with open(args.left) as l_in_fh, open(args.right) as r_in_fh:
for l_line, r_line in zip(l_in_fh, r_in_fh):
line_count += 1
if line_limit is not None and line_count > line_limit:
print("WARNING: Program exited after {0} lines for debugging".format(line_limit))
break
if line_count % 4 == 1:
record_count += 1
last_lheader = l_line
last_rheader = r_line
l_keep = False
r_keep = False
elif line_count % 4 == 2:
counts['total_reads'] += 2
l_pct_n = (l_line.count('N') / (len(l_line) - 1)) * 100
r_pct_n = (r_line.count('N') / (len(r_line) - 1)) * 100
#print("LN%: {0}\tRN%{1}: ".format(l_pct_n, r_pct_n))
# left bad, right good
if l_pct_n >= args.percent_n_cutoff and r_pct_n < args.percent_n_cutoff:
l_keep = False
r_keep = s_out_fh
r_keep.write(last_rheader)
r_keep.write(r_line)
counts['left_only_discarded'] += 1
counts['right_only_kept'] += 1
# left good, right bad
elif l_pct_n < args.percent_n_cutoff and r_pct_n >= args.percent_n_cutoff:
r_keep = False
l_keep = s_out_fh
l_keep.write(last_lheader)
l_keep.write(l_line)
counts['left_only_kept'] += 1
counts['right_only_discarded'] += 1
# both good
elif l_pct_n < args.percent_n_cutoff and r_pct_n < args.percent_n_cutoff:
l_keep = l_out_fh
r_keep = r_out_fh
l_keep.write(last_lheader)
l_keep.write(l_line)
r_keep.write(last_rheader)
r_keep.write(r_line)
counts['pairs_kept'] += 1
# both bad
else:
counts['pairs_discarded'] += 1
else:
# handle the third/fourth lines
if r_keep:
r_keep.write(r_line)
if l_keep:
l_keep.write(l_line)
if args.singletons is not None:
# s_in_fh
# s_out_fh
line_count = 0
record_count = 0
last_header = None
keep = False
for line in s_in_fh:
line_count += 1
if line_count % 4 == 1:
record_count += 1
last_header = line
keep = False
elif line_count % 4 == 2:
counts['total_reads'] += 1
pct_n = (line.count('N') / (len(line) - 1)) * 100
if pct_n < args.percent_n_cutoff:
keep = True
s_out_fh.write(last_header)
s_out_fh.write(line)
counts['singletons_kept'] += 1
else:
counts['singletons_discarded'] += 1
else:
if keep:
s_out_fh.write(line)
if args.output_report is not None:
report_fh = open(args.output_report, 'wt')
report_fh.write("Total input reads: {0}\n".format(counts['total_reads']))
report_fh.write("Read pairs kept: {0}\n".format(counts['pairs_kept']))
report_fh.write("Read pairs discarded: {0}\n".format(counts['pairs_discarded']))
report_fh.write("Left only reads kept: {0}\n".format(counts['left_only_kept']))
report_fh.write("Left only reads discarded: {0}\n".format(counts['left_only_discarded']))
report_fh.write("Right only reads kept: {0}\n".format(counts['right_only_kept']))
report_fh.write("Right only reads discarded: {0}\n".format(counts['right_only_discarded']))
report_fh.write("Singleton reads kept: {0}\n".format(counts['singletons_kept']))
report_fh.write("Singleton reads discarded: {0}\n".format(counts['singletons_discarded']))
if __name__ == '__main__':
main()
| StarcoderdataPython |
4843238 | <filename>eyecandy/helpers.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import decimal
import os
import subprocess
import sys
import time
def generate_effect():
# input, output, effect
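    # Re-invokes the effect script (sys.argv[3]) as a child process, passing the
    # input and output paths supplied as sys.argv[1] and sys.argv[2].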
subprocess.call(['python', sys.argv[3], sys.argv[1], sys.argv[2]])
def progressbar(it, prefix='Processing', prog='#',
sufix='Dialog Lines', size=70):
"""Progress Bar"""
count = len(it)
lenstrcount = len(str(count))
size = size - len(prefix) - len(sufix) - (lenstrcount * 2) - 4
def _show(i):
x = int(size * i / count)
percent = i / count
formatnum = ('{:' + str(lenstrcount) + 'd}/'
'{:' + str(lenstrcount) + 'd}')
progress = '{:s}: [{:s}{:s}] ' + formatnum + ' {:s} ({:d}%)\r'
# print(progress.format(
# prefix, prog * x, '.' * (size - x), i, count, sufix))
sys.stdout.write(progress.format(
prefix, prog * x, '.' * (size - x), i, count, sufix,
int(round(percent * 100))))
sys.stdout.flush()
_show(0)
for i, item in enumerate(it):
i += 1
_show(i)
yield item
def timer():
"""Cross platform timer"""
if os.name == 'nt':
return time.clock()
else:
return time.time()
# decorator
def timeit(method):
"""Timer decorator"""
def timed(*args, **kw):
ts = timer()
result = method(*args, **kw)
te = timer()
time_diff = int(round((te - ts) * 1000, 0))
seg, ms = divmod(time_diff, 1000)
m, s = divmod(seg, 60)
h, m = divmod(m, 60)
hstr = mstr = sstr = ''
if h:
hstr = ' {:d} hours '.format(h)
if m:
mstr = ' {:d} minutes '.format(m)
if s:
sstr = ' {:d} seconds '.format(s)
time_message = ('\n{:s} took {:s}{:s}{:s}'
'{:3d} miliseconds.\n'.format(
method.__name__, hstr, mstr, sstr, ms))
sys.stdout.write(time_message)
sys.stdout.flush()
return result
return timed
def round_format_str(number, decimals=5):
"""Round a number and remove trailing zeros"""
prec = len(str(float(number)).split('.')[0]) + decimals
context = decimal.Context(prec=prec)
dec = context.create_decimal_from_float(float(number))
tup = dec.as_tuple()
delta = len(tup.digits) + tup.exponent
digits = ''.join(str(d) for d in tup.digits)
if delta <= 0:
zeros = abs(tup.exponent) - len(tup.digits)
val = '0.' + ('0' * zeros) + digits
else:
val = digits[:delta] + ('0' * tup.exponent) + '.' + digits[delta:]
val = val.rstrip('0')
if val[-1] == '.':
val = val[:-1]
if tup.sign:
val = '-' + val
integer, decimals = str(float(val)).split(".")
if decimals == "0":
return integer
else:
return val
def start_file(filename):
if os.name == 'mac':
subprocess.call(('open', filename))
elif os.name == 'nt':
os.startfile(filename)
elif os.name == 'posix':
subprocess.call(('xdg-open', filename))
| StarcoderdataPython |
3297364 | """Reporting shifts Configuration
"""
from reporting.views import CreateIncidentReport, CreateInvolvedParties, ListIncidentTypes, ListInvolvedParties
from django.urls import path
urlpatterns = [
path('form-options/incident-types/',
ListIncidentTypes.as_view(), name="incident_types"),
path('form-options/involved-parties/',
ListInvolvedParties.as_view(), name="list_involved_parties"),
path('form-options/involved-parties/create/',
CreateInvolvedParties.as_view(), name="create_involved_party"),
path('incident-reports/create/', CreateIncidentReport.as_view(),
name="create_incident_report"),
]
| StarcoderdataPython |
1687945 | <gh_stars>0
'''Implements the `java_compiler_toolchain` rule.
Java compiler toolchain instances are created with `java_compiler_toolchain`
rule instances. A separate `toolchain` rule instance is used to declare a
`java_compiler_toolchain` instance has the type
`@dwtj_rules_java//java/toolchains/java_compiler_toolchain:toolchain_type`.
See [the Bazel Toolchain documentation][1] for more information.
An example might look something like this:
```build
java_compiler_toolchain(
name = "_my_javac",
javac_executable = ":my_javac_executable",
)
toolchain(
name = "my_javac",
exec_compatible_with = [
...
],
target_compatible_with = [
...
],
toolchain = ":_my_javac",
toolchain_type = "@dwtj_rules_java//java/toolchains/java_compiler_toolchain:toolchain_type",
)
```
[1]: https://docs.bazel.build/versions/3.4.0/toolchains.html
'''
JavaCompilerToolchainInfo = provider(
doc = "Specifies the tools, scripts, and configuration needed to compile and JAR Java targets.",
fields = {
"javac_executable": "A `javac` executable file (in the host configuration).",
"jar_executable": "A `jar` executable file (in the host configuration).",
"compile_java_jar_script_template": "A template for a script which is used to compile Java sources to a JAR file.",
"class_path_separator": "The class path separator to use when invoking this `javac` executable."
},
)
def _java_compiler_toolchain_impl(ctx):
java_compiler_toolchain_info = JavaCompilerToolchainInfo(
javac_executable = ctx.file.javac_executable,
jar_executable = ctx.file.jar_executable,
compile_java_jar_script_template = ctx.file._compile_java_jar_script_template,
class_path_separator = ctx.attr.class_path_separator,
)
toolchain_info = platform_common.ToolchainInfo(
java_compiler_toolchain_info = java_compiler_toolchain_info,
)
return [
toolchain_info,
java_compiler_toolchain_info,
]
java_compiler_toolchain = rule(
implementation = _java_compiler_toolchain_impl,
attrs = {
"javac_executable": attr.label(
allow_single_file = True,
mandatory = True,
executable = True,
cfg = "host",
),
"jar_executable": attr.label(
allow_single_file = True,
mandatory = True,
executable = True,
cfg = "host",
),
# NOTE(dwtj): This seems like a somewhat roundabout way to make this
# template available for instantiation in the `compile_java_jar()`
# helper function, but I haven't yet figured out another way to do it
# which resolves the label to a `File`.
# TODO(dwtj): Try the `Label()` constructor.
"_compile_java_jar_script_template": attr.label(
default = "//java:common/actions/TEMPLATE.compile_java_jar.sh",
allow_single_file = True,
),
"class_path_separator": attr.string(
default = ":", # Defaults to the Unix-like class-path separator.
)
},
provides = [JavaCompilerToolchainInfo]
)
| StarcoderdataPython |
3389881 | import tensorflow_datasets as tfds
import config
def load_dataset():
imdb, info = tfds.load('imdb_reviews',
data_dir=config.DATA_PATH,
with_info=True,
as_supervised=True)
return imdb, info
if __name__ == '__main__':
load_dataset()
| StarcoderdataPython |
22224 | from dataclasses import dataclass
from quran.domain.entity import Entity
@dataclass
class Edition(Entity):
id: str
language: str
name: str
translator: str
type: str
format: str
direction: str
| StarcoderdataPython |
177519 | <reponame>victorcouste/great_expectations
from great_expectations.cli.upgrade_helpers.upgrade_helper_v11 import UpgradeHelperV11
from great_expectations.cli.upgrade_helpers.upgrade_helper_v13 import UpgradeHelperV13
GE_UPGRADE_HELPER_VERSION_MAP = {
1: UpgradeHelperV11,
2: UpgradeHelperV13,
}
| StarcoderdataPython |
164174 | <reponame>luckdeluxe/hardware-store
from . import stripe
def create_card(user, token):
source = stripe.Customer.create_source(
user.customer_id,
source = token
)
return source
| StarcoderdataPython |
3264341 | <reponame>kant/flight-blender
from rest_framework import serializers
from .models import GeoFence
class GeoFenceSerializer(serializers.ModelSerializer):
altitude_ref = serializers.SerializerMethodField()
class Meta:
model = GeoFence
fields = '__all__'
def get_altitude_ref(self,obj):
return obj.get_altitude_ref_display()
| StarcoderdataPython |
1775401 | import os
from interface import interface
from flask import render_template
@interface.route('/')
@interface.route('/index')
def index():
images = []
for r, d, f in os.walk('../data/16_0/'):
for file in f:
if ".png" in file:
images.append(file)
print('\n'.join(images))
return render_template('index.html', title='Home', images=sorted(images)) | StarcoderdataPython |
1680117 | import sys
import random
assert sys.version_info >= (3, 7), "This script requires at least Python 3.7"
guessTotal = 0
answer = random.randint(1, 50)  # stay within the advertised 1-50 range
playerGuess = input("Choose a number between 1 and 50.\n")
while playerGuess != answer:
while not isinstance(playerGuess, int):
try:
playerGuess = int(playerGuess)
except ValueError:
print("Invalid entry.")
playerGuess = input("Choose a number between 1 and 50.\n")
if playerGuess < answer and playerGuess > 0:
print("Your guess is too low.")
guessTotal += 1
playerGuess = input("Choose a number between 1 and 50.\n")
elif playerGuess > answer and playerGuess < 51:
print("Your guess is too high.")
guessTotal += 1
playerGuess = input("Choose a number between 1 and 50.\n")
elif playerGuess > 50:
print("That number is out of range.")
guessTotal += 1
playerGuess = input("Choose a number between 1 and 50.\n")
elif playerGuess <= 0:
print("That number is out of range.")
guessTotal += 1
playerGuess = input("Choose a number between 1 and 50.\n")
else:
print("Correct!\nYour total number of guesses was: " + str(guessTotal))
| StarcoderdataPython |
1688801 | <gh_stars>1-10
import asyncio
import logging
import pytest
from mock import Mock
from tests import run10
from traio import Scope
def test_version():
"""Just ensure we have a version string"""
from traio.__version__ import __version__
assert isinstance(__version__, str)
def test_logging():
"""Logging Scope"""
Scope.set_debug(True)
scope = Scope()
assert scope.logger.level == logging.DEBUG
Scope.set_debug(False)
assert scope.logger.level >= logging.DEBUG
@pytest.mark.asyncio
async def test_empty():
"""Empty Scope"""
async with Scope():
pass
@pytest.mark.asyncio
async def test_empty_timeout():
"""Empty Scope"""
async with Scope(timeout=0.1):
pass
@pytest.mark.asyncio
async def test_simple():
"""Simple scope with one execution"""
async def run(m):
await asyncio.sleep(0.01)
m()
mock = Mock()
async with Scope() as n:
n << run(mock)
assert mock.called
@pytest.mark.asyncio
async def test_future():
"""Simple scope with a future; should not timeout"""
async with Scope(timeout=0.1) as n:
f = asyncio.Future()
n.spawn(f)
f.set_result(None)
@pytest.mark.asyncio
async def test_block_raises():
"""Raise an exception from the block"""
with pytest.raises(ValueError):
async with Scope():
raise ValueError('boom')
@pytest.mark.asyncio
async def test_task_raises():
"""Raise an exception from a task"""
async def raiser():
await asyncio.sleep(0.01)
raise ValueError('boom')
with pytest.raises(ValueError):
async with Scope() as n:
n << raiser()
@pytest.mark.asyncio
async def test_task_count():
"""Raise an exception from a task"""
async with Scope() as out:
for i in range(1, 11):
out << run10()
assert len(out.get_tasks()) == i
async with out.fork() as inner:
inner << run10()
inner << run10()
inner << run10()
assert len(out.get_tasks()) == 11
assert len(inner.get_tasks()) == 3
# Now cancel both
out.cancel()
| StarcoderdataPython |
115622 | <reponame>Infinity-LTD/discord_gradiusbot
import asyncio
import logging
logger = logging.getLogger('gradiusbot')
logger.info("[Public Plugin] <portals.py>: This plugin allows you to use portals to move your messages around!")
portal_dict = {}
@asyncio.coroutine
async def action(**kwargs):
message = kwargs['message']
config = kwargs['config']
client = kwargs['client']
# if the portal is blue, store a key:value of user:portal content
if '<:portal_blue:502170507198857226>' in message.content:
remaining_content = message.content.replace('<:portal_blue:502170507198857226>', '')
portal_dict[message.author.id] = {"content": remaining_content, "message": message}
# if the portal is red, check to see if the user has any blue portals, and move the content from there, to here.
if '<:portal_orange:502170488680874004>' in message.content:
if message.author.id in portal_dict.keys() and portal_dict[message.author.id]:
await message.channel.send('<@' + str(message.author.id) + '>: <:portal_orange:502170488680874004>' + portal_dict[message.author.id]['content'])
await portal_dict[message.author.id]['message'].delete()
await message.delete()
portal_dict[message.author.id] = None
| StarcoderdataPython |
1782600 | from django.db import models
# Create your models here.
class feedback(models.Model):
name = models.TextField()
email = models.TextField()
subject = models.TextField()
patient = models.CharField(max_length=30)
def __str__(self):
return self.name
| StarcoderdataPython |
128319 | # Number card game - 1
# read n and m from input, separated by a space
n, m = map(int, input().split())
result = 0
for i in range(n):
data = list(map(int, input().split()))
min_value = min(data)
result = max(result, min_value)
print(result) | StarcoderdataPython |
4812477 | # -*- coding: utf-8 -*-
# Copyright (c) 2017, Frappe and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe, json, hub
from frappe.website.website_generator import WebsiteGenerator
from hub.hub.utils import autoname_increment_by_field
class HubItem(WebsiteGenerator):
website = frappe._dict(
page_title_field = "item_name"
)
def autoname(self):
super(HubItem, self).autoname()
self.hub_item_code = self.name
self.name = autoname_increment_by_field(self.doctype, 'hub_item_code', self.name)
def validate(self):
if self.image.startswith('/'):
site_name = frappe.db.get_value('Hub Company', self.company_name, 'site_name')
self.image = '//' + site_name + self.image
if not self.route:
self.route = 'items/' + self.name
def get_context(self, context):
context.no_cache = True
def get_list_context(context):
context.allow_guest = True
context.no_cache = True
context.title = 'Items'
context.no_breadcrumbs = True
context.order_by = 'creation desc'
| StarcoderdataPython |
104159 | <filename>Lab_6/data/core/myservices/pcpingweb01.py
#
# CORE
# Copyright (c)2010-2012 the Boeing Company.
# See the LICENSE file included in this distribution.
#
''' WAN BGP user-defined service.
'''
import os
from core.service import CoreService, addservice
from core.misc.ipaddr import IPv4Prefix, IPv6Prefix
class PCPingWEB01(CoreService):
    ''' This service pings the WEB01 host (198.18.11.10) from the node and writes the
        results to /home/vagrant/output/pingtest<node>.log.
    '''
# a unique name is required, without spaces
_name = "PingWEB01"
# you can create your own group here
_group = "WAN_BGP"
# list of other services this service depends on
_depends = ()
# per-node directories
_dirs = ()
# generated files (without a full path this file goes in the node's dir,
# e.g. /tmp/pycore.12345/n1.conf/)
_configs = ('startpc.sh', )
# this controls the starting order vs other enabled services
_startindex = 60
# list of startup commands, also may be generated during startup
_startup = ('sh startpc.sh',)
# list of shutdown commands
_shutdown = ()
@classmethod
def generateconfig(cls, node, filename, services):
''' Return a string that will be written to filename, or sent to the
GUI for user customization.
'''
cfg = "#!/bin/sh\n"
cfg += "# auto-generated by PCPingWEB01 (pcpingweb01.py)\n"
for ifc in node.netifs():
cfg += 'echo "Node %s has interface %s"\n' % (node.name, ifc.name)
# here we do something interesting
cfg += "\n".join(map(cls.subnetentry, ifc.addrlist))
break
cfg += "\n"
# Test network before starting operation
cfg += "ping -n -D -c 50 -W 70 198.18.11.10 >/home/vagrant/output/pingtest%s.log\n" % (node.name)
return cfg
@staticmethod
def subnetentry(x):
''' Generate a subnet declaration block given an IPv4 prefix string
for inclusion in the config file.
'''
if x.find(":") >= 0:
# this is an IPv6 address
return ""
else:
net = IPv4Prefix(x)
return 'echo " network %s"' % (net)
# this line is required to add the above class to the list of available services
addservice(PCPingWEB01)
| StarcoderdataPython |
146803 | # Copyright 2006-2007 Virtutech AB
import sim_commands
def checkbit(a, bit):
if a & (1 << bit):
return 1
else:
return 0
def get_info(obj):
return [ (None, [
("PHY object", obj.phy),
] ) ] + sim_commands.get_pci_info(obj)
def get_status(obj):
csr0 = obj.csr_csr0
csr0a = "INIT=%d STRT=%d STOP=%d TDMD=%d TXON=%d RXON=%d INEA=%d INTR=%d" % (
checkbit(csr0, 0), checkbit(csr0, 1), checkbit(csr0, 2), checkbit(csr0, 3),
checkbit(csr0, 4), checkbit(csr0, 5), checkbit(csr0, 6), checkbit(csr0, 7))
csr0b = "IDON=%d TINT=%d RINT=%d MERR=%d MISS=%d CERR=%d BABL=%d ERR=%d" % (
checkbit(csr0, 8), checkbit(csr0, 9), checkbit(csr0, 10), checkbit(csr0, 11),
checkbit(csr0, 12), checkbit(csr0, 13), checkbit(csr0, 14), checkbit(csr0, 15))
return ([ (None,
[ ("CSR0", csr0a),
("", csr0b),
("CSR1", "0x%x" % obj.csr_csr1),
("CSR2", "0x%x" % obj.csr_csr2),
("CSR3", "BCON=%d ACON=%d BSWP=%d" % (
(checkbit(obj.csr_csr3, 0), checkbit(obj.csr_csr3, 1), checkbit(obj.csr_csr3, 2)))),
("CSR15", "0x%x" % obj.csr_csr15),
("RAP", obj.ioreg_rap) ]),
] + sim_commands.get_pci_status(obj))
sim_commands.new_pci_header_command('AM79C973', None)
sim_commands.new_info_command('AM79C973', get_info)
sim_commands.new_status_command('AM79C973', get_status)
| StarcoderdataPython |
3212415 | <filename>utils/csv_parser.py
import pandas as pd
def get_co_authorship(csv_file_path, year):
data = pd.read_csv(csv_file_path, usecols=['id', 'date', 'authors'], index_col=False)
data['publication_year'] = data['date'].apply(lambda x: x.split('-')[0])
data['publication_month'] = data['date'].apply(lambda x: x.split('-')[1])
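    # NOTE: the 'year' argument is unused and the bare return below yields None;
    # the parsed frame is not returned to the caller yet.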
return | StarcoderdataPython |
3363461 | from django.contrib import admin
from django.contrib.auth.forms import UserCreationForm, UserChangeForm
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.models import Group
from .models import TUser
# Register your models here.
class TUserAdmin(UserAdmin):
form = UserChangeForm
add_form = UserCreationForm
list_display = ('email', 'signup_date')
ordering = ('email',)
fieldsets = (
('UserInfo', {'fields':('email',)}),
)
# add_fieldsets = (('email',{'fields':'email'}),)
admin.site.register(TUser, TUserAdmin)
admin.site.unregister(Group)
| StarcoderdataPython |
58305 | <reponame>KarrLab/python_package_tutorial<gh_stars>10-100
from . import boolean
from . import dfba
from . import ode
from . import stochastic
from . import multi_algorithm
from . import mrna_and_proteins_using_several_methods
| StarcoderdataPython |
1637400 | <reponame>JorisDeRieck/hass-nhc2
"""Support for NHC2 lights."""
import logging
from homeassistant.components.light import LightEntity, SUPPORT_BRIGHTNESS, ATTR_BRIGHTNESS
from nhc2_coco import CoCoLight, CoCo
from nhc2_coco.coco_device_class import CoCoDeviceClass
from .const import DOMAIN, KEY_GATEWAY, BRAND, LIGHT
from .helpers import nhc2_entity_processor
KEY_GATEWAY = KEY_GATEWAY
KEY_ENTITY = 'nhc2_lights'
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Load NHC2 lights based on a config entry."""
hass.data.setdefault(KEY_ENTITY, {})[config_entry.entry_id] = []
gateway: CoCo = hass.data[KEY_GATEWAY][config_entry.entry_id]
_LOGGER.debug('Platform is starting')
gateway.get_devices(CoCoDeviceClass.LIGHTS,
nhc2_entity_processor(hass,
config_entry,
async_add_entities,
KEY_ENTITY,
lambda x: NHC2HassLight(x))
)
class NHC2HassLight(LightEntity):
"""Representation of an NHC2 Light."""
def __init__(self, nhc2light: CoCoLight, optimistic=True):
"""Initialize a light."""
self._nhc2light = nhc2light
self._optimistic = optimistic
self._is_on = nhc2light.is_on
if self._nhc2light.support_brightness:
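            # The CoCo brightness value is treated here as a 0-100 percentage, while
            # Home Assistant expects 0-255, hence the * 2.55 scaling below
            # (and the / 2.55 conversion when writing brightness back).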
if self._is_on is False:
self._brightness = 0
else:
self._brightness = round(self._nhc2light.brightness * 2.55)
else:
self._brightness = None
nhc2light.on_change = self._on_change
def _on_change(self):
self._is_on = self._nhc2light.is_on
if self._nhc2light.support_brightness:
if self._is_on is False:
self._brightness = 0
else:
self._brightness = round(self._nhc2light.brightness * 2.55)
self.schedule_update_ha_state()
def turn_off(self, **kwargs) -> None:
"""Pass - not in use."""
pass
def turn_on(self, **kwargs) -> None:
"""Pass - not in use."""
pass
async def async_turn_on(self, **kwargs):
"""Instruct the light to turn on."""
self._nhc2light.turn_on()
brightness = kwargs.get(ATTR_BRIGHTNESS)
if self._nhc2light.support_brightness and brightness is not None:
self._nhc2light.set_brightness(round((brightness) / 2.55))
if self._optimistic:
self._is_on = True
self.schedule_update_ha_state()
async def async_turn_off(self, **kwargs):
"""Instruct the light to turn off."""
self._nhc2light.turn_off()
if self._optimistic:
self._is_on = False
self.schedule_update_ha_state()
def nhc2_update(self, nhc2light: CoCoLight):
"""Update the NHC2 light with a new object."""
self._nhc2light = nhc2light
nhc2light.on_change = self._on_change
self.schedule_update_ha_state()
@property
def unique_id(self):
"""Return the lights UUID."""
return self._nhc2light.uuid
@property
def uuid(self):
"""Return the lights UUID."""
return self._nhc2light.uuid
@property
def should_poll(self):
"""Return false, since the light will push state."""
return False
@property
def name(self):
"""Return the lights name."""
return self._nhc2light.name
@property
def available(self):
"""Return true if the light is online."""
return self._nhc2light.online
@property
def brightness(self):
"""Return the brightness of this light between 0..255."""
return self._brightness
@property
def is_on(self):
"""Return true if the light is on."""
return self._is_on
@property
def device_info(self):
"""Return the device info."""
return {
'identifiers': {
(DOMAIN, self.unique_id)
},
'name': self.name,
'manufacturer': BRAND,
'model': LIGHT,
'via_hub': (DOMAIN, self._nhc2light.profile_creation_id),
}
@property
def supported_features(self):
"""Return supported features."""
if self._nhc2light.support_brightness:
return SUPPORT_BRIGHTNESS
return 0
| StarcoderdataPython |
1719726 | import numpy as np
import torch
def getIndeices(shape,height,width,stride,dialation, offset):
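    # Builds im2col-style index grids for patch extraction: for every output
    # position it lists the (row, col) indices of a height x width window,
    # honouring stride and dilation, with a cyclic shift by `offset`
    # (wrap-around via the modulo in the return).
    # e.g. getIndeices((8, 8), 3, 3, 1, 1, 0) returns two arrays of shape (36, 9).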
H, W = shape
outHeight = (H - dialation*(height-1)-1) // stride +1
outWidth = (W - dialation*(width-1)-1) // stride +1
i0 = np.repeat(np.arange(height)*dialation, width)
i1 = stride * np.repeat(np.arange(outHeight), outWidth)
j0 = np.tile(np.arange(width)*dialation, height)
j1 = stride * np.tile(np.arange(outWidth), outHeight)
i = i0.reshape(-1, 1) + i1.reshape(1, -1)
j = j0.reshape(-1, 1) + j1.reshape(1, -1)
return (i.transpose(1,0)+offset)%H, (j.transpose(1,0)+offset)%W
def dispatch(i,j,x):
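    # Gathers the patches addressed by the index grids (i, j) from x
    # (assumed shape N x C x H x W) and returns both the original tensor and
    # the gathered patch tensor of shape N x C x i.shape[0] x i.shape[1].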
x_ = x[:,:,i,j].reshape(x.shape[0],x.shape[1],i.shape[0],i.shape[1])
return x, x_
def collect(i,j,x,x_):
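    # Inverse of dispatch: clones x and scatters the (possibly modified) patches
    # x_ back into the positions given by the index grids (i, j).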
xi = x.clone()
xi[:,:,i,j]=x_.reshape(x.shape[0],-1,i.shape[0],i.shape[1])
return xi | StarcoderdataPython |
1631043 | <reponame>kvchen/keffbot
import binascii
import git
import sys
import os
import logging
logger = logging.getLogger('root')
__match__ = r"!update|!reload"
def on_message(bot, channel, user, message):
requires_reload = message == '!reload'
if message == '!update':
local = git.Repo(os.getcwd())
origin = git.remote.Remote(local, 'origin')
prev_commit = local.heads[0].commit
logger.info("Updating from origin repository")
for pull_info in origin.pull():
if prev_commit == pull_info.commit:
bot.send_text(channel, "`{}` is already up-to-date!".format(
bot.name))
break
requires_reload = True
commit_hash = binascii.hexlify(pull_info.commit.binsha).decode()
commit_message = pull_info.commit.message.strip()
bot.send_text(channel, "*Fast-forwarding* to `{}`".format(
commit_hash))
logger.debug("Fast-forwarding to {}".format(commit_hash))
bot.send_text(channel, "*Latest commit*: `{}`".format(
commit_message))
logger.debug("Latest commit: {}".format(commit_message))
if requires_reload:
bot.send_text(channel, "_Reloading...see you on the other side!_")
python = sys.executable
os.execl(python, python, *sys.argv)
| StarcoderdataPython |
3368313 | <filename>tests/test_fileops.py
from utils.fileops import get_abs_path
from pathlib import Path
def test_get_abs_path():
x = get_abs_path()
assert x.is_file(), "Absolute path not generated correctly!" | StarcoderdataPython |
63858 | # Generated by Django 3.1.5 on 2021-01-19 08:20
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('finances', '0001_initial'),
('home', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='payment',
name='client',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='home.client'),
),
migrations.AddField(
model_name='payment',
name='fee',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='finances.fee', verbose_name='Payment For'),
),
migrations.AddField(
model_name='payment',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='Added By'),
),
migrations.AddField(
model_name='fee',
name='job',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='home.job'),
),
migrations.AddField(
model_name='expense',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='Entered By'),
),
]
| StarcoderdataPython |
190383 | <reponame>domwillcode/home-assistant
"""The mfi component."""
| StarcoderdataPython |
1675615 | import cv2
import numpy as np
import glob
from scipy.stats import multivariate_normal
import copy
out = cv2.VideoWriter('3D_GMM.avi', cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'), 5, (640, 480))
DATASET = "DETECTBUOY-FRAMES/Data"
def Ellipse_Fit(mask):
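    # Fits ellipses to contours found in the binary mask, keeping only contours
    # with a moderate area (300-5000 px) and axis lengths within 30% of each
    # other (roughly circular), which the script treats as candidate buoys.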
processed = mask.astype(np.uint8)
processed = cv2.GaussianBlur(processed, (5, 5), cv2.BORDER_DEFAULT)
ret, thresh = cv2.threshold(processed, 60, 255, cv2.THRESH_BINARY)
contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
ellipses = []
for cnt in contours:
if cv2.contourArea(cnt) > 300 and cv2.contourArea(cnt) < 5000:
ellipses.append(cv2.fitEllipse(cnt))
outEllipse = []
for ell in ellipses:
(x, y), (MA, ma), angle = ell
if abs(MA / ma - 1) < 0.3:
outEllipse.append(ell)
return outEllipse
def High_PDF(prob, threshold):
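    # Per-pixel class assignment: zero out probabilities below `threshold`, then
    # keep only the channel holding each pixel's maximum probability, producing a
    # 255-valued mask per channel with the original H x W x channels shape.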
p = prob.reshape((prob.shape[0] * prob.shape[1], prob.shape[2]))
q = np.multiply(p, p > threshold)
b = np.multiply(q > 0, np.equal(q, np.max(q, axis=-1, keepdims=True))) * 255
c = b.reshape((prob.shape[0], prob.shape[1], prob.shape[2]))
return c
def Water_Mask(frame):
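    # Each colour class (red, green, yellow buoy and water) is scored with a
    # 3-component Gaussian mixture over the pixel values, using pre-computed
    # means and covariances; the component densities are summed per class.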
# For redBuoy1
mean = np.array([80.27603646, 141.43706643, 253.22644464])
cov = np.array([[190.60613704, 201.66921469, -5.62641894],
[201.66921469, 340.80624709, -14.2263423],
[-5.62641894, -14.2263423, 3.51000389]])
P_RB1 = multivariate_normal.pdf(frame, mean, cov)
# For redBuoy2
mean = np.array([129.75146712, 187.0247840, 232.87476706])
cov = np.array([[792.3089489, 966.06181438, -76.63443504],
[966.06181438, 1358.97343543, -15.6558208],
[-76.63443504, -15.6558208, 274.29810684]])
P_RB2 = multivariate_normal.pdf(frame, mean, cov)
# For redBuoy3
mean = np.array([117.81710669, 204.2309085, 239.41048976])
cov = np.array([[443.75427994, 518.28342899, -139.95097112],
[518.28342899, 707.05237291, -187.05091184],
[-139.95097112, -187.05091184, 64.27720605]])
P_RB3 = multivariate_normal.pdf(frame, mean, cov)
P_RB = P_RB1 + P_RB2 + P_RB3
# For Green1
mean = np.array([112.05003011, 183.18656764, 103.53271839])
cov = np.array([[98.18729895, 128.48175019, 111.23031125],
[128.48175019, 372.47086917, 237.17047113],
[111.23031125, 237.17047113, 230.78640153]])
P_GB1 = multivariate_normal.pdf(frame, mean, cov)
# For Green2
mean = np.array([125.22320558, 229.46544678, 142.17248589])
cov = np.array([[83.42004155, 109.12603316, 133.04099339],
[109.12603316, 181.75339967, 209.44426981],
[133.04099339, 209.44426981, 280.21373779]])
P_GB2 = multivariate_normal.pdf(frame, mean, cov)
# For Green3
mean = np.array([150.32076907, 239.42616469, 187.56685088])
cov = np.array([[296.42463121, 109.06686387, 351.389052],
[109.06686387, 138.29429843, 172.87515629],
[351.389052, 172.87515629, 653.94501523]])
P_GB3 = multivariate_normal.pdf(frame, mean, cov)
P_GB = P_GB1 + P_GB2 + P_GB3
# For yellowBuoy
mean = np.array([93.18674196, 204.10273852, 208.83574233])
cov = np.array([[325.95744462, 14.78707018, -304.72169773],
[14.78707018, 161.85807802, 267.4821683],
[-304.72169773, 267.4821683, 890.87026603]])
P_YB = multivariate_normal.pdf(frame, mean, cov)
# For Water1
mean = np.array([154.242466 ,228.26091272,233.45074722])
cov = np.array([[59.2038326 , 46.17327671, 5.3503438 ],
[46.17327671, 58.66903207, -7.51014766],
[ 5.3503438 , -7.51014766, 26.28058457]])
P_W1 = multivariate_normal.pdf(frame, mean, cov)
mean = np.array([141.96297332 ,204.83155696,220.47708726])
cov = np.array([[100.70632783, 148.60410607, 59.9378063 ],
[148.60410607, 320.22102525, 129.64470878],
[ 59.9378063 , 129.64470878, 121.25904618]])
P_W2 = multivariate_normal.pdf(frame, mean, cov)
mean = np.array([178.2135104 ,238.03114502 ,180.63696875])
cov = np.array([[ 44.16861721, 46.21022285, 68.88757629],
[ 46.21022285, 58.90147946, 78.51143783],
[ 68.88757629, 78.51143783, 203.85445566]])
P_W3 = multivariate_normal.pdf(frame, mean, cov)
P_W = P_W1 + P_W2 + P_W3
prob = np.zeros((frame.shape[0], frame.shape[1], 4))
prob[:, :, 0] = P_RB
prob[:, :, 1] = P_GB
prob[:, :, 2] = P_YB
prob[:, :, 3] = P_W * 0.99
# best results with Multiply
RGY_Buoy = High_PDF(prob, 1e-15) # -15
return RGY_Buoy
def Buoy_data(waterRemoved):
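    # Same idea as Water_Mask but applied to the water-suppressed image: each
    # buoy colour uses a 3-component Gaussian mixture with pre-computed mixture
    # weights (Pi), means and covariances.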
# For redBuoy1
mean = np.array([129.75151074, 187.02495822, 232.87487513])
cov = np.array([[792.30842907, 966.0620035, -76.63515958],
[966.0620035, 1358.97477086, -15.65802897],
[-76.63515958, -15.65802897, 274.29390402]])
P_RB1 = multivariate_normal.pdf(waterRemoved, mean, cov)
# For redBuoy2
mean = np.array([117.81699529, 204.23082796, 239.41051339])
cov = np.array([[443.75320996, 518.2835338, -139.95105276],
[518.2835338, 707.05318175, -187.05121695],
[-139.95105276, -187.05121695, 64.27726249]])
P_RB2 = multivariate_normal.pdf(waterRemoved, mean, cov)
# For redBuoy3
mean = np.array([81.53413865, 141.57207486, 253.14210245])
cov = np.array([[228.92875888, 224.1567059, -7.02999134],
[224.1567059, 339.10305449, -13.59245238],
[-7.02999134, -13.59245238, 3.91363665]])
P_RB3 = multivariate_normal.pdf(waterRemoved, mean, cov)
PiRb = np.array([0.15838274, 0.38113269, 0.44139788])
P_RB = PiRb[0] * P_RB1 + PiRb[1] * P_RB2 + PiRb[2] * P_RB3
# For Green1
mean = np.array([110.15586103, 177.988079, 97.8360865])
cov = np.array([[82.84302567, 106.35540435, 74.22384909],
[106.35540435, 306.33086617, 154.3897207],
[74.22384909, 154.3897207, 118.64202382]])
P_GB1 = multivariate_normal.pdf(waterRemoved, mean, cov)
# For Green2
mean = np.array([124.00448114, 217.39861905, 136.44552769])
cov = np.array([[135.27527716, 132.43005772, 186.54968698],
[132.43005772, 361.10595221, 281.7120668],
[186.54968698, 281.7120668, 375.55342302]])
P_GB2 = multivariate_normal.pdf(waterRemoved, mean, cov)
# For Green3
mean = np.array([152.97075593, 244.63284543, 194.2491698])
cov = np.array([[269.37418864, 37.51788466, 286.85356749],
[37.51788466, 38.57928137, 14.06820397],
[286.85356749, 14.06820397, 491.56890665]])
P_GB3 = multivariate_normal.pdf(waterRemoved, mean, cov)
PiGb = np.array([0.39978126, 0.38033716, 0.19886462])
P_GB = PiGb[0] * P_GB1 + PiGb[1] * P_GB2 + PiGb[2] * P_GB3
# For yellowBuoy1
mean = np.array([124.48956165, 235.49979435, 232.22955126])
cov = np.array([[1165.98834055, 180.00433825, -59.25367115],
[180.00433825, 78.85588687, 20.33064827],
[-59.25367115, 20.33064827, 81.66227936]])
P_YB1 = multivariate_normal.pdf(waterRemoved, mean, cov)
# For yellowBuoy
mean = np.array([93.18674196, 204.10273852, 208.83574233])
cov = np.array([[325.95744462, 14.78707018, -304.72169773],
[14.78707018, 161.85807802, 267.4821683],
[-304.72169773, 267.4821683, 890.87026603]])
P_YB2 = multivariate_normal.pdf(waterRemoved, mean, cov)
# For yellowBuoy
mean = np.array([138.56180468, 240.07565167, 229.07810767])
cov = np.array([[775.88598663, -42.21694591, -40.46084514],
[-42.21694591, 4.60254418, 2.08209706],
[-40.46084514, 2.08209706, 6.96561565]])
P_YB3 = multivariate_normal.pdf(waterRemoved, mean, cov)
PiYb = np.array([0.26255614, 0.2175131, 0.50246477])
P_YB = PiYb[0] * P_YB1 + PiYb[1] * P_YB2 + PiYb[2] * P_YB3
prob = np.zeros((frame.shape[0], frame.shape[1], 3))
prob[:, :, 0] = P_RB
prob[:, :, 1] = P_GB
prob[:, :, 2] = P_YB
RGY_Buoy_2 = High_PDF(prob, 1e-6) # -20
return RGY_Buoy_2
def Draw_Ellipse(RGY_Buoy_2,Image_Input):
ellipseR = Ellipse_Fit(RGY_Buoy_2[:, :, 0].astype(np.uint8))
Image_Input_1 = copy.deepcopy(Image_Input)
for ell in ellipseR:
cv2.ellipse(Image_Input_1, ell, (0, 0, 255), 5)
ellipseG = Ellipse_Fit(RGY_Buoy_2[:, :, 1].astype(np.uint8))
for ell in ellipseG:
cv2.ellipse(Image_Input_1, ell, (0, 255, 0), 5)
ellipseY = Ellipse_Fit(RGY_Buoy_2[:, :, 2].astype(np.uint8))
for ell in ellipseY:
cv2.ellipse(Image_Input_1, ell, (0, 255, 255), 5)
return Image_Input_1
for file in glob.glob(f"{DATASET}/*.jpg"):
Image_Input = cv2.imread(file)
frame = np.zeros((Image_Input.shape[0], Image_Input.shape[1], 3))
frame[:, :, 0] = Image_Input[:, :, 0]
frame[:, :, 1] = Image_Input[:, :, 1]
frame[:, :, 2] = Image_Input[:, :, 2]
    ## Order of probabilities: red, green, yellow, water
RGY_Buoy = Water_Mask(frame)
Water_Remove = RGY_Buoy[:, :, 3].astype(np.int8)
Water_Remove = cv2.bitwise_not(Water_Remove)
waterRemoved = cv2.bitwise_and(Image_Input, Image_Input, mask=Water_Remove)
# cv2.imshow("WATERMASK",waterRemoved)
RGY_Buoy_2 = Buoy_data(waterRemoved)
redBuoySegement = cv2.bitwise_and(Image_Input, Image_Input, mask=RGY_Buoy_2[:, :, 0].astype(np.int8))
greenBuoySegment = cv2.bitwise_and(Image_Input, Image_Input, mask=RGY_Buoy_2[:, :, 1].astype(np.int8))
yellowBuoySegment = cv2.bitwise_and(Image_Input, Image_Input, mask=RGY_Buoy_2[:, :, 2].astype(np.int8))
# cv2.imshow("R-BUOY",redBuoySegement)
# cv2.imshow("G-BUOY",greenBuoySegment)
# cv2.imshow("Y-BUOY",yellowBuoySegment)
Image_Input_1 = Draw_Ellipse(RGY_Buoy_2, Image_Input)
out.write(Image_Input_1)
cv2.imshow("ALL-BUOY-DETECT", Image_Input_1)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
out.release()
cv2.destroyAllWindows() | StarcoderdataPython |
1789165 | <filename>Backend Server/AdminRoutes/admin_frontpage.py
from app import app
from db_config import *
from flask import jsonify, request
@app.route("/api/Admin/view/items_sold_unsold", methods=["GET"])
def sold_unsold_items():
conn = mysql.connection
cursor = conn.cursor()
try:
sql = f"""-- sql
SELECT `Sold`.`Product ID` AS `IID`, `Sold Items`, `Unsold Items`
FROM (SELECT `Product ID`, COUNT(*) AS `Sold Items`
FROM `Item`
WHERE `Purchase Status` = "Sold"
GROUP BY `Product ID`) AS `Sold`
INNER JOIN (SELECT `Product ID`, COUNT(*) AS `Unsold Items`
FROM `Item`
WHERE `Purchase Status` = "Unsold"
GROUP BY `Product ID`) AS `Unsold`
ON `Sold`.`Product ID` = `Unsold`.`Product ID`;
"""
cursor.execute(sql)
conn.commit()
rows = cursor.fetchall()
resp = jsonify(success=rows)
resp.status_code = 200
return resp
except Exception as e:
print(str(e))
return invalid("Something went wrong when finding Items")
finally:
cursor.close()
@app.route("/api/Admin/view/items_sold_cat_model", methods=["GET"])
def sold_items_cat_model():
conn = mysql.connection
cursor = conn.cursor()
try:
sql = f"""-- sql
SELECT p1.Category, p1.Model, COUNT(`Item ID`) AS "Number of Sold Item"
FROM OSHES.Item
LEFT JOIN OSHES.Product p1 ON OSHES.Item.`Product ID` = p1.`Product ID`
WHERE OSHES.Item.`Purchase Status` = "Sold"
GROUP BY p1.Category, p1.Model
ORDER BY 1;
"""
cursor.execute(sql)
conn.commit()
rows = cursor.fetchall()
resp = jsonify(success=rows)
resp.status_code = 200
return resp
except Exception as e:
print(str(e))
return invalid("Something went wrong when finding Items")
finally:
cursor.close()
@app.route("/api/Admin/view/customers_unpaid", methods=["GET"])
def unpaid_customers():
# insert auto canceler after 10 days
# then view all customers with unpaid service fees
conn = mysql.connection
cursor = conn.cursor()
try:
sql1 = f"""-- sql
UPDATE `Request` SET `Request Status` =
CASE WHEN `Request Status` = "Submitted and Waiting for Payment" AND curdate() > DATE_ADD(`Request Date`, INTERVAL 10 DAY)
THEN "Canceled"
ELSE `Request Status`
END;
"""
sql2 = f"""-- sql
SELECT c1.`Customer ID`, c1.`Name`, c1.`Gender`, c1.`PhoneNumber`, c1.`Email`, c1.`Address`
FROM OSHES.Customer c1
INNER JOIN OSHES.Request r1 ON c1.`Customer ID` = r1.`Customer ID`
WHERE r1.`Request Status` = "Submitted and Waiting for payment"
ORDER BY c1.`Customer ID`;
"""
cursor.execute(sql1)
cursor.execute(sql2)
conn.commit()
rows = cursor.fetchall()
resp = jsonify(success=rows)
resp.status_code = 200
return resp
except Exception as e:
print(str(e))
return invalid("Something went wrong when fetching customers")
finally:
cursor.close()
@app.errorhandler(404)
def not_found(error):
message = {
'status': 404,
'error': 'Not Found: ' + error,
'error location': request.url
}
response = jsonify(message)
response.status_code = 404
return response
@app.errorhandler(500)
def invalid(error):
message = {
'status': 500,
'error': error,
'error location': request.url
}
response = jsonify(message)
response.status_code = 500
return response
| StarcoderdataPython |
1628126 | <gh_stars>1-10
limit = int( input() )
data = []
ans = []
for y in range( limit ):
data.append( [] )
ans.append( [] )
for x in range( limit ):
data[ y ].append( [] )
data[ y ][ x ] = 0
ans[ y ].append( [] )
ans[ y ][ x ] = 0
print( "data =", data )
print( "ans =", ans )
i = 0
for y in range( limit ):
for x in range( limit ):
i += 1
data[ y ][ x ] = i
print( "data =", data )
i = 0
t = 0
n = limit
y = 0
x = 0
yd = 0
xd = 0
while i < limit ** 2:
if n % 4 == 1:
xd = 1
if n % 4 == 2:
yd = 1
if n % 4 == 3:
xd = -1
if n % 4 == 0:
yd = -1
while t <= ( n // 1 ):
print( "y", y + ((limit - int( n // 1 )) * yd), "| x", x + ((limit - int( n // 1 )) * xd) )
ans[ y + ( ( limit - int( n // 1 ) ) * yd ) ][ x + ( ( limit - int( n // 1 ) ) * xd ) ] = i
print( "i", i, "| t", t, "| n", n )
print( "ans =", ans )
x += xd
y += yd
t += 1
i += 1
print()
t = 0
yd = 0
xd = 0
n -= 0.5 | StarcoderdataPython |
3396761 | <filename>mail.py
# Import smtplib for the actual sending function
import smtplib
# Import the email modules we'll need
from email.message import EmailMessage
# Build a text/plain message from the body supplied by the caller
def sendMail(recipient, textbody):
msg = EmailMessage()
msg.set_content(textbody)
# me == the sender's email address
# you == the recipient's email address
msg['Subject'] = "Certificate Warning"
msg['From'] = "<EMAIL>"
msg['To'] = recipient
# Send the message via our own SMTP server.
s = smtplib.SMTP('localhost')
s.send_message(msg)
s.quit()
sendMail("<EMAIL>", "Test") | StarcoderdataPython |
1619633 | stations = { 'acheng': 'ACB',
'aershan': 'ART',
'aershanbei': 'ARX',
'aihe': 'AHP',
'aijiacun': 'AJJ',
'ajin': 'AJD',
'akesu': 'ASR',
'aketao': 'AER',
'alashankou': 'AKR',
'alihe': 'AHX',
'alongshan': 'ASX',
'amuer': 'JTX',
'ananzhuang': 'AZM',
'anda': 'ADX',
'ande': 'ARW',
'anding': 'ADP',
'angangxi': 'AAX',
'anguang': 'AGT',
'anhua': 'PKQ',
'anjia': 'AJB',
'ankang': 'AKY',
'ankouyao': 'AYY',
'anlong': 'AUZ',
'anlu': 'ALN',
'anping': 'APT',
'anqing': 'AQH',
'anqingxi': 'APH',
'anren': 'ARG',
'anshan': 'AST',
'anshanxi': 'AXT',
'anshun': 'ASW',
'anshunxi': 'ASE',
'antang': 'ATV',
'antingbei': 'ASH',
'antu': 'ATL',
'antuxi': 'AXL',
'anxi': 'AXS',
'anyang': 'AYF',
'anyangdong': 'ADF',
'aojiang': 'ARH',
'aolibugao': 'ALD',
'atushi': 'ATR',
'babu': 'BBE',
'bachu': 'BCR',
'badaling': 'ILP',
'badong': 'BNN',
'baibiguan': 'BGV',
'baicheng': 'BCT',
'baigou': 'FEP',
'baiguo': 'BGM',
'baihe': 'BEL',
'baihedong': 'BIY',
'baihexian': 'BEY',
'baijian': 'BAP',
'baijigou': 'BJJ',
'baijipo': 'BBM',
'baikuipu': 'BKB',
'bailang': 'BRZ',
'bailixia': 'AAP',
'baimajing': 'BFQ',
'baiqi': 'BQP',
'baiquan': 'BQL',
'baise': 'BIZ',
'baisha': 'BSW',
'baishanshi': 'HJL',
'baishapo': 'BPM',
'baishishan': 'BAL',
'baishuijiang': 'BSY',
'baishuixian': 'BGY',
'baishuizhen': 'BUM',
'baiyangdian': 'FWP',
'baiyi': 'FHW',
'baiyinchagan': 'BYC',
'baiyinhuanan': 'FNC',
'baiyinhushuo': 'BCD',
'baiyinshi': 'BNJ',
'baiyintala': 'BID',
'baiyinxi': 'BXJ',
'baiyunebo': 'BEC',
'bajiaotai': 'BTD',
'balin': 'BLX',
'bamiancheng': 'BMD',
'bamiantong': 'BMB',
'bancheng': 'BUP',
'banmaoqing': 'BNM',
'bantian': 'BTQ',
'baodi': 'BPP',
'baoding': 'BDP',
'baodingdong': 'BMP',
'baohuashan': 'BWH',
'baoji': 'BJY',
'baojinan': 'BBY',
'baokang': 'BKD',
'baolage': 'BQC',
'baolin': 'BNB',
'baolongshan': 'BND',
'baoqing': 'BUB',
'baoquanling': 'BQB',
'baotou': 'BTC',
'baotoudong': 'BDC',
'bashan': 'BAY',
'baxiantong': 'VXD',
'bayangaole': 'BAC',
'bayuquan': 'BYT',
'bazhong': 'IEW',
'bazhongdong': 'BDE',
'bazhou': 'RMP',
'bazhouxi': 'FOP',
'beian': 'BAB',
'beibei': 'BPW',
'beidaihe': 'BEP',
'beihai': 'BHZ',
'beijiao': 'IBQ',
'beijing': 'BJP',
'beijingbei': 'VAP',
'beijingdong': 'BOP',
'beijingnan': 'VNP',
'beijingxi': 'BXP',
'beijingzi': 'BRT',
'beiliu': 'BOZ',
'beimaquanzi': 'BRP',
'beipiaonan': 'RPD',
'beitai': 'BTT',
'beitun': 'BYP',
'beitunshi': 'BXR',
'beiying': 'BIV',
'beiyinhe': 'BYB',
'beizhai': 'BVP',
'bencha': 'FWH',
'bengbu': 'BBH',
'bengbunan': 'BMH',
'benhong': 'BVC',
'benxi': 'BXT',
'benxihu': 'BHT',
'benxixincheng': 'BVT',
'bijiang': 'BLQ',
'bijiashan': 'BSB',
'bijiguan': 'BJM',
'binhai': 'FHP',
'binhaibei': 'FCP',
'binjiang': 'BJB',
'binxian': 'BXY',
'binyang': 'UKZ',
'binzhou': 'BIK',
'bishan': 'FZW',
'boao': 'BWQ',
'bobai': 'BBZ',
'boketu': 'BKX',
'bole': 'BOR',
'boli': 'BLB',
'botou': 'BZP',
'boxing': 'BXK',
'bozhou': 'BZH',
'buhai': 'BUT',
'buliekai': 'BLR',
'caijiagou': 'CJT',
'caijiapo': 'CJY',
'caishan': 'CON',
'cangnan': 'CEH',
'cangshi': 'CST',
'cangxi': 'CXE',
'cangzhou': 'COP',
'cangzhouxi': 'CBP',
'caohai': 'WBW',
'caohekou': 'CKT',
'caoshi': 'CSL',
'caoxian': 'CXK',
'caozili': 'CFP',
'ceheng': 'CHZ',
'cenxi': 'CNZ',
'chabuga': 'CBC',
'chaigang': 'CGT',
'chaigoupu': 'CGV',
'chaihe': 'CHB',
'chajiang': 'CAM',
'chaka': 'CVO',
'chaling': 'CDG',
'chalingnan': 'CNG',
'changcheng': 'CEJ',
'changchong': 'CCM',
'changchun': 'CCT',
'changchunnan': 'CET',
'changchunxi': 'CRT',
'changde': 'VGQ',
'changdian': 'CDT',
'changge': 'CEF',
'changle': 'CLK',
'changli': 'CLP',
'changlingzi': 'CLT',
'changlinhe': 'FVH',
'changnong': 'CNJ',
'changping': 'DAQ',
'changpingbei': 'VBP',
'changpingdong': 'FQQ',
'changpoling': 'CPM',
'changqingqiao': 'CQJ',
'changsha': 'CSQ',
'changshanan': 'CWQ',
'changshantun': 'CVT',
'changshou': 'EFW',
'changshoubei': 'COW',
'changshouhu': 'CSE',
'changting': 'CES',
'changtingnan': 'CNS',
'changtingzhen': 'CDB',
'changtu': 'CTT',
'changtuxi': 'CPT',
'changwu': 'CWY',
'changxing': 'CBH',
'changxingnan': 'CFH',
'changyang': 'CYN',
'changyuan': 'CYF',
'changzheng': 'CZJ',
'changzhi': 'CZF',
'changzhibei': 'CBF',
'changzhou': 'CZH',
'changzhoubei': 'ESH',
'changzhuang': 'CVK',
'chaohu': 'CIH',
'chaohudong': 'GUH',
'chaolianggou': 'CYP',
'chaoshan': 'CBQ',
'chaoyang': 'CYD',
'chaoyangchuan': 'CYL',
'chaoyangdi': 'CDD',
'chaoyangzhen': 'CZL',
'chaozhou': 'CKQ',
'chasuqi': 'CSC',
'chengcheng': 'CUY',
'chengde': 'CDP',
'chengdedong': 'CCP',
'chengdu': 'CDW',
'chengdudong': 'ICW',
'chengdunan': 'CNW',
'chenggaozi': 'CZB',
'chenggu': 'CGY',
'chengjisihan': 'CJX',
'chenguanying': 'CAJ',
'chengyang': 'CEK',
'chengzitan': 'CWT',
'chenming': 'CMB',
'chenqing': 'CQB',
'chenxi': 'CXQ',
'chenxiangtun': 'CXT',
'chenzhou': 'CZQ',
'chenzhouxi': 'ICQ',
'chezhuanwan': 'CWM',
'chibi': 'CBN',
'chibibei': 'CIN',
'chifeng': 'CFD',
'chifengxi': 'CID',
'chizhou': 'IYH',
'chongqing': 'CQW',
'chongqingbei': 'CUW',
'chongqingnan': 'CRW',
'chongren': 'CRG',
'chongzuo': 'CZZ',
'chuangyecun': 'CEX',
'chunwan': 'CQQ',
'chunyang': 'CAL',
'chushan': 'CSB',
'chuxiong': 'COM',
'chuzhou': 'CXH',
'chuzhoubei': 'CUH',
'cili': 'CUQ',
'cishan': 'CSP',
'cixi': 'CRP',
'cixian': 'CIP',
'ciyao': 'CYK',
'congjiang': 'KNW',
'cuihuangkou': 'CHP',
'cuogang': 'CAX',
'daan': 'RAT',
'daanbei': 'RNT',
'daba': 'DBJ',
'daban': 'DBC',
'dachaigou': 'DGJ',
'dacheng': 'DCT',
'dadenggou': 'DKJ',
'dafangnan': 'DNE',
'daguan': 'RGW',
'daguantun': 'DTT',
'dagushan': 'RMT',
'dahongqi': 'DQD',
'dahuichang': 'DHP',
'dahushan': 'DHD',
'dailing': 'DLB',
'daixian': 'DKV',
'daiyue': 'RYV',
'dajiagou': 'DJT',
'dajian': 'DFP',
'daju': 'DIM',
'dakoutun': 'DKP',
'dalateqi': 'DIC',
'dalatexi': 'DNC',
'dali': 'DKM',
'dalian': 'DLT',
'dalianbei': 'DFT',
'dalin': 'DLD',
'daluhao': 'DLC',
'dandong': 'DUT',
'dandongxi': 'RWT',
'danfeng': 'DGY',
'dangshan': 'DKH',
'dangshannan': 'PRH',
'dangtudong': 'OWH',
'dangyang': 'DYN',
'dani': 'DNZ',
'dantu': 'RUH',
'danxiashan': 'IRQ',
'danyang': 'DYH',
'danyangbei': 'EXH',
'daobao': 'RBT',
'daoerdeng': 'DRD',
'daoqing': 'DML',
'daozhou': 'DFZ',
'dapanshi': 'RPP',
'dapingfang': 'DPD',
'dapu': 'DPI',
'daqilaha': 'DQX',
'daqing': 'DZX',
'daqingdong': 'LFX',
'daqinggou': 'DSD',
'daqingxi': 'RHX',
'dashiqiao': 'DQT',
'dashitou': 'DSL',
'dashitounan': 'DAL',
'dashizhai': 'RZT',
'datianbian': 'DBM',
'datong': 'DTV',
'datongxi': 'DTO',
'datun': 'DNT',
'dawang': 'WWQ',
'dawangtan': 'DZZ',
'dawanzi': 'DFM',
'dawukou': 'DFJ',
'daxing': 'DXX',
'daxinggou': 'DXL',
'dayan': 'DYX',
'dayangshu': 'DUX',
'dayebei': 'DBN',
'daying': 'DYV',
'dayingdong': 'IAW',
'dayingzhen': 'DJP',
'dayingzi': 'DZD',
'dayu': 'DYG',
'dayuan': 'DYZ',
'dazhanchang': 'DTJ',
'dazhangzi': 'DAP',
'dazhou': 'RXW',
'dazhuyuan': 'DZY',
'dazunan': 'FQW',
'dean': 'DAG',
'debao': 'RBZ',
'debosi': 'RDT',
'dechang': 'DVW',
'deerbuer': 'DRX',
'dehui': 'DHT',
'dehuixi': 'DXT',
'delingha': 'DHO',
'dengshahe': 'DWT',
'dengta': 'DGT',
'dengzhou': 'DOF',
'deqing': 'DRH',
'deqingxi': 'MOH',
'dexing': 'DWG',
'deyang': 'DYW',
'dezhou': 'DZP',
'dezhoudong': 'DIP',
'dianjiang': 'DJE',
'dianxin': 'DXM',
'didao': 'DDB',
'dingbian': 'DYJ',
'dinghudong': 'UWQ',
'dinghushan': 'NVQ',
'dingnan': 'DNG',
'dingtao': 'DQK',
'dingxi': 'DSJ',
'dingxiang': 'DXV',
'dingyuan': 'EWH',
'dingzhou': 'DXP',
'dingzhoudong': 'DOP',
'diwopu': 'DWJ',
'dizhuang': 'DVQ',
'dongandong': 'DCZ',
'dongbianjing': 'DBB',
'dongdaihe': 'RDD',
'dongerdaohe': 'DRB',
'dongfang': 'UFQ',
'dongfanghong': 'DFB',
'dongfeng': 'DIL',
'donggangbei': 'RGT',
'dongguan': 'RTQ',
'dongguandong': 'DMQ',
'dongguang': 'DGP',
'donghai': 'DHB',
'donghaixian': 'DQH',
'dongjin': 'DKB',
'dongjingcheng': 'DJB',
'donglai': 'RVD',
'dongmiaohe': 'DEP',
'dongmingcun': 'DMD',
'dongmingxian': 'DNF',
'dongsheng': 'DOC',
'dongshengxi': 'DYC',
'dongtai': 'DBH',
'dongtonghua': 'DTL',
'dongwan': 'DRJ',
'dongxiang': 'DXG',
'dongxinzhuang': 'DXD',
'dongxu': 'RXP',
'dongying': 'DPK',
'dongyingnan': 'DOK',
'dongyudi': 'DBV',
'dongzhen': 'DNV',
'dongzhi': 'DCH',
'dongzhuang': 'DZV',
'douluo': 'DLV',
'douzhangzhuang': 'RZP',
'douzhuang': 'ROP',
'duanzhou': 'WZQ',
'duge': 'DMM',
'duiqingshan': 'DQB',
'duizhen': 'DWV',
'dujia': 'DJL',
'dujiangyan': 'DDW',
'dulitun': 'DTX',
'dunhua': 'DHL',
'dunhuang': 'DHJ',
'dushan': 'RWW',
'dushupu': 'DPM',
'duyun': 'RYW',
'duyundong': 'KJW',
'ebian': 'EBW',
'eerduosi': 'EEC',
'ejina': 'EJC',
'emei': 'EMW',
'emeishan': 'IXW',
'enshi': 'ESN',
'erdaogoumen': 'RDP',
'erdaowan': 'RDX',
'erlian': 'RLC',
'erlong': 'RLD',
'erlongshantun': 'ELA',
'ermihe': 'RML',
'erying': 'RYJ',
'ezhou': 'ECN',
'ezhoudong': 'EFN',
'faer': 'FEM',
'fanchangxi': 'PUH',
'fangchenggangbei': 'FBZ',
'fanjiatun': 'FTT',
'fanshi': 'FSV',
'fanzhen': 'VZK',
'faqi': 'FQE',
'feidong': 'FIH',
'feixian': 'FXK',
'fengcheng': 'FCG',
'fengchengdong': 'FDT',
'fengchengnan': 'FNG',
'fengdu': 'FUW',
'fenghua': 'FHH',
'fenghuangcheng': 'FHT',
'fenghuangjichang': 'FJQ',
'fenglezhen': 'FZB',
'fenglingdu': 'FLV',
'fengshuicun': 'FSJ',
'fengshun': 'FUQ',
'fengtun': 'FTX',
'fengxian': 'FXY',
'fengyang': 'FUH',
'fengzhen': 'FZC',
'fengzhou': 'FZY',
'fenhe': 'FEV',
'fenyang': 'FAV',
'fenyi': 'FYG',
'foshan': 'FSQ',
'fuan': 'FAS',
'fuchuan': 'FDZ',
'fuding': 'FES',
'fuhai': 'FHR',
'fujin': 'FIB',
'fulaerji': 'FRX',
'fuling': 'FLW',
'fulingbei': 'FEW',
'fuliqu': 'FLJ',
'fulitun': 'FTB',
'funan': 'FNH',
'funing': 'FNP',
'fuqing': 'FQS',
'fuquan': 'VMW',
'fushankou': 'FKP',
'fushanzhen': 'FZQ',
'fushun': 'FST',
'fushunbei': 'FET',
'fusong': 'FSL',
'fusui': 'FSZ',
'futian': 'NZQ',
'futuyu': 'FYP',
'fuxian': 'FEY',
'fuxiandong': 'FDY',
'fuxin': 'FXD',
'fuyang': 'FYH',
'fuyu': 'FYX',
'fuyuan': 'FYM',
'fuyubei': 'FBT',
'fuzhou': 'FZG',
'fuzhoubei': 'FBG',
'fuzhoudong': 'FDG',
'fuzhounan': 'FYS',
'gaizhou': 'GXT',
'gaizhouxi': 'GAT',
'gancaodian': 'GDJ',
'gangou': 'GGL',
'gangu': 'GGJ',
'ganhe': 'GAX',
'ganluo': 'VOW',
'ganqika': 'GQD',
'ganquan': 'GQY',
'ganquanbei': 'GEY',
'ganshui': 'GSW',
'gantang': 'GNJ',
'ganzhou': 'GZG',
'gaoan': 'GCG',
'gaobeidian': 'GBP',
'gaobeidiandong': 'GMP',
'gaocheng': 'GEP',
'gaocun': 'GCV',
'gaogezhuang': 'GGP',
'gaolan': 'GEJ',
'gaoloufang': 'GFM',
'gaomi': 'GMK',
'gaoping': 'GPF',
'gaoqiaozhen': 'GZD',
'gaoshanzi': 'GSD',
'gaotai': 'GTJ',
'gaotainan': 'GAJ',
'gaotan': 'GAY',
'gaoyi': 'GIP',
'gaoyixi': 'GNP',
'gaozhou': 'GSQ',
'gashidianzi': 'GXD',
'gediannan': 'GNN',
'geermu': 'GRO',
'gegenmiao': 'GGT',
'geju': 'GEM',
'genhe': 'GEX',
'gezhenpu': 'GZT',
'gongcheng': 'GCZ',
'gongmiaozi': 'GMC',
'gongnonghu': 'GRT',
'gongpengzi': 'GPT',
'gongqingcheng': 'GAG',
'gongyi': 'GXF',
'gongyinan': 'GYF',
'gongyingzi': 'GYD',
'gongzhuling': 'GLT',
'gongzhulingnan': 'GBT',
'goubangzi': 'GBD',
'guan': 'GFP',
'guangan': 'VJW',
'guangannan': 'VUW',
'guangao': 'GVP',
'guangde': 'GRH',
'guanghan': 'GHW',
'guanghanbei': 'GVW',
'guangmingcheng': 'IMQ',
'guangnanwei': 'GNM',
'guangning': 'FBQ',
'guangningsi': 'GQT',
'guangningsinan': 'GNT',
'guangshan': 'GUN',
'guangshui': 'GSN',
'guangtongbei': 'GPM',
'guangyuan': 'GYW',
'guangyuannan': 'GAW',
'guangze': 'GZS',
'guangzhou': 'GZQ',
'guangzhoubei': 'GBQ',
'guangzhoudong': 'GGQ',
'guangzhounan': 'IZQ',
'guangzhouxi': 'GXQ',
'guanlin': 'GLF',
'guanling': 'GLE',
'guanshui': 'GST',
'guanting': 'GTP',
'guantingxi': 'KEP',
'guanzhaishan': 'GSS',
'guanzijing': 'GOT',
'guazhou': 'GZJ',
'gucheng': 'GCN',
'guchengzhen': 'GZB',
'gudong': 'GDV',
'guian': 'GAE',
'guiding': 'GTW',
'guidingbei': 'FMW',
'guidingnan': 'IDW',
'guidingxian': 'KIW',
'guigang': 'GGZ',
'guilin': 'GLZ',
'guilinbei': 'GBZ',
'guilinxi': 'GEZ',
'guiliuhe': 'GHT',
'guiping': 'GAZ',
'guixi': 'GXG',
'guiyang': 'GIW',
'guiyangbei': 'KQW',
'gujiao': 'GJV',
'gujiazi': 'GKT',
'gulang': 'GLJ',
'gulian': 'GRX',
'guojiadian': 'GDT',
'guoleizhuang': 'GLP',
'guosong': 'GSL',
'guoyang': 'GYH',
'guozhen': 'GZY',
'gushankou': 'GSP',
'gushi': 'GXN',
'gutian': 'GTS',
'gutianbei': 'GBS',
'gutianhuizhi': 'STS',
'guyuan': 'GUJ',
'guzhen': 'GEH',
'haerbin': 'HBB',
'haerbinbei': 'HTB',
'haerbindong': 'VBB',
'haerbinxi': 'VAB',
'haianxian': 'HIH',
'haibei': 'HEB',
'haicheng': 'HCT',
'haichengxi': 'HXT',
'haidongxi': 'HDO',
'haikou': 'VUQ',
'haikoudong': 'HMQ',
'hailaer': 'HRX',
'hailin': 'HRB',
'hailong': 'HIL',
'hailun': 'HLB',
'haining': 'HNH',
'hainingxi': 'EUH',
'haishiwan': 'HSO',
'haituozi': 'HZT',
'haiwan': 'RWH',
'haiyang': 'HYK',
'haiyangbei': 'HEK',
'halahai': 'HIT',
'halasu': 'HAX',
'hami': 'HMR',
'hancheng': 'HCY',
'hanchuan': 'HCN',
'hanconggou': 'HKB',
'handan': 'HDP',
'handandong': 'HPP',
'hanfuwan': 'HXJ',
'hangjinhouqi': 'HDC',
'hangu': 'HGP',
'hangzhou': 'HZH',
'hangzhoudong': 'HGH',
'hangzhounan': 'XHH',
'hanjiang': 'HJS',
'hankou': 'HKN',
'hanling': 'HAT',
'hanmaying': 'HYP',
'hanshou': 'VSQ',
'hanyin': 'HQY',
'hanyuan': 'WHW',
'hanzhong': 'HOY',
'haolianghe': 'HHB',
'hebei': 'HMB',
'hebi': 'HAF',
'hebian': 'HBV',
'hebidong': 'HFF',
'hechuan': 'WKW',
'hechun': 'HCZ',
'hefei': 'HFH',
'hefeibeicheng': 'COH',
'hefeinan': 'ENH',
'hefeixi': 'HTH',
'hegang': 'HGB',
'heichongtan': 'HCJ',
'heihe': 'HJB',
'heijing': 'HIM',
'heishui': 'HOT',
'heitai': 'HQB',
'heiwang': 'HWK',
'hejiadian': 'HJJ',
'hejianxi': 'HXP',
'hejin': 'HJV',
'hejing': 'HJR',
'hekoubei': 'HBM',
'hekounan': 'HKJ',
'heli': 'HOB',
'helong': 'HLL',
'hengdaohezi': 'HDB',
'hengfeng': 'HFG',
'henggouqiaodong': 'HNN',
'hengnan': 'HNG',
'hengshan': 'HSQ',
'hengshanxi': 'HEQ',
'hengshui': 'HSP',
'hengyang': 'HYQ',
'hengyangdong': 'HVQ',
'heping': 'VAQ',
'hepu': 'HVZ',
'heqing': 'HQM',
'heshengqiaodong': 'HLN',
'heshituoluogai': 'VSR',
'heshuo': 'VUR',
'hetian': 'VTR',
'heyang': 'HAY',
'heyangbei': 'HTY',
'heyuan': 'VIQ',
'heze': 'HIK',
'hezhou': 'HXZ',
'hongan': 'HWN',
'honganxi': 'VXN',
'hongguangzhen': 'IGW',
'hongguo': 'HEM',
'honghe': 'HPB',
'honghuagou': 'VHD',
'hongjiang': 'HFM',
'hongqing': 'HEY',
'hongshan': 'VSB',
'hongshaxian': 'VSJ',
'hongsipu': 'HSJ',
'hongtong': 'HDV',
'hongtongxi': 'HTV',
'hongxiantai': 'HTJ',
'hongxing': 'VXB',
'hongxinglong': 'VHB',
'hongyan': 'VIX',
'houma': 'HMV',
'houmaxi': 'HPV',
'houmen': 'KMQ',
'huacheng': 'VCQ',
'huade': 'HGC',
'huahu': 'KHN',
'huaian': 'AUH',
'huaiannan': 'AMH',
'huaibei': 'HRH',
'huaibin': 'HVN',
'huaihua': 'HHQ',
'huaihuanan': 'KAQ',
'huaiji': 'FAQ',
'huainan': 'HAH',
'huainandong': 'HOH',
'huairen': 'HRV',
'huairendong': 'HFV',
'huairou': 'HRP',
'huairoubei': 'HBP',
'huajia': 'HJT',
'huajiazhuang': 'HJM',
'hualin': 'HIB',
'huanan': 'HNB',
'huangbai': 'HBL',
'huangchuan': 'KCN',
'huangcun': 'HCP',
'huanggang': 'KGN',
'huanggangdong': 'KAN',
'huanggangxi': 'KXN',
'huangguayuan': 'HYM',
'huanggutun': 'HTT',
'huanghejingqu': 'HCF',
'huanghuatong': 'HUD',
'huangkou': 'KOH',
'huangling': 'ULY',
'huanglingnan': 'VLY',
'huangliu': 'KLQ',
'huangmei': 'VEH',
'huangnihe': 'HHL',
'huangshan': 'HKH',
'huangshanbei': 'NYH',
'huangshi': 'HSN',
'huangshibei': 'KSN',
'huangshidong': 'OSN',
'huangsongdian': 'HDL',
'huangyangtan': 'HGJ',
'huangyangzhen': 'HYJ',
'huangyuan': 'HNO',
'huangzhou': 'VON',
'huantai': 'VTK',
'huanxintian': 'VTB',
'huapengzi': 'HZM',
'huaqiao': 'VQH',
'huarong': 'HRN',
'huarongdong': 'HPN',
'huarongnan': 'KRN',
'huashan': 'HSY',
'huashanbei': 'HDY',
'huashannan': 'KNN',
'huaying': 'HUW',
'huayuan': 'HUN',
'huayuankou': 'HYT',
'huazhou': 'HZZ',
'huhehaote': 'HHC',
'huhehaotedong': 'NDC',
'huian': 'HNS',
'huichangbei': 'XEG',
'huidong': 'KDQ',
'huihuan': 'KHQ',
'huinong': 'HMJ',
'huishan': 'VCH',
'huitong': 'VTQ',
'huixian': 'HYY',
'huizhou': 'HCQ',
'huizhounan': 'KNQ',
'huizhouxi': 'VXQ',
'hukou': 'HKG',
'hulan': 'HUB',
'hulin': 'VLB',
'huludao': 'HLD',
'huludaobei': 'HPD',
'hulusitai': 'VTJ',
'humen': 'IUQ',
'hunchun': 'HUL',
'hunhe': 'HHT',
'huoerguosi': 'HFR',
'huojia': 'HJF',
'huolianzhai': 'HLT',
'huolinguole': 'HWD',
'huoqiu': 'FBH',
'huozhou': 'HZV',
'huozhoudong': 'HWV',
'hushiha': 'HHP',
'hushitai': 'HUT',
'huzhou': 'VZH',
'jiafeng': 'JFF',
'jiagedaqi': 'JGX',
'jialuhe': 'JLF',
'jiamusi': 'JMB',
'jian': 'VAG',
'jianchang': 'JFD',
'jianfeng': 'PFQ',
'jiangbiancun': 'JBG',
'jiangdu': 'UDH',
'jianghua': 'JHZ',
'jiangjia': 'JJB',
'jiangjin': 'JJW',
'jiangle': 'JLS',
'jiangmen': 'JWQ',
'jiangning': 'JJH',
'jiangningxi': 'OKH',
'jiangqiao': 'JQX',
'jiangshan': 'JUH',
'jiangsuotian': 'JOM',
'jiangyan': 'UEH',
'jiangyong': 'JYZ',
'jiangyou': 'JFW',
'jiangyuan': 'SZL',
'jianhu': 'AJH',
'jianningxianbei': 'JCS',
'jianou': 'JVS',
'jianouxi': 'JUS',
'jiansanjiang': 'JIB',
'jianshe': 'JET',
'jianshi': 'JRN',
'jianshui': 'JSM',
'jianyang': 'JYW',
'jianyangnan': 'JOW',
'jiaocheng': 'JNV',
'jiaohe': 'JHL',
'jiaohexi': 'JOL',
'jiaomei': 'JES',
'jiaozhou': 'JXK',
'jiaozhoubei': 'JZK',
'jiaozuo': 'JOF',
'jiaozuodong': 'WEF',
'jiashan': 'JSH',
'jiashannan': 'EAH',
'jiaxiang': 'JUK',
'jiaxing': 'JXH',
'jiaxingnan': 'EPH',
'jiaxinzi': 'JXT',
'jiayuguan': 'JGJ',
'jiayuguannan': 'JBJ',
'jidong': 'JOB',
'jieshoushi': 'JUN',
'jiexiu': 'JXV',
'jiexiudong': 'JDV',
'jieyang': 'JRQ',
'jiguanshan': 'JST',
'jijiagou': 'VJD',
'jilin': 'JLL',
'jiling': 'JLJ',
'jimobei': 'JVK',
'jinan': 'JNK',
'jinandong': 'JAK',
'jinanxi': 'JGK',
'jinbaotun': 'JBD',
'jinchang': 'JCJ',
'jincheng': 'JCF',
'jinchengbei': 'JEF',
'jinchengjiang': 'JJZ',
'jingbian': 'JIY',
'jingchuan': 'JAJ',
'jingde': 'NSH',
'jingdezhen': 'JCG',
'jingdian': 'JFP',
'jinggangshan': 'JGG',
'jinghai': 'JHP',
'jinghe': 'JHR',
'jinghenan': 'JIR',
'jingmen': 'JMN',
'jingnan': 'JNP',
'jingoutun': 'VGP',
'jingpeng': 'JPC',
'jingshan': 'JCN',
'jingtai': 'JTJ',
'jingtieshan': 'JVJ',
'jingxi': 'JMZ',
'jingxian': 'LOH',
'jingxing': 'JJP',
'jingyu': 'JYL',
'jingyuan': 'JYJ',
'jingyuanxi': 'JXJ',
'jingzhou': 'JBN',
'jinhe': 'JHB',
'jinhua': 'JBH',
'jinhuanan': 'RNH',
'jining': 'JIK',
'jiningnan': 'JAC',
'jinjiang': 'JJS',
'jinkeng': 'JKT',
'jinmacun': 'JMM',
'jinshanbei': 'EGH',
'jinshantun': 'JTB',
'jinxian': 'JUG',
'jinxiannan': 'JXG',
'jinyuewan': 'PYQ',
'jinyun': 'JYH',
'jinyunxi': 'PYH',
'jinzhai': 'JZH',
'jinzhangzi': 'JYD',
'jinzhong': 'JZV',
'jinzhou': 'JZD',
'jinzhounan': 'JOD',
'jishan': 'JVV',
'jishou': 'JIQ',
'jishu': 'JSL',
'jiujiang': 'JJG',
'jiuquan': 'JQJ',
'jiuquannan': 'JNJ',
'jiusan': 'SSX',
'jiutai': 'JTL',
'jiutainan': 'JNL',
'jiuzhuangwo': 'JVP',
'jiwen': 'JWX',
'jixi': 'JXB',
'jixian': 'JKP',
'jixibei': 'NRH',
'jixixian': 'JRH',
'jiyuan': 'JYF',
'juancheng': 'JCK',
'jubao': 'JRT',
'junan': 'JOK',
'junde': 'JDB',
'junliangchengbei': 'JMP',
'jurongxi': 'JWH',
'juxian': 'JKK',
'juye': 'JYK',
'kaian': 'KAT',
'kaifeng': 'KFF',
'kaifengbei': 'KBF',
'kaijiang': 'KAW',
'kaili': 'KLW',
'kailinan': 'QKW',
'kailu': 'KLC',
'kaitong': 'KTT',
'kaiyang': 'KVW',
'kaiyuan': 'KYT',
'kaiyuanxi': 'KXT',
'kalaqi': 'KQX',
'kangcheng': 'KCP',
'kangjinjing': 'KJB',
'kangxiling': 'KXZ',
'kangzhuang': 'KZP',
'kashi': 'KSR',
'kedong': 'KOB',
'kelamayi': 'KHR',
'kelan': 'KLV',
'keshan': 'KSB',
'keyihe': 'KHX',
'kouqian': 'KQL',
'kuandian': 'KDT',
'kuche': 'KCR',
'kuduer': 'KDX',
'kuerle': 'KLR',
'kuishan': 'KAB',
'kuitan': 'KTQ',
'kuitun': 'KTR',
'kulun': 'KLD',
'kunming': 'KMM',
'kunmingxi': 'KXM',
'kunshan': 'KSH',
'kunshannan': 'KNH',
'kunyang': 'KAM',
'lagu': 'LGB',
'laha': 'LHX',
'laibin': 'UBZ',
'laibinbei': 'UCZ',
'laituan': 'LVZ',
'laiwudong': 'LWK',
'laiwuxi': 'UXK',
'laixi': 'LXK',
'laixibei': 'LBK',
'laiyang': 'LYK',
'laiyuan': 'LYP',
'laizhou': 'LZS',
'lalin': 'LAB',
'lamadian': 'LMX',
'lancun': 'LCK',
'langang': 'LNB',
'langfang': 'LJP',
'langfangbei': 'LFP',
'langweishan': 'LRJ',
'langxiang': 'LXB',
'langzhong': 'LZE',
'lankao': 'LKF',
'lankaonan': 'LUF',
'lanling': 'LLB',
'lanlingbei': 'COK',
'lanxi': 'LWH',
'lanzhou': 'LZJ',
'lanzhoudong': 'LVJ',
'lanzhouxi': 'LAJ',
'lanzhouxinqu': 'LQJ',
'laobian': 'LLT',
'laochengzhen': 'ACQ',
'laofu': 'UFD',
'laolai': 'LAX',
'laoying': 'LXL',
'lasa': 'LSO',
'lazha': 'LEM',
'lechang': 'LCQ',
'ledong': 'UQQ',
'ledu': 'LDO',
'ledunan': 'LVO',
'leiyang': 'LYQ',
'leiyangxi': 'LPQ',
'leizhou': 'UAQ',
'lengshuijiangdong': 'UDQ',
'lepingshi': 'LPG',
'leshan': 'IVW',
'leshanbei': 'UTW',
'leshancun': 'LUM',
'liangdang': 'LDY',
'liangdixia': 'LDP',
'lianggezhuang': 'LGP',
'liangjia': 'UJT',
'liangjiadian': 'LRT',
'liangping': 'UQW',
'liangpingnan': 'LPE',
'liangshan': 'LMK',
'lianjiang': 'LJZ',
'lianjiangkou': 'LHB',
'lianshanguan': 'LGT',
'lianyuan': 'LAQ',
'lianyungang': 'UIH',
'lianyungangdong': 'UKH',
'liaocheng': 'UCK',
'liaoyang': 'LYT',
'liaoyuan': 'LYL',
'liaozhong': 'LZD',
'licheng': 'UCP',
'lichuan': 'LCN',
'liduigongyuan': 'INW',
'lijia': 'LJB',
'lijiang': 'LHM',
'lijiaping': 'LIJ',
'lijinnan': 'LNK',
'lilinbei': 'KBQ',
'liling': 'LLG',
'lilingdong': 'UKQ',
'limudian': 'LMB',
'lincheng': 'UUP',
'linchuan': 'LCG',
'lindong': 'LRC',
'linfen': 'LFV',
'linfenxi': 'LXV',
'lingaonan': 'KGQ',
'lingbao': 'LBF',
'lingbaoxi': 'LPF',
'lingbi': 'GMH',
'lingcheng': 'LGK',
'linghai': 'JID',
'lingling': 'UWZ',
'lingqiu': 'LVV',
'lingshi': 'LSV',
'lingshidong': 'UDV',
'lingshui': 'LIQ',
'lingwu': 'LNJ',
'lingyuan': 'LYD',
'lingyuandong': 'LDD',
'linhai': 'UFH',
'linhe': 'LHC',
'linjialou': 'ULK',
'linjiang': 'LQL',
'linkou': 'LKB',
'linli': 'LWQ',
'linqing': 'UQK',
'linshengpu': 'LBT',
'linxi': 'LXC',
'linxiang': 'LXQ',
'linyi': 'LVK',
'linyibei': 'UYK',
'linying': 'LNF',
'linyuan': 'LYX',
'linze': 'LEJ',
'linzenan': 'LDJ',
'liquan': 'LGY',
'lishizhai': 'LET',
'lishui': 'LDH',
'lishuzhen': 'LSB',
'litang': 'LTZ',
'liudaohezi': 'LVP',
'liuhe': 'LNL',
'liuhezhen': 'LEX',
'liujiadian': 'UDT',
'liujiahe': 'LVT',
'liulinnan': 'LKV',
'liupanshan': 'UPJ',
'liupanshui': 'UMW',
'liushuigou': 'USP',
'liushutun': 'LSD',
'liuyuan': 'DHR',
'liuyuannan': 'LNR',
'liuzhi': 'LIW',
'liuzhou': 'LZZ',
'liwang': 'VLJ',
'lixian': 'LEQ',
'liyang': 'LEH',
'lizhi': 'LZX',
'longandong': 'IDZ',
'longchang': 'LCW',
'longchangbei': 'NWW',
'longchuan': 'LUQ',
'longdongbao': 'FVW',
'longfeng': 'KFQ',
'longgou': 'LGJ',
'longgudian': 'LGM',
'longhua': 'UHP',
'longjia': 'UJL',
'longjiang': 'LJX',
'longjing': 'LJL',
'longli': 'LLW',
'longlibei': 'KFW',
'longnan': 'UNG',
'longquansi': 'UQJ',
'longshanzhen': 'LAS',
'longshi': 'LAG',
'longtangba': 'LBM',
'longxi': 'LXJ',
'longxian': 'LXY',
'longyan': 'LYS',
'longyou': 'LMH',
'longzhen': 'LZA',
'longzhuagou': 'LZT',
'loudi': 'LDQ',
'loudinan': 'UOQ',
'luan': 'UAH',
'luanhe': 'UDP',
'luanheyan': 'UNP',
'luanping': 'UPP',
'luanxian': 'UXP',
'luchaogang': 'UCH',
'lucheng': 'UTP',
'luchuan': 'LKZ',
'ludao': 'LDL',
'lueyang': 'LYY',
'lufan': 'LVM',
'lufeng': 'LLQ',
'lufengnan': 'LQM',
'lugou': 'LOM',
'lujiang': 'UJH',
'lukoupu': 'LKQ',
'luliang': 'LRM',
'lulong': 'UAP',
'luntai': 'LAR',
'luocheng': 'VCZ',
'luofa': 'LOP',
'luohe': 'LON',
'luohexi': 'LBN',
'luojiang': 'LJW',
'luojiangdong': 'IKW',
'luomen': 'LMJ',
'luoping': 'LPM',
'luopoling': 'LPP',
'luoshan': 'LRN',
'luotuoxiang': 'LTJ',
'luowansanjiang': 'KRW',
'luoyang': 'LYF',
'luoyangdong': 'LDF',
'luoyanglongmen': 'LLF',
'luoyuan': 'LVS',
'lushan': 'LSG',
'lushuihe': 'LUL',
'lutai': 'LTP',
'luxi': 'LUG',
'luzhai': 'LIZ',
'luzhaibei': 'LSZ',
'lvboyuan': 'LCF',
'lvhua': 'LWJ',
'lvliang': 'LHV',
'lvshun': 'LST',
'maanshan': 'MAH',
'maanshandong': 'OMH',
'macheng': 'MCN',
'machengbei': 'MBN',
'mahuang': 'MHZ',
'maiyuan': 'MYS',
'malan': 'MLR',
'malianhe': 'MHB',
'malin': 'MID',
'malong': 'MGM',
'manasi': 'MSR',
'manasihu': 'MNR',
'mangui': 'MHX',
'manshuiwan': 'MKW',
'manzhouli': 'MLX',
'maoba': 'MBY',
'maobaguan': 'MGY',
'maocaoping': 'KPM',
'maoershan': 'MRB',
'maolin': 'MLD',
'maoling': 'MLZ',
'maoming': 'MDQ',
'maomingxi': 'MMZ',
'maoshezu': 'MOM',
'maqiaohe': 'MQB',
'masanjia': 'MJT',
'mashan': 'MAB',
'mawei': 'VAW',
'mayang': 'MVQ',
'meihekou': 'MHL',
'meilan': 'MHQ',
'meishan': 'MSW',
'meishandong': 'IUW',
'meixi': 'MEB',
'meizhou': 'MOQ',
'mengdonghe': 'MUQ',
'mengjiagang': 'MGB',
'mengzhuang': 'MZF',
'mengzi': 'MZM',
'mengzibei': 'MBM',
'menyuan': 'MYO',
'mianchi': 'MCF',
'mianchinan': 'MNF',
'mianduhe': 'MDX',
'mianning': 'UGW',
'mianxian': 'MVY',
'mianyang': 'MYW',
'miaocheng': 'MAP',
'miaoling': 'MLL',
'miaoshan': 'MSN',
'miaozhuang': 'MZJ',
'midu': 'MDF',
'miluo': 'MLQ',
'miluodong': 'MQQ',
'mingcheng': 'MCL',
'minggang': 'MGN',
'minggangdong': 'MDN',
'mingguang': 'MGH',
'mingshuihe': 'MUT',
'mingzhu': 'MFQ',
'minhenan': 'MNO',
'minle': 'MBJ',
'minqing': 'MQS',
'minqingbei': 'MBS',
'minquan': 'MQF',
'minquanbei': 'MIF',
'mishan': 'MSB',
'mishazi': 'MST',
'miyi': 'MMW',
'miyunbei': 'MUP',
'mizhi': 'MEY',
'modaoshi': 'MOB',
'moerdaoga': 'MRX',
'mohe': 'MVX',
'moyu': 'MUR',
'mudanjiang': 'MDB',
'muling': 'MLB',
'mulitu': 'MUD',
'mupang': 'MPQ',
'muping': 'MBK',
'nailin': 'NLD',
'naiman': 'NMD',
'naluo': 'ULZ',
'nanboshan': 'NBK',
'nanbu': 'NBE',
'nancao': 'NEF',
'nancha': 'NCB',
'nanchang': 'NCG',
'nanchangxi': 'NXG',
'nancheng': 'NDG',
'nanchengsi': 'NSP',
'nanchong': 'NCW',
'nanchongbei': 'NCE',
'nandamiao': 'NMP',
'nandan': 'NDZ',
'nanfen': 'NFT',
'nanfenbei': 'NUT',
'nanfeng': 'NFG',
'nangongdong': 'NFP',
'nanguancun': 'NGP',
'nanguanling': 'NLT',
'nanhechuan': 'NHJ',
'nanhua': 'NHS',
'nanhudong': 'NDN',
'nanjiang': 'FIW',
'nanjiangkou': 'NDQ',
'nanjing': 'NJS',
'nanjingnan': 'NKH',
'nankou': 'NKP',
'nankouqian': 'NKT',
'nanlang': 'NNQ',
'nanling': 'LLH',
'nanmu': 'NMX',
'nanning': 'NNZ',
'nanningdong': 'NFZ',
'nanningxi': 'NXZ',
'nanping': 'NPS',
'nanpingbei': 'NBS',
'nanpingnan': 'NNS',
'nanqiao': 'NQD',
'nanqiu': 'NCK',
'nantai': 'NTT',
'nantong': 'NUH',
'nantou': 'NOQ',
'nanwanzi': 'NWP',
'nanxiangbei': 'NEH',
'nanxiong': 'NCQ',
'nanyang': 'NFF',
'nanyangzhai': 'NYF',
'nanyu': 'NUP',
'nanzamu': 'NZT',
'nanzhao': 'NAF',
'napu': 'NPZ',
'naqu': 'NQO',
'nayong': 'NYE',
'nehe': 'NHX',
'neijiang': 'NJW',
'neijiangbei': 'NKW',
'neixiang': 'NXF',
'nengjia': 'NJD',
'nenjiang': 'NGX',
'niangziguan': 'NIP',
'nianzishan': 'NZX',
'nihezi': 'NHD',
'nileke': 'NIR',
'nimu': 'NMO',
'ningan': 'NAB',
'ningbo': 'NGH',
'ningbodong': 'NVH',
'ningcun': 'NCZ',
'ningde': 'NES',
'ningdong': 'NOJ',
'ningdongnan': 'NDJ',
'ningguo': 'NNH',
'ninghai': 'NHH',
'ningjia': 'NVT',
'ninglingxian': 'NLF',
'ningming': 'NMZ',
'ningwu': 'NWV',
'ningxiang': 'NXQ',
'niujia': 'NJB',
'niuxintai': 'NXT',
'nongan': 'NAT',
'nuanquan': 'NQJ',
'paihuaibei': 'PHP',
'pananzhen': 'PAJ',
'panguan': 'PAM',
'panjiadian': 'PDP',
'panjin': 'PVD',
'panjinbei': 'PBD',
'panshi': 'PSL',
'panzhihua': 'PRW',
'panzhou': 'PAE',
'paozi': 'POD',
'peide': 'PDB',
'pengan': 'PAW',
'pengshan': 'PSW',
'pengshanbei': 'PPW',
'pengshui': 'PHW',
'pengyang': 'PYJ',
'pengze': 'PZG',
'pengzhou': 'PMW',
'piandian': 'PRP',
'pianling': 'PNT',
'piaoertun': 'PRT',
'pikou': 'PUT',
'pikounan': 'PKT',
'pingan': 'PAL',
'pinganyi': 'PNO',
'pinganzhen': 'PZT',
'pingbanan': 'PBE',
'pingbian': 'PBM',
'pingchang': 'PCE',
'pingdingshan': 'PEN',
'pingdingshanxi': 'BFF',
'pingdu': 'PAK',
'pingfang': 'PFB',
'pinggang': 'PGL',
'pingguan': 'PGM',
'pingguo': 'PGZ',
'pinghekou': 'PHM',
'pinghu': 'PHQ',
'pingliang': 'PIJ',
'pingliangnan': 'POJ',
'pingnannan': 'PAZ',
'pingquan': 'PQP',
'pingshan': 'PSB',
'pingshang': 'PSK',
'pingshe': 'PSV',
'pingshi': 'PSQ',
'pingtai': 'PVT',
'pingtian': 'PTM',
'pingwang': 'PWV',
'pingxiang': 'PXZ',
'pingxiangbei': 'PBG',
'pingxingguan': 'PGV',
'pingyang': 'PYX',
'pingyao': 'PYV',
'pingyaogucheng': 'PDV',
'pingyi': 'PIK',
'pingyu': 'PYP',
'pingyuan': 'PYK',
'pingyuanpu': 'PPJ',
'pingzhuang': 'PZD',
'pingzhuangnan': 'PND',
'pishan': 'PSR',
'pixian': 'PWW',
'pixianxi': 'PCW',
'pizhou': 'PJH',
'podixia': 'PXJ',
'puan': 'PAN',
'puanxian': 'PUE',
'pucheng': 'PCY',
'puchengdong': 'PEY',
'puding': 'PGW',
'pulandian': 'PLT',
'puning': 'PEQ',
'putaojing': 'PTW',
'putian': 'PTS',
'puwan': 'PWT',
'puxiong': 'POW',
'puyang': 'PYF',
'qianan': 'QOT',
'qianfeng': 'QFB',
'qianhe': 'QUY',
'qianjiang': 'QNW',
'qianjinzhen': 'QEB',
'qianmotou': 'QMP',
'qianshan': 'QXQ',
'qianwei': 'QWD',
'qianweitang': 'QWP',
'qianxian': 'QBY',
'qianyang': 'QOY',
'qiaotou': 'QAT',
'qiaoxi': 'QXJ',
'qichun': 'QRN',
'qidian': 'QDM',
'qidong': 'QMQ',
'qidongbei': 'QRQ',
'qifengta': 'QVP',
'qijiang': 'QJW',
'qijiapu': 'QBT',
'qilihe': 'QLD',
'qimen': 'QIH',
'qingan': 'QAB',
'qingbaijiangdong': 'QFW',
'qingchengshan': 'QSW',
'qingdao': 'QDK',
'qingdaobei': 'QHK',
'qingdui': 'QET',
'qingfeng': 'QFT',
'qinghe': 'QIP',
'qinghecheng': 'QYP',
'qinghemen': 'QHD',
'qinghuayuan': 'QHP',
'qingjianxian': 'QNY',
'qinglian': 'QEW',
'qinglong': 'QIB',
'qinglongshan': 'QGH',
'qingshan': 'QSB',
'qingshen': 'QVW',
'qingsheng': 'QSQ',
'qingshui': 'QUJ',
'qingshuibei': 'QEJ',
'qingtian': 'QVH',
'qingtongxia': 'QTJ',
'qingxian': 'QXP',
'qingxu': 'QUV',
'qingyangshan': 'QSJ',
'qingyuan': 'QYT',
'qingzhoushi': 'QZK',
'qinhuangdao': 'QTP',
'qinjia': 'QJB',
'qinjiazhuang': 'QZV',
'qinling': 'QLY',
'qinxian': 'QVV',
'qinyang': 'QYF',
'qinzhou': 'QRZ',
'qinzhoudong': 'QDZ',
'qionghai': 'QYQ',
'qiqihaer': 'QHX',
'qiqihaernan': 'QNB',
'qishan': 'QAY',
'qishuyan': 'QYH',
'qitaihe': 'QTB',
'qixian': 'QXV',
'qixiandong': 'QGV',
'qixiaying': 'QXC',
'qiyang': 'QWQ',
'qiyangbei': 'QVQ',
'qiying': 'QYJ',
'qiziwan': 'QZQ',
'quanjiao': 'INH',
'quanyang': 'QYL',
'quanzhou': 'QYS',
'quanzhoudong': 'QRS',
'quanzhounan': 'QNZ',
'queshan': 'QSN',
'qufu': 'QFK',
'qufudong': 'QAK',
'qujiang': 'QIM',
'qujing': 'QJM',
'qujiu': 'QJZ',
'quli': 'QLZ',
'qushuixian': 'QSO',
'quxian': 'QRW',
'quzhou': 'QEH',
'raoping': 'RVQ',
'raoyang': 'RVP',
'raoyanghe': 'RHD',
'renbu': 'RUO',
'renqiu': 'RQP',
'reshui': 'RSD',
'rikaze': 'RKO',
'rizhao': 'RZK',
'rongan': 'RAZ',
'rongchang': 'RCW',
'rongchangbei': 'RQW',
'rongcheng': 'RCK',
'ronggui': 'RUQ',
'rongjiang': 'RVW',
'rongshui': 'RSZ',
'rongxian': 'RXZ',
'rudong': 'RIH',
'rugao': 'RBH',
'ruian': 'RAH',
'ruichang': 'RCG',
'ruijin': 'RJG',
'rujigou': 'RQJ',
'rushan': 'ROK',
'ruyang': 'RYF',
'ruzhou': 'ROF',
'saihantala': 'SHC',
'salaqi': 'SLC',
'sandaohu': 'SDL',
'sanduxian': 'KKW',
'sanggendalai': 'OGC',
'sanguankou': 'OKJ',
'sangyuanzi': 'SAJ',
'sanhexian': 'OXP',
'sanhezhuang': 'SVP',
'sanhuizhen': 'OZW',
'sanjiadian': 'ODP',
'sanjianfang': 'SFX',
'sanjiangkou': 'SKD',
'sanjiangnan': 'SWZ',
'sanjiangxian': 'SOZ',
'sanjiazhai': 'SMM',
'sanjingzi': 'OJT',
'sanmenxia': 'SMF',
'sanmenxian': 'OQH',
'sanmenxianan': 'SCF',
'sanmenxiaxi': 'SXF',
'sanming': 'SMS',
'sanmingbei': 'SHS',
'sanshijia': 'SRD',
'sanshilipu': 'SST',
'sanshui': 'SJQ',
'sanshuibei': 'ARQ',
'sanshuinan': 'RNQ',
'sansui': 'QHW',
'santangji': 'SDH',
'sanya': 'JUQ',
'sanyangchuan': 'SYJ',
'sanyijing': 'OYD',
'sanying': 'OEJ',
'sanyuan': 'SAY',
'sanyuanpu': 'SYL',
'shache': 'SCR',
'shacheng': 'SCP',
'shahai': 'SED',
'shahe': 'SHP',
'shahekou': 'SKT',
'shaheshi': 'VOP',
'shahousuo': 'SSD',
'shalingzi': 'SLP',
'shanchengzhen': 'SCL',
'shandan': 'SDJ',
'shangbancheng': 'SBP',
'shangbanchengnan': 'OBP',
'shangcheng': 'SWN',
'shangdu': 'SXC',
'shanggaozhen': 'SVK',
'shanghai': 'SHH',
'shanghaihongqiao': 'AOH',
'shanghainan': 'SNH',
'shanghaixi': 'SXH',
'shanghang': 'JBS',
'shanghe': 'SOK',
'shangjia': 'SJB',
'shangluo': 'OLY',
'shangnan': 'ONY',
'shangqiu': 'SQF',
'shangqiunan': 'SPF',
'shangrao': 'SRG',
'shangwan': 'SWP',
'shangxipu': 'SXM',
'shangyaodun': 'SPJ',
'shangyu': 'BDH',
'shangyuan': 'SUD',
'shangyubei': 'SSH',
'shangzhi': 'SZB',
'shanhaiguan': 'SHD',
'shanhetun': 'SHL',
'shanpodong': 'SBN',
'shanshan': 'SSR',
'shanshanbei': 'SMR',
'shanshi': 'SQB',
'shantou': 'OTQ',
'shanwei': 'OGQ',
'shanyin': 'SNV',
'shaodong': 'FIQ',
'shaoguan': 'SNQ',
'shaoguandong': 'SGQ',
'shaojiatang': 'SJJ',
'shaoshan': 'SSQ',
'shaoshannan': 'INQ',
'shaowu': 'SWS',
'shaoxing': 'SOH',
'shaoxingbei': 'SLH',
'shaoyang': 'SYQ',
'shaoyangbei': 'OVQ',
'shapotou': 'SFJ',
'shaqiao': 'SQM',
'shatuo': 'SFM',
'shawanxian': 'SXR',
'shaxian': 'SAS',
'shelihu': 'VLD',
'shenchi': 'SMV',
'shenfang': 'OLH',
'shengfang': 'SUP',
'shenjia': 'OJB',
'shenjiahe': 'OJJ',
'shenjingzi': 'SWT',
'shenmu': 'OMY',
'shenqiu': 'SQN',
'shenshu': 'SWB',
'shentou': 'SEV',
'shenyang': 'SYT',
'shenyangbei': 'SBT',
'shenyangdong': 'SDT',
'shenyangnan': 'SOT',
'shenzhen': 'SZQ',
'shenzhenbei': 'IOQ',
'shenzhendong': 'BJQ',
'shenzhenpingshan': 'IFQ',
'shenzhenxi': 'OSQ',
'shenzhou': 'OZP',
'shexian': 'OEP',
'shexianbei': 'NPH',
'shiba': 'OBJ',
'shibing': 'AQW',
'shiboyuan': 'ZWT',
'shicheng': 'SCT',
'shidu': 'SEP',
'shihezi': 'SZR',
'shijiazhuang': 'SJP',
'shijiazhuangbei': 'VVP',
'shijiazi': 'SJD',
'shijiazui': 'SHM',
'shijingshannan': 'SRP',
'shilidian': 'OMP',
'shilin': 'SPB',
'shiling': 'SOL',
'shilinnan': 'LNM',
'shilong': 'SLQ',
'shimenxian': 'OMQ',
'shimenxianbei': 'VFQ',
'shiqiao': 'SQE',
'shiqiaozi': 'SQT',
'shiquanxian': 'SXY',
'shiren': 'SRL',
'shirencheng': 'SRB',
'shishan': 'SAD',
'shishanbei': 'NSQ',
'shiti': 'STE',
'shitou': 'OTB',
'shixian': 'SXL',
'shixiazi': 'SXJ',
'shixing': 'IPQ',
'shiyan': 'SNN',
'shizhuang': 'SNM',
'shizhuxian': 'OSW',
'shizong': 'SEM',
'shizuishan': 'QQJ',
'shoushan': 'SAT',
'shouyang': 'SYV',
'shuangchengbei': 'SBB',
'shuangchengpu': 'SCB',
'shuangfeng': 'OFB',
'shuangfengbei': 'NFQ',
'shuanghezhen': 'SEL',
'shuangji': 'SML',
'shuangliao': 'ZJD',
'shuangliujichang': 'IPW',
'shuangliuxi': 'IQW',
'shuangpai': 'SBZ',
'shuangyashan': 'SSB',
'shucheng': 'OCH',
'shuidong': 'SIL',
'shuifu': 'OTW',
'shuijiahu': 'SQH',
'shuiquan': 'SID',
'shuiyang': 'OYP',
'shuiyuan': 'OYJ',
'shulan': 'SLL',
'shule': 'SUR',
'shulehe': 'SHJ',
'shunchang': 'SCS',
'shunde': 'ORQ',
'shundexueyuan': 'OJQ',
'shunyi': 'SOP',
'shuozhou': 'SUV',
'shuyang': 'FMH',
'sidaowan': 'OUD',
'sifangtai': 'STB',
'siheyong': 'OHD',
'sihong': 'GQH',
'sihui': 'AHQ',
'sijialing': 'OLK',
'siping': 'SPT',
'sipingdong': 'PPT',
'sishui': 'OSK',
'sixian': 'GPH',
'siyang': 'MPH',
'song': 'SOB',
'songchenglu': 'SFF',
'songhe': 'SBM',
'songjiang': 'SAH',
'songjianghe': 'SJL',
'songjiangnan': 'IMH',
'songjiangzhen': 'OZL',
'songshu': 'SFT',
'songshuzhen': 'SSL',
'songtao': 'MZQ',
'songyuan': 'VYT',
'songyuanbei': 'OCT',
'songzi': 'SIN',
'suide': 'ODY',
'suifenhe': 'SFB',
'suihua': 'SHB',
'suiling': 'SIB',
'suining': 'NIW',
'suiping': 'SON',
'suixi': 'SXZ',
'suiyang': 'SYB',
'suizhong': 'SZD',
'suizhongbei': 'SND',
'suizhou': 'SZN',
'sujiatun': 'SXT',
'suning': 'SYP',
'sunjia': 'SUB',
'sunwu': 'SKB',
'sunzhen': 'OZY',
'suolun': 'SNT',
'suotuhan': 'SHX',
'susong': 'OAH',
'suzhou': 'OXH',
'suzhoubei': 'OHH',
'suzhoudong': 'SRH',
'suzhouxinqu': 'ITH',
'suzhouyuanqu': 'KAH',
'taerqi': 'TVX',
'taha': 'THX',
'tahe': 'TXX',
'taian': 'TID',
'taigu': 'TGV',
'taiguxi': 'TIV',
'taihe': 'THG',
'taihu': 'TKH',
'taikang': 'TKX',
'tailai': 'TLX',
'taimushan': 'TLS',
'taining': 'TNS',
'taipingchuan': 'TIT',
'taipingzhen': 'TEB',
'taiqian': 'TTK',
'taishan': 'TAK',
'taiyangshan': 'TYJ',
'taiyangsheng': 'TQT',
'taiyuan': 'TYV',
'taiyuanbei': 'TBV',
'taiyuandong': 'TDV',
'taiyuannan': 'TNV',
'taizhou': 'UTH',
'tancheng': 'TZK',
'tangbao': 'TBQ',
'tangchi': 'TCX',
'tanggu': 'TGP',
'tanghai': 'THM',
'tanghe': 'THF',
'tangjiawan': 'PDQ',
'tangshan': 'TSP',
'tangshanbei': 'FUP',
'tangshancheng': 'TCT',
'tangwanghe': 'THB',
'tangxunhu': 'THN',
'tangyin': 'TYF',
'tangyuan': 'TYB',
'tanjiajing': 'TNJ',
'taocun': 'TCK',
'taocunbei': 'TOK',
'taojiatun': 'TOT',
'taolaizhao': 'TPT',
'taonan': 'TVT',
'taoshan': 'TAB',
'tashizui': 'TIM',
'tayayi': 'TYP',
'tengxian': 'TAZ',
'tengzhou': 'TXK',
'tengzhoudong': 'TEK',
'tiandong': 'TDZ',
'tiandongbei': 'TBZ',
'tiangang': 'TGL',
'tianjin': 'TJP',
'tianjinbei': 'TBP',
'tianjinnan': 'TIP',
'tianjinxi': 'TXP',
'tianlin': 'TFZ',
'tianmen': 'TMN',
'tianmennan': 'TNN',
'tianqiaoling': 'TQL',
'tianshifu': 'TFT',
'tianshui': 'TSJ',
'tianyang': 'TRZ',
'tianyi': 'TND',
'tianzhen': 'TZV',
'tianzhu': 'TZJ',
'tianzhushan': 'QWH',
'tiechang': 'TCL',
'tieli': 'TLB',
'tieling': 'TLT',
'tielingxi': 'PXT',
'tingliang': 'TIZ',
'tonganyi': 'TAJ',
'tongbai': 'TBF',
'tongbei': 'TBB',
'tongcheng': 'TTH',
'tongdao': 'TRQ',
'tonggou': 'TOL',
'tongguan': 'TGY',
'tonghai': 'TAM',
'tonghua': 'THL',
'tonghuaxian': 'TXL',
'tongjiang': 'TJB',
'tongjunzhuang': 'TZP',
'tongliao': 'TLD',
'tongling': 'TJH',
'tonglingbei': 'KXH',
'tongnan': 'TVW',
'tongren': 'RDQ',
'tongrennan': 'TNW',
'tongtu': 'TUT',
'tongxiang': 'TCH',
'tongxin': 'TXJ',
'tongyuanpu': 'TYT',
'tongyuanpuxi': 'TST',
'tongzhouxi': 'TAP',
'tongzi': 'TZW',
'tongzilin': 'TEW',
'tuanjie': 'TIX',
'tuditangdong': 'TTN',
'tuguiwula': 'TGC',
'tuha': 'THR',
'tuliemaodu': 'TMD',
'tulihe': 'TEX',
'tulufan': 'TFR',
'tulufanbei': 'TAR',
'tumen': 'TML',
'tumenbei': 'QSL',
'tumenzi': 'TCJ',
'tumuertai': 'TRC',
'tuoyaoling': 'TIL',
'tuqiang': 'TQX',
'tuqiaozi': 'TQJ',
'tuxi': 'TSW',
'wafangdian': 'WDT',
'wafangdianxi': 'WXT',
'waitoushan': 'WIT',
'walagan': 'WVX',
'wanfatun': 'WFB',
'wanganzhen': 'WVP',
'wangcang': 'WEW',
'wangdu': 'WDP',
'wangfu': 'WUT',
'wanggang': 'WGB',
'wangjiawan': 'WJJ',
'wangjiayingxi': 'KNM',
'wangou': 'WGL',
'wangqing': 'WQL',
'wangtong': 'WTP',
'wangtuanzhuang': 'WZJ',
'wangyang': 'WYB',
'wangzhaotun': 'WZB',
'wanle': 'WEB',
'wannian': 'WWG',
'wanning': 'WNQ',
'wanyuan': 'WYY',
'wanzhou': 'WYW',
'wanzhoubei': 'WZE',
'wawushan': 'WAH',
'wayaotian': 'WIM',
'weidong': 'WVT',
'weifang': 'WFK',
'weihai': 'WKK',
'weihaibei': 'WHK',
'weihe': 'WHB',
'weihui': 'WHF',
'weihulingbei': 'WBL',
'weijin': 'WJL',
'weinan': 'WNY',
'weinanbei': 'WBY',
'weinannan': 'WVY',
'weinanzhen': 'WNJ',
'weiqing': 'WAM',
'weishanzhuang': 'WSP',
'weishe': 'WSM',
'weixing': 'WVB',
'weizhangzi': 'WKD',
'weizhuang': 'WZY',
'weizigou': 'WZL',
'weizizhen': 'WQP',
'wenan': 'WBP',
'wenchang': 'WEQ',
'wenchun': 'WDB',
'wendeng': 'WBK',
'wendengdong': 'WGK',
'wendi': 'WNZ',
'wenling': 'VHH',
'wenshui': 'WEV',
'wenxi': 'WXV',
'wenxixi': 'WOV',
'wenzhou': 'RZH',
'wenzhounan': 'VRH',
'woken': 'WQB',
'wolitun': 'WLX',
'wopi': 'WPT',
'wuan': 'WAP',
'wuchagou': 'WCT',
'wuchang': 'WCB',
'wudalianchi': 'WRB',
'wudangshan': 'WRN',
'wudaogou': 'WDL',
'wudaohe': 'WHP',
'wuerqihan': 'WHX',
'wufushan': 'WFG',
'wugong': 'WGY',
'wuguantian': 'WGM',
'wuhai': 'WVC',
'wuhaixi': 'WXC',
'wuhan': 'WHN',
'wuhu': 'WHH',
'wuji': 'WJP',
'wujia': 'WUB',
'wujiachuan': 'WCJ',
'wujiatun': 'WJT',
'wukeshu': 'WKT',
'wulanhada': 'WLC',
'wulanhaote': 'WWT',
'wulashan': 'WSC',
'wulateqianqi': 'WQC',
'wulian': 'WLK',
'wulong': 'WLW',
'wulongbei': 'WBT',
'wulongbeidong': 'WMT',
'wulongquannan': 'WFN',
'wulumuqi': 'WAR',
'wulumuqinan': 'WMR',
'wunuer': 'WRX',
'wunvshan': 'WET',
'wupu': 'WUY',
'wuqiao': 'WUP',
'wuqing': 'WWP',
'wushan': 'WSJ',
'wusheng': 'WSE',
'wutaishan': 'WSV',
'wuwei': 'WUJ',
'wuweinan': 'WWJ',
'wuwu': 'WVR',
'wuxi': 'WXR',
'wuxiang': 'WVV',
'wuxidong': 'WGH',
'wuxixinqu': 'IFH',
'wuxu': 'WYZ',
'wuxue': 'WXN',
'wuyi': 'RYH',
'wuyibei': 'WDH',
'wuyiling': 'WPB',
'wuying': 'WWB',
'wuyishan': 'WAS',
'wuyishanbei': 'WBS',
'wuyishandong': 'WCS',
'wuyuan': 'WYC',
'wuzhai': 'WZV',
'wuzhi': 'WIF',
'wuzhou': 'WZZ',
'wuzhounan': 'WBZ',
'xiabancheng': 'EBP',
'xiachengzi': 'XCB',
'xiaguanying': 'XGJ',
'xiahuayuan': 'XYP',
'xiajiang': 'EJG',
'xiamatang': 'XAT',
'xiamen': 'XMS',
'xiamenbei': 'XKS',
'xiamengaoqi': 'XBS',
'xian': 'XAY',
'xianbei': 'EAY',
'xiangcheng': 'ERN',
'xiangfang': 'XFB',
'xiangfen': 'XFV',
'xiangfenxi': 'XTV',
'xianghe': 'XXB',
'xianglan': 'XNB',
'xiangtan': 'XTQ',
'xiangtanbei': 'EDQ',
'xiangtang': 'XTG',
'xiangxiang': 'XXQ',
'xiangyang': 'XFN',
'xiangyangdong': 'XWN',
'xiangyuan': 'EIF',
'xiangyun': 'EXM',
'xianlin': 'XPH',
'xiannan': 'CAY',
'xianning': 'XNN',
'xianningbei': 'XRN',
'xianningdong': 'XKN',
'xianningnan': 'UNN',
'xianrenqiao': 'XRL',
'xiantaoxi': 'XAN',
'xianyang': 'XYY',
'xianyangqindu': 'XOY',
'xianyou': 'XWS',
'xiaocun': 'XEM',
'xiaodejiang': 'EJM',
'xiaodong': 'XOD',
'xiaogan': 'XGN',
'xiaoganbei': 'XJN',
'xiaoheyan': 'XYD',
'xiaohezhen': 'EKY',
'xiaojinkou': 'NKQ',
'xiaolan': 'EAQ',
'xiaoling': 'XLB',
'xiaonan': 'XNV',
'xiaoshao': 'XAM',
'xiaoshi': 'XST',
'xiaosigou': 'ESP',
'xiaoxi': 'XOV',
'xiaoxianbei': 'QSH',
'xiaoxinjie': 'XXM',
'xiaoxizhuang': 'XXP',
'xiaoyangqi': 'XYX',
'xiaoyuejiu': 'XFM',
'xiaoyugu': 'XHM',
'xiapu': 'XOS',
'xiashe': 'XSV',
'xiashi': 'XIZ',
'xiataizi': 'EIP',
'xiayixian': 'EJH',
'xibali': 'XLP',
'xichang': 'ECW',
'xichangnan': 'ENW',
'xidamiao': 'XMP',
'xide': 'EDW',
'xiehejian': 'EEP',
'xiejiazhen': 'XMT',
'xifeng': 'XFW',
'xigangzi': 'NBB',
'xigu': 'XIJ',
'xigucheng': 'XUJ',
'xihudong': 'WDQ',
'xijiekou': 'EKM',
'xilin': 'XYB',
'xilinhaote': 'XTC',
'xiliu': 'GCT',
'ximashan': 'XMB',
'xinan': 'EAM',
'xinanxian': 'XAF',
'xinbaoan': 'XAP',
'xinchengzi': 'XCT',
'xinchuoyuan': 'XRX',
'xindudong': 'EWW',
'xinfeng': 'EFG',
'xingan': 'XAZ',
'xinganbei': 'XDZ',
'xingcheng': 'XCD',
'xingguo': 'EUG',
'xinghexi': 'XEC',
'xingkai': 'EKB',
'xinglongdian': 'XDD',
'xinglongxian': 'EXP',
'xinglongzhen': 'XZB',
'xingning': 'ENQ',
'xingping': 'XPY',
'xingquanbu': 'XQJ',
'xingshu': 'XSB',
'xingshutun': 'XDT',
'xingtai': 'XTP',
'xingtaidong': 'EDP',
'xingye': 'SNZ',
'xingyi': 'XRZ',
'xinhe': 'XIR',
'xinhua': 'EHQ',
'xinhuanan': 'EJQ',
'xinhuang': 'XLQ',
'xinhuangxi': 'EWQ',
'xinhuatun': 'XAX',
'xinhui': 'EFQ',
'xining': 'XNO',
'xinji': 'ENP',
'xinjiang': 'XJV',
'xinjin': 'IRW',
'xinjinnan': 'ITW',
'xinle': 'ELP',
'xinli': 'XLJ',
'xinlin': 'XPX',
'xinlitun': 'XLD',
'xinlizhen': 'XGT',
'xinmin': 'XMD',
'xinpingtian': 'XPM',
'xinqing': 'XQB',
'xinqiu': 'XQD',
'xinsongpu': 'XOB',
'xinwopu': 'EPD',
'xinxian': 'XSN',
'xinxiang': 'XXF',
'xinxiangdong': 'EGF',
'xinxingxian': 'XGQ',
'xinyang': 'XUN',
'xinyangdong': 'OYN',
'xinyangzhen': 'XZJ',
'xinyi': 'EEQ',
'xinyouyi': 'EYB',
'xinyu': 'XUG',
'xinyubei': 'XBG',
'xinzhangfang': 'XZX',
'xinzhangzi': 'ERP',
'xinzhao': 'XZT',
'xinzhengjichang': 'EZF',
'xinzhou': 'XXV',
'xiongyuecheng': 'XYT',
'xiping': 'XPN',
'xipu': 'XIW',
'xipudong': 'XAW',
'xishui': 'XZN',
'xiushan': 'ETW',
'xiuwu': 'XWF',
'xiuwuxi': 'EXF',
'xiwuqi': 'XWC',
'xixia': 'XIF',
'xixian': 'ENN',
'xixiang': 'XQY',
'xixiaozhao': 'XZC',
'xiyangcun': 'XQF',
'xizhelimu': 'XRD',
'xizi': 'XZD',
'xuancheng': 'ECH',
'xuangang': 'XGV',
'xuanhan': 'XHY',
'xuanhe': 'XWJ',
'xuanhua': 'XHP',
'xuanwei': 'XWM',
'xuanzhong': 'XRP',
'xuchang': 'XCF',
'xuchangdong': 'XVF',
'xujia': 'XJB',
'xujiatai': 'XTJ',
'xujiatun': 'XJT',
'xunyang': 'XUY',
'xunyangbei': 'XBY',
'xupu': 'EPQ',
'xupunan': 'EMQ',
'xusanwan': 'XSJ',
'xushui': 'XSP',
'xuwen': 'XJQ',
'xuzhou': 'XCH',
'xuzhoudong': 'UUH',
'yabuli': 'YBB',
'yabulinan': 'YWB',
'yakeshi': 'YKX',
'yalongwan': 'TWQ',
'yanan': 'YWY',
'yancheng': 'YEK',
'yanchi': 'YAP',
'yanchuan': 'YYY',
'yandangshan': 'YGH',
'yangang': 'YGW',
'yangcao': 'YAB',
'yangcaodi': 'YKM',
'yangcha': 'YAL',
'yangchang': 'YED',
'yangcheng': 'YNF',
'yangchenghu': 'AIH',
'yangchun': 'YQQ',
'yangcun': 'YBP',
'yanggang': 'YRB',
'yanggao': 'YOV',
'yanggu': 'YIK',
'yanghe': 'GTH',
'yangjiuhe': 'YHM',
'yanglin': 'YLM',
'yangling': 'YSY',
'yanglingnan': 'YEY',
'yangliuqing': 'YQP',
'yangmingbu': 'YVV',
'yangpingguan': 'YAY',
'yangpu': 'ABM',
'yangqu': 'YQV',
'yangquan': 'AQP',
'yangquanbei': 'YPP',
'yangquanqu': 'YYV',
'yangshuling': 'YAD',
'yangshuo': 'YCZ',
'yangweishao': 'YWM',
'yangxin': 'YVK',
'yangyi': 'ARP',
'yangzhangzi': 'YZD',
'yangzhewo': 'AEM',
'yangzhou': 'YLH',
'yanhecheng': 'YHP',
'yanhui': 'AEP',
'yanji': 'YJL',
'yanjiao': 'AJP',
'yanjiazhuang': 'AZK',
'yanjin': 'AEW',
'yanjixi': 'YXL',
'yanliang': 'YNY',
'yanling': 'YAG',
'yanqi': 'YSR',
'yanqing': 'YNP',
'yanshan': 'AOP',
'yanshi': 'YSF',
'yantai': 'YAK',
'yantainan': 'YLK',
'yantongshan': 'YSL',
'yantongtun': 'YUX',
'yanzhou': 'YZK',
'yanzibian': 'YZY',
'yaoan': 'YAC',
'yaojia': 'YAT',
'yaoqianhutun': 'YQT',
'yaoshang': 'ASP',
'yatunpu': 'YTZ',
'yayuan': 'YYL',
'yazhou': 'YUQ',
'yebaishou': 'YBD',
'yecheng': 'YER',
'yesanpo': 'AIP',
'yian': 'YAX',
'yibin': 'YBW',
'yichang': 'YCN',
'yichangdong': 'HAN',
'yicheng': 'YIN',
'yichun': 'YEG',
'yichunxi': 'YCG',
'yiershi': 'YET',
'yijiang': 'RVH',
'yijianpu': 'YJT',
'yilaha': 'YLX',
'yiliang': 'ALW',
'yiliangbei': 'YSM',
'yilin': 'YLB',
'yima': 'YMF',
'yimianpo': 'YPB',
'yimianshan': 'YST',
'yimin': 'YMX',
'yinai': 'YVM',
'yinan': 'YNK',
'yinchuan': 'YIJ',
'yindi': 'YDM',
'yingbinlu': 'YFW',
'yingcheng': 'YHN',
'yingchengzi': 'YCT',
'yingchun': 'YYB',
'yingde': 'YDQ',
'yingdexi': 'IIQ',
'yingjie': 'YAM',
'yingjisha': 'YIR',
'yingkou': 'YKT',
'yingkoudong': 'YGT',
'yingpanshui': 'YZJ',
'yingshan': 'NUW',
'yingshouyingzi': 'YIP',
'yingtan': 'YTG',
'yingtanbei': 'YKG',
'yingxian': 'YZV',
'yining': 'YMR',
'yiningdong': 'YNR',
'yinlang': 'YJX',
'yinping': 'KPQ',
'yintan': 'CTQ',
'yishui': 'YUK',
'yitulihe': 'YEX',
'yiwu': 'YWH',
'yixian': 'YXD',
'yixing': 'YUH',
'yiyang': 'AEQ',
'yizheng': 'UZH',
'yizhou': 'YSZ',
'yizi': 'YQM',
'yongan': 'YAS',
'yonganxiang': 'YNB',
'yongchengbei': 'RGH',
'yongchuan': 'YCW',
'yongchuandong': 'WMW',
'yongdeng': 'YDJ',
'yongding': 'YGS',
'yongfengying': 'YYM',
'yongfunan': 'YBZ',
'yongji': 'YIV',
'yongjia': 'URH',
'yongjibei': 'AJV',
'yongkang': 'RFH',
'yongkangnan': 'QUH',
'yonglang': 'YLW',
'yongledian': 'YDY',
'yongshou': 'ASY',
'yongtai': 'YTS',
'yongxiu': 'ACG',
'yongzhou': 'AOQ',
'youhao': 'YOB',
'youxi': 'YXS',
'youxian': 'YOG',
'youxiannan': 'YXG',
'youyang': 'AFW',
'yuanbaoshan': 'YUD',
'yuandun': 'YAJ',
'yuanmou': 'YMM',
'yuanping': 'YPV',
'yuanqian': 'AQK',
'yuanshi': 'YSP',
'yuantan': 'YTQ',
'yuanyangzhen': 'YYJ',
'yucheng': 'YCK',
'yuchengxian': 'IXH',
'yuci': 'YCV',
'yudu': 'YDG',
'yuechi': 'AWW',
'yuejiajing': 'YGJ',
'yueliangtian': 'YUM',
'yueqing': 'UPH',
'yueshan': 'YBF',
'yuexi': 'YHW',
'yueyang': 'YYQ',
'yueyangdong': 'YIQ',
'yuge': 'VTM',
'yuhang': 'EVH',
'yujiang': 'YHG',
'yujiapu': 'YKP',
'yuliangpu': 'YLD',
'yulin': 'YLZ',
'yumen': 'YXJ',
'yunan': 'YKQ',
'yuncailing': 'ACP',
'yuncheng': 'YNV',
'yunchengbei': 'ABV',
'yundonghai': 'NAQ',
'yunfudong': 'IXQ',
'yunjusi': 'AFP',
'yunlianghe': 'YEF',
'yunmeng': 'YMN',
'yunshan': 'KZQ',
'yunxiao': 'YBS',
'yuping': 'YZW',
'yuquan': 'YQB',
'yushan': 'YNG',
'yushannan': 'YGG',
'yushe': 'YSV',
'yushi': 'YSJ',
'yushu': 'YRT',
'yushugou': 'YGP',
'yushutai': 'YUT',
'yushutun': 'YSX',
'yutianxian': 'ATP',
'yuxi': 'YXM',
'yuyao': 'YYH',
'yuyaobei': 'CTH',
'zaolin': 'ZIV',
'zaoqiang': 'ZVP',
'zaoyang': 'ZYN',
'zaozhuang': 'ZEK',
'zaozhuangdong': 'ZNK',
'zaozhuangxi': 'ZFK',
'zengjiapingzi': 'ZBW',
'zengkou': 'ZKE',
'zepu': 'ZPR',
'zerunli': 'ZLM',
'zhalainuoerxi': 'ZXX',
'zhalantun': 'ZTX',
'zhalute': 'ZLD',
'zhangbaiwan': 'ZUP',
'zhangdang': 'ZHT',
'zhanggutai': 'ZGD',
'zhangjiajie': 'DIQ',
'zhangjiakou': 'ZKP',
'zhangjiakounan': 'ZMP',
'zhanglan': 'ZLV',
'zhangmutou': 'ZOQ',
'zhangmutoudong': 'ZRQ',
'zhangping': 'ZPS',
'zhangpu': 'ZCS',
'zhangqiao': 'ZQY',
'zhangqiu': 'ZTK',
'zhangshu': 'ZSG',
'zhangshudong': 'ZOG',
'zhangweitun': 'ZWB',
'zhangwu': 'ZWD',
'zhangxin': 'ZIP',
'zhangye': 'ZYJ',
'zhangyexi': 'ZEJ',
'zhangzhou': 'ZUS',
'zhangzhoudong': 'GOS',
'zhanjiang': 'ZJZ',
'zhanjiangxi': 'ZWQ',
'zhaoan': 'ZDS',
'zhaobai': 'ZBP',
'zhaocheng': 'ZCV',
'zhaodong': 'ZDB',
'zhaofupu': 'ZFM',
'zhaoguang': 'ZGB',
'zhaohua': 'ZHW',
'zhaoqing': 'ZVQ',
'zhaoqingdong': 'FCQ',
'zhaotong': 'ZDW',
'zhashui': 'ZSY',
'zhazi': 'ZAL',
'zhelimu': 'ZLC',
'zhenan': 'ZEY',
'zhenchengdi': 'ZDV',
'zhengding': 'ZDP',
'zhengdingjichang': 'ZHP',
'zhengxiangbaiqi': 'ZXC',
'zhengzhou': 'ZZF',
'zhengzhoudong': 'ZAF',
'zhengzhouxi': 'XPF',
'zhenjiang': 'ZJH',
'zhenjiangnan': 'ZEH',
'zhenlai': 'ZLT',
'zhenping': 'ZPF',
'zhenxi': 'ZVT',
'zhenyuan': 'ZUW',
'zhian': 'ZAD',
'zhicheng': 'ZCN',
'zhifangdong': 'ZMN',
'zhijiang': 'ZPQ',
'zhijiangbei': 'ZIN',
'zhijin': 'IZW',
'zhijinbei': 'ZJE',
'zhongchuanjichang': 'ZJJ',
'zhonghe': 'ZHX',
'zhonghuamen': 'VNH',
'zhongjiacun': 'ZJY',
'zhongkai': 'KKQ',
'zhongmu': 'ZGF',
'zhongning': 'VNJ',
'zhongningdong': 'ZDJ',
'zhongningnan': 'ZNJ',
'zhongshan': 'ZSZ',
'zhongshanbei': 'ZGQ',
'zhongshanxi': 'ZAZ',
'zhongwei': 'ZWJ',
'zhongxiang': 'ZTN',
'zhongzhai': 'ZZM',
'zhoujia': 'ZOB',
'zhoujiatun': 'ZOD',
'zhoukou': 'ZKN',
'zhoushuizi': 'ZIT',
'zhuanghebei': 'ZUT',
'zhuangqiao': 'ZQH',
'zhuangzhi': 'ZUX',
'zhucheng': 'ZQK',
'zhuhai': 'ZHQ',
'zhuhaibei': 'ZIQ',
'zhuji': 'ZDH',
'zhujiagou': 'ZUB',
'zhujiawan': 'CWJ',
'zhujiayao': 'ZUJ',
'zhumadian': 'ZDN',
'zhumadianxi': 'ZLN',
'zhuozhou': 'ZXP',
'zhuozhoudong': 'ZAP',
'zhuozidong': 'ZDC',
'zhuozishan': 'ZZC',
'zhurihe': 'ZRC',
'zhuwo': 'ZOP',
'zhuyangxi': 'ZXW',
'zhuyuanba': 'ZAW',
'zhuzhou': 'ZZQ',
'zhuzhouxi': 'ZAQ',
'zibo': 'ZBK',
'zichang': 'ZHY',
'zigong': 'ZGW',
'zijingguan': 'ZYP',
'zixi': 'ZXS',
'ziyang': 'ZVY',
'ziyangbei': 'FYW',
'zizhong': 'ZZW',
'zizhongbei': 'WZW',
'zizhou': 'ZZY',
'zongxi': 'ZOY',
'zoucheng': 'ZIK',
'zunyi': 'ZIW',
'zuoling': 'ZSN'}
# Source repository: quadramadery/bfx-hf-indicators-py
from bfxhfindicators.indicator import Indicator
class WMA(Indicator):
  """Weighted Moving Average: each value in the window is weighted by its
  recency, with the newest value receiving the largest weight."""

  def __init__(self, args = []):
    [ period ] = args

    # Denominator: sum of the weights 1..period, i.e. period * (period + 1) / 2
    d = 0
    for i in range(period):
      d += (i + 1)

    self._d = d
    self._p = period
    self._buffer = []  # most recent `period` values, oldest first

    super().__init__({
      'args': args,
      'id': 'wma',
      'name': 'WMA(%f)' % period,
      'seed_period': period
    })

  def reset(self):
    super().reset()
    self._buffer = []

  def update(self, v):
    # Revise the most recent value in place (seed the buffer if it is empty)
    if len(self._buffer) == 0:
      self._buffer.append(v)
    else:
      self._buffer[-1] = v

    if len(self._buffer) < self._p:
      return

    # Weighted sum: the newest value gets weight `period`, the oldest weight 1
    n = 0
    for i in range(self._p):
      n += self._buffer[-i - 1] * (self._p - i)

    super().update(n / self._d)
    return self.v()

  def add(self, v):
    # Append the new value and trim the buffer to the window length
    self._buffer.append(v)
    if len(self._buffer) > self._p:
      del self._buffer[0]
    elif len(self._buffer) < self._p:
      return

    # Weighted sum: the newest value gets weight `period`, the oldest weight 1
    n = 0
    for i in range(self._p):
      n += self._buffer[-i - 1] * (self._p - i)

    super().add(n / self._d)
    return self.v()
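

# Illustrative usage sketch (not part of the original module). It assumes the
# Indicator base class exposes v() to read the latest computed value, as the
# methods above already rely on.
if __name__ == '__main__':
  wma = WMA([4])
  for price in [10.0, 11.0, 12.0, 13.0, 14.0]:
    wma.add(price)
  print(wma.v())  # (11*1 + 12*2 + 13*3 + 14*4) / 10 = 13.0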
# -*- coding: utf-8 -*-
"""
S3 Microsoft Excel codec
@copyright: 2011-2021 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
__all__ = ("S3XLS",
)
from io import BytesIO
from gluon import HTTP, current
from gluon.contenttype import contenttype
from gluon.storage import Storage
from ..s3codec import S3Codec
from ..s3utils import s3_str, s3_strip_markup, s3_get_foreign_key
# =============================================================================
class S3XLS(S3Codec):
"""
Simple Microsoft Excel format codec
"""
# The xlwt library supports a maximum of 182 characters in a single cell
MAX_CELL_SIZE = 182
# Customizable styles
COL_WIDTH_MULTIPLIER = 310
# Python xlwt Colours
# https://docs.google.com/spreadsheets/d/1ihNaZcUh7961yU7db1-Db0lbws4NT24B7koY8v8GHNQ/pubhtml?gid=1072579560&single=true
LARGE_HEADER_COLOUR = 0x2C # pale_blue
HEADER_COLOUR = 0x2C # pale_blue
SUB_HEADER_COLOUR = 0x18 # periwinkle
SUB_TOTALS_COLOUR = 0x96
TOTALS_COLOUR = 0x00
ROW_ALTERNATING_COLOURS = [0x2A, # light_green
0x2B, # light_yellow
]
ERROR = Storage(
XLRD_ERROR = "XLS export requires python-xlrd module to be installed on server",
XLWT_ERROR = "XLS export requires python-xlwt module to be installed on server",
)
# -------------------------------------------------------------------------
def extract(self, resource, list_fields):
"""
Extract the rows from the resource
@param resource: the resource
@param list_fields: fields to include in list views
"""
title = self.crud_string(resource.tablename, "title_list")
get_vars = dict(current.request.vars)
get_vars["iColumns"] = len(list_fields)
query, orderby, left = resource.datatable_filter(list_fields,
get_vars,
)
resource.add_filter(query)
if orderby is None:
orderby = resource.get_config("orderby")
# Hierarchical FK Expansion:
# setting = {field_selector: [LevelLabel, LevelLabel, ...]}
expand_hierarchy = resource.get_config("xls_expand_hierarchy")
data = resource.select(list_fields,
left = left,
limit = None,
count = True,
getids = True,
orderby = orderby,
represent = True,
show_links = False,
raw_data = True if expand_hierarchy else False,
)
rfields = data.rfields
rows = data.rows
types = []
lfields = []
heading = {}
for rfield in rfields:
if rfield.show:
if expand_hierarchy:
levels = expand_hierarchy.get(rfield.selector)
else:
levels = None
if levels:
num_levels = len(levels)
colnames = self.expand_hierarchy(rfield, num_levels, rows)
lfields.extend(colnames)
types.extend(["string"] * num_levels)
T = current.T
for i, colname in enumerate(colnames):
heading[colname] = T(levels[i])
else:
lfields.append(rfield.colname)
heading[rfield.colname] = rfield.label or \
rfield.field.name.capitalize().replace("_", " ")
if rfield.ftype == "virtual":
types.append("string")
else:
types.append(rfield.ftype)
return (title, types, lfields, heading, rows)
# -------------------------------------------------------------------------
def encode(self, resource, **attr):
"""
Export data as a Microsoft Excel spreadsheet
@param resource: the source of the data that is to be encoded
as a spreadsheet, can be either of:
1) an S3Resource
2) an array of value dicts (dict of
column labels as first item, list of
field types as second item)
3) a dict like:
{columns: [key, ...],
headers: {key: label},
types: {key: type},
rows: [{key:value}],
}
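                                 For example (illustrative values, not from
                                 the original documentation):
                                 {"columns": ["name", "date"],
                                  "headers": {"name": "Name", "date": "Date"},
                                  "types": {"name": "string", "date": "date"},
                                  "rows": [{"name": "Alice", "date": "2021-01-01"}],
                                  }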
@param attr: keyword arguments (see below)
@keyword as_stream: return the buffer (BytesIO) rather than
its contents (str), useful when the output
is supposed to be stored locally
@keyword title: the main title of the report
@keyword list_fields: fields to include in list views
@keyword report_groupby: used to create a grouping of the result:
either a Field object of the resource
or a string which matches a value in
the heading
@keyword use_colour: True to add colour to the cells, default False
@keyword evenodd: render different background colours
for even/odd rows ("stripes")
"""
# Do not redirect from here!
# ...but raise proper status code, which can be caught by caller
try:
import xlwt
except ImportError:
error = self.ERROR.XLWT_ERROR
current.log.error(error)
raise HTTP(503, body=error)
try:
from xlrd.xldate import xldate_from_date_tuple, \
xldate_from_time_tuple, \
xldate_from_datetime_tuple
except ImportError:
error = self.ERROR.XLRD_ERROR
current.log.error(error)
raise HTTP(503, body=error)
import datetime
MAX_CELL_SIZE = self.MAX_CELL_SIZE
COL_WIDTH_MULTIPLIER = self.COL_WIDTH_MULTIPLIER
# Get the attributes
attr_get = attr.get
title = attr_get("title")
if title is None:
title = current.T("Report")
list_fields = attr_get("list_fields")
group = attr_get("dt_group")
use_colour = attr_get("use_colour", False)
evenodd = attr_get("evenodd", True)
# Extract the data from the resource
if isinstance(resource, dict):
headers = resource.get("headers", {})
lfields = resource.get("columns", list_fields)
column_types = resource.get("types")
types = [column_types[col] for col in lfields]
rows = resource.get("rows")
elif isinstance(resource, (list, tuple)):
headers = resource[0]
types = resource[1]
rows = resource[2:]
lfields = list_fields
else:
if not list_fields:
list_fields = resource.list_fields()
(title, types, lfields, headers, rows) = self.extract(resource,
list_fields,
)
# Verify columns in items
request = current.request
if len(rows) > 0 and len(lfields) > len(rows[0]):
msg = """modules/s3/codecs/xls: There is an error in the list items, a field doesn't exist
requesting url %s
Headers = %d, Data Items = %d
Headers %s
List Fields %s""" % (request.url, len(lfields), len(rows[0]), headers, lfields)
current.log.error(msg)
# Grouping
report_groupby = lfields[group] if group else None
groupby_label = headers[report_groupby] if report_groupby else None
# Date/Time formats from L10N deployment settings
settings = current.deployment_settings
date_format = settings.get_L10n_date_format()
date_format_str = str(date_format)
dt_format_translate = self.dt_format_translate
date_format = dt_format_translate(date_format)
time_format = dt_format_translate(settings.get_L10n_time_format())
datetime_format = dt_format_translate(settings.get_L10n_datetime_format())
title_row = settings.get_xls_title_row()
# Get styles
styles = self._styles(use_colour = use_colour,
evenodd = evenodd,
datetime_format = datetime_format,
)
# Create the workbook
book = xlwt.Workbook(encoding = "utf-8")
# Add sheets
sheets = []
        # XLS exports are limited to 65536 rows per sheet, so we bypass
        # this limit by creating multiple sheets
row_limit = 65536
sheetnum = len(rows) / row_limit
# Can't have a / in the sheet_name, so replace any with a space
sheet_name = s3_str(title.replace("/", " "))
if len(sheet_name) > 28:
# Sheet name cannot be over 31 chars
# (take sheet number suffix into account)
sheet_name = sheet_name[:28]
count = 1
while len(sheets) <= sheetnum:
sheets.append(book.add_sheet("%s-%s" % (sheet_name, count)))
count += 1
if callable(title_row):
# Calling with sheet None to get the number of title rows
title_row_length = title_row(None)
else:
title_row_length = 2
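        # Illustrative custom title_row (an assumption about the setting's
        # contract, derived from how it is called here): it writes its own
        # header rows when given a sheet, and reports how many rows it needs
        # when called with sheet=None:
        #   def my_title_row(sheet):
        #       if sheet is not None:
        #           sheet.write_merge(0, 0, 0, 5, "Custom Report Title")
        #       return 1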
# Add header row to all sheets, determine columns widths
header_style = styles["header"]
for sheet in sheets:
# Move this down if a title row will be added
if title_row:
header_row = sheet.row(title_row_length)
else:
header_row = sheet.row(0)
column_widths = []
has_id = False
col_index = 0
for selector in lfields:
if selector == report_groupby:
continue
label = headers[selector]
if label == "Id":
# Indicate to adjust col_index when writing out
has_id = True
column_widths.append(0)
col_index += 1
continue
if label == "Sort":
continue
if has_id:
# Adjust for the skipped column
write_col_index = col_index - 1
else:
write_col_index = col_index
header_row.write(write_col_index, str(label), header_style)
width = max(len(label) * COL_WIDTH_MULTIPLIER, 2000)
width = min(width, 65535) # USHRT_MAX
column_widths.append(width)
sheet.col(write_col_index).width = width
col_index += 1
title = s3_str(title)
# Title row (optional, deployment setting)
if title_row:
T = current.T
large_header_style = styles["large_header"]
notes_style = styles["notes"]
for sheet in sheets:
if callable(title_row):
# Custom title rows
title_row(sheet)
else:
# First row => Title (standard = "title_list" CRUD string)
current_row = sheet.row(0)
if col_index > 0:
sheet.write_merge(0, 0, 0, col_index,
title,
large_header_style,
)
current_row.height = 500
# Second row => Export date/time
current_row = sheet.row(1)
current_row.write(0, "%s:" % T("Date Exported"), notes_style)
current_row.write(1, request.now, notes_style)
# Fix the size of the last column to display the date
if 16 * COL_WIDTH_MULTIPLIER > width:
sheet.col(col_index).width = 16 * COL_WIDTH_MULTIPLIER
# Initialize counters
total_cols = col_index
# Move the rows down if a title row is included
if title_row:
row_index = title_row_length
else:
row_index = 0
# Helper function to get the current row
def get_current_row(row_count, row_limit):
sheet_count = int(row_count / row_limit)
row_number = row_count - (sheet_count * row_limit)
if sheet_count > 0:
row_number += 1
return sheets[sheet_count], sheets[sheet_count].row(row_number)
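        # Worked example (illustrative): with row_limit = 65536 and
        # row_count = 70000, the cell goes to sheets[int(70000 / 65536)] =
        # sheets[1], at row 70000 - 65536 + 1 = 4465.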
# Write the table contents
subheading = None
odd_style = styles["odd"]
even_style = styles["even"]
subheader_style = styles["subheader"]
for row in rows:
# Current row
row_index += 1
current_sheet, current_row = get_current_row(row_index, row_limit)
style = even_style if row_index % 2 == 0 else odd_style
# Group headers
if report_groupby:
represent = s3_strip_markup(s3_str(row[report_groupby]))
if subheading != represent:
# Start of new group - write group header
subheading = represent
current_sheet.write_merge(row_index, row_index, 0, total_cols,
subheading,
subheader_style,
)
# Move on to next row
row_index += 1
current_sheet, current_row = get_current_row(row_index, row_limit)
style = even_style if row_index % 2 == 0 else odd_style
col_index = 0
remaining_fields = lfields
# Custom row style?
row_style = None
if "_style" in row:
stylename = row["_style"]
if stylename in styles:
row_style = styles[stylename]
# Group header/footer row?
if "_group" in row:
group_info = row["_group"]
label = group_info.get("label")
totals = group_info.get("totals")
if label:
label = s3_strip_markup(s3_str(label))
style = row_style or subheader_style
span = group_info.get("span")
if span == 0:
current_sheet.write_merge(row_index,
row_index,
0,
total_cols - 1,
label,
style,
)
if totals:
# Write totals into the next row
row_index += 1
current_sheet, current_row = \
get_current_row(row_index, row_limit)
else:
current_sheet.write_merge(row_index,
row_index,
0,
span - 1,
label,
style,
)
col_index = span
remaining_fields = lfields[span:]
if not totals:
continue
for field in remaining_fields:
label = headers[field]
if label == groupby_label:
continue
if label == "Id":
# Skip the ID column from XLS exports
col_index += 1
continue
if field not in row:
represent = ""
else:
represent = s3_strip_markup(s3_str(row[field]))
coltype = types[col_index]
if coltype == "sort":
continue
if len(represent) > MAX_CELL_SIZE:
represent = represent[:MAX_CELL_SIZE]
value = represent
if coltype == "date":
try:
cell_datetime = datetime.datetime.strptime(value,
date_format_str)
date_tuple = (cell_datetime.year,
cell_datetime.month,
cell_datetime.day)
value = xldate_from_date_tuple(date_tuple, 0)
style.num_format_str = date_format
except:
pass
elif coltype == "datetime":
try:
cell_datetime = datetime.datetime.strptime(value,
date_format_str)
date_tuple = (cell_datetime.year,
cell_datetime.month,
cell_datetime.day,
cell_datetime.hour,
cell_datetime.minute,
cell_datetime.second)
value = xldate_from_datetime_tuple(date_tuple, 0)
style.num_format_str = datetime_format
except:
pass
elif coltype == "time":
try:
cell_datetime = datetime.datetime.strptime(value,
date_format_str)
date_tuple = (cell_datetime.hour,
cell_datetime.minute,
cell_datetime.second)
value = xldate_from_time_tuple(date_tuple)
style.num_format_str = time_format
except:
pass
elif coltype == "integer":
try:
value = int(value)
style.num_format_str = "0"
except:
pass
elif coltype == "double":
try:
value = float(value)
style.num_format_str = "0.00"
except:
pass
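                    # Worked example (illustrative): with date_format_str = "%Y-%m-%d",
                    # a "date" cell "2021-03-05" parses to the tuple (2021, 3, 5), and
                    # xldate_from_date_tuple((2021, 3, 5), 0) yields the Excel serial
                    # 44260.0, which the date num_format_str then renders as a date.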
if has_id:
# Adjust for the skipped column
write_col_index = col_index - 1
else:
write_col_index = col_index
current_row.write(write_col_index, value, style)
width = len(represent) * COL_WIDTH_MULTIPLIER
if width > column_widths[col_index]:
column_widths[col_index] = width
current_sheet.col(write_col_index).width = width
col_index += 1
# Additional sheet settings
for sheet in sheets:
sheet.panes_frozen = True
sheet.horz_split_pos = 1
# Write output
output = BytesIO()
book.save(output)
output.seek(0)
if attr_get("as_stream", False):
return output
# Response headers
filename = "%s_%s.xls" % (request.env.server_name, title)
disposition = "attachment; filename=\"%s\"" % filename
response = current.response
response.headers["Content-Type"] = contenttype(".xls")
response.headers["Content-disposition"] = disposition
return output.read()
# -------------------------------------------------------------------------
@staticmethod
def expand_hierarchy(rfield, num_levels, rows):
"""
Expand a hierarchical foreign key column into one column
per hierarchy level
@param rfield: the column (S3ResourceField)
@param num_levels: the number of levels (from root)
@param rows: the Rows from S3ResourceData
@returns: list of keys (column names) for the inserted columns
"""
field = rfield.field
if not field or rfield.ftype[:9] != "reference":
return []
# Get the look-up table
ktablename = s3_get_foreign_key(field, m2m=False)[0]
if not ktablename:
return []
colname = rfield.colname
represent = field.represent
# Get the hierarchy
from ..s3hierarchy import S3Hierarchy
h = S3Hierarchy(ktablename)
if not h.config:
return []
# Collect the values from rows
values = set()
for row in rows:
value = row["_row"][colname]
if type(value) is list:
value = value[0]
values.add(value)
# Generate the expanded values
expanded = h.repr_expand(values,
levels = num_levels,
represent = represent,
)
# ...and add them into the rows
colnames = ["%s__%s" % (colname, l) for l in range(num_levels)]
for row in rows:
value = row["_row"][colname]
if type(value) is list:
value = value[0]
hcols = expanded.get(value)
for level in range(num_levels):
row[colnames[level]] = hcols[level] if hcols else None
return colnames
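        # Illustrative example (hypothetical column name): for a hierarchical
        # foreign key with colname "org_organisation.parent" and num_levels = 2,
        # this adds "org_organisation.parent__0" and "org_organisation.parent__1"
        # to every row, one represented hierarchy level per column.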
# -------------------------------------------------------------------------
@staticmethod
def encode_pt(pt, title):
"""
Encode a S3PivotTable as XLS sheet
@param pt: the S3PivotTable
@param title: the title for the report
@returns: the XLS file as stream
"""
output = BytesIO()
book = S3PivotTableXLS(pt).encode(title)
book.save(output)
output.seek(0)
return output
# -------------------------------------------------------------------------
@staticmethod
def dt_format_translate(pyfmt):
"""
Translate a Python datetime format string into an
Excel datetime format string
@param pyfmt: the Python format string
"""
translate = {"%a": "ddd",
"%A": "dddd",
"%b": "mmm",
"%B": "mmmm",
"%c": "",
"%d": "dd",
"%f": "",
"%H": "hh",
"%I": "hh",
"%j": "",
"%m": "mm",
"%M": "mm",
"%p": "AM/PM",
"%S": "ss",
"%U": "",
"%w": "",
"%W": "",
"%x": "",
"%X": "",
"%y": "yy",
"%Y": "yyyy",
"%z": "",
"%Z": "",
}
PERCENT = "__percent__"
xlfmt = str(pyfmt).replace("%%", PERCENT)
for tag, translation in translate.items():
xlfmt = xlfmt.replace(tag, translation)
return xlfmt.replace(PERCENT, "%")
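        # Illustrative example: dt_format_translate("%d/%m/%Y %H:%M") returns
        # "dd/mm/yyyy hh:mm", a format string that xlwt accepts as num_format_str.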
# -------------------------------------------------------------------------
@classmethod
def _styles(cls,
use_colour = False,
evenodd = True,
datetime_format = None,
):
"""
XLS encoder standard cell styles
@param use_colour: use background colour in cells
@param evenodd: render different background colours
for even/odd rows ("stripes")
@param datetime_format: the date/time format
"""
import xlwt
if datetime_format is None:
# Support easier usage from external functions
datetime_format = cls.dt_format_translate(current.deployment_settings.get_L10n_datetime_format())
# Styles
large_header = xlwt.XFStyle()
large_header.font.bold = True
large_header.font.height = 400
if use_colour:
SOLID_PATTERN = large_header.pattern.SOLID_PATTERN
large_header.alignment.horz = large_header.alignment.HORZ_CENTER
large_header.pattern.pattern = SOLID_PATTERN
large_header.pattern.pattern_fore_colour = cls.LARGE_HEADER_COLOUR
notes = xlwt.XFStyle()
notes.font.italic = True
notes.font.height = 160 # 160 Twips = 8 point
notes.num_format_str = datetime_format
header = xlwt.XFStyle()
header.font.bold = True
header.num_format_str = datetime_format
if use_colour:
header.pattern.pattern = SOLID_PATTERN
header.pattern.pattern_fore_colour = cls.HEADER_COLOUR
subheader = xlwt.XFStyle()
subheader.font.bold = True
if use_colour:
subheader.pattern.pattern = SOLID_PATTERN
subheader.pattern.pattern_fore_colour = cls.SUB_HEADER_COLOUR
subtotals = xlwt.XFStyle()
subtotals.font.bold = True
if use_colour:
subtotals.pattern.pattern = SOLID_PATTERN
subtotals.pattern.pattern_fore_colour = cls.SUB_TOTALS_COLOUR
totals = xlwt.XFStyle()
totals.font.bold = True
if use_colour:
totals.pattern.pattern = SOLID_PATTERN
totals.pattern.pattern_fore_colour = cls.TOTALS_COLOUR
odd = xlwt.XFStyle()
if use_colour and evenodd:
odd.pattern.pattern = SOLID_PATTERN
odd.pattern.pattern_fore_colour = cls.ROW_ALTERNATING_COLOURS[0]
even = xlwt.XFStyle()
if use_colour and evenodd:
even.pattern.pattern = SOLID_PATTERN
even.pattern.pattern_fore_colour = cls.ROW_ALTERNATING_COLOURS[1]
return {"large_header": large_header,
"notes": notes,
"header": header,
"subheader": subheader,
"subtotals": subtotals,
"totals": totals,
"odd": odd,
"even": even,
}
# =============================================================================
class S3PivotTableXLS(object):
"""
XLS encoder for S3PivotTables
@todo: merge+DRY with S3XLS?
@todo: support multiple layers (=write multiple sheets)
@todo: handle huge pivot tables (=exceeding XLS rows/cols limits)
"""
def __init__(self, pt):
"""
Constructor
@param pt: the S3PivotTable to encode
"""
self.pt = pt
# Initialize properties
self._styles = None
self._formats = None
self.lookup = {}
self.valuemap = {}
# -------------------------------------------------------------------------
def encode(self, title):
"""
Convert this pivot table into an XLS file
@param title: the title of the report
@returns: the XLS workbook
"""
try:
import xlwt
except ImportError:
error = S3XLS.ERROR.XLWT_ERROR
current.log.error(error)
raise HTTP(503, body=error)
T = current.T
TOTAL = s3_str(s3_str(T("Total")).upper())
pt = self.pt
# Get report options
report_options = pt.resource.get_config("report_options", {})
# Report dimensions
fact = pt.facts[0]
layer = fact.layer
rows_dim = pt.rows
cols_dim = pt.cols
numrows = pt.numrows
numcols = pt.numcols
# Resource fields for dimensions
rfields = pt.rfields
fact_rfield = rfields[fact.selector]
rows_rfield = rfields[rows_dim] if rows_dim else None
cols_rfield = rfields[cols_dim] if cols_dim else None
# Dimension labels
get_label = fact._get_field_label
if rows_dim:
# Get row axis label
rows_label = s3_str(get_label(rows_rfield,
report_options.get("rows"),
))
else:
rows_label = ""
if cols_dim:
cols_label = s3_str(get_label(cols_rfield,
report_options.get("cols"),
))
else:
cols_label = ""
fact_label = s3_str(fact.get_label(fact_rfield,
report_options.get("fact"),
))
# Index of the column for row totals
total_column = (numcols + 1) if cols_dim else 1
# Sort+represent rows and columns
rows, cols = self.sortrepr()
# Create workbook and sheet
book = xlwt.Workbook(encoding = "utf-8")
sheet = book.add_sheet(s3_str(title))
write = self.write
# Write header
title_row = current.deployment_settings.get_xls_title_row()
if callable(title_row):
# Custom header (returns number of header rows)
title_length = title_row(sheet)
elif title_row:
# Default header
title_length = 2
# Report title
write(sheet, 0, 0, s3_str(title),
colspan = numcols + 2,
style = "title",
)
# Current date/time (in local timezone)
from ..s3datetime import S3DateTime
dt = S3DateTime.to_local(current.request.utcnow)
write(sheet, 1, 0, dt,
style = "subheader",
numfmt = "datetime",
)
else:
# No header
title_length = -1
rowindex = title_length + 1
# Fact label
if rows_dim and cols_dim:
write(sheet, rowindex, 0, fact_label,
style = "fact_label",
)
# Columns axis title
if cols_dim:
write(sheet, rowindex, 1, cols_label,
colspan = numcols,
style = "axis_title",
)
rowindex += 1
# Row axis title
write(sheet, rowindex, 0, rows_label,
style = "axis_title",
)
# Column labels
if cols_dim:
for idx, col in enumerate(cols):
write(sheet, rowindex, idx + 1, col[2]["text"],
style = "col_label",
)
total_label = TOTAL
else:
# Use fact title as row total label if there is no column axis
total_label = fact_label
# Row totals label
write(sheet, rowindex, total_column, total_label, style="total_right")
# Determine the number format for cell values
numfmt = self.number_format()
totfmt = "integer" if fact.method in ("count", "list") else numfmt
# Choose cell value style according to number format
fact_style = "numeric" if numfmt else None
# Get fact representation method
if fact.method == "list":
listrepr = self.listrepr
fk, fact_repr = pt._represents([layer])[fact.selector]
else:
listrepr = fk = fact_repr = None
# Write data rows (if any)
rowindex += 1
if rows_dim:
icell = pt.cell
for i in range(numrows):
row = rows[i]
# Row-label
write(sheet, rowindex + i, 0, row[2]["text"],
style = "row_label",
)
# Cell column values (if any)
if cols_dim:
for j in range(numcols):
cell = icell[row[0]][cols[j][0]]
if listrepr:
value = listrepr(cell, fact_rfield, fact_repr, fk=fk)
else:
value = cell[layer]
write(sheet, rowindex + i, j + 1, value,
numfmt = numfmt,
style = fact_style,
)
# Row-total
write(sheet, rowindex + i, total_column, row[1],
style = "total",
numfmt = totfmt,
)
rowindex += numrows
total_label = TOTAL
else:
# Use fact label as column totals label if
# there is no row dimension
total_label = fact_label
# Column totals label
write(sheet, rowindex, 0, total_label,
style = "total_left",
)
# Column totals
if cols_dim:
for i in range(numcols):
write(sheet, rowindex, i + 1, cols[i][1],
style = "total",
numfmt = totfmt,
)
# Grand total
total = pt.totals[layer]
write(sheet, rowindex, total_column, total,
style = "grand_total",
numfmt = totfmt,
)
return book
# -------------------------------------------------------------------------
def write(self,
sheet,
rowindex,
colindex,
value,
style = None,
numfmt = None,
rowspan = None,
colspan = None,
adjust = True,
):
"""
Write a value to a spreadsheet cell
@param sheet: the work sheet
@param rowindex: the row index of the cell
@param colindex: the column index of the cell
@param value: the value to write
@param style: a style name (see styles property)
@param numfmt: a number format name (see formats property)
@param rowspan: number of rows to merge
@param colspan: number of columns to merge
@param adjust: True to adjust column width and row height,
False to suppress automatic adjustment
"""
styles = self.styles
if style:
style = styles.get(style)
if style is None:
style = styles["default"]
# Apply number format
if numfmt:
style.num_format_str = self.formats.get(numfmt, "")
# Get the row
row = sheet.row(rowindex)
if type(value) is list:
labels = [s3_str(v) for v in value]
contents = "\n".join(labels)
else:
labels = [s3_str(value)]
contents = value
# Convert rowspan/colspan into zero-based merge extents
rowspan = 0 if not rowspan or rowspan < 1 else rowspan - 1
colspan = 0 if not colspan or colspan < 1 else colspan - 1
if rowspan > 0 or colspan > 0:
# Write-merge whenever the cell spans more than one row or column
sheet.write_merge(rowindex, rowindex + rowspan,
colindex, colindex + colspan,
contents,
style,
)
else:
# Just write
row.write(colindex, contents, style)
# Reset number format
style.num_format_str = ""
# Adjust column width and row height
# NB approximations, no exact science (not possible except by
# enforcing a particular fixed-width font, which we don't
# want), so manual adjustments after export may still be
# necessary. Better solutions welcome!
if adjust:
fontsize = float(style.font.height)
# Adjust column width
col = sheet.col(colindex)
if not colspan:
if labels:
width = int(min(max(len(l) for l in labels), 28) *
fontsize * 5.0 / 3.0)
else:
width = 0
if width > col.width:
col.width = width
# Adjust row height
if not rowspan:
lineheight = 1.2 if style.font.bold else 1.0
import math
numlines = 0
width = (col.width * 0.8 * (colspan + 1))
for label in labels:
numlines += math.ceil(len(label) * fontsize / width)
if numlines > 1:
lines = min(numlines, 10)
height = int((lines + 0.8 / lineheight) *
fontsize * lineheight)
else:
height = int(fontsize * lineheight)
if height > row.height:
row.height = height
row.height_mismatch = 1
# -------------------------------------------------------------------------
@property
def styles(self):
"""
Style definitions for pivot tables (lazy property)
@returns: dict of named XFStyle instances
"""
styles = self._styles
if styles is None:
from xlwt import Alignment, XFStyle
# Alignments
center = Alignment()
center.horz = Alignment.HORZ_CENTER
center.vert = Alignment.VERT_CENTER
center.wrap = 1
centerleft = Alignment()
centerleft.horz = Alignment.HORZ_LEFT
centerleft.vert = Alignment.VERT_CENTER
centerleft.wrap = 1
bottomcentered = Alignment()
bottomcentered.horz = Alignment.HORZ_CENTER
bottomcentered.vert = Alignment.VERT_BOTTOM
bottomcentered.wrap = 1
bottomleft = Alignment()
bottomleft.horz = Alignment.HORZ_LEFT
bottomleft.vert = Alignment.VERT_BOTTOM
bottomleft.wrap = 1
bottomright = Alignment()
bottomright.horz = Alignment.HORZ_RIGHT
bottomright.vert = Alignment.VERT_BOTTOM
bottomright.wrap = 1
topleft = Alignment()
topleft.horz = Alignment.HORZ_LEFT
topleft.vert = Alignment.VERT_TOP
topleft.wrap = 1
topright = Alignment()
topright.horz = Alignment.HORZ_RIGHT
topright.vert = Alignment.VERT_TOP
topright.wrap = 1
# Styles
twips = lambda pt: 20 * pt # Points to Twips
def style(fontsize=10, bold=False, italic=False, align=None):
""" XFStyle builder helper """
style = XFStyle()
style.font.height = twips(fontsize)
style.font.bold = bold
style.font.italic = italic
if align is not None:
style.alignment = align
return style
self._styles = styles = {"default": style(align = topleft),
"numeric": style(align = bottomright),
"title": style(fontsize = 14,
bold = True,
align = bottomleft,
),
"subheader": style(fontsize = 8,
italic = True,
align = bottomleft,
),
"row_label": style(bold = True,
align = topleft,
),
"col_label": style(bold = True,
align = bottomcentered,
),
"fact_label": style(fontsize = 13,
bold = True,
align = centerleft,
),
"axis_title": style(fontsize = 11,
bold = True,
align = center,
),
"total": style(fontsize = 11,
bold = True,
italic = True,
align = topright,
),
"total_left": style(fontsize = 11,
bold = True,
italic = True,
align = topleft,
),
"total_right": style(fontsize = 11,
bold = True,
italic = True,
align = center,
),
"grand_total": style(fontsize = 12,
bold = True,
italic = True,
align = topright,
),
}
return styles
# -------------------------------------------------------------------------
@property
def formats(self):
"""
Number formats for pivot tables (lazy property)
@returns: dict of format strings
"""
formats = self._formats
if formats is None:
# Date/Time formats from L10N deployment settings
settings = current.deployment_settings
translate = S3XLS.dt_format_translate
date_format = translate(settings.get_L10n_date_format())
datetime_format = translate(settings.get_L10n_datetime_format())
time_format = translate(settings.get_L10n_time_format())
formats = {"date": date_format,
"datetime": datetime_format,
"time": time_format,
"integer": "0",
"double": "0.00"
}
self._formats = formats
return formats
# -------------------------------------------------------------------------
def number_format(self):
"""
Determine the number format for this pivot table
@returns: the number format key (see formats property)
"""
numfmt = None
pt = self.pt
fact = pt.facts[0]
rfield = pt.rfields[fact.selector]
ftype = rfield.ftype
if fact.method == "count":
numfmt = "integer"
elif ftype == "integer":
if fact.method == "avg":
# Average value of ints is a float
numfmt = "double"
else:
numfmt = "integer"
elif ftype in ("date", "datetime", "time", "double"):
numfmt = ftype
elif ftype == "virtual":
# Probe the first value
value = pt.cell[0][0][fact.layer]
if isinstance(value, int):
numfmt = "integer"
elif isinstance(value, float):
numfmt = "double"
else:
import datetime
if isinstance(value, datetime.datetime):
numfmt = "datetime"
elif isinstance(value, datetime.date):
numfmt = "date"
elif isinstance(value, datetime.time):
numfmt = "time"
return numfmt
# -------------------------------------------------------------------------
def sortrepr(self):
"""
Sort and represent pivot table axes
@returns: tuple (rows, cols), each a list of tuples
(index, total, header), where index is the position of the
row/column in the original cell array, total is the total
value of the row/column, and header is a dict
{"value": axis_value, "text": axis_repr} holding the group
value and its representation
"""
pt = self.pt
rfields = pt.rfields
layer = pt.facts[0].layer
# Sort rows
rows_dim = pt.rows
rows_rfield = rfields[rows_dim] if rows_dim else None
row_repr = pt._represent_method(rows_dim)
irows = pt.row
rows = []
for i in range(pt.numrows):
irow = irows[i]
header = {"value": irow.value,
"text": irow.text if "text" in irow
else row_repr(irow.value),
}
rows.append((i, irow[layer], header))
pt._sortdim(rows, rows_rfield, index=2)
# Sort columns
cols_dim = pt.cols
cols_rfield = rfields[cols_dim] if cols_dim else None
col_repr = pt._represent_method(cols_dim)
icols = pt.col
cols = []
for i in range(pt.numcols):
icol = icols[i]
header = {"value": icol.value,
"text": icol.text if "text" in icol
else col_repr(icol.value),
}
cols.append((i, icol[layer], header))
pt._sortdim(cols, cols_rfield, index=2)
return rows, cols
# -------------------------------------------------------------------------
def listrepr(self, cell, rfield, represent, fk=True):
"""
Represent and sort a list of cell values (for "list" aggregation
method)
@param cell: the cell data
@param rfield: the fact S3ResourceField
@param represent: representation method for the fact field
@param fk: fact field is a foreign key
@returns: sorted list of represented cell values
"""
pt = self.pt
records = pt.records
colname = rfield.colname
lookup = self.lookup
valuemap = self.valuemap
keys = []
for record_id in cell["records"]:
record = records[record_id]
try:
fvalue = record[colname]
except AttributeError:
continue
if fvalue is None:
continue
if type(fvalue) is not list:
fvalue = [fvalue]
for v in fvalue:
if v is None:
continue
if fk:
if v not in keys:
keys.append(v)
if v not in lookup:
lookup[v] = represent(v)
else:
if v not in valuemap:
next_id = len(valuemap)
valuemap[v] = next_id
keys.append(next_id)
lookup[next_id] = represent(v)
else:
prev_id = valuemap[v]
if prev_id not in keys:
keys.append(prev_id)
keys.sort(key = lambda i: lookup[i])
items = [s3_str(lookup[key]) for key in keys if key in lookup]
return items
# =============================================================================
#class S3HTML2XLS(object):
# """
# Class that takes HTML in the form of web2py helper objects
# and converts it to XLS
#
# @ToDo: Complete this (e.g. start with a copy of S3html2pdf)
# See https://gist.github.com/JustOnce/2be3e4d951a66c22c5e0
# & http://pydoc.net/Python/Kiowa/0.2w.rc9/kiowa.utils.xls.html2xls/
#
# Places to use this:
# org_CapacityReport()
# """
#
# def __init__(self):
#
# pass
#
# # -------------------------------------------------------------------------
# def parse(self, html):
# """
# Entry point for class
# """
#
# return None
#
# END =========================================================================