code | apis | extract_api |
---|---|---|
#!/usr/bin/env python
"""
Extract UV coordinates from a mesh.
"""
import argparse
import pymesh
import numpy as np
def parse_args():
parser = argparse.ArgumentParser(
description=__doc__);
parser.add_argument("--save-uv", action="store_true");
parser.add_argument("input_mesh", help="input mesh");
parser.add_argument("output_mesh", help="output mesh");
return parser.parse_args();
def main():
args = parse_args();
mesh = pymesh.load_mesh(args.input_mesh);
if not mesh.has_attribute("corner_texture"):
raise RuntimeError("Mesh contains no uv!");
mesh.add_attribute("face_area");
cutted_mesh = pymesh.cut_mesh(mesh);
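# Cut the mesh along UV discontinuities (seams) so that each vertex of the cut
# mesh carries a single, consistent UV coordinate; this lets the per-corner UVs
# be stored per vertex below.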
uvs = cutted_mesh.get_attribute("corner_texture").reshape((-1, 2));
faces = cutted_mesh.faces;
per_vertex_uv = np.ndarray((cutted_mesh.num_vertices, 2));
per_vertex_uv[faces.ravel(order="C")] = uvs;
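# "corner_texture" lists one UV per face corner in the same order as
# faces.ravel(order="C"), so this scatter assigns each vertex its UV coordinate.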
if not args.save_uv:
cutted_mesh.add_attribute("u");
cutted_mesh.set_attribute("u", per_vertex_uv[:,0]);
cutted_mesh.add_attribute("v");
cutted_mesh.set_attribute("v", per_vertex_uv[:,1]);
pymesh.save_mesh(args.output_mesh, cutted_mesh, "u", "v");
else:
uv_mesh = pymesh.form_mesh(per_vertex_uv, faces);
pymesh.save_mesh(args.output_mesh, uv_mesh);
if __name__ == "__main__":
main();
| [
"pymesh.save_mesh",
"argparse.ArgumentParser",
"pymesh.load_mesh",
"pymesh.form_mesh",
"numpy.ndarray",
"pymesh.cut_mesh"
] | [((148, 192), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '__doc__'}), '(description=__doc__)\n', (171, 192), False, 'import argparse\n'), ((474, 507), 'pymesh.load_mesh', 'pymesh.load_mesh', (['args.input_mesh'], {}), '(args.input_mesh)\n', (490, 507), False, 'import pymesh\n'), ((671, 692), 'pymesh.cut_mesh', 'pymesh.cut_mesh', (['mesh'], {}), '(mesh)\n', (686, 692), False, 'import pymesh\n'), ((822, 863), 'numpy.ndarray', 'np.ndarray', (['(cutted_mesh.num_vertices, 2)'], {}), '((cutted_mesh.num_vertices, 2))\n', (832, 863), True, 'import numpy as np\n'), ((1156, 1213), 'pymesh.save_mesh', 'pymesh.save_mesh', (['args.output_mesh', 'cutted_mesh', '"""u"""', '"""v"""'], {}), "(args.output_mesh, cutted_mesh, 'u', 'v')\n", (1172, 1213), False, 'import pymesh\n'), ((1245, 1283), 'pymesh.form_mesh', 'pymesh.form_mesh', (['per_vertex_uv', 'faces'], {}), '(per_vertex_uv, faces)\n', (1261, 1283), False, 'import pymesh\n'), ((1294, 1337), 'pymesh.save_mesh', 'pymesh.save_mesh', (['args.output_mesh', 'uv_mesh'], {}), '(args.output_mesh, uv_mesh)\n', (1310, 1337), False, 'import pymesh\n')] |
# -*- coding: utf-8 -*-
import numpy as np
import tensorflow as tf
import tensorlayer as tl
from tensorlayer.layers import (BatchNormLayer, Conv2d, DenseLayer,
FlattenLayer, InputLayer,
LocalResponseNormLayer, MaxPool2d)
from data_input import fer2013_input
CLASSES_NUM = fer2013_input.CLASSES_NUM
def vgg_net_model(x, y_correct, reuse):
w_init = tf.truncated_normal_initializer(stddev=5e-2)
w_init2 = tf.truncated_normal_initializer(stddev=0.04)
b_init2 = tf.constant_initializer(value=0.1)
with tf.variable_scope("vgg_net_model", reuse=reuse):
input_layer = InputLayer(x, name="input")
# Convolution block 1
conv_1_1 = Conv2d(input_layer, 64, (3, 3), (1, 1), act=tf.nn.relu,
padding='SAME', W_init=w_init, name='conv_1_1')
conv_1_2 = Conv2d(conv_1_1, 64, (3, 3), (1, 1), act=tf.nn.relu,
padding='SAME', W_init=w_init, name='conv_1_2')
lrn_1 = LocalResponseNormLayer(
conv_1_2, depth_radius=4, bias=1.0, alpha=0.001 / 9.0, beta=0.75, name='lrn_1')
# Pooling 1
pool_1 = MaxPool2d(lrn_1, (3, 3), (2, 2),
padding='SAME', name='lrn_1')
# Convolution block 2
conv_2_1 = Conv2d(pool_1, 128, (3, 3), (1, 1), act=tf.nn.relu,
padding='SAME', W_init=w_init, name='conv_2_1')
conv_2_2 = Conv2d(conv_2_1, 128, (3, 3), (1, 1), act=tf.nn.relu,
padding='SAME', W_init=w_init, name='conv_2_2')
lrn_2 = LocalResponseNormLayer(
conv_2_2, depth_radius=4, bias=1.0, alpha=0.001 / 9.0, beta=0.75, name='lrn_2')
# Pooling 2
pool_2 = MaxPool2d(lrn_2, (3, 3), (2, 2),
padding='SAME', name='pool_2')
# Convolution block 3
conv_3_1 = Conv2d(pool_2, 256, (3, 3), (1, 1), act=tf.nn.relu,
padding='SAME', W_init=w_init, name='conv_3_1')
conv_3_2 = Conv2d(conv_3_1, 256, (3, 3), (1, 1), act=tf.nn.relu,
padding='SAME', W_init=w_init, name='conv_3_2')
conv_3_3 = Conv2d(conv_3_2, 256, (3, 3), (1, 1), act=tf.nn.relu,
padding='SAME', W_init=w_init, name='conv_3_3')
lrn_3 = LocalResponseNormLayer(
conv_3_3, depth_radius=4, bias=1.0, alpha=0.001 / 9.0, beta=0.75, name='lrn_3')
# Pooling 3
pool_3 = MaxPool2d(lrn_3, (3, 3), (2, 2),
padding='SAME', name='pool_3')
# Convolution block 4
conv_4_1 = Conv2d(pool_3, 512, (3, 3), (1, 1), act=tf.nn.relu,
padding='SAME', W_init=w_init, name='conv_4_1')
conv_4_2 = Conv2d(conv_4_1, 512, (3, 3), (1, 1), act=tf.nn.relu,
padding='SAME', W_init=w_init, name='conv_4_2')
conv_4_3 = Conv2d(conv_4_2, 512, (3, 3), (1, 1), act=tf.nn.relu,
padding='SAME', W_init=w_init, name='conv_4_3')
lrn_4 = LocalResponseNormLayer(
conv_4_3, depth_radius=4, bias=1.0, alpha=0.001 / 9.0, beta=0.75, name='lrn_4')
# Pooling 4
pool_4 = MaxPool2d(lrn_4, (3, 3), (2, 2),
padding='SAME', name='pool_4')
# Convolution block 5
conv_5_1 = Conv2d(pool_4, 512, (3, 3), (1, 1), act=tf.nn.relu,
padding='SAME', W_init=w_init, name='conv_5_1')
conv_5_2 = Conv2d(conv_5_1, 512, (3, 3), (1, 1), act=tf.nn.relu,
padding='SAME', W_init=w_init, name='conv_5_2')
conv_5_3 = Conv2d(conv_5_2, 512, (3, 3), (1, 1), act=tf.nn.relu,
padding='SAME', W_init=w_init, name='conv_5_3')
lrn_5 = LocalResponseNormLayer(
conv_5_3, depth_radius=4, bias=1.0, alpha=0.001 / 9.0, beta=0.75, name='lrn_5')
# Pooling 5
pool_5 = MaxPool2d(lrn_5, (3, 3), (2, 2),
padding='SAME', name='pool_5')
# Fully connected layers
flatten_layer = FlattenLayer(pool_5, name='flatten')
fc1 = DenseLayer(flatten_layer, 4096, act=tf.nn.relu,
W_init=w_init2, b_init=b_init2, name='fc1')
fc2 = DenseLayer(fc1, 4096, act=tf.nn.relu,
W_init=w_init2, b_init=b_init2, name='fc2')
fc3 = DenseLayer(fc2, 1000, act=tf.nn.relu,
W_init=w_init2, b_init=b_init2, name='fc3')
model = DenseLayer(fc3, CLASSES_NUM, act=None, W_init=w_init2, name='output')
y_pred = model.outputs
ce = tl.cost.cross_entropy(y_pred, y_correct, name='COST')
# L2 regularization for the MLP; without it, accuracy drops by roughly 15%.
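# Caveat (editor's note): get_variables_with_name('relu/W', True, True) only selects
# variables whose name contains 'relu/W', as in the TensorLayer CIFAR-10 example; with
# the layer names used here ('conv_*', 'fc1'..'fc3'), the pattern may match no weights,
# leaving l2 at 0.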
l2 = 0
for p in tl.layers.get_variables_with_name('relu/W', True, True):
l2 += tf.contrib.layers.l2_regularizer(0.004)(p)
cost = ce + l2
correct_prediction = tf.equal(tf.argmax(y_pred, 1), y_correct)
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
return model, cost, accuracy
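# A minimal usage sketch (not part of the original file; the 24 x 24 x 1 placeholder
# shape is an assumption based on distort_fn below cropping fer2013 images to 24 x 24):
#   x = tf.placeholder(tf.float32, [None, 24, 24, 1], name="x")
#   y_ = tf.placeholder(tf.int64, [None], name="y_")
#   net, cost, acc = vgg_net_model(x, y_, reuse=False)        # training graph
#   _, cost_test, acc_test = vgg_net_model(x, y_, reuse=True) # evaluation graph sharing weights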
def vgg_net_model_bn(x, y_correct, reuse, is_train):
""" Batch normalization should be placed before rectifier. """
w_init = tf.truncated_normal_initializer(stddev=5e-2)
w_init2 = tf.truncated_normal_initializer(stddev=0.04)
b_init2 = tf.constant_initializer(value=0.1)
with tf.variable_scope("vgg_net_model_fn", reuse=reuse):
input_layer = InputLayer(x, name="input")
# Convolution block 1
conv_1_1 = Conv2d(input_layer, 64, (3, 3), (1, 1), act=tf.nn.relu,
padding='SAME', W_init=w_init, name='conv_1_1')
conv_1_2 = Conv2d(conv_1_1, 64, (3, 3), (1, 1), act=tf.nn.relu,
padding='SAME', W_init=w_init, name='conv_1_2')
bn_1 = BatchNormLayer(conv_1_2, is_train, act=tf.nn.relu, name='bn_1')
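# BatchNormLayer replaces the LocalResponseNormLayer of vgg_net_model; is_train
# switches between batch statistics (training) and the stored moving averages (inference).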
# Pooling 1
pool_1 = MaxPool2d(bn_1, (3, 3), (2, 2),
padding='SAME', name='lrn_1')
# Convolution block 2
conv_2_1 = Conv2d(pool_1, 128, (3, 3), (1, 1), act=tf.nn.relu,
padding='SAME', W_init=w_init, name='conv_2_1')
conv_2_2 = Conv2d(conv_2_1, 128, (3, 3), (1, 1), act=tf.nn.relu,
padding='SAME', W_init=w_init, name='conv_2_2')
bn_2 = BatchNormLayer(conv_2_2, is_train, act=tf.nn.relu, name='bn_2')
# Pooling 2
pool_2 = MaxPool2d(bn_2, (3, 3), (2, 2),
padding='SAME', name='pool_2')
# Convolution block 3
conv_3_1 = Conv2d(pool_2, 256, (3, 3), (1, 1), act=tf.nn.relu,
padding='SAME', W_init=w_init, name='conv_3_1')
conv_3_2 = Conv2d(conv_3_1, 256, (3, 3), (1, 1), act=tf.nn.relu,
padding='SAME', W_init=w_init, name='conv_3_2')
conv_3_3 = Conv2d(conv_3_2, 256, (3, 3), (1, 1), act=tf.nn.relu,
padding='SAME', W_init=w_init, name='conv_3_3')
bn_3 = BatchNormLayer(conv_3_3, is_train, act=tf.nn.relu, name='bn_3')
# Pooling 3
pool_3 = MaxPool2d(bn_3, (3, 3), (2, 2),
padding='SAME', name='pool_3')
# Convolution block 4
conv_4_1 = Conv2d(pool_3, 512, (3, 3), (1, 1), act=tf.nn.relu,
padding='SAME', W_init=w_init, name='conv_4_1')
conv_4_2 = Conv2d(conv_4_1, 512, (3, 3), (1, 1), act=tf.nn.relu,
padding='SAME', W_init=w_init, name='conv_4_2')
conv_4_3 = Conv2d(conv_4_2, 512, (3, 3), (1, 1), act=tf.nn.relu,
padding='SAME', W_init=w_init, name='conv_4_3')
bn_4 = BatchNormLayer(conv_4_3, is_train, act=tf.nn.relu, name='bn_4')
# Pooling 4
pool_4 = MaxPool2d(bn_4, (3, 3), (2, 2),
padding='SAME', name='pool_4')
# Convolution block 5
conv_5_1 = Conv2d(pool_4, 512, (3, 3), (1, 1), act=tf.nn.relu,
padding='SAME', W_init=w_init, name='conv_5_1')
conv_5_2 = Conv2d(conv_5_1, 512, (3, 3), (1, 1), act=tf.nn.relu,
padding='SAME', W_init=w_init, name='conv_5_2')
conv_5_3 = Conv2d(conv_5_2, 512, (3, 3), (1, 1), act=tf.nn.relu,
padding='SAME', W_init=w_init, name='conv_5_3')
bn_5 = BatchNormLayer(conv_5_3, is_train, act=tf.nn.relu, name='bn_5')
# Pooling 5
pool_5 = MaxPool2d(bn_5, (3, 3), (2, 2),
padding='SAME', name='pool_5')
# Fully connected layers
flatten_layer = FlattenLayer(pool_5, name='flatten')
fc1 = DenseLayer(flatten_layer, 4096, act=tf.nn.relu,
W_init=w_init2, b_init=b_init2, name='fc1')
fc2 = DenseLayer(fc1, 4096, act=tf.nn.relu,
W_init=w_init2, b_init=b_init2, name='fc2')
fc3 = DenseLayer(fc2, 1000, act=tf.nn.relu,
W_init=w_init2, b_init=b_init2, name='fc3')
model = DenseLayer(fc3, CLASSES_NUM, act=None, W_init=w_init2, name='output')
y_pred = model.outputs
ce = tl.cost.cross_entropy(y_pred, y_correct, name='_cost')
# L2 regularization for the MLP; without it, accuracy drops by roughly 15%.
l2 = 0
for p in tl.layers.get_variables_with_name('relu/W', True, True):
l2 += tf.contrib.layers.l2_regularizer(0.004)(p)
cost = ce + l2
correct_prediction = tf.equal(tf.argmax(y_pred, 1), y_correct)
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
return model, cost, accuracy
def distort_fn(x, is_train=False):
"""
The images are processed as follows:
.. They are cropped to 24 x 24 pixels, centrally for evaluation or randomly for training.
.. They are approximately whitened to make the vgg_net_model insensitive to dynamic range.
For training, we additionally apply a series of random distortions to
artificially increase the data set size:
.. Randomly flip the image from left to right.
.. Randomly distort the image brightness.
"""
# print('begin', x.shape, np.min(x), np.max(x))
x = tl.prepro.crop(x, 24, 24, is_random=is_train)
# print('after crop', x.shape, np.min(x), np.max(x))
if is_train:
# x = tl.prepro.zoom(x, zoom_range=(0.9, 1.0), is_random=True)
# print('after zoom', x.shape, np.min(x), np.max(x))
x = tl.prepro.flip_axis(x, axis=1, is_random=True)
# print('after flip',x.shape, np.min(x), np.max(x))
x = tl.prepro.brightness(x, gamma=0.1, gain=1, is_random=True)
# print('after brightness',x.shape, np.min(x), np.max(x))
# tmp = np.max(x)
# x += np.random.uniform(-20, 20)
# x /= tmp
# normalize the image
x = (x - np.mean(x)) / max(np.std(x), 1e-5) # avoid values divided by 0
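# This is the approximate whitening mentioned in the docstring: per-image zero mean
# and (near) unit variance.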
# print('after norm', x.shape, np.min(x), np.max(x), np.mean(x))
return x
| [
"tensorflow.contrib.layers.l2_regularizer",
"tensorflow.truncated_normal_initializer",
"tensorflow.cast",
"numpy.mean",
"tensorlayer.layers.DenseLayer",
"tensorlayer.prepro.brightness",
"tensorlayer.layers.get_variables_with_name",
"tensorlayer.prepro.crop",
"tensorlayer.layers.MaxPool2d",
"tensorflow.variable_scope",
"tensorlayer.cost.cross_entropy",
"tensorlayer.layers.InputLayer",
"numpy.std",
"tensorlayer.layers.FlattenLayer",
"tensorlayer.prepro.flip_axis",
"tensorlayer.layers.LocalResponseNormLayer",
"tensorlayer.layers.BatchNormLayer",
"tensorlayer.layers.Conv2d",
"tensorflow.argmax",
"tensorflow.constant_initializer"
] | [((420, 464), 'tensorflow.truncated_normal_initializer', 'tf.truncated_normal_initializer', ([], {'stddev': '(0.05)'}), '(stddev=0.05)\n', (451, 464), True, 'import tensorflow as tf\n'), ((479, 523), 'tensorflow.truncated_normal_initializer', 'tf.truncated_normal_initializer', ([], {'stddev': '(0.04)'}), '(stddev=0.04)\n', (510, 523), True, 'import tensorflow as tf\n'), ((538, 572), 'tensorflow.constant_initializer', 'tf.constant_initializer', ([], {'value': '(0.1)'}), '(value=0.1)\n', (561, 572), True, 'import tensorflow as tf\n'), ((5163, 5207), 'tensorflow.truncated_normal_initializer', 'tf.truncated_normal_initializer', ([], {'stddev': '(0.05)'}), '(stddev=0.05)\n', (5194, 5207), True, 'import tensorflow as tf\n'), ((5222, 5266), 'tensorflow.truncated_normal_initializer', 'tf.truncated_normal_initializer', ([], {'stddev': '(0.04)'}), '(stddev=0.04)\n', (5253, 5266), True, 'import tensorflow as tf\n'), ((5281, 5315), 'tensorflow.constant_initializer', 'tf.constant_initializer', ([], {'value': '(0.1)'}), '(value=0.1)\n', (5304, 5315), True, 'import tensorflow as tf\n'), ((10231, 10276), 'tensorlayer.prepro.crop', 'tl.prepro.crop', (['x', '(24)', '(24)'], {'is_random': 'is_train'}), '(x, 24, 24, is_random=is_train)\n', (10245, 10276), True, 'import tensorlayer as tl\n'), ((582, 629), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""vgg_net_model"""'], {'reuse': 'reuse'}), "('vgg_net_model', reuse=reuse)\n", (599, 629), True, 'import tensorflow as tf\n'), ((653, 680), 'tensorlayer.layers.InputLayer', 'InputLayer', (['x'], {'name': '"""input"""'}), "(x, name='input')\n", (663, 680), False, 'from tensorlayer.layers import BatchNormLayer, Conv2d, DenseLayer, FlattenLayer, InputLayer, LocalResponseNormLayer, MaxPool2d\n'), ((718, 825), 'tensorlayer.layers.Conv2d', 'Conv2d', (['input_layer', '(64)', '(3, 3)', '(1, 1)'], {'act': 'tf.nn.relu', 'padding': '"""SAME"""', 'W_init': 'w_init', 'name': '"""conv_1_1"""'}), "(input_layer, 64, (3, 3), (1, 1), act=tf.nn.relu, padding='SAME',\n W_init=w_init, name='conv_1_1')\n", (724, 825), False, 'from tensorlayer.layers import BatchNormLayer, Conv2d, DenseLayer, FlattenLayer, InputLayer, LocalResponseNormLayer, MaxPool2d\n'), ((867, 972), 'tensorlayer.layers.Conv2d', 'Conv2d', (['conv_1_1', '(64)', '(3, 3)', '(1, 1)'], {'act': 'tf.nn.relu', 'padding': '"""SAME"""', 'W_init': 'w_init', 'name': '"""conv_1_2"""'}), "(conv_1_1, 64, (3, 3), (1, 1), act=tf.nn.relu, padding='SAME', W_init\n =w_init, name='conv_1_2')\n", (873, 972), False, 'from tensorlayer.layers import BatchNormLayer, Conv2d, DenseLayer, FlattenLayer, InputLayer, LocalResponseNormLayer, MaxPool2d\n'), ((1010, 1117), 'tensorlayer.layers.LocalResponseNormLayer', 'LocalResponseNormLayer', (['conv_1_2'], {'depth_radius': '(4)', 'bias': '(1.0)', 'alpha': '(0.001 / 9.0)', 'beta': '(0.75)', 'name': '"""lrn_1"""'}), "(conv_1_2, depth_radius=4, bias=1.0, alpha=0.001 / \n 9.0, beta=0.75, name='lrn_1')\n", (1032, 1117), False, 'from tensorlayer.layers import BatchNormLayer, Conv2d, DenseLayer, FlattenLayer, InputLayer, LocalResponseNormLayer, MaxPool2d\n'), ((1159, 1221), 'tensorlayer.layers.MaxPool2d', 'MaxPool2d', (['lrn_1', '(3, 3)', '(2, 2)'], {'padding': '"""SAME"""', 'name': '"""lrn_1"""'}), "(lrn_1, (3, 3), (2, 2), padding='SAME', name='lrn_1')\n", (1168, 1221), False, 'from tensorlayer.layers import BatchNormLayer, Conv2d, DenseLayer, FlattenLayer, InputLayer, LocalResponseNormLayer, MaxPool2d\n'), ((1286, 1390), 'tensorlayer.layers.Conv2d', 'Conv2d', (['pool_1', '(128)', '(3, 3)', '(1, 
1)'], {'act': 'tf.nn.relu', 'padding': '"""SAME"""', 'W_init': 'w_init', 'name': '"""conv_2_1"""'}), "(pool_1, 128, (3, 3), (1, 1), act=tf.nn.relu, padding='SAME', W_init=\n w_init, name='conv_2_1')\n", (1292, 1390), False, 'from tensorlayer.layers import BatchNormLayer, Conv2d, DenseLayer, FlattenLayer, InputLayer, LocalResponseNormLayer, MaxPool2d\n'), ((1431, 1536), 'tensorlayer.layers.Conv2d', 'Conv2d', (['conv_2_1', '(128)', '(3, 3)', '(1, 1)'], {'act': 'tf.nn.relu', 'padding': '"""SAME"""', 'W_init': 'w_init', 'name': '"""conv_2_2"""'}), "(conv_2_1, 128, (3, 3), (1, 1), act=tf.nn.relu, padding='SAME',\n W_init=w_init, name='conv_2_2')\n", (1437, 1536), False, 'from tensorlayer.layers import BatchNormLayer, Conv2d, DenseLayer, FlattenLayer, InputLayer, LocalResponseNormLayer, MaxPool2d\n'), ((1575, 1682), 'tensorlayer.layers.LocalResponseNormLayer', 'LocalResponseNormLayer', (['conv_2_2'], {'depth_radius': '(4)', 'bias': '(1.0)', 'alpha': '(0.001 / 9.0)', 'beta': '(0.75)', 'name': '"""lrn_2"""'}), "(conv_2_2, depth_radius=4, bias=1.0, alpha=0.001 / \n 9.0, beta=0.75, name='lrn_2')\n", (1597, 1682), False, 'from tensorlayer.layers import BatchNormLayer, Conv2d, DenseLayer, FlattenLayer, InputLayer, LocalResponseNormLayer, MaxPool2d\n'), ((1724, 1787), 'tensorlayer.layers.MaxPool2d', 'MaxPool2d', (['lrn_2', '(3, 3)', '(2, 2)'], {'padding': '"""SAME"""', 'name': '"""pool_2"""'}), "(lrn_2, (3, 3), (2, 2), padding='SAME', name='pool_2')\n", (1733, 1787), False, 'from tensorlayer.layers import BatchNormLayer, Conv2d, DenseLayer, FlattenLayer, InputLayer, LocalResponseNormLayer, MaxPool2d\n'), ((1852, 1956), 'tensorlayer.layers.Conv2d', 'Conv2d', (['pool_2', '(256)', '(3, 3)', '(1, 1)'], {'act': 'tf.nn.relu', 'padding': '"""SAME"""', 'W_init': 'w_init', 'name': '"""conv_3_1"""'}), "(pool_2, 256, (3, 3), (1, 1), act=tf.nn.relu, padding='SAME', W_init=\n w_init, name='conv_3_1')\n", (1858, 1956), False, 'from tensorlayer.layers import BatchNormLayer, Conv2d, DenseLayer, FlattenLayer, InputLayer, LocalResponseNormLayer, MaxPool2d\n'), ((1997, 2102), 'tensorlayer.layers.Conv2d', 'Conv2d', (['conv_3_1', '(256)', '(3, 3)', '(1, 1)'], {'act': 'tf.nn.relu', 'padding': '"""SAME"""', 'W_init': 'w_init', 'name': '"""conv_3_2"""'}), "(conv_3_1, 256, (3, 3), (1, 1), act=tf.nn.relu, padding='SAME',\n W_init=w_init, name='conv_3_2')\n", (2003, 2102), False, 'from tensorlayer.layers import BatchNormLayer, Conv2d, DenseLayer, FlattenLayer, InputLayer, LocalResponseNormLayer, MaxPool2d\n'), ((2144, 2249), 'tensorlayer.layers.Conv2d', 'Conv2d', (['conv_3_2', '(256)', '(3, 3)', '(1, 1)'], {'act': 'tf.nn.relu', 'padding': '"""SAME"""', 'W_init': 'w_init', 'name': '"""conv_3_3"""'}), "(conv_3_2, 256, (3, 3), (1, 1), act=tf.nn.relu, padding='SAME',\n W_init=w_init, name='conv_3_3')\n", (2150, 2249), False, 'from tensorlayer.layers import BatchNormLayer, Conv2d, DenseLayer, FlattenLayer, InputLayer, LocalResponseNormLayer, MaxPool2d\n'), ((2288, 2395), 'tensorlayer.layers.LocalResponseNormLayer', 'LocalResponseNormLayer', (['conv_3_3'], {'depth_radius': '(4)', 'bias': '(1.0)', 'alpha': '(0.001 / 9.0)', 'beta': '(0.75)', 'name': '"""lrn_3"""'}), "(conv_3_3, depth_radius=4, bias=1.0, alpha=0.001 / \n 9.0, beta=0.75, name='lrn_3')\n", (2310, 2395), False, 'from tensorlayer.layers import BatchNormLayer, Conv2d, DenseLayer, FlattenLayer, InputLayer, LocalResponseNormLayer, MaxPool2d\n'), ((2437, 2500), 'tensorlayer.layers.MaxPool2d', 'MaxPool2d', (['lrn_3', '(3, 3)', '(2, 2)'], {'padding': '"""SAME"""', 'name': 
'"""pool_3"""'}), "(lrn_3, (3, 3), (2, 2), padding='SAME', name='pool_3')\n", (2446, 2500), False, 'from tensorlayer.layers import BatchNormLayer, Conv2d, DenseLayer, FlattenLayer, InputLayer, LocalResponseNormLayer, MaxPool2d\n'), ((2565, 2669), 'tensorlayer.layers.Conv2d', 'Conv2d', (['pool_3', '(512)', '(3, 3)', '(1, 1)'], {'act': 'tf.nn.relu', 'padding': '"""SAME"""', 'W_init': 'w_init', 'name': '"""conv_4_1"""'}), "(pool_3, 512, (3, 3), (1, 1), act=tf.nn.relu, padding='SAME', W_init=\n w_init, name='conv_4_1')\n", (2571, 2669), False, 'from tensorlayer.layers import BatchNormLayer, Conv2d, DenseLayer, FlattenLayer, InputLayer, LocalResponseNormLayer, MaxPool2d\n'), ((2710, 2815), 'tensorlayer.layers.Conv2d', 'Conv2d', (['conv_4_1', '(512)', '(3, 3)', '(1, 1)'], {'act': 'tf.nn.relu', 'padding': '"""SAME"""', 'W_init': 'w_init', 'name': '"""conv_4_2"""'}), "(conv_4_1, 512, (3, 3), (1, 1), act=tf.nn.relu, padding='SAME',\n W_init=w_init, name='conv_4_2')\n", (2716, 2815), False, 'from tensorlayer.layers import BatchNormLayer, Conv2d, DenseLayer, FlattenLayer, InputLayer, LocalResponseNormLayer, MaxPool2d\n'), ((2857, 2962), 'tensorlayer.layers.Conv2d', 'Conv2d', (['conv_4_2', '(512)', '(3, 3)', '(1, 1)'], {'act': 'tf.nn.relu', 'padding': '"""SAME"""', 'W_init': 'w_init', 'name': '"""conv_4_3"""'}), "(conv_4_2, 512, (3, 3), (1, 1), act=tf.nn.relu, padding='SAME',\n W_init=w_init, name='conv_4_3')\n", (2863, 2962), False, 'from tensorlayer.layers import BatchNormLayer, Conv2d, DenseLayer, FlattenLayer, InputLayer, LocalResponseNormLayer, MaxPool2d\n'), ((3001, 3108), 'tensorlayer.layers.LocalResponseNormLayer', 'LocalResponseNormLayer', (['conv_4_3'], {'depth_radius': '(4)', 'bias': '(1.0)', 'alpha': '(0.001 / 9.0)', 'beta': '(0.75)', 'name': '"""lrn_4"""'}), "(conv_4_3, depth_radius=4, bias=1.0, alpha=0.001 / \n 9.0, beta=0.75, name='lrn_4')\n", (3023, 3108), False, 'from tensorlayer.layers import BatchNormLayer, Conv2d, DenseLayer, FlattenLayer, InputLayer, LocalResponseNormLayer, MaxPool2d\n'), ((3150, 3213), 'tensorlayer.layers.MaxPool2d', 'MaxPool2d', (['lrn_4', '(3, 3)', '(2, 2)'], {'padding': '"""SAME"""', 'name': '"""pool_4"""'}), "(lrn_4, (3, 3), (2, 2), padding='SAME', name='pool_4')\n", (3159, 3213), False, 'from tensorlayer.layers import BatchNormLayer, Conv2d, DenseLayer, FlattenLayer, InputLayer, LocalResponseNormLayer, MaxPool2d\n'), ((3278, 3382), 'tensorlayer.layers.Conv2d', 'Conv2d', (['pool_4', '(512)', '(3, 3)', '(1, 1)'], {'act': 'tf.nn.relu', 'padding': '"""SAME"""', 'W_init': 'w_init', 'name': '"""conv_5_1"""'}), "(pool_4, 512, (3, 3), (1, 1), act=tf.nn.relu, padding='SAME', W_init=\n w_init, name='conv_5_1')\n", (3284, 3382), False, 'from tensorlayer.layers import BatchNormLayer, Conv2d, DenseLayer, FlattenLayer, InputLayer, LocalResponseNormLayer, MaxPool2d\n'), ((3423, 3528), 'tensorlayer.layers.Conv2d', 'Conv2d', (['conv_5_1', '(512)', '(3, 3)', '(1, 1)'], {'act': 'tf.nn.relu', 'padding': '"""SAME"""', 'W_init': 'w_init', 'name': '"""conv_5_2"""'}), "(conv_5_1, 512, (3, 3), (1, 1), act=tf.nn.relu, padding='SAME',\n W_init=w_init, name='conv_5_2')\n", (3429, 3528), False, 'from tensorlayer.layers import BatchNormLayer, Conv2d, DenseLayer, FlattenLayer, InputLayer, LocalResponseNormLayer, MaxPool2d\n'), ((3570, 3675), 'tensorlayer.layers.Conv2d', 'Conv2d', (['conv_5_2', '(512)', '(3, 3)', '(1, 1)'], {'act': 'tf.nn.relu', 'padding': '"""SAME"""', 'W_init': 'w_init', 'name': '"""conv_5_3"""'}), "(conv_5_2, 512, (3, 3), (1, 1), act=tf.nn.relu, padding='SAME',\n 
W_init=w_init, name='conv_5_3')\n", (3576, 3675), False, 'from tensorlayer.layers import BatchNormLayer, Conv2d, DenseLayer, FlattenLayer, InputLayer, LocalResponseNormLayer, MaxPool2d\n'), ((3714, 3821), 'tensorlayer.layers.LocalResponseNormLayer', 'LocalResponseNormLayer', (['conv_5_3'], {'depth_radius': '(4)', 'bias': '(1.0)', 'alpha': '(0.001 / 9.0)', 'beta': '(0.75)', 'name': '"""lrn_5"""'}), "(conv_5_3, depth_radius=4, bias=1.0, alpha=0.001 / \n 9.0, beta=0.75, name='lrn_5')\n", (3736, 3821), False, 'from tensorlayer.layers import BatchNormLayer, Conv2d, DenseLayer, FlattenLayer, InputLayer, LocalResponseNormLayer, MaxPool2d\n'), ((3863, 3926), 'tensorlayer.layers.MaxPool2d', 'MaxPool2d', (['lrn_5', '(3, 3)', '(2, 2)'], {'padding': '"""SAME"""', 'name': '"""pool_5"""'}), "(lrn_5, (3, 3), (2, 2), padding='SAME', name='pool_5')\n", (3872, 3926), False, 'from tensorlayer.layers import BatchNormLayer, Conv2d, DenseLayer, FlattenLayer, InputLayer, LocalResponseNormLayer, MaxPool2d\n'), ((3994, 4030), 'tensorlayer.layers.FlattenLayer', 'FlattenLayer', (['pool_5'], {'name': '"""flatten"""'}), "(pool_5, name='flatten')\n", (4006, 4030), False, 'from tensorlayer.layers import BatchNormLayer, Conv2d, DenseLayer, FlattenLayer, InputLayer, LocalResponseNormLayer, MaxPool2d\n'), ((4046, 4142), 'tensorlayer.layers.DenseLayer', 'DenseLayer', (['flatten_layer', '(4096)'], {'act': 'tf.nn.relu', 'W_init': 'w_init2', 'b_init': 'b_init2', 'name': '"""fc1"""'}), "(flatten_layer, 4096, act=tf.nn.relu, W_init=w_init2, b_init=\n b_init2, name='fc1')\n", (4056, 4142), False, 'from tensorlayer.layers import BatchNormLayer, Conv2d, DenseLayer, FlattenLayer, InputLayer, LocalResponseNormLayer, MaxPool2d\n'), ((4177, 4263), 'tensorlayer.layers.DenseLayer', 'DenseLayer', (['fc1', '(4096)'], {'act': 'tf.nn.relu', 'W_init': 'w_init2', 'b_init': 'b_init2', 'name': '"""fc2"""'}), "(fc1, 4096, act=tf.nn.relu, W_init=w_init2, b_init=b_init2, name=\n 'fc2')\n", (4187, 4263), False, 'from tensorlayer.layers import BatchNormLayer, Conv2d, DenseLayer, FlattenLayer, InputLayer, LocalResponseNormLayer, MaxPool2d\n'), ((4298, 4384), 'tensorlayer.layers.DenseLayer', 'DenseLayer', (['fc2', '(1000)'], {'act': 'tf.nn.relu', 'W_init': 'w_init2', 'b_init': 'b_init2', 'name': '"""fc3"""'}), "(fc2, 1000, act=tf.nn.relu, W_init=w_init2, b_init=b_init2, name=\n 'fc3')\n", (4308, 4384), False, 'from tensorlayer.layers import BatchNormLayer, Conv2d, DenseLayer, FlattenLayer, InputLayer, LocalResponseNormLayer, MaxPool2d\n'), ((4422, 4491), 'tensorlayer.layers.DenseLayer', 'DenseLayer', (['fc3', 'CLASSES_NUM'], {'act': 'None', 'W_init': 'w_init2', 'name': '"""output"""'}), "(fc3, CLASSES_NUM, act=None, W_init=w_init2, name='output')\n", (4432, 4491), False, 'from tensorlayer.layers import BatchNormLayer, Conv2d, DenseLayer, FlattenLayer, InputLayer, LocalResponseNormLayer, MaxPool2d\n'), ((4538, 4591), 'tensorlayer.cost.cross_entropy', 'tl.cost.cross_entropy', (['y_pred', 'y_correct'], {'name': '"""COST"""'}), "(y_pred, y_correct, name='COST')\n", (4559, 4591), True, 'import tensorlayer as tl\n'), ((4701, 4756), 'tensorlayer.layers.get_variables_with_name', 'tl.layers.get_variables_with_name', (['"""relu/W"""', '(True)', '(True)'], {}), "('relu/W', True, True)\n", (4734, 4756), True, 'import tensorlayer as tl\n'), ((5326, 5376), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""vgg_net_model_fn"""'], {'reuse': 'reuse'}), "('vgg_net_model_fn', reuse=reuse)\n", (5343, 5376), True, 'import tensorflow as tf\n'), ((5400, 5427), 
'tensorlayer.layers.InputLayer', 'InputLayer', (['x'], {'name': '"""input"""'}), "(x, name='input')\n", (5410, 5427), False, 'from tensorlayer.layers import BatchNormLayer, Conv2d, DenseLayer, FlattenLayer, InputLayer, LocalResponseNormLayer, MaxPool2d\n'), ((5465, 5572), 'tensorlayer.layers.Conv2d', 'Conv2d', (['input_layer', '(64)', '(3, 3)', '(1, 1)'], {'act': 'tf.nn.relu', 'padding': '"""SAME"""', 'W_init': 'w_init', 'name': '"""conv_1_1"""'}), "(input_layer, 64, (3, 3), (1, 1), act=tf.nn.relu, padding='SAME',\n W_init=w_init, name='conv_1_1')\n", (5471, 5572), False, 'from tensorlayer.layers import BatchNormLayer, Conv2d, DenseLayer, FlattenLayer, InputLayer, LocalResponseNormLayer, MaxPool2d\n'), ((5614, 5719), 'tensorlayer.layers.Conv2d', 'Conv2d', (['conv_1_1', '(64)', '(3, 3)', '(1, 1)'], {'act': 'tf.nn.relu', 'padding': '"""SAME"""', 'W_init': 'w_init', 'name': '"""conv_1_2"""'}), "(conv_1_1, 64, (3, 3), (1, 1), act=tf.nn.relu, padding='SAME', W_init\n =w_init, name='conv_1_2')\n", (5620, 5719), False, 'from tensorlayer.layers import BatchNormLayer, Conv2d, DenseLayer, FlattenLayer, InputLayer, LocalResponseNormLayer, MaxPool2d\n'), ((5756, 5819), 'tensorlayer.layers.BatchNormLayer', 'BatchNormLayer', (['conv_1_2', 'is_train'], {'act': 'tf.nn.relu', 'name': '"""bn_1"""'}), "(conv_1_2, is_train, act=tf.nn.relu, name='bn_1')\n", (5770, 5819), False, 'from tensorlayer.layers import BatchNormLayer, Conv2d, DenseLayer, FlattenLayer, InputLayer, LocalResponseNormLayer, MaxPool2d\n'), ((5853, 5914), 'tensorlayer.layers.MaxPool2d', 'MaxPool2d', (['bn_1', '(3, 3)', '(2, 2)'], {'padding': '"""SAME"""', 'name': '"""lrn_1"""'}), "(bn_1, (3, 3), (2, 2), padding='SAME', name='lrn_1')\n", (5862, 5914), False, 'from tensorlayer.layers import BatchNormLayer, Conv2d, DenseLayer, FlattenLayer, InputLayer, LocalResponseNormLayer, MaxPool2d\n'), ((5979, 6083), 'tensorlayer.layers.Conv2d', 'Conv2d', (['pool_1', '(128)', '(3, 3)', '(1, 1)'], {'act': 'tf.nn.relu', 'padding': '"""SAME"""', 'W_init': 'w_init', 'name': '"""conv_2_1"""'}), "(pool_1, 128, (3, 3), (1, 1), act=tf.nn.relu, padding='SAME', W_init=\n w_init, name='conv_2_1')\n", (5985, 6083), False, 'from tensorlayer.layers import BatchNormLayer, Conv2d, DenseLayer, FlattenLayer, InputLayer, LocalResponseNormLayer, MaxPool2d\n'), ((6124, 6229), 'tensorlayer.layers.Conv2d', 'Conv2d', (['conv_2_1', '(128)', '(3, 3)', '(1, 1)'], {'act': 'tf.nn.relu', 'padding': '"""SAME"""', 'W_init': 'w_init', 'name': '"""conv_2_2"""'}), "(conv_2_1, 128, (3, 3), (1, 1), act=tf.nn.relu, padding='SAME',\n W_init=w_init, name='conv_2_2')\n", (6130, 6229), False, 'from tensorlayer.layers import BatchNormLayer, Conv2d, DenseLayer, FlattenLayer, InputLayer, LocalResponseNormLayer, MaxPool2d\n'), ((6267, 6330), 'tensorlayer.layers.BatchNormLayer', 'BatchNormLayer', (['conv_2_2', 'is_train'], {'act': 'tf.nn.relu', 'name': '"""bn_2"""'}), "(conv_2_2, is_train, act=tf.nn.relu, name='bn_2')\n", (6281, 6330), False, 'from tensorlayer.layers import BatchNormLayer, Conv2d, DenseLayer, FlattenLayer, InputLayer, LocalResponseNormLayer, MaxPool2d\n'), ((6364, 6426), 'tensorlayer.layers.MaxPool2d', 'MaxPool2d', (['bn_2', '(3, 3)', '(2, 2)'], {'padding': '"""SAME"""', 'name': '"""pool_2"""'}), "(bn_2, (3, 3), (2, 2), padding='SAME', name='pool_2')\n", (6373, 6426), False, 'from tensorlayer.layers import BatchNormLayer, Conv2d, DenseLayer, FlattenLayer, InputLayer, LocalResponseNormLayer, MaxPool2d\n'), ((6491, 6595), 'tensorlayer.layers.Conv2d', 'Conv2d', (['pool_2', '(256)', '(3, 
3)', '(1, 1)'], {'act': 'tf.nn.relu', 'padding': '"""SAME"""', 'W_init': 'w_init', 'name': '"""conv_3_1"""'}), "(pool_2, 256, (3, 3), (1, 1), act=tf.nn.relu, padding='SAME', W_init=\n w_init, name='conv_3_1')\n", (6497, 6595), False, 'from tensorlayer.layers import BatchNormLayer, Conv2d, DenseLayer, FlattenLayer, InputLayer, LocalResponseNormLayer, MaxPool2d\n'), ((6636, 6741), 'tensorlayer.layers.Conv2d', 'Conv2d', (['conv_3_1', '(256)', '(3, 3)', '(1, 1)'], {'act': 'tf.nn.relu', 'padding': '"""SAME"""', 'W_init': 'w_init', 'name': '"""conv_3_2"""'}), "(conv_3_1, 256, (3, 3), (1, 1), act=tf.nn.relu, padding='SAME',\n W_init=w_init, name='conv_3_2')\n", (6642, 6741), False, 'from tensorlayer.layers import BatchNormLayer, Conv2d, DenseLayer, FlattenLayer, InputLayer, LocalResponseNormLayer, MaxPool2d\n'), ((6783, 6888), 'tensorlayer.layers.Conv2d', 'Conv2d', (['conv_3_2', '(256)', '(3, 3)', '(1, 1)'], {'act': 'tf.nn.relu', 'padding': '"""SAME"""', 'W_init': 'w_init', 'name': '"""conv_3_3"""'}), "(conv_3_2, 256, (3, 3), (1, 1), act=tf.nn.relu, padding='SAME',\n W_init=w_init, name='conv_3_3')\n", (6789, 6888), False, 'from tensorlayer.layers import BatchNormLayer, Conv2d, DenseLayer, FlattenLayer, InputLayer, LocalResponseNormLayer, MaxPool2d\n'), ((6926, 6989), 'tensorlayer.layers.BatchNormLayer', 'BatchNormLayer', (['conv_3_3', 'is_train'], {'act': 'tf.nn.relu', 'name': '"""bn_3"""'}), "(conv_3_3, is_train, act=tf.nn.relu, name='bn_3')\n", (6940, 6989), False, 'from tensorlayer.layers import BatchNormLayer, Conv2d, DenseLayer, FlattenLayer, InputLayer, LocalResponseNormLayer, MaxPool2d\n'), ((7023, 7085), 'tensorlayer.layers.MaxPool2d', 'MaxPool2d', (['bn_3', '(3, 3)', '(2, 2)'], {'padding': '"""SAME"""', 'name': '"""pool_3"""'}), "(bn_3, (3, 3), (2, 2), padding='SAME', name='pool_3')\n", (7032, 7085), False, 'from tensorlayer.layers import BatchNormLayer, Conv2d, DenseLayer, FlattenLayer, InputLayer, LocalResponseNormLayer, MaxPool2d\n'), ((7150, 7254), 'tensorlayer.layers.Conv2d', 'Conv2d', (['pool_3', '(512)', '(3, 3)', '(1, 1)'], {'act': 'tf.nn.relu', 'padding': '"""SAME"""', 'W_init': 'w_init', 'name': '"""conv_4_1"""'}), "(pool_3, 512, (3, 3), (1, 1), act=tf.nn.relu, padding='SAME', W_init=\n w_init, name='conv_4_1')\n", (7156, 7254), False, 'from tensorlayer.layers import BatchNormLayer, Conv2d, DenseLayer, FlattenLayer, InputLayer, LocalResponseNormLayer, MaxPool2d\n'), ((7295, 7400), 'tensorlayer.layers.Conv2d', 'Conv2d', (['conv_4_1', '(512)', '(3, 3)', '(1, 1)'], {'act': 'tf.nn.relu', 'padding': '"""SAME"""', 'W_init': 'w_init', 'name': '"""conv_4_2"""'}), "(conv_4_1, 512, (3, 3), (1, 1), act=tf.nn.relu, padding='SAME',\n W_init=w_init, name='conv_4_2')\n", (7301, 7400), False, 'from tensorlayer.layers import BatchNormLayer, Conv2d, DenseLayer, FlattenLayer, InputLayer, LocalResponseNormLayer, MaxPool2d\n'), ((7442, 7547), 'tensorlayer.layers.Conv2d', 'Conv2d', (['conv_4_2', '(512)', '(3, 3)', '(1, 1)'], {'act': 'tf.nn.relu', 'padding': '"""SAME"""', 'W_init': 'w_init', 'name': '"""conv_4_3"""'}), "(conv_4_2, 512, (3, 3), (1, 1), act=tf.nn.relu, padding='SAME',\n W_init=w_init, name='conv_4_3')\n", (7448, 7547), False, 'from tensorlayer.layers import BatchNormLayer, Conv2d, DenseLayer, FlattenLayer, InputLayer, LocalResponseNormLayer, MaxPool2d\n'), ((7585, 7648), 'tensorlayer.layers.BatchNormLayer', 'BatchNormLayer', (['conv_4_3', 'is_train'], {'act': 'tf.nn.relu', 'name': '"""bn_4"""'}), "(conv_4_3, is_train, act=tf.nn.relu, name='bn_4')\n", (7599, 7648), False, 'from 
tensorlayer.layers import BatchNormLayer, Conv2d, DenseLayer, FlattenLayer, InputLayer, LocalResponseNormLayer, MaxPool2d\n'), ((7682, 7744), 'tensorlayer.layers.MaxPool2d', 'MaxPool2d', (['bn_4', '(3, 3)', '(2, 2)'], {'padding': '"""SAME"""', 'name': '"""pool_4"""'}), "(bn_4, (3, 3), (2, 2), padding='SAME', name='pool_4')\n", (7691, 7744), False, 'from tensorlayer.layers import BatchNormLayer, Conv2d, DenseLayer, FlattenLayer, InputLayer, LocalResponseNormLayer, MaxPool2d\n'), ((7809, 7913), 'tensorlayer.layers.Conv2d', 'Conv2d', (['pool_4', '(512)', '(3, 3)', '(1, 1)'], {'act': 'tf.nn.relu', 'padding': '"""SAME"""', 'W_init': 'w_init', 'name': '"""conv_5_1"""'}), "(pool_4, 512, (3, 3), (1, 1), act=tf.nn.relu, padding='SAME', W_init=\n w_init, name='conv_5_1')\n", (7815, 7913), False, 'from tensorlayer.layers import BatchNormLayer, Conv2d, DenseLayer, FlattenLayer, InputLayer, LocalResponseNormLayer, MaxPool2d\n'), ((7954, 8059), 'tensorlayer.layers.Conv2d', 'Conv2d', (['conv_5_1', '(512)', '(3, 3)', '(1, 1)'], {'act': 'tf.nn.relu', 'padding': '"""SAME"""', 'W_init': 'w_init', 'name': '"""conv_5_2"""'}), "(conv_5_1, 512, (3, 3), (1, 1), act=tf.nn.relu, padding='SAME',\n W_init=w_init, name='conv_5_2')\n", (7960, 8059), False, 'from tensorlayer.layers import BatchNormLayer, Conv2d, DenseLayer, FlattenLayer, InputLayer, LocalResponseNormLayer, MaxPool2d\n'), ((8101, 8206), 'tensorlayer.layers.Conv2d', 'Conv2d', (['conv_5_2', '(512)', '(3, 3)', '(1, 1)'], {'act': 'tf.nn.relu', 'padding': '"""SAME"""', 'W_init': 'w_init', 'name': '"""conv_5_3"""'}), "(conv_5_2, 512, (3, 3), (1, 1), act=tf.nn.relu, padding='SAME',\n W_init=w_init, name='conv_5_3')\n", (8107, 8206), False, 'from tensorlayer.layers import BatchNormLayer, Conv2d, DenseLayer, FlattenLayer, InputLayer, LocalResponseNormLayer, MaxPool2d\n'), ((8244, 8307), 'tensorlayer.layers.BatchNormLayer', 'BatchNormLayer', (['conv_5_3', 'is_train'], {'act': 'tf.nn.relu', 'name': '"""bn_5"""'}), "(conv_5_3, is_train, act=tf.nn.relu, name='bn_5')\n", (8258, 8307), False, 'from tensorlayer.layers import BatchNormLayer, Conv2d, DenseLayer, FlattenLayer, InputLayer, LocalResponseNormLayer, MaxPool2d\n'), ((8341, 8403), 'tensorlayer.layers.MaxPool2d', 'MaxPool2d', (['bn_5', '(3, 3)', '(2, 2)'], {'padding': '"""SAME"""', 'name': '"""pool_5"""'}), "(bn_5, (3, 3), (2, 2), padding='SAME', name='pool_5')\n", (8350, 8403), False, 'from tensorlayer.layers import BatchNormLayer, Conv2d, DenseLayer, FlattenLayer, InputLayer, LocalResponseNormLayer, MaxPool2d\n'), ((8471, 8507), 'tensorlayer.layers.FlattenLayer', 'FlattenLayer', (['pool_5'], {'name': '"""flatten"""'}), "(pool_5, name='flatten')\n", (8483, 8507), False, 'from tensorlayer.layers import BatchNormLayer, Conv2d, DenseLayer, FlattenLayer, InputLayer, LocalResponseNormLayer, MaxPool2d\n'), ((8523, 8619), 'tensorlayer.layers.DenseLayer', 'DenseLayer', (['flatten_layer', '(4096)'], {'act': 'tf.nn.relu', 'W_init': 'w_init2', 'b_init': 'b_init2', 'name': '"""fc1"""'}), "(flatten_layer, 4096, act=tf.nn.relu, W_init=w_init2, b_init=\n b_init2, name='fc1')\n", (8533, 8619), False, 'from tensorlayer.layers import BatchNormLayer, Conv2d, DenseLayer, FlattenLayer, InputLayer, LocalResponseNormLayer, MaxPool2d\n'), ((8654, 8740), 'tensorlayer.layers.DenseLayer', 'DenseLayer', (['fc1', '(4096)'], {'act': 'tf.nn.relu', 'W_init': 'w_init2', 'b_init': 'b_init2', 'name': '"""fc2"""'}), "(fc1, 4096, act=tf.nn.relu, W_init=w_init2, b_init=b_init2, name=\n 'fc2')\n", (8664, 8740), False, 'from tensorlayer.layers import 
BatchNormLayer, Conv2d, DenseLayer, FlattenLayer, InputLayer, LocalResponseNormLayer, MaxPool2d\n'), ((8775, 8861), 'tensorlayer.layers.DenseLayer', 'DenseLayer', (['fc2', '(1000)'], {'act': 'tf.nn.relu', 'W_init': 'w_init2', 'b_init': 'b_init2', 'name': '"""fc3"""'}), "(fc2, 1000, act=tf.nn.relu, W_init=w_init2, b_init=b_init2, name=\n 'fc3')\n", (8785, 8861), False, 'from tensorlayer.layers import BatchNormLayer, Conv2d, DenseLayer, FlattenLayer, InputLayer, LocalResponseNormLayer, MaxPool2d\n'), ((8899, 8968), 'tensorlayer.layers.DenseLayer', 'DenseLayer', (['fc3', 'CLASSES_NUM'], {'act': 'None', 'W_init': 'w_init2', 'name': '"""output"""'}), "(fc3, CLASSES_NUM, act=None, W_init=w_init2, name='output')\n", (8909, 8968), False, 'from tensorlayer.layers import BatchNormLayer, Conv2d, DenseLayer, FlattenLayer, InputLayer, LocalResponseNormLayer, MaxPool2d\n'), ((9015, 9069), 'tensorlayer.cost.cross_entropy', 'tl.cost.cross_entropy', (['y_pred', 'y_correct'], {'name': '"""_cost"""'}), "(y_pred, y_correct, name='_cost')\n", (9036, 9069), True, 'import tensorlayer as tl\n'), ((9179, 9234), 'tensorlayer.layers.get_variables_with_name', 'tl.layers.get_variables_with_name', (['"""relu/W"""', '(True)', '(True)'], {}), "('relu/W', True, True)\n", (9212, 9234), True, 'import tensorlayer as tl\n'), ((10495, 10541), 'tensorlayer.prepro.flip_axis', 'tl.prepro.flip_axis', (['x'], {'axis': '(1)', 'is_random': '(True)'}), '(x, axis=1, is_random=True)\n', (10514, 10541), True, 'import tensorlayer as tl\n'), ((10614, 10672), 'tensorlayer.prepro.brightness', 'tl.prepro.brightness', (['x'], {'gamma': '(0.1)', 'gain': '(1)', 'is_random': '(True)'}), '(x, gamma=0.1, gain=1, is_random=True)\n', (10634, 10672), True, 'import tensorlayer as tl\n'), ((4881, 4901), 'tensorflow.argmax', 'tf.argmax', (['y_pred', '(1)'], {}), '(y_pred, 1)\n', (4890, 4901), True, 'import tensorflow as tf\n'), ((4948, 4987), 'tensorflow.cast', 'tf.cast', (['correct_prediction', 'tf.float32'], {}), '(correct_prediction, tf.float32)\n', (4955, 4987), True, 'import tensorflow as tf\n'), ((9359, 9379), 'tensorflow.argmax', 'tf.argmax', (['y_pred', '(1)'], {}), '(y_pred, 1)\n', (9368, 9379), True, 'import tensorflow as tf\n'), ((9426, 9465), 'tensorflow.cast', 'tf.cast', (['correct_prediction', 'tf.float32'], {}), '(correct_prediction, tf.float32)\n', (9433, 9465), True, 'import tensorflow as tf\n'), ((10865, 10875), 'numpy.mean', 'np.mean', (['x'], {}), '(x)\n', (10872, 10875), True, 'import numpy as np\n'), ((10883, 10892), 'numpy.std', 'np.std', (['x'], {}), '(x)\n', (10889, 10892), True, 'import numpy as np\n'), ((4776, 4815), 'tensorflow.contrib.layers.l2_regularizer', 'tf.contrib.layers.l2_regularizer', (['(0.004)'], {}), '(0.004)\n', (4808, 4815), True, 'import tensorflow as tf\n'), ((9254, 9293), 'tensorflow.contrib.layers.l2_regularizer', 'tf.contrib.layers.l2_regularizer', (['(0.004)'], {}), '(0.004)\n', (9286, 9293), True, 'import tensorflow as tf\n')] |
import csv
import os
import pandas as pd
from config import config as conn
from . import dir_helper as dirs
from . import filename_handler as fileh
from prettytable import PrettyTable as table
x = table()
y = table()
def CSV_exsist(file_name):
if dirs.CSVFileExsist(file_name,conn.CSV_FILES_PATH):
return True
else:
return False
def CSV_file_Create(file_name):
csvfile = file_name + '.csv'
with open(conn.CSV_FILES_PATH + conn.ROOT + csvfile,'w') as filecreate:
writer = csv.writer(filecreate)
writer.writerow(['Name','yeardate','time'])
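# Every per-person CSV starts with this header row; 'yeardate' is stored as YYYYMMDD
# and 'time' as HHMMSS (both are parsed positionally in read_specific_user_rec below).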
def CSV_Row_Write(name,yrs,times):
csvfile = name + '.csv'
with open(conn.CSV_FILES_PATH + conn.ROOT + csvfile,'a') as csvwritefile:
writer = csv.writer(csvwritefile)
writer.writerow([name,yrs,times])
return True
def make_friends_csv():
csvfile = 'friends.csv'
with open(conn.FRIENDS_CSV_PATH + conn.ROOT + csvfile,'w') as csvwritefile:
writer = csv.writer(csvwritefile)
writer.writerow(['Name'])
return True
def update_friends_to_csv():
csvfile = 'friends.csv'
if dirs.isDir(conn.CSV_FILES_PATH + conn.ROOT):
if dirs.isContains(conn.CSV_FILES_PATH + conn.ROOT):
files = dirs.GetFiles(conn.CSV_FILES_PATH + conn.ROOT)
temp_name_list = []
for n in files:
name_parse = fileh.fileNeedName(n)
temp_name_list.append(name_parse)
if dirs.friends_csv_check(conn.FRIENDS_CSV_PATH + conn.ROOT):
for names in temp_name_list:
with open(conn.FRIENDS_CSV_PATH + conn.ROOT + csvfile,'a') as csvwritefile:
writer = csv.writer(csvwritefile)
writer.writerow([names])
else:
if make_friends_csv():
for name in temp_name_list:
with open(conn.FRIENDS_CSV_PATH + conn.ROOT + csvfile,'a') as csvwritefile:
writer = csv.writer(csvwritefile)
writer.writerow([name])
# Get the friends' names and display them to the user
def read_friends_name():
print('**** Friends Names are ****')
print()
csv_file = conn.FRIENDS_CSV_PATH + conn.ROOT +'friends.csv'
with open(csv_file,'r') as read_csv:
reader = csv.reader(read_csv)
rows = list(reader)
for names in range(1,len(rows)):
print(rows[names][0])
# Get the record details of a specific friend's AMR files
def read_specific_user_rec(sweet_name):
target_csv = conn.CSV_FILES_PATH + conn.ROOT + sweet_name + '.csv'
with open(target_csv,'r') as read_csv:
reader = csv.reader(read_csv)
rows = list(reader)
x.field_names = [rows[0][0], rows[0][1], rows[0][2]]
for details in range(1,len(rows)):
year = rows[details][1][0] + rows[details][1][1] + rows[details][1][2] + rows[details][1][3]
month = rows[details][1][4] + rows[details][1][5]
day = rows[details][1][6] + rows[details][1][7]
time_meridiean = None
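# Convert the stored 24-hour time to a 12-hour clock with an AM/PM marker.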
if int(rows[details][2][0] + rows[details][2][1]) > 12:
sr = int(rows[details][2][0] + rows[details][2][1]) - 12
hr = '0' + str(sr)
time_meridiean = "PM"
else:
hr = rows[details][2][0] + rows[details][2][1]
time_meridiean = "AM"
mit = rows[details][2][2] + rows[details][2][3]
sec = rows[details][2][4] + rows[details][2][5]
x.add_row([rows[details][0],day + "/" + month + "/" + year,str(hr) + ":" + mit + ":" + sec + "-" + time_meridiean])
print(x)
def get_specific_friend_rec():
date = str(input("Enter the date for needed record : "))
name = str(input("Enter the person name : "))
need_day = date[0:2]
need_month = date[3:5]
need_year = date[6:]
need_date_str = need_year+need_month+need_day
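# The date is entered as DD/MM/YYYY (any single-character separator works) and is
# reassembled as YYYYMMDD to match the stored 'yeardate' column.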
csv_file = conn.CSV_FILES_PATH + conn.ROOT + name + '.csv'
with open(csv_file,'r') as read_csv:
reader = csv.reader(read_csv)
rows = list(reader)
index = 0
need_file = []
for n in range(0,len(rows)):
if rows[index][1] == need_date_str:
need_file.append(rows[index])
index = index + 1
print(" ***** Available Records ***** ")
y.field_names = ["Record","Name", "Year", "Time"]
index_state = 1
for n in need_file:
time_meridiean_status = None
for_time = n[2][0:2]
for_min = n[2][2:4]
for_sec = n[2][4:]
for_year = n[1][0:4]
for_month = n[1][4:6]
for_day = n[1][6:]
if int(for_time) > 12:
sr = int(for_time) - 12
hr = '0' + str(sr)
time_meridiean_status = "PM"
else:
hr = str(for_time)
time_meridiean_status = "AM"
y.add_row([index_state,n[0],for_day + "/" + for_month + "/" + for_year, hr + ":" + for_min + ":" + for_sec + "-" + time_meridiean_status])
index_state = index_state + 1
print(y)
rec_get_access = int(input("Enter the record no you want to play : "))
need_enc_file = need_file[rec_get_access - 1][1] + "_" + need_file[rec_get_access - 1][2] + "-" + need_file[rec_get_access - 1][0] + conn.AES_AMR_REF
need_enc_file_name = need_file[rec_get_access - 1][1] + "_" + need_file[rec_get_access - 1][2] + "-" + need_file[rec_get_access - 1][0]
return need_enc_file,need_enc_file_name
| [
"prettytable.PrettyTable",
"csv.writer",
"csv.reader"
] | [((201, 208), 'prettytable.PrettyTable', 'table', ([], {}), '()\n', (206, 208), True, 'from prettytable import PrettyTable as table\n'), ((213, 220), 'prettytable.PrettyTable', 'table', ([], {}), '()\n', (218, 220), True, 'from prettytable import PrettyTable as table\n'), ((517, 539), 'csv.writer', 'csv.writer', (['filecreate'], {}), '(filecreate)\n', (527, 539), False, 'import csv\n'), ((751, 775), 'csv.writer', 'csv.writer', (['csvwritefile'], {}), '(csvwritefile)\n', (761, 775), False, 'import csv\n'), ((1004, 1028), 'csv.writer', 'csv.writer', (['csvwritefile'], {}), '(csvwritefile)\n', (1014, 1028), False, 'import csv\n'), ((2750, 2770), 'csv.reader', 'csv.reader', (['read_csv'], {}), '(read_csv)\n', (2760, 2770), False, 'import csv\n'), ((3139, 3159), 'csv.reader', 'csv.reader', (['read_csv'], {}), '(read_csv)\n', (3149, 3159), False, 'import csv\n'), ((4845, 4865), 'csv.reader', 'csv.reader', (['read_csv'], {}), '(read_csv)\n', (4855, 4865), False, 'import csv\n'), ((1929, 1953), 'csv.writer', 'csv.writer', (['csvwritefile'], {}), '(csvwritefile)\n', (1939, 1953), False, 'import csv\n'), ((2370, 2394), 'csv.writer', 'csv.writer', (['csvwritefile'], {}), '(csvwritefile)\n', (2380, 2394), False, 'import csv\n')] |
#!/usr/bin/env python
"""Implement the Rock class and other core rocks functionality."""
import datetime as dt
import json
from typing import Dict, List, Optional
import warnings
import numpy as np
import pandas as pd
import pydantic
import rich
import rocks
# ------
# ssoCard as pydantic model
# The lowest level in the ssoCard tree is the Value
class Error(pydantic.BaseModel):
min_: float = pydantic.Field(np.nan, alias="min")
max_: float = pydantic.Field(np.nan, alias="max")
class Value(pydantic.BaseModel):
error: Error = Error(**{})
value: Optional[float] = np.nan
path_unit: str = ""
def __str__(self):
"""Print the value of a numerical parameter including
its errors and its unit if available.
"""
unit = rocks.utils.get_unit(self.path_unit) if self.path_unit else ""
if abs(self.error.min_) == abs(self.error.max_):
return f"{self.value:.4} +- {self.error.max_:.4} {unit}"
else:
return f"{self.value:.4} +- ({self.error.max_:.4}, {self.error.min_:.4}) {unit}"
# The second lowest level is the Parameter
class Parameter(pydantic.BaseModel):
def __str__(self):
return json.dumps(json.loads(self.json()), indent=2, sort_keys=True)
# Other common branches are method and bibref
class Method(Parameter):
doi: Optional[str] = ""
name: Optional[str] = ""
year: Optional[int] = None
title: Optional[str] = ""
bibcode: Optional[str] = ""
shortbib: Optional[str] = ""
class Bibref(Parameter):
doi: Optional[str] = ""
year: Optional[int] = None
title: Optional[str] = ""
bibcode: Optional[str] = ""
shortbib: Optional[str] = ""
# And a special class for the Spin and Taxonomy lists
class ParameterList(list):
"""Subclass of <list> with a custom __str__ for the Spin and Taxonomy parameters."""
def __init__(self, list_):
"""Convert the list items to Spin or Taxonomy instances."""
if "class" in list_[0]:
list_ = [Taxonomy(**entry) for entry in list_]
else:
list_ = [Spin(**entry) for entry in list_]
return super().__init__(list_)
def __str__(self) -> str:
if hasattr(self[0], "class_"):
if len(self) == 1:
return self[0].class_
else:
classifications = []
for entry in self:
shortbib = ", ".join(bib.shortbib for bib in entry.bibref)
classifications.append(f"{entry.class_:<4}{shortbib}")
return "\n".join(classifications)
return super().__str__()
# ------
# Validators
def convert_spin_to_list(spins: Dict) -> List:
"""Convert the Spin dictionary from the ssoCard into a list.
Add the spin index as a parameter to the Spin entries.
Parameters
----------
spin : dict
The dictionary located at parameters.physical.spin in the ssoCard.
Returns
-------
list
A list of dictionaries, with one dictionary for each entry in parameters.physical.spin
after removing the index layer.
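Example
-------
Illustrative input with a single spin solution:
>>> convert_spin_to_list({"1": {"period": {"value": 9.07}}})
[{'period': {'value': 9.07}, 'id_': '1'}]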
"""
spin_dicts = []
for spin_id, spin_dict in spins.items():
spin_dict["id_"] = spin_id
spin_dicts.append(spin_dict)
return spin_dicts
# ------
# Dynamical parameters
class OrbitalElements(Parameter):
ceu: Value = Value(**{})
author: Optional[str] = ""
bibref: List[Bibref] = [Bibref(**{})]
ceu_rate: Value = Value(**{})
ref_epoch: Optional[float] = np.nan
inclination: Value = Value(**{})
mean_motion: Value = Value(**{})
orbital_arc: Optional[int] = None
eccentricity: Value = Value(**{})
mean_anomaly: Value = Value(**{})
node_longitude: Value = Value(**{})
orbital_period: Value = Value(**{})
semi_major_axis: Value = Value(**{})
number_observation: Optional[int] = None
perihelion_argument: Value = Value(**{})
class ProperElements(Parameter):
bibref: List[Bibref] = [Bibref(**{})]
proper_g: Value = Value(**{})
proper_s: Value = Value(**{})
proper_eccentricity: Value = Value(**{})
proper_inclination: Value = Value(**{})
proper_semi_major_axis: Value = Value(**{})
proper_sine_inclination: Value = Value(**{})
class Family(Parameter):
bibref: List[Bibref] = [Bibref(**{})]
family_name: Optional[str] = ""
family_number: Optional[int] = None
family_status: Optional[str] = ""
class PairMembers(Parameter):
sibling_name: Optional[str] = ""
pair_delta_v: Optional[float] = np.nan
pair_delta_a: Optional[float] = np.nan
pair_delta_e: Optional[float] = np.nan
pair_delta_i: Optional[float] = np.nan
sibling_number: Optional[int] = None
class Pair(Parameter):
members: List[PairMembers] = [PairMembers(**{})]
bibref: List[Bibref] = [Bibref(**{})]
class TisserandParameter(Value):
method: List[Method] = []
bibref: List[Bibref] = []
class Yarkovsky(Parameter):
S: Optional[float] = np.nan
A2: Value = Value(**{})
snr: Optional[float] = np.nan
dadt: Value = Value(**{})
bibref: List[Bibref] = [Bibref(**{})]
def __str__(self):
return "\n".join([self.A2.__str__(), self.dadt.__str__()])
class DynamicalParameters(Parameter):
pair: Pair = Pair(**{})
family: Family = Family(**{})
tisserand_parameter: TisserandParameter = TisserandParameter(**{})
yarkovsky: Yarkovsky = Yarkovsky(**{})
proper_elements: ProperElements = ProperElements(**{})
orbital_elements: OrbitalElements = OrbitalElements(**{})
def __str__(self):
return self.json()
# ------
# Physical Value
class Albedo(Value):
bibref: List[Bibref] = []
method: List[Method] = []
class Color(Value):
color: Value = Value(**{})
epoch: Optional[float] = np.nan
from_: Optional[str] = pydantic.Field("", alias="from")
bibref: Bibref = Bibref(**{})
observer: Optional[str] = ""
phot_sys: Optional[str] = ""
delta_time: Optional[float] = np.nan
id_filter_1: Optional[str] = ""
id_filter_2: Optional[str] = ""
class Colors(Parameter):
# Atlas
c_o: List[Color] = [pydantic.Field(Color(**{}), alias="c-o")]
# 2MASS / VISTA
J_H: List[Color] = [pydantic.Field(Color(**{}), alias="J-H")]
J_K: List[Color] = [pydantic.Field(Color(**{}), alias="J-K")]
H_K: List[Color] = [pydantic.Field(Color(**{}), alias="H-K")]
class Density(Value):
method: List[Method] = []
bibref: List[Bibref] = []
path_unit: str = "unit.physical.density.value"
class Diameter(Value):
method: List[Method] = [Method(**{})]
bibref: List[Bibref] = [Bibref(**{})]
path_unit: str = "unit.physical.diameter.value"
class Mass(Value):
bibref: List[Bibref] = [Bibref(**{})]
method: List[Method] = [Method(**{})]
path_unit: str = "unit.physical.mass.value"
class Phase(Parameter):
H: Value = Value(**{})
N: Optional[float] = np.nan
G1: Value = Value(**{})
G2: Value = Value(**{})
rms: Optional[float] = np.nan
phase: Error = Error(**{})
bibref: List[Bibref] = [Bibref(**{})]
facility: Optional[str] = ""
name_filter: Optional[str] = ""
class PhaseFunction(Parameter):
# Generic
generic_johnson_v: Phase = pydantic.Field(Phase(**{}), alias="Generic/Johnson.V")
# ATLAS
misc_atlas_cyan: Phase = pydantic.Field(Phase(**{}), alias="Misc/Atlas.cyan")
misc_atlas_orange: Phase = pydantic.Field(Phase(**{}), alias="Misc/Atlas.orange")
class Spin(Parameter):
t0: Optional[float] = np.nan
Wp: Optional[float] = np.nan
id_: Optional[int] = None
lat: Optional[Value] = Value(**{})
RA0: Optional[float] = np.nan
DEC0: Optional[float] = np.nan
long_: Optional[Value] = pydantic.Field(Value(**{}), alias="long")
period: Optional[Value] = Value(**{})
method: Optional[List[Method]] = [Method(**{})]
bibref: Optional[List[Bibref]] = [Bibref(**{})]
path_unit: str = "unit.physical.spin.value"
class Taxonomy(Parameter):
class_: Optional[str] = pydantic.Field("", alias="class")
scheme: Optional[str] = ""
bibref: Optional[List[Bibref]] = [Bibref(**{})]
method: Optional[List[Method]] = [Method(**{})]
waverange: Optional[str] = ""
def __str__(self):
if not self.class_:
return "No taxonomy on record."
return ", ".join(self.class_)
class ThermalInertia(Parameter):
TI: Value = Value(**{})
dsun: Optional[float] = np.nan
bibref: List[Bibref] = []
method: List[Method] = []
class AbsoluteMagnitude(Value):
G: Optional[float] = np.nan
bibref: List[Bibref] = []
class PhysicalParameters(Parameter):
mass: Mass = Mass(**{})
spin: ParameterList = ParameterList([{}])
colors: Colors = Colors(**{})
albedo: Albedo = Albedo(**{})
density: Density = Density(**{})
diameter: Diameter = Diameter(**{})
taxonomy: ParameterList = ParameterList([{"class": ""}])
phase_function: PhaseFunction = PhaseFunction(**{})
thermal_inertia: ThermalInertia = ThermalInertia(**{})
absolute_magnitude: AbsoluteMagnitude = AbsoluteMagnitude(**{})
_convert_spin_to_list: classmethod = pydantic.validator("spin", pre=True)(
convert_spin_to_list
)
_convert_list_to_parameterlist: classmethod = pydantic.validator(
"spin", "taxonomy", allow_reuse=True, pre=True
)(lambda list_: ParameterList(list_))
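# Both validators use pre=True, so they operate on the raw ssoCard dictionaries before
# pydantic parses the entries into Spin and Taxonomy models.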
# ------
# Equation of state
class EqStateVector(Parameter):
ref_epoch: Optional[float] = np.nan
position: List[float] = [np.nan, np.nan, np.nan]
velocity: List[float] = [np.nan, np.nan, np.nan]
# ------
# Highest level branches
class Parameters(Parameter):
physical: PhysicalParameters = PhysicalParameters(**{})
dynamical: DynamicalParameters = DynamicalParameters(**{})
eq_state_vector: EqStateVector = EqStateVector(**{})
class Config:
arbitrary_types_allowed = True
class Link(Parameter):
unit: Optional[str] = ""
self_: Optional[str] = pydantic.Field("", alias="self")
quaero: Optional[str] = ""
description: Optional[str] = ""
class Ssocard(Parameter):
version: Optional[str] = ""
datetime: Optional[dt.datetime] = None
class Datacloud(Parameter):
"""The collection of links to datacloud catalogue associated to this ssoCard."""
astorb: Optional[str] = ""
binarymp: Optional[str] = ""
diamalbedo: Optional[str] = ""
families: Optional[str] = ""
masses: Optional[str] = ""
mpcatobs: Optional[str] = ""
mpcorb: Optional[str] = ""
pairs: Optional[str] = ""
taxonomy: Optional[str] = ""
class Rock(pydantic.BaseModel):
"""Instantiate a specific asteroid with data from its ssoCard."""
# the basics
id_: Optional[str] = pydantic.Field("", alias="id")
name: Optional[str] = ""
type_: Optional[str] = pydantic.Field("", alias="type")
class_: Optional[str] = pydantic.Field("", alias="class")
number: Optional[int] = None
parent: Optional[str] = ""
system: Optional[str] = ""
# the heart
parameters: Parameters = Parameters(**{})
# the meta
link: Link = Link(**{})
ssocard: Ssocard = Ssocard(**{})
datacloud: Datacloud = Datacloud(**{})
# the catalogues
astorb: rocks.datacloud.Astorb = rocks.datacloud.Astorb(**{})
binarymp: rocks.datacloud.Binarymp = rocks.datacloud.Binarymp(**{})
colors: rocks.datacloud.Colors = rocks.datacloud.Colors(**{})
diamalbedo: rocks.datacloud.Diamalbedo = rocks.datacloud.Diamalbedo(**{})
families: rocks.datacloud.Families = rocks.datacloud.Families(**{})
masses: rocks.datacloud.Masses = rocks.datacloud.Masses(**{})
mpcatobs: rocks.datacloud.Mpcatobs = rocks.datacloud.Mpcatobs(**{})
mpcorb: rocks.datacloud.Mpcorb = rocks.datacloud.Mpcorb(**{})
pairs: rocks.datacloud.Pairs = rocks.datacloud.Pairs(**{})
phase_functions: rocks.datacloud.PhaseFunction = rocks.datacloud.PhaseFunction(**{})
taxonomies: rocks.datacloud.Taxonomies = rocks.datacloud.Taxonomies(**{})
thermal_properties: rocks.datacloud.ThermalProperties = (
rocks.datacloud.ThermalProperties(**{})
)
yarkovskies: rocks.datacloud.Yarkovskies = rocks.datacloud.Yarkovskies(**{})
def __init__(self, id_, ssocard=None, datacloud=None, skip_id_check=False):
"""Identify a minor body and retrieve its properties from SsODNet.
Parameters
----------
id_ : str, int, float
Identifying asteroid name, designation, or number
ssocard : dict
Optional argument providing a dictionary to use as ssoCard.
Default is empty dictionary, triggering the query of an ssoCard.
datacloud : list of str
Optional list of additional catalogues to retrieve from datacloud.
Default is no additional catalogues.
skip_id_check : bool
Optional argument to prevent resolution of ID before getting ssoCard.
Default is False.
Returns
-------
rocks.core.Rock
An asteroid class instance, with its properties as attributes.
Notes
-----
If the asteroid could not be identified or the data contains invalid
types, the number is None and no further attributes but the name are set.
Example
-------
>>> from rocks import Rock
>>> ceres = Rock('ceres')
>>> ceres.taxonomy.class_
'C'
>>> ceres.taxonomy.shortbib
'DeMeo+2009'
>>> ceres.diameter.value
848.4
>>> ceres.diameter.unit
'km'
"""
if isinstance(datacloud, str):
datacloud = [datacloud]
id_provided = id_
if not skip_id_check:
_, _, id_ = rocks.identify(id_, return_id=True) # type: ignore
        # Get ssoCard and datacloud catalogues
if not pd.isnull(id_):
if ssocard is None:
ssocard = rocks.ssodnet.get_ssocard(id_)
if ssocard is None:
# Asteroid does not have an ssoCard
# Instantiate minimal ssoCard for meaningful error output.
ssocard = {"name": id_provided}
rich.print(
f"Error 404: missing ssoCard for [green]{id_provided}[/green]."
)
# This only gets printed once
warnings.warn(
"See https://rocks.readthedocs.io/en/latest/tutorials.html#error-404 for help."
)
else:
if datacloud is not None:
for catalogue in datacloud:
ssocard = self.__add_datacloud_catalogue(
id_, catalogue, ssocard
)
else:
# Something failed. Instantiate minimal ssoCard for meaningful error output.
ssocard = {"name": id_provided}
# Deserialize the asteroid data
try:
super().__init__(**ssocard) # type: ignore
except pydantic.ValidationError as message:
self.__parse_error_message(message, id_, ssocard)
# Set the offending properties to None to allow for instantiation anyway
for error in message.errors():
# Dynamically remove offending parts of the ssoCard
offending_part = ssocard
for location in error["loc"][:-1]:
offending_part = offending_part[location]
del offending_part[error["loc"][-1]]
super().__init__(**ssocard) # type: ignore
        # Convert the retrieved datacloud catalogues into DataCloudDataFrame objects
if datacloud is not None:
for catalogue in datacloud:
if catalogue in ["diameters", "albedos"]:
catalogue = "diamalbedo"
setattr(
self,
catalogue,
rocks.datacloud.DataCloudDataFrame(
data=getattr(self, catalogue).dict()
),
)
def __getattr__(self, name):
"""Implement attribute shortcuts. Gets called if __getattribute__ fails."""
# These are shortcuts
if name in self.__aliases["physical"].values():
return getattr(self.parameters.physical, name)
if name in self.__aliases["dynamical"].values():
return getattr(self.parameters.dynamical, name)
# TODO This could be coded in a more abstract way
# These are proper aliases
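        # e.g. rock.a resolves to rock.parameters.dynamical.orbital_elements.semi_major_axis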
if name in self.__aliases["orbital_elements"].keys():
return getattr(
self.parameters.dynamical.orbital_elements,
self.__aliases["orbital_elements"][name],
)
if name in self.__aliases["proper_elements"].keys():
return getattr(
self.parameters.dynamical.proper_elements,
self.__aliases["proper_elements"][name],
)
if name in self.__aliases["physical"].keys():
return getattr(
self.parameters.physical,
self.__aliases["physical"][name],
)
if name in self.__aliases["diamalbedo"]:
return getattr(self, "diamalbedo")
raise AttributeError(
f"'Rock' object has no attribute '{name}'. Run "
f"'rocks parameters' to get a list of accepted properties."
)
def __repr__(self):
return (
self.__class__.__qualname__
+ f"(number={self.number!r}, name={self.name!r})"
)
def __str__(self):
return f"({self.number}) {self.name}"
def __hash__(self):
return hash(self.id_)
def __add_datacloud_catalogue(self, id_, catalogue, data):
"""Retrieve datacloud catalogue for asteroid and deserialize into
pydantic model."""
if catalogue not in rocks.datacloud.CATALOGUES.keys():
raise ValueError(
f"Unknown datacloud catalogue name: '{catalogue}'"
f"\nChoose from {rocks.datacloud.CATALOGUES.keys()}"
)
# get the SsODNet catalogue and the Rock's attribute names
catalogue_attribute = rocks.datacloud.CATALOGUES[catalogue]["attr_name"]
catalogue_ssodnet = rocks.datacloud.CATALOGUES[catalogue]["ssodnet_name"]
# retrieve the catalogue
cat = rocks.ssodnet.get_datacloud_catalogue(id_, catalogue_ssodnet)
if cat is None or not cat:
return data
# turn list of dict (catalogue entries) into dict of list
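        #   e.g. [{"diameter": 900.0}, {"diameter": 950.0}] -> {"diameter": [900.0, 950.0]}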
cat = {
key: [c[key] for c in cat]
if catalogue not in ["aams", "astorb", "pairs", "families"]
else cat[0][key]
for key in cat[0].keys()
}
# add 'preferred' attribute where applicable
if catalogue_ssodnet in ["taxonomy", "masses", "diamalbedo"]:
cat["preferred"] = [False] * len(list(cat.values())[0])
if catalogue_ssodnet in ["diamalbedo"]:
cat["preferred_albedo"] = [False] * len(list(cat.values())[0])
cat["preferred_diameter"] = [False] * len(list(cat.values())[0])
# add catalogue to Rock
data[catalogue_attribute] = cat
return data
def __parse_error_message(self, message, id_, data):
"""Print informative error message if ssocard data is invalid."""
print(f"\n{id_}:")
# Look up offending value in ssoCard
for error in message.errors():
value = data
for loc in error["loc"]:
try:
value = value[loc]
except TypeError:
break
rich.print(
f"Error: {' -> '.join([str(e) for e in error['loc']])} is invalid: {error['msg']}\n"
f"Passed value: {value}\n"
f"Replacing value with empty default to continue.\n"
)
__aliases = {
"dynamical": {
"parameters.dynamical.orbital_elements": "orbital_elements",
"parameters.dynamical.proper_elements": "proper_elements",
"parameters.dynamical.yarkovsky": "yarkovsky",
"parameters.dynamical.family": "family",
"parameters.dynamical.pair": "pair",
},
"physical": {
"H": "absolute_magnitude",
"parameters.physical.absolute_magnitude": "absolute_magnitude",
"parameters.physical.albedo": "albedo",
"parameters.physical.colors": "colors",
"parameters.physical.diameter": "diameter",
"parameters.physical.density": "density",
"parameters.physical.mass": "mass",
"parameters.physical.phase_function": "phase_function",
"parameters.physical.spin": "spin",
"parameters.physical.taxonomy": "taxonomy",
"parameters.physical.thermal_properties": "thermal_properties",
},
"orbital_elements": {
"a": "semi_major_axis",
"e": "eccentricity",
"i": "inclination",
},
"proper_elements": {
"ap": "proper_semi_major_axis",
"ep": "proper_eccentricity",
"ip": "proper_inclination",
"sinip": "proper_sine_inclination",
},
"diamalbedo": ["albedos", "diameters"],
}
def rocks_(ids, datacloud=None, progress=False):
"""Create multiple Rock instances.
Parameters
==========
ids : list of str, list of int, list of float, np.array, pd.Series
An iterable containing minor body identifiers.
datacloud : list of str
List of additional catalogues to retrieve from datacloud.
Default is no additional catalogues.
progress : bool
Show progress of instantiation. Default is False.
Returns
=======
list of rocks.core.Rock
A list of Rock instances
"""
# Get IDs
if len(ids) == 1 or isinstance(ids, str):
ids = [rocks.identify(ids, return_id=True, progress=progress)[-1]]
else:
_, _, ids = zip(*rocks.identify(ids, return_id=True, progress=progress))
# Load ssoCards asynchronously
rocks.ssodnet.get_ssocard(
[id_ for id_ in ids if not id_ is None], progress=progress
)
if datacloud is not None:
if isinstance(datacloud, str):
datacloud = [datacloud]
# Load datacloud catalogues asynchronously
for cat in datacloud:
if cat not in rocks.datacloud.CATALOGUES.keys():
raise ValueError(
f"Unknown datacloud catalogue name: '{cat}'"
f"\nChoose from {rocks.datacloud.CATALOGUES.keys()}"
)
rocks.ssodnet.get_datacloud_catalogue(
[id_ for id_ in ids if not id_ is None], cat, progress=progress
)
rocks_ = [
Rock(id_, skip_id_check=True, datacloud=datacloud) if not id_ is None else None
for id_ in ids
]
return rocks_
| [
"rocks.utils.get_unit",
"rich.print",
"rocks.datacloud.CATALOGUES.keys",
"rocks.ssodnet.get_ssocard",
"rocks.datacloud.Astorb",
"rocks.datacloud.Masses",
"rocks.datacloud.Pairs",
"rocks.datacloud.Yarkovskies",
"pydantic.Field",
"rocks.datacloud.Colors",
"rocks.datacloud.Binarymp",
"rocks.datacloud.PhaseFunction",
"warnings.warn",
"rocks.datacloud.Diamalbedo",
"pydantic.validator",
"rocks.datacloud.ThermalProperties",
"rocks.identify",
"rocks.datacloud.Mpcatobs",
"rocks.datacloud.Families",
"rocks.ssodnet.get_datacloud_catalogue",
"pandas.isnull",
"rocks.datacloud.Mpcorb",
"rocks.datacloud.Taxonomies"
] | [((404, 439), 'pydantic.Field', 'pydantic.Field', (['np.nan'], {'alias': '"""min"""'}), "(np.nan, alias='min')\n", (418, 439), False, 'import pydantic\n'), ((458, 493), 'pydantic.Field', 'pydantic.Field', (['np.nan'], {'alias': '"""max"""'}), "(np.nan, alias='max')\n", (472, 493), False, 'import pydantic\n'), ((5847, 5879), 'pydantic.Field', 'pydantic.Field', (['""""""'], {'alias': '"""from"""'}), "('', alias='from')\n", (5861, 5879), False, 'import pydantic\n'), ((8051, 8084), 'pydantic.Field', 'pydantic.Field', (['""""""'], {'alias': '"""class"""'}), "('', alias='class')\n", (8065, 8084), False, 'import pydantic\n'), ((10019, 10051), 'pydantic.Field', 'pydantic.Field', (['""""""'], {'alias': '"""self"""'}), "('', alias='self')\n", (10033, 10051), False, 'import pydantic\n'), ((10775, 10805), 'pydantic.Field', 'pydantic.Field', (['""""""'], {'alias': '"""id"""'}), "('', alias='id')\n", (10789, 10805), False, 'import pydantic\n'), ((10862, 10894), 'pydantic.Field', 'pydantic.Field', (['""""""'], {'alias': '"""type"""'}), "('', alias='type')\n", (10876, 10894), False, 'import pydantic\n'), ((10923, 10956), 'pydantic.Field', 'pydantic.Field', (['""""""'], {'alias': '"""class"""'}), "('', alias='class')\n", (10937, 10956), False, 'import pydantic\n'), ((11298, 11326), 'rocks.datacloud.Astorb', 'rocks.datacloud.Astorb', ([], {}), '(**{})\n', (11320, 11326), False, 'import rocks\n'), ((11368, 11398), 'rocks.datacloud.Binarymp', 'rocks.datacloud.Binarymp', ([], {}), '(**{})\n', (11392, 11398), False, 'import rocks\n'), ((11436, 11464), 'rocks.datacloud.Colors', 'rocks.datacloud.Colors', ([], {}), '(**{})\n', (11458, 11464), False, 'import rocks\n'), ((11510, 11542), 'rocks.datacloud.Diamalbedo', 'rocks.datacloud.Diamalbedo', ([], {}), '(**{})\n', (11536, 11542), False, 'import rocks\n'), ((11584, 11614), 'rocks.datacloud.Families', 'rocks.datacloud.Families', ([], {}), '(**{})\n', (11608, 11614), False, 'import rocks\n'), ((11652, 11680), 'rocks.datacloud.Masses', 'rocks.datacloud.Masses', ([], {}), '(**{})\n', (11674, 11680), False, 'import rocks\n'), ((11722, 11752), 'rocks.datacloud.Mpcatobs', 'rocks.datacloud.Mpcatobs', ([], {}), '(**{})\n', (11746, 11752), False, 'import rocks\n'), ((11790, 11818), 'rocks.datacloud.Mpcorb', 'rocks.datacloud.Mpcorb', ([], {}), '(**{})\n', (11812, 11818), False, 'import rocks\n'), ((11854, 11881), 'rocks.datacloud.Pairs', 'rocks.datacloud.Pairs', ([], {}), '(**{})\n', (11875, 11881), False, 'import rocks\n'), ((11935, 11970), 'rocks.datacloud.PhaseFunction', 'rocks.datacloud.PhaseFunction', ([], {}), '(**{})\n', (11964, 11970), False, 'import rocks\n'), ((12016, 12048), 'rocks.datacloud.Taxonomies', 'rocks.datacloud.Taxonomies', ([], {}), '(**{})\n', (12042, 12048), False, 'import rocks\n'), ((12119, 12158), 'rocks.datacloud.ThermalProperties', 'rocks.datacloud.ThermalProperties', ([], {}), '(**{})\n', (12152, 12158), False, 'import rocks\n'), ((12212, 12245), 'rocks.datacloud.Yarkovskies', 'rocks.datacloud.Yarkovskies', ([], {}), '(**{})\n', (12239, 12245), False, 'import rocks\n'), ((22291, 22381), 'rocks.ssodnet.get_ssocard', 'rocks.ssodnet.get_ssocard', (['[id_ for id_ in ids if not id_ is None]'], {'progress': 'progress'}), '([id_ for id_ in ids if not id_ is None], progress\n =progress)\n', (22316, 22381), False, 'import rocks\n'), ((9186, 9222), 'pydantic.validator', 'pydantic.validator', (['"""spin"""'], {'pre': '(True)'}), "('spin', pre=True)\n", (9204, 9222), False, 'import pydantic\n'), ((9309, 9375), 'pydantic.validator', 'pydantic.validator', 
(['"""spin"""', '"""taxonomy"""'], {'allow_reuse': '(True)', 'pre': '(True)'}), "('spin', 'taxonomy', allow_reuse=True, pre=True)\n", (9327, 9375), False, 'import pydantic\n'), ((18484, 18545), 'rocks.ssodnet.get_datacloud_catalogue', 'rocks.ssodnet.get_datacloud_catalogue', (['id_', 'catalogue_ssodnet'], {}), '(id_, catalogue_ssodnet)\n', (18521, 18545), False, 'import rocks\n'), ((780, 816), 'rocks.utils.get_unit', 'rocks.utils.get_unit', (['self.path_unit'], {}), '(self.path_unit)\n', (800, 816), False, 'import rocks\n'), ((13793, 13828), 'rocks.identify', 'rocks.identify', (['id_'], {'return_id': '(True)'}), '(id_, return_id=True)\n', (13807, 13828), False, 'import rocks\n'), ((13907, 13921), 'pandas.isnull', 'pd.isnull', (['id_'], {}), '(id_)\n', (13916, 13921), True, 'import pandas as pd\n'), ((17990, 18023), 'rocks.datacloud.CATALOGUES.keys', 'rocks.datacloud.CATALOGUES.keys', ([], {}), '()\n', (18021, 18023), False, 'import rocks\n'), ((22845, 22951), 'rocks.ssodnet.get_datacloud_catalogue', 'rocks.ssodnet.get_datacloud_catalogue', (['[id_ for id_ in ids if not id_ is None]', 'cat'], {'progress': 'progress'}), '([id_ for id_ in ids if not id_ is\n None], cat, progress=progress)\n', (22882, 22951), False, 'import rocks\n'), ((13981, 14011), 'rocks.ssodnet.get_ssocard', 'rocks.ssodnet.get_ssocard', (['id_'], {}), '(id_)\n', (14006, 14011), False, 'import rocks\n'), ((14237, 14312), 'rich.print', 'rich.print', (['f"""Error 404: missing ssoCard for [green]{id_provided}[/green]."""'], {}), "(f'Error 404: missing ssoCard for [green]{id_provided}[/green].')\n", (14247, 14312), False, 'import rich\n'), ((14413, 14517), 'warnings.warn', 'warnings.warn', (['"""See https://rocks.readthedocs.io/en/latest/tutorials.html#error-404 for help."""'], {}), "(\n 'See https://rocks.readthedocs.io/en/latest/tutorials.html#error-404 for help.'\n )\n", (14426, 14517), False, 'import warnings\n'), ((22099, 22153), 'rocks.identify', 'rocks.identify', (['ids'], {'return_id': '(True)', 'progress': 'progress'}), '(ids, return_id=True, progress=progress)\n', (22113, 22153), False, 'import rocks\n'), ((22195, 22249), 'rocks.identify', 'rocks.identify', (['ids'], {'return_id': '(True)', 'progress': 'progress'}), '(ids, return_id=True, progress=progress)\n', (22209, 22249), False, 'import rocks\n'), ((22607, 22640), 'rocks.datacloud.CATALOGUES.keys', 'rocks.datacloud.CATALOGUES.keys', ([], {}), '()\n', (22638, 22640), False, 'import rocks\n'), ((18155, 18188), 'rocks.datacloud.CATALOGUES.keys', 'rocks.datacloud.CATALOGUES.keys', ([], {}), '()\n', (18186, 18188), False, 'import rocks\n'), ((22778, 22811), 'rocks.datacloud.CATALOGUES.keys', 'rocks.datacloud.CATALOGUES.keys', ([], {}), '()\n', (22809, 22811), False, 'import rocks\n')] |
import numpy as np
import scipy.stats
import matplotlib.pyplot as plt
from network_simulator.poissonPointProcess import generateUsers, generateUsersPPP
def poissonPoint():
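    # Compare a Poisson point process sample against plain uniform random points,
    # colouring each scatter plot by a Gaussian KDE density estimate.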
x, y = generateUsersPPP(50, 0.2)
x1, y1 = generateUsers(50, len(x))
xy = np.vstack([x, y])
z = scipy.stats.gaussian_kde(xy)(xy)
xy1 = np.vstack([x1,y1])
z1 = scipy.stats.gaussian_kde(xy1)(xy1)
plt.figure(1)
plt.subplot(121)
plt.scatter(x, y, c=z)
plt.title("Poisson Point Process")
plt.xlabel("x")
plt.ylabel("y")
plt.subplot(122)
plt.scatter(x1, y1, c=z1)
plt.title("Python Uniform Random")
plt.xlabel("x")
plt.ylabel("y")
plt.show()
if __name__ == "__main__":
poissonPoint()
| [
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"network_simulator.poissonPointProcess.generateUsersPPP",
"matplotlib.pyplot.figure",
"numpy.vstack",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show"
] | [((184, 209), 'network_simulator.poissonPointProcess.generateUsersPPP', 'generateUsersPPP', (['(50)', '(0.2)'], {}), '(50, 0.2)\n', (200, 209), False, 'from network_simulator.poissonPointProcess import generateUsers, generateUsersPPP\n'), ((259, 276), 'numpy.vstack', 'np.vstack', (['[x, y]'], {}), '([x, y])\n', (268, 276), True, 'import numpy as np\n'), ((329, 348), 'numpy.vstack', 'np.vstack', (['[x1, y1]'], {}), '([x1, y1])\n', (338, 348), True, 'import numpy as np\n'), ((397, 410), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (407, 410), True, 'import matplotlib.pyplot as plt\n'), ((416, 432), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(121)'], {}), '(121)\n', (427, 432), True, 'import matplotlib.pyplot as plt\n'), ((437, 459), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x', 'y'], {'c': 'z'}), '(x, y, c=z)\n', (448, 459), True, 'import matplotlib.pyplot as plt\n'), ((464, 498), 'matplotlib.pyplot.title', 'plt.title', (['"""Poisson Point Process"""'], {}), "('Poisson Point Process')\n", (473, 498), True, 'import matplotlib.pyplot as plt\n'), ((503, 518), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x"""'], {}), "('x')\n", (513, 518), True, 'import matplotlib.pyplot as plt\n'), ((523, 538), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y"""'], {}), "('y')\n", (533, 538), True, 'import matplotlib.pyplot as plt\n'), ((544, 560), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(122)'], {}), '(122)\n', (555, 560), True, 'import matplotlib.pyplot as plt\n'), ((565, 590), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x1', 'y1'], {'c': 'z1'}), '(x1, y1, c=z1)\n', (576, 590), True, 'import matplotlib.pyplot as plt\n'), ((595, 629), 'matplotlib.pyplot.title', 'plt.title', (['"""Python Uniform Random"""'], {}), "('Python Uniform Random')\n", (604, 629), True, 'import matplotlib.pyplot as plt\n'), ((634, 649), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x"""'], {}), "('x')\n", (644, 649), True, 'import matplotlib.pyplot as plt\n'), ((654, 669), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y"""'], {}), "('y')\n", (664, 669), True, 'import matplotlib.pyplot as plt\n'), ((674, 684), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (682, 684), True, 'import matplotlib.pyplot as plt\n')] |
from dataclasses import dataclass, field
from typing import List
__NAMESPACE__ = "NISTSchema-SV-IV-list-duration-pattern-1-NS"
@dataclass
class NistschemaSvIvListDurationPattern1:
class Meta:
name = "NISTSchema-SV-IV-list-duration-pattern-1"
namespace = "NISTSchema-SV-IV-list-duration-pattern-1-NS"
value: List[str] = field(
default_factory=list,
metadata={
"pattern": r"P\d\d75Y\d3M\d9DT0\dH\d2M3\dS P19\d\dY0\dM1\dDT0\dH1\dM\d1S P19\d\dY\d3M2\dDT\d4H\d7M\d7S P\d\d86Y\d9M\d5DT\d6H4\dM\d9S P19\d\dY\d2M\d2DT\d9H3\dM0\dS P\d\d90Y0\dM2\dDT\d7H2\dM0\dS P\d\d94Y\d0M1\dDT0\dH1\dM4\dS P\d\d71Y0\dM\d1DT\d3H\d7M4\dS",
"tokens": True,
}
)
| [
"dataclasses.field"
] | [((347, 720), 'dataclasses.field', 'field', ([], {'default_factory': 'list', 'metadata': "{'pattern':\n 'P\\\\d\\\\d75Y\\\\d3M\\\\d9DT0\\\\dH\\\\d2M3\\\\dS P19\\\\d\\\\dY0\\\\dM1\\\\dDT0\\\\dH1\\\\dM\\\\d1S P19\\\\d\\\\dY\\\\d3M2\\\\dDT\\\\d4H\\\\d7M\\\\d7S P\\\\d\\\\d86Y\\\\d9M\\\\d5DT\\\\d6H4\\\\dM\\\\d9S P19\\\\d\\\\dY\\\\d2M\\\\d2DT\\\\d9H3\\\\dM0\\\\dS P\\\\d\\\\d90Y0\\\\dM2\\\\dDT\\\\d7H2\\\\dM0\\\\dS P\\\\d\\\\d94Y\\\\d0M1\\\\dDT0\\\\dH1\\\\dM4\\\\dS P\\\\d\\\\d71Y0\\\\dM\\\\d1DT\\\\d3H\\\\d7M4\\\\dS'\n , 'tokens': True}"}), "(default_factory=list, metadata={'pattern':\n 'P\\\\d\\\\d75Y\\\\d3M\\\\d9DT0\\\\dH\\\\d2M3\\\\dS P19\\\\d\\\\dY0\\\\dM1\\\\dDT0\\\\dH1\\\\dM\\\\d1S P19\\\\d\\\\dY\\\\d3M2\\\\dDT\\\\d4H\\\\d7M\\\\d7S P\\\\d\\\\d86Y\\\\d9M\\\\d5DT\\\\d6H4\\\\dM\\\\d9S P19\\\\d\\\\dY\\\\d2M\\\\d2DT\\\\d9H3\\\\dM0\\\\dS P\\\\d\\\\d90Y0\\\\dM2\\\\dDT\\\\d7H2\\\\dM0\\\\dS P\\\\d\\\\d94Y\\\\d0M1\\\\dDT0\\\\dH1\\\\dM4\\\\dS P\\\\d\\\\d71Y0\\\\dM\\\\d1DT\\\\d3H\\\\d7M4\\\\dS'\n , 'tokens': True})\n", (352, 720), False, 'from dataclasses import dataclass, field\n')] |
import os
import mysql.connector
from datetime import datetime
import speech_recognition as sr
from happytransformer import HappyTextClassification
mydb = mysql.connector.connect(
host="localhost",
user="root",
passwd="",
database="without_knowledgebase_hate_speech_database"
)
happy_tc = HappyTextClassification("BERT", "Hate-speech-CNERG/dehatebert-mono-english")
speech_recognizer = sr.Recognizer()
def speech_to_text():
with sr.Microphone() as source:
print("Listening...")
speech_recognizer.pause_threshold = 1
audio = speech_recognizer.listen(source, phrase_time_limit=10) # phrase_time_limit=10
try:
print("Recognizing...")
text = speech_recognizer.recognize_google(audio)
return text
except Exception as e:
print(e)
def hate_speech_classification(text):
result = happy_tc.classify_text(text)
now = datetime.now()
currentTime = now.strftime("%I:%M:%S %p")
currentDate = datetime.today().strftime('%Y-%m-%d')
hate_words_list_str = ""
    text = text.replace("'", "")  # Strip single quotes so they don't break the quoted SQL string below.
insert_cursor = mydb.cursor()
sqlCode = "INSERT INTO processed_data VALUES ('{}', '{}', '{}', '{}', '{}', '{}')".format("", str(text) , currentDate, currentTime, result.label, (float(result.score) * 100))
insert_cursor.execute(sqlCode)
mydb.commit()
print("Data inserted!")
def main_function():
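    # Continuously listen on the microphone, transcribe the speech, classify it and store the result.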
while True:
text = speech_to_text()
if text != "" and text != None:
hate_speech_classification(text)
if __name__ == "__main__":
main_function() | [
"happytransformer.HappyTextClassification",
"speech_recognition.Recognizer",
"datetime.datetime.now",
"speech_recognition.Microphone",
"datetime.datetime.today"
] | [((301, 377), 'happytransformer.HappyTextClassification', 'HappyTextClassification', (['"""BERT"""', '"""Hate-speech-CNERG/dehatebert-mono-english"""'], {}), "('BERT', 'Hate-speech-CNERG/dehatebert-mono-english')\n", (324, 377), False, 'from happytransformer import HappyTextClassification\n'), ((399, 414), 'speech_recognition.Recognizer', 'sr.Recognizer', ([], {}), '()\n', (412, 414), True, 'import speech_recognition as sr\n'), ((918, 932), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (930, 932), False, 'from datetime import datetime\n'), ((447, 462), 'speech_recognition.Microphone', 'sr.Microphone', ([], {}), '()\n', (460, 462), True, 'import speech_recognition as sr\n'), ((991, 1007), 'datetime.datetime.today', 'datetime.today', ([], {}), '()\n', (1005, 1007), False, 'from datetime import datetime\n')] |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2016-12-08 08:27
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import wagtail.wagtailcore.blocks
import wagtail.wagtailcore.fields
import wagtail.wagtaildocs.blocks
import wagtail.wagtailembeds.blocks
import wagtail.wagtailimages.blocks
import wagtailcommerce.blocks
class Migration(migrations.Migration):
dependencies = [
('wagtailcommerce', '0006_product_image'),
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('path', models.CharField(max_length=255, unique=True)),
('depth', models.PositiveIntegerField()),
('numchild', models.PositiveIntegerField(default=0)),
('title', models.CharField(max_length=255, unique=True, verbose_name='title')),
('active', models.BooleanField(default=False, verbose_name='active')),
('description', models.TextField(blank=True, help_text='For admin/backoffice purposes only.', verbose_name='description')),
],
options={
'verbose_name_plural': 'categories',
},
),
migrations.CreateModel(
name='CategoryIndexPage',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='CategoryPage',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('body', wagtail.wagtailcore.fields.StreamField((('h2', wagtail.wagtailcore.blocks.CharBlock(classname='title', icon='title')), ('h3', wagtail.wagtailcore.blocks.CharBlock(classname='title', icon='title')), ('h4', wagtail.wagtailcore.blocks.CharBlock(classname='title', icon='title')), ('intro', wagtail.wagtailcore.blocks.TextBlock(icon='pilcrow')), ('paragraph', wagtail.wagtailcore.blocks.RichTextBlock(icon='pilcrow')), ('aligned_image', wagtail.wagtailcore.blocks.StructBlock((('image', wagtail.wagtailimages.blocks.ImageChooserBlock()), ('alignment', wagtailcommerce.blocks.ImageFormatChoiceBlock()), ('caption', wagtail.wagtailcore.blocks.CharBlock()), ('attribution', wagtail.wagtailcore.blocks.CharBlock(required=False))), icon='image', label='Aligned image')), ('embed', wagtail.wagtailembeds.blocks.EmbedBlock()), ('document', wagtail.wagtaildocs.blocks.DocumentChooserBlock(icon='doc-full-inverse'))))),
('image', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailimages.Image', verbose_name='featured image')),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.AlterField(
model_name='productpage',
name='image',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailimages.Image', verbose_name='featured image'),
),
migrations.AddField(
model_name='category',
name='category_page',
field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='wagtailcommerce.CategoryPage'),
),
migrations.AddField(
model_name='category',
name='image',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailimages.Image'),
),
]
| [
"django.db.models.OneToOneField",
"django.db.models.TextField",
"django.db.models.ForeignKey",
"django.db.models.BooleanField",
"django.db.models.AutoField",
"django.db.models.PositiveIntegerField",
"django.db.models.CharField"
] | [((3443, 3613), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'blank': '(True)', 'null': '(True)', 'on_delete': 'django.db.models.deletion.SET_NULL', 'related_name': '"""+"""', 'to': '"""wagtailimages.Image"""', 'verbose_name': '"""featured image"""'}), "(blank=True, null=True, on_delete=django.db.models.\n deletion.SET_NULL, related_name='+', to='wagtailimages.Image',\n verbose_name='featured image')\n", (3460, 3613), False, 'from django.db import migrations, models\n'), ((3733, 3862), 'django.db.models.OneToOneField', 'models.OneToOneField', ([], {'blank': '(True)', 'null': '(True)', 'on_delete': 'django.db.models.deletion.SET_NULL', 'to': '"""wagtailcommerce.CategoryPage"""'}), "(blank=True, null=True, on_delete=django.db.models.\n deletion.SET_NULL, to='wagtailcommerce.CategoryPage')\n", (3753, 3862), False, 'from django.db import migrations, models\n'), ((3978, 4113), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'blank': '(True)', 'null': '(True)', 'on_delete': 'django.db.models.deletion.SET_NULL', 'related_name': '"""+"""', 'to': '"""wagtailimages.Image"""'}), "(blank=True, null=True, on_delete=django.db.models.\n deletion.SET_NULL, related_name='+', to='wagtailimages.Image')\n", (3995, 4113), False, 'from django.db import migrations, models\n'), ((637, 730), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (653, 730), False, 'from django.db import migrations, models\n'), ((754, 799), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)', 'unique': '(True)'}), '(max_length=255, unique=True)\n', (770, 799), False, 'from django.db import migrations, models\n'), ((828, 857), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', ([], {}), '()\n', (855, 857), False, 'from django.db import migrations, models\n'), ((889, 927), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', ([], {'default': '(0)'}), '(default=0)\n', (916, 927), False, 'from django.db import migrations, models\n'), ((956, 1023), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)', 'unique': '(True)', 'verbose_name': '"""title"""'}), "(max_length=255, unique=True, verbose_name='title')\n", (972, 1023), False, 'from django.db import migrations, models\n'), ((1053, 1110), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)', 'verbose_name': '"""active"""'}), "(default=False, verbose_name='active')\n", (1072, 1110), False, 'from django.db import migrations, models\n'), ((1145, 1255), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'help_text': '"""For admin/backoffice purposes only."""', 'verbose_name': '"""description"""'}), "(blank=True, help_text=\n 'For admin/backoffice purposes only.', verbose_name='description')\n", (1161, 1255), False, 'from django.db import migrations, models\n'), ((1489, 1659), 'django.db.models.OneToOneField', 'models.OneToOneField', ([], {'auto_created': '(True)', 'on_delete': 'django.db.models.deletion.CASCADE', 'parent_link': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'to': '"""wagtailcore.Page"""'}), "(auto_created=True, on_delete=django.db.models.deletion\n .CASCADE, parent_link=True, primary_key=True, serialize=False, to=\n 'wagtailcore.Page')\n", (1509, 1659), False, 'from django.db import migrations, models\n'), 
((1906, 2076), 'django.db.models.OneToOneField', 'models.OneToOneField', ([], {'auto_created': '(True)', 'on_delete': 'django.db.models.deletion.CASCADE', 'parent_link': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'to': '"""wagtailcore.Page"""'}), "(auto_created=True, on_delete=django.db.models.deletion\n .CASCADE, parent_link=True, primary_key=True, serialize=False, to=\n 'wagtailcore.Page')\n", (1926, 2076), False, 'from django.db import migrations, models\n'), ((3027, 3197), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'blank': '(True)', 'null': '(True)', 'on_delete': 'django.db.models.deletion.SET_NULL', 'related_name': '"""+"""', 'to': '"""wagtailimages.Image"""', 'verbose_name': '"""featured image"""'}), "(blank=True, null=True, on_delete=django.db.models.\n deletion.SET_NULL, related_name='+', to='wagtailimages.Image',\n verbose_name='featured image')\n", (3044, 3197), False, 'from django.db import migrations, models\n')] |
__author__ = 'Mario'
import numpy as np
import matplotlib.pyplot as plt
X = np.linspace(0.001, 5, 100)
exp1 = 1*np.exp(X*-1)
exp2 = 2*np.exp(-2*X)
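# Note: the exponential curves are passed as the x-values here, so X ends up on the y-axis.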
plt.plot(exp1,X,exp2,X)
plt.show() | [
"numpy.exp",
"numpy.linspace",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.show"
] | [((78, 104), 'numpy.linspace', 'np.linspace', (['(0.001)', '(5)', '(100)'], {}), '(0.001, 5, 100)\n', (89, 104), True, 'import numpy as np\n'), ((150, 176), 'matplotlib.pyplot.plot', 'plt.plot', (['exp1', 'X', 'exp2', 'X'], {}), '(exp1, X, exp2, X)\n', (158, 176), True, 'import matplotlib.pyplot as plt\n'), ((174, 184), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (182, 184), True, 'import matplotlib.pyplot as plt\n'), ((114, 128), 'numpy.exp', 'np.exp', (['(X * -1)'], {}), '(X * -1)\n', (120, 128), True, 'import numpy as np\n'), ((136, 150), 'numpy.exp', 'np.exp', (['(-2 * X)'], {}), '(-2 * X)\n', (142, 150), True, 'import numpy as np\n')] |
# Generated by Django 2.1.7 on 2019-02-27 08:36
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('category_text', models.CharField(max_length=20)),
],
),
migrations.CreateModel(
name='Comment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('comment_description', models.CharField(max_length=500)),
],
),
migrations.CreateModel(
name='Reply',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('reply_description', models.CharField(max_length=500)),
('comment', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='reviewapp.Comment')),
],
),
migrations.CreateModel(
name='Restaurant',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('restaurant_text', models.CharField(max_length=20)),
('restaurant_address', models.CharField(max_length=20)),
('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='reviewapp.Category')),
],
),
migrations.CreateModel(
name='Review',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('review_description', models.CharField(max_length=500)),
('review_rate', models.IntegerField(default=0)),
('review_likes', models.IntegerField(default=0)),
('restaurant', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='reviewapp.Restaurant')),
],
),
migrations.AddField(
model_name='comment',
name='review',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='reviewapp.Review'),
),
]
| [
"django.db.models.IntegerField",
"django.db.models.AutoField",
"django.db.models.CharField",
"django.db.models.ForeignKey"
] | [((2374, 2464), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""reviewapp.Review"""'}), "(on_delete=django.db.models.deletion.CASCADE, to=\n 'reviewapp.Review')\n", (2391, 2464), False, 'from django.db import migrations, models\n'), ((337, 430), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (353, 430), False, 'from django.db import migrations, models\n'), ((463, 494), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20)'}), '(max_length=20)\n', (479, 494), False, 'from django.db import migrations, models\n'), ((627, 720), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (643, 720), False, 'from django.db import migrations, models\n'), ((759, 791), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(500)'}), '(max_length=500)\n', (775, 791), False, 'from django.db import migrations, models\n'), ((922, 1015), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (938, 1015), False, 'from django.db import migrations, models\n'), ((1052, 1084), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(500)'}), '(max_length=500)\n', (1068, 1084), False, 'from django.db import migrations, models\n'), ((1115, 1206), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""reviewapp.Comment"""'}), "(on_delete=django.db.models.deletion.CASCADE, to=\n 'reviewapp.Comment')\n", (1132, 1206), False, 'from django.db import migrations, models\n'), ((1337, 1430), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (1353, 1430), False, 'from django.db import migrations, models\n'), ((1465, 1496), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20)'}), '(max_length=20)\n', (1481, 1496), False, 'from django.db import migrations, models\n'), ((1538, 1569), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20)'}), '(max_length=20)\n', (1554, 1569), False, 'from django.db import migrations, models\n'), ((1601, 1693), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""reviewapp.Category"""'}), "(on_delete=django.db.models.deletion.CASCADE, to=\n 'reviewapp.Category')\n", (1618, 1693), False, 'from django.db import migrations, models\n'), ((1820, 1913), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (1836, 1913), False, 'from django.db import migrations, models\n'), ((1951, 1983), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(500)'}), 
'(max_length=500)\n', (1967, 1983), False, 'from django.db import migrations, models\n'), ((2018, 2048), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)'}), '(default=0)\n', (2037, 2048), False, 'from django.db import migrations, models\n'), ((2084, 2114), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)'}), '(default=0)\n', (2103, 2114), False, 'from django.db import migrations, models\n'), ((2148, 2242), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""reviewapp.Restaurant"""'}), "(on_delete=django.db.models.deletion.CASCADE, to=\n 'reviewapp.Restaurant')\n", (2165, 2242), False, 'from django.db import migrations, models\n')] |
""" LSTM classifier """
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import LSTM, Dense, Embedding, GlobalMaxPooling1D, Input
from tensorflow.keras.layers.experimental.preprocessing import TextVectorization
class LSTMClassifier:
""" LSTM classifier """
def fit(self, X_train, y_train, X_val, y_val):
""" LSTM fit """
dataset_train = tf.data.Dataset.from_tensor_slices(
(tf.cast(X_train, tf.string), tf.cast(y_train, tf.int32))
)
dataset_train = dataset_train.shuffle(10000).batch(16)
dataset_val = tf.data.Dataset.from_tensor_slices(
(tf.cast(X_val, tf.string), tf.cast(y_val, tf.int32))
)
dataset_val = dataset_val.batch(16)
encoder = TextVectorization(
max_tokens=10000, output_mode="int", output_sequence_length=128
)
dataset_train_features = dataset_train.map(lambda features, label: features)
encoder.adapt(dataset_train_features)
vocab = np.array(encoder.get_vocabulary())
embedding_dim = 64
vocab_length = len(vocab)
print(vocab_length)
x_in = Input(shape=(1,), dtype="string")
x = encoder(x_in)
x = Embedding(
input_dim=vocab_length,
output_dim=embedding_dim,
embeddings_initializer="uniform",
)(x)
x = LSTM(units=32, return_sequences=True)(x)
x = GlobalMaxPooling1D()(x)
x_out = Dense(1, activation="sigmoid")(x)
lstm_model = tf.keras.models.Model(
inputs=x_in, outputs=x_out, name="lstm_model"
)
lstm_model.summary()
lstm_model.compile(
loss=tf.keras.losses.BinaryCrossentropy(),
optimizer=tf.keras.optimizers.Adam(),
metrics=["accuracy"],
)
history_lstm = lstm_model.fit(
dataset_train, epochs=4, validation_data=dataset_val
)
self.model = lstm_model
def plot_graphs(history, metric):
plt.plot(history.history[metric])
plt.plot(history.history["val_" + metric], "")
plt.xlabel("Epochs")
plt.ylabel(metric)
plt.legend([metric, "val_" + metric])
plt.figure(figsize=(16, 8))
plt.subplot(1, 2, 1)
plot_graphs(history_lstm, "accuracy")
plt.ylim(None, 1)
plt.subplot(1, 2, 2)
plot_graphs(history_lstm, "loss")
plt.ylim(0, None)
# plt.show()
def predict(self, X_test):
""" predict """
y_hat = self.model.predict(X_test)
return np.around(y_hat)
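# Usage sketch (X_* are iterables of raw text strings, y_* are 0/1 labels; all names illustrative):
#   clf = LSTMClassifier()
#   clf.fit(X_train, y_train, X_val, y_val)
#   predictions = clf.predict(X_test)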
| [
"tensorflow.keras.layers.Input",
"matplotlib.pyplot.ylabel",
"tensorflow.keras.losses.BinaryCrossentropy",
"matplotlib.pyplot.xlabel",
"tensorflow.keras.layers.experimental.preprocessing.TextVectorization",
"matplotlib.pyplot.plot",
"tensorflow.keras.layers.GlobalMaxPooling1D",
"tensorflow.keras.layers.Embedding",
"tensorflow.keras.layers.LSTM",
"matplotlib.pyplot.figure",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.optimizers.Adam",
"numpy.around",
"tensorflow.keras.models.Model",
"matplotlib.pyplot.ylim",
"tensorflow.cast",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.legend"
] | [((797, 883), 'tensorflow.keras.layers.experimental.preprocessing.TextVectorization', 'TextVectorization', ([], {'max_tokens': '(10000)', 'output_mode': '"""int"""', 'output_sequence_length': '(128)'}), "(max_tokens=10000, output_mode='int',\n output_sequence_length=128)\n", (814, 883), False, 'from tensorflow.keras.layers.experimental.preprocessing import TextVectorization\n'), ((1190, 1223), 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': '(1,)', 'dtype': '"""string"""'}), "(shape=(1,), dtype='string')\n", (1195, 1223), False, 'from tensorflow.keras.layers import LSTM, Dense, Embedding, GlobalMaxPooling1D, Input\n'), ((1567, 1635), 'tensorflow.keras.models.Model', 'tf.keras.models.Model', ([], {'inputs': 'x_in', 'outputs': 'x_out', 'name': '"""lstm_model"""'}), "(inputs=x_in, outputs=x_out, name='lstm_model')\n", (1588, 1635), True, 'import tensorflow as tf\n'), ((2282, 2309), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(16, 8)'}), '(figsize=(16, 8))\n', (2292, 2309), True, 'import matplotlib.pyplot as plt\n'), ((2318, 2338), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(1)'], {}), '(1, 2, 1)\n', (2329, 2338), True, 'import matplotlib.pyplot as plt\n'), ((2393, 2410), 'matplotlib.pyplot.ylim', 'plt.ylim', (['None', '(1)'], {}), '(None, 1)\n', (2401, 2410), True, 'import matplotlib.pyplot as plt\n'), ((2419, 2439), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(2)'], {}), '(1, 2, 2)\n', (2430, 2439), True, 'import matplotlib.pyplot as plt\n'), ((2490, 2507), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', 'None'], {}), '(0, None)\n', (2498, 2507), True, 'import matplotlib.pyplot as plt\n'), ((2643, 2659), 'numpy.around', 'np.around', (['y_hat'], {}), '(y_hat)\n', (2652, 2659), True, 'import numpy as np\n'), ((1262, 1359), 'tensorflow.keras.layers.Embedding', 'Embedding', ([], {'input_dim': 'vocab_length', 'output_dim': 'embedding_dim', 'embeddings_initializer': '"""uniform"""'}), "(input_dim=vocab_length, output_dim=embedding_dim,\n embeddings_initializer='uniform')\n", (1271, 1359), False, 'from tensorflow.keras.layers import LSTM, Dense, Embedding, GlobalMaxPooling1D, Input\n'), ((1418, 1455), 'tensorflow.keras.layers.LSTM', 'LSTM', ([], {'units': '(32)', 'return_sequences': '(True)'}), '(units=32, return_sequences=True)\n', (1422, 1455), False, 'from tensorflow.keras.layers import LSTM, Dense, Embedding, GlobalMaxPooling1D, Input\n'), ((1471, 1491), 'tensorflow.keras.layers.GlobalMaxPooling1D', 'GlobalMaxPooling1D', ([], {}), '()\n', (1489, 1491), False, 'from tensorflow.keras.layers import LSTM, Dense, Embedding, GlobalMaxPooling1D, Input\n'), ((1511, 1541), 'tensorflow.keras.layers.Dense', 'Dense', (['(1)'], {'activation': '"""sigmoid"""'}), "(1, activation='sigmoid')\n", (1516, 1541), False, 'from tensorflow.keras.layers import LSTM, Dense, Embedding, GlobalMaxPooling1D, Input\n'), ((2066, 2099), 'matplotlib.pyplot.plot', 'plt.plot', (['history.history[metric]'], {}), '(history.history[metric])\n', (2074, 2099), True, 'import matplotlib.pyplot as plt\n'), ((2112, 2158), 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['val_' + metric]", '""""""'], {}), "(history.history['val_' + metric], '')\n", (2120, 2158), True, 'import matplotlib.pyplot as plt\n'), ((2171, 2191), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epochs"""'], {}), "('Epochs')\n", (2181, 2191), True, 'import matplotlib.pyplot as plt\n'), ((2204, 2222), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['metric'], {}), '(metric)\n', (2214, 2222), True, 
'import matplotlib.pyplot as plt\n'), ((2235, 2272), 'matplotlib.pyplot.legend', 'plt.legend', (["[metric, 'val_' + metric]"], {}), "([metric, 'val_' + metric])\n", (2245, 2272), True, 'import matplotlib.pyplot as plt\n'), ((469, 496), 'tensorflow.cast', 'tf.cast', (['X_train', 'tf.string'], {}), '(X_train, tf.string)\n', (476, 496), True, 'import tensorflow as tf\n'), ((498, 524), 'tensorflow.cast', 'tf.cast', (['y_train', 'tf.int32'], {}), '(y_train, tf.int32)\n', (505, 524), True, 'import tensorflow as tf\n'), ((671, 696), 'tensorflow.cast', 'tf.cast', (['X_val', 'tf.string'], {}), '(X_val, tf.string)\n', (678, 696), True, 'import tensorflow as tf\n'), ((698, 722), 'tensorflow.cast', 'tf.cast', (['y_val', 'tf.int32'], {}), '(y_val, tf.int32)\n', (705, 722), True, 'import tensorflow as tf\n'), ((1733, 1769), 'tensorflow.keras.losses.BinaryCrossentropy', 'tf.keras.losses.BinaryCrossentropy', ([], {}), '()\n', (1767, 1769), True, 'import tensorflow as tf\n'), ((1793, 1819), 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', ([], {}), '()\n', (1817, 1819), True, 'import tensorflow as tf\n')] |
from jinja2 import nodes
from jinja2.ext import Extension
import pygments
from pygments import highlight
from pygments.lexers import get_lexer_by_name, guess_lexer
from pygments.formatters import HtmlFormatter
from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, \
Number, Operator, Generic
class InlineCodeHtmlFormatter(HtmlFormatter):
def wrap(self, source, outfile):
return self._wrap_code(source)
def _wrap_code(self, source):
for i, t in source:
yield i, t
class CodeStyle(Style):
default_style = ""
styles = {
Comment: 'italic #7CAFC2',
Keyword: 'bold #DC9656',
Name: '#BA8BAF',
Name.Function: '#F7CA88',
Name.Class: '#A16946',
String: '#AB4642'
}
formatter = HtmlFormatter(
style = CodeStyle,
nobackground = True,
)
iformatter = InlineCodeHtmlFormatter(
style = CodeStyle,
nobackground = True,
)
def code_highlight(full, language, filename, code):
try:
lexer = get_lexer_by_name(language, stripall=True)
insert = highlight(code, lexer, formatter if full else iformatter)
highlighted = True
except pygments.util.ClassNotFound:
try:
            lexer = guess_lexer(code)
            insert = highlight(code, lexer, formatter if full else iformatter)
            highlighted = True
except pygments.util.ClassNotFound:
insert = code
highlighted = False
if not full and highlighted:
insert = insert[:-1] # Remove new line Pygments puts in for some reason
return (
'<div class="codebox">{0}'
'<button class="code-copy-btn">'
'<i class="fa fa-clipboard" aria-hidden="true"></i>'
'</button><hr><div class="code-copy">{1}</div></div>'
).format(
filename,
insert
) if full else (
'<span class="codebox">{0}</span>'
).format(
insert
)
def output_code_style(path):
path.write_text(formatter.get_style_defs('.codebox'))
def parse_expr_if(parser):
if parser.stream.skip_if('comma'):
return parser.parse_expression()
else:
return nodes.Const(None)
class CodeExtension(Extension):
tags = set(['code', 'icode'])
def __init__(self, environment):
super().__init__(environment)
environment.extend(
)
def parse(self, parser):
full_code = (parser.stream.current.value == 'code')
        lineno = next(parser.stream).lineno # I honestly don't really know
# what this does
if parser.stream.current.test('string'): # Is there a string argument?
args = [
nodes.Const(full_code),
parser.parse_expression(), # Get First Argument (No Comma)
parse_expr_if(parser), # Get Second Argument (Comma in front)
]
body = parser.parse_statements(
['name:endcode' if full_code else 'name:endicode'],
drop_needle=True
)
else: # Else skip to 'block_end' and set Arguments to None
while True:
if not parser.stream.current.test('block_end'):
parser.stream.skip()
else:
break
args = [
nodes.Const(full_code),
nodes.Const(None),
nodes.Const(None),
]
body = parser.parse_statements(
['name:endcode' if full_code else 'name:endicode'],
drop_needle=True
)
return nodes.CallBlock(
self.call_method('_code_highlight_call', args),
[], [], body
).set_lineno(lineno)
def _code_highlight_call(self, *args, **kw):
return code_highlight(args[0], args[1], args[2], kw['caller']())
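# Template usage sketch (tag names come from this extension; the language string and the
# optional second argument, used as a caption, are illustrative):
#   {% code "python", "example.py" %}print("hello"){% endcode %}
#   {% icode "python" %}x = 1{% endicode %}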
| [
"pygments.highlight",
"pygments.formatters.HtmlFormatter",
"jinja2.nodes.Const",
"pygments.lexers.guess_lexer",
"pygments.lexers.get_lexer_by_name"
] | [((903, 952), 'pygments.formatters.HtmlFormatter', 'HtmlFormatter', ([], {'style': 'CodeStyle', 'nobackground': '(True)'}), '(style=CodeStyle, nobackground=True)\n', (916, 952), False, 'from pygments.formatters import HtmlFormatter\n'), ((1135, 1177), 'pygments.lexers.get_lexer_by_name', 'get_lexer_by_name', (['language'], {'stripall': '(True)'}), '(language, stripall=True)\n', (1152, 1177), False, 'from pygments.lexers import get_lexer_by_name, guess_lexer\n'), ((1195, 1252), 'pygments.highlight', 'highlight', (['code', 'lexer', '(formatter if full else iformatter)'], {}), '(code, lexer, formatter if full else iformatter)\n', (1204, 1252), False, 'from pygments import highlight\n'), ((2199, 2216), 'jinja2.nodes.Const', 'nodes.Const', (['None'], {}), '(None)\n', (2210, 2216), False, 'from jinja2 import nodes\n'), ((1354, 1371), 'pygments.lexers.guess_lexer', 'guess_lexer', (['code'], {}), '(code)\n', (1365, 1371), False, 'from pygments.lexers import get_lexer_by_name, guess_lexer\n'), ((2740, 2762), 'jinja2.nodes.Const', 'nodes.Const', (['full_code'], {}), '(full_code)\n', (2751, 2762), False, 'from jinja2 import nodes\n'), ((3373, 3395), 'jinja2.nodes.Const', 'nodes.Const', (['full_code'], {}), '(full_code)\n', (3384, 3395), False, 'from jinja2 import nodes\n'), ((3413, 3430), 'jinja2.nodes.Const', 'nodes.Const', (['None'], {}), '(None)\n', (3424, 3430), False, 'from jinja2 import nodes\n'), ((3448, 3465), 'jinja2.nodes.Const', 'nodes.Const', (['None'], {}), '(None)\n', (3459, 3465), False, 'from jinja2 import nodes\n')] |
from django.db import models
from django.contrib.auth.models import AbstractBaseUser
from django.contrib.auth.models import PermissionsMixin
from django.contrib.auth.models import BaseUserManager
from djongo import models
# from djangotoolbox.fields import EmbeddedModelField
from django.utils import timezone
# Create your models here.
class UserProfileManager(BaseUserManager):
"""manager for user profiles"""
def create_user(self,
email,
nom,
phone,
Type,
password=None,
avatar = "",
prenom = "",
Pays = ""
):
"""create the new user profile"""
if not email:
raise ValueError("User most have a email")
email = self.normalize_email(email)
user = self.model(email=email,
nom=nom ,
phone = phone,
avatar = avatar,
Type = Type,
prenom = prenom,
Pays=Pays)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self,
email,
nom,
phone,
Type,
password,
avatar = "",
prenom = "",
Pays = ""):
"""create and save superuser with given detail"""
user = self.create_user(email, nom, phone, Type, password, avatar, prenom, Pays)
user.is_superuser = True
user.is_staff = True
user.save(using=self._db)
return user
class UserProfile(AbstractBaseUser, PermissionsMixin, models.Model):
TYPE = (
('lecteur', 'Lecteur'),
('auteur', 'Auteur'),
('visiteur', 'Visiteur'),
('admin', 'Admin'),
)
email = models.EmailField(max_length=255, unique=True)
nom = models.CharField(max_length=255)
prenom = models.CharField(max_length=255, blank = True)
phone = models.CharField(max_length=255, unique=True)
Pays = models.CharField(max_length=255, blank = True, null = True)
avatar = models.ImageField(upload_to="images/%Y/%m/%d", blank=True, null=True)
Type = models.CharField(max_length=25, choices=TYPE)
abonnements = models.ManyToManyField('self',
symmetrical = False ,
related_name = "abonnement",
blank = True)
abonnes = models.ManyToManyField('self',
symmetrical = False ,
related_name = "abonne",
blank = True)
# abonnes = models.ForeignKey('self', related_name = "abonne", on_delete=models.CASCADE, null = True)
is_active = models.BooleanField(default=True)
is_staff = models.BooleanField(default=False)
objects = UserProfileManager()
USERNAME_FIELD = "email"
REQUIRED_FIELDS = ["nom", "phone"]
    def get_last_name(self):
        # "nom" holds the surname on this model (there is no last_name field).
        return self.nom
class Categorie(models.Model):
"""docstring for categorie"""
CATEGORIE = (
('actualité', 'Actualité'),
('affaire', 'Affaire'),
('sport et loisir', 'Sport et Loisir'),
)
nom = models.CharField(max_length=25, choices=CATEGORIE, unique = True)
def __str__(self):
return self.nom
class Commentaire(models.Model):
"""docstring for Commentaires"""
user = models.ForeignKey(UserProfile,
on_delete=models.CASCADE,
related_name = "comment_user"
)
date = models.DateTimeField(default = timezone.now)
texte = models.TextField()
reponses = models.ManyToManyField('self',
symmetrical = False ,
related_name = "reponse",
blank = True)
likes = models.ManyToManyField("Like",
related_name = "like_reponses",
blank = True
)
def __str__(self):
return "Commentaire de " + self.user.nom
class Like(models.Model):
"""docstring for Likes"""
user = models.ForeignKey(UserProfile,
on_delete=models.CASCADE,
related_name = "like_user")
date = models.DateTimeField(default = timezone.now)
def __str__(self):
return self.user.nom + " à liker "
class Document(models.Model):
"""docstring for Document"""
TYPE = (
('ArtMag', 'magazine/journal'),
('livre', 'Livre')
)
titre = models.CharField(max_length=255, default = "NAN")
categorie = models.ForeignKey(Categorie,
on_delete = models.CASCADE,
blank = True)
descriptions = models.TextField(blank = True)
date = models.DateTimeField(default = timezone.now)
contenu = models.TextField(blank=True)
tags = models.CharField(max_length=300, blank=True)
auteur = models.ForeignKey(UserProfile, on_delete=models.CASCADE)
lecteurs = models.ManyToManyField(UserProfile,
related_name = "lecteurs",
blank = True)
repost = models.ManyToManyField("Repost",
related_name = "repost_doc",
blank= True
)
commentaires = models.ManyToManyField(Commentaire,
related_name = "comment_doc",
blank= True
)
likes = models.ManyToManyField(Like,
related_name = "like_doc",
blank= True
)
    # - Cover image
    # - Subscriptions
    # - Publications
Type = models.CharField(max_length=25, choices=TYPE)
def __str__(self):
return self.titre
class Repost(models.Model):
"""docstring for Repost"""
articlemagazine = models.ForeignKey(Document,
on_delete=models.CASCADE,
related_name = "articlerepost")
user = models.ForeignKey(UserProfile,
on_delete=models.CASCADE,
related_name = "userrepost")
# texte = models.TextField()
date = models.DateTimeField(default = timezone.now)
likes = models.ManyToManyField(
"Like",
related_name = "likesrepost"
)
commentaires = models.ManyToManyField(
"Commentaire",
related_name = "commentRepost")
def __str__(self):
return "repost" + self.articlemagazine.titre
class Lecture(models.Model):
"""docstring for Lecture"""
date = models.DateTimeField(auto_now=True)
document = models.ForeignKey(Document,
on_delete=models.CASCADE,
related_name = "doclecture")
user = models.ForeignKey(UserProfile,
on_delete=models.CASCADE,
related_name = "userlecture")
def __str__(self):
return self.document.titre
# def __str__(self):
# return self.articlemagazine.titre
# class Reponse(models.Model):
# """docstring for Reponses"""
# user = models.ForeignKey(UserProfile,
# on_delete=models.CASCADE,
# related_name = "userreponses")
# commentaires = models.ForeignKey(Commentaire,
# on_delete=models.CASCADE,
# related_name = "commentreponses")
# texte = models.CharField(max_length = 300)
# date = models.DateTimeField(auto_now=True)
# def __str__(self):
# return self.commentaires.texte
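# Usage sketch (field values are illustrative):
#   user = UserProfile.objects.create_user(
#       email="reader@example.com", nom="Doe", phone="+33600000000",
#       Type="lecteur", password="change-me")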
| [
"djongo.models.BooleanField",
"djongo.models.CharField",
"djongo.models.DateTimeField",
"djongo.models.ManyToManyField",
"djongo.models.ImageField",
"djongo.models.EmailField",
"djongo.models.ForeignKey",
"djongo.models.TextField"
] | [((1783, 1829), 'djongo.models.EmailField', 'models.EmailField', ([], {'max_length': '(255)', 'unique': '(True)'}), '(max_length=255, unique=True)\n', (1800, 1829), False, 'from djongo import models\n'), ((1841, 1873), 'djongo.models.CharField', 'models.CharField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (1857, 1873), False, 'from djongo import models\n'), ((1888, 1932), 'djongo.models.CharField', 'models.CharField', ([], {'max_length': '(255)', 'blank': '(True)'}), '(max_length=255, blank=True)\n', (1904, 1932), False, 'from djongo import models\n'), ((1948, 1993), 'djongo.models.CharField', 'models.CharField', ([], {'max_length': '(255)', 'unique': '(True)'}), '(max_length=255, unique=True)\n', (1964, 1993), False, 'from djongo import models\n'), ((2006, 2061), 'djongo.models.CharField', 'models.CharField', ([], {'max_length': '(255)', 'blank': '(True)', 'null': '(True)'}), '(max_length=255, blank=True, null=True)\n', (2022, 2061), False, 'from djongo import models\n'), ((2080, 2149), 'djongo.models.ImageField', 'models.ImageField', ([], {'upload_to': '"""images/%Y/%m/%d"""', 'blank': '(True)', 'null': '(True)'}), "(upload_to='images/%Y/%m/%d', blank=True, null=True)\n", (2097, 2149), False, 'from djongo import models\n'), ((2162, 2207), 'djongo.models.CharField', 'models.CharField', ([], {'max_length': '(25)', 'choices': 'TYPE'}), '(max_length=25, choices=TYPE)\n', (2178, 2207), False, 'from djongo import models\n'), ((2228, 2320), 'djongo.models.ManyToManyField', 'models.ManyToManyField', (['"""self"""'], {'symmetrical': '(False)', 'related_name': '"""abonnement"""', 'blank': '(True)'}), "('self', symmetrical=False, related_name='abonnement',\n blank=True)\n", (2250, 2320), False, 'from djongo import models\n'), ((2370, 2458), 'djongo.models.ManyToManyField', 'models.ManyToManyField', (['"""self"""'], {'symmetrical': '(False)', 'related_name': '"""abonne"""', 'blank': '(True)'}), "('self', symmetrical=False, related_name='abonne',\n blank=True)\n", (2392, 2458), False, 'from djongo import models\n'), ((2622, 2655), 'djongo.models.BooleanField', 'models.BooleanField', ([], {'default': '(True)'}), '(default=True)\n', (2641, 2655), False, 'from djongo import models\n'), ((2672, 2706), 'djongo.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (2691, 2706), False, 'from djongo import models\n'), ((3105, 3168), 'djongo.models.CharField', 'models.CharField', ([], {'max_length': '(25)', 'choices': 'CATEGORIE', 'unique': '(True)'}), '(max_length=25, choices=CATEGORIE, unique=True)\n', (3121, 3168), False, 'from djongo import models\n'), ((3308, 3398), 'djongo.models.ForeignKey', 'models.ForeignKey', (['UserProfile'], {'on_delete': 'models.CASCADE', 'related_name': '"""comment_user"""'}), "(UserProfile, on_delete=models.CASCADE, related_name=\n 'comment_user')\n", (3325, 3398), False, 'from djongo import models\n'), ((3429, 3471), 'djongo.models.DateTimeField', 'models.DateTimeField', ([], {'default': 'timezone.now'}), '(default=timezone.now)\n', (3449, 3471), False, 'from djongo import models\n'), ((3487, 3505), 'djongo.models.TextField', 'models.TextField', ([], {}), '()\n', (3503, 3505), False, 'from djongo import models\n'), ((3525, 3614), 'djongo.models.ManyToManyField', 'models.ManyToManyField', (['"""self"""'], {'symmetrical': '(False)', 'related_name': '"""reponse"""', 'blank': '(True)'}), "('self', symmetrical=False, related_name='reponse',\n blank=True)\n", (3547, 3614), False, 'from djongo import models\n'), ((3663, 3735), 
'djongo.models.ManyToManyField', 'models.ManyToManyField', (['"""Like"""'], {'related_name': '"""like_reponses"""', 'blank': '(True)'}), "('Like', related_name='like_reponses', blank=True)\n", (3685, 3735), False, 'from djongo import models\n'), ((3920, 4007), 'djongo.models.ForeignKey', 'models.ForeignKey', (['UserProfile'], {'on_delete': 'models.CASCADE', 'related_name': '"""like_user"""'}), "(UserProfile, on_delete=models.CASCADE, related_name=\n 'like_user')\n", (3937, 4007), False, 'from djongo import models\n'), ((4036, 4078), 'djongo.models.DateTimeField', 'models.DateTimeField', ([], {'default': 'timezone.now'}), '(default=timezone.now)\n', (4056, 4078), False, 'from djongo import models\n'), ((4330, 4377), 'djongo.models.CharField', 'models.CharField', ([], {'max_length': '(255)', 'default': '"""NAN"""'}), "(max_length=255, default='NAN')\n", (4346, 4377), False, 'from djongo import models\n'), ((4397, 4463), 'djongo.models.ForeignKey', 'models.ForeignKey', (['Categorie'], {'on_delete': 'models.CASCADE', 'blank': '(True)'}), '(Categorie, on_delete=models.CASCADE, blank=True)\n', (4414, 4463), False, 'from djongo import models\n'), ((4507, 4535), 'djongo.models.TextField', 'models.TextField', ([], {'blank': '(True)'}), '(blank=True)\n', (4523, 4535), False, 'from djongo import models\n'), ((4550, 4592), 'djongo.models.DateTimeField', 'models.DateTimeField', ([], {'default': 'timezone.now'}), '(default=timezone.now)\n', (4570, 4592), False, 'from djongo import models\n'), ((4610, 4638), 'djongo.models.TextField', 'models.TextField', ([], {'blank': '(True)'}), '(blank=True)\n', (4626, 4638), False, 'from djongo import models\n'), ((4651, 4695), 'djongo.models.CharField', 'models.CharField', ([], {'max_length': '(300)', 'blank': '(True)'}), '(max_length=300, blank=True)\n', (4667, 4695), False, 'from djongo import models\n'), ((4710, 4766), 'djongo.models.ForeignKey', 'models.ForeignKey', (['UserProfile'], {'on_delete': 'models.CASCADE'}), '(UserProfile, on_delete=models.CASCADE)\n', (4727, 4766), False, 'from djongo import models\n'), ((4784, 4856), 'djongo.models.ManyToManyField', 'models.ManyToManyField', (['UserProfile'], {'related_name': '"""lecteurs"""', 'blank': '(True)'}), "(UserProfile, related_name='lecteurs', blank=True)\n", (4806, 4856), False, 'from djongo import models\n'), ((4895, 4966), 'djongo.models.ManyToManyField', 'models.ManyToManyField', (['"""Repost"""'], {'related_name': '"""repost_doc"""', 'blank': '(True)'}), "('Repost', related_name='repost_doc', blank=True)\n", (4917, 4966), False, 'from djongo import models\n'), ((5025, 5100), 'djongo.models.ManyToManyField', 'models.ManyToManyField', (['Commentaire'], {'related_name': '"""comment_doc"""', 'blank': '(True)'}), "(Commentaire, related_name='comment_doc', blank=True)\n", (5047, 5100), False, 'from djongo import models\n'), ((5152, 5217), 'djongo.models.ManyToManyField', 'models.ManyToManyField', (['Like'], {'related_name': '"""like_doc"""', 'blank': '(True)'}), "(Like, related_name='like_doc', blank=True)\n", (5174, 5217), False, 'from djongo import models\n'), ((5344, 5389), 'djongo.models.CharField', 'models.CharField', ([], {'max_length': '(25)', 'choices': 'TYPE'}), '(max_length=25, choices=TYPE)\n', (5360, 5389), False, 'from djongo import models\n'), ((5529, 5617), 'djongo.models.ForeignKey', 'models.ForeignKey', (['Document'], {'on_delete': 'models.CASCADE', 'related_name': '"""articlerepost"""'}), "(Document, on_delete=models.CASCADE, related_name=\n 'articlerepost')\n", (5546, 5617), False, 'from 
djongo import models\n'), ((5647, 5735), 'djongo.models.ForeignKey', 'models.ForeignKey', (['UserProfile'], {'on_delete': 'models.CASCADE', 'related_name': '"""userrepost"""'}), "(UserProfile, on_delete=models.CASCADE, related_name=\n 'userrepost')\n", (5664, 5735), False, 'from djongo import models\n'), ((5798, 5840), 'djongo.models.DateTimeField', 'models.DateTimeField', ([], {'default': 'timezone.now'}), '(default=timezone.now)\n', (5818, 5840), False, 'from djongo import models\n'), ((5856, 5914), 'djongo.models.ManyToManyField', 'models.ManyToManyField', (['"""Like"""'], {'related_name': '"""likesrepost"""'}), "('Like', related_name='likesrepost')\n", (5878, 5914), False, 'from djongo import models\n'), ((5966, 6033), 'djongo.models.ManyToManyField', 'models.ManyToManyField', (['"""Commentaire"""'], {'related_name': '"""commentRepost"""'}), "('Commentaire', related_name='commentRepost')\n", (5988, 6033), False, 'from djongo import models\n'), ((6238, 6273), 'djongo.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)'}), '(auto_now=True)\n', (6258, 6273), False, 'from djongo import models\n'), ((6290, 6375), 'djongo.models.ForeignKey', 'models.ForeignKey', (['Document'], {'on_delete': 'models.CASCADE', 'related_name': '"""doclecture"""'}), "(Document, on_delete=models.CASCADE, related_name='doclecture'\n )\n", (6307, 6375), False, 'from djongo import models\n'), ((6404, 6493), 'djongo.models.ForeignKey', 'models.ForeignKey', (['UserProfile'], {'on_delete': 'models.CASCADE', 'related_name': '"""userlecture"""'}), "(UserProfile, on_delete=models.CASCADE, related_name=\n 'userlecture')\n", (6421, 6493), False, 'from djongo import models\n')] |
import torch.nn as nn
import numpy
class BinOp():
def __init__(self, model):
# count the number of Conv2d and Linear
count_targets = 0
for m in model.modules():
if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):
count_targets = count_targets + 1
        # To binarize only the ResBlock layers, this range has to be set manually each time
start_range = 1
end_range = count_targets-5
self.bin_range = numpy.linspace(start_range,
end_range, end_range-start_range+1)\
.astype('int').tolist()
self.num_of_params = len(self.bin_range)
self.saved_params = []
self.target_params = []
self.target_modules = []
index = -1
for m in model.modules():
if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):
index = index + 1
if index in self.bin_range:
tmp = m.weight.data.clone()
self.saved_params.append(tmp)
self.target_modules.append(m.weight)
def binarization(self):
self.meancenterConvParams()
self.clampConvParams()
self.save_params()
self.binarizeConvParams()
def meancenterConvParams(self):
for index in range(self.num_of_params):
s = self.target_modules[index].data.size()
negMean = self.target_modules[index].data.mean(1, keepdim=True).\
mul(-1).expand_as(self.target_modules[index].data)
self.target_modules[index].data = self.target_modules[index].data.add(negMean)
def clampConvParams(self):
for index in range(self.num_of_params):
self.target_modules[index].data = \
self.target_modules[index].data.clamp(-1.0, 1.0)
def save_params(self):
for index in range(self.num_of_params):
self.saved_params[index].copy_(self.target_modules[index].data)
def binarizeConvParams(self):
for index in range(self.num_of_params):
n = self.target_modules[index].data[0].nelement()
s = self.target_modules[index].data.size()
if len(s) == 4:
m = self.target_modules[index].data.norm(1, 3, keepdim=True)\
.sum(2, keepdim=True).sum(1, keepdim=True).div(n)
elif len(s) == 2:
m = self.target_modules[index].data.norm(1, 1, keepdim=True).div(n)
self.target_modules[index].data = \
self.target_modules[index].data.sign().mul(m.expand(s))
def restore(self):
for index in range(self.num_of_params):
self.target_modules[index].data.copy_(self.saved_params[index])
def updateBinaryGradWeight(self):
for index in range(self.num_of_params):
weight = self.target_modules[index].data
n = weight[0].nelement()
s = weight.size()
if len(s) == 4:
m = weight.norm(1, 3, keepdim=True)\
.sum(2, keepdim=True).sum(1, keepdim=True).div(n).expand(s)
elif len(s) == 2:
m = weight.norm(1, 1, keepdim=True).div(n).expand(s)
m[weight.lt(-1.0)] = 0
m[weight.gt(1.0)] = 0
m = m.mul(self.target_modules[index].grad.data)
m_add = weight.sign().mul(self.target_modules[index].grad.data)
if len(s) == 4:
m_add = m_add.sum(3, keepdim=True)\
.sum(2, keepdim=True).sum(1, keepdim=True).div(n).expand(s)
elif len(s) == 2:
m_add = m_add.sum(1, keepdim=True).div(n).expand(s)
m_add = m_add.mul(weight.sign())
self.target_modules[index].grad.data = m.add(m_add).mul(1.0-1.0/s[1]).mul(n)
self.target_modules[index].grad.data = self.target_modules[index].grad.data.mul(1e+9)
self.target_modules[index].grad.data = self.target_modules[index].grad.data.clamp(-5.0, 5.0) | [
"numpy.linspace"
] | [((444, 511), 'numpy.linspace', 'numpy.linspace', (['start_range', 'end_range', '(end_range - start_range + 1)'], {}), '(start_range, end_range, end_range - start_range + 1)\n', (458, 511), False, 'import numpy\n')] |
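A minimal sketch of how a helper like BinOp is typically driven inside an XNOR-Net-style training loop. This is an added illustration, not part of the row above; model, criterion and optimizer are hypothetical stand-ins for a PyTorch module, loss function and optimizer.

def train_step(model, bin_op, data, target, criterion, optimizer):
    # binarize -> forward/backward on the binary weights -> restore the saved
    # full-precision weights -> rescale their gradients -> optimizer step
    bin_op.binarization()                # targeted weights become sign(W) * mean(|W|)
    output = model(data)                 # forward pass sees the binarized weights
    loss = criterion(output, target)
    optimizer.zero_grad()
    loss.backward()
    bin_op.restore()                     # put the full-precision weights back
    bin_op.updateBinaryGradWeight()      # rescale gradients for the real-valued update
    optimizer.step()
    return loss.item()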
from distutils.core import setup
setup(
name='beancount-prices-custom',
version='1.0',
packages=['my_sources'],
license='MIT',
install_requires = [
'flake8',
'beancount'
]
)
| [
"distutils.core.setup"
] | [((34, 173), 'distutils.core.setup', 'setup', ([], {'name': '"""beancount-prices-custom"""', 'version': '"""1.0"""', 'packages': "['my_sources']", 'license': '"""MIT"""', 'install_requires': "['flake8', 'beancount']"}), "(name='beancount-prices-custom', version='1.0', packages=['my_sources'\n ], license='MIT', install_requires=['flake8', 'beancount'])\n", (39, 173), False, 'from distutils.core import setup\n')] |
import pdb
import re
import os
import json
import datetime
import time
import pandas as pd
import numpy as np
import matplotlib
matplotlib.use("tkAgg")
import matplotlib.pyplot as plt
import seaborn as sns
import sleep_scorer.plottools as pt
import sleep_scorer.remtools as rt
class EEGPlotter(object):
"""dashboard-like plotting for EEG data
The panel view incorporates:
- raw signal timeseries
- features (power spectra)
- pca (or lowD) feature projections
- scores (human/model scores or other categoricals like
consensus/transition/conflict)
input:
------
std: (StagedTrialData object) (raw edr *and* features)
pca: (PCA object) pca avg/vecs/vals
scores : (ScoreBlock)
methods:
------
plot(fig, ti/tf): high level, just specify target fig and start/end times
TODO:
GENERAL
- general time series plotting (e.g. EMG power)
- general 2d/histogram plots (e.g. EMG power)
SPEED HAX
- fuse time series
- keep axes, clear data
- keep histograms
- scrolling w/pseudocache:
- plot some range beyond axes limits,
- adjust axes limits for small steps
- replot big chunks less often/ asneeded
PDF OVERLAY
- comet
- conflicts
- switch epochs
"""
def __init__(self, std=None, pca=None, scores=None, params={}):
self.std = std
self.pca = pca
self.scores = scores
self.df_prj = self.pca.project(std.features.data)
self.params = self.default_params()
self.params.update(params)
# FEATURES
self.X = self.std.features.data
# RAW features (normalize, stride)
# # raw features, look like shit w/current formatting
# self.df_feat = rt.SxxBundle.from_EDFData(self.std.edf).to_dataframe()
# stash (time consuming) computed values here for re-use
self.stash = {}
# initial viewstate
self.viewEpoch = 100
self.viewWidth = 20
# render the first frame
self.make_fig()
self.render()
@property
def viewrange(self):
ea = self.viewEpoch - self.viewWidth
eb = self.viewEpoch + self.viewWidth
return [ea, eb]
def default_params(self):
"""make default params"""
params = dict(
name='gallahad',
quest='grail'
)
return params
def about(self):
"""helpful information at a glance"""
print('------ EEGPlotter.about() ------')
print('params:', self.params)
        self.scores.about()
self.pca.about()
self.std.about()
def make_fig(self):
"""create the figure w/event handling"""
aa = '707070'
#aa = 'a0a0a0'
b0 = 'b0b0b0'
gg = 'c0c0c0'
pp = {
'lines.linewidth':2,
'axes.facecolor':'k',
'axes.edgecolor': gg,
'axes.labelcolor': gg,
'figure.facecolor':'k',
'figure.edgecolor':'k',
'savefig.facecolor':'k',
'savefig.edgecolor':'k',
'xtick.color' : gg,
'ytick.color' : gg,
'grid.color' : aa,
'text.color' : gg
}
matplotlib.rcParams.update(pp)
self.fig = plt.figure(figsize=(16, 8), dpi=80)
self.fig.canvas.mpl_connect('key_press_event', self.kupdate)
self.fig.canvas.mpl_connect('button_press_event', self.mupdate)
self.fig.canvas.mpl_connect('scroll_event', self.mupdate)
# self.fig.set_facecolor('k')
def kupdate(self, event):
"""keypress updates"""
# step sizes
s = [1, 5, 10]
# print(event.key)
if event.key == 'left':
self.lstep(inc=s[0])
# if event.key == 'shift+left':
# self.lstep(inc=s[1])
if event.key == 'ctrl+left':
self.lstep(inc=s[2])
if event.key == 'right':
self.rstep(inc=s[0])
# if event.key == 'shift+right':
# self.lstep(inc=s[1])
if event.key == 'ctrl+right':
self.rstep(inc=s[2])
if event.key == 'up':
self.viewWidth = max(self.viewWidth-1, 3)
self.render()
if event.key == 'ctrl+up':
self.viewWidth = max(self.viewWidth-2, 3)
self.render()
if event.key == 'down':
self.viewWidth += 1
self.render()
if event.key == 'ctrl+down':
self.viewWidth += 2
self.render()
def mupdate(self, event):
"""update when mouse buttons pushed or wheel spun"""
# step sizes
s = [1, 5, 10]
# STEP LEFT (backward in time)
if event.button == 1:
if event.key is None:
self.lstep(inc=s[0])
elif event.key == 'shift':
self.lstep(inc=s[1])
elif event.key == 'control':
self.lstep(inc=s[2])
# STEP RIGHT (forward in time)
if event.button == 3:
if event.key is None:
self.rstep(inc=s[0])
elif event.key == 'shift':
self.rstep(inc=s[1])
elif event.key == 'control':
self.rstep(inc=s[2])
# zoom out
if event.button == 'down':
self.viewWidth += 1
self.render()
# zoom in
if event.button == 'up':
self.viewWidth = max(self.viewWidth-1, 3)
self.render()
def rstep(self, inc=1):
"""step right, next epoch"""
self.viewEpoch += inc
self.render()
def lstep(self, inc=1):
"""step left, prev epoch"""
self.viewEpoch -= inc
self.render()
def render(self, viewEpoch=None, viewWidth=None):
"""render the figure
render?! I hardly know 'er!
"""
t00 = time.time()
sig_labels_plot = ['EEG1', 'EEG2', 'EMG']
if viewEpoch is not None:
self.viewEpoch = viewEpoch
if viewWidth is not None:
self.viewWidth = viewWidth
[ia, ib] = self.viewrange
ie = self.viewEpoch
chunksize = ib-ia
edf = self.std.edf
dfmerge = self.df_prj
num_epochs = edf.num_epochs
epoch_duration = edf.epoch_duration
spectrograms = edf.spectrograms
signal_traces = edf.signal_traces
t05 = time.time()
figx = self.fig
t10 = time.time()
#== plot AXES
# if self.fig.axes == []:
# self.ax = [
# plt.subplot2grid((4,7),(0,0), rowspan=1, colspan=4),
# plt.subplot2grid((4,7),(1,0), rowspan=1, colspan=4),
# plt.subplot2grid((4,7),(2,0), rowspan=2, colspan=2),
# plt.subplot2grid((4,7),(2,2), rowspan=2, colspan=2),
# plt.subplot2grid((4,7),(0,4), rowspan=4, colspan=2)
# ]
# else:
# #pdb.set_trace()
# for axi in self.fig.axes:
# axi.clear()
self.ax = [
plt.subplot2grid((4,7),(0,0), rowspan=1, colspan=4),
plt.subplot2grid((4,7),(1,0), rowspan=1, colspan=4),
plt.subplot2grid((4,7),(2,0), rowspan=2, colspan=2),
plt.subplot2grid((4,7),(2,2), rowspan=2, colspan=2),
plt.subplot2grid((4,7),(0,4), rowspan=4, colspan=2)
]
axx = self.ax[0:4]
axb = [self.ax[-1]]
t15 = time.time()
# print(' --')
# print(' t assign: %4.2f' % (t05-t00))
# print(' t fig : %4.2f' % (t10-t05))
# print(' t ax : %4.2f' % (t15-t10))
#======================================================================
#======================================================================
#======== LHS (signals/pca)
#======================================================================
#======================================================================
t20 = time.time()
#==================================================
#== panel 0, RAW signal time series
raw_stride = 5
dy_raw = -300
tr000 = time.time()
xxx, yyy, lbl = [], [], []
for i, label in enumerate(sig_labels_plot):
st = signal_traces[label]
ndxi = int(st.samples_per_epoch*ia)
ndxf = int(st.samples_per_epoch*ib)
ti = ndxi/st.f
tf = ndxf/st.f
xx = np.linspace(ti, tf, int(st.samples_per_epoch)*chunksize)
yy = st.sig[ndxi:ndxf]+dy_raw*i
xxx.append(xx[::raw_stride])
yyy.append(yy[::raw_stride])
lbl.append(label)
tr001 = time.time()
tr002 = time.time()
xxx = np.asarray(xxx).T
yyy = np.asarray(yyy).T
lobj = axx[0].plot(xxx, yyy, lw=1)
# BOX BOX
ndxe = int(st.samples_per_epoch*ie)
te = ndxe/st.f
xbox = [te, te, te-10, te-10, te]
ybox = [-900, 300, 300, -900, -900]
axx[0].plot(xbox, ybox, 'c-', ms=0, lw=2)
tr003 = time.time()
axx[0].set_ylim([-400+2*dy_raw, 400])
axx[0].set_xlim([xx[0], xx[-1]])
#axx[0].set_xticks(np.linspace(ti, tf, chunksize+1))
#axx[0].set_xticklabels([])
axx[0].grid(True)
axx[0].set_ylabel('raw signals')
axx[0].set_xlabel('t [s]')
axx[0].spines['top'].set_visible(False)
axx[0].spines['right'].set_visible(False)
axx[0].spines['bottom'].set_visible(False)
leg = axx[0].legend(lobj, lbl, loc='upper right') #, ncol=len(lbl))
leg.get_frame().set_edgecolor('none')
tr004 = time.time()
# print('raw 1 : %3.0f' % ((tr001-tr000)*1000))
# print('raw 2 : %3.0f' % ((tr002-tr001)*1000))
# print('raw 3 : %3.0f' % ((tr003-tr002)*1000))
# print('raw 4 : %3.0f' % ((tr004-tr003)*1000))
# PCA histos and projections
t40 = time.time()
#==================================================
#== panel 1, PC time series
pcvec_cols = ['PC1', 'PC2', 'PC3']
for i, col in enumerate(pcvec_cols):
dy = -1
xx = np.arange(ia+1, ib+1)
yy = dfmerge[col][ia:ib] + dy*i
axx[1].plot(xx, yy, '-o', ms=4, label=col)
# BOX BOX
xbox = [ie-0.5, ie-0.5, ie+0.5, ie+0.5, ie-0.5]
ybox = [-2.8, 0.8, 0.8, -2.8, -2.8]
axx[1].plot(xbox, ybox, 'c-', ms=0, lw=2)
axx[1].set_xlim([ia+0.5, ib+0.5])
axx[1].set_ylim(-2.9, 0.9)
axx[1].set_xlabel('Epoch')
axx[1].set_ylabel('PC projections')
axx[1].grid(True)
axx[1].spines['top'].set_visible(False)
axx[1].spines['right'].set_visible(False)
axx[1].spines['bottom'].set_visible(False)
leg = axx[1].legend(loc='upper right') #, ncol=len(pcvec_cols))
leg.get_frame().set_edgecolor('none')
#==================================================
#== panel 2 and 3: PC 2D projections
line_kwa = dict(
color='magenta',
marker='o',
lw=2,
ms=4,
)
# line_kwa_inner = dict(
# color='blue',
# marker='o',
# lw=0,
# ms=3,
# )
pca_hist_kwa = dict(
numsig=3,
numbin=60,
)
plt_hist_kwa = dict(
cmap='Greys_r',
levels='auto',
ptype='imshow',
)
tail_length = 7
t50 = time.time()
# pre-compute histograms and stash them
if 'h2d_32' not in self.stash.keys():
h2d = self.pca.project_histo(self.X, PCs=[3,2], **pca_hist_kwa).normalize().logscale()
self.stash['h2d_32'] = h2d
if 'h2d_12' not in self.stash.keys():
h2d = self.pca.project_histo(self.X, PCs=[1,2], **pca_hist_kwa).normalize().logscale()
self.stash['h2d_12'] = h2d
h2d_32 = self.stash['h2d_32']
h2d_12 = self.stash['h2d_12']
t55 = time.time()
#pt.plot_PCA_2D_hist(X=self.X, h2d=h2d_32, pca=self.pca, PCs=[3,2], ax=axx[2], **pca_hist_kwa, cbar=False)
pt.plot_2D_hist(h2d=h2d_32, ax=axx[2], **plt_hist_kwa, cbar=False)
pt.plot_pca_crosshair(ax=axx[2], sigX=h2d_32.varX, sigY=h2d_32.varY)
# axx[2].plot(dfmerge['PC3'][ia:ib], dfmerge['PC2'][ia:ib], **line_kwa)
# axx[2].plot(dfmerge['PC3'][ia:ib], dfmerge['PC2'][ia:ib], **line_kwa_inner)
axx[2].plot(dfmerge['PC3'][ie-tail_length:ie], dfmerge['PC2'][ie-tail_length:ie], **line_kwa)
axx[2].plot(dfmerge['PC3'][ie-1], dfmerge['PC2'][ie-1], 'co', mfc='w', mew=4, ms=12)
axx[2].set_xlabel('PC3')
axx[2].set_ylabel('PC2')
axx[2].spines['top'].set_visible(False)
axx[2].spines['right'].set_visible(False)
axx[2].spines['bottom'].set_visible(False)
axx[2].spines['left'].set_visible(False)
#pt.plot_PCA_2D_hist(X=self.X, h2d=h2d_12, pca=self.pca, PCs=[1,2], ax=axx[3], **pca_hist_kwa, cbar=True)
pt.plot_2D_hist(h2d=h2d_12, ax=axx[3], **plt_hist_kwa, cbar=False)
pt.plot_pca_crosshair(ax=axx[3], sigX=h2d_12.varX, sigY=h2d_12.varY)
# axx[3].plot(dfmerge['PC1'][ia:ib], dfmerge['PC2'][ia:ib], **line_kwa)
# axx[3].plot(dfmerge['PC1'][ia:ib], dfmerge['PC2'][ia:ib], **line_kwa_inner)
axx[3].plot(dfmerge['PC1'][ie-tail_length:ie], dfmerge['PC2'][ie-tail_length:ie], **line_kwa)
axx[3].plot(dfmerge['PC1'][ie-1], dfmerge['PC2'][ie-1], 'co', mfc='w', mew=4, ms=12)
axx[3].set_xlabel('PC1')
axx[3].set_ylabel('')
axx[3].set_yticklabels([])
axx[3].spines['top'].set_visible(False)
axx[3].spines['right'].set_visible(False)
axx[3].spines['bottom'].set_visible(False)
axx[3].spines['left'].set_visible(False)
# overlay scores on 2D histograms
# BROKEN (should use scoreblock not scorewizard)
try:
#print('ia,ib:', ia,ib)
dfj = dfmerge[ia+1:ib]
wk = dfj[dfj['cScoreNum'] == sw.scoreStr2Num['Wake']]
rm = dfj[dfj['cScoreNum'] == sw.scoreStr2Num['REM']]
nr = dfj[dfj['cScoreNum'] == sw.scoreStr2Num['Non REM']]
xx = dfj[dfj['cScoreNum'] == 0]
kwa = dict(lw=0, marker='o', mec='k', ms=7)
axx[2].plot(wk['PC2'], wk['PC3'], **kwa, color='blue', label='Wake')
axx[2].plot(rm['PC2'], rm['PC3'], **kwa, color='red', label='REM')
axx[2].plot(nr['PC2'], nr['PC3'], **kwa, color='green', label='Non REM')
axx[3].plot(wk['PC1'], wk['PC2'], **kwa, color='blue', label='Wake')
axx[3].plot(rm['PC1'], rm['PC2'], **kwa, color='red', label='REM')
axx[3].plot(nr['PC1'], nr['PC2'], **kwa, color='green', label='Non REM')
except:
pass
#======================================================================
#======================================================================
#============================= RHS (features)
#======================================================================
#======================================================================
t60 = time.time()
df_feat_index = self.std.features.df_index
unique_scores = ['all']
gt_colors = plt.rcParams['axes.prop_cycle'].by_key()['color']
fig, ax, channel_info = pt.plot_features_template(
y0=ia,
df_feat_index=df_feat_index,
unique_scores=unique_scores,
xpad=3,
boxheight=1,
fig=figx,
ax=axb
)
t65 = time.time()
# FEATURES
for i, chi in enumerate(channel_info):
# NOTE plot calls are rate limiting (10-15ms each)
# TODO: flip order of channels/epochs, concatenate channels w/padding
taa = time.time()
feature_scale = 4
xxx, yyy, ccc = [], [], []
xmax = 0
for it in range(ia, ib):
cc = gt_colors[it % len(gt_colors)]
xx = chi['xndx']
yy = self.std.features.data[chi['ndx'], it]*feature_scale+it+1
ccc.append(cc)
xxx.append(xx)
yyy.append(yy)
xmax = max(xmax, max(xx))
#axb[0].set_prop_cycle('color', ccc)
tbb = time.time()
axb[0].plot(np.asarray(xxx).T, np.asarray(yyy).T, color='gray')
# re-plot the current viewEpoch features in cyan
yve = self.X[chi['ndx'], ie-1]*feature_scale+ie
axb[0].plot(xx , yve, 'c-', lw=3)
tcc = time.time()
# print('ftplt 1 : %3.0f' % ((tbb-taa)*1000))
# print('ftplt 2 : %3.0f' % ((tcc-tbb)*1000))
t70 = time.time()
# SCORES
# TODO: make this a single call to scatter (won't work, scatter cannot use >1 marker)
# TODO: must call plot or scatter 1x per label -- speed test comparison
scoreTypes = self.scores.df['scoreType'].unique()
scoreTags = self.scores.df['scoreTag'].unique()
for j, scoreTag in enumerate(scoreTags):
data = self.scores.keeprows(conditions=[('scoreTag', scoreTag)])
# print(scoreTag)
# print(data.data)
dx = xmax+5+6*j
dfj = data.data.ravel()[ia:ib]
yvals = np.arange(ia, ib)+1
# print(yvals)
# print(dfj)
wk = yvals[dfj == 'Wake']
rm = yvals[dfj == 'REM']
nr = yvals[dfj == 'Non REM']
ttt01 = time.time()
axb[0].plot(0*wk+dx, wk, lw=0, marker=r'$\mathtt{W}$', color='blue', label='Wake', ms=12)
axb[0].plot(0*rm+dx, rm, lw=0, marker=r'$\mathtt{R}$', color='red', label='REM', ms=12)
axb[0].plot(0*nr+dx, nr, lw=0, marker=r'$\mathtt{N}$', color='green', label='Non REM', ms=12)
axb[0].text(dx, ia, scoreTag, rotation=90, ha='center', va='top', fontfamily='monospace')
ttt02 = time.time()
# # markers
# mdic = {}
# mdic['Wake'] = r'$\mathtt{W}$'
# mdic['REM'] = r'$\mathtt{R}$'
# mdic['Non REM'] = r'$\mathtt{N}$'
# cdic = {}
# cdic['Wake'] = 'blue'
# cdic['REM'] = 'red'
# cdic['Non REM'] = 'green'
# yy = dfj['Epoch#'].values
# xx = yy*0+dx
# colors = [cdic.get(x, 'gray') for x in dfj['Score'].values]
# markers = [mdic.get(x, 'x') for x in dfj['Score'].values]
# axb[0].scatter(xx, yy, c=colors, marker=markers)
axb[0].set_ylim([ia-1, ib+1])
t80 = time.time()
#======================================================================
#======================================================================
#======= ANNOTATIONS (render time, timestamp)
try:
for an in self.annotations:
an.remove()
except:
# only should happen once
plt.tight_layout()
plt.subplots_adjust(top=0.9)
t99 = time.time()
#txt = 'time to render: %3.0f ms' % ((t99-t00)*1000)
#tx0 = figx.text(0.99, 0.99, txt, ha='right', va='top', fontsize=24)
txt = datetime.datetime.now().replace(microsecond=0).isoformat().replace('T','\n')
tx1 = figx.text(0.01, 0.99, txt, ha='left', va='top', fontsize=20)
txt = 'trial: %i Epoch: %i' % (self.std.trial , self.viewEpoch)
tx2 = figx.text(0.5, 0.99, txt, ha='center', va='top', fontsize=42)
t_spam = []
t_spam.append('TIMING [ms]\n')
t_spam.append('ax setup : %3.0f\n' % ((t20-t00)*1000))
t_spam.append('signal_ts : %3.0f\n' % ((t40-t20)*1000))
t_spam.append('PCA_ts : %3.0f\n' % ((t50-t40)*1000))
t_spam.append('PCA_2dprj : %3.0f\n' % ((t55-t50)*1000))
t_spam.append('PCA_2dplt : %3.0f\n' % ((t60-t55)*1000))
t_spam.append('features 1: %3.0f\n' % ((t65-t60)*1000))
t_spam.append('features 2: %3.0f\n' % ((t70-t65)*1000))
t_spam.append('features 3: %3.0f\n' % ((t80-t70)*1000))
t_spam.append('tight ly : %3.0f\n' % ((t99-t80)*1000))
t_spam.append('TOTAL : %3.0f\n' % ((t99-t00)*1000))
txt = ''.join(t_spam)
tx3 = figx.text(0.99, 0.01, txt, ha='right', va='bottom', ma='left', fontsize=12, fontfamily='monospace')
self.annotations = [tx1, tx2, tx3]
figx.show()
| [
"matplotlib.pyplot.subplots_adjust",
"sleep_scorer.plottools.plot_pca_crosshair",
"matplotlib.rcParams.update",
"sleep_scorer.plottools.plot_features_template",
"matplotlib.use",
"numpy.asarray",
"datetime.datetime.now",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.subplot2grid",
"time.time",
"sleep_scorer.plottools.plot_2D_hist",
"numpy.arange"
] | [((133, 156), 'matplotlib.use', 'matplotlib.use', (['"""tkAgg"""'], {}), "('tkAgg')\n", (147, 156), False, 'import matplotlib\n'), ((3331, 3361), 'matplotlib.rcParams.update', 'matplotlib.rcParams.update', (['pp'], {}), '(pp)\n', (3357, 3361), False, 'import matplotlib\n'), ((3382, 3417), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(16, 8)', 'dpi': '(80)'}), '(figsize=(16, 8), dpi=80)\n', (3392, 3417), True, 'import matplotlib.pyplot as plt\n'), ((6002, 6013), 'time.time', 'time.time', ([], {}), '()\n', (6011, 6013), False, 'import time\n'), ((6538, 6549), 'time.time', 'time.time', ([], {}), '()\n', (6547, 6549), False, 'import time\n'), ((6590, 6601), 'time.time', 'time.time', ([], {}), '()\n', (6599, 6601), False, 'import time\n'), ((7617, 7628), 'time.time', 'time.time', ([], {}), '()\n', (7626, 7628), False, 'import time\n'), ((8174, 8185), 'time.time', 'time.time', ([], {}), '()\n', (8183, 8185), False, 'import time\n'), ((8352, 8363), 'time.time', 'time.time', ([], {}), '()\n', (8361, 8363), False, 'import time\n'), ((8906, 8917), 'time.time', 'time.time', ([], {}), '()\n', (8915, 8917), False, 'import time\n'), ((8943, 8954), 'time.time', 'time.time', ([], {}), '()\n', (8952, 8954), False, 'import time\n'), ((9303, 9314), 'time.time', 'time.time', ([], {}), '()\n', (9312, 9314), False, 'import time\n'), ((9893, 9904), 'time.time', 'time.time', ([], {}), '()\n', (9902, 9904), False, 'import time\n'), ((10182, 10193), 'time.time', 'time.time', ([], {}), '()\n', (10191, 10193), False, 'import time\n'), ((11792, 11803), 'time.time', 'time.time', ([], {}), '()\n', (11801, 11803), False, 'import time\n'), ((12314, 12325), 'time.time', 'time.time', ([], {}), '()\n', (12323, 12325), False, 'import time\n'), ((12450, 12516), 'sleep_scorer.plottools.plot_2D_hist', 'pt.plot_2D_hist', ([], {'h2d': 'h2d_32', 'ax': 'axx[2]', 'cbar': '(False)'}), '(h2d=h2d_32, ax=axx[2], **plt_hist_kwa, cbar=False)\n', (12465, 12516), True, 'import sleep_scorer.plottools as pt\n'), ((12525, 12593), 'sleep_scorer.plottools.plot_pca_crosshair', 'pt.plot_pca_crosshair', ([], {'ax': 'axx[2]', 'sigX': 'h2d_32.varX', 'sigY': 'h2d_32.varY'}), '(ax=axx[2], sigX=h2d_32.varX, sigY=h2d_32.varY)\n', (12546, 12593), True, 'import sleep_scorer.plottools as pt\n'), ((13345, 13411), 'sleep_scorer.plottools.plot_2D_hist', 'pt.plot_2D_hist', ([], {'h2d': 'h2d_12', 'ax': 'axx[3]', 'cbar': '(False)'}), '(h2d=h2d_12, ax=axx[3], **plt_hist_kwa, cbar=False)\n', (13360, 13411), True, 'import sleep_scorer.plottools as pt\n'), ((13420, 13488), 'sleep_scorer.plottools.plot_pca_crosshair', 'pt.plot_pca_crosshair', ([], {'ax': 'axx[3]', 'sigX': 'h2d_12.varX', 'sigY': 'h2d_12.varY'}), '(ax=axx[3], sigX=h2d_12.varX, sigY=h2d_12.varY)\n', (13441, 13488), True, 'import sleep_scorer.plottools as pt\n'), ((15556, 15567), 'time.time', 'time.time', ([], {}), '()\n', (15565, 15567), False, 'import time\n'), ((15756, 15890), 'sleep_scorer.plottools.plot_features_template', 'pt.plot_features_template', ([], {'y0': 'ia', 'df_feat_index': 'df_feat_index', 'unique_scores': 'unique_scores', 'xpad': '(3)', 'boxheight': '(1)', 'fig': 'figx', 'ax': 'axb'}), '(y0=ia, df_feat_index=df_feat_index, unique_scores\n =unique_scores, xpad=3, boxheight=1, fig=figx, ax=axb)\n', (15781, 15890), True, 'import sleep_scorer.plottools as pt\n'), ((15999, 16010), 'time.time', 'time.time', ([], {}), '()\n', (16008, 16010), False, 'import time\n'), ((17193, 17204), 'time.time', 'time.time', ([], {}), '()\n', (17202, 17204), False, 'import time\n'), ((19129, 
19140), 'time.time', 'time.time', ([], {}), '()\n', (19138, 19140), False, 'import time\n'), ((19581, 19592), 'time.time', 'time.time', ([], {}), '()\n', (19590, 19592), False, 'import time\n'), ((7215, 7269), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(4, 7)', '(0, 0)'], {'rowspan': '(1)', 'colspan': '(4)'}), '((4, 7), (0, 0), rowspan=1, colspan=4)\n', (7231, 7269), True, 'import matplotlib.pyplot as plt\n'), ((7281, 7335), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(4, 7)', '(1, 0)'], {'rowspan': '(1)', 'colspan': '(4)'}), '((4, 7), (1, 0), rowspan=1, colspan=4)\n', (7297, 7335), True, 'import matplotlib.pyplot as plt\n'), ((7347, 7401), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(4, 7)', '(2, 0)'], {'rowspan': '(2)', 'colspan': '(2)'}), '((4, 7), (2, 0), rowspan=2, colspan=2)\n', (7363, 7401), True, 'import matplotlib.pyplot as plt\n'), ((7413, 7467), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(4, 7)', '(2, 2)'], {'rowspan': '(2)', 'colspan': '(2)'}), '((4, 7), (2, 2), rowspan=2, colspan=2)\n', (7429, 7467), True, 'import matplotlib.pyplot as plt\n'), ((7479, 7533), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(4, 7)', '(0, 4)'], {'rowspan': '(4)', 'colspan': '(2)'}), '((4, 7), (0, 4), rowspan=4, colspan=2)\n', (7495, 7533), True, 'import matplotlib.pyplot as plt\n'), ((8969, 8984), 'numpy.asarray', 'np.asarray', (['xxx'], {}), '(xxx)\n', (8979, 8984), True, 'import numpy as np\n'), ((9001, 9016), 'numpy.asarray', 'np.asarray', (['yyy'], {}), '(yyy)\n', (9011, 9016), True, 'import numpy as np\n'), ((10416, 10441), 'numpy.arange', 'np.arange', (['(ia + 1)', '(ib + 1)'], {}), '(ia + 1, ib + 1)\n', (10425, 10441), True, 'import numpy as np\n'), ((16241, 16252), 'time.time', 'time.time', ([], {}), '()\n', (16250, 16252), False, 'import time\n'), ((16757, 16768), 'time.time', 'time.time', ([], {}), '()\n', (16766, 16768), False, 'import time\n'), ((17044, 17055), 'time.time', 'time.time', ([], {}), '()\n', (17053, 17055), False, 'import time\n'), ((18001, 18012), 'time.time', 'time.time', ([], {}), '()\n', (18010, 18012), False, 'import time\n'), ((18462, 18473), 'time.time', 'time.time', ([], {}), '()\n', (18471, 18473), False, 'import time\n'), ((17792, 17809), 'numpy.arange', 'np.arange', (['ia', 'ib'], {}), '(ia, ib)\n', (17801, 17809), True, 'import numpy as np\n'), ((19506, 19524), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (19522, 19524), True, 'import matplotlib.pyplot as plt\n'), ((19537, 19565), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'top': '(0.9)'}), '(top=0.9)\n', (19556, 19565), True, 'import matplotlib.pyplot as plt\n'), ((16793, 16808), 'numpy.asarray', 'np.asarray', (['xxx'], {}), '(xxx)\n', (16803, 16808), True, 'import numpy as np\n'), ((16812, 16827), 'numpy.asarray', 'np.asarray', (['yyy'], {}), '(yyy)\n', (16822, 16827), True, 'import numpy as np\n'), ((19748, 19771), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (19769, 19771), False, 'import datetime\n')] |
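A brief usage sketch, added here for orientation rather than taken from the source; it assumes std, pca and scores objects have already been produced elsewhere by the sleep_scorer pipeline.

import matplotlib.pyplot as plt

# Hypothetical driver: the constructor renders the first frame itself, and the
# figure's key/mouse bindings (arrow keys, clicks, scroll wheel) then handle
# stepping and zooming.
plotter = EEGPlotter(std=std, pca=pca, scores=scores)
plotter.render(viewEpoch=250, viewWidth=30)   # jump to epoch 250, show +/-30 epochs
plt.show()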
import os
import json
from os.path import join, isdir
from shutil import copyfile
import replik.console as console
import replik.constants as const
import replik.utils as utils
import replik.paths as paths
def execute(directory: str, simple=False):
"""pass"""
replik_dir = os.getcwd()
templates_dir = join(replik_dir, "templates")
replik_fname = const.replik_root_file(directory)
console.write(f"initialize at {directory}")
if const.is_replik_project(directory):
console.fail(f"Directory {directory} already contains a 'replik' project")
exit(0) # exit program
# create folder structure
console.info("project name:")
project_name = input()
if len(project_name) == 0:
console.fail("Name must be at least one character long!")
exit(0)
for forbidden_string in const.FORBIDDEN_CHARACTERS:
if forbidden_string in project_name:
console.fail(f"Name must not contain '{forbidden_string}'!")
exit(0)
print("\n")
username = const.get_username().lower().replace(" ", "_")
info = {
"name": project_name,
"username": username,
"tag": f"{username}/replik_{project_name}",
"docker_shm": "32g",
"memory": "8g",
"cpus": "4",
"gpus": "0",
"minimum_required_running_hours": 5,
"is_simple": simple,
"replik_version": const.VERSION,
"stdout_to_file": False,
}
os.makedirs(const.get_local_replik_dir(directory))
os.makedirs(join(const.get_local_replik_dir(directory), "logs"))
docker_dir = join(directory, "docker")
os.makedirs(docker_dir)
# handle .gitignore
gitignore_file = join(directory, ".gitignore")
with open(gitignore_file, "a+") as f:
f.write("output/\n")
f.write(".cache/\n")
f.write(".replik_paths.json\n")
if not simple:
# if not "simple": create additional boilerplate
project_dir = join(directory, project_name)
os.makedirs(project_dir)
script_dir = join(project_dir, "scripts")
os.makedirs(script_dir)
utils.copy2target("demo_script.py", templates_dir, script_dir)
output_dir = join(directory, "output")
os.makedirs(output_dir)
cache_dir = join(directory, ".cache")
os.makedirs(cache_dir)
# default paths
with open(paths.get_simple_path_fname(directory), "w") as f:
json.dump(["output", ".cache"], f, indent=4, sort_keys=True)
# copy docker files
utils.copy2target("hook_post_useradd", templates_dir, docker_dir)
utils.copy2target("hook_pre_useradd", templates_dir, docker_dir)
utils.copy2target("bashhook.sh", templates_dir, docker_dir)
utils.copy2target("killhook.sh", templates_dir, docker_dir)
utils.copy2target("Dockerfile", templates_dir, docker_dir)
dockerignore_tar = join(docker_dir, ".dockerignore")
copyfile(join(templates_dir, "dockerignore"), dockerignore_tar)
with open(replik_fname, "w") as f:
json.dump(info, f, indent=4, sort_keys=True)
| [
"replik.constants.replik_root_file",
"os.makedirs",
"replik.constants.get_local_replik_dir",
"replik.paths.get_simple_path_fname",
"replik.console.fail",
"replik.utils.copy2target",
"os.path.join",
"replik.constants.get_username",
"os.getcwd",
"replik.console.info",
"replik.console.write",
"json.dump",
"replik.constants.is_replik_project"
] | [((284, 295), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (293, 295), False, 'import os\n'), ((316, 345), 'os.path.join', 'join', (['replik_dir', '"""templates"""'], {}), "(replik_dir, 'templates')\n", (320, 345), False, 'from os.path import join, isdir\n'), ((365, 398), 'replik.constants.replik_root_file', 'const.replik_root_file', (['directory'], {}), '(directory)\n', (387, 398), True, 'import replik.constants as const\n'), ((404, 447), 'replik.console.write', 'console.write', (['f"""initialize at {directory}"""'], {}), "(f'initialize at {directory}')\n", (417, 447), True, 'import replik.console as console\n'), ((456, 490), 'replik.constants.is_replik_project', 'const.is_replik_project', (['directory'], {}), '(directory)\n', (479, 490), True, 'import replik.constants as const\n'), ((642, 671), 'replik.console.info', 'console.info', (['"""project name:"""'], {}), "('project name:')\n", (654, 671), True, 'import replik.console as console\n'), ((1604, 1629), 'os.path.join', 'join', (['directory', '"""docker"""'], {}), "(directory, 'docker')\n", (1608, 1629), False, 'from os.path import join, isdir\n'), ((1634, 1657), 'os.makedirs', 'os.makedirs', (['docker_dir'], {}), '(docker_dir)\n', (1645, 1657), False, 'import os\n'), ((1704, 1733), 'os.path.join', 'join', (['directory', '""".gitignore"""'], {}), "(directory, '.gitignore')\n", (1708, 1733), False, 'from os.path import join, isdir\n'), ((2543, 2608), 'replik.utils.copy2target', 'utils.copy2target', (['"""hook_post_useradd"""', 'templates_dir', 'docker_dir'], {}), "('hook_post_useradd', templates_dir, docker_dir)\n", (2560, 2608), True, 'import replik.utils as utils\n'), ((2613, 2677), 'replik.utils.copy2target', 'utils.copy2target', (['"""hook_pre_useradd"""', 'templates_dir', 'docker_dir'], {}), "('hook_pre_useradd', templates_dir, docker_dir)\n", (2630, 2677), True, 'import replik.utils as utils\n'), ((2682, 2741), 'replik.utils.copy2target', 'utils.copy2target', (['"""bashhook.sh"""', 'templates_dir', 'docker_dir'], {}), "('bashhook.sh', templates_dir, docker_dir)\n", (2699, 2741), True, 'import replik.utils as utils\n'), ((2746, 2805), 'replik.utils.copy2target', 'utils.copy2target', (['"""killhook.sh"""', 'templates_dir', 'docker_dir'], {}), "('killhook.sh', templates_dir, docker_dir)\n", (2763, 2805), True, 'import replik.utils as utils\n'), ((2810, 2868), 'replik.utils.copy2target', 'utils.copy2target', (['"""Dockerfile"""', 'templates_dir', 'docker_dir'], {}), "('Dockerfile', templates_dir, docker_dir)\n", (2827, 2868), True, 'import replik.utils as utils\n'), ((2892, 2925), 'os.path.join', 'join', (['docker_dir', '""".dockerignore"""'], {}), "(docker_dir, '.dockerignore')\n", (2896, 2925), False, 'from os.path import join, isdir\n'), ((500, 574), 'replik.console.fail', 'console.fail', (['f"""Directory {directory} already contains a \'replik\' project"""'], {}), '(f"Directory {directory} already contains a \'replik\' project")\n', (512, 574), True, 'import replik.console as console\n'), ((738, 795), 'replik.console.fail', 'console.fail', (['"""Name must be at least one character long!"""'], {}), "('Name must be at least one character long!')\n", (750, 795), True, 'import replik.console as console\n'), ((1477, 1514), 'replik.constants.get_local_replik_dir', 'const.get_local_replik_dir', (['directory'], {}), '(directory)\n', (1503, 1514), True, 'import replik.constants as const\n'), ((1973, 2002), 'os.path.join', 'join', (['directory', 'project_name'], {}), '(directory, project_name)\n', (1977, 2002), False, 'from os.path 
import join, isdir\n'), ((2011, 2035), 'os.makedirs', 'os.makedirs', (['project_dir'], {}), '(project_dir)\n', (2022, 2035), False, 'import os\n'), ((2057, 2085), 'os.path.join', 'join', (['project_dir', '"""scripts"""'], {}), "(project_dir, 'scripts')\n", (2061, 2085), False, 'from os.path import join, isdir\n'), ((2094, 2117), 'os.makedirs', 'os.makedirs', (['script_dir'], {}), '(script_dir)\n', (2105, 2117), False, 'import os\n'), ((2126, 2188), 'replik.utils.copy2target', 'utils.copy2target', (['"""demo_script.py"""', 'templates_dir', 'script_dir'], {}), "('demo_script.py', templates_dir, script_dir)\n", (2143, 2188), True, 'import replik.utils as utils\n'), ((2211, 2236), 'os.path.join', 'join', (['directory', '"""output"""'], {}), "(directory, 'output')\n", (2215, 2236), False, 'from os.path import join, isdir\n'), ((2245, 2268), 'os.makedirs', 'os.makedirs', (['output_dir'], {}), '(output_dir)\n', (2256, 2268), False, 'import os\n'), ((2290, 2315), 'os.path.join', 'join', (['directory', '""".cache"""'], {}), "(directory, '.cache')\n", (2294, 2315), False, 'from os.path import join, isdir\n'), ((2324, 2346), 'os.makedirs', 'os.makedirs', (['cache_dir'], {}), '(cache_dir)\n', (2335, 2346), False, 'import os\n'), ((2939, 2974), 'os.path.join', 'join', (['templates_dir', '"""dockerignore"""'], {}), "(templates_dir, 'dockerignore')\n", (2943, 2974), False, 'from os.path import join, isdir\n'), ((3042, 3086), 'json.dump', 'json.dump', (['info', 'f'], {'indent': '(4)', 'sort_keys': '(True)'}), '(info, f, indent=4, sort_keys=True)\n', (3051, 3086), False, 'import json\n'), ((925, 985), 'replik.console.fail', 'console.fail', (['f"""Name must not contain \'{forbidden_string}\'!"""'], {}), '(f"Name must not contain \'{forbidden_string}\'!")\n', (937, 985), True, 'import replik.console as console\n'), ((1538, 1575), 'replik.constants.get_local_replik_dir', 'const.get_local_replik_dir', (['directory'], {}), '(directory)\n', (1564, 1575), True, 'import replik.constants as const\n'), ((2453, 2513), 'json.dump', 'json.dump', (["['output', '.cache']", 'f'], {'indent': '(4)', 'sort_keys': '(True)'}), "(['output', '.cache'], f, indent=4, sort_keys=True)\n", (2462, 2513), False, 'import json\n'), ((2390, 2428), 'replik.paths.get_simple_path_fname', 'paths.get_simple_path_fname', (['directory'], {}), '(directory)\n', (2417, 2428), True, 'import replik.paths as paths\n'), ((1038, 1058), 'replik.constants.get_username', 'const.get_username', ([], {}), '()\n', (1056, 1058), True, 'import replik.constants as const\n')] |
# Generated by Django 2.1.7 on 2019-03-07 05:36
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0006_investigationinfo'),
]
operations = [
migrations.AddField(
model_name='discorduser',
name='monument_channel_id',
field=models.BigIntegerField(blank=True, null=True),
),
migrations.AddField(
model_name='discorduser',
name='monument_message_id',
field=models.BigIntegerField(blank=True, null=True),
),
]
| [
"django.db.models.BigIntegerField"
] | [((349, 394), 'django.db.models.BigIntegerField', 'models.BigIntegerField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (371, 394), False, 'from django.db import migrations, models\n'), ((532, 577), 'django.db.models.BigIntegerField', 'models.BigIntegerField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (554, 577), False, 'from django.db import migrations, models\n')] |
import numba
TPB = 16
@numba.cuda.jit
def matmul_sm(A, B, C):
# Define an array in the shared memory
# The size and type of the arrays must be known at compile time
sA = numba.cuda.shared.array(shape=(TPB, TPB), dtype=numba.float64)
sB = numba.cuda.shared.array(shape=(TPB, TPB), dtype=numba.float64)
x, y = numba.cuda.grid(2)
tx = numba.cuda.threadIdx.x
ty = numba.cuda.threadIdx.y
bpg = numba.cuda.gridDim.x # blocks per grid
    # All threads in the block must reach the syncthreads() calls below, so
    # out-of-range threads cannot return early; the shared-memory loads and
    # the final store are bounds-checked instead.
    tmp = 0.
    for i in range(bpg):
        # Preload one tile of A and one tile of B into shared memory
        sA[tx, ty] = 0.
        sB[tx, ty] = 0.
        if x < A.shape[0] and (ty + i * TPB) < A.shape[1]:
            sA[tx, ty] = A[x, ty + i * TPB]
        if (tx + i * TPB) < B.shape[0] and y < B.shape[1]:
            sB[tx, ty] = B[tx + i * TPB, y]
        # Wait until all threads finish preloading
        numba.cuda.syncthreads()
        for j in range(TPB):
            tmp += sA[tx, j] * sB[j, ty]
        # Wait until all threads finish computing
        numba.cuda.syncthreads()
    if x < C.shape[0] and y < C.shape[1]:
        C[x, y] = tmp
| [
"numba.cuda.grid",
"numba.cuda.syncthreads",
"numba.cuda.shared.array"
] | [((183, 245), 'numba.cuda.shared.array', 'numba.cuda.shared.array', ([], {'shape': '(TPB, TPB)', 'dtype': 'numba.float64'}), '(shape=(TPB, TPB), dtype=numba.float64)\n', (206, 245), False, 'import numba\n'), ((255, 317), 'numba.cuda.shared.array', 'numba.cuda.shared.array', ([], {'shape': '(TPB, TPB)', 'dtype': 'numba.float64'}), '(shape=(TPB, TPB), dtype=numba.float64)\n', (278, 317), False, 'import numba\n'), ((330, 348), 'numba.cuda.grid', 'numba.cuda.grid', (['(2)'], {}), '(2)\n', (345, 348), False, 'import numba\n'), ((704, 728), 'numba.cuda.syncthreads', 'numba.cuda.syncthreads', ([], {}), '()\n', (726, 728), False, 'import numba\n'), ((859, 883), 'numba.cuda.syncthreads', 'numba.cuda.syncthreads', ([], {}), '()\n', (881, 883), False, 'import numba\n')] |
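The kernel above only runs correctly with a matching host-side launch: the block shape must equal the TPB x TPB shared tile and the grid must cover C. The sketch below is an added example with made-up sizes, not part of the dataset row.

import math
import numpy as np
from numba import cuda

n = 1024                                     # made-up square matrix size
A = np.random.rand(n, n)
B = np.random.rand(n, n)
d_A, d_B = cuda.to_device(A), cuda.to_device(B)
d_C = cuda.device_array((n, n))

threads_per_block = (TPB, TPB)               # must match the shared-memory tile
blocks_per_grid = (math.ceil(n / TPB), math.ceil(n / TPB))
matmul_sm[blocks_per_grid, threads_per_block](d_A, d_B, d_C)
C = d_C.copy_to_host()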
from pandas import read_csv
from pandas.plotting import scatter_matrix
from matplotlib import pyplot
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
from sklearn.metrics import classification_report
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from sklearn import model_selection
from sklearn.ensemble import VotingClassifier
URL= "https://raw.githubusercontent.com/jbrownlee/Datasets/master/iris.csv"
names=['sepal-length','sepal-width','petal-length','petal-width','class']
dataset=read_csv(URL,names=names)
print(dataset.shape)
print(dataset.head(20))
print(dataset.describe())
print(dataset.groupby('class').size())
#univariate plot-box and whisker plots
dataset.plot(kind='box',subplots=True,layout=(2,2),sharex=False,sharey=False)
pyplot.show()
#visualizing through histogram
dataset.hist()
pyplot.show()
#multivariate plots
scatter_matrix(dataset)
pyplot.show()
array=dataset.values
X=array[:,0:4]
Y=array[:,4]
X_train,X_validation,Y_train,Y_validation=train_test_split(X,Y,test_size=0.2,random_state=1)
models=[]
models.append(('LR',LogisticRegression(solver='liblinear', multi_class='ovr')))
models.append(('LDA',LinearDiscriminantAnalysis()))
models.append(('KNN',KNeighborsClassifier()))
models.append(('NB',GaussianNB()))
models.append(('SVM',SVC(gamma='auto')))
results=[]
names=[]
#calculating the accuracy score for each type of algorithm
for name,model in models:
kfold=StratifiedKFold(n_splits=10)
cv_results=cross_val_score(model,X_train,Y_train,cv=kfold,scoring='accuracy')
results.append(cv_results)
names.append(name)
print('%s: %f (%f)'%(name, cv_results.mean(), cv_results.std()))
pyplot.boxplot(results,labels=names)
pyplot.title("Algorithm Comparison")
pyplot.show()
model=SVC(gamma='auto')
model.fit(X_train, Y_train)
pred=model.predict(X_validation)
print(accuracy_score(Y_validation,pred))
print(confusion_matrix(Y_validation,pred))
print(classification_report(Y_validation,pred))
| [
"matplotlib.pyplot.boxplot",
"pandas.plotting.scatter_matrix",
"sklearn.svm.SVC",
"sklearn.metrics.confusion_matrix",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"sklearn.model_selection.cross_val_score",
"sklearn.metrics.classification_report",
"sklearn.naive_bayes.GaussianNB",
"sklearn.neighbors.KNeighborsClassifier",
"sklearn.linear_model.LogisticRegression",
"sklearn.model_selection.StratifiedKFold",
"matplotlib.pyplot.title",
"sklearn.discriminant_analysis.LinearDiscriminantAnalysis",
"sklearn.metrics.accuracy_score",
"matplotlib.pyplot.show"
] | [((928, 954), 'pandas.read_csv', 'read_csv', (['URL'], {'names': 'names'}), '(URL, names=names)\n', (936, 954), False, 'from pandas import read_csv\n'), ((1181, 1194), 'matplotlib.pyplot.show', 'pyplot.show', ([], {}), '()\n', (1192, 1194), False, 'from matplotlib import pyplot\n'), ((1241, 1254), 'matplotlib.pyplot.show', 'pyplot.show', ([], {}), '()\n', (1252, 1254), False, 'from matplotlib import pyplot\n'), ((1275, 1298), 'pandas.plotting.scatter_matrix', 'scatter_matrix', (['dataset'], {}), '(dataset)\n', (1289, 1298), False, 'from pandas.plotting import scatter_matrix\n'), ((1299, 1312), 'matplotlib.pyplot.show', 'pyplot.show', ([], {}), '()\n', (1310, 1312), False, 'from matplotlib import pyplot\n'), ((1404, 1457), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'Y'], {'test_size': '(0.2)', 'random_state': '(1)'}), '(X, Y, test_size=0.2, random_state=1)\n', (1420, 1457), False, 'from sklearn.model_selection import train_test_split\n'), ((2068, 2105), 'matplotlib.pyplot.boxplot', 'pyplot.boxplot', (['results'], {'labels': 'names'}), '(results, labels=names)\n', (2082, 2105), False, 'from matplotlib import pyplot\n'), ((2105, 2141), 'matplotlib.pyplot.title', 'pyplot.title', (['"""Algorithm Comparison"""'], {}), "('Algorithm Comparison')\n", (2117, 2141), False, 'from matplotlib import pyplot\n'), ((2142, 2155), 'matplotlib.pyplot.show', 'pyplot.show', ([], {}), '()\n', (2153, 2155), False, 'from matplotlib import pyplot\n'), ((2163, 2180), 'sklearn.svm.SVC', 'SVC', ([], {'gamma': '"""auto"""'}), "(gamma='auto')\n", (2166, 2180), False, 'from sklearn.svm import SVC\n'), ((1834, 1862), 'sklearn.model_selection.StratifiedKFold', 'StratifiedKFold', ([], {'n_splits': '(10)'}), '(n_splits=10)\n', (1849, 1862), False, 'from sklearn.model_selection import StratifiedKFold\n'), ((1878, 1948), 'sklearn.model_selection.cross_val_score', 'cross_val_score', (['model', 'X_train', 'Y_train'], {'cv': 'kfold', 'scoring': '"""accuracy"""'}), "(model, X_train, Y_train, cv=kfold, scoring='accuracy')\n", (1893, 1948), False, 'from sklearn.model_selection import cross_val_score\n'), ((2249, 2283), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['Y_validation', 'pred'], {}), '(Y_validation, pred)\n', (2263, 2283), False, 'from sklearn.metrics import accuracy_score\n'), ((2290, 2326), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['Y_validation', 'pred'], {}), '(Y_validation, pred)\n', (2306, 2326), False, 'from sklearn.metrics import confusion_matrix\n'), ((2333, 2374), 'sklearn.metrics.classification_report', 'classification_report', (['Y_validation', 'pred'], {}), '(Y_validation, pred)\n', (2354, 2374), False, 'from sklearn.metrics import classification_report\n'), ((1485, 1542), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'solver': '"""liblinear"""', 'multi_class': '"""ovr"""'}), "(solver='liblinear', multi_class='ovr')\n", (1503, 1542), False, 'from sklearn.linear_model import LogisticRegression\n'), ((1566, 1594), 'sklearn.discriminant_analysis.LinearDiscriminantAnalysis', 'LinearDiscriminantAnalysis', ([], {}), '()\n', (1592, 1594), False, 'from sklearn.discriminant_analysis import LinearDiscriminantAnalysis\n'), ((1618, 1640), 'sklearn.neighbors.KNeighborsClassifier', 'KNeighborsClassifier', ([], {}), '()\n', (1638, 1640), False, 'from sklearn.neighbors import KNeighborsClassifier\n'), ((1663, 1675), 'sklearn.naive_bayes.GaussianNB', 'GaussianNB', ([], {}), '()\n', (1673, 1675), False, 'from sklearn.naive_bayes import 
GaussianNB\n'), ((1699, 1716), 'sklearn.svm.SVC', 'SVC', ([], {'gamma': '"""auto"""'}), "(gamma='auto')\n", (1702, 1716), False, 'from sklearn.svm import SVC\n')] |
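A small follow-on, added here and not in the original script: once the SVC is fitted on the training split it can classify a single new measurement directly; the numbers below are made up.

new_flower = [[5.1, 3.5, 1.4, 0.2]]   # sepal length/width, petal length/width in cm (made-up)
print(model.predict(new_flower))      # prints the predicted class, e.g. ['Iris-setosa']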
import numpy as np
from NonLinearityFunctions import linear
def calc_theta_num(n, k):
return np.math.factorial(n+k)/(np.math.factorial(k)*np.math.factorial(n))
def inv_theta_num(n, x):
k = 0
while calc_theta_num(n, k) < x:
k += 1
return k
class Polynomial:
def __init__(self, dim, deg, func):
self.degree = deg
self.dimension = dim
self.function = func
self.theta = [np.random.rand() for _ in xrange(calc_theta_num(dim, deg))]
def __call__(self, point):
value = 0
deg = 0
degrees_arr = [0] * self.dimension
for i in xrange(len(self.theta)):
theta_mul = self.theta[i]
for n in xrange(len(degrees_arr)):
theta_mul *= point[n] ** degrees_arr[n]
value += theta_mul
if degrees_arr[-1] == deg:
deg += 1
degrees_arr = [deg] + ([0] * (self.dimension - 1))
else:
for j in xrange(len(degrees_arr) - 2, -1, -1):
if degrees_arr[j] != 0:
degrees_arr[j] -= 1
degrees_arr[j + 1] += 1
for t in xrange(j+2, len(degrees_arr)):
degrees_arr[j + 1] += degrees_arr[t]
degrees_arr[t] = 0
break
return self.function(value)
def cost(self, x, y):
j = 0
for i in xrange(len(x)):
            j += (self(x[i]) - y[i]) ** 2
return j
def hypot(self, points):
a = []
for i in points:
a.append(self(i))
return a
def theta_x(self, point, index):
if index == 0:
return 1
deg = inv_theta_num(self.dimension, index + 1) - 1 # degree
degree_arr = [deg + 1]+([0]*(self.dimension-1))
for i in xrange(calc_theta_num(self.dimension, deg), index):
for j in xrange(len(degree_arr)-2,-1,-1):
if degree_arr[j] != 0:
degree_arr[j] -= 1
degree_arr[j+1] += 1
for t in xrange(j+2, len(degree_arr)):
degree_arr[j+1] += degree_arr[t]
degree_arr[t] = 0
break
thx = 1
for k in xrange(len(degree_arr)):
thx *= point[k] ** degree_arr[k]
return thx
def iteration(self, x, y, alpha=0.001):
derivatives = [0]*len(self.theta)
for i in xrange(len(x)):
output = self(x[i])
for j in xrange(len(self.theta)):
derivatives[j] += (output-y[i])*self.function(output, 1)*self.theta_x(x[i],j)
for i in xrange(len(self.theta)):
self.theta[i] -= alpha*derivatives[i]
class PolynomialLearner:
def __init__(self, out, dim=None, deg=None, kernel=None, functions=None):
assert (dim is not None and deg is not None) or kernel is not None
self.kernel = kernel
self.dimension = len(kernel) if kernel else dim
self.degree = deg if deg is not None else 1
if hasattr(functions, '__iter__') or hasattr(functions, '__getitem__'):
assert out == len(functions)
else:
functions = (functions if functions is not None else linear,)*out
self.polynomials = [Polynomial(dim=self.dimension, deg=self.degree, func=functions[i]) for i in xrange(out)]
self.outputs = out
def __len__(self):
return len(self.polynomials)
def __call__(self, point):
out = np.zeros(self.outputs)
if self.kernel is not None:
point = self.kernel.prepare(point)
for i in xrange(len(self.polynomials)):
out[i] = self.polynomials[i](point)
return out
def hypot(self, points):
a = []
for i in self.polynomials:
a.append(i.hypot(points if self.kernel is None else self.kernel.prepare(points)))
return np.array(a).T
def iteration(self, x, y, alpha=0.001):
y = np.array(y).T
for i in xrange(len(self.polynomials)):
self.polynomials[i].iteration(x, y[i], alpha) | [
"numpy.array",
"numpy.zeros",
"numpy.random.rand",
"numpy.math.factorial"
] | [((104, 128), 'numpy.math.factorial', 'np.math.factorial', (['(n + k)'], {}), '(n + k)\n', (121, 128), True, 'import numpy as np\n'), ((3674, 3696), 'numpy.zeros', 'np.zeros', (['self.outputs'], {}), '(self.outputs)\n', (3682, 3696), True, 'import numpy as np\n'), ((128, 148), 'numpy.math.factorial', 'np.math.factorial', (['k'], {}), '(k)\n', (145, 148), True, 'import numpy as np\n'), ((149, 169), 'numpy.math.factorial', 'np.math.factorial', (['n'], {}), '(n)\n', (166, 169), True, 'import numpy as np\n'), ((453, 469), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (467, 469), True, 'import numpy as np\n'), ((4095, 4106), 'numpy.array', 'np.array', (['a'], {}), '(a)\n', (4103, 4106), True, 'import numpy as np\n'), ((4169, 4180), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (4177, 4180), True, 'import numpy as np\n')] |
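An added toy example of driving PolynomialLearner on a small 2-D regression problem; it is not from the source and assumes NonLinearityFunctions.linear is an identity-style activation that also accepts the derivative flag used inside iteration().

# Made-up training data: four 2-D points, one target value per point.
x = [[0.0, 0.0], [0.5, 1.0], [1.0, 0.5], [1.5, 2.0]]
y = [[0.0], [1.3], [1.1], [3.2]]
learner = PolynomialLearner(out=1, dim=2, deg=2)   # quadratic surface in two variables
for _ in range(500):
    learner.iteration(x, y, alpha=0.01)          # one batch gradient-descent sweep
print(learner.hypot(x))                          # fitted values at the training points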
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 30 13:55:29 2020
@author: s143818
"""
import cv2
import numpy as np
LOW_I_CUTOFF = 0.1 * 255
HIGH_I_CUTOFF = 0.9 * 255
def calcProjectionM(R, t, f, A):
    # P = A [R | t]: the 3x3 camera matrix times the 3x4 extrinsics must be a
    # true matrix product; the original element-wise '*' cannot yield a valid
    # projection matrix.
    P = A @ np.c_[R, t]
    return P
def loadImInGrayscale(images):
gray = []
for image in images:
gray.append(cv2.imread(image, cv2.IMREAD_GRAYSCALE))
#cv2.imshow('Window',gray(0))
#cv2.waitKey(0)
return gray
def getMask(image):
imageSize = image.shape[::-1]
mask = np.ones((imageSize[1], imageSize[0]))
for x in range(imageSize[1]):
for y in range(imageSize[0]):
intensity = image[x, y]
if (intensity < LOW_I_CUTOFF) or (HIGH_I_CUTOFF < intensity):
mask[x, y] = 0
return mask
# NOT WORKING YET!
def decodePhase(imPrim, imSeq):
    F_primary = np.fft.fft(imPrim)
    F_seq = np.fft.fft(imSeq)
channel = 1
imPrim = np.angle(F_primary[channel])
imSeq = np.angle(F_seq[channel])
    phase = (imPrim - imSeq) % (2 * np.pi)
return phase
| [
"numpy.fft.fft",
"cv2.imread",
"numpy.ones",
"numpy.angle"
] | [((580, 617), 'numpy.ones', 'np.ones', (['(imageSize[1], imageSize[0])'], {}), '((imageSize[1], imageSize[0]))\n', (587, 617), True, 'import numpy as np\n'), ((915, 933), 'numpy.fft.fft', 'np.fft.fft', (['imPrim'], {}), '(imPrim)\n', (925, 933), True, 'import numpy as np\n'), ((946, 963), 'numpy.fft.fft', 'np.fft.fft', (['imSeq'], {}), '(imSeq)\n', (956, 963), True, 'import numpy as np\n'), ((1005, 1033), 'numpy.angle', 'np.angle', (['F_primary[channel]'], {}), '(F_primary[channel])\n', (1013, 1033), True, 'import numpy as np\n'), ((1046, 1070), 'numpy.angle', 'np.angle', (['F_seq[channel]'], {}), '(F_seq[channel])\n', (1054, 1070), True, 'import numpy as np\n'), ((403, 442), 'cv2.imread', 'cv2.imread', (['image', 'cv2.IMREAD_GRAYSCALE'], {}), '(image, cv2.IMREAD_GRAYSCALE)\n', (413, 442), False, 'import cv2\n')] |
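For reference, an added sketch (not the author's code) of the conventional N-step phase-shifting decode that a function like decodePhase is usually after: with fringe images shifted by 2*pi*k/N, the wrapped phase at each pixel is the arctangent of the sine- and cosine-weighted sums.

import numpy as np

def decode_phase_nstep(images):
    # images: list of N grayscale frames captured with phase shifts 2*pi*k/N
    n = len(images)
    num = np.zeros_like(images[0], dtype=np.float64)
    den = np.zeros_like(images[0], dtype=np.float64)
    for k, img in enumerate(images):
        num += img * np.sin(2 * np.pi * k / n)
        den += img * np.cos(2 * np.pi * k / n)
    return np.arctan2(num, den)   # wrapped phase in (-pi, pi]; sign convention varies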
import copy
import logging
from typing import Generator, Optional
from thenewboston_node.business_logic.models.block import Block
from thenewboston_node.business_logic.models.blockchain_state import BlockchainState
from .base import BlockchainBase
logger = logging.getLogger(__name__)
class MemoryBlockchain(BlockchainBase):
"""
    A blockchain implementation intended primarily for unit testing and for use as an example implementation
"""
def __init__(
self,
*,
account_root_files: list[BlockchainState] = None,
blocks: Optional[list[Block]] = None,
drop_intermediate_account_root_files=True,
**kwargs,
):
super().__init__(**kwargs)
self.account_root_files: list[BlockchainState] = (
copy.deepcopy(account_root_files) if account_root_files else []
)
self.blocks: list[Block] = copy.deepcopy(blocks) if blocks else []
self.drop_intermediate_account_root_files = drop_intermediate_account_root_files
# Account root files related implemented methods
def persist_account_root_file(self, account_root_file: BlockchainState):
self.account_root_files.append(account_root_file)
def get_account_root_file_count(self) -> int:
return len(self.account_root_files)
def iter_account_root_files(self) -> Generator[BlockchainState, None, None]:
yield from self.account_root_files
def iter_account_root_files_reversed(self) -> Generator[BlockchainState, None, None]:
yield from reversed(self.account_root_files)
def make_account_root_file(self):
super().make_account_root_file()
account_root_files = self.account_root_files
if self.drop_intermediate_account_root_files and len(account_root_files) > 2:
self.account_root_files = [account_root_files[0], account_root_files[-1]]
# Blocks related implemented methods
def persist_block(self, block: Block):
self.blocks.append(copy.deepcopy(block))
def get_block_by_number(self, block_number: int) -> Optional[Block]:
if block_number < 0:
raise ValueError('block_number must be greater or equal to 0')
blocks = self.blocks
if not blocks:
return None
head_block_number = blocks[-1].message.block_number
if block_number > head_block_number:
return None
block_index = block_number - head_block_number - 1
try:
return blocks[block_index]
except IndexError:
assert blocks[0].message.block_number > block_number
return None
def get_block_count(self) -> int:
return len(self.blocks)
def iter_blocks(self) -> Generator[Block, None, None]:
yield from self.blocks
def iter_blocks_reversed(self) -> Generator[Block, None, None]:
yield from reversed(self.blocks)
def iter_blocks_from(self, block_number: int) -> Generator[Block, None, None]:
# TODO(dmu) MEDIUM: It is questionable whether this implementation is faster than the base implementation
# (because of extra memory use)
blocks = self.blocks
if blocks:
first_block_number = blocks[0].message.block_number
if first_block_number > block_number:
logger.warning('Missing blocks from %s to %s', block_number, first_block_number - 1)
start = 0
else:
start = block_number - first_block_number
else:
start = 0
yield from self.blocks[start:]
| [
"logging.getLogger",
"copy.deepcopy"
] | [((260, 287), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (277, 287), False, 'import logging\n'), ((785, 818), 'copy.deepcopy', 'copy.deepcopy', (['account_root_files'], {}), '(account_root_files)\n', (798, 818), False, 'import copy\n'), ((894, 915), 'copy.deepcopy', 'copy.deepcopy', (['blocks'], {}), '(blocks)\n', (907, 915), False, 'import copy\n'), ((1993, 2013), 'copy.deepcopy', 'copy.deepcopy', (['block'], {}), '(block)\n', (2006, 2013), False, 'import copy\n')] |
import os
import shutil as su
import argparse as ap
__doc__=\
"""
Overview:
Read the specified file in binary mode and write it out as a txt file.
"""
# CONST
VERSION = "0.0.1"
OUTPUT_DIR = "./result"
OUTPUT_FILE = "template.dat"
def main():
#---------------------------------------------------
# Argument check
#---------------------------------------------------
parser = ap.ArgumentParser(description=__doc__,
formatter_class=ap.RawDescriptionHelpFormatter)
# must args
parser.add_argument("filename", help="target file")
# option args
parser.add_argument("-v", "--version", action="version", version=VERSION)
parser.add_argument("-s", "--start", type=int, default=1,help="開始位置[int]")
parser.add_argument("-e", "--end" , type=int, default=0,help="終了位置[int]")
args = parser.parse_args()
#---------------------------------------------------
# Output destination
#---------------------------------------------------
# Remove the directory if it already exists, then create it fresh
if os.path.isdir(OUTPUT_DIR):
su.rmtree(OUTPUT_DIR)
os.makedirs(OUTPUT_DIR)
#---------------------------------------------------
# Main processing
#---------------------------------------------------
# Read as binary, write out as txt
print("start={},end={}".format(args.start,args.end))
with open(args.filename, "rb") as rf:
data = rf.read()
wpath = os.path.join(OUTPUT_DIR,OUTPUT_FILE)
with open(wpath, "w") as wf:
wf.write(str(data))
if __name__ == "__main__":
main()
| [
"os.makedirs",
"argparse.ArgumentParser",
"os.path.join",
"os.path.isdir",
"shutil.rmtree"
] | [((348, 439), 'argparse.ArgumentParser', 'ap.ArgumentParser', ([], {'description': '__doc__', 'formatter_class': 'ap.RawDescriptionHelpFormatter'}), '(description=__doc__, formatter_class=ap.\n RawDescriptionHelpFormatter)\n', (365, 439), True, 'import argparse as ap\n'), ((979, 1004), 'os.path.isdir', 'os.path.isdir', (['OUTPUT_DIR'], {}), '(OUTPUT_DIR)\n', (992, 1004), False, 'import os\n'), ((1040, 1063), 'os.makedirs', 'os.makedirs', (['OUTPUT_DIR'], {}), '(OUTPUT_DIR)\n', (1051, 1063), False, 'import os\n'), ((1014, 1035), 'shutil.rmtree', 'su.rmtree', (['OUTPUT_DIR'], {}), '(OUTPUT_DIR)\n', (1023, 1035), True, 'import shutil as su\n'), ((1357, 1394), 'os.path.join', 'os.path.join', (['OUTPUT_DIR', 'OUTPUT_FILE'], {}), '(OUTPUT_DIR, OUTPUT_FILE)\n', (1369, 1394), False, 'import os\n')] |
"""Models for the comments app."""
from __future__ import unicode_literals
from django.utils.encoding import python_2_unicode_compatible
from django.db import models
from django.utils.timezone import now
from djangopress.blog.models import Post
@python_2_unicode_compatible
class Comment(models.Model):
"""Comment model."""
content = models.TextField()
author = models.CharField(max_length=255)
author_email = models.EmailField()
author_website = models.URLField(blank=True, null=True)
creation_date = models.DateTimeField(default=now)
post = models.ForeignKey(Post,
on_delete=models.CASCADE
)
def __str__(self):
return '%s on %s' % (self.author, self.post)
| [
"django.db.models.EmailField",
"django.db.models.TextField",
"django.db.models.ForeignKey",
"django.db.models.DateTimeField",
"django.db.models.URLField",
"django.db.models.CharField"
] | [((346, 364), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (362, 364), False, 'from django.db import models\n'), ((378, 410), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (394, 410), False, 'from django.db import models\n'), ((430, 449), 'django.db.models.EmailField', 'models.EmailField', ([], {}), '()\n', (447, 449), False, 'from django.db import models\n'), ((471, 509), 'django.db.models.URLField', 'models.URLField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (486, 509), False, 'from django.db import models\n'), ((530, 563), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'default': 'now'}), '(default=now)\n', (550, 563), False, 'from django.db import models\n'), ((575, 624), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Post'], {'on_delete': 'models.CASCADE'}), '(Post, on_delete=models.CASCADE)\n', (592, 624), False, 'from django.db import models\n')] |
import sqlite3
import numpy as np
from PIL import Image
# PATH = os.path.dirname(__file__)
DB_PATH = 'db/signals.sqlite'#os.path.join(PATH, 'signals.sqlite')
# TODO: fix problem on linux:
# File "main.py", line 25, in <module>
# File "db/video_data_db.py", line 14, in prepare_imageDB
# sqlite3.OperationalError: unable to open database file
# print(DB_PATH)
def prepare_imageDB():
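# (Re)create the images table holding a timestamp, state label, frame size and the raw pixel bytes per row.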
conn = sqlite3.connect(DB_PATH)
cursor = conn.cursor()
cursor.execute("DROP TABLE IF EXISTS images;")
cursor.execute(
"CREATE TABLE images"
"( id INTEGER PRIMARY KEY AUTOINCREMENT, dateTime TEXT, state Text, size TEXT, image BLOB);")
conn.commit()
conn.close()
def insert_image(data):
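# data must match the INSERT column order: (dateTime, image bytes, state, size string).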
conn = sqlite3.connect(DB_PATH)
cursor = conn.cursor()
cursor.execute(f"""
INSERT INTO images
(dateTime, image, state, size)
VALUES (?, ?, ?, ?);
""", data)
conn.commit()
conn.close()
def get_image(ID):
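# Load the stored bytes plus the space-separated size string, rebuild the uint8 array and return a PIL image.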
conn = sqlite3.connect(DB_PATH)
cursor = conn.execute("SELECT image, size from images WHERE id = ?", [ID])
row = cursor.fetchone()
img = row[0]
size = tuple(map(lambda x: int(x), row[1].split(" ")))
np_arr = np.frombuffer(img, dtype=np.uint8).reshape(size)
conn.commit()
conn.close()
return Image.fromarray(np_arr)
| [
"numpy.frombuffer",
"PIL.Image.fromarray",
"sqlite3.connect"
] | [((404, 428), 'sqlite3.connect', 'sqlite3.connect', (['DB_PATH'], {}), '(DB_PATH)\n', (419, 428), False, 'import sqlite3\n'), ((733, 757), 'sqlite3.connect', 'sqlite3.connect', (['DB_PATH'], {}), '(DB_PATH)\n', (748, 757), False, 'import sqlite3\n'), ((993, 1017), 'sqlite3.connect', 'sqlite3.connect', (['DB_PATH'], {}), '(DB_PATH)\n', (1008, 1017), False, 'import sqlite3\n'), ((1312, 1335), 'PIL.Image.fromarray', 'Image.fromarray', (['np_arr'], {}), '(np_arr)\n', (1327, 1335), False, 'from PIL import Image\n'), ((1216, 1250), 'numpy.frombuffer', 'np.frombuffer', (['img'], {'dtype': 'np.uint8'}), '(img, dtype=np.uint8)\n', (1229, 1250), True, 'import numpy as np\n')] |
import hashlib
import json
import logging
import os
import sys
from collections import defaultdict
import requests
from GenomeFileUtil.authclient import KBaseAuth as _KBaseAuth
from installed_clients.AbstractHandleClient import AbstractHandle as HandleService
from installed_clients.AssemblySequenceAPIServiceClient import AssemblySequenceAPI
from installed_clients.DataFileUtilClient import DataFileUtil
from installed_clients.WSLargeDataIOClient import WsLargeDataIO
from GenomeFileUtil.core import GenomeUtils
MAX_GENOME_SIZE = 2**30
class GenomeInterface:
def __init__(self, config):
self.handle_url = config.handleURL
self.shock_url = config.shockURL
self.sw_url = config.srvWizURL
self.token = config.token
self.auth_service_url = config.authServiceUrl
self.callback_url = config.callbackURL
self.re_api_url = config.re_api_url
self.auth_client = _KBaseAuth(self.auth_service_url)
self.dfu = DataFileUtil(self.callback_url)
self.taxon_wsname = config.raw['taxon-workspace-name']
self.scratch = config.raw['scratch']
self.ws_large_data = WsLargeDataIO(self.callback_url)
@staticmethod
def _validate_save_one_genome_params(params):
"""
_validate_save_one_genome_params:
validates params passed to save_one_genome method
"""
logging.info('start validating save_one_genome params')
# check for required parameters
for p in ['workspace', 'name', 'data']:
if p not in params:
raise ValueError(
'"{}" parameter is required, but missing'.format(p))
def _check_shock_response(self, response, errtxt):
"""
_check_shock_response: check shock node response (Copied from DataFileUtil)
"""
logging.info('start checking shock response')
if not response.ok:
try:
err = json.loads(response.content)['error'][0]
except Exception:
# this means shock is down or not responding.
logging.error("Couldn't parse response error content from Shock: " + response.content)
response.raise_for_status()
raise ValueError(errtxt + str(err))
def _own_handle(self, genome_data, handle_property):
"""
_own_handle: check that handle_property point to shock nodes owned by calling user
"""
logging.info('start checking handle {} ownership'.format(handle_property))
if handle_property in genome_data:
handle_id = genome_data[handle_property]
hs = HandleService(self.handle_url, token=self.token)
handles = hs.hids_to_handles([handle_id])
shock_id = handles[0]['id']
# Copy from DataFileUtil.own_shock_node implementation:
header = {'Authorization': 'Oauth {}'.format(self.token)}
res = requests.get(self.shock_url + '/node/' + shock_id +
'/acl/?verbosity=full',
headers=header, allow_redirects=True)
self._check_shock_response(
res, 'Error getting ACLs for Shock node {}: '.format(shock_id))
owner = res.json()['data']['owner']['username']
user_id = self.auth_client.get_user(self.token)
if owner != user_id:
logging.info('start copying node to owner: {}'.format(user_id))
dfu_shock = self.dfu.copy_shock_node({'shock_id': shock_id,
'make_handle': True})
handle_id = dfu_shock['handle']['hid']
genome_data[handle_property] = handle_id
def _check_dna_sequence_in_features(self, genome):
"""
_check_dna_sequence_in_features: check dna sequence in each feature
"""
logging.info('start checking dna sequence in each feature')
if 'features' in genome:
features_to_work = {}
for feature in genome['features']:
if not ('dna_sequence' in feature and feature['dna_sequence']):
features_to_work[feature['id']] = feature['location']
if len(features_to_work) > 0:
aseq = AssemblySequenceAPI(self.sw_url, token=self.token)
get_dna_params = {'requested_features': features_to_work}
if 'assembly_ref' in genome:
get_dna_params['assembly_ref'] = genome['assembly_ref']
elif 'contigset_ref' in genome:
get_dna_params['contigset_ref'] = genome['contigset_ref']
else:
# Nothing to do (it may be test genome without contigs)...
return
dna_sequences = aseq.get_dna_sequences(get_dna_params)[
'dna_sequences']
for feature in genome['features']:
if feature['id'] in dna_sequences:
feature['dna_sequence'] = dna_sequences[feature['id']]
feature['dna_sequence_length'] = len(feature['dna_sequence'])
def get_one_genome(self, params):
"""Fetch a genome using WSLargeDataIO and return it as a python dict"""
logging.info('fetching genome object')
res = self.ws_large_data.get_objects(params)['data'][0]
data = json.load(open(res['data_json_file']))
return data, res['info']
# return self.dfu.get_objects(params)['data'][0]
def save_one_genome(self, params):
logging.info('start saving genome object')
self._validate_save_one_genome_params(params)
workspace = params['workspace']
name = params['name']
data = params['data']
# XXX there is no `workspace_datatype` param in the spec
ws_datatype = params.get('workspace_datatype', "KBaseGenomes.Genome")
# XXX there is no `meta` param in the spec
meta = params.get('meta', {})
if "AnnotatedMetagenomeAssembly" in ws_datatype:
if params.get('upgrade') or 'feature_counts' not in data:
data = self._update_metagenome(data)
else:
if params.get('upgrade') or 'feature_counts' not in data:
data = self._update_genome(data)
# check all handles point to shock nodes owned by calling user
self._own_handle(data, 'genbank_handle_ref')
self._own_handle(data, 'gff_handle_ref')
if "AnnotatedMetagenomeAssembly" not in ws_datatype:
self._check_dna_sequence_in_features(data)
data['warnings'] = self.validate_genome(data)
# sort data
data = GenomeUtils.sort_dict(data)
# dump genome to scratch for upload
data_path = os.path.join(self.scratch, name + ".json")
json.dump(data, open(data_path, 'w'))
if 'hidden' in params and str(params['hidden']).lower() in ('yes', 'true', 't', '1'):
hidden = 1
else:
hidden = 0
if isinstance(workspace, int) or workspace.isdigit():
workspace_id = workspace
else:
workspace_id = self.dfu.ws_name_to_id(workspace)
save_params = {'id': workspace_id,
'objects': [{'type': ws_datatype,
'data_json_file': data_path,
'name': name,
'meta': meta,
'hidden': hidden}]}
dfu_oi = self.ws_large_data.save_objects(save_params)[0]
returnVal = {'info': dfu_oi, 'warnings': data.get('warnings', [])}
return returnVal
@staticmethod
def determine_tier(source):
"""
Given a user provided source parameter, assign a source and genome tier
"""
low_source = source.lower()
if 'refseq' in low_source:
if 'reference' in low_source:
return "RefSeq", ['Reference', 'Representative',
'ExternalDB']
if 'representative' in low_source:
return "RefSeq", ['Representative', 'ExternalDB']
if 'user' in low_source:
return "RefSeq", ['ExternalDB', 'User']
return "RefSeq", ['ExternalDB']
if 'phytozome' in low_source:
if 'flagship' in source:
return "Phytosome", ['Reference', 'Representative',
'ExternalDB']
return "Phytosome", ['Representative', 'ExternalDB']
if 'ensembl' in low_source:
if 'user' in low_source:
return "Ensembl", ['ExternalDB', 'User']
return "Ensembl", ['Representative', 'ExternalDB']
return source, ['User']
def _update_metagenome(self, genome):
"""Checks for missing required fields and fixes breaking changes"""
if 'molecule_type' not in genome:
genome['molecule_type'] = 'Unknown'
def _update_genome(self, genome):
"""Checks for missing required fields and fixes breaking changes"""
# do top level updates
ontologies_present = defaultdict(dict) # type: dict
ontologies_present.update(genome.get('ontologies_present', {}))
ontology_events = genome.get('ontology_events', [])
# NOTE: 'genome_tiers' not in Metagenome spec
if 'genome_tiers' not in genome:
genome['source'], genome['genome_tiers'] = self.determine_tier(genome['source'])
if 'molecule_type' not in genome:
genome['molecule_type'] = 'Unknown'
# If an NCBI taxonomy ID is provided, fetch additional data about the taxon
# NOTE: Metagenome object does not have a 'taxon_assignments' field
if 'taxon_assignments' in genome and genome['taxon_assignments'].get('ncbi'):
tax_id = int(genome['taxon_assignments']['ncbi'])
GenomeUtils.set_taxon_data(tax_id, self.re_api_url, genome)
else:
GenomeUtils.set_default_taxon_data(genome)
if any([x not in genome for x in ('dna_size', 'md5', 'gc_content', 'num_contigs')]):
if 'assembly_ref' in genome:
assembly_data = self.dfu.get_objects(
{'object_refs': [genome['assembly_ref']],
'ignore_errors': 0})['data'][0]['data']
genome["gc_content"] = assembly_data['gc_content']
genome["dna_size"] = assembly_data['dna_size']
genome["md5"] = assembly_data['md5']
genome["num_contigs"] = assembly_data['num_contigs']
if assembly_data.get('type'):
genome['genome_type'] = assembly_data['type']
elif 'contigset_ref' in genome:
contig_data = self.dfu.get_objects(
{'object_refs': [genome['contigset_ref']],
'included': ['contigs/[*]/length', 'md5'],
'ignore_errors': 0})['data'][0]['data']
genome["gc_content"] = None
genome["dna_size"] = sum((c['length'] for c in contig_data['contigs']))
genome["md5"] = contig_data['md5']
genome["num_contigs"] = len(contig_data['contigs'])
# NOTE: metagenomes do not have the following fields
if 'cdss' not in genome:
genome['cdss'] = []
if 'mrnas' not in genome:
genome['mrnas'] = []
if 'non_coding_features' not in genome:
genome['non_coding_features'] = []
# do feature level updates
retained_features = []
type_counts = defaultdict(int)
for field in ('mrnas', 'cdss', 'features'):
for i, feat in enumerate(genome.get(field, [])):
if 'function' in feat and not isinstance(feat, list):
feat['functions'] = feat['function'].split('; ')
del feat['function']
if 'aliases' in feat:
if not feat['aliases']:
del feat['aliases']
elif not isinstance(feat['aliases'][0], (list, tuple)):
feat['aliases'] = [['gene_synonym', x] for x in feat['aliases']]
if 'type' in feat:
type_counts[feat['type']] += 1
for ontology, terms in feat.get('ontology_terms', {}).items():
for term in terms.values():
if isinstance(term, list):
continue
ontologies_present[ontology][term['id']] = term['term_name']
term_evidence = []
for ev in term['evidence']:
ev['id'] = ontology
if "ontology_ref" in term:
ev['ontology_ref'] = term["ontology_ref"]
if ev not in ontology_events:
ontology_events.append(ev)
term_evidence.append(ontology_events.index(ev))
feat['ontology_terms'][ontology][term['id']] = term_evidence
# remove deprecated fields
feat.pop('protein_families', None)
feat.pop('atomic_regulons', None)
feat.pop('orthologs', None)
feat.pop('coexpressed_fids', None)
feat.pop('publications', None)
feat.pop('regulon_data', None)
feat.pop('subsystem_data', None)
if 'dna_sequence_length' not in feat:
feat['dna_sequence_length'] = sum(x[3] for x in feat['location'])
if 'protein_translation' in feat and 'protein_md5' not in feat:
feat['protein_md5'] = hashlib.md5(
feat.get('protein_translation', '').encode('utf8')
).hexdigest()
# split all the stuff lumped together in old versions into the
# right arrays
if field == 'features':
if feat.get('type', 'gene') == 'gene':
if not feat.get('cdss', []):
type_counts['non_coding_genes'] += 1
genome['non_coding_features'].append(feat)
else:
retained_features.append(feat)
elif feat.get('type', 'gene') == 'CDS':
if 'parent_gene' not in feat:
feat['parent_gene'] = ''
genome['cdss'].append(feat)
elif feat.get('type', 'gene') == 'mRNA':
if 'parent_gene' not in feat:
feat['parent_gene'] = ''
genome['mrnas'].append(feat)
genome['features'] = retained_features
if ontology_events:
genome['ontology_events'] = ontology_events
if ontologies_present:
genome['ontologies_present'] = ontologies_present
type_counts['mRNA'] = len(genome.get('mrnas', []))
type_counts['CDS'] = len(genome.get('cdss', []))
type_counts['protein_encoding_gene'] = len(genome['features'])
type_counts['non_coding_features'] = len(
genome.get('non_coding_features', []))
genome['feature_counts'] = type_counts
return genome
@staticmethod
def validate_genome(g):
"""
Run a series of checks on the genome object and return any warnings
"""
allowed_tiers = {'Representative', 'Reference', 'ExternalDB', 'User'}
logging.info('Validating genome object contents')
warnings = g.get('warnings', [])
# TODO: Determine whether these checks make any sense for Metagenome
# object. Looks like many don't.
# Add validations for Metagenome object
# this will fire for some annotation methods like PROKKA
if g.get('domain') == "Bacteria" and len(g.get('cdss', [])) != len(g['features']):
warnings.append("For prokaryotes, CDS array should generally be the"
" same length as the Features array.")
if g.get('domain') == "Eukaryota" and len(g.get('features', [])) == len(g.get('cdss', [])):
warnings.append("For Eukaryotes, CDS array should not be the same "
"length as the Features array due to RNA splicing.")
if g.get('molecule_type') not in {"DNA", 'ds-DNA'}:
if g.get('domain', '') not in {'Virus', 'Viroid'} and \
g['molecule_type'] not in {"DNA", 'ds-DNA'}:
warnings.append("Genome molecule_type {} is not expected "
"for domain {}.".format(g['molecule_type'],
g.get('domain', '')))
if "genome_tiers" in g and set(g['genome_tiers']) - allowed_tiers:
warnings.append("Undefined terms in genome_tiers: " + ", ".join(
set(g['genome_tiers']) - allowed_tiers))
assignments = g.get('taxon_assignments', {})
if 'ncbi' not in assignments or (
'taxon_ref' in g and g['taxon_ref'] == "ReferenceTaxons/unknown_taxon"):
warnings.append('Unable to determine organism taxonomy')
GenomeInterface.handle_large_genomes(g)
return warnings
@staticmethod
def handle_large_genomes(g):
"""Determines the size of various feature arrays and starts removing the dna_sequence if
the genome is getting too big to store in the workspace"""
def _get_size(obj):
return sys.getsizeof(json.dumps(obj))
# seems pretty unnecessary...
def sizeof_fmt(num):
for unit in ['', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi']:
if abs(num) < 1024.0:
return "%3.1f %sB" % (num, unit)
num /= 1024.0
return "%.1f %sB" % (num, 'Yi')
feature_lists = ('mrnas', 'features', 'non_coding_features', 'cdss')
master_key_sizes = dict()
# Change want full breakdown to True if want to see break down of sizes.
# By making this a changeable flag it will run faster for standard uploads.
want_full_breakdown = False
for x in feature_lists:
if x in g:
need_to_remove_dna_sequence = _get_size(g) > MAX_GENOME_SIZE
if need_to_remove_dna_sequence or want_full_breakdown:
feature_type_dict_keys = dict()
for feature in g[x]:
for feature_key in list(feature.keys()):
if feature_key == "dna_sequence" and need_to_remove_dna_sequence:
# NOTE: should this get stored somewhere?
del (feature["dna_sequence"])
else:
if feature_key not in feature_type_dict_keys:
feature_type_dict_keys[feature_key] = 0
feature_type_dict_keys[feature_key] += sys.getsizeof(
feature[feature_key])
for feature_key in feature_type_dict_keys:
feature_type_dict_keys[feature_key] = sizeof_fmt(
feature_type_dict_keys[feature_key])
master_key_sizes[x] = feature_type_dict_keys
print(f"{x}: {sizeof_fmt(_get_size(g[x]))}")
total_size = _get_size(g)
print(f"Total size {sizeof_fmt(total_size)} ")
if want_full_breakdown:
print(f"Here is the breakdown of the sizes of feature lists elements : "
f"{str(master_key_sizes)}")
if total_size > MAX_GENOME_SIZE:
print(f"Here is the breakdown of the sizes of feature lists elements : "
f"{str(master_key_sizes)}")
raise ValueError(f"This genome size of {sizeof_fmt(total_size)} exceeds the maximum "
f"permitted size of {sizeof_fmt(MAX_GENOME_SIZE)}.\n"
f"Here is the breakdown for feature lists and their respective "
f"sizes:\n{master_key_sizes}")
| [
"GenomeFileUtil.core.GenomeUtils.set_default_taxon_data",
"GenomeFileUtil.core.GenomeUtils.sort_dict",
"json.loads",
"installed_clients.WSLargeDataIOClient.WsLargeDataIO",
"installed_clients.AssemblySequenceAPIServiceClient.AssemblySequenceAPI",
"sys.getsizeof",
"json.dumps",
"os.path.join",
"GenomeFileUtil.core.GenomeUtils.set_taxon_data",
"GenomeFileUtil.authclient.KBaseAuth",
"requests.get",
"collections.defaultdict",
"installed_clients.AbstractHandleClient.AbstractHandle",
"logging.error",
"logging.info",
"installed_clients.DataFileUtilClient.DataFileUtil"
] | [((926, 959), 'GenomeFileUtil.authclient.KBaseAuth', '_KBaseAuth', (['self.auth_service_url'], {}), '(self.auth_service_url)\n', (936, 959), True, 'from GenomeFileUtil.authclient import KBaseAuth as _KBaseAuth\n'), ((979, 1010), 'installed_clients.DataFileUtilClient.DataFileUtil', 'DataFileUtil', (['self.callback_url'], {}), '(self.callback_url)\n', (991, 1010), False, 'from installed_clients.DataFileUtilClient import DataFileUtil\n'), ((1148, 1180), 'installed_clients.WSLargeDataIOClient.WsLargeDataIO', 'WsLargeDataIO', (['self.callback_url'], {}), '(self.callback_url)\n', (1161, 1180), False, 'from installed_clients.WSLargeDataIOClient import WsLargeDataIO\n'), ((1390, 1445), 'logging.info', 'logging.info', (['"""start validating save_one_genome params"""'], {}), "('start validating save_one_genome params')\n", (1402, 1445), False, 'import logging\n'), ((1845, 1890), 'logging.info', 'logging.info', (['"""start checking shock response"""'], {}), "('start checking shock response')\n", (1857, 1890), False, 'import logging\n'), ((3916, 3975), 'logging.info', 'logging.info', (['"""start checking dna sequence in each feature"""'], {}), "('start checking dna sequence in each feature')\n", (3928, 3975), False, 'import logging\n'), ((5318, 5356), 'logging.info', 'logging.info', (['"""fetching genome object"""'], {}), "('fetching genome object')\n", (5330, 5356), False, 'import logging\n'), ((5614, 5656), 'logging.info', 'logging.info', (['"""start saving genome object"""'], {}), "('start saving genome object')\n", (5626, 5656), False, 'import logging\n'), ((6740, 6767), 'GenomeFileUtil.core.GenomeUtils.sort_dict', 'GenomeUtils.sort_dict', (['data'], {}), '(data)\n', (6761, 6767), False, 'from GenomeFileUtil.core import GenomeUtils\n'), ((6832, 6874), 'os.path.join', 'os.path.join', (['self.scratch', "(name + '.json')"], {}), "(self.scratch, name + '.json')\n", (6844, 6874), False, 'import os\n'), ((9236, 9253), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (9247, 9253), False, 'from collections import defaultdict\n'), ((11719, 11735), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (11730, 11735), False, 'from collections import defaultdict\n'), ((15771, 15820), 'logging.info', 'logging.info', (['"""Validating genome object contents"""'], {}), "('Validating genome object contents')\n", (15783, 15820), False, 'import logging\n'), ((2658, 2706), 'installed_clients.AbstractHandleClient.AbstractHandle', 'HandleService', (['self.handle_url'], {'token': 'self.token'}), '(self.handle_url, token=self.token)\n', (2671, 2706), True, 'from installed_clients.AbstractHandleClient import AbstractHandle as HandleService\n'), ((2958, 3075), 'requests.get', 'requests.get', (["(self.shock_url + '/node/' + shock_id + '/acl/?verbosity=full')"], {'headers': 'header', 'allow_redirects': '(True)'}), "(self.shock_url + '/node/' + shock_id + '/acl/?verbosity=full',\n headers=header, allow_redirects=True)\n", (2970, 3075), False, 'import requests\n'), ((9999, 10058), 'GenomeFileUtil.core.GenomeUtils.set_taxon_data', 'GenomeUtils.set_taxon_data', (['tax_id', 'self.re_api_url', 'genome'], {}), '(tax_id, self.re_api_url, genome)\n', (10025, 10058), False, 'from GenomeFileUtil.core import GenomeUtils\n'), ((10085, 10127), 'GenomeFileUtil.core.GenomeUtils.set_default_taxon_data', 'GenomeUtils.set_default_taxon_data', (['genome'], {}), '(genome)\n', (10119, 10127), False, 'from GenomeFileUtil.core import GenomeUtils\n'), ((4311, 4361), 
'installed_clients.AssemblySequenceAPIServiceClient.AssemblySequenceAPI', 'AssemblySequenceAPI', (['self.sw_url'], {'token': 'self.token'}), '(self.sw_url, token=self.token)\n', (4330, 4361), False, 'from installed_clients.AssemblySequenceAPIServiceClient import AssemblySequenceAPI\n'), ((17852, 17867), 'json.dumps', 'json.dumps', (['obj'], {}), '(obj)\n', (17862, 17867), False, 'import json\n'), ((2108, 2198), 'logging.error', 'logging.error', (['("Couldn\'t parse response error content from Shock: " + response.content)'], {}), '("Couldn\'t parse response error content from Shock: " +\n response.content)\n', (2121, 2198), False, 'import logging\n'), ((1959, 1987), 'json.loads', 'json.loads', (['response.content'], {}), '(response.content)\n', (1969, 1987), False, 'import json\n'), ((19337, 19372), 'sys.getsizeof', 'sys.getsizeof', (['feature[feature_key]'], {}), '(feature[feature_key])\n', (19350, 19372), False, 'import sys\n')] |
from kivy.logger import Logger
import os
import paramiko
import vyattaconfparser
import json
class Router(object):
def __init__(self, config, resp_q):
self.commands = {
'connect': self.connect,
'disconnect': self.disconnect,
'drop_kids_packets': self.drop_kids_packets,
'drop_tv_packets': self.drop_tv_packets,
'allow_kids_packets': self.allow_kids_packets,
'allow_tv_packets': self.allow_tv_packets,
'show_firewall': self.show_firewall
}
try:
self.key_file = config['key_file']
self.ip = config['ip']
self.user = config['user']
self.q = resp_q
except Exception as e:
raise RuntimeError('Router: exception {e}'.format(e=e))
Logger.info(
f'Router: Initializing router data at {self.ip} with user {self.user}' \
f' key {self.key_file} and response queue {self.q}')
def do_seq(self, command_seq):
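# Execute a sequence of named commands in order; every name must be a key of self.commands.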
for command in command_seq:
print(command)
self.commands[command]()
def connect(self):
response = {'command': 'connect'}
if not os.access(self.key_file, os.F_OK):
Logger.error('Router: ssh key file not found')
response['status'] = 'Failure'
response['message'] = 'SSH key file not found'
else:
try:
key = paramiko.RSAKey.from_private_key_file(self.key_file)
self.client = paramiko.SSHClient()
self.client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
self.client.connect(hostname=self.ip, username=self.user, pkey=key, timeout=2)
response['status'] = 'Success'
response['message'] = 'OK'
except Exception as e:
response['status'] = 'Failure'
response['message'] = str(e)
self.q.put(response)
Logger.info(f'Router: {str(response)}')
def disconnect(self):
response = {'command': 'disconnect'}
try:
self.client.close()
response['status'] = 'Success'
response['message'] = 'OK'
except Exception as e:
response['status'] = 'Failure'
response['message'] = str(e)
self.q.put(response)
Logger.info(f'Router: {str(response)}')
def show_firewall(self):
response = {'command': 'show_firewall'}
# stdin, stdout, stderr = self.client.exec_command(
# '/opt/vyatta/sbin/vyatta-cfg-cmd-wrapper show firewall')
stdin, stdout, stderr = self.client.exec_command('cat /config/config.boot')
err = stderr.read().decode()
if err == '':
try:
result = str(stdout.read().decode())
result = (vyattaconfparser.parse_conf(result))
# Uncomment to print pretty json
# print(json.dumps(result, indent=2))
response['status'] = 'Success'
response['message'] = {}
if 'disable' in result['firewall']['name']['WAN_IN']['rule']['10'].keys():
response['message']['kids'] = 'enabled'
else:
response['message']['kids'] = 'disabled'
if 'disable' in result['firewall']['name']['WAN_IN']['rule']['20'].keys():
response['message']['tv'] = 'enabled'
else:
response['message']['tv'] = 'disabled'
except Exception as e:
response['status'] = 'Failure'
response['message'] = str(e)
else:
response['status'] = 'Failure'
response['message'] = err
self.q.put(response)
Logger.info(f'Router: {response}')
def drop_kids_packets(self):
response = {'command': 'drop_kids_packets'}
stdin, stdout, stderr = self.client.exec_command(
'/opt/vyatta/sbin/vyatta-cfg-cmd-wrapper begin')
err = stderr.read().decode()
if err == '':
stdin, stdout, stderr = self.client.exec_command(
'/opt/vyatta/sbin/vyatta-cfg-cmd-wrapper delete firewall name WAN_IN rule 10 disable')
err = stderr.read().decode()
if err == '':
stdin, stdout, stderr = self.client.exec_command(
'/opt/vyatta/sbin/vyatta-cfg-cmd-wrapper commit')
err = stderr.read().decode()
if err == '':
stdin, stdout, stderr = self.client.exec_command(
'/opt/vyatta/sbin/vyatta-cfg-cmd-wrapper save')
err = stderr.read().decode()
if err == '':
response['status'] = 'Success'
response['message'] = 'OK'
self.q.put(response)
Logger.info(f'Router: {str(response)}')
return
response['status'] = 'Failure'
response['message'] = err
self.q.put(response)
Logger.info(f'Router: {str(response)}')
def allow_kids_packets(self):
response = {'command': 'allow_kids_packets'}
stdin, stdout, stderr = self.client.exec_command(
'/opt/vyatta/sbin/vyatta-cfg-cmd-wrapper begin')
err = stderr.read().decode()
if err == '':
stdin, stdout, stderr = self.client.exec_command(
'/opt/vyatta/sbin/vyatta-cfg-cmd-wrapper set firewall name WAN_IN rule 10 disable')
err = stderr.read().decode()
if err == '':
stdin, stdout, stderr = self.client.exec_command(
'/opt/vyatta/sbin/vyatta-cfg-cmd-wrapper commit')
err = stderr.read().decode()
if err == '':
stdin, stdout, stderr = self.client.exec_command(
'/opt/vyatta/sbin/vyatta-cfg-cmd-wrapper save')
err = stderr.read().decode()
if err == '':
response['status'] = 'Success'
response['message'] = 'OK'
self.q.put(response)
Logger.info(f'Router: {str(response)}')
return
response['status'] = 'Failure'
response['message'] = err
self.q.put(response)
Logger.info(f'Router: {str(response)}')
def drop_tv_packets(self):
response = {'command': 'drop_tv_packets'}
stdin, stdout, stderr = self.client.exec_command(
'/opt/vyatta/sbin/vyatta-cfg-cmd-wrapper begin')
err = stderr.read().decode()
if err == '':
stdin, stdout, stderr = self.client.exec_command(
'/opt/vyatta/sbin/vyatta-cfg-cmd-wrapper delete firewall name WAN_IN rule 20 disable')
err = stderr.read().decode()
if err == '':
stdin, stdout, stderr = self.client.exec_command(
'/opt/vyatta/sbin/vyatta-cfg-cmd-wrapper commit')
err = stderr.read().decode()
if err == '':
stdin, stdout, stderr = self.client.exec_command(
'/opt/vyatta/sbin/vyatta-cfg-cmd-wrapper save')
err = stderr.read().decode()
if err == '':
response['status'] = 'Success'
response['message'] = 'OK'
self.q.put(response)
Logger.info(f'Router: {str(response)}')
return
response['status'] = 'Failure'
response['message'] = err
self.q.put(response)
Logger.info(f'Router: {str(response)}')
def allow_tv_packets(self):
response = {'command': 'allow_tv_packets'}
stdin, stdout, stderr = self.client.exec_command(
'/opt/vyatta/sbin/vyatta-cfg-cmd-wrapper begin')
err = stderr.read().decode()
if err == '':
stdin, stdout, stderr = self.client.exec_command(
'/opt/vyatta/sbin/vyatta-cfg-cmd-wrapper set firewall name WAN_IN rule 20 disable')
err = stderr.read().decode()
if err == '':
stdin, stdout, stderr = self.client.exec_command(
'/opt/vyatta/sbin/vyatta-cfg-cmd-wrapper commit')
err = stderr.read().decode()
if err == '':
stdin, stdout, stderr = self.client.exec_command(
'/opt/vyatta/sbin/vyatta-cfg-cmd-wrapper save')
err = stderr.read().decode()
if err == '':
response['status'] = 'Success'
response['message'] = 'OK'
self.q.put(response)
Logger.info(f'Router: {str(response)}')
return
response['status'] = 'Failure'
response['message'] = err
self.q.put(response)
Logger.info(f'Router: {str(response)}')
| [
"vyattaconfparser.parse_conf",
"kivy.logger.Logger.error",
"kivy.logger.Logger.info",
"paramiko.RSAKey.from_private_key_file",
"paramiko.AutoAddPolicy",
"os.access",
"paramiko.SSHClient"
] | [((825, 966), 'kivy.logger.Logger.info', 'Logger.info', (['f"""Router: Initializing router data at {self.ip} with user {self.user} key {self.key_file} and response queue {self.q}"""'], {}), "(\n f'Router: Initializing router data at {self.ip} with user {self.user} key {self.key_file} and response queue {self.q}'\n )\n", (836, 966), False, 'from kivy.logger import Logger\n'), ((3898, 3932), 'kivy.logger.Logger.info', 'Logger.info', (['f"""Router: {response}"""'], {}), "(f'Router: {response}')\n", (3909, 3932), False, 'from kivy.logger import Logger\n'), ((1220, 1253), 'os.access', 'os.access', (['self.key_file', 'os.F_OK'], {}), '(self.key_file, os.F_OK)\n', (1229, 1253), False, 'import os\n'), ((1268, 1314), 'kivy.logger.Logger.error', 'Logger.error', (['"""Router: ssh key file not found"""'], {}), "('Router: ssh key file not found')\n", (1280, 1314), False, 'from kivy.logger import Logger\n'), ((1475, 1527), 'paramiko.RSAKey.from_private_key_file', 'paramiko.RSAKey.from_private_key_file', (['self.key_file'], {}), '(self.key_file)\n', (1512, 1527), False, 'import paramiko\n'), ((1559, 1579), 'paramiko.SSHClient', 'paramiko.SSHClient', ([], {}), '()\n', (1577, 1579), False, 'import paramiko\n'), ((2923, 2958), 'vyattaconfparser.parse_conf', 'vyattaconfparser.parse_conf', (['result'], {}), '(result)\n', (2950, 2958), False, 'import vyattaconfparser\n'), ((1637, 1661), 'paramiko.AutoAddPolicy', 'paramiko.AutoAddPolicy', ([], {}), '()\n', (1659, 1661), False, 'import paramiko\n')] |
#!/usr/bin/env python
from setuptools import setup, find_packages
setup(name='bci-typing',
author='<NAME>',
packages=find_packages(),
) | [
"setuptools.find_packages"
] | [((131, 146), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (144, 146), False, 'from setuptools import setup, find_packages\n')] |
import nd2
import numpy as np
def read(image):
return nd2.ND2File(image)
def parse(image, overlap, slices):
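# Arrange an ND2 acquisition into a 2-D grid of dask tiles. For multi-point ('P') files the stage
# positions are mapped to row/column indices, and if no overlap is supplied the overlap fraction is
# estimated from the mean spacing between neighbouring tiles.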
if 'P' not in image.sizes.keys():
tile = image.to_dask()
tiles = np.empty((1, 1), dtype=object)
tiles[0, 0] = tile
n_blocks = (1, 1)
overlap = (0, 0)
image_shape = tile.shape
else:
positions = None
for loop in image.experiment:
if isinstance(loop, nd2.structures.XYPosLoop):
positions = np.array([point.stagePositionUm[:2] for point in loop.parameters.points]).T
break
camera_transformation = np.array(image.metadata.channels[0].volume.cameraTransformationMatrix).reshape(2, 2)
x, y = np.linalg.inv(camera_transformation) @ positions
if positions[0, 1] - positions[0, 0] > 0:
x = -x
if positions[1, -1] - positions[1, 0] > 0:
y = -y
x_ndim = round(np.ptp(x) / abs(x[0] - x[1])) + 1
y_ndim = round(np.ptp(y) / abs(x[0] - x[1])) + 1
j = np.rint((x - min(x)) / (np.ptp(x) / (x_ndim - 1))).astype(int)
i = np.rint((y - min(y)) / (np.ptp(y) / (y_ndim - 1))).astype(int)
if overlap is None:
x_overlaps = np.empty(0)
y_overlaps = np.empty(0)
for col in np.unique(j):
x_overlaps = np.append(x_overlaps, np.mean(x[np.where(j == col)]))
for row in np.unique(i):
y_overlaps = np.append(y_overlaps, np.mean(y[np.where(i == row)]))
x_overlap = round(1 - (np.mean(np.diff(x_overlaps)) / image.metadata.channels[0].volume.axesCalibration[
0]) / image.attributes.widthPx, 2)
y_overlap = round(1 - (np.mean(np.diff(y_overlaps)) / image.metadata.channels[0].volume.axesCalibration[
1]) / image.attributes.heightPx, 2)
if not ((0 < x_overlap < 1) & (0 < y_overlap < 1)):
raise RuntimeError("Failed to determine overlap percentage from metadata.")
overlap = (y_overlap, x_overlap)
width = round(image.attributes.widthPx * (x_ndim - (x_ndim - 1) * overlap[1]))
height = round(image.attributes.heightPx * (y_ndim - (y_ndim - 1) * overlap[0]))
image_shape = None
image_array = image.to_dask()
tiles = np.empty(shape=(y_ndim, x_ndim), dtype=object)
for n in range(image.sizes['P']):
tile = image_array[n]
if tile.ndim > 2:
tile = tile[slices]
if image_shape is None:
image_shape = (*tile.shape[:-2], height, width)
tiles[i[n], j[n]] = tile
n_blocks = tiles.shape
return tiles, n_blocks, overlap, image_shape
| [
"numpy.ptp",
"numpy.unique",
"numpy.where",
"numpy.diff",
"nd2.ND2File",
"numpy.array",
"numpy.linalg.inv",
"numpy.empty"
] | [((61, 79), 'nd2.ND2File', 'nd2.ND2File', (['image'], {}), '(image)\n', (72, 79), False, 'import nd2\n'), ((204, 234), 'numpy.empty', 'np.empty', (['(1, 1)'], {'dtype': 'object'}), '((1, 1), dtype=object)\n', (212, 234), True, 'import numpy as np\n'), ((2338, 2384), 'numpy.empty', 'np.empty', ([], {'shape': '(y_ndim, x_ndim)', 'dtype': 'object'}), '(shape=(y_ndim, x_ndim), dtype=object)\n', (2346, 2384), True, 'import numpy as np\n'), ((741, 777), 'numpy.linalg.inv', 'np.linalg.inv', (['camera_transformation'], {}), '(camera_transformation)\n', (754, 777), True, 'import numpy as np\n'), ((1250, 1261), 'numpy.empty', 'np.empty', (['(0)'], {}), '(0)\n', (1258, 1261), True, 'import numpy as np\n'), ((1287, 1298), 'numpy.empty', 'np.empty', (['(0)'], {}), '(0)\n', (1295, 1298), True, 'import numpy as np\n'), ((1322, 1334), 'numpy.unique', 'np.unique', (['j'], {}), '(j)\n', (1331, 1334), True, 'import numpy as np\n'), ((1442, 1454), 'numpy.unique', 'np.unique', (['i'], {}), '(i)\n', (1451, 1454), True, 'import numpy as np\n'), ((640, 710), 'numpy.array', 'np.array', (['image.metadata.channels[0].volume.cameraTransformationMatrix'], {}), '(image.metadata.channels[0].volume.cameraTransformationMatrix)\n', (648, 710), True, 'import numpy as np\n'), ((509, 582), 'numpy.array', 'np.array', (['[point.stagePositionUm[:2] for point in loop.parameters.points]'], {}), '([point.stagePositionUm[:2] for point in loop.parameters.points])\n', (517, 582), True, 'import numpy as np\n'), ((954, 963), 'numpy.ptp', 'np.ptp', (['x'], {}), '(x)\n', (960, 963), True, 'import numpy as np\n'), ((1011, 1020), 'numpy.ptp', 'np.ptp', (['y'], {}), '(y)\n', (1017, 1020), True, 'import numpy as np\n'), ((1082, 1091), 'numpy.ptp', 'np.ptp', (['x'], {}), '(x)\n', (1088, 1091), True, 'import numpy as np\n'), ((1157, 1166), 'numpy.ptp', 'np.ptp', (['y'], {}), '(y)\n', (1163, 1166), True, 'import numpy as np\n'), ((1397, 1415), 'numpy.where', 'np.where', (['(j == col)'], {}), '(j == col)\n', (1405, 1415), True, 'import numpy as np\n'), ((1517, 1535), 'numpy.where', 'np.where', (['(i == row)'], {}), '(i == row)\n', (1525, 1535), True, 'import numpy as np\n'), ((1582, 1601), 'numpy.diff', 'np.diff', (['x_overlaps'], {}), '(x_overlaps)\n', (1589, 1601), True, 'import numpy as np\n'), ((1750, 1769), 'numpy.diff', 'np.diff', (['y_overlaps'], {}), '(y_overlaps)\n', (1757, 1769), True, 'import numpy as np\n')] |
import torch
from torch.utils.data import Dataset
from torch.utils.data.sampler import Sampler
import numpy as np
import random
from model.rnn_encoder import Hybrid_Alias_Sim
import torch.nn as nn
def vectorize(ex, char2ind):
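# Turn one (alias1, alias2, negatives) example into per-word lists of character indices,
# mapping characters missing from char2ind to '<unk>'.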
alias1, alias2, neg = ex
neg_alias, neg_score = neg
vec_alias1 = list()
vec_alias2 = list()
vec_neg_alias = list()
vec_neg_score = list()
for word in alias1.split():
char_in_word = [char2ind[ch] if ch in char2ind else char2ind['<unk>'] for ch in word]
vec_alias1.append(char_in_word)
for word in alias2.split():
char_in_word = [char2ind[ch] if ch in char2ind else char2ind['<unk>'] for ch in word]
vec_alias2.append(char_in_word)
for i, nalias in enumerate(neg_alias):
if len(nalias) <= 1:
continue
vec_neg = list()
for word in nalias.split():
char_in_word = [char2ind[ch] if ch in char2ind else char2ind['<unk>'] for ch in word]
vec_neg.append(char_in_word)
if len(vec_neg) > 0:
vec_neg_alias.append(vec_neg)
if len(neg_score) > 0:
vec_neg_score.append(float(neg_score[i]))
assert len(vec_neg_alias) >= 5
return vec_alias1, vec_alias2, vec_neg_alias, vec_neg_score
class AliasDataset(Dataset):
def __init__(self, examples, ind2char, voc, char2ind, ngram, neg_num):
self.examples = examples
self.ind2char = ind2char
self.voc = voc
self.char2ind = char2ind
self.ngram = ngram
self.neg_num = neg_num
def __len__(self):
return len(self.examples)
def __getitem__(self, index):
return vectorize(self.examples[index], self.char2ind)
def lengths(self):
return [(len(alias1), len(alias2)) for alias1, alias2, _, _ in self.examples]
def val_batchify(batch):
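# Pad the aliases into (batch, max_words, max_chars) LongTensors with word/char masks;
# each example keeps its own tensor of negative candidates.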
x1_word_len, x1_char_len, x2_word_len, x2_char_len = list(), list(), list(), list()
x3 = list()
x3_word_mask = list()
x3_char_mask = list()
for ex in batch:
vec_alias1 = ex[0]
x1_word_len.append(len(vec_alias1))
for word in vec_alias1:
x1_char_len.append(len(word))
vec_alias2 = ex[1]
x2_word_len.append(len(vec_alias2))
for word in vec_alias2:
x2_char_len.append(len(word))
x3_word_len = list()
x3_char_len = list()
for neg_alias in ex[2]:
x3_word_len.append(len(neg_alias))
for word in neg_alias:
x3_char_len.append(len(word))
neg_v = torch.LongTensor(len(x3_word_len), max(x3_word_len), max(x3_char_len)).zero_()
neg_word_mask = torch.ByteTensor(len(x3_word_len), max(x3_word_len)).fill_(1)
neg_char_mask = torch.ByteTensor(len(x3_word_len), max(x3_word_len), max(x3_char_len)).fill_(1)
for i, neg_alias in enumerate(ex[2]):
for j, word in enumerate(neg_alias):
a3 = torch.LongTensor(word)
neg_v[i, j, :len(word)].copy_(a3)
neg_char_mask[i, j, :len(word)].fill_(0)
neg_word_mask[i, :len(neg_alias)].fill_(0)
x3.append(neg_v)
x3_word_mask.append(neg_word_mask)
x3_char_mask.append(neg_char_mask)
x1 = torch.LongTensor(len(x1_word_len), max(x1_word_len), max(x1_char_len)).zero_()
x1_word_mask = torch.ByteTensor(len(x1_word_len), max(x1_word_len)).fill_(1)
x1_char_mask = torch.ByteTensor(len(x1_word_len), max(x1_word_len), max(x1_char_len)).fill_(1)
x2 = torch.LongTensor(len(x2_word_len), max(x2_word_len), max(x2_char_len)).zero_()
x2_word_mask = torch.ByteTensor(len(x2_word_len), max(x2_word_len)).fill_(1)
x2_char_mask = torch.ByteTensor(len(x2_word_len), max(x2_word_len), max(x2_char_len)).fill_(1)
for i in range(len(x1_word_len)):
vec_alias1 = batch[i][0]
for j, word in enumerate(vec_alias1):
a1 = torch.LongTensor(word)
x1[i, j, :len(word)].copy_(a1)
x1_char_mask[i, j, :len(word)].fill_(0)
x1_word_mask[i, :len(vec_alias1)].fill_(0)
vec_alias2 = batch[i][1]
for j, word in enumerate(vec_alias2):
a2 = torch.LongTensor(word)
x2[i, j, :len(word)].copy_(a2)
x2_char_mask[i, j, :len(word)].fill_(0)
x2_word_mask[i, :len(vec_alias2)].fill_(0)
return x1, x1_word_mask, x1_char_mask, x2, x2_word_mask, x2_char_mask, x3, x3_word_mask, x3_char_mask
def train_batchify(batch):
num_neg = 5
'''
x1: pos_alias1, batch * max(x1_length) * max(char1_length)
x2: pos_alias2, batch * max(x2_length) * max(char2_length)
x3: neg_alias, (batch*num_neg) * max(x3_length)
'''
#### len(neg_subsamples) = len(batch) * num_neg
neg_alias = list()
x1_word_len, x1_char_len, x2_word_len, x2_char_len, x3_word_len, x3_char_len = list(), list(), list(), list(), list(), list()
for ex in batch:
vec_alias1 = ex[0]
x1_word_len.append(len(vec_alias1))
for word in vec_alias1:
x1_char_len.append(len(word))
vec_alias2 = ex[1]
x2_word_len.append(len(vec_alias2))
for word in vec_alias2:
x2_char_len.append(len(word))
neg_candidate = ex[2]
neg_score =np.array(ex[3])
neg_score /= neg_score.sum()
if len(neg_score) == 0:
indices = random.sample(range(len(neg_candidate)), num_neg)
elif random.random() > 0.5:
indices = np.random.choice(range(len(neg_candidate)), num_neg, replace=False, p=neg_score)
else:
indices = random.sample(range(len(neg_candidate)), num_neg)
#indices = random.sample(range(len(neg_candidate)), num_neg)
for i in range(num_neg):
neg_alias.append(list())
x3_word_len.append(list())
x3_char_len.append(list())
for i, ind in enumerate(indices):
neg_alias[i].append(neg_candidate[ind])
x3_word_len[i].append(len(neg_candidate[ind]))
for word in neg_candidate[ind]:
x3_char_len[i].append(len(word))
x1 = torch.LongTensor(len(x1_word_len), max(x1_word_len), max(x1_char_len)).zero_()
x1_word_mask = torch.ByteTensor(len(x1_word_len), max(x1_word_len)).fill_(1)
x1_char_mask = torch.ByteTensor(len(x1_word_len), max(x1_word_len), max(x1_char_len)).fill_(1)
x2 = torch.LongTensor(len(x2_word_len), max(x2_word_len), max(x2_char_len)).zero_()
x2_word_mask = torch.ByteTensor(len(x2_word_len), max(x2_word_len)).fill_(1)
x2_char_mask = torch.ByteTensor(len(x2_word_len), max(x2_word_len), max(x2_char_len)).fill_(1)
neg3, neg3_word_mask, neg3_char_mask = list(), list(), list()
for i in range(len(x1_word_len)):
vec_alias1 = batch[i][0]
for j, word in enumerate(vec_alias1):
a1 = torch.LongTensor(word)
x1[i, j, :len(word)].copy_(a1)
x1_char_mask[i, j, :len(word)].fill_(0)
x1_word_mask[i, :len(vec_alias1)].fill_(0)
vec_alias2 = batch[i][1]
for j, word in enumerate(vec_alias2):
a2 = torch.LongTensor(word)
x2[i, j, :len(word)].copy_(a2)
x2_char_mask[i, j, :len(word)].fill_(0)
x2_word_mask[i, :len(vec_alias2)].fill_(0)
for j in range(num_neg):
x3 = torch.LongTensor(len(x3_word_len[j]), max(x3_word_len[j]), max(x3_char_len[j])).zero_()
x3_word_mask = torch.ByteTensor(len(x3_word_len[j]), max(x3_word_len[j])).fill_(1)
x3_char_mask = torch.ByteTensor(len(x3_word_len[j]), max(x3_word_len[j]), max(x3_char_len[j])).fill_(1)
for i in range(len(neg_alias[j])):
vec_neg = neg_alias[j][i]
for k, word in enumerate(vec_neg):
a3 = torch.LongTensor(word)
x3[i, k, :len(word)].copy_(a3)
x3_char_mask[i, k, :len(word)].fill_(0)
x3_word_mask[i, :len(vec_neg)].fill_(0)
neg3.append(x3)
neg3_word_mask.append(x3_word_mask)
neg3_char_mask.append(x3_char_mask)
return x1, x1_word_mask, x1_char_mask, x2, x2_word_mask, x2_char_mask, neg3, neg3_word_mask, neg3_char_mask
| [
"random.random",
"numpy.array",
"torch.LongTensor"
] | [((5459, 5474), 'numpy.array', 'np.array', (['ex[3]'], {}), '(ex[3])\n', (5467, 5474), True, 'import numpy as np\n'), ((4068, 4090), 'torch.LongTensor', 'torch.LongTensor', (['word'], {}), '(word)\n', (4084, 4090), False, 'import torch\n'), ((4341, 4363), 'torch.LongTensor', 'torch.LongTensor', (['word'], {}), '(word)\n', (4357, 4363), False, 'import torch\n'), ((7094, 7116), 'torch.LongTensor', 'torch.LongTensor', (['word'], {}), '(word)\n', (7110, 7116), False, 'import torch\n'), ((7367, 7389), 'torch.LongTensor', 'torch.LongTensor', (['word'], {}), '(word)\n', (7383, 7389), False, 'import torch\n'), ((3076, 3098), 'torch.LongTensor', 'torch.LongTensor', (['word'], {}), '(word)\n', (3092, 3098), False, 'import torch\n'), ((5636, 5651), 'random.random', 'random.random', ([], {}), '()\n', (5649, 5651), False, 'import random\n'), ((8036, 8058), 'torch.LongTensor', 'torch.LongTensor', (['word'], {}), '(word)\n', (8052, 8058), False, 'import torch\n')] |
# -*-coding:utf-8-*-
import os
from threading import Thread
from flask.ext.mail import Message
from . import mail
from flask import current_app,render_template
#from ..manage import app
# the original app was changed to current_app
def send_async_email(app, msg):
with app.app_context():
mail.send(msg)
def send_email(to, subject, template, **kwargs):
#app_context =current_app.app_context()
#app_context.push()  # I knew something had to be changed here
app = current_app._get_current_object()  # it turns out this line was needed
msg=Message(app.config['FLASKY_MAIL_SUBJECT_PREFIX']+subject, sender=app.config['FLASKY_MAIL_SENDER'], recipients=[to])
msg.body=render_template(template+'.txt', **kwargs)
msg.html=render_template(template+'.html', **kwargs)
thr=Thread(target=send_async_email, args=[app, msg])
thr.start()
return thr
| [
"flask.render_template",
"flask.ext.mail.Message",
"flask.current_app._get_current_object",
"threading.Thread"
] | [((434, 467), 'flask.current_app._get_current_object', 'current_app._get_current_object', ([], {}), '()\n', (465, 467), False, 'from flask import current_app, render_template\n'), ((487, 609), 'flask.ext.mail.Message', 'Message', (["(app.config['FLASKY_MAIL_SUBJECT_PREFIX'] + subject)"], {'sender': "app.config['FLASKY_MAIL_SENDER']", 'recipients': '[to]'}), "(app.config['FLASKY_MAIL_SUBJECT_PREFIX'] + subject, sender=app.\n config['FLASKY_MAIL_SENDER'], recipients=[to])\n", (494, 609), False, 'from flask.ext.mail import Message\n'), ((616, 660), 'flask.render_template', 'render_template', (["(template + '.txt')"], {}), "(template + '.txt', **kwargs)\n", (631, 660), False, 'from flask import current_app, render_template\n'), ((672, 717), 'flask.render_template', 'render_template', (["(template + '.html')"], {}), "(template + '.html', **kwargs)\n", (687, 717), False, 'from flask import current_app, render_template\n'), ((724, 772), 'threading.Thread', 'Thread', ([], {'target': 'send_async_email', 'args': '[app, msg]'}), '(target=send_async_email, args=[app, msg])\n', (730, 772), False, 'from threading import Thread\n')] |
#!/usr/bin/env python
import ipaddress
import sys
if __name__ == "__main__":
if len(sys.argv) != 2:
sys.exit(1)
gw = None
try:
iprange = ipaddress.ip_network(unicode(sys.argv[1]))
except ValueError:
nic = ipaddress.ip_interface(unicode(sys.argv[1]))
iprange = nic.network
gw = nic.ip
print("export NETMASK=%s" % (iprange.netmask,))
if not gw:
hosts = list(iprange.hosts())
gw=hosts[0]
else:
hosts = [ip for ip in iprange.hosts() if ip>gw]
print("export GW=%s" % (gw,))
print("export DHCP_RANGE_START=%s" % (hosts[0],))
print("export DHCP_RANGE_END=%s" % (hosts[-1],))
| [
"sys.exit"
] | [((107, 118), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (115, 118), False, 'import sys\n')] |
'''
NMF.py
'''
from utils import create_mixture, do_STFT_on_data
import corpus
import time
import numpy as np
import theano as th
from theano import tensor as T
from librosa import load, stft, istft
from librosa.output import write_wav
from update_rules import update_h_beta, update_w_beta, update_w_cauchy, update_h_cauchy
from cost import cost_is, cost_cau, cost_euc, cost_kl
from utils import convolution, shift
from sklearn.cluster import MiniBatchKMeans
import matplotlib.pyplot as plt
import mir_eval
th.config.optimizer = 'None'
th.config.exception_verbosity = 'high'
class NMF(object):
"""docstring for NMF"""
def __init__(self, frequencies, time_steps, sources, X, conv=False, beta=2):
"""
NMF constructor
The W matrix shows the frequencies per source
The H matrix shows the activation per time step per source, i.e. how strongly each time step belongs to source X and source Y
Keyword arguments:
frequencies -- the number of frequencies we want to approximate
time_steps -- length of the file we want to approximate
sources -- the number of sources we want to recognise
X -- the magnitude of the original signal
convolution -- if we want to do convolution we need to add an extra dimension to the W matrix. (default False)
"""
super(NMF, self).__init__()
self._DEBUG = True
self._frequencies = frequencies
self._time_steps = time_steps
self._sources = sources
self._epochs = 900 # the number of epochs to run for
self._V = X
self._T = 0
self._beta = beta
if conv and not beta == 2: # in the case of convolution the W matrix has an extra dimension
self._T = 5
self._W = th.shared(value=np.asarray(np.random.rand(self._frequencies, self._sources, self._T)+np.ones((self._frequencies, self._sources, self._T)), dtype=th.config.floatX), name="W", borrow=True)
else:# otherwise the matrix is just 2D frequencies x sources
self._W = th.shared(value=np.asarray(np.random.rand(self._frequencies, self._sources)+np.ones((self._frequencies, self._sources)), dtype=th.config.floatX), name="W", borrow=True)
self._H = th.shared(value=np.asarray(np.random.rand(self._sources, self._time_steps)+np.ones((self._sources, self._time_steps)), dtype=th.config.floatX), name="H", borrow=True)
index = T.lscalar()
X = T.fmatrix()
self.reconstruct_func = th.function(inputs=[index, X],
outputs=self.reconstruct(self._W, self._H, X, index, conv),
name="reconstruct",
allow_input_downcast=True)
def train(self, cost, update_w, update_h, convolution=False, norm_W=0, norm_H=0, beta=0):
"""
Train the NMF
Keyword arguments:
cost -- the cost function
update_w -- the rule for updating the W matrix
update_h -- the update rule for the H marix
convolution -- if we want to do convolution we need to add an extra dimension to the W matrix. (default False)
norm_W -- normalise the W matrix with L1 norm (1) or L2 norm (2) (default 0)
norm_H -- normalise the H matrix with L1 norm (1) or L2 norm (2) (default 0)
beta -- the beta parameter that determines which update rule to use Eucledian (2) Kullback-Leibler (1) Itakura-Saito (0) (default 0)
"""
# the H train function
self.train_h = th.function(inputs=[],
outputs=[],
updates={self._H: update_h(self._V, self._W, self._H, beta, convolution, norm_H, self._T, self._time_steps)},
name="train_H",
allow_input_downcast=True)
# the W train function
self.train_w = th.function(inputs=[],
outputs=[],
updates={self._W: update_w(self._V, self._W, self._H, beta, convolution, norm_W, self._T, self._frequencies)},
name="train_W",
allow_input_downcast=True)
# the cost function
self.cost_func = th.function(inputs=[],
outputs=cost(self._V, self._W, self._H, self._frequencies, self._time_steps, convolution),
name="cost",
allow_input_downcast=True)
for epoch in range(self._epochs):
tick = time.time()
self.train_h()
self.train_w()
# scale both matrices
scale = T.sum(self._W, axis=0)
self._W = self._W * T.tile(T.pow(scale,-1),(self._frequencies,1))
self._H = self._H * T.transpose(T.tile(T.pow(scale,-1),(self._time_steps,1)))
if self._DEBUG:
                print('NMF -> iteration {}: took {:.1f} ms, loss = {}'.format(epoch, (time.time() - tick) * 1000, self.cost_func()))
# return train_func
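        # Background note (added): the update rules above minimise a beta-divergence
        # between V and W H. With v = V[f, n] and v_hat = (W H)[f, n], the three classical
        # costs selected by `beta` are:
        #   beta = 2 (Euclidean):        d(v, v_hat) = (v - v_hat) ** 2 / 2
        #   beta = 1 (Kullback-Leibler): d(v, v_hat) = v * log(v / v_hat) - v + v_hat
        #   beta = 0 (Itakura-Saito):    d(v, v_hat) = v / v_hat - log(v / v_hat) - 1
        # Each epoch performs one multiplicative update of H and one of W, which keeps
        # both factors non-negative.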
def get_W_H(self):
return self._W.eval(), self._H.eval(), self._V
def reconstruct(self, W, H, X, k, conv=False):
"""
Reconstruct a source by applying a Wiener filter
        Keyword arguments:
        W -- the frequency matrix F x K
        H -- the activation matrix per timestep K x N
        X -- the original input matrix which is NOT the magnitude F x N
        k -- the source index we want to reconstruct
        conv -- whether the convolutive model was used, i.e. W carries a third (time-shift) dimension (default False)
        """
if conv and not self._beta == 2:
V_hat = convolution(W, H) # reconstruct the approximation of V
W = W[:,k, :].reshape((-1,1, self._T)) # get a single column from the W matrix
H = H[k,:].reshape((1,-1)) # get a single row from the H matrix
V_k = T.zeros(V_hat.shape)
for t in range(self._T):
V_k = V_k + T.dot(W[:,:,t].reshape((-1,1)), shift(H,t))
return T.mul((V_k/V_hat),X) # apply the Wiener filter to X
else:
V_hat = th.dot(W, H) # reconstruct the approximation of V
W = W[:,k].reshape((-1,1)) # get a single column from the W matrix
H = H[k,:].reshape((1,-1)) # get a single row from the H matrix
return T.mul((T.dot(W,H)/V_hat),X) # apply the Wiener filter to X
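        # Worked note (added): the ratio built above is a Wiener-style mask,
        #   mask_k = (W[:, k] . H[k, :]) / (W H)
        # The masks of all sources sum to one element-wise, so the reconstructed
        # sources add back up to X, and applying the mask to X itself (rather than
        # to its magnitude) keeps the phase of the mixture for every source.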
# def kmeans(X, cluster_num, numepochs, learningrate=0.01, batchsize=100, verbose=True):
# '''
# klp_kmeans based NUMPY, better for small scale problems
# inherited from http://www.iro.umontreal.ca/~memisevr/code.html
# Error in casting to float from complex in line 192
# '''
#
# rng = np.random
# W =rng.randn(cluster_num, X.shape[1])
# X2 = (X**2).sum(1)[:, None]
# for epoch in range(numepochs):
# for i in range(0, X.shape[0], batchsize):
# D = -2*np.dot(W, X[i:i+batchsize,:].T) + (W**2).sum(1)[:, None] + X2[i:i+batchsize].T
# S = (D==D.min(0)[None,:]).astype("float").T
# W += learningrate * (np.dot(S.T, X[i:i+batchsize,:]) - S.sum(0)[:, None] * W)
# if verbose:
# print "epoch", epoch, "of", numepochs, " cost: ", D.min(0).sum()
# return W
def reconstruct_with_Z(k,Z, W, H, V):
# V_hat = th.dot(W, H)
# W = W[:,k].reshape((-1,1))
# H = H[k,:].reshape((1,-1))
# return T.mul((T.dot(W,H)/V_hat),V)
V_hat = np.dot(W,H)
H_k = np.multiply(H,(Z == k).astype(int).reshape(-1,1))
return np.multiply((np.dot(W,H_k)/V_hat), V)
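    # Note (added): reconstruct_with_Z groups NMF components into sources using the
    # k-means labels Z computed on the rows of H. For cluster k, H_k keeps only the
    # rows assigned to that cluster (all others are zeroed), and the Wiener-style
    # ratio (W H_k) / (W H) is applied to V, the spectrogram that was factorised.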
# if __name__ == '__main__':
# # x, sr = load('Hannah_teun_317Mic18.WAV')
# # x, sr = load('H_T_200Mic1O.WAV')
# x1, sr = load('/home/tinus/Workspace/corpus/data/S0001.wav')
# x2, sr = load('/home/tinus/Workspace/corpus/data/S0006.wav')
# # x = (x1/2) + (x2/2)
# x = (x1) + (x2)
# X = stft(x, win_length=256,hop_length=128, n_fft=1024)
#
# V = np.abs(X)**2
#
# frequencies, time_steps = X.shape
# sources = 50
# nmf = NMF(frequencies, time_steps, sources, V)
# train = nmf.train(cost_is, update_w_beta, update_h_beta)
# # train = nmf.train(cost_cau, update_w_cauchy, update_h_cauchy)
# W, H, V = nmf.get_W_H()
#
# clusters = 2
# mbk = MiniBatchKMeans(init='k-means++', n_clusters=clusters, batch_size=sources,
# n_init=10, max_no_improvement=50, verbose=0)
#
# Z = mbk.fit_predict(H)
#
# # colors = ['r', 'g', 'b', 'y', 'c']
# # mbk_means_cluster_centers = np.sort(mbk.cluster_centers_, axis=0)
# # # for k, col in zip(range(clusters), colors):
# # # cluster_center = mbk_means_cluster_centers[k]
# # # plt.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
# # # markeredgecolor='k', markersize=6)
# #
# # for k in range(clusters):
# # cluster_center = mbk_means_cluster_centers[k]
# # plt.plot(cluster_center[0], cluster_center[1], 'o',
# # markeredgecolor='k', markersize=6)
# #
# # plt.show()
# x_s = []
# for s in range(clusters):
# new_x = np.real(istft(reconstruct_with_Z(s, Z, W, H, V), win_length=256,hop_length=128))
# x_s.append(new_x)
# write_wav('tests/normal_cau_separated_{}.wav'.format(s), new_x, sr)
# x_s = np.stack(x_s)
# x_stacked = np.vstack((x1[:x_s.shape[1]], x2[:x_s.shape[1]]))
# bss = mir_eval.separation.bss_eval_sources(x_stacked, x_s)
# print(bss)
#
#
# # for s in range(sources):
# # new_x = np.real(istft(nmf.reconstruct_func(s, X), win_length=256,hop_length=128))
# # write_wav('normal_separated_{}.wav'.format(s), new_x, sr)
if __name__ == '__main__':
SDR = 0 # signal to distortion ratio over all files summed
SAR = 0 # signal to artifact ratio over all files summed
SIR = 0 # signal to inference ratio over all files summed
for i in range(100):
corpus_train = corpus.experiment_files_voc
mix, x1, x2 = create_mixture(corpus_train)
X, _, _ = do_STFT_on_data(mix, x1, x2)
X = X.T
V = np.abs(X)**2
frequencies, time_steps = X.shape
sources = 50
nmf = NMF(frequencies, time_steps, sources, V)
train = nmf.train(cost_is, update_w_beta, update_h_beta)
W, H, V = nmf.get_W_H()
clusters = 2
mbk = MiniBatchKMeans(init='k-means++', n_clusters=clusters, batch_size=sources,
n_init=10, max_no_improvement=50, verbose=0)
Z = mbk.fit_predict(H)
x_s = []
for s in range(clusters):
new_x = np.real(istft(reconstruct_with_Z(s, Z, W, H, V), win_length=256,hop_length=128))
x_s.append(new_x)
x_s = np.stack(x_s)
x_stacked = np.vstack((x1[:x_s.shape[1]], x2[:x_s.shape[1]]))
bss = mir_eval.separation.bss_eval_sources(x_stacked, x_s)
SDR += bss[0]
SIR += bss[1]
SAR += bss[2]
print(np.sum(SAR) / 200)
print(np.sum(SDR) / 200)
print(np.sum(SIR) / 200)
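    # Note (added): bss_eval_sources returns one score per source, so each running sum
    # SDR/SIR/SAR accumulates 100 mixtures x 2 sources = 200 individual scores;
    # np.sum(...) / 200 therefore prints the mean score over all mixtures and sources.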
| [
"utils.shift",
"theano.tensor.lscalar",
"theano.tensor.mul",
"numpy.random.rand",
"theano.tensor.dot",
"mir_eval.separation.bss_eval_sources",
"utils.convolution",
"numpy.stack",
"numpy.dot",
"theano.tensor.zeros",
"numpy.vstack",
"numpy.abs",
"numpy.ones",
"theano.tensor.sum",
"utils.do_STFT_on_data",
"sklearn.cluster.MiniBatchKMeans",
"theano.tensor.fmatrix",
"theano.dot",
"time.time",
"utils.create_mixture",
"theano.tensor.pow",
"numpy.sum"
] | [((7458, 7470), 'numpy.dot', 'np.dot', (['W', 'H'], {}), '(W, H)\n', (7464, 7470), True, 'import numpy as np\n'), ((2433, 2444), 'theano.tensor.lscalar', 'T.lscalar', ([], {}), '()\n', (2442, 2444), True, 'from theano import tensor as T\n'), ((2457, 2468), 'theano.tensor.fmatrix', 'T.fmatrix', ([], {}), '()\n', (2466, 2468), True, 'from theano import tensor as T\n'), ((10022, 10050), 'utils.create_mixture', 'create_mixture', (['corpus_train'], {}), '(corpus_train)\n', (10036, 10050), False, 'from utils import create_mixture, do_STFT_on_data\n'), ((10069, 10097), 'utils.do_STFT_on_data', 'do_STFT_on_data', (['mix', 'x1', 'x2'], {}), '(mix, x1, x2)\n', (10084, 10097), False, 'from utils import create_mixture, do_STFT_on_data\n'), ((10392, 10515), 'sklearn.cluster.MiniBatchKMeans', 'MiniBatchKMeans', ([], {'init': '"""k-means++"""', 'n_clusters': 'clusters', 'batch_size': 'sources', 'n_init': '(10)', 'max_no_improvement': '(50)', 'verbose': '(0)'}), "(init='k-means++', n_clusters=clusters, batch_size=sources,\n n_init=10, max_no_improvement=50, verbose=0)\n", (10407, 10515), False, 'from sklearn.cluster import MiniBatchKMeans\n'), ((10767, 10780), 'numpy.stack', 'np.stack', (['x_s'], {}), '(x_s)\n', (10775, 10780), True, 'import numpy as np\n'), ((10801, 10850), 'numpy.vstack', 'np.vstack', (['(x1[:x_s.shape[1]], x2[:x_s.shape[1]])'], {}), '((x1[:x_s.shape[1]], x2[:x_s.shape[1]]))\n', (10810, 10850), True, 'import numpy as np\n'), ((10865, 10917), 'mir_eval.separation.bss_eval_sources', 'mir_eval.separation.bss_eval_sources', (['x_stacked', 'x_s'], {}), '(x_stacked, x_s)\n', (10901, 10917), False, 'import mir_eval\n'), ((4658, 4669), 'time.time', 'time.time', ([], {}), '()\n', (4667, 4669), False, 'import time\n'), ((4781, 4803), 'theano.tensor.sum', 'T.sum', (['self._W'], {'axis': '(0)'}), '(self._W, axis=0)\n', (4786, 4803), True, 'from theano import tensor as T\n'), ((5698, 5715), 'utils.convolution', 'convolution', (['W', 'H'], {}), '(W, H)\n', (5709, 5715), False, 'from utils import convolution, shift\n'), ((5938, 5958), 'theano.tensor.zeros', 'T.zeros', (['V_hat.shape'], {}), '(V_hat.shape)\n', (5945, 5958), True, 'from theano import tensor as T\n'), ((6088, 6109), 'theano.tensor.mul', 'T.mul', (['(V_k / V_hat)', 'X'], {}), '(V_k / V_hat, X)\n', (6093, 6109), True, 'from theano import tensor as T\n'), ((6174, 6186), 'theano.dot', 'th.dot', (['W', 'H'], {}), '(W, H)\n', (6180, 6186), True, 'import theano as th\n'), ((7555, 7569), 'numpy.dot', 'np.dot', (['W', 'H_k'], {}), '(W, H_k)\n', (7561, 7569), True, 'import numpy as np\n'), ((10127, 10136), 'numpy.abs', 'np.abs', (['X'], {}), '(X)\n', (10133, 10136), True, 'import numpy as np\n'), ((10995, 11006), 'numpy.sum', 'np.sum', (['SAR'], {}), '(SAR)\n', (11001, 11006), True, 'import numpy as np\n'), ((11024, 11035), 'numpy.sum', 'np.sum', (['SDR'], {}), '(SDR)\n', (11030, 11035), True, 'import numpy as np\n'), ((11053, 11064), 'numpy.sum', 'np.sum', (['SIR'], {}), '(SIR)\n', (11059, 11064), True, 'import numpy as np\n'), ((4843, 4859), 'theano.tensor.pow', 'T.pow', (['scale', '(-1)'], {}), '(scale, -1)\n', (4848, 4859), True, 'from theano import tensor as T\n'), ((6405, 6416), 'theano.tensor.dot', 'T.dot', (['W', 'H'], {}), '(W, H)\n', (6410, 6416), True, 'from theano import tensor as T\n'), ((2277, 2324), 'numpy.random.rand', 'np.random.rand', (['self._sources', 'self._time_steps'], {}), '(self._sources, self._time_steps)\n', (2291, 2324), True, 'import numpy as np\n'), ((2325, 2367), 'numpy.ones', 'np.ones', (['(self._sources, 
self._time_steps)'], {}), '((self._sources, self._time_steps))\n', (2332, 2367), True, 'import numpy as np\n'), ((4933, 4949), 'theano.tensor.pow', 'T.pow', (['scale', '(-1)'], {}), '(scale, -1)\n', (4938, 4949), True, 'from theano import tensor as T\n'), ((6056, 6067), 'utils.shift', 'shift', (['H', 't'], {}), '(H, t)\n', (6061, 6067), False, 'from utils import convolution, shift\n'), ((1811, 1868), 'numpy.random.rand', 'np.random.rand', (['self._frequencies', 'self._sources', 'self._T'], {}), '(self._frequencies, self._sources, self._T)\n', (1825, 1868), True, 'import numpy as np\n'), ((1869, 1921), 'numpy.ones', 'np.ones', (['(self._frequencies, self._sources, self._T)'], {}), '((self._frequencies, self._sources, self._T))\n', (1876, 1921), True, 'import numpy as np\n'), ((2089, 2137), 'numpy.random.rand', 'np.random.rand', (['self._frequencies', 'self._sources'], {}), '(self._frequencies, self._sources)\n', (2103, 2137), True, 'import numpy as np\n'), ((2138, 2181), 'numpy.ones', 'np.ones', (['(self._frequencies, self._sources)'], {}), '((self._frequencies, self._sources))\n', (2145, 2181), True, 'import numpy as np\n'), ((5104, 5115), 'time.time', 'time.time', ([], {}), '()\n', (5113, 5115), False, 'import time\n')] |
#!/usr/bin/env python
################################################################################
# manage.py
# Runs the main operations for the server
# To make database migrations:
# - python manage.py makemigrations
# - python manage.py migrate
#
# To run server:
# - python manage.py runserver
################################################################################
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "prototype.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| [
"os.environ.setdefault",
"django.core.management.execute_from_command_line"
] | [((439, 508), 'os.environ.setdefault', 'os.environ.setdefault', (['"""DJANGO_SETTINGS_MODULE"""', '"""prototype.settings"""'], {}), "('DJANGO_SETTINGS_MODULE', 'prototype.settings')\n", (460, 508), False, 'import os\n'), ((580, 615), 'django.core.management.execute_from_command_line', 'execute_from_command_line', (['sys.argv'], {}), '(sys.argv)\n', (605, 615), False, 'from django.core.management import execute_from_command_line\n')] |
from tvm.topi.utils import get_stages_and_cfgs
from tvm import te
def schedule_depth_conv_fused_nhwc_auto(cfg, outs):
outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
s = te.create_schedule([x.op for x in outs])
stage_dict, layer_output_dict, param_dict, _, bn_relu, _ = get_stages_and_cfgs(s, outs)
# ######## Input data, weights, BN, etc
s[stage_dict['FusedConv2D_PaddedInput_0']].compute_inline()
PaddedSharedInput = s.cache_read(stage_dict['FusedConv2D_PaddedInput_0'], 'shared', [stage_dict['Output_0']])
FL_1 = s.cache_read(param_dict['Filter_0'], 'local', [stage_dict['Output_0']])
FS_2 = s.cache_read(param_dict['Filter_1'], 'shared', [stage_dict['Output_1']])
s[layer_output_dict['Layer_0']].set_scope('shared')
if bn_relu[0]:
s[stage_dict['Output_0_BiasAdd']].compute_inline()
s[stage_dict['Output_0']].set_scope('local')
BiasL_1 = s.cache_read(param_dict['Bias_0'], 'local', [stage_dict['Output_0_BiasAdd']])
DepthwiseLocalAccumulator = stage_dict['Output_0']
else:
DepthwiseLocalAccumulator = s.cache_write(layer_output_dict['Layer_0'], 'local')
if bn_relu[1]:
s[stage_dict['Output_1_BiasAdd']].compute_inline()
s[stage_dict['Output_1']].set_scope('local')
BiasL_2 = s.cache_read(param_dict['Bias_1'], 'local', [stage_dict['Output_1_BiasAdd']])
OL = stage_dict['Output_1']
else:
OL = s.cache_write(layer_output_dict['Layer_1'], 'local')
######## Blocks, threads and vthreads
block_x = te.thread_axis('blockIdx.x')
thread_x = te.thread_axis('threadIdx.x')
thread_y = te.thread_axis('threadIdx.y')
thread_z = te.thread_axis('threadIdx.z')
vthread_x = te.thread_axis('vthread', name='vthread_x')
vthread_y = te.thread_axis('vthread', name='vthread_y')
vthread_z = te.thread_axis('vthread', name='vthread_z')
################################################################
######## Global output
n, h, w, c = s[layer_output_dict['Layer_1']].op.axis
ho, thz, thy, h = cfg['split_h'].apply(s, layer_output_dict['Layer_1'], h)
wo, vthy, w = cfg['split_w'].apply(s, layer_output_dict['Layer_1'], w)
recompute, reuse, thx = cfg['split_1_c'].apply(s, layer_output_dict['Layer_1'], c) # reuse > 1 ??
s[layer_output_dict['Layer_1']].reorder(n, ho, wo, recompute, reuse, vthy, thz, thy, thx, h, w)
fused_blx = s[layer_output_dict['Layer_1']].fuse(n, ho, wo, recompute)
s[layer_output_dict['Layer_1']].bind(fused_blx, block_x)
s[layer_output_dict['Layer_1']].bind(vthy, vthread_y)
s[layer_output_dict['Layer_1']].bind(reuse, vthread_x)
s[layer_output_dict['Layer_1']].bind(thz, thread_z)
s[layer_output_dict['Layer_1']].bind(thy, thread_y)
s[layer_output_dict['Layer_1']].bind(thx, thread_x)
num_thread_z = output_step_tile_size_h = cfg['split_h'].size[1]
num_thread_y = output_step_tile_size_w = cfg['split_h'].size[2]
num_thread_x = cfg['split_1_c'].size[-1]
output_tile_size_h = cfg['split_h'].size[1] * cfg['split_h'].size[2] * cfg['split_h'].size[3]
output_tile_size_w = cfg['split_w'].size[1] * cfg['split_w'].size[2]
######## Local output
s[OL].compute_at(s[layer_output_dict['Layer_1']], thx)
n, h, w, c = s[OL].op.axis
rc, _, _ = s[OL].op.reduce_axis
xocc, xoicc, xiicc = cfg['split_0_c'].apply(s, OL, rc)
s[OL].reorder(n, xocc, xoicc, h, w, c, xiicc)
if bn_relu[1]:
s[BiasL_2].compute_at(s[layer_output_dict['Layer_1']], thx)
######## Shared 1by1 filter
s[FS_2].compute_at(s[OL], xoicc)
h1, w1, i1, o1 = s[FS_2].op.axis
io = s[FS_2].fuse(i1, o1)
io, iox = s[FS_2].split(io, factor=num_thread_x * 4)
ioz, io = s[FS_2].split(io, nparts=num_thread_z)
ioy, io = s[FS_2].split(io, nparts=num_thread_y)
iox, io4 = s[FS_2].split(iox, factor=4)
s[FS_2].reorder(h1, w1, io, ioz, ioy, iox, io4)
s[FS_2].bind(iox, thread_x)
s[FS_2].bind(ioy, thread_y)
s[FS_2].bind(ioz, thread_z)
s[FS_2].vectorize(io4)
######## Intermediate output in shared memory
s[layer_output_dict['Layer_0']].compute_at(s[OL], xocc)
n, h, w, c = s[layer_output_dict['Layer_0']].op.axis
inter_co, inter_ci = s[layer_output_dict['Layer_0']].split(c, factor=num_thread_x)
ho, wo, h_tile, w_tile = s[layer_output_dict['Layer_0']].tile(h, w, x_factor=output_tile_size_h, y_factor=output_tile_size_w)
h_step, w_step, h_step_tile, w_step_tile = s[layer_output_dict['Layer_0']].tile(h_tile, w_tile, x_factor=output_step_tile_size_h, y_factor=output_step_tile_size_w)
s[layer_output_dict['Layer_0']].reorder(n, ho, wo, inter_co, h_step, w_step, h_step_tile, w_step_tile, inter_ci)
vthz = s[layer_output_dict['Layer_0']].fuse(h_step, w_step)
s[layer_output_dict['Layer_0']].bind(h_step_tile, thread_z)
s[layer_output_dict['Layer_0']].bind(w_step_tile, thread_y)
s[layer_output_dict['Layer_0']].bind(inter_ci, thread_x)
s[layer_output_dict['Layer_0']].bind(vthz, vthread_z)
######## Intermediate output local accumulator
s[DepthwiseLocalAccumulator].compute_at(s[layer_output_dict['Layer_0']], inter_ci)
ry, rx = s[DepthwiseLocalAccumulator].op.reduce_axis
n, h, w, c = s[DepthwiseLocalAccumulator].op.axis
s[DepthwiseLocalAccumulator].reorder(n, c, ry, rx, h, w)
if bn_relu[0]:
s[BiasL_1].compute_at(s[layer_output_dict['Layer_0']], inter_ci)
######## Depthwise filter
s[FL_1].compute_at(s[layer_output_dict['Layer_0']], inter_co)
# h, w, i, o = s[FL_1].op.axis
# io = s[FL_1].fuse(i, o)
# s[FL_1].bind(io, thread_x)
######## Shared Input
s[PaddedSharedInput].compute_at(s[layer_output_dict['Layer_0']], inter_co)
n, h, w, c = s[PaddedSharedInput].op.axis
co, ci = s[PaddedSharedInput].split(c, factor=num_thread_x)
ho, wo, h_tile, w_tile = s[PaddedSharedInput].tile(h, w, x_factor=output_step_tile_size_h, y_factor=output_step_tile_size_w)
s[PaddedSharedInput].reorder(co, n, ho, wo, h_tile, w_tile, ci)
s[PaddedSharedInput].bind(h_tile, thread_z)
s[PaddedSharedInput].bind(w_tile, thread_y)
s[PaddedSharedInput].bind(ci, thread_x)
return s
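# Note (added, not part of the original file): the schedule above maps the fused
# depthwise + 1x1 convolution onto the GPU roughly as follows -- blockIdx.x covers
# batch/height/width macro-tiles of the final output (plus a channel "recompute"
# factor), the vthread axes provide register-level reuse, threadIdx.z/threadIdx.y
# cover the spatial step tile and threadIdx.x covers output channels. The depthwise
# result is staged in shared memory and accumulated in registers before the 1x1
# stage consumes it. A driver is assumed to supply `cfg` from an AutoTVM tuning run
# and the fused op's output tensors in `outs`, following the usual TOPI convention.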
| [
"tvm.topi.utils.get_stages_and_cfgs",
"tvm.te.thread_axis",
"tvm.te.create_schedule"
] | [((193, 233), 'tvm.te.create_schedule', 'te.create_schedule', (['[x.op for x in outs]'], {}), '([x.op for x in outs])\n', (211, 233), False, 'from tvm import te\n'), ((297, 325), 'tvm.topi.utils.get_stages_and_cfgs', 'get_stages_and_cfgs', (['s', 'outs'], {}), '(s, outs)\n', (316, 325), False, 'from tvm.topi.utils import get_stages_and_cfgs\n'), ((1555, 1583), 'tvm.te.thread_axis', 'te.thread_axis', (['"""blockIdx.x"""'], {}), "('blockIdx.x')\n", (1569, 1583), False, 'from tvm import te\n'), ((1599, 1628), 'tvm.te.thread_axis', 'te.thread_axis', (['"""threadIdx.x"""'], {}), "('threadIdx.x')\n", (1613, 1628), False, 'from tvm import te\n'), ((1644, 1673), 'tvm.te.thread_axis', 'te.thread_axis', (['"""threadIdx.y"""'], {}), "('threadIdx.y')\n", (1658, 1673), False, 'from tvm import te\n'), ((1689, 1718), 'tvm.te.thread_axis', 'te.thread_axis', (['"""threadIdx.z"""'], {}), "('threadIdx.z')\n", (1703, 1718), False, 'from tvm import te\n'), ((1735, 1778), 'tvm.te.thread_axis', 'te.thread_axis', (['"""vthread"""'], {'name': '"""vthread_x"""'}), "('vthread', name='vthread_x')\n", (1749, 1778), False, 'from tvm import te\n'), ((1795, 1838), 'tvm.te.thread_axis', 'te.thread_axis', (['"""vthread"""'], {'name': '"""vthread_y"""'}), "('vthread', name='vthread_y')\n", (1809, 1838), False, 'from tvm import te\n'), ((1855, 1898), 'tvm.te.thread_axis', 'te.thread_axis', (['"""vthread"""'], {'name': '"""vthread_z"""'}), "('vthread', name='vthread_z')\n", (1869, 1898), False, 'from tvm import te\n')] |
from tests import TEST_ROLE, client
def test_role_base():
response = client.get(f'/role/{TEST_ROLE}')
assert response.status_code == 200
assert response.text.startswith("<html>\n <head>\n")
assert response.text.count(TEST_ROLE) == 17
| [
"tests.client.get"
] | [((75, 107), 'tests.client.get', 'client.get', (['f"""/role/{TEST_ROLE}"""'], {}), "(f'/role/{TEST_ROLE}')\n", (85, 107), False, 'from tests import TEST_ROLE, client\n')] |
# -*- coding: utf-8 -*-
from django.core.urlresolvers import reverse
from django_nose.tools import (
assert_code,
assert_contains,
assert_equal,
assert_not_contains,
)
from pontoon.base.models import (
Entity,
Locale,
Project,
Resource,
TranslatedResource,
Translation,
)
from pontoon.base.tests import (
EntityFactory,
LocaleFactory,
ProjectFactory,
ResourceFactory,
TestCase,
UserFactory,
)
class SuperuserTestCase(TestCase):
"""TestCase for tests that require a superuser to be logged in. """
def setUp(self):
self.user = UserFactory.create(is_superuser=True)
self.client.force_login(self.user)
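        # Note (added): force_login logs the user in without going through the
        # credential/password check, so every test in a subclass starts with an
        # already-authenticated superuser session.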
class AdministrationViewsTests(TestCase):
"""Test views of the administration app without having a logged in superuser.
"""
def test_manage_project_strings(self):
project = ProjectFactory.create(data_source='database', repositories=[])
url = reverse('pontoon.admin.project.strings', args=(project.slug,))
# Test with anonymous user.
response = self.client.get(url)
assert_code(response, 403)
# Test with a user that is not a superuser.
user = UserFactory.create()
self.client.force_login(user)
response = self.client.get(url)
assert_code(response, 403)
# Test with a superuser.
user.is_superuser = True
user.save()
response = self.client.get(url)
assert_code(response, 200)
class AdministrationViewsWithSuperuserTests(SuperuserTestCase):
"""Test views of the administration app with a superuser logged in by default.
"""
def test_manage_project(self):
url = reverse('pontoon.admin.project.new')
response = self.client.get(url)
assert_code(response, 200)
def test_manage_project_strings_bad_request(self):
        # Tests that an unknown project returns a 404 error.
url = reverse('pontoon.admin.project.strings', args=('unknown',))
response = self.client.get(url)
assert_code(response, 404)
def test_manage_project_strings_new(self):
project = ProjectFactory.create(data_source='database', repositories=[])
url = reverse('pontoon.admin.project.strings', args=(project.slug,))
# Test sending a well-formatted batch of strings.
new_strings = """Hey, I just met you
And this is crazy
But here's my number
So call me maybe?
"""
response = self.client.post(url, {'new_strings': new_strings})
assert_code(response, 200)
# Verify a resource has been created.
resources = list(Resource.objects.filter(project=project))
assert_equal(len(resources), 1)
assert_equal(resources[0].path, 'database')
# Verify all strings have been created as entities.
entities = list(Entity.objects.filter(resource__project=project))
assert_equal(len(entities), 4)
expected_strings = [
'Hey, I just met you',
'And this is crazy',
'But here\'s my number',
'So call me maybe?',
]
assert_equal(sorted(expected_strings), sorted(x.string for x in entities))
# Verify new strings appear on the page.
assert_contains(response, 'Hey, I just met you')
def test_manage_project_strings_translated_resource(self):
"""Test that adding new strings to a project enables translation of that
project on all enabled locales.
"""
locale_kl = LocaleFactory.create(code='kl', name='Klingon')
locale_gs = LocaleFactory.create(code='gs', name='Geonosian')
project = ProjectFactory.create(
data_source='database',
locales=[locale_kl, locale_gs],
repositories=[]
)
locales_count = 2
url = reverse('pontoon.admin.project.strings', args=(project.slug,))
new_strings = """
Morty, do you know what "Wubba lubba dub dub" means?
Oh that's just Rick's stupid non-sense catch phrase.
It's not.
In my people's tongue, it means "I am in great pain, please help me".
"""
strings_count = 4
response = self.client.post(url, {'new_strings': new_strings})
assert_code(response, 200)
# Verify no strings have been created as entities.
entities = list(Entity.objects.filter(resource__project=project))
assert_equal(len(entities), strings_count)
# Verify the resource has the right stats.
resources = Resource.objects.filter(project=project)
assert_equal(len(resources), 1)
resource = resources[0]
assert_equal(resource.total_strings, strings_count)
# Verify the correct TranslatedResource objects have been created.
translated_resources = TranslatedResource.objects.filter(resource__project=project)
assert_equal(len(translated_resources), locales_count)
# Verify stats have been correctly updated on locale, project and resource.
for tr in translated_resources:
assert_equal(tr.total_strings, strings_count)
project = Project.objects.get(id=project.id)
assert_equal(project.total_strings, strings_count * locales_count)
locale_kl = Locale.objects.get(id=locale_kl.id)
assert_equal(locale_kl.total_strings, strings_count)
locale_gs = Locale.objects.get(id=locale_gs.id)
assert_equal(locale_gs.total_strings, strings_count)
def test_manage_project_strings_new_all_empty(self):
"""Test that sending empty data doesn't create empty strings in the database.
"""
project = ProjectFactory.create(data_source='database', repositories=[])
url = reverse('pontoon.admin.project.strings', args=(project.slug,))
# Test sending a well-formatted batch of strings.
new_strings = " \n \n\n"
response = self.client.post(url, {'new_strings': new_strings})
assert_code(response, 200)
# Verify no strings have been created as entities.
entities = list(Entity.objects.filter(resource__project=project))
assert_equal(len(entities), 0)
def test_manage_project_strings_list(self):
project = ProjectFactory.create(data_source='database', repositories=[])
resource = ResourceFactory.create(project=project)
nb_entities = 2
entities = EntityFactory.create_batch(nb_entities, resource=resource)
url = reverse('pontoon.admin.project.strings', args=(project.slug,))
response = self.client.get(url)
assert_code(response, 200)
for i in range(nb_entities):
assert_contains(response, 'string %s' % i)
# Test editing strings and comments.
form_data = {
'form-TOTAL_FORMS': nb_entities,
'form-INITIAL_FORMS': nb_entities,
'form-MIN_NUM_FORMS': 0,
'form-MAX_NUM_FORMS': 1000,
'form-0-id': entities[0].id,
'form-0-string': 'changed 0',
'form-0-comment': 'Wubba lubba dub dub',
'form-1-id': entities[1].id,
'form-1-string': 'string 1',
'form-1-obsolete': 'on', # Remove this one.
}
response = self.client.post(url, form_data)
assert_code(response, 200)
assert_contains(response, 'changed 0')
assert_contains(response, 'Wubba lubba dub dub')
assert_not_contains(response, 'string 0')
assert_not_contains(response, 'string 1') # It's been removed.
def test_manage_project_strings_download_csv(self):
locale_kl = LocaleFactory.create(code='kl', name='Klingon')
locale_gs = LocaleFactory.create(code='gs', name='Geonosian')
project = ProjectFactory.create(
data_source='database',
locales=[locale_kl, locale_gs],
repositories=[]
)
url = reverse('pontoon.admin.project.strings', args=(project.slug,))
new_strings = """
And on the pedestal these words appear:
'My name is Ozymandias, king of kings:
Look on my works, ye Mighty, and despair!'
"""
response = self.client.post(url, {'new_strings': new_strings})
assert_code(response, 200)
# Test downloading the data.
response = self.client.get(url, {'format': 'csv'})
assert_code(response, 200)
assert_equal(response._headers['content-type'], ('Content-Type', 'text/csv'))
# Verify the original content is here.
assert_contains(response, 'pedestal')
assert_contains(response, 'Ozymandias')
assert_contains(response, 'Mighty')
# Verify we have the locale columns.
assert_contains(response, 'kl')
assert_contains(response, 'gs')
# Now add some translations.
entity = Entity.objects.filter(string='And on the pedestal these words appear:')[0]
Translation(
string='Et sur le piédestal il y a ces mots :',
entity=entity,
locale=locale_kl,
approved=True,
).save()
Translation(
string='Und auf dem Sockel steht die Schrift: ‚Mein Name',
entity=entity,
locale=locale_gs,
approved=True,
).save()
entity = Entity.objects.filter(string='\'My name is Ozymandias, king of kings:')[0]
Translation(
string='"Mon nom est Ozymandias, Roi des Rois.',
entity=entity,
locale=locale_kl,
approved=True,
).save()
Translation(
string='Ist Osymandias, aller Kön’ge König: –',
entity=entity,
locale=locale_gs,
approved=True,
).save()
entity = Entity.objects.filter(string='Look on my works, ye Mighty, and despair!\'')[0]
Translation(
string='Voyez mon œuvre, vous puissants, et désespérez !"',
entity=entity,
locale=locale_kl,
approved=True,
).save()
Translation(
string='Seht meine Werke, Mächt’ge, und erbebt!‘',
entity=entity,
locale=locale_gs,
approved=True,
).save()
response = self.client.get(url, {'format': 'csv'})
# Verify the translated content is here.
assert_contains(response, 'pedestal')
assert_contains(response, 'piédestal')
assert_contains(response, 'Sockel')
assert_contains(response, 'Mighty')
assert_contains(response, 'puissants')
assert_contains(response, 'Mächt’ge')
| [
"pontoon.base.tests.ResourceFactory.create",
"pontoon.base.models.Translation",
"django_nose.tools.assert_code",
"pontoon.base.tests.EntityFactory.create_batch",
"pontoon.base.models.Project.objects.get",
"pontoon.base.tests.UserFactory.create",
"pontoon.base.tests.ProjectFactory.create",
"pontoon.base.models.Locale.objects.get",
"django_nose.tools.assert_not_contains",
"django.core.urlresolvers.reverse",
"pontoon.base.tests.LocaleFactory.create",
"pontoon.base.models.Entity.objects.filter",
"pontoon.base.models.Resource.objects.filter",
"django_nose.tools.assert_contains",
"django_nose.tools.assert_equal",
"pontoon.base.models.TranslatedResource.objects.filter"
] | [((610, 647), 'pontoon.base.tests.UserFactory.create', 'UserFactory.create', ([], {'is_superuser': '(True)'}), '(is_superuser=True)\n', (628, 647), False, 'from pontoon.base.tests import EntityFactory, LocaleFactory, ProjectFactory, ResourceFactory, TestCase, UserFactory\n'), ((887, 949), 'pontoon.base.tests.ProjectFactory.create', 'ProjectFactory.create', ([], {'data_source': '"""database"""', 'repositories': '[]'}), "(data_source='database', repositories=[])\n", (908, 949), False, 'from pontoon.base.tests import EntityFactory, LocaleFactory, ProjectFactory, ResourceFactory, TestCase, UserFactory\n'), ((964, 1026), 'django.core.urlresolvers.reverse', 'reverse', (['"""pontoon.admin.project.strings"""'], {'args': '(project.slug,)'}), "('pontoon.admin.project.strings', args=(project.slug,))\n", (971, 1026), False, 'from django.core.urlresolvers import reverse\n'), ((1112, 1138), 'django_nose.tools.assert_code', 'assert_code', (['response', '(403)'], {}), '(response, 403)\n', (1123, 1138), False, 'from django_nose.tools import assert_code, assert_contains, assert_equal, assert_not_contains\n'), ((1207, 1227), 'pontoon.base.tests.UserFactory.create', 'UserFactory.create', ([], {}), '()\n', (1225, 1227), False, 'from pontoon.base.tests import EntityFactory, LocaleFactory, ProjectFactory, ResourceFactory, TestCase, UserFactory\n'), ((1315, 1341), 'django_nose.tools.assert_code', 'assert_code', (['response', '(403)'], {}), '(response, 403)\n', (1326, 1341), False, 'from django_nose.tools import assert_code, assert_contains, assert_equal, assert_not_contains\n'), ((1478, 1504), 'django_nose.tools.assert_code', 'assert_code', (['response', '(200)'], {}), '(response, 200)\n', (1489, 1504), False, 'from django_nose.tools import assert_code, assert_contains, assert_equal, assert_not_contains\n'), ((1712, 1748), 'django.core.urlresolvers.reverse', 'reverse', (['"""pontoon.admin.project.new"""'], {}), "('pontoon.admin.project.new')\n", (1719, 1748), False, 'from django.core.urlresolvers import reverse\n'), ((1797, 1823), 'django_nose.tools.assert_code', 'assert_code', (['response', '(200)'], {}), '(response, 200)\n', (1808, 1823), False, 'from django_nose.tools import assert_code, assert_contains, assert_equal, assert_not_contains\n'), ((1949, 2008), 'django.core.urlresolvers.reverse', 'reverse', (['"""pontoon.admin.project.strings"""'], {'args': "('unknown',)"}), "('pontoon.admin.project.strings', args=('unknown',))\n", (1956, 2008), False, 'from django.core.urlresolvers import reverse\n'), ((2057, 2083), 'django_nose.tools.assert_code', 'assert_code', (['response', '(404)'], {}), '(response, 404)\n', (2068, 2083), False, 'from django_nose.tools import assert_code, assert_contains, assert_equal, assert_not_contains\n'), ((2150, 2212), 'pontoon.base.tests.ProjectFactory.create', 'ProjectFactory.create', ([], {'data_source': '"""database"""', 'repositories': '[]'}), "(data_source='database', repositories=[])\n", (2171, 2212), False, 'from pontoon.base.tests import EntityFactory, LocaleFactory, ProjectFactory, ResourceFactory, TestCase, UserFactory\n'), ((2227, 2289), 'django.core.urlresolvers.reverse', 'reverse', (['"""pontoon.admin.project.strings"""'], {'args': '(project.slug,)'}), "('pontoon.admin.project.strings', args=(project.slug,))\n", (2234, 2289), False, 'from django.core.urlresolvers import reverse\n'), ((2578, 2604), 'django_nose.tools.assert_code', 'assert_code', (['response', '(200)'], {}), '(response, 200)\n', (2589, 2604), False, 'from django_nose.tools import assert_code, 
assert_contains, assert_equal, assert_not_contains\n'), ((2768, 2811), 'django_nose.tools.assert_equal', 'assert_equal', (['resources[0].path', '"""database"""'], {}), "(resources[0].path, 'database')\n", (2780, 2811), False, 'from django_nose.tools import assert_code, assert_contains, assert_equal, assert_not_contains\n'), ((3306, 3354), 'django_nose.tools.assert_contains', 'assert_contains', (['response', '"""Hey, I just met you"""'], {}), "(response, 'Hey, I just met you')\n", (3321, 3354), False, 'from django_nose.tools import assert_code, assert_contains, assert_equal, assert_not_contains\n'), ((3572, 3619), 'pontoon.base.tests.LocaleFactory.create', 'LocaleFactory.create', ([], {'code': '"""kl"""', 'name': '"""Klingon"""'}), "(code='kl', name='Klingon')\n", (3592, 3619), False, 'from pontoon.base.tests import EntityFactory, LocaleFactory, ProjectFactory, ResourceFactory, TestCase, UserFactory\n'), ((3640, 3689), 'pontoon.base.tests.LocaleFactory.create', 'LocaleFactory.create', ([], {'code': '"""gs"""', 'name': '"""Geonosian"""'}), "(code='gs', name='Geonosian')\n", (3660, 3689), False, 'from pontoon.base.tests import EntityFactory, LocaleFactory, ProjectFactory, ResourceFactory, TestCase, UserFactory\n'), ((3708, 3807), 'pontoon.base.tests.ProjectFactory.create', 'ProjectFactory.create', ([], {'data_source': '"""database"""', 'locales': '[locale_kl, locale_gs]', 'repositories': '[]'}), "(data_source='database', locales=[locale_kl, locale_gs\n ], repositories=[])\n", (3729, 3807), False, 'from pontoon.base.tests import EntityFactory, LocaleFactory, ProjectFactory, ResourceFactory, TestCase, UserFactory\n'), ((3890, 3952), 'django.core.urlresolvers.reverse', 'reverse', (['"""pontoon.admin.project.strings"""'], {'args': '(project.slug,)'}), "('pontoon.admin.project.strings', args=(project.slug,))\n", (3897, 3952), False, 'from django.core.urlresolvers import reverse\n'), ((4331, 4357), 'django_nose.tools.assert_code', 'assert_code', (['response', '(200)'], {}), '(response, 200)\n', (4342, 4357), False, 'from django_nose.tools import assert_code, assert_contains, assert_equal, assert_not_contains\n'), ((4615, 4655), 'pontoon.base.models.Resource.objects.filter', 'Resource.objects.filter', ([], {'project': 'project'}), '(project=project)\n', (4638, 4655), False, 'from pontoon.base.models import Entity, Locale, Project, Resource, TranslatedResource, Translation\n'), ((4736, 4787), 'django_nose.tools.assert_equal', 'assert_equal', (['resource.total_strings', 'strings_count'], {}), '(resource.total_strings, strings_count)\n', (4748, 4787), False, 'from django_nose.tools import assert_code, assert_contains, assert_equal, assert_not_contains\n'), ((4895, 4955), 'pontoon.base.models.TranslatedResource.objects.filter', 'TranslatedResource.objects.filter', ([], {'resource__project': 'project'}), '(resource__project=project)\n', (4928, 4955), False, 'from pontoon.base.models import Entity, Locale, Project, Resource, TranslatedResource, Translation\n'), ((5221, 5255), 'pontoon.base.models.Project.objects.get', 'Project.objects.get', ([], {'id': 'project.id'}), '(id=project.id)\n', (5240, 5255), False, 'from pontoon.base.models import Entity, Locale, Project, Resource, TranslatedResource, Translation\n'), ((5264, 5330), 'django_nose.tools.assert_equal', 'assert_equal', (['project.total_strings', '(strings_count * locales_count)'], {}), '(project.total_strings, strings_count * locales_count)\n', (5276, 5330), False, 'from django_nose.tools import assert_code, assert_contains, assert_equal, 
assert_not_contains\n'), ((5352, 5387), 'pontoon.base.models.Locale.objects.get', 'Locale.objects.get', ([], {'id': 'locale_kl.id'}), '(id=locale_kl.id)\n', (5370, 5387), False, 'from pontoon.base.models import Entity, Locale, Project, Resource, TranslatedResource, Translation\n'), ((5396, 5448), 'django_nose.tools.assert_equal', 'assert_equal', (['locale_kl.total_strings', 'strings_count'], {}), '(locale_kl.total_strings, strings_count)\n', (5408, 5448), False, 'from django_nose.tools import assert_code, assert_contains, assert_equal, assert_not_contains\n'), ((5470, 5505), 'pontoon.base.models.Locale.objects.get', 'Locale.objects.get', ([], {'id': 'locale_gs.id'}), '(id=locale_gs.id)\n', (5488, 5505), False, 'from pontoon.base.models import Entity, Locale, Project, Resource, TranslatedResource, Translation\n'), ((5514, 5566), 'django_nose.tools.assert_equal', 'assert_equal', (['locale_gs.total_strings', 'strings_count'], {}), '(locale_gs.total_strings, strings_count)\n', (5526, 5566), False, 'from django_nose.tools import assert_code, assert_contains, assert_equal, assert_not_contains\n'), ((5741, 5803), 'pontoon.base.tests.ProjectFactory.create', 'ProjectFactory.create', ([], {'data_source': '"""database"""', 'repositories': '[]'}), "(data_source='database', repositories=[])\n", (5762, 5803), False, 'from pontoon.base.tests import EntityFactory, LocaleFactory, ProjectFactory, ResourceFactory, TestCase, UserFactory\n'), ((5818, 5880), 'django.core.urlresolvers.reverse', 'reverse', (['"""pontoon.admin.project.strings"""'], {'args': '(project.slug,)'}), "('pontoon.admin.project.strings', args=(project.slug,))\n", (5825, 5880), False, 'from django.core.urlresolvers import reverse\n'), ((6055, 6081), 'django_nose.tools.assert_code', 'assert_code', (['response', '(200)'], {}), '(response, 200)\n', (6066, 6081), False, 'from django_nose.tools import assert_code, assert_contains, assert_equal, assert_not_contains\n'), ((6322, 6384), 'pontoon.base.tests.ProjectFactory.create', 'ProjectFactory.create', ([], {'data_source': '"""database"""', 'repositories': '[]'}), "(data_source='database', repositories=[])\n", (6343, 6384), False, 'from pontoon.base.tests import EntityFactory, LocaleFactory, ProjectFactory, ResourceFactory, TestCase, UserFactory\n'), ((6404, 6443), 'pontoon.base.tests.ResourceFactory.create', 'ResourceFactory.create', ([], {'project': 'project'}), '(project=project)\n', (6426, 6443), False, 'from pontoon.base.tests import EntityFactory, LocaleFactory, ProjectFactory, ResourceFactory, TestCase, UserFactory\n'), ((6487, 6545), 'pontoon.base.tests.EntityFactory.create_batch', 'EntityFactory.create_batch', (['nb_entities'], {'resource': 'resource'}), '(nb_entities, resource=resource)\n', (6513, 6545), False, 'from pontoon.base.tests import EntityFactory, LocaleFactory, ProjectFactory, ResourceFactory, TestCase, UserFactory\n'), ((6561, 6623), 'django.core.urlresolvers.reverse', 'reverse', (['"""pontoon.admin.project.strings"""'], {'args': '(project.slug,)'}), "('pontoon.admin.project.strings', args=(project.slug,))\n", (6568, 6623), False, 'from django.core.urlresolvers import reverse\n'), ((6673, 6699), 'django_nose.tools.assert_code', 'assert_code', (['response', '(200)'], {}), '(response, 200)\n', (6684, 6699), False, 'from django_nose.tools import assert_code, assert_contains, assert_equal, assert_not_contains\n'), ((7375, 7401), 'django_nose.tools.assert_code', 'assert_code', (['response', '(200)'], {}), '(response, 200)\n', (7386, 7401), False, 'from django_nose.tools import 
assert_code, assert_contains, assert_equal, assert_not_contains\n'), ((7410, 7448), 'django_nose.tools.assert_contains', 'assert_contains', (['response', '"""changed 0"""'], {}), "(response, 'changed 0')\n", (7425, 7448), False, 'from django_nose.tools import assert_code, assert_contains, assert_equal, assert_not_contains\n'), ((7457, 7505), 'django_nose.tools.assert_contains', 'assert_contains', (['response', '"""Wubba lubba dub dub"""'], {}), "(response, 'Wubba lubba dub dub')\n", (7472, 7505), False, 'from django_nose.tools import assert_code, assert_contains, assert_equal, assert_not_contains\n'), ((7514, 7555), 'django_nose.tools.assert_not_contains', 'assert_not_contains', (['response', '"""string 0"""'], {}), "(response, 'string 0')\n", (7533, 7555), False, 'from django_nose.tools import assert_code, assert_contains, assert_equal, assert_not_contains\n'), ((7564, 7605), 'django_nose.tools.assert_not_contains', 'assert_not_contains', (['response', '"""string 1"""'], {}), "(response, 'string 1')\n", (7583, 7605), False, 'from django_nose.tools import assert_code, assert_contains, assert_equal, assert_not_contains\n'), ((7705, 7752), 'pontoon.base.tests.LocaleFactory.create', 'LocaleFactory.create', ([], {'code': '"""kl"""', 'name': '"""Klingon"""'}), "(code='kl', name='Klingon')\n", (7725, 7752), False, 'from pontoon.base.tests import EntityFactory, LocaleFactory, ProjectFactory, ResourceFactory, TestCase, UserFactory\n'), ((7773, 7822), 'pontoon.base.tests.LocaleFactory.create', 'LocaleFactory.create', ([], {'code': '"""gs"""', 'name': '"""Geonosian"""'}), "(code='gs', name='Geonosian')\n", (7793, 7822), False, 'from pontoon.base.tests import EntityFactory, LocaleFactory, ProjectFactory, ResourceFactory, TestCase, UserFactory\n'), ((7841, 7940), 'pontoon.base.tests.ProjectFactory.create', 'ProjectFactory.create', ([], {'data_source': '"""database"""', 'locales': '[locale_kl, locale_gs]', 'repositories': '[]'}), "(data_source='database', locales=[locale_kl, locale_gs\n ], repositories=[])\n", (7862, 7940), False, 'from pontoon.base.tests import EntityFactory, LocaleFactory, ProjectFactory, ResourceFactory, TestCase, UserFactory\n'), ((7997, 8059), 'django.core.urlresolvers.reverse', 'reverse', (['"""pontoon.admin.project.strings"""'], {'args': '(project.slug,)'}), "('pontoon.admin.project.strings', args=(project.slug,))\n", (8004, 8059), False, 'from django.core.urlresolvers import reverse\n'), ((8337, 8363), 'django_nose.tools.assert_code', 'assert_code', (['response', '(200)'], {}), '(response, 200)\n', (8348, 8363), False, 'from django_nose.tools import assert_code, assert_contains, assert_equal, assert_not_contains\n'), ((8469, 8495), 'django_nose.tools.assert_code', 'assert_code', (['response', '(200)'], {}), '(response, 200)\n', (8480, 8495), False, 'from django_nose.tools import assert_code, assert_contains, assert_equal, assert_not_contains\n'), ((8504, 8581), 'django_nose.tools.assert_equal', 'assert_equal', (["response._headers['content-type']", "('Content-Type', 'text/csv')"], {}), "(response._headers['content-type'], ('Content-Type', 'text/csv'))\n", (8516, 8581), False, 'from django_nose.tools import assert_code, assert_contains, assert_equal, assert_not_contains\n'), ((8638, 8675), 'django_nose.tools.assert_contains', 'assert_contains', (['response', '"""pedestal"""'], {}), "(response, 'pedestal')\n", (8653, 8675), False, 'from django_nose.tools import assert_code, assert_contains, assert_equal, assert_not_contains\n'), ((8684, 8723), 'django_nose.tools.assert_contains', 
'assert_contains', (['response', '"""Ozymandias"""'], {}), "(response, 'Ozymandias')\n", (8699, 8723), False, 'from django_nose.tools import assert_code, assert_contains, assert_equal, assert_not_contains\n'), ((8732, 8767), 'django_nose.tools.assert_contains', 'assert_contains', (['response', '"""Mighty"""'], {}), "(response, 'Mighty')\n", (8747, 8767), False, 'from django_nose.tools import assert_code, assert_contains, assert_equal, assert_not_contains\n'), ((8822, 8853), 'django_nose.tools.assert_contains', 'assert_contains', (['response', '"""kl"""'], {}), "(response, 'kl')\n", (8837, 8853), False, 'from django_nose.tools import assert_code, assert_contains, assert_equal, assert_not_contains\n'), ((8862, 8893), 'django_nose.tools.assert_contains', 'assert_contains', (['response', '"""gs"""'], {}), "(response, 'gs')\n", (8877, 8893), False, 'from django_nose.tools import assert_code, assert_contains, assert_equal, assert_not_contains\n'), ((10451, 10488), 'django_nose.tools.assert_contains', 'assert_contains', (['response', '"""pedestal"""'], {}), "(response, 'pedestal')\n", (10466, 10488), False, 'from django_nose.tools import assert_code, assert_contains, assert_equal, assert_not_contains\n'), ((10497, 10535), 'django_nose.tools.assert_contains', 'assert_contains', (['response', '"""piédestal"""'], {}), "(response, 'piédestal')\n", (10512, 10535), False, 'from django_nose.tools import assert_code, assert_contains, assert_equal, assert_not_contains\n'), ((10544, 10579), 'django_nose.tools.assert_contains', 'assert_contains', (['response', '"""Sockel"""'], {}), "(response, 'Sockel')\n", (10559, 10579), False, 'from django_nose.tools import assert_code, assert_contains, assert_equal, assert_not_contains\n'), ((10589, 10624), 'django_nose.tools.assert_contains', 'assert_contains', (['response', '"""Mighty"""'], {}), "(response, 'Mighty')\n", (10604, 10624), False, 'from django_nose.tools import assert_code, assert_contains, assert_equal, assert_not_contains\n'), ((10633, 10671), 'django_nose.tools.assert_contains', 'assert_contains', (['response', '"""puissants"""'], {}), "(response, 'puissants')\n", (10648, 10671), False, 'from django_nose.tools import assert_code, assert_contains, assert_equal, assert_not_contains\n'), ((10680, 10717), 'django_nose.tools.assert_contains', 'assert_contains', (['response', '"""Mächt’ge"""'], {}), "(response, 'Mächt’ge')\n", (10695, 10717), False, 'from django_nose.tools import assert_code, assert_contains, assert_equal, assert_not_contains\n'), ((2677, 2717), 'pontoon.base.models.Resource.objects.filter', 'Resource.objects.filter', ([], {'project': 'project'}), '(project=project)\n', (2700, 2717), False, 'from pontoon.base.models import Entity, Locale, Project, Resource, TranslatedResource, Translation\n'), ((2897, 2945), 'pontoon.base.models.Entity.objects.filter', 'Entity.objects.filter', ([], {'resource__project': 'project'}), '(resource__project=project)\n', (2918, 2945), False, 'from pontoon.base.models import Entity, Locale, Project, Resource, TranslatedResource, Translation\n'), ((4442, 4490), 'pontoon.base.models.Entity.objects.filter', 'Entity.objects.filter', ([], {'resource__project': 'project'}), '(resource__project=project)\n', (4463, 4490), False, 'from pontoon.base.models import Entity, Locale, Project, Resource, TranslatedResource, Translation\n'), ((5156, 5201), 'django_nose.tools.assert_equal', 'assert_equal', (['tr.total_strings', 'strings_count'], {}), '(tr.total_strings, strings_count)\n', (5168, 5201), False, 'from django_nose.tools 
import assert_code, assert_contains, assert_equal, assert_not_contains\n'), ((6166, 6214), 'pontoon.base.models.Entity.objects.filter', 'Entity.objects.filter', ([], {'resource__project': 'project'}), '(resource__project=project)\n', (6187, 6214), False, 'from pontoon.base.models import Entity, Locale, Project, Resource, TranslatedResource, Translation\n'), ((6749, 6791), 'django_nose.tools.assert_contains', 'assert_contains', (['response', "('string %s' % i)"], {}), "(response, 'string %s' % i)\n", (6764, 6791), False, 'from django_nose.tools import assert_code, assert_contains, assert_equal, assert_not_contains\n'), ((8949, 9020), 'pontoon.base.models.Entity.objects.filter', 'Entity.objects.filter', ([], {'string': '"""And on the pedestal these words appear:"""'}), "(string='And on the pedestal these words appear:')\n", (8970, 9020), False, 'from pontoon.base.models import Entity, Locale, Project, Resource, TranslatedResource, Translation\n'), ((9417, 9487), 'pontoon.base.models.Entity.objects.filter', 'Entity.objects.filter', ([], {'string': '"""\'My name is Ozymandias, king of kings:"""'}), '(string="\'My name is Ozymandias, king of kings:")\n', (9438, 9487), False, 'from pontoon.base.models import Entity, Locale, Project, Resource, TranslatedResource, Translation\n'), ((9875, 9949), 'pontoon.base.models.Entity.objects.filter', 'Entity.objects.filter', ([], {'string': '"""Look on my works, ye Mighty, and despair!\'"""'}), '(string="Look on my works, ye Mighty, and despair!\'")\n', (9896, 9949), False, 'from pontoon.base.models import Entity, Locale, Project, Resource, TranslatedResource, Translation\n'), ((9032, 9143), 'pontoon.base.models.Translation', 'Translation', ([], {'string': '"""Et sur le piédestal il y a ces mots :"""', 'entity': 'entity', 'locale': 'locale_kl', 'approved': '(True)'}), "(string='Et sur le piédestal il y a ces mots :', entity=entity,\n locale=locale_kl, approved=True)\n", (9043, 9143), False, 'from pontoon.base.models import Entity, Locale, Project, Resource, TranslatedResource, Translation\n'), ((9214, 9336), 'pontoon.base.models.Translation', 'Translation', ([], {'string': '"""Und auf dem Sockel steht die Schrift: ‚Mein Name"""', 'entity': 'entity', 'locale': 'locale_gs', 'approved': '(True)'}), "(string='Und auf dem Sockel steht die Schrift: ‚Mein Name',\n entity=entity, locale=locale_gs, approved=True)\n", (9225, 9336), False, 'from pontoon.base.models import Entity, Locale, Project, Resource, TranslatedResource, Translation\n'), ((9500, 9612), 'pontoon.base.models.Translation', 'Translation', ([], {'string': '""""Mon nom est Ozymandias, Roi des Rois."""', 'entity': 'entity', 'locale': 'locale_kl', 'approved': '(True)'}), '(string=\'"Mon nom est Ozymandias, Roi des Rois.\', entity=entity,\n locale=locale_kl, approved=True)\n', (9511, 9612), False, 'from pontoon.base.models import Entity, Locale, Project, Resource, TranslatedResource, Translation\n'), ((9683, 9794), 'pontoon.base.models.Translation', 'Translation', ([], {'string': '"""Ist Osymandias, aller Kön’ge König: –"""', 'entity': 'entity', 'locale': 'locale_gs', 'approved': '(True)'}), "(string='Ist Osymandias, aller Kön’ge König: –', entity=entity,\n locale=locale_gs, approved=True)\n", (9694, 9794), False, 'from pontoon.base.models import Entity, Locale, Project, Resource, TranslatedResource, Translation\n'), ((9962, 10085), 'pontoon.base.models.Translation', 'Translation', ([], {'string': '"""Voyez mon œuvre, vous puissants, et désespérez !\\""""', 'entity': 'entity', 'locale': 'locale_kl', 
'approved': '(True)'}), '(string=\'Voyez mon œuvre, vous puissants, et désespérez !"\',\n entity=entity, locale=locale_kl, approved=True)\n', (9973, 10085), False, 'from pontoon.base.models import Entity, Locale, Project, Resource, TranslatedResource, Translation\n'), ((10156, 10271), 'pontoon.base.models.Translation', 'Translation', ([], {'string': '"""Seht meine Werke, Mächt’ge, und erbebt!‘"""', 'entity': 'entity', 'locale': 'locale_gs', 'approved': '(True)'}), "(string='Seht meine Werke, Mächt’ge, und erbebt!‘', entity=\n entity, locale=locale_gs, approved=True)\n", (10167, 10271), False, 'from pontoon.base.models import Entity, Locale, Project, Resource, TranslatedResource, Translation\n')] |
import numpy as np
import matplotlib.pyplot as plt
def drawGraph( xLabel, yLabel, printFigure, saveFigure, saveLink, *args ):
plt.plot(*args)
plt.xlabel(xLabel)
plt.ylabel(yLabel)
plt.show()
return
| [
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
] | [((133, 148), 'matplotlib.pyplot.plot', 'plt.plot', (['*args'], {}), '(*args)\n', (141, 148), True, 'import matplotlib.pyplot as plt\n'), ((153, 171), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['xLabel'], {}), '(xLabel)\n', (163, 171), True, 'import matplotlib.pyplot as plt\n'), ((176, 194), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['yLabel'], {}), '(yLabel)\n', (186, 194), True, 'import matplotlib.pyplot as plt\n'), ((199, 209), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (207, 209), True, 'import matplotlib.pyplot as plt\n')] |
"""
----------------------------------------------------------------------------------------------------------
This file is part of Sim-ATAV project and licensed under MIT license.
Copyright (c) 2018 <NAME>, <NAME>, <NAME>, <NAME>, <NAME>.
For questions please contact:
<NAME> (etuncali [at] asu.edu)
----------------------------------------------------------------------------------------------------------
"""
from Sim_ATAV.common.controller_communication_interface import ControllerCommunicationInterface
from Sim_ATAV.simulation_control.item_description import ItemDescription
from Sim_ATAV.vehicle_control.controller_commons.perf_evaluation.detection_performance_monitor \
import DetectionPerformanceMonitor
from Sim_ATAV.vehicle_control.controller_commons.perf_evaluation.visibility_monitor import VisibilityMonitor
class CommunicationModule(object):
def __init__(self, controller=None):
self.controller = controller
def receive_and_update(self, cur_time_ms):
if (self.controller is not None and
self.controller.receiver is not None and
self.controller.controller_comm_interface is not None):
messages = self.controller.controller_comm_interface.receive_all_communication(self.controller.receiver)
command_list = self.controller.controller_comm_interface.extract_all_commands_from_message(messages)
if self.controller.ground_truth_detector is not None:
self.controller.ground_truth_detector.update_detections(command_list, cur_time_ms)
for command_item in command_list:
command = command_item[0]
if command == ControllerCommunicationInterface.SET_CONTROLLER_PARAMETERS_MESSAGE:
parameter = command_item[1]
if parameter.get_vehicle_id() == self.controller.self_vhc_id:
if parameter.get_parameter_name() == 'target_position':
parameter_data = parameter.get_parameter_data()
if self.controller.path_planner is not None:
self.controller.path_planner.add_waypoint(parameter_data)
elif command == ControllerCommunicationInterface.SET_DETECTION_MONITOR:
detection_eval_config = command_item[1]
if detection_eval_config.vehicle_id == self.controller.self_vhc_id:
if self.controller.detection_perf_monitor is None:
self.controller.detection_perf_monitor = \
DetectionPerformanceMonitor(ground_truth_detector=self.controller.ground_truth_detector)
self.controller.detection_perf_monitor.set_perception_system(
self.controller.perception_system)
self.controller.detection_perf_monitor.add_monitor(detection_eval_config)
elif command == ControllerCommunicationInterface.SET_VISIBILITY_MONITOR:
visibility_eval_config = command_item[1]
if visibility_eval_config.vehicle_id == self.controller.self_vhc_id:
if self.controller.visibility_monitor is None:
self.controller.visibility_monitor = \
VisibilityMonitor(ground_truth_detector=self.controller.ground_truth_detector)
self.controller.visibility_monitor.add_monitor(visibility_eval_config)
if self.controller.path_planner is not None:
self.controller.path_planner.apply_path_changes()
# print(self.controller.path_planner.path_following_tools.target_path)
# print(self.controller.path_planner.path_following_tools.path_details)
def transmit_control_data(self, control_throttle, control_steering):
if self.controller.emitter is not None:
message = self.controller.controller_comm_interface.generate_control_action_message(
self.controller.self_vhc_id,
ItemDescription.VEHICLE_CONTROL_THROTTLE,
control_throttle)
self.controller.emitter.send(message)
message = self.controller.controller_comm_interface.generate_control_action_message(
self.controller.self_vhc_id,
ItemDescription.VEHICLE_CONTROL_STEERING,
control_steering)
self.controller.emitter.send(message)
def transmit_detection_evaluation_data(self):
if self.controller.emitter is not None and self.controller.detection_perf_monitor is not None:
detection_evaluations = self.controller.detection_perf_monitor.get_evaluations()
for (idx, detection_evaluation) in enumerate(detection_evaluations):
message = self.controller.controller_comm_interface.generate_detection_evaluation_message(
idx=idx,value=detection_evaluation)
self.controller.emitter.send(message)
def transmit_visibility_evaluation_data(self):
if self.controller.emitter is not None and self.controller.visibility_monitor is not None:
visibility_evaluations = self.controller.visibility_monitor.get_evaluations()
for (idx, visibility_evaluation) in enumerate(visibility_evaluations):
message = self.controller.controller_comm_interface.generate_visibility_evaluation_message(
idx=idx,value=visibility_evaluation)
self.controller.emitter.send(message)
| [
"Sim_ATAV.vehicle_control.controller_commons.perf_evaluation.visibility_monitor.VisibilityMonitor",
"Sim_ATAV.vehicle_control.controller_commons.perf_evaluation.detection_performance_monitor.DetectionPerformanceMonitor"
] | [((2607, 2700), 'Sim_ATAV.vehicle_control.controller_commons.perf_evaluation.detection_performance_monitor.DetectionPerformanceMonitor', 'DetectionPerformanceMonitor', ([], {'ground_truth_detector': 'self.controller.ground_truth_detector'}), '(ground_truth_detector=self.controller.\n ground_truth_detector)\n', (2634, 2700), False, 'from Sim_ATAV.vehicle_control.controller_commons.perf_evaluation.detection_performance_monitor import DetectionPerformanceMonitor\n'), ((3360, 3438), 'Sim_ATAV.vehicle_control.controller_commons.perf_evaluation.visibility_monitor.VisibilityMonitor', 'VisibilityMonitor', ([], {'ground_truth_detector': 'self.controller.ground_truth_detector'}), '(ground_truth_detector=self.controller.ground_truth_detector)\n', (3377, 3438), False, 'from Sim_ATAV.vehicle_control.controller_commons.perf_evaluation.visibility_monitor import VisibilityMonitor\n')] |
#!/bin/python
from contest.data.variables.description import read
if __name__ == '__main__':
description = read()
print(description)
| [
"contest.data.variables.description.read"
] | [((113, 119), 'contest.data.variables.description.read', 'read', ([], {}), '()\n', (117, 119), False, 'from contest.data.variables.description import read\n')] |
#
# Copyright (c) 2020, Neptune Labs Sp. z o.o.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import neptune
__all__ = [
'log_audio',
]
def log_audio(path_to_file, audio_name=None, experiment=None):
"""Logs audio file to 'artifacts/audio' with player.
Logs audio file to the 'artifacts/audio' in the experiment, where you can play it directly from the browser.
You can also download raw audio file to the local machine.
Just use "three vertical dots" located to the right from the player.
Args:
path_to_file (:obj:`str`): Path to audio file.
audio_name (:obj:`str`, optional, default is ``None``): Name to be displayed in artifacts/audio.
| If `None`, file name is used.
experiment (:obj:`neptune.experiments.Experiment`, optional, default is ``None``):
| For advanced users only. Pass Neptune
`Experiment <https://docs.neptune.ai/neptune-client/docs/experiment.html#neptune.experiments.Experiment>`_
object if you want to control to which experiment data is logged.
| If ``None``, log to currently active, and most recent experiment.
Example:
.. code:: python3
log_audio('audio-file.wav')
log_audio('/full/path/to/some/other/audio/file.mp3')
log_audio('/full/path/to/some/other/audio/file.mp3', 'my_audio')
Note:
Check out how the logged audio file looks in Neptune:
`here <https://ui.neptune.ai/o/shared/org/showroom/e/SHOW-1485/artifacts?path=audio%2F>`_.
"""
import base64
from io import StringIO
_exp = experiment if experiment else neptune
name, file_ext = os.path.split(path_to_file)[1].split('.')
if audio_name is None:
audio_name = name
else:
assert isinstance(audio_name, str), 'audio_name must be string, got {}'.format(type(audio_name))
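    # read the audio file and base64-encode it so it can be embedded directly in the HTML player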
encoded_sound = base64.b64encode(open(path_to_file, 'rb').read())
html = """<!DOCTYPE html>
<html>
<body>
<audio controls>
<source src='data:audio/{};base64,{}'>
</audio>
</body>
</html>""".format(file_ext, encoded_sound.decode())
buffer = StringIO(html)
buffer.seek(0)
_exp.log_artifact(buffer, 'audio/{}.html'.format(audio_name))
| [
"io.StringIO",
"os.path.split"
] | [((2713, 2727), 'io.StringIO', 'StringIO', (['html'], {}), '(html)\n', (2721, 2727), False, 'from io import StringIO\n'), ((2188, 2215), 'os.path.split', 'os.path.split', (['path_to_file'], {}), '(path_to_file)\n', (2201, 2215), False, 'import os\n')] |
from collections import namedtuple
from class_cluster import ClusterStructure, NoneType
FrameOfBlobs = namedtuple('FrameOfBlobs', 'I, Dy, Dx, G, M, blob_, dert__')
class CBlob(ClusterStructure):
# Dert params
I = int
Dy = int
Dx = int
G = int
M = int
# blob params
S = int
sign = NoneType
box = list
mask = object
root_dert__ = object
adj_blobs = list
fopen = bool
class CDeepBlob(ClusterStructure):
# Dert params
I = int
Dy = int
Dx = int
G = int
M = int
Dyy = int
Dyx = int
Dxy = int
Dxx = int
# blob params
S = int
sign = NoneType
box = list
mask = object
root_dert__ = object
adj_blobs = list
fopen = bool
fia = bool # flag: input is from comp angle
fca = bool # flag: current fork is comp angle
rdn = float
rng = int
Ls = int # for visibility and next-fork rdn
sub_layers = list | [
"collections.namedtuple"
] | [((104, 164), 'collections.namedtuple', 'namedtuple', (['"""FrameOfBlobs"""', '"""I, Dy, Dx, G, M, blob_, dert__"""'], {}), "('FrameOfBlobs', 'I, Dy, Dx, G, M, blob_, dert__')\n", (114, 164), False, 'from collections import namedtuple\n')] |
from psycopg2._json import Json
from psycopg2.extensions import register_adapter
# Enable dict-to-json conversion
register_adapter(dict, Json)
| [
"psycopg2.extensions.register_adapter"
] | [((115, 143), 'psycopg2.extensions.register_adapter', 'register_adapter', (['dict', 'Json'], {}), '(dict, Json)\n', (131, 143), False, 'from psycopg2.extensions import register_adapter\n')] |
"""
Simulator session base class for bonsai3 library
"""
__copyright__ = "Copyright 2020, Microsoft Corp."
# pyright: strict
import abc
from typing import Optional
import jsons
from .exceptions import RetryTimeoutError, ServiceError
from .logger import Logger
from .simulator_protocol import (
Schema,
ServiceConfig,
SimulatorEvent,
SimulatorEventRequest,
SimulatorEventType,
SimulatorInterface,
)
from .simulator_client import SimulatorClient
log = Logger()
class SimulatorSession:
_config = ServiceConfig() # type: ServiceConfig
_last_event = None # type: Optional[SimulatorEvent]
def __init__(self, config: ServiceConfig):
self._last_event = None
self._config = config
self._client = SimulatorClient(config)
# interface and state
def get_state(self) -> Schema:
"""Called to retreive the current state of the simulator. """
raise NotImplementedError("get_state not implemented.")
def get_interface(self) -> SimulatorInterface:
"""Called to retreive the simulator interface during registration. """
raise NotImplementedError("get_interface not implemented.")
def get_simulator_context(self) -> str:
"""
Called to retrieve the simulator context field for the SimulatorInterface.
"""
return self._config.simulator_context or ""
def halted(self) -> bool:
"""
        Should return whether the episode is halted, and
no further action will result in a state.
"""
raise NotImplementedError("halted not implemented.")
# callbacks
def registered(self):
"""Called after simulator is successfully registered. """
log.info("Registered.")
pass
@abc.abstractmethod
def episode_start(self, config: Schema) -> None:
"""Called at the start of each episode. """
raise NotImplementedError("episode_start not implemented.")
@abc.abstractmethod
def episode_step(self, action: Schema) -> None:
"""Called for each step of the episode. """
raise NotImplementedError("episode_step not implemented.")
def episode_finish(self, reason: str) -> None:
"""Called at the end of an episode. """
pass
# TODO
# def playback_start(self, config: dict):
# def playback_step(self, action: dict, stateDescription: dict, action: dict):
# def playback_finish(self):
def idle(self, callbackTime: int):
"""Called when the simulator should idle and perform no action. """
log.info("Idling...")
pass
def unregistered(self, reason: str):
"""Called when the simulator has been unregistered and should exit. """
log.info("Unregistered.")
pass
# main entrypoint for driving the simulation
def run(self) -> bool:
"""
Runs simulator. Returns false when the simulator should exit.
Example usage:
...
mySim = MySimulator(config)
while mySim.run():
continue
...
returns True if the simulator should continue.
returns False if the simulator should exit its simulation loop.
"""
# Flag to attempt to unregister sim on errors in SDK
unregister = False
try:
keep_going = False
if self._last_event:
log.info("Last Event: {}".format(self._last_event.type))
# if we've never gotten an event, register
if self._last_event is None:
self._last_event = self._client.register_simulator(self.get_interface())
# ...use the last event to request the next event
else:
event_request = SimulatorEventRequest()
event_request.sessionId = self._last_event.sessionId
event_request.sequenceId = self._last_event.sequenceId
event_request.halted = self.halted()
event_request.state = self.get_state()
log.debug("Event Request: {}".format(event_request.__dict__))
self._last_event = self.get_next_event(event_request)
# if we have an event, dispatch it
if self._last_event:
keep_going = self._dispatch_event(self._last_event)
# clear the last event if we should not keep going
if keep_going is False:
self._last_event = None
return keep_going
except KeyboardInterrupt:
unregister = True
except ServiceError as err:
unregister = True
log.error(err)
except RetryTimeoutError as err:
unregister = True
log.error(err)
except Exception as err:
unregister = True
log.error("Exiting due to the following error: {}".format(err))
raise err
finally:
if self._last_event is not None and unregister:
try:
log.debug("Attempting to unregister simulator.")
self._client.unregister_simulator(self._last_event.sessionId)
log.debug("Successfully unregistered simulator.")
except Exception as err:
log.error("Unregister simulator failed with error: {}".format(err))
return False
# implementation
def _event_from_json(self, json_text: str) -> SimulatorEvent:
"""Converts a json string into a SimulatorEvent."""
event_dict = jsons.loads(json_text)
log.debug("Event Response: {}".format(event_dict))
return SimulatorEvent(event_dict)
def get_next_event(self, event_request: SimulatorEventRequest) -> SimulatorEvent:
"""Requests the next event in the simulation.
Parameters
----------
event_request: SimulatorEventRequest
Returns
-------
SimulatorEvent
"""
return self._client.get_next_event(event_request)
def _dispatch_event(self, event: SimulatorEvent) -> bool:
"""
Examines the SimulatorEvent and calls one of the
dispatch functions for the appropriate event.
return false if there are no more events.
"""
if event.type == SimulatorEventType.Registered.name:
self.registered()
elif event.type == SimulatorEventType.EpisodeStart.name and event.episodeStart:
self.episode_start(event.episodeStart.config)
elif event.type == SimulatorEventType.EpisodeStep.name and event.episodeStep:
self.episode_step(event.episodeStep.action)
elif (
event.type == SimulatorEventType.EpisodeFinish.name and event.episodeFinish
):
self.episode_finish(event.episodeFinish.reason)
elif event.type == SimulatorEventType.Idle.name and event.idle:
try:
self.idle(event.idle.callbackTime)
except AttributeError:
# callbacktime is always 0. Sometimes the attribute is missing.
# Idle for 0 seconds if attribute is missing.
self.idle(0)
elif event.type == SimulatorEventType.Unregister.name and event.unregister:
self.unregistered(event.unregister.reason)
return False
return True
| [
"jsons.loads"
] | [((5559, 5581), 'jsons.loads', 'jsons.loads', (['json_text'], {}), '(json_text)\n', (5570, 5581), False, 'import jsons\n')] |
"""
This module contains tests for official test vectors.
Test vectors: https://github.com/paseto-standard/test-vectors
Docs: https://github.com/paseto-standard/paseto-spec
"""
import os
from typing import List
from unittest.mock import MagicMock, patch
import pytest
from paseto.paserk.keys import _create_asymmetric_key, _create_symmetric_key
from paseto.protocol import version4
from tests.conftest import get_test_vector
from tests.util import (
transform_test_case_for_v4_local,
transform_test_case_for_v4_public,
)
def get_test_cases(name: str) -> List[dict]:
"""Return test cases filtered by name."""
return [
test_case
for test_case in get_test_vector("v4")["tests"]
if test_case["name"].startswith(name)
]
# use a test nonce for reproducible tests
@patch.object(os, "urandom")
# pylint: disable=too-many-arguments
@pytest.mark.parametrize(
"test_name,nonce,raw_key_material,test_token,payload,footer,implicit_assertion",
[
transform_test_case_for_v4_local(test_case)
for test_case in get_test_cases("4-E")
],
)
def test_v4_local(
mock: MagicMock,
test_name: str,
nonce: bytes,
raw_key_material: bytes,
test_token: bytes,
payload: bytes,
footer: bytes,
implicit_assertion: bytes,
) -> None:
"""Tests for v4.local (Shared-Key Encryption)."""
# use non random nonce for reproducible tests
mock.return_value = nonce
key = _create_symmetric_key(4, raw_key_material)
# verify that encrypt produces expected token
assert test_token == version4.encrypt(
payload, key, footer, implicit_assertion
), test_name
# verify that decrypt produces expected payload
assert payload == version4.decrypt(
test_token, key, footer, implicit_assertion
), test_name
# pylint: disable=too-many-arguments
@pytest.mark.parametrize(
"test_name,raw_public_key,raw_secret_key,test_token,payload,footer,implicit_assertion",
[
transform_test_case_for_v4_public(test_case)
for test_case in get_test_cases("4-S")
],
)
def test_v4_public(
test_name: str,
raw_public_key: bytes,
raw_secret_key: bytes,
test_token: bytes,
payload: bytes,
footer: bytes,
implicit_assertion: bytes,
) -> None:
"""Tests for v4.public"""
public_key, secret_key = _create_asymmetric_key(4, raw_public_key, raw_secret_key)
# verify that sign produces expected token
assert test_token == version4.sign(
payload, secret_key, footer, implicit_assertion
), test_name
# verify that token contains expected payload
assert payload == version4.verify(
test_token, public_key, footer, implicit_assertion
), test_name
| [
"paseto.protocol.version4.verify",
"paseto.protocol.version4.sign",
"paseto.protocol.version4.decrypt",
"tests.util.transform_test_case_for_v4_local",
"tests.util.transform_test_case_for_v4_public",
"paseto.protocol.version4.encrypt",
"tests.conftest.get_test_vector",
"paseto.paserk.keys._create_asymmetric_key",
"unittest.mock.patch.object",
"paseto.paserk.keys._create_symmetric_key"
] | [((810, 837), 'unittest.mock.patch.object', 'patch.object', (['os', '"""urandom"""'], {}), "(os, 'urandom')\n", (822, 837), False, 'from unittest.mock import MagicMock, patch\n'), ((1456, 1498), 'paseto.paserk.keys._create_symmetric_key', '_create_symmetric_key', (['(4)', 'raw_key_material'], {}), '(4, raw_key_material)\n', (1477, 1498), False, 'from paseto.paserk.keys import _create_asymmetric_key, _create_symmetric_key\n'), ((2351, 2408), 'paseto.paserk.keys._create_asymmetric_key', '_create_asymmetric_key', (['(4)', 'raw_public_key', 'raw_secret_key'], {}), '(4, raw_public_key, raw_secret_key)\n', (2373, 2408), False, 'from paseto.paserk.keys import _create_asymmetric_key, _create_symmetric_key\n'), ((1575, 1633), 'paseto.protocol.version4.encrypt', 'version4.encrypt', (['payload', 'key', 'footer', 'implicit_assertion'], {}), '(payload, key, footer, implicit_assertion)\n', (1591, 1633), False, 'from paseto.protocol import version4\n'), ((1734, 1795), 'paseto.protocol.version4.decrypt', 'version4.decrypt', (['test_token', 'key', 'footer', 'implicit_assertion'], {}), '(test_token, key, footer, implicit_assertion)\n', (1750, 1795), False, 'from paseto.protocol import version4\n'), ((1000, 1043), 'tests.util.transform_test_case_for_v4_local', 'transform_test_case_for_v4_local', (['test_case'], {}), '(test_case)\n', (1032, 1043), False, 'from tests.util import transform_test_case_for_v4_local, transform_test_case_for_v4_public\n'), ((2482, 2544), 'paseto.protocol.version4.sign', 'version4.sign', (['payload', 'secret_key', 'footer', 'implicit_assertion'], {}), '(payload, secret_key, footer, implicit_assertion)\n', (2495, 2544), False, 'from paseto.protocol import version4\n'), ((2643, 2710), 'paseto.protocol.version4.verify', 'version4.verify', (['test_token', 'public_key', 'footer', 'implicit_assertion'], {}), '(test_token, public_key, footer, implicit_assertion)\n', (2658, 2710), False, 'from paseto.protocol import version4\n'), ((1992, 2036), 'tests.util.transform_test_case_for_v4_public', 'transform_test_case_for_v4_public', (['test_case'], {}), '(test_case)\n', (2025, 2036), False, 'from tests.util import transform_test_case_for_v4_local, transform_test_case_for_v4_public\n'), ((682, 703), 'tests.conftest.get_test_vector', 'get_test_vector', (['"""v4"""'], {}), "('v4')\n", (697, 703), False, 'from tests.conftest import get_test_vector\n')] |
from django.conf import settings
from django.conf.urls import patterns, include, url
from django.conf.urls.static import static
from django.views.generic import TemplateView
from django.contrib.admin.views.decorators import staff_member_required
from django.contrib import admin
urlpatterns = patterns("",
url(r"^$", TemplateView.as_view(template_name="homepage.html"), name="home"),
url(r"^bkadmin/$", staff_member_required(
TemplateView.as_view(template_name="badgekit_admin.html")), name="badgekit-admin"),
url(r"^admin/", include(admin.site.urls)),
url(r"^account/", include("account.urls")),
url(r"^bk/", include("badgekit_webhooks.urls")),
)
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| [
"django.views.generic.TemplateView.as_view",
"django.conf.urls.include",
"django.conf.urls.static.static"
] | [((696, 757), 'django.conf.urls.static.static', 'static', (['settings.MEDIA_URL'], {'document_root': 'settings.MEDIA_ROOT'}), '(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n', (702, 757), False, 'from django.conf.urls.static import static\n'), ((325, 376), 'django.views.generic.TemplateView.as_view', 'TemplateView.as_view', ([], {'template_name': '"""homepage.html"""'}), "(template_name='homepage.html')\n", (345, 376), False, 'from django.views.generic import TemplateView\n'), ((550, 574), 'django.conf.urls.include', 'include', (['admin.site.urls'], {}), '(admin.site.urls)\n', (557, 574), False, 'from django.conf.urls import patterns, include, url\n'), ((599, 622), 'django.conf.urls.include', 'include', (['"""account.urls"""'], {}), "('account.urls')\n", (606, 622), False, 'from django.conf.urls import patterns, include, url\n'), ((642, 675), 'django.conf.urls.include', 'include', (['"""badgekit_webhooks.urls"""'], {}), "('badgekit_webhooks.urls')\n", (649, 675), False, 'from django.conf.urls import patterns, include, url\n'), ((446, 503), 'django.views.generic.TemplateView.as_view', 'TemplateView.as_view', ([], {'template_name': '"""badgekit_admin.html"""'}), "(template_name='badgekit_admin.html')\n", (466, 503), False, 'from django.views.generic import TemplateView\n')] |
import event_calendar as ec
class DES_simulator:
''' Discrete event system simulator.
'''
def __init__(self):
''' Construct an event system simulator.
'''
# setup
self.time = 0
self.calendar = ec.event_calendar()
def add_event(self, t, f, data):
''' Add event to calendar.
Args:
t (float): fire time.
f (function): callback function.
data: custom callback data.
'''
self.calendar.push(t, f, data)
def simulate(self, model, T=24 * 3600):
        ''' Simulate discrete event system.
Args:
model (:obj:DES_model): discrete event system model.
T (float): time horizon.
'''
# discrete event simulator
model.clear()
model.starting_events(self)
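        # process events in chronological order until the calendar is empty or the horizon T is passed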
while (not self.calendar.is_empty()) and (self.time <= T):
self.time, f, data = self.calendar.pop() # get next event
f(self, data) # callback function
| [
"event_calendar.event_calendar"
] | [((249, 268), 'event_calendar.event_calendar', 'ec.event_calendar', ([], {}), '()\n', (266, 268), True, 'import event_calendar as ec\n')] |
"""
nba_odds_analysis.py
Author: <NAME>
Date Created: 1-29-2017
Description: Module that imports create_nba_game_dict from
             nba_games.py and loops through
each game dict to display actual results/totals vs. the original
betting lines
"""
from nba_games import create_nba_game_dict
IDBase = '40090013'
IDList = []
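# build five consecutive game IDs by appending the digits 0-4 to the base ID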
for i in range(5):
IDList.append(IDBase + str(i))
nba_game_dict = create_nba_game_dict(IDList)
for key in nba_game_dict:
print(key)
print('Away Team: ' + nba_game_dict[key]['away_team'])
print('\t' + 'Points: ' + str(nba_game_dict[key]['away_box_score']['TEAM']['pts']))
print('Home Team: ' + nba_game_dict[key]['home_team'])
print('\t' + 'Points: ' + str(nba_game_dict[key]['home_box_score']['TEAM']['pts']))
print('Betting Line: ' + str(nba_game_dict[key]['odds_details']['line']))
print('Over/Under Line: ' + str(nba_game_dict[key]['odds_details']['over/under']))
print('\t' + 'Actual total: ' + str(nba_game_dict[key]['away_box_score']['TEAM']['pts'] + nba_game_dict[key]['home_box_score']['TEAM']['pts']))
print('\n')
| [
"nba_games.create_nba_game_dict"
] | [((459, 487), 'nba_games.create_nba_game_dict', 'create_nba_game_dict', (['IDList'], {}), '(IDList)\n', (479, 487), False, 'from nba_games import create_nba_game_dict\n')] |
"""Main module."""
import numpy as np
import pycorrel.utils
def iman_conover(sample, copula, sampling_axis=0, variables_axis=1):
"""Correlates a given sample using the Iman-Conover method.
The Iman-Conover methods enables to reorder a sample
to incorporate the dependency structure from a given copula.
Parameters
----------
sample : ndarray
The sample to correlate.
copula : pycorrel.copulas.Copula
The copula used to correlate the sample.
sampling_axis : int, optional
The axis along which to reorder the sample, by default 0.
variables_axis : int, optional
The axis along which variables span, by default 1.
Returns
-------
ndarray
The reordered sample.
"""
if copula.dimension != sample.shape[variables_axis]:
raise ValueError("Dimension mismatch between sample and copula.")
copula_sample = copula.draw(sample.shape[:variables_axis] +
sample.shape[variables_axis + 1:])
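    # the copula sample comes back with variables on axis 0; move them to match the input layout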
copula_sample = np.moveaxis(copula_sample, 0, variables_axis)
copula_ranks = pycorrel.utils.rankdata(sample, sampling_axis)
return pycorrel.utils.reorder(sample, copula_ranks, sampling_axis)
| [
"numpy.moveaxis"
] | [((1027, 1072), 'numpy.moveaxis', 'np.moveaxis', (['copula_sample', '(0)', 'variables_axis'], {}), '(copula_sample, 0, variables_axis)\n', (1038, 1072), True, 'import numpy as np\n')] |
import numpy as np
from PIL import Image
def save_reconimgs_as_grid(input_imgs,output_imgs,path):
'''
    :param input_imgs: original input images, np.array (N,H,W,C); N is the number of images
    :param output_imgs: reconstructed output images, same layout as input_imgs
    :param path: file path the resulting image grid is saved to
    :return:
'''
input_imgs = np.asarray(input_imgs)
if input_imgs.shape[0] == 1:
input_imgs = np.squeeze(input_imgs, axis=0)
input_imgs = input_imgs[:10, :]
input_imgs = input_imgs * 255
input_imgs = input_imgs.astype(np.uint8)
input_imgs1, input_imgs2 = np.split(input_imgs, 2, axis=0)
output_imgs = output_imgs[:10, :]
output_imgs = output_imgs * 255
output_imgs = output_imgs.astype(np.uint8)
output_imgs1, output_img2 = np.split(output_imgs, 2, axis=0)
grid = np.concatenate((input_imgs1, output_imgs1, input_imgs2, output_img2))
grid = python_image_grid(grid, [4, 5])
grid = np.squeeze(grid)
im = Image.fromarray(grid)
im.save(path)
def python_image_grid(input_array, grid_shape):
"""This is a pure python version of tfgan.eval.image_grid.
Args:
input_array: ndarray. Minibatch of images to format. A 4D numpy array
([batch size, height, width, num_channels]).
grid_shape: Sequence of int. The shape of the image grid,
formatted as [grid_height, grid_width].
Returns:
Numpy array representing a single image in which the input images have been
arranged into a grid.
Raises:
ValueError: The grid shape and minibatch size don't match.
ValueError: The input array isn't 4D.
"""
if grid_shape[0] * grid_shape[1] != int(input_array.shape[0]):
raise ValueError("Grid shape %s incompatible with minibatch size %i." %
(grid_shape, int(input_array.shape[0])))
if len(input_array.shape) != 4:
raise ValueError("Unrecognized input array format.")
image_shape = input_array.shape[1:3]
num_channels = input_array.shape[3]
height, width = (
grid_shape[0] * image_shape[0], grid_shape[1] * image_shape[1])
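  # rearrange the batch so the images tile row by row into a single [height, width, channels] canvas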
input_array = np.reshape(
input_array, tuple(grid_shape) + tuple(image_shape) + (num_channels,))
input_array = np.transpose(input_array, [0, 1, 3, 2, 4])
input_array = np.reshape(
input_array, [grid_shape[0], width, image_shape[0], num_channels])
input_array = np.transpose(input_array, [0, 2, 1, 3])
input_array = np.reshape(input_array, [height, width, num_channels])
return input_array | [
"PIL.Image.fromarray",
"numpy.reshape",
"numpy.asarray",
"numpy.squeeze",
"numpy.split",
"numpy.concatenate",
"numpy.transpose"
] | [((290, 312), 'numpy.asarray', 'np.asarray', (['input_imgs'], {}), '(input_imgs)\n', (300, 312), True, 'import numpy as np\n'), ((544, 575), 'numpy.split', 'np.split', (['input_imgs', '(2)'], {'axis': '(0)'}), '(input_imgs, 2, axis=0)\n', (552, 575), True, 'import numpy as np\n'), ((730, 762), 'numpy.split', 'np.split', (['output_imgs', '(2)'], {'axis': '(0)'}), '(output_imgs, 2, axis=0)\n', (738, 762), True, 'import numpy as np\n'), ((775, 844), 'numpy.concatenate', 'np.concatenate', (['(input_imgs1, output_imgs1, input_imgs2, output_img2)'], {}), '((input_imgs1, output_imgs1, input_imgs2, output_img2))\n', (789, 844), True, 'import numpy as np\n'), ((900, 916), 'numpy.squeeze', 'np.squeeze', (['grid'], {}), '(grid)\n', (910, 916), True, 'import numpy as np\n'), ((927, 948), 'PIL.Image.fromarray', 'Image.fromarray', (['grid'], {}), '(grid)\n', (942, 948), False, 'from PIL import Image\n'), ((2202, 2244), 'numpy.transpose', 'np.transpose', (['input_array', '[0, 1, 3, 2, 4]'], {}), '(input_array, [0, 1, 3, 2, 4])\n', (2214, 2244), True, 'import numpy as np\n'), ((2263, 2340), 'numpy.reshape', 'np.reshape', (['input_array', '[grid_shape[0], width, image_shape[0], num_channels]'], {}), '(input_array, [grid_shape[0], width, image_shape[0], num_channels])\n', (2273, 2340), True, 'import numpy as np\n'), ((2368, 2407), 'numpy.transpose', 'np.transpose', (['input_array', '[0, 2, 1, 3]'], {}), '(input_array, [0, 2, 1, 3])\n', (2380, 2407), True, 'import numpy as np\n'), ((2426, 2480), 'numpy.reshape', 'np.reshape', (['input_array', '[height, width, num_channels]'], {}), '(input_array, [height, width, num_channels])\n', (2436, 2480), True, 'import numpy as np\n'), ((367, 397), 'numpy.squeeze', 'np.squeeze', (['input_imgs'], {'axis': '(0)'}), '(input_imgs, axis=0)\n', (377, 397), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# vim: set fenc=utf-8 ai ts=4 sw=4 sts=4 et:
"""
Profiles a bsnes assembly trace.
"""
import re
import sys
import argparse
import os.path
SNES_SCANLINES = 262
SNES_HTIME = 1374
COMMON_INTERRUPT_NAMES = (
'VBLANK', 'VBlank', 'NmiHandler', 'NMI',
'IrqHandler', 'IRQ',
'CopHandler', 'COP',
'ResetHandler', 'RESET',
'BreakHandler', 'BREAK',
'EmptyHandler',
)
class LineReader:
"""
A simple line reader that supports peeking at the next line.
"""
def __init__(self, fp):
self.fp = fp
self.line = None
self.lineno = 0
self.peek = fp.readline()
def __iter__(self):
return self
def __next__(self):
line = self.peek
if line:
self.line = line
self.lineno += 1
self.peek = self.fp.readline()
return line
else:
raise StopIteration
class MemoryMap:
def __init__(self):
self.addresses = dict()
def load(self, filename):
regex = re.compile(r'^([A-Za-z0-9_\-]+)\s+([A-Za-z0-9]{6})\s+[A-Z]{1,3}(?:\s+([A-Za-z0-9_\-]+)\s+([A-Za-z0-9]{6})\s)')
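        # each memlog line: <label> <6-hex-digit address> <flags>, optionally followed by a second label/address pair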
with open(filename, 'r') as fp:
for line in fp:
m = regex.match(line)
if m:
addr = int(m.group(2), 16) & 0x7FFFFF
self.addresses[addr] = m.group(1)
if m.group(3):
addr = int(m.group(4), 16) & 0x7FFFFF
self.addresses[addr] = m.group(3)
def nameAddressIfUnlabled(self, addr, name):
addr = addr & 0x7FFFFF
if addr not in self.addresses:
self.addresses[addr] = name
def nameForAddress(self, addr):
maddr = addr & 0x7FFFFF
if maddr in self.addresses:
return self.addresses[maddr]
else:
return None
def addressForName(self, name):
for a, n in self.addresses.items():
if n == name:
return a
return -1
def nameOrAddress(self, addr):
maddr = addr & 0x7FFFFF
if maddr in self.addresses:
return self.addresses[maddr]
else:
return '%06x' % addr
class RoutineProfile:
def __init__(self, addr, caller, callAddress):
self.address = addr
self.caller = caller
self.callAddress = callAddress
self.count = 0
self.time = 0
self.totaltime = 0
self._calls = dict() # dict of tuple (callAddress, routineAddress) -> RoutineProfile
def sorted_calls(self):
return sorted(self._calls.values(), key=lambda x: x.totaltime, reverse=True)
def calls(self):
return self._calls.values()
def get_or_make_call(self, callAddress, routineAddr):
test = (callAddress, routineAddr)
if test in self._calls:
p = self._calls[test]
else:
p = RoutineProfile(routineAddr, self, callAddress)
self._calls[test] = p
return p
def add_call(self, call):
self._calls[(call.callAddress, call.address)] = call
class RoutineTotal:
def __init__(self, addr):
self.address = addr
self.count = 0
self.time = 0
class Profile:
def __init__(self, memmap):
self.memmap = memmap
self.name = None
self.root = None
self.time = 0
self.waiTime = 0
self._interrupts = dict() # mapping (address) -> RoutineProfile
self.profiles = set()
def addInterrupts(self, *interrupts):
for i in interrupts:
try:
a = int(i, 16)
except:
a = self.memmap.addressForName(i)
if a >= 0:
rt = RoutineProfile(a, None, -1)
self._interrupts[a & 0x7FFFFF] = rt
self._interrupts[a | 0x800000] = rt
def readTrace(self, fp):
regex = re.compile(r'^([A-Za-z0-9]{6})\s+(\w+?)\s.+?V:\s*(\d+)\sH:\s*(\d+)')
addr_regex = re.compile(r'^([A-Za-z0-9]{6})\s')
profiles = set() # mapping address -> RoutineProfile
interruptStack = list()
reader = LineReader(fp)
m = regex.match(reader.peek)
if not m:
raise ValueError("Not in a trace format")
prevVTime = int(m.group(3))
prevHTime = int(m.group(4))
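        # cycles elapsed since the previous traced line, handling wrap-around of the vertical counter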
def time_since_last_line(m):
nonlocal prevVTime, prevHTime
vTime = int(m.group(3))
hTime = int(m.group(4))
if vTime < prevVTime:
vTime += SNES_SCANLINES
if prevVTime != vTime:
hTime += SNES_HTIME * (vTime - prevVTime)
time = hTime - prevHTime
prevVTime = int(m.group(3))
prevHTime = int(m.group(4))
return time
# First line may be VBlank, check first.
instAddr = int(m.group(1), 16)
if instAddr not in self._interrupts:
current = RoutineProfile(instAddr, None, -1)
profiles.add(current)
else:
# First line Interrupt;
current = None
# Mark first instruction as entry.
self.memmap.nameAddressIfUnlabled(instAddr, "__TRACE_START__")
for line in reader:
m = regex.match(line)
if m:
inst = m.group(2).lower()
instAddr = int(m.group(1), 16)
# Check to see if an interrupt occurred
if instAddr in self._interrupts:
interruptStack.append(current)
current = self._interrupts[instAddr]
current.count += 1
current.time += time_since_last_line(m)
if inst == 'jsr' or inst == 'jsl':
# Determine the location of the routine by address of next line
nm = addr_regex.match(reader.peek)
routineAddr = int(nm.group(1), 16)
p = current.get_or_make_call(instAddr, routineAddr)
p.count += 1
current = p
elif inst == 'rts' or inst == 'rtl':
# Return from previous instruction
if current.caller:
current = current.caller
else:
# Returning to a routine that hasn't been traced before.
# Determine the location of the routine by address of next line
nm = addr_regex.match(reader.peek)
routineAddr = int(nm.group(1), 16)
prev = current
current = RoutineProfile(routineAddr, None, -1)
prev.caller = current
current.add_call(prev)
profiles.discard(prev)
profiles.add(current)
elif inst == 'wai':
# Add time waiting for interrupt to current function, not the interrupt itself
nm = regex.match(reader.peek)
if nm:
t = time_since_last_line(nm)
current.time += t
self.waiTime += t
elif inst == 'rti':
# Return from interrupt
try:
current = interruptStack.pop()
except IndexError:
current = None
if current == None:
nm = addr_regex.match(reader.peek)
returnAddr = int(nm.group(1), 16)
current = RoutineProfile(returnAddr, None, -0xFF)
profiles.add(current)
# Only add interrupts that were called
for i in self._interrupts.values():
if i.count > 0:
self.profiles.add(i)
self.profiles.update(profiles)
self._calculate_total_time()
self._generate_routines_list()
def _calculate_total_time(self):
def recurse(proutine):
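            # a routine's total time is its own time plus the total time of everything it calls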
total = proutine.time
for p in proutine.calls():
total += recurse(p)
proutine.totaltime = total
return total
self.totaltime = 0
for p in self.profiles:
self.totaltime += recurse(p)
def _generate_routines_list(self):
routines = dict()
def recurse(proutine):
addr = proutine.address
if addr not in routines:
routines[addr] = RoutineTotal(addr)
routines[addr].count += proutine.count
routines[addr].time += proutine.time
for p in proutine.calls():
recurse(p)
for p in self.profiles:
recurse(p)
self.routines = sorted(routines.values(), key=lambda x: x.time, reverse=True)
def write_routine_times(fp, profile):
totaltime = profile.totaltime
memmap = profile.memmap
fp.write('<h2>Routines:</h2>')
fp.write('<table>')
fp.write('<thead><th colspan="2">Routine</th><th>Count</th><th>Cycles</th><th>Percentage</th></thead>')
fp.write('<tbody>')
for r in profile.routines:
fp.write("<tr>")
aname = memmap.nameForAddress(r.address)
if aname:
fp.write("<td><tt>%06x</tt></td><td>%s</td>" % (
r.address,
aname
))
elif r.address >= 0:
fp.write("<td><tt>%06x</tt></td><td> </td>" % (
r.address
))
else:
fp.write("<td><tt>NULL</tt></td><td> </td>")
fp.write("<td>%d</td><td>%d</td><td>%f%%</td></tr>" % (
r.count,
r.time,
r.time / totaltime * 100.0,
))
fp.write('</tbody>')
fp.write('</table>')
def write_profile_calls(fp, profile):
memmap = profile.memmap
def recurse(proutine, parent_totaltime):
fp.write("<li>")
aname = memmap.nameForAddress(proutine.address)
if aname:
fp.write("<tt>%06x</tt> %s" % (
proutine.address,
aname
))
elif proutine.address >= 0:
fp.write("<tt>%06x</tt>" % (
proutine.address
))
else:
fp.write("<tt>NULL</tt>")
fp.write(": Called at <tt>%06x</tt> %d times, %i cycles (%f%% parent, %f%% total)</li>" % (
proutine.callAddress,
proutine.count,
proutine.totaltime,
proutine.totaltime / parent_totaltime * 100.0,
proutine.totaltime / profile.totaltime * 100.0,
))
calls = proutine.sorted_calls()
if calls:
fp.write('<ul>')
for p in calls:
recurse(p, proutine.totaltime)
fp.write('</ul>')
for p in profile.profiles:
fp.write('<h2>%s:</h2>' % memmap.nameOrAddress(p.address))
fp.write('<ul>')
recurse(p, p.totaltime)
fp.write('</ul>')
def write_html(fp, profile):
fp.write("<html><title>%s tracelog profile</title><body>" % profile.name)
fp.write("<h1>%s</h1>" % profile.name)
p = profile.waiTime / profile.totaltime * 100
fp.write("<div>%d Cycles Traced, %d WAI cycles (%f%% WAI, %f%% non WAI).</div>" % (
profile.totaltime,
profile.waiTime,
p,
100 - p,
))
write_routine_times(fp, profile)
write_profile_calls(fp, profile)
fp.write('</body></html>')
def process_args():
parser = argparse.ArgumentParser()
parser.add_argument('-m', '--memlog',
type=str,
help='Memlog file',
)
parser.add_argument('-i', '--interrupt', metavar='INTERRUPT',
dest='interrupts', type=str, action='append',
help="""Addresses / Names of interrupt. Can be used multiple times.
If memlog is supplied and this argument is not used, then common names
will be guessed from the memlog file."""
)
parser.add_argument('logfile',
type=str,
help='The SNES assembly trace file (`-` is stdin)',
)
parser.add_argument('htmlfile',
type=str,
help='The HTML output file (`-` is stdout)',
)
return parser.parse_args()
def main():
args = process_args()
memmap = MemoryMap()
profile = Profile(memmap)
if args.memlog:
memmap.load(args.memlog)
if args.interrupts:
profile.addInterrupts(*args.interrupts)
else:
profile.addInterrupts(*COMMON_INTERRUPT_NAMES)
if args.logfile == '-':
profile.name = 'SNES'
profile.readTrace(sys.stdin)
else:
with open(args.logfile, 'r') as fp:
profile.name = os.path.splitext(os.path.basename(args.logfile))[0]
profile.readTrace(fp)
if args.htmlfile == '-':
write_html(sys.stdout, profile)
else:
with open(args.htmlfile, 'w') as fp:
write_html(fp, profile)
if __name__ == '__main__':
main()
| [
"argparse.ArgumentParser",
"re.compile"
] | [((11747, 11772), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (11770, 11772), False, 'import argparse\n'), ((1104, 1230), 're.compile', 're.compile', (['"""^([A-Za-z0-9_\\\\-]+)\\\\s+([A-Za-z0-9]{6})\\\\s+[A-Z]{1,3}(?:\\\\s+([A-Za-z0-9_\\\\-]+)\\\\s+([A-Za-z0-9]{6})\\\\s)"""'], {}), "(\n '^([A-Za-z0-9_\\\\-]+)\\\\s+([A-Za-z0-9]{6})\\\\s+[A-Z]{1,3}(?:\\\\s+([A-Za-z0-9_\\\\-]+)\\\\s+([A-Za-z0-9]{6})\\\\s)'\n )\n", (1114, 1230), False, 'import re\n'), ((4015, 4090), 're.compile', 're.compile', (['"""^([A-Za-z0-9]{6})\\\\s+(\\\\w+?)\\\\s.+?V:\\\\s*(\\\\d+)\\\\sH:\\\\s*(\\\\d+)"""'], {}), "('^([A-Za-z0-9]{6})\\\\s+(\\\\w+?)\\\\s.+?V:\\\\s*(\\\\d+)\\\\sH:\\\\s*(\\\\d+)')\n", (4025, 4090), False, 'import re\n'), ((4105, 4139), 're.compile', 're.compile', (['"""^([A-Za-z0-9]{6})\\\\s"""'], {}), "('^([A-Za-z0-9]{6})\\\\s')\n", (4115, 4139), False, 'import re\n')] |
import os
script_dir = os.path.dirname(__file__)
relative_path = "input.txt"
input_path = os.path.join(script_dir, relative_path)
GROUPS = list(open(input_path).read().split("\n\n"))
# PART 1
unique_sum = 0
for group in GROUPS:
group = group.replace("\n", "")
unique_chars = len(set(group))
unique_sum += unique_chars
print("Some of UNIQUE characters in a group - " + str(unique_sum))
# PART 2
common_sum = 0
for group in GROUPS:
individuals = group.split("\n")
sets = [set(x) for x in individuals]
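    # characters answered by every member of the group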
common_chars = set.intersection(*sets)
common_sum += len(common_chars)
print("Some of COMMON characters in a group - " + str(common_sum)) | [
"os.path.dirname",
"os.path.join"
] | [((24, 49), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (39, 49), False, 'import os\n'), ((91, 130), 'os.path.join', 'os.path.join', (['script_dir', 'relative_path'], {}), '(script_dir, relative_path)\n', (103, 130), False, 'import os\n')] |
from .ResultComparer import ResultComparer, CompareResult
import logging
class ResultComparerSteadyState(ResultComparer):
def __init__(self):
from .TaskTypes import TaskTypes
ResultComparer.__init__(self, TaskTypes.steadystate)
def compare(self, expected, other, **kwargs):
result = CompareResult(self)
# compare status
if expected.status != other.status:
logging.debug('Status different: {0} != {1}'.format(expected.status, other.status))
result.fail_with(' Status different: {0} != {1}'.format(expected.status, other.status))
# species result: concentration / concentration rate / particle numbers / particle number rate
# expected_subset = self.get_subset(expected.data_frames[0], [0, 1, 2, 3])
# other_subset = self.get_subset(other.data_frames[0], [0, 1, 2, 3])
# species result: concentration
expected_subset = self.get_subset(expected.data_frames[0], [0])
other_subset = self.get_subset(other.data_frames[0], [0])
result.explicit_fail = result.explicit_fail or \
self.compare_df_unsorted(expected_subset,
other_subset,
desc=expected.data_descriptions[0]['desc'],
messages=result.messages, **kwargs)
# compare steady state fluxes
result.explicit_fail = result.explicit_fail or \
self.compare_df_unsorted(expected.data_frames[1],
other.data_frames[1],
desc=expected.data_descriptions[1]['desc'],
messages=result.messages, **kwargs)
if not result.explicit_fail:
logging.debug(' results matched')
# # compare full jacobian
# result.explicit_fail = result.explicit_fail or self.compare_df_unsorted(expected.data_frames[2],
# other.data_frames[2],
# desc="Full Jacobian",
# messages=result.messages, **kwargs)
# # compare full eigenvalues
# result.explicit_fail = result.explicit_fail or self.compare_df_unsorted(expected.data_frames[3],
# other.data_frames[3],
# desc="Full Eigenvalues",
# messages=result.messages, **kwargs)
return result
| [
"logging.debug"
] | [((1936, 1970), 'logging.debug', 'logging.debug', (['""" results matched"""'], {}), "(' results matched')\n", (1949, 1970), False, 'import logging\n')] |
import datetime
import math
import numpy as np
import pytest
from .country_report import CountryReport
from .formula import AtgFormula
def test_two_traces():
cumulative_active = np.array([0, 0, 1, 2, 5, 18, 45])
start_date = datetime.date(2020, 4, 17)
dates = [start_date + datetime.timedelta(days=d) for d in range(len(cumulative_active))]
report = CountryReport(
short_name="UK",
long_name="United Kingdom",
dates=dates,
daily_positive=None,
daily_dead=None,
daily_recovered=None,
daily_active=None,
cumulative_active=cumulative_active,
population=None,
)
display_until = datetime.date(2020, 4, 30)
f1 = AtgFormula(tg=2, a=47, exponent=1.5, min_case_count=2)
# The trace starts (t=0) at 2020-04-19. The maximum of this trace is at t = TG * exponent = 3.
max_t1, start_date1 = 3, datetime.date(2020, 4, 19)
length1 = math.ceil(2 * (1.5 + math.sqrt(1.5)))
f2 = AtgFormula(tg=3, a=12, exponent=2, min_case_count=1)
# The trace starts (t=0) at 2020-04-18. The maximum of this trace is at t = TG * exponent = 6.
max_t2, start_date2, length2 = 6, datetime.date(2020, 4, 18), math.ceil(3 * (2 + math.sqrt(2)))
trace_generator1 = f1.get_trace_generator(report)
trace1 = trace_generator1.generate_trace(display_until)
assert trace1.max_value == pytest.approx((47 / 2) * (max_t1 / 2) ** 1.5 * math.exp(-max_t1 / 2))
assert trace1.max_value_date == start_date1 + datetime.timedelta(days=max_t1)
assert trace1.xs[0] == start_date1
assert trace_generator1.display_at_least_until == start_date1 + datetime.timedelta(days=length1)
trace_generator2 = f2.get_trace_generator(report)
trace2 = trace_generator2.generate_trace(display_until)
assert trace2.max_value == pytest.approx((12 / 3) * (max_t2 / 3) ** 2 * math.exp(-max_t2 / 3))
assert trace2.max_value_date == start_date2 + datetime.timedelta(days=max_t2)
assert trace2.xs[0] == start_date2
assert trace_generator2.display_at_least_until == start_date2 + datetime.timedelta(days=length2)
| [
"math.sqrt",
"numpy.array",
"datetime.date",
"datetime.timedelta",
"math.exp"
] | [((186, 219), 'numpy.array', 'np.array', (['[0, 0, 1, 2, 5, 18, 45]'], {}), '([0, 0, 1, 2, 5, 18, 45])\n', (194, 219), True, 'import numpy as np\n'), ((237, 263), 'datetime.date', 'datetime.date', (['(2020)', '(4)', '(17)'], {}), '(2020, 4, 17)\n', (250, 263), False, 'import datetime\n'), ((674, 700), 'datetime.date', 'datetime.date', (['(2020)', '(4)', '(30)'], {}), '(2020, 4, 30)\n', (687, 700), False, 'import datetime\n'), ((894, 920), 'datetime.date', 'datetime.date', (['(2020)', '(4)', '(19)'], {}), '(2020, 4, 19)\n', (907, 920), False, 'import datetime\n'), ((1173, 1199), 'datetime.date', 'datetime.date', (['(2020)', '(4)', '(18)'], {}), '(2020, 4, 18)\n', (1186, 1199), False, 'import datetime\n'), ((290, 316), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': 'd'}), '(days=d)\n', (308, 316), False, 'import datetime\n'), ((1501, 1532), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': 'max_t1'}), '(days=max_t1)\n', (1519, 1532), False, 'import datetime\n'), ((1640, 1672), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': 'length1'}), '(days=length1)\n', (1658, 1672), False, 'import datetime\n'), ((1937, 1968), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': 'max_t2'}), '(days=max_t2)\n', (1955, 1968), False, 'import datetime\n'), ((2076, 2108), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': 'length2'}), '(days=length2)\n', (2094, 2108), False, 'import datetime\n'), ((956, 970), 'math.sqrt', 'math.sqrt', (['(1.5)'], {}), '(1.5)\n', (965, 970), False, 'import math\n'), ((1428, 1449), 'math.exp', 'math.exp', (['(-max_t1 / 2)'], {}), '(-max_t1 / 2)\n', (1436, 1449), False, 'import math\n'), ((1864, 1885), 'math.exp', 'math.exp', (['(-max_t2 / 3)'], {}), '(-max_t2 / 3)\n', (1872, 1885), False, 'import math\n'), ((1220, 1232), 'math.sqrt', 'math.sqrt', (['(2)'], {}), '(2)\n', (1229, 1232), False, 'import math\n')] |
'''
Created on 11-10-2012
@author: <NAME>
'''
import unittest
from unifac import atom
class AtomTest(unittest.TestCase):
def setUp(self):
self.atom = atom.Atom("C")
def test_create(self):
self.assertEqual(self.atom.symbol, "C")
def test_add_connected(self):
new_atom = atom.Atom("O")
self.atom.add_connected(new_atom)
self.assertEqual(self.atom.connected, [new_atom])
self.assertEqual(new_atom.connected, [self.atom])
def test_hydrogens(self):
self.assertEqual(self.atom.hydrogens, 4)
new_atom = atom.Atom("O")
self.atom.add_connected(new_atom)
self.assertEqual(self.atom.hydrogens, 3)
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main() | [
"unittest.main",
"unifac.atom.Atom"
] | [((796, 811), 'unittest.main', 'unittest.main', ([], {}), '()\n', (809, 811), False, 'import unittest\n'), ((166, 180), 'unifac.atom.Atom', 'atom.Atom', (['"""C"""'], {}), "('C')\n", (175, 180), False, 'from unifac import atom\n'), ((323, 337), 'unifac.atom.Atom', 'atom.Atom', (['"""O"""'], {}), "('O')\n", (332, 337), False, 'from unifac import atom\n'), ((608, 622), 'unifac.atom.Atom', 'atom.Atom', (['"""O"""'], {}), "('O')\n", (617, 622), False, 'from unifac import atom\n')] |
# !/usr/bin/env python3
# -*- encoding: utf-8 -*-
"""
ERP+ Policia Nacional de Cabo Verde
Hospital visit management (Gestão de Visitas ao Hospital)
"""
__author__ = '<NAME>'
__credits__ = []
__version__ = "1.0"
__maintainer__ = "<NAME>"
__status__ = "Development"
__model_name__ = 'hospital.Hospital'
import auth, base_models
from orm import *
from form import *
from terceiro import Terceiro
class Hospital(Model, View):
def __init__(self, **kargs):
Model.__init__(self, **kargs)
self.__name__ = 'hospital'
self.__title__ = 'Visitas ao Hospital'
self.__model_name__ = __model_name__
self.__list_edit_mode__ = 'edit'
#self.__order_by__ = 'data_inicial'
self.__auth__ = {
'read':['All'],
'write':['Agente'],
'create':['Agente'],
'delete':['Chefe'],
'full_access':['Comandante']
}
#self.__get_options__ = ['nome']
self.ocorrencia = parent_field(view_order=1, name ='Ocorrencia', args='style:visibility="hidden"', model_name='ocorrencia.Ocorrencia', nolabel=True, onlist=False, column='numero')
self.data = date_field(view_order=2, name='Data', size=60)
self.hora = time_field(view_order=3, name='Hora', size=60)
self.terceiro = choice_field(view_order=4, name ='Terceiro', size=60, model='terceiro', column='nome', options="model.get_terceiros()")
self.tipo_ferimento = combo_field(view_order=5, name ='Tipo Ferimento', size=40, options=[('leve','Leve'), ('grave','Grave')])
self.resultado_ferimento = combo_field(view_order=6, name ='Resultado', size=40, options=[('tratamento','Tratamento'), ('Internamento','Internamento'), ('obito','Obito')])
self.descricao = text_field(view_order=7, name ='Descrição', args='rows=5', size=100, colspan=3)
def get_terceiros(self):
return Terceiro().get_options()
| [
"terceiro.Terceiro"
] | [((1858, 1868), 'terceiro.Terceiro', 'Terceiro', ([], {}), '()\n', (1866, 1868), False, 'from terceiro import Terceiro\n')] |
#!python
"""
Sparse matrix support is still limited in PyViennaCL. The construction
of spare matrices from host data is supported, as is sparse matrix-vector
multiplication and the use of iterative solvers (see iterative-solvers.py
in this directory). However, no other operations have yet been implemented,
and SciPy support is rudimentary.
Here, we demonstrate the construction of a CompressedMatrix instance, and the
calculation of a matrix-vector product.
Other sparse matrix formats are available, such as the CoordinateMatrix,
ELLMatrix and HybridMatrix. They are constructed and used identically to the
CompressedMatrix type.
"""
import pyviennacl as p
import random
# First, we create an empty 5 x 5 CompressedMatrix:
A = p.CompressedMatrix(5, 5)
# Let's set some random values of A.
#
# Importantly, setting individual elements of a PyViennaCL sparse matrix is
# not nearly as expensive as setting individual elements of a dense matrix or
# vector, since in the sparse matrix case, the elements are cached on the host
# and only transferred to the device when they are needed for some computation.
for i in range(6):
x = random.randrange(0, 4, 1)
y = random.randrange(0, 4, 1)
A[x, y] = random.random()
print("A is:\n%s" % A.value)
# Now, let's construct a simple vector of 5 elements.
b = p.Vector(5, 3.142)
print("b is %s" % b)
# Now, represent the product:
c = A * b
# And the result is only computed when we need to print it:
print("A * b = c is %s" % c)
| [
"pyviennacl.Vector",
"pyviennacl.CompressedMatrix",
"random.random",
"random.randrange"
] | [((735, 759), 'pyviennacl.CompressedMatrix', 'p.CompressedMatrix', (['(5)', '(5)'], {}), '(5, 5)\n', (753, 759), True, 'import pyviennacl as p\n'), ((1319, 1337), 'pyviennacl.Vector', 'p.Vector', (['(5)', '(3.142)'], {}), '(5, 3.142)\n', (1327, 1337), True, 'import pyviennacl as p\n'), ((1140, 1165), 'random.randrange', 'random.randrange', (['(0)', '(4)', '(1)'], {}), '(0, 4, 1)\n', (1156, 1165), False, 'import random\n'), ((1174, 1199), 'random.randrange', 'random.randrange', (['(0)', '(4)', '(1)'], {}), '(0, 4, 1)\n', (1190, 1199), False, 'import random\n'), ((1214, 1229), 'random.random', 'random.random', ([], {}), '()\n', (1227, 1229), False, 'import random\n')] |
# coding: utf-8
from __future__ import print_function
import os
from os.path import join as pjoin
import shutil
from setuptools import setup, find_packages
from setuptools.command.install import install as _install
PROJECT_NAME = 'isaac'
setup(
name=PROJECT_NAME,
version='1.0',
packages=find_packages(),
long_description='A machine learning library',
install_requires=[
"numpy",
]
)
| [
"setuptools.find_packages"
] | [((304, 319), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (317, 319), False, 'from setuptools import setup, find_packages\n')] |
# This program has been developed by students from the bachelor Computer Science at Utrecht University within the
# Software and Game project course
# ©Copyright Utrecht University Department of Information and Computing Sciences.
"""Handle the inactivity for a course."""
import datetime as dt
import json
import lib.ll_event_parsers
import assistants.learning_locker as ll_api
import assistants.moodle as moodle_api
from scheduler.models import InactivityNotificationSent
from lib.moodle_get_parsers import parse_enrolled_students, parse_course_info
from scheduler.utils import check_if_ended
def create_job(course_id, time_not_active, main_scheduler):
"""
Call all the methods that are needed to check the inactivity.
:param course_id: Id of the course the inactivity needs to be checked for.
:type course_id: int
:param time_not_active: Time interval the inactivity needs to be checked for.
    :type time_not_active: int
    :param main_scheduler: Scheduler that manages the inactivity notification jobs.
"""
if(check_if_ended(course_id)):
main_scheduler.remove_inactivity_notification(course_id)
return
time = calculate_date(dt.date.today(), time_not_active)
if(check_if_older_than(time_not_active, course_id)):
viewed_courses = ll_api.get_viewed_courses(time, course_id)
list_viewed = lib.ll_event_parsers.parse_viewed_courses(viewed_courses)
enrolled_students = parse_enrolled_students(moodle_api.get_enrolled_users(course_id))
course_name = parse_course_info(moodle_api.get_course_by_id_field(course_id))
students = students_not_viewed(enrolled_students, list_viewed, course_id)
message = get_message(course_name, time_not_active)
if len(students) > 0:
send_message(students, message)
def check_if_older_than(time_not_active, id):
"""
Check if the course was created before the time for which inactivity is checked
:param id: Id of the course that needs to be checked.
:type id: int
:param time_not_active: Number of days of inactivity.
:type time_not_active: int
    :return: Whether the course was created before the inactivity window
:rtype: bool
"""
course = moodle_api.get_course_by_id_field(id)
time_created = course['courses'][0]['timecreated']
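    # Moodle returns a Unix timestamp; convert it to a date to compare against today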
date_created = dt.date.fromtimestamp(time_created)
difference = (dt.date.today() - date_created).days
if(difference < time_not_active):
return False
else:
return True
def calculate_date(date_today, time_not_active):
"""
Calculate the date since when the inactivity needs to be checked for.
:param date_today: Today's date.
:type date_today: date
:param time_not_active: Number of days of inactivity.
:type time_not_active: int
    :return: Date since when inactivity should be checked.
    :rtype: date
"""
return date_today - dt.timedelta(days=time_not_active)
def get_database_object(course_id, error_on_not_found=True):
"""
Get the database entry and handle the does not exist error
:param course_id: The id of the course.
:type course_id: int
:param error_on_not_found: Whether the course exist in the database
:type error_on_not_found: bool
:return: database entry
:rtype: object
"""
try:
return InactivityNotificationSent.objects.get(pk=course_id)
except Exception as e:
print(e)
if error_on_not_found:
print("Course: " + str(course_id) + " could not be found in InactivityNotificationSent when searched in get_database_object")
return None
def students_not_viewed(students, list_viewed, course_id):
"""
Determine which students need to be send a message.
:param students: List of the enrolled students.
:type students: list(str)
:param list_viewed: List of the students who have viewed a course
:type list_viewed: list(str)
:return: A list of student ids.
:rtype: list(str)
"""
# Get students which already have gotten a notification
database_entry = get_database_object(course_id)
if database_entry is None:
new_entry = InactivityNotificationSent(str(course_id), '{}')
database_entry = new_entry
student_id_dict = json.loads(database_entry.args_json)
student_id_set = set(student_id_dict.keys())
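    # notify only students who have neither viewed the course nor already received a notification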
student_send_notification = list(set(students) - set(list_viewed).union(student_id_set))
for student_id in student_send_notification:
student_id_dict[student_id] = 1
database_entry.args_json = json.dumps(student_id_dict)
database_entry.save()
return student_send_notification
def get_message(course_name, time_not_active):
"""
Format the message that needs to be send.
:param course_name: The name of the course.
:type course_name: str
:param time_not_active: Time that a student has not been active.
:type time_not_active: int
:return: Message to be send to users.
:rtype: str
"""
message = f'You have not viewed the course "{course_name}" in {time_not_active} day(s). ' \
f'We advise you to stay active, so that you will not miss anything.'
return message
def send_message(students, message):
"""
Loop over the list of student ids, and send a message to every student in the list.
:param students: List of student ids.
:type students: list(str)
:param message: The message that needs to be send.
:type message: str
"""
students = set(students)
moodle_api.send_bulk_messages(students, message)
| [
"json.loads",
"assistants.learning_locker.get_viewed_courses",
"scheduler.utils.check_if_ended",
"json.dumps",
"assistants.moodle.get_enrolled_users",
"scheduler.models.InactivityNotificationSent.objects.get",
"datetime.timedelta",
"datetime.date.fromtimestamp",
"assistants.moodle.get_course_by_id_field",
"datetime.date.today",
"assistants.moodle.send_bulk_messages"
] | [((964, 989), 'scheduler.utils.check_if_ended', 'check_if_ended', (['course_id'], {}), '(course_id)\n', (978, 989), False, 'from scheduler.utils import check_if_ended\n'), ((2122, 2159), 'assistants.moodle.get_course_by_id_field', 'moodle_api.get_course_by_id_field', (['id'], {}), '(id)\n', (2155, 2159), True, 'import assistants.moodle as moodle_api\n'), ((2234, 2269), 'datetime.date.fromtimestamp', 'dt.date.fromtimestamp', (['time_created'], {}), '(time_created)\n', (2255, 2269), True, 'import datetime as dt\n'), ((4155, 4191), 'json.loads', 'json.loads', (['database_entry.args_json'], {}), '(database_entry.args_json)\n', (4165, 4191), False, 'import json\n'), ((4454, 4481), 'json.dumps', 'json.dumps', (['student_id_dict'], {}), '(student_id_dict)\n', (4464, 4481), False, 'import json\n'), ((5415, 5463), 'assistants.moodle.send_bulk_messages', 'moodle_api.send_bulk_messages', (['students', 'message'], {}), '(students, message)\n', (5444, 5463), True, 'import assistants.moodle as moodle_api\n'), ((1099, 1114), 'datetime.date.today', 'dt.date.today', ([], {}), '()\n', (1112, 1114), True, 'import datetime as dt\n'), ((1215, 1257), 'assistants.learning_locker.get_viewed_courses', 'll_api.get_viewed_courses', (['time', 'course_id'], {}), '(time, course_id)\n', (1240, 1257), True, 'import assistants.learning_locker as ll_api\n'), ((2798, 2832), 'datetime.timedelta', 'dt.timedelta', ([], {'days': 'time_not_active'}), '(days=time_not_active)\n', (2810, 2832), True, 'import datetime as dt\n'), ((3223, 3275), 'scheduler.models.InactivityNotificationSent.objects.get', 'InactivityNotificationSent.objects.get', ([], {'pk': 'course_id'}), '(pk=course_id)\n', (3261, 3275), False, 'from scheduler.models import InactivityNotificationSent\n'), ((1390, 1430), 'assistants.moodle.get_enrolled_users', 'moodle_api.get_enrolled_users', (['course_id'], {}), '(course_id)\n', (1419, 1430), True, 'import assistants.moodle as moodle_api\n'), ((1472, 1516), 'assistants.moodle.get_course_by_id_field', 'moodle_api.get_course_by_id_field', (['course_id'], {}), '(course_id)\n', (1505, 1516), True, 'import assistants.moodle as moodle_api\n'), ((2288, 2303), 'datetime.date.today', 'dt.date.today', ([], {}), '()\n', (2301, 2303), True, 'import datetime as dt\n')] |
import urllib.request,json
from .models import New, Source  # Source assumed to live alongside New in .models; it is used by process_sources below
# Getting api key
api_key = None
# Getting the new base url
base_url = None
# sources_url = app.config["NEWS_SOURCES_API"]
def configure_request(app):
global api_key,base_url
api_key = app.config['NEW_API_KEY']
base_url = app.config['NEW_API_BASE_URL']
print(base_url.format("sortBy",api_key))
def get_news(sortBy):
'''
    Function that gets the JSON response for our URL request
'''
get_news_url = base_url.format(sortBy,api_key)
# get_sources_url = sources_url.format(sortBy,api_key)
print(get_news_url)
with urllib.request.urlopen(get_news_url) as url:
get_news_data = url.read()
get_news_response = json.loads(get_news_data)
new_articles = None
if get_news_response['articles']:
new_articles_list = get_news_response['articles']
new_articles = process_articles(new_articles_list)
return new_articles
def process_articles(new_list):
'''
    Function that processes the news results and transforms them into a list of objects
    Args:
        new_list: A list of dictionaries that contain news article details
    Returns:
        new_articles: A list of news article objects
'''
new_articles = []
for new_item in new_list:
id = new_item.get('id')
title = new_item.get('title')
description = new_item.get('description')
url = new_item.get('url')
urlToImage = new_item.get('urlToImage')
publishedAt = new_item.get('publishedAt')
content = new_item.get('content')
new_object = New(id,title,description,url,urlToImage,publishedAt,content)
new_articles.append(new_object)
return new_articles
def get_new():
source_url='https://newsapi.org/v2/sources?&apiKey=<KEY>'
# get_news_details_url = base_url.format(id,api_key)
new_object = None
with urllib.request.urlopen(source_url) as url:
new_details_data = url.read()
new_details_response = json.loads(new_details_data)
if new_details_response['sources']:
new_articles_list = new_details_response['sources']
new_object = process_sources(new_articles_list)
print(new_object)
return new_object
def process_sources(new_source):
'''
    Function that processes the news source results and transforms them into a list of objects
    Args:
        new_source: A list of dictionaries that contain news source details
    Returns:
        new_object: A list of news source objects
'''
new_object = []
for source in new_source:
name = source.get('name')
description = source.get('description')
url = source.get('url')
category = source.get('category')
        source_object = Source(name,description,category,url)  # avoid rebinding the new_source parameter being iterated
        new_object.append(source_object)
return new_object
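# --- Minimal usage sketch (not part of the original module) ---
# Assumptions: a Flask app whose config defines NEW_API_KEY and NEW_API_BASE_URL,
# the exact keys read in configure_request() above; the key and URL below are
# placeholders, and the base URL must contain two {} slots (sort order, api key).
#
#   from flask import Flask
#   app = Flask(__name__)
#   app.config['NEW_API_KEY'] = 'your-api-key'
#   app.config['NEW_API_BASE_URL'] = 'https://newsapi.org/v2/top-headlines?sortBy={}&apiKey={}'
#   configure_request(app)
#   articles = get_news('publishedAt')   # returns a list of New objects
#   sources = get_new()                  # returns a list of Source objects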
| [
"json.loads"
] | [((717, 742), 'json.loads', 'json.loads', (['get_news_data'], {}), '(get_news_data)\n', (727, 742), False, 'import urllib.request, json\n'), ((2017, 2045), 'json.loads', 'json.loads', (['new_details_data'], {}), '(new_details_data)\n', (2027, 2045), False, 'import urllib.request, json\n')] |
from django.contrib.auth.models import AbstractUser
from django.db import models
from django.utils.timezone import now
class BlogUser(AbstractUser):
"""
    Custom user model; it must be declared in settings (via AUTH_USER_MODEL).
"""
nickname = models.CharField('昵称', max_length=100, blank=True)
created_time = models.DateTimeField('创建时间', default=now)
    last_mod_time = models.DateTimeField('修改时间', blank=True, null=True)  # max_length is not a valid DateTimeField option
source = models.CharField('创建来源', max_length=100, blank=True)
class Meta:
ordering = ['-id']
verbose_name = '用户'
verbose_name_plural = verbose_name
get_latest_by = 'id'
def __str__(self):
return self.email
| [
"django.db.models.DateTimeField",
"django.db.models.CharField"
] | [((210, 260), 'django.db.models.CharField', 'models.CharField', (['"""昵称"""'], {'max_length': '(100)', 'blank': '(True)'}), "('昵称', max_length=100, blank=True)\n", (226, 260), False, 'from django.db import models\n'), ((280, 321), 'django.db.models.DateTimeField', 'models.DateTimeField', (['"""创建时间"""'], {'default': 'now'}), "('创建时间', default=now)\n", (300, 321), False, 'from django.db import models\n'), ((342, 398), 'django.db.models.DateTimeField', 'models.DateTimeField', (['"""修改时间"""'], {'max_length': '(100)', 'blank': '(True)'}), "('修改时间', max_length=100, blank=True)\n", (362, 398), False, 'from django.db import models\n'), ((412, 464), 'django.db.models.CharField', 'models.CharField', (['"""创建来源"""'], {'max_length': '(100)', 'blank': '(True)'}), "('创建来源', max_length=100, blank=True)\n", (428, 464), False, 'from django.db import models\n')] |
import setuptools
setuptools.setup(
    install_requires=[
        'pandas',
        'delegator.py',
        'lxml',
        'click',
        'python-Levenshtein'
    ]
)
| [
"setuptools.setup"
] | [((19, 37), 'setuptools.setup', 'setuptools.setup', ([], {}), '()\n', (35, 37), False, 'import setuptools\n')] |
import tensorflow as tf
from .object_classifier import ObjectClassifier
__all__ = [
"ObjectClassifier",
]
gpu_devices = tf.config.get_visible_devices("GPU")
for device in gpu_devices:
tf.config.experimental.set_memory_growth(device, True)
# tf.config.set_logical_device_configuration(
# device,
# [
# tf.config.LogicalDeviceConfiguration(memory_limit=2000),
# ],
# )
| [
"tensorflow.config.experimental.set_memory_growth",
"tensorflow.config.get_visible_devices"
] | [((127, 163), 'tensorflow.config.get_visible_devices', 'tf.config.get_visible_devices', (['"""GPU"""'], {}), "('GPU')\n", (156, 163), True, 'import tensorflow as tf\n'), ((195, 249), 'tensorflow.config.experimental.set_memory_growth', 'tf.config.experimental.set_memory_growth', (['device', '(True)'], {}), '(device, True)\n', (235, 249), True, 'import tensorflow as tf\n')] |
from django.db import models
from django.utils.translation import gettext_lazy as _
from django.utils import timezone
# Create your models here.
class Post(models.Model):
class Post_type(models.TextChoices):
BOAST = "Boast", _("Boast")
ROAST = "Roast", _("Roast")
post_type = models.CharField(
max_length=10,
choices=Post_type.choices,
default=Post_type.BOAST,
)
body = models.CharField(max_length=280)
upvotes = models.IntegerField(default=0)
downvotes = models.IntegerField(default=0)
post_date = models.DateTimeField(default=timezone.now)
DisplayFields = ["body", "upvotes", "downvotes", "post_date", "overall_votes"]
def __str__(self):
return self.post_type
@property
def overall_votes(self):
return self.upvotes - self.downvotes
| [
"django.db.models.DateTimeField",
"django.utils.translation.gettext_lazy",
"django.db.models.CharField",
"django.db.models.IntegerField"
] | [((302, 390), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(10)', 'choices': 'Post_type.choices', 'default': 'Post_type.BOAST'}), '(max_length=10, choices=Post_type.choices, default=\n Post_type.BOAST)\n', (318, 390), False, 'from django.db import models\n'), ((429, 461), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(280)'}), '(max_length=280)\n', (445, 461), False, 'from django.db import models\n'), ((476, 506), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)'}), '(default=0)\n', (495, 506), False, 'from django.db import models\n'), ((523, 553), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)'}), '(default=0)\n', (542, 553), False, 'from django.db import models\n'), ((570, 612), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'default': 'timezone.now'}), '(default=timezone.now)\n', (590, 612), False, 'from django.db import models\n'), ((238, 248), 'django.utils.translation.gettext_lazy', '_', (['"""Boast"""'], {}), "('Boast')\n", (239, 248), True, 'from django.utils.translation import gettext_lazy as _\n'), ((274, 284), 'django.utils.translation.gettext_lazy', '_', (['"""Roast"""'], {}), "('Roast')\n", (275, 284), True, 'from django.utils.translation import gettext_lazy as _\n')] |
import binascii
# You'll need the Erdpy library. Go to the Elrond docs to get set up with Erdpy
from erdpy.accounts import Account
from erdpy.proxy import ElrondProxy
from erdpy.transactions import Transaction
# Change here the token decimals and the token Id
TOKEN_DECIMALS = 1000000000000000000 # 10^18 (18 decimals)
TOKEN_ID = "GOLDEN-335e6d"
def text_to_hex(text) :
return binascii.hexlify(text.encode()).decode()
# Pad with a leading 0 when the hex string has an odd length, so it splits into whole bytes
def num_to_hex(num) :
hexa = format(num, "x")
if len(hexa)%2==1 :
return "0" + hexa
return hexa
# Convert to a bigUint (adding the decimals)
def int_to_BigInt(num, decimals) :
return int(f"{num*decimals:.1f}".split(".")[0])
def sendESDT(owner, receiver, token_id, amount, decimals):
payload = "ESDTTransfer@" + text_to_hex(token_id) + "@" + num_to_hex(int_to_BigInt(amount, decimals))
tx = Transaction()
tx.nonce = owner.nonce
tx.value = "0"
tx.sender = owner.address.bech32()
tx.receiver = receiver.address.bech32()
tx.gasPrice = gas_price
tx.gasLimit = 500000 # 500k is standard for an ESDT transfer as of today
tx.data = payload
tx.chainID = chain
tx.version = tx_version
tx.sign(owner)
tx_hash = tx.send(proxy)
owner.nonce+=1
return tx_hash
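# Worked example of the payload encoding above (illustration only, values computed
# from TOKEN_ID and 100 tokens with 18 decimals):
#   text_to_hex("GOLDEN-335e6d")        -> "474f4c44454e2d333335653664"
#   int_to_BigInt(100, TOKEN_DECIMALS)  -> 100000000000000000000
#   num_to_hex(100 * 10**18)            -> "056bc75e2d63100000"  (leading 0 added for the odd-length hex string)
#   payload: "ESDTTransfer@474f4c44454e2d333335653664@056bc75e2d63100000"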
proxy_address = "https://gateway.elrond.com"
proxy = ElrondProxy(proxy_address)
network = proxy.get_network_config()
chain = network.chain_id
gas_price = network.min_gas_price
tx_version = network.min_tx_version
# The owner of the tokens, that will send them
# You'll need a pem file. If you don't have one, derive it by following Elrond's tutorials
owner = Account(pem_file="wallet_owner.pem")
owner.sync_nonce(proxy)
# All addresses should be in a file (one address per line)
# If the file is formatted in another way, you should parse it differently
# Note : set() avoid address duplicates
for address in list(set(open("addresses.txt").read().split("\n"))) :
sendESDT(owner, Account(address), TOKEN_ID, 100, TOKEN_DECIMALS)
| [
"erdpy.transactions.Transaction",
"erdpy.proxy.ElrondProxy",
"erdpy.accounts.Account"
] | [((1443, 1469), 'erdpy.proxy.ElrondProxy', 'ElrondProxy', (['proxy_address'], {}), '(proxy_address)\n', (1454, 1469), False, 'from erdpy.proxy import ElrondProxy\n'), ((1746, 1782), 'erdpy.accounts.Account', 'Account', ([], {'pem_file': '"""wallet_owner.pem"""'}), "(pem_file='wallet_owner.pem')\n", (1753, 1782), False, 'from erdpy.accounts import Account\n'), ((964, 977), 'erdpy.transactions.Transaction', 'Transaction', ([], {}), '()\n', (975, 977), False, 'from erdpy.transactions import Transaction\n'), ((2073, 2089), 'erdpy.accounts.Account', 'Account', (['address'], {}), '(address)\n', (2080, 2089), False, 'from erdpy.accounts import Account\n')] |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Connecting to the database
from modules.create_db_components import create_connection
"""This module is used to display ticket information from the database."""
# Owned
__author__ = "<NAME>"
__credits__ = ["<NAME>"]
__license__ = "MIT"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
def display_info(conn):
cur = conn.cursor()
cur.execute("SELECT * FROM tickets")
conn.commit()
rows = cur.fetchall()
for element in rows:
print(element)
def display_info_category(conn, category):
cur = conn.cursor()
cur.execute("SELECT * FROM tickets WHERE category = ?", (category,))
conn.commit()
rows = cur.fetchall()
for element in rows:
print(element)
if __name__ == "__main__":
connection = create_connection(r"D:\eisen-tickets\assets\tickets.db")
display_info_category(connection, "DO")
| [
"modules.create_db_components.create_connection"
] | [((797, 855), 'modules.create_db_components.create_connection', 'create_connection', (['"""D:\\\\eisen-tickets\\\\assets\\\\tickets.db"""'], {}), "('D:\\\\eisen-tickets\\\\assets\\\\tickets.db')\n", (814, 855), False, 'from modules.create_db_components import create_connection\n')] |
from simple_youtube_api.Channel import Channel
from simple_youtube_api.LocalVideo import LocalVideo
import glob
import os
import random
import notifBot
# logging into the channel
channel = Channel()
channel.login("src/client_id.json", "src/credentials.storage")
def upload():
myfile = "src/vidnum.txt"
with open(myfile, "r") as f:
num = int(f.read())
print(num)
print(type(num))
with open(myfile, "w") as f:
f.write(str(num+1))
# setting up the video that is going to be uploaded
video = LocalVideo(file_path="final.mp4")
# setting snippet
video.set_title("Top memes I found on Reddit #" + str(num))
video.set_description('''Top memes I found on Reddit #
Like, Share and Subscribe for daily videos
Music used in the Video:
<NAME> - Fluffing A Duck
Memes from subreddits:
r/memes,r/dankmemes,r/me_irl,r/HistoryMemes,r/BlackPeopleTwitter,r/facepalm,r/ihadastroke,r/woosh,r/technicallythetruth
Disclaimer:
I do not own any of the images shown in this video and I am not the creator of these memes.
''')
video.set_tags(["memes", "reddit","reddit memes","reddit compilation","meme compilation","funny","dank memes","funny memes","top memes","top reddit","top"])
video.set_default_language("en-US")
# setting status
video.set_embeddable(True)
video.set_license("creativeCommon")
video.set_privacy_status("public")
video.set_public_stats_viewable(True)
img = []
for file in glob.glob("processed/*.jpg"):
img.append(file)
rand = random.randrange(0,len(img))
# setting thumbnail
video.set_thumbnail_path(img[rand])
# uploading video and printing the results
video = channel.upload_video(video)
print(video.id)
print(video)
return [video.id , video , img[rand]]
| [
"simple_youtube_api.Channel.Channel",
"glob.glob",
"simple_youtube_api.LocalVideo.LocalVideo"
] | [((197, 206), 'simple_youtube_api.Channel.Channel', 'Channel', ([], {}), '()\n', (204, 206), False, 'from simple_youtube_api.Channel import Channel\n'), ((555, 588), 'simple_youtube_api.LocalVideo.LocalVideo', 'LocalVideo', ([], {'file_path': '"""final.mp4"""'}), "(file_path='final.mp4')\n", (565, 588), False, 'from simple_youtube_api.LocalVideo import LocalVideo\n'), ((1578, 1606), 'glob.glob', 'glob.glob', (['"""processed/*.jpg"""'], {}), "('processed/*.jpg')\n", (1587, 1606), False, 'import glob\n')] |
import time
import math
from functions import prime_sieve
times = []
times.append(time.perf_counter())  # time.clock() was removed in Python 3.8
limit = 10001
# nth prime bounded by n ln n + n ln ln n
bound = int( limit * math.log(limit) + limit * math.log(math.log(limit)) ) + 1
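# Illustration: for limit = 10001 this gives roughly
#   10001*ln(10001) + 10001*ln(ln(10001)) ~= 92,113 + 22,206 ~= 114,319,
# comfortably above the actual 10001st prime (104743), so the sieve is large enough.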
primes = prime_sieve(bound)
print(primes[limit - 1])
times.append(time.perf_counter())
print(times[-1] - times[-2]) | [
"functions.prime_sieve",
"time.clock",
"math.log"
] | [((241, 259), 'functions.prime_sieve', 'prime_sieve', (['bound'], {}), '(bound)\n', (252, 259), False, 'from functions import prime_sieve\n'), ((82, 94), 'time.clock', 'time.clock', ([], {}), '()\n', (92, 94), False, 'import time\n'), ((299, 311), 'time.clock', 'time.clock', ([], {}), '()\n', (309, 311), False, 'import time\n'), ((174, 189), 'math.log', 'math.log', (['limit'], {}), '(limit)\n', (182, 189), False, 'import math\n'), ((209, 224), 'math.log', 'math.log', (['limit'], {}), '(limit)\n', (217, 224), False, 'import math\n')] |
# Generated by Django 2.2.13 on 2020-06-06 21:45
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('notification', '0009_auto_20190818_1125'),
]
operations = [
migrations.AlterField(
model_name='notification',
name='last_time',
field=models.DateTimeField(blank=True, null=True),
),
]
| [
"django.db.models.DateTimeField"
] | [((351, 394), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (371, 394), False, 'from django.db import migrations, models\n')] |
#md2html bulk converter for pubsidian
#ver 0.1-beta
#author: <NAME>, 2021
#notes: This converter is modified according to my own need. Please modify the code (eg. folders, outputs etc.) accordingly.
# Stable release will be coming soon.
#==============================================================================================================================
import markdown, re,os
#from markdown.treeprocessors import Treeprocessor
from markdown import markdown
from markdown.extensions.wikilinks import WikiLinkExtension
from urllib.parse import urlparse
from bs4 import BeautifulSoup
HOSTNAME="./Image/"
def find_md_links(md):
#Return dict of links in markdown
INLINE_LINK_RE = re.compile(r'\[([^\]]+)\]\(([^)]+)\)')
FOOTNOTE_LINK_TEXT_RE = re.compile(r'\[([^\]]+)\]\[(\d+)\]')
FOOTNOTE_LINK_URL_RE = re.compile(r'\[(\d+)\]:\s+(\S+)')
links = list(INLINE_LINK_RE.findall(md))
footnote_links = dict(FOOTNOTE_LINK_TEXT_RE.findall(md))
footnote_urls = dict(FOOTNOTE_LINK_URL_RE.findall(md))
for key in footnote_links.keys():
links.append((footnote_links[key], footnote_urls[footnote_links[key]]))
return links
def strip(text,sep):
return text.split(sep, 1)[0]
def highlight(text):
n = text.count("==")//2
t = text
for i in range(0,n):
t = t.replace("==","<mark>",1)
t = t.replace("==","</mark>",1)
return t
def md2html(fname):
inp = fname+'.md'
outp = "./pages/"+fname+'.html'
with open(inp, 'r') as f:
text = f.read()
text = markdown(text, extensions=[WikiLinkExtension(base_url='./pages/', end_url='.html')])
text = strip(text,"See more")
#print(find_md_links(text))
html = markdown(text)
html = highlight(html)
soup = BeautifulSoup(html,"html.parser")
for img in soup.findAll('img'):
urlInfo = urlparse(img['src'])
img['src'] = HOSTNAME + urlInfo.path
html = str(soup)
with open(outp, 'w') as f:
f.write(html)
def convert():
directory = r'./'
if not os.path.isdir(directory+"pages"):
os.mkdir("pages")
findex = []
for filename in os.listdir(directory):
if filename.endswith(".md"):
try:
md2html(filename[:-3])
findex.append(filename[:-3])
print("Converting "+filename)
except:
continue
else:
continue
f= open("data.json","w+")
f.write('{\n "graph": [],\n "links": [],\n "nodes": [\n')
for i in range(len(findex)):
if i==(len(findex)-1):
f.write('\t{"sl":'+str(i)+', "size": 10, "score": 0.5, "id": "'+findex[i]+'", "type":"circle"}\n')
else:
f.write('\t{"sl":'+str(i)+', "size": 10, "score": 0.5, "id": "'+findex[i]+'", "type":"circle"},\n')
f.write('],\n "directed": false,\n "multigraph": false\n}')
f.close()
print("\ndata.json has been created successfully.")
if __name__ == "__main__":
convert()
| [
"markdown.markdown",
"os.listdir",
"urllib.parse.urlparse",
"re.compile",
"bs4.BeautifulSoup",
"os.path.isdir",
"os.mkdir",
"markdown.extensions.wikilinks.WikiLinkExtension"
] | [((704, 746), 're.compile', 're.compile', (['"""\\\\[([^\\\\]]+)\\\\]\\\\(([^)]+)\\\\)"""'], {}), "('\\\\[([^\\\\]]+)\\\\]\\\\(([^)]+)\\\\)')\n", (714, 746), False, 'import markdown, re, os\n'), ((771, 812), 're.compile', 're.compile', (['"""\\\\[([^\\\\]]+)\\\\]\\\\[(\\\\d+)\\\\]"""'], {}), "('\\\\[([^\\\\]]+)\\\\]\\\\[(\\\\d+)\\\\]')\n", (781, 812), False, 'import markdown, re, os\n'), ((835, 872), 're.compile', 're.compile', (['"""\\\\[(\\\\d+)\\\\]:\\\\s+(\\\\S+)"""'], {}), "('\\\\[(\\\\d+)\\\\]:\\\\s+(\\\\S+)')\n", (845, 872), False, 'import markdown, re, os\n'), ((2209, 2230), 'os.listdir', 'os.listdir', (['directory'], {}), '(directory)\n', (2219, 2230), False, 'import markdown, re, os\n'), ((1750, 1764), 'markdown.markdown', 'markdown', (['text'], {}), '(text)\n', (1758, 1764), False, 'from markdown import markdown\n'), ((1811, 1845), 'bs4.BeautifulSoup', 'BeautifulSoup', (['html', '"""html.parser"""'], {}), "(html, 'html.parser')\n", (1824, 1845), False, 'from bs4 import BeautifulSoup\n'), ((2112, 2146), 'os.path.isdir', 'os.path.isdir', (["(directory + 'pages')"], {}), "(directory + 'pages')\n", (2125, 2146), False, 'import markdown, re, os\n'), ((2154, 2171), 'os.mkdir', 'os.mkdir', (['"""pages"""'], {}), "('pages')\n", (2162, 2171), False, 'import markdown, re, os\n'), ((1907, 1927), 'urllib.parse.urlparse', 'urlparse', (["img['src']"], {}), "(img['src'])\n", (1915, 1927), False, 'from urllib.parse import urlparse\n'), ((1603, 1658), 'markdown.extensions.wikilinks.WikiLinkExtension', 'WikiLinkExtension', ([], {'base_url': '"""./pages/"""', 'end_url': '""".html"""'}), "(base_url='./pages/', end_url='.html')\n", (1620, 1658), False, 'from markdown.extensions.wikilinks import WikiLinkExtension\n')] |
from app_init import app
import funcs
@app.route('/index', methods=["POST", "GET"])
@app.route('/', methods=["POST", "GET"])
def index():
return funcs.route_index()
@app.route('/add/', methods=["POST", "GET"])
def add():
return funcs.route_add()
@app.route('/add_target/', methods=["POST", "GET"])
def add_target():
return funcs.route_add_target()
@app.route('/profile/', methods=["GET"])
def profile():
return funcs.route_profile()
@app.route('/profile/<username>/')
def login_username(username):
return funcs.route_profile_username(username)
@app.route('/profile/<username>/weight/', methods=["POST", "GET"])
@app.route('/profile/<username>/weight/<int:page>/', methods=['GET', 'POST'])
def weight(username, page=1):
return funcs.route_weight(page)
@app.route('/profile/<username>/edit_profile/', methods=["POST", "GET"])
def edit_profile(username):
return funcs.edit_profile(username)
@app.route("/login/", methods=["POST", "GET"])
def login():
return funcs.route_login()
@app.route("/logout/")
def logout():
return funcs.rout_logout()
@app.route('/registration/', methods=['POST', 'GET'])
def registration():
return funcs.rout_registration()
@app.route('/admin/', methods=['POST', 'GET'])
def admin():
return funcs.rout_admin()
@app.errorhandler(404)
def page_note_found(error):
return funcs.page_note_found(error)
if __name__ == '__main__':  # Run the server on the local machine
    app.run(debug=True)  # show detailed error pages (debug mode)
| [
"funcs.route_profile",
"app_init.app.run",
"funcs.route_add",
"funcs.edit_profile",
"funcs.route_login",
"funcs.route_weight",
"funcs.rout_registration",
"funcs.page_note_found",
"funcs.route_profile_username",
"funcs.route_index",
"app_init.app.route",
"funcs.route_add_target",
"funcs.rout_logout",
"app_init.app.errorhandler",
"funcs.rout_admin"
] | [((41, 85), 'app_init.app.route', 'app.route', (['"""/index"""'], {'methods': "['POST', 'GET']"}), "('/index', methods=['POST', 'GET'])\n", (50, 85), False, 'from app_init import app\n'), ((87, 126), 'app_init.app.route', 'app.route', (['"""/"""'], {'methods': "['POST', 'GET']"}), "('/', methods=['POST', 'GET'])\n", (96, 126), False, 'from app_init import app\n'), ((174, 217), 'app_init.app.route', 'app.route', (['"""/add/"""'], {'methods': "['POST', 'GET']"}), "('/add/', methods=['POST', 'GET'])\n", (183, 217), False, 'from app_init import app\n'), ((261, 311), 'app_init.app.route', 'app.route', (['"""/add_target/"""'], {'methods': "['POST', 'GET']"}), "('/add_target/', methods=['POST', 'GET'])\n", (270, 311), False, 'from app_init import app\n'), ((369, 408), 'app_init.app.route', 'app.route', (['"""/profile/"""'], {'methods': "['GET']"}), "('/profile/', methods=['GET'])\n", (378, 408), False, 'from app_init import app\n'), ((460, 493), 'app_init.app.route', 'app.route', (['"""/profile/<username>/"""'], {}), "('/profile/<username>/')\n", (469, 493), False, 'from app_init import app\n'), ((577, 642), 'app_init.app.route', 'app.route', (['"""/profile/<username>/weight/"""'], {'methods': "['POST', 'GET']"}), "('/profile/<username>/weight/', methods=['POST', 'GET'])\n", (586, 642), False, 'from app_init import app\n'), ((644, 720), 'app_init.app.route', 'app.route', (['"""/profile/<username>/weight/<int:page>/"""'], {'methods': "['GET', 'POST']"}), "('/profile/<username>/weight/<int:page>/', methods=['GET', 'POST'])\n", (653, 720), False, 'from app_init import app\n'), ((790, 861), 'app_init.app.route', 'app.route', (['"""/profile/<username>/edit_profile/"""'], {'methods': "['POST', 'GET']"}), "('/profile/<username>/edit_profile/', methods=['POST', 'GET'])\n", (799, 861), False, 'from app_init import app\n'), ((933, 978), 'app_init.app.route', 'app.route', (['"""/login/"""'], {'methods': "['POST', 'GET']"}), "('/login/', methods=['POST', 'GET'])\n", (942, 978), False, 'from app_init import app\n'), ((1026, 1047), 'app_init.app.route', 'app.route', (['"""/logout/"""'], {}), "('/logout/')\n", (1035, 1047), False, 'from app_init import app\n'), ((1096, 1148), 'app_init.app.route', 'app.route', (['"""/registration/"""'], {'methods': "['POST', 'GET']"}), "('/registration/', methods=['POST', 'GET'])\n", (1105, 1148), False, 'from app_init import app\n'), ((1209, 1254), 'app_init.app.route', 'app.route', (['"""/admin/"""'], {'methods': "['POST', 'GET']"}), "('/admin/', methods=['POST', 'GET'])\n", (1218, 1254), False, 'from app_init import app\n'), ((1301, 1322), 'app_init.app.errorhandler', 'app.errorhandler', (['(404)'], {}), '(404)\n', (1317, 1322), False, 'from app_init import app\n'), ((151, 170), 'funcs.route_index', 'funcs.route_index', ([], {}), '()\n', (168, 170), False, 'import funcs\n'), ((240, 257), 'funcs.route_add', 'funcs.route_add', ([], {}), '()\n', (255, 257), False, 'import funcs\n'), ((341, 365), 'funcs.route_add_target', 'funcs.route_add_target', ([], {}), '()\n', (363, 365), False, 'import funcs\n'), ((435, 456), 'funcs.route_profile', 'funcs.route_profile', ([], {}), '()\n', (454, 456), False, 'import funcs\n'), ((535, 573), 'funcs.route_profile_username', 'funcs.route_profile_username', (['username'], {}), '(username)\n', (563, 573), False, 'import funcs\n'), ((762, 786), 'funcs.route_weight', 'funcs.route_weight', (['page'], {}), '(page)\n', (780, 786), False, 'import funcs\n'), ((901, 929), 'funcs.edit_profile', 'funcs.edit_profile', (['username'], {}), '(username)\n', 
(919, 929), False, 'import funcs\n'), ((1003, 1022), 'funcs.route_login', 'funcs.route_login', ([], {}), '()\n', (1020, 1022), False, 'import funcs\n'), ((1073, 1092), 'funcs.rout_logout', 'funcs.rout_logout', ([], {}), '()\n', (1090, 1092), False, 'import funcs\n'), ((1180, 1205), 'funcs.rout_registration', 'funcs.rout_registration', ([], {}), '()\n', (1203, 1205), False, 'import funcs\n'), ((1279, 1297), 'funcs.rout_admin', 'funcs.rout_admin', ([], {}), '()\n', (1295, 1297), False, 'import funcs\n'), ((1362, 1390), 'funcs.page_note_found', 'funcs.page_note_found', (['error'], {}), '(error)\n', (1383, 1390), False, 'import funcs\n'), ((1466, 1485), 'app_init.app.run', 'app.run', ([], {'debug': '(True)'}), '(debug=True)\n', (1473, 1485), False, 'from app_init import app\n')] |
import torch
import os
from utils import torch_distributed_zero_first
import PIL
from PIL import Image, ImageFile
from torchvision import transforms
import albumentations as A
from albumentations.pytorch import ToTensorV2
import json
import cv2
ImageFile.LOAD_TRUNCATED_IMAGES = True
import numpy as np
def create_dataloader(x,y,batch_size,hyp=None,mode="train",
rank=-1, world_size=1, workers=8):
# Make sure only the first process in DDP process the dataset first, and the following others can use the cache
with torch_distributed_zero_first(rank):
dataset = TrainDataset(x,y,mode="train",
hyp=hyp, # augmentation hyperparameters
rank=rank)
batch_size = min(batch_size, len(dataset))
nw = min([os.cpu_count() // world_size, batch_size if batch_size > 1 else 0, workers]) # number of workers
sampler = torch.utils.data.distributed.DistributedSampler(dataset) if rank != -1 else None
dataloader = InfiniteDataLoader(dataset,
shuffle=True,
batch_size=batch_size,
num_workers=nw,
sampler=sampler,
pin_memory=True,
collate_fn=TrainDataset.collate_fn) # torch.utils.data.DataLoader()
return dataloader, dataset
class InfiniteDataLoader(torch.utils.data.dataloader.DataLoader):
""" Dataloader that reuses workers
Uses same syntax as vanilla DataLoader
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
object.__setattr__(self, 'batch_sampler', _RepeatSampler(self.batch_sampler))
self.iterator = super().__iter__()
def __len__(self):
return len(self.batch_sampler.sampler)
def __iter__(self):
for i in range(len(self)):
yield next(self.iterator)
class _RepeatSampler(object):
""" Sampler that repeats forever
Args:
sampler (Sampler)
"""
def __init__(self, sampler):
self.sampler = sampler
def __iter__(self):
while True:
yield from iter(self.sampler)
class TrainDataset(torch.utils.data.Dataset):
def __init__(self,x,y,mode="train", hyp=None,rank=-1):
self.mode = mode
self.hyp = hyp
self.img_paths = x
self.labels = y
self.transformations = {
'train': self.get_train_transforms(),
'val': self.get_val_transforms(),
}
def get_train_transforms(self):
alb_transforms = A.Compose([
A.RandomBrightnessContrast(p=0.8),
A.HueSaturationValue(p=0.8),
A.ShiftScaleRotate(shift_limit=0.1, scale_limit=(0.8, 1.4), rotate_limit=0, p=0.1),
A.OneOf([
A.MotionBlur(blur_limit=25, p=1),
A.Blur(blur_limit=25, p=1),
# A.MedianBlur(blur_limit=25, p=1),
], p=0.3),
A.GaussNoise(p=0.5),
A.Normalize([self.hyp['mean0'], self.hyp['mean1'], self.hyp['mean2']], [self.hyp['std0'], self.hyp['std1'], self.hyp['std2']]),
ToTensorV2(),
])
return alb_transforms
def get_val_transforms(self):
alb_transforms = A.Compose([
A.Normalize([self.hyp['mean0'], self.hyp['mean1'], self.hyp['mean2']], [self.hyp['std0'], self.hyp['std1'], self.hyp['std2']]),
ToTensorV2(),
])
return alb_transforms
@staticmethod
def pil2cv(sample):
sample['image'] = np.array(sample['image'])[:, :, ::-1]
return sample
@staticmethod
def alb_transform_wrapper(transform, sample):
sample = TrainDataset.pil2cv(sample)
sample = transform(**sample)
return sample
def __len__(self):
return len(self.img_paths)
def __getitem__(self, index):
path = self.img_paths[index]
image = Image.open(path).convert("RGB")
label = torch.tensor(self.labels[index],dtype=torch.float32)
sample = {"image": image}
sample = TrainDataset.alb_transform_wrapper(self.transformations[self.mode], sample)
image = sample["image"]
return image,label.long()
@staticmethod
def collate_fn(batch):
img, label = zip(*batch) # transposed
return torch.stack(img, 0), torch.stack(label, 0)
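# --- Minimal usage sketch (not part of the original module) ---
# Assumptions: the image paths below are hypothetical placeholders, and the hyp dict
# only needs the normalization keys consumed by TrainDataset above (example values shown).
if __name__ == "__main__":
    hyp = {'mean0': 0.485, 'mean1': 0.456, 'mean2': 0.406,
           'std0': 0.229, 'std1': 0.224, 'std2': 0.225}
    x = ["sample_0.jpg", "sample_1.jpg"]  # hypothetical image files
    y = [0, 1]                            # matching integer class labels
    loader, dataset = create_dataloader(x, y, batch_size=2, hyp=hyp, mode="train")
    images, labels = next(iter(loader))
    print(images.shape, labels.shape)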
| [
"albumentations.ShiftScaleRotate",
"albumentations.pytorch.ToTensorV2",
"utils.torch_distributed_zero_first",
"PIL.Image.open",
"albumentations.RandomBrightnessContrast",
"albumentations.Blur",
"torch.stack",
"albumentations.GaussNoise",
"albumentations.HueSaturationValue",
"numpy.array",
"torch.tensor",
"torch.utils.data.distributed.DistributedSampler",
"albumentations.Normalize",
"albumentations.MotionBlur",
"os.cpu_count"
] | [((568, 602), 'utils.torch_distributed_zero_first', 'torch_distributed_zero_first', (['rank'], {}), '(rank)\n', (596, 602), False, 'from utils import torch_distributed_zero_first\n'), ((955, 1011), 'torch.utils.data.distributed.DistributedSampler', 'torch.utils.data.distributed.DistributedSampler', (['dataset'], {}), '(dataset)\n', (1002, 1011), False, 'import torch\n'), ((4102, 4155), 'torch.tensor', 'torch.tensor', (['self.labels[index]'], {'dtype': 'torch.float32'}), '(self.labels[index], dtype=torch.float32)\n', (4114, 4155), False, 'import torch\n'), ((3674, 3699), 'numpy.array', 'np.array', (["sample['image']"], {}), "(sample['image'])\n", (3682, 3699), True, 'import numpy as np\n'), ((4456, 4475), 'torch.stack', 'torch.stack', (['img', '(0)'], {}), '(img, 0)\n', (4467, 4475), False, 'import torch\n'), ((4477, 4498), 'torch.stack', 'torch.stack', (['label', '(0)'], {}), '(label, 0)\n', (4488, 4498), False, 'import torch\n'), ((843, 857), 'os.cpu_count', 'os.cpu_count', ([], {}), '()\n', (855, 857), False, 'import os\n'), ((2722, 2755), 'albumentations.RandomBrightnessContrast', 'A.RandomBrightnessContrast', ([], {'p': '(0.8)'}), '(p=0.8)\n', (2748, 2755), True, 'import albumentations as A\n'), ((2769, 2796), 'albumentations.HueSaturationValue', 'A.HueSaturationValue', ([], {'p': '(0.8)'}), '(p=0.8)\n', (2789, 2796), True, 'import albumentations as A\n'), ((2810, 2896), 'albumentations.ShiftScaleRotate', 'A.ShiftScaleRotate', ([], {'shift_limit': '(0.1)', 'scale_limit': '(0.8, 1.4)', 'rotate_limit': '(0)', 'p': '(0.1)'}), '(shift_limit=0.1, scale_limit=(0.8, 1.4), rotate_limit=0,\n p=0.1)\n', (2828, 2896), True, 'import albumentations as A\n'), ((3097, 3116), 'albumentations.GaussNoise', 'A.GaussNoise', ([], {'p': '(0.5)'}), '(p=0.5)\n', (3109, 3116), True, 'import albumentations as A\n'), ((3130, 3261), 'albumentations.Normalize', 'A.Normalize', (["[self.hyp['mean0'], self.hyp['mean1'], self.hyp['mean2']]", "[self.hyp['std0'], self.hyp['std1'], self.hyp['std2']]"], {}), "([self.hyp['mean0'], self.hyp['mean1'], self.hyp['mean2']], [\n self.hyp['std0'], self.hyp['std1'], self.hyp['std2']])\n", (3141, 3261), True, 'import albumentations as A\n'), ((3270, 3282), 'albumentations.pytorch.ToTensorV2', 'ToTensorV2', ([], {}), '()\n', (3280, 3282), False, 'from albumentations.pytorch import ToTensorV2\n'), ((3409, 3540), 'albumentations.Normalize', 'A.Normalize', (["[self.hyp['mean0'], self.hyp['mean1'], self.hyp['mean2']]", "[self.hyp['std0'], self.hyp['std1'], self.hyp['std2']]"], {}), "([self.hyp['mean0'], self.hyp['mean1'], self.hyp['mean2']], [\n self.hyp['std0'], self.hyp['std1'], self.hyp['std2']])\n", (3420, 3540), True, 'import albumentations as A\n'), ((3549, 3561), 'albumentations.pytorch.ToTensorV2', 'ToTensorV2', ([], {}), '()\n', (3559, 3561), False, 'from albumentations.pytorch import ToTensorV2\n'), ((4054, 4070), 'PIL.Image.open', 'Image.open', (['path'], {}), '(path)\n', (4064, 4070), False, 'from PIL import Image, ImageFile\n'), ((2932, 2964), 'albumentations.MotionBlur', 'A.MotionBlur', ([], {'blur_limit': '(25)', 'p': '(1)'}), '(blur_limit=25, p=1)\n', (2944, 2964), True, 'import albumentations as A\n'), ((2982, 3008), 'albumentations.Blur', 'A.Blur', ([], {'blur_limit': '(25)', 'p': '(1)'}), '(blur_limit=25, p=1)\n', (2988, 3008), True, 'import albumentations as A\n')] |
# -*- coding: UTF-8 -*-
from timezonefinder import TimezoneFinder
from datetime import datetime
from pytz import timezone
"""
This class handles all of the timezone translation functions.
"""
class TimeZoneHelper:
def __init__(self,localTimeZone):
self.tf = TimezoneFinder()
self.LocalTimeZone = localTimeZone
def getTimezone(self,lnglatStr):
if lnglatStr:
t = lnglatStr.split(",")
if len(t)==2:
latitude, longitude = float(t[0]), float(t[1])
return self.tf.timezone_at(lng=longitude, lat=latitude) # returns 'Europe/Berlin'
#depend on the setting localTimeZone when construct
#return Local Date Time
def getLocalTime(self,utcTime):
return self.zoneUtcToLocalTime(self.LocalTimeZone,utcTime)
#depend on the setting localTimeZone when construct
#return UTC Date Time
def getUTCTime(self,localTime):
UTC = timezone('UTC')
locationZone = timezone(self.LocalTimeZone)
return locationZone.localize(localTime).astimezone(UTC)
#gps to local datetime
def gpsUtcToLocalTime(self,lnglatStr,utcTime):
zone = self.getTimezone(lnglatStr)
return self.zoneUtcToLocalTime(zone,utcTime)
#timezone to local datetime
def zoneUtcToLocalTime(self,zone,utcTime):
UTC = timezone('UTC')
locationZone = timezone(zone)
return UTC.localize(utcTime).astimezone(locationZone)
#timezone to local datetime
def UtcToUtcTime(self,utcTime):
UTC = timezone('UTC')
locationZone = timezone('UTC')
return UTC.localize(utcTime).astimezone(locationZone)
if __name__ == "__main__":
    helper = TimeZoneHelper('UTC')  # the constructor requires a local timezone name; 'UTC' is only an example value
zone = helper.getTimezone('25.088791666666665, 121.46666666666667')
print(zone)
t = helper.zoneUtcToLocalTime(zone,datetime.now())
print(t)
| [
"pytz.timezone",
"timezonefinder.TimezoneFinder",
"datetime.datetime.now"
] | [((273, 289), 'timezonefinder.TimezoneFinder', 'TimezoneFinder', ([], {}), '()\n', (287, 289), False, 'from timezonefinder import TimezoneFinder\n'), ((950, 965), 'pytz.timezone', 'timezone', (['"""UTC"""'], {}), "('UTC')\n", (958, 965), False, 'from pytz import timezone\n'), ((997, 1025), 'pytz.timezone', 'timezone', (['self.LocalTimeZone'], {}), '(self.LocalTimeZone)\n', (1005, 1025), False, 'from pytz import timezone\n'), ((1359, 1374), 'pytz.timezone', 'timezone', (['"""UTC"""'], {}), "('UTC')\n", (1367, 1374), False, 'from pytz import timezone\n'), ((1406, 1420), 'pytz.timezone', 'timezone', (['zone'], {}), '(zone)\n', (1414, 1420), False, 'from pytz import timezone\n'), ((1566, 1581), 'pytz.timezone', 'timezone', (['"""UTC"""'], {}), "('UTC')\n", (1574, 1581), False, 'from pytz import timezone\n'), ((1613, 1628), 'pytz.timezone', 'timezone', (['"""UTC"""'], {}), "('UTC')\n", (1621, 1628), False, 'from pytz import timezone\n'), ((1884, 1898), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1896, 1898), False, 'from datetime import datetime\n')] |
from pyrogram import Client
from os import getenv
from config import API_ID,API_HASH,BOT_TOKEN,PLUGINS
Client('bot', api_id=API_ID, api_hash=API_HASH, bot_token=BOT_TOKEN,plugins=PLUGINS).run() | [
"pyrogram.Client"
] | [((105, 194), 'pyrogram.Client', 'Client', (['"""bot"""'], {'api_id': 'API_ID', 'api_hash': 'API_HASH', 'bot_token': 'BOT_TOKEN', 'plugins': 'PLUGINS'}), "('bot', api_id=API_ID, api_hash=API_HASH, bot_token=BOT_TOKEN,\n plugins=PLUGINS)\n", (111, 194), False, 'from pyrogram import Client\n')] |
import requests
import sys
import tempfile
import subprocess
import os
import re
ip = sys.argv[1]
r = re.compile(r'<b>ID:</b> (\d+)</p>')
host = f'http://{ip}:4000'
resp = requests.get(host + '/feed')
for v_id in r.findall(resp.text):
resp = requests.get(host + "/vtt/?id=10/../" + v_id)
print(resp.text)
| [
"requests.get",
"re.compile"
] | [((104, 139), 're.compile', 're.compile', (['"""<b>ID:</b> (\\\\d+)</p>"""'], {}), "('<b>ID:</b> (\\\\d+)</p>')\n", (114, 139), False, 'import re\n'), ((175, 203), 'requests.get', 'requests.get', (["(host + '/feed')"], {}), "(host + '/feed')\n", (187, 203), False, 'import requests\n'), ((250, 295), 'requests.get', 'requests.get', (["(host + '/vtt/?id=10/../' + v_id)"], {}), "(host + '/vtt/?id=10/../' + v_id)\n", (262, 295), False, 'import requests\n')] |
import re
def convert_to_edi(filename):
"""
    Converts the file to an EDIFACT-format file in the same location
    :param filename:
        filename with relative path
    :return:
        does not return anything; the converted file is stored in the same location
"""
input_file = open(filename, 'r')
contents = input_file.read()
contents = re.sub(r"\\\*", "convertingReleaseCharecter", contents)
contents = re.sub(r"\\\+", "convertingReleaseCharecter1", contents)
contents = re.sub(r"\\\:", "convertingReleaseCharecter2", contents)
contents = re.sub(r"\*O", "", contents)
contents = re.sub(r"''", "", contents)
contents = re.sub(r"\+", "\x1d", contents)
contents = re.sub(r":", "\x1f", contents)
contents = re.sub(r"'&", "\x1c", contents)
contents = re.sub(r"\*", "\x19", contents)
contents = re.sub(r"\n", "", contents)
contents = re.sub(r"\r", "", contents)
contents = re.sub(r"\'", "\x1c", contents)
contents = re.sub(r"convertingReleaseCharecter", "*", contents)
contents = re.sub(r"convertingReleaseCharecter1", '+', contents)
contents = re.sub(r"convertingReleaseCharecter2", ":", contents)
output_file = open(filename.split(".")[0] + ".edi", "w")
output_file.write(contents)
input_file.close()
output_file.close()
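# Hypothetical usage example (the file name is a placeholder):
#   convert_to_edi("orders.txt")   # writes the converted message to orders.edi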
| [
"re.sub"
] | [((357, 414), 're.sub', 're.sub', (['"""\\\\\\\\\\\\*"""', '"""convertingReleaseCharecter"""', 'contents'], {}), "('\\\\\\\\\\\\*', 'convertingReleaseCharecter', contents)\n", (363, 414), False, 'import re\n'), ((428, 486), 're.sub', 're.sub', (['"""\\\\\\\\\\\\+"""', '"""convertingReleaseCharecter1"""', 'contents'], {}), "('\\\\\\\\\\\\+', 'convertingReleaseCharecter1', contents)\n", (434, 486), False, 'import re\n'), ((500, 558), 're.sub', 're.sub', (['"""\\\\\\\\\\\\:"""', '"""convertingReleaseCharecter2"""', 'contents'], {}), "('\\\\\\\\\\\\:', 'convertingReleaseCharecter2', contents)\n", (506, 558), False, 'import re\n'), ((572, 600), 're.sub', 're.sub', (['"""\\\\*O"""', '""""""', 'contents'], {}), "('\\\\*O', '', contents)\n", (578, 600), False, 'import re\n'), ((616, 642), 're.sub', 're.sub', (['"""\'\'"""', '""""""', 'contents'], {}), '("\'\'", \'\', contents)\n', (622, 642), False, 'import re\n'), ((659, 690), 're.sub', 're.sub', (['"""\\\\+"""', '"""\x1d"""', 'contents'], {}), "('\\\\+', '\\x1d', contents)\n", (665, 690), False, 'import re\n'), ((706, 735), 're.sub', 're.sub', (['""":"""', '"""\x1f"""', 'contents'], {}), "(':', '\\x1f', contents)\n", (712, 735), False, 'import re\n'), ((752, 782), 're.sub', 're.sub', (['"""\'&"""', '"""\x1c"""', 'contents'], {}), '("\'&", \'\\x1c\', contents)\n', (758, 782), False, 'import re\n'), ((799, 830), 're.sub', 're.sub', (['"""\\\\*"""', '"""\x19"""', 'contents'], {}), "('\\\\*', '\\x19', contents)\n", (805, 830), False, 'import re\n'), ((846, 873), 're.sub', 're.sub', (['"""\\\\n"""', '""""""', 'contents'], {}), "('\\\\n', '', contents)\n", (852, 873), False, 'import re\n'), ((889, 916), 're.sub', 're.sub', (['"""\\\\r"""', '""""""', 'contents'], {}), "('\\\\r', '', contents)\n", (895, 916), False, 'import re\n'), ((932, 963), 're.sub', 're.sub', (['"""\\\\\'"""', '"""\x1c"""', 'contents'], {}), '("\\\\\'", \'\\x1c\', contents)\n', (938, 963), False, 'import re\n'), ((979, 1030), 're.sub', 're.sub', (['"""convertingReleaseCharecter"""', '"""*"""', 'contents'], {}), "('convertingReleaseCharecter', '*', contents)\n", (985, 1030), False, 'import re\n'), ((1047, 1099), 're.sub', 're.sub', (['"""convertingReleaseCharecter1"""', '"""+"""', 'contents'], {}), "('convertingReleaseCharecter1', '+', contents)\n", (1053, 1099), False, 'import re\n'), ((1116, 1168), 're.sub', 're.sub', (['"""convertingReleaseCharecter2"""', '""":"""', 'contents'], {}), "('convertingReleaseCharecter2', ':', contents)\n", (1122, 1168), False, 'import re\n')] |
__author__ = 'Kalyan'
problem = """
We are going to revisit unit6 assignment3 for this problem.
Given an input file of words (mixed case). Group those words into anagram groups and write them
into the destination file so that words in larger anagram groups come before words in smaller anagram sets.
With in an anagram group, order them in case insensitive ascending sorting order.
If 2 anagram groups have same count, then set with smaller starting word comes first.
For e.g. if source contains (ant, Tan, cat, TAC, Act, bat, Tab), the anagram groups are (ant, Tan), (bat, Tab)
and (Act, cat, TAC) and destination should contain Act, cat, TAC, ant, Tan, bat, Tab (one word in each line).
the (ant, Tan) set comes before (bat, Tab) as ant < bat.
At first sight, this looks like a big problem, but you can decompose into smaller problems and crack each one.
This program should be written as a command line script. It takes one argument the input file of words and outputs
<input>-results.txt where <input>.txt is the input file of words.
"""
import sys
import unit6utils
import string
import os
def open_input_file(file, mode="rt"):
mod_dir = unit6utils.get_module_dir()
test_file = unit6utils.get_input_file(file)
return open(test_file, mode)
def open_temp_file(file, mode):
data_dir = os.getenv("DATA_DIR", default=unit6utils.get_temp_dir())
out_file = os.path.join(data_dir, file)
return open(out_file, mode)
def are_anagrams(first, second):
if first != None and second != None:
return sorted("".join(first.lower().split())) == sorted("".join(second.lower().split()))
else:
return False
def anagram_sort(source,destination):
result,anagram_sub_groups,anagram_full_groups=[],[],[]
for line in open_input_file(source):
if line.strip() != '' and line.strip()[0] != '#':
result.append(line.strip())
while result != []:
anagram_sub_groups = [result[i] for i in range(len(result)) if are_anagrams(result[0], result[i])]
anagram_full_groups.append(anagram_sub_groups)
[result.remove(i) for i in anagram_sub_groups]
    [i.sort(key=lambda x: x.lower()) for i in anagram_full_groups]
    # two stable sorts replace the Python-2-only cmp= sort: order groups
    # alphabetically (case-insensitive), then by group size, descending
    anagram_full_groups.sort(key=lambda group: [word.lower() for word in group])
    anagram_full_groups.sort(key=len, reverse=True)
    anagram_full_groups = [j for i in anagram_full_groups for j in i]
    temp = anagram_full_groups[-1]
    anagram_full_groups = [word + "\n" for word in anagram_full_groups[:-1]]
    anagram_full_groups.append(temp)
f=open_temp_file(destination,"wt")
f.write("".join(anagram_full_groups))
if __name__ == "__main__":
sys.argv
source=unit6utils.get_input_file(sys.argv[1])
destination = unit6utils.get_temp_file(sys.argv[1][:-4]+'-results.txt')
anagram_sort(source,destination)
# sys.exit(main())
| [
"unit6utils.get_input_file",
"unit6utils.get_temp_dir",
"os.path.join",
"unit6utils.get_module_dir",
"unit6utils.get_temp_file"
] | [((1191, 1218), 'unit6utils.get_module_dir', 'unit6utils.get_module_dir', ([], {}), '()\n', (1216, 1218), False, 'import unit6utils\n'), ((1236, 1267), 'unit6utils.get_input_file', 'unit6utils.get_input_file', (['file'], {}), '(file)\n', (1261, 1267), False, 'import unit6utils\n'), ((1424, 1452), 'os.path.join', 'os.path.join', (['data_dir', 'file'], {}), '(data_dir, file)\n', (1436, 1452), False, 'import os\n'), ((2994, 3032), 'unit6utils.get_input_file', 'unit6utils.get_input_file', (['sys.argv[1]'], {}), '(sys.argv[1])\n', (3019, 3032), False, 'import unit6utils\n'), ((3052, 3111), 'unit6utils.get_temp_file', 'unit6utils.get_temp_file', (["(sys.argv[1][:-4] + '-results.txt')"], {}), "(sys.argv[1][:-4] + '-results.txt')\n", (3076, 3111), False, 'import unit6utils\n'), ((1381, 1406), 'unit6utils.get_temp_dir', 'unit6utils.get_temp_dir', ([], {}), '()\n', (1404, 1406), False, 'import unit6utils\n')] |
# coding: utf-8
import setuptools
setuptools.setup(setup_requires=['pbr'], pbr=True)
| [
"setuptools.setup"
] | [((35, 85), 'setuptools.setup', 'setuptools.setup', ([], {'setup_requires': "['pbr']", 'pbr': '(True)'}), "(setup_requires=['pbr'], pbr=True)\n", (51, 85), False, 'import setuptools\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pprint
import vpp
from flask import Flask, request, jsonify, json, url_for, abort, Response
from flask import redirect, render_template
app = Flask(__name__)
app.debug = True
pp = pprint.PrettyPrinter(indent=2)
'''
comment
'''
v = vpp.vpp_ctl()
@app.route('/vpp_version')
def get_vpp_version():
ver = v.get_version()
return ver
@app.route('/status')
def get_status():
return json.dumps({'error': 'OK'})
# see 00_notebook/home_backup/src/CNI_script/server.py
# request.form['scripts'] // for POST
# request.args.get('') // for GET
@app.route('/vtep', methods={'POST','GET'})
def add_vtep():
if request.method == "POST":
srcip = request.json['src']
dstip = request.json['dst']
vni = int(request.json['vni'])
rv = v.create_vxlan_vtep(srcip, dstip, vni)
if rv == None:
return json.dumps({'error': 'NG!'})
#print(rv)
return json.dumps({'error': 'OK', "sw_if_index": rv})
else: # case for GET
# vpp_ip, koko_ip, vxid
vxlan_list = v.get_vxlan_vtep()
return json.dumps({'vxlan': vxlan_list, 'error': 'OK'})
@app.route('/vtep/delete', methods={'POST'})
def delete_vtep():
srcip = request.json['src']
dstip = request.json['dst']
vni = int(request.json['vni'])
rv = v.delete_vxlan_vtep(srcip, dstip, vni)
if rv != None:
return json.dumps({'error': 'NG!'})
return json.dumps({'error': 'OK'})
@app.route('/vtep_ifindex/<vppip>/<hostip>/<vnid>', methods={'GET'})
def get_ifindex(vppip, hostip, vnid):
ifindex = v.get_vxlan_ifindex(vppip, hostip, int(vnid))
if ifindex == None:
return json.dumps({'error': 'Not found'})
return json.dumps({'error': 'OK', 'ifindex':ifindex})
@app.route('/vtep_connect', methods={'POST','GET'})
def xconnect():
if request.method == "POST":
# vxid1, vxid2
sw_if_index1 = int(request.json['sw_if_index1'])
sw_if_index2 = int(request.json['sw_if_index2'])
rv = v.add_xconnect_idx(sw_if_index1, sw_if_index2)
if rv != None:
return json.dumps({'error': 'NG'})
return json.dumps({'error': 'OK'})
else: # case for GET
# vpp_ip, koko_ip, vxid
xconnect_list = v.get_xconnect()
return json.dumps({'xconnect': xconnect_list, 'error': 'OK'})
@app.route('/vtep_connect/delete', methods={'POST'})
def delete_xconnect():
# vxid1, vxid2
print("DELETE:" + request.json['sw_if_index1'] + '/' + request.json['sw_if_index2'])
sw_if_index1 = int(request.json['sw_if_index1'])
sw_if_index2 = int(request.json['sw_if_index2'])
rv = v.del_xconnect_idx(sw_if_index1, sw_if_index2)
if rv != None:
return json.dumps({'error': 'NG'})
return json.dumps({'error': 'OK'})
if app.debug:
from werkzeug.debug import DebuggedApplication
app.wsgi_app = DebuggedApplication(app.wsgi_app, True)
# in case of gunicorn,
# gunicorn server:app --bind <ip>:5000
if __name__ == '__main__':
app.run()
| [
"vpp.vpp_ctl",
"flask.Flask",
"flask.json.dumps",
"werkzeug.debug.DebuggedApplication",
"pprint.PrettyPrinter"
] | [((197, 212), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (202, 212), False, 'from flask import Flask, request, jsonify, json, url_for, abort, Response\n'), ((235, 265), 'pprint.PrettyPrinter', 'pprint.PrettyPrinter', ([], {'indent': '(2)'}), '(indent=2)\n', (255, 265), False, 'import pprint\n'), ((287, 300), 'vpp.vpp_ctl', 'vpp.vpp_ctl', ([], {}), '()\n', (298, 300), False, 'import vpp\n'), ((445, 472), 'flask.json.dumps', 'json.dumps', (["{'error': 'OK'}"], {}), "({'error': 'OK'})\n", (455, 472), False, 'from flask import Flask, request, jsonify, json, url_for, abort, Response\n'), ((1467, 1494), 'flask.json.dumps', 'json.dumps', (["{'error': 'OK'}"], {}), "({'error': 'OK'})\n", (1477, 1494), False, 'from flask import Flask, request, jsonify, json, url_for, abort, Response\n'), ((1748, 1795), 'flask.json.dumps', 'json.dumps', (["{'error': 'OK', 'ifindex': ifindex}"], {}), "({'error': 'OK', 'ifindex': ifindex})\n", (1758, 1795), False, 'from flask import Flask, request, jsonify, json, url_for, abort, Response\n'), ((2796, 2823), 'flask.json.dumps', 'json.dumps', (["{'error': 'OK'}"], {}), "({'error': 'OK'})\n", (2806, 2823), False, 'from flask import Flask, request, jsonify, json, url_for, abort, Response\n'), ((2909, 2948), 'werkzeug.debug.DebuggedApplication', 'DebuggedApplication', (['app.wsgi_app', '(True)'], {}), '(app.wsgi_app, True)\n', (2928, 2948), False, 'from werkzeug.debug import DebuggedApplication\n'), ((972, 1018), 'flask.json.dumps', 'json.dumps', (["{'error': 'OK', 'sw_if_index': rv}"], {}), "({'error': 'OK', 'sw_if_index': rv})\n", (982, 1018), False, 'from flask import Flask, request, jsonify, json, url_for, abort, Response\n'), ((1131, 1179), 'flask.json.dumps', 'json.dumps', (["{'vxlan': vxlan_list, 'error': 'OK'}"], {}), "({'vxlan': vxlan_list, 'error': 'OK'})\n", (1141, 1179), False, 'from flask import Flask, request, jsonify, json, url_for, abort, Response\n'), ((1427, 1455), 'flask.json.dumps', 'json.dumps', (["{'error': 'NG!'}"], {}), "({'error': 'NG!'})\n", (1437, 1455), False, 'from flask import Flask, request, jsonify, json, url_for, abort, Response\n'), ((1702, 1736), 'flask.json.dumps', 'json.dumps', (["{'error': 'Not found'}"], {}), "({'error': 'Not found'})\n", (1712, 1736), False, 'from flask import Flask, request, jsonify, json, url_for, abort, Response\n'), ((2179, 2206), 'flask.json.dumps', 'json.dumps', (["{'error': 'OK'}"], {}), "({'error': 'OK'})\n", (2189, 2206), False, 'from flask import Flask, request, jsonify, json, url_for, abort, Response\n'), ((2321, 2375), 'flask.json.dumps', 'json.dumps', (["{'xconnect': xconnect_list, 'error': 'OK'}"], {}), "({'xconnect': xconnect_list, 'error': 'OK'})\n", (2331, 2375), False, 'from flask import Flask, request, jsonify, json, url_for, abort, Response\n'), ((2757, 2784), 'flask.json.dumps', 'json.dumps', (["{'error': 'NG'}"], {}), "({'error': 'NG'})\n", (2767, 2784), False, 'from flask import Flask, request, jsonify, json, url_for, abort, Response\n'), ((909, 937), 'flask.json.dumps', 'json.dumps', (["{'error': 'NG!'}"], {}), "({'error': 'NG!'})\n", (919, 937), False, 'from flask import Flask, request, jsonify, json, url_for, abort, Response\n'), ((2136, 2163), 'flask.json.dumps', 'json.dumps', (["{'error': 'NG'}"], {}), "({'error': 'NG'})\n", (2146, 2163), False, 'from flask import Flask, request, jsonify, json, url_for, abort, Response\n')] |
import string
from math import gcd
char_num_map = {j: i for i, j in enumerate(string.ascii_lowercase + " ")}
num_char_map = {i: j for i, j in enumerate(string.ascii_lowercase + " ")}
def encrypt(text, key):
encrypted = ""
multiply_key = key[0]
addition_key = key[1]
text = text.lower()
for letter in text:
letter_key = char_num_map[letter]
multiplication = (letter_key * multiply_key) % vocab_size
addition = (multiplication + addition_key) % vocab_size
encrypted += num_char_map[addition]
return encrypted
def multiplicative_inverse(a, m):
if gcd(a, m) == 1:
a = a % m
for x in range(1, m):
if ((a * x) % m == 1):
return x
return 1
def additive_inverse(a, m):
return (-a + m) % m
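# Worked example for the default key (7, 2) and the 27-character vocabulary used below:
#   multiplicative_inverse(7, 27) = 4   because 7 * 4 = 28 ≡ 1 (mod 27)
#   additive_inverse(2, 27)       = 25  because 2 + 25 = 27 ≡ 0 (mod 27)
# so decrypt maps each cipher index c back to 4 * (c + 25) mod 27,
# e.g. 'a' (0) encrypts to (0*7 + 2) = 2 -> 'c', and 'c' decrypts back to 'a'.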
def decrypt(text, key):
decrypted = ""
multiply_key = key[0]
addition_key = key[1]
multiply_key_inv = multiplicative_inverse(multiply_key, vocab_size)
addition_key_inv = additive_inverse(addition_key, vocab_size)
text = text.lower()
for letter in text:
letter_key = char_num_map[letter]
addition = (letter_key + addition_key_inv) % vocab_size
multiplication = (addition * multiply_key_inv) % vocab_size
decrypted += num_char_map[multiplication]
return decrypted
if __name__ == "__main__":
vocab_size = len(string.ascii_lowercase + " ")
message = input("Enter text: ")
encrypted_message = encrypt(message, (7, 2))
decrypted_message = decrypt(encrypted_message, (7, 2))
print(encrypted_message)
print(decrypted_message)
| [
"math.gcd"
] | [((610, 619), 'math.gcd', 'gcd', (['a', 'm'], {}), '(a, m)\n', (613, 619), False, 'from math import gcd\n')] |
from dplaapi import models
def test_db_can_be_instantiated():
assert models.db
def test_account_can_be_instantiated():
account = models.Account(
key='<KEY>',
email='<EMAIL>')
assert account
| [
"dplaapi.models.Account"
] | [((142, 186), 'dplaapi.models.Account', 'models.Account', ([], {'key': '"""<KEY>"""', 'email': '"""<EMAIL>"""'}), "(key='<KEY>', email='<EMAIL>')\n", (156, 186), False, 'from dplaapi import models\n')] |
import calendar
import datetime
import os
import shutil
import rasterio
import rasterstats
import netCDF4
import numpy
import pandas
import statistics
from .options import app_settings, gldas_variables, gfs_variables
from .app import Earthobserver as App
def newchart(data):
"""
Determines the environment for generating a timeseries chart
:param data: a JSON object with params from the UI/API call
:return:
"""
# input parameters
var = str(data['variable'])
model = data['model']
loc_type = data['loc_type']
# environment settings
configs = app_settings()
path = configs['threddsdatadir']
timestamp = configs['timestamp']
# list the netcdfs to be processed
if model == 'gldas':
path = os.path.join(path, 'gldas', 'raw')
allfiles = os.listdir(path)
files = [nc for nc in allfiles if nc.endswith('.nc4')]
if data['time'] != 'alltimes':
files = [i for i in files if data['time'] in i]
variables = gldas_variables()
for key in variables:
if variables[key] == data['variable']:
                name = key
                break
if model == 'gfs':
path = os.path.join(path, 'gfs', timestamp, 'netcdfs')
allfiles = os.listdir(path)
files = [nc for nc in allfiles if nc.startswith(data['level']) and nc.endswith('.nc')]
variables = gfs_variables()
for option in variables:
if option[1] == data['variable']:
name = option[0]
break
files.sort()
if loc_type == 'Point':
values, units = pointchart(var, data['coords'], path, files)
type = 'Values at a Point'
elif loc_type == 'Polygon':
values, units = polychart(var, data['coords'], path, files)
type = 'Averaged over a Polygon'
elif loc_type == 'Shapefile':
values, units = shpchart(var, path, files, data['region'], data['user'])
if data['region'] == 'customshape':
type = 'Average for user\'s shapefile'
else:
type = 'Average for ' + data['region']
values.sort(key=lambda tup: tup[0])
resp = {'values': values, 'units': units, 'variable': var, 'type': type, 'name': name}
# if model == 'gldas':
# resp['multiline'], resp['boxplot'], resp['categories'] = makestatplots(values, data['time'])
return resp
def pointchart(var, coords, path, files):
"""
Description: generates a timeseries for a given point and given variable defined by the user.
Arguments: A dictionary object from the AJAX-ed JSON object that contains coordinates and the variable name.
Author: <NAME>
Dependencies: netcdf4, numpy, datetime, os, calendar, app_settings (options)
Last Updated: Oct 11 2018
"""
# return items
values = []
# get a list of the lat/lon and units using a reference file
nc_obj = netCDF4.Dataset(os.path.join(path, files[0]), 'r')
nc_lons = nc_obj['lon'][:]
nc_lats = nc_obj['lat'][:]
units = nc_obj[var].__dict__['units']
# get the index number of the lat/lon for the point
lon_indx = (numpy.abs(nc_lons - int(coords[0]))).argmin()
lat_indx = (numpy.abs(nc_lats - int(coords[1]))).argmin()
nc_obj.close()
# extract values at each timestep
for nc in files:
# get the time value for each file
nc_obj = netCDF4.Dataset(path + '/' + nc, 'r')
t_val = nc_obj['time'].__dict__['begin_date']
try:
t_val = datetime.datetime.strptime(t_val, "%Y%m%d")
except ValueError:
t_val = datetime.datetime.strptime(t_val, "%Y%m%d%H")
time = calendar.timegm(t_val.utctimetuple()) * 1000
# slice the array at the area you want
val = float(nc_obj[var][0, lat_indx, lon_indx].data)
values.append((time, val))
nc_obj.close()
return values, units
def polychart(var, coords, path, files):
"""
    Description: generates a timeseries averaged over a user-drawn polygon for the variable chosen by the user.
Arguments: A dictionary object from the AJAX-ed JSON object that contains coordinates and the variable name.
Author: <NAME>
Dependencies: netcdf4, numpy, datetime, os, calendar, app_settings (options)
Last Updated: May 14 2019
"""
# return items
values = []
# get a list of the latitudes and longitudes and the units
nc_obj = netCDF4.Dataset(os.path.join(path, str(files[0])), 'r')
nc_lons = nc_obj['lon'][:]
nc_lats = nc_obj['lat'][:]
units = nc_obj[var].__dict__['units']
# get a bounding box of the rectangle in terms of the index number of their lat/lons
minlon = (numpy.abs(nc_lons - int(coords[0][1][0]))).argmin()
maxlon = (numpy.abs(nc_lons - int(coords[0][3][0]))).argmin()
maxlat = (numpy.abs(nc_lats - int(coords[0][1][1]))).argmin()
minlat = (numpy.abs(nc_lats - int(coords[0][3][1]))).argmin()
nc_obj.close()
# extract values at each timestep
for nc in files:
# set the time value for each file
nc_obj = netCDF4.Dataset(path + '/' + nc, 'r')
t_val = nc_obj['time'].__dict__['begin_date']
try:
t_val = datetime.datetime.strptime(t_val, "%Y%m%d")
except ValueError:
t_val = datetime.datetime.strptime(t_val, "%Y%m%d%H")
time = calendar.timegm(t_val.utctimetuple()) * 1000
# slice the array, drop nan values, get the mean, append to list of values
array = nc_obj[var][0, minlat:maxlat, minlon:maxlon].data
        array[array < -5000] = numpy.nan  # If you have fill values, change the comparator to get rid of them
array = array.flatten()
array = array[~numpy.isnan(array)]
values.append((time, float(array.mean())))
nc_obj.close()
return values, units
def shpchart(var, path, files, region, user):
"""
Description: This script accepts a netcdf file in a geographic coordinate system, specifically the NASA GLDAS
netcdfs, and extracts the data from one variable and the lat/lon steps to create a geotiff of that information.
Dependencies: netCDF4, numpy, rasterio, rasterstats, os, shutil, calendar, datetime, app_settings (options)
Params: View README.md
Returns: Creates a geotiff named 'geotiff.tif' in the directory specified
Author: <NAME>, RCH Engineering, March 2019
"""
# return items
values = []
# Remove old geotiffs before filling it
wrkpath = App.get_app_workspace().path
geotiffdir = os.path.join(wrkpath, 'geotiffs')
if os.path.isdir(geotiffdir):
shutil.rmtree(geotiffdir)
os.mkdir(geotiffdir)
# open the netcdf and get metadata
nc_obj = netCDF4.Dataset(os.path.join(path, files[0]), 'r')
lat = nc_obj.variables['lat'][:]
lon = nc_obj.variables['lon'][:]
units = nc_obj[var].__dict__['units']
geotransform = rasterio.transform.from_origin(lon.min(), lat.max(), lat[1] - lat[0], lon[1] - lon[0])
nc_obj.close()
# read netcdf, create geotiff, zonal statistics, format outputs for highcharts plotting
for file in files:
# open the netcdf and get the data array
nc_obj = netCDF4.Dataset(os.path.join(path, file), 'r')
var_data = nc_obj.variables[var][:] # this is the array of values for the nc_obj
array = numpy.asarray(var_data)[0, :, :] # converting the data type
array[array < -9000] = numpy.nan # use the comparator to drop nodata fills
array = array[::-1] # vertically flip array so tiff orientation is right (you just have to, try it)
# create the timesteps for the highcharts plot
t_val = nc_obj['time'].__dict__['begin_date']
try:
t_val = datetime.datetime.strptime(t_val, "%Y%m%d")
except ValueError:
t_val = datetime.datetime.strptime(t_val, "%Y%m%d%H")
time = calendar.timegm(t_val.utctimetuple()) * 1000
# file paths and settings
if region == 'customshape':
shppath = App.get_user_workspace(user).path
shp = [i for i in os.listdir(shppath) if i.endswith('.shp')]
shppath = os.path.join(shppath, shp[0])
else:
shppath = os.path.join(wrkpath, 'shapefiles', region, region.replace(' ', '') + '.shp')
gtiffpath = os.path.join(wrkpath, 'geotiffs', 'geotiff.tif')
with rasterio.open(gtiffpath, 'w', driver='GTiff', height=len(lat), width=len(lon), count=1, dtype='float32',
nodata=numpy.nan, crs='+proj=latlong', transform=geotransform) as newtiff:
newtiff.write(array, 1) # data, band number
stats = rasterstats.zonal_stats(shppath, gtiffpath, stats="mean")
values.append((time, stats[0]['mean']))
if os.path.isdir(geotiffdir):
shutil.rmtree(geotiffdir)
return values, units
def makestatplots(values, time):
"""
Calculates statistics for the array of timeseries values and returns arrays for a highcharts boxplot
Dependencies: statistics, pandas, datetime, calendar
"""
df = pandas.DataFrame(values, columns=['dates', 'values'])
multiline = {'yearmulti': {'min': [], 'max': [], 'mean': []},
'monthmulti': {'min': [], 'max': [], 'mean': []}}
boxplot = {'yearbox': [], 'monthbox': []}
months = dict((n, m) for n, m in enumerate(calendar.month_name))
numyears = int(datetime.datetime.now().strftime("%Y")) - 1999 # not 2000 because we include that year
categories = {'month': [months[i + 1] for i in range(12)], 'year': [i + 2000 for i in range(numyears)]}
if time == 'alltimes':
for i in range(1, 13): # static 13 to go to years
tmp = df[int(df['dates'][-2]) == i]['values']
std = statistics.stdev(tmp)
ymin = min(tmp)
ymax = max(tmp)
mean = sum(tmp) / len(tmp)
boxplot['monthbox'].append([months[i], ymin, mean - std, mean, mean + std, ymax])
multiline['monthmulti']['min'].append((months[i], ymin))
multiline['monthmulti']['mean'].append((months[i], mean))
multiline['monthmulti']['max'].append((months[i], ymax))
for i in range(numyears):
tmp = df[int(df['dates'][0:3]) == i + 2000]['values']
std = statistics.stdev(tmp)
ymin = min(tmp)
ymax = max(tmp)
mean = sum(tmp) / len(tmp)
boxplot['yearbox'].append([i, ymin, mean - std, mean, mean + std, ymax])
multiline['yearmulti']['min'].append((i + 2000, ymin))
multiline['yearmulti']['mean'].append((i + 2000, mean))
multiline['yearmulti']['max'].append((i + 2000, ymax))
return multiline, boxplot, categories
| [
"statistics.stdev",
"os.listdir",
"datetime.datetime.strptime",
"netCDF4.Dataset",
"os.path.join",
"numpy.asarray",
"datetime.datetime.now",
"os.path.isdir",
"numpy.isnan",
"os.mkdir",
"shutil.rmtree",
"pandas.DataFrame",
"rasterstats.zonal_stats"
] | [((6550, 6583), 'os.path.join', 'os.path.join', (['wrkpath', '"""geotiffs"""'], {}), "(wrkpath, 'geotiffs')\n", (6562, 6583), False, 'import os\n'), ((6591, 6616), 'os.path.isdir', 'os.path.isdir', (['geotiffdir'], {}), '(geotiffdir)\n', (6604, 6616), False, 'import os\n'), ((6656, 6676), 'os.mkdir', 'os.mkdir', (['geotiffdir'], {}), '(geotiffdir)\n', (6664, 6676), False, 'import os\n'), ((8795, 8820), 'os.path.isdir', 'os.path.isdir', (['geotiffdir'], {}), '(geotiffdir)\n', (8808, 8820), False, 'import os\n'), ((9104, 9157), 'pandas.DataFrame', 'pandas.DataFrame', (['values'], {'columns': "['dates', 'values']"}), "(values, columns=['dates', 'values'])\n", (9120, 9157), False, 'import pandas\n'), ((761, 795), 'os.path.join', 'os.path.join', (['path', '"""gldas"""', '"""raw"""'], {}), "(path, 'gldas', 'raw')\n", (773, 795), False, 'import os\n'), ((815, 831), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (825, 831), False, 'import os\n'), ((1230, 1277), 'os.path.join', 'os.path.join', (['path', '"""gfs"""', 'timestamp', '"""netcdfs"""'], {}), "(path, 'gfs', timestamp, 'netcdfs')\n", (1242, 1277), False, 'import os\n'), ((1297, 1313), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (1307, 1313), False, 'import os\n'), ((2957, 2985), 'os.path.join', 'os.path.join', (['path', 'files[0]'], {}), '(path, files[0])\n', (2969, 2985), False, 'import os\n'), ((3415, 3452), 'netCDF4.Dataset', 'netCDF4.Dataset', (["(path + '/' + nc)", '"""r"""'], {}), "(path + '/' + nc, 'r')\n", (3430, 3452), False, 'import netCDF4\n'), ((5093, 5130), 'netCDF4.Dataset', 'netCDF4.Dataset', (["(path + '/' + nc)", '"""r"""'], {}), "(path + '/' + nc, 'r')\n", (5108, 5130), False, 'import netCDF4\n'), ((6626, 6651), 'shutil.rmtree', 'shutil.rmtree', (['geotiffdir'], {}), '(geotiffdir)\n', (6639, 6651), False, 'import shutil\n'), ((6746, 6774), 'os.path.join', 'os.path.join', (['path', 'files[0]'], {}), '(path, files[0])\n', (6758, 6774), False, 'import os\n'), ((8337, 8385), 'os.path.join', 'os.path.join', (['wrkpath', '"""geotiffs"""', '"""geotiff.tif"""'], {}), "(wrkpath, 'geotiffs', 'geotiff.tif')\n", (8349, 8385), False, 'import os\n'), ((8681, 8738), 'rasterstats.zonal_stats', 'rasterstats.zonal_stats', (['shppath', 'gtiffpath'], {'stats': '"""mean"""'}), "(shppath, gtiffpath, stats='mean')\n", (8704, 8738), False, 'import rasterstats\n'), ((8830, 8855), 'shutil.rmtree', 'shutil.rmtree', (['geotiffdir'], {}), '(geotiffdir)\n', (8843, 8855), False, 'import shutil\n'), ((3540, 3583), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['t_val', '"""%Y%m%d"""'], {}), "(t_val, '%Y%m%d')\n", (3566, 3583), False, 'import datetime\n'), ((5218, 5261), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['t_val', '"""%Y%m%d"""'], {}), "(t_val, '%Y%m%d')\n", (5244, 5261), False, 'import datetime\n'), ((7220, 7244), 'os.path.join', 'os.path.join', (['path', 'file'], {}), '(path, file)\n', (7232, 7244), False, 'import os\n'), ((7357, 7380), 'numpy.asarray', 'numpy.asarray', (['var_data'], {}), '(var_data)\n', (7370, 7380), False, 'import numpy\n'), ((7754, 7797), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['t_val', '"""%Y%m%d"""'], {}), "(t_val, '%Y%m%d')\n", (7780, 7797), False, 'import datetime\n'), ((8173, 8202), 'os.path.join', 'os.path.join', (['shppath', 'shp[0]'], {}), '(shppath, shp[0])\n', (8185, 8202), False, 'import os\n'), ((9785, 9806), 'statistics.stdev', 'statistics.stdev', (['tmp'], {}), '(tmp)\n', (9801, 9806), False, 'import statistics\n'), ((10322, 
10343), 'statistics.stdev', 'statistics.stdev', (['tmp'], {}), '(tmp)\n', (10338, 10343), False, 'import statistics\n'), ((3631, 3676), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['t_val', '"""%Y%m%d%H"""'], {}), "(t_val, '%Y%m%d%H')\n", (3657, 3676), False, 'import datetime\n'), ((5309, 5354), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['t_val', '"""%Y%m%d%H"""'], {}), "(t_val, '%Y%m%d%H')\n", (5335, 5354), False, 'import datetime\n'), ((5727, 5745), 'numpy.isnan', 'numpy.isnan', (['array'], {}), '(array)\n', (5738, 5745), False, 'import numpy\n'), ((7845, 7890), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['t_val', '"""%Y%m%d%H"""'], {}), "(t_val, '%Y%m%d%H')\n", (7871, 7890), False, 'import datetime\n'), ((8108, 8127), 'os.listdir', 'os.listdir', (['shppath'], {}), '(shppath)\n', (8118, 8127), False, 'import os\n'), ((9426, 9449), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (9447, 9449), False, 'import datetime\n')] |
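All three chart functions above build their Highcharts x-values the same way: parse the netCDF `begin_date` attribute (with or without an hour field) and convert it to milliseconds since the Unix epoch. A minimal standalone sketch of that conversion, using an illustrative date string:

import calendar
import datetime

def to_highcharts_time(begin_date):
    # begin_date comes from nc_obj['time'].__dict__['begin_date'], e.g. "20190514" or "2019051400"
    try:
        t_val = datetime.datetime.strptime(begin_date, "%Y%m%d")
    except ValueError:
        t_val = datetime.datetime.strptime(begin_date, "%Y%m%d%H")
    # Highcharts expects milliseconds since the Unix epoch
    return calendar.timegm(t_val.utctimetuple()) * 1000

print(to_highcharts_time("20190514"))  # 1557792000000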
from pathlib import Path
data_path = Path(__file__).parent
states_data = data_path / "states.geojson"
power_plants_data = data_path / "power_plants.geojson"
| [
"pathlib.Path"
] | [((38, 52), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (42, 52), False, 'from pathlib import Path\n')] |
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
# -----------------------------------------------------------------------------
#
# P A G E B O T
#
# Copyright (c) 2016+ <NAME> + <NAME>
# www.pagebot.io
# Licensed under MIT conditions
#
# Supporting DrawBot, www.drawbot.com
# Supporting Flat, xxyxyz.org/flat
# -----------------------------------------------------------------------------
#
# beziercurve.py
#
from pagebot.constants import ORIGIN
from pagebot.elements.element import Element
from pagebot.toolbox.color import noColor
class BezierCurve(Element):
"""Implements a (Base)BezierPath as an element.
TODO: isOpenLine?
TODO: what about contours (supported by BaseBezierPath)?
TODO: what about components (currently not implemented by BaseBezierPath)?
"""
def __init__(self, points=None, closed=True, **kwargs):
if points is None:
points = []
# Force copy, so calling function can't change size cache.
self.points = points[:]
self.closed = closed
Element.__init__(self, **kwargs)
def translatePoint(self, p0, p):
x0, y0, _ = p0
x, y = p
return x0 + x, y0 + y
def build(self, view, origin=ORIGIN, **kwargs):
pOrigin = self.getPosition(view, origin)
self.buildFrame(view, pOrigin) # Draw optional frame or borders.
view.drawElementFrame(self, pOrigin)
self.context.newPath()
self.context.fill(self.css('fill', noColor))
self.context.stroke(self.css('stroke', noColor), self.css('strokeWidth'))
p0 = self.translatePoint(pOrigin, self.points[0])
self.context.moveTo(p0)
for point in self.points[1:]:
if len(point) == 2:
p = self.translatePoint(pOrigin, point)
self.context.lineTo(p)
elif len(point) == 3:
cp0 = self.translatePoint(pOrigin, point[0])
cp1 = self.translatePoint(pOrigin, point[1])
p = self.translatePoint(pOrigin, point[2])
self.context.curveTo(cp0, cp1, p)
if self.closed:
self.context.closePath()
self.context.drawPath()
self.buildChildElements(view, pOrigin, **kwargs)
self.restore(view, pOrigin)
self.drawMeta(view, origin)
| [
"pagebot.elements.element.Element.__init__"
] | [((1063, 1095), 'pagebot.elements.element.Element.__init__', 'Element.__init__', (['self'], {}), '(self, **kwargs)\n', (1079, 1095), False, 'from pagebot.elements.element import Element\n')] |
import logging
import sys
from mealie.core.config import DATA_DIR
LOGGER_FILE = DATA_DIR.joinpath("mealie.log")
DATE_FORMAT = "%d-%b-%y %H:%M:%S"
LOGGER_FORMAT = "%(levelname)s: %(asctime)s \t%(message)s"
logging.basicConfig(level=logging.INFO, format=LOGGER_FORMAT, datefmt="%d-%b-%y %H:%M:%S")
def logger_init() -> logging.Logger:
""" Returns the Root Loggin Object for Mealie """
logger = logging.getLogger("mealie")
logger.propagate = False
# File Handler
output_file_handler = logging.FileHandler(LOGGER_FILE)
handler_format = logging.Formatter(LOGGER_FORMAT, datefmt=DATE_FORMAT)
output_file_handler.setFormatter(handler_format)
# Stdout
stdout_handler = logging.StreamHandler(sys.stdout)
stdout_handler.setFormatter(handler_format)
logger.addHandler(output_file_handler)
logger.addHandler(stdout_handler)
return logger
root_logger = logger_init()
def get_logger(module=None) -> logging.Logger:
""" Returns a child logger for mealie """
global root_logger
if module is None:
return root_logger
return root_logger.getChild(module)
| [
"logging.basicConfig",
"logging.getLogger",
"logging.StreamHandler",
"logging.Formatter",
"mealie.core.config.DATA_DIR.joinpath",
"logging.FileHandler"
] | [((82, 113), 'mealie.core.config.DATA_DIR.joinpath', 'DATA_DIR.joinpath', (['"""mealie.log"""'], {}), "('mealie.log')\n", (99, 113), False, 'from mealie.core.config import DATA_DIR\n'), ((208, 303), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO', 'format': 'LOGGER_FORMAT', 'datefmt': '"""%d-%b-%y %H:%M:%S"""'}), "(level=logging.INFO, format=LOGGER_FORMAT, datefmt=\n '%d-%b-%y %H:%M:%S')\n", (227, 303), False, 'import logging\n'), ((405, 432), 'logging.getLogger', 'logging.getLogger', (['"""mealie"""'], {}), "('mealie')\n", (422, 432), False, 'import logging\n'), ((508, 540), 'logging.FileHandler', 'logging.FileHandler', (['LOGGER_FILE'], {}), '(LOGGER_FILE)\n', (527, 540), False, 'import logging\n'), ((562, 615), 'logging.Formatter', 'logging.Formatter', (['LOGGER_FORMAT'], {'datefmt': 'DATE_FORMAT'}), '(LOGGER_FORMAT, datefmt=DATE_FORMAT)\n', (579, 615), False, 'import logging\n'), ((704, 737), 'logging.StreamHandler', 'logging.StreamHandler', (['sys.stdout'], {}), '(sys.stdout)\n', (725, 737), False, 'import logging\n')] |
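A short usage sketch of the helpers above; the import path is an assumption here and should be adjusted to wherever this module lives inside the mealie package:

# assumed import path for the module defined above
from mealie.core.root_logger import get_logger

logger = get_logger("scheduler")

def nightly_backup():
    # child logger inherits the file + stdout handlers configured above
    logger.info("Starting nightly backup")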
#
# This is the Robotics Language compiler
#
# Parse.py: Parses the language
#
# Created on: 11 February, 2019
# Author: <NAME>
# Licence: Apache 2.0
# Copyright: 2014-2017 Robot Care Systems BV, The Hague, The Netherlands. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import yaml
import copy
from jinja2 import Template
from RoboticsLanguage.Base import Utilities
from RoboticsLanguage.Inputs.RoL import Parse
def getType(value):
mapping = {'str': 'Strings', 'int': 'Integers', 'float': 'Reals', 'bool': 'Booleans'}
return mapping[filter(lambda x: type(value) is x, [bool, int, float, str])[0].__name__]
def convertParameters(input):
output = copy.copy(input)
# extract parameters
output['parameters'] = []
for key, value in input['parameters'].iteritems():
type = getType(value)
if type == 'Strings':
value = '"' + value + '"'
output['parameters'].append({'name': key, 'type': getType(value), 'value': value})
# extract topics
output['topics'] = []
for key, value in input['topics'].iteritems():
[name, type] = value.split(' ')
output['topics'].append({'variable': key, 'type': type, 'name': name})
return output
def parse(text, parameters):
# parse JSON into dictionary
text_dictionary = yaml.safe_load(text)
# convert into more descriptive dictionary
discriptive_dictionary = convertParameters(text_dictionary)
# save the data in the parameters to be used by the GUI
parameters['Inputs']['FaultDetectionTopics']['data'] = discriptive_dictionary
# open template file
with open(Utilities.myPluginPath(parameters) + '/Support/fault_detection_topic.rol.template', 'r') as file:
template = Template(file.read())
# render the template with the data
rol_code = template.render(**discriptive_dictionary)
# print intermediate rol code is requested
if parameters['Inputs']['FaultDetectionTopics']['showRol']:
Utilities.printSource(rol_code, 'coffeescript', parameters)
if parameters['Inputs']['FaultDetectionTopics']['showYAML']:
Utilities.printParameters(discriptive_dictionary, parameters)
# parse generated rol code
code, parameters = Parse.parse(rol_code, parameters)
# add fault detection gui to the outputs
outputs = Utilities.ensureList(parameters['globals']['output'])
outputs.append('FaultDetectionTopics')
parameters['globals']['output'] = outputs
return code, parameters
| [
"RoboticsLanguage.Inputs.RoL.Parse.parse",
"RoboticsLanguage.Base.Utilities.printParameters",
"RoboticsLanguage.Base.Utilities.ensureList",
"RoboticsLanguage.Base.Utilities.printSource",
"yaml.safe_load",
"RoboticsLanguage.Base.Utilities.myPluginPath",
"copy.copy"
] | [((1217, 1233), 'copy.copy', 'copy.copy', (['input'], {}), '(input)\n', (1226, 1233), False, 'import copy\n'), ((1814, 1834), 'yaml.safe_load', 'yaml.safe_load', (['text'], {}), '(text)\n', (1828, 1834), False, 'import yaml\n'), ((2700, 2733), 'RoboticsLanguage.Inputs.RoL.Parse.parse', 'Parse.parse', (['rol_code', 'parameters'], {}), '(rol_code, parameters)\n', (2711, 2733), False, 'from RoboticsLanguage.Inputs.RoL import Parse\n'), ((2790, 2843), 'RoboticsLanguage.Base.Utilities.ensureList', 'Utilities.ensureList', (["parameters['globals']['output']"], {}), "(parameters['globals']['output'])\n", (2810, 2843), False, 'from RoboticsLanguage.Base import Utilities\n'), ((2459, 2518), 'RoboticsLanguage.Base.Utilities.printSource', 'Utilities.printSource', (['rol_code', '"""coffeescript"""', 'parameters'], {}), "(rol_code, 'coffeescript', parameters)\n", (2480, 2518), False, 'from RoboticsLanguage.Base import Utilities\n'), ((2587, 2648), 'RoboticsLanguage.Base.Utilities.printParameters', 'Utilities.printParameters', (['discriptive_dictionary', 'parameters'], {}), '(discriptive_dictionary, parameters)\n', (2612, 2648), False, 'from RoboticsLanguage.Base import Utilities\n'), ((2118, 2152), 'RoboticsLanguage.Base.Utilities.myPluginPath', 'Utilities.myPluginPath', (['parameters'], {}), '(parameters)\n', (2140, 2152), False, 'from RoboticsLanguage.Base import Utilities\n')] |
from django.contrib.auth.models import AbstractUser
from django.contrib.auth.validators import ASCIIUsernameValidator
from django.db import models
from django.db.models import Sum, Count, FloatField
from django.db.models.functions import Coalesce, Greatest
class User(AbstractUser):
username_validator = ASCIIUsernameValidator
REQUIRED_FIELDS = []
courses = models.ManyToManyField(
'Course',
related_name='users',
through='UserCourseRelationship',
)
SERIALIZED_FIELDS = (
'id',
'username',
'password',
'first_name',
'last_name',
)
class Meta:
ordering = ('-id',)
class Course(models.Model):
name = models.CharField(max_length=255, null=False, blank=False)
description = models.TextField(null=True, blank=True)
reward = models.CharField(max_length=255, null=False, blank=False)
is_finished = models.BooleanField(default=False)
SERIALIZED_FIELDS = (
'id',
'name',
'description',
'reward',
'is_finished',
)
def get_user_grade(self, user):
rel = self.uc_rels.filter(user=user).prefetch_related('grades').first()
if not rel:
return -1
grade = rel.grades.aggregate(
grade=Coalesce(Sum('value'), 1) / Greatest(Coalesce(Count('*', output_field=FloatField()), 0), 0.1),
)['grade']
return grade
class Meta:
ordering = ('-id',)
class Grade(models.Model):
value = models.FloatField(null=False, blank=False)
rel = models.ForeignKey('UserCourseRelationship', related_name='grades', on_delete=models.CASCADE)
comment = models.CharField(max_length=255, null=False, blank=False)
SERIALIZED_FIELDS = (
'id',
'value',
'rel',
'comment',
)
class UserCourseRelationship(models.Model):
user = models.ForeignKey('User', related_name='uc_rels', on_delete=models.CASCADE, blank=True)
course = models.ForeignKey('Course', related_name='uc_rels', on_delete=models.CASCADE)
level = models.CharField(
max_length=1,
choices=(
('P', 'Pupil'),
('T', 'Teacher'),
),
default='P',
)
SERIALIZED_FIELDS = (
'id',
'user',
'course',
'level',
)
class Meta:
unique_together = ('user', 'course')
| [
"django.db.models.Sum",
"django.db.models.FloatField",
"django.db.models.TextField",
"django.db.models.ForeignKey",
"django.db.models.ManyToManyField",
"django.db.models.BooleanField",
"django.db.models.CharField"
] | [((374, 467), 'django.db.models.ManyToManyField', 'models.ManyToManyField', (['"""Course"""'], {'related_name': '"""users"""', 'through': '"""UserCourseRelationship"""'}), "('Course', related_name='users', through=\n 'UserCourseRelationship')\n", (396, 467), False, 'from django.db import models\n'), ((710, 767), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)', 'null': '(False)', 'blank': '(False)'}), '(max_length=255, null=False, blank=False)\n', (726, 767), False, 'from django.db import models\n'), ((786, 825), 'django.db.models.TextField', 'models.TextField', ([], {'null': '(True)', 'blank': '(True)'}), '(null=True, blank=True)\n', (802, 825), False, 'from django.db import models\n'), ((839, 896), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)', 'null': '(False)', 'blank': '(False)'}), '(max_length=255, null=False, blank=False)\n', (855, 896), False, 'from django.db import models\n'), ((915, 949), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (934, 949), False, 'from django.db import models\n'), ((1515, 1557), 'django.db.models.FloatField', 'models.FloatField', ([], {'null': '(False)', 'blank': '(False)'}), '(null=False, blank=False)\n', (1532, 1557), False, 'from django.db import models\n'), ((1568, 1664), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""UserCourseRelationship"""'], {'related_name': '"""grades"""', 'on_delete': 'models.CASCADE'}), "('UserCourseRelationship', related_name='grades',\n on_delete=models.CASCADE)\n", (1585, 1664), False, 'from django.db import models\n'), ((1675, 1732), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)', 'null': '(False)', 'blank': '(False)'}), '(max_length=255, null=False, blank=False)\n', (1691, 1732), False, 'from django.db import models\n'), ((1888, 1979), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""User"""'], {'related_name': '"""uc_rels"""', 'on_delete': 'models.CASCADE', 'blank': '(True)'}), "('User', related_name='uc_rels', on_delete=models.CASCADE,\n blank=True)\n", (1905, 1979), False, 'from django.db import models\n'), ((1989, 2066), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""Course"""'], {'related_name': '"""uc_rels"""', 'on_delete': 'models.CASCADE'}), "('Course', related_name='uc_rels', on_delete=models.CASCADE)\n", (2006, 2066), False, 'from django.db import models\n'), ((2080, 2171), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(1)', 'choices': "(('P', 'Pupil'), ('T', 'Teacher'))", 'default': '"""P"""'}), "(max_length=1, choices=(('P', 'Pupil'), ('T', 'Teacher')),\n default='P')\n", (2096, 2171), False, 'from django.db import models\n'), ((1302, 1314), 'django.db.models.Sum', 'Sum', (['"""value"""'], {}), "('value')\n", (1305, 1314), False, 'from django.db.models import Sum, Count, FloatField\n'), ((1363, 1375), 'django.db.models.FloatField', 'FloatField', ([], {}), '()\n', (1373, 1375), False, 'from django.db.models import Sum, Count, FloatField\n')] |
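The `get_user_grade` aggregate above reads as: sum of the grade values (defaulting to 1 when there are none) divided by the grade count, with the count floored at 0.1 so the division never hits zero. A plain-Python restatement of that formula, for illustration only:

def plain_python_grade(values):
    # Mirrors Coalesce(Sum('value'), 1) / Greatest(Coalesce(Count('*'), 0), 0.1)
    total = sum(values) if values else 1
    count = max(len(values), 0.1)
    return total / count

print(plain_python_grade([4.0, 5.0]))  # 4.5
print(plain_python_grade([]))          # 10.0, the 0.1 floor keeps the division defined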
"""
main.py: Script to invoke screen fitting algorithm
Copyright (c) 2022, SKAO / Science Data Processor
SPDX-License-Identifier: BSD-3-Clause
"""
import argparse
from ska_sdp_screen_fitting.make_aterm_images import make_aterm_image
def start():
"""
This is the entry point for the executable
"""
description_text = "Make a-term images from solutions.\n"
parser = argparse.ArgumentParser(
description=description_text,
formatter_class=argparse.RawTextHelpFormatter,
)
parser.add_argument("h5parmfile", help="Filename of input H5parm")
parser.add_argument(
"--soltabname", help="Name of soltab", type=str, default="phase000"
)
parser.add_argument(
"--screen_type",
help="Type of screen",
type=str,
default="tessellated",
)
parser.add_argument(
"--outroot", help="Root of output images", type=str, default=""
)
parser.add_argument(
"--bounds_deg", help="Bounds list in deg", type=str, default=None
)
parser.add_argument(
"--bounds_mid_deg",
help="Bounds mid list in deg",
type=str,
default=None,
)
parser.add_argument(
"--skymodel", help="Filename of sky model", type=str, default=None
)
parser.add_argument(
"--solsetname", help="Solset name", type=str, default="sol000"
)
parser.add_argument(
"--padding_fraction", help="Padding fraction", type=float, default=1.4
)
parser.add_argument(
"--cellsize_deg", help="Cell size in deg", type=float, default=0.2
)
parser.add_argument(
"--smooth_deg", help="Smooth scale in degree", type=float, default=0.0
)
parser.add_argument(
"--ncpu", help="Number of CPUs to use", type=int, default=0
)
args = parser.parse_args()
make_aterm_image(
args.h5parmfile,
soltabname=args.soltabname,
screen_type=args.screen_type,
outroot=args.outroot,
bounds_deg=args.bounds_deg,
bounds_mid_deg=args.bounds_mid_deg,
skymodel=args.skymodel,
solsetname=args.solsetname,
padding_fraction=args.padding_fraction,
cellsize_deg=args.cellsize_deg,
smooth_deg=args.smooth_deg,
ncpu=args.ncpu,
)
| [
"ska_sdp_screen_fitting.make_aterm_images.make_aterm_image",
"argparse.ArgumentParser"
] | [((403, 508), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': 'description_text', 'formatter_class': 'argparse.RawTextHelpFormatter'}), '(description=description_text, formatter_class=\n argparse.RawTextHelpFormatter)\n', (426, 508), False, 'import argparse\n'), ((1855, 2219), 'ska_sdp_screen_fitting.make_aterm_images.make_aterm_image', 'make_aterm_image', (['args.h5parmfile'], {'soltabname': 'args.soltabname', 'screen_type': 'args.screen_type', 'outroot': 'args.outroot', 'bounds_deg': 'args.bounds_deg', 'bounds_mid_deg': 'args.bounds_mid_deg', 'skymodel': 'args.skymodel', 'solsetname': 'args.solsetname', 'padding_fraction': 'args.padding_fraction', 'cellsize_deg': 'args.cellsize_deg', 'smooth_deg': 'args.smooth_deg', 'ncpu': 'args.ncpu'}), '(args.h5parmfile, soltabname=args.soltabname, screen_type=\n args.screen_type, outroot=args.outroot, bounds_deg=args.bounds_deg,\n bounds_mid_deg=args.bounds_mid_deg, skymodel=args.skymodel, solsetname=\n args.solsetname, padding_fraction=args.padding_fraction, cellsize_deg=\n args.cellsize_deg, smooth_deg=args.smooth_deg, ncpu=args.ncpu)\n', (1871, 2219), False, 'from ska_sdp_screen_fitting.make_aterm_images import make_aterm_image\n')] |
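Assuming this file is installed as `ska_sdp_screen_fitting.main` (the console-script name is not shown here), the entry point can be exercised programmatically by populating `sys.argv` with the flags defined above; the filenames and values are illustrative only:

import sys
from ska_sdp_screen_fitting.main import start  # assumed module path

sys.argv = [
    "make_aterm_images",
    "solutions.h5parm",              # positional h5parmfile (illustrative filename)
    "--soltabname", "phase000",
    "--screen_type", "tessellated",
    "--outroot", "aterms/out",
    "--ncpu", "4",
]
start()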
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# Written by <NAME> and <NAME>
# email: <EMAIL> and <EMAIL>
import torch
import numpy as np
import torch.nn.functional as F
from copy import deepcopy
# Prioritized Path Board
class PrioritizedBoard():
def __init__(self, cfg, CHOICE_NUM=6, sta_num=(4, 4, 4, 4, 4), acc_gap=5):
self.cfg = cfg
self.prioritized_board = []
self.choice_num = CHOICE_NUM
self.sta_num = sta_num
self.acc_gap = acc_gap
# select teacher from prioritized board
def select_teacher(self, model, random_cand):
if self.cfg.SUPERNET.PICK_METHOD == 'top1':
meta_value, teacher_cand = 0.5, sorted(
self.prioritized_board, reverse=True)[0][3]
elif self.cfg.SUPERNET.PICK_METHOD == 'meta':
meta_value, cand_idx, teacher_cand = -1000000000, -1, None
for now_idx, item in enumerate(self.prioritized_board):
inputx = item[4]
output = F.softmax(model(inputx, random_cand), dim=1)
weight = model.module.forward_meta(output - item[5])
if weight > meta_value:
meta_value = weight
cand_idx = now_idx
teacher_cand = self.prioritized_board[cand_idx][3]
assert teacher_cand is not None
meta_value = torch.sigmoid(-weight)
else:
raise ValueError('Method Not supported')
return meta_value, teacher_cand
def board_size(self):
return len(self.prioritized_board)
# get prob from config file
def get_prob(self):
if self.cfg.SUPERNET.HOW_TO_PROB == 'even' or (
self.cfg.SUPERNET.HOW_TO_PROB == 'teacher' and len(self.prioritized_board) == 0):
return None
elif self.cfg.SUPERNET.HOW_TO_PROB == 'pre_prob':
return self.cfg.SUPERNET.PRE_PROB
elif self.cfg.SUPERNET.HOW_TO_PROB == 'teacher':
op_dict = {}
for i in range(self.choice_num):
op_dict[i] = 0
for item in self.prioritized_board:
cand = item[3]
for block in cand:
for op in block:
op_dict[op] += 1
sum_op = 0
for i in range(self.choice_num):
sum_op = sum_op + op_dict[i]
prob = []
for i in range(self.choice_num):
prob.append(float(op_dict[i]) / sum_op)
del op_dict, sum_op
return prob
# sample random architecture
def get_cand_with_prob(self, prob=None):
if prob is None:
get_random_cand = [
np.random.choice(
self.choice_num,
item).tolist() for item in self.sta_num]
else:
get_random_cand = [
                np.random.choice(
                    self.choice_num,
                    item,
                    p=prob).tolist() for item in self.sta_num]
return get_random_cand
def isUpdate(self, current_epoch, prec1, flops):
if current_epoch <= self.cfg.SUPERNET.META_STA_EPOCH:
return False
if len(self.prioritized_board) < self.cfg.SUPERNET.POOL_SIZE:
return True
if prec1 > self.prioritized_board[-1][1] + self.acc_gap:
return True
if prec1 > self.prioritized_board[-1][1] and flops < self.prioritized_board[-1][2]:
return True
return False
def update_prioritized_board(self, inputs, teacher_output, outputs, current_epoch, prec1, flops, cand):
if self.isUpdate(current_epoch, prec1, flops):
val_prec1 = prec1
training_data = deepcopy(inputs[:self.cfg.SUPERNET.SLICE].detach())
if len(self.prioritized_board) == 0:
features = deepcopy(outputs[:self.cfg.SUPERNET.SLICE].detach())
else:
features = deepcopy(
teacher_output[:self.cfg.SUPERNET.SLICE].detach())
self.prioritized_board.append(
(val_prec1,
prec1,
flops,
cand,
training_data,
F.softmax(
features,
dim=1)))
self.prioritized_board = sorted(self.prioritized_board, reverse=True)
if len(self.prioritized_board) > self.cfg.SUPERNET.POOL_SIZE:
self.prioritized_board = sorted(self.prioritized_board, reverse=True)
del self.prioritized_board[-1]
| [
"numpy.random.choice",
"torch.sigmoid",
"torch.nn.functional.softmax"
] | [((1398, 1420), 'torch.sigmoid', 'torch.sigmoid', (['(-weight)'], {}), '(-weight)\n', (1411, 1420), False, 'import torch\n'), ((4273, 4299), 'torch.nn.functional.softmax', 'F.softmax', (['features'], {'dim': '(1)'}), '(features, dim=1)\n', (4282, 4299), True, 'import torch.nn.functional as F\n'), ((2735, 2774), 'numpy.random.choice', 'np.random.choice', (['self.choice_num', 'item'], {}), '(self.choice_num, item)\n', (2751, 2774), True, 'import numpy as np\n'), ((2913, 2958), 'numpy.random.choice', 'np.random.choice', (['self.choice_num', 'item', 'prob'], {}), '(self.choice_num, item, prob)\n', (2929, 2958), True, 'import numpy as np\n')] |
from sanic import response
from ..config import settings
def handle_cors(request, allowed_methods, allowed_headers):
if "ORIGIN" in request.headers:
origin = request.headers["ORIGIN"]
if origin in settings.ALLOWED_ORIGINS:
headers = {
"Access-Control-Allow-Methods": allowed_methods,
"Access-Control-Allow-Headers": allowed_headers,
}
return response.raw("", status=204, headers=headers)
else:
return response.raw("", status=204)
else:
return response.raw("", status=204)
| [
"sanic.response.raw"
] | [((566, 594), 'sanic.response.raw', 'response.raw', (['""""""'], {'status': '(204)'}), "('', status=204)\n", (578, 594), False, 'from sanic import response\n'), ((433, 478), 'sanic.response.raw', 'response.raw', (['""""""'], {'status': '(204)', 'headers': 'headers'}), "('', status=204, headers=headers)\n", (445, 478), False, 'from sanic import response\n'), ((512, 540), 'sanic.response.raw', 'response.raw', (['""""""'], {'status': '(204)'}), "('', status=204)\n", (524, 540), False, 'from sanic import response\n')] |
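A sketch of how `handle_cors` might be wired to an OPTIONS preflight route; the app name, path and header lists below are illustrative only:

from sanic import Sanic

app = Sanic("example-api")

@app.route("/api/items", methods=["OPTIONS"])
async def items_preflight(request):
    # delegate the preflight response to the helper defined above
    return handle_cors(
        request,
        allowed_methods="GET, POST, OPTIONS",
        allowed_headers="Content-Type, Authorization",
    )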
import torch
if __name__ == '__main__':
data = torch.load('checkpoints/resnet50_8xb256-rsb-a1-600e_in1k_20211228-20e21305.pth',
map_location='cpu')
trained_sd = data['state_dict']
new_sd = {}
new_sd['stem.0.weight'] = trained_sd['backbone.conv1.weight']
new_sd['stem.1.weight'] = trained_sd['backbone.bn1.weight']
    new_sd['stem.1.bias'] = trained_sd['backbone.bn1.bias']
new_sd['stem.1.running_mean'] = trained_sd['backbone.bn1.running_mean']
new_sd['stem.1.running_var'] = trained_sd['backbone.bn1.running_var']
new_sd['stem.1.num_batches_tracked'] = trained_sd['backbone.bn1.num_batches_tracked']
torch.save(new_sd, 'resnet50_stem.pth')
| [
"torch.load",
"torch.save"
] | [((52, 157), 'torch.load', 'torch.load', (['"""checkpoints/resnet50_8xb256-rsb-a1-600e_in1k_20211228-20e21305.pth"""'], {'map_location': '"""cpu"""'}), "('checkpoints/resnet50_8xb256-rsb-a1-600e_in1k_20211228-20e21305.pth'\n , map_location='cpu')\n", (62, 157), False, 'import torch\n'), ((654, 693), 'torch.save', 'torch.save', (['new_sd', '"""resnet50_stem.pth"""'], {}), "(new_sd, 'resnet50_stem.pth')\n", (664, 693), False, 'import torch\n')] |
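A hedged sanity check for the remapped keys above: they line up with a conv + batch-norm block held in an attribute named `stem`, so the saved file can be loaded back as below. The 7x7 stride-2 shape is the usual ResNet-50 stem and is assumed here.

import torch
import torch.nn as nn

class StemOnly(nn.Module):
    def __init__(self):
        super().__init__()
        # standard ResNet-50 stem layout assumed: 7x7 stride-2 conv, then BatchNorm2d
        self.stem = nn.Sequential(
            nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False),
            nn.BatchNorm2d(64),
        )

model = StemOnly()
state = torch.load('resnet50_stem.pth', map_location='cpu')
print(model.load_state_dict(state, strict=False))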
import matplotlib.pyplot as plt
import tensorflow as tf
import numpy as np
def sreluPlot(x):
if x > 0.8:
return 0.8 + 0.2 * (x - 0.8)
elif x > -0.8:
return x
else:
return -0.8 + 0.2 * (x + 0.8)
colors = ['b','g','r','c','m','y', 'k']
functions = {"relu": "ReLU", "sigmoid": "Sigmoid", "tanh": "Tanh", "selu": "SELU", "softsign": "Softsign", "softplus": "Softplus", "srelu": "SReLU"}
x_data = np.arange(-3, 3, 0.01)
relu = tf.keras.activations.relu(x_data)
sigmoid = tf.keras.activations.sigmoid(x_data)
tanh = tf.keras.activations.tanh(x_data)
selu = tf.keras.activations.selu(x_data)
softsign = tf.keras.activations.softsign(x_data)
softplus = tf.keras.activations.softplus(x_data)
srelu = []
for x in x_data:
srelu.append(sreluPlot(x))
plt.plot(x_data, relu.numpy(), color = 'b', label="ReLU")
plt.plot(x_data, sigmoid.numpy(), color = 'g', label="Sigmoid")
plt.plot(x_data, tanh.numpy(), color = 'r', label="Tanh")
plt.plot(x_data, selu.numpy(), color = 'c', label="SELU")
plt.plot(x_data, softsign.numpy(), color = 'm', label="Softsign")
plt.plot(x_data, softplus.numpy(), color = 'y', label="Softplus")
plt.plot(x_data, srelu, 'k', label="SReLU")
plt.legend(loc=2, prop={'size': 10})
plt.grid(True)
# plt.xlim([70, 100])
plt.tight_layout()
plt.show()
# plt.savefig("activation_functions.pdf")
# plt.close() | [
"matplotlib.pyplot.grid",
"tensorflow.keras.activations.selu",
"tensorflow.keras.activations.softsign",
"matplotlib.pyplot.legend",
"tensorflow.keras.activations.softplus",
"matplotlib.pyplot.plot",
"tensorflow.keras.activations.tanh",
"tensorflow.keras.activations.relu",
"matplotlib.pyplot.tight_layout",
"tensorflow.keras.activations.sigmoid",
"numpy.arange",
"matplotlib.pyplot.show"
] | [((433, 455), 'numpy.arange', 'np.arange', (['(-3)', '(3)', '(0.01)'], {}), '(-3, 3, 0.01)\n', (442, 455), True, 'import numpy as np\n'), ((464, 497), 'tensorflow.keras.activations.relu', 'tf.keras.activations.relu', (['x_data'], {}), '(x_data)\n', (489, 497), True, 'import tensorflow as tf\n'), ((508, 544), 'tensorflow.keras.activations.sigmoid', 'tf.keras.activations.sigmoid', (['x_data'], {}), '(x_data)\n', (536, 544), True, 'import tensorflow as tf\n'), ((552, 585), 'tensorflow.keras.activations.tanh', 'tf.keras.activations.tanh', (['x_data'], {}), '(x_data)\n', (577, 585), True, 'import tensorflow as tf\n'), ((593, 626), 'tensorflow.keras.activations.selu', 'tf.keras.activations.selu', (['x_data'], {}), '(x_data)\n', (618, 626), True, 'import tensorflow as tf\n'), ((638, 675), 'tensorflow.keras.activations.softsign', 'tf.keras.activations.softsign', (['x_data'], {}), '(x_data)\n', (667, 675), True, 'import tensorflow as tf\n'), ((687, 724), 'tensorflow.keras.activations.softplus', 'tf.keras.activations.softplus', (['x_data'], {}), '(x_data)\n', (716, 724), True, 'import tensorflow as tf\n'), ((1156, 1199), 'matplotlib.pyplot.plot', 'plt.plot', (['x_data', 'srelu', '"""k"""'], {'label': '"""SReLU"""'}), "(x_data, srelu, 'k', label='SReLU')\n", (1164, 1199), True, 'import matplotlib.pyplot as plt\n'), ((1201, 1237), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '(2)', 'prop': "{'size': 10}"}), "(loc=2, prop={'size': 10})\n", (1211, 1237), True, 'import matplotlib.pyplot as plt\n'), ((1238, 1252), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (1246, 1252), True, 'import matplotlib.pyplot as plt\n'), ((1275, 1293), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1291, 1293), True, 'import matplotlib.pyplot as plt\n'), ((1295, 1305), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1303, 1305), True, 'import matplotlib.pyplot as plt\n')] |
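The scalar sreluPlot above is applied point-by-point in a loop; an equivalent vectorized form with the same ±0.8 knots and 0.2 outer slope, reusing the `np` and `x_data` already defined, could be:

def srelu_np(x, t=0.8, a=0.2):
    # slope 1 inside [-t, t], slope a outside, continuous at the knots
    x = np.asarray(x, dtype=float)
    return np.where(x > t, t + a * (x - t),
                    np.where(x < -t, -t + a * (x + t), x))

srelu_vec = srelu_np(x_data)  # matches the list built in the loop above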
# Generated by Django 3.1.7 on 2021-07-20 19:12
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('book_outlet', '0006_books_extra'),
]
operations = [
migrations.RemoveField(
model_name='books',
name='extra',
),
]
| [
"django.db.migrations.RemoveField"
] | [((224, 280), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""books"""', 'name': '"""extra"""'}), "(model_name='books', name='extra')\n", (246, 280), False, 'from django.db import migrations\n')] |
import json
import glob
import re
# remove the year from the end of the event name as we can add it automatically during display
# also we won't want it after we merge the event series
for filename in glob.glob('data/events/*.json'):
#print(filename)
with open(filename) as fh:
data = json.load(fh)
#print(data['name'])
data['name'] = re.sub(r'(\s*-)?\s*\d\d\d\d\s*$', '', data['name'])
#print(data['name'])
with open(filename, 'w') as fh:
json.dump(data, fh, sort_keys=True, indent=4, separators=(',', ': '), ensure_ascii=False)
| [
"json.load",
"re.sub",
"json.dump",
"glob.glob"
] | [((202, 233), 'glob.glob', 'glob.glob', (['"""data/events/*.json"""'], {}), "('data/events/*.json')\n", (211, 233), False, 'import glob\n'), ((360, 417), 're.sub', 're.sub', (['"""(\\\\s*-)?\\\\s*\\\\d\\\\d\\\\d\\\\d\\\\s*$"""', '""""""', "data['name']"], {}), "('(\\\\s*-)?\\\\s*\\\\d\\\\d\\\\d\\\\d\\\\s*$', '', data['name'])\n", (366, 417), False, 'import re\n'), ((302, 315), 'json.load', 'json.load', (['fh'], {}), '(fh)\n', (311, 315), False, 'import json\n'), ((481, 574), 'json.dump', 'json.dump', (['data', 'fh'], {'sort_keys': '(True)', 'indent': '(4)', 'separators': "(',', ': ')", 'ensure_ascii': '(False)'}), "(data, fh, sort_keys=True, indent=4, separators=(',', ': '),\n ensure_ascii=False)\n", (490, 574), False, 'import json\n')] |
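The substitution strips a trailing four-digit year, with or without a separating dash; a few illustrative event names:

import re

pattern = r'(\s*-)?\s*\d\d\d\d\s*$'
print(re.sub(pattern, '', 'PyCon US - 2019'))   # 'PyCon US'
print(re.sub(pattern, '', 'EuroPython 2018'))   # 'EuroPython'
print(re.sub(pattern, '', 'FOSDEM'))              # unchanged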
# Adapted for numpy/ma/cdms2 by convertcdms.py
# <NAME> Reproduction Package
from . import graphics
import vcs
import MV2
import numpy
class WKPlot(object):
def __init__(self, x=None, datawc_x1=-15, datawc_x2=15, datawc_y1=0, datawc_y2=.8, min=-1.4,
max=2., delta_isofill=.2, delta_isoline=.1, days_lines=(30, 6, 3, 2,), H=[12., 25., 50.]):
"""
Plotting class for WK data
"""
if x is None:
self.x = vcs.init()
else:
self.x = x
self.min = min
self.max = max
self.delta_isoline = delta_isoline
self.days_lines = days_lines
self.H = H
self.delta_isofill = delta_isofill
self.datawc_x1 = datawc_x1
self.datawc_x2 = datawc_x2
self.datawc_y1 = datawc_y1
self.datawc_y2 = datawc_y2
return
__slots__ = [
'min',
'max',
'delta_isoline',
'delta_isofill',
'days_lines',
'x',
'H',
'datawc_x1',
'datawc_x2',
'datawc_y1',
'datawc_y2',
]
def plot_figure1(self, S, A, bg=0, x=None, min=None, max=None,
delta_isofill=None, delta_isoline=None, days_lines=None):
title = 'Figure 1'
displays = [] # store displays used
if min is None:
min = self.min
if max is None:
max = self.max
if delta_isoline is None:
delta_isoline = self.delta_isoline
if days_lines is None:
days_lines = self.days_lines
if delta_isofill is None:
delta_isofill = self.delta_isofill
if x is None:
x = self.x
x.landscape()
tmpl, tmplnoleg, isof, isol1, isol2 = graphics.createTemplateandGM(
x, min, max, delta_isofill, delta_isoline, days_lines, ntemplate=2)
for gm in isof, isol1, isol2:
gm.datawc_x1 = self.datawc_x1
gm.datawc_x2 = self.datawc_x2
gm.datawc_y1 = self.datawc_y1
gm.datawc_y2 = self.datawc_y2
tmpl2 = x.createtemplate(source=tmpl.name)
tmpl2.moveto(.54, .2)
tmpl2noleg = x.createtemplate(source=tmpl2.name)
tmpl2noleg.legend.priority = 0
for (sym, templ, templnoleg) in [
(-1, tmpl, tmplnoleg), (1, tmpl2, tmpl2noleg)]:
if sym == -1:
power = A
else:
power = S
id = power.id
power = MV2.log10(power)
power.id = id
fq = power.getAxis(0)
fq.id = 'Frequency (CPD)'
w = power.getAxis(1)
w.id = 'Westward Zonal Wave Number Eastward'
displays.append(x.plot(power, isof, templ, bg=bg))
displays.append(x.plot(power, isol1, templnoleg, bg=bg))
displays.append(x.plot(power, isol2, templnoleg, bg=bg))
tt = x.createtext()
tt.x = [.5]
tt.y = [.97]
tt.height = 25
tt.halign = 'center'
tt.string = [title, ]
displays.append(x.plot(tt, bg=bg))
x.update()
return displays
def plot_figure2(self, bg_power, bg=0, x=None, min=-1.4, max=2.,
delta_isofill=None, delta_isoline=.1, days_lines=(30, 6, 3, 2,)):
displays = [] # store displays used
title = 'Background power'
if min is None:
min = self.min
if max is None:
max = self.max
if delta_isoline is None:
delta_isoline = self.delta_isoline
if delta_isofill is None:
delta_isofill = self.delta_isofill
if days_lines is None:
days_lines = self.days_lines
if x is None:
x = self.x
orientation = 'portrait'
x.portrait()
tmpl, tmplnoleg, isof, isol1, isol2 = graphics.createTemplateandGM(
x, min, max, delta_isofill, delta_isoline, days_lines, orientation=orientation)
for gm in isof, isol1, isol2:
gm.datawc_x1 = self.datawc_x1
gm.datawc_x2 = self.datawc_x2
gm.datawc_y1 = self.datawc_y1
gm.datawc_y2 = self.datawc_y2
fq = bg_power.getAxis(0)
fq.id = 'Frequency (CPD)'
w = bg_power.getAxis(1)
w.id = 'Westward Zonal Wave Number Eastward'
displays.append(x.plot(bg_power, isof, tmpl, bg=bg))
displays.append(x.plot(bg_power, isol1, tmplnoleg, bg=bg))
displays.append(x.plot(bg_power, isol2, tmplnoleg, bg=bg))
tt = x.createtext()
tt.x = [.5]
tt.y = [.97]
tt.string = [title, ]
tt.halign = 'center'
tt.height = 25
displays.append(x.plot(tt, bg=bg))
return displays
def plot_figure3(self, S, A, bg=0, x=None, min=None, max=None,
delta_isofill=None, delta_isoline=None, days_lines=None, H=None):
title = 'Figure 3'
displays = [] # store displays used
if min is None:
min = self.min
if max is None:
max = self.max
if delta_isoline is None:
delta_isoline = self.delta_isoline
if days_lines is None:
days_lines = self.days_lines
if H is None:
H = self.H
if delta_isofill is None:
delta_isofill = self.delta_isofill
if x is None:
x = self.x
x.landscape()
tmpl, tmplnoleg, isof, isol1, isol2 = graphics.createTemplateandGM(
x, min, max, delta_isofill, delta_isoline, days_lines, ntemplate=2)
for gm in isof, isol1, isol2:
gm.datawc_x1 = self.datawc_x1
gm.datawc_x2 = self.datawc_x2
gm.datawc_y1 = self.datawc_y1
gm.datawc_y2 = self.datawc_y2
tmpl2 = x.createtemplate(source=tmpl.name)
tmpl2.moveto(.54, .2)
tmpl2noleg = x.createtemplate(source=tmpl2.name)
tmpl2noleg.legend.priority = 0
for (sym, templ, templnoleg) in [
(-1, tmpl, tmplnoleg), (1, tmpl2, tmpl2noleg)]:
if sym == -1:
power = A
else:
power = S
id = power.id
power.id = id
fq = power.getAxis(0)
oidf = fq.id
fq.id = 'Frequency (CPD)'
w = power.getAxis(1)
oidw = w.id
w.id = 'Westward Zonal Wave Number Eastward'
displays.append(x.plot(power, isof, templ, bg=bg))
displays.append(x.plot(power, isol1, templnoleg, bg=bg))
displays.append(x.plot(power, isol2, templnoleg, bg=bg))
# Put back the original ids
fq.id = oidf
w.id = oidw
# Ok now the curves
# First the graphic stuff
ln_tmp = x.createline()
ln_tmp.width = 2
ln_tmp.color = ["grey"]
yx = x.createyxvsx()
yx.datawc_x1 = isol1.datawc_x1
yx.datawc_x2 = isol1.datawc_x2
yx.datawc_y1 = isol1.datawc_y1
yx.datawc_y2 = isol1.datawc_y2
yx.xticlabels1 = {}
yx.yticlabels1 = {}
yx.xticlabels2 = {}
yx.yticlabels2 = {}
yx.linewidth = ln_tmp.width[0]
yx.linecolor = ln_tmp.color[0]
yx.linetype = ln_tmp.type[0]
yx.marker = 1
yx.markersize = 1
yx.markercolor = "grey"
# Now the equations
g = 9.81
lat = 0.
ll = 2. * numpy.pi * 6.37E6 * numpy.cos(abs(lat))
Beta = 2. * 7.292E-5 * numpy.cos(abs(lat)) / 6.37E6
wvn = power.getAxis(1)[:] # get the wave number
k = 2 * numpy.pi * wvn / ll
kax = power.getAxis(1)
tmplnoleg.dataname.priority = 0
tmpl2noleg.dataname.priority = 0
for h in H:
            # Anti-Symmetric
# First type
t1 = numpy.ma.sqrt(1. + (4 * Beta) /
(k * k * numpy.ma.sqrt(g * h)))
t1 = numpy.ma.masked_greater(t1, 1.e30)
t1b = k * numpy.ma.sqrt(g * h) * (1. + t1) / 2.
t1 = k * numpy.ma.sqrt(g * h) * (1. - t1) / 2.
# second type
t2 = numpy.ma.sqrt(numpy.ma.sqrt(g * h) * Beta)
# MRG wave and IG n=0 waves
MRGandIG0 = numpy.ma.where(numpy.ma.equal(k, 0), t2, t1)
MRGandIG0 = numpy.ma.where(numpy.ma.greater(k, 0), t1b, MRGandIG0)
MRGandIG0 = converttofreq(MRGandIG0, kax)
# IG n=2 waves
n = 2
dl = Beta * numpy.ma.sqrt(g * h)
t3 = numpy.ma.sqrt((2 * n + 1.) * dl + g * h * k * k)
for i in range(5):
t3 = numpy.ma.sqrt((2 * n + 1.) * dl + (g * h) *
k * k + g * h * Beta * k / t3)
IG2 = converttofreq(t3, kax)
# ER wave, n=1
n = 1
t4 = (Beta / numpy.ma.sqrt(g * h)) * (2 * n + 1.)
t4 = -Beta * k / (k * k + t4)
ER = numpy.ma.ones(k.shape, 'f')
ER = numpy.ma.masked_equal(ER, 1)
ER = numpy.ma.where(numpy.ma.less(k, 0.), t4, ER)
ER = converttofreq(ER, kax)
# Kelvin Wave
KW = numpy.ma.array(k * numpy.ma.sqrt(g * h))
KW = converttofreq(KW, kax)
# IG n=1
n = 1
dl = Beta * numpy.ma.sqrt(g * h)
t5 = numpy.ma.sqrt((2 * n + 1.) * dl + g * h * k * k)
for i in range(5):
t5 = numpy.ma.sqrt((2 * n + 1.) * dl + (g * h) *
k * k + g * h * Beta * k / t5)
IG = converttofreq(t5, kax)
okid = kax.id
kax.id = 'Westward Zonal Wave Number Eastward'
displays.append(x.plot(MRGandIG0, tmplnoleg, yx, bg=bg))
displays.append(x.plot(IG2, tmplnoleg, yx, bg=bg))
displays.append(x.plot(ER, tmpl2noleg, yx, bg=bg))
displays.append(x.plot(KW, tmpl2noleg, yx, bg=bg))
displays.append(x.plot(IG, tmpl2noleg, yx, bg=bg))
kax.id = okid
# Now plot the labels....
# h
t = x.createtext()
t.string = [str(h)]
xx = 12
t.x = [xx]
t.y = [
float(
MRGandIG0(
planetaryzonalwavenumber=(
xx,
xx,
'ccb'),
squeeze=1))]
t.height = 15
t.worldcoordinate = [
yx.datawc_x1,
yx.datawc_x2,
yx.datawc_y1,
yx.datawc_y2]
t.viewport = [
tmpl.data.x1,
tmpl.data.x2,
tmpl.data.y1,
tmpl.data.y2]
t.priority = 2
t.color = "grey"
displays.append(x.plot(t, bg=bg))
xx = 0
t = x.createtext(Tt_source=t.Tt_name, To_source=t.To_name)
t.x = [xx]
t.y = [
float(IG2(planetaryzonalwavenumber=(xx, xx, 'ccb'), squeeze=1))]
displays.append(x.plot(t, bg=bg))
if h == H[2]:
xx = 5
t = x.createtext(Tt_source=t.Tt_name, To_source=t.To_name)
t.worldcoordinate = [
yx.datawc_x1,
yx.datawc_x2,
yx.datawc_y1,
yx.datawc_y2]
t.viewport = [
tmpl.data.x1,
tmpl.data.x2,
tmpl.data.y1,
tmpl.data.y2]
t.x = [xx]
t.y = [
float(
MRGandIG0(
planetaryzonalwavenumber=(
xx,
xx,
'ccb'),
squeeze=1))]
t.string = ['n=0 EIG']
displays.append(x.plot(t, bg=bg))
if h == H[1]:
xx = -12
t = x.createtext(Tt_source=t.Tt_name, To_source=t.To_name)
t.worldcoordinate = [
yx.datawc_x1,
yx.datawc_x2,
yx.datawc_y1,
yx.datawc_y2]
t.viewport = [
tmpl.data.x1,
tmpl.data.x2,
tmpl.data.y1,
tmpl.data.y2]
t.x = [xx]
t.y = [
float(IG2(planetaryzonalwavenumber=(xx, xx, 'ccb'), squeeze=1))]
t.string = ['n=2 WIG']
displays.append(x.plot(t, bg=bg))
xx = 6
t = x.createtext(Tt_source=t.Tt_name, To_source=t.To_name)
t.worldcoordinate = [
yx.datawc_x1,
yx.datawc_x2,
yx.datawc_y1,
yx.datawc_y2]
t.viewport = [
tmpl.data.x1,
tmpl.data.x2,
tmpl.data.y1,
tmpl.data.y2]
t.x = [xx]
t.y = [
float(IG2(planetaryzonalwavenumber=(xx, xx, 'ccb'), squeeze=1))]
t.string = ['n=2 EIG']
displays.append(x.plot(t, bg=bg))
xx = -8
t.worldcoordinate = [
yx.datawc_x1,
yx.datawc_x2,
yx.datawc_y1,
yx.datawc_y2]
t.viewport = [
tmpl.data.x1,
tmpl.data.x2,
tmpl.data.y1,
tmpl.data.y2]
t = x.createtext(Tt_source=t.Tt_name, To_source=t.To_name)
t.x = [xx]
t.y = [
float(
MRGandIG0(
planetaryzonalwavenumber=(
xx,
xx,
'ccb'),
squeeze=1))]
t.string = ['MRG']
displays.append(x.plot(t, bg=bg))
# Ok now the string are in the second template
xx = 0
t = x.createtext(Tt_source=t.Tt_name, To_source=t.To_name)
t.worldcoordinate = [
yx.datawc_x1,
yx.datawc_x2,
yx.datawc_y1,
yx.datawc_y2]
t.viewport = [
tmpl2.data.x1,
tmpl2.data.x2,
tmpl2.data.y1,
tmpl2.data.y2]
t.x = [xx, ]
t.y = [
float(IG(planetaryzonalwavenumber=(xx, xx, 'ccb'), squeeze=1))]
t.string = [str(h), ]
displays.append(x.plot(t, bg=bg))
if h == H[0]:
xx = 12
elif h == H[1]:
xx = 9
else:
xx = 7
t = x.createtext(Tt_source=t.Tt_name, To_source=t.To_name)
t.worldcoordinate = [
yx.datawc_x1,
yx.datawc_x2,
yx.datawc_y1,
yx.datawc_y2]
t.viewport = [
tmpl2.data.x1,
tmpl2.data.x2,
tmpl2.data.y1,
tmpl2.data.y2]
t.x = [xx]
t.y = [
float(KW(planetaryzonalwavenumber=(xx, xx, 'ccb'), squeeze=1))]
displays.append(x.plot(t, bg=bg))
if h == H[0]:
xx = 3
t = x.createtext(Tt_source=t.Tt_name, To_source=t.To_name)
t.worldcoordinate = [
yx.datawc_x1,
yx.datawc_x2,
yx.datawc_y1,
yx.datawc_y2]
t.viewport = [
tmpl2.data.x1,
tmpl2.data.x2,
tmpl2.data.y1,
tmpl2.data.y2]
t.x = [xx]
t.y = [
float(KW(planetaryzonalwavenumber=(xx, xx, 'ccb'), squeeze=1))]
t.string = [' MJO']
displays.append(x.plot(t, bg=bg))
xx = 7
t = x.createtext(Tt_source=t.Tt_name, To_source=t.To_name)
t.worldcoordinate = [
yx.datawc_x1,
yx.datawc_x2,
yx.datawc_y1,
yx.datawc_y2]
t.viewport = [
tmpl2.data.x1,
tmpl2.data.x2,
tmpl2.data.y1,
tmpl2.data.y2]
t.x = [xx]
t.y = [
float(KW(planetaryzonalwavenumber=(xx, xx, 'ccb'), squeeze=1))]
t.string = ['Kelvin']
displays.append(x.plot(t, bg=bg))
if h == H[1]:
xx = -13
t = x.createtext(Tt_source=t.Tt_name, To_source=t.To_name)
t.worldcoordinate = [
yx.datawc_x1,
yx.datawc_x2,
yx.datawc_y1,
yx.datawc_y2]
t.viewport = [
tmpl2.data.x1,
tmpl2.data.x2,
tmpl2.data.y1,
tmpl2.data.y2]
t.x = [xx]
t.y = [
float(IG(planetaryzonalwavenumber=(xx, xx, 'ccb'), squeeze=1))]
t.string = ['n=1 WIG']
displays.append(x.plot(t, bg=bg))
xx = 6
t = x.createtext(Tt_source=t.Tt_name, To_source=t.To_name)
t.worldcoordinate = [
yx.datawc_x1,
yx.datawc_x2,
yx.datawc_y1,
yx.datawc_y2]
t.viewport = [
tmpl2.data.x1,
tmpl2.data.x2,
tmpl2.data.y1,
tmpl2.data.y2]
t.x = [xx]
t.y = [
float(IG(planetaryzonalwavenumber=(xx, xx, 'ccb'), squeeze=1))]
t.string = ['n=1 EIG']
displays.append(x.plot(t, bg=bg))
if h == H[-1]:
xx = -12
t = x.createtext(Tt_source=t.Tt_name, To_source=t.To_name)
t.worldcoordinate = [
yx.datawc_x1,
yx.datawc_x2,
yx.datawc_y1,
yx.datawc_y2]
t.viewport = [
tmpl2.data.x1,
tmpl2.data.x2,
tmpl2.data.y1,
tmpl2.data.y2]
t.x = [xx]
t.y = [float(ER(planetaryzonalwavenumber=(
xx, xx, 'ccb'), squeeze=1)) + .02]
t.string = ['n=1 ER']
displays.append(x.plot(t, bg=bg))
tt = x.createtext()
tt.x = [.5]
tt.y = [.97]
tt.halign = 'center'
tt.height = 25
tt.string = [title, ]
displays.append(x.plot(tt, bg=bg))
return displays
def converttofreq(data, kax):
P = 2 * numpy.pi / (data * 24 * 60 * 60)
P = MV2.array(1. / P)
P.setAxis(0, kax)
return P
| [
"MV2.log10",
"numpy.ma.masked_equal",
"vcs.init",
"numpy.ma.ones",
"numpy.ma.sqrt",
"MV2.array",
"numpy.ma.less",
"numpy.ma.greater",
"numpy.ma.masked_greater",
"numpy.ma.equal"
] | [((19375, 19393), 'MV2.array', 'MV2.array', (['(1.0 / P)'], {}), '(1.0 / P)\n', (19384, 19393), False, 'import MV2\n'), ((467, 477), 'vcs.init', 'vcs.init', ([], {}), '()\n', (475, 477), False, 'import vcs\n'), ((2511, 2527), 'MV2.log10', 'MV2.log10', (['power'], {}), '(power)\n', (2520, 2527), False, 'import MV2\n'), ((8009, 8043), 'numpy.ma.masked_greater', 'numpy.ma.masked_greater', (['t1', '(1e+30)'], {}), '(t1, 1e+30)\n', (8032, 8043), False, 'import numpy\n'), ((8601, 8650), 'numpy.ma.sqrt', 'numpy.ma.sqrt', (['((2 * n + 1.0) * dl + g * h * k * k)'], {}), '((2 * n + 1.0) * dl + g * h * k * k)\n', (8614, 8650), False, 'import numpy\n'), ((9020, 9047), 'numpy.ma.ones', 'numpy.ma.ones', (['k.shape', '"""f"""'], {}), "(k.shape, 'f')\n", (9033, 9047), False, 'import numpy\n'), ((9065, 9093), 'numpy.ma.masked_equal', 'numpy.ma.masked_equal', (['ER', '(1)'], {}), '(ER, 1)\n', (9086, 9093), False, 'import numpy\n'), ((9423, 9472), 'numpy.ma.sqrt', 'numpy.ma.sqrt', (['((2 * n + 1.0) * dl + g * h * k * k)'], {}), '((2 * n + 1.0) * dl + g * h * k * k)\n', (9436, 9472), False, 'import numpy\n'), ((8330, 8350), 'numpy.ma.equal', 'numpy.ma.equal', (['k', '(0)'], {}), '(k, 0)\n', (8344, 8350), False, 'import numpy\n'), ((8399, 8421), 'numpy.ma.greater', 'numpy.ma.greater', (['k', '(0)'], {}), '(k, 0)\n', (8415, 8421), False, 'import numpy\n'), ((8563, 8583), 'numpy.ma.sqrt', 'numpy.ma.sqrt', (['(g * h)'], {}), '(g * h)\n', (8576, 8583), False, 'import numpy\n'), ((8702, 8775), 'numpy.ma.sqrt', 'numpy.ma.sqrt', (['((2 * n + 1.0) * dl + g * h * k * k + g * h * Beta * k / t3)'], {}), '((2 * n + 1.0) * dl + g * h * k * k + g * h * Beta * k / t3)\n', (8715, 8775), False, 'import numpy\n'), ((9126, 9147), 'numpy.ma.less', 'numpy.ma.less', (['k', '(0.0)'], {}), '(k, 0.0)\n', (9139, 9147), False, 'import numpy\n'), ((9385, 9405), 'numpy.ma.sqrt', 'numpy.ma.sqrt', (['(g * h)'], {}), '(g * h)\n', (9398, 9405), False, 'import numpy\n'), ((9524, 9597), 'numpy.ma.sqrt', 'numpy.ma.sqrt', (['((2 * n + 1.0) * dl + g * h * k * k + g * h * Beta * k / t5)'], {}), '((2 * n + 1.0) * dl + g * h * k * k + g * h * Beta * k / t5)\n', (9537, 9597), False, 'import numpy\n'), ((8221, 8241), 'numpy.ma.sqrt', 'numpy.ma.sqrt', (['(g * h)'], {}), '(g * h)\n', (8234, 8241), False, 'import numpy\n'), ((8924, 8944), 'numpy.ma.sqrt', 'numpy.ma.sqrt', (['(g * h)'], {}), '(g * h)\n', (8937, 8944), False, 'import numpy\n'), ((9259, 9279), 'numpy.ma.sqrt', 'numpy.ma.sqrt', (['(g * h)'], {}), '(g * h)\n', (9272, 9279), False, 'import numpy\n'), ((8066, 8086), 'numpy.ma.sqrt', 'numpy.ma.sqrt', (['(g * h)'], {}), '(g * h)\n', (8079, 8086), False, 'import numpy\n'), ((8125, 8145), 'numpy.ma.sqrt', 'numpy.ma.sqrt', (['(g * h)'], {}), '(g * h)\n', (8138, 8145), False, 'import numpy\n'), ((7969, 7989), 'numpy.ma.sqrt', 'numpy.ma.sqrt', (['(g * h)'], {}), '(g * h)\n', (7982, 7989), False, 'import numpy\n')] |
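For reference, `converttofreq` above turns the dispersion-relation values (angular frequencies in rad/s, as produced in plot_figure3) into cycles per day, the unit on the WK diagrams' frequency axis. The same conversion without the MV2/axis bookkeeping:

import numpy

def omega_to_cpd(omega):
    # period P = 2*pi / omega seconds; expressed in days, the frequency is 1/P
    return omega * 24 * 60 * 60 / (2 * numpy.pi)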
"""
Should emit:
B022 - on lines 8
"""
import contextlib
with contextlib.suppress():
raise ValueError
with contextlib.suppress(ValueError):
raise ValueError
exceptions_to_suppress = []
if True:
exceptions_to_suppress.append(ValueError)
with contextlib.suppress(*exceptions_to_suppress):
raise ValueError
| [
"contextlib.suppress"
] | [((64, 85), 'contextlib.suppress', 'contextlib.suppress', ([], {}), '()\n', (83, 85), False, 'import contextlib\n'), ((114, 145), 'contextlib.suppress', 'contextlib.suppress', (['ValueError'], {}), '(ValueError)\n', (133, 145), False, 'import contextlib\n'), ((258, 302), 'contextlib.suppress', 'contextlib.suppress', (['*exceptions_to_suppress'], {}), '(*exceptions_to_suppress)\n', (277, 302), False, 'import contextlib\n')] |
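The first `with` block above is what B022 flags: `contextlib.suppress()` with no arguments suppresses nothing, so the `raise` still propagates. The intended pattern names the exception(s) to swallow, e.g.:

import contextlib
import os

with contextlib.suppress(FileNotFoundError):
    os.remove("maybe-missing.tmp")  # no error if the file is already gone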
# Generated by Django 3.0.7 on 2020-06-05 15:59
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('authentification', '0002_auto_20200605_1618'),
]
operations = [
migrations.CreateModel(
name='UserProfile',
fields=[
('username', models.CharField(primary_key=True, help_text='Please enter your username.', max_length=20, serialize=False)),
('first_name', models.CharField(help_text='Please enter your firstname.', max_length=20)),
('last_name', models.CharField(help_text='Please enter your lastname.', max_length=30)),
('email', models.CharField(help_text='Please enter your email address.', max_length=25)),
('pwd', models.EmailField(help_text='Please enter your password.', max_length=256)),
],
options={
'ordering': ['-username'],
},
),
migrations.DeleteModel(
name='User',
),
]
| [
"django.db.models.EmailField",
"django.db.migrations.DeleteModel",
"django.db.models.CharField"
] | [((993, 1028), 'django.db.migrations.DeleteModel', 'migrations.DeleteModel', ([], {'name': '"""User"""'}), "(name='User')\n", (1015, 1028), False, 'from django.db import migrations, models\n'), ((350, 461), 'django.db.models.CharField', 'models.CharField', ([], {'primary_key': '(True)', 'help_text': '"""Please enter your username."""', 'max_length': '(20)', 'serialize': '(False)'}), "(primary_key=True, help_text='Please enter your username.',\n max_length=20, serialize=False)\n", (366, 461), False, 'from django.db import migrations, models\n'), ((491, 564), 'django.db.models.CharField', 'models.CharField', ([], {'help_text': '"""Please enter your firstname."""', 'max_length': '(20)'}), "(help_text='Please enter your firstname.', max_length=20)\n", (507, 564), False, 'from django.db import migrations, models\n'), ((597, 669), 'django.db.models.CharField', 'models.CharField', ([], {'help_text': '"""Please enter your lastname."""', 'max_length': '(30)'}), "(help_text='Please enter your lastname.', max_length=30)\n", (613, 669), False, 'from django.db import migrations, models\n'), ((698, 775), 'django.db.models.CharField', 'models.CharField', ([], {'help_text': '"""Please enter your email address."""', 'max_length': '(25)'}), "(help_text='Please enter your email address.', max_length=25)\n", (714, 775), False, 'from django.db import migrations, models\n'), ((802, 876), 'django.db.models.EmailField', 'models.EmailField', ([], {'help_text': '"""Please enter your password."""', 'max_length': '(256)'}), "(help_text='Please enter your password.', max_length=256)\n", (819, 876), False, 'from django.db import migrations, models\n')] |
from urllib import request
from bs4 import BeautifulSoup as bs
html=request.urlopen('http://python-data.dr-chuck.net/comments_389063.html').read()
soup = bs(html)
tags=soup('span')
sum=0
for find in tags:
    sum=sum+int(find.contents[0])
print(sum)
| [
"bs4.BeautifulSoup",
"urllib.request.urlopen"
] | [((154, 162), 'bs4.BeautifulSoup', 'bs', (['html'], {}), '(html)\n', (156, 162), True, 'from bs4 import BeautifulSoup as bs\n'), ((68, 139), 'urllib.request.urlopen', 'request.urlopen', (['"""http://python-data.dr-chuck.net/comments_389063.html"""'], {}), "('http://python-data.dr-chuck.net/comments_389063.html')\n", (83, 139), False, 'from urllib import request\n')] |
import unittest
import mock
import struct
from ebmlite.core import * # type: ignore
import numpy as np # type: ignore
from idelib.importer import openFile
from idelib.parsers import ChannelDataBlockParser, ChannelDataBlock
from .file_streams import makeStreamLike
class TestChannelDataBlockParser(unittest.TestCase):
""" Tests for ChannelDataBlockParser """
def setUp(self):
self.doc = openFile(makeStreamLike('./testing/SSX70065.IDE'))
chDatBlockEl = self.doc.ebmldoc.children[161]
self.element = [x for x in self.doc.ebmldoc.value if type(x) is chDatBlockEl and x[0].value == 32][0]
self.block = ChannelDataBlock(self.element)
self.parser = ChannelDataBlockParser(self.doc)
def testConstructor(self):
self.assertIsNone(self.parser.children)
self.assertFalse(self.parser.isSubElement)
self.assertFalse(self.parser.isHeader)
self.assertIs(self.doc, self.parser.doc)
self.assertIs(ChannelDataBlock, self.parser.product)
self.assertEqual(ChannelDataBlock.__name__, self.parser.elementName)
self.assertEqual(10**6 / 2**15, self.parser.timeScalar)
self.assertEqual({}, self.parser.timestampOffset)
self.assertEqual({}, self.parser.lastStamp)
self.assertEqual({}, self.parser.timeScalars)
self.assertEqual({}, self.parser.timeModulus, )
def testParse(self):
""" Test parsing for ChannelDataBlocks, which is basically the same """
# Straightforward case
ch = self.doc.channels[self.block.channel]
self.assertEqual(self.parser.parse(self.element), self.block.getNumSamples(ch.parser) * len(ch.children))
# None element
self.assertRaises(TypeError, self.parser.parse, None)
def testGetElementName(self):
""" Test getElementName from ElementHandler. """
self.assertEqual(self.parser.getElementName(self.element), "'ChannelDataBlock' (0xa1) @1322")
def testMakesData(self):
""" Test makesData from ElementHandler. """
self.assertTrue(self.parser.makesData())
class TestChannelDataBlock(unittest.TestCase):
""" Tests for ChannelDataBlock """
# NOTE: payload and parse* definitely need to be re-written, and tested
# with new stuff, but (most of) the other stuff should be fine as-is
def setUp(self):
self.doc = openFile(makeStreamLike('./testing/SSX70065.IDE'))
self.ebmldoc = self.doc.ebmldoc
def chFilter(x):
return type(x) is self.ebmldoc.children[161] and x[0].value == 32
self.element = [x for x in self.ebmldoc.value if chFilter(x)][0]
self.block = ChannelDataBlock(self.element)
def testConstructor(self):
self.assertIs(self.block.element, self.element)
self.assertIsNone(self.block.numSamples)
self.assertIsNone(self.block.sampleRate)
self.assertIsNone(self.block.sampleTime)
self.assertIsNone(self.block.indexRange)
# self.assertIsNone(self.block.minMeanMax)
# self.assertIsNone(self.block.min)
# self.assertIsNone(self.block.mean)
# self.assertIsNone(self.block.max)
self.assertIsNone(self.block._rollingMean)
self.assertIsNone(self.block._rollingMeanLen)
self.assertFalse(self.block.cache)
self.assertEqual(self.block.maxTimestamp, 16777216)
self.assertEqual(self.block.timeScalar, 30.517578125)
self.assertEqual(self.block.blockIndex, -1)
self.assertEqual(self.block.startTime, 211)
self.assertEqual(self.block.endTime, 14721)
self.assertEqual(self.block.payloadSize, 8142)
def testRepr(self):
self.assertEqual(repr(self.block),
'<ChannelDataBlock Channel: 32>')
def testPayload(self):
np.testing.assert_array_equal(self.block.payload,
np.asarray(self.block._payloadEl.value))
def testGetHeader(self):
self.assertEqual(self.block.getHeader(), (211, 32))
def testToNpTypestr(self):
for stype, nptype in ChannelDataBlock.TO_NP_TYPESTR.items():
for endian in ('<', '>'):
assert (
struct.calcsize(endian+stype)
== np.dtype(endian+nptype).itemsize
)
def testGetNumSamples(self):
self.assertEqual(self.block.getNumSamples(self.doc.channels[32].parser), 1357)
def testIsValidLength(self):
parser = self.doc.channels[32].parser
self.assertTrue(self.block.isValidLength(parser))
self.block.payloadSize -= 1
self.assertFalse(self.block.isValidLength(parser))
def tuplify(arr):
out = []
for i in range(len(arr.T)):
out.append(tuple(arr[:, i]))
return out | [
"idelib.parsers.ChannelDataBlock.TO_NP_TYPESTR.items",
"struct.calcsize",
"idelib.parsers.ChannelDataBlock",
"idelib.parsers.ChannelDataBlockParser",
"numpy.asarray",
"numpy.dtype"
] | [((645, 675), 'idelib.parsers.ChannelDataBlock', 'ChannelDataBlock', (['self.element'], {}), '(self.element)\n', (661, 675), False, 'from idelib.parsers import ChannelDataBlockParser, ChannelDataBlock\n'), ((699, 731), 'idelib.parsers.ChannelDataBlockParser', 'ChannelDataBlockParser', (['self.doc'], {}), '(self.doc)\n', (721, 731), False, 'from idelib.parsers import ChannelDataBlockParser, ChannelDataBlock\n'), ((2829, 2859), 'idelib.parsers.ChannelDataBlock', 'ChannelDataBlock', (['self.element'], {}), '(self.element)\n', (2845, 2859), False, 'from idelib.parsers import ChannelDataBlockParser, ChannelDataBlock\n'), ((4267, 4305), 'idelib.parsers.ChannelDataBlock.TO_NP_TYPESTR.items', 'ChannelDataBlock.TO_NP_TYPESTR.items', ([], {}), '()\n', (4303, 4305), False, 'from idelib.parsers import ChannelDataBlockParser, ChannelDataBlock\n'), ((4075, 4114), 'numpy.asarray', 'np.asarray', (['self.block._payloadEl.value'], {}), '(self.block._payloadEl.value)\n', (4085, 4114), True, 'import numpy as np\n'), ((4390, 4421), 'struct.calcsize', 'struct.calcsize', (['(endian + stype)'], {}), '(endian + stype)\n', (4405, 4421), False, 'import struct\n'), ((4443, 4468), 'numpy.dtype', 'np.dtype', (['(endian + nptype)'], {}), '(endian + nptype)\n', (4451, 4468), True, 'import numpy as np\n')] |