code (string, lengths 20 to 1.05M) | apis (sequence) | extract_api (string, lengths 75 to 5.24M)
---|---|---
#!/usr/bin/env python
import yaml
import xmltodict
from ncclient import manager
# Define a filter using <filter> tag to
# retrieve the openconfig-interfaces model
INTERFACE_STATE_XML = """
<filter>
<interfaces xmlns="http://openconfig.net/yang/interfaces">
<interface>
<state/>
</interface>
</interfaces>
</filter>
"""
def main():
with open('device.yml', 'r') as fp:
device = yaml.safe_load(fp.read())
# Connect
with manager.connect(**device, hostkey_verify=False) as mgr:
# Use .get() to retrieve the configuration using a filter
resp = mgr.get(INTERFACE_STATE_XML).xml
# Load the xml response data as a Python dict
resp_dict = xmltodict.parse(resp)
interfaces = resp_dict.get('rpc-reply')['data']['interfaces']['interface']
        # Display operational data for interfaces that are up
for iface in interfaces:
state = iface['state']
if state['oper-status'] == 'UP':
name = state['name']
counters = state['counters']
in_packets = int(counters['in-unicast-pkts'])
out_packets = int(counters['out-unicast-pkts'])
print(f'Interface: {name}')
print(f'\tInput Packets: {in_packets}')
print(f'\tOutput Packets: {out_packets}')
print(f'\tTotal Packets: {in_packets + out_packets}')
if __name__ == "__main__":
main()
| [
"ncclient.manager.connect",
"xmltodict.parse"
] | [((485, 532), 'ncclient.manager.connect', 'manager.connect', ([], {'hostkey_verify': '(False)'}), '(**device, hostkey_verify=False)\n', (500, 532), False, 'from ncclient import manager\n'), ((737, 758), 'xmltodict.parse', 'xmltodict.parse', (['resp'], {}), '(resp)\n', (752, 758), False, 'import xmltodict\n')] |
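The script above reads its NETCONF connection settings from device.yml and splats them straight into manager.connect(). A minimal sketch of producing that file is below; the host and credential values are placeholders, and the key names (host, port, username, password) assume ncclient's usual connect() arguments.

import yaml

device = {
    'host': '192.0.2.10',   # placeholder management address
    'port': 830,            # default NETCONF-over-SSH port
    'username': 'admin',    # placeholder credentials
    'password': 'admin',
}

with open('device.yml', 'w') as fp:
    yaml.safe_dump(device, fp, default_flow_style=False)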
from tests.edge_walk.auto_gen.data.const import A, GOAL, INIT, IS, THIS
# State
from tests.edge_walk.code.states1.goal import GoalState
from tests.edge_walk.code.states1.init import InitState
from tests.edge_walk.code.states1.init_this import InitThisState
from tests.edge_walk.code.states1.init_this_is import InitThisIsState
from tests.edge_walk.code.states1.init_this_is_a import InitThisIsAState
# State wrapper
from tests.edge_walk.code.states2.goal import create_goal
from tests.edge_walk.code.states2.init_this_is_a import create_init_this_is_a
from tests.edge_walk.code.states2.init_this_is import create_init_this_is
from tests.edge_walk.code.states2.init_this import create_init_this
from tests.edge_walk.code.states2.init import create_init
state_gen = {
INIT: {
"": lambda: create_init(InitState()),
THIS: {
"": lambda: create_init_this(InitThisState()),
IS: {
"": lambda: create_init_this_is(InitThisIsState()),
A: {
"": lambda: create_init_this_is_a(InitThisIsAState()),
},
},
},
},
GOAL: lambda: create_goal(GoalState()),
}
| [
"tests.edge_walk.code.states1.init_this_is.InitThisIsState",
"tests.edge_walk.code.states1.init_this_is_a.InitThisIsAState",
"tests.edge_walk.code.states1.goal.GoalState",
"tests.edge_walk.code.states1.init_this.InitThisState",
"tests.edge_walk.code.states1.init.InitState"
] | [((1166, 1177), 'tests.edge_walk.code.states1.goal.GoalState', 'GoalState', ([], {}), '()\n', (1175, 1177), False, 'from tests.edge_walk.code.states1.goal import GoalState\n'), ((813, 824), 'tests.edge_walk.code.states1.init.InitState', 'InitState', ([], {}), '()\n', (822, 824), False, 'from tests.edge_walk.code.states1.init import InitState\n'), ((884, 899), 'tests.edge_walk.code.states1.init_this.InitThisState', 'InitThisState', ([], {}), '()\n', (897, 899), False, 'from tests.edge_walk.code.states1.init_this import InitThisState\n'), ((968, 985), 'tests.edge_walk.code.states1.init_this_is.InitThisIsState', 'InitThisIsState', ([], {}), '()\n', (983, 985), False, 'from tests.edge_walk.code.states1.init_this_is import InitThisIsState\n'), ((1063, 1081), 'tests.edge_walk.code.states1.init_this_is_a.InitThisIsAState', 'InitThisIsAState', ([], {}), '()\n', (1079, 1081), False, 'from tests.edge_walk.code.states1.init_this_is_a import InitThisIsAState\n')] |
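The nested state_gen dict above maps a path of tokens to factory lambdas, with the empty-string key holding the factory for the prefix consumed so far (GOAL maps to a bare lambda). A small walking helper is sketched below; it assumes the imported constants are ordinary hashable keys whose concrete values live in the const module.

def build_state(path):
    # Walk the nested dict one token at a time, then call either the bare
    # lambda (terminal entry) or the factory stored under the "" key.
    node = state_gen
    for token in path:
        node = node[token]
    return node() if callable(node) else node[""]()

# build_state([INIT])               -> create_init(InitState())
# build_state([INIT, THIS, IS, A])  -> create_init_this_is_a(InitThisIsAState())
# build_state([GOAL])               -> create_goal(GoalState())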
from pyvmmonitor_core.math_utils import almost_equal, is_clockwise
def test_compute_distance():
from pyvmmonitor_core.math_utils import calculate_distance
assert almost_equal(calculate_distance((10, 10), (0, 0)), 14.142135623730951)
def test_is_clockwise():
assert is_clockwise([(0, 0), (10, 0), (10, 10)])
assert not is_clockwise([(0, 0), (10, 0), (10, -10)])
def test_is_point_close_to_line():
from pyvmmonitor_core.math_utils import is_point_close_to_line
assert is_point_close_to_line((0, 0), [(0, 0), (0, 1)])
assert is_point_close_to_line((0, 0.5), [(0, 0), (0, 1)])
assert is_point_close_to_line((0.1, 0.5), [(0, 0), (0, 1)])
assert not is_point_close_to_line((0, 1.5), [(0, 0), (0, 1)])
assert not is_point_close_to_line((0.5, 1.0), [(0, 0), (0, 1)])
| [
"pyvmmonitor_core.math_utils.is_point_close_to_line",
"pyvmmonitor_core.math_utils.is_clockwise",
"pyvmmonitor_core.math_utils.calculate_distance"
] | [((287, 328), 'pyvmmonitor_core.math_utils.is_clockwise', 'is_clockwise', (['[(0, 0), (10, 0), (10, 10)]'], {}), '([(0, 0), (10, 0), (10, 10)])\n', (299, 328), False, 'from pyvmmonitor_core.math_utils import almost_equal, is_clockwise\n'), ((507, 555), 'pyvmmonitor_core.math_utils.is_point_close_to_line', 'is_point_close_to_line', (['(0, 0)', '[(0, 0), (0, 1)]'], {}), '((0, 0), [(0, 0), (0, 1)])\n', (529, 555), False, 'from pyvmmonitor_core.math_utils import is_point_close_to_line\n'), ((567, 617), 'pyvmmonitor_core.math_utils.is_point_close_to_line', 'is_point_close_to_line', (['(0, 0.5)', '[(0, 0), (0, 1)]'], {}), '((0, 0.5), [(0, 0), (0, 1)])\n', (589, 617), False, 'from pyvmmonitor_core.math_utils import is_point_close_to_line\n'), ((629, 681), 'pyvmmonitor_core.math_utils.is_point_close_to_line', 'is_point_close_to_line', (['(0.1, 0.5)', '[(0, 0), (0, 1)]'], {}), '((0.1, 0.5), [(0, 0), (0, 1)])\n', (651, 681), False, 'from pyvmmonitor_core.math_utils import is_point_close_to_line\n'), ((187, 223), 'pyvmmonitor_core.math_utils.calculate_distance', 'calculate_distance', (['(10, 10)', '(0, 0)'], {}), '((10, 10), (0, 0))\n', (205, 223), False, 'from pyvmmonitor_core.math_utils import calculate_distance\n'), ((345, 387), 'pyvmmonitor_core.math_utils.is_clockwise', 'is_clockwise', (['[(0, 0), (10, 0), (10, -10)]'], {}), '([(0, 0), (10, 0), (10, -10)])\n', (357, 387), False, 'from pyvmmonitor_core.math_utils import almost_equal, is_clockwise\n'), ((698, 748), 'pyvmmonitor_core.math_utils.is_point_close_to_line', 'is_point_close_to_line', (['(0, 1.5)', '[(0, 0), (0, 1)]'], {}), '((0, 1.5), [(0, 0), (0, 1)])\n', (720, 748), False, 'from pyvmmonitor_core.math_utils import is_point_close_to_line\n'), ((764, 816), 'pyvmmonitor_core.math_utils.is_point_close_to_line', 'is_point_close_to_line', (['(0.5, 1.0)', '[(0, 0), (0, 1)]'], {}), '((0.5, 1.0), [(0, 0), (0, 1)])\n', (786, 816), False, 'from pyvmmonitor_core.math_utils import is_point_close_to_line\n')] |
#!python
import string
# Hint: Use these string constants to ignore capitalization and/or punctuation
# string.ascii_lowercase is 'abcdefghijklmnopqrstuvwxyz'
# string.ascii_uppercase is 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
# string.ascii_letters is ascii_lowercase + ascii_uppercase
def is_palindrome(text):
"""A string of characters is a palindrome if it reads the same forwards and
backwards, ignoring punctuation, whitespace, and letter casing."""
# implement is_palindrome_iterative and is_palindrome_recursive below, then
# change this to call your implementation to verify it passes all tests
assert isinstance(text, str), 'input is not a string: {}'.format(text)
# return is_palindrome_iterative(text)
return is_palindrome_recursive(text)
def is_palindrome_iterative(text):
# TODO: implement the is_palindrome function iteratively here
# once implemented, change is_palindrome to call is_palindrome_iterative
# to verify that your iterative implementation passes all tests
cleaned_string = ''
# Cleaning the string to have only characters
for letter in text.lower():
if string.ascii_lowercase.find(letter) >= 0:
cleaned_string += letter
left_index = 0
right_index = len(cleaned_string) - 1
# Loop until reaches the middle because that's when they cross so can stop loop
while left_index < right_index:
if cleaned_string[left_index] != cleaned_string[right_index]:
return False
left_index += 1
right_index -= 1
return True
def is_palindrome_recursive(text, left=None, right=None):
# TODO: implement the is_palindrome function recursively here
# once implemented, change is_palindrome to call is_palindrome_recursive
# to verify that your iterative implementation passes all tests
cleaned_string = ''
# Cleaning the string to have only characters
for letter in text.lower():
if string.ascii_lowercase.find(letter) >= 0:
cleaned_string += letter
    # If left and right are None, set them to 0 and len(cleaned_string) - 1 respectively
if left is None and right is None:
left = 0
right = len(cleaned_string) - 1
# Three base cases
    if len(cleaned_string) == 0:  # True if the string is empty
return True
elif cleaned_string[left] != cleaned_string[right]:
return False
    elif left >= right:  # Reached the midpoint without a mismatch, so it must be a palindrome
return True
else:
return is_palindrome_recursive(cleaned_string, left+1, right-1) # Left pointer moves up, right down
def main():
import sys
args = sys.argv[1:] # Ignore script file name
if len(args) > 0:
for arg in args:
is_pal = is_palindrome(arg)
result = 'PASS' if is_pal else 'FAIL'
is_str = 'is' if is_pal else 'is not'
print('{}: {} {} a palindrome'.format(result, repr(arg), is_str))
else:
print('Usage: {} string1 string2 ... stringN'.format(sys.argv[0]))
print(' checks if each argument given is a palindrome')
if __name__ == '__main__':
main()
| [
"string.ascii_lowercase.find"
] | [((1134, 1169), 'string.ascii_lowercase.find', 'string.ascii_lowercase.find', (['letter'], {}), '(letter)\n', (1161, 1169), False, 'import string\n'), ((1934, 1969), 'string.ascii_lowercase.find', 'string.ascii_lowercase.find', (['letter'], {}), '(letter)\n', (1961, 1969), False, 'import string\n')] |
from django.contrib.auth.decorators import user_passes_test
from django.core.exceptions import PermissionDenied
def group_required(*group_names):
def in_groups(u):
if u.is_authenticated():
if u.groups.filter(name__in=group_names).count() > 0 or u.is_superuser:
return True
raise PermissionDenied("Not part of required group")
actual_decorator = user_passes_test(
in_groups,
login_url='index:staffLogin',
redirect_field_name='next',
)
return actual_decorator
def superuser_required(fn):
def wrapper(*args, **kw):
request = args[0]
if request.user.is_superuser:
return fn(*args, **kw)
raise PermissionDenied("Didnt think so!")
return wrapper | [
"django.contrib.auth.decorators.user_passes_test",
"django.core.exceptions.PermissionDenied"
] | [((399, 488), 'django.contrib.auth.decorators.user_passes_test', 'user_passes_test', (['in_groups'], {'login_url': '"""index:staffLogin"""', 'redirect_field_name': '"""next"""'}), "(in_groups, login_url='index:staffLogin',\n redirect_field_name='next')\n", (415, 488), False, 'from django.contrib.auth.decorators import user_passes_test\n'), ((328, 374), 'django.core.exceptions.PermissionDenied', 'PermissionDenied', (['"""Not part of required group"""'], {}), "('Not part of required group')\n", (344, 374), False, 'from django.core.exceptions import PermissionDenied\n'), ((717, 752), 'django.core.exceptions.PermissionDenied', 'PermissionDenied', (['"""Didnt think so!"""'], {}), "('Didnt think so!')\n", (733, 752), False, 'from django.core.exceptions import PermissionDenied\n')] |
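A hypothetical view module showing how the two decorators above would be applied; the group names and response texts are placeholders.

from django.http import HttpResponse

@group_required('editors', 'moderators')
def edit_page(request):
    return HttpResponse('member of an allowed group')

@superuser_required
def admin_only(request):
    return HttpResponse('superuser confirmed')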
# Import EEZYbotARM library
from easyEEZYbotARM.kinematic_model import EEZYbotARM_Mk2
# Initialise robot arm with initial joint angles
myRobotArm = EEZYbotARM_Mk2(initial_q1=0, initial_q2=70, initial_q3=-100)
myRobotArm.plot() # plot it
# Assign new joint angles
a1 = 20 # joint angle 1
a2 = 80 # joint angle 2
a3 = -90 # joint angle 3
# Compute forward kinematics
x, y, z = myRobotArm.forwardKinematics(q1=a1, q2=a2, q3=a3)
# Print the result
print('With joint angles(degrees) q1={}, q2={}, q3={}, the cartesian position of the end effector(mm) is x={}, y={}, z={}'.format(a1, a2, a3, x, y, z))
# Visualise the new joint angles
myRobotArm.updateJointAngles(q1=a1, q2=a2, q3=a3)
myRobotArm.plot()
| [
"easyEEZYbotARM.kinematic_model.EEZYbotARM_Mk2"
] | [((149, 209), 'easyEEZYbotARM.kinematic_model.EEZYbotARM_Mk2', 'EEZYbotARM_Mk2', ([], {'initial_q1': '(0)', 'initial_q2': '(70)', 'initial_q3': '(-100)'}), '(initial_q1=0, initial_q2=70, initial_q3=-100)\n', (163, 209), False, 'from easyEEZYbotARM.kinematic_model import EEZYbotARM_Mk2\n')] |
import pandas as pd
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder
from sklearn.impute import SimpleImputer
from sklearn.pipeline import Pipeline
from sklearn.base import BaseEstimator, TransformerMixin
class encoder(BaseEstimator, TransformerMixin):
def __init__(self):
pass
def fit(self, X):
return self
def transform(self, X):
transformed = X.copy()
for i in X.columns:
if X[i].nunique()<len(X[i])/5:
if X[i].nunique()>5:
enc = LabelEncoder()
transformed[i] = enc.fit_transform(X[i])
else:
                    # One-hot encode this column into new feature columns, then drop the original
temp_X = pd.DataFrame(OneHotEncoder(drop='first', handle_unknown='ignore').fit_transform(X[[i]]).toarray())
transformed = pd.concat([transformed, temp_X], axis=1)
transformed.drop([i], axis=1, inplace=True)
else:
transformed.drop([i], axis=1, inplace=True)
return transformed
class Imputer(BaseEstimator, TransformerMixin):
def __init__(self):
pass
def fit(self,X):
return self
def transform(self,X):
imputer = SimpleImputer(strategy='most_frequent')
self.X = imputer.fit_transform(X)
self.X = pd.DataFrame(self.X)
return self.X
def Cleaner(df):
# df = pd.read_csv("titanic.csv")
# print(df.head(5))
X,y = df.iloc[:,:-1],df.iloc[:,-1]
num_col = X.select_dtypes(include='number')
cat_col = X.select_dtypes(include='object')
num_features = X.select_dtypes(include='number').columns
cat_features = X.select_dtypes(include='object').columns
#encd= encoder()
#X2 = encd.fit_transform(cat_col)
#print(X2)
num_pipeline = Pipeline([
('imputer', SimpleImputer(strategy='median')),
('std_scaler', StandardScaler())
])
#num_transformed = num_pipeline.fit_transform(num_col)
#print(num_transformed)
cat_pipeline = Pipeline([
('imputer', Imputer()),
('encoder', encoder()),
])
#cat_transformed = cat_pipeline.fit_transform(cat_col)
#print (cat_transformed)
from sklearn.compose import ColumnTransformer
data_pipeline = ColumnTransformer([
('numerical', num_pipeline, num_features),
('categorical', cat_pipeline, cat_features)
])
processed_X = pd.DataFrame(data_pipeline.fit_transform(X))
data = pd.concat([processed_X, y], axis=1)
data.dropna(axis=0, how='any', inplace=True)
return data
| [
"sklearn.preprocessing.LabelEncoder",
"sklearn.preprocessing.OneHotEncoder",
"sklearn.preprocessing.StandardScaler",
"sklearn.impute.SimpleImputer",
"sklearn.compose.ColumnTransformer",
"pandas.DataFrame",
"pandas.concat"
] | [((2427, 2539), 'sklearn.compose.ColumnTransformer', 'ColumnTransformer', (["[('numerical', num_pipeline, num_features), ('categorical', cat_pipeline,\n cat_features)]"], {}), "([('numerical', num_pipeline, num_features), (\n 'categorical', cat_pipeline, cat_features)])\n", (2444, 2539), False, 'from sklearn.compose import ColumnTransformer\n'), ((2669, 2704), 'pandas.concat', 'pd.concat', (['[processed_X, y]'], {'axis': '(1)'}), '([processed_X, y], axis=1)\n', (2678, 2704), True, 'import pandas as pd\n'), ((1401, 1440), 'sklearn.impute.SimpleImputer', 'SimpleImputer', ([], {'strategy': '"""most_frequent"""'}), "(strategy='most_frequent')\n", (1414, 1440), False, 'from sklearn.impute import SimpleImputer\n'), ((1500, 1520), 'pandas.DataFrame', 'pd.DataFrame', (['self.X'], {}), '(self.X)\n', (1512, 1520), True, 'import pandas as pd\n'), ((2005, 2037), 'sklearn.impute.SimpleImputer', 'SimpleImputer', ([], {'strategy': '"""median"""'}), "(strategy='median')\n", (2018, 2037), False, 'from sklearn.impute import SimpleImputer\n'), ((2059, 2075), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (2073, 2075), False, 'from sklearn.preprocessing import StandardScaler\n'), ((629, 643), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (641, 643), False, 'from sklearn.preprocessing import LabelEncoder\n'), ((770, 785), 'sklearn.preprocessing.OneHotEncoder', 'OneHotEncoder', ([], {}), '()\n', (783, 785), False, 'from sklearn.preprocessing import OneHotEncoder\n'), ((1011, 1051), 'pandas.concat', 'pd.concat', (['[transformed, temp_X]'], {'axis': '(1)'}), '([transformed, temp_X], axis=1)\n', (1020, 1051), True, 'import pandas as pd\n'), ((891, 943), 'sklearn.preprocessing.OneHotEncoder', 'OneHotEncoder', ([], {'drop': '"""first"""', 'handle_unknown': '"""ignore"""'}), "(drop='first', handle_unknown='ignore')\n", (904, 943), False, 'from sklearn.preprocessing import OneHotEncoder\n')] |
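A minimal usage sketch for Cleaner, assuming a CSV whose last column is the prediction target; the titanic.csv name is borrowed from the commented-out line inside the function.

if __name__ == '__main__':
    df = pd.read_csv('titanic.csv')
    cleaned = Cleaner(df)
    print(cleaned.head())
    cleaned.to_csv('titanic_clean.csv', index=False)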
from plyfile import PlyData, PlyElement
import numpy as np
import open3d as o3d
import scipy
from typing import Dict, Tuple
from .utils import color_range_rgb_to_8bit_rgb
# TODO: change value names
#################
### ply file ###
#################
class Mesh:
@staticmethod
def read(file_path: str) -> Tuple[np.ndarray, np.ndarray, dict]:
"""read a triangle mesh file.
Args:
file_path: a triangle mesh file (support: obj, ply)
Return:
vertices: vertices xyz of mesh (N, 3)
triangles: triangle edge indices (M, 3)
data: other data
"""
def _obj(file_path):
obj = o3d.io.read_triangle_mesh(file_path)
_vertices = np.asarray(obj.vertices, dtype=np.float32)
_triangles = np.asarray(obj.triangles, dtype=np.uint32)
_data = None
return _vertices, _triangles, _data
def _ply(file_path):
ply = o3d.io.read_triangle_mesh(file_path)
_vertices = np.asarray(ply.vertices, dtype=np.float32)
_triangles = np.asarray(ply.triangles, dtype=np.uint32)
if ply.has_vertex_colors():
_data = np.asarray(ply.vertex_colors)
else:
_data = None
return _vertices, _triangles, _data
support = {
"obj": _obj,
"ply": _ply,
}
extension = file_path.split(".")[-1]
if extension in support:
vertices, triangles, data = support[extension](file_path)
else:
raise NotImplementedError()
return vertices, triangles, data
@staticmethod
def write(filename: str, vertices: np.ndarray, triangles: np.ndarray):
# Vertex
vertex = []
vertex_prop = []
vertex.extend([*vertices.T])
vertex_prop.extend([("x", "f4"), ("y", "f4"), ("z", "f4")])
ply_vertex = np.empty(len(vertices), dtype=vertex_prop)
for i, p in enumerate(vertex_prop):
ply_vertex[p[0]] = vertex[i]
# triangle face
face = []
face_prop = []
face.extend([triangles])
face_prop.extend([("vertex_indices", "i4", (3,))])
ply_face = np.empty(len(triangles), dtype=face_prop)
for i, p in enumerate(face_prop):
ply_face[p[0]] = face[i]
# write ply file
ply = PlyData(
[
PlyElement.describe(ply_vertex, "vertex"),
PlyElement.describe(ply_face, "face"),
],
text=True,
)
ply.write(filename)
class Points:
@staticmethod
def read(filename: str) -> Tuple[np.ndarray, np.ndarray, dict]:
"""read a point cloud file.
Args:
            filename: a point cloud file (support: ply, pcd)
Return:
xyz: xyz (N, 3)
rgb: colors (M, 3)
data: other data
"""
def _ply(filename):
plydata = PlyData.read(filename)
ply_points = plydata["vertex"]
ply_properties = ply_points.data.dtype.names
# XYZ
xyz_properties = ["x", "y", "z"]
xyz = np.array([ply_points[c] for c in xyz_properties], dtype=np.float32).T
# Color
rgb_properties = ["red", "green", "blue"]
rgb = None
if set(rgb_properties) <= set(ply_properties):
rgb = np.array(
[ply_points[c] for c in rgb_properties], dtype=np.uint32
).T
data = {}
for prop in ply_properties:
if not prop in xyz_properties and not prop in rgb_properties:
data[prop] = ply_points[prop]
return xyz, rgb, data
def _pcd(filename):
pcd = o3d.io.read_point_cloud(filename)
xyz = np.asarray(pcd.points, dtype=np.float32)
rgb = np.asarray(pcd.colors, dtype=np.uint32)
data = None
return xyz, rgb, data
support = {"ply": _ply, "pcd": _pcd}
extension = filename.split(".")[-1] # TODO
if extension in support:
xyz, rgb, data = support[extension](filename)
else:
            raise NotImplementedError("Unsupported file extension '{}'. Supported: {}".format(extension, ", ".join(support)))
return xyz, rgb, data
@staticmethod
def write(
filename: str,
xyz: np.ndarray,
colors: np.ndarray = None,
color_range: list = [0, 255],
additional_data: Dict[str, np.ndarray] = None,
):
"""
Write a point cloud into a ply file.
"""
# Point cloud data and properties for writing
points = []
prop = []
# XYZ
points.extend([*xyz.T])
prop.extend([("x", "f4"), ("y", "f4"), ("z", "f4")])
# Color
if colors is not None:
# to 0 ~ 255 color range
colors = color_range_rgb_to_8bit_rgb(colors, color_range)
points.extend([*colors.T])
prop.extend([("red", "u1"), ("green", "u1"), ("blue", "u1")])
# other data
if not additional_data is None:
for key in additional_data:
assert len(additional_data[key].shape) == 1
points.append(additional_data[key])
prop.append((key, additional_data[key].dtype.str))
ply_data = np.empty(len(xyz), dtype=prop)
for i, p in enumerate(prop):
ply_data[p[0]] = points[i]
ply = PlyData([PlyElement.describe(ply_data, "vertex")], text=True)
ply.write(filename)
if __name__ == "__main__":
xyz, colors, _ = Points.read("../../data/pcl_data/biwi_face_database/model.pcd")
| [
"numpy.asarray",
"open3d.io.read_triangle_mesh",
"numpy.array",
"open3d.io.read_point_cloud",
"plyfile.PlyElement.describe",
"plyfile.PlyData.read"
] | [((680, 716), 'open3d.io.read_triangle_mesh', 'o3d.io.read_triangle_mesh', (['file_path'], {}), '(file_path)\n', (705, 716), True, 'import open3d as o3d\n'), ((741, 783), 'numpy.asarray', 'np.asarray', (['obj.vertices'], {'dtype': 'np.float32'}), '(obj.vertices, dtype=np.float32)\n', (751, 783), True, 'import numpy as np\n'), ((809, 851), 'numpy.asarray', 'np.asarray', (['obj.triangles'], {'dtype': 'np.uint32'}), '(obj.triangles, dtype=np.uint32)\n', (819, 851), True, 'import numpy as np\n'), ((973, 1009), 'open3d.io.read_triangle_mesh', 'o3d.io.read_triangle_mesh', (['file_path'], {}), '(file_path)\n', (998, 1009), True, 'import open3d as o3d\n'), ((1034, 1076), 'numpy.asarray', 'np.asarray', (['ply.vertices'], {'dtype': 'np.float32'}), '(ply.vertices, dtype=np.float32)\n', (1044, 1076), True, 'import numpy as np\n'), ((1102, 1144), 'numpy.asarray', 'np.asarray', (['ply.triangles'], {'dtype': 'np.uint32'}), '(ply.triangles, dtype=np.uint32)\n', (1112, 1144), True, 'import numpy as np\n'), ((3007, 3029), 'plyfile.PlyData.read', 'PlyData.read', (['filename'], {}), '(filename)\n', (3019, 3029), False, 'from plyfile import PlyData, PlyElement\n'), ((3840, 3873), 'open3d.io.read_point_cloud', 'o3d.io.read_point_cloud', (['filename'], {}), '(filename)\n', (3863, 3873), True, 'import open3d as o3d\n'), ((3892, 3932), 'numpy.asarray', 'np.asarray', (['pcd.points'], {'dtype': 'np.float32'}), '(pcd.points, dtype=np.float32)\n', (3902, 3932), True, 'import numpy as np\n'), ((3951, 3990), 'numpy.asarray', 'np.asarray', (['pcd.colors'], {'dtype': 'np.uint32'}), '(pcd.colors, dtype=np.uint32)\n', (3961, 3990), True, 'import numpy as np\n'), ((1209, 1238), 'numpy.asarray', 'np.asarray', (['ply.vertex_colors'], {}), '(ply.vertex_colors)\n', (1219, 1238), True, 'import numpy as np\n'), ((2450, 2491), 'plyfile.PlyElement.describe', 'PlyElement.describe', (['ply_vertex', '"""vertex"""'], {}), "(ply_vertex, 'vertex')\n", (2469, 2491), False, 'from plyfile import PlyData, PlyElement\n'), ((2509, 2546), 'plyfile.PlyElement.describe', 'PlyElement.describe', (['ply_face', '"""face"""'], {}), "(ply_face, 'face')\n", (2528, 2546), False, 'from plyfile import PlyData, PlyElement\n'), ((3212, 3279), 'numpy.array', 'np.array', (['[ply_points[c] for c in xyz_properties]'], {'dtype': 'np.float32'}), '([ply_points[c] for c in xyz_properties], dtype=np.float32)\n', (3220, 3279), True, 'import numpy as np\n'), ((5546, 5585), 'plyfile.PlyElement.describe', 'PlyElement.describe', (['ply_data', '"""vertex"""'], {}), "(ply_data, 'vertex')\n", (5565, 5585), False, 'from plyfile import PlyData, PlyElement\n'), ((3461, 3527), 'numpy.array', 'np.array', (['[ply_points[c] for c in rgb_properties]'], {'dtype': 'np.uint32'}), '([ply_points[c] for c in rgb_properties], dtype=np.uint32)\n', (3469, 3527), True, 'import numpy as np\n')] |
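A toy round-trip through Mesh.write and Mesh.read, useful as a smoke test; the filename and coordinates are arbitrary.

vertices = np.array([[0, 0, 0], [1, 0, 0], [0, 1, 0]], dtype=np.float32)  # one right triangle
triangles = np.array([[0, 1, 2]], dtype=np.uint32)
Mesh.write('triangle.ply', vertices, triangles)
v, t, _ = Mesh.read('triangle.ply')
print(v.shape, t.shape)  # -> (3, 3) (1, 3)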
import torch
import logging
# I don't use a hyperparameter dict here
# because several of these modules are used as submodules
# in other repositories.
class GE2E(torch.nn.Module):
def __init__(self, mel_dims, lstm_size, lstm_stacks, embedding_size):
super(GE2E, self).__init__()
self.lstm_stacks = lstm_stacks
self.layer_Dict = torch.nn.ModuleDict()
self.layer_Dict['Prenet'] = Linear(
in_features= mel_dims,
out_features= lstm_size,
)
for index in range(self.lstm_stacks):
self.layer_Dict['LSTM_{}'.format(index)] = torch.nn.LSTM(
input_size= lstm_size,
hidden_size= lstm_size,
bias= True,
batch_first= True
)
for name, parameter in self.layer_Dict['LSTM_{}'.format(index)].named_parameters():
if 'weight' in name:
torch.nn.init.xavier_uniform_(parameter)
elif 'bias' in name:
torch.nn.init.zeros_(parameter)
self.layer_Dict['Linear'] = Linear(
in_features= lstm_size,
out_features= embedding_size,
)
def forward(self, mels, samples= 1):
'''
mels: [Batch, Mel_dim, Time]
'''
x = mels.transpose(2, 1) # [Batch, Time, Mel_dim]
x = self.layer_Dict['Prenet'](x) # [Batch, Time, LSTM_dim]
        if x.device.type == 'cuda': torch.cuda.synchronize()
for index in range(self.lstm_stacks):
self.layer_Dict['LSTM_{}'.format(index)].flatten_parameters()
x = self.layer_Dict['LSTM_{}'.format(index)](x)[0] + \
(x if index < self.lstm_stacks - 1 else 0) # [Batch, Time, LSTM_dim]
            if x.device.type == 'cuda': torch.cuda.synchronize()
x = self.layer_Dict['Linear'](x[:, -1, :]) # [Batch, Emb_dim]
        if x.device.type == 'cuda': torch.cuda.synchronize()
x = x.view(-1, samples, x.size(1)).mean(dim= 1) # [Batch, Samples, Emb_dim] -> [Batch, Emb_dim]
x = torch.nn.functional.normalize(x, p=2, dim= 1)
return x
class Linear(torch.nn.Linear):
def __init__(self, w_init_gain= 'linear', *args, **kwagrs):
self.w_init_gain = w_init_gain
super(Linear, self).__init__(*args, **kwagrs)
def reset_parameters(self):
torch.nn.init.xavier_uniform_(
self.weight,
gain=torch.nn.init.calculate_gain(self.w_init_gain)
)
if not self.bias is None:
torch.nn.init.zeros_(self.bias)
class GE2E_Loss(torch.nn.Module):
def __init__(self, init_weight= 10.0, init_bias= -5.0):
super(GE2E_Loss, self).__init__()
self.weight = torch.nn.Parameter(torch.tensor(init_weight))
self.bias = torch.nn.Parameter(torch.tensor(init_bias))
self.layer_Dict = torch.nn.ModuleDict()
self.layer_Dict['Consine_Similarity'] = torch.nn.CosineSimilarity(dim= 2)
self.layer_Dict['Cross_Entroy_Loss'] = torch.nn.CrossEntropyLoss()
def forward(self, embeddings, pattern_per_Speaker):
'''
embeddings: [Batch, Emb_dim]
The target of softmax is always 0.
'''
x = embeddings.view(
embeddings.size(0) // pattern_per_Speaker,
pattern_per_Speaker,
-1
) # [Speakers, Pattern_per_Speaker, Emb_dim]
centroid_for_Within = x.sum(dim= 1, keepdim= True).expand(-1, x.size(1), -1) # [Speakers, Pattern_per_Speaker, Emb_dim]
centroid_for_Between = x.mean(dim= 1) # [Speakers, Emb_dim]
within_Cosine_Similarities = self.layer_Dict['Consine_Similarity'](x, centroid_for_Within) # [Speakers, Pattern_per_Speaker]
within_Cosine_Similarities = self.weight * within_Cosine_Similarities + self.bias
between_Cosine_Simiarity_Filter = torch.eye(x.size(0)).to(embeddings.device)
between_Cosine_Simiarity_Filter = 1.0 - between_Cosine_Simiarity_Filter.unsqueeze(1).expand(-1, x.size(1), -1) # [speaker, pattern_per_Speaker, speaker]
between_Cosine_Simiarity_Filter = between_Cosine_Simiarity_Filter.bool()
between_Cosine_Simiarities = self.layer_Dict['Consine_Similarity']( #[speaker * pattern_per_Speaker, speaker]
embeddings.unsqueeze(dim= 1).expand(-1, centroid_for_Between.size(0), -1), # [Speakers * Pattern_per_Speaker, Speakers, Emb_dim]
centroid_for_Between.unsqueeze(dim= 0).expand(embeddings.size(0), -1, -1), #[Speakers * Pattern_per_Speaker, Speakers, Emb_dim]
)
between_Cosine_Simiarities = self.weight * between_Cosine_Simiarities + self.bias
between_Cosine_Simiarities = between_Cosine_Simiarities.view(x.size(0), x.size(1), x.size(0)) # [speaker, pattern_per_Speaker, speaker]
between_Cosine_Simiarities = torch.masked_select(between_Cosine_Simiarities, between_Cosine_Simiarity_Filter)
between_Cosine_Simiarities = between_Cosine_Simiarities.view(x.size(0), x.size(1), x.size(0) - 1) # [speaker, pattern_per_Speaker, speaker - 1]
logits = torch.cat([within_Cosine_Similarities.unsqueeze(2), between_Cosine_Simiarities], dim = 2)
logits = logits.view(embeddings.size(0), -1) # [speaker * pattern_per_Speaker, speaker]
labels = torch.zeros(embeddings.size(0), dtype= torch.long).to(embeddings.device)
return self.layer_Dict['Cross_Entroy_Loss'](logits, labels) | [
"torch.nn.CosineSimilarity",
"torch.nn.CrossEntropyLoss",
"torch.nn.LSTM",
"torch.nn.init.xavier_uniform_",
"torch.nn.init.zeros_",
"torch.nn.functional.normalize",
"torch.masked_select",
"torch.cuda.synchronize",
"torch.tensor",
"torch.nn.ModuleDict",
"torch.nn.init.calculate_gain"
] | [((350, 371), 'torch.nn.ModuleDict', 'torch.nn.ModuleDict', ([], {}), '()\n', (369, 371), False, 'import torch\n'), ((2079, 2123), 'torch.nn.functional.normalize', 'torch.nn.functional.normalize', (['x'], {'p': '(2)', 'dim': '(1)'}), '(x, p=2, dim=1)\n', (2108, 2123), False, 'import torch\n'), ((2881, 2902), 'torch.nn.ModuleDict', 'torch.nn.ModuleDict', ([], {}), '()\n', (2900, 2902), False, 'import torch\n'), ((2951, 2983), 'torch.nn.CosineSimilarity', 'torch.nn.CosineSimilarity', ([], {'dim': '(2)'}), '(dim=2)\n', (2976, 2983), False, 'import torch\n'), ((3032, 3059), 'torch.nn.CrossEntropyLoss', 'torch.nn.CrossEntropyLoss', ([], {}), '()\n', (3057, 3059), False, 'import torch\n'), ((4856, 4941), 'torch.masked_select', 'torch.masked_select', (['between_Cosine_Simiarities', 'between_Cosine_Simiarity_Filter'], {}), '(between_Cosine_Simiarities, between_Cosine_Simiarity_Filter\n )\n', (4875, 4941), False, 'import torch\n'), ((605, 696), 'torch.nn.LSTM', 'torch.nn.LSTM', ([], {'input_size': 'lstm_size', 'hidden_size': 'lstm_size', 'bias': '(True)', 'batch_first': '(True)'}), '(input_size=lstm_size, hidden_size=lstm_size, bias=True,\n batch_first=True)\n', (618, 696), False, 'import torch\n'), ((1463, 1487), 'torch.cuda.synchronize', 'torch.cuda.synchronize', ([], {}), '()\n', (1485, 1487), False, 'import torch\n'), ((1937, 1961), 'torch.cuda.synchronize', 'torch.cuda.synchronize', ([], {}), '()\n', (1959, 1961), False, 'import torch\n'), ((2553, 2584), 'torch.nn.init.zeros_', 'torch.nn.init.zeros_', (['self.bias'], {}), '(self.bias)\n', (2573, 2584), False, 'import torch\n'), ((2763, 2788), 'torch.tensor', 'torch.tensor', (['init_weight'], {}), '(init_weight)\n', (2775, 2788), False, 'import torch\n'), ((2829, 2852), 'torch.tensor', 'torch.tensor', (['init_bias'], {}), '(init_bias)\n', (2841, 2852), False, 'import torch\n'), ((1799, 1823), 'torch.cuda.synchronize', 'torch.cuda.synchronize', ([], {}), '()\n', (1821, 1823), False, 'import torch\n'), ((2446, 2492), 'torch.nn.init.calculate_gain', 'torch.nn.init.calculate_gain', (['self.w_init_gain'], {}), '(self.w_init_gain)\n', (2474, 2492), False, 'import torch\n'), ((932, 972), 'torch.nn.init.xavier_uniform_', 'torch.nn.init.xavier_uniform_', (['parameter'], {}), '(parameter)\n', (961, 972), False, 'import torch\n'), ((1030, 1061), 'torch.nn.init.zeros_', 'torch.nn.init.zeros_', (['parameter'], {}), '(parameter)\n', (1050, 1061), False, 'import torch\n')] |
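A shape-level sketch of running the encoder and loss on dummy data; the sizes (80 mel bins, 3 speakers, 4 utterances of 120 frames, 256-dim embeddings) are arbitrary choices rather than values from the original training setup.

model = GE2E(mel_dims=80, lstm_size=256, lstm_stacks=3, embedding_size=256)
criterion = GE2E_Loss()

mels = torch.randn(3 * 4, 80, 120)   # 3 speakers x 4 utterances, shaped [Batch, Mel_dim, Time]
embeddings = model(mels)             # -> [12, 256]
loss = criterion(embeddings, pattern_per_Speaker=4)
print(embeddings.shape, loss.item())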
# This code is all CDS code
# Author: <NAME> (<EMAIL>)
import collections
import numpy as np
import pandas as pd
import pickle
from scipy.stats import zscore, pearsonr
# YOU MAY HAVE TO CHANGE THE BELOW LINES
achilles_dataseta_file = 'data/ceresgeneeffects.csv'
cell_line_info_file = 'data/sample_info.csv'
ccle_expression_file = 'data/CCLE_expression.csv'
ccle_mutation_file = 'data/CCLE_mutations.csv'
# YOU MAY HAVE TO CHANGE THE ABOVE LINES
# Read in CERES data
GENE_EFFECT_MATRIX = pd.read_csv(achilles_dataseta_file, header=0, index_col=0)
# Get all cell lines in CERES data
all_achilles_cell_lines = GENE_EFFECT_MATRIX.columns.tolist()
# Read in the sample info file and subset it to all CEREs cell lines
cell_line_info_df = pd.read_csv(cell_line_info_file, header=0)
cell_line_info_df = cell_line_info_df.loc[cell_line_info_df['CCLE_Name'].isin(all_achilles_cell_lines)]
# Remove cell lines where the lineage is not defined
cell_line_info_df = cell_line_info_df[cell_line_info_df['lineage'].notna()]
# Get all unique lineages
all_lineages = set(cell_line_info_df['lineage'].tolist())
print(f"Number of lineages included: {len(all_lineages)}")
# Dictionaries mapping between a cell line's DepMap_ID (used in CCLE files) and CCLE_Name (used in CEREs files)
ccle_id_to_depmap_id = dict()
depmap_id_to_ccle_id = dict()
# Read through sample info file to populate above dictionaries
for index, row in cell_line_info_df.iterrows():
ccle_id = row["CCLE_Name"]
depmap_id = row['DepMap_ID']
if ccle_id not in ccle_id_to_depmap_id:
ccle_id_to_depmap_id[ccle_id] = depmap_id
depmap_id_to_ccle_id[depmap_id] = ccle_id
else:
print('ERROR!')
quit()
# This next section outputs a pickled dictionary mapping all lineages to all their cell lines for later use
lineage_to_cell_lines = dict()
for lineage in all_lineages:
cell_line_info_df_ = cell_line_info_df.loc[cell_line_info_df['lineage'] == lineage]
lineage_to_cell_lines[lineage] = set(cell_line_info_df_['DepMap_ID'])
with open('achilles_data/lineage_to_cell_lines.pkl', 'wb') as handle:
pickle.dump(lineage_to_cell_lines, handle)
# End section
# Dictionary containing the lineages that have >= 15 cell lines. These are the lineages we will few-shot learn over
lineages_to_few_shot_over = dict()
for lineage in all_lineages:
# Subset dataframe containing sample information to each lineage, and check how many rows (cell lines) are in each lineage
cell_line_info_df_ = cell_line_info_df.loc[cell_line_info_df['lineage'] == lineage]
if cell_line_info_df_.shape[0] >= 15:
lineages_to_few_shot_over[lineage] = set(cell_line_info_df_['DepMap_ID'])
# Rename CERES matrix cell line names to their DepMap_IDs
#-------------------------------- GENE EFFECT MATRIX -------------------------------------------------------------------------------------------------------------------#
GENE_EFFECT_MATRIX.columns = [ccle_id_to_depmap_id[element] for element in GENE_EFFECT_MATRIX.columns]
all_cell_lines_to_include = GENE_EFFECT_MATRIX.columns.tolist()
GENE_EFFECT_MATRIX = GENE_EFFECT_MATRIX.T
# Get a csv listing all genes to run
genes_to_run_df_dict = collections.defaultdict(list)
GENE_EFFECT_MATRIX_Z_SCORE = GENE_EFFECT_MATRIX.apply(zscore)
all_genes_to_test_over = list()
for gene in GENE_EFFECT_MATRIX_Z_SCORE.columns:
column_list = GENE_EFFECT_MATRIX_Z_SCORE[gene].tolist()
cell_lines_with_high_z_score = [element for element in column_list if element >= 6 or element <= -6]
if len(cell_lines_with_high_z_score) > 0:
all_genes_to_test_over.append(gene)
genes_to_run_df_dict["Gene"].append(gene)
genes_to_run_df = pd.DataFrame.from_dict(genes_to_run_df_dict)
genes_to_run_df.to_csv("achilles_data/to_run_genes_feature_selection.csv", header=True, index=False)
#-------------------------------- END GENE EFFECT MATRIX -------------------------------------------------------------------------------------------------------------------#
#--------------------------------- EXPRESSION DATA -------------------------------------------------------------------------------------------------------------------#
# Read in CCLE Expression data and subset to cell lines in CERES file. Rename expression feature columns as well.
CCLE_EXPRESSION_MATRIX = pd.read_csv(ccle_expression_file, header=0, index_col=0)
CCLE_EXPRESSION_MATRIX = CCLE_EXPRESSION_MATRIX.T
CCLE_EXPRESSION_MATRIX = CCLE_EXPRESSION_MATRIX[all_cell_lines_to_include]
CCLE_EXPRESSION_MATRIX = CCLE_EXPRESSION_MATRIX.T
# The following code excludes all genes whose stdev is in the lower 10th percentile, as described in the TCRP paper
expression_stdevs = CCLE_EXPRESSION_MATRIX.std()
tenth_percentile_expression_stdev = np.percentile(expression_stdevs.tolist(), 10)
to_keep_gene_expression_genes = list()
for gene in expression_stdevs.index:
if expression_stdevs[gene] > tenth_percentile_expression_stdev:
to_keep_gene_expression_genes.append(gene)
CCLE_EXPRESSION_MATRIX = CCLE_EXPRESSION_MATRIX[to_keep_gene_expression_genes]
# Section end
# The following code maps HGNC gene symbols, which are used as gene names in the CERES data,
# to DepMap gene IDs, which are the gene names used in the CCLE matrices, and vice versa
hgnc_id_to_depmap_id = dict()
depmap_id_to_hgnc_id = dict()
for depmap_id in CCLE_EXPRESSION_MATRIX.columns:
hgnc_id = depmap_id.split(' ')[0]
if hgnc_id in hgnc_id_to_depmap_id or depmap_id in depmap_id_to_hgnc_id:
print("ERROR!")
quit()
else:
hgnc_id_to_depmap_id[hgnc_id] = depmap_id
depmap_id_to_hgnc_id[depmap_id] = hgnc_id
# Section end
# This next section finds gene expression features that correlate with each gene KO we want to predict
# Co-expression is defined in the TCRP paper as correlation of absolute value > 0.4 with the gene KO's expression
gene_to_coexpression_features = collections.defaultdict(set)
all_genes_to_test_over = [element for element in all_genes_to_test_over if element in hgnc_id_to_depmap_id]
for gene in all_genes_to_test_over:
depmap_id = hgnc_id_to_depmap_id[gene]
gene_expression = CCLE_EXPRESSION_MATRIX[depmap_id]
for column in CCLE_EXPRESSION_MATRIX.columns:
if column != depmap_id:
partner_expression = CCLE_EXPRESSION_MATRIX[column]
corr = pearsonr(gene_expression.tolist(), partner_expression.tolist())[0]
if abs(corr) > 0.4:
gene_to_coexpression_features[gene].add(column)
# Section end
# convert nested defaultdict to regular dict for pickling
def default_to_regular(d):
if isinstance(d, collections.defaultdict):
d = {k: default_to_regular(v) for k, v in d.items()}
return d
gene_to_coexpression_features = default_to_regular(gene_to_coexpression_features)
# output pickled file of all gene expression features to use based on co expression
with open('achilles_data/gene_to_coexpression_features.pkl', 'wb') as handle:
pickle.dump(gene_to_coexpression_features, handle)
# Rename columns in ccle expression matrix so differentiate them from mutation features
CCLE_EXPRESSION_MATRIX.columns = [element + " EXPRESSION" for element in CCLE_EXPRESSION_MATRIX.columns]
#--------------------------------- EXPRESSION END -------------------------------------------------------------------------------------------------------------------#
#--------------------------------- MUTATION DATA -------------------------------------------------------------------------------------------------------------------#
# Read in CCLE Mutation data and subset to rows that contain info for cell lines in CERES and also exclude Silent mutations
CCLE_MUTATION_DATA = pd.read_csv(ccle_mutation_file, header=0, index_col=0, dtype=str)
CCLE_MUTATION_DATA = CCLE_MUTATION_DATA.loc[CCLE_MUTATION_DATA['DepMap_ID'].isin(all_cell_lines_to_include)]
CCLE_MUTATION_DATA = CCLE_MUTATION_DATA.loc[CCLE_MUTATION_DATA['Variant_Classification'] != "Silent"]
# One-hot encode mutation data as described in the TCRP paper
CCLE_MUTATION_MATRIX_DICT = collections.defaultdict(dict)
all_genes_with_mutations = set()
for index, row in CCLE_MUTATION_DATA.iterrows():
depmap_id = row['DepMap_ID']
gene_symbol = row['Hugo_Symbol']
entrez_id = row['Entrez_Gene_Id']
depmap_gene_string = f"{gene_symbol} ({entrez_id})"
all_genes_with_mutations.add(depmap_gene_string)
if depmap_gene_string not in CCLE_MUTATION_MATRIX_DICT[depmap_id]:
CCLE_MUTATION_MATRIX_DICT[depmap_id][depmap_gene_string] = 1.0
# Process data to be fed into pandas to create a dataframe
CCLE_MUTATION_MATRIX_DF_DICT = collections.defaultdict(list)
all_genes_with_mutations = sorted(list(all_genes_with_mutations))
for cell_line in all_cell_lines_to_include:
CCLE_MUTATION_MATRIX_DF_DICT['Cell Line'].append(cell_line)
for gene in all_genes_with_mutations:
if gene in CCLE_MUTATION_MATRIX_DICT[cell_line]:
CCLE_MUTATION_MATRIX_DF_DICT[gene + " MUTATAION"].append(CCLE_MUTATION_MATRIX_DICT[cell_line][gene])
else:
CCLE_MUTATION_MATRIX_DF_DICT[gene + " MUTATAION"].append(0.0)
# Create mutation csv feature matrix file
CCLE_MUTATION_MATRIX = pd.DataFrame.from_dict(CCLE_MUTATION_MATRIX_DF_DICT)
CCLE_MUTATION_MATRIX = CCLE_MUTATION_MATRIX.set_index('Cell Line')
# This next section keeps gene mutation features that have at least 10 counts across all cell lines,
# as described in the TCRP paper
mutation_sums = CCLE_MUTATION_MATRIX.sum()
to_keep_gene_mutation_genes = list()
for gene in mutation_sums.index:
if mutation_sums[gene] >= 10:
to_keep_gene_mutation_genes.append(gene)
CCLE_MUTATION_MATRIX = CCLE_MUTATION_MATRIX[to_keep_gene_mutation_genes]
# Section end
#--------------------------------- MUTATION DATA END -------------------------------------------------------------------------------------------------------------------#
# Merge mutation and expression data into one feature matrix
FEATURE_MATRIX = CCLE_EXPRESSION_MATRIX.merge(CCLE_MUTATION_MATRIX, left_index=True, right_index=True)
# Output data, namely, feature matrix, labels matrix, and the dictionary mapping 9 lineages to few shot over and their cell lines
FEATURE_MATRIX.to_csv("achilles_data/feature_matrix.csv", header=True, index=True)
GENE_EFFECT_MATRIX.to_csv("achilles_data/labels_matrix.csv", header=True, index=True)
with open('achilles_data/lineages_to_fewshot_over.pkl', 'wb') as handle:
pickle.dump(lineages_to_few_shot_over, handle)
| [
"pandas.DataFrame.from_dict",
"pickle.dump",
"collections.defaultdict",
"pandas.read_csv"
] | [((490, 548), 'pandas.read_csv', 'pd.read_csv', (['achilles_dataseta_file'], {'header': '(0)', 'index_col': '(0)'}), '(achilles_dataseta_file, header=0, index_col=0)\n', (501, 548), True, 'import pandas as pd\n'), ((736, 778), 'pandas.read_csv', 'pd.read_csv', (['cell_line_info_file'], {'header': '(0)'}), '(cell_line_info_file, header=0)\n', (747, 778), True, 'import pandas as pd\n'), ((3192, 3221), 'collections.defaultdict', 'collections.defaultdict', (['list'], {}), '(list)\n', (3215, 3221), False, 'import collections\n'), ((3688, 3732), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['genes_to_run_df_dict'], {}), '(genes_to_run_df_dict)\n', (3710, 3732), True, 'import pandas as pd\n'), ((4316, 4372), 'pandas.read_csv', 'pd.read_csv', (['ccle_expression_file'], {'header': '(0)', 'index_col': '(0)'}), '(ccle_expression_file, header=0, index_col=0)\n', (4327, 4372), True, 'import pandas as pd\n'), ((5901, 5929), 'collections.defaultdict', 'collections.defaultdict', (['set'], {}), '(set)\n', (5924, 5929), False, 'import collections\n'), ((7699, 7764), 'pandas.read_csv', 'pd.read_csv', (['ccle_mutation_file'], {'header': '(0)', 'index_col': '(0)', 'dtype': 'str'}), '(ccle_mutation_file, header=0, index_col=0, dtype=str)\n', (7710, 7764), True, 'import pandas as pd\n'), ((8067, 8096), 'collections.defaultdict', 'collections.defaultdict', (['dict'], {}), '(dict)\n', (8090, 8096), False, 'import collections\n'), ((8629, 8658), 'collections.defaultdict', 'collections.defaultdict', (['list'], {}), '(list)\n', (8652, 8658), False, 'import collections\n'), ((9199, 9251), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['CCLE_MUTATION_MATRIX_DF_DICT'], {}), '(CCLE_MUTATION_MATRIX_DF_DICT)\n', (9221, 9251), True, 'import pandas as pd\n'), ((2103, 2145), 'pickle.dump', 'pickle.dump', (['lineage_to_cell_lines', 'handle'], {}), '(lineage_to_cell_lines, handle)\n', (2114, 2145), False, 'import pickle\n'), ((6972, 7022), 'pickle.dump', 'pickle.dump', (['gene_to_coexpression_features', 'handle'], {}), '(gene_to_coexpression_features, handle)\n', (6983, 7022), False, 'import pickle\n'), ((10454, 10500), 'pickle.dump', 'pickle.dump', (['lineages_to_few_shot_over', 'handle'], {}), '(lineages_to_few_shot_over, handle)\n', (10465, 10500), False, 'import pickle\n')] |
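A short sketch of loading the artifacts the script above writes, as a later few-shot experiment would; the paths simply mirror the output calls above.

import pickle
import pandas as pd

feature_matrix = pd.read_csv('achilles_data/feature_matrix.csv', header=0, index_col=0)
labels_matrix = pd.read_csv('achilles_data/labels_matrix.csv', header=0, index_col=0)
with open('achilles_data/lineages_to_fewshot_over.pkl', 'rb') as handle:
    lineages_to_few_shot_over = pickle.load(handle)
print(feature_matrix.shape, labels_matrix.shape, len(lineages_to_few_shot_over))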
from flask import render_template, Response
from app import models
from app.data import FIELD_MAPPING
import ujson
def register(app):
@app.route("/")
def index():
return render_template("index.html")
##
# Data endpoints.
# High-level %'s, used to power the donuts.
@app.route("/data/reports/<report_name>.json")
def report(report_name):
response = Response(ujson.dumps(models.Report.latest().get(report_name, {})))
response.headers['Content-Type'] = 'application/json'
return response
# Detailed data per-domain, used to power the data tables.
@app.route("/data/domains/<report_name>.<ext>")
def domain_report(report_name, ext):
domains = models.Domain.eligible(report_name)
domains = sorted(domains, key=lambda k: k['domain'])
if ext == "json":
response = Response(ujson.dumps({'data': domains}))
response.headers['Content-Type'] = 'application/json'
elif ext == "csv":
response = Response(models.Domain.to_csv(domains, report_name))
response.headers['Content-Type'] = 'text/csv'
return response
@app.route("/https/domains/")
def https_domains():
return render_template("https/domains.html")
@app.route("/https/guidance/")
def https_guide():
return render_template("https/guide.html")
# Sanity-check RSS feed, shows the latest report.
@app.route("/data/reports/feed/")
def report_feed():
return render_template("feed.xml")
@app.errorhandler(404)
def page_not_found(e):
return render_template('404.html'), 404
| [
"flask.render_template",
"app.models.Report.latest",
"ujson.dumps",
"app.models.Domain.to_csv",
"app.models.Domain.eligible"
] | [((189, 218), 'flask.render_template', 'render_template', (['"""index.html"""'], {}), "('index.html')\n", (204, 218), False, 'from flask import render_template, Response\n'), ((726, 761), 'app.models.Domain.eligible', 'models.Domain.eligible', (['report_name'], {}), '(report_name)\n', (748, 761), False, 'from app import models\n'), ((1233, 1270), 'flask.render_template', 'render_template', (['"""https/domains.html"""'], {}), "('https/domains.html')\n", (1248, 1270), False, 'from flask import render_template, Response\n'), ((1346, 1381), 'flask.render_template', 'render_template', (['"""https/guide.html"""'], {}), "('https/guide.html')\n", (1361, 1381), False, 'from flask import render_template, Response\n'), ((1514, 1541), 'flask.render_template', 'render_template', (['"""feed.xml"""'], {}), "('feed.xml')\n", (1529, 1541), False, 'from flask import render_template, Response\n'), ((1611, 1638), 'flask.render_template', 'render_template', (['"""404.html"""'], {}), "('404.html')\n", (1626, 1638), False, 'from flask import render_template, Response\n'), ((880, 910), 'ujson.dumps', 'ujson.dumps', (["{'data': domains}"], {}), "({'data': domains})\n", (891, 910), False, 'import ujson\n'), ((1033, 1075), 'app.models.Domain.to_csv', 'models.Domain.to_csv', (['domains', 'report_name'], {}), '(domains, report_name)\n', (1053, 1075), False, 'from app import models\n'), ((418, 440), 'app.models.Report.latest', 'models.Report.latest', ([], {}), '()\n', (438, 440), False, 'from app import models\n')] |
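A sketch of wiring these routes into an application object; the create-and-run pattern below is generic Flask, and the assumption is that the real project's factory would call register(app) the same way.

from flask import Flask

app = Flask(__name__)
register(app)   # attach the routes defined above

if __name__ == '__main__':
    app.run(debug=True)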
from unittest import TestCase
from array import array
from pymanylinuxdemo.extension import dot
class TestExtension(TestCase):
def test_1(self):
self.assertAlmostEqual(dot([1,2,3], [2,3,4]), 20.0)
def test_2(self):
self.assertAlmostEqual(dot([1,1.5,2.5,4], [-1.0,-2.0,-3.0,4]), 4.5)
def test_3(self):
x = array('d', [1.0, 2.0, 3.14])
self.assertAlmostEqual(dot(x, x), 14.8596)
| [
"array.array",
"pymanylinuxdemo.extension.dot"
] | [((345, 373), 'array.array', 'array', (['"""d"""', '[1.0, 2.0, 3.14]'], {}), "('d', [1.0, 2.0, 3.14])\n", (350, 373), False, 'from array import array\n'), ((182, 207), 'pymanylinuxdemo.extension.dot', 'dot', (['[1, 2, 3]', '[2, 3, 4]'], {}), '([1, 2, 3], [2, 3, 4])\n', (185, 207), False, 'from pymanylinuxdemo.extension import dot\n'), ((265, 309), 'pymanylinuxdemo.extension.dot', 'dot', (['[1, 1.5, 2.5, 4]', '[-1.0, -2.0, -3.0, 4]'], {}), '([1, 1.5, 2.5, 4], [-1.0, -2.0, -3.0, 4])\n', (268, 309), False, 'from pymanylinuxdemo.extension import dot\n'), ((405, 414), 'pymanylinuxdemo.extension.dot', 'dot', (['x', 'x'], {}), '(x, x)\n', (408, 414), False, 'from pymanylinuxdemo.extension import dot\n')] |
#!/usr/bin/python3
#
# MIT License
#
# Copyright (c) 2018 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# This quick hack writes a bash script that uses the PTTools to stitch a
# panorama from all photos in the current directory. It assumes that all of the
# photos are JPGs in the current directory, and that all of the JPGs in the
# current directory are photos for the panorama.
#
# A short (i.e., non-comprehensive) list of choices the output script makes for
# you would include:
# * using CPFind as the control point detector;
# * continuously overwriting the same project file instead of leaving
# multiple project files behind to allow for problem tracing;
# * treating the first file (according to standard lexicographic sort by
# filename) as the reference (or "anchor") image for the purposes of both
# position and exposure, which often winds up not being the best choice;
# * assuming that the input images are taken with a rectilinear lens;
# * running Celeste;
# * running CPFind's version of Celeste instead of Celeste standalone;
# * using the --multirow match detection algorithm, which is generally
# pretty good, but which is not perfect for all possible scenarios, and
# which does unnecessary work in single-row panoramas, sometimes causing
# problems on its own;
# * running CPClean with default parameters;
# * automatically optimizing control points, which is almost certainly a
# good idea in most, but not all, cases;
# * trying to find a suitable projection type, which is often basically
# successful but rarely makes the absolute best possible choice;
# * doing photometric optimization, which wastes time if the shots were
# manually shot at the same exposure;
# * trying to find vertical control points, which is often successful and
# frequently a good idea, though the process can go astray;
# * automatically calculating ostensibly optimal canvas and crop sizes; and
# * using hugin_executor as the stitching program (PTBatchGUI might also be
# used for this purpose).
import os, glob, subprocess
import postprocessor as pprocessor
the_files = sorted(list(set(glob.glob('*JPG') + glob.glob('*jpg'))))
the_files_list = ' '.join(the_files)
project_file = the_files[0] + ".pto"
if the_files:
the_script = """#!/usr/bin/env bash
pto_gen -o %s %s
""" % (project_file, the_files_list)
the_script = the_script + """
cpfind --multirow --celeste -o %s %s
cpclean -o %s %s
linefind -o %s %s
autooptimiser -a -l -s -m -o %s %s
pano_modify --canvas=AUTO --crop=AUTO -o %s %s
# hugin_executor -s %s # Uncomment to stitch the panorama immediately
""" % tuple([project_file] * 11)
script_file_name = os.path.splitext(the_files[0])[0] + '-pano.SH'
with open(script_file_name, mode='w') as script_file:
script_file.write(''.join(the_script))
os.chmod(script_file_name, os.stat(script_file_name).st_mode | 0o111) # or, in Bash, "chmod a+x SCRIPT_FILE_NAME"
# pprocessor.run_shell_scripts() # uncomment this line to automatically run all scripts in the directory.
else:
raise IndexError('You must call panorama_script_generator.py in a folder with at least one .jpg or .JPG file;\n current working directory is %s' % os.getcwd()) | [
"os.stat",
"os.path.splitext",
"glob.glob",
"os.getcwd"
] | [((3818, 3848), 'os.path.splitext', 'os.path.splitext', (['the_files[0]'], {}), '(the_files[0])\n', (3834, 3848), False, 'import os, glob, subprocess\n'), ((4373, 4384), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (4382, 4384), False, 'import os, glob, subprocess\n'), ((3242, 3259), 'glob.glob', 'glob.glob', (['"""*JPG"""'], {}), "('*JPG')\n", (3251, 3259), False, 'import os, glob, subprocess\n'), ((3262, 3279), 'glob.glob', 'glob.glob', (['"""*jpg"""'], {}), "('*jpg')\n", (3271, 3279), False, 'import os, glob, subprocess\n'), ((4006, 4031), 'os.stat', 'os.stat', (['script_file_name'], {}), '(script_file_name)\n', (4013, 4031), False, 'import os, glob, subprocess\n')] |
from rest_framework import generics
from races.serializers import RaceCompleteSerializer
from races.models import Race
class RaceListCreateView(generics.ListCreateAPIView):
queryset = Race.objects.all()
serializer_class = RaceCompleteSerializer
class RaceDeatailsRetrieveUpdateDestroyView(generics.RetrieveUpdateDestroyAPIView):
queryset = Race.objects.all()
serializer_class = RaceCompleteSerializer
lookup_field = "race_uuid"
| [
"races.models.Race.objects.all"
] | [((191, 209), 'races.models.Race.objects.all', 'Race.objects.all', ([], {}), '()\n', (207, 209), False, 'from races.models import Race\n'), ((357, 375), 'races.models.Race.objects.all', 'Race.objects.all', ([], {}), '()\n', (373, 375), False, 'from races.models import Race\n')] |
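A hypothetical urls.py hooking the two views above into Django's router; the route strings and names are placeholders, the races.views module path is an assumption, and the uuid converter matches the race_uuid lookup_field.

from django.urls import path
from races.views import RaceListCreateView, RaceDeatailsRetrieveUpdateDestroyView

urlpatterns = [
    path('races/', RaceListCreateView.as_view(), name='race-list-create'),
    path('races/<uuid:race_uuid>/', RaceDeatailsRetrieveUpdateDestroyView.as_view(), name='race-detail'),
]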
from sqlalchemy import Column, Integer, ForeignKey
from sqlalchemy.orm import relationship
from app.models.base import Base
class PokemonBaseStats(Base):
__tablename__ = 'pokemon_base_stats'
poke_id = Column(Integer, ForeignKey('pokemon.poke_id'), primary_key = True)
hp = Column(Integer, nullable=False)
attack_stat = Column(Integer, nullable=True)
defense_stat = Column(Integer, nullable=True)
special_attack_stat = Column(Integer, nullable=True)
special_defense_stat = Column(Integer, nullable=True)
speed = Column(Integer, nullable=True)
#...
pokemon = relationship("Pokemon", back_populates="pokemon_base_stats")
def __repr__(self):
return "<PokemonBaseStats(poke_id='%d', hp='%d', speed='%d')>" % (
self.poke_id, self.hp, self.speed) | [
"sqlalchemy.orm.relationship",
"sqlalchemy.ForeignKey",
"sqlalchemy.Column"
] | [((288, 319), 'sqlalchemy.Column', 'Column', (['Integer'], {'nullable': '(False)'}), '(Integer, nullable=False)\n', (294, 319), False, 'from sqlalchemy import Column, Integer, ForeignKey\n'), ((338, 368), 'sqlalchemy.Column', 'Column', (['Integer'], {'nullable': '(True)'}), '(Integer, nullable=True)\n', (344, 368), False, 'from sqlalchemy import Column, Integer, ForeignKey\n'), ((388, 418), 'sqlalchemy.Column', 'Column', (['Integer'], {'nullable': '(True)'}), '(Integer, nullable=True)\n', (394, 418), False, 'from sqlalchemy import Column, Integer, ForeignKey\n'), ((445, 475), 'sqlalchemy.Column', 'Column', (['Integer'], {'nullable': '(True)'}), '(Integer, nullable=True)\n', (451, 475), False, 'from sqlalchemy import Column, Integer, ForeignKey\n'), ((503, 533), 'sqlalchemy.Column', 'Column', (['Integer'], {'nullable': '(True)'}), '(Integer, nullable=True)\n', (509, 533), False, 'from sqlalchemy import Column, Integer, ForeignKey\n'), ((546, 576), 'sqlalchemy.Column', 'Column', (['Integer'], {'nullable': '(True)'}), '(Integer, nullable=True)\n', (552, 576), False, 'from sqlalchemy import Column, Integer, ForeignKey\n'), ((601, 661), 'sqlalchemy.orm.relationship', 'relationship', (['"""Pokemon"""'], {'back_populates': '"""pokemon_base_stats"""'}), "('Pokemon', back_populates='pokemon_base_stats')\n", (613, 661), False, 'from sqlalchemy.orm import relationship\n'), ((228, 257), 'sqlalchemy.ForeignKey', 'ForeignKey', (['"""pokemon.poke_id"""'], {}), "('pokemon.poke_id')\n", (238, 257), False, 'from sqlalchemy import Column, Integer, ForeignKey\n')] |
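A small end-to-end sketch: create the schema on an in-memory SQLite engine and insert one stats row. It assumes the related Pokemon model is importable (the app.models.pokemon path is a guess) so the relationship and foreign key can resolve; the stat numbers are placeholders.

from sqlalchemy import create_engine
from sqlalchemy.orm import Session

from app.models.base import Base
from app.models.pokemon import Pokemon  # assumed module path for the related model

engine = create_engine('sqlite:///:memory:')
Base.metadata.create_all(engine)

with Session(engine) as session:
    stats = PokemonBaseStats(poke_id=25, hp=35, attack_stat=55, defense_stat=40,
                             special_attack_stat=50, special_defense_stat=50, speed=90)
    session.add(stats)
    session.commit()
    print(session.get(PokemonBaseStats, 25))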
from piglow import PiGlow
def main():
piglow = PiGlow()
piglow.all(0)
if __name__ == '__main__':
main()
| [
"piglow.PiGlow"
] | [((53, 61), 'piglow.PiGlow', 'PiGlow', ([], {}), '()\n', (59, 61), False, 'from piglow import PiGlow\n')] |
# Copyright 2012-2017, <NAME> and The Tor Project
# See LICENSE for licensing information
import io
import re
import time
import stem
import stem.control
import stem.descriptor.router_status_entry
import stem.response
import stem.version
from stem.util import str_type, int_type, connection, log, str_tools, tor_tools
# Matches keyword=value arguments. This can't be a simple "(.*)=(.*)" pattern
# because some positional arguments, like circuit paths, can have an equal
# sign.
KW_ARG = re.compile('^(.*) ([A-Za-z0-9_]+)=(\S*)$')
QUOTED_KW_ARG = re.compile('^(.*) ([A-Za-z0-9_]+)="(.*)"$')
CELL_TYPE = re.compile('^[a-z0-9_]+$')
PARSE_NEWCONSENSUS_EVENTS = True
class Event(stem.response.ControlMessage):
"""
Base for events we receive asynchronously, as described in section 4.1 of the
`control-spec
<https://gitweb.torproject.org/torspec.git/tree/control-spec.txt>`_.
:var str type: event type
:var int arrived_at: unix timestamp for when the message arrived
:var list positional_args: positional arguments of the event
:var dict keyword_args: key/value arguments of the event
"""
_POSITIONAL_ARGS = () # attribute names for recognized positional arguments
_KEYWORD_ARGS = {} # map of 'keyword => attribute' for recognized attributes
_QUOTED = () # positional arguments that are quoted
_OPTIONALLY_QUOTED = () # positional arguments that may or may not be quoted
_SKIP_PARSING = False # skip parsing contents into our positional_args and keyword_args
_VERSION_ADDED = stem.version.Version('0.1.1.1-alpha') # minimum version with control-spec V1 event support
def _parse_message(self, arrived_at = None):
if arrived_at is None:
arrived_at = int(time.time())
if not str(self).strip():
raise stem.ProtocolError('Received a blank tor event. Events must at the very least have a type.')
self.type = str(self).split()[0]
self.arrived_at = arrived_at
# if we're a recognized event type then translate ourselves into that subclass
if self.type in EVENT_TYPE_TO_CLASS:
self.__class__ = EVENT_TYPE_TO_CLASS[self.type]
self.positional_args = []
self.keyword_args = {}
if not self._SKIP_PARSING:
self._parse_standard_attr()
self._parse()
def _parse_standard_attr(self):
"""
Most events are of the form...
650 *( positional_args ) *( key "=" value )
This parses this standard format, populating our **positional_args** and
**keyword_args** attributes and creating attributes if it's in our event's
**_POSITIONAL_ARGS** and **_KEYWORD_ARGS**.
"""
# Tor events contain some number of positional arguments followed by
# key/value mappings. Parsing keyword arguments from the end until we hit
# something that isn't a key/value mapping. The rest are positional.
content = str(self)
while True:
match = QUOTED_KW_ARG.match(content)
if not match:
match = KW_ARG.match(content)
if match:
content, keyword, value = match.groups()
self.keyword_args[keyword] = value
else:
break
# Setting attributes for the fields that we recognize.
self.positional_args = content.split()[1:]
positional = list(self.positional_args)
for attr_name in self._POSITIONAL_ARGS:
attr_value = None
if positional:
if attr_name in self._QUOTED or (attr_name in self._OPTIONALLY_QUOTED and positional[0].startswith('"')):
attr_values = [positional.pop(0)]
if not attr_values[0].startswith('"'):
raise stem.ProtocolError("The %s value should be quoted, but didn't have a starting quote: %s" % (attr_name, self))
while True:
if not positional:
raise stem.ProtocolError("The %s value should be quoted, but didn't have an ending quote: %s" % (attr_name, self))
attr_values.append(positional.pop(0))
if attr_values[-1].endswith('"'):
break
attr_value = ' '.join(attr_values)[1:-1]
else:
attr_value = positional.pop(0)
setattr(self, attr_name, attr_value)
for controller_attr_name, attr_name in self._KEYWORD_ARGS.items():
setattr(self, attr_name, self.keyword_args.get(controller_attr_name))
def _iso_timestamp(self, timestamp):
"""
Parses an iso timestamp (ISOTime2Frac in the control-spec).
:param str timestamp: timestamp to parse
:returns: **datetime** with the parsed timestamp
:raises: :class:`stem.ProtocolError` if timestamp is malformed
"""
if timestamp is None:
return None
try:
return str_tools._parse_iso_timestamp(timestamp)
except ValueError as exc:
raise stem.ProtocolError('Unable to parse timestamp (%s): %s' % (exc, self))
# method overwritten by our subclasses for special handling that they do
def _parse(self):
pass
def _log_if_unrecognized(self, attr, attr_enum):
"""
Checks if an attribute exists in a given enumeration, logging a message if
it isn't. Attributes can either be for a string or collection of strings
:param str attr: name of the attribute to check
:param stem.util.enum.Enum enum: enumeration to check against
"""
attr_values = getattr(self, attr)
if attr_values:
if isinstance(attr_values, (bytes, str_type)):
attr_values = [attr_values]
for value in attr_values:
if value not in attr_enum:
log_id = 'event.%s.unknown_%s.%s' % (self.type.lower(), attr, value)
unrecognized_msg = "%s event had an unrecognized %s (%s). Maybe a new addition to the control protocol? Full Event: '%s'" % (self.type, attr, value, self)
log.log_once(log_id, log.INFO, unrecognized_msg)
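# Example (illustrative): a raw event line such as
#
#   650 CIRC 4 LAUNCHED PURPOSE=GENERAL
#
# is split by _parse_standard_attr() into positional arguments ('4',
# 'LAUNCHED') and keyword arguments ({'PURPOSE': 'GENERAL'}), after which
# the names listed in _POSITIONAL_ARGS and _KEYWORD_ARGS become attributes
# on the event instance (event.id, event.status, event.purpose).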
class AddrMapEvent(Event):
"""
Event that indicates a new address mapping.
The ADDRMAP event was one of the first Control Protocol V1 events and was
introduced in tor version 0.1.1.1-alpha.
.. versionchanged:: 1.1.0
Added the cached attribute.
:var str hostname: address being resolved
:var str destination: destination of the resolution, this is usually an ip,
but could be a hostname if TrackHostExits is enabled or **NONE** if the
resolution failed
:var datetime expiry: expiration time of the resolution in local time
:var str error: error code if the resolution failed
:var datetime utc_expiry: expiration time of the resolution in UTC
:var bool cached: **True** if the resolution will be kept until it expires,
**False** otherwise or **None** if undefined
"""
_POSITIONAL_ARGS = ('hostname', 'destination', 'expiry')
_KEYWORD_ARGS = {
'error': 'error',
'EXPIRES': 'utc_expiry',
'CACHED': 'cached',
}
  _OPTIONALLY_QUOTED = ('expiry',)
def _parse(self):
if self.destination == '<error>':
self.destination = None
if self.expiry is not None:
if self.expiry == 'NEVER':
self.expiry = None
else:
try:
self.expiry = stem.util.str_tools._parse_timestamp(self.expiry)
except ValueError:
raise stem.ProtocolError('Unable to parse date in ADDRMAP event: %s' % self)
if self.utc_expiry is not None:
self.utc_expiry = stem.util.str_tools._parse_timestamp(self.utc_expiry)
if self.cached is not None:
if self.cached == 'YES':
self.cached = True
elif self.cached == 'NO':
self.cached = False
else:
raise stem.ProtocolError("An ADDRMAP event's CACHED mapping can only be 'YES' or 'NO': %s" % self)
class AuthDirNewDescEvent(Event):
"""
Event specific to directory authorities, indicating that we just received new
descriptors. The descriptor type contained within this event is unspecified
so the descriptor contents are left unparsed.
The AUTHDIR_NEWDESCS event was introduced in tor version 0.1.1.10-alpha and
removed in 0.3.2.1-alpha. (:spec:`6e887ba`)
.. deprecated:: 1.6.0
Tor dropped this event as of version 0.3.2.1. (:spec:`6e887ba`)
:var stem.AuthDescriptorAction action: what is being done with the descriptor
:var str message: explanation of why we chose this action
:var str descriptor: content of the descriptor
"""
_SKIP_PARSING = True
_VERSION_ADDED = stem.version.Requirement.EVENT_AUTHDIR_NEWDESCS
def _parse(self):
lines = str(self).split('\n')
if len(lines) < 5:
raise stem.ProtocolError("AUTHDIR_NEWDESCS events must contain lines for at least the type, action, message, descriptor, and terminating 'OK'")
elif lines[-1] != 'OK':
raise stem.ProtocolError("AUTHDIR_NEWDESCS doesn't end with an 'OK'")
# TODO: For stem 2.0.0 we should consider changing 'descriptor' to a
# ServerDescriptor instance.
self.action = lines[1]
self.message = lines[2]
self.descriptor = '\n'.join(lines[3:-1])
class BandwidthEvent(Event):
"""
Event emitted every second with the bytes sent and received by tor.
The BW event was one of the first Control Protocol V1 events and was
introduced in tor version 0.1.1.1-alpha.
:var long read: bytes received by tor that second
:var long written: bytes sent by tor that second
"""
_POSITIONAL_ARGS = ('read', 'written')
def _parse(self):
if not self.read:
raise stem.ProtocolError('BW event is missing its read value')
elif not self.written:
raise stem.ProtocolError('BW event is missing its written value')
elif not self.read.isdigit() or not self.written.isdigit():
raise stem.ProtocolError("A BW event's bytes sent and received should be a positive numeric value, received: %s" % self)
self.read = int_type(self.read)
self.written = int_type(self.written)
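# Example (illustrative, assuming a local tor with ControlPort 9051 and
# authentication already configured):
#
#   import stem.control
#
#   def print_bw(event):
#     print('read: %i bytes/s, written: %i bytes/s' % (event.read, event.written))
#
#   with stem.control.Controller.from_port(port = 9051) as controller:
#     controller.authenticate()
#     controller.add_event_listener(print_bw, stem.control.EventType.BW)
#     ...  # keep the process alive (e.g. time.sleep) to keep receiving events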
class BuildTimeoutSetEvent(Event):
"""
Event indicating that the timeout value for a circuit has changed. This was
first added in tor version 0.2.2.7.
The BUILDTIMEOUT_SET event was introduced in tor version 0.2.2.7-alpha.
:var stem.TimeoutSetType set_type: way in which the timeout is changing
:var int total_times: circuit build times tor used to determine the timeout
:var int timeout: circuit timeout value in milliseconds
:var int xm: Pareto parameter Xm in milliseconds
:var float alpha: Pareto parameter alpha
:var float quantile: CDF quantile cutoff point
:var float timeout_rate: ratio of circuits that have time out
:var int close_timeout: duration to keep measurement circuits in milliseconds
:var float close_rate: ratio of measurement circuits that are closed
"""
_POSITIONAL_ARGS = ('set_type',)
_KEYWORD_ARGS = {
'TOTAL_TIMES': 'total_times',
'TIMEOUT_MS': 'timeout',
'XM': 'xm',
'ALPHA': 'alpha',
'CUTOFF_QUANTILE': 'quantile',
'TIMEOUT_RATE': 'timeout_rate',
'CLOSE_MS': 'close_timeout',
'CLOSE_RATE': 'close_rate',
}
_VERSION_ADDED = stem.version.Requirement.EVENT_BUILDTIMEOUT_SET
def _parse(self):
# convert our integer and float parameters
for param in ('total_times', 'timeout', 'xm', 'close_timeout'):
param_value = getattr(self, param)
if param_value is not None:
try:
setattr(self, param, int(param_value))
except ValueError:
raise stem.ProtocolError('The %s of a BUILDTIMEOUT_SET should be an integer: %s' % (param, self))
for param in ('alpha', 'quantile', 'timeout_rate', 'close_rate'):
param_value = getattr(self, param)
if param_value is not None:
try:
setattr(self, param, float(param_value))
except ValueError:
raise stem.ProtocolError('The %s of a BUILDTIMEOUT_SET should be a float: %s' % (param, self))
self._log_if_unrecognized('set_type', stem.TimeoutSetType)
class CircuitEvent(Event):
"""
Event that indicates that a circuit has changed.
The fingerprint or nickname values in our 'path' may be **None** if the
VERBOSE_NAMES feature isn't enabled. The option was first introduced in tor
version 0.1.2.2, and on by default after 0.2.2.1.
The CIRC event was one of the first Control Protocol V1 events and was
introduced in tor version 0.1.1.1-alpha.
.. versionchanged:: 1.4.0
Added the socks_username and socks_password attributes which is used for
`stream isolation
<https://gitweb.torproject.org/torspec.git/tree/proposals/171-separate-streams.txt>`_.
:var str id: circuit identifier
:var stem.CircStatus status: reported status for the circuit
:var tuple path: relays involved in the circuit, these are
**(fingerprint, nickname)** tuples
:var tuple build_flags: :data:`~stem.CircBuildFlag` attributes
governing how the circuit is built
:var stem.CircPurpose purpose: purpose that the circuit is intended for
:var stem.HiddenServiceState hs_state: status if this is a hidden service circuit
:var str rend_query: circuit's rendezvous-point if this is hidden service related
:var datetime created: time when the circuit was created or cannibalized
:var stem.CircClosureReason reason: reason for the circuit to be closed
:var stem.CircClosureReason remote_reason: remote side's reason for the circuit to be closed
:var str socks_username: username for using this circuit
:var str socks_password: password for using this circuit
"""
_POSITIONAL_ARGS = ('id', 'status', 'path')
_KEYWORD_ARGS = {
'BUILD_FLAGS': 'build_flags',
'PURPOSE': 'purpose',
'HS_STATE': 'hs_state',
'REND_QUERY': 'rend_query',
'TIME_CREATED': 'created',
'REASON': 'reason',
'REMOTE_REASON': 'remote_reason',
'SOCKS_USERNAME': 'socks_username',
    'SOCKS_PASSWORD': 'socks_password',
}
def _parse(self):
self.path = tuple(stem.control._parse_circ_path(self.path))
self.created = self._iso_timestamp(self.created)
if self.build_flags is not None:
self.build_flags = tuple(self.build_flags.split(','))
if not tor_tools.is_valid_circuit_id(self.id):
raise stem.ProtocolError("Circuit IDs must be one to sixteen alphanumeric characters, got '%s': %s" % (self.id, self))
self._log_if_unrecognized('status', stem.CircStatus)
self._log_if_unrecognized('build_flags', stem.CircBuildFlag)
self._log_if_unrecognized('purpose', stem.CircPurpose)
self._log_if_unrecognized('hs_state', stem.HiddenServiceState)
self._log_if_unrecognized('reason', stem.CircClosureReason)
self._log_if_unrecognized('remote_reason', stem.CircClosureReason)
def _compare(self, other, method):
if not isinstance(other, CircuitEvent):
return False
for attr in ('id', 'status', 'path', 'build_flags', 'purpose', 'hs_state', 'rend_query', 'created', 'reason', 'remote_reason', 'socks_username', 'socks_password'):
my_attr = getattr(self, attr)
other_attr = getattr(other, attr)
# Our id attribute is technically a string, but Tor conventionally uses
# ints. Attempt to handle as ints if that's the case so we get numeric
# ordering.
if attr == 'id' and my_attr and other_attr:
if my_attr.isdigit() and other_attr.isdigit():
my_attr = int(my_attr)
other_attr = int(other_attr)
if my_attr is None:
my_attr = ''
if other_attr is None:
other_attr = ''
if my_attr != other_attr:
return method(my_attr, other_attr)
return True
def __hash__(self):
return hash(str(self).strip())
def __eq__(self, other):
return self._compare(other, lambda s, o: s == o)
def __ne__(self, other):
return not self == other
def __gt__(self, other):
return self._compare(other, lambda s, o: s > o)
def __ge__(self, other):
return self._compare(other, lambda s, o: s >= o)
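# Example (illustrative): with VERBOSE_NAMES enabled each hop arrives as
# '$<fingerprint>=<nickname>' and is parsed into a (fingerprint, nickname)
# tuple; a bare nickname yields (None, nickname) and a bare fingerprint
# yields (fingerprint, None).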
class CircMinorEvent(Event):
"""
Event providing information about minor changes in our circuits. This was
first added in tor version 0.2.3.11.
The CIRC_MINOR event was introduced in tor version 0.2.3.11-alpha.
:var str id: circuit identifier
:var stem.CircEvent event: type of change in the circuit
:var tuple path: relays involved in the circuit, these are
**(fingerprint, nickname)** tuples
:var tuple build_flags: :data:`~stem.CircBuildFlag` attributes
governing how the circuit is built
:var stem.CircPurpose purpose: purpose that the circuit is intended for
:var stem.HiddenServiceState hs_state: status if this is a hidden service circuit
:var str rend_query: circuit's rendezvous-point if this is hidden service related
:var datetime created: time when the circuit was created or cannibalized
:var stem.CircPurpose old_purpose: prior purpose for the circuit
:var stem.HiddenServiceState old_hs_state: prior status as a hidden service circuit
"""
_POSITIONAL_ARGS = ('id', 'event', 'path')
_KEYWORD_ARGS = {
'BUILD_FLAGS': 'build_flags',
'PURPOSE': 'purpose',
'HS_STATE': 'hs_state',
'REND_QUERY': 'rend_query',
'TIME_CREATED': 'created',
'OLD_PURPOSE': 'old_purpose',
'OLD_HS_STATE': 'old_hs_state',
}
_VERSION_ADDED = stem.version.Requirement.EVENT_CIRC_MINOR
def _parse(self):
self.path = tuple(stem.control._parse_circ_path(self.path))
self.created = self._iso_timestamp(self.created)
if self.build_flags is not None:
self.build_flags = tuple(self.build_flags.split(','))
if not tor_tools.is_valid_circuit_id(self.id):
raise stem.ProtocolError("Circuit IDs must be one to sixteen alphanumeric characters, got '%s': %s" % (self.id, self))
self._log_if_unrecognized('event', stem.CircEvent)
self._log_if_unrecognized('build_flags', stem.CircBuildFlag)
self._log_if_unrecognized('purpose', stem.CircPurpose)
self._log_if_unrecognized('hs_state', stem.HiddenServiceState)
self._log_if_unrecognized('old_purpose', stem.CircPurpose)
self._log_if_unrecognized('old_hs_state', stem.HiddenServiceState)
class ClientsSeenEvent(Event):
"""
Periodic event on bridge relays that provides a summary of our users.
The CLIENTS_SEEN event was introduced in tor version 0.2.1.10-alpha.
:var datetime start_time: time in UTC that we started collecting these stats
:var dict locales: mapping of country codes to a rounded count for the number of users
:var dict ip_versions: mapping of ip protocols to a rounded count for the number of users
"""
_KEYWORD_ARGS = {
'TimeStarted': 'start_time',
'CountrySummary': 'locales',
'IPVersions': 'ip_versions',
}
_VERSION_ADDED = stem.version.Requirement.EVENT_CLIENTS_SEEN
def _parse(self):
if self.start_time is not None:
self.start_time = stem.util.str_tools._parse_timestamp(self.start_time)
if self.locales is not None:
locale_to_count = {}
for entry in self.locales.split(','):
if '=' not in entry:
raise stem.ProtocolError("The CLIENTS_SEEN's CountrySummary should be a comma separated listing of '<locale>=<count>' mappings: %s" % self)
locale, count = entry.split('=', 1)
if len(locale) != 2:
raise stem.ProtocolError("Locales should be a two character code, got '%s': %s" % (locale, self))
elif not count.isdigit():
raise stem.ProtocolError('Locale count was non-numeric (%s): %s' % (count, self))
elif locale in locale_to_count:
raise stem.ProtocolError("CountrySummary had multiple mappings for '%s': %s" % (locale, self))
locale_to_count[locale] = int(count)
self.locales = locale_to_count
if self.ip_versions is not None:
protocol_to_count = {}
for entry in self.ip_versions.split(','):
if '=' not in entry:
raise stem.ProtocolError("The CLIENTS_SEEN's IPVersions should be a comma separated listing of '<protocol>=<count>' mappings: %s" % self)
protocol, count = entry.split('=', 1)
if not count.isdigit():
raise stem.ProtocolError('IP protocol count was non-numeric (%s): %s' % (count, self))
protocol_to_count[protocol] = int(count)
self.ip_versions = protocol_to_count
class ConfChangedEvent(Event):
"""
Event that indicates that our configuration changed, either in response to a
SETCONF or RELOAD signal.
The CONF_CHANGED event was introduced in tor version 0.2.3.3-alpha.
:var dict config: mapping of configuration options to their new values
(**None** if the option is being unset)
"""
_SKIP_PARSING = True
_VERSION_ADDED = stem.version.Requirement.EVENT_CONF_CHANGED
def _parse(self):
self.config = {}
# Skip first and last line since they're the header and footer. For
# instance...
#
# 650-CONF_CHANGED
# 650-ExitNodes=caerSidi
# 650-ExitPolicy
# 650-MaxCircuitDirtiness=20
# 650 OK
for line in str(self).splitlines()[1:-1]:
if '=' in line:
key, value = line.split('=', 1)
else:
key, value = line, None
self.config[key] = value
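# For the sample message in the comment above, self.config ends up as
# {'ExitNodes': 'caerSidi', 'ExitPolicy': None, 'MaxCircuitDirtiness': '20'}
# (values stay unparsed strings, and None marks an option being unset).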
class DescChangedEvent(Event):
"""
Event that indicates that our descriptor has changed.
The DESCCHANGED event was introduced in tor version 0.1.2.2-alpha.
"""
_VERSION_ADDED = stem.version.Requirement.EVENT_DESCCHANGED
class GuardEvent(Event):
"""
Event that indicates that our guard relays have changed. The 'endpoint' could
be either a...
* fingerprint
* 'fingerprint=nickname' pair
The derived 'endpoint_*' attributes are generally more useful.
The GUARD event was introduced in tor version 0.1.2.5-alpha.
:var stem.GuardType guard_type: purpose the guard relay is for
:var str endpoint: relay that the event concerns
  :var str endpoint_fingerprint: endpoint's fingerprint
:var str endpoint_nickname: endpoint's nickname if it was provided
:var stem.GuardStatus status: status of the guard relay
"""
_VERSION_ADDED = stem.version.Requirement.EVENT_GUARD
_POSITIONAL_ARGS = ('guard_type', 'endpoint', 'status')
def _parse(self):
self.endpoint_fingerprint = None
self.endpoint_nickname = None
try:
self.endpoint_fingerprint, self.endpoint_nickname = \
stem.control._parse_circ_entry(self.endpoint)
except stem.ProtocolError:
raise stem.ProtocolError("GUARD's endpoint doesn't match a ServerSpec: %s" % self)
self._log_if_unrecognized('guard_type', stem.GuardType)
self._log_if_unrecognized('status', stem.GuardStatus)
class HSDescEvent(Event):
"""
Event triggered when we fetch a hidden service descriptor that currently isn't in our cache.
The HS_DESC event was introduced in tor version 0.2.5.2-alpha.
.. versionadded:: 1.2.0
.. versionchanged:: 1.3.0
Added the reason attribute.
.. versionchanged:: 1.5.0
Added the replica attribute.
:var stem.HSDescAction action: what is happening with the descriptor
:var str address: hidden service address
:var stem.HSAuth authentication: service's authentication method
:var str directory: hidden service directory servicing the request
  :var str directory_fingerprint: hidden service directory's fingerprint
:var str directory_nickname: hidden service directory's nickname if it was provided
:var str descriptor_id: descriptor identifier
:var stem.HSDescReason reason: reason the descriptor failed to be fetched
:var int replica: replica number the descriptor involves
"""
_VERSION_ADDED = stem.version.Requirement.EVENT_HS_DESC
_POSITIONAL_ARGS = ('action', 'address', 'authentication', 'directory', 'descriptor_id')
_KEYWORD_ARGS = {'REASON': 'reason', 'REPLICA': 'replica'}
def _parse(self):
self.directory_fingerprint = None
self.directory_nickname = None
if self.directory != 'UNKNOWN':
try:
self.directory_fingerprint, self.directory_nickname = \
stem.control._parse_circ_entry(self.directory)
except stem.ProtocolError:
raise stem.ProtocolError("HS_DESC's directory doesn't match a ServerSpec: %s" % self)
if self.replica is not None:
if not self.replica.isdigit():
raise stem.ProtocolError('HS_DESC event got a non-numeric replica count (%s): %s' % (self.replica, self))
self.replica = int(self.replica)
self._log_if_unrecognized('action', stem.HSDescAction)
self._log_if_unrecognized('authentication', stem.HSAuth)
class HSDescContentEvent(Event):
"""
Provides the content of hidden service descriptors we fetch.
The HS_DESC_CONTENT event was introduced in tor version 0.2.7.1-alpha.
.. versionadded:: 1.4.0
:var str address: hidden service address
:var str descriptor_id: descriptor identifier
:var str directory: hidden service directory servicing the request
  :var str directory_fingerprint: hidden service directory's fingerprint
:var str directory_nickname: hidden service directory's nickname if it was provided
:var stem.descriptor.hidden_service_descriptor.HiddenServiceDescriptor descriptor: descriptor that was retrieved
"""
_VERSION_ADDED = stem.version.Requirement.EVENT_HS_DESC_CONTENT
_POSITIONAL_ARGS = ('address', 'descriptor_id', 'directory')
def _parse(self):
if self.address == 'UNKNOWN':
self.address = None
self.directory_fingerprint = None
self.directory_nickname = None
try:
self.directory_fingerprint, self.directory_nickname = \
stem.control._parse_circ_entry(self.directory)
except stem.ProtocolError:
raise stem.ProtocolError("HS_DESC_CONTENT's directory doesn't match a ServerSpec: %s" % self)
# skip the first line (our positional arguments) and last ('OK')
desc_content = str_tools._to_bytes('\n'.join(str(self).splitlines()[1:-1]))
self.descriptor = None
if desc_content:
self.descriptor = list(stem.descriptor.hidden_service_descriptor._parse_file(io.BytesIO(desc_content)))[0]
class LogEvent(Event):
"""
Tor logging event. These are the most visible kind of event since, by
default, tor logs at the NOTICE :data:`~stem.Runlevel` to stdout.
The logging events were some of the first Control Protocol V1 events
and were introduced in tor version 0.1.1.1-alpha.
:var stem.Runlevel runlevel: runlevel of the logged message
:var str message: logged message
"""
_SKIP_PARSING = True
def _parse(self):
self.runlevel = self.type
self._log_if_unrecognized('runlevel', stem.Runlevel)
# message is our content, minus the runlevel and ending "OK" if a
# multi-line message
self.message = str(self)[len(self.runlevel) + 1:].rstrip('\nOK')
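# Example (illustrative): for a '650 NOTICE Bootstrapped 100%: Done' event
# the runlevel is 'NOTICE' and the message is 'Bootstrapped 100%: Done'.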
class NetworkStatusEvent(Event):
"""
Event for when our copy of the consensus has changed. This was introduced in
tor version 0.1.2.3.
The NS event was introduced in tor version 0.1.2.3-alpha.
:var list desc: :class:`~stem.descriptor.router_status_entry.RouterStatusEntryV3` for the changed descriptors
"""
_SKIP_PARSING = True
_VERSION_ADDED = stem.version.Requirement.EVENT_NS
def _parse(self):
content = str(self).lstrip('NS\n').rstrip('\nOK')
# TODO: For stem 2.0.0 consider changing 'desc' to 'descriptors' to match
# our other events.
self.desc = list(stem.descriptor.router_status_entry._parse_file(
io.BytesIO(str_tools._to_bytes(content)),
False,
entry_class = stem.descriptor.router_status_entry.RouterStatusEntryV3,
))
class NetworkLivenessEvent(Event):
"""
Event for when the network becomes reachable or unreachable.
The NETWORK_LIVENESS event was introduced in tor version 0.2.7.2-alpha.
.. versionadded:: 1.5.0
:var str status: status of the network ('UP', 'DOWN', or possibly other
statuses in the future)
"""
_VERSION_ADDED = stem.version.Requirement.EVENT_NETWORK_LIVENESS
_POSITIONAL_ARGS = ('status',)
class NewConsensusEvent(Event):
"""
Event for when we have a new consensus. This is similar to
:class:`~stem.response.events.NetworkStatusEvent`, except that it contains
the whole consensus so anything not listed is implicitly no longer
recommended.
The NEWCONSENSUS event was introduced in tor version 0.2.1.13-alpha.
.. versionchanged:: 1.6.0
Added the consensus_content attribute.
.. deprecated:: 1.6.0
In Stem 2.0 we'll remove the desc attribute, so this event only provides
the unparsed consensus. Callers can then parse it if they'd like. To drop
parsing before then you can set...
::
stem.response.events.PARSE_NEWCONSENSUS_EVENTS = False
:var str consensus_content: consensus content
:var list desc: :class:`~stem.descriptor.router_status_entry.RouterStatusEntryV3` for the changed descriptors
"""
_SKIP_PARSING = True
_VERSION_ADDED = stem.version.Requirement.EVENT_NEWCONSENSUS
def _parse(self):
self.consensus_content = str(self).lstrip('NEWCONSENSUS\n').rstrip('\nOK')
# TODO: For stem 2.0.0 consider changing 'desc' to 'descriptors' to match
# our other events.
if PARSE_NEWCONSENSUS_EVENTS:
self.desc = list(stem.descriptor.router_status_entry._parse_file(
io.BytesIO(str_tools._to_bytes(self.consensus_content)),
False,
entry_class = stem.descriptor.router_status_entry.RouterStatusEntryV3,
))
else:
self.desc = None
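# Example (illustrative): callers that only need the raw consensus can turn
# parsing off before subscribing to the event...
#
#   import stem.response.events
#   stem.response.events.PARSE_NEWCONSENSUS_EVENTS = False
#
# ... after which only consensus_content is populated and desc is None.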
class NewDescEvent(Event):
"""
Event that indicates that a new descriptor is available.
The fingerprint or nickname values in our 'relays' may be **None** if the
VERBOSE_NAMES feature isn't enabled. The option was first introduced in tor
version 0.1.2.2, and on by default after 0.2.2.1.
The NEWDESC event was one of the first Control Protocol V1 events and was
introduced in tor version 0.1.1.1-alpha.
:var tuple relays: **(fingerprint, nickname)** tuples for the relays with
new descriptors
"""
def _parse(self):
self.relays = tuple([stem.control._parse_circ_entry(entry) for entry in str(self).split()[1:]])
class ORConnEvent(Event):
"""
Event that indicates a change in a relay connection. The 'endpoint' could be
any of several things including a...
* fingerprint
* nickname
* 'fingerprint=nickname' pair
* address:port
The derived 'endpoint_*' attributes are generally more useful.
The ORCONN event was one of the first Control Protocol V1 events and was
introduced in tor version 0.1.1.1-alpha. Its id attribute was added in
version 0.2.5.2-alpha.
.. versionchanged:: 1.2.0
Added the id attribute.
:var str id: connection identifier
:var str endpoint: relay that the event concerns
  :var str endpoint_fingerprint: endpoint's fingerprint if it was provided
:var str endpoint_nickname: endpoint's nickname if it was provided
:var str endpoint_address: endpoint's address if it was provided
:var int endpoint_port: endpoint's port if it was provided
:var stem.ORStatus status: state of the connection
:var stem.ORClosureReason reason: reason for the connection to be closed
:var int circ_count: number of established and pending circuits
"""
_POSITIONAL_ARGS = ('endpoint', 'status')
_KEYWORD_ARGS = {
'REASON': 'reason',
'NCIRCS': 'circ_count',
'ID': 'id',
}
def _parse(self):
self.endpoint_fingerprint = None
self.endpoint_nickname = None
self.endpoint_address = None
self.endpoint_port = None
try:
self.endpoint_fingerprint, self.endpoint_nickname = \
stem.control._parse_circ_entry(self.endpoint)
except stem.ProtocolError:
if ':' not in self.endpoint:
raise stem.ProtocolError("ORCONN endpoint is neither a relay nor 'address:port': %s" % self)
address, port = self.endpoint.rsplit(':', 1)
if not connection.is_valid_port(port):
raise stem.ProtocolError("ORCONN's endpoint location's port is invalid: %s" % self)
self.endpoint_address = address
self.endpoint_port = int(port)
if self.circ_count is not None:
if not self.circ_count.isdigit():
raise stem.ProtocolError('ORCONN event got a non-numeric circuit count (%s): %s' % (self.circ_count, self))
self.circ_count = int(self.circ_count)
if self.id and not tor_tools.is_valid_connection_id(self.id):
raise stem.ProtocolError("Connection IDs must be one to sixteen alphanumeric characters, got '%s': %s" % (self.id, self))
self._log_if_unrecognized('status', stem.ORStatus)
self._log_if_unrecognized('reason', stem.ORClosureReason)
class SignalEvent(Event):
"""
Event that indicates that tor has received and acted upon a signal being sent
to the process. As of tor version 0.2.4.6 the only signals conveyed by this
event are...
* RELOAD
* DUMP
* DEBUG
* NEWNYM
* CLEARDNSCACHE
The SIGNAL event was introduced in tor version 0.2.3.1-alpha.
:var stem.Signal signal: signal that tor received
"""
_POSITIONAL_ARGS = ('signal',)
_VERSION_ADDED = stem.version.Requirement.EVENT_SIGNAL
def _parse(self):
    # log if we received an unrecognized signal
expected_signals = (
stem.Signal.RELOAD,
stem.Signal.DUMP,
stem.Signal.DEBUG,
stem.Signal.NEWNYM,
stem.Signal.CLEARDNSCACHE,
)
self._log_if_unrecognized('signal', expected_signals)
class StatusEvent(Event):
"""
Notification of a change in tor's state. These are generally triggered for
the same sort of things as log messages of the NOTICE level or higher.
However, unlike :class:`~stem.response.events.LogEvent` these contain well
formed data.
The STATUS_GENERAL, STATUS_CLIENT, STATUS_SERVER events were introduced
in tor version 0.1.2.3-alpha.
:var stem.StatusType status_type: category of the status event
:var stem.Runlevel runlevel: runlevel of the logged message
:var str action: activity that caused this message
:var dict arguments: attributes about the event
"""
_POSITIONAL_ARGS = ('runlevel', 'action')
_VERSION_ADDED = stem.version.Requirement.EVENT_STATUS
def _parse(self):
if self.type == 'STATUS_GENERAL':
self.status_type = stem.StatusType.GENERAL
elif self.type == 'STATUS_CLIENT':
self.status_type = stem.StatusType.CLIENT
elif self.type == 'STATUS_SERVER':
self.status_type = stem.StatusType.SERVER
else:
raise ValueError("BUG: Unrecognized status type (%s), likely an EVENT_TYPE_TO_CLASS addition without revising how 'status_type' is assigned." % self.type)
# Just an alias for our parent class' keyword_args since that already
# parses these for us. Unlike our other event types Tor commonly supplies
# arbitrary key/value pairs for these, so making an alias here to better
# draw attention that the StatusEvent will likely have them.
self.arguments = self.keyword_args
self._log_if_unrecognized('runlevel', stem.Runlevel)
class StreamEvent(Event):
"""
Event that indicates that a stream has changed.
The STREAM event was one of the first Control Protocol V1 events and was
introduced in tor version 0.1.1.1-alpha.
:var str id: stream identifier
:var stem.StreamStatus status: reported status for the stream
  :var str circ_id: circuit that the stream is attached to, this is **None** if
the stream is unattached
:var str target: destination of the stream
:var str target_address: destination address (ip, hostname, or '(Tor_internal)')
:var int target_port: destination port
:var stem.StreamClosureReason reason: reason for the stream to be closed
:var stem.StreamClosureReason remote_reason: remote side's reason for the stream to be closed
:var stem.StreamSource source: origin of the REMAP request
:var str source_addr: requester of the connection
:var str source_address: requester address (ip or hostname)
:var int source_port: requester port
:var stem.StreamPurpose purpose: purpose for the stream
"""
_POSITIONAL_ARGS = ('id', 'status', 'circ_id', 'target')
_KEYWORD_ARGS = {
'REASON': 'reason',
'REMOTE_REASON': 'remote_reason',
'SOURCE': 'source',
'SOURCE_ADDR': 'source_addr',
'PURPOSE': 'purpose',
}
def _parse(self):
if self.target is None:
raise stem.ProtocolError("STREAM event didn't have a target: %s" % self)
else:
if ':' not in self.target:
raise stem.ProtocolError("Target location must be of the form 'address:port': %s" % self)
address, port = self.target.rsplit(':', 1)
if not connection.is_valid_port(port, allow_zero = True):
raise stem.ProtocolError("Target location's port is invalid: %s" % self)
self.target_address = address
self.target_port = int(port)
if self.source_addr is None:
self.source_address = None
self.source_port = None
else:
if ':' not in self.source_addr:
raise stem.ProtocolError("Source location must be of the form 'address:port': %s" % self)
address, port = self.source_addr.rsplit(':', 1)
if not connection.is_valid_port(port, allow_zero = True):
raise stem.ProtocolError("Source location's port is invalid: %s" % self)
self.source_address = address
self.source_port = int(port)
# spec specifies a circ_id of zero if the stream is unattached
if self.circ_id == '0':
self.circ_id = None
self._log_if_unrecognized('reason', stem.StreamClosureReason)
self._log_if_unrecognized('remote_reason', stem.StreamClosureReason)
self._log_if_unrecognized('purpose', stem.StreamPurpose)
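# Example (illustrative): a '650 STREAM 18 NEW 0 example.com:80 PURPOSE=USER'
# event yields id '18', status 'NEW', circ_id None (zero means unattached),
# target_address 'example.com', target_port 80, and purpose 'USER'.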
class StreamBwEvent(Event):
"""
Event (emitted approximately every second) with the bytes sent and received
by the application since the last such event on this stream.
The STREAM_BW event was introduced in tor version 0.1.2.8-beta.
.. versionchanged:: 1.6.0
Added the time attribute.
:var str id: stream identifier
:var long written: bytes sent by the application
:var long read: bytes received by the application
:var datetime time: time when the measurement was recorded
"""
_POSITIONAL_ARGS = ('id', 'written', 'read', 'time')
_VERSION_ADDED = stem.version.Requirement.EVENT_STREAM_BW
def _parse(self):
if not tor_tools.is_valid_stream_id(self.id):
raise stem.ProtocolError("Stream IDs must be one to sixteen alphanumeric characters, got '%s': %s" % (self.id, self))
elif not self.written:
raise stem.ProtocolError('STREAM_BW event is missing its written value')
elif not self.read:
raise stem.ProtocolError('STREAM_BW event is missing its read value')
elif not self.read.isdigit() or not self.written.isdigit():
raise stem.ProtocolError("A STREAM_BW event's bytes sent and received should be a positive numeric value, received: %s" % self)
self.read = int_type(self.read)
self.written = int_type(self.written)
self.time = self._iso_timestamp(self.time)
class TransportLaunchedEvent(Event):
"""
Event triggered when a pluggable transport is launched.
The TRANSPORT_LAUNCHED event was introduced in tor version 0.2.5.0-alpha.
.. versionadded:: 1.1.0
:var str type: 'server' or 'client'
:var str name: name of the pluggable transport
:var str address: IPv4 or IPv6 address where the transport is listening for
connections
:var int port: port where the transport is listening for connections
"""
_POSITIONAL_ARGS = ('type', 'name', 'address', 'port')
_VERSION_ADDED = stem.version.Requirement.EVENT_TRANSPORT_LAUNCHED
def _parse(self):
if self.type not in ('server', 'client'):
raise stem.ProtocolError("Transport type should either be 'server' or 'client': %s" % self)
if not connection.is_valid_ipv4_address(self.address) and \
not connection.is_valid_ipv6_address(self.address):
raise stem.ProtocolError("Transport address isn't a valid IPv4 or IPv6 address: %s" % self)
if not connection.is_valid_port(self.port):
raise stem.ProtocolError('Transport port is invalid: %s' % self)
self.port = int(self.port)
class ConnectionBandwidthEvent(Event):
"""
Event emitted every second with the bytes sent and received by tor on a
per-connection basis.
The CONN_BW event was introduced in tor version 0.2.5.2-alpha.
.. versionadded:: 1.2.0
.. versionchanged:: 1.6.0
    Renamed 'type' attribute to 'conn_type' so it wouldn't override the parent
    class attribute of the same name.
:var str id: connection identifier
:var stem.ConnectionType conn_type: connection type
:var long read: bytes received by tor that second
:var long written: bytes sent by tor that second
"""
_KEYWORD_ARGS = {
'ID': 'id',
'TYPE': 'conn_type',
'READ': 'read',
'WRITTEN': 'written',
}
_VERSION_ADDED = stem.version.Requirement.EVENT_CONN_BW
def _parse(self):
if not self.id:
raise stem.ProtocolError('CONN_BW event is missing its id')
elif not self.conn_type:
raise stem.ProtocolError('CONN_BW event is missing its connection type')
elif not self.read:
raise stem.ProtocolError('CONN_BW event is missing its read value')
elif not self.written:
raise stem.ProtocolError('CONN_BW event is missing its written value')
elif not self.read.isdigit() or not self.written.isdigit():
raise stem.ProtocolError("A CONN_BW event's bytes sent and received should be a positive numeric value, received: %s" % self)
elif not tor_tools.is_valid_connection_id(self.id):
raise stem.ProtocolError("Connection IDs must be one to sixteen alphanumeric characters, got '%s': %s" % (self.id, self))
self.read = int_type(self.read)
self.written = int_type(self.written)
self._log_if_unrecognized('conn_type', stem.ConnectionType)
class CircuitBandwidthEvent(Event):
"""
Event emitted every second with the bytes sent and received by tor on a
per-circuit basis.
The CIRC_BW event was introduced in tor version 0.2.5.2-alpha.
.. versionadded:: 1.2.0
.. versionchanged:: 1.6.0
Added the time attribute.
:var str id: circuit identifier
:var long read: bytes received by tor that second
:var long written: bytes sent by tor that second
:var datetime time: time when the measurement was recorded
"""
_KEYWORD_ARGS = {
'ID': 'id',
'READ': 'read',
'WRITTEN': 'written',
'TIME': 'time',
}
_VERSION_ADDED = stem.version.Requirement.EVENT_CIRC_BW
def _parse(self):
if not self.id:
raise stem.ProtocolError('CIRC_BW event is missing its id')
elif not self.read:
raise stem.ProtocolError('CIRC_BW event is missing its read value')
elif not self.written:
raise stem.ProtocolError('CIRC_BW event is missing its written value')
elif not self.read.isdigit() or not self.written.isdigit():
raise stem.ProtocolError("A CIRC_BW event's bytes sent and received should be a positive numeric value, received: %s" % self)
elif not tor_tools.is_valid_circuit_id(self.id):
raise stem.ProtocolError("Circuit IDs must be one to sixteen alphanumeric characters, got '%s': %s" % (self.id, self))
self.read = int_type(self.read)
self.written = int_type(self.written)
self.time = self._iso_timestamp(self.time)
class CellStatsEvent(Event):
"""
Event emitted every second with a count of the number of cells types broken
down by the circuit. **These events are only emitted if TestingTorNetwork is
set.**
The CELL_STATS event was introduced in tor version 0.2.5.2-alpha.
.. versionadded:: 1.2.0
:var str id: circuit identifier
:var str inbound_queue: inbound queue identifier
:var str inbound_connection: inbound connection identifier
:var dict inbound_added: mapping of added inbound cell types to their count
:var dict inbound_removed: mapping of removed inbound cell types to their count
:var dict inbound_time: mapping of inbound cell types to the time they took to write in milliseconds
:var str outbound_queue: outbound queue identifier
:var str outbound_connection: outbound connection identifier
:var dict outbound_added: mapping of added outbound cell types to their count
:var dict outbound_removed: mapping of removed outbound cell types to their count
:var dict outbound_time: mapping of outbound cell types to the time they took to write in milliseconds
"""
_KEYWORD_ARGS = {
'ID': 'id',
'InboundQueue': 'inbound_queue',
'InboundConn': 'inbound_connection',
'InboundAdded': 'inbound_added',
'InboundRemoved': 'inbound_removed',
'InboundTime': 'inbound_time',
'OutboundQueue': 'outbound_queue',
'OutboundConn': 'outbound_connection',
'OutboundAdded': 'outbound_added',
'OutboundRemoved': 'outbound_removed',
'OutboundTime': 'outbound_time',
}
_VERSION_ADDED = stem.version.Requirement.EVENT_CELL_STATS
def _parse(self):
if self.id and not tor_tools.is_valid_circuit_id(self.id):
raise stem.ProtocolError("Circuit IDs must be one to sixteen alphanumeric characters, got '%s': %s" % (self.id, self))
elif self.inbound_queue and not tor_tools.is_valid_circuit_id(self.inbound_queue):
raise stem.ProtocolError("Queue IDs must be one to sixteen alphanumeric characters, got '%s': %s" % (self.inbound_queue, self))
elif self.inbound_connection and not tor_tools.is_valid_connection_id(self.inbound_connection):
raise stem.ProtocolError("Connection IDs must be one to sixteen alphanumeric characters, got '%s': %s" % (self.inbound_connection, self))
elif self.outbound_queue and not tor_tools.is_valid_circuit_id(self.outbound_queue):
raise stem.ProtocolError("Queue IDs must be one to sixteen alphanumeric characters, got '%s': %s" % (self.outbound_queue, self))
elif self.outbound_connection and not tor_tools.is_valid_connection_id(self.outbound_connection):
raise stem.ProtocolError("Connection IDs must be one to sixteen alphanumeric characters, got '%s': %s" % (self.outbound_connection, self))
self.inbound_added = _parse_cell_type_mapping(self.inbound_added)
self.inbound_removed = _parse_cell_type_mapping(self.inbound_removed)
self.inbound_time = _parse_cell_type_mapping(self.inbound_time)
self.outbound_added = _parse_cell_type_mapping(self.outbound_added)
self.outbound_removed = _parse_cell_type_mapping(self.outbound_removed)
self.outbound_time = _parse_cell_type_mapping(self.outbound_time)
class TokenBucketEmptyEvent(Event):
"""
Event emitted when refilling an empty token bucket. **These events are only
emitted if TestingTorNetwork is set.**
The TB_EMPTY event was introduced in tor version 0.2.5.2-alpha.
.. versionadded:: 1.2.0
:var stem.TokenBucket bucket: bucket being refilled
:var str id: connection identifier
:var int read: time in milliseconds since the read bucket was last refilled
:var int written: time in milliseconds since the write bucket was last refilled
:var int last_refill: time in milliseconds the bucket has been empty since last refilled
"""
_POSITIONAL_ARGS = ('bucket',)
_KEYWORD_ARGS = {
'ID': 'id',
'READ': 'read',
'WRITTEN': 'written',
'LAST': 'last_refill',
}
_VERSION_ADDED = stem.version.Requirement.EVENT_TB_EMPTY
def _parse(self):
if self.id and not tor_tools.is_valid_connection_id(self.id):
raise stem.ProtocolError("Connection IDs must be one to sixteen alphanumeric characters, got '%s': %s" % (self.id, self))
elif not self.read.isdigit():
raise stem.ProtocolError("A TB_EMPTY's READ value should be a positive numeric value, received: %s" % self)
elif not self.written.isdigit():
raise stem.ProtocolError("A TB_EMPTY's WRITTEN value should be a positive numeric value, received: %s" % self)
elif not self.last_refill.isdigit():
raise stem.ProtocolError("A TB_EMPTY's LAST value should be a positive numeric value, received: %s" % self)
self.read = int(self.read)
self.written = int(self.written)
self.last_refill = int(self.last_refill)
self._log_if_unrecognized('bucket', stem.TokenBucket)
def _parse_cell_type_mapping(mapping):
"""
Parses a mapping of the form...
key1:value1,key2:value2...
... in which keys are strings and values are integers.
:param str mapping: value to be parsed
:returns: dict of **str => int** mappings
  :raises: **stem.ProtocolError** if unable to parse the mapping
"""
if mapping is None:
return None
results = {}
for entry in mapping.split(','):
if ':' not in entry:
raise stem.ProtocolError("Mappings are expected to be of the form 'key:value', got '%s': %s" % (entry, mapping))
key, value = entry.rsplit(':', 1)
if not CELL_TYPE.match(key):
raise stem.ProtocolError("Key had invalid characters, got '%s': %s" % (key, mapping))
elif not value.isdigit():
raise stem.ProtocolError("Values should just be integers, got '%s': %s" % (value, mapping))
results[key] = int(value)
return results
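# Example (illustrative): _parse_cell_type_mapping('padding:3,relay:675')
# returns {'padding': 3, 'relay': 675}, while an entry without a colon or
# with a non-numeric count raises a stem.ProtocolError.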
EVENT_TYPE_TO_CLASS = {
'ADDRMAP': AddrMapEvent,
'AUTHDIR_NEWDESCS': AuthDirNewDescEvent,
'BUILDTIMEOUT_SET': BuildTimeoutSetEvent,
'BW': BandwidthEvent,
'CELL_STATS': CellStatsEvent,
'CIRC': CircuitEvent,
'CIRC_BW': CircuitBandwidthEvent,
'CIRC_MINOR': CircMinorEvent,
'CLIENTS_SEEN': ClientsSeenEvent,
'CONF_CHANGED': ConfChangedEvent,
'CONN_BW': ConnectionBandwidthEvent,
'DEBUG': LogEvent,
'DESCCHANGED': DescChangedEvent,
'ERR': LogEvent,
'GUARD': GuardEvent,
'HS_DESC': HSDescEvent,
'HS_DESC_CONTENT': HSDescContentEvent,
'INFO': LogEvent,
'NETWORK_LIVENESS': NetworkLivenessEvent,
'NEWCONSENSUS': NewConsensusEvent,
'NEWDESC': NewDescEvent,
'NOTICE': LogEvent,
'NS': NetworkStatusEvent,
'ORCONN': ORConnEvent,
'SIGNAL': SignalEvent,
'STATUS_CLIENT': StatusEvent,
'STATUS_GENERAL': StatusEvent,
'STATUS_SERVER': StatusEvent,
'STREAM': StreamEvent,
'STREAM_BW': StreamBwEvent,
'TB_EMPTY': TokenBucketEmptyEvent,
'TRANSPORT_LAUNCHED': TransportLaunchedEvent,
'WARN': LogEvent,
# accounting for a bug in tor 0.2.0.22
'STATUS_SEVER': StatusEvent,
}
| [
"stem.util.str_tools._parse_timestamp",
"stem.util.tor_tools.is_valid_connection_id",
"re.compile",
"stem.util.connection.is_valid_port",
"stem.control._parse_circ_entry",
"stem.util.connection.is_valid_ipv6_address",
"stem.util.log.log_once",
"stem.util.str_tools._to_bytes",
"stem.util.tor_tools.is_valid_stream_id",
"stem.util.connection.is_valid_ipv4_address",
"io.BytesIO",
"stem.util.int_type",
"stem.util.tor_tools.is_valid_circuit_id",
"stem.version.Version",
"stem.ProtocolError",
"time.time",
"stem.control._parse_circ_path",
"stem.util.str_tools._parse_iso_timestamp"
] | [((493, 536), 're.compile', 're.compile', (['"""^(.*) ([A-Za-z0-9_]+)=(\\\\S*)$"""'], {}), "('^(.*) ([A-Za-z0-9_]+)=(\\\\S*)$')\n", (503, 536), False, 'import re\n'), ((552, 595), 're.compile', 're.compile', (['"""^(.*) ([A-Za-z0-9_]+)="(.*)"$"""'], {}), '(\'^(.*) ([A-Za-z0-9_]+)="(.*)"$\')\n', (562, 595), False, 'import re\n'), ((608, 634), 're.compile', 're.compile', (['"""^[a-z0-9_]+$"""'], {}), "('^[a-z0-9_]+$')\n", (618, 634), False, 'import re\n'), ((1535, 1572), 'stem.version.Version', 'stem.version.Version', (['"""0.1.1.1-alpha"""'], {}), "('0.1.1.1-alpha')\n", (1555, 1572), False, 'import stem\n'), ((9642, 9661), 'stem.util.int_type', 'int_type', (['self.read'], {}), '(self.read)\n', (9650, 9661), False, 'from stem.util import str_type, int_type, connection, log, str_tools, tor_tools\n'), ((9681, 9703), 'stem.util.int_type', 'int_type', (['self.written'], {}), '(self.written)\n', (9689, 9703), False, 'from stem.util import str_type, int_type, connection, log, str_tools, tor_tools\n'), ((38358, 38377), 'stem.util.int_type', 'int_type', (['self.read'], {}), '(self.read)\n', (38366, 38377), False, 'from stem.util import str_type, int_type, connection, log, str_tools, tor_tools\n'), ((38397, 38419), 'stem.util.int_type', 'int_type', (['self.written'], {}), '(self.written)\n', (38405, 38419), False, 'from stem.util import str_type, int_type, connection, log, str_tools, tor_tools\n'), ((41172, 41191), 'stem.util.int_type', 'int_type', (['self.read'], {}), '(self.read)\n', (41180, 41191), False, 'from stem.util import str_type, int_type, connection, log, str_tools, tor_tools\n'), ((41211, 41233), 'stem.util.int_type', 'int_type', (['self.written'], {}), '(self.written)\n', (41219, 41233), False, 'from stem.util import str_type, int_type, connection, log, str_tools, tor_tools\n'), ((42662, 42681), 'stem.util.int_type', 'int_type', (['self.read'], {}), '(self.read)\n', (42670, 42681), False, 'from stem.util import str_type, int_type, connection, log, str_tools, tor_tools\n'), ((42701, 42723), 'stem.util.int_type', 'int_type', (['self.written'], {}), '(self.written)\n', (42709, 42723), False, 'from stem.util import str_type, int_type, connection, log, str_tools, tor_tools\n'), ((1781, 1878), 'stem.ProtocolError', 'stem.ProtocolError', (['"""Received a blank tor event. Events must at the very least have a type."""'], {}), "(\n 'Received a blank tor event. 
Events must at the very least have a type.')\n", (1799, 1878), False, 'import stem\n'), ((4646, 4687), 'stem.util.str_tools._parse_iso_timestamp', 'str_tools._parse_iso_timestamp', (['timestamp'], {}), '(timestamp)\n', (4676, 4687), False, 'from stem.util import str_type, int_type, connection, log, str_tools, tor_tools\n'), ((7230, 7283), 'stem.util.str_tools._parse_timestamp', 'stem.util.str_tools._parse_timestamp', (['self.utc_expiry'], {}), '(self.utc_expiry)\n', (7266, 7283), False, 'import stem\n'), ((8399, 8546), 'stem.ProtocolError', 'stem.ProtocolError', (['"""AUTHDIR_NEWDESCS events must contain lines for at least the type, action, message, descriptor, and terminating \'OK\'"""'], {}), '(\n "AUTHDIR_NEWDESCS events must contain lines for at least the type, action, message, descriptor, and terminating \'OK\'"\n )\n', (8417, 8546), False, 'import stem\n'), ((9278, 9334), 'stem.ProtocolError', 'stem.ProtocolError', (['"""BW event is missing its read value"""'], {}), "('BW event is missing its read value')\n", (9296, 9334), False, 'import stem\n'), ((13639, 13679), 'stem.control._parse_circ_path', 'stem.control._parse_circ_path', (['self.path'], {}), '(self.path)\n', (13668, 13679), False, 'import stem\n'), ((13844, 13882), 'stem.util.tor_tools.is_valid_circuit_id', 'tor_tools.is_valid_circuit_id', (['self.id'], {}), '(self.id)\n', (13873, 13882), False, 'from stem.util import str_type, int_type, connection, log, str_tools, tor_tools\n'), ((13896, 14018), 'stem.ProtocolError', 'stem.ProtocolError', (['("Circuit IDs must be one to sixteen alphanumeric characters, got \'%s\': %s" %\n (self.id, self))'], {}), '(\n "Circuit IDs must be one to sixteen alphanumeric characters, got \'%s\': %s"\n % (self.id, self))\n', (13914, 14018), False, 'import stem\n'), ((17034, 17074), 'stem.control._parse_circ_path', 'stem.control._parse_circ_path', (['self.path'], {}), '(self.path)\n', (17063, 17074), False, 'import stem\n'), ((17239, 17277), 'stem.util.tor_tools.is_valid_circuit_id', 'tor_tools.is_valid_circuit_id', (['self.id'], {}), '(self.id)\n', (17268, 17277), False, 'from stem.util import str_type, int_type, connection, log, str_tools, tor_tools\n'), ((17291, 17413), 'stem.ProtocolError', 'stem.ProtocolError', (['("Circuit IDs must be one to sixteen alphanumeric characters, got \'%s\': %s" %\n (self.id, self))'], {}), '(\n "Circuit IDs must be one to sixteen alphanumeric characters, got \'%s\': %s"\n % (self.id, self))\n', (17309, 17413), False, 'import stem\n'), ((18503, 18556), 'stem.util.str_tools._parse_timestamp', 'stem.util.str_tools._parse_timestamp', (['self.start_time'], {}), '(self.start_time)\n', (18539, 18556), False, 'import stem\n'), ((21949, 21994), 'stem.control._parse_circ_entry', 'stem.control._parse_circ_entry', (['self.endpoint'], {}), '(self.endpoint)\n', (21979, 21994), False, 'import stem\n'), ((25138, 25184), 'stem.control._parse_circ_entry', 'stem.control._parse_circ_entry', (['self.directory'], {}), '(self.directory)\n', (25168, 25184), False, 'import stem\n'), ((31109, 31154), 'stem.control._parse_circ_entry', 'stem.control._parse_circ_entry', (['self.endpoint'], {}), '(self.endpoint)\n', (31139, 31154), False, 'import stem\n'), ((31906, 32031), 'stem.ProtocolError', 'stem.ProtocolError', (['("Connection IDs must be one to sixteen alphanumeric characters, got \'%s\': %s"\n % (self.id, self))'], {}), '(\n "Connection IDs must be one to sixteen alphanumeric characters, got \'%s\': %s"\n % (self.id, self))\n', (31924, 32031), False, 'import stem\n'), ((35802, 35868), 
'stem.ProtocolError', 'stem.ProtocolError', (['("STREAM event didn\'t have a target: %s" % self)'], {}), '("STREAM event didn\'t have a target: %s" % self)\n', (35820, 35868), False, 'import stem\n'), ((37774, 37811), 'stem.util.tor_tools.is_valid_stream_id', 'tor_tools.is_valid_stream_id', (['self.id'], {}), '(self.id)\n', (37802, 37811), False, 'from stem.util import str_type, int_type, connection, log, str_tools, tor_tools\n'), ((37825, 37945), 'stem.ProtocolError', 'stem.ProtocolError', (['("Stream IDs must be one to sixteen alphanumeric characters, got \'%s\': %s" %\n (self.id, self))'], {}), '(\n "Stream IDs must be one to sixteen alphanumeric characters, got \'%s\': %s" %\n (self.id, self))\n', (37843, 37945), False, 'import stem\n'), ((39139, 39229), 'stem.ProtocolError', 'stem.ProtocolError', (['("Transport type should either be \'server\' or \'client\': %s" % self)'], {}), '(\n "Transport type should either be \'server\' or \'client\': %s" % self)\n', (39157, 39229), False, 'import stem\n'), ((39361, 39451), 'stem.ProtocolError', 'stem.ProtocolError', (['("Transport address isn\'t a valid IPv4 or IPv6 address: %s" % self)'], {}), '(\n "Transport address isn\'t a valid IPv4 or IPv6 address: %s" % self)\n', (39379, 39451), False, 'import stem\n'), ((39459, 39494), 'stem.util.connection.is_valid_port', 'connection.is_valid_port', (['self.port'], {}), '(self.port)\n', (39483, 39494), False, 'from stem.util import str_type, int_type, connection, log, str_tools, tor_tools\n'), ((39508, 39566), 'stem.ProtocolError', 'stem.ProtocolError', (["('Transport port is invalid: %s' % self)"], {}), "('Transport port is invalid: %s' % self)\n", (39526, 39566), False, 'import stem\n'), ((40411, 40464), 'stem.ProtocolError', 'stem.ProtocolError', (['"""CONN_BW event is missing its id"""'], {}), "('CONN_BW event is missing its id')\n", (40429, 40464), False, 'import stem\n'), ((42015, 42068), 'stem.ProtocolError', 'stem.ProtocolError', (['"""CIRC_BW event is missing its id"""'], {}), "('CIRC_BW event is missing its id')\n", (42033, 42068), False, 'import stem\n'), ((44462, 44584), 'stem.ProtocolError', 'stem.ProtocolError', (['("Circuit IDs must be one to sixteen alphanumeric characters, got \'%s\': %s" %\n (self.id, self))'], {}), '(\n "Circuit IDs must be one to sixteen alphanumeric characters, got \'%s\': %s"\n % (self.id, self))\n', (44480, 44584), False, 'import stem\n'), ((46854, 46979), 'stem.ProtocolError', 'stem.ProtocolError', (['("Connection IDs must be one to sixteen alphanumeric characters, got \'%s\': %s"\n % (self.id, self))'], {}), '(\n "Connection IDs must be one to sixteen alphanumeric characters, got \'%s\': %s"\n % (self.id, self))\n', (46872, 46979), False, 'import stem\n'), ((48058, 48174), 'stem.ProtocolError', 'stem.ProtocolError', (['("Mappings are expected to be of the form \'key:value\', got \'%s\': %s" % (\n entry, mapping))'], {}), '(\n "Mappings are expected to be of the form \'key:value\', got \'%s\': %s" % (\n entry, mapping))\n', (48076, 48174), False, 'import stem\n'), ((48250, 48329), 'stem.ProtocolError', 'stem.ProtocolError', (['("Key had invalid characters, got \'%s\': %s" % (key, mapping))'], {}), '("Key had invalid characters, got \'%s\': %s" % (key, mapping))\n', (48268, 48329), False, 'import stem\n'), ((1725, 1736), 'time.time', 'time.time', ([], {}), '()\n', (1734, 1736), False, 'import time\n'), ((4730, 4800), 'stem.ProtocolError', 'stem.ProtocolError', (["('Unable to parse timestamp (%s): %s' % (exc, self))"], {}), "('Unable to parse timestamp (%s): %s' % 
(exc, self))\n", (4748, 4800), False, 'import stem\n'), ((8577, 8640), 'stem.ProtocolError', 'stem.ProtocolError', (['"""AUTHDIR_NEWDESCS doesn\'t end with an \'OK\'"""'], {}), '("AUTHDIR_NEWDESCS doesn\'t end with an \'OK\'")\n', (8595, 8640), False, 'import stem\n'), ((9374, 9433), 'stem.ProtocolError', 'stem.ProtocolError', (['"""BW event is missing its written value"""'], {}), "('BW event is missing its written value')\n", (9392, 9433), False, 'import stem\n'), ((22038, 22114), 'stem.ProtocolError', 'stem.ProtocolError', (['("GUARD\'s endpoint doesn\'t match a ServerSpec: %s" % self)'], {}), '("GUARD\'s endpoint doesn\'t match a ServerSpec: %s" % self)\n', (22056, 22114), False, 'import stem\n'), ((23607, 23653), 'stem.control._parse_circ_entry', 'stem.control._parse_circ_entry', (['self.directory'], {}), '(self.directory)\n', (23637, 23653), False, 'import stem\n'), ((23866, 23969), 'stem.ProtocolError', 'stem.ProtocolError', (["('HS_DESC event got a non-numeric replica count (%s): %s' % (self.replica,\n self))"], {}), "('HS_DESC event got a non-numeric replica count (%s): %s' %\n (self.replica, self))\n", (23884, 23969), False, 'import stem\n'), ((25228, 25320), 'stem.ProtocolError', 'stem.ProtocolError', (['("HS_DESC_CONTENT\'s directory doesn\'t match a ServerSpec: %s" % self)'], {}), '(\n "HS_DESC_CONTENT\'s directory doesn\'t match a ServerSpec: %s" % self)\n', (25246, 25320), False, 'import stem\n'), ((29574, 29611), 'stem.control._parse_circ_entry', 'stem.control._parse_circ_entry', (['entry'], {}), '(entry)\n', (29604, 29611), False, 'import stem\n'), ((31679, 31784), 'stem.ProtocolError', 'stem.ProtocolError', (["('ORCONN event got a non-numeric circuit count (%s): %s' % (self.circ_count,\n self))"], {}), "('ORCONN event got a non-numeric circuit count (%s): %s' %\n (self.circ_count, self))\n", (31697, 31784), False, 'import stem\n'), ((31851, 31892), 'stem.util.tor_tools.is_valid_connection_id', 'tor_tools.is_valid_connection_id', (['self.id'], {}), '(self.id)\n', (31883, 31892), False, 'from stem.util import str_type, int_type, connection, log, str_tools, tor_tools\n'), ((35926, 36013), 'stem.ProtocolError', 'stem.ProtocolError', (['("Target location must be of the form \'address:port\': %s" % self)'], {}), '("Target location must be of the form \'address:port\': %s" %\n self)\n', (35944, 36013), False, 'import stem\n'), ((36074, 36121), 'stem.util.connection.is_valid_port', 'connection.is_valid_port', (['port'], {'allow_zero': '(True)'}), '(port, allow_zero=True)\n', (36098, 36121), False, 'from stem.util import str_type, int_type, connection, log, str_tools, tor_tools\n'), ((36139, 36205), 'stem.ProtocolError', 'stem.ProtocolError', (['("Target location\'s port is invalid: %s" % self)'], {}), '("Target location\'s port is invalid: %s" % self)\n', (36157, 36205), False, 'import stem\n'), ((36437, 36524), 'stem.ProtocolError', 'stem.ProtocolError', (['("Source location must be of the form \'address:port\': %s" % self)'], {}), '("Source location must be of the form \'address:port\': %s" %\n self)\n', (36455, 36524), False, 'import stem\n'), ((36590, 36637), 'stem.util.connection.is_valid_port', 'connection.is_valid_port', (['port'], {'allow_zero': '(True)'}), '(port, allow_zero=True)\n', (36614, 36637), False, 'from stem.util import str_type, int_type, connection, log, str_tools, tor_tools\n'), ((36655, 36721), 'stem.ProtocolError', 'stem.ProtocolError', (['("Source location\'s port is invalid: %s" % self)'], {}), '("Source location\'s port is invalid: %s" % self)\n', 
(36673, 36721), False, 'import stem\n'), ((37976, 38042), 'stem.ProtocolError', 'stem.ProtocolError', (['"""STREAM_BW event is missing its written value"""'], {}), "('STREAM_BW event is missing its written value')\n", (37994, 38042), False, 'import stem\n'), ((39237, 39283), 'stem.util.connection.is_valid_ipv4_address', 'connection.is_valid_ipv4_address', (['self.address'], {}), '(self.address)\n', (39269, 39283), False, 'from stem.util import str_type, int_type, connection, log, str_tools, tor_tools\n'), ((39301, 39347), 'stem.util.connection.is_valid_ipv6_address', 'connection.is_valid_ipv6_address', (['self.address'], {}), '(self.address)\n', (39333, 39347), False, 'from stem.util import str_type, int_type, connection, log, str_tools, tor_tools\n'), ((40506, 40572), 'stem.ProtocolError', 'stem.ProtocolError', (['"""CONN_BW event is missing its connection type"""'], {}), "('CONN_BW event is missing its connection type')\n", (40524, 40572), False, 'import stem\n'), ((42105, 42166), 'stem.ProtocolError', 'stem.ProtocolError', (['"""CIRC_BW event is missing its read value"""'], {}), "('CIRC_BW event is missing its read value')\n", (42123, 42166), False, 'import stem\n'), ((44410, 44448), 'stem.util.tor_tools.is_valid_circuit_id', 'tor_tools.is_valid_circuit_id', (['self.id'], {}), '(self.id)\n', (44439, 44448), False, 'from stem.util import str_type, int_type, connection, log, str_tools, tor_tools\n'), ((44674, 44804), 'stem.ProtocolError', 'stem.ProtocolError', (['("Queue IDs must be one to sixteen alphanumeric characters, got \'%s\': %s" %\n (self.inbound_queue, self))'], {}), '(\n "Queue IDs must be one to sixteen alphanumeric characters, got \'%s\': %s" %\n (self.inbound_queue, self))\n', (44692, 44804), False, 'import stem\n'), ((46799, 46840), 'stem.util.tor_tools.is_valid_connection_id', 'tor_tools.is_valid_connection_id', (['self.id'], {}), '(self.id)\n', (46831, 46840), False, 'from stem.util import str_type, int_type, connection, log, str_tools, tor_tools\n'), ((47016, 47127), 'stem.ProtocolError', 'stem.ProtocolError', (['("A TB_EMPTY\'s READ value should be a positive numeric value, received: %s" %\n self)'], {}), '(\n "A TB_EMPTY\'s READ value should be a positive numeric value, received: %s"\n % self)\n', (47034, 47127), False, 'import stem\n'), ((48372, 48461), 'stem.ProtocolError', 'stem.ProtocolError', (['("Values should just be integers, got \'%s\': %s" % (value, mapping))'], {}), '("Values should just be integers, got \'%s\': %s" % (value,\n mapping))\n', (48390, 48461), False, 'import stem\n'), ((5720, 5768), 'stem.util.log.log_once', 'log.log_once', (['log_id', 'log.INFO', 'unrecognized_msg'], {}), '(log_id, log.INFO, unrecognized_msg)\n', (5732, 5768), False, 'from stem.util import str_type, int_type, connection, log, str_tools, tor_tools\n'), ((7005, 7054), 'stem.util.str_tools._parse_timestamp', 'stem.util.str_tools._parse_timestamp', (['self.expiry'], {}), '(self.expiry)\n', (7041, 7054), False, 'import stem\n'), ((7461, 7558), 'stem.ProtocolError', 'stem.ProtocolError', (['("An ADDRMAP event\'s CACHED mapping can only be \'YES\' or \'NO\': %s" % self)'], {}), '(\n "An ADDRMAP event\'s CACHED mapping can only be \'YES\' or \'NO\': %s" % self)\n', (7479, 7558), False, 'import stem\n'), ((9510, 9634), 'stem.ProtocolError', 'stem.ProtocolError', (['("A BW event\'s bytes sent and received should be a positive numeric value, received: %s"\n % self)'], {}), '(\n "A BW event\'s bytes sent and received should be a positive numeric value, received: %s"\n % self)\n', (9528, 
9634), False, 'import stem\n'), ((18708, 18851), 'stem.ProtocolError', 'stem.ProtocolError', (['("The CLIENTS_SEEN\'s CountrySummary should be a comma separated listing of \'<locale>=<count>\' mappings: %s"\n % self)'], {}), '(\n "The CLIENTS_SEEN\'s CountrySummary should be a comma separated listing of \'<locale>=<count>\' mappings: %s"\n % self)\n', (18726, 18851), False, 'import stem\n'), ((18933, 19028), 'stem.ProtocolError', 'stem.ProtocolError', (['("Locales should be a two character code, got \'%s\': %s" % (locale, self))'], {}), '("Locales should be a two character code, got \'%s\': %s" %\n (locale, self))\n', (18951, 19028), False, 'import stem\n'), ((19541, 19682), 'stem.ProtocolError', 'stem.ProtocolError', (['("The CLIENTS_SEEN\'s IPVersions should be a comma separated listing of \'<protocol>=<count>\' mappings: %s"\n % self)'], {}), '(\n "The CLIENTS_SEEN\'s IPVersions should be a comma separated listing of \'<protocol>=<count>\' mappings: %s"\n % self)\n', (19559, 19682), False, 'import stem\n'), ((19769, 19854), 'stem.ProtocolError', 'stem.ProtocolError', (["('IP protocol count was non-numeric (%s): %s' % (count, self))"], {}), "('IP protocol count was non-numeric (%s): %s' % (count, self)\n )\n", (19787, 19854), False, 'import stem\n'), ((23701, 23780), 'stem.ProtocolError', 'stem.ProtocolError', (['("HS_DESC\'s directory doesn\'t match a ServerSpec: %s" % self)'], {}), '("HS_DESC\'s directory doesn\'t match a ServerSpec: %s" % self)\n', (23719, 23780), False, 'import stem\n'), ((26992, 27020), 'stem.util.str_tools._to_bytes', 'str_tools._to_bytes', (['content'], {}), '(content)\n', (27011, 27020), False, 'from stem.util import str_type, int_type, connection, log, str_tools, tor_tools\n'), ((31235, 31326), 'stem.ProtocolError', 'stem.ProtocolError', (['("ORCONN endpoint is neither a relay nor \'address:port\': %s" % self)'], {}), '(\n "ORCONN endpoint is neither a relay nor \'address:port\': %s" % self)\n', (31253, 31326), False, 'import stem\n'), ((31388, 31418), 'stem.util.connection.is_valid_port', 'connection.is_valid_port', (['port'], {}), '(port)\n', (31412, 31418), False, 'from stem.util import str_type, int_type, connection, log, str_tools, tor_tools\n'), ((31434, 31511), 'stem.ProtocolError', 'stem.ProtocolError', (['("ORCONN\'s endpoint location\'s port is invalid: %s" % self)'], {}), '("ORCONN\'s endpoint location\'s port is invalid: %s" % self)\n', (31452, 31511), False, 'import stem\n'), ((38079, 38142), 'stem.ProtocolError', 'stem.ProtocolError', (['"""STREAM_BW event is missing its read value"""'], {}), "('STREAM_BW event is missing its read value')\n", (38097, 38142), False, 'import stem\n'), ((40609, 40670), 'stem.ProtocolError', 'stem.ProtocolError', (['"""CONN_BW event is missing its read value"""'], {}), "('CONN_BW event is missing its read value')\n", (40627, 40670), False, 'import stem\n'), ((42206, 42270), 'stem.ProtocolError', 'stem.ProtocolError', (['"""CIRC_BW event is missing its written value"""'], {}), "('CIRC_BW event is missing its written value')\n", (42224, 42270), False, 'import stem\n'), ((44611, 44660), 'stem.util.tor_tools.is_valid_circuit_id', 'tor_tools.is_valid_circuit_id', (['self.inbound_queue'], {}), '(self.inbound_queue)\n', (44640, 44660), False, 'from stem.util import str_type, int_type, connection, log, str_tools, tor_tools\n'), ((44908, 45049), 'stem.ProtocolError', 'stem.ProtocolError', (['("Connection IDs must be one to sixteen alphanumeric characters, got \'%s\': %s"\n % (self.inbound_connection, self))'], {}), '(\n 
"Connection IDs must be one to sixteen alphanumeric characters, got \'%s\': %s"\n % (self.inbound_connection, self))\n', (44926, 45049), False, 'import stem\n'), ((47167, 47281), 'stem.ProtocolError', 'stem.ProtocolError', (['("A TB_EMPTY\'s WRITTEN value should be a positive numeric value, received: %s"\n % self)'], {}), '(\n "A TB_EMPTY\'s WRITTEN value should be a positive numeric value, received: %s"\n % self)\n', (47185, 47281), False, 'import stem\n'), ((3580, 3698), 'stem.ProtocolError', 'stem.ProtocolError', (['("The %s value should be quoted, but didn\'t have a starting quote: %s" % (\n attr_name, self))'], {}), '(\n "The %s value should be quoted, but didn\'t have a starting quote: %s" %\n (attr_name, self))\n', (3598, 3698), False, 'import stem\n'), ((7098, 7168), 'stem.ProtocolError', 'stem.ProtocolError', (["('Unable to parse date in ADDRMAP event: %s' % self)"], {}), "('Unable to parse date in ADDRMAP event: %s' % self)\n", (7116, 7168), False, 'import stem\n'), ((11195, 11290), 'stem.ProtocolError', 'stem.ProtocolError', (["('The %s of a BUILDTIMEOUT_SET should be an integer: %s' % (param, self))"], {}), "('The %s of a BUILDTIMEOUT_SET should be an integer: %s' %\n (param, self))\n", (11213, 11290), False, 'import stem\n'), ((11541, 11634), 'stem.ProtocolError', 'stem.ProtocolError', (["('The %s of a BUILDTIMEOUT_SET should be a float: %s' % (param, self))"], {}), "('The %s of a BUILDTIMEOUT_SET should be a float: %s' % (\n param, self))\n", (11559, 11634), False, 'import stem\n'), ((19075, 19150), 'stem.ProtocolError', 'stem.ProtocolError', (["('Locale count was non-numeric (%s): %s' % (count, self))"], {}), "('Locale count was non-numeric (%s): %s' % (count, self))\n", (19093, 19150), False, 'import stem\n'), ((25599, 25623), 'io.BytesIO', 'io.BytesIO', (['desc_content'], {}), '(desc_content)\n', (25609, 25623), False, 'import io\n'), ((28822, 28865), 'stem.util.str_tools._to_bytes', 'str_tools._to_bytes', (['self.consensus_content'], {}), '(self.consensus_content)\n', (28841, 28865), False, 'from stem.util import str_type, int_type, connection, log, str_tools, tor_tools\n'), ((38219, 38350), 'stem.ProtocolError', 'stem.ProtocolError', (['("A STREAM_BW event\'s bytes sent and received should be a positive numeric value, received: %s"\n % self)'], {}), '(\n "A STREAM_BW event\'s bytes sent and received should be a positive numeric value, received: %s"\n % self)\n', (38237, 38350), False, 'import stem\n'), ((40710, 40774), 'stem.ProtocolError', 'stem.ProtocolError', (['"""CONN_BW event is missing its written value"""'], {}), "('CONN_BW event is missing its written value')\n", (40728, 40774), False, 'import stem\n'), ((42347, 42476), 'stem.ProtocolError', 'stem.ProtocolError', (['("A CIRC_BW event\'s bytes sent and received should be a positive numeric value, received: %s"\n % self)'], {}), '(\n "A CIRC_BW event\'s bytes sent and received should be a positive numeric value, received: %s"\n % self)\n', (42365, 42476), False, 'import stem\n'), ((44837, 44894), 'stem.util.tor_tools.is_valid_connection_id', 'tor_tools.is_valid_connection_id', (['self.inbound_connection'], {}), '(self.inbound_connection)\n', (44869, 44894), False, 'from stem.util import str_type, int_type, connection, log, str_tools, tor_tools\n'), ((45141, 45272), 'stem.ProtocolError', 'stem.ProtocolError', (['("Queue IDs must be one to sixteen alphanumeric characters, got \'%s\': %s" %\n (self.outbound_queue, self))'], {}), '(\n "Queue IDs must be one to sixteen alphanumeric characters, got \'%s\': %s" %\n 
(self.outbound_queue, self))\n', (45159, 45272), False, 'import stem\n'), ((47325, 47436), 'stem.ProtocolError', 'stem.ProtocolError', (['("A TB_EMPTY\'s LAST value should be a positive numeric value, received: %s" %\n self)'], {}), '(\n "A TB_EMPTY\'s LAST value should be a positive numeric value, received: %s"\n % self)\n', (47343, 47436), False, 'import stem\n'), ((3764, 3881), 'stem.ProtocolError', 'stem.ProtocolError', (['("The %s value should be quoted, but didn\'t have an ending quote: %s" % (\n attr_name, self))'], {}), '(\n "The %s value should be quoted, but didn\'t have an ending quote: %s" %\n (attr_name, self))\n', (3782, 3881), False, 'import stem\n'), ((19207, 19300), 'stem.ProtocolError', 'stem.ProtocolError', (['("CountrySummary had multiple mappings for \'%s\': %s" % (locale, self))'], {}), '("CountrySummary had multiple mappings for \'%s\': %s" % (\n locale, self))\n', (19225, 19300), False, 'import stem\n'), ((40851, 40980), 'stem.ProtocolError', 'stem.ProtocolError', (['("A CONN_BW event\'s bytes sent and received should be a positive numeric value, received: %s"\n % self)'], {}), '(\n "A CONN_BW event\'s bytes sent and received should be a positive numeric value, received: %s"\n % self)\n', (40869, 40980), False, 'import stem\n'), ((42480, 42518), 'stem.util.tor_tools.is_valid_circuit_id', 'tor_tools.is_valid_circuit_id', (['self.id'], {}), '(self.id)\n', (42509, 42518), False, 'from stem.util import str_type, int_type, connection, log, str_tools, tor_tools\n'), ((42532, 42654), 'stem.ProtocolError', 'stem.ProtocolError', (['("Circuit IDs must be one to sixteen alphanumeric characters, got \'%s\': %s" %\n (self.id, self))'], {}), '(\n "Circuit IDs must be one to sixteen alphanumeric characters, got \'%s\': %s"\n % (self.id, self))\n', (42550, 42654), False, 'import stem\n'), ((45077, 45127), 'stem.util.tor_tools.is_valid_circuit_id', 'tor_tools.is_valid_circuit_id', (['self.outbound_queue'], {}), '(self.outbound_queue)\n', (45106, 45127), False, 'from stem.util import str_type, int_type, connection, log, str_tools, tor_tools\n'), ((45378, 45520), 'stem.ProtocolError', 'stem.ProtocolError', (['("Connection IDs must be one to sixteen alphanumeric characters, got \'%s\': %s"\n % (self.outbound_connection, self))'], {}), '(\n "Connection IDs must be one to sixteen alphanumeric characters, got \'%s\': %s"\n % (self.outbound_connection, self))\n', (45396, 45520), False, 'import stem\n'), ((40984, 41025), 'stem.util.tor_tools.is_valid_connection_id', 'tor_tools.is_valid_connection_id', (['self.id'], {}), '(self.id)\n', (41016, 41025), False, 'from stem.util import str_type, int_type, connection, log, str_tools, tor_tools\n'), ((41039, 41164), 'stem.ProtocolError', 'stem.ProtocolError', (['("Connection IDs must be one to sixteen alphanumeric characters, got \'%s\': %s"\n % (self.id, self))'], {}), '(\n "Connection IDs must be one to sixteen alphanumeric characters, got \'%s\': %s"\n % (self.id, self))\n', (41057, 41164), False, 'import stem\n'), ((45306, 45364), 'stem.util.tor_tools.is_valid_connection_id', 'tor_tools.is_valid_connection_id', (['self.outbound_connection'], {}), '(self.outbound_connection)\n', (45338, 45364), False, 'from stem.util import str_type, int_type, connection, log, str_tools, tor_tools\n')] |
import re
from atlas.conf import settings
from atlas.modules import (
constants as swagger_constants,
exceptions,
mixins,
utils
)
from atlas.modules.resource_data_generator import constants as resource_constants
from atlas.modules.helpers import resource_map
class AutoGenerator(mixins.YAMLReadWriteMixin):
"""
Auto Generate Resource Mapping from Swagger definition.
Auto update Swagger definition and Resource Mapping file
"""
def __init__(self, swagger_file=None):
super().__init__()
self.swagger_file = swagger_file or settings.SWAGGER_FILE
self.specs = self.read_file_from_input(self.swagger_file, {})
self.spec_definitions = self.format_references(self.specs.get(swagger_constants.DEFINITIONS, {}).keys())
self.resource_map_resolver = resource_map.ResourceMapResolver()
self.resource_map_resolver.resolve_resources()
self.resource_keys = self.format_references(self.resource_map_resolver.resource_map.keys())
self.new_resources = set()
# Keep list of refs which are already processed to avoid duplicate processing
self.processed_refs = set()
# For any single operation, maintain a list of parameters
self.resource_params = set()
@staticmethod
def format_references(references) -> set:
"""
Convert all resource keys to uniform format
Uniformity is ensured by:
- Making sure everything is in lower case
- removing _, - from strings
        This does mean that abc_d and ab_cd refer to the same key.
        However, we estimate that such collisions are less likely than users
        failing to keep the casing of their Swagger references consistent
:return: Set of all Resource Keys
"""
return {"".join([x.lower() for x in re.sub("-", "_", key).split("_")]) for key in references}
def add_resource(self, resource):
if not resource:
return ""
resource = "".join([x.lower() for x in re.sub("-", "_", resource).split("_")])
if resource not in self.resource_keys:
self.new_resources.add(resource)
self.resource_keys.add(resource)
return resource
def add_reference_definition(self, reference, fields):
"""
Add a virtual reference for every resource in Swagger definition
"""
definitions = self.specs.get(swagger_constants.DEFINITIONS, {})
if reference in self.spec_definitions or reference in self.processed_refs:
return # We already have reference with same name, or have processed it earlier, so do nothing
definitions[reference] = {
swagger_constants.TYPE: swagger_constants.OBJECT,
# Without dict initialization, it is copying some auto-generated IDs also.
            # When we have time, investigate!
swagger_constants.PROPERTIES: {
fields[swagger_constants.PARAMETER_NAME]: dict(fields)
}
}
self.processed_refs.add(reference)
def extract_resource_name_from_param(self, param_name, url_path, param_type=swagger_constants.PATH_PARAM):
"""
Extract Resource Name from parameter name
Names could be either snake case (foo_id) or camelCase (fooId)
In case of URL Params, further they could be foo/id
Return None if no such resource could be found
"""
resource_name = utils.extract_resource_name_from_param(param_name, url_path, param_type)
if not resource_name and param_name in self.resource_keys:
resource_name = param_name
return resource_name
def parse_params(self, params, url):
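        """
        Collect the resources referenced by an operation's parameters.
        URL params get a resource alias (auto-extracted from the name/URL when missing)
        and a virtual reference definition; body params have their schema refs resolved.
        Returns the set of resource aliases seen for this operation.
        """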
# Reset the params
self.resource_params = set()
for param in params:
ref = param.get(swagger_constants.REF)
if ref:
param = utils.resolve_reference(self.specs, ref)
param_type = param.get(swagger_constants.IN_)
_name = param.get(swagger_constants.PARAMETER_NAME)
if not param_type:
raise exceptions.ImproperSwaggerException(f"Param type not defined for {_name}")
if param_type in swagger_constants.URL_PARAMS:
# Check if resource is defined
resource = param.get(swagger_constants.RESOURCE)
# Generate resources if none found. Do not generate if empty string
if resource is None:
resource = self.extract_resource_name_from_param(_name, url, param_type)
if resource:
resource = self.add_resource(resource)
resource_alias = self.resource_map_resolver.get_alias(resource)
param[swagger_constants.RESOURCE] = resource_alias
self.add_reference_definition(resource_alias, param)
self.resource_params.add(resource_alias)
elif param_type == swagger_constants.BODY_PARAM:
self.resolve_body_param(param)
return self.resource_params
def resolve_body_param(self, body_config):
schema = body_config.get(swagger_constants.SCHEMA, {})
self.resolve_schema(schema)
def resolve_schema(self, schema):
"""
We can only associate Complete references, and not in-line definitions
"""
ref = schema.get(swagger_constants.REF)
if ref:
self.get_ref_name_and_config(ref)
def resolve_all_of_element(self, ref_name, schema):
# Try resolving any references
self.resolve_schema(schema)
_type = schema.get(swagger_constants.TYPE)
if _type == swagger_constants.OBJECT:
self.parse_reference_properties(ref_name, schema.get(swagger_constants.PROPERTIES, {}))
def get_ref_name_and_config(self, ref):
if not isinstance(ref, str):
print(f"\nWARNING: Only string references supported. Found: {ref}\n")
return
ref_config = utils.resolve_reference(self.specs, ref)
ref_name = ref.split("/")[-1]
self.parse_reference(ref_name, ref_config)
def parse_reference(self, ref_name, ref_config):
if ref_name in self.processed_refs:
return # This has already been processed, so no need to do it again
for element in ref_config.get(swagger_constants.ALL_OF, []):
self.resolve_all_of_element(ref_name, element)
self.parse_reference_properties(ref_name, ref_config.get(swagger_constants.PROPERTIES, {}))
def parse_reference_properties(self, ref_name, properties):
# By adding it to processed list before even processing, we avoid cycles
self.processed_refs.add(ref_name)
for key, value in properties.items():
resource = ""
if key in settings.SWAGGER_REFERENCE_FIELD_RESOURCE_IDENTIFIERS:
resource = value.get(swagger_constants.RESOURCE, utils.convert_to_snake_case(ref_name))
value[swagger_constants.READ_ONLY] = True
elif swagger_constants.REF in value:
self.get_ref_name_and_config(value[swagger_constants.REF])
# Commenting this out, as adding this logic generated lots of false positive resources
# elif key in self.resource_keys:
# resource = key
if resource:
resource = self.add_resource(resource)
resource_alias = self.resource_map_resolver.get_alias(resource)
value[swagger_constants.RESOURCE] = resource_alias
self.resource_params.add(resource_alias)
def parse(self):
for url, path_config in self.specs.get(swagger_constants.PATHS, {}).items():
parameters = path_config.get(swagger_constants.PARAMETERS)
common_resources = set()
if parameters:
common_resources = self.parse_params(parameters, url)
for method, method_config in path_config.items():
method_resources = set()
if method in swagger_constants.VALID_METHODS:
parameters = method_config.get(swagger_constants.PARAMETERS)
# Detect Operation ID in swagger, and if not present, generate and write back in Swagger
# Operation IDs are used as primary key throughout application
op_id = method_config.get(swagger_constants.OPERATION)
if not op_id:
method_config[swagger_constants.OPERATION] = utils.operation_id_name(url, method)
if parameters:
method_resources = self.parse_params(parameters, url)
all_resources = common_resources.union(method_resources)
if len(all_resources) > 1:
method_config[swagger_constants.DEPENDENT_RESOURCES] = all_resources
for ref_name, ref_config in self.specs.get(swagger_constants.DEFINITIONS, {}).items():
self.parse_reference(ref_name, ref_config)
def update(self):
# Update Specs File
self.write_file_to_output(self.swagger_file, self.specs, append_mode=False)
# Update Resource Mapping File
auto_resource = {
resource: {resource_constants.DUMMY_DEF: "# Add your definition here"} for resource in self.new_resources
}
self.write_file_to_input(
settings.MAPPING_FILE, {**self.resource_map_resolver.resource_map, **auto_resource}, append_mode=False
)
| [
"atlas.modules.exceptions.ImproperSwaggerException",
"atlas.modules.utils.extract_resource_name_from_param",
"atlas.modules.utils.operation_id_name",
"atlas.modules.helpers.resource_map.ResourceMapResolver",
"atlas.modules.utils.convert_to_snake_case",
"atlas.modules.utils.resolve_reference",
"re.sub"
] | [((822, 856), 'atlas.modules.helpers.resource_map.ResourceMapResolver', 'resource_map.ResourceMapResolver', ([], {}), '()\n', (854, 856), False, 'from atlas.modules.helpers import resource_map\n'), ((3480, 3552), 'atlas.modules.utils.extract_resource_name_from_param', 'utils.extract_resource_name_from_param', (['param_name', 'url_path', 'param_type'], {}), '(param_name, url_path, param_type)\n', (3518, 3552), False, 'from atlas.modules import constants as swagger_constants, exceptions, mixins, utils\n'), ((6065, 6105), 'atlas.modules.utils.resolve_reference', 'utils.resolve_reference', (['self.specs', 'ref'], {}), '(self.specs, ref)\n', (6088, 6105), False, 'from atlas.modules import constants as swagger_constants, exceptions, mixins, utils\n'), ((3923, 3963), 'atlas.modules.utils.resolve_reference', 'utils.resolve_reference', (['self.specs', 'ref'], {}), '(self.specs, ref)\n', (3946, 3963), False, 'from atlas.modules import constants as swagger_constants, exceptions, mixins, utils\n'), ((4141, 4215), 'atlas.modules.exceptions.ImproperSwaggerException', 'exceptions.ImproperSwaggerException', (['f"""Param type not defined for {_name}"""'], {}), "(f'Param type not defined for {_name}')\n", (4176, 4215), False, 'from atlas.modules import constants as swagger_constants, exceptions, mixins, utils\n'), ((7013, 7050), 'atlas.modules.utils.convert_to_snake_case', 'utils.convert_to_snake_case', (['ref_name'], {}), '(ref_name)\n', (7040, 7050), False, 'from atlas.modules import constants as swagger_constants, exceptions, mixins, utils\n'), ((8633, 8669), 'atlas.modules.utils.operation_id_name', 'utils.operation_id_name', (['url', 'method'], {}), '(url, method)\n', (8656, 8669), False, 'from atlas.modules import constants as swagger_constants, exceptions, mixins, utils\n'), ((2049, 2075), 're.sub', 're.sub', (['"""-"""', '"""_"""', 'resource'], {}), "('-', '_', resource)\n", (2055, 2075), False, 'import re\n'), ((1856, 1877), 're.sub', 're.sub', (['"""-"""', '"""_"""', 'key'], {}), "('-', '_', key)\n", (1862, 1877), False, 'import re\n')] |
from django.urls import reverse, resolve
def test_calendar():
path = reverse("calendar")
assert path == "/calendar/"
assert resolve(path).view_name == "calendar"
def test_calendar_day():
path = reverse("day", kwargs={"month": 2, "day": 2})
assert path == "/calendar/day/2/2"
assert resolve(path).view_name == "day"
| [
"django.urls.resolve",
"django.urls.reverse"
] | [((75, 94), 'django.urls.reverse', 'reverse', (['"""calendar"""'], {}), "('calendar')\n", (82, 94), False, 'from django.urls import reverse, resolve\n'), ((214, 259), 'django.urls.reverse', 'reverse', (['"""day"""'], {'kwargs': "{'month': 2, 'day': 2}"}), "('day', kwargs={'month': 2, 'day': 2})\n", (221, 259), False, 'from django.urls import reverse, resolve\n'), ((138, 151), 'django.urls.resolve', 'resolve', (['path'], {}), '(path)\n', (145, 151), False, 'from django.urls import reverse, resolve\n'), ((310, 323), 'django.urls.resolve', 'resolve', (['path'], {}), '(path)\n', (317, 323), False, 'from django.urls import reverse, resolve\n')] |
import m3u8
from playlists import *
def test_should_parse_simple_playlist_from_string():
data = m3u8.parse(SIMPLE_PLAYLIST)
assert 5220 == data['targetduration']
assert ['http://media.example.com/entire.ts'] == [c['uri'] for c in data['segments']]
assert [5220] == [c['duration'] for c in data['segments']]
def test_should_parse_non_integer_duration_from_playlist_string():
data = m3u8.parse(PLAYLIST_WITH_NON_INTEGER_DURATION)
assert 5220.5 == data['targetduration']
assert [5220.5] == [c['duration'] for c in data['segments']]
def test_should_parse_simple_playlist_from_string_with_different_linebreaks():
data = m3u8.parse(SIMPLE_PLAYLIST.replace('\n', '\r\n'))
assert 5220 == data['targetduration']
assert ['http://media.example.com/entire.ts'] == [c['uri'] for c in data['segments']]
assert [5220] == [c['duration'] for c in data['segments']]
def test_should_parse_sliding_window_playlist_from_string():
data = m3u8.parse(SLIDING_WINDOW_PLAYLIST)
assert 8 == data['targetduration']
assert 2680 == data['media_sequence']
assert ['https://priv.example.com/fileSequence2680.ts',
'https://priv.example.com/fileSequence2681.ts',
'https://priv.example.com/fileSequence2682.ts'] == [c['uri'] for c in data['segments']]
assert [8, 8, 8] == [c['duration'] for c in data['segments']]
def test_should_parse_playlist_with_encripted_segments_from_string():
data = m3u8.parse(PLAYLIST_WITH_ENCRIPTED_SEGMENTS)
assert 7794 == data['media_sequence']
assert 15 == data['targetduration']
assert 'AES-128' == data['key']['method']
assert 'https://priv.example.com/key.php?r=52' == data['key']['uri']
assert ['http://media.example.com/fileSequence52-1.ts',
'http://media.example.com/fileSequence52-2.ts',
'http://media.example.com/fileSequence52-3.ts'] == [c['uri'] for c in data['segments']]
assert [15, 15, 15] == [c['duration'] for c in data['segments']]
def test_should_load_playlist_with_iv_from_string():
data = m3u8.parse(PLAYLIST_WITH_ENCRIPTED_SEGMENTS_AND_IV)
assert "/hls-key/key.bin" == data['key']['uri']
assert "AES-128" == data['key']['method']
assert "0X10ef8f758ca555115584bb5b3c687f52" == data['key']['iv']
def test_should_parse_title_from_playlist():
data = m3u8.parse(SIMPLE_PLAYLIST_WITH_TITLE)
assert 1 == len(data['segments'])
assert 5220 == data['segments'][0]['duration']
assert "A sample title" == data['segments'][0]['title']
assert "http://media.example.com/entire.ts" == data['segments'][0]['uri']
def test_should_parse_variant_playlist():
data = m3u8.parse(VARIANT_PLAYLIST)
playlists = list(data['playlists'])
assert True == data['is_variant']
assert 4 == len(playlists)
assert 'http://example.com/low.m3u8' == playlists[0]['uri']
assert '1' == playlists[0]['stream_info']['program_id']
assert '1280000' == playlists[0]['stream_info']['bandwidth']
assert 'http://example.com/audio-only.m3u8' == playlists[-1]['uri']
assert '1' == playlists[-1]['stream_info']['program_id']
assert '65000' == playlists[-1]['stream_info']['bandwidth']
assert 'mp4a.40.5,avc1.42801e' == playlists[-1]['stream_info']['codecs']
def test_should_parse_endlist_playlist():
data = m3u8.parse(SIMPLE_PLAYLIST)
assert True == data['is_endlist']
data = m3u8.parse(SLIDING_WINDOW_PLAYLIST)
assert False == data['is_endlist']
def test_should_parse_ALLOW_CACHE():
data = m3u8.parse(PLAYLIST_WITH_ENCRIPTED_SEGMENTS_AND_IV)
assert 'no' == data['allow_cache']
def test_should_parse_VERSION():
data = m3u8.parse(PLAYLIST_WITH_ENCRIPTED_SEGMENTS_AND_IV)
assert '2' == data['version']
| [
"m3u8.parse"
] | [((101, 128), 'm3u8.parse', 'm3u8.parse', (['SIMPLE_PLAYLIST'], {}), '(SIMPLE_PLAYLIST)\n', (111, 128), False, 'import m3u8\n'), ((403, 449), 'm3u8.parse', 'm3u8.parse', (['PLAYLIST_WITH_NON_INTEGER_DURATION'], {}), '(PLAYLIST_WITH_NON_INTEGER_DURATION)\n', (413, 449), False, 'import m3u8\n'), ((968, 1003), 'm3u8.parse', 'm3u8.parse', (['SLIDING_WINDOW_PLAYLIST'], {}), '(SLIDING_WINDOW_PLAYLIST)\n', (978, 1003), False, 'import m3u8\n'), ((1453, 1497), 'm3u8.parse', 'm3u8.parse', (['PLAYLIST_WITH_ENCRIPTED_SEGMENTS'], {}), '(PLAYLIST_WITH_ENCRIPTED_SEGMENTS)\n', (1463, 1497), False, 'import m3u8\n'), ((2053, 2104), 'm3u8.parse', 'm3u8.parse', (['PLAYLIST_WITH_ENCRIPTED_SEGMENTS_AND_IV'], {}), '(PLAYLIST_WITH_ENCRIPTED_SEGMENTS_AND_IV)\n', (2063, 2104), False, 'import m3u8\n'), ((2329, 2367), 'm3u8.parse', 'm3u8.parse', (['SIMPLE_PLAYLIST_WITH_TITLE'], {}), '(SIMPLE_PLAYLIST_WITH_TITLE)\n', (2339, 2367), False, 'import m3u8\n'), ((2649, 2677), 'm3u8.parse', 'm3u8.parse', (['VARIANT_PLAYLIST'], {}), '(VARIANT_PLAYLIST)\n', (2659, 2677), False, 'import m3u8\n'), ((3307, 3334), 'm3u8.parse', 'm3u8.parse', (['SIMPLE_PLAYLIST'], {}), '(SIMPLE_PLAYLIST)\n', (3317, 3334), False, 'import m3u8\n'), ((3385, 3420), 'm3u8.parse', 'm3u8.parse', (['SLIDING_WINDOW_PLAYLIST'], {}), '(SLIDING_WINDOW_PLAYLIST)\n', (3395, 3420), False, 'import m3u8\n'), ((3509, 3560), 'm3u8.parse', 'm3u8.parse', (['PLAYLIST_WITH_ENCRIPTED_SEGMENTS_AND_IV'], {}), '(PLAYLIST_WITH_ENCRIPTED_SEGMENTS_AND_IV)\n', (3519, 3560), False, 'import m3u8\n'), ((3645, 3696), 'm3u8.parse', 'm3u8.parse', (['PLAYLIST_WITH_ENCRIPTED_SEGMENTS_AND_IV'], {}), '(PLAYLIST_WITH_ENCRIPTED_SEGMENTS_AND_IV)\n', (3655, 3696), False, 'import m3u8\n')] |
import adsk.core
import adsk.fusion
import traceback
import math
import collections
from adsk.fusion import BRepFaces
from .Fusion360Utilities.Fusion360Utilities import AppObjects, combine_feature
from .Fusion360Utilities.Fusion360CommandBase import Fusion360CommandBase
Point = collections.namedtuple("Point", ["x", "y"])
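# The pointy_* helpers below return the i-th corner of a regular polygon centred at
# `center` with circumradius `size`: hexagon corners sit at 60*i - 30 degrees
# (pointy-top orientation), square corners at 90*i degrees, and pointy_shape_corner
# generalises this to any side count with a configurable angular offset.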
def pointy_hex_corner(center, size, i):
angle_deg = 60 * i - 30
angle_rad = math.pi / 180 * angle_deg
return adsk.core.Point3D.create(center.x + size * math.cos(angle_rad),
center.y + size * math.sin(angle_rad),
0)
def pointy_square_corner(center, size, i):
angle_deg = 90 * i
angle_rad = math.pi / 180 * angle_deg
return adsk.core.Point3D.create(center.x + size * math.cos(angle_rad),
center.y + size * math.sin(angle_rad),
0)
def pointy_shape_corner(center, size, i, offset, sides):
angle_deg = (360 / sides) * i - offset
angle_rad = math.pi / 180 * angle_deg
return adsk.core.Point3D.create(center.x + size * math.cos(angle_rad),
center.y + size * math.sin(angle_rad),
0)
# Alternate Feature Method, cut
def hex_sketch(center, input_size, thickness):
# Get the root component of the active design.
ao = AppObjects()
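    # A hexagon's apothem is (sqrt(3)/2) * circumradius, so shrinking the circumradius
    # by thickness / sqrt(3) insets every flat side by thickness / 2, leaving ribs of
    # roughly `thickness` between neighbouring cells.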
gap = thickness / math.sqrt(3)
sides = 6
corner_function = pointy_hex_corner
spoke = (input_size / 2) - gap
# Create a new sketch on the xy plane.
sketches = ao.root_comp.sketches
xy_plane = ao.root_comp.xYConstructionPlane
sketch = sketches.add(xy_plane)
lines = sketch.sketchCurves.sketchLines
start_point = pointy_hex_corner(center, spoke, 0)
previous_point = start_point
first = True
for corner in range(1, sides):
new_point = corner_function(center, spoke, corner)
line = lines.addByTwoPoints(previous_point, new_point)
if first:
start_point = line.startSketchPoint
first = False
previous_point = line.endSketchPoint
lines.addByTwoPoints(previous_point, start_point)
# Get the profile defined by the circle.
prof = sketch.profiles.item(0)
return prof
def start_sketch(z):
ao = AppObjects()
# Get construction planes
planes = ao.root_comp.constructionPlanes
xy_plane = ao.root_comp.xYConstructionPlane
plane_input = planes.createInput()
offset_value = adsk.core.ValueInput.createByReal(z)
plane_input.setByOffset(xy_plane, offset_value)
sketch_plane = planes.add(plane_input)
sketches = ao.root_comp.sketches
sketch = sketches.add(sketch_plane)
return sketch
# Alternate Feature Method, cut
def circle_sketch(center, input_size, gap):
spoke = (input_size / 2) - gap
sketch = start_sketch(center.z)
sketch_circles = sketch.sketchCurves.sketchCircles
circle_point = adsk.core.Point3D.create(center.x, center.y, 0)
sketch_circles.addByCenterRadius(circle_point, spoke)
prof = sketch.profiles.item(0)
return prof
# Alternate Feature Method, cut
def shape_sketch(center, input_size, gap, sides, corner_function):
spoke = (input_size / 2) - gap
sketch = start_sketch(center.z)
lines = sketch.sketchCurves.sketchLines
start_point = corner_function(center, spoke, 0)
previous_point = start_point
first = True
for corner in range(1, sides):
new_point = corner_function(center, spoke, corner)
line = lines.addByTwoPoints(previous_point, new_point)
if first:
start_point = line.startSketchPoint
first = False
previous_point = line.endSketchPoint
lines.addByTwoPoints(previous_point, start_point)
# Get the profile defined by the circle.
prof = sketch.profiles.item(0)
return prof
# Alternate Feature Method, cut
def shape_sketch2(center, input_size, gap, sides, offset):
spoke = (input_size / 2) - gap
sketch = start_sketch(center.z)
lines = sketch.sketchCurves.sketchLines
start_point = pointy_shape_corner(center, spoke, 0, offset, sides)
previous_point = start_point
first = True
for corner in range(1, sides):
new_point = pointy_shape_corner(center, spoke, corner, offset, sides)
line = lines.addByTwoPoints(previous_point, new_point)
if first:
start_point = line.startSketchPoint
first = False
previous_point = line.endSketchPoint
lines.addByTwoPoints(previous_point, start_point)
# Get the profile defined by the circle.
prof = sketch.profiles.item(0)
return prof
def shape_extrude(prof, height):
ao = AppObjects()
extrude_features = ao.root_comp.features.extrudeFeatures
extrude_input = extrude_features.createInput(prof, adsk.fusion.FeatureOperations.NewBodyFeatureOperation)
extrude_input.setSymmetricExtent(height, True)
extrude_feature = extrude_features.add(extrude_input)
return extrude_feature
# Generic pattern in X Y Directions
def cut_pattern(extrude_collection, x_qty, d1_space, y_qty, d2_space):
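    # Patterns the given bodies along the component X axis (x_qty copies, d1_space apart)
    # and Y axis (y_qty copies, d2_space apart), symmetric about both directions.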
ao = AppObjects()
pattern_features = ao.root_comp.features.rectangularPatternFeatures
x_axis = ao.root_comp.xConstructionAxis
y_axis = ao.root_comp.yConstructionAxis
pattern_type = adsk.fusion.PatternDistanceType.SpacingPatternDistanceType
pattern_input = pattern_features.createInput(extrude_collection, x_axis, x_qty, d1_space, pattern_type)
pattern_input.directionTwoEntity = y_axis
pattern_input.distanceTwo = d2_space
pattern_input.quantityTwo = y_qty
pattern_input.isSymmetricInDirectionOne = True
pattern_input.isSymmetricInDirectionTwo = True
pattern_feature = ao.root_comp.features.rectangularPatternFeatures.add(pattern_input)
return pattern_feature
# Not Used
def second_hex_body(size, hex_body, core_body, x_space, y_space):
ao = AppObjects()
copy_collection = adsk.core.ObjectCollection.create()
copy_collection.add(hex_body)
copy_body_feature = ao.root_comp.features.copyPasteBodies.add(copy_collection)
hex_body_2 = copy_body_feature.bodies[0]
move_collection = adsk.core.ObjectCollection.create()
move_collection.add(hex_body_2)
transform = adsk.core.Matrix3D.create()
transform.translation = adsk.core.Vector3D.create(x_space / 2, .75 * y_space, 0)
move_input = ao.root_comp.features.moveFeatures.createInput(move_collection, transform)
ao.root_comp.features.moveFeatures.add(move_input)
return hex_body_2
def create_core_body(input_body, input_shell_thickness):
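    # Builds a solid core from the cavity produced by shelling input_body:
    # shell inward, offset the new shell faces by 0 to get surface copies,
    # boundary-fill those surfaces into a solid, then delete the temporary surfaces.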
ao = AppObjects()
# Shell Main body
shell_features = ao.root_comp.features.shellFeatures
input_collection = adsk.core.ObjectCollection.create()
input_collection.add(input_body)
shell_input = shell_features.createInput(input_collection)
shell_input.insideThickness = adsk.core.ValueInput.createByReal(input_shell_thickness)
shell_feature = shell_features.add(shell_input)
# Offset internal faces 0
shell_faces = shell_feature.faces
tools = adsk.core.ObjectCollection.create()
for face in shell_faces:
tools.add(face)
distance = adsk.core.ValueInput.createByReal(0)
offset_features = ao.root_comp.features.offsetFeatures
offset_input = offset_features.createInput(tools, distance,
adsk.fusion.FeatureOperations.NewBodyFeatureOperation)
offset_feature = offset_features.add(offset_input)
# Boundary FIll
offset_tools = adsk.core.ObjectCollection.create()
for body in offset_feature.bodies:
offset_tools.add(body)
boundary_fills = ao.root_comp.features.boundaryFillFeatures
boundary_fill_input = boundary_fills.createInput(offset_tools,
adsk.fusion.FeatureOperations.NewBodyFeatureOperation)
cell = boundary_fill_input.bRepCells.item(0)
cell.isSelected = True
boundary_fill = boundary_fills.add(boundary_fill_input)
core_body = boundary_fill.bodies[0]
# Remove extra surface
remove_features = ao.root_comp.features.removeFeatures
for body in offset_feature.bodies:
remove_features.add(body)
return core_body
# Class for a Fusion 360 Command
# Place your program logic here
# Delete the line that says "pass" for any method you want to use
class FillerCommand(Fusion360CommandBase):
# Run whenever a user makes any change to a value or selection in the addin UI
# Commands in here will be run through the Fusion processor and changes will be reflected in Fusion graphics area
def on_preview(self, command: adsk.core.Command, inputs: adsk.core.CommandInputs, args, input_values):
pass
# Run after the command is finished.
# Can be used to launch another command automatically or do other clean up.
def on_destroy(self, command: adsk.core.Command, inputs: adsk.core.CommandInputs, reason, input_values):
pass
# Run when any input is changed.
# Can be used to check a value and then update the add-in UI accordingly
def on_input_changed(self, command: adsk.core.Command, inputs: adsk.core.CommandInputs, changed_input,
input_values):
pass
# Run when the user presses OK
# This is typically where your main program logic would go
def on_execute(self, command: adsk.core.Command, inputs: adsk.core.CommandInputs, args, input_values):
# Get a reference to all relevant application objects in a dictionary
ao = AppObjects()
# Get the values from the user input
infill_type = input_values['type_input']
body_type = input_values['body_type_input']
input_size = input_values['size_input']
input_shell_thickness = input_values['shell_input']
input_rib_thickness = input_values['rib_input']
all_selections = input_values['selection_input']
start_body = adsk.fusion.BRepBody.cast(all_selections[0])
bounding_box = start_body.boundingBox
start_volume = start_body.volume
start_body_count = ao.design.rootComponent.bRepBodies.count
if body_type == "Create Shell":
# Create Core Body of input body
core_body = create_core_body(start_body, input_shell_thickness)
else:
core_body = start_body
# General bounding box and pattern
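        # extent_vector is the bounding-box size and mid_vector its centre; the cutting
        # extrudes are made 10% taller than the body so they pass all the way through it.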
extent_vector = bounding_box.maxPoint.asVector()
extent_vector.subtract(bounding_box.minPoint.asVector())
mid_vector = extent_vector.copy()
mid_vector.scaleBy(.5)
mid_vector.add(bounding_box.minPoint.asVector())
height_raw = extent_vector.z * 1.1
height = adsk.core.ValueInput.createByReal(height_raw)
# Hex specific
if infill_type == "Hex":
gap = input_rib_thickness / math.sqrt(3)
sides = 6
offset = 30
corner_function = pointy_hex_corner
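            # Pointy-top hex grid with circumradius ~input_size / 2: cells in a row are
            # sqrt(3)/2 * input_size apart and rows are 3/4 * input_size apart.  The
            # rectangular pattern below steps by 2 * x_space and 2 * y_space, and the
            # second extrude, offset by (x_space, y_space), fills the staggered rows.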
x_space = math.sqrt(3) * input_size / 4
y_space = 3 * input_size / 4
# Square specific
elif infill_type == "Square":
gap = input_rib_thickness * math.sqrt(2) / 2
sides = 4
offset = 0
corner_function = pointy_square_corner
x_space = input_size / 2
y_space = input_size / 2
# Triangle specific
elif infill_type == "Triangle":
gap = input_rib_thickness
sides = 3
offset = 60
x_space = input_size / 4
y_space = math.sqrt(3) * input_size / 4
elif infill_type == "Circle":
gap = input_rib_thickness / 2
x_space = input_size / 2
y_space = math.sqrt(3) * input_size / 2
else:
return
# TODO at center of volume
# cp_1 = adsk.core.Point3D.create(0, 0, 0)
cp_1 = mid_vector.asPoint()
cp_2 = adsk.core.Point3D.create(cp_1.x + x_space, cp_1.y + y_space, cp_1.z)
if infill_type in ['Square', 'Hex']:
prof_1 = shape_sketch2(cp_1, input_size, gap, sides, offset)
prof_2 = shape_sketch2(cp_2, input_size, gap, sides, offset)
elif infill_type in ['Triangle']:
cp_3 = adsk.core.Point3D.create(cp_1.x + input_size, cp_1.y, cp_1.z)
cp_4 = adsk.core.Point3D.create(cp_1.x + (3 * x_space), cp_1.y + y_space, cp_1.z)
prof_1 = shape_sketch2(cp_1, input_size, gap, sides, 0)
prof_2 = shape_sketch2(cp_2, input_size, gap, sides, offset)
prof_3 = shape_sketch2(cp_3, input_size, gap, sides, offset)
prof_4 = shape_sketch2(cp_4, input_size, gap, sides, 0)
elif infill_type in ['Circle']:
prof_1 = circle_sketch(cp_1, input_size, gap)
prof_2 = circle_sketch(cp_2, input_size, gap)
else:
return
d1_space = adsk.core.ValueInput.createByReal(x_space * 2)
d2_space = adsk.core.ValueInput.createByReal(y_space * 2)
x_qty_raw = math.ceil(extent_vector.x / d1_space.realValue) + 4
y_qty_raw = math.ceil(extent_vector.y / d2_space.realValue) + 4
x_qty = adsk.core.ValueInput.createByReal(x_qty_raw)
y_qty = adsk.core.ValueInput.createByReal(y_qty_raw)
extrude_cut_collection = adsk.core.ObjectCollection.create()
extrude_1 = shape_extrude(prof_1, height)
extrude_2 = shape_extrude(prof_2, height)
extrude_cut_collection.add(extrude_1.bodies[0])
extrude_cut_collection.add(extrude_2.bodies[0])
if infill_type in ['Triangle']:
d1_space = adsk.core.ValueInput.createByReal(3 * input_size / 2)
x_qty = adsk.core.ValueInput.createByReal(x_qty_raw / 2)
extrude_3 = shape_extrude(prof_3, height)
extrude_4 = shape_extrude(prof_4, height)
extrude_cut_collection.add(extrude_3.bodies[0])
extrude_cut_collection.add(extrude_4.bodies[0])
cut_pattern(extrude_cut_collection, x_qty, d1_space, y_qty, d2_space)
cut_tools = adsk.core.ObjectCollection.create()
cut_tools.add(extrude_1.bodies[0])
cut_tools.add(extrude_2.bodies[0])
body_count = ao.design.rootComponent.bRepBodies.count
for count in range(body_count - (2 * x_qty_raw * y_qty_raw), body_count):
cut_tools.add(ao.design.rootComponent.bRepBodies[count])
combine_features = ao.root_comp.features.combineFeatures
cut_combine_input = combine_features.createInput(core_body, cut_tools)
cut_combine_input.operation = adsk.fusion.FeatureOperations.CutFeatureOperation
combine_features.add(cut_combine_input)
if body_type == "Create Shell":
final_combine_tools = adsk.core.ObjectCollection.create()
body_count = ao.design.rootComponent.bRepBodies.count
for count in range(start_body_count, body_count):
final_combine_tools.add(ao.design.rootComponent.bRepBodies[count])
# final_combine_tools.add(core_body)
final_combine_input = combine_features.createInput(start_body, final_combine_tools)
final_combine_input.operation = adsk.fusion.FeatureOperations.JoinFeatureOperation
combine_features.add(final_combine_input)
else:
pass
final_volume = start_body.volume
ao.ui.messageBox(
'The final percentage infill is: {0:.2g}% \n'.format(100 * final_volume / start_volume)
)
# Run when the user selects your command icon from the Fusion 360 UI
# Typically used to create and display a command dialog box
# The following is a basic sample of a dialog UI
def on_create(self, command: adsk.core.Command, inputs: adsk.core.CommandInputs):
ao = AppObjects()
# ao.ui.messageBox("Test")
# Create a default value using a string
default_size = adsk.core.ValueInput.createByString('.5 in')
default_shell = adsk.core.ValueInput.createByString('.3 in')
default_rib = adsk.core.ValueInput.createByString('.1 in')
ao = AppObjects()
# Create a few inputs in the UI
inputs.addValueInput('size_input', 'Size (Major Diameter)',
ao.units_manager.defaultLengthUnits, default_size)
inputs.addValueInput('shell_input', 'Shell Thickness (Outer Wall)',
ao.units_manager.defaultLengthUnits, default_shell)
inputs.addValueInput('rib_input', 'Rib Thickness',
ao.units_manager.defaultLengthUnits, default_rib)
# inputs.addBoolValueInput('bool_input', '***Sample***Checked', True)
# inputs.addStringValueInput('string_input', '***Sample***String Value', 'Default value')
selection = inputs.addSelectionInput('selection_input', 'Body for Infill', 'Select a solid body')
selection.addSelectionFilter("SolidBodies")
selection.setSelectionLimits(1, 1)
drop_down_input = inputs.addDropDownCommandInput('type_input', 'Infill Style',
adsk.core.DropDownStyles.TextListDropDownStyle)
drop_down_input.listItems.add('Hex', True)
drop_down_input.listItems.add('Square', False)
drop_down_input.listItems.add('Triangle', False)
drop_down_input.listItems.add('Circle', False)
radio = inputs.addRadioButtonGroupCommandInput("body_type_input", "Infill Type")
radio.listItems.add("Create Shell", True)
radio.listItems.add("Direct Cut", False)
| [
"collections.namedtuple",
"math.ceil",
"math.sqrt",
"math.cos",
"math.sin"
] | [((282, 325), 'collections.namedtuple', 'collections.namedtuple', (['"""Point"""', "['x', 'y']"], {}), "('Point', ['x', 'y'])\n", (304, 325), False, 'import collections\n'), ((1436, 1448), 'math.sqrt', 'math.sqrt', (['(3)'], {}), '(3)\n', (1445, 1448), False, 'import math\n'), ((13144, 13191), 'math.ceil', 'math.ceil', (['(extent_vector.x / d1_space.realValue)'], {}), '(extent_vector.x / d1_space.realValue)\n', (13153, 13191), False, 'import math\n'), ((13216, 13263), 'math.ceil', 'math.ceil', (['(extent_vector.y / d2_space.realValue)'], {}), '(extent_vector.y / d2_space.realValue)\n', (13225, 13263), False, 'import math\n'), ((492, 511), 'math.cos', 'math.cos', (['angle_rad'], {}), '(angle_rad)\n', (500, 511), False, 'import math\n'), ((567, 586), 'math.sin', 'math.sin', (['angle_rad'], {}), '(angle_rad)\n', (575, 586), False, 'import math\n'), ((791, 810), 'math.cos', 'math.cos', (['angle_rad'], {}), '(angle_rad)\n', (799, 810), False, 'import math\n'), ((866, 885), 'math.sin', 'math.sin', (['angle_rad'], {}), '(angle_rad)\n', (874, 885), False, 'import math\n'), ((1124, 1143), 'math.cos', 'math.cos', (['angle_rad'], {}), '(angle_rad)\n', (1132, 1143), False, 'import math\n'), ((1199, 1218), 'math.sin', 'math.sin', (['angle_rad'], {}), '(angle_rad)\n', (1207, 1218), False, 'import math\n'), ((10961, 10973), 'math.sqrt', 'math.sqrt', (['(3)'], {}), '(3)\n', (10970, 10973), False, 'import math\n'), ((11090, 11102), 'math.sqrt', 'math.sqrt', (['(3)'], {}), '(3)\n', (11099, 11102), False, 'import math\n'), ((11266, 11278), 'math.sqrt', 'math.sqrt', (['(2)'], {}), '(2)\n', (11275, 11278), False, 'import math\n'), ((11665, 11677), 'math.sqrt', 'math.sqrt', (['(3)'], {}), '(3)\n', (11674, 11677), False, 'import math\n'), ((11835, 11847), 'math.sqrt', 'math.sqrt', (['(3)'], {}), '(3)\n', (11844, 11847), False, 'import math\n')] |
from time import time
from bioptim import ControlType
from .JumperOcp import JumperOcp, Jumper
def generate_table(out):
root_path_model = "/".join(__file__.split("/")[:-1])
jumper_model = Jumper(root_path_model + "/models/")
jumper = JumperOcp(jumper=jumper_model, control_type=ControlType.CONSTANT, n_phases=3)
tic = time()
sol = jumper.solve(limit_memory_max_iter=400, exact_max_iter=1000, force_no_graph=True, linear_solver="ma57")
toc = time() - tic
sol_merged = sol.merge_phases()
out.nx = sol_merged.states["all"].shape[0]
out.nu = sol_merged.controls["all"].shape[0]
out.ns = sol_merged.ns[0]
out.solver.append(out.Solver("Ipopt"))
out.solver[0].n_iteration = sol.iterations
out.solver[0].cost = sol.cost
out.solver[0].convergence_time = toc
out.solver[0].compute_error_single_shooting(sol, 1)
| [
"time.time"
] | [((339, 345), 'time.time', 'time', ([], {}), '()\n', (343, 345), False, 'from time import time\n'), ((470, 476), 'time.time', 'time', ([], {}), '()\n', (474, 476), False, 'from time import time\n')] |
from flask import Flask, request, abort
from werkzeug.datastructures import ImmutableMultiDict
import json
from api_types import (
DifficultyLevel,
ParkTrailsResponse,
ParkTrailsRequest,
TrailDetailsResponse,
)
from trail_selector import select_trails
app = Flask(__name__)
DAYS_ARG = "days"
DIFFICULTY_ARG = "difficulty"
PARTY_SIZE_ARG = "party_size"
DEFAULT_DAYS = 3
DEFAULT_DIFFICULTY = DifficultyLevel.medium
DEFAULT_PARTY_SIZE = 4
def parse_park_trails_args(args: ImmutableMultiDict) -> ParkTrailsRequest:
days = DEFAULT_DAYS
difficulty = DEFAULT_DIFFICULTY
party_size = DEFAULT_PARTY_SIZE
if DAYS_ARG in args:
try:
days = int(args[DAYS_ARG])
except ValueError:
abort(400)
if DIFFICULTY_ARG in args:
try:
difficulty = DifficultyLevel[args[DIFFICULTY_ARG]]
except KeyError:
abort(400)
if PARTY_SIZE_ARG in args:
try:
party_size = int(args[PARTY_SIZE_ARG])
except ValueError:
abort(400)
return ParkTrailsRequest(days, difficulty, party_size)
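# Hypothetical request: GET /api/0/park_trails/yosemite?days=2&difficulty=medium&party_size=6
# would yield ParkTrailsRequest(2, DifficultyLevel.medium, 6); malformed values abort with HTTP 400.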
@app.route('/api/0/park_trails/<park_name>')
def park_trails(park_name: str) -> str:
trails_request = parse_park_trails_args(request.args)
response = select_trails(trails_request)
return json.dumps(response._asdict())
@app.route('/api/0/trail/<int:tid>')
def trail_details(tid: int) -> str:
trail_name = f"Trail #{tid}"
response = TrailDetailsResponse(trail_name, "Some useful information about the trail here")
return json.dumps(response._asdict())
if __name__ == "__main__":
app.run(host='0.0.0.0', port=8080) | [
"api_types.TrailDetailsResponse",
"flask.abort",
"flask.Flask",
"trail_selector.select_trails",
"api_types.ParkTrailsRequest"
] | [((268, 283), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (273, 283), False, 'from flask import Flask, request, abort\n'), ((1064, 1111), 'api_types.ParkTrailsRequest', 'ParkTrailsRequest', (['days', 'difficulty', 'party_size'], {}), '(days, difficulty, party_size)\n', (1081, 1111), False, 'from api_types import DifficultyLevel, ParkTrailsResponse, ParkTrailsRequest, TrailDetailsResponse\n'), ((1272, 1301), 'trail_selector.select_trails', 'select_trails', (['trails_request'], {}), '(trails_request)\n', (1285, 1301), False, 'from trail_selector import select_trails\n'), ((1467, 1552), 'api_types.TrailDetailsResponse', 'TrailDetailsResponse', (['trail_name', '"""Some useful information about the trail here"""'], {}), "(trail_name, 'Some useful information about the trail here'\n )\n", (1487, 1552), False, 'from api_types import DifficultyLevel, ParkTrailsResponse, ParkTrailsRequest, TrailDetailsResponse\n'), ((738, 748), 'flask.abort', 'abort', (['(400)'], {}), '(400)\n', (743, 748), False, 'from flask import Flask, request, abort\n'), ((894, 904), 'flask.abort', 'abort', (['(400)'], {}), '(400)\n', (899, 904), False, 'from flask import Flask, request, abort\n'), ((1040, 1050), 'flask.abort', 'abort', (['(400)'], {}), '(400)\n', (1045, 1050), False, 'from flask import Flask, request, abort\n')] |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from ._clierror import ConflictRequestError
from ._utils import wait_till_end
from .vendored_sdks.appplatform.v2022_03_01_preview import models as models_20220301preview
from azure.cli.core.azclierror import (AzureInternalError, CLIInternalError)
from azure.core.exceptions import HttpResponseError
from msrestazure.azure_exceptions import CloudError
from azure.cli.core.commands import arm as _arm
from azure.cli.core.commands.client_factory import get_mgmt_service_client
from azure.cli.core.profiles import (ResourceType, get_sdk)
from knack.log import get_logger
from time import sleep
logger = get_logger(__name__)
ENABLE_LOWER = "enable"
DISABLE_LOWER = "disable"
UPDATING_LOWER = "updating"
DELETING_LOWER = "deleting"
APP_CREATE_OR_UPDATE_SLEEP_INTERVAL = 2
def app_identity_assign(cmd,
client,
resource_group,
service,
name,
role=None,
scope=None,
system_assigned=None,
user_assigned=None):
"""
Note: Always use sync method to operate managed identity to avoid data inconsistency.
:param role: role name of role assignment for system-assigned managed identity.
:param scope: scope of role assignment for system-assigned managed identity.
:param system_assigned: 1. None or False: Don't change system-assigned managed identity.
                            2. True: Enable system-assigned managed identity on the app.
:param user_assigned: 1. None: Don't change user-assigned managed identities.
                          2. A non-empty list of user-assigned managed identity resource ids to assign to the app.
                          3. An empty list: should be blocked by the validator.
"""
# TODO(jiec): Retire legacy identity assign after migration.
poller = None
if _is_legacy_identity_assign(system_assigned, user_assigned):
poller = _legacy_app_identity_assign(cmd, client, resource_group, service, name)
else:
poller = _new_app_identity_assign(cmd, client, resource_group, service, name, system_assigned, user_assigned)
    wait_till_end(cmd, poller)
poller.result()
if "succeeded" != poller.status().lower():
return poller
if role and scope:
_create_role_assignment(cmd, client, resource_group, service, name, role, scope)
return client.apps.get(resource_group, service, name)
def app_identity_remove(cmd,
client,
resource_group,
service,
name,
system_assigned=None,
user_assigned=None):
"""
Note: Always use sync method to operate managed identity to avoid data inconsistency.
:param system_assigned: 1) None or False: Don't change system-assigned managed identity.
2) True: remove system-assigned managed identity
:param user_assigned: 1) None: Don't change user-assigned managed identities.
2) An empty list: remove all user-assigned managed identities.
3) A non-empty list of user-assigned managed identity resource id to remove.
"""
app = client.apps.get(resource_group, service, name)
if _app_not_updatable(app):
raise ConflictRequestError("Failed to remove managed identities since app is in {} state.".format(app.properties.provisioning_state))
if not app.identity:
logger.warning("Skip remove managed identity since no identities assigned to app.")
return
if not app.identity.type:
raise AzureInternalError("Invalid existed identity type {}.".format(app.identity.type))
if app.identity.type == models_20220301preview.ManagedIdentityType.NONE:
logger.warning("Skip remove managed identity since identity type is {}.".format(app.identity.type))
return
# TODO(jiec): For back-compatible, convert to remove system-assigned only case. Remove code after migration.
if system_assigned is None and user_assigned is None:
system_assigned = True
new_user_identities = _get_new_user_identities_for_remove(app.identity.user_assigned_identities, user_assigned)
new_identity_type = _get_new_identity_type_for_remove(app.identity.type, system_assigned, new_user_identities)
user_identity_payload = _get_user_identity_payload_for_remove(new_identity_type, user_assigned)
target_identity = models_20220301preview.ManagedIdentityProperties()
target_identity.type = new_identity_type
target_identity.user_assigned_identities = user_identity_payload
app_resource = models_20220301preview.AppResource()
app_resource.identity = target_identity
poller = client.apps.begin_update(resource_group, service, name, app_resource)
wait_till_end(cmd, poller)
poller.result()
if "succeeded" != poller.status().lower():
return poller
else:
return client.apps.get(resource_group, service, name)
def app_identity_force_set(cmd,
client,
resource_group,
service,
name,
system_assigned,
user_assigned):
"""
:param system_assigned: string, disable or enable
    :param user_assigned: 1. A single-element string list containing 'disable'.
                          2. A non-empty list of user-assigned managed identity resource ids.
"""
exist_app = client.apps.get(resource_group, service, name)
if _app_not_updatable(exist_app):
raise ConflictRequestError("Failed to force set managed identities since app is in {} state.".format(
exist_app.properties.provisioning_state))
new_identity_type = _get_new_identity_type_for_force_set(system_assigned, user_assigned)
user_identity_payload = _get_user_identity_payload_for_force_set(user_assigned)
target_identity = models_20220301preview.ManagedIdentityProperties()
target_identity.type = new_identity_type
target_identity.user_assigned_identities = user_identity_payload
# All read-only attributes will be droped by SDK automatically.
exist_app.identity = target_identity
poller = client.apps.begin_create_or_update(resource_group, service, name, exist_app)
wait_till_end(cmd, poller)
poller.result()
if "succeeded" != poller.status().lower():
return poller
else:
return client.apps.get(resource_group, service, name)
def app_identity_show(cmd, client, resource_group, service, name):
app = client.apps.get(resource_group, service, name)
return app.identity
def _is_legacy_identity_assign(system_assigned, user_assigned):
return not system_assigned and not user_assigned
def _legacy_app_identity_assign(cmd, client, resource_group, service, name):
"""
Enable system-assigned managed identity on app.
"""
app = client.apps.get(resource_group, service, name)
if _app_not_updatable(app):
raise ConflictRequestError("Failed to enable system-assigned managed identity since app is in {} state.".format(
app.properties.provisioning_state))
new_identity_type = models_20220301preview.ManagedIdentityType.SYSTEM_ASSIGNED
if app.identity and app.identity.type in (models_20220301preview.ManagedIdentityType.USER_ASSIGNED,
models_20220301preview.ManagedIdentityType.SYSTEM_ASSIGNED_USER_ASSIGNED):
new_identity_type = models_20220301preview.ManagedIdentityType.SYSTEM_ASSIGNED_USER_ASSIGNED
target_identity = models_20220301preview.ManagedIdentityProperties(type=new_identity_type)
app_resource = models_20220301preview.AppResource(identity=target_identity)
logger.warning("Start to enable system-assigned managed identity.")
return client.apps.begin_update(resource_group, service, name, app_resource)
def _new_app_identity_assign(cmd, client, resource_group, service, name, system_assigned, user_assigned):
app = client.apps.get(resource_group, service, name)
if _app_not_updatable(app):
raise ConflictRequestError(
"Failed to assign managed identities since app is in {} state.".format(app.properties.provisioning_state))
new_identity_type = _get_new_identity_type_for_assign(app, system_assigned, user_assigned)
user_identity_payload = _get_user_identity_payload_for_assign(new_identity_type, user_assigned)
identity_payload = models_20220301preview.ManagedIdentityProperties()
identity_payload.type = new_identity_type
identity_payload.user_assigned_identities = user_identity_payload
app_resource = models_20220301preview.AppResource(identity=identity_payload)
logger.warning("Start to assign managed identities to app.")
return client.apps.begin_update(resource_group, service, name, app_resource)
def _get_new_identity_type_for_assign(app, system_assigned, user_assigned):
new_identity_type = None
if app.identity and app.identity.type:
new_identity_type = app.identity.type
else:
new_identity_type = models_20220301preview.ManagedIdentityType.NONE
if system_assigned:
if new_identity_type in (models_20220301preview.ManagedIdentityType.USER_ASSIGNED,
models_20220301preview.ManagedIdentityType.SYSTEM_ASSIGNED_USER_ASSIGNED):
new_identity_type = models_20220301preview.ManagedIdentityType.SYSTEM_ASSIGNED_USER_ASSIGNED
else:
new_identity_type = models_20220301preview.ManagedIdentityType.SYSTEM_ASSIGNED
if user_assigned:
if new_identity_type in (models_20220301preview.ManagedIdentityType.SYSTEM_ASSIGNED,
models_20220301preview.ManagedIdentityType.SYSTEM_ASSIGNED_USER_ASSIGNED):
new_identity_type = models_20220301preview.ManagedIdentityType.SYSTEM_ASSIGNED_USER_ASSIGNED
else:
new_identity_type = models_20220301preview.ManagedIdentityType.USER_ASSIGNED
if not new_identity_type or new_identity_type == models_20220301preview.ManagedIdentityType.NONE:
raise CLIInternalError("Internal error: invalid new identity type:{}.".format(new_identity_type))
return new_identity_type
def _get_user_identity_payload_for_assign(new_identity_type, new_user_identity_rid_list):
"""
:param new_user_identity_rid_list: 1. None object.
2. A non-empty list of user-assigned managed identity resource ID.
:return 1. None object.
2. A dict from user-assigned managed identity to an empty object.
"""
uid_payload = {}
if new_identity_type == models_20220301preview.ManagedIdentityType.SYSTEM_ASSIGNED:
pass
elif new_identity_type in (models_20220301preview.ManagedIdentityType.USER_ASSIGNED,
models_20220301preview.ManagedIdentityType.SYSTEM_ASSIGNED_USER_ASSIGNED):
if new_user_identity_rid_list:
for rid in new_user_identity_rid_list:
uid_payload[rid] = models_20220301preview.UserAssignedManagedIdentity()
if len(uid_payload) == 0:
uid_payload = None
return uid_payload
def _create_role_assignment(cmd, client, resource_group, service, name, role, scope):
app = client.apps.get(resource_group, service, name)
if not app.identity or not app.identity.principal_id:
raise AzureInternalError(
"Failed to create role assignment without object ID(principal ID) of system-assigned managed identity.")
identity_role_id = _arm.resolve_role_id(cmd.cli_ctx, role, scope)
assignments_client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_AUTHORIZATION).role_assignments
RoleAssignmentCreateParameters = get_sdk(cmd.cli_ctx, ResourceType.MGMT_AUTHORIZATION,
'RoleAssignmentCreateParameters', mod='models',
operation_group='role_assignments')
parameters = RoleAssignmentCreateParameters(role_definition_id=identity_role_id,
principal_id=app.identity.principal_id)
logger.warning("Creating an assignment with a role '%s' on the scope of '%s'", identity_role_id, scope)
retry_times = 36
assignment_name = _arm._gen_guid()
for i in range(0, retry_times):
try:
assignments_client.create(scope=scope, role_assignment_name=assignment_name,
parameters=parameters)
break
except (HttpResponseError, CloudError) as ex:
if 'role assignment already exists' in ex.message:
logger.warning('Role assignment already exists')
break
elif i < retry_times and ' does not exist in the directory ' in ex.message:
sleep(APP_CREATE_OR_UPDATE_SLEEP_INTERVAL)
logger.warning('Retrying role assignment creation: %s/%s', i + 1,
retry_times)
continue
else:
raise
def _get_new_user_identities_for_remove(exist_user_identity_dict, user_identity_list_to_remove):
"""
    :param exist_user_identity_dict: A dict from user-assigned managed identity resource id to identity object.
:param user_identity_list_to_remove: None, an empty list or a list of string of user-assigned managed identity resource id to remove.
:return A list of string of user-assigned managed identity resource ID.
"""
if not exist_user_identity_dict:
return []
# None
if user_identity_list_to_remove is None:
return list(exist_user_identity_dict.keys())
# Empty list means remove all user-assigned managed identities
if len(user_identity_list_to_remove) == 0:
return []
# Non-empty list
new_identities = []
for id in exist_user_identity_dict.keys():
if not id.lower() in user_identity_list_to_remove:
new_identities.append(id)
return new_identities
def _get_new_identity_type_for_remove(exist_identity_type, is_remove_system_identity, new_user_identities):
new_identity_type = exist_identity_type
exist_identity_type_str = exist_identity_type.lower()
if exist_identity_type_str == models_20220301preview.ManagedIdentityType.NONE.lower():
new_identity_type = models_20220301preview.ManagedIdentityType.NONE
elif exist_identity_type_str == models_20220301preview.ManagedIdentityType.SYSTEM_ASSIGNED.lower():
if is_remove_system_identity:
new_identity_type = models_20220301preview.ManagedIdentityType.NONE
else:
new_identity_type = models_20220301preview.ManagedIdentityType.SYSTEM_ASSIGNED
elif exist_identity_type_str == models_20220301preview.ManagedIdentityType.USER_ASSIGNED.lower():
if not new_user_identities:
new_identity_type = models_20220301preview.ManagedIdentityType.NONE
else:
new_identity_type = models_20220301preview.ManagedIdentityType.USER_ASSIGNED
elif exist_identity_type_str == models_20220301preview.ManagedIdentityType.SYSTEM_ASSIGNED_USER_ASSIGNED.lower():
if is_remove_system_identity and not new_user_identities:
new_identity_type = models_20220301preview.ManagedIdentityType.NONE
elif not is_remove_system_identity and not new_user_identities:
new_identity_type = models_20220301preview.ManagedIdentityType.SYSTEM_ASSIGNED
elif is_remove_system_identity and new_user_identities:
new_identity_type = models_20220301preview.ManagedIdentityType.USER_ASSIGNED
else:
new_identity_type = models_20220301preview.ManagedIdentityType.SYSTEM_ASSIGNED_USER_ASSIGNED
else:
raise AzureInternalError("Invalid identity type: {}.".format(exist_identity_type_str))
return new_identity_type
def _get_user_identity_payload_for_remove(new_identity_type, user_identity_list_to_remove):
"""
:param new_identity_type: ManagedIdentityType
:param user_identity_list_to_remove: None, an empty list or a list of string of user-assigned managed identity resource id to remove.
:return None object or a non-empty dict from user-assigned managed identity resource id to None object
"""
user_identity_payload = {}
if new_identity_type in (models_20220301preview.ManagedIdentityType.USER_ASSIGNED,
models_20220301preview.ManagedIdentityType.SYSTEM_ASSIGNED_USER_ASSIGNED):
        # empty list means remove all user-assigned managed identities
if user_identity_list_to_remove is not None and len(user_identity_list_to_remove) == 0:
raise CLIInternalError("When remove all user-assigned managed identities, "
"target identity type should not be {}.".format(new_identity_type))
# non-empty list
elif user_identity_list_to_remove:
for id in user_identity_list_to_remove:
user_identity_payload[id] = None
if not user_identity_payload:
user_identity_payload = None
return user_identity_payload
def _get_new_identity_type_for_force_set(system_assigned, user_assigned):
new_identity_type = models_20220301preview.ManagedIdentityType.NONE
if DISABLE_LOWER == system_assigned and DISABLE_LOWER != user_assigned[0]:
new_identity_type = models_20220301preview.ManagedIdentityType.USER_ASSIGNED
elif ENABLE_LOWER == system_assigned and DISABLE_LOWER == user_assigned[0]:
new_identity_type = models_20220301preview.ManagedIdentityType.SYSTEM_ASSIGNED
elif ENABLE_LOWER == system_assigned and DISABLE_LOWER != user_assigned[0]:
new_identity_type = models_20220301preview.ManagedIdentityType.SYSTEM_ASSIGNED_USER_ASSIGNED
return new_identity_type
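# Editor's note (not part of the original module): the mapping implemented by
# _get_new_identity_type_for_force_set above, assuming ENABLE_LOWER/DISABLE_LOWER
# are the lowercase literals "enable"/"disable":
#   system_assigned="disable", user_assigned=["disable"]       -> NONE
#   system_assigned="disable", user_assigned=[<identity ids>]  -> USER_ASSIGNED
#   system_assigned="enable",  user_assigned=["disable"]       -> SYSTEM_ASSIGNED
#   system_assigned="enable",  user_assigned=[<identity ids>]  -> SYSTEM_ASSIGNED_USER_ASSIGNED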
def _get_user_identity_payload_for_force_set(user_assigned):
if DISABLE_LOWER == user_assigned[0]:
return None
user_identity_payload = {}
for user_identity_resource_id in user_assigned:
user_identity_payload[user_identity_resource_id] = models_20220301preview.UserAssignedManagedIdentity()
if not user_identity_payload:
user_identity_payload = None
return user_identity_payload
def _app_not_updatable(app):
return app.properties \
and app.properties.provisioning_state \
and app.properties.provisioning_state.lower() in [UPDATING_LOWER, DELETING_LOWER]
| [
"azure.cli.core.commands.arm._gen_guid",
"knack.log.get_logger",
"time.sleep",
"azure.cli.core.profiles.get_sdk",
"azure.cli.core.commands.arm.resolve_role_id",
"azure.cli.core.commands.client_factory.get_mgmt_service_client",
"azure.cli.core.azclierror.AzureInternalError"
] | [((947, 967), 'knack.log.get_logger', 'get_logger', (['__name__'], {}), '(__name__)\n', (957, 967), False, 'from knack.log import get_logger\n'), ((12023, 12069), 'azure.cli.core.commands.arm.resolve_role_id', '_arm.resolve_role_id', (['cmd.cli_ctx', 'role', 'scope'], {}), '(cmd.cli_ctx, role, scope)\n', (12043, 12069), True, 'from azure.cli.core.commands import arm as _arm\n'), ((12219, 12365), 'azure.cli.core.profiles.get_sdk', 'get_sdk', (['cmd.cli_ctx', 'ResourceType.MGMT_AUTHORIZATION', '"""RoleAssignmentCreateParameters"""'], {'mod': '"""models"""', 'operation_group': '"""role_assignments"""'}), "(cmd.cli_ctx, ResourceType.MGMT_AUTHORIZATION,\n 'RoleAssignmentCreateParameters', mod='models', operation_group=\n 'role_assignments')\n", (12226, 12365), False, 'from azure.cli.core.profiles import ResourceType, get_sdk\n'), ((12771, 12787), 'azure.cli.core.commands.arm._gen_guid', '_arm._gen_guid', ([], {}), '()\n', (12785, 12787), True, 'from azure.cli.core.commands import arm as _arm\n'), ((11862, 11995), 'azure.cli.core.azclierror.AzureInternalError', 'AzureInternalError', (['"""Failed to create role assignment without object ID(principal ID) of system-assigned managed identity."""'], {}), "(\n 'Failed to create role assignment without object ID(principal ID) of system-assigned managed identity.'\n )\n", (11880, 11995), False, 'from azure.cli.core.azclierror import AzureInternalError, CLIInternalError\n'), ((12095, 12164), 'azure.cli.core.commands.client_factory.get_mgmt_service_client', 'get_mgmt_service_client', (['cmd.cli_ctx', 'ResourceType.MGMT_AUTHORIZATION'], {}), '(cmd.cli_ctx, ResourceType.MGMT_AUTHORIZATION)\n', (12118, 12164), False, 'from azure.cli.core.commands.client_factory import get_mgmt_service_client\n'), ((13313, 13355), 'time.sleep', 'sleep', (['APP_CREATE_OR_UPDATE_SLEEP_INTERVAL'], {}), '(APP_CREATE_OR_UPDATE_SLEEP_INTERVAL)\n', (13318, 13355), False, 'from time import sleep\n')] |
import subprocess
from configparser import ConfigParser
from functools import cached_property
from services import logger
class PostgresBackup:
def __init__(self, config: ConfigParser):
self.config = config
self.result = None
    def __str__(self):
        # __str__ must return a string; returning None here would raise TypeError.
        if not self.result:
            return ''
        return '\n'.join(self.result.splitlines())
@cached_property
def pg_kwargs(self):
return {
'user': self.config.get('postgresql', 'user'),
'password': self.config.get('postgresql', 'password'),
'host': self.config.get('postgresql', 'host'),
'port': self.config.get('postgresql', 'port'),
'db': self.config.get('postgresql', 'db')
}
def process(self, print_results=True):
process = subprocess.Popen(
[
'psql',
f"--dbname=postgresql://{self.pg_kwargs['user']}:{self.pg_kwargs['password']}@"
f"{self.pg_kwargs['host']}:{self.pg_kwargs['port']}/{self.pg_kwargs['db']}",
'--list',
],
stdout=subprocess.PIPE,
)
        # communicate() returns bytes; decode so the lines join cleanly in __str__
        output = process.communicate()[0].decode()
if int(process.returncode) != 0:
logger.error(f'Command failed with return code {process.returncode}')
raise Exception('Non-zero return code!')
self.result = output
if print_results:
print(self)
return self.result
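# --- Illustrative usage sketch (editor addition, not part of the original module).
# Assumes an INI file with a [postgresql] section providing user, password, host,
# port and db; the file name "backup.ini" is a placeholder.
if __name__ == '__main__':
    config = ConfigParser()
    config.read('backup.ini')
    backup = PostgresBackup(config)
    backup.process()  # runs `psql --list` against the configured database and prints the result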
| [
"subprocess.Popen",
"services.logger.error"
] | [((811, 1033), 'subprocess.Popen', 'subprocess.Popen', (['[\'psql\',\n f"--dbname=postgresql://{self.pg_kwargs[\'user\']}:{self.pg_kwargs[\'password\']}@{self.pg_kwargs[\'host\']}:{self.pg_kwargs[\'port\']}/{self.pg_kwargs[\'db\']}"\n , \'--list\']'], {'stdout': 'subprocess.PIPE'}), '([\'psql\',\n f"--dbname=postgresql://{self.pg_kwargs[\'user\']}:{self.pg_kwargs[\'password\']}@{self.pg_kwargs[\'host\']}:{self.pg_kwargs[\'port\']}/{self.pg_kwargs[\'db\']}"\n , \'--list\'], stdout=subprocess.PIPE)\n', (827, 1033), False, 'import subprocess\n'), ((1239, 1308), 'services.logger.error', 'logger.error', (['f"""Command failed with return code {process.returncode}"""'], {}), "(f'Command failed with return code {process.returncode}')\n", (1251, 1308), False, 'from services import logger\n')] |
"""Add circulating supply
Revision ID: c<PASSWORD>
Revises: <PASSWORD>
Create Date: 2020-06-05 14:58:15.471738
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = '<PASSWORD>'
down_revision = '<PASSWORD>'
branch_labels = None
depends_on = None
def upgrade():
op.create_table('circulating_supply',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('snapshot_date', sa.DateTime(timezone=True), server_default=sa.text(u'now()'), nullable=True, index=True),
sa.Column('supply_amount', sa.Float(), nullable=True),
sa.Column('created_at', sa.DateTime(timezone=True), server_default=sa.text(u'now()'), nullable=True),
sa.PrimaryKeyConstraint('id')
)
def downgrade():
op.drop_table('circulating_supply')
| [
"sqlalchemy.Float",
"sqlalchemy.text",
"sqlalchemy.DateTime",
"alembic.op.drop_table",
"sqlalchemy.PrimaryKeyConstraint",
"sqlalchemy.Integer"
] | [((798, 833), 'alembic.op.drop_table', 'op.drop_table', (['"""circulating_supply"""'], {}), "('circulating_supply')\n", (811, 833), False, 'from alembic import op\n'), ((740, 769), 'sqlalchemy.PrimaryKeyConstraint', 'sa.PrimaryKeyConstraint', (['"""id"""'], {}), "('id')\n", (763, 769), True, 'import sqlalchemy as sa\n'), ((419, 431), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (429, 431), True, 'import sqlalchemy as sa\n'), ((481, 507), 'sqlalchemy.DateTime', 'sa.DateTime', ([], {'timezone': '(True)'}), '(timezone=True)\n', (492, 507), True, 'import sqlalchemy as sa\n'), ((602, 612), 'sqlalchemy.Float', 'sa.Float', ([], {}), '()\n', (610, 612), True, 'import sqlalchemy as sa\n'), ((658, 684), 'sqlalchemy.DateTime', 'sa.DateTime', ([], {'timezone': '(True)'}), '(timezone=True)\n', (669, 684), True, 'import sqlalchemy as sa\n'), ((524, 541), 'sqlalchemy.text', 'sa.text', (['u"""now()"""'], {}), "(u'now()')\n", (531, 541), True, 'import sqlalchemy as sa\n'), ((701, 718), 'sqlalchemy.text', 'sa.text', (['u"""now()"""'], {}), "(u'now()')\n", (708, 718), True, 'import sqlalchemy as sa\n')] |
"""
.. module:: DialOutClient
:platform: Unix, Windows
:synopsis: A TCP server for listening to streams of telemetry data from a Cisco device
.. moduleauthor:: <NAME> <<EMAIL>>
"""
import json
import gzip
from logging import getLogger, Logger
from datetime import datetime
from struct import Struct
from typing import List, Dict, Tuple, Any
from concurrent.futures import ThreadPoolExecutor, Future
from tornado.httpclient import AsyncHTTPClient, HTTPError, HTTPRequest, HTTPResponse
from tornado.tcpserver import TCPServer
from tornado.iostream import IOStream
from tornado.netutil import bind_sockets
from tornado.process import fork_processes
from tornado.ioloop import IOLoop
from tornado.iostream import StreamClosedError
from multiprocessing import Process, Queue
class DialOutClient(Process, TCPServer):
"""Create a TCP dial out server to listen for telemetry data from a Cisco device
    :param data_queue: Queue used to hand received telemetry messages to a consumer process
    :type data_queue: Queue
    :param log_name: Used for getting the application log
    :type log_name: str
    :param inputs: Configuration dict with the "address" to bind to and the "port" to listen on
    :type inputs: Dict[str, str]
    :param name: Name of this process
    :type name: str
    """
def __init__(self, data_queue: Queue, log_name: str, inputs: Dict[str,str], name: str) -> None:
Process.__init__(self, name=name)
TCPServer.__init__(self, max_buffer_size=10485760000, read_chunk_size=104857600)
self.address: str = inputs["address"]
self.port: int = inputs["port"]
self.log: Logger = getLogger(log_name)
self.log.info("Starting dial out client[%s]", self.name)
self.url: str = f"http://{self.address}:{self.port}"
self._header_size: int = 12
self._header_struct: Struct = Struct(">hhhhi")
self.data_queue: Queue = data_queue
async def handle_stream(self, stream: IOStream, address: Tuple[str, str]) -> None:
"""
:param stream: Client IOStream to read telemetry data from
:type stream: IOStream
:param address: The IP address and port on which a client connects to the server
:type address: Tuple[str,str]
:return: None
"""
try:
self.log.info(f"Got Connection from {address[0]}:{address[1]}")
while not stream.closed():
header_data: bytes = await stream.read_bytes(self._header_size)
(msg_type, encode_type, msg_version, flags, msg_length,) = self._header_struct.unpack(header_data)
# encoding = {1: "gpb", 2: "json"}[encode_type]
# implement json encoding
msg_data: bytes = b""
while len(msg_data) < msg_length:
packet: bytes = await stream.read_bytes(msg_length - len(msg_data))
msg_data += packet
self.data_queue.put_nowait(("ems", msg_data, None, None, address[0]))
except StreamClosedError as error:
self.log.error(f'{address[0]}:{address[1]} {error}')
stream.close()
def run(self):
sockets = bind_sockets(self.port)
fork_processes(0)
self.log.info("Started dial out server listening on %s:%s", self.address, self.port)
self.add_sockets(sockets)
IOLoop.current().set_default_executor(ThreadPoolExecutor(10))
IOLoop.current().start()
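# --- Illustrative usage sketch (editor addition, not part of the original module).
# The logger name "telemetry" and port 57500 are placeholder values; a separate
# consumer is assumed to drain the queue.
if __name__ == '__main__':
    import logging
    logging.basicConfig(level=logging.INFO)
    telemetry_queue: Queue = Queue()
    server = DialOutClient(
        data_queue=telemetry_queue,
        log_name='telemetry',
        inputs={'address': '0.0.0.0', 'port': 57500},
        name='dial-out-server',
    )
    server.start()  # forks a child process that binds the port and runs the IOLoop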
| [
"logging.getLogger",
"multiprocessing.Process.__init__",
"struct.Struct",
"concurrent.futures.ThreadPoolExecutor",
"tornado.ioloop.IOLoop.current",
"tornado.netutil.bind_sockets",
"tornado.tcpserver.TCPServer.__init__",
"tornado.process.fork_processes"
] | [((1342, 1375), 'multiprocessing.Process.__init__', 'Process.__init__', (['self'], {'name': 'name'}), '(self, name=name)\n', (1358, 1375), False, 'from multiprocessing import Process, Queue\n'), ((1384, 1469), 'tornado.tcpserver.TCPServer.__init__', 'TCPServer.__init__', (['self'], {'max_buffer_size': '(10485760000)', 'read_chunk_size': '(104857600)'}), '(self, max_buffer_size=10485760000, read_chunk_size=104857600\n )\n', (1402, 1469), False, 'from tornado.tcpserver import TCPServer\n'), ((1578, 1597), 'logging.getLogger', 'getLogger', (['log_name'], {}), '(log_name)\n', (1587, 1597), False, 'from logging import getLogger, Logger\n'), ((1798, 1814), 'struct.Struct', 'Struct', (['""">hhhhi"""'], {}), "('>hhhhi')\n", (1804, 1814), False, 'from struct import Struct\n'), ((3133, 3156), 'tornado.netutil.bind_sockets', 'bind_sockets', (['self.port'], {}), '(self.port)\n', (3145, 3156), False, 'from tornado.netutil import bind_sockets\n'), ((3165, 3182), 'tornado.process.fork_processes', 'fork_processes', (['(0)'], {}), '(0)\n', (3179, 3182), False, 'from tornado.process import fork_processes\n'), ((3356, 3378), 'concurrent.futures.ThreadPoolExecutor', 'ThreadPoolExecutor', (['(10)'], {}), '(10)\n', (3374, 3378), False, 'from concurrent.futures import ThreadPoolExecutor, Future\n'), ((3318, 3334), 'tornado.ioloop.IOLoop.current', 'IOLoop.current', ([], {}), '()\n', (3332, 3334), False, 'from tornado.ioloop import IOLoop\n'), ((3388, 3404), 'tornado.ioloop.IOLoop.current', 'IOLoop.current', ([], {}), '()\n', (3402, 3404), False, 'from tornado.ioloop import IOLoop\n')] |
from django import template
register = template.Library()
@register.simple_tag
def create_list(*args):
return list(args)
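# Illustrative usage (editor addition): after loading this tag library in a
# template -- the load name depends on this file's module name, shown here as
# the placeholder "my_tags":
#   {% load my_tags %}
#   {% create_list 'a' 'b' 'c' as letters %}
# `simple_tag` supports the "as variable" assignment form in Django >= 1.9.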
| [
"django.template.Library"
] | [((40, 58), 'django.template.Library', 'template.Library', ([], {}), '()\n', (56, 58), False, 'from django import template\n')] |
"""
Magnetic field ... er, fields.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2013, yt Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
import numpy as np
from yt.units import dimensions
from yt.units.unit_object import Unit
from yt.utilities.physical_constants import mu_0
from yt.fields.derived_field import \
ValidateParameter
from .field_plugin_registry import \
register_field_plugin
from yt.utilities.math_utils import \
get_sph_theta_component, \
get_sph_phi_component
mag_factors = {dimensions.magnetic_field_cgs: 4.0*np.pi,
dimensions.magnetic_field_mks: mu_0}
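# Editor's note (not part of the original module): with these factors the
# expression 0.5*B*B/mag_factors[...] used below evaluates to B**2/(8*pi) for
# Gaussian/CGS fields and B**2/(2*mu_0) for SI fields, i.e. the magnetic energy
# density (equivalently, the magnetic pressure).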
@register_field_plugin
def setup_magnetic_field_fields(registry, ftype = "gas", slice_info = None):
unit_system = registry.ds.unit_system
axis_names = registry.ds.coordinates.axis_order
if (ftype,"magnetic_field_%s" % axis_names[0]) not in registry:
return
u = registry[ftype,"magnetic_field_%s" % axis_names[0]].units
def _magnetic_field_strength(field,data):
B2 = (data[ftype,"magnetic_field_%s" % axis_names[0]]**2 +
data[ftype,"magnetic_field_%s" % axis_names[1]]**2 +
data[ftype,"magnetic_field_%s" % axis_names[2]]**2)
return np.sqrt(B2)
registry.add_field((ftype,"magnetic_field_strength"), sampling_type="cell",
function=_magnetic_field_strength,
units=u)
def _magnetic_energy(field, data):
B = data[ftype,"magnetic_field_strength"]
return 0.5*B*B/mag_factors[B.units.dimensions]
registry.add_field((ftype, "magnetic_energy"), sampling_type="cell",
function=_magnetic_energy,
units=unit_system["pressure"])
def _plasma_beta(field,data):
return data[ftype,'pressure']/data[ftype,'magnetic_energy']
registry.add_field((ftype, "plasma_beta"), sampling_type="cell",
function=_plasma_beta,
units="")
def _magnetic_pressure(field,data):
return data[ftype,'magnetic_energy']
registry.add_field((ftype, "magnetic_pressure"), sampling_type="cell",
function=_magnetic_pressure,
units=unit_system["pressure"])
if registry.ds.geometry == "cartesian":
def _magnetic_field_poloidal(field,data):
normal = data.get_field_parameter("normal")
d = data[ftype,'magnetic_field_x']
Bfields = data.ds.arr(
[data[ftype,'magnetic_field_x'],
data[ftype,'magnetic_field_y'],
data[ftype,'magnetic_field_z']],
d.units)
theta = data["index", 'spherical_theta']
phi = data["index", 'spherical_phi']
return get_sph_theta_component(Bfields, theta, phi, normal)
def _magnetic_field_toroidal(field,data):
normal = data.get_field_parameter("normal")
d = data[ftype,'magnetic_field_x']
Bfields = data.ds.arr(
[data[ftype,'magnetic_field_x'],
data[ftype,'magnetic_field_y'],
data[ftype,'magnetic_field_z']],
d.units)
phi = data["index", 'spherical_phi']
return get_sph_phi_component(Bfields, phi, normal)
elif registry.ds.geometry == "cylindrical":
def _magnetic_field_poloidal(field, data):
r = data["index", "r"]
z = data["index", "z"]
d = np.sqrt(r*r+z*z)
return (data[ftype, "magnetic_field_r"]*(r/d) +
data[ftype, "magnetic_field_z"]*(z/d))
def _magnetic_field_toroidal(field, data):
return data[ftype,"magnetic_field_theta"]
elif registry.ds.geometry == "spherical":
def _magnetic_field_poloidal(field, data):
return data[ftype,"magnetic_field_theta"]
def _magnetic_field_toroidal(field, data):
return data[ftype,"magnetic_field_phi"]
else:
# Unidentified geometry--set to None
_magnetic_field_toroidal = None
_magnetic_field_poloidal = None
registry.add_field((ftype, "magnetic_field_poloidal"), sampling_type="cell",
function=_magnetic_field_poloidal,
units=u, validators=[ValidateParameter("normal")])
registry.add_field((ftype, "magnetic_field_toroidal"), sampling_type="cell",
function=_magnetic_field_toroidal,
units=u, validators=[ValidateParameter("normal")])
def _alfven_speed(field,data):
B = data[ftype,'magnetic_field_strength']
return B/np.sqrt(mag_factors[B.units.dimensions]*data[ftype,'density'])
registry.add_field((ftype, "alfven_speed"), sampling_type="cell", function=_alfven_speed,
units=unit_system["velocity"])
def _mach_alfven(field,data):
return data[ftype,'velocity_magnitude']/data[ftype,'alfven_speed']
registry.add_field((ftype, "mach_alfven"), sampling_type="cell", function=_mach_alfven,
units="dimensionless")
def setup_magnetic_field_aliases(registry, ds_ftype, ds_fields, ftype="gas"):
r"""
This routine sets up special aliases between dataset-specific magnetic fields
and the default magnetic fields in yt so that unit conversions between different
unit systems can be handled properly. This is only called from the `setup_fluid_fields`
method of a frontend's :class:`FieldInfoContainer` instance.
Parameters
----------
registry : :class:`FieldInfoContainer`
The field registry that these definitions will be installed into.
ds_ftype : string
The field type for the fields we're going to alias, e.g. "flash", "enzo", "athena", etc.
ds_fields : list of strings
The fields that will be aliased.
ftype : string, optional
The resulting field type of the fields. Default "gas".
Examples
--------
>>> class PlutoFieldInfo(ChomboFieldInfo):
... def setup_fluid_fields(self):
... from yt.fields.magnetic_field import \
... setup_magnetic_field_aliases
... setup_magnetic_field_aliases(self, "chombo", ["bx%s" % ax for ax in [1,2,3]])
"""
unit_system = registry.ds.unit_system
ds_fields = [(ds_ftype, fd) for fd in ds_fields]
if ds_fields[0] not in registry:
return
from_units = Unit(registry[ds_fields[0]].units,
registry=registry.ds.unit_registry)
if dimensions.current_mks in unit_system.base_units:
to_units = unit_system["magnetic_field_mks"]
equiv = "SI"
else:
to_units = unit_system["magnetic_field_cgs"]
equiv = "CGS"
if from_units.dimensions == to_units.dimensions:
convert = lambda x: x.in_units(to_units)
else:
convert = lambda x: x.to_equivalent(to_units, equiv)
def mag_field(fd):
def _mag_field(field, data):
return convert(data[fd])
return _mag_field
for ax, fd in zip(registry.ds.coordinates.axis_order, ds_fields):
registry.add_field((ftype,"magnetic_field_%s" % ax), sampling_type="cell",
function=mag_field(fd),
units=unit_system[to_units.dimensions])
| [
"numpy.sqrt",
"yt.utilities.math_utils.get_sph_theta_component",
"yt.fields.derived_field.ValidateParameter",
"yt.units.unit_object.Unit",
"yt.utilities.math_utils.get_sph_phi_component"
] | [((6662, 6732), 'yt.units.unit_object.Unit', 'Unit', (['registry[ds_fields[0]].units'], {'registry': 'registry.ds.unit_registry'}), '(registry[ds_fields[0]].units, registry=registry.ds.unit_registry)\n', (6666, 6732), False, 'from yt.units.unit_object import Unit\n'), ((1466, 1477), 'numpy.sqrt', 'np.sqrt', (['B2'], {}), '(B2)\n', (1473, 1477), True, 'import numpy as np\n'), ((2996, 3048), 'yt.utilities.math_utils.get_sph_theta_component', 'get_sph_theta_component', (['Bfields', 'theta', 'phi', 'normal'], {}), '(Bfields, theta, phi, normal)\n', (3019, 3048), False, 'from yt.utilities.math_utils import get_sph_theta_component, get_sph_phi_component\n'), ((3513, 3556), 'yt.utilities.math_utils.get_sph_phi_component', 'get_sph_phi_component', (['Bfields', 'phi', 'normal'], {}), '(Bfields, phi, normal)\n', (3534, 3556), False, 'from yt.utilities.math_utils import get_sph_theta_component, get_sph_phi_component\n'), ((4871, 4936), 'numpy.sqrt', 'np.sqrt', (["(mag_factors[B.units.dimensions] * data[ftype, 'density'])"], {}), "(mag_factors[B.units.dimensions] * data[ftype, 'density'])\n", (4878, 4936), True, 'import numpy as np\n'), ((3743, 3765), 'numpy.sqrt', 'np.sqrt', (['(r * r + z * z)'], {}), '(r * r + z * z)\n', (3750, 3765), True, 'import numpy as np\n'), ((4544, 4571), 'yt.fields.derived_field.ValidateParameter', 'ValidateParameter', (['"""normal"""'], {}), "('normal')\n", (4561, 4571), False, 'from yt.fields.derived_field import ValidateParameter\n'), ((4738, 4765), 'yt.fields.derived_field.ValidateParameter', 'ValidateParameter', (['"""normal"""'], {}), "('normal')\n", (4755, 4765), False, 'from yt.fields.derived_field import ValidateParameter\n')] |
import cv2
import numpy as np
from collections import defaultdict
def segment_by_angle_kmeans(lines, k=2, **kwargs):
"""Groups lines based on angle with k-means.
Uses k-means on the coordinates of the angle on the unit circle
to segment `k` angles inside `lines`.
"""
# Define criteria = (type, max_iter, epsilon)
default_criteria_type = cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER
criteria = kwargs.get('criteria', (default_criteria_type, 10, 1.0))
flags = kwargs.get('flags', cv2.KMEANS_RANDOM_CENTERS)
attempts = kwargs.get('attempts', 10)
# returns angles in [0, pi] in radians
angles = np.array([line[0][1] for line in lines])
# multiply the angles by two and find coordinates of that angle
pts = np.array([[np.cos(2*angle), np.sin(2*angle)]
for angle in angles], dtype=np.float32)
# run kmeans on the coords
labels, centers = cv2.kmeans(pts, k, None, criteria, attempts, flags)[1:]
labels = labels.reshape(-1) # transpose to row vec
# segment lines based on their kmeans label
segmented = defaultdict(list)
for i, line in zip(range(len(lines)), lines):
segmented[labels[i]].append(line)
segmented = list(segmented.values())
return segmented | [
"cv2.kmeans",
"numpy.array",
"collections.defaultdict",
"numpy.cos",
"numpy.sin"
] | [((647, 687), 'numpy.array', 'np.array', (['[line[0][1] for line in lines]'], {}), '([line[0][1] for line in lines])\n', (655, 687), True, 'import numpy as np\n'), ((1102, 1119), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (1113, 1119), False, 'from collections import defaultdict\n'), ((925, 976), 'cv2.kmeans', 'cv2.kmeans', (['pts', 'k', 'None', 'criteria', 'attempts', 'flags'], {}), '(pts, k, None, criteria, attempts, flags)\n', (935, 976), False, 'import cv2\n'), ((777, 794), 'numpy.cos', 'np.cos', (['(2 * angle)'], {}), '(2 * angle)\n', (783, 794), True, 'import numpy as np\n'), ((794, 811), 'numpy.sin', 'np.sin', (['(2 * angle)'], {}), '(2 * angle)\n', (800, 811), True, 'import numpy as np\n')] |
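# --- Illustrative usage sketch for segment_by_angle_kmeans above (editor addition,
# not part of the original snippet). The image path "page.png" is a placeholder.
#
#   img = cv2.imread('page.png', cv2.IMREAD_GRAYSCALE)
#   edges = cv2.Canny(img, 50, 150)
#   lines = cv2.HoughLines(edges, 1, np.pi / 180, 150)   # array of [[rho, theta]] pairs
#   segments = segment_by_angle_kmeans(lines, k=2)       # e.g. horizontal vs. vertical groups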
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib import admin
# Register your models here.
from .models import Article, Language, Feed
@admin.register(Article)
class ArticleAdmin(admin.ModelAdmin):
list_display = ('published', 'title', 'valid')
list_display_links = ('published', 'title')
prepopulated_fields = {'slug': ('title',), }
list_select_related = ('language', 'thumb')
raw_id_fields = ('authors', 'things', 'people')
search_fields = ('title', 'description', 'url', 'text', 'html')
# @admin.register(Language)
# class LanguageAdmin(admin.ModelAdmin):
# list_display = ('code', 'name')
@admin.register(Feed)
class FeedAdmin(admin.ModelAdmin):
list_display = ('url', 'domain', 'valid')
| [
"django.contrib.admin.register"
] | [((175, 198), 'django.contrib.admin.register', 'admin.register', (['Article'], {}), '(Article)\n', (189, 198), False, 'from django.contrib import admin\n'), ((665, 685), 'django.contrib.admin.register', 'admin.register', (['Feed'], {}), '(Feed)\n', (679, 685), False, 'from django.contrib import admin\n')] |
import os
import argparse
def get_args():
args_parser = argparse.ArgumentParser()
args_parser.add_argument(
'--input-dir',
type = str,
help = 'input directory with source data',
required = False
)
args_parser.add_argument(
'--input-file',
type = str,
help = 'input file with source data',
required = False
)
args_parser.add_argument(
'--output-dir',
type = str,
help = 'output directory with result data',
required = True
)
return args_parser.parse_args()
def convert_playlist(aimp_playlist_directory, aimp_playlist_filename):
def get_m3u8_playlist_lines():
def get_relative_audiofile_path(audiofile_path):
paths = {audiofile_path, args.output_dir}
common_path = os.path.commonpath(paths)
if common_path != '':
result = os.path.relpath(audiofile_path, start = common_path)
else:
result = audiofile_path
return result
result = []
result.append('#EXTM3U')
is_content = False
for (index, line) in enumerate(aimp_playlist_lines):
if line == '#-----CONTENT-----#':
is_content = True
continue
if not is_content:
continue
if line.startswith('#-----'):
is_content = False
continue
if line.startswith('-'):
continue
line_parts = line.split('|')
audiofile_path = get_relative_audiofile_path(line_parts[0])
result.append('#EXTINF:')
result.append(audiofile_path)
return result
def write_m3u8_playlist():
def get_m3u8_playlist_path():
aimp_playlist_filename_without_extension = os.path.splitext(aimp_playlist_filename)[0]
m3u8_playlist_filename = '{}.m3u8'.format(aimp_playlist_filename_without_extension)
return os.path.join(args.output_dir, m3u8_playlist_filename)
playlist_path = get_m3u8_playlist_path()
playlist_data = '\n'.join(m3u8_playlist_lines)
with open(playlist_path, "w", encoding = 'utf-8-sig') as m3u8file:
m3u8file.write(playlist_data)
file_path = os.path.join(aimp_playlist_directory, aimp_playlist_filename)
with open(file_path, encoding = 'utf_16_le') as handle:
aimp_playlist_lines = handle.read().splitlines()
m3u8_playlist_lines = get_m3u8_playlist_lines()
write_m3u8_playlist()
args = get_args()
if args.input_file != None:
dirname = os.path.dirname(args.input_file)
basename = os.path.basename(args.input_file)
convert_playlist(dirname, basename)
elif args.input_dir != None:
files = os.walk(args.input_dir)
for root, dirs, filenames in files:
for filename in filenames:
filename_extension = os.path.splitext(filename)[1]
if filename_extension == '.aimppl4':
convert_playlist(args.input_dir, filename) | [
"argparse.ArgumentParser",
"os.path.join",
"os.path.splitext",
"os.path.dirname",
"os.path.basename",
"os.path.commonpath",
"os.walk",
"os.path.relpath"
] | [((62, 87), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (85, 87), False, 'import argparse\n'), ((2398, 2459), 'os.path.join', 'os.path.join', (['aimp_playlist_directory', 'aimp_playlist_filename'], {}), '(aimp_playlist_directory, aimp_playlist_filename)\n', (2410, 2459), False, 'import os\n'), ((2761, 2793), 'os.path.dirname', 'os.path.dirname', (['args.input_file'], {}), '(args.input_file)\n', (2776, 2793), False, 'import os\n'), ((2812, 2845), 'os.path.basename', 'os.path.basename', (['args.input_file'], {}), '(args.input_file)\n', (2828, 2845), False, 'import os\n'), ((2938, 2961), 'os.walk', 'os.walk', (['args.input_dir'], {}), '(args.input_dir)\n', (2945, 2961), False, 'import os\n'), ((869, 894), 'os.path.commonpath', 'os.path.commonpath', (['paths'], {}), '(paths)\n', (887, 894), False, 'import os\n'), ((2096, 2149), 'os.path.join', 'os.path.join', (['args.output_dir', 'm3u8_playlist_filename'], {}), '(args.output_dir, m3u8_playlist_filename)\n', (2108, 2149), False, 'import os\n'), ((955, 1005), 'os.path.relpath', 'os.path.relpath', (['audiofile_path'], {'start': 'common_path'}), '(audiofile_path, start=common_path)\n', (970, 1005), False, 'import os\n'), ((1915, 1955), 'os.path.splitext', 'os.path.splitext', (['aimp_playlist_filename'], {}), '(aimp_playlist_filename)\n', (1931, 1955), False, 'import os\n'), ((3089, 3115), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (3105, 3115), False, 'import os\n')] |
import time
class FiboIter():
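    """Iterator over the Fibonacci sequence (editor-added docstring).
    Yields 0, 1, 1, 2, 3, ... and raises StopIteration once its internal counter
    exceeds the ``max`` value passed to the constructor.
    """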
def __init__(self, max:int):
self.max = max
def __iter__(self):
self.num1 = 0
self.num2 = 1
self.counter = 0
return self
def __next__(self):
if self.counter == 0:
self.counter += 1
return self.num1
elif self.counter == 1:
self.counter += 1
return self.num2
else:
if self.counter <= self.max:
self.aux = self.num1 + self.num2
self.num1, self.num2 = self.num2, self.aux
self.counter += 1
return self.aux
else:
raise StopIteration
if __name__ == '__main__':
fibonacci = FiboIter
for element in fibonacci(50):
print(element)
time.sleep(0.5)
| [
"time.sleep"
] | [((812, 827), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (822, 827), False, 'import time\n')] |
import json
import unittest
from choria_external.exceptions import InvalidRPCData
from mco_agent.protocol import (
ExternalRPCRequest, ExternalActivationRequest,
ActivationReply, ActionReply)
class TestProtocolMessage(unittest.TestCase):
def setUp(self):
pass
def test_parse_valid_external_request(self):
request = ExternalRPCRequest.from_dict({
'$schema': 'https://choria.io/schemas/mcorpc/external/v1/rpc_request.json',
'protocol': 'io.choria.mcorpc.external.v1.rpc_request',
'agent': 'testagent',
'action': 'testaction',
'requestid': '123',
'senderid': '123',
'callerid': '123',
'collective': 'mcollective',
'ttl': 30,
'msgtime': 123,
'data': {},
})
self.assertIsInstance(request, ExternalRPCRequest)
self.assertEqual('testagent', request.agent)
self.assertEqual('testaction', request.action)
def test_parse_invalid_external_request(self):
with self.assertRaises(InvalidRPCData):
ExternalRPCRequest.from_dict({
'invalid_dict': True,
})
with self.assertRaises(InvalidRPCData):
ExternalRPCRequest.from_dict({
'protocol': 'io.choria.mcorpc.external.v1.rpc_request',
'agent': 'testagent',
'action': 'testaction',
'requestid': '123',
'senderid': '123',
'callerid': '123',
'collective': 'mcollective',
'ttl': 30,
'msgtime': 123,
'data': {},
'extra_field': True,
})
def test_parse_valid_activation_check(self):
request = ExternalActivationRequest.from_dict({
'$schema': 'https://choria.io/schemas/mcorpc/external/v1/activation_request.json',
'protocol': 'io.choria.mcorpc.external.v1.activation_request',
'agent': 'testagent',
})
self.assertIsInstance(request, ExternalActivationRequest)
self.assertEqual('testagent', request.agent)
def test_activation_reply_mark_failure(self):
reply = ActivationReply()
self.assertTrue(reply.successful())
self.assertTrue(reply.activate)
reply.fail()
self.assertFalse(reply.activate)
def test_activation_reply_serialisation(self):
reply = ActivationReply()
self.assertEqual('{"activate": true}', reply.to_json())
def test_activation_reply_inactive_serialisation(self):
reply = ActivationReply()
reply.activate = False
self.assertEqual('{"activate": false}', reply.to_json())
def test_action_reply_reply_serialisation(self):
reply = ActionReply()
result = json.loads(reply.to_json())
self.assertEqual(3, len(result.keys()))
self.assertIn('statuscode', result)
self.assertIn('statusmsg', result)
self.assertIn('data', result)
def test_action_reply_mark_failure(self):
reply = ActionReply()
self.assertEqual(0, reply.statuscode)
self.assertEqual('', reply.statusmsg)
self.assertTrue(reply.successful())
reply.fail(1, 'test')
self.assertEqual(1, reply.statuscode)
self.assertEqual('test', reply.statusmsg)
self.assertFalse(reply.successful())
| [
"mco_agent.protocol.ExternalRPCRequest.from_dict",
"mco_agent.protocol.ActivationReply",
"mco_agent.protocol.ActionReply",
"mco_agent.protocol.ExternalActivationRequest.from_dict"
] | [((352, 706), 'mco_agent.protocol.ExternalRPCRequest.from_dict', 'ExternalRPCRequest.from_dict', (["{'$schema': 'https://choria.io/schemas/mcorpc/external/v1/rpc_request.json',\n 'protocol': 'io.choria.mcorpc.external.v1.rpc_request', 'agent':\n 'testagent', 'action': 'testaction', 'requestid': '123', 'senderid':\n '123', 'callerid': '123', 'collective': 'mcollective', 'ttl': 30,\n 'msgtime': 123, 'data': {}}"], {}), "({'$schema':\n 'https://choria.io/schemas/mcorpc/external/v1/rpc_request.json',\n 'protocol': 'io.choria.mcorpc.external.v1.rpc_request', 'agent':\n 'testagent', 'action': 'testaction', 'requestid': '123', 'senderid':\n '123', 'callerid': '123', 'collective': 'mcollective', 'ttl': 30,\n 'msgtime': 123, 'data': {}})\n", (380, 706), False, 'from mco_agent.protocol import ExternalRPCRequest, ExternalActivationRequest, ActivationReply, ActionReply\n'), ((1794, 2011), 'mco_agent.protocol.ExternalActivationRequest.from_dict', 'ExternalActivationRequest.from_dict', (["{'$schema':\n 'https://choria.io/schemas/mcorpc/external/v1/activation_request.json',\n 'protocol': 'io.choria.mcorpc.external.v1.activation_request', 'agent':\n 'testagent'}"], {}), "({'$schema':\n 'https://choria.io/schemas/mcorpc/external/v1/activation_request.json',\n 'protocol': 'io.choria.mcorpc.external.v1.activation_request', 'agent':\n 'testagent'})\n", (1829, 2011), False, 'from mco_agent.protocol import ExternalRPCRequest, ExternalActivationRequest, ActivationReply, ActionReply\n'), ((2234, 2251), 'mco_agent.protocol.ActivationReply', 'ActivationReply', ([], {}), '()\n', (2249, 2251), False, 'from mco_agent.protocol import ExternalRPCRequest, ExternalActivationRequest, ActivationReply, ActionReply\n'), ((2466, 2483), 'mco_agent.protocol.ActivationReply', 'ActivationReply', ([], {}), '()\n', (2481, 2483), False, 'from mco_agent.protocol import ExternalRPCRequest, ExternalActivationRequest, ActivationReply, ActionReply\n'), ((2625, 2642), 'mco_agent.protocol.ActivationReply', 'ActivationReply', ([], {}), '()\n', (2640, 2642), False, 'from mco_agent.protocol import ExternalRPCRequest, ExternalActivationRequest, ActivationReply, ActionReply\n'), ((2809, 2822), 'mco_agent.protocol.ActionReply', 'ActionReply', ([], {}), '()\n', (2820, 2822), False, 'from mco_agent.protocol import ExternalRPCRequest, ExternalActivationRequest, ActivationReply, ActionReply\n'), ((3104, 3117), 'mco_agent.protocol.ActionReply', 'ActionReply', ([], {}), '()\n', (3115, 3117), False, 'from mco_agent.protocol import ExternalRPCRequest, ExternalActivationRequest, ActivationReply, ActionReply\n'), ((1110, 1162), 'mco_agent.protocol.ExternalRPCRequest.from_dict', 'ExternalRPCRequest.from_dict', (["{'invalid_dict': True}"], {}), "({'invalid_dict': True})\n", (1138, 1162), False, 'from mco_agent.protocol import ExternalRPCRequest, ExternalActivationRequest, ActivationReply, ActionReply\n'), ((1255, 1551), 'mco_agent.protocol.ExternalRPCRequest.from_dict', 'ExternalRPCRequest.from_dict', (["{'protocol': 'io.choria.mcorpc.external.v1.rpc_request', 'agent':\n 'testagent', 'action': 'testaction', 'requestid': '123', 'senderid':\n '123', 'callerid': '123', 'collective': 'mcollective', 'ttl': 30,\n 'msgtime': 123, 'data': {}, 'extra_field': True}"], {}), "({'protocol':\n 'io.choria.mcorpc.external.v1.rpc_request', 'agent': 'testagent',\n 'action': 'testaction', 'requestid': '123', 'senderid': '123',\n 'callerid': '123', 'collective': 'mcollective', 'ttl': 30, 'msgtime': \n 123, 'data': {}, 'extra_field': True})\n", (1283, 1551), False, 'from 
mco_agent.protocol import ExternalRPCRequest, ExternalActivationRequest, ActivationReply, ActionReply\n')] |
import sys
from rpython.rlib.objectmodel import we_are_translated, fetch_translated_config
from rpython.rlib.objectmodel import specialize
from rpython.rlib.rarithmetic import r_longlong
from rpython.rtyper.lltypesystem import lltype, llmemory, rstr
from rpython.rtyper.lltypesystem.lloperation import llop
from rpython.rtyper.extregistry import ExtRegistryEntry
from rpython.rtyper.annlowlevel import llhelper, hlstr
from rpython.rtyper.annlowlevel import cast_gcref_to_instance
from rpython.rtyper.lltypesystem import lltype, rffi
CMD_PRINT = 1
CMD_BACKTRACE = 2
CMD_LOCALS = 3
CMD_BREAKPOINTS = 4
CMD_STACKID = 5
CMD_ATTACHID = 6
CMD_COMPILEWATCH= 7
CMD_CHECKWATCH = 8
CMD_WATCHVALUES = 9
ANSWER_LINECACHE= 19
ANSWER_TEXT = 20
ANSWER_STACKID = 21
ANSWER_NEXTNID = 22
ANSWER_WATCH = 23
ANSWER_CHBKPT = 24
def stop_point(place=0):
"""Indicates a point in the execution of the RPython program where
the reverse-debugger can stop. When reverse-debugging, we see
the "time" as the index of the stop-point that happened.
"""
if we_are_translated():
if fetch_translated_config().translation.reverse_debugger:
llop.revdb_stop_point(lltype.Void, place)
def register_debug_command(command, lambda_func):
"""Register the extra RPython-implemented debug command."""
def send_answer(cmd, arg1=0, arg2=0, arg3=0, extra=""):
"""For RPython debug commands: writes an answer block to stdout"""
llop.revdb_send_answer(lltype.Void, cmd, arg1, arg2, arg3, extra)
def send_output(text):
send_answer(ANSWER_TEXT, extra=text)
def send_print(text):
send_answer(ANSWER_TEXT, 1, extra=text) # adds a newline
def send_nextnid(unique_id):
send_answer(ANSWER_NEXTNID, unique_id)
def send_watch(text, ok_flag):
send_answer(ANSWER_WATCH, ok_flag, extra=text)
def send_linecache(filename, linenum, strip=True):
send_answer(ANSWER_LINECACHE, linenum, int(strip), extra=filename)
def send_change_breakpoint(breakpointnum, newtext=''):
send_answer(ANSWER_CHBKPT, breakpointnum, extra=newtext)
def current_time():
"""For RPython debug commands: returns the current time."""
return llop.revdb_get_value(lltype.SignedLongLong, 'c')
def current_break_time():
"""Returns the time configured for the next break. When going forward,
this is the target time at which we'll stop going forward."""
return llop.revdb_get_value(lltype.SignedLongLong, 'b')
def total_time():
"""For RPython debug commands: returns the total time (measured
as the total number of stop-points)."""
return llop.revdb_get_value(lltype.SignedLongLong, 't')
def currently_created_objects():
"""For RPython debug commands: returns the current value of
the object creation counter. All objects created so far have
a lower unique id; all objects created afterwards will have a
unique id greater or equal."""
return llop.revdb_get_value(lltype.SignedLongLong, 'u')
def current_place():
"""For RPython debug commands: the value of the 'place' argument
passed to stop_point().
"""
return llop.revdb_get_value(lltype.Signed, 'p')
def flag_io_disabled():
"""Returns True if we're in the debugger typing commands."""
if we_are_translated():
if fetch_translated_config().translation.reverse_debugger:
flag = llop.revdb_get_value(lltype.Signed, 'i')
return flag != ord('R') # FID_REGULAR_MODE
return False
## @specialize.arg(1)
## def go_forward(time_delta, callback):
## """For RPython debug commands: tells that after this function finishes,
## the debugger should run the 'forward <time_delta>' command and then
## invoke the 'callback' with no argument.
## """
## _change_time('f', time_delta, callback)
def breakpoint(num):
llop.revdb_breakpoint(lltype.Void, num)
def set_thread_breakpoint(tnum):
llop.revdb_set_thread_breakpoint(lltype.Void, tnum)
@specialize.argtype(0)
def get_unique_id(x):
"""Returns the creation number of the object 'x'. For objects created
by the program, it is globally unique, monotonic, and reproducible
among multiple processes. For objects created by a debug command,
this returns a (random) negative number. Right now, this returns 0
for all prebuilt objects.
"""
return llop.revdb_get_unique_id(lltype.SignedLongLong, x)
def track_object(unique_id, callback):
"""Track the creation of the object given by its unique_id, which must
be in the future (i.e. >= currently_created_objects()). Call this
before go_forward(). If go_forward() goes over the creation of this
object, then 'callback(gcref)' is called. Careful in callback(),
gcref is not fully initialized and should not be immediately read from,
only stored for later. The purpose of callback() is to possibly
call track_object() again to track the next object, and/or to call
breakpoint(). Note: object tracking remains activated until one of:
(1) we reach the creation time in go_forward(); (2) we call
track_object() to track a different object; (3) we call jump_in_time().
"""
ll_callback = llhelper(_CALLBACK_GCREF_FNPTR, callback)
llop.revdb_track_object(lltype.Void, unique_id, ll_callback)
def watch_save_state(force=False):
return llop.revdb_watch_save_state(lltype.Bool, force)
def watch_restore_state(any_watch_point):
llop.revdb_watch_restore_state(lltype.Void, any_watch_point)
def split_breakpoints_arg(breakpoints):
# RPython generator to help in splitting the string arg in CMD_BREAKPOINTS
n = 0
i = 0
while i < len(breakpoints):
kind = breakpoints[i]
i += 1
if kind != '\x00':
length = (ord(breakpoints[i]) |
(ord(breakpoints[i + 1]) << 8) |
(ord(breakpoints[i + 2]) << 16))
assert length >= 0
i += 3
yield n, kind, breakpoints[i : i + length]
i += length
n += 1
assert i == len(breakpoints)
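# Editor's note (not part of the original module): the packed format consumed by
# split_breakpoints_arg is one kind byte per breakpoint, where '\x00' means "no
# breakpoint" and any other kind byte is followed by a 3-byte little-endian
# length and that many bytes of payload.  For example,
#     '\x00' + 'B' + '\x05\x00\x00' + 'hello'
# yields a single tuple (1, 'B', 'hello').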
# ____________________________________________________________
## @specialize.arg(2)
## def _change_time(mode, time, callback):
## ll_callback = llhelper(_CALLBACK_NOARG_FNPTR, callback)
## llop.revdb_change_time(lltype.Void, mode, time, ll_callback)
## _CALLBACK_NOARG_FNPTR = lltype.Ptr(lltype.FuncType([], lltype.Void))
_CALLBACK_GCREF_FNPTR = lltype.Ptr(lltype.FuncType([llmemory.GCREF],
lltype.Void))
_CMDPTR = rffi.CStructPtr('rpy_revdb_command_s',
('cmd', rffi.INT),
('arg1', lltype.SignedLongLong),
('arg2', lltype.SignedLongLong),
('arg3', lltype.SignedLongLong),
hints={'ignore_revdb': True})
class RegisterDebugCommand(ExtRegistryEntry):
_about_ = register_debug_command
def compute_result_annotation(self, s_command_num, s_lambda_func):
command_num = s_command_num.const
lambda_func = s_lambda_func.const
assert isinstance(command_num, (int, str))
t = self.bookkeeper.annotator.translator
if t.config.translation.reverse_debugger:
func = lambda_func()
try:
cmds = t.revdb_commands
except AttributeError:
cmds = t.revdb_commands = {}
old_func = cmds.setdefault(command_num, func)
assert old_func is func
s_func = self.bookkeeper.immutablevalue(func)
arg_getter = getattr(self, 'arguments_' + str(command_num),
self.default_arguments)
self.bookkeeper.emulate_pbc_call(self.bookkeeper.position_key,
s_func, arg_getter())
def default_arguments(self):
from rpython.annotator import model as annmodel
from rpython.rtyper import llannotation
return [llannotation.SomePtr(ll_ptrtype=_CMDPTR),
annmodel.SomeString()]
def arguments_ALLOCATING(self):
from rpython.annotator import model as annmodel
from rpython.rtyper import llannotation
return [annmodel.SomeInteger(knowntype=r_longlong),
llannotation.lltype_to_annotation(llmemory.GCREF)]
def arguments_WATCHING(self):
raise Exception("XXX remove me")
def specialize_call(self, hop):
hop.exception_cannot_occur()
# ____________________________________________________________
# Emulation for strtod() and dtoa() when running debugger commands
# (we can't easily just call C code there). The emulation can return
# a crude result. Hack hack hack.
_INVALID_STRTOD = -3.46739514239368e+113
def emulate_strtod(input):
d = llop.revdb_strtod(lltype.Float, input)
if d == _INVALID_STRTOD:
raise ValueError
return d
def emulate_dtoa(value):
s = llop.revdb_dtoa(lltype.Ptr(rstr.STR), value)
s = hlstr(s)
assert s is not None
return s
def emulate_modf(x):
return (llop.revdb_modf(lltype.Float, x, 0),
llop.revdb_modf(lltype.Float, x, 1))
def emulate_frexp(x):
return (llop.revdb_frexp(lltype.Float, x, 0),
int(llop.revdb_frexp(lltype.Float, x, 1)))
| [
"rpython.rlib.objectmodel.we_are_translated",
"rpython.rtyper.lltypesystem.lloperation.llop.revdb_breakpoint",
"rpython.rtyper.llannotation.SomePtr",
"rpython.rlib.objectmodel.fetch_translated_config",
"rpython.rlib.objectmodel.specialize.argtype",
"rpython.rtyper.lltypesystem.lloperation.llop.revdb_modf",
"rpython.rtyper.lltypesystem.lloperation.llop.revdb_get_value",
"rpython.rtyper.lltypesystem.rffi.CStructPtr",
"rpython.rtyper.lltypesystem.lloperation.llop.revdb_set_thread_breakpoint",
"rpython.rtyper.lltypesystem.lltype.FuncType",
"rpython.rtyper.lltypesystem.lloperation.llop.revdb_stop_point",
"rpython.rtyper.lltypesystem.lloperation.llop.revdb_watch_restore_state",
"rpython.rtyper.lltypesystem.lloperation.llop.revdb_strtod",
"rpython.rtyper.lltypesystem.lloperation.llop.revdb_track_object",
"rpython.rtyper.annlowlevel.llhelper",
"rpython.rtyper.lltypesystem.lltype.Ptr",
"rpython.rtyper.lltypesystem.lloperation.llop.revdb_get_unique_id",
"rpython.rtyper.annlowlevel.hlstr",
"rpython.rtyper.llannotation.lltype_to_annotation",
"rpython.rtyper.lltypesystem.lloperation.llop.revdb_send_answer",
"rpython.rtyper.lltypesystem.lloperation.llop.revdb_watch_save_state",
"rpython.annotator.model.SomeInteger",
"rpython.rtyper.lltypesystem.lloperation.llop.revdb_frexp",
"rpython.annotator.model.SomeString"
] | [((3952, 3973), 'rpython.rlib.objectmodel.specialize.argtype', 'specialize.argtype', (['(0)'], {}), '(0)\n', (3970, 3973), False, 'from rpython.rlib.objectmodel import specialize\n'), ((6534, 6730), 'rpython.rtyper.lltypesystem.rffi.CStructPtr', 'rffi.CStructPtr', (['"""rpy_revdb_command_s"""', "('cmd', rffi.INT)", "('arg1', lltype.SignedLongLong)", "('arg2', lltype.SignedLongLong)", "('arg3', lltype.SignedLongLong)"], {'hints': "{'ignore_revdb': True}"}), "('rpy_revdb_command_s', ('cmd', rffi.INT), ('arg1', lltype.\n SignedLongLong), ('arg2', lltype.SignedLongLong), ('arg3', lltype.\n SignedLongLong), hints={'ignore_revdb': True})\n", (6549, 6730), False, 'from rpython.rtyper.lltypesystem import lltype, rffi\n'), ((1082, 1101), 'rpython.rlib.objectmodel.we_are_translated', 'we_are_translated', ([], {}), '()\n', (1099, 1101), False, 'from rpython.rlib.objectmodel import we_are_translated, fetch_translated_config\n'), ((1471, 1536), 'rpython.rtyper.lltypesystem.lloperation.llop.revdb_send_answer', 'llop.revdb_send_answer', (['lltype.Void', 'cmd', 'arg1', 'arg2', 'arg3', 'extra'], {}), '(lltype.Void, cmd, arg1, arg2, arg3, extra)\n', (1493, 1536), False, 'from rpython.rtyper.lltypesystem.lloperation import llop\n'), ((2180, 2228), 'rpython.rtyper.lltypesystem.lloperation.llop.revdb_get_value', 'llop.revdb_get_value', (['lltype.SignedLongLong', '"""c"""'], {}), "(lltype.SignedLongLong, 'c')\n", (2200, 2228), False, 'from rpython.rtyper.lltypesystem.lloperation import llop\n'), ((2409, 2457), 'rpython.rtyper.lltypesystem.lloperation.llop.revdb_get_value', 'llop.revdb_get_value', (['lltype.SignedLongLong', '"""b"""'], {}), "(lltype.SignedLongLong, 'b')\n", (2429, 2457), False, 'from rpython.rtyper.lltypesystem.lloperation import llop\n'), ((2600, 2648), 'rpython.rtyper.lltypesystem.lloperation.llop.revdb_get_value', 'llop.revdb_get_value', (['lltype.SignedLongLong', '"""t"""'], {}), "(lltype.SignedLongLong, 't')\n", (2620, 2648), False, 'from rpython.rtyper.lltypesystem.lloperation import llop\n'), ((2925, 2973), 'rpython.rtyper.lltypesystem.lloperation.llop.revdb_get_value', 'llop.revdb_get_value', (['lltype.SignedLongLong', '"""u"""'], {}), "(lltype.SignedLongLong, 'u')\n", (2945, 2973), False, 'from rpython.rtyper.lltypesystem.lloperation import llop\n'), ((3112, 3152), 'rpython.rtyper.lltypesystem.lloperation.llop.revdb_get_value', 'llop.revdb_get_value', (['lltype.Signed', '"""p"""'], {}), "(lltype.Signed, 'p')\n", (3132, 3152), False, 'from rpython.rtyper.lltypesystem.lloperation import llop\n'), ((3250, 3269), 'rpython.rlib.objectmodel.we_are_translated', 'we_are_translated', ([], {}), '()\n', (3267, 3269), False, 'from rpython.rlib.objectmodel import we_are_translated, fetch_translated_config\n'), ((3820, 3859), 'rpython.rtyper.lltypesystem.lloperation.llop.revdb_breakpoint', 'llop.revdb_breakpoint', (['lltype.Void', 'num'], {}), '(lltype.Void, num)\n', (3841, 3859), False, 'from rpython.rtyper.lltypesystem.lloperation import llop\n'), ((3898, 3949), 'rpython.rtyper.lltypesystem.lloperation.llop.revdb_set_thread_breakpoint', 'llop.revdb_set_thread_breakpoint', (['lltype.Void', 'tnum'], {}), '(lltype.Void, tnum)\n', (3930, 3949), False, 'from rpython.rtyper.lltypesystem.lloperation import llop\n'), ((4334, 4384), 'rpython.rtyper.lltypesystem.lloperation.llop.revdb_get_unique_id', 'llop.revdb_get_unique_id', (['lltype.SignedLongLong', 'x'], {}), '(lltype.SignedLongLong, x)\n', (4358, 4384), False, 'from rpython.rtyper.lltypesystem.lloperation import llop\n'), ((5169, 5210), 
'rpython.rtyper.annlowlevel.llhelper', 'llhelper', (['_CALLBACK_GCREF_FNPTR', 'callback'], {}), '(_CALLBACK_GCREF_FNPTR, callback)\n', (5177, 5210), False, 'from rpython.rtyper.annlowlevel import llhelper, hlstr\n'), ((5215, 5275), 'rpython.rtyper.lltypesystem.lloperation.llop.revdb_track_object', 'llop.revdb_track_object', (['lltype.Void', 'unique_id', 'll_callback'], {}), '(lltype.Void, unique_id, ll_callback)\n', (5238, 5275), False, 'from rpython.rtyper.lltypesystem.lloperation import llop\n'), ((5323, 5370), 'rpython.rtyper.lltypesystem.lloperation.llop.revdb_watch_save_state', 'llop.revdb_watch_save_state', (['lltype.Bool', 'force'], {}), '(lltype.Bool, force)\n', (5350, 5370), False, 'from rpython.rtyper.lltypesystem.lloperation import llop\n'), ((5418, 5478), 'rpython.rtyper.lltypesystem.lloperation.llop.revdb_watch_restore_state', 'llop.revdb_watch_restore_state', (['lltype.Void', 'any_watch_point'], {}), '(lltype.Void, any_watch_point)\n', (5448, 5478), False, 'from rpython.rtyper.lltypesystem.lloperation import llop\n'), ((6425, 6471), 'rpython.rtyper.lltypesystem.lltype.FuncType', 'lltype.FuncType', (['[llmemory.GCREF]', 'lltype.Void'], {}), '([llmemory.GCREF], lltype.Void)\n', (6440, 6471), False, 'from rpython.rtyper.lltypesystem import lltype, rffi\n'), ((8804, 8842), 'rpython.rtyper.lltypesystem.lloperation.llop.revdb_strtod', 'llop.revdb_strtod', (['lltype.Float', 'input'], {}), '(lltype.Float, input)\n', (8821, 8842), False, 'from rpython.rtyper.lltypesystem.lloperation import llop\n'), ((8997, 9005), 'rpython.rtyper.annlowlevel.hlstr', 'hlstr', (['s'], {}), '(s)\n', (9002, 9005), False, 'from rpython.rtyper.annlowlevel import llhelper, hlstr\n'), ((8960, 8980), 'rpython.rtyper.lltypesystem.lltype.Ptr', 'lltype.Ptr', (['rstr.STR'], {}), '(rstr.STR)\n', (8970, 8980), False, 'from rpython.rtyper.lltypesystem import lltype, rffi\n'), ((9078, 9113), 'rpython.rtyper.lltypesystem.lloperation.llop.revdb_modf', 'llop.revdb_modf', (['lltype.Float', 'x', '(0)'], {}), '(lltype.Float, x, 0)\n', (9093, 9113), False, 'from rpython.rtyper.lltypesystem.lloperation import llop\n'), ((9127, 9162), 'rpython.rtyper.lltypesystem.lloperation.llop.revdb_modf', 'llop.revdb_modf', (['lltype.Float', 'x', '(1)'], {}), '(lltype.Float, x, 1)\n', (9142, 9162), False, 'from rpython.rtyper.lltypesystem.lloperation import llop\n'), ((9199, 9235), 'rpython.rtyper.lltypesystem.lloperation.llop.revdb_frexp', 'llop.revdb_frexp', (['lltype.Float', 'x', '(0)'], {}), '(lltype.Float, x, 0)\n', (9215, 9235), False, 'from rpython.rtyper.lltypesystem.lloperation import llop\n'), ((1182, 1223), 'rpython.rtyper.lltypesystem.lloperation.llop.revdb_stop_point', 'llop.revdb_stop_point', (['lltype.Void', 'place'], {}), '(lltype.Void, place)\n', (1203, 1223), False, 'from rpython.rtyper.lltypesystem.lloperation import llop\n'), ((3357, 3397), 'rpython.rtyper.lltypesystem.lloperation.llop.revdb_get_value', 'llop.revdb_get_value', (['lltype.Signed', '"""i"""'], {}), "(lltype.Signed, 'i')\n", (3377, 3397), False, 'from rpython.rtyper.lltypesystem.lloperation import llop\n'), ((7989, 8029), 'rpython.rtyper.llannotation.SomePtr', 'llannotation.SomePtr', ([], {'ll_ptrtype': '_CMDPTR'}), '(ll_ptrtype=_CMDPTR)\n', (8009, 8029), False, 'from rpython.rtyper import llannotation\n'), ((8047, 8068), 'rpython.annotator.model.SomeString', 'annmodel.SomeString', ([], {}), '()\n', (8066, 8068), True, 'from rpython.annotator import model as annmodel\n'), ((8227, 8269), 'rpython.annotator.model.SomeInteger', 'annmodel.SomeInteger', ([], 
{'knowntype': 'r_longlong'}), '(knowntype=r_longlong)\n', (8247, 8269), True, 'from rpython.annotator import model as annmodel\n'), ((8287, 8336), 'rpython.rtyper.llannotation.lltype_to_annotation', 'llannotation.lltype_to_annotation', (['llmemory.GCREF'], {}), '(llmemory.GCREF)\n', (8320, 8336), False, 'from rpython.rtyper import llannotation\n'), ((9253, 9289), 'rpython.rtyper.lltypesystem.lloperation.llop.revdb_frexp', 'llop.revdb_frexp', (['lltype.Float', 'x', '(1)'], {}), '(lltype.Float, x, 1)\n', (9269, 9289), False, 'from rpython.rtyper.lltypesystem.lloperation import llop\n'), ((1114, 1139), 'rpython.rlib.objectmodel.fetch_translated_config', 'fetch_translated_config', ([], {}), '()\n', (1137, 1139), False, 'from rpython.rlib.objectmodel import we_are_translated, fetch_translated_config\n'), ((3282, 3307), 'rpython.rlib.objectmodel.fetch_translated_config', 'fetch_translated_config', ([], {}), '()\n', (3305, 3307), False, 'from rpython.rlib.objectmodel import we_are_translated, fetch_translated_config\n')] |
from balutils.CatalogFeature import SimpleCatalog
import numpy as np
class Detection(SimpleCatalog):
"""
Adds detection catalog functionality to the catalog.
"""
    def applyTo(self, catalog: "Catalog") -> None:  # string annotation: Catalog is not imported in this module
self.parent.applyTo(catalog)
'''
Balrog stack versions 1.4 and below have a small bug that
seems to duplicate exactly 1 object, so check for these
'''
unq, unq_idx, unq_cnt = np.unique(
catalog._cat['bal_id'],
return_inverse=True,
return_counts=True
)
Nunq = len(unq)
if Nunq != catalog.Nobjs:
Ndups = catalog.Nobjs - Nunq
dup_ids = unq[np.where(unq_cnt > 1)]
print('Warning: Detection catalog has {} duplicate(s)!'.format(Ndups))
print('Removing the following duplicates from detection catalog:')
print(dup_ids)
Nbefore = catalog.Nobjs
for did in dup_ids:
indx = np.where(catalog._cat['bal_id']==did)[0]
L = len(indx)
for i in range(L-1): # keep last one
catalog._cat.remove_row(indx[i])
catalog.Nobjs = len(catalog._cat)
assert catalog.Nobjs == (Nbefore - Ndups)
print('{} duplicates removed, catalog size now {}'.format(Ndups, catalog.Nobjs))
return
| [
"numpy.where",
"numpy.unique"
] | [((457, 531), 'numpy.unique', 'np.unique', (["catalog._cat['bal_id']"], {'return_inverse': '(True)', 'return_counts': '(True)'}), "(catalog._cat['bal_id'], return_inverse=True, return_counts=True)\n", (466, 531), True, 'import numpy as np\n'), ((705, 726), 'numpy.where', 'np.where', (['(unq_cnt > 1)'], {}), '(unq_cnt > 1)\n', (713, 726), True, 'import numpy as np\n'), ((1009, 1048), 'numpy.where', 'np.where', (["(catalog._cat['bal_id'] == did)"], {}), "(catalog._cat['bal_id'] == did)\n", (1017, 1048), True, 'import numpy as np\n')] |
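A minimal, self-contained sketch of the duplicate-detection idea used in applyTo above, run on a plain NumPy array instead of a balutils catalog (the toy `bal_ids` values are made up):

import numpy as np

# Toy stand-in for the catalog's 'bal_id' column.
bal_ids = np.array([10, 11, 12, 12, 13])

unq, unq_idx, unq_cnt = np.unique(bal_ids, return_inverse=True, return_counts=True)
dup_ids = unq[np.where(unq_cnt > 1)]               # ids occurring more than once -> [12]
dup_rows = np.where(np.isin(bal_ids, dup_ids))[0]    # row indices holding duplicates -> [2, 3]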
# Generated by Django 2.0.6 on 2018-07-24 16:11
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('marketplace', '0055_auto_20180724_1042'),
]
operations = [
migrations.AlterField(
model_name='organization',
name='logo_file',
field=models.ImageField(blank=True, help_text='Upload an image file that represents your organization', null=True, upload_to='orglogos/', verbose_name='Organization logo'),
),
]
| [
"django.db.models.ImageField"
] | [((349, 523), 'django.db.models.ImageField', 'models.ImageField', ([], {'blank': '(True)', 'help_text': '"""Upload an image file that represents your organization"""', 'null': '(True)', 'upload_to': '"""orglogos/"""', 'verbose_name': '"""Organization logo"""'}), "(blank=True, help_text=\n 'Upload an image file that represents your organization', null=True,\n upload_to='orglogos/', verbose_name='Organization logo')\n", (366, 523), False, 'from django.db import migrations, models\n')] |
import torch
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "6,7"
import json
import codecs
import pandas as pd
import csv
if __name__ == '__main__':
for index in range(32,33):
print(index)
json_file = '/datanew/hwb/data/Football/SoftWare/{}/{}.json'.format(index,index)
with codecs.open(json_file, 'r', 'utf-8-sig') as f:
action_datas = json.load(f)
data = action_datas['data']
Team = {}
for item in data:
team = item['team']
name = item['name']
num = item['num']
            if team not in Team:
                Team[team] = {}
            if num not in Team[team]:
                Team[team][num] = name
with open('/datanew/hwb/data/Football/SoftWare/四官报告/{}_siguan.csv'.format(index),'w',encoding='utf-8-sig') as f:
writer = csv.writer(f)
            # team_idx avoids shadowing the outer match index used in the file paths
            for team in Team.keys():
                writer.writerow([team])
            for team_idx, team in enumerate(Team.keys()):
                for num, name in Team[team].items():
                    writer.writerow([team_idx + 1, num, name])
                    # f.writelines('{}\t{}\t{}\n'.format(team_idx + 1, num, name))
                    print(team_idx + 1, num, name)
print() | [
"json.load",
"codecs.open",
"csv.writer"
] | [((305, 345), 'codecs.open', 'codecs.open', (['json_file', '"""r"""', '"""utf-8-sig"""'], {}), "(json_file, 'r', 'utf-8-sig')\n", (316, 345), False, 'import codecs\n'), ((379, 391), 'json.load', 'json.load', (['f'], {}), '(f)\n', (388, 391), False, 'import json\n'), ((922, 935), 'csv.writer', 'csv.writer', (['f'], {}), '(f)\n', (932, 935), False, 'import csv\n')] |
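The layout of the per-match JSON file is not shown above; judging only from the keys the script reads ('data', 'team', 'name', 'num'), a minimal input would look like the sketch below (all values are illustrative):

# Hypothetical minimal {index}.json content, matching only the keys the script accesses.
example_action_datas = {
    "data": [
        {"team": "Team A", "name": "Player One", "num": "10"},
        {"team": "Team B", "name": "Player Two", "num": "7"},
    ]
}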
# coding: utf-8
'''
REST_FRAMEWORK = {
'EXCEPTION_HANDLER': 'mymedia.exceptions.exception_handler'
}
'''
from rest_framework.views import exception_handler as default_handler
from rest_framework.response import Response
from rest_framework import status
from .encoders import BaseObjectEncoder
import traceback
def exception_handler(exc, context):
response = default_handler(exc, context)
setattr(exc, 'exc', exc.__class__.__name__)
content = BaseObjectEncoder.to_json(exc)
return Response(content, status=status.HTTP_400_BAD_REQUEST)
| [
"rest_framework.response.Response",
"rest_framework.views.exception_handler"
] | [((369, 398), 'rest_framework.views.exception_handler', 'default_handler', (['exc', 'context'], {}), '(exc, context)\n', (384, 398), True, 'from rest_framework.views import exception_handler as default_handler\n'), ((503, 556), 'rest_framework.response.Response', 'Response', (['content'], {'status': 'status.HTTP_400_BAD_REQUEST'}), '(content, status=status.HTTP_400_BAD_REQUEST)\n', (511, 556), False, 'from rest_framework.response import Response\n')] |
import unittest
import numpy as np
from topoml_util import geom_scaler as gs
# noinspection PyUnresolvedReferences
dummy_geom = np.zeros((1, 1, 5))
square = np.array([[
[0., 0., 1., 0., 0.],
[1., 0., 1., 0., 0.],
[1., 1., 1., 0., 0.],
[0., 1., 1., 0., 0.],
[0., 0., 0., 0., 1.],
]])
square_duplicate_nodes = np.array([[
[0., 0., 1., 0., 0.],
[1., 0., 1., 0., 0.],
[1., 1., 1., 0., 0.],
[1., 1., 1., 0., 0.],
[1., 1., 1., 0., 0.],
[1., 1., 1., 0., 0.],
[1., 1., 1., 0., 0.],
[0., 1., 1., 0., 0.],
[0., 0., 0., 0., 1.],
]])
rectangle = np.array([[
[0., 0., 1., 0., 0.],
[1., 0., 1., 0., 0.],
[1., 2., 1., 0., 0.],
[0., 2., 1., 0., 0.],
[0., 0., 0., 0., 1.],
]])
normalized_square = np.array([[
[-1., -1., 1., 0., 0.],
[ 1., -1., 1., 0., 0.],
[ 1., 1., 1., 0., 0.],
[-1., 1., 1., 0., 0.],
[-1., -1., 0., 0., 1.],
]])
class TestGeomScaler(unittest.TestCase):
def test_localized_mean(self):
means = gs.localized_mean(square)
for mean in means[0]:
self.assertTrue((mean == 0.5).all())
def test_localized_mean_rectangle(self):
means = gs.localized_mean(rectangle)
self.assertEqual(means[0, 0, 0], 0.5)
self.assertEqual(means[0, 0, 1], 1)
def test_localized_mean_dup_nodes(self):
means = gs.localized_mean(square_duplicate_nodes)
self.assertTrue((means == 0.75).all())
def test_scaling_square(self):
scale = gs.scale(square)
self.assertEqual(scale, 0.5)
def test_scaling_square_dup_nodes(self):
scale = gs.scale(square_duplicate_nodes)
self.assertEqual(scale, 0.5)
def test_transform(self):
# scaled_square = square[0] * 2
# scaled_square[4, 12] = 1.
scale = gs.scale(square)
n_square = gs.transform(square, scale=scale)
self.assertTrue((n_square == normalized_square).all())
coords = [geom[:, :2].flatten() for geom in n_square]
coords = [item for sublist in coords for item in sublist]
std = np.std(coords)
self.assertAlmostEqual(std, 1., 1)
def test_upsized_transform(self):
square_0 = square[0] * 2
square_0[:4, 2] = 1.
square_0[4, 4] = 1.
scale = gs.scale([square_0])
n_square = gs.transform([square_0], scale=scale)
self.assertTrue((n_square == normalized_square).all())
coords = [geom[:, :2].flatten() for geom in n_square]
coords = [item for sublist in coords for item in sublist]
std = np.std(coords)
self.assertAlmostEqual(std, 1., 1)
| [
"topoml_util.geom_scaler.scale",
"numpy.array",
"numpy.zeros",
"topoml_util.geom_scaler.localized_mean",
"numpy.std",
"topoml_util.geom_scaler.transform"
] | [((131, 150), 'numpy.zeros', 'np.zeros', (['(1, 1, 5)'], {}), '((1, 1, 5))\n', (139, 150), True, 'import numpy as np\n'), ((161, 312), 'numpy.array', 'np.array', (['[[[0.0, 0.0, 1.0, 0.0, 0.0], [1.0, 0.0, 1.0, 0.0, 0.0], [1.0, 1.0, 1.0, 0.0,\n 0.0], [0.0, 1.0, 1.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 1.0]]]'], {}), '([[[0.0, 0.0, 1.0, 0.0, 0.0], [1.0, 0.0, 1.0, 0.0, 0.0], [1.0, 1.0,\n 1.0, 0.0, 0.0], [0.0, 1.0, 1.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 1.0]]])\n', (169, 312), True, 'import numpy as np\n'), ((333, 602), 'numpy.array', 'np.array', (['[[[0.0, 0.0, 1.0, 0.0, 0.0], [1.0, 0.0, 1.0, 0.0, 0.0], [1.0, 1.0, 1.0, 0.0,\n 0.0], [1.0, 1.0, 1.0, 0.0, 0.0], [1.0, 1.0, 1.0, 0.0, 0.0], [1.0, 1.0, \n 1.0, 0.0, 0.0], [1.0, 1.0, 1.0, 0.0, 0.0], [0.0, 1.0, 1.0, 0.0, 0.0], [\n 0.0, 0.0, 0.0, 0.0, 1.0]]]'], {}), '([[[0.0, 0.0, 1.0, 0.0, 0.0], [1.0, 0.0, 1.0, 0.0, 0.0], [1.0, 1.0,\n 1.0, 0.0, 0.0], [1.0, 1.0, 1.0, 0.0, 0.0], [1.0, 1.0, 1.0, 0.0, 0.0], [\n 1.0, 1.0, 1.0, 0.0, 0.0], [1.0, 1.0, 1.0, 0.0, 0.0], [0.0, 1.0, 1.0, \n 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 1.0]]])\n', (341, 602), True, 'import numpy as np\n'), ((596, 747), 'numpy.array', 'np.array', (['[[[0.0, 0.0, 1.0, 0.0, 0.0], [1.0, 0.0, 1.0, 0.0, 0.0], [1.0, 2.0, 1.0, 0.0,\n 0.0], [0.0, 2.0, 1.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 1.0]]]'], {}), '([[[0.0, 0.0, 1.0, 0.0, 0.0], [1.0, 0.0, 1.0, 0.0, 0.0], [1.0, 2.0,\n 1.0, 0.0, 0.0], [0.0, 2.0, 1.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 1.0]]])\n', (604, 747), True, 'import numpy as np\n'), ((763, 925), 'numpy.array', 'np.array', (['[[[-1.0, -1.0, 1.0, 0.0, 0.0], [1.0, -1.0, 1.0, 0.0, 0.0], [1.0, 1.0, 1.0, \n 0.0, 0.0], [-1.0, 1.0, 1.0, 0.0, 0.0], [-1.0, -1.0, 0.0, 0.0, 1.0]]]'], {}), '([[[-1.0, -1.0, 1.0, 0.0, 0.0], [1.0, -1.0, 1.0, 0.0, 0.0], [1.0, \n 1.0, 1.0, 0.0, 0.0], [-1.0, 1.0, 1.0, 0.0, 0.0], [-1.0, -1.0, 0.0, 0.0,\n 1.0]]])\n', (771, 925), True, 'import numpy as np\n'), ((1013, 1038), 'topoml_util.geom_scaler.localized_mean', 'gs.localized_mean', (['square'], {}), '(square)\n', (1030, 1038), True, 'from topoml_util import geom_scaler as gs\n'), ((1180, 1208), 'topoml_util.geom_scaler.localized_mean', 'gs.localized_mean', (['rectangle'], {}), '(rectangle)\n', (1197, 1208), True, 'from topoml_util import geom_scaler as gs\n'), ((1361, 1402), 'topoml_util.geom_scaler.localized_mean', 'gs.localized_mean', (['square_duplicate_nodes'], {}), '(square_duplicate_nodes)\n', (1378, 1402), True, 'from topoml_util import geom_scaler as gs\n'), ((1502, 1518), 'topoml_util.geom_scaler.scale', 'gs.scale', (['square'], {}), '(square)\n', (1510, 1518), True, 'from topoml_util import geom_scaler as gs\n'), ((1618, 1650), 'topoml_util.geom_scaler.scale', 'gs.scale', (['square_duplicate_nodes'], {}), '(square_duplicate_nodes)\n', (1626, 1650), True, 'from topoml_util import geom_scaler as gs\n'), ((1811, 1827), 'topoml_util.geom_scaler.scale', 'gs.scale', (['square'], {}), '(square)\n', (1819, 1827), True, 'from topoml_util import geom_scaler as gs\n'), ((1847, 1880), 'topoml_util.geom_scaler.transform', 'gs.transform', (['square'], {'scale': 'scale'}), '(square, scale=scale)\n', (1859, 1880), True, 'from topoml_util import geom_scaler as gs\n'), ((2086, 2100), 'numpy.std', 'np.std', (['coords'], {}), '(coords)\n', (2092, 2100), True, 'import numpy as np\n'), ((2289, 2309), 'topoml_util.geom_scaler.scale', 'gs.scale', (['[square_0]'], {}), '([square_0])\n', (2297, 2309), True, 'from topoml_util import geom_scaler as gs\n'), ((2329, 2366), 'topoml_util.geom_scaler.transform', 'gs.transform', (['[square_0]'], {'scale': 
'scale'}), '([square_0], scale=scale)\n', (2341, 2366), True, 'from topoml_util import geom_scaler as gs\n'), ((2572, 2586), 'numpy.std', 'np.std', (['coords'], {}), '(coords)\n', (2578, 2586), True, 'import numpy as np\n')] |
import pickle
import warnings
import numpy as np
import pandas as pd
from copy import deepcopy as copy
from treelib import Tree
from functools import reduce
class SuperTree(Tree):
# tested
def get_children_ids(self, nid):
return self.is_branch(nid)
def get_bfs_nodes(self, ):
"""
get BFS (breadth first search) node ids
:return: ids
"""
# tested
nodes = {}
for i in range(self.depth() + 1):
nodes[i] = []
for node in self.expand_tree(mode=2):
if self.level(node) == i:
nodes[i].append(node)
return nodes
def get_bfs_data(self, ):
"""
get BFS (breadth first search) nodes data, in {layer_number: [values]} format
:return: data: {layer_number: [values]}
"""
# tested
nodes = self.get_bfs_nodes()
return {i: list(map(lambda x: self[x].data, nodes[i])) for i in range(self.depth() + 1)}
def get_dfs_nodes(self, ):
"""
get DFS (depth first search) node ids
:return: ids
"""
# tested
return self.get_paths_to_leaves()
def get_paths_to_leaves(self, ):
"""
:return:
"""
res = []
for leaf in self.leaves():
res.append([nid for nid in self.rsearch(leaf.identifier)][::-1])
return res
def get_dfs_data(self, dfs_nodes=None):
"""
get DFS (depth first search) nodes data, in [[val1, val2, ...], [val1, ...]] format
the order of dfs node ids is preserved.
:return: data: [[val1, val2, ...], [val1, ...]]
"""
# tested
if not dfs_nodes:
dfs_nodes = self.get_dfs_nodes() # list
return [list(map(lambda x: self[x].data, path)) for path in dfs_nodes]
def init_nodes_data(self, value=0):
"""
        initialize node values; you can set the starting value of all nodes with :param value
:return: None
"""
# tested
for id in self.expand_tree(mode=1):
self[id].data = value
def from_paths(self, paths):
"""
construct a super_tree from node id paths
:param paths: node id paths, eg [['country','china'],['country','usa','texas']]
:return: None
"""
# tested
        # check for duplicated child-parent relationships
for path in paths:
current_node = self.root
for nid in path:
children_ids = self.get_children_ids(current_node)
if nid not in children_ids:
self.create_node(identifier=nid, parent=current_node)
current_node = nid
'''
def from_child_father(self, ):
return None
'''
def from_pickle(self, file: str):
"""
restore a super_tree from a pickle file
:param file: path to pickle file
:return: a super_tree obj.
"""
# tested
with open(file, 'rb') as f:
stree = pickle.load(f)
return stree
def get_path_to_node(self, node_id: str):
"""
get the path of any node in the tree
:param node_id: node id
:return: path
"""
# tested
path = [nid for nid in self.rsearch(node_id)]
path.reverse()
return path
def fill_with(self, data: dict):
"""
update values for nodes on the tree
fix multiple update issues. 2020/3/23
:param data: data, eg.{id: value}
:return: None
"""
# tested
for nid, val in data.items():
self[nid].data = val
def to_pickle(self, file: str):
"""
backup super_tree to a pickle file
:param file: the path of pickle file
:return: None
"""
# tested
with open(file, 'wb') as f:
pickle.dump(self, f)
def to_matrix_npy(self, file: str, dtype=np.float32):
"""
save matrix to npy file
:param file: the path of npy file
:param dtype: data type
:return: None
"""
# tested
matrix = self.get_matrix(dtype=dtype)
np.save(file, matrix)
def copy(self, using_pickle=True):
"""
get a deep copy of super_tree
:return: new copy of super_tree
"""
        if not using_pickle:
            return copy(self)  # `deepcopy` was imported as `copy` at the top of this module
else:
return pickle.loads(pickle.dumps(self))
# return super_tree(self.subtree(self.root), deep=True)
def remove_levels(self, level: int):
"""
        remove levels that are greater than :param level
:param level: integer
:return: None
"""
# tested
nids = list(self.expand_tree(mode=1))[::-1]
for nid in nids:
if self.level(nid) >= level:
self.remove_node(nid) # check
def save_paths_to_csv(self, file: str, fill_na=True):
"""
save id paths of super_tree to a csv file
:param file: csv file path
:param fill_na: if True, fill na with ''
:return: None
"""
# tested
paths = self.get_paths_to_leaves()
df = pd.DataFrame(paths)
        if fill_na:
            df = df.fillna('')  # fillna returns a new DataFrame; the result must be kept
df.to_csv(file, sep=',')
def get_top_down_ids(self, max_level=-1):
"""
get node ids in top-down order, tested.
:return: [id1, id2, ...]
"""
if max_level == -1:
return [nid for nid in self.expand_tree(mode=self.WIDTH)]
else:
return [nid for nid in self.expand_tree(mode=self.WIDTH) if self.level(nid) <= max_level]
def get_bottom_up_ids(self, ):
"""
get node ids in bottom-up order, tested.
:return: [id1, id2, ...]
"""
top_down_ids = self.get_top_down_ids()
ids = copy(top_down_ids)
ids.reverse()
return ids
def get_ids_by_level(self, ):
"""
get node ids grouped by level, tested.
:return: {1: [id1, ...], 2: [idn, ...], ...}
"""
levels = range(self.depth() + 1)
ids = {level: [] for level in levels}
for nid in self.expand_tree(mode=self.WIDTH):
ids[self.level(nid)].append(nid)
return ids
def get_paths_to_level(self, level=-1, ids_by_level=None, include_inner_leaves=True):
"""
        get paths down to a specific level of the tree; you can pass :param ids_by_level to \
        speed up this calculation, tested
:param include_inner_leaves: paths to inner leaves are included with this set to be True
:param ids_by_level: ids_by_level dict calculated by `get_ids_by_level` method
:param level: level
:return: [path1, path2, path3, ...], each path: [id1, id2, id3, ...]
"""
if not ids_by_level:
msg = '''Calculating ids_by_level could be time-consuming, \
you can specify the `ids_by_level` parameter to speed up.
e.g. `ids=tree.get_ids_by_level(); tree.get_paths_to_level(level=1; ids_by_level=ids)`.'''.replace('\t', '')
warnings.warn(msg, SyntaxWarning)
ids_by_level = self.get_ids_by_level()
if level == -1:
return self.get_paths_to_leaves()
elif include_inner_leaves:
print('include_inner_leaves: yes')
inner_leaves = [node.identifier for node in self.leaves() if self.level(node.identifier) < level]
ids_use = ids_by_level[level] + inner_leaves
else:
print('include_inner_leaves: no')
ids_use = ids_by_level[level]
return [[nid_ for nid_ in self.rsearch(nid)][::-1] for nid in ids_use]
def get_matrix(self, paths=None, ncol=None, dtype=np.float32):
"""
get matrix (like dfs data format but is a numpy n-dimensional array), tested
:param ncol: number of columns of matrix
:param paths: paths you want to generate matrix, will calculate itself (time-consuming) if absent \
:param dtype: data type
:return: matrix
"""
# tested
if not paths:
paths = self.get_paths_to_leaves()
if not ncol:
ncol = max([len(i) for i in paths])
nrow = len(paths)
Matrix = np.zeros(ncol * nrow, dtype=dtype).reshape(nrow, ncol)
for row, path in enumerate(paths):
for col, nid in enumerate(path):
Matrix[row, col] = self[nid].data
return Matrix
def update_values(self, bottom_up_ids=None):
"""
starting from the leaf node to the root node, update the value of each node in the tree
new_value = old_value + sum([child.data for child in children])
:return: None
"""
# tested
if not bottom_up_ids:
msg = '''Calculating the bottom up ids could be very time-consuming, \
you can specify the `bottom_up_ids` parameter to speed up.
e.g. `ids=tree.get_bottom_up_ids; tree.update_value(bottom_up_ids=ids)`.'''.replace('\t', '')
warnings.warn(msg, SyntaxWarning)
bottom_up_ids = self.get_bottom_up_ids()
for nid in bottom_up_ids:
d = sum([node.data for node in self.children(nid)])
self[nid].data = self[nid].data + d
"""
get_bottom_up_ids() tested
get_top_down_ids() tested
update_values() tested
get_matrix() tested
get_ids_by_level() tested
get_paths_to_level() tested
"""
# paths_to_leaves replacement
| [
"pickle.dump",
"pickle.dumps",
"pickle.load",
"numpy.zeros",
"copy.deepcopy",
"pandas.DataFrame",
"warnings.warn",
"numpy.save"
] | [((3588, 3609), 'numpy.save', 'np.save', (['file', 'matrix'], {}), '(file, matrix)\n', (3595, 3609), True, 'import numpy as np\n'), ((4457, 4476), 'pandas.DataFrame', 'pd.DataFrame', (['paths'], {}), '(paths)\n', (4469, 4476), True, 'import pandas as pd\n'), ((5029, 5047), 'copy.deepcopy', 'copy', (['top_down_ids'], {}), '(top_down_ids)\n', (5033, 5047), True, 'from copy import deepcopy as copy\n'), ((2615, 2629), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (2626, 2629), False, 'import pickle\n'), ((3331, 3351), 'pickle.dump', 'pickle.dump', (['self', 'f'], {}), '(self, f)\n', (3342, 3351), False, 'import pickle\n'), ((6162, 6195), 'warnings.warn', 'warnings.warn', (['msg', 'SyntaxWarning'], {}), '(msg, SyntaxWarning)\n', (6175, 6195), False, 'import warnings\n'), ((7886, 7919), 'warnings.warn', 'warnings.warn', (['msg', 'SyntaxWarning'], {}), '(msg, SyntaxWarning)\n', (7899, 7919), False, 'import warnings\n'), ((3814, 3832), 'pickle.dumps', 'pickle.dumps', (['self'], {}), '(self)\n', (3826, 3832), False, 'import pickle\n'), ((7182, 7216), 'numpy.zeros', 'np.zeros', (['(ncol * nrow)'], {'dtype': 'dtype'}), '(ncol * nrow, dtype=dtype)\n', (7190, 7216), True, 'import numpy as np\n')] |
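A short usage sketch for the SuperTree class above, assuming it is importable (the module name `supertree` is hypothetical). Note that from_paths() walks the children of self.root, so a root node is created explicitly before loading paths:

from supertree import SuperTree  # hypothetical import path

tree = SuperTree()
tree.create_node(identifier='root')                 # from_paths() expects an existing root
tree.from_paths([['country', 'china'],
                 ['country', 'usa', 'texas']])

tree.init_nodes_data(0)                             # every node starts at 0
tree.fill_with({'china': 5, 'texas': 3})            # set leaf counts
ids = tree.get_bottom_up_ids()
tree.update_values(bottom_up_ids=ids)               # propagate counts towards the root

print(tree['country'].data)                         # 8
print(tree.get_matrix())                            # one row per path in get_paths_to_leaves()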
import discord
from discord.ext import commands
class Voice_Text_Link(commands.Cog):
def __init__(self, bot):
self.bot = bot
async def cog_check(self, ctx):
if ctx.channel.type == discord.ChannelType.private:
return True
if await self.bot.is_owner(ctx.author):
return True
enabled = await self.bot.pg_conn.fetchval("""
SELECT enabled FROM cogs_data
WHERE guild_id = $1
""", ctx.guild.id)
if f"Bot.cogs.{self.qualified_name}" in enabled:
return True
return False
@commands.Cog.listener()
async def on_voice_state_update(self, member: discord.Member, before: discord.VoiceState, after: discord.VoiceState):
join_overwrites = {
member.guild.default_role: discord.PermissionOverwrite(read_messages=False, send_messages=False, read_message_history=False),
member: discord.PermissionOverwrite(read_messages=True, send_messages=True, read_message_history=True)
}
leave_overwrites = {
member.guild.default_role: discord.PermissionOverwrite(read_messages=False, send_messages=False, read_message_history=False),
member: discord.PermissionOverwrite(read_messages=False, send_messages=False, read_message_history=False)
}
enabled = await self.bot.pg_conn.fetchval("""
SELECT enabled FROM cogs_data
WHERE guild_id = $1
""", member.guild.id)
if f"Bot.cogs.{self.qualified_name}" in enabled:
if after.channel and (not before.channel):
after_text_channel_id, history_for_text = await self.bot.pg_conn.fetchval("""
SELECT (text_channel_id, history_for_text) FROM voice_text_data
WHERE guild_id = $1 AND voice_channel_id = $2
""", member.guild.id, after.channel.id)
channel: discord.TextChannel = discord.utils.get(member.guild.text_channels, id=after_text_channel_id)
if channel:
join_overwrites = {
member.guild.default_role: discord.PermissionOverwrite(read_messages=False, send_messages=False, read_message_history=False),
member: discord.PermissionOverwrite(read_messages=True, send_messages=True, read_message_history=True if history_for_text else False)
}
await channel.edit(overwrites=join_overwrites)
elif (not after.channel) and before.channel:
before_text_channel_id = await self.bot.pg_conn.fetchval("""
SELECT text_channel_id FROM voice_text_data
WHERE guild_id = $1 AND voice_channel_id = $2
""", member.guild.id, before.channel.id)
channel: discord.TextChannel = discord.utils.get(member.guild.text_channels, id=before_text_channel_id)
if channel:
await channel.edit(overwrites=leave_overwrites)
await channel.set_permissions(member, overwrite=None)
elif after.channel and before.channel:
if not after.channel == before.channel:
before_text_channel_id = await self.bot.pg_conn.fetchval("""
SELECT text_channel_id FROM voice_text_data
WHERE guild_id = $1 AND voice_channel_id = $2
""", member.guild.id, before.channel.id)
after_text_channel_id = await self.bot.pg_conn.fetchval("""
SELECT text_channel_id FROM voice_text_data
WHERE guild_id = $1 AND voice_channel_id = $2
""", member.guild.id, after.channel.id)
before_channel: discord.TextChannel = discord.utils.get(member.guild.text_channels, id=before_text_channel_id)
after_channel: discord.TextChannel = discord.utils.get(member.guild.text_channels, id=after_text_channel_id)
if before_channel:
await before_channel.edit(overwrites=leave_overwrites)
if after_channel:
await after_channel.edit(overwrites=join_overwrites)
@commands.group(aliases=["vtl", "voice_link"], invoke_without_command=True, help="Returns all Voice Text Links.")
async def voice_text_link(self, ctx):
voice_text_data = await self.bot.pg_conn.fetch("""
SELECT * FROM voice_text_data
WHERE guild_id = $1
""", ctx.guild.id)
embed = discord.Embed()
embed.title = "Available voice text links!"
msg = ''
for index, voice_text_link1 in enumerate(voice_text_data):
index += 1
voice_channel = discord.utils.get(ctx.guild.voice_channels, id=voice_text_link1['voice_channel_id'])
text_channel = discord.utils.get(ctx.guild.text_channels, id=voice_text_link1['text_channel_id'])
msg += f"{index}. {voice_channel.mention} -> {text_channel.mention}\n"
embed.description = msg
embed.set_author(name=ctx.me.name, icon_url=ctx.me.avatar_url)
await ctx.send(embed=embed)
@voice_text_link.command(name="add", help='Creates a new a voice text link', aliases=['+'])
async def vtl_add(self, ctx, voice_channel: discord.VoiceChannel, text_channel: discord.TextChannel):
        if voice_channel is None or text_channel is None:
await ctx.send("You didn't send voice channel or text channel!")
else:
await self.bot.pg_conn.execute("""
INSERT INTO voice_text_data (guild_id, voice_channel_id, text_channel_id)
VALUES ($1, $2, $3)
""", ctx.guild.id, voice_channel.id, text_channel.id)
await ctx.send(f"Added the voice text link! {voice_channel.name} -> {text_channel.name}")
@voice_text_link.command(name="remove", help="Deletes a existing voice text link", aliases=['-'])
async def vtl_remove(self, ctx, voice_channel: discord.VoiceChannel, text_channel: discord.TextChannel):
if voice_channel is None or text_channel is None:
await ctx.send("You didn't send voice channel or text channel!")
else:
await self.bot.pg_conn.execute("""
DELETE FROM voice_text_data
WHERE guild_id = $1 AND voice_channel_id = $2 AND text_channel_id = $3
""", ctx.guild.id, voice_channel.id, text_channel.id)
await ctx.send(f"Removed the voice text link! {voice_channel.name} -> {text_channel.name}")
def setup(bot):
bot.add_cog(Voice_Text_Link(bot))
| [
"discord.ext.commands.Cog.listener",
"discord.PermissionOverwrite",
"discord.utils.get",
"discord.ext.commands.group",
"discord.Embed"
] | [((588, 611), 'discord.ext.commands.Cog.listener', 'commands.Cog.listener', ([], {}), '()\n', (609, 611), False, 'from discord.ext import commands\n'), ((4233, 4349), 'discord.ext.commands.group', 'commands.group', ([], {'aliases': "['vtl', 'voice_link']", 'invoke_without_command': '(True)', 'help': '"""Returns all Voice Text Links."""'}), "(aliases=['vtl', 'voice_link'], invoke_without_command=True,\n help='Returns all Voice Text Links.')\n", (4247, 4349), False, 'from discord.ext import commands\n'), ((4616, 4631), 'discord.Embed', 'discord.Embed', ([], {}), '()\n', (4629, 4631), False, 'import discord\n'), ((801, 902), 'discord.PermissionOverwrite', 'discord.PermissionOverwrite', ([], {'read_messages': '(False)', 'send_messages': '(False)', 'read_message_history': '(False)'}), '(read_messages=False, send_messages=False,\n read_message_history=False)\n', (828, 902), False, 'import discord\n'), ((920, 1018), 'discord.PermissionOverwrite', 'discord.PermissionOverwrite', ([], {'read_messages': '(True)', 'send_messages': '(True)', 'read_message_history': '(True)'}), '(read_messages=True, send_messages=True,\n read_message_history=True)\n', (947, 1018), False, 'import discord\n'), ((1093, 1194), 'discord.PermissionOverwrite', 'discord.PermissionOverwrite', ([], {'read_messages': '(False)', 'send_messages': '(False)', 'read_message_history': '(False)'}), '(read_messages=False, send_messages=False,\n read_message_history=False)\n', (1120, 1194), False, 'import discord\n'), ((1212, 1313), 'discord.PermissionOverwrite', 'discord.PermissionOverwrite', ([], {'read_messages': '(False)', 'send_messages': '(False)', 'read_message_history': '(False)'}), '(read_messages=False, send_messages=False,\n read_message_history=False)\n', (1239, 1313), False, 'import discord\n'), ((4819, 4908), 'discord.utils.get', 'discord.utils.get', (['ctx.guild.voice_channels'], {'id': "voice_text_link1['voice_channel_id']"}), "(ctx.guild.voice_channels, id=voice_text_link1[\n 'voice_channel_id'])\n", (4836, 4908), False, 'import discord\n'), ((4931, 5018), 'discord.utils.get', 'discord.utils.get', (['ctx.guild.text_channels'], {'id': "voice_text_link1['text_channel_id']"}), "(ctx.guild.text_channels, id=voice_text_link1[\n 'text_channel_id'])\n", (4948, 5018), False, 'import discord\n'), ((1945, 2016), 'discord.utils.get', 'discord.utils.get', (['member.guild.text_channels'], {'id': 'after_text_channel_id'}), '(member.guild.text_channels, id=after_text_channel_id)\n', (1962, 2016), False, 'import discord\n'), ((2842, 2914), 'discord.utils.get', 'discord.utils.get', (['member.guild.text_channels'], {'id': 'before_text_channel_id'}), '(member.guild.text_channels, id=before_text_channel_id)\n', (2859, 2914), False, 'import discord\n'), ((2136, 2237), 'discord.PermissionOverwrite', 'discord.PermissionOverwrite', ([], {'read_messages': '(False)', 'send_messages': '(False)', 'read_message_history': '(False)'}), '(read_messages=False, send_messages=False,\n read_message_history=False)\n', (2163, 2237), False, 'import discord\n'), ((2267, 2396), 'discord.PermissionOverwrite', 'discord.PermissionOverwrite', ([], {'read_messages': '(True)', 'send_messages': '(True)', 'read_message_history': '(True if history_for_text else False)'}), '(read_messages=True, send_messages=True,\n read_message_history=True if history_for_text else False)\n', (2294, 2396), False, 'import discord\n'), ((3792, 3864), 'discord.utils.get', 'discord.utils.get', (['member.guild.text_channels'], {'id': 'before_text_channel_id'}), 
'(member.guild.text_channels, id=before_text_channel_id)\n', (3809, 3864), False, 'import discord\n'), ((3922, 3993), 'discord.utils.get', 'discord.utils.get', (['member.guild.text_channels'], {'id': 'after_text_channel_id'}), '(member.guild.text_channels, id=after_text_channel_id)\n', (3939, 3993), False, 'import discord\n')] |
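The cog above queries two Postgres tables that are not defined in this file. A plausible schema, inferred only from the columns referenced in the queries (column types and the exact representation of `enabled` are guesses), might be:

# Hypothetical DDL for the tables the Voice_Text_Link cog assumes.
VOICE_TEXT_SCHEMA = """
CREATE TABLE IF NOT EXISTS cogs_data (
    guild_id BIGINT PRIMARY KEY,
    enabled  TEXT
);
CREATE TABLE IF NOT EXISTS voice_text_data (
    guild_id         BIGINT,
    voice_channel_id BIGINT,
    text_channel_id  BIGINT,
    history_for_text BOOLEAN DEFAULT FALSE
);
"""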
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import iris
from cube_funcs import get_dates
from iris.experimental.equalise_cubes import equalise_attributes
from iris.util import unify_time_units
import warnings
warnings.filterwarnings("ignore")
def read_cmip_cube(fname, constraint=None):
try:
cube = iris.load_cube(fname, constraint=constraint)
except:
cubes = iris.load(fname, constraints=constraint)
equalise_attributes(cubes)
unify_time_units(cubes)
try:
cube = cubes.merge_cube()
except:
cube = cubes.concatenate_cube()
print(cube.shape)
return(cube)
def iris_read(data_path, standard_name, short_name=None):
cubes = iris.load(data_path, standard_name)
if short_name is not None:
var_name_temp = iris.Constraint(cube_func=lambda cube:
cube.var_name == short_name)
cubes = cubes.extract(var_name_temp)
return(cubes)
#%%
# change path to location of data
# data should be sorted into folders by model name
path = '/nfs/a68/gyjcab/datasets/cmip5/'
# edit to read in another variable
var = 'evspsbl'
# 13 CMIP5 models analysed in study
models = ['ACCESS1-3', 'bcc-csm1-1','BNU-ESM', 'CanESM2','CCSM4', 'CESM1-BGC',
'FIO-ESM', 'HadGEM2-CC', 'HadGEM2-ES', 'inmcm4',
'IPSL-CM5A-LR', 'MPI-ESM-LR','NorESM1-M']
# loop over models, read in data and calculate mean of multiple runs
for model in models:
constraint = None
if model == 'HadGEM2-ES':
constraint = iris.Constraint(time=lambda cell:
1859 <= cell.point.year <= 2004)
if model == 'HadGEM2-CC':
constraint = iris.Constraint(time=lambda cell:
1960 <= cell.point.year <= 2004)
print(model)
temp1 = temp2 = temp3 = temp4 = temp5 = temp6 = temp7 = None
temp8 = temp9 = temp10 = temp11 = temp12 = temp13 = temp14 = None
# path to historical data
fpath = path + var + '/historical/' + model
temp1 = read_cmip_cube(fpath + '/*r1i1p1_*.nc', constraint=constraint)
try:
temp2 = read_cmip_cube(fpath + '/*r2i1p1*.nc', constraint=constraint)
except OSError:
pass
try:
temp3 = read_cmip_cube(fpath + '/*r3i1p1*.nc', constraint=constraint)
except OSError:
pass
try:
temp4 = read_cmip_cube(fpath + '/*r4i1p1*.nc', constraint=constraint)
except OSError:
pass
try:
temp5 = read_cmip_cube(fpath + '/*r5i1p1*.nc', constraint=constraint)
except OSError:
pass
try:
temp6 = read_cmip_cube(fpath + '/*r6i1p1*.nc', constraint=constraint)
except OSError:
pass
try:
temp7 = read_cmip_cube(fpath + '/*r7i1p1*.nc', constraint=constraint)
except OSError:
pass
try:
temp8 = read_cmip_cube(fpath + '/*r8i1p1*.nc', constraint=constraint)
except OSError:
pass
try:
temp9 = read_cmip_cube(fpath + '/*r9i1p1*.nc', constraint=constraint)
except OSError:
pass
try:
temp10 = read_cmip_cube(fpath + '/*r10i1p1*.nc', constraint=constraint)
except OSError:
pass
try:
temp11 = read_cmip_cube(fpath + '/*r11i1p1*.nc', constraint=constraint)
except OSError:
pass
try:
temp12 = read_cmip_cube(fpath + '/*r12i1p1*.nc', constraint=constraint)
except OSError:
pass
try:
temp13 = read_cmip_cube(fpath + '/*r13i1p1*.nc', constraint=constraint)
except OSError:
pass
try:
        temp14 = read_cmip_cube(fpath + '/*r14i1p1*.nc', constraint=constraint)
except OSError:
pass
temp_list = [temp1, temp2, temp3, temp4, temp5, temp6, temp7, temp8, temp9,
temp10, temp11, temp12, temp13, temp14]
# get list of model runs
temp_list_trim = [x for x in temp_list if x is not None]
print(len(temp_list_trim))
# if length only 1 save as new netcdf
if len(temp_list_trim) == 1:
ensemble_mean = temp_list_trim[0]
ensemble_mean.standard_name = temp_list_trim[0].standard_name
ensemble_mean.attributes = temp_list_trim[0].attributes
dates = get_dates(ensemble_mean, verbose=False)
outpath = (fpath + '/test_ensemble_mean_historical_' +
model + '_' + var + '_' +
str(dates[0].year) + str(dates[0].month).zfill(2) + '_' +
str(dates[-1].year) + str(dates[-1].month).zfill(2) + '.nc')
print(outpath)
iris.save(ensemble_mean, outpath)
continue
else:
# if multiple runs calculate mean of runs
n = len(temp_list_trim)
print(n)
equalise_attributes(temp_list_trim)
unify_time_units(temp_list_trim)
        # sum the runs in the same left-to-right order as the original
        # n == 2..14 branches, then divide by the number of runs
        ensemble_sum = temp_list_trim[0]
        for cube in temp_list_trim[1:]:
            ensemble_sum = ensemble_sum + cube
        ensemble_mean = ensemble_sum / n
# save as new netcdf
ensemble_mean.standard_name = temp_list_trim[0].standard_name
ensemble_mean.attributes = temp_list_trim[0].attributes
dates = get_dates(ensemble_mean, verbose=False)
outpath = (fpath + '/test_ensemble_mean_historical_' +
model + '_' + var + '_' +
str(dates[0].year) + str(dates[0].month).zfill(2) + '_' +
str(dates[-1].year) + str(dates[-1].month).zfill(2) + '.nc')
print(outpath)
iris.save(ensemble_mean, outpath)
| [
"warnings.filterwarnings",
"iris.util.unify_time_units",
"iris.save",
"iris.experimental.equalise_cubes.equalise_attributes",
"cube_funcs.get_dates",
"iris.load_cube",
"iris.Constraint",
"iris.load"
] | [((213, 246), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (236, 246), False, 'import warnings\n'), ((730, 765), 'iris.load', 'iris.load', (['data_path', 'standard_name'], {}), '(data_path, standard_name)\n', (739, 765), False, 'import iris\n'), ((316, 360), 'iris.load_cube', 'iris.load_cube', (['fname'], {'constraint': 'constraint'}), '(fname, constraint=constraint)\n', (330, 360), False, 'import iris\n'), ((821, 888), 'iris.Constraint', 'iris.Constraint', ([], {'cube_func': '(lambda cube: cube.var_name == short_name)'}), '(cube_func=lambda cube: cube.var_name == short_name)\n', (836, 888), False, 'import iris\n'), ((1572, 1638), 'iris.Constraint', 'iris.Constraint', ([], {'time': '(lambda cell: 1859 <= cell.point.year <= 2004)'}), '(time=lambda cell: 1859 <= cell.point.year <= 2004)\n', (1587, 1638), False, 'import iris\n'), ((1727, 1793), 'iris.Constraint', 'iris.Constraint', ([], {'time': '(lambda cell: 1960 <= cell.point.year <= 2004)'}), '(time=lambda cell: 1960 <= cell.point.year <= 2004)\n', (1742, 1793), False, 'import iris\n'), ((4321, 4360), 'cube_funcs.get_dates', 'get_dates', (['ensemble_mean'], {'verbose': '(False)'}), '(ensemble_mean, verbose=False)\n', (4330, 4360), False, 'from cube_funcs import get_dates\n'), ((4666, 4699), 'iris.save', 'iris.save', (['ensemble_mean', 'outpath'], {}), '(ensemble_mean, outpath)\n', (4675, 4699), False, 'import iris\n'), ((4843, 4878), 'iris.experimental.equalise_cubes.equalise_attributes', 'equalise_attributes', (['temp_list_trim'], {}), '(temp_list_trim)\n', (4862, 4878), False, 'from iris.experimental.equalise_cubes import equalise_attributes\n'), ((4887, 4919), 'iris.util.unify_time_units', 'unify_time_units', (['temp_list_trim'], {}), '(temp_list_trim)\n', (4903, 4919), False, 'from iris.util import unify_time_units\n'), ((9210, 9249), 'cube_funcs.get_dates', 'get_dates', (['ensemble_mean'], {'verbose': '(False)'}), '(ensemble_mean, verbose=False)\n', (9219, 9249), False, 'from cube_funcs import get_dates\n'), ((9555, 9588), 'iris.save', 'iris.save', (['ensemble_mean', 'outpath'], {}), '(ensemble_mean, outpath)\n', (9564, 9588), False, 'import iris\n'), ((389, 429), 'iris.load', 'iris.load', (['fname'], {'constraints': 'constraint'}), '(fname, constraints=constraint)\n', (398, 429), False, 'import iris\n'), ((438, 464), 'iris.experimental.equalise_cubes.equalise_attributes', 'equalise_attributes', (['cubes'], {}), '(cubes)\n', (457, 464), False, 'from iris.experimental.equalise_cubes import equalise_attributes\n'), ((473, 496), 'iris.util.unify_time_units', 'unify_time_units', (['cubes'], {}), '(cubes)\n', (489, 496), False, 'from iris.util import unify_time_units\n')] |
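The fourteen near-identical try/except blocks above could be collapsed into a loop over realization numbers; a sketch under the same glob conventions (run 1 keeps the trailing underscore in its pattern, and, unlike the original, is also wrapped in try/except):

# Sketch: gather whichever realizations exist for a model, instead of 14 copy-pasted blocks.
def read_realizations(fpath, constraint=None, max_runs=14):
    runs = []
    for r in range(1, max_runs + 1):
        pattern = '/*r{}i1p1{}*.nc'.format(r, '_' if r == 1 else '')
        try:
            runs.append(read_cmip_cube(fpath + pattern, constraint=constraint))
        except OSError:
            pass  # this realization is not available for the model
    return runs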
# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/master/LICENSE
from __future__ import absolute_import
import sys
import itertools
import numpy
import awkward1
import cupy
def test_from_cupy():
cupy_array_1d = cupy.arange(10)
cupy_array_2d = cupy.array([[1.1, 2.2], [3.3, 4.4], [5.5, 6.6], [7.7, 8.8]])
ak_cupy_array_1d = awkward1.from_cupy(cupy_array_1d)
ak_cupy_array_2d = awkward1.from_cupy(cupy_array_2d)
for i in range(10):
assert ak_cupy_array_1d[i] == cupy_array_1d[i]
for i in range(4):
for j in range(2):
assert ak_cupy_array_2d[i][j] == cupy_array_2d[i][j]
def test_from_cupy_tolist():
cupy_array_1d = cupy.array([9, 8, 7, 6, 5, 4, 3, 2, 1, 0])
ak_cupy_array_1d = awkward1.from_cupy(cupy_array_1d)
assert awkward1.to_list(ak_cupy_array_1d.layout) == [9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
def test_NumpyArray_constructor():
assert awkward1.kernels(awkward1.layout.NumpyArray(numpy.array([1, 2, 3]))) == "cpu"
assert awkward1.kernels(awkward1.layout.NumpyArray(cupy.array([1, 2, 3]))) == "cuda"
def test_add():
one = awkward1.Array([[1.1, 2.2, 3.3], [], [4.4, 5.5]], kernels="cuda")
two = awkward1.Array([100, 200, 300], kernels="cuda")
assert awkward1.kernels(one) == "cuda"
assert awkward1.kernels(two) == "cuda"
three = one + two
assert awkward1.to_list(three) == [[101.1, 102.2, 103.3], [], [304.4, 305.5]]
assert awkward1.kernels(three) == "cuda"
def test_add_2():
one = awkward1.Array([[1.1, 2.2, 3.3], [], [4.4, 5.5]], kernels="cuda")
two = 100
assert awkward1.kernels(one) == "cuda"
three = one + two
assert awkward1.to_list(three) == [[101.1, 102.2, 103.3], [], [104.4, 105.5]]
assert awkward1.kernels(three) == "cuda"
| [
"cupy.arange",
"awkward1.to_list",
"numpy.array",
"awkward1.kernels",
"awkward1.Array",
"cupy.array",
"awkward1.from_cupy"
] | [((246, 261), 'cupy.arange', 'cupy.arange', (['(10)'], {}), '(10)\n', (257, 261), False, 'import cupy\n'), ((282, 342), 'cupy.array', 'cupy.array', (['[[1.1, 2.2], [3.3, 4.4], [5.5, 6.6], [7.7, 8.8]]'], {}), '([[1.1, 2.2], [3.3, 4.4], [5.5, 6.6], [7.7, 8.8]])\n', (292, 342), False, 'import cupy\n'), ((367, 400), 'awkward1.from_cupy', 'awkward1.from_cupy', (['cupy_array_1d'], {}), '(cupy_array_1d)\n', (385, 400), False, 'import awkward1\n'), ((424, 457), 'awkward1.from_cupy', 'awkward1.from_cupy', (['cupy_array_2d'], {}), '(cupy_array_2d)\n', (442, 457), False, 'import awkward1\n'), ((705, 747), 'cupy.array', 'cupy.array', (['[9, 8, 7, 6, 5, 4, 3, 2, 1, 0]'], {}), '([9, 8, 7, 6, 5, 4, 3, 2, 1, 0])\n', (715, 747), False, 'import cupy\n'), ((772, 805), 'awkward1.from_cupy', 'awkward1.from_cupy', (['cupy_array_1d'], {}), '(cupy_array_1d)\n', (790, 805), False, 'import awkward1\n'), ((1137, 1202), 'awkward1.Array', 'awkward1.Array', (['[[1.1, 2.2, 3.3], [], [4.4, 5.5]]'], {'kernels': '"""cuda"""'}), "([[1.1, 2.2, 3.3], [], [4.4, 5.5]], kernels='cuda')\n", (1151, 1202), False, 'import awkward1\n'), ((1213, 1260), 'awkward1.Array', 'awkward1.Array', (['[100, 200, 300]'], {'kernels': '"""cuda"""'}), "([100, 200, 300], kernels='cuda')\n", (1227, 1260), False, 'import awkward1\n'), ((1526, 1591), 'awkward1.Array', 'awkward1.Array', (['[[1.1, 2.2, 3.3], [], [4.4, 5.5]]'], {'kernels': '"""cuda"""'}), "([[1.1, 2.2, 3.3], [], [4.4, 5.5]], kernels='cuda')\n", (1540, 1591), False, 'import awkward1\n'), ((818, 859), 'awkward1.to_list', 'awkward1.to_list', (['ak_cupy_array_1d.layout'], {}), '(ak_cupy_array_1d.layout)\n', (834, 859), False, 'import awkward1\n'), ((1272, 1293), 'awkward1.kernels', 'awkward1.kernels', (['one'], {}), '(one)\n', (1288, 1293), False, 'import awkward1\n'), ((1315, 1336), 'awkward1.kernels', 'awkward1.kernels', (['two'], {}), '(two)\n', (1331, 1336), False, 'import awkward1\n'), ((1380, 1403), 'awkward1.to_list', 'awkward1.to_list', (['three'], {}), '(three)\n', (1396, 1403), False, 'import awkward1\n'), ((1462, 1485), 'awkward1.kernels', 'awkward1.kernels', (['three'], {}), '(three)\n', (1478, 1485), False, 'import awkward1\n'), ((1617, 1638), 'awkward1.kernels', 'awkward1.kernels', (['one'], {}), '(one)\n', (1633, 1638), False, 'import awkward1\n'), ((1682, 1705), 'awkward1.to_list', 'awkward1.to_list', (['three'], {}), '(three)\n', (1698, 1705), False, 'import awkward1\n'), ((1764, 1787), 'awkward1.kernels', 'awkward1.kernels', (['three'], {}), '(three)\n', (1780, 1787), False, 'import awkward1\n'), ((986, 1008), 'numpy.array', 'numpy.array', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (997, 1008), False, 'import numpy\n'), ((1075, 1096), 'cupy.array', 'cupy.array', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (1085, 1096), False, 'import cupy\n')] |
# Generated by Django 3.0.6 on 2020-07-10 13:42
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('registrations', '0004_auto_20200621_0910'),
]
operations = [
migrations.AlterField(
model_name='saccodriver',
name='last_status_update_date',
field=models.DateField(blank=True, null=True),
),
]
| [
"django.db.models.DateField"
] | [((364, 403), 'django.db.models.DateField', 'models.DateField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (380, 403), False, 'from django.db import migrations, models\n')] |
"""Example AWS Lambda function for chip-n-scale"""
import os
import pg8000
from typing import Dict, Any
from download_and_predict.base import DownloadAndPredict
from download_and_predict.custom_types import SQSEvent
def handler(event: SQSEvent, context: Dict[str, Any]) -> None:
# read all our environment variables to throw errors early
imagery = os.getenv('TILE_ENDPOINT')
db = os.getenv('DATABASE_URL')
prediction_endpoint=os.getenv('PREDICTION_ENDPOINT')
assert(imagery)
assert(db)
assert(prediction_endpoint)
# instantiate our DownloadAndPredict class
dap = DownloadAndPredict(
imagery=imagery,
db=db,
prediction_endpoint=prediction_endpoint
)
# get tiles from our SQS event
tiles = dap.get_tiles(event)
# construct a payload for our prediction endpoint
tile_indices, payload = dap.get_prediction_payload(tiles)
# send prediction request
content = dap.post_prediction(payload)
# save prediction request to db
dap.save_to_db(
tile_indices,
content['predictions'],
result_wrapper=lambda x: pg8000.PGJsonb(x)
)
| [
"download_and_predict.base.DownloadAndPredict",
"pg8000.PGJsonb",
"os.getenv"
] | [((359, 385), 'os.getenv', 'os.getenv', (['"""TILE_ENDPOINT"""'], {}), "('TILE_ENDPOINT')\n", (368, 385), False, 'import os\n'), ((395, 420), 'os.getenv', 'os.getenv', (['"""DATABASE_URL"""'], {}), "('DATABASE_URL')\n", (404, 420), False, 'import os\n'), ((445, 477), 'os.getenv', 'os.getenv', (['"""PREDICTION_ENDPOINT"""'], {}), "('PREDICTION_ENDPOINT')\n", (454, 477), False, 'import os\n'), ((604, 692), 'download_and_predict.base.DownloadAndPredict', 'DownloadAndPredict', ([], {'imagery': 'imagery', 'db': 'db', 'prediction_endpoint': 'prediction_endpoint'}), '(imagery=imagery, db=db, prediction_endpoint=\n prediction_endpoint)\n', (622, 692), False, 'from download_and_predict.base import DownloadAndPredict\n'), ((1116, 1133), 'pg8000.PGJsonb', 'pg8000.PGJsonb', (['x'], {}), '(x)\n', (1130, 1133), False, 'import pg8000\n')] |
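For a local smoke test of the handler above, the three environment variables it reads must be set first; the values below are placeholders, and the SQS record body is left commented out because the exact format expected by get_tiles() is not shown in this file:

# Hypothetical local setup; endpoint URLs and the database DSN are placeholders only.
import os

os.environ.setdefault('TILE_ENDPOINT', 'https://tiles.example.com/{z}/{x}/{y}.png')
os.environ.setdefault('DATABASE_URL', 'postgresql://user:pass@localhost:5432/chips')
os.environ.setdefault('PREDICTION_ENDPOINT', 'http://localhost:8501/v1/models/model:predict')

# fake_event = {"Records": [{"body": "..."}]}   # standard SQS -> Lambda envelope
# handler(fake_event, context={})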
import json
from os import getenv
from Common import path
class OAuthConfig:
_instance = None
def __init__(self):
super().__init__()
stage = getenv("STAGES")
self.HTTP = "http"
with open(path.APP_CONFIG_ABS_PATH, "r") as json_file:
content = json.load(json_file)
self.IS_DOCKER = content["STORAGE_SERVICE_IS_USING_DOCKER"]
if stage:
self.PORT = 8080
self.URL = f"{self.HTTP}://oauth-server.{stage}.svc.cluster.local:{self.PORT}"
elif self.IS_DOCKER:
self.PORT = 3001
self.URL = f"{self.HTTP}://host.docker.internal:{self.PORT}"
else:
self.PORT = 3001
self.URL = f"{self.HTTP}://localhost:{self.PORT}"
@classmethod
def get_instance(cls):
if not cls._instance:
cls._instance = OAuthConfig()
return cls._instance
| [
"json.load",
"os.getenv"
] | [((169, 185), 'os.getenv', 'getenv', (['"""STAGES"""'], {}), "('STAGES')\n", (175, 185), False, 'from os import getenv\n'), ((300, 320), 'json.load', 'json.load', (['json_file'], {}), '(json_file)\n', (309, 320), False, 'import json\n')] |
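A small usage sketch of the lazy singleton above (it assumes the JSON config file at path.APP_CONFIG_ABS_PATH exists, since __init__ reads it):

cfg = OAuthConfig.get_instance()
print(cfg.URL)                              # e.g. http://localhost:3001 when neither STAGES nor the Docker flag is set
assert cfg is OAuthConfig.get_instance()   # the same instance is returned on every call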
# from django.db import models
#
# Create your models here.
from django.contrib import admin
from django.contrib.auth.models import User
from django.db.models.signals import post_save, pre_save
from django.dispatch import receiver
from django.shortcuts import reverse
from djongo import models
#18.216.224.202u
Available_Size = (
('S', 'small'),
('M', 'medium'),
('L', 'large'),
('XL', 'extra large'),
)
Available_Color = (
('w','white'),
('b','black'),
('g','green'),
('y','yellow'),
)
Trasaction_status = (
('1', 'Done'),
('2', 'pending'),
('3', '--------'),
)
PAYMENT_MODE = (
('online', 'online'),
('offline', 'offline'),
)
RANKING = (
('promoted', 'Promoted'),
('best_seller', 'Best Seller'),
)
class Categories(models.Model):
objects = models.DjongoManager()
_id = models.ObjectIdField(primary_key=True)
parent = models.ForeignKey("self", on_delete=models.CASCADE, null=True, blank=True)
category_name = models.CharField(max_length=100,null=True,blank=True)
category_name_de = models.CharField(max_length=100,null=True,blank=True)
picture = models.ImageField(null=True,blank=True, upload_to="images")
active = models.BooleanField(default=False)
created_at = models.DateTimeField(auto_now=False, auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True, auto_now_add=False)
def __str__(self):
return self.category_name or ''
class Meta:
db_table = 'odata_category'
class Customer(models.Model):
"""This model is used for customer"""
_id = models.ObjectIdField(primary_key=True)
user = models.OneToOneField(User, on_delete=models.CASCADE)
first_name = models.CharField(max_length=100,null=True,blank=True)
last_name = models.CharField(max_length=100,null=True,blank=True)
address1 = models.CharField(max_length=100,null=True,blank=True)
address2 = models.CharField(max_length=100,null=True,blank=True)
city = models.CharField(max_length=100,null=True,blank=True)
state = models.CharField(max_length=100,null=True,blank=True)
postal_code = models.IntegerField(null=True,blank=True)
country = models.CharField(max_length=100,null=True,blank=True)
phone = models.CharField(max_length=10,null=True,blank=True)
# password = models.CharField(max_length=100)
salutation = models.CharField(max_length=100,null=True,blank=True)
credit_card = models.CharField(max_length=15, null=True,blank=True)
credit_card_type_id = models.CharField(max_length=100)
mm_yy = models.CharField(max_length=7, null=True, blank=True)
billing_address = models.CharField(max_length=250, null=True,blank=True)
billing_city = models.CharField(max_length=100, null=True,blank=True)
billing_postal_code = models.CharField(max_length=100, null=True,blank=True)
billing_country = models.CharField(max_length=100, null=True,blank=True)
ship_address = models.CharField(max_length=250, null=True,blank=True)
ship_city = models.CharField(max_length=250, null=True,blank=True)
ship_region = models.CharField(max_length=250, null=True,blank=True)
ship_postal_code = models.CharField(max_length=100, null=True,blank=True)
ship_country = models.CharField(max_length=100, null=True,blank=True)
marketing_code = models.CharField(max_length=100, null=True, blank=True)
source = models.CharField(max_length=100, null=True, blank=True)
medium = models.CharField(max_length=100, null=True, blank=True)
gcustid = models.CharField(max_length=512, null=True, blank=True)
gclid = models.CharField(max_length=1024, null=True, blank=True)
fbclid = models.CharField(max_length=1024, null=True, blank=True)
date_entered = models.DateTimeField(auto_now_add=True)
terms_condition = models.BooleanField(default=True)
data_privacy = models.BooleanField(default=True)
guest_login = models.BooleanField(default=False)
created_at = models.DateTimeField(auto_now=False, auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True, auto_now_add=False)
def __str__(self):
if self.first_name is not None and self.last_name is not None:
return self.first_name+' '+self.last_name
elif self.first_name is not None:
return self.first_name
else:
return ''
@classmethod
def get(cls, email):
try:
user_detail = User.objects.get(email=email)
return cls.objects.get(user=user_detail)
except User.DoesNotExist:
return None
class UserForgotPassword(models.Model):
_id = models.ObjectIdField(primary_key=True)
user = models.OneToOneField(User, related_name='reset_password_user', on_delete=models.CASCADE)
token = models.CharField(max_length=10)
is_consumed = models.BooleanField(default=False)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
# @receiver(post_save, sender=User)
# def create_user_customer(sender, instance, created, **kwargs):
# if created:
# Customer.objects.create(user=instance)
# @receiver(post_save, sender=User)
# def save_user_customer(sender, instance, **kwargs):
# instance.customer.save()
class Product(models.Model):
"""This models is used for Products Details."""
_id = models.ObjectIdField(primary_key=True)
vendor_product_id = models.CharField(max_length=50,null=True,blank=True)
product_name = models.CharField(max_length=100)
category = models.CharField(max_length=250, default='')
quantity = models.IntegerField(default=0)
price = models.FloatField()
msrp = models.CharField(max_length=100, null=True,blank=True)
ean = models.CharField(max_length=255, null=True, blank=True)
title = models.CharField(max_length=1024)
title_de = models.CharField(max_length=1024, blank=True, null=True)
size = models.CharField(max_length=100,null=True,blank=True)
color = models.CharField(max_length=100,null=True,blank=True)
discount = models.DecimalField(decimal_places=2, max_digits=10, verbose_name="Discount %")
product_available = models.BooleanField(default=False)
picture = models.URLField(null=True,blank=True)
ranking = models.CharField(max_length=15,choices=RANKING, null=True,blank=True)
description = models.TextField(max_length=200, null=True,blank=True)
description_de = models.TextField(max_length=200, blank=True, null=True)
product_highlight = models.TextField(max_length=200, null=True,blank=True)
product_highlight_de = models.TextField(max_length=200, blank=True, null=True)
created_at = models.DateTimeField(auto_now=False, auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True, auto_now_add=False)
objects = models.DjongoManager()
def __str__(self):
return self.product_name or ' '
def get_absolute_url(self):
return reverse("products", args=[str(self.id)])
@classmethod
def get(cls, pro_id):
from bson import ObjectId
try:
return cls.objects.get(pk=ObjectId(pro_id))
except cls.DoesNotExist:
return None
class ProductImage(models.Model):
_id = models.ObjectIdField(primary_key=True)
product = models.ForeignKey(Product, on_delete=models.CASCADE)
image_order = models.IntegerField()
image = models.URLField(null=True,blank=True)
created_at = models.DateTimeField(auto_now=False, auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True, auto_now_add=False)
class ProductVariant(models.Model):
_id = models.ObjectIdField(primary_key=True)
parent_product = models.ForeignKey(Product, on_delete=models.CASCADE)
size = models.CharField(max_length=100)
color = models.CharField(max_length=100)
material = models.CharField(max_length=100)
image = models.URLField(null=True, blank=True)
# class Shipper(models.Model):
# _id = models.ObjectIdField(primary_key=True)
# company_name = models.CharField(max_length=100)
# phone = models.CharField(max_length=10)
# created_at = models.DateTimeField(auto_now=False, auto_now_add=True)
# updated_at = models.DateTimeField(auto_now=True, auto_now_add=False)
# class Order(models.Model):
# _id = models.ObjectIdField(primary_key=True)
# customer = models.ForeignKey(Customer, on_delete=models.CASCADE)
# order_number = models.CharField(max_length=100,null=True,blank=True)
# payment_id = models.CharField(max_length=50,null=True,blank=True)
# order_date = models.DateTimeField(auto_now_add=True)
# ship_date = models.DateTimeField(default='')
# required_date = models.DateField()
# freight = models.CharField(max_length=100,null=True,blank=True)
# sale_tax = models.CharField(max_length=100,null=True,blank=True)
# timestamp = models.DateTimeField(auto_now_add=True)
# transaction_status = models.CharField(max_length=1,choices=Trasaction_status)
# error_lock = models.CharField(max_length=100,null=True,blank=True)
# error_msg = models.CharField(max_length=100,null=True,blank=True)
# fullfiled = models.BooleanField(default=False)
# deleted = models.BooleanField(default=False)
# paid = models.BooleanField(default=False)
# payment_date = models.DateTimeField(auto_now_add=True)
# created_at = models.DateTimeField(auto_now=False, auto_now_add=True)
# updated_at = models.DateTimeField(auto_now=True, auto_now_add=False)
# def __str__(self):
# return self.order_number
class Payment(models.Model):
_id = models.ObjectIdField(primary_key=True)
customer = models.ForeignKey(Customer, on_delete=models.CASCADE)
order = models.CharField(max_length=100)
invoice = models.CharField(max_length=100)
amount = models.DecimalField(max_digits=19, decimal_places=2)
payment_type = models.CharField(max_length=50)
status = models.CharField(max_length=15)
date_of_payment = models.DateField()
created_at = models.DateTimeField(auto_now=False, auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True, auto_now_add=False)
# class OrderDetail(models.Model):
# _id = models.ObjectIdField(primary_key=True)
# order = models.ForeignKey(Order, on_delete=models.DO_NOTHING)
# product = models.ForeignKey(Product, on_delete=models.DO_NOTHING, null=False, blank=False)
# order_number = models.CharField(max_length=100,null=True,blank=True)
# payment_id = models.CharField(max_length=50,null=True,blank=True)
# price = models.DecimalField(max_digits=19, decimal_places=2)
# quatity = models.IntegerField()
# discount = models.DecimalField(decimal_places=2, max_digits=10)
# total = models.IntegerField()
# idsku = models.CharField(max_length=100,null=True,blank=True)
# size = models.CharField(max_length=3,choices=Available_Size)
# color = models.CharField(max_length=3,choices=Available_Color)
# fullfield = models.BooleanField(default=False)
# bill_date = models.DateField(default='')
# created_at = models.DateTimeField(auto_now=False, auto_now_add=True)
# updated_at = models.DateTimeField(auto_now=True, auto_now_add=False)
# def __str__(self):
# return self.order.order_number
class NewsletterSubscription(models.Model):
_id = models.ObjectIdField(primary_key=True)
salutation = models.CharField(max_length=3)
first_name = models.CharField(max_length=100)
last_name = models.CharField(max_length=100)
email = models.CharField(max_length=100)
data_acceptance = models.BooleanField()
created_at = models.DateTimeField(auto_now=False, auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True, auto_now_add=False) | [
"djongo.models.BooleanField",
"djongo.models.CharField",
"djongo.models.ObjectIdField",
"djongo.models.DateTimeField",
"django.contrib.auth.models.User.objects.get",
"djongo.models.DjongoManager",
"djongo.models.DateField",
"djongo.models.ImageField",
"djongo.models.FloatField",
"djongo.models.ForeignKey",
"djongo.models.DecimalField",
"djongo.models.URLField",
"djongo.models.IntegerField",
"bson.ObjectId",
"djongo.models.OneToOneField",
"djongo.models.TextField"
] | [((818, 840), 'djongo.models.DjongoManager', 'models.DjongoManager', ([], {}), '()\n', (838, 840), False, 'from djongo import models\n'), ((851, 889), 'djongo.models.ObjectIdField', 'models.ObjectIdField', ([], {'primary_key': '(True)'}), '(primary_key=True)\n', (871, 889), False, 'from djongo import models\n'), ((903, 977), 'djongo.models.ForeignKey', 'models.ForeignKey', (['"""self"""'], {'on_delete': 'models.CASCADE', 'null': '(True)', 'blank': '(True)'}), "('self', on_delete=models.CASCADE, null=True, blank=True)\n", (920, 977), False, 'from djongo import models\n'), ((998, 1053), 'djongo.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'null': '(True)', 'blank': '(True)'}), '(max_length=100, null=True, blank=True)\n', (1014, 1053), False, 'from djongo import models\n'), ((1077, 1132), 'djongo.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'null': '(True)', 'blank': '(True)'}), '(max_length=100, null=True, blank=True)\n', (1093, 1132), False, 'from djongo import models\n'), ((1147, 1207), 'djongo.models.ImageField', 'models.ImageField', ([], {'null': '(True)', 'blank': '(True)', 'upload_to': '"""images"""'}), "(null=True, blank=True, upload_to='images')\n", (1164, 1207), False, 'from djongo import models\n'), ((1220, 1254), 'djongo.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (1239, 1254), False, 'from djongo import models\n'), ((1272, 1327), 'djongo.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(False)', 'auto_now_add': '(True)'}), '(auto_now=False, auto_now_add=True)\n', (1292, 1327), False, 'from djongo import models\n'), ((1345, 1400), 'djongo.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)', 'auto_now_add': '(False)'}), '(auto_now=True, auto_now_add=False)\n', (1365, 1400), False, 'from djongo import models\n'), ((1615, 1653), 'djongo.models.ObjectIdField', 'models.ObjectIdField', ([], {'primary_key': '(True)'}), '(primary_key=True)\n', (1635, 1653), False, 'from djongo import models\n'), ((1665, 1717), 'djongo.models.OneToOneField', 'models.OneToOneField', (['User'], {'on_delete': 'models.CASCADE'}), '(User, on_delete=models.CASCADE)\n', (1685, 1717), False, 'from djongo import models\n'), ((1735, 1790), 'djongo.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'null': '(True)', 'blank': '(True)'}), '(max_length=100, null=True, blank=True)\n', (1751, 1790), False, 'from djongo import models\n'), ((1805, 1860), 'djongo.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'null': '(True)', 'blank': '(True)'}), '(max_length=100, null=True, blank=True)\n', (1821, 1860), False, 'from djongo import models\n'), ((1878, 1933), 'djongo.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'null': '(True)', 'blank': '(True)'}), '(max_length=100, null=True, blank=True)\n', (1894, 1933), False, 'from djongo import models\n'), ((1947, 2002), 'djongo.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'null': '(True)', 'blank': '(True)'}), '(max_length=100, null=True, blank=True)\n', (1963, 2002), False, 'from djongo import models\n'), ((2012, 2067), 'djongo.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'null': '(True)', 'blank': '(True)'}), '(max_length=100, null=True, blank=True)\n', (2028, 2067), False, 'from djongo import models\n'), ((2078, 2133), 'djongo.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'null': '(True)', 'blank': '(True)'}), 
'(max_length=100, null=True, blank=True)\n', (2094, 2133), False, 'from djongo import models\n'), ((2150, 2192), 'djongo.models.IntegerField', 'models.IntegerField', ([], {'null': '(True)', 'blank': '(True)'}), '(null=True, blank=True)\n', (2169, 2192), False, 'from djongo import models\n'), ((2206, 2261), 'djongo.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'null': '(True)', 'blank': '(True)'}), '(max_length=100, null=True, blank=True)\n', (2222, 2261), False, 'from djongo import models\n'), ((2272, 2326), 'djongo.models.CharField', 'models.CharField', ([], {'max_length': '(10)', 'null': '(True)', 'blank': '(True)'}), '(max_length=10, null=True, blank=True)\n', (2288, 2326), False, 'from djongo import models\n'), ((2392, 2447), 'djongo.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'null': '(True)', 'blank': '(True)'}), '(max_length=100, null=True, blank=True)\n', (2408, 2447), False, 'from djongo import models\n'), ((2464, 2518), 'djongo.models.CharField', 'models.CharField', ([], {'max_length': '(15)', 'null': '(True)', 'blank': '(True)'}), '(max_length=15, null=True, blank=True)\n', (2480, 2518), False, 'from djongo import models\n'), ((2544, 2576), 'djongo.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (2560, 2576), False, 'from djongo import models\n'), ((2589, 2642), 'djongo.models.CharField', 'models.CharField', ([], {'max_length': '(7)', 'null': '(True)', 'blank': '(True)'}), '(max_length=7, null=True, blank=True)\n', (2605, 2642), False, 'from djongo import models\n'), ((2665, 2720), 'djongo.models.CharField', 'models.CharField', ([], {'max_length': '(250)', 'null': '(True)', 'blank': '(True)'}), '(max_length=250, null=True, blank=True)\n', (2681, 2720), False, 'from djongo import models\n'), ((2739, 2794), 'djongo.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'null': '(True)', 'blank': '(True)'}), '(max_length=100, null=True, blank=True)\n', (2755, 2794), False, 'from djongo import models\n'), ((2824, 2879), 'djongo.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'null': '(True)', 'blank': '(True)'}), '(max_length=100, null=True, blank=True)\n', (2840, 2879), False, 'from djongo import models\n'), ((2901, 2956), 'djongo.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'null': '(True)', 'blank': '(True)'}), '(max_length=100, null=True, blank=True)\n', (2917, 2956), False, 'from djongo import models\n'), ((2975, 3030), 'djongo.models.CharField', 'models.CharField', ([], {'max_length': '(250)', 'null': '(True)', 'blank': '(True)'}), '(max_length=250, null=True, blank=True)\n', (2991, 3030), False, 'from djongo import models\n'), ((3046, 3101), 'djongo.models.CharField', 'models.CharField', ([], {'max_length': '(250)', 'null': '(True)', 'blank': '(True)'}), '(max_length=250, null=True, blank=True)\n', (3062, 3101), False, 'from djongo import models\n'), ((3119, 3174), 'djongo.models.CharField', 'models.CharField', ([], {'max_length': '(250)', 'null': '(True)', 'blank': '(True)'}), '(max_length=250, null=True, blank=True)\n', (3135, 3174), False, 'from djongo import models\n'), ((3197, 3252), 'djongo.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'null': '(True)', 'blank': '(True)'}), '(max_length=100, null=True, blank=True)\n', (3213, 3252), False, 'from djongo import models\n'), ((3271, 3326), 'djongo.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'null': '(True)', 'blank': '(True)'}), 
'(max_length=100, null=True, blank=True)\n', (3287, 3326), False, 'from djongo import models\n'), ((3347, 3402), 'djongo.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'null': '(True)', 'blank': '(True)'}), '(max_length=100, null=True, blank=True)\n', (3363, 3402), False, 'from djongo import models\n'), ((3416, 3471), 'djongo.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'null': '(True)', 'blank': '(True)'}), '(max_length=100, null=True, blank=True)\n', (3432, 3471), False, 'from djongo import models\n'), ((3485, 3540), 'djongo.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'null': '(True)', 'blank': '(True)'}), '(max_length=100, null=True, blank=True)\n', (3501, 3540), False, 'from djongo import models\n'), ((3555, 3610), 'djongo.models.CharField', 'models.CharField', ([], {'max_length': '(512)', 'null': '(True)', 'blank': '(True)'}), '(max_length=512, null=True, blank=True)\n', (3571, 3610), False, 'from djongo import models\n'), ((3623, 3679), 'djongo.models.CharField', 'models.CharField', ([], {'max_length': '(1024)', 'null': '(True)', 'blank': '(True)'}), '(max_length=1024, null=True, blank=True)\n', (3639, 3679), False, 'from djongo import models\n'), ((3693, 3749), 'djongo.models.CharField', 'models.CharField', ([], {'max_length': '(1024)', 'null': '(True)', 'blank': '(True)'}), '(max_length=1024, null=True, blank=True)\n', (3709, 3749), False, 'from djongo import models\n'), ((3769, 3808), 'djongo.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (3789, 3808), False, 'from djongo import models\n'), ((3831, 3864), 'djongo.models.BooleanField', 'models.BooleanField', ([], {'default': '(True)'}), '(default=True)\n', (3850, 3864), False, 'from djongo import models\n'), ((3884, 3917), 'djongo.models.BooleanField', 'models.BooleanField', ([], {'default': '(True)'}), '(default=True)\n', (3903, 3917), False, 'from djongo import models\n'), ((3936, 3970), 'djongo.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (3955, 3970), False, 'from djongo import models\n'), ((3988, 4043), 'djongo.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(False)', 'auto_now_add': '(True)'}), '(auto_now=False, auto_now_add=True)\n', (4008, 4043), False, 'from djongo import models\n'), ((4061, 4116), 'djongo.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)', 'auto_now_add': '(False)'}), '(auto_now=True, auto_now_add=False)\n', (4081, 4116), False, 'from djongo import models\n'), ((4658, 4696), 'djongo.models.ObjectIdField', 'models.ObjectIdField', ([], {'primary_key': '(True)'}), '(primary_key=True)\n', (4678, 4696), False, 'from djongo import models\n'), ((4708, 4801), 'djongo.models.OneToOneField', 'models.OneToOneField', (['User'], {'related_name': '"""reset_password_user"""', 'on_delete': 'models.CASCADE'}), "(User, related_name='reset_password_user', on_delete=\n models.CASCADE)\n", (4728, 4801), False, 'from djongo import models\n'), ((4809, 4840), 'djongo.models.CharField', 'models.CharField', ([], {'max_length': '(10)'}), '(max_length=10)\n', (4825, 4840), False, 'from djongo import models\n'), ((4859, 4893), 'djongo.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (4878, 4893), False, 'from djongo import models\n'), ((4911, 4950), 'djongo.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (4931, 
4950), False, 'from djongo import models\n'), ((4968, 5003), 'djongo.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)'}), '(auto_now=True)\n', (4988, 5003), False, 'from djongo import models\n'), ((5405, 5443), 'djongo.models.ObjectIdField', 'models.ObjectIdField', ([], {'primary_key': '(True)'}), '(primary_key=True)\n', (5425, 5443), False, 'from djongo import models\n'), ((5476, 5530), 'djongo.models.CharField', 'models.CharField', ([], {'max_length': '(50)', 'null': '(True)', 'blank': '(True)'}), '(max_length=50, null=True, blank=True)\n', (5492, 5530), False, 'from djongo import models\n'), ((5548, 5580), 'djongo.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (5564, 5580), False, 'from djongo import models\n'), ((5600, 5644), 'djongo.models.CharField', 'models.CharField', ([], {'max_length': '(250)', 'default': '""""""'}), "(max_length=250, default='')\n", (5616, 5644), False, 'from djongo import models\n'), ((5660, 5690), 'djongo.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)'}), '(default=0)\n', (5679, 5690), False, 'from djongo import models\n'), ((5703, 5722), 'djongo.models.FloatField', 'models.FloatField', ([], {}), '()\n', (5720, 5722), False, 'from djongo import models\n'), ((5734, 5789), 'djongo.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'null': '(True)', 'blank': '(True)'}), '(max_length=100, null=True, blank=True)\n', (5750, 5789), False, 'from djongo import models\n'), ((5799, 5854), 'djongo.models.CharField', 'models.CharField', ([], {'max_length': '(255)', 'null': '(True)', 'blank': '(True)'}), '(max_length=255, null=True, blank=True)\n', (5815, 5854), False, 'from djongo import models\n'), ((5867, 5900), 'djongo.models.CharField', 'models.CharField', ([], {'max_length': '(1024)'}), '(max_length=1024)\n', (5883, 5900), False, 'from djongo import models\n'), ((5916, 5972), 'djongo.models.CharField', 'models.CharField', ([], {'max_length': '(1024)', 'blank': '(True)', 'null': '(True)'}), '(max_length=1024, blank=True, null=True)\n', (5932, 5972), False, 'from djongo import models\n'), ((5988, 6043), 'djongo.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'null': '(True)', 'blank': '(True)'}), '(max_length=100, null=True, blank=True)\n', (6004, 6043), False, 'from djongo import models\n'), ((6054, 6109), 'djongo.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'null': '(True)', 'blank': '(True)'}), '(max_length=100, null=True, blank=True)\n', (6070, 6109), False, 'from djongo import models\n'), ((6123, 6202), 'djongo.models.DecimalField', 'models.DecimalField', ([], {'decimal_places': '(2)', 'max_digits': '(10)', 'verbose_name': '"""Discount %"""'}), "(decimal_places=2, max_digits=10, verbose_name='Discount %')\n", (6142, 6202), False, 'from djongo import models\n'), ((6227, 6261), 'djongo.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (6246, 6261), False, 'from djongo import models\n'), ((6284, 6322), 'djongo.models.URLField', 'models.URLField', ([], {'null': '(True)', 'blank': '(True)'}), '(null=True, blank=True)\n', (6299, 6322), False, 'from djongo import models\n'), ((6336, 6407), 'djongo.models.CharField', 'models.CharField', ([], {'max_length': '(15)', 'choices': 'RANKING', 'null': '(True)', 'blank': '(True)'}), '(max_length=15, choices=RANKING, null=True, blank=True)\n', (6352, 6407), False, 'from djongo import models\n'), ((6424, 6479), 'djongo.models.TextField', 
'models.TextField', ([], {'max_length': '(200)', 'null': '(True)', 'blank': '(True)'}), '(max_length=200, null=True, blank=True)\n', (6440, 6479), False, 'from djongo import models\n'), ((6500, 6555), 'djongo.models.TextField', 'models.TextField', ([], {'max_length': '(200)', 'blank': '(True)', 'null': '(True)'}), '(max_length=200, blank=True, null=True)\n', (6516, 6555), False, 'from djongo import models\n'), ((6580, 6635), 'djongo.models.TextField', 'models.TextField', ([], {'max_length': '(200)', 'null': '(True)', 'blank': '(True)'}), '(max_length=200, null=True, blank=True)\n', (6596, 6635), False, 'from djongo import models\n'), ((6662, 6717), 'djongo.models.TextField', 'models.TextField', ([], {'max_length': '(200)', 'blank': '(True)', 'null': '(True)'}), '(max_length=200, blank=True, null=True)\n', (6678, 6717), False, 'from djongo import models\n'), ((6735, 6790), 'djongo.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(False)', 'auto_now_add': '(True)'}), '(auto_now=False, auto_now_add=True)\n', (6755, 6790), False, 'from djongo import models\n'), ((6808, 6863), 'djongo.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)', 'auto_now_add': '(False)'}), '(auto_now=True, auto_now_add=False)\n', (6828, 6863), False, 'from djongo import models\n'), ((6878, 6900), 'djongo.models.DjongoManager', 'models.DjongoManager', ([], {}), '()\n', (6898, 6900), False, 'from djongo import models\n'), ((7312, 7350), 'djongo.models.ObjectIdField', 'models.ObjectIdField', ([], {'primary_key': '(True)'}), '(primary_key=True)\n', (7332, 7350), False, 'from djongo import models\n'), ((7365, 7417), 'djongo.models.ForeignKey', 'models.ForeignKey', (['Product'], {'on_delete': 'models.CASCADE'}), '(Product, on_delete=models.CASCADE)\n', (7382, 7417), False, 'from djongo import models\n'), ((7436, 7457), 'djongo.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (7455, 7457), False, 'from djongo import models\n'), ((7470, 7508), 'djongo.models.URLField', 'models.URLField', ([], {'null': '(True)', 'blank': '(True)'}), '(null=True, blank=True)\n', (7485, 7508), False, 'from djongo import models\n'), ((7525, 7580), 'djongo.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(False)', 'auto_now_add': '(True)'}), '(auto_now=False, auto_now_add=True)\n', (7545, 7580), False, 'from djongo import models\n'), ((7598, 7653), 'djongo.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)', 'auto_now_add': '(False)'}), '(auto_now=True, auto_now_add=False)\n', (7618, 7653), False, 'from djongo import models\n'), ((7706, 7744), 'djongo.models.ObjectIdField', 'models.ObjectIdField', ([], {'primary_key': '(True)'}), '(primary_key=True)\n', (7726, 7744), False, 'from djongo import models\n'), ((7766, 7818), 'djongo.models.ForeignKey', 'models.ForeignKey', (['Product'], {'on_delete': 'models.CASCADE'}), '(Product, on_delete=models.CASCADE)\n', (7783, 7818), False, 'from djongo import models\n'), ((7830, 7862), 'djongo.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (7846, 7862), False, 'from djongo import models\n'), ((7875, 7907), 'djongo.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (7891, 7907), False, 'from djongo import models\n'), ((7923, 7955), 'djongo.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (7939, 7955), False, 'from djongo import models\n'), ((7968, 8006), 'djongo.models.URLField', 
'models.URLField', ([], {'null': '(True)', 'blank': '(True)'}), '(null=True, blank=True)\n', (7983, 8006), False, 'from djongo import models\n'), ((9694, 9732), 'djongo.models.ObjectIdField', 'models.ObjectIdField', ([], {'primary_key': '(True)'}), '(primary_key=True)\n', (9714, 9732), False, 'from djongo import models\n'), ((9748, 9801), 'djongo.models.ForeignKey', 'models.ForeignKey', (['Customer'], {'on_delete': 'models.CASCADE'}), '(Customer, on_delete=models.CASCADE)\n', (9765, 9801), False, 'from djongo import models\n'), ((9814, 9846), 'djongo.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (9830, 9846), False, 'from djongo import models\n'), ((9861, 9893), 'djongo.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (9877, 9893), False, 'from djongo import models\n'), ((9907, 9959), 'djongo.models.DecimalField', 'models.DecimalField', ([], {'max_digits': '(19)', 'decimal_places': '(2)'}), '(max_digits=19, decimal_places=2)\n', (9926, 9959), False, 'from djongo import models\n'), ((9979, 10010), 'djongo.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (9995, 10010), False, 'from djongo import models\n'), ((10024, 10055), 'djongo.models.CharField', 'models.CharField', ([], {'max_length': '(15)'}), '(max_length=15)\n', (10040, 10055), False, 'from djongo import models\n'), ((10078, 10096), 'djongo.models.DateField', 'models.DateField', ([], {}), '()\n', (10094, 10096), False, 'from djongo import models\n'), ((10114, 10169), 'djongo.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(False)', 'auto_now_add': '(True)'}), '(auto_now=False, auto_now_add=True)\n', (10134, 10169), False, 'from djongo import models\n'), ((10187, 10242), 'djongo.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)', 'auto_now_add': '(False)'}), '(auto_now=True, auto_now_add=False)\n', (10207, 10242), False, 'from djongo import models\n'), ((11446, 11484), 'djongo.models.ObjectIdField', 'models.ObjectIdField', ([], {'primary_key': '(True)'}), '(primary_key=True)\n', (11466, 11484), False, 'from djongo import models\n'), ((11502, 11532), 'djongo.models.CharField', 'models.CharField', ([], {'max_length': '(3)'}), '(max_length=3)\n', (11518, 11532), False, 'from djongo import models\n'), ((11550, 11582), 'djongo.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (11566, 11582), False, 'from djongo import models\n'), ((11599, 11631), 'djongo.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (11615, 11631), False, 'from djongo import models\n'), ((11644, 11676), 'djongo.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (11660, 11676), False, 'from djongo import models\n'), ((11699, 11720), 'djongo.models.BooleanField', 'models.BooleanField', ([], {}), '()\n', (11718, 11720), False, 'from djongo import models\n'), ((11738, 11793), 'djongo.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(False)', 'auto_now_add': '(True)'}), '(auto_now=False, auto_now_add=True)\n', (11758, 11793), False, 'from djongo import models\n'), ((11811, 11866), 'djongo.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)', 'auto_now_add': '(False)'}), '(auto_now=True, auto_now_add=False)\n', (11831, 11866), False, 'from djongo import models\n'), ((4465, 4494), 'django.contrib.auth.models.User.objects.get', 'User.objects.get', 
([], {'email': 'email'}), '(email=email)\n', (4481, 4494), False, 'from django.contrib.auth.models import User\n'), ((7191, 7207), 'bson.ObjectId', 'ObjectId', (['pro_id'], {}), '(pro_id)\n', (7199, 7207), False, 'from bson import ObjectId\n')] |
import sys
sys.path.append('../../src/')
from p4rrot.generator_tools import *
from p4rrot.known_types import *
from p4rrot.standard_fields import *
from p4rrot.core.commands import *
UID.reset()
fp = FlowProcessor(
istruct = [('msg_in',string_t(12))],
locals = [('l',bool_t),('msg_tmp',string_t(12))],
ostruct = [('msg_out',string_t(12))]
)
(
fp
.add(AssignConst('msg_tmp',b'Hello World!'))
.add(Equals('l','msg_in','msg_tmp'))
.add(If('l'))
.add(AssignConst('msg_out',b'HELLO! :) '))
.add(SendBack())
.Else()
.add(StrictAssignVar('msg_out','msg_in'))
.EndIf()
)
fs = FlowSelector(
'IPV4_UDP',
[(UdpDstPort,5555),(UdpLen,8+13)],
fp
)
solution = Solution()
solution.add_flow_processor(fp)
solution.add_flow_selector(fs)
solution.get_generated_code().dump('test.p4app')
| [
"sys.path.append"
] | [((11, 40), 'sys.path.append', 'sys.path.append', (['"""../../src/"""'], {}), "('../../src/')\n", (26, 40), False, 'import sys\n')] |
import curses
import subprocess
class LdmGtk:
LDM_GTK_CONF = '/etc/lightdm/lightdm-gtk-greeter.conf'
@staticmethod
def get_bg() -> str:
"""
Fetch the background image of the LDM's GTK greeter from the config file
:return: Full path to the background image
"""
with open(LdmGtk.LDM_GTK_CONF) as ldm_file:
for line in ldm_file:
if line.startswith('background'):
return line.strip().split(' ')[2]
raise LookupError(
"[ERR] Couldn't locate the LDM greeter's background!")
@staticmethod
def set_bg(win, ldm_bg_name: str, wall_name: str) -> bool:
"""
Set the background image of the LDM's GTK greeter in the config file
:return: True on success
"""
if ldm_bg_name == wall_name:
win.addstr(
'[!] Cannot change DM background to the same one!\n', curses.color_pair(5))
win.getkey()
return False
try:
subprocess.check_call(
['sudo', 'sed', '-i', f"s/{ldm_bg_name}/{wall_name}/g", LdmGtk.LDM_GTK_CONF])
except (KeyboardInterrupt, PermissionError, subprocess.CalledProcessError):
win.addstr("[X] An external error occurred while change DM's background!\n",
curses.color_pair(2))
win.getkey()
return False
win.addstr('[+] Lock-screen background replaced!\n',
curses.color_pair(3))
win.getkey()
return True
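# Hedged usage sketch, added for illustration only (not part of the original
# module). `get_bg()` only parses the greeter config, so it can be exercised
# without curses; `set_bg()` additionally needs a curses window and sudo rights.
if __name__ == '__main__':
    try:
        print('Current greeter background:', LdmGtk.get_bg())
    except (FileNotFoundError, LookupError) as exc:
        print('Could not read the greeter config:', exc)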
| [
"curses.color_pair",
"subprocess.check_call"
] | [((1036, 1139), 'subprocess.check_call', 'subprocess.check_call', (["['sudo', 'sed', '-i', f's/{ldm_bg_name}/{wall_name}/g', LdmGtk.LDM_GTK_CONF]"], {}), "(['sudo', 'sed', '-i',\n f's/{ldm_bg_name}/{wall_name}/g', LdmGtk.LDM_GTK_CONF])\n", (1057, 1139), False, 'import subprocess\n'), ((1502, 1522), 'curses.color_pair', 'curses.color_pair', (['(3)'], {}), '(3)\n', (1519, 1522), False, 'import curses\n'), ((938, 958), 'curses.color_pair', 'curses.color_pair', (['(5)'], {}), '(5)\n', (955, 958), False, 'import curses\n'), ((1349, 1369), 'curses.color_pair', 'curses.color_pair', (['(2)'], {}), '(2)\n', (1366, 1369), False, 'import curses\n')] |
import os
from NewsData import create_mini_batch, NewsDataset, NewsDoc
import torch
from transformers import BertForSequenceClassification
from transformers import BertTokenizer
root = os.path.join('..', 'data')
#PRETRAINED_MODEL_NAME = "clue/roberta_chinese_base"
PRETRAINED_MODEL_NAME = "hfl/chinese-roberta-wwm-ext"
#PRETRAINED_MODEL_NAME = "hfl/chinese-roberta-wwm-ext-large"
NUM_LABELS = 2
BATCH_SIZE = 2
classifier_model_idx = 4
train = True
#EPOCHS = 5
EPOCHS = 7
cuda_num = "cuda:0"
setup = "Setup1"
#setup = ""
TrainFile = "TrainClassify" + setup + ".csv"
ValidFile = "ValidClassify" + setup + ".csv"
#ValidFile = "CheckClassify.csv"
#ValidFile = "WeakClassify.csv"
ModelName = "RobertaWwmExtClassifier" + setup + "Model{}"
def get_predictions(model, dataloader,device):
predictions = None
correct = 0
tp = 0
relative = 0
unrelative = 0
tn = 0
total = 0
model.eval()
with torch.no_grad():
for data in dataloader:
if next(model.parameters()).is_cuda:
data = [t.to(device) for t in data if t is not None]
tokens_tensors, segments_tensors, masks_tensors, labels = data
outputs = model(input_ids=tokens_tensors,
# token_type_ids=segments_tensors,
attention_mask=masks_tensors)
logits = outputs[0]
_, pred = torch.max(logits.data, 1)
total += labels.size(0)
correct += (pred == labels).sum().item()
for i,la in enumerate(labels):
if la == 1:
relative+=1
if pred[i] == 1:
tp+=1
else:
unrelative+=1
if pred[i]==0:
tn+=1
if predictions is None:
predictions = pred
else:
predictions = torch.cat((predictions, pred))
acc = correct / total
print("relative accuracy = {}/{} = {}".format(tp,relative,tp/relative))
print("unrelative accuracy = {}/{} = {}".format(tn,unrelative,tn/unrelative))
return predictions, acc
def classifier():
tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)
device = torch.device(cuda_num if torch.cuda.is_available() else "cpu")
model = BertForSequenceClassification.from_pretrained(
PRETRAINED_MODEL_NAME, num_labels=NUM_LABELS)
model = model.to(device)
model.load_state_dict(
torch.load(os.path.join(ModelName.format(classifier_model_idx),
'pytorch_model.bin'), map_location="cpu"))
model.eval()
def predict(doc):
predset = NewsDoc(tokenizer=tokenizer, doc=doc)
dataloader = torch.utils.data.DataLoader(
predset,batch_size=1,shuffle=True,collate_fn=create_mini_batch)
with torch.no_grad():
data = next(iter(dataloader))
if next(model.parameters()).is_cuda:
data = [t.to(device) for t in data if t is not None]
#if next(model.parameters()).is_cuda:
# data = [t.to(device) for t in data if t is not None]
tokens_tensors, segments_tensors, masks_tensors, labels = data
outputs = model(input_ids=tokens_tensors,
# token_type_ids=segments_tensors,
attention_mask=masks_tensors)
logits = outputs[0]
_, pred = torch.max(logits.data, 1)
return bool(pred[0])
return predict, None
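# Illustration only (not part of the original script): `classifier()` loads the
# fine-tuned checkpoint from ModelName.format(classifier_model_idx) and returns a
# closure that labels a single document string (the text below is a placeholder):
#
#   predict, _ = classifier()
#   is_relevant = predict('<news article text>')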
if __name__ == "__main__":
tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)
device = torch.device(cuda_num if torch.cuda.is_available() else "cpu")
model = BertForSequenceClassification.from_pretrained(
PRETRAINED_MODEL_NAME, num_labels=NUM_LABELS)
model = model.to(device)
if train:
trainset = NewsDataset(os.path.join(root, TrainFile), tokenizer=tokenizer)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=BATCH_SIZE,
collate_fn=create_mini_batch,shuffle=True)
model.train()
optimizer = torch.optim.Adam(model.parameters(), lr=1e-5)
for epoch in range(EPOCHS):
running_loss = 0.0
for data in trainloader:
tokens_tensors, segments_tensors, \
masks_tensors, labels = [t.to(device) for t in data]
optimizer.zero_grad()
outputs = model(input_ids=tokens_tensors,
# token_type_ids=segments_tensors,
attention_mask=masks_tensors,
labels=labels)
loss = outputs[0]
loss.backward()
optimizer.step()
running_loss += loss.item()
_, acc = get_predictions(model, trainloader, device=device)
print('[epoch %d] loss: %.3f, acc: %.3f' %
(epoch + 1, running_loss, acc))
model.save_pretrained(ModelName.format(epoch))
else:
for model_idx in range(0, EPOCHS):
print("epoch {}:".format(model_idx))
model.load_state_dict(
torch.load(os.path.join(ModelName.format(model_idx),
'pytorch_model.bin'), map_location="cpu"))
trainset = NewsDataset(os.path.join(root, ValidFile), tokenizer=tokenizer)
testloader = torch.utils.data.DataLoader(trainset, batch_size=BATCH_SIZE,
collate_fn=create_mini_batch)
_, acc = get_predictions(model, testloader, device=device)
print(acc)
| [
"NewsData.NewsDoc",
"torch.max",
"transformers.BertTokenizer.from_pretrained",
"os.path.join",
"transformers.BertForSequenceClassification.from_pretrained",
"torch.cuda.is_available",
"torch.utils.data.DataLoader",
"torch.no_grad",
"torch.cat"
] | [((186, 212), 'os.path.join', 'os.path.join', (['""".."""', '"""data"""'], {}), "('..', 'data')\n", (198, 212), False, 'import os\n'), ((2199, 2251), 'transformers.BertTokenizer.from_pretrained', 'BertTokenizer.from_pretrained', (['PRETRAINED_MODEL_NAME'], {}), '(PRETRAINED_MODEL_NAME)\n', (2228, 2251), False, 'from transformers import BertTokenizer\n'), ((2341, 2436), 'transformers.BertForSequenceClassification.from_pretrained', 'BertForSequenceClassification.from_pretrained', (['PRETRAINED_MODEL_NAME'], {'num_labels': 'NUM_LABELS'}), '(PRETRAINED_MODEL_NAME,\n num_labels=NUM_LABELS)\n', (2386, 2436), False, 'from transformers import BertForSequenceClassification\n'), ((3608, 3660), 'transformers.BertTokenizer.from_pretrained', 'BertTokenizer.from_pretrained', (['PRETRAINED_MODEL_NAME'], {}), '(PRETRAINED_MODEL_NAME)\n', (3637, 3660), False, 'from transformers import BertTokenizer\n'), ((3750, 3845), 'transformers.BertForSequenceClassification.from_pretrained', 'BertForSequenceClassification.from_pretrained', (['PRETRAINED_MODEL_NAME'], {'num_labels': 'NUM_LABELS'}), '(PRETRAINED_MODEL_NAME,\n num_labels=NUM_LABELS)\n', (3795, 3845), False, 'from transformers import BertForSequenceClassification\n'), ((921, 936), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (934, 936), False, 'import torch\n'), ((2694, 2731), 'NewsData.NewsDoc', 'NewsDoc', ([], {'tokenizer': 'tokenizer', 'doc': 'doc'}), '(tokenizer=tokenizer, doc=doc)\n', (2701, 2731), False, 'from NewsData import create_mini_batch, NewsDataset, NewsDoc\n'), ((2753, 2852), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['predset'], {'batch_size': '(1)', 'shuffle': '(True)', 'collate_fn': 'create_mini_batch'}), '(predset, batch_size=1, shuffle=True, collate_fn\n =create_mini_batch)\n', (2780, 2852), False, 'import torch\n'), ((4001, 4110), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['trainset'], {'batch_size': 'BATCH_SIZE', 'collate_fn': 'create_mini_batch', 'shuffle': '(True)'}), '(trainset, batch_size=BATCH_SIZE, collate_fn=\n create_mini_batch, shuffle=True)\n', (4028, 4110), False, 'import torch\n'), ((1393, 1418), 'torch.max', 'torch.max', (['logits.data', '(1)'], {}), '(logits.data, 1)\n', (1402, 1418), False, 'import torch\n'), ((2290, 2315), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2313, 2315), False, 'import torch\n'), ((2876, 2891), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2889, 2891), False, 'import torch\n'), ((3481, 3506), 'torch.max', 'torch.max', (['logits.data', '(1)'], {}), '(logits.data, 1)\n', (3490, 3506), False, 'import torch\n'), ((3699, 3724), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3722, 3724), False, 'import torch\n'), ((3927, 3956), 'os.path.join', 'os.path.join', (['root', 'TrainFile'], {}), '(root, TrainFile)\n', (3939, 3956), False, 'import os\n'), ((5494, 5589), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['trainset'], {'batch_size': 'BATCH_SIZE', 'collate_fn': 'create_mini_batch'}), '(trainset, batch_size=BATCH_SIZE, collate_fn=\n create_mini_batch)\n', (5521, 5589), False, 'import torch\n'), ((1920, 1950), 'torch.cat', 'torch.cat', (['(predictions, pred)'], {}), '((predictions, pred))\n', (1929, 1950), False, 'import torch\n'), ((5417, 5446), 'os.path.join', 'os.path.join', (['root', 'ValidFile'], {}), '(root, ValidFile)\n', (5429, 5446), False, 'import os\n')] |
import cv2
import time
cascade_src = 'cars.xml'
# video = 'data/Cars_On_Highway.mp4'
video = 'data/video1.avi'
# video = 'data/video2.avi'
def detectCars(filename):
rectangles = []
cascade = cv2.CascadeClassifier(cascade_src)
vc = cv2.VideoCapture(filename)
if vc.isOpened():
rval , frame = vc.read()
else:
rval = False
    while rval:
        rval, frame = vc.read()
        if not rval:
            break
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
# car detection
        cars = cascade.detectMultiScale(gray, 1.3, 3)  # scaleFactor=1.3, minNeighbors=3
if len(cars) > 0:
print("Car detected at " + str(vc.get(cv2.CAP_PROP_POS_MSEC)/1000) + "seconds")
time.sleep(2)
if cv2.waitKey(33) == ord('q'):
break
vc.release()
detectCars(video)
| [
"time.sleep",
"cv2.VideoCapture",
"cv2.cvtColor",
"cv2.CascadeClassifier",
"cv2.waitKey"
] | [((198, 232), 'cv2.CascadeClassifier', 'cv2.CascadeClassifier', (['cascade_src'], {}), '(cascade_src)\n', (219, 232), False, 'import cv2\n'), ((241, 267), 'cv2.VideoCapture', 'cv2.VideoCapture', (['filename'], {}), '(filename)\n', (257, 267), False, 'import cv2\n'), ((402, 441), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2GRAY'], {}), '(frame, cv2.COLOR_BGR2GRAY)\n', (414, 441), False, 'import cv2\n'), ((628, 641), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (638, 641), False, 'import time\n'), ((650, 665), 'cv2.waitKey', 'cv2.waitKey', (['(33)'], {}), '(33)\n', (661, 665), False, 'import cv2\n')] |
from dataclasses import dataclass
from logging import Logger, getLogger
from pathlib import Path
import pandas as pd
from omegaconf import MISSING, DictConfig
from fseval.config.callbacks.to_csv import ToCSVCallback
from fseval.types import TerminalColor
from ._base_export_callback import BaseExportCallback
@dataclass
class CSVCallback(BaseExportCallback, ToCSVCallback):
"""CSV support for fseval. Uploads general information on the experiment to
a `experiments` table and provides a hook for uploading custom tables. Use the
`on_table` hook in your pipeline to upload a DataFrame to a certain database table.
"""
def __post_init__(self):
# assert dir param was given
assert self.dir != MISSING, (
"The CSV callback did not receive a `dir` param. All results will be "
+ "written to files in this dir. This is required to export to CSV files."
)
# upgrade dir to Path type
self.save_dir = Path(self.dir)
# create directories where necessary
if not self.save_dir.is_dir(): # ensure directories exist
self.save_dir.mkdir(parents=True) # parents=True so creates recursively
# print save path
dir_abs_str = TerminalColor.blue(self.save_dir.absolute())
self.logger: Logger = getLogger(__name__)
self.logger.info(f"CSV callback enabled. Writing .csv files to: {dir_abs_str}")
def should_insert_header(self, filepath: Path) -> bool:
if filepath.exists():
# when the target `.csv` file already exists, omit header.
return False
else:
# otherwise, add a header to the csv file.
return True
def on_begin(self, config: DictConfig):
df = self.get_experiment_config(config)
# write experiment config to `experiments.csv`
filepath = self.save_dir / "experiments.csv"
header = self.should_insert_header(filepath)
df.to_csv(filepath, mode=self.mode, header=header)
# log
filepath_abs_str = TerminalColor.blue(filepath.absolute())
self.logger.info(
f"Written experiment config to: {filepath_abs_str} {TerminalColor.green('✓')}"
)
def on_table(self, df: pd.DataFrame, name: str):
# make sure experiment `id` is added to this table. this allows a user to JOIN
# the results back into each other, after being distributed over several
# database tables.
df = self.add_experiment_id(df)
# upload table to CSV file, named after the table name
filepath = self.save_dir / f"{name}.csv"
header = self.should_insert_header(filepath)
df.to_csv(filepath, mode=self.mode, header=header)
# log table upload
filepath_abs_str = TerminalColor.blue(filepath.absolute())
self.logger.info(
f"Written `{name}` table to: {filepath_abs_str} {TerminalColor.green('✓')}"
)
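# Hedged usage sketch, for illustration only (the config object and DataFrame
# below are made up). In fseval the pipeline normally drives these hooks:
# `on_begin` is called once with the run config (appends to experiments.csv)
# and `on_table` once per result table (appends to <table name>.csv):
#
#   callback = CSVCallback(dir="./results", mode="a")
#   callback.on_begin(run_config)
#   callback.on_table(scores_df, "validation_scores")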
| [
"logging.getLogger",
"fseval.types.TerminalColor.green",
"pathlib.Path"
] | [((983, 997), 'pathlib.Path', 'Path', (['self.dir'], {}), '(self.dir)\n', (987, 997), False, 'from pathlib import Path\n'), ((1320, 1339), 'logging.getLogger', 'getLogger', (['__name__'], {}), '(__name__)\n', (1329, 1339), False, 'from logging import Logger, getLogger\n'), ((2194, 2218), 'fseval.types.TerminalColor.green', 'TerminalColor.green', (['"""✓"""'], {}), "('✓')\n", (2213, 2218), False, 'from fseval.types import TerminalColor\n'), ((2927, 2951), 'fseval.types.TerminalColor.green', 'TerminalColor.green', (['"""✓"""'], {}), "('✓')\n", (2946, 2951), False, 'from fseval.types import TerminalColor\n')] |
import sys,os
import boto3
from pprint import pprint
AUTOSTART = False
AUTOSTOP = False
## Our main "part"
if( __name__ == "__main__" ):
ec2con = boto3.resource("ec2")
keys = {}
instance_id_list = []
instance_states = {}
instances = ec2con.instances.filter( Filters=[ {'Name': 'instance-state-name', 'Values': ['*']} ])
for instance in instances:
print( instance.id, instance.instance_type, instance.image_id, instance.state )
if instance.state['Name'] not in instance_states:
instance_states[ instance.state['Name'] ] = [];
instance_states[ instance.state['Name'] ].append( instance.id );
if( instance.key_name not in keys ):
kv = ec2con.KeyPair( instance.key_name )
keys[ instance.key_name ] = kv.key_fingerprint
for interface in instance.network_interfaces_attribute:
print( "VPC: %(vpcid)s" % { "vpcid":interface['VpcId'] } )
print("#####################################################################")
print("Found states:" )
pprint( instance_states )
if 'stopped' in instance_states and len( instance_states['stopped'] ) > 0:
client = boto3.client("ec2")
print("Can start nodes: %(s)s" % { "s": ",".join( instance_states['stopped'] ) } )
if AUTOSTART:
start_state = client.start_instances( InstanceIds=instance_states['stopped'] )
elif 'running' in instance_states and len( instance_states['running'] ) > 0:
client = boto3.client("ec2")
print("Can stop nodes: %(s)s" % { "s": ",".join( instance_states['running'] ) } )
if AUTOSTOP:
stop_state = client.stop_instances( InstanceIds=instance_states['running'] )
else:
print("No actions taken on any insatnces")
print("#########################")
if( len( keys.keys() ) > 1 ):
print("WARN: More then one key-pair used, %(n)s" % {"n": len( keys.keys() ) })
if( len( keys.keys() ) > 0 ):
pprint( keys )
print("#####################################################################")
defaultvpc = boto3.client("ec2").describe_vpcs()['Vpcs'][0]['VpcId'];
for vpc in ec2con.Vpc( defaultvpc ).security_groups.all():
pprint( vpc )
| [
"boto3.resource",
"boto3.client",
"pprint.pprint"
] | [((152, 173), 'boto3.resource', 'boto3.resource', (['"""ec2"""'], {}), "('ec2')\n", (166, 173), False, 'import boto3\n'), ((979, 1002), 'pprint.pprint', 'pprint', (['instance_states'], {}), '(instance_states)\n', (985, 1002), False, 'from pprint import pprint\n'), ((1094, 1113), 'boto3.client', 'boto3.client', (['"""ec2"""'], {}), "('ec2')\n", (1106, 1113), False, 'import boto3\n'), ((1824, 1836), 'pprint.pprint', 'pprint', (['keys'], {}), '(keys)\n', (1830, 1836), False, 'from pprint import pprint\n'), ((2054, 2065), 'pprint.pprint', 'pprint', (['vpc'], {}), '(vpc)\n', (2060, 2065), False, 'from pprint import pprint\n'), ((1388, 1407), 'boto3.client', 'boto3.client', (['"""ec2"""'], {}), "('ec2')\n", (1400, 1407), False, 'import boto3\n'), ((1935, 1954), 'boto3.client', 'boto3.client', (['"""ec2"""'], {}), "('ec2')\n", (1947, 1954), False, 'import boto3\n')] |
import pandas as pd
from spherical_geometry.polygon import SphericalPolygon
import numpy as np
lon = [91.3320117152011, 74.6060844556399,
174.409435753150, 144.284491292185, 91.3320117152011]
lat = [9.37366242174489, 61.1396992149365,
48.6744705245903, 2.08633373396527, 9.37366242174489]
coordinate = []
for i, j in zip(lon, lat):
phi = np.deg2rad(i)
theta = np.deg2rad(90-j)
x = np.sin(theta)*np.cos(phi)
y = np.sin(theta)*np.sin(phi)
z = np.cos(theta)
coordinate.append((x, y, z))
sp = SphericalPolygon(coordinate)
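# Worked example of the lon/lat -> unit-vector conversion above (added for
# illustration; the test points are arbitrary, not taken from the data):
# lon=0, lat=90 (the north pole) gives phi=0, theta=0, hence (x, y, z) = (0, 0, 1);
# lon=90, lat=0 (on the equator) gives (x, y, z) = (0, 1, 0).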
def filter_in_box(row):
lat = row.evla
lon = row.evlo
phi = np.deg2rad(lon)
theta = np.deg2rad(90-lat)
x = np.sin(theta)*np.cos(phi)
y = np.sin(theta)*np.sin(phi)
z = np.cos(theta)
return sp.contains_point((x, y, z))
def main():
data = pd.read_csv(
"./data/list_227evts_info_EARA2014_inversion", sep="\s+")
data_selected = data[data.apply(filter_in_box, axis=1)]
data_selected.to_csv(
"./data/list_227evts_info_EARA2014_inversion.selected", sep=" ", index=False)
if __name__ == "__main__":
main()
| [
"pandas.read_csv",
"spherical_geometry.polygon.SphericalPolygon",
"numpy.deg2rad",
"numpy.cos",
"numpy.sin"
] | [((528, 556), 'spherical_geometry.polygon.SphericalPolygon', 'SphericalPolygon', (['coordinate'], {}), '(coordinate)\n', (544, 556), False, 'from spherical_geometry.polygon import SphericalPolygon\n'), ((357, 370), 'numpy.deg2rad', 'np.deg2rad', (['i'], {}), '(i)\n', (367, 370), True, 'import numpy as np\n'), ((383, 401), 'numpy.deg2rad', 'np.deg2rad', (['(90 - j)'], {}), '(90 - j)\n', (393, 401), True, 'import numpy as np\n'), ((476, 489), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (482, 489), True, 'import numpy as np\n'), ((631, 646), 'numpy.deg2rad', 'np.deg2rad', (['lon'], {}), '(lon)\n', (641, 646), True, 'import numpy as np\n'), ((659, 679), 'numpy.deg2rad', 'np.deg2rad', (['(90 - lat)'], {}), '(90 - lat)\n', (669, 679), True, 'import numpy as np\n'), ((754, 767), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (760, 767), True, 'import numpy as np\n'), ((833, 903), 'pandas.read_csv', 'pd.read_csv', (['"""./data/list_227evts_info_EARA2014_inversion"""'], {'sep': '"""\\\\s+"""'}), "('./data/list_227evts_info_EARA2014_inversion', sep='\\\\s+')\n", (844, 903), True, 'import pandas as pd\n'), ((408, 421), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (414, 421), True, 'import numpy as np\n'), ((422, 433), 'numpy.cos', 'np.cos', (['phi'], {}), '(phi)\n', (428, 433), True, 'import numpy as np\n'), ((442, 455), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (448, 455), True, 'import numpy as np\n'), ((456, 467), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (462, 467), True, 'import numpy as np\n'), ((686, 699), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (692, 699), True, 'import numpy as np\n'), ((700, 711), 'numpy.cos', 'np.cos', (['phi'], {}), '(phi)\n', (706, 711), True, 'import numpy as np\n'), ((720, 733), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (726, 733), True, 'import numpy as np\n'), ((734, 745), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (740, 745), True, 'import numpy as np\n')] |
import torch
import torch.nn.functional as F
from torch import nn
class OrganDistanceLoss(nn.Module):
def __init__(self, device, voxel_temperature: float, organ_temperature: float):
super(OrganDistanceLoss, self).__init__()
self.device = device
self.voxel_temperature = voxel_temperature
self.organ_temperature = organ_temperature
def forward(
self, predictions: torch.Tensor, anchors: torch.Tensor, lengths: torch.Tensor,
) -> torch.Tensor:
"""Computes the minimum distance to organ loss.
Arguments:
predictions: Tensor with shape [batch_size, 3]
anchors: Tensor with shape [batch_size, max_organs_in_batch, num_sampled_points, 3]
lengths: Tensor with shape [batch_size]
            device: A torch device - either cpu or gpu
"""
mask = (
torch.arange(torch.max(lengths))
.expand(lengths.size()[0], torch.max(lengths))
.to(self.device)
< lengths.unsqueeze(1)
).float()
        mask[torch.where(mask == 0)] = 1e15  # padded organ slots get a huge distance so softmin ignores them
mask = mask.unsqueeze(2)
predictions = predictions.unsqueeze(1).unsqueeze(2)
distances = (predictions - anchors).norm(p=2, dim=3)
distances_masked = distances * mask
distances_weights = F.softmin(distances_masked / self.voxel_temperature, dim=2)
organ_distances_masked = (distances_masked * distances_weights).sum(dim=2)
organ_distances_weights = F.softmin(
organ_distances_masked / self.organ_temperature, dim=1
)
loss = (organ_distances_masked * organ_distances_weights).sum(dim=1).mean(dim=0)
return loss
class BaselineRegLoss(nn.Module):
def __init__(self):
super(BaselineRegLoss, self).__init__()
self.mse = nn.MSELoss()
def forward(self, predictions: torch.Tensor, anchors: torch.Tensor) -> torch.Tensor:
batch_size = predictions.size()[0]
return self.mse(predictions, anchors.view(batch_size, -1, 3).mean(1))
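# Hedged smoke test, added for illustration only (not part of the original
# module). Tensor shapes follow the `forward` docstring above: [batch, 3]
# predictions, [batch, max_organs, num_sampled_points, 3] anchors, and a
# per-sample organ count in `lengths`.
if __name__ == "__main__":
    device = torch.device("cpu")
    loss_fn = OrganDistanceLoss(device, voxel_temperature=1.0, organ_temperature=1.0)
    predictions = torch.randn(2, 3)
    anchors = torch.randn(2, 3, 4, 3)
    lengths = torch.tensor([3, 2])
    print("organ distance loss:", loss_fn(predictions, anchors, lengths).item())
    print("baseline regression loss:", BaselineRegLoss()(predictions, anchors).item())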
| [
"torch.nn.functional.softmin",
"torch.max",
"torch.nn.MSELoss",
"torch.where"
] | [((1319, 1378), 'torch.nn.functional.softmin', 'F.softmin', (['(distances_masked / self.voxel_temperature)'], {'dim': '(2)'}), '(distances_masked / self.voxel_temperature, dim=2)\n', (1328, 1378), True, 'import torch.nn.functional as F\n'), ((1496, 1561), 'torch.nn.functional.softmin', 'F.softmin', (['(organ_distances_masked / self.organ_temperature)'], {'dim': '(1)'}), '(organ_distances_masked / self.organ_temperature, dim=1)\n', (1505, 1561), True, 'import torch.nn.functional as F\n'), ((1820, 1832), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (1830, 1832), False, 'from torch import nn\n'), ((1062, 1084), 'torch.where', 'torch.where', (['(mask == 0)'], {}), '(mask == 0)\n', (1073, 1084), False, 'import torch\n'), ((947, 965), 'torch.max', 'torch.max', (['lengths'], {}), '(lengths)\n', (956, 965), False, 'import torch\n'), ((888, 906), 'torch.max', 'torch.max', (['lengths'], {}), '(lengths)\n', (897, 906), False, 'import torch\n')] |
"""Tests for the main paper-git command line application."""
import pytest
from io import StringIO
from unittest.mock import patch
from papergit.bin.paper_git import main
@pytest.mark.usefixtures('initialize_fixture')
class TestPaperGitCommand(object):
"""Tests for the main paper-git command."""
def test_usage_with_no_arguments(self):
# Test that the command runs without any arguments.
testargs = ['paper-git']
output = StringIO()
with patch('sys.argv', testargs), patch('sys.stdout', output):
with pytest.raises(SystemExit):
main()
assert 'usage' in output.getvalue()
def test_usage_with_bad_arguments(self):
# Test that the command prints usage with wrong arguments.
testargs = ['paper-git', 'badcommand']
output = StringIO()
with patch('sys.argv', testargs), patch('sys.stderr', output):
with pytest.raises(SystemExit):
main()
assert 'usage' in output.getvalue()
def test_initialization(self):
# Test paper-git with a valid command.
testargs = ['paper-git', 'list']
output = StringIO()
error = StringIO()
with patch('sys.argv', testargs), \
patch('sys.stdout', output), patch('sys.stderr', error):
main()
assert output.getvalue() == ''
assert error.getvalue() == ''
| [
"pytest.raises",
"pytest.mark.usefixtures",
"unittest.mock.patch",
"io.StringIO",
"papergit.bin.paper_git.main"
] | [((176, 221), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', (['"""initialize_fixture"""'], {}), "('initialize_fixture')\n", (199, 221), False, 'import pytest\n'), ((460, 470), 'io.StringIO', 'StringIO', ([], {}), '()\n', (468, 470), False, 'from io import StringIO\n'), ((830, 840), 'io.StringIO', 'StringIO', ([], {}), '()\n', (838, 840), False, 'from io import StringIO\n'), ((1164, 1174), 'io.StringIO', 'StringIO', ([], {}), '()\n', (1172, 1174), False, 'from io import StringIO\n'), ((1191, 1201), 'io.StringIO', 'StringIO', ([], {}), '()\n', (1199, 1201), False, 'from io import StringIO\n'), ((484, 511), 'unittest.mock.patch', 'patch', (['"""sys.argv"""', 'testargs'], {}), "('sys.argv', testargs)\n", (489, 511), False, 'from unittest.mock import patch\n'), ((513, 540), 'unittest.mock.patch', 'patch', (['"""sys.stdout"""', 'output'], {}), "('sys.stdout', output)\n", (518, 540), False, 'from unittest.mock import patch\n'), ((854, 881), 'unittest.mock.patch', 'patch', (['"""sys.argv"""', 'testargs'], {}), "('sys.argv', testargs)\n", (859, 881), False, 'from unittest.mock import patch\n'), ((883, 910), 'unittest.mock.patch', 'patch', (['"""sys.stderr"""', 'output'], {}), "('sys.stderr', output)\n", (888, 910), False, 'from unittest.mock import patch\n'), ((1215, 1242), 'unittest.mock.patch', 'patch', (['"""sys.argv"""', 'testargs'], {}), "('sys.argv', testargs)\n", (1220, 1242), False, 'from unittest.mock import patch\n'), ((1262, 1289), 'unittest.mock.patch', 'patch', (['"""sys.stdout"""', 'output'], {}), "('sys.stdout', output)\n", (1267, 1289), False, 'from unittest.mock import patch\n'), ((1291, 1317), 'unittest.mock.patch', 'patch', (['"""sys.stderr"""', 'error'], {}), "('sys.stderr', error)\n", (1296, 1317), False, 'from unittest.mock import patch\n'), ((1331, 1337), 'papergit.bin.paper_git.main', 'main', ([], {}), '()\n', (1335, 1337), False, 'from papergit.bin.paper_git import main\n'), ((559, 584), 'pytest.raises', 'pytest.raises', (['SystemExit'], {}), '(SystemExit)\n', (572, 584), False, 'import pytest\n'), ((602, 608), 'papergit.bin.paper_git.main', 'main', ([], {}), '()\n', (606, 608), False, 'from papergit.bin.paper_git import main\n'), ((929, 954), 'pytest.raises', 'pytest.raises', (['SystemExit'], {}), '(SystemExit)\n', (942, 954), False, 'import pytest\n'), ((972, 978), 'papergit.bin.paper_git.main', 'main', ([], {}), '()\n', (976, 978), False, 'from papergit.bin.paper_git import main\n')] |
# Licensed to CRATE Technology GmbH ("Crate") under one or more contributor
# license agreements. See the NOTICE file distributed with this work for
# additional information regarding copyright ownership. Crate licenses
# this file to you under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License. You may
# obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# However, if you have executed another commercial license agreement
# with Crate these terms will supersede the license and you may use the
# software solely pursuant to the terms of the relevant commercial agreement.
from argparse import Namespace
from croud.api import Client
from croud.config import get_output_format
from croud.printer import print_response, print_warning
def transform_roles_list(key):
def _transform(field):
return ",\n".join(f"{r[key]}: {r['role_fqn']}" for r in field)
return _transform
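# Illustrative example (not part of the original module): the transform returned
# above joins a list of role dicts into one readable string, e.g.
#   fmt = transform_roles_list("organization_id")
#   fmt([{"organization_id": "org-1", "role_fqn": "org_admin"}])  # -> "org-1: org_admin"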
def users_list(args: Namespace) -> None:
client = Client.from_args(args)
if args.no_org:
print_warning(
"The --no-org argument is deprecated. Please use --no-roles instead."
)
no_roles = {"no-roles": "1"} if (args.no_roles or args.no_org) else None
data, errors = client.get("/api/v2/users/", params=no_roles)
print_response(
data=data,
errors=errors,
output_fmt=get_output_format(args),
keys=["uid", "email", "username", "organization_roles", "project_roles"],
transforms={
"organization_roles": transform_roles_list("organization_id"),
"project_roles": transform_roles_list("project_id"),
},
)
| [
"croud.config.get_output_format",
"croud.printer.print_warning",
"croud.api.Client.from_args"
] | [((1366, 1388), 'croud.api.Client.from_args', 'Client.from_args', (['args'], {}), '(args)\n', (1382, 1388), False, 'from croud.api import Client\n'), ((1417, 1506), 'croud.printer.print_warning', 'print_warning', (['"""The --no-org argument is deprecated. Please use --no-roles instead."""'], {}), "(\n 'The --no-org argument is deprecated. Please use --no-roles instead.')\n", (1430, 1506), False, 'from croud.printer import print_response, print_warning\n'), ((1748, 1771), 'croud.config.get_output_format', 'get_output_format', (['args'], {}), '(args)\n', (1765, 1771), False, 'from croud.config import get_output_format\n')] |
# Copyright (c) 2020 NVIDIA CORPORATION. All rights reserved.
"""
Simple but comprehensive toolset for transforming VCF output
from raw scores (Conv1D or other GPU inferencing) to fields
usable by VCFEval (and other upstream variant tools).
-- threshold by model score
-- threshold separately for SNP and Indel
-- threshold called variants by hetero/homozygous -- output 0/1 or 1/1 in the VCF
-- output a model confidence score, for ROC in VCFEval
-- TODO: If 2+ variants per location (above threshold), choose top 2 [multi-allele]
-- TODO: If single-allele and multi-allele predictions, threshold between those
This is important, because VCFEval needs 0/1 type predictions.
Probably a good idea to pre-filter the inputs -- since data will be processed
in series, and stored in memory.
Example command:
python format_vcf.py --input_file f4xnp6dc_epoch3_output_HG02_ALL-multiAF-DVAR.vcf \
--output_file epoch3_output_multiAF_thresh_test.vcf \
--threshold 0.5 --indel_threshold 0.1 --zygo_threshold 0.4
@author: nyakovenko
"""
import argparse
from argparse import RawTextHelpFormatter
from collections import defaultdict
import functools
import multiprocessing
from multiprocessing import Pool
import os
import tempfile
import tqdm
import logging
import subprocess
import time
# Inline -- for now
NONCE = 0.0000001
# We don't really need score bucket -- can use 50 but VCFEval will take longer
SCORE_BUCKETS = 50
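# Illustrative example (mirrors the q_score computation in filter_format_vcf below):
# with a 0.5 threshold, a call score of 0.8 maps to int((0.8 - 0.5) / (1.0 - 0.5) * SCORE_BUCKETS) = 30,
# i.e. the margin above the threshold is rescaled to 0..SCORE_BUCKETS for VCFEval's ROC.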
# Easier to parse
def print_lines(lines):
for l in lines:
print(l)
def filter_format_vcf(args):
input_fname = args.input_file
output_fname = args.output_file
# Unnecessary way to get length for TQDM -- can't do it any faster
num_lines = sum(1 for line in open(input_fname, 'r'))
print('reading %d lines from %s' % (num_lines, input_fname))
print('writing to %s' % output_fname)
#set thresholds
snp_threshold = args.snp_threshold
snp_hz_threshold = args.snp_zygo_threshold
if args.indel_threshold > 0.:
indel_threshold = args.indel_threshold
indel_hz_threshold = args.indel_zygo_threshold
if args.long_indel_threshold > 0.:
long_indel_threshold = args.long_indel_threshold
            long_indel_hz_threshold = args.long_indel_zygo_threshold
else:
long_indel_threshold = indel_threshold
long_indel_hz_threshold = indel_hz_threshold
if args.delete_threshold > 0.:
delete_threshold = args.delete_threshold
delete_hz_threshold = args.delete_zygo_threshold
else:
delete_threshold = indel_threshold
delete_hz_threshold = indel_hz_threshold
else:
indel_threshold = snp_threshold
indel_hz_threshold = snp_hz_threshold
long_indel_threshold = indel_threshold
long_indel_hz_threshold = indel_hz_threshold
debug = args.debug
#initialize variables to store results
curr_line = 0
curr_pos, curr_chrom = None, None
curr_pos_lines = list()
curr_pos_threshold_scores = list()
curr_pos_gts = list()
with open(input_fname, 'r') as fin:
with open(output_fname, 'w') as fout:
for line in tqdm.tqdm(fin, total=num_lines):
curr_line += 1
if line[0] == '#':
# copy header
# TODO: Extend CVS columns?
fout.write(line)
else:
items = line.strip('\n').split('\t')
if debug:
print(line)
print(items)
# should have 10-11 items (11th is appended "GT:1/1")
assert len(items) == 10 or len(items) == 11, 'Line should have 10-11 items (11th is appended "GT:1/1")\n%s' % line
scores = items[2].split(';')
scores = {a:float(b) for a,b in [s.split('=') for s in scores]}
if debug:
print(scores)
# Threshold on VT (variant type) score
# TODO: Choose based on binary score? Command line option
threshold_score = 1.0 - scores['NV']
ref_bases = items[3]
var_bases = items[4]
is_snp = (len(ref_bases) == 1 and len(var_bases) == 1)
                    is_indel = not is_snp
                    is_long_indel = (len(ref_bases) >= 3 or len(var_bases) >= 3)
                    is_delete = (len(ref_bases) > 1 and len(var_bases) == 1) and not is_long_indel
threshold = snp_threshold if is_snp else (long_indel_threshold if is_long_indel else (delete_threshold if is_delete else indel_threshold))
threshold_score_margin = threshold_score - threshold
if threshold_score_margin >= 0.:
# format this line
# default = heterozygous
gt_string = '0/1'
hz_score = scores['OV']
# normalize -- TODO: Option?
#hz_score = hz_score / (1.0 - scores['NV'] + NONCE)
hz_score = hz_score
hz_threshold = snp_hz_threshold if is_snp else (long_indel_hz_threshold if is_long_indel else (delete_hz_threshold if is_delete else indel_hz_threshold))
if hz_score >= hz_threshold:
gt_string = '1/1'
# Create a sort of quality score?
# Arbitrary, so scale to min threshold(?)
# NOTE: Binarize -- else too many endpoints for VCFEval
q_score = threshold_score_margin / (1.0 - threshold)
q_score = int(q_score * SCORE_BUCKETS)
new_items = items[0:9] + ['%s:%s' % (gt_string, q_score)]
new_line = '\t'.join(new_items)
if debug:
print(new_line)
#if this is the first variant line: store results and move on to the next line
if curr_pos is None:
curr_chrom = items[0]
curr_pos = items[1]
curr_pos_lines = [new_line]
curr_pos_threshold_scores = [threshold_score]
curr_pos_gts = [gt_string]
#if this position is the same as curr_pos - append results to previous ones and move on to the next line
elif curr_chrom == items[0] and curr_pos == items[1]:
if curr_pos == '182303657':
print('appending')
print(new_line)
curr_pos_lines.append(new_line)
curr_pos_threshold_scores.append(threshold_score)
curr_pos_gts.append(gt_string)
#if this is a new position - write the results for curr_pos to output, then overwrite with new results
else:
#if there is a homozygous variant - discard all others
if '1/1' in curr_pos_gts:
# TODO: Sort by OV score
best_index = curr_pos_gts.index('1/1')
if len(curr_pos_lines) > 1:
print('----------------\nChoosing *single* homozygous var for possible multi-allele')
print(curr_pos_lines[best_index])
print('discarding %d other lines:' % (len(curr_pos_lines)-1))
print_lines([curr_pos_lines[i] for i in (set(range(len(curr_pos_lines))) - set([best_index]))])
# Look at *second* position. Do not over-write if second best result is still very strong
#top2 = [curr_pos_threshold_scores.index(i) for i in sorted(curr_pos_threshold_scores, reverse=True)[:2]]
sorted_pair = sorted(list(zip(curr_pos_threshold_scores, curr_pos_lines)), reverse=True)
#print(sorted_pair)
top2 = [curr_pos_lines.index(j) for (i,j) in sorted_pair][:2]
assert top2[0] != top2[1]
if curr_pos_threshold_scores[top2[1]] >= args.multiallele_homozygous_second_threshold:
print('A*Skipping* single homozygous because second result too good %.5f' % curr_pos_threshold_scores[top2[1]])
elif curr_pos_threshold_scores[top2[0]] >= args.multiallele_homozygous_second_threshold and curr_pos_gts[top2[0]] != '1/1':
print('B*Skipping* single homozygous because second result too good %.5f' % curr_pos_threshold_scores[top2[0]])
else:
curr_pos_lines = [curr_pos_lines[best_index]]
#if curr_pos has >2 heterozygous variants, store 2 with highest threshold score
if len(curr_pos_lines)>2:
#top2 = [curr_pos_threshold_scores.index(i) for i in sorted(curr_pos_threshold_scores, reverse=True)[:2]]
sorted_pair = sorted(list(zip(curr_pos_threshold_scores, curr_pos_lines)), reverse=True)
#print(sorted_pair)
top2 = [curr_pos_lines.index(j) for (i,j) in sorted_pair][:2]
assert top2[0] != top2[1]
if len(curr_pos_lines) > 2:
print('----------------\nChoosing *two* hetero var for possible multi-allele')
print_lines([curr_pos_lines[i] for i in top2])
# Discard second allele, if below threshold
if curr_pos_threshold_scores[top2[1]] <= args.multiallele_second_threshold:
print('C*Skipping* second allele because its not good enough.')
print_lines([curr_pos_lines[i] for i in top2[1:]])
top2 = top2[:1]
#time.sleep(2)
print('discarding %d other lines:' % (len(curr_pos_lines)-2))
print_lines([curr_pos_lines[i] for i in (set(range(len(curr_pos_lines))) - set(top2))])
curr_pos_lines = [curr_pos_lines[i] for i in top2]
#write to file
for output_line in curr_pos_lines:
fout.write(output_line+'\n')
#overwrite with new position
curr_chrom = items[0]
curr_pos = items[1]
curr_pos_lines = [new_line]
curr_pos_threshold_scores = [threshold_score]
curr_pos_gts = [gt_string]
#if this is the last line in the file: write results
if curr_line==num_lines:
#if there is a homozygous variant - discard all others
if '1/1' in curr_pos_gts:
curr_pos_lines = [curr_pos_lines[curr_pos_gts.index('1/1')]]
#if curr_pos has >2 heterozygous variants, store 2 with highest threshold score
if(len(curr_pos_lines)>2):
top2 = [curr_pos_threshold_scores.index(i) for i in sorted(curr_pos_threshold_scores)[-2:]]
curr_pos_lines = [curr_pos_lines[i] for i in top2]
#write to file
for output_line in curr_pos_lines:
fout.write(output_line+'\n')
def main():
# Training settings
print("Start program")
parser = argparse.ArgumentParser(description='Context module')
parser.add_argument('--input_file', type=str, default="", help='input vcf file')
parser.add_argument('--output_file', type=str, default="", help='output vcf file')
parser.add_argument('--snp_threshold', type=float, default=0.3, help='min variant call score')
parser.add_argument('--indel_threshold', type=float, default=0., help='(optional) threshold for indel only')
parser.add_argument('--long_indel_threshold', type=float, default=0., help='(optional) threshold for indel only')
parser.add_argument('--delete_threshold', type=float, default=0., help='')
parser.add_argument('--snp_zygo_threshold', type=float, default=0.5, help='min homozygous score for 1/1 predict')
parser.add_argument('--indel_zygo_threshold', type=float, default=0.5, help='(optional) min homozygous score for 1/1 predict for indel only')
parser.add_argument('--long_indel_zygo_threshold', type=float, default=0.5, help='(optional) min homozygous score for 1/1 predict for indel only')
parser.add_argument('--delete_zygo_threshold', type=float, default=0.5, help='')
    parser.add_argument('--multiallele_second_threshold', type=float, default=0.7, help="don't add *second* allele if it is a low-probability variant (can be way above the basic threshold)")
    parser.add_argument('--multiallele_homozygous_second_threshold', type=float, default=0.9, help="don't over-write multi-allele if the first is homozygous but the second variant is also very strong")
parser.add_argument('--debug', action='store_true', default=False, help='debug while conversion?')
# NOTE: By default, all non-homozygous predictions (that still meet variant call threshold) treated as 0/1
args = parser.parse_args()
print(args)
# Perform all operations inline (while possible)
filter_format_vcf(args)
if __name__ == '__main__':
main()
| [
"tqdm.tqdm",
"argparse.ArgumentParser"
] | [((12404, 12457), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Context module"""'}), "(description='Context module')\n", (12427, 12457), False, 'import argparse\n'), ((3167, 3198), 'tqdm.tqdm', 'tqdm.tqdm', (['fin'], {'total': 'num_lines'}), '(fin, total=num_lines)\n', (3176, 3198), False, 'import tqdm\n')] |
from flask_wtf import FlaskForm
from wtforms import StringField, SubmitField, IntegerField, SelectField
from satori.config import verbose
class EditConfigurationForm(FlaskForm):
flaskPort = IntegerField(verbose('flaskPort'), validators=[])
nodejsPort = IntegerField(verbose('nodejsPort'), validators=[])
dataPath = StringField(verbose('dataPath'), validators=[])
modelPath = StringField(verbose('modelPath'), validators=[])
defaultSource = SelectField(verbose('defaultSource'), validators=[], choices=['streamr', 'satori'])
submit = SubmitField('Save')
| [
"satori.config.verbose",
"wtforms.SubmitField"
] | [((558, 577), 'wtforms.SubmitField', 'SubmitField', (['"""Save"""'], {}), "('Save')\n", (569, 577), False, 'from wtforms import StringField, SubmitField, IntegerField, SelectField\n'), ((208, 228), 'satori.config.verbose', 'verbose', (['"""flaskPort"""'], {}), "('flaskPort')\n", (215, 228), False, 'from satori.config import verbose\n'), ((275, 296), 'satori.config.verbose', 'verbose', (['"""nodejsPort"""'], {}), "('nodejsPort')\n", (282, 296), False, 'from satori.config import verbose\n'), ((340, 359), 'satori.config.verbose', 'verbose', (['"""dataPath"""'], {}), "('dataPath')\n", (347, 359), False, 'from satori.config import verbose\n'), ((404, 424), 'satori.config.verbose', 'verbose', (['"""modelPath"""'], {}), "('modelPath')\n", (411, 424), False, 'from satori.config import verbose\n'), ((473, 497), 'satori.config.verbose', 'verbose', (['"""defaultSource"""'], {}), "('defaultSource')\n", (480, 497), False, 'from satori.config import verbose\n')] |
import os
import toml
import argparse
from pprint import pprint
import torch
from torch.utils.data import DataLoader
import utils
from utils import CONFIG
from trainer import Trainer
from tester import Tester
from dataloader.image_file import ImageFileTrain, ImageFileTest
from dataloader.data_generator import DataGenerator
from dataloader.prefetcher import Prefetcher
def main():
# Train or Test
if CONFIG.phase.lower() == "train":
# set distributed training
if CONFIG.dist:
CONFIG.gpu = CONFIG.local_rank
torch.cuda.set_device(CONFIG.gpu)
torch.distributed.init_process_group(backend='nccl', init_method='env://')
CONFIG.world_size = torch.distributed.get_world_size()
# Create directories if not exist.
if CONFIG.local_rank == 0:
utils.make_dir(CONFIG.log.logging_path)
utils.make_dir(CONFIG.log.tensorboard_path)
utils.make_dir(CONFIG.log.checkpoint_path)
# Create a logger
logger, tb_logger = utils.get_logger(CONFIG.log.logging_path,
CONFIG.log.tensorboard_path,
logging_level=CONFIG.log.logging_level)
train_image_file = ImageFileTrain(alpha_dir=CONFIG.data.train_alpha,
fg_dir=CONFIG.data.train_fg,
bg_dir=CONFIG.data.train_bg)
test_image_file = ImageFileTest(alpha_dir=CONFIG.data.test_alpha,
merged_dir=CONFIG.data.test_merged,
trimap_dir=CONFIG.data.test_trimap)
train_dataset = DataGenerator(train_image_file, phase='train')
test_dataset = DataGenerator(test_image_file, phase='val')
if CONFIG.dist:
train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
test_sampler = torch.utils.data.distributed.DistributedSampler(test_dataset)
else:
train_sampler = None
test_sampler = None
train_dataloader = DataLoader(train_dataset,
batch_size=CONFIG.model.batch_size,
shuffle=(train_sampler is None),
num_workers=CONFIG.data.workers,
pin_memory=True,
sampler=train_sampler,
drop_last=True)
train_dataloader = Prefetcher(train_dataloader)
test_dataloader = DataLoader(test_dataset,
batch_size=1,
shuffle=False,
num_workers=CONFIG.data.workers,
sampler=test_sampler,
drop_last=False)
trainer = Trainer(train_dataloader=train_dataloader,
test_dataloader=test_dataloader,
logger=logger,
tb_logger=tb_logger)
trainer.train()
elif CONFIG.phase.lower() == "test":
CONFIG.log.logging_path += "_test"
if CONFIG.test.alpha_path is not None:
utils.make_dir(CONFIG.test.alpha_path)
utils.make_dir(CONFIG.log.logging_path)
# Create a logger
logger = utils.get_logger(CONFIG.log.logging_path,
logging_level=CONFIG.log.logging_level)
test_image_file = ImageFileTest(alpha_dir=CONFIG.test.alpha,
merged_dir=CONFIG.test.merged,
trimap_dir=CONFIG.test.trimap)
test_dataset = DataGenerator(test_image_file, phase='test', test_scale=CONFIG.test.scale)
test_dataloader = DataLoader(test_dataset,
batch_size=CONFIG.test.batch_size,
shuffle=False,
num_workers=CONFIG.data.workers,
drop_last=False)
tester = Tester(test_dataloader=test_dataloader)
tester.test()
else:
raise NotImplementedError("Unknown Phase: {}".format(CONFIG.phase))
if __name__ == '__main__':
print('Torch Version: ', torch.__version__)
parser = argparse.ArgumentParser()
parser.add_argument('--phase', type=str, default='train')
parser.add_argument('--config', type=str, default='config/gca-dist.toml')
parser.add_argument('--local_rank', type=int, default=0)
# Parse configuration
args = parser.parse_args()
with open(args.config) as f:
utils.load_config(toml.load(f))
# Check if toml config file is loaded
if CONFIG.is_default:
raise ValueError("No .toml config loaded.")
CONFIG.phase = args.phase
CONFIG.log.logging_path = os.path.join(CONFIG.log.logging_path, CONFIG.version)
CONFIG.log.tensorboard_path = os.path.join(CONFIG.log.tensorboard_path, CONFIG.version)
CONFIG.log.checkpoint_path = os.path.join(CONFIG.log.checkpoint_path, CONFIG.version)
if CONFIG.test.alpha_path is not None:
CONFIG.test.alpha_path = os.path.join(CONFIG.test.alpha_path, CONFIG.version)
if args.local_rank == 0:
print('CONFIG: ')
pprint(CONFIG)
CONFIG.local_rank = args.local_rank
# Train or Test
main()
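# Illustrative launch commands (file name and GPU count are assumptions):
#   single process: python train.py --config config/gca-dist.toml --phase train
#   distributed:    python -m torch.distributed.launch --nproc_per_node=4 \
#                       train.py --config config/gca-dist.toml --phase train
# torch.distributed.launch passes --local_rank to every worker, which this script
# reads to pick its GPU when CONFIG.dist is enabled.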
| [
"trainer.Trainer",
"utils.get_logger",
"argparse.ArgumentParser",
"dataloader.image_file.ImageFileTest",
"os.path.join",
"torch.cuda.set_device",
"dataloader.prefetcher.Prefetcher",
"dataloader.data_generator.DataGenerator",
"torch.utils.data.distributed.DistributedSampler",
"torch.distributed.init_process_group",
"toml.load",
"utils.make_dir",
"torch.utils.data.DataLoader",
"utils.CONFIG.phase.lower",
"dataloader.image_file.ImageFileTrain",
"pprint.pprint",
"tester.Tester",
"torch.distributed.get_world_size"
] | [((4454, 4479), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (4477, 4479), False, 'import argparse\n'), ((4994, 5047), 'os.path.join', 'os.path.join', (['CONFIG.log.logging_path', 'CONFIG.version'], {}), '(CONFIG.log.logging_path, CONFIG.version)\n', (5006, 5047), False, 'import os\n'), ((5082, 5139), 'os.path.join', 'os.path.join', (['CONFIG.log.tensorboard_path', 'CONFIG.version'], {}), '(CONFIG.log.tensorboard_path, CONFIG.version)\n', (5094, 5139), False, 'import os\n'), ((5173, 5229), 'os.path.join', 'os.path.join', (['CONFIG.log.checkpoint_path', 'CONFIG.version'], {}), '(CONFIG.log.checkpoint_path, CONFIG.version)\n', (5185, 5229), False, 'import os\n'), ((430, 450), 'utils.CONFIG.phase.lower', 'CONFIG.phase.lower', ([], {}), '()\n', (448, 450), False, 'from utils import CONFIG\n'), ((1061, 1175), 'utils.get_logger', 'utils.get_logger', (['CONFIG.log.logging_path', 'CONFIG.log.tensorboard_path'], {'logging_level': 'CONFIG.log.logging_level'}), '(CONFIG.log.logging_path, CONFIG.log.tensorboard_path,\n logging_level=CONFIG.log.logging_level)\n', (1077, 1175), False, 'import utils\n'), ((1289, 1401), 'dataloader.image_file.ImageFileTrain', 'ImageFileTrain', ([], {'alpha_dir': 'CONFIG.data.train_alpha', 'fg_dir': 'CONFIG.data.train_fg', 'bg_dir': 'CONFIG.data.train_bg'}), '(alpha_dir=CONFIG.data.train_alpha, fg_dir=CONFIG.data.\n train_fg, bg_dir=CONFIG.data.train_bg)\n', (1303, 1401), False, 'from dataloader.image_file import ImageFileTrain, ImageFileTest\n'), ((1507, 1631), 'dataloader.image_file.ImageFileTest', 'ImageFileTest', ([], {'alpha_dir': 'CONFIG.data.test_alpha', 'merged_dir': 'CONFIG.data.test_merged', 'trimap_dir': 'CONFIG.data.test_trimap'}), '(alpha_dir=CONFIG.data.test_alpha, merged_dir=CONFIG.data.\n test_merged, trimap_dir=CONFIG.data.test_trimap)\n', (1520, 1631), False, 'from dataloader.image_file import ImageFileTrain, ImageFileTest\n'), ((1732, 1778), 'dataloader.data_generator.DataGenerator', 'DataGenerator', (['train_image_file'], {'phase': '"""train"""'}), "(train_image_file, phase='train')\n", (1745, 1778), False, 'from dataloader.data_generator import DataGenerator\n'), ((1802, 1845), 'dataloader.data_generator.DataGenerator', 'DataGenerator', (['test_image_file'], {'phase': '"""val"""'}), "(test_image_file, phase='val')\n", (1815, 1845), False, 'from dataloader.data_generator import DataGenerator\n'), ((2158, 2348), 'torch.utils.data.DataLoader', 'DataLoader', (['train_dataset'], {'batch_size': 'CONFIG.model.batch_size', 'shuffle': '(train_sampler is None)', 'num_workers': 'CONFIG.data.workers', 'pin_memory': '(True)', 'sampler': 'train_sampler', 'drop_last': '(True)'}), '(train_dataset, batch_size=CONFIG.model.batch_size, shuffle=\n train_sampler is None, num_workers=CONFIG.data.workers, pin_memory=True,\n sampler=train_sampler, drop_last=True)\n', (2168, 2348), False, 'from torch.utils.data import DataLoader\n'), ((2597, 2625), 'dataloader.prefetcher.Prefetcher', 'Prefetcher', (['train_dataloader'], {}), '(train_dataloader)\n', (2607, 2625), False, 'from dataloader.prefetcher import Prefetcher\n'), ((2652, 2782), 'torch.utils.data.DataLoader', 'DataLoader', (['test_dataset'], {'batch_size': '(1)', 'shuffle': '(False)', 'num_workers': 'CONFIG.data.workers', 'sampler': 'test_sampler', 'drop_last': '(False)'}), '(test_dataset, batch_size=1, shuffle=False, num_workers=CONFIG.\n data.workers, sampler=test_sampler, drop_last=False)\n', (2662, 2782), False, 'from torch.utils.data import DataLoader\n'), ((2982, 3097), 
'trainer.Trainer', 'Trainer', ([], {'train_dataloader': 'train_dataloader', 'test_dataloader': 'test_dataloader', 'logger': 'logger', 'tb_logger': 'tb_logger'}), '(train_dataloader=train_dataloader, test_dataloader=test_dataloader,\n logger=logger, tb_logger=tb_logger)\n', (2989, 3097), False, 'from trainer import Trainer\n'), ((5306, 5358), 'os.path.join', 'os.path.join', (['CONFIG.test.alpha_path', 'CONFIG.version'], {}), '(CONFIG.test.alpha_path, CONFIG.version)\n', (5318, 5358), False, 'import os\n'), ((5422, 5436), 'pprint.pprint', 'pprint', (['CONFIG'], {}), '(CONFIG)\n', (5428, 5436), False, 'from pprint import pprint\n'), ((577, 610), 'torch.cuda.set_device', 'torch.cuda.set_device', (['CONFIG.gpu'], {}), '(CONFIG.gpu)\n', (598, 610), False, 'import torch\n'), ((623, 697), 'torch.distributed.init_process_group', 'torch.distributed.init_process_group', ([], {'backend': '"""nccl"""', 'init_method': '"""env://"""'}), "(backend='nccl', init_method='env://')\n", (659, 697), False, 'import torch\n'), ((730, 764), 'torch.distributed.get_world_size', 'torch.distributed.get_world_size', ([], {}), '()\n', (762, 764), False, 'import torch\n'), ((856, 895), 'utils.make_dir', 'utils.make_dir', (['CONFIG.log.logging_path'], {}), '(CONFIG.log.logging_path)\n', (870, 895), False, 'import utils\n'), ((908, 951), 'utils.make_dir', 'utils.make_dir', (['CONFIG.log.tensorboard_path'], {}), '(CONFIG.log.tensorboard_path)\n', (922, 951), False, 'import utils\n'), ((964, 1006), 'utils.make_dir', 'utils.make_dir', (['CONFIG.log.checkpoint_path'], {}), '(CONFIG.log.checkpoint_path)\n', (978, 1006), False, 'import utils\n'), ((1899, 1961), 'torch.utils.data.distributed.DistributedSampler', 'torch.utils.data.distributed.DistributedSampler', (['train_dataset'], {}), '(train_dataset)\n', (1946, 1961), False, 'import torch\n'), ((1989, 2050), 'torch.utils.data.distributed.DistributedSampler', 'torch.utils.data.distributed.DistributedSampler', (['test_dataset'], {}), '(test_dataset)\n', (2036, 2050), False, 'import torch\n'), ((3206, 3226), 'utils.CONFIG.phase.lower', 'CONFIG.phase.lower', ([], {}), '()\n', (3224, 3226), False, 'from utils import CONFIG\n'), ((3387, 3426), 'utils.make_dir', 'utils.make_dir', (['CONFIG.log.logging_path'], {}), '(CONFIG.log.logging_path)\n', (3401, 3426), False, 'import utils\n'), ((3471, 3557), 'utils.get_logger', 'utils.get_logger', (['CONFIG.log.logging_path'], {'logging_level': 'CONFIG.log.logging_level'}), '(CONFIG.log.logging_path, logging_level=CONFIG.log.\n logging_level)\n', (3487, 3557), False, 'import utils\n'), ((3614, 3722), 'dataloader.image_file.ImageFileTest', 'ImageFileTest', ([], {'alpha_dir': 'CONFIG.test.alpha', 'merged_dir': 'CONFIG.test.merged', 'trimap_dir': 'CONFIG.test.trimap'}), '(alpha_dir=CONFIG.test.alpha, merged_dir=CONFIG.test.merged,\n trimap_dir=CONFIG.test.trimap)\n', (3627, 3722), False, 'from dataloader.image_file import ImageFileTrain, ImageFileTest\n'), ((3822, 3896), 'dataloader.data_generator.DataGenerator', 'DataGenerator', (['test_image_file'], {'phase': '"""test"""', 'test_scale': 'CONFIG.test.scale'}), "(test_image_file, phase='test', test_scale=CONFIG.test.scale)\n", (3835, 3896), False, 'from dataloader.data_generator import DataGenerator\n'), ((3923, 4051), 'torch.utils.data.DataLoader', 'DataLoader', (['test_dataset'], {'batch_size': 'CONFIG.test.batch_size', 'shuffle': '(False)', 'num_workers': 'CONFIG.data.workers', 'drop_last': '(False)'}), '(test_dataset, batch_size=CONFIG.test.batch_size, shuffle=False,\n 
num_workers=CONFIG.data.workers, drop_last=False)\n', (3933, 4051), False, 'from torch.utils.data import DataLoader\n'), ((4214, 4253), 'tester.Tester', 'Tester', ([], {'test_dataloader': 'test_dataloader'}), '(test_dataloader=test_dataloader)\n', (4220, 4253), False, 'from tester import Tester\n'), ((4798, 4810), 'toml.load', 'toml.load', (['f'], {}), '(f)\n', (4807, 4810), False, 'import toml\n'), ((3340, 3378), 'utils.make_dir', 'utils.make_dir', (['CONFIG.test.alpha_path'], {}), '(CONFIG.test.alpha_path)\n', (3354, 3378), False, 'import utils\n')] |
#!/usr/bin/env python3
import click
import yaml
import re
import pprint
try:
from yaml import CLoader as Loader, CDumper as Dumper
except ImportError:
from yaml import Loader, Dumper
pp = pprint.PrettyPrinter(indent=2)
modRE = re.compile(r"https://www.curseforge.com/minecraft/mc-mods/([\w-]+)")
resourceRE = re.compile(r"https://www.curseforge.com/minecraft/texture-packs/([\w-]+)")
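# Illustrative input the two patterns above match (assumed mods.md content):
#   - [JEI](https://www.curseforge.com/minecraft/mc-mods/jei)
#   - [Faithful 32x](https://www.curseforge.com/minecraft/texture-packs/faithful-32x)
# modRE captures the mod slug ("jei"); resourceRE captures the texture-pack slug ("faithful-32x").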
@click.command()
@click.option('-i', '--input', default="mods.md", help='Input file')
@click.option('-u', '--update', default="packmaker.yml", help='The packmaker yaml file to update')
def update(input, update):
with open(input, 'r', encoding="utf-8") as i_file:
content = i_file.read()
mods = set(modRE.findall(content))
resourcepacks = set(resourceRE.findall(content))
#pp.pprint(mods)
#pp.pprint(resourcepacks)
with open(update, 'r', encoding="utf-8") as u_file:
manifest = yaml.load(u_file.read(), Loader=Loader)
#pp.pprint(manifest)
old_mods = {}
for mod_entry in manifest['mods']:
for (name, opts) in mod_entry.items():
if name in mods:
old_mods[name] = opts
else:
print(f"filtering out mod {name}")
# pp.pprint(old_mods)
for m in mods:
if m not in old_mods:
old_mods[m] = {}
print(f"added mod {m}")
manifest['mods'] = [{k: v} for (k, v) in old_mods.items()]
manifest['resourcepacks'] = [{r: {}} for r in resourcepacks]
# pp.pprint(manifest)
with open(update, 'w') as u_file:
yaml.dump(
manifest,
u_file,
Dumper=Dumper,
explicit_start=True,
width=100,
indent=2,
)
if __name__ == '__main__':
update()
| [
"yaml.dump",
"click.option",
"re.compile",
"pprint.PrettyPrinter",
"click.command"
] | [((200, 230), 'pprint.PrettyPrinter', 'pprint.PrettyPrinter', ([], {'indent': '(2)'}), '(indent=2)\n', (220, 230), False, 'import pprint\n'), ((240, 308), 're.compile', 're.compile', (['"""https://www.curseforge.com/minecraft/mc-mods/([\\\\w-]+)"""'], {}), "('https://www.curseforge.com/minecraft/mc-mods/([\\\\w-]+)')\n", (250, 308), False, 'import re\n'), ((322, 396), 're.compile', 're.compile', (['"""https://www.curseforge.com/minecraft/texture-packs/([\\\\w-]+)"""'], {}), "('https://www.curseforge.com/minecraft/texture-packs/([\\\\w-]+)')\n", (332, 396), False, 'import re\n'), ((399, 414), 'click.command', 'click.command', ([], {}), '()\n', (412, 414), False, 'import click\n'), ((416, 483), 'click.option', 'click.option', (['"""-i"""', '"""--input"""'], {'default': '"""mods.md"""', 'help': '"""Input file"""'}), "('-i', '--input', default='mods.md', help='Input file')\n", (428, 483), False, 'import click\n'), ((485, 587), 'click.option', 'click.option', (['"""-u"""', '"""--update"""'], {'default': '"""packmaker.yml"""', 'help': '"""The packmaker yaml file to update"""'}), "('-u', '--update', default='packmaker.yml', help=\n 'The packmaker yaml file to update')\n", (497, 587), False, 'import click\n'), ((1483, 1571), 'yaml.dump', 'yaml.dump', (['manifest', 'u_file'], {'Dumper': 'Dumper', 'explicit_start': '(True)', 'width': '(100)', 'indent': '(2)'}), '(manifest, u_file, Dumper=Dumper, explicit_start=True, width=100,\n indent=2)\n', (1492, 1571), False, 'import yaml\n')] |
#!/usr/bin/env python
"""This module contains all the logic required to create and delete
a CouchDB database for tor_async_couchdb's basic sample.
"""
import os.path
import sys
from tor_async_couchdb import installer
class CommandLineParser(installer.CommandLineParser):
def __init__(self):
        description = (
            "This utility is used to create and/or delete the CouchDB "
            "database for tor_async_couchdb's basic sample."
        )
installer.CommandLineParser.__init__(
self,
description,
"tor_async_couchdb_sample")
if __name__ == "__main__":
design_docs = os.path.join(os.path.abspath(
os.path.dirname(__file__)), 'design_docs')
seed_docs = os.path.join(os.path.abspath(
os.path.dirname(__file__)), 'seed_docs')
sys.exit(installer.main(CommandLineParser(), design_docs, seed_docs))
| [
"tor_async_couchdb.installer.CommandLineParser.__init__"
] | [((472, 559), 'tor_async_couchdb.installer.CommandLineParser.__init__', 'installer.CommandLineParser.__init__', (['self', 'description', '"""tor_async_couchdb_sample"""'], {}), "(self, description,\n 'tor_async_couchdb_sample')\n", (508, 559), False, 'from tor_async_couchdb import installer\n')] |
def leiaint(num):
from termcolor import colored
while True:
n = input(str(num))
if n.isnumeric():
n = int(n)
break
else:
print(colored('ERRO!! Digite um número inteiro válido.', 'red'))
return n
n = leiaint('Digite um número: ')
print(f'Você acabou de digitar o número {n}.') | [
"termcolor.colored"
] | [((195, 252), 'termcolor.colored', 'colored', (['"""ERRO!! Digite um número inteiro válido."""', '"""red"""'], {}), "('ERRO!! Digite um número inteiro válido.', 'red')\n", (202, 252), False, 'from termcolor import colored\n')] |
from typing import Collection
import pytest
from advent_of_code.utils import calc_product
@pytest.mark.fast
@pytest.mark.parametrize(
("input_list", "expected"),
[((), 0), ((2,), 2), ((2, 2), 4), ((5, 3, 4), 60), ((-1, 10), -10)],
)
def test_product_function_returns_correct_result(
input_list: Collection[int], expected: int
) -> None:
assert calc_product(input_list) == expected
| [
"pytest.mark.parametrize",
"advent_of_code.utils.calc_product"
] | [((113, 238), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (["('input_list', 'expected')", '[((), 0), ((2,), 2), ((2, 2), 4), ((5, 3, 4), 60), ((-1, 10), -10)]'], {}), "(('input_list', 'expected'), [((), 0), ((2,), 2), ((\n 2, 2), 4), ((5, 3, 4), 60), ((-1, 10), -10)])\n", (136, 238), False, 'import pytest\n'), ((364, 388), 'advent_of_code.utils.calc_product', 'calc_product', (['input_list'], {}), '(input_list)\n', (376, 388), False, 'from advent_of_code.utils import calc_product\n')] |
# 2019-11-18 00:38:19(JST)
import sys
# import collections
# import math
# from string import ascii_lowercase, ascii_uppercase, digits
# from bisect import bisect_left as bi_l, bisect_right as bi_r
# import itertools
# from functools import reduce
# import operator as op
# import re
# import heapq
# import array
# from scipy.misc import comb # (default: exact=False)
# import numpy as np
def main():
n, *p = [int(x) for x in sys.stdin.read().split()]
ans = 0
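    # For each left index l, sweep r to the right while keeping the largest and
    # second-largest values of p[l..r]; the answer sums the second-largest over
    # every subarray of length >= 2, for O(n^2) work overall.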
for l in range(n-1):
if p[l] < p[l+1]:
largest, second = p[l+1], p[l]
else:
largest, second = p[l], p[l+1]
ans += second
for r in range(l+2, n):
if p[r] > largest:
second, largest = largest, p[r]
elif p[r] > second:
second = p[r]
ans += second
print(ans)
if __name__ == "__main__":
main()
| [
"sys.stdin.read"
] | [((452, 468), 'sys.stdin.read', 'sys.stdin.read', ([], {}), '()\n', (466, 468), False, 'import sys\n')] |
import argparse
import re
import os
import sys
from pathlib import Path
from multiprocessing import Process
import numpy as np
# ensures Python 3.x
assert sys.version_info >= (3, 0)
RE_COORD = re.compile(r'-?\d+\.\d+')
def process_batch(data_file, dimension, start, batch_size):
done = 0
vocab_file = 'vocabulary-%05d.voc' % start
matrix_file = 'matrix-%05d.npy' % start
matrix = np.zeros((batch_size, dimension), dtype=np.float)
with open(data_file) as fin, open(vocab_file, 'w') as fout:
for i, line in enumerate(fin):
if i < start:
continue
# begin job
tokens = RE_COORD.findall(line)
coords = tokens[-dimension:]
word = line[:line.find(coords[0])]
print(word, file=fout)
vector = np.array([float(x) for x in coords], dtype=np.float)
row = i - start
matrix[row, :] = vector
# end job
done += 1
if done == batch_size: # finished batch
break
np.save(matrix_file, matrix)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Export embeddings from text to binary '
'NumPy arrays')
parser.add_argument('source', help='embeddings text file')
parser.add_argument('dimension', help='embedding dimension', type=int)
args = parser.parse_args()
source = Path(args.source)
# output files
vocab_file = source.with_suffix('.voc').name
matrix_file = source.with_suffix('.npy').name
dimension = args.dimension
print('computing matrix dimensions')
with source.open() as fin:
line = next(fin)
n_lines = sum((1 for _ in fin), 1)
batch_size = n_lines // os.cpu_count()
print('starting workers...')
start = 0
workers = []
batches = []
while start < n_lines:
remaining = n_lines - start
this_batch = batch_size if batch_size <= remaining else remaining
p = Process(target=process_batch,
args=(args.source, dimension, start, this_batch))
batches.append(start)
p.start()
workers.append(p)
start += batch_size
print('waiting...')
for p in workers:
p.join()
print('concatenating vocabulary...')
with open(vocab_file, 'w') as fout:
for batch in batches:
batch_file = Path('vocabulary-%05d.voc' % batch)
# matrix_file = 'matrix-%05d.npy' % batch
with batch_file.open() as fin:
for line in fin:
print(line.strip(), file=fout)
batch_file.unlink()
print('concatenating partial matrices...')
matrix = np.zeros((n_lines, dimension), dtype=np.float)
i = 0
for batch in batches:
batch_file = Path('matrix-%05d.npy' % batch)
partial = np.load(batch_file.as_posix())
matrix[i: i+len(partial), :] = partial
i += len(partial)
batch_file.unlink()
print('saving matrix...')
np.save(matrix_file, matrix)
print('finished')
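# Illustrative helper (an assumption, not part of the original script): load the two
# concatenated outputs back and check that they line up row for row.
def load_embeddings(voc_path, npy_path):
    with open(voc_path, encoding='utf-8') as fin:
        vocab = [ln.rstrip('\n') for ln in fin]
    matrix = np.load(npy_path)
    assert len(vocab) == matrix.shape[0]
    return vocab, matrix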
| [
"argparse.ArgumentParser",
"pathlib.Path",
"re.compile",
"multiprocessing.Process",
"numpy.zeros",
"os.cpu_count",
"numpy.save"
] | [((187, 214), 're.compile', 're.compile', (['"""-?\\\\d+\\\\.\\\\d+"""'], {}), "('-?\\\\d+\\\\.\\\\d+')\n", (197, 214), False, 'import re\n'), ((392, 441), 'numpy.zeros', 'np.zeros', (['(batch_size, dimension)'], {'dtype': 'np.float'}), '((batch_size, dimension), dtype=np.float)\n', (400, 441), True, 'import numpy as np\n'), ((1048, 1076), 'numpy.save', 'np.save', (['matrix_file', 'matrix'], {}), '(matrix_file, matrix)\n', (1055, 1076), True, 'import numpy as np\n'), ((1119, 1213), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Export embeddings from text to binary NumPy arrays"""'}), "(description=\n 'Export embeddings from text to binary NumPy arrays')\n", (1142, 1213), False, 'import argparse\n'), ((1427, 1444), 'pathlib.Path', 'Path', (['args.source'], {}), '(args.source)\n', (1431, 1444), False, 'from pathlib import Path\n'), ((2722, 2768), 'numpy.zeros', 'np.zeros', (['(n_lines, dimension)'], {'dtype': 'np.float'}), '((n_lines, dimension), dtype=np.float)\n', (2730, 2768), True, 'import numpy as np\n'), ((3042, 3070), 'numpy.save', 'np.save', (['matrix_file', 'matrix'], {}), '(matrix_file, matrix)\n', (3049, 3070), True, 'import numpy as np\n'), ((1763, 1777), 'os.cpu_count', 'os.cpu_count', ([], {}), '()\n', (1775, 1777), False, 'import os\n'), ((2009, 2088), 'multiprocessing.Process', 'Process', ([], {'target': 'process_batch', 'args': '(args.source, dimension, start, this_batch)'}), '(target=process_batch, args=(args.source, dimension, start, this_batch))\n', (2016, 2088), False, 'from multiprocessing import Process\n'), ((2826, 2857), 'pathlib.Path', 'Path', (["('matrix-%05d.npy' % batch)"], {}), "('matrix-%05d.npy' % batch)\n", (2830, 2857), False, 'from pathlib import Path\n'), ((2412, 2447), 'pathlib.Path', 'Path', (["('vocabulary-%05d.voc' % batch)"], {}), "('vocabulary-%05d.voc' % batch)\n", (2416, 2447), False, 'from pathlib import Path\n')] |
from django.db import models
# Create your models here.
class BlackPerson(models.Model):
name = models.CharField(max_length=512, default=None)
sex = models.CharField(max_length=512, default=None)
age = models.CharField(max_length=512, default=None)
native_place = models.CharField(max_length=512, default=None)
political_outlook = models.CharField(max_length=512, default=None)
email = models.CharField(max_length=512, default=None)
telephone = models.CharField(max_length=512, default=None)
id_card = models.CharField(max_length=512, default=None)
address = models.CharField(max_length=512, default=None)
domicile = models.CharField(max_length=512, default=None)
is_fugitive = models.CharField(max_length=512, default=None)
is_crime = models.CharField(max_length=512, default=None)
comment = models.CharField(max_length=512, default=None)
| [
"django.db.models.CharField"
] | [((109, 155), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(512)', 'default': 'None'}), '(max_length=512, default=None)\n', (125, 155), False, 'from django.db import models\n'), ((167, 213), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(512)', 'default': 'None'}), '(max_length=512, default=None)\n', (183, 213), False, 'from django.db import models\n'), ((225, 271), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(512)', 'default': 'None'}), '(max_length=512, default=None)\n', (241, 271), False, 'from django.db import models\n'), ((292, 338), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(512)', 'default': 'None'}), '(max_length=512, default=None)\n', (308, 338), False, 'from django.db import models\n'), ((364, 410), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(512)', 'default': 'None'}), '(max_length=512, default=None)\n', (380, 410), False, 'from django.db import models\n'), ((424, 470), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(512)', 'default': 'None'}), '(max_length=512, default=None)\n', (440, 470), False, 'from django.db import models\n'), ((488, 534), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(512)', 'default': 'None'}), '(max_length=512, default=None)\n', (504, 534), False, 'from django.db import models\n'), ((550, 596), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(512)', 'default': 'None'}), '(max_length=512, default=None)\n', (566, 596), False, 'from django.db import models\n'), ((612, 658), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(512)', 'default': 'None'}), '(max_length=512, default=None)\n', (628, 658), False, 'from django.db import models\n'), ((675, 721), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(512)', 'default': 'None'}), '(max_length=512, default=None)\n', (691, 721), False, 'from django.db import models\n'), ((741, 787), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(512)', 'default': 'None'}), '(max_length=512, default=None)\n', (757, 787), False, 'from django.db import models\n'), ((804, 850), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(512)', 'default': 'None'}), '(max_length=512, default=None)\n', (820, 850), False, 'from django.db import models\n'), ((866, 912), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(512)', 'default': 'None'}), '(max_length=512, default=None)\n', (882, 912), False, 'from django.db import models\n')] |
from rest_framework.response import Response
from rest_framework.request import Request
from rest_framework.decorators import api_view
from django.conf import settings
import random
import requests
import json
import opentracing
import logging
import six
tracer = settings.OPENTRACING_TRACER
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
@tracer.trace()
@api_view(http_method_names=["GET"])
def get_shopping_menu(request):
headers = {'host': 'localhost'}
inject_as_headers(tracer, request, headers)
res = requests.get("http://localhost:50051/style/",
headers=headers)
if res.status_code == 200:
return Response(res.json(), status=200)
else:
tracer.get_span(request).set_tag("error", "true")
return Response(status=res.status_code)
@tracer.trace()
@api_view(http_method_names=["POST"])
def order_shirts(request):
if random.randint(1, 5) == 5:
msg = "Random Service Unavailable!"
logging.warning(msg)
tracer.get_span(request).set_tag("error", "true")
return Response(msg, status=503)
data = json.loads(request.body)
style_name = data.get("styleName", None)
quantity = data.get("quantity", None)
if style_name and quantity:
res = requests.get(
"http://localhost:50051/style/" + style_name + "/make",
params={'quantity': quantity},
headers=inject_as_headers(tracer, request)
)
if res.status_code == 200:
return Response(res.json(), status=200)
else:
msg = "Failed to order shirts!"
logging.warning(msg)
tracer.get_span(request).set_tag("error", "true")
return Response(msg, status=res.status_code)
else:
tracer.get_span(request).set_tag("error", "true")
return Response("Missing field!", status=400)
def inject_as_headers(tracer, request, headers=None):
    # Accept an existing headers dict (as in get_shopping_menu) or build a new
    # one, and return it so callers can use the result directly.
    if headers is None:
        headers = {}
    if isinstance(request, Request):
        request = request._request
    span = tracer.get_span(request)
    text_carrier = {}
    tracer._tracer.inject(span.context, opentracing.Format.TEXT_MAP,
                          text_carrier)
    for k, v in six.iteritems(text_carrier):
        headers[k] = v
    return headers
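# Illustrative counterpart (assumed tracer setup): the service receiving these
# headers can rebuild the span context with the standard OpenTracing extract call:
#   span_ctx = tracer._tracer.extract(opentracing.Format.TEXT_MAP, dict(request.headers))
#   with tracer._tracer.start_active_span('child-op', child_of=span_ctx):
#       ...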
| [
"logging.basicConfig",
"logging.getLogger",
"json.loads",
"logging.warning",
"requests.get",
"rest_framework.response.Response",
"six.iteritems",
"rest_framework.decorators.api_view",
"random.randint"
] | [((294, 333), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (313, 333), False, 'import logging\n'), ((343, 370), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (360, 370), False, 'import logging\n'), ((390, 425), 'rest_framework.decorators.api_view', 'api_view', ([], {'http_method_names': "['GET']"}), "(http_method_names=['GET'])\n", (398, 425), False, 'from rest_framework.decorators import api_view\n'), ((852, 888), 'rest_framework.decorators.api_view', 'api_view', ([], {'http_method_names': "['POST']"}), "(http_method_names=['POST'])\n", (860, 888), False, 'from rest_framework.decorators import api_view\n'), ((552, 614), 'requests.get', 'requests.get', (['"""http://localhost:50051/style/"""'], {'headers': 'headers'}), "('http://localhost:50051/style/', headers=headers)\n", (564, 614), False, 'import requests\n'), ((1133, 1157), 'json.loads', 'json.loads', (['request.body'], {}), '(request.body)\n', (1143, 1157), False, 'import json\n'), ((2214, 2241), 'six.iteritems', 'six.iteritems', (['text_carrier'], {}), '(text_carrier)\n', (2227, 2241), False, 'import six\n'), ((800, 832), 'rest_framework.response.Response', 'Response', ([], {'status': 'res.status_code'}), '(status=res.status_code)\n', (808, 832), False, 'from rest_framework.response import Response\n'), ((923, 943), 'random.randint', 'random.randint', (['(1)', '(5)'], {}), '(1, 5)\n', (937, 943), False, 'import random\n'), ((1002, 1022), 'logging.warning', 'logging.warning', (['msg'], {}), '(msg)\n', (1017, 1022), False, 'import logging\n'), ((1096, 1121), 'rest_framework.response.Response', 'Response', (['msg'], {'status': '(503)'}), '(msg, status=503)\n', (1104, 1121), False, 'from rest_framework.response import Response\n'), ((1861, 1899), 'rest_framework.response.Response', 'Response', (['"""Missing field!"""'], {'status': '(400)'}), "('Missing field!', status=400)\n", (1869, 1899), False, 'from rest_framework.response import Response\n'), ((1638, 1658), 'logging.warning', 'logging.warning', (['msg'], {}), '(msg)\n', (1653, 1658), False, 'import logging\n'), ((1740, 1777), 'rest_framework.response.Response', 'Response', (['msg'], {'status': 'res.status_code'}), '(msg, status=res.status_code)\n', (1748, 1777), False, 'from rest_framework.response import Response\n')] |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Normalize runs of characters to a single character
Revision ID: 3<PASSWORD>
Revises: <PASSWORD>
Create Date: 2015-08-17 21:05:51.699639
"""
from alembic import op
revision = "3af8d0006ba"
down_revision = "<PASSWORD>"
def upgrade():
op.execute(
r""" CREATE OR REPLACE FUNCTION normalize_pep426_name(text)
RETURNS text AS
$$
SELECT lower(regexp_replace($1, '(\.|_|-)+', '-', 'ig'))
$$
LANGUAGE SQL
IMMUTABLE
RETURNS NULL ON NULL INPUT;
"""
)
op.execute("REINDEX INDEX project_name_pep426_normalized")
def downgrade():
op.execute(
r""" CREATE OR REPLACE FUNCTION normalize_pep426_name(text)
RETURNS text AS
$$
SELECT lower(regexp_replace($1, '(\.|_)', '-', 'ig'))
$$
LANGUAGE SQL
IMMUTABLE
RETURNS NULL ON NULL INPUT;
"""
)
op.execute("REINDEX INDEX project_name_pep426_normalized")
| [
"alembic.op.execute"
] | [((786, 1097), 'alembic.op.execute', 'op.execute', (['""" CREATE OR REPLACE FUNCTION normalize_pep426_name(text)\n RETURNS text AS\n $$\n SELECT lower(regexp_replace($1, \'(\\\\.|_|-)+\', \'-\', \'ig\'))\n $$\n LANGUAGE SQL\n IMMUTABLE\n RETURNS NULL ON NULL INPUT;\n """'], {}), '(\n """ CREATE OR REPLACE FUNCTION normalize_pep426_name(text)\n RETURNS text AS\n $$\n SELECT lower(regexp_replace($1, \'(\\\\.|_|-)+\', \'-\', \'ig\'))\n $$\n LANGUAGE SQL\n IMMUTABLE\n RETURNS NULL ON NULL INPUT;\n """\n )\n', (796, 1097), False, 'from alembic import op\n'), ((1106, 1164), 'alembic.op.execute', 'op.execute', (['"""REINDEX INDEX project_name_pep426_normalized"""'], {}), "('REINDEX INDEX project_name_pep426_normalized')\n", (1116, 1164), False, 'from alembic import op\n'), ((1188, 1496), 'alembic.op.execute', 'op.execute', (['""" CREATE OR REPLACE FUNCTION normalize_pep426_name(text)\n RETURNS text AS\n $$\n SELECT lower(regexp_replace($1, \'(\\\\.|_)\', \'-\', \'ig\'))\n $$\n LANGUAGE SQL\n IMMUTABLE\n RETURNS NULL ON NULL INPUT;\n """'], {}), '(\n """ CREATE OR REPLACE FUNCTION normalize_pep426_name(text)\n RETURNS text AS\n $$\n SELECT lower(regexp_replace($1, \'(\\\\.|_)\', \'-\', \'ig\'))\n $$\n LANGUAGE SQL\n IMMUTABLE\n RETURNS NULL ON NULL INPUT;\n """\n )\n', (1198, 1496), False, 'from alembic import op\n'), ((1505, 1563), 'alembic.op.execute', 'op.execute', (['"""REINDEX INDEX project_name_pep426_normalized"""'], {}), "('REINDEX INDEX project_name_pep426_normalized')\n", (1515, 1563), False, 'from alembic import op\n')] |
from django.contrib import admin
from django.urls import path, include
from . import views
urlpatterns = [
path('', views.index, name='index_map'),
path('help/', views.help, name='help'),
path('settings-configuration/', views.settings_config, name='settings_config'),
path('covid19-dashboard/', views.dashboard, name='dashboard'),
path('add-to-file-new-and-expire-old-covid19-locations/', views.Add_To_File_New_And_Expire_Old_Covid19_Locations, name='Add_To_File_New_And_Expire_Old_Covid19_Locations'),
path('cache-empty-add-geospatial-data-into-cache/', views.Cache_Failed_Add_Geospatial_Data_Into_Cache, name='Cache_Failed_Add_Geospatial_Data_Into_Cache'),
path('ajax-request/', views.AJAX_find_covid19_near_you, name='AJAX_find_covid19_near_you'),
] | [
"django.urls.path"
] | [((117, 156), 'django.urls.path', 'path', (['""""""', 'views.index'], {'name': '"""index_map"""'}), "('', views.index, name='index_map')\n", (121, 156), False, 'from django.urls import path, include\n'), ((166, 204), 'django.urls.path', 'path', (['"""help/"""', 'views.help'], {'name': '"""help"""'}), "('help/', views.help, name='help')\n", (170, 204), False, 'from django.urls import path, include\n'), ((214, 292), 'django.urls.path', 'path', (['"""settings-configuration/"""', 'views.settings_config'], {'name': '"""settings_config"""'}), "('settings-configuration/', views.settings_config, name='settings_config')\n", (218, 292), False, 'from django.urls import path, include\n'), ((302, 363), 'django.urls.path', 'path', (['"""covid19-dashboard/"""', 'views.dashboard'], {'name': '"""dashboard"""'}), "('covid19-dashboard/', views.dashboard, name='dashboard')\n", (306, 363), False, 'from django.urls import path, include\n'), ((373, 553), 'django.urls.path', 'path', (['"""add-to-file-new-and-expire-old-covid19-locations/"""', 'views.Add_To_File_New_And_Expire_Old_Covid19_Locations'], {'name': '"""Add_To_File_New_And_Expire_Old_Covid19_Locations"""'}), "('add-to-file-new-and-expire-old-covid19-locations/', views.\n Add_To_File_New_And_Expire_Old_Covid19_Locations, name=\n 'Add_To_File_New_And_Expire_Old_Covid19_Locations')\n", (377, 553), False, 'from django.urls import path, include\n'), ((553, 717), 'django.urls.path', 'path', (['"""cache-empty-add-geospatial-data-into-cache/"""', 'views.Cache_Failed_Add_Geospatial_Data_Into_Cache'], {'name': '"""Cache_Failed_Add_Geospatial_Data_Into_Cache"""'}), "('cache-empty-add-geospatial-data-into-cache/', views.\n Cache_Failed_Add_Geospatial_Data_Into_Cache, name=\n 'Cache_Failed_Add_Geospatial_Data_Into_Cache')\n", (557, 717), False, 'from django.urls import path, include\n'), ((717, 812), 'django.urls.path', 'path', (['"""ajax-request/"""', 'views.AJAX_find_covid19_near_you'], {'name': '"""AJAX_find_covid19_near_you"""'}), "('ajax-request/', views.AJAX_find_covid19_near_you, name=\n 'AJAX_find_covid19_near_you')\n", (721, 812), False, 'from django.urls import path, include\n')] |
import string
from collections import defaultdict
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import plotly.graph_objs as go
import plotly.offline as py
import regex as re
import seaborn as sns
from gensim import corpora, models
from plotly import tools
from wordcloud import STOPWORDS, ImageColorGenerator, WordCloud
from previs.data.badwords import BAD_WORDS
from previs.processors.TextProcessor import tag_part_of_speech
from previs.util.TextProcessingUtils import (CLEANED_TEXT_COL_NAME,
convert_text_col_to_corpus,
get_stopwords)
color = sns.color_palette()
py.init_notebook_mode(connected=True)
pd.options.mode.chained_assignment = None
pd.options.display.max_columns = 999
def plot_wordcloud(text, mask=None, max_words=200, max_font_size=100, figure_size=(24.0, 16.0),
title=None, title_size=40, image_color=False):
"""This method plots the word cloud for the text column
in the dataframe
@param text: This is the dataframe['text_col_name']
@param mask: This is not required.
    @param max_words: The maximum number of words which we will plot
@param max_font_size: The max font size
@param figure_size: Fig size
@param title: Title of the plot
@param title_size: Size of the title
@param image_color: Colour of the image
"""
stopwords = get_stopwords()
wordcloud = WordCloud(background_color='black',
stopwords=stopwords,
max_words=max_words,
max_font_size=max_font_size,
random_state=42,
width=800,
height=400,
mask=mask)
wordcloud.generate(str(text))
plt.figure(figsize=figure_size)
if image_color:
image_colors = ImageColorGenerator(mask)
plt.imshow(wordcloud.recolor(color_func=image_colors), interpolation="bilinear")
plt.title(title, fontdict={'size': title_size, 'verticalalignment': 'bottom'})
else:
plt.imshow(wordcloud)
plt.title(title, fontdict={'size': title_size, 'color': 'black', 'verticalalignment': 'bottom'})
plt.axis('off')
plt.tight_layout()
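# Illustrative usage (hypothetical dataframe/column names):
#   plot_wordcloud(df["review_text"], max_words=150, title="Most frequent words")
# A numpy image array can be passed as `mask` to shape the cloud and, together with
# image_color=True, to recolour it from the image.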
def generate_lsi_features(dataframe, text_col_name,
num_topics, batchsize):
"""This method cleans the dataset and generates the LSI
features using Gensim. Refer to README.md for more details on this.
@param dataframe: Dataframe name
@param text_col_name: Name of the text data column
@param num_topics: Number of topics which it will generate (10-20)
@param batchsize: Batchsize it will use to generate per iteration.
@return: the cleaned dataframe along with the topics and the lsi model
"""
res_lists = {}
for i in range(num_topics):
res_lists[i] = []
corpus, texts, dictionary = convert_text_col_to_corpus(dataframe, text_col_name, batchsize)
tfidf = models.TfidfModel(corpus)
corpus_tfidf = tfidf[corpus]
lsi_model = models.LsiModel(corpus_tfidf, id2word=dictionary, num_topics=num_topics)
corpus_lsi = lsi_model[corpus_tfidf]
for i in range(num_topics):
for attr in corpus_lsi:
res_lists[i].append(attr[i][1])
for i in range(batchsize, len(texts), batchsize):
text = texts[i: i + batchsize]
dictionary = corpora.Dictionary(text)
corpus = [dictionary.doc2bow(txt) for txt in text]
tfidf = models.TfidfModel(corpus)
corpus_tfidf = tfidf[corpus]
lsi_model.add_documents(corpus_tfidf)
corpus_lsi = lsi_model[corpus_tfidf]
for i in range(num_topics):
for attr in corpus_lsi:
res_lists[i].append(attr[i][1])
for i in range(num_topics):
dataframe[text_col_name + 'attr_' + str(i + 1)] = res_lists[i]
return dataframe, lsi_model
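# Illustrative usage (hypothetical dataframe/column names): with num_topics=3 the call
# below appends columns "review_textattr_1" .. "review_textattr_3" holding per-document
# LSI topic weights and also returns the fitted gensim LSI model.
#   df, lsi_model = generate_lsi_features(df, "review_text", num_topics=3, batchsize=50)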
def generate_ngrams(text, n_gram=1):
    """This method is used to generate the n-grams which
    are plotted by the plot_n_grams method
@param text: The string text
@param n_gram: Number of grams (1,2,3..)
@return: List of n-grams
"""
token = [token for token in text.lower().split(" ") if token != "" if token not in STOPWORDS]
ngrams = zip(*[token[i:] for i in range(n_gram)])
return [" ".join(ngram) for ngram in ngrams]
def horizontal_bar_chart(df, color):
"""Plots a bar chart of the word and word count.
    This is used when plotting the n-grams of a dataframe text
column.
@param df: The dataframe name
@param color: The color used
@return: Trace which will be used to plot the graph
"""
trace = go.Bar(
y=df["word"].values[::-1],
x=df["wordcount"].values[::-1],
showlegend=False,
orientation='h',
marker=dict(
color=color,
),
)
return trace
def plot_n_grams(dataframe, text_col_name, plot_title, n_gram):
    """This method plots the different n-grams for visualisation purposes.
    @param dataframe: The name of the dataframe
    @param text_col_name: The text column name
    @param plot_title: The plot title
    @param n_gram: The order of the n-gram (1,2,3...)
"""
freq_dict = defaultdict(int)
for sent in dataframe[text_col_name]:
for word in generate_ngrams(sent, n_gram):
freq_dict[word] += 1
fd_sorted = pd.DataFrame(sorted(freq_dict.items(), key=lambda x: x[1])[::-1])
fd_sorted.columns = ["word", "wordcount"]
trace = horizontal_bar_chart(fd_sorted.head(50), 'blue')
fig = tools.make_subplots(rows=1, cols=1, vertical_spacing=0.04,
subplot_titles=[plot_title])
fig.append_trace(trace, 1, 1)
fig['layout'].update(height=1200, width=900, paper_bgcolor='rgb(233,233,233)', title="Word Count Plots")
py.iplot(fig, filename='word-plots')
def word_count(text):
"""Returns the number of words in a text string
after removing the stopwords from it.
@param text: The string of text
@return: Length of the string in int
"""
try:
text = text.lower()
regex = re.compile('[' + re.escape(string.punctuation) + '0-9\\r\\t\\n]')
txt = regex.sub(" ", text)
words = [w for w in txt.split(" ") if w not in STOPWORDS and len(w) > 3]
return len(words)
    except Exception:
        # fall back to 0 if the text cannot be processed
        return 0
def generate_features_from_text_column(dataframe, text_col_name):
    """This method performs the various processing and feature generation
tasks for the textual dataset. Please refer to README.md for the operations
and how to use this method.
@param dataframe: The name of the dataframe.
@param text_col_name: The columns name of the textual data
"""
dataframe["num_stopwords"] = dataframe[text_col_name].apply(
lambda x: len([w for w in str(x).lower().split() if w in STOPWORDS]))
dataframe["num_punctuations"] = dataframe[text_col_name].apply(
lambda x: len([c for c in str(x) if c in string.punctuation]))
dataframe["num_words_upper"] = dataframe[text_col_name].apply(
lambda x: len([w for w in str(x).split() if w.isupper()]))
dataframe["num_words_title"] = dataframe[text_col_name].apply(
lambda x: len([w for w in str(x).split() if w.istitle()]))
dataframe["mean_word_len"] = dataframe[text_col_name].apply(
lambda x: np.mean([len(w) for w in str(x).split()]))
dataframe['num_exclamation_marks'] = dataframe[text_col_name].apply(
lambda x: x.count('!'))
dataframe['num_question_marks'] = dataframe[text_col_name].apply(
lambda x: x.count('?'))
dataframe['num_symbols'] = dataframe[text_col_name].apply(
lambda x: sum(x.count(w) for w in '*&$%'))
dataframe['num_unique_words'] = dataframe[text_col_name].apply(
lambda x: len(set(w for w in x.split())))
dataframe, _ = generate_lsi_features(dataframe, text_col_name, 3, 50)
text_col_name = CLEANED_TEXT_COL_NAME
dataframe["num_words"] = dataframe[text_col_name].apply(
lambda x: word_count(x))
dataframe["num_unique_words"] = dataframe[text_col_name].apply(
lambda x: len(set(str(x).split())))
dataframe["num_chars"] = dataframe[text_col_name].apply(
lambda x: len(str(x)))
dataframe['words_vs_unique'] = dataframe['num_unique_words'] / dataframe['num_words']
dataframe["badwordcount"] = dataframe[text_col_name].apply(
lambda x: sum(x.count(w) for w in BAD_WORDS))
dataframe["normword_badwords"] = dataframe["badwordcount"] / dataframe['num_words']
dataframe['nouns'], dataframe['adjectives'], dataframe['verbs'] = zip(*dataframe[text_col_name].apply(
lambda text: tag_part_of_speech(text)))
dataframe['nouns_vs_length'] = dataframe['nouns'] / dataframe['num_words']
dataframe['adjectives_vs_length'] = dataframe['adjectives'] / dataframe['num_words']
dataframe['verbs_vs_length'] = dataframe['verbs'] / dataframe['num_words']
dataframe['nouns_vs_words'] = dataframe['nouns'] / dataframe['num_words']
dataframe['adjectives_vs_words'] = dataframe['adjectives'] / dataframe['num_words']
dataframe['verbs_vs_words'] = dataframe['verbs'] / dataframe['num_words']
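# Illustrative usage (added sketch; the dataframe and column names are
# hypothetical): the call mutates the dataframe in place and also expects the
# cleaned-text column named by CLEANED_TEXT_COL_NAME to be present.
#   generate_features_from_text_column(reviews_df, 'review_text')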
| [
"gensim.models.LsiModel",
"plotly.offline.iplot",
"previs.util.TextProcessingUtils.convert_text_col_to_corpus",
"wordcloud.ImageColorGenerator",
"matplotlib.pyplot.imshow",
"plotly.tools.make_subplots",
"seaborn.color_palette",
"gensim.corpora.Dictionary",
"plotly.offline.init_notebook_mode",
"matplotlib.pyplot.axis",
"gensim.models.TfidfModel",
"regex.escape",
"previs.processors.TextProcessor.tag_part_of_speech",
"previs.util.TextProcessingUtils.get_stopwords",
"matplotlib.pyplot.title",
"wordcloud.WordCloud",
"matplotlib.pyplot.figure",
"collections.defaultdict",
"matplotlib.pyplot.tight_layout"
] | [((663, 682), 'seaborn.color_palette', 'sns.color_palette', ([], {}), '()\n', (680, 682), True, 'import seaborn as sns\n'), ((684, 721), 'plotly.offline.init_notebook_mode', 'py.init_notebook_mode', ([], {'connected': '(True)'}), '(connected=True)\n', (705, 721), True, 'import plotly.offline as py\n'), ((1433, 1448), 'previs.util.TextProcessingUtils.get_stopwords', 'get_stopwords', ([], {}), '()\n', (1446, 1448), False, 'from previs.util.TextProcessingUtils import CLEANED_TEXT_COL_NAME, convert_text_col_to_corpus, get_stopwords\n'), ((1465, 1631), 'wordcloud.WordCloud', 'WordCloud', ([], {'background_color': '"""black"""', 'stopwords': 'stopwords', 'max_words': 'max_words', 'max_font_size': 'max_font_size', 'random_state': '(42)', 'width': '(800)', 'height': '(400)', 'mask': 'mask'}), "(background_color='black', stopwords=stopwords, max_words=\n max_words, max_font_size=max_font_size, random_state=42, width=800,\n height=400, mask=mask)\n", (1474, 1631), False, 'from wordcloud import STOPWORDS, ImageColorGenerator, WordCloud\n'), ((1843, 1874), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figure_size'}), '(figsize=figure_size)\n', (1853, 1874), True, 'import matplotlib.pyplot as plt\n'), ((2269, 2284), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (2277, 2284), True, 'import matplotlib.pyplot as plt\n'), ((2289, 2307), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (2305, 2307), True, 'import matplotlib.pyplot as plt\n'), ((2970, 3033), 'previs.util.TextProcessingUtils.convert_text_col_to_corpus', 'convert_text_col_to_corpus', (['dataframe', 'text_col_name', 'batchsize'], {}), '(dataframe, text_col_name, batchsize)\n', (2996, 3033), False, 'from previs.util.TextProcessingUtils import CLEANED_TEXT_COL_NAME, convert_text_col_to_corpus, get_stopwords\n'), ((3046, 3071), 'gensim.models.TfidfModel', 'models.TfidfModel', (['corpus'], {}), '(corpus)\n', (3063, 3071), False, 'from gensim import corpora, models\n'), ((3121, 3193), 'gensim.models.LsiModel', 'models.LsiModel', (['corpus_tfidf'], {'id2word': 'dictionary', 'num_topics': 'num_topics'}), '(corpus_tfidf, id2word=dictionary, num_topics=num_topics)\n', (3136, 3193), False, 'from gensim import corpora, models\n'), ((5304, 5320), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (5315, 5320), False, 'from collections import defaultdict\n'), ((5647, 5739), 'plotly.tools.make_subplots', 'tools.make_subplots', ([], {'rows': '(1)', 'cols': '(1)', 'vertical_spacing': '(0.04)', 'subplot_titles': '[plot_title]'}), '(rows=1, cols=1, vertical_spacing=0.04, subplot_titles=[\n plot_title])\n', (5666, 5739), False, 'from plotly import tools\n'), ((5912, 5948), 'plotly.offline.iplot', 'py.iplot', (['fig'], {'filename': '"""word-plots"""'}), "(fig, filename='word-plots')\n", (5920, 5948), True, 'import plotly.offline as py\n'), ((1918, 1943), 'wordcloud.ImageColorGenerator', 'ImageColorGenerator', (['mask'], {}), '(mask)\n', (1937, 1943), False, 'from wordcloud import STOPWORDS, ImageColorGenerator, WordCloud\n'), ((2041, 2119), 'matplotlib.pyplot.title', 'plt.title', (['title'], {'fontdict': "{'size': title_size, 'verticalalignment': 'bottom'}"}), "(title, fontdict={'size': title_size, 'verticalalignment': 'bottom'})\n", (2050, 2119), True, 'import matplotlib.pyplot as plt\n'), ((2138, 2159), 'matplotlib.pyplot.imshow', 'plt.imshow', (['wordcloud'], {}), '(wordcloud)\n', (2148, 2159), True, 'import matplotlib.pyplot as plt\n'), ((2168, 2268), 'matplotlib.pyplot.title', 
'plt.title', (['title'], {'fontdict': "{'size': title_size, 'color': 'black', 'verticalalignment': 'bottom'}"}), "(title, fontdict={'size': title_size, 'color': 'black',\n 'verticalalignment': 'bottom'})\n", (2177, 2268), True, 'import matplotlib.pyplot as plt\n'), ((3457, 3481), 'gensim.corpora.Dictionary', 'corpora.Dictionary', (['text'], {}), '(text)\n', (3475, 3481), False, 'from gensim import corpora, models\n'), ((3557, 3582), 'gensim.models.TfidfModel', 'models.TfidfModel', (['corpus'], {}), '(corpus)\n', (3574, 3582), False, 'from gensim import corpora, models\n'), ((6222, 6251), 'regex.escape', 're.escape', (['string.punctuation'], {}), '(string.punctuation)\n', (6231, 6251), True, 'import regex as re\n'), ((8770, 8794), 'previs.processors.TextProcessor.tag_part_of_speech', 'tag_part_of_speech', (['text'], {}), '(text)\n', (8788, 8794), False, 'from previs.processors.TextProcessor import tag_part_of_speech\n')] |
"""Cobertura aggregator tests"""
import os
import unittest
import urllib2
from mock import patch, MagicMock
from base import CoberturaJSONAggregator
API_SETTINGS = {
CoberturaJSONAggregator.USERNAME_KEY: "test",
CoberturaJSONAggregator.API_TOKEN_KEY: "test",
CoberturaJSONAggregator.DOMAIN_KEY: "http://localhost",
CoberturaJSONAggregator.JOBS_KEY: ["test"],
CoberturaJSONAggregator.TARGETS_KEY: [
'club',
'cron',
],
}
class CoberturaJSONAggregatorTests(unittest.TestCase):
"""Test cases for aggregating Cobertura JSON via Jenkins REST API"""
def setUp(self):
self.test_name = 'test'
self.aggregator = CoberturaJSONAggregator(self.test_name, API_SETTINGS)
fixture_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
'fixtures/cobertura.json'
)
with open(fixture_path) as fixture_file:
self._cobertura_json = fixture_file.read()
@patch('base.urllib2.urlopen')
def test_report_generator(self, urlopen_mock):
urlopen_mock().read.return_value = self._cobertura_json
self.aggregator.generate_report()
expected_result = [
['club', '67.50%', '62.50%'],
['cron', '8.15%', '6.25%']
]
self.assertEqual(self.aggregator._report, expected_result)
@patch('base.urllib2.urlopen')
@patch('base.LOGGER')
def test_http_error(self, logger_mock, urlopen_mock):
"""Test urllib2.HTTPError exceptions"""
log = []
logger_mock.error.side_effect = log.append
logger_mock.info.side_effect = log.append
urlopen_mock.side_effect = urllib2.HTTPError(
"test",
404,
"test",
{},
MagicMock()
)
self.aggregator.generate_report()
http_error_results = [
'http://localhost/job/test/lastSuccessfulBuild/cobertura/api/json?depth=4\nHTTP error: HTTP Error 404: test'
]
self.assertEqual(log, http_error_results)
@patch('base.urllib2.urlopen')
@patch('base.LOGGER')
def test_url_error(self, logger_mock, urlopen_mock):
"""Test urllib2.URLError exceptions"""
log = []
logger_mock.error.side_effect = log.append
logger_mock.info.side_effect = log.append
urlerror_results = [
'http://localhost/job/test/lastSuccessfulBuild/cobertura/api/json?depth=4\nURL error: URL Error'
]
urlopen_mock.side_effect = urllib2.URLError("URL Error")
self.aggregator.generate_report()
self.assertEqual(log, urlerror_results)
@patch('base.urllib2.urlopen')
@patch('base.LOGGER')
def test_simplejson_error(self, logger_mock, urlopen_mock):
"""Test simplejson.loads error"""
log = []
logger_mock.error.side_effect = log.append
logger_mock.info.side_effect = log.append
urlopen_mock().read.return_value = '{'
self.aggregator.generate_report()
simplejson_error = [
"{\nCobertura URL did not return valid json: Expecting property name enclosed in double quotes or '}': line 1 column 2 (char 1)"
]
self.assertEqual(log, simplejson_error)
| [
"mock.patch",
"urllib2.URLError",
"base.CoberturaJSONAggregator",
"os.path.abspath",
"mock.MagicMock"
] | [((974, 1003), 'mock.patch', 'patch', (['"""base.urllib2.urlopen"""'], {}), "('base.urllib2.urlopen')\n", (979, 1003), False, 'from mock import patch, MagicMock\n'), ((1353, 1382), 'mock.patch', 'patch', (['"""base.urllib2.urlopen"""'], {}), "('base.urllib2.urlopen')\n", (1358, 1382), False, 'from mock import patch, MagicMock\n'), ((1388, 1408), 'mock.patch', 'patch', (['"""base.LOGGER"""'], {}), "('base.LOGGER')\n", (1393, 1408), False, 'from mock import patch, MagicMock\n'), ((2055, 2084), 'mock.patch', 'patch', (['"""base.urllib2.urlopen"""'], {}), "('base.urllib2.urlopen')\n", (2060, 2084), False, 'from mock import patch, MagicMock\n'), ((2090, 2110), 'mock.patch', 'patch', (['"""base.LOGGER"""'], {}), "('base.LOGGER')\n", (2095, 2110), False, 'from mock import patch, MagicMock\n'), ((2643, 2672), 'mock.patch', 'patch', (['"""base.urllib2.urlopen"""'], {}), "('base.urllib2.urlopen')\n", (2648, 2672), False, 'from mock import patch, MagicMock\n'), ((2678, 2698), 'mock.patch', 'patch', (['"""base.LOGGER"""'], {}), "('base.LOGGER')\n", (2683, 2698), False, 'from mock import patch, MagicMock\n'), ((669, 722), 'base.CoberturaJSONAggregator', 'CoberturaJSONAggregator', (['self.test_name', 'API_SETTINGS'], {}), '(self.test_name, API_SETTINGS)\n', (692, 722), False, 'from base import CoberturaJSONAggregator\n'), ((2517, 2546), 'urllib2.URLError', 'urllib2.URLError', (['"""URL Error"""'], {}), "('URL Error')\n", (2533, 2546), False, 'import urllib2\n'), ((1773, 1784), 'mock.MagicMock', 'MagicMock', ([], {}), '()\n', (1782, 1784), False, 'from mock import patch, MagicMock\n'), ((788, 813), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (803, 813), False, 'import os\n')] |
from pyfenstein3d.engine import Server
from pyfenstein3d.game import Command
from pyfenstein3d.game import Screen
from pyfenstein3d.game import Game
from pyfenstein3d.game import Image
command = Command()
image = Image()
screen = Screen(image)
server = Server()
game = Game(command, screen, server)
game.start()
| [
"pyfenstein3d.game.Game",
"pyfenstein3d.engine.Server",
"pyfenstein3d.game.Command",
"pyfenstein3d.game.Image",
"pyfenstein3d.game.Screen"
] | [((195, 204), 'pyfenstein3d.game.Command', 'Command', ([], {}), '()\n', (202, 204), False, 'from pyfenstein3d.game import Command\n'), ((213, 220), 'pyfenstein3d.game.Image', 'Image', ([], {}), '()\n', (218, 220), False, 'from pyfenstein3d.game import Image\n'), ((230, 243), 'pyfenstein3d.game.Screen', 'Screen', (['image'], {}), '(image)\n', (236, 243), False, 'from pyfenstein3d.game import Screen\n'), ((253, 261), 'pyfenstein3d.engine.Server', 'Server', ([], {}), '()\n', (259, 261), False, 'from pyfenstein3d.engine import Server\n'), ((269, 298), 'pyfenstein3d.game.Game', 'Game', (['command', 'screen', 'server'], {}), '(command, screen, server)\n', (273, 298), False, 'from pyfenstein3d.game import Game\n')] |
import datetime
import glob
import os
import os.path
import pathlib
import sqlite3
import sys
import time
from datetime import date
from sqlite3 import Error
from secondary import take_input_rider, take_input_horse, show_rider_data, show_horse_data
##############################################
# _ _ _ #
# __| | __ _| |_ __ _| |__ __ _ ___ ___ #
# / _` |/ _` | __/ _` | '_ \ / _` / __|/ _ \ #
#| (_| | (_| | || (_| | |_) | (_| \__ \ __/ #
# \__,_|\__,_|\__\__,_|_.__/ \__,_|___/\___| #
# #
##############################################
############################################################
# CHEAT SHEET OF DATA TYPES #
#----------------------------------------------------------#
# NULL - The Value is a null value (NULL) #
# INTEGER - The Value is a numeric value (INT) #
# REAL - The Value is a float value (FLOAT) #
# TEXT - The Value is a text string (STR) #
# BLOB - The Value is a blob of data i.e. Binary #
# used to store images and files (BYTES) #
############################################################
def show_help_db():
print("\nYou can: ")
print("1. Add a new database")
print("2. List existing databases")
print("3. Connect to an existing database")
print("4. Delete a database")
print("5. Quit")
print("\n")
def show_help_table():
print("\nYou can: ")
print("1. Add a rider's data to the database")
print("2. Add a horses' data to the database")
print("3. Show rider data")
print("4. Show horse data")
print("5. Edit a rider's data")
print("6. Edit a horse's data")
print("7. Delete a row of data")
print("8. Show data in the database")
print("9. Return to the database selection")
print("10. Show this help message")
print("11. Quit")
print("\n")
def main():
db_path = pathlib.Path(sys.argv[0]).resolve()
db_path = db_path.parent / ".." / "databases"
if not db_path.exists():
db_path.mkdir()
os.chdir(db_path)
print("\nCurrent working directory: ")
print(db_path)
    db_connected = False
    while True:
while not db_connected:
file_list = os.listdir(db_path)
show_help_db()
db = input("what do you want to do: ").lower()
if db in ["create database", "1", "add database"]:
dbname = input("What do you want to call the database: ")
conn = sqlite3.connect(dbname + ".db") #connect to selected database
print("successfully created database")
db_connected = True
elif db in ["2", "check", "check databases", "check db", "list databases", "list db"]:
print("Databases: ")
for file in file_list:
path = pathlib.Path(file)
if path.suffix == ".db":
print(path.stem) #check the file list for the suffix .db and then prints it without the .db
elif db in ["3", "connect to database"]:
print("Databases: ")
for file in file_list:
path = pathlib.Path(file)
if path.suffix == ".db":
print(path.stem)
dbname = input("\nWhich database do you want to connect to: ")
dbname = (dbname + ".db")
if dbname not in file_list:
print ("No such database exists ")
else:
conn = sqlite3.connect(dbname)
print("connecting...")
                    time.sleep(0.5)
print("Successfully connected to database")
db_connected = True
elif db in ["4", "delete", "delete database"]:
print("Databases: ")
for file in file_list:
path = pathlib.Path(file)
if path.suffix == ".db":
print(path.stem)
print("\nIf you would like to cancel your deletion, just hit return")
base = input("which database would you like to delete: ")
dbname = (base + ".db")
if base in [""]:
print("cancelling deletion")
elif dbname not in file_list:
print ("No such database exists ")
else:
print("deleting...")
time.sleep(0.5)
os.remove (base + ".db")
print("Successfully deleted " + base + ".db")
elif db in ["5", "quit", "exit", "q"]:
sys.exit()
else:
print("Invalid command")
print(dbname)
show_help_table()
do = input("What would you like to do? ").lower()
if do in ["add rider", "1"]:
take_input_rider(conn)
elif do in ["add horse", "2"]:
take_input_horse(conn)
elif do in ["show rider data", "show rider", "3"]:
show_rider_data(conn)
elif do in ["show horse data", "show horse", "4"]:
show_horse_data(conn)
elif do in ["edit rider", "5"]:
pass
elif do in ["edit horse", "6"]:
pass
elif do in ["delete row", "delete data", "delete", "7"]:
print("sorry, that feature doesn't yet exist")
elif do in ["show row", "show data", "show", "8"]:
print("sorry, that feature doesn't yet exist")
elif do in ["return", "return to database list", "database list", "9"]:
db_connected = False
elif do in ["help", "10"]:
show_help_table()
elif do in ["quit", "11"]:
sys.exit()
else:
print("Invalid command")
if __name__ == '__main__':
main()
| [
"os.listdir",
"sqlite3.connect",
"secondary.show_rider_data",
"pathlib.Path",
"secondary.take_input_rider",
"secondary.show_horse_data",
"time.sleep",
"os.chdir",
"sys.exit",
"secondary.take_input_horse",
"os.remove"
] | [((2140, 2157), 'os.chdir', 'os.chdir', (['db_path'], {}), '(db_path)\n', (2148, 2157), False, 'import os\n'), ((1997, 2022), 'pathlib.Path', 'pathlib.Path', (['sys.argv[0]'], {}), '(sys.argv[0])\n', (2009, 2022), False, 'import pathlib\n'), ((2322, 2341), 'os.listdir', 'os.listdir', (['db_path'], {}), '(db_path)\n', (2332, 2341), False, 'import os\n'), ((5022, 5044), 'secondary.take_input_rider', 'take_input_rider', (['conn'], {}), '(conn)\n', (5038, 5044), False, 'from secondary import take_input_rider, take_input_horse, show_rider_data, show_horse_data\n'), ((2589, 2620), 'sqlite3.connect', 'sqlite3.connect', (["(dbname + '.db')"], {}), "(dbname + '.db')\n", (2604, 2620), False, 'import sqlite3\n'), ((5096, 5118), 'secondary.take_input_horse', 'take_input_horse', (['conn'], {}), '(conn)\n', (5112, 5118), False, 'from secondary import take_input_rider, take_input_horse, show_rider_data, show_horse_data\n'), ((5190, 5211), 'secondary.show_rider_data', 'show_rider_data', (['conn'], {}), '(conn)\n', (5205, 5211), False, 'from secondary import take_input_rider, take_input_horse, show_rider_data, show_horse_data\n'), ((2946, 2964), 'pathlib.Path', 'pathlib.Path', (['file'], {}), '(file)\n', (2958, 2964), False, 'import pathlib\n'), ((5283, 5304), 'secondary.show_horse_data', 'show_horse_data', (['conn'], {}), '(conn)\n', (5298, 5304), False, 'from secondary import take_input_rider, take_input_horse, show_rider_data, show_horse_data\n'), ((3283, 3301), 'pathlib.Path', 'pathlib.Path', (['file'], {}), '(file)\n', (3295, 3301), False, 'import pathlib\n'), ((3657, 3680), 'sqlite3.connect', 'sqlite3.connect', (['dbname'], {}), '(dbname)\n', (3672, 3680), False, 'import sqlite3\n'), ((3744, 3759), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (3754, 3759), False, 'import time\n'), ((4028, 4046), 'pathlib.Path', 'pathlib.Path', (['file'], {}), '(file)\n', (4040, 4046), False, 'import pathlib\n'), ((4794, 4804), 'sys.exit', 'sys.exit', ([], {}), '()\n', (4802, 4804), False, 'import sys\n'), ((4599, 4614), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (4609, 4614), False, 'import time\n'), ((4635, 4658), 'os.remove', 'os.remove', (["(base + '.db')"], {}), "(base + '.db')\n", (4644, 4658), False, 'import os\n'), ((5886, 5896), 'sys.exit', 'sys.exit', ([], {}), '()\n', (5894, 5896), False, 'import sys\n')] |
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2017-01-27 15:37
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('views', '0004_refactoring'),
]
operations = [
migrations.AlterModelOptions(
name='view',
options={'ordering': ('key',), 'verbose_name': 'View', 'verbose_name_plural': 'Views'},
),
migrations.AlterField(
model_name='view',
name='comment',
field=models.TextField(blank=True, help_text='Additional information about this view.', null=True, verbose_name='Comment'),
),
migrations.AlterField(
model_name='view',
name='key',
field=models.SlugField(blank=True, help_text='The internal identifier of this view. The URI will be generated from this key.', max_length=128, null=True, verbose_name='Key'),
),
migrations.AlterField(
model_name='view',
name='uri',
field=models.URLField(blank=True, help_text='The Uniform Resource Identifier of this view (auto-generated).', max_length=640, null=True, verbose_name='URI'),
),
migrations.AlterField(
model_name='view',
name='uri_prefix',
field=models.URLField(blank=True, help_text='The prefix for the URI of this view.', max_length=256, null=True, verbose_name='URI Prefix'),
),
]
| [
"django.db.migrations.AlterModelOptions",
"django.db.models.SlugField",
"django.db.models.URLField",
"django.db.models.TextField"
] | [((288, 421), 'django.db.migrations.AlterModelOptions', 'migrations.AlterModelOptions', ([], {'name': '"""view"""', 'options': "{'ordering': ('key',), 'verbose_name': 'View', 'verbose_name_plural': 'Views'}"}), "(name='view', options={'ordering': ('key',),\n 'verbose_name': 'View', 'verbose_name_plural': 'Views'})\n", (316, 421), False, 'from django.db import migrations, models\n'), ((562, 688), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'help_text': '"""Additional information about this view."""', 'null': '(True)', 'verbose_name': '"""Comment"""'}), "(blank=True, help_text=\n 'Additional information about this view.', null=True, verbose_name=\n 'Comment')\n", (578, 688), False, 'from django.db import migrations, models\n'), ((795, 972), 'django.db.models.SlugField', 'models.SlugField', ([], {'blank': '(True)', 'help_text': '"""The internal identifier of this view. The URI will be generated from this key."""', 'max_length': '(128)', 'null': '(True)', 'verbose_name': '"""Key"""'}), "(blank=True, help_text=\n 'The internal identifier of this view. The URI will be generated from this key.'\n , max_length=128, null=True, verbose_name='Key')\n", (811, 972), False, 'from django.db import migrations, models\n'), ((1079, 1238), 'django.db.models.URLField', 'models.URLField', ([], {'blank': '(True)', 'help_text': '"""The Uniform Resource Identifier of this view (auto-generated)."""', 'max_length': '(640)', 'null': '(True)', 'verbose_name': '"""URI"""'}), "(blank=True, help_text=\n 'The Uniform Resource Identifier of this view (auto-generated).',\n max_length=640, null=True, verbose_name='URI')\n", (1094, 1238), False, 'from django.db import migrations, models\n'), ((1353, 1493), 'django.db.models.URLField', 'models.URLField', ([], {'blank': '(True)', 'help_text': '"""The prefix for the URI of this view."""', 'max_length': '(256)', 'null': '(True)', 'verbose_name': '"""URI Prefix"""'}), "(blank=True, help_text=\n 'The prefix for the URI of this view.', max_length=256, null=True,\n verbose_name='URI Prefix')\n", (1368, 1493), False, 'from django.db import migrations, models\n')] |
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
try:
from ConfigParser import SafeConfigParser
except ImportError:
from configparser import ConfigParser as SafeConfigParser
import unittest
from confirm import validator
from confirm.utils import load_config_file
import yaml
def _call_validate(config_string, schema_string, **kwargs):
"""
Small wrapper to use the standard interface.
"""
config_parser = SafeConfigParser()
config_parser.readfp(StringIO(config_string))
schema = yaml.load(StringIO(schema_string))
config = load_config_file('.ini', config_string)
validation = validator.Validation(config, schema)
validation.validate(**kwargs)
return validation
class ValidatorTestCase(unittest.TestCase):
def test_required_field_in_missing_section(self):
config = "[sectiona]\noptiona = valuea"
schema = """
"sectionb":
"optionb":
"required": true
"sectiona":
"optiona":
"type": "int"
"""
result = _call_validate(config, schema)
self.assertIn("Missing required section sectionb.", result.errors())
def test_missing_required_field(self):
config = "[section]\noption1 = value1"
schema = """
"section":
"option2":
"required": true
"option1":
"description": "This is a description."
"""
result = _call_validate(config, schema)
self.assertIn("Missing required option option2 in section section.", result.errors())
def test_empty_required_field(self):
config = "[section]\noption1 ="
schema = """
"section":
"option1":
"required": true
"""
result = _call_validate(config, schema)
self.assertIn("Missing required option option1 in section section.", result.errors())
def test_invalid_int(self):
config = "[section]\noption1 =not an int!"
schema = """
"section":
"option1":
"required": true
"type": "int"
"""
result = _call_validate(config, schema)
self.assertIn("Invalid value for type int : not an int!.", result.errors())
def test_invalid_bool(self):
config = "[section]\noption1 =not a bool!"
schema = """
"section":
"option1":
"required": true
"type": "bool"
"""
result = _call_validate(config, schema)
self.assertIn("Invalid value for type bool : not a bool!.", result.errors())
def test_invalid_float(self):
config = "[section]\noption1 =not a float!"
schema = """
"section":
"option1":
"required": true
"type": "float"
"""
result = _call_validate(config, schema)
self.assertIn("Invalid value for type float : not a float!.", result.errors())
def test_invalid_type(self):
config = "[section]\noption1 =We don't care about the type here."
schema = """
"section":
"option1":
"required": true
"type": "invalid"
"""
result = _call_validate(config, schema)
self.assertIn("Invalid expected type for option option1 : invalid.", result.errors())
def test_typo_option_warning(self):
config = "[section]\noption13=14."
schema = """
"section":
"option1":
"required": false
"type": "int"
"""
result = _call_validate(config, schema)
self.assertIn("Possible typo for option option1 : option13.", result.warnings())
def test_typo_section_warning(self):
config = "[section13]\nrandom_option=random_value."
schema = """
"section1":
"option1":
"required": false
"type": "int"
"""
result = _call_validate(config, schema)
self.assertIn("Possible typo for section section1 : section13.", result.warnings())
def test_deprecated_section(self):
config = "[section1]\nrandom_option=random_value."
schema = """
"section1":
"option1":
"deprecated": true
"required": false
"type": "int"
"""
result = _call_validate(config, schema)
self.assertIn("Deprecated section section1 is present!", result.warnings())
result = _call_validate(config, schema, error_on_deprecated=True)
self.assertIn("Deprecated section section1 is present!", result.errors())
def test_is_valid(self):
config = "[section1]\nrandom_option=14"
schema = """
"section1":
"random_option":
"required": false
"type": "int"
"""
result = _call_validate(config, schema)
self.assertEqual(result.errors(), [], "There should be no errors for this validation!")
self.assertTrue(result.is_valid())
def test_is_invalid(self):
config = "[section1]\nrandom_option=not an int"
schema = """
"section1":
"random_option":
"required": false
"type": "int"
"""
result = _call_validate(config, schema)
self.assertNotEqual(result.errors(), [], "There should be errors for this validation!")
self.assertFalse(result.is_valid())
def test_is_valid_warnings(self):
config = "[section1]\nrandom_option=14"
schema = """
"section1":
"random_option":
"required": false
"deprecated": true
"type": "int"
"""
result = _call_validate(config, schema)
self.assertNotEqual(result.warnings(), [], "There should be deprecation warnings for this validation!")
self.assertTrue(result.is_valid())
def test_deprecated_option(self):
config = "[section1]\noption1=random_value."
schema = """
"section1":
"option1":
"deprecated": true
"required": false
"type": "str"
"option2":
"deprecated": false
"required": false
"type": "int"
"""
result = _call_validate(config, schema)
self.assertIn("Deprecated option option1 is present in section section1!", result.warnings())
result = _call_validate(config, schema, error_on_deprecated=True)
self.assertIn("Deprecated option option1 is present in section section1!", result.errors())
def test_undefined_section(self):
config = "[section1]\noption1=random_value\n[section2]\noption=value"
schema = """
"section1":
"option1":
"required": false
"type": "str"
""".strip()
result = _call_validate(config, schema)
self.assertIn("Section section2 is not defined in the schema file.", result.warnings())
def test_undefined_option(self):
config = "[section1]\noption1=random_value\noption2=random_value2"
schema = """
"section1":
"option1":
"required": false
"type": "str"
""".strip()
result = _call_validate(config, schema)
self.assertIn("Option option2 of section section1 is not defined in the schema file.", result.warnings())
| [
"confirm.utils.load_config_file",
"io.StringIO",
"configparser.ConfigParser",
"confirm.validator.Validation"
] | [((470, 488), 'configparser.ConfigParser', 'SafeConfigParser', ([], {}), '()\n', (486, 488), True, 'from configparser import ConfigParser as SafeConfigParser\n'), ((601, 640), 'confirm.utils.load_config_file', 'load_config_file', (['""".ini"""', 'config_string'], {}), "('.ini', config_string)\n", (617, 640), False, 'from confirm.utils import load_config_file\n'), ((659, 695), 'confirm.validator.Validation', 'validator.Validation', (['config', 'schema'], {}), '(config, schema)\n', (679, 695), False, 'from confirm import validator\n'), ((514, 537), 'io.StringIO', 'StringIO', (['config_string'], {}), '(config_string)\n', (522, 537), False, 'from io import StringIO\n'), ((563, 586), 'io.StringIO', 'StringIO', (['schema_string'], {}), '(schema_string)\n', (571, 586), False, 'from io import StringIO\n')] |
from marshmallow import Schema, fields
from marshmallow_sqlalchemy import SQLAlchemySchema, auto_field
from pf_sqlalchemy.db.orm import BaseModel
class PfOnlySchema(Schema):
pass
class PfBaseSchema(SQLAlchemySchema):
pass
class PfDetailBaseSchema(PfBaseSchema):
class Meta:
model = BaseModel
id = auto_field()
created = auto_field()
updated = auto_field()
uuid = auto_field()
class ModelViewSort(Schema):
id = fields.Integer(required=True, error_messages={"required": "Please enter entity id."})
viewOrder = fields.Integer(required=True, error_messages={"required": "Please enter view order."})
def common_exclude():
exclude = ["id", "created", "updated", "uuid"]
return exclude
def update_exclude():
exclude = ("created", "updated", "uuid")
return exclude
def common_exclude_append(*args):
list_args = list(args)
list_args.extend(common_exclude())
return list_args
def update_exclude_append(*args):
list_args = list(args)
list_args.extend(update_exclude())
return list_args
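# Illustrative sketch (not part of the original module; 'internal_notes' is a
# hypothetical field name): the helpers simply extend the base exclude lists.
#   common_exclude_append("internal_notes")
#   -> ['internal_notes', 'id', 'created', 'updated', 'uuid']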
| [
"marshmallow_sqlalchemy.auto_field",
"marshmallow.fields.Integer"
] | [((328, 340), 'marshmallow_sqlalchemy.auto_field', 'auto_field', ([], {}), '()\n', (338, 340), False, 'from marshmallow_sqlalchemy import SQLAlchemySchema, auto_field\n'), ((355, 367), 'marshmallow_sqlalchemy.auto_field', 'auto_field', ([], {}), '()\n', (365, 367), False, 'from marshmallow_sqlalchemy import SQLAlchemySchema, auto_field\n'), ((382, 394), 'marshmallow_sqlalchemy.auto_field', 'auto_field', ([], {}), '()\n', (392, 394), False, 'from marshmallow_sqlalchemy import SQLAlchemySchema, auto_field\n'), ((406, 418), 'marshmallow_sqlalchemy.auto_field', 'auto_field', ([], {}), '()\n', (416, 418), False, 'from marshmallow_sqlalchemy import SQLAlchemySchema, auto_field\n'), ((459, 548), 'marshmallow.fields.Integer', 'fields.Integer', ([], {'required': '(True)', 'error_messages': "{'required': 'Please enter entity id.'}"}), "(required=True, error_messages={'required':\n 'Please enter entity id.'})\n", (473, 548), False, 'from marshmallow import Schema, fields\n'), ((561, 651), 'marshmallow.fields.Integer', 'fields.Integer', ([], {'required': '(True)', 'error_messages': "{'required': 'Please enter view order.'}"}), "(required=True, error_messages={'required':\n 'Please enter view order.'})\n", (575, 651), False, 'from marshmallow import Schema, fields\n')] |
#!/usr/bin/env python3
import tensorflow as tf
import numpy as np
W = tf.constant([[1.74481176], [-0.7612069], [0.3190391]])
B = tf.constant([-0.24937038])
@tf.function
def perceptron(X, W, B):
layer = tf.add(tf.matmul(X, W), B)
act = tf.nn.sigmoid(layer)
return act
X1 = tf.constant([[1.62434536, -0.52817175, 0.86540763]])
Y1 = perceptron(X1, W, B)
print(Y1[0][0].numpy())
X2 = tf.constant([[-0.61175641, -1.07296862, -2.3015387]])
Y2 = perceptron(X2, W, B)
print(Y2[0][0].numpy())
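# Added sketch: cross-check the graph output against a plain NumPy evaluation of
# sigmoid(X.W + B); the values should agree up to floating-point precision.
def np_perceptron(x):
    return 1.0 / (1.0 + np.exp(-(np.dot(x, W.numpy()) + B.numpy())))
print(np_perceptron([[1.62434536, -0.52817175, 0.86540763]])[0][0])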
| [
"tensorflow.nn.sigmoid",
"tensorflow.constant",
"tensorflow.matmul"
] | [((72, 126), 'tensorflow.constant', 'tf.constant', (['[[1.74481176], [-0.7612069], [0.3190391]]'], {}), '([[1.74481176], [-0.7612069], [0.3190391]])\n', (83, 126), True, 'import tensorflow as tf\n'), ((131, 157), 'tensorflow.constant', 'tf.constant', (['[-0.24937038]'], {}), '([-0.24937038])\n', (142, 157), True, 'import tensorflow as tf\n'), ((290, 342), 'tensorflow.constant', 'tf.constant', (['[[1.62434536, -0.52817175, 0.86540763]]'], {}), '([[1.62434536, -0.52817175, 0.86540763]])\n', (301, 342), True, 'import tensorflow as tf\n'), ((399, 452), 'tensorflow.constant', 'tf.constant', (['[[-0.61175641, -1.07296862, -2.3015387]]'], {}), '([[-0.61175641, -1.07296862, -2.3015387]])\n', (410, 452), True, 'import tensorflow as tf\n'), ((247, 267), 'tensorflow.nn.sigmoid', 'tf.nn.sigmoid', (['layer'], {}), '(layer)\n', (260, 267), True, 'import tensorflow as tf\n'), ((217, 232), 'tensorflow.matmul', 'tf.matmul', (['X', 'W'], {}), '(X, W)\n', (226, 232), True, 'import tensorflow as tf\n')] |
from dataclasses import dataclass
from typing import Optional
from collections import defaultdict, Counter
core_noun = ["NNG", "NNP"]
core_pos = core_noun + ["VV", "VA"]
neighbor_pos = core_pos + ["NNBC", "MM", "MAG", "XPN", "XSN", "XSV", "XSA", "XR"]
single_possible_token = core_noun + ['SL']
entity_last_pos = single_possible_token + ["NNB","NNBC","NR","NP","VV","VA","MM","MAG","IC", "ETN","ETM","XPN","XSN","XR","SY","SH","SN",]
forbidden_words = ['’']
@dataclass
class Category:
large: str
small: str
class MecabTokenStorage:
def __init__(self):
self.core_key_word = defaultdict(dict)
        self.core_pos_word = Counter() # words that must belong to a core POS
        self.neighbor_word = Counter() # stores only the allowed POS range, e.g. ("가", "XPN")
class CategorySaveStorage:
def __init__(self):
        self.pos_dict = defaultdict(set) # check the format and the last value
        self.word_dict = set() # check the last value
        self.counter_dict = Counter() # scored words used for search
        self.counter_near_dict = Counter() # scored words used for search
class CategoryLoadStorage:
def __init__(self):
self.pos_dict = defaultdict(dict)
self.word_dict = list()
self.counter_dict = Counter()
self.counter_near_dict = Counter()
@dataclass
class MecabWordCategory:
category: Category
start_idx: Optional[int] = None
end_idx: Optional[int] = None
entity: Optional[str] = None
@dataclass
class MecabPatternData:
category: Category
dictionary_data: str
pattern: str
sentence: list
min_meaning: int = 0
parse_character: bool = False
@dataclass
class MecabWordFeature:
word: str
pos: str
semantic: str
has_jongseong: bool
reading: str
type : str
start_pos: str
end_pos: str
expression: str
space: Optional[int] = None
mecab_token: Optional[int] = None
mecab_compound: Optional[int] = None
begin: Optional[int] = None
end: Optional[int] = None
label: Optional[str] = "O"
@dataclass
class MecabNerFeature:
word: str
pos: str
start_idx: int
end_idx: int
category: Optional[Category] = None
@dataclass
class NerFeature:
word: str
pos: str
category: Optional[Category] = None | [
"collections.Counter",
"collections.defaultdict"
] | [((600, 617), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (611, 617), False, 'from collections import defaultdict, Counter\n'), ((647, 656), 'collections.Counter', 'Counter', ([], {}), '()\n', (654, 656), False, 'from collections import defaultdict, Counter\n'), ((714, 723), 'collections.Counter', 'Counter', ([], {}), '()\n', (721, 723), False, 'from collections import defaultdict, Counter\n'), ((834, 850), 'collections.defaultdict', 'defaultdict', (['set'], {}), '(set)\n', (845, 850), False, 'from collections import defaultdict, Counter\n'), ((938, 947), 'collections.Counter', 'Counter', ([], {}), '()\n', (945, 947), False, 'from collections import defaultdict, Counter\n'), ((997, 1006), 'collections.Counter', 'Counter', ([], {}), '()\n', (1004, 1006), False, 'from collections import defaultdict, Counter\n'), ((1101, 1118), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (1112, 1118), False, 'from collections import defaultdict, Counter\n'), ((1179, 1188), 'collections.Counter', 'Counter', ([], {}), '()\n', (1186, 1188), False, 'from collections import defaultdict, Counter\n'), ((1222, 1231), 'collections.Counter', 'Counter', ([], {}), '()\n', (1229, 1231), False, 'from collections import defaultdict, Counter\n')] |
from tkinter import *
from tkinter import ttk, font
import getpass
# Geometry manager (pack)
class Aplicacion():
def __init__(self):
self.raiz = Tk()
self.raiz.title("Acceso")
        # Change the current font format to bold in order to
        # highlight the two labels that accompany the entry
        # boxes. (The 'font' module is imported at the top of
        # the program for this change):
fuente = font.Font(weight='bold')
        # Define the labels that accompany the entry boxes and
        # assign them the font format defined above:
self.etiq1 = ttk.Label(self.raiz, text="Usuario:", font=fuente)
self.etiq2 = ttk.Label(self.raiz, text="Contraseña:", font=fuente)
        # Declare two string variables to hold the user name
        # and the password:
self.usuario = StringVar()
self.clave = StringVar()
        # Read the name of the user who started the session on
        # the system and assign it to the variable 'self.usuario'
        # (the getpass module is imported at the top of the
        # program to capture this information):
self.usuario.set(getpass.getuser())
        # Define two entry boxes that accept strings of at most
        # 30 characters.
        # The first one, 'self.ctext1', which will hold the user
        # name, has the variable 'self.usuario' assigned to its
        # 'textvariable' option, so any value the variable takes
        # while the program runs is reflected in the entry box.
        # The same is done for the second entry box, the one for
        # the password; in addition, its 'show' option is set to
        # "*" (asterisk) to hide the typed password:
self.ctext1 = ttk.Entry(self.raiz, textvariable=self.usuario, width=30)
self.ctext2 = ttk.Entry(self.raiz, textvariable=self.clave, width=30,
show="*")
self.separ1 = ttk.Separator(self.raiz, orient=HORIZONTAL)
        # Define two buttons bound to two methods: the 'Aceptar'
        # button calls the method 'self.aceptar' when pressed in
        # order to validate the password, and the 'Cancelar'
        # button quits the application if pressed:
self.boton1 = ttk.Button(self.raiz, text="Aceptar",
command=self.aceptar)
self.boton2 = ttk.Button(self.raiz, text="Cancelar", command=quit)
        # Define the positions of the widgets inside the window.
        # All controls are packed towards the top, except the
        # last two, the buttons, which are placed below the last
        # 'TOP': the first button to the left and the second to
        # its right.
        # The possible values for the 'side' option are:
        # TOP (top), BOTTOM (bottom), LEFT (left) and
        # RIGHT (right). If omitted, the value defaults to TOP.
        # The 'fill' option tells the geometry manager how to
        # expand or shrink the widget when the window changes
        # size. It has three possible values: BOTH (horizontally
        # and vertically), X (horizontally) and Y (vertically).
        # It takes effect when the 'expand' option is True.
        # Finally, the 'padx' and 'pady' options add extra
        # external horizontal and/or vertical space around the
        # widgets to separate them from each other and from the
        # window borders. Their equivalents 'ipadx' and 'ipady'
        # add extra internal space:
self.etiq1.pack(side=TOP, fill=BOTH, expand=True, padx=5, pady=5)
self.ctext1.pack(side=TOP, fill=X, expand=True, padx=5, pady=5)
self.etiq2.pack(side=TOP, fill=BOTH, expand=True, padx=5, pady=5)
self.ctext2.pack(side=TOP, fill=X, expand=True, padx=5, pady=5)
self.separ1.pack(side=TOP, fill=BOTH, expand=True, padx=5, pady=5)
self.boton1.pack(side=LEFT, fill=BOTH, expand=True, padx=5, pady=5)
self.boton2.pack(side=RIGHT, fill=BOTH, expand=True, padx=5, pady=5)
        # When the program starts, focus is given to the
        # password entry box so that typing can begin
        # immediately:
self.ctext2.focus_set()
self.raiz.mainloop()
    # The 'aceptar' method is used to validate the entered
    # password. It is called when the 'Aceptar' button is
    # pressed. If the password matches the string 'tkinter',
    # the message 'Acceso permitido' and the accepted values
    # are printed. Otherwise, the message 'Acceso denegado' is
    # shown and focus returns to the same place.
def aceptar(self):
if self.clave.get() == 'tkinter':
print("Acceso permitido\n"
f"Usuario: {self.ctext1.get()}\n"
f"Contraseña: {self.ctext2.get()}")
else:
print("Acceso denegado")
        # Reset the variable 'self.clave' so that the widget
        # 'self.ctext2' is cleared. Finally, focus is given back
        # to that widget so a new password can be typed.
self.clave.set("")
self.ctext2.focus_set()
def main():
Aplicacion()
return 0
if __name__ == '__main__':
main()
| [
"tkinter.ttk.Button",
"tkinter.ttk.Entry",
"tkinter.ttk.Label",
"tkinter.font.Font",
"getpass.getuser",
"tkinter.ttk.Separator"
] | [((457, 481), 'tkinter.font.Font', 'font.Font', ([], {'weight': '"""bold"""'}), "(weight='bold')\n", (466, 481), False, 'from tkinter import ttk, font\n'), ((623, 673), 'tkinter.ttk.Label', 'ttk.Label', (['self.raiz'], {'text': '"""Usuario:"""', 'font': 'fuente'}), "(self.raiz, text='Usuario:', font=fuente)\n", (632, 673), False, 'from tkinter import ttk, font\n'), ((695, 748), 'tkinter.ttk.Label', 'ttk.Label', (['self.raiz'], {'text': '"""Contraseña:"""', 'font': 'fuente'}), "(self.raiz, text='Contraseña:', font=fuente)\n", (704, 748), False, 'from tkinter import ttk, font\n'), ((1878, 1935), 'tkinter.ttk.Entry', 'ttk.Entry', (['self.raiz'], {'textvariable': 'self.usuario', 'width': '(30)'}), '(self.raiz, textvariable=self.usuario, width=30)\n', (1887, 1935), False, 'from tkinter import ttk, font\n'), ((1958, 2023), 'tkinter.ttk.Entry', 'ttk.Entry', (['self.raiz'], {'textvariable': 'self.clave', 'width': '(30)', 'show': '"""*"""'}), "(self.raiz, textvariable=self.clave, width=30, show='*')\n", (1967, 2023), False, 'from tkinter import ttk, font\n'), ((2078, 2121), 'tkinter.ttk.Separator', 'ttk.Separator', (['self.raiz'], {'orient': 'HORIZONTAL'}), '(self.raiz, orient=HORIZONTAL)\n', (2091, 2121), False, 'from tkinter import ttk, font\n'), ((2410, 2469), 'tkinter.ttk.Button', 'ttk.Button', (['self.raiz'], {'text': '"""Aceptar"""', 'command': 'self.aceptar'}), "(self.raiz, text='Aceptar', command=self.aceptar)\n", (2420, 2469), False, 'from tkinter import ttk, font\n'), ((2525, 2577), 'tkinter.ttk.Button', 'ttk.Button', (['self.raiz'], {'text': '"""Cancelar"""', 'command': 'quit'}), "(self.raiz, text='Cancelar', command=quit)\n", (2535, 2577), False, 'from tkinter import ttk, font\n'), ((1204, 1221), 'getpass.getuser', 'getpass.getuser', ([], {}), '()\n', (1219, 1221), False, 'import getpass\n')] |
# Copyright 2019 kubeflow.org.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import tempfile
import unittest
from unittest.mock import MagicMock
from unittest.mock import Mock
from unittest.mock import call
from unittest.mock import patch
import kfserving
class TestS3Storage(unittest.TestCase):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.whole_bucket_download_calls = [
call('kfserving-storage-test', 'file1', unittest.mock.ANY),
call('kfserving-storage-test', 'file2', unittest.mock.ANY),
call('kfserving-storage-test', 'subdir1/file1', unittest.mock.ANY),
call('kfserving-storage-test', 'subdir1/file2', unittest.mock.ANY),
call('kfserving-storage-test', 'subdir2/file1', unittest.mock.ANY),
call('kfserving-storage-test', 'subdir2/file2', unittest.mock.ANY)
]
self.under_prefix_download_calls = [
call('kfserving-storage-test', 'model-prefix/file1', unittest.mock.ANY),
call('kfserving-storage-test', 'model-prefix/file2', unittest.mock.ANY),
call('kfserving-storage-test', 'model-prefix/subdir1/file1', unittest.mock.ANY),
call('kfserving-storage-test', 'model-prefix/subdir1/file2', unittest.mock.ANY),
call('kfserving-storage-test', 'model-prefix/subdir2/file1', unittest.mock.ANY),
call('kfserving-storage-test', 'model-prefix/subdir2/file2', unittest.mock.ANY)
]
@patch('boto3.client')
def testDownloadWholeS3Bucket(self, client):
storage = kfserving.Storage()
with tempfile.TemporaryDirectory() as tmpdir:
s3client = MagicMock()
s3client.download_fileobj = MagicMock()
client.return_value = s3client
s3client.list_objects_v2 = Mock(return_value=TestS3Storage._generate_s3_list_objects_response())
storage.download('s3://kfserving-storage-test', tmpdir)
s3client.list_objects_v2.assert_called_with(Bucket='kfserving-storage-test')
s3client.download_fileobj.assert_has_calls(self.whole_bucket_download_calls, any_order=True)
self._verify_download(tmpdir)
@patch('boto3.client')
def testDownloadWholeBucketTrailingSlash(self, client):
storage = kfserving.Storage()
with tempfile.TemporaryDirectory() as tmpdir:
s3client = MagicMock()
s3client.download_fileobj = MagicMock()
client.return_value = s3client
s3client.list_objects_v2 = Mock(return_value=TestS3Storage._generate_s3_list_objects_response())
storage.download('s3://kfserving-storage-test/', tmpdir)
s3client.list_objects_v2.assert_called_with(Bucket='kfserving-storage-test')
s3client.download_fileobj.assert_has_calls(self.whole_bucket_download_calls, any_order=True)
self._verify_download(tmpdir)
@patch('boto3.client')
def testDownloadUnderPrefix(self, client):
storage = kfserving.Storage()
with tempfile.TemporaryDirectory() as tmpdir:
s3client = MagicMock()
s3client.download_fileobj = MagicMock()
client.return_value = s3client
s3client.list_objects_v2 = Mock(
return_value=TestS3Storage._generate_s3_list_objects_response(prefix='model-prefix'))
storage.download('s3://kfserving-storage-test/model-prefix', tmpdir)
s3client.list_objects_v2.assert_called_with(Bucket='kfserving-storage-test', Prefix='model-prefix')
s3client.download_fileobj.assert_has_calls(self.under_prefix_download_calls, any_order=True)
self._verify_download(tmpdir)
@patch('boto3.client')
def testDownloadUnderPrefixTrailingSlash(self, client):
storage = kfserving.Storage()
with tempfile.TemporaryDirectory() as tmpdir:
s3client = MagicMock()
s3client.download_fileobj = MagicMock()
client.return_value = s3client
s3client.list_objects_v2 = Mock(
return_value=TestS3Storage._generate_s3_list_objects_response(prefix='model-prefix'))
storage.download('s3://kfserving-storage-test/model-prefix/', tmpdir)
s3client.list_objects_v2.assert_called_with(Bucket='kfserving-storage-test', Prefix='model-prefix/')
s3client.download_fileobj.assert_has_calls(self.under_prefix_download_calls, any_order=True)
self._verify_download(tmpdir)
@patch('boto3.client')
def testDownloadNonExistentPrefix(self, client):
storage = kfserving.Storage()
with tempfile.TemporaryDirectory() as tmpdir:
s3client = MagicMock()
with self.assertRaises(RuntimeError):
s3client.download_fileobj = MagicMock()
client.return_value = s3client
s3client.list_objects_v2 = Mock(
return_value=TestS3Storage._generate_s3_list_objects_response(nfiles=0, ndirs=0, depth=0))
storage.download('s3://kfserving-storage-test/nonexistent', tmpdir)
s3client.list_objects_v2.assert_called_with(Bucket='kfserving-storage-test',
Prefix='nonexistent')
s3client.download_fileobj.assert_not_called()
self.assertListEqual([(tmpdir, [], [])], list(os.walk(tmpdir)))
@patch('boto3.client')
def testContinuation(self, client):
storage = kfserving.Storage()
with tempfile.TemporaryDirectory() as tmpdir:
s3client = MagicMock()
s3client.download_fileobj = MagicMock()
client.return_value = s3client
s3client.list_objects_v2 = TestS3Storage._generate_truncated_response
storage.download('s3://kfserving-storage-test', tmpdir)
s3client.download_fileobj.assert_has_calls(self.whole_bucket_download_calls, any_order=True)
self._verify_download(tmpdir)
@patch('boto3.client')
def testEmptyDir(self, client):
storage = kfserving.Storage()
with tempfile.TemporaryDirectory() as tmpdir:
s3client = MagicMock()
s3client.download_fileobj = MagicMock()
client.return_value = s3client
s3client.list_objects_v2 = Mock(
return_value=TestS3Storage._generate_s3_list_objects_response(nfiles=1, ndirs=1, depth=0,
empty_dirs=True))
storage.download('s3://kfserving-storage-test', tmpdir)
s3client.list_objects_v2.assert_called_with(Bucket='kfserving-storage-test')
s3client.download_fileobj.assert_called_with('kfserving-storage-test', 'file1', unittest.mock.ANY)
self.assertListEqual([(tmpdir, ['subdir1'], ['file1']), (os.path.join(tmpdir, 'subdir1'), [], [])],
list(os.walk(tmpdir)))
def _verify_download(self, tmpdir, prefix=None):
if prefix:
tmpdir = os.path.join(tmpdir, prefix)
for root, dirs, files in os.walk(tmpdir):
if root == tmpdir:
self.assertListEqual(sorted(['subdir1', 'subdir2']), sorted(dirs))
self.assertListEqual(sorted(['file1', 'file2']), sorted(files))
elif root == os.path.join(tmpdir, 'subdir1') or root == os.path.join(tmpdir, 'subdir2'):
self.assertListEqual([], dirs)
self.assertListEqual(sorted(['file1', 'file2']), sorted(files))
else:
self.fail('Unexpected download directory: {}'.format(root))
@staticmethod
def _generate_keys(prefix, nfiles, ndirs, depth, empty_dirs):
keys = []
for file_index in range(1, nfiles + 1):
key = 'file{}'.format(file_index)
if prefix:
key = '{}/{}'.format(prefix, key)
keys.append(key)
if depth > 1:
for dir_index in range(1, ndirs + 1):
r_prefix = 'subdir{}'.format(dir_index)
if prefix:
r_prefix = '{}/{}'.format(prefix, r_prefix)
keys += TestS3Storage._generate_keys(r_prefix, nfiles, ndirs, depth - 1, empty_dirs)
elif ndirs > 0 and empty_dirs:
for dir_index in range(1, ndirs + 1):
key = 'subdir{}/'.format(dir_index)
if prefix:
key = '{}/{}'.format(prefix, key)
keys.append(key)
return keys
@staticmethod
def _generate_s3_list_objects_response(prefix='', nfiles=2, ndirs=2, depth=2, empty_dirs=False):
keys = TestS3Storage._generate_keys(prefix, nfiles, ndirs, depth, empty_dirs)
return {'Contents': [{'ETag': None,
'Key': key,
'LastModified': None,
'Size': None,
'StorageClass': 'STANDARD'} for key in keys],
'EncodingType': 'url',
'IsTruncated': False,
'KeyCount': len(keys),
'MaxKeys': 1000,
'Name': 'kfserving-storage-test',
'Prefix': prefix,
'ResponseMetadata': {'HTTPHeaders': {'content-type': 'application/xml',
'date': None,
'server': None,
'transfer-encoding': None,
'x-amz-bucket-region': None,
'x-amz-id-2': None,
'x-amz-request-id': None},
'HTTPStatusCode': 200,
'HostId': None,
'RequestId': None,
'RetryAttempts': 0}}
@staticmethod
def _generate_truncated_response(Bucket, Prefix='', ContinuationToken=0):
response = TestS3Storage._generate_s3_list_objects_response(prefix=Prefix)
response['MaxKeys'] = 1
if ContinuationToken < len(response['Contents']) - 1:
response['IsTruncated'] = True
response['NextContinuationToken'] = ContinuationToken + 1
response['Contents'] = [response['Contents'][ContinuationToken]]
return response
if __name__ == '__main__':
unittest.main()
| [
"tempfile.TemporaryDirectory",
"kfserving.Storage",
"unittest.mock.MagicMock",
"unittest.mock.call",
"os.path.join",
"unittest.main",
"unittest.mock.patch",
"os.walk"
] | [((2015, 2036), 'unittest.mock.patch', 'patch', (['"""boto3.client"""'], {}), "('boto3.client')\n", (2020, 2036), False, 'from unittest.mock import patch\n'), ((2727, 2748), 'unittest.mock.patch', 'patch', (['"""boto3.client"""'], {}), "('boto3.client')\n", (2732, 2748), False, 'from unittest.mock import patch\n'), ((3451, 3472), 'unittest.mock.patch', 'patch', (['"""boto3.client"""'], {}), "('boto3.client')\n", (3456, 3472), False, 'from unittest.mock import patch\n'), ((4235, 4256), 'unittest.mock.patch', 'patch', (['"""boto3.client"""'], {}), "('boto3.client')\n", (4240, 4256), False, 'from unittest.mock import patch\n'), ((5034, 5055), 'unittest.mock.patch', 'patch', (['"""boto3.client"""'], {}), "('boto3.client')\n", (5039, 5055), False, 'from unittest.mock import patch\n'), ((5940, 5961), 'unittest.mock.patch', 'patch', (['"""boto3.client"""'], {}), "('boto3.client')\n", (5945, 5961), False, 'from unittest.mock import patch\n'), ((6527, 6548), 'unittest.mock.patch', 'patch', (['"""boto3.client"""'], {}), "('boto3.client')\n", (6532, 6548), False, 'from unittest.mock import patch\n'), ((11056, 11071), 'unittest.main', 'unittest.main', ([], {}), '()\n', (11069, 11071), False, 'import unittest\n'), ((2104, 2123), 'kfserving.Storage', 'kfserving.Storage', ([], {}), '()\n', (2121, 2123), False, 'import kfserving\n'), ((2827, 2846), 'kfserving.Storage', 'kfserving.Storage', ([], {}), '()\n', (2844, 2846), False, 'import kfserving\n'), ((3538, 3557), 'kfserving.Storage', 'kfserving.Storage', ([], {}), '()\n', (3555, 3557), False, 'import kfserving\n'), ((4335, 4354), 'kfserving.Storage', 'kfserving.Storage', ([], {}), '()\n', (4352, 4354), False, 'import kfserving\n'), ((5127, 5146), 'kfserving.Storage', 'kfserving.Storage', ([], {}), '()\n', (5144, 5146), False, 'import kfserving\n'), ((6020, 6039), 'kfserving.Storage', 'kfserving.Storage', ([], {}), '()\n', (6037, 6039), False, 'import kfserving\n'), ((6603, 6622), 'kfserving.Storage', 'kfserving.Storage', ([], {}), '()\n', (6620, 6622), False, 'import kfserving\n'), ((7646, 7661), 'os.walk', 'os.walk', (['tmpdir'], {}), '(tmpdir)\n', (7653, 7661), False, 'import os\n'), ((951, 1009), 'unittest.mock.call', 'call', (['"""kfserving-storage-test"""', '"""file1"""', 'unittest.mock.ANY'], {}), "('kfserving-storage-test', 'file1', unittest.mock.ANY)\n", (955, 1009), False, 'from unittest.mock import call\n'), ((1023, 1081), 'unittest.mock.call', 'call', (['"""kfserving-storage-test"""', '"""file2"""', 'unittest.mock.ANY'], {}), "('kfserving-storage-test', 'file2', unittest.mock.ANY)\n", (1027, 1081), False, 'from unittest.mock import call\n'), ((1095, 1161), 'unittest.mock.call', 'call', (['"""kfserving-storage-test"""', '"""subdir1/file1"""', 'unittest.mock.ANY'], {}), "('kfserving-storage-test', 'subdir1/file1', unittest.mock.ANY)\n", (1099, 1161), False, 'from unittest.mock import call\n'), ((1175, 1241), 'unittest.mock.call', 'call', (['"""kfserving-storage-test"""', '"""subdir1/file2"""', 'unittest.mock.ANY'], {}), "('kfserving-storage-test', 'subdir1/file2', unittest.mock.ANY)\n", (1179, 1241), False, 'from unittest.mock import call\n'), ((1255, 1321), 'unittest.mock.call', 'call', (['"""kfserving-storage-test"""', '"""subdir2/file1"""', 'unittest.mock.ANY'], {}), "('kfserving-storage-test', 'subdir2/file1', unittest.mock.ANY)\n", (1259, 1321), False, 'from unittest.mock import call\n'), ((1335, 1401), 'unittest.mock.call', 'call', (['"""kfserving-storage-test"""', '"""subdir2/file2"""', 'unittest.mock.ANY'], {}), 
"('kfserving-storage-test', 'subdir2/file2', unittest.mock.ANY)\n", (1339, 1401), False, 'from unittest.mock import call\n'), ((1470, 1541), 'unittest.mock.call', 'call', (['"""kfserving-storage-test"""', '"""model-prefix/file1"""', 'unittest.mock.ANY'], {}), "('kfserving-storage-test', 'model-prefix/file1', unittest.mock.ANY)\n", (1474, 1541), False, 'from unittest.mock import call\n'), ((1555, 1626), 'unittest.mock.call', 'call', (['"""kfserving-storage-test"""', '"""model-prefix/file2"""', 'unittest.mock.ANY'], {}), "('kfserving-storage-test', 'model-prefix/file2', unittest.mock.ANY)\n", (1559, 1626), False, 'from unittest.mock import call\n'), ((1640, 1719), 'unittest.mock.call', 'call', (['"""kfserving-storage-test"""', '"""model-prefix/subdir1/file1"""', 'unittest.mock.ANY'], {}), "('kfserving-storage-test', 'model-prefix/subdir1/file1', unittest.mock.ANY)\n", (1644, 1719), False, 'from unittest.mock import call\n'), ((1733, 1812), 'unittest.mock.call', 'call', (['"""kfserving-storage-test"""', '"""model-prefix/subdir1/file2"""', 'unittest.mock.ANY'], {}), "('kfserving-storage-test', 'model-prefix/subdir1/file2', unittest.mock.ANY)\n", (1737, 1812), False, 'from unittest.mock import call\n'), ((1826, 1905), 'unittest.mock.call', 'call', (['"""kfserving-storage-test"""', '"""model-prefix/subdir2/file1"""', 'unittest.mock.ANY'], {}), "('kfserving-storage-test', 'model-prefix/subdir2/file1', unittest.mock.ANY)\n", (1830, 1905), False, 'from unittest.mock import call\n'), ((1919, 1998), 'unittest.mock.call', 'call', (['"""kfserving-storage-test"""', '"""model-prefix/subdir2/file2"""', 'unittest.mock.ANY'], {}), "('kfserving-storage-test', 'model-prefix/subdir2/file2', unittest.mock.ANY)\n", (1923, 1998), False, 'from unittest.mock import call\n'), ((2137, 2166), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (2164, 2166), False, 'import tempfile\n'), ((2201, 2212), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (2210, 2212), False, 'from unittest.mock import MagicMock\n'), ((2253, 2264), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (2262, 2264), False, 'from unittest.mock import MagicMock\n'), ((2860, 2889), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (2887, 2889), False, 'import tempfile\n'), ((2924, 2935), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (2933, 2935), False, 'from unittest.mock import MagicMock\n'), ((2976, 2987), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (2985, 2987), False, 'from unittest.mock import MagicMock\n'), ((3571, 3600), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (3598, 3600), False, 'import tempfile\n'), ((3635, 3646), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (3644, 3646), False, 'from unittest.mock import MagicMock\n'), ((3687, 3698), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (3696, 3698), False, 'from unittest.mock import MagicMock\n'), ((4368, 4397), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (4395, 4397), False, 'import tempfile\n'), ((4432, 4443), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (4441, 4443), False, 'from unittest.mock import MagicMock\n'), ((4484, 4495), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (4493, 4495), False, 'from unittest.mock import MagicMock\n'), ((5160, 5189), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (5187, 5189), False, 'import 
tempfile\n'), ((5224, 5235), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (5233, 5235), False, 'from unittest.mock import MagicMock\n'), ((6053, 6082), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (6080, 6082), False, 'import tempfile\n'), ((6117, 6128), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (6126, 6128), False, 'from unittest.mock import MagicMock\n'), ((6169, 6180), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (6178, 6180), False, 'from unittest.mock import MagicMock\n'), ((6636, 6665), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (6663, 6665), False, 'import tempfile\n'), ((6700, 6711), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (6709, 6711), False, 'from unittest.mock import MagicMock\n'), ((6752, 6763), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (6761, 6763), False, 'from unittest.mock import MagicMock\n'), ((7584, 7612), 'os.path.join', 'os.path.join', (['tmpdir', 'prefix'], {}), '(tmpdir, prefix)\n', (7596, 7612), False, 'import os\n'), ((5330, 5341), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (5339, 5341), False, 'from unittest.mock import MagicMock\n'), ((5916, 5931), 'os.walk', 'os.walk', (['tmpdir'], {}), '(tmpdir)\n', (5923, 5931), False, 'import os\n'), ((7472, 7487), 'os.walk', 'os.walk', (['tmpdir'], {}), '(tmpdir)\n', (7479, 7487), False, 'import os\n'), ((7391, 7422), 'os.path.join', 'os.path.join', (['tmpdir', '"""subdir1"""'], {}), "(tmpdir, 'subdir1')\n", (7403, 7422), False, 'import os\n'), ((7882, 7913), 'os.path.join', 'os.path.join', (['tmpdir', '"""subdir1"""'], {}), "(tmpdir, 'subdir1')\n", (7894, 7913), False, 'import os\n'), ((7925, 7956), 'os.path.join', 'os.path.join', (['tmpdir', '"""subdir2"""'], {}), "(tmpdir, 'subdir2')\n", (7937, 7956), False, 'import os\n')] |
#!/usr/bin/env python3
from fuseq.option import Option
from fuseq.genomon import Genomon
from fuseq.pipeline import Pipeline
def main():
opt = Option()
genomon = Genomon(opt.refer())
for mf_dir, mf_path in genomon.mf_dic.items():
params = opt.copy()
# Paths
work_dir = f'{params.fuseq_root_dir}/{mf_dir}/{params.work_dirname}'
swork_dir = f'{params.fuseq_root_dir}/{mf_dir}/{params.work_dirname}/{params.swork_dirname}'
fuseq_path = f'{params.fuseq_root_dir}/{mf_dir}/{params.fuseq_filename}'
inputs = {'mf_path': mf_path, 'star_dir': genomon.star_dir}
# Add to params
params.work_dir = work_dir
params.swork_dir = swork_dir
params.fuseq_path = fuseq_path
params.inputs = inputs
# Run
pipeline = Pipeline(params)
pipeline.run()
if __name__ == '__main__':
main()
| [
"fuseq.pipeline.Pipeline",
"fuseq.option.Option"
] | [((151, 159), 'fuseq.option.Option', 'Option', ([], {}), '()\n', (157, 159), False, 'from fuseq.option import Option\n'), ((820, 836), 'fuseq.pipeline.Pipeline', 'Pipeline', (['params'], {}), '(params)\n', (828, 836), False, 'from fuseq.pipeline import Pipeline\n')] |
#!/usr/bin/env python3
from termcolor import *
import colorama
colorama.init()
def start():
cprint('\n------------------ START -----------------\n', 'green')
def end():
cprint('\n------------------- END ------------------\n', 'red') | [
"colorama.init"
] | [((63, 78), 'colorama.init', 'colorama.init', ([], {}), '()\n', (76, 78), False, 'import colorama\n')] |
import numpy as np
import torch
class Dataset(torch.utils.data.Dataset):
def __init__(self, data, device="cpu"):
mesh = np.stack(np.meshgrid(*data), -1).reshape(-1, len(data))
self.X = torch.from_numpy(mesh).float().to(device)
def __len__(self):
return len(self.X)
def __getitem__(self, ix):
return self.X[ix]
class Mesh():
def __init__(self, data, device="cpu"):
assert isinstance(data, dict), "you must pass a dict with your data"
self.vars, data = tuple(data.keys()), data.values()
self.dataset = Dataset(data, device)
self.device = device
def build_dataloader(self, batch_size=None, shuffle=True):
        if batch_size is None:
batch_size = len(self.dataset)
return torch.utils.data.DataLoader(
self.dataset, batch_size=batch_size, shuffle=shuffle)
| [
"numpy.meshgrid",
"torch.from_numpy",
"torch.utils.data.DataLoader"
] | [((810, 896), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['self.dataset'], {'batch_size': 'batch_size', 'shuffle': 'shuffle'}), '(self.dataset, batch_size=batch_size, shuffle=\n shuffle)\n', (837, 896), False, 'import torch\n'), ((149, 167), 'numpy.meshgrid', 'np.meshgrid', (['*data'], {}), '(*data)\n', (160, 167), True, 'import numpy as np\n'), ((214, 236), 'torch.from_numpy', 'torch.from_numpy', (['mesh'], {}), '(mesh)\n', (230, 236), False, 'import torch\n')] |
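A minimal usage sketch for the Dataset/Mesh helpers above; the grid sizes, variable names and batch size are illustrative assumptions, not part of the original sample.

import numpy as np

# two coordinate axes; the dict keys become mesh.vars, the values are gridded together
data = {"x": np.linspace(0.0, 1.0, 10), "t": np.linspace(0.0, 1.0, 5)}
mesh = Mesh(data, device="cpu")              # Mesh class defined in the sample above
loader = mesh.build_dataloader(batch_size=16)
for batch in loader:
    print(batch.shape)                        # torch.Size([16, 2]) for full batches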
import matplotlib.pyplot as plt
from visualization_utils.base_functions import annotate_heatmap, heatmap
def plot_confusion_matrix(
confusion_matrix, classes, figure_path="confusion_matrix.png"
):
"""Plots a given confusion matrix.
This function plots a given confusion matrix and saves it as a figure.
Parameters
----------
confusion_matrix : array-like of shape (n_classes, n_classes)
Confusion matrix.
classes : array-like of shape (n_classes,)
Classes.
figure_path : str, default=confusion_matrix.png
Figure path.
Returns
-------
None
"""
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(6, 5),)
fig.tight_layout()
im = heatmap(
confusion_matrix,
classes,
classes,
x_label="True Class",
y_label="Predicted Class",
ax=ax,
vmin=0,
vmax=0,
)
annotate_heatmap(im, valfmt="{x:d}", size=20, textcolors=["black", "white"])
fig.tight_layout()
plt.savefig(figure_path, dpi=300, bbox_inches="tight")
| [
"visualization_utils.base_functions.heatmap",
"visualization_utils.base_functions.annotate_heatmap",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.savefig"
] | [((642, 688), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(1)', 'ncols': '(1)', 'figsize': '(6, 5)'}), '(nrows=1, ncols=1, figsize=(6, 5))\n', (654, 688), True, 'import matplotlib.pyplot as plt\n'), ((723, 843), 'visualization_utils.base_functions.heatmap', 'heatmap', (['confusion_matrix', 'classes', 'classes'], {'x_label': '"""True Class"""', 'y_label': '"""Predicted Class"""', 'ax': 'ax', 'vmin': '(0)', 'vmax': '(0)'}), "(confusion_matrix, classes, classes, x_label='True Class', y_label=\n 'Predicted Class', ax=ax, vmin=0, vmax=0)\n", (730, 843), False, 'from visualization_utils.base_functions import annotate_heatmap, heatmap\n'), ((914, 990), 'visualization_utils.base_functions.annotate_heatmap', 'annotate_heatmap', (['im'], {'valfmt': '"""{x:d}"""', 'size': '(20)', 'textcolors': "['black', 'white']"}), "(im, valfmt='{x:d}', size=20, textcolors=['black', 'white'])\n", (930, 990), False, 'from visualization_utils.base_functions import annotate_heatmap, heatmap\n'), ((1019, 1073), 'matplotlib.pyplot.savefig', 'plt.savefig', (['figure_path'], {'dpi': '(300)', 'bbox_inches': '"""tight"""'}), "(figure_path, dpi=300, bbox_inches='tight')\n", (1030, 1073), True, 'import matplotlib.pyplot as plt\n')] |
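An illustrative call of the plot_confusion_matrix helper above; the class names and counts are made-up values, and the function is assumed to be importable alongside its heatmap helpers.

import numpy as np

# hypothetical 3-class confusion matrix (integer counts, as required by valfmt="{x:d}")
cm = np.array([
    [50,  2,  3],
    [ 4, 45,  1],
    [ 0,  5, 40],
])
classes = ["cat", "dog", "bird"]
plot_confusion_matrix(cm, classes, figure_path="cm_example.png")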
import numpy as np
def knn_graph(X, k, threshold):
'''
KNN_GRAPH Construct W using KNN graph
Input:
        X - data point features, n-by-p matrix.
        k - number of nearest neighbours.
        threshold - minimum similarity required to keep an edge in W.
Output:
W - adjacency matrix, n-by-n matrix.
'''
# YOUR CODE HERE
# begin answer
N, P = X.shape
sigma = 0.1
W = np.zeros((N, N))
for i in range(N):
L2 = np.linalg.norm(X - X[i, :], axis=1)
Idx = np.argsort(L2)[1:k+1]
for j in Idx:
similarity = np.exp(-np.sum(np.square(X[i, :] - X[j, :]))/(2*sigma**2))
W[i, j] = W[j, i] = similarity if similarity > threshold else 0
return W
# end answer
| [
"numpy.argsort",
"numpy.zeros",
"numpy.square",
"numpy.linalg.norm"
] | [((380, 396), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (388, 396), True, 'import numpy as np\n'), ((433, 468), 'numpy.linalg.norm', 'np.linalg.norm', (['(X - X[i, :])'], {'axis': '(1)'}), '(X - X[i, :], axis=1)\n', (447, 468), True, 'import numpy as np\n'), ((483, 497), 'numpy.argsort', 'np.argsort', (['L2'], {}), '(L2)\n', (493, 497), True, 'import numpy as np\n'), ((567, 595), 'numpy.square', 'np.square', (['(X[i, :] - X[j, :])'], {}), '(X[i, :] - X[j, :])\n', (576, 595), True, 'import numpy as np\n')] |
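A small, illustrative call of the knn_graph function above; the toy data and the k and threshold values are arbitrary assumptions.

import numpy as np

rng = np.random.RandomState(0)
X = rng.rand(20, 2)                       # 20 random points in 2-D
W = knn_graph(X, k=5, threshold=0.5)      # function defined in the sample above
print(W.shape)                            # (20, 20) symmetric weighted adjacency matrix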
import logging
from unittest import mock
import pytest
from datahub.email_ingestion.validation import was_email_sent_by_dit
@pytest.mark.parametrize(
'email,authentication_results,expected_result,expected_warning',
(
# Valid trade.gov.uk email - authentication exempt
(
'<EMAIL>',
None,
True,
None,
),
# Valid digital.trade.gov.uk email, ensure whitelist is case insensitive
(
'<EMAIL>',
'\n'.join([
'mx.google.com;',
'dkim=pass header.i=<EMAIL> header.s=selector1 header.b=foobar;',
'spf=pass (google.com: domain of <EMAIL> designates '
'XX.XXX.XX.XX as permitted sender) smtp.mailfrom=<EMAIL>;',
'dmarc=pass (p=QUARANTINE sp=QUARANTINE dis=NONE) '
'header.from=digital.trade.gov.uk',
'compauth=pass (reason=109)',
]),
True,
None,
),
# Invalid authentication results - dkim
(
'<EMAIL>',
'\n'.join([
'mx.google.com;',
'dkim=fail header.i=<EMAIL> header.s=selector1 header.b=foobar;',
'spf=pass (google.com: domain of <EMAIL> designates '
'XX.XXX.XX.XX as permitted sender) smtp.mailfrom=<EMAIL>;',
(
'dmarc=pass (p=QUARANTINE sp=QUARANTINE dis=NONE) '
'header.from=digital.trade.gov.uk'
),
'compauth=pass (reason=109)',
]),
False,
None,
),
# Invalid authentication results - spf
(
'<EMAIL>',
'\n'.join([
'mx.google.com;',
'dkim=pass header.i=<EMAIL> header.s=selector1 header.b=foobar;',
'spf=fail (google.com: domain of <EMAIL> designates '
'XX.XXX.XX.XX as permitted sender) smtp.mailfrom=<EMAIL>;',
(
'dmarc=pass (p=QUARANTINE sp=QUARANTINE dis=NONE) '
'header.from=digital.trade.gov.uk'
),
'compauth=pass (reason=109)',
]),
False,
None,
),
# Invalid authentication results - dmarc
(
'<EMAIL>',
'\n'.join([
'mx.google.com;',
'dkim=pass header.i=<EMAIL> header.s=selector1 header.b=foobar;',
'spf=pass (google.com: domain of <EMAIL> designates ',
'XX.XXX.XX.XX as permitted sender) smtp.mailfrom=<EMAIL>;',
(
'dmarc=fail (p=QUARANTINE sp=QUARANTINE dis=NONE) '
'header.from=digital.trade.gov.uk'
),
'compauth=pass (reason=109)',
]),
False,
None,
),
# Missing authentication results for spf
(
'<EMAIL>',
'\n'.join([
'mx.google.com;',
'dkim=pass header.i=<EMAIL> header.s=selector1 header.b=foobar;',
(
'dmarc=pass (p=QUARANTINE sp=QUARANTINE dis=NONE) '
'header.from=digital.trade.gov.uk'
),
'compauth=pass (reason=109)',
]),
False,
None,
),
# Extra unknown auth method - still passes
(
'<EMAIL>',
'\n'.join([
'mx.google.com;',
'dkim=pass header.i=<EMAIL> header.s=selector1 header.b=foobar;',
'spf=pass (google.com: domain of <EMAIL> designates '
'XX.XXX.XX.XX as permitted sender) smtp.mailfrom=<EMAIL>;',
'dmarc=pass (p=QUARANTINE sp=QUARANTINE dis=NONE) '
'header.from=digital.trade.gov.uk;',
'sender-id=fail header.from=example.com',
'compauth=pass (reason=109)',
]),
True,
None,
),
# Domain which is not on DIT_EMAIL_DOMAINS setting, fails validation
(
'<EMAIL>',
'\n'.join([
'mx.google.com;',
'dkim=pass header.i=<EMAIL> header.s=selector1 header.b=foobar;',
'spf=pass (google.com: domain of <EMAIL> designates '
'XX.XXX.XX.XX as permitted sender) smtp.mailfrom=<EMAIL>;',
'dmarc=pass (p=QUARANTINE sp=QUARANTINE dis=NONE) '
'header.from=digital.trade.gov.uk;',
'compauth=pass (reason=109)',
]),
False,
(
'Domain "other.trade.gov.uk" not present in DIT_EMAIL_DOMAINS setting.'
),
),
# Domain which is not on DIT_EMAIL_DOMAINS setting, fails validation
(
'<EMAIL>',
'\n'.join([
'mx.google.com;',
'dkim=pass header.i=<EMAIL> header.s=selector1 header.b=foobar;',
'spf=pass (google.com: domain of <EMAIL> designates '
'XX.XXX.XX.XX as permitted sender) smtp.mailfrom=<EMAIL>;',
'dmarc=pass (p=QUARANTINE sp=QUARANTINE dis=NONE) '
'header.from=digital.trade.gov.uk;',
]),
False,
(
'Domain "gmail.com" not present in DIT_EMAIL_DOMAINS setting.'
),
),
# Blacklisted email
(
'<EMAIL>',
None,
False,
None,
),
),
)
def test_email_sent_by_dit(
caplog,
email,
authentication_results,
expected_result,
expected_warning,
):
"""
Tests for was_email_sent_by_dit validator.
"""
caplog.set_level(logging.ERROR)
message = mock.Mock()
message.from_ = [['<NAME>', email]]
message.authentication_results = authentication_results
result = was_email_sent_by_dit(message)
assert result == expected_result
if expected_warning:
expected_log = (
'datahub.email_ingestion.validation',
40,
expected_warning,
)
assert expected_log in caplog.record_tuples
def test_bad_from_returns_false():
"""
Test was_email_sent_by_dit validator when the from_ attribute is malformed.
"""
message = mock.Mock()
# This should be an iterable of pairs - simulate a malformed from attribute
message.from_ = []
result = was_email_sent_by_dit(message)
assert result is False
| [
"datahub.email_ingestion.validation.was_email_sent_by_dit",
"unittest.mock.Mock"
] | [((5864, 5875), 'unittest.mock.Mock', 'mock.Mock', ([], {}), '()\n', (5873, 5875), False, 'from unittest import mock\n'), ((5989, 6019), 'datahub.email_ingestion.validation.was_email_sent_by_dit', 'was_email_sent_by_dit', (['message'], {}), '(message)\n', (6010, 6019), False, 'from datahub.email_ingestion.validation import was_email_sent_by_dit\n'), ((6412, 6423), 'unittest.mock.Mock', 'mock.Mock', ([], {}), '()\n', (6421, 6423), False, 'from unittest import mock\n'), ((6540, 6570), 'datahub.email_ingestion.validation.was_email_sent_by_dit', 'was_email_sent_by_dit', (['message'], {}), '(message)\n', (6561, 6570), False, 'from datahub.email_ingestion.validation import was_email_sent_by_dit\n')] |
import os
from pathlib import Path
import numpy as np
class MarkovText:
def __init__(self, text_path, first_word=False, raw_output=False, remake_dict=False, sentence_mode=True):
self.text_path = text_path
self.raw_output = raw_output
self.remake_dict = remake_dict
self.dict_path = Path(os.getenv('APPDATA') + '\\Markov_TextGen\\Dicts\\')
self.dict_list = self.get_existing_dicts()
self.sentence_mode = sentence_mode
self.first_word = first_word
# self.start_words = get_start_words
def get_existing_dicts(self):
dict_list = []
if self.dict_path.exists():
for i in os.listdir(self.dict_path):
                if Path(i).suffix == '.json' and 'dict_' in Path(i).stem:
dict_list.append(Path(i))
return dict_list
else:
os.makedirs(self.dict_path)
return dict_list
def make_pairs(self):
text = open(self.text_path).read().split()
for i in range(len(text) - 1):
yield text[i], text[i + 1]
def make_dict(self):
word_dict = {}
for word_1, word_2 in self.make_pairs():
if word_1 in word_dict.keys():
word_dict[word_1].append(word_2)
else:
word_dict[word_1] = [word_2]
return word_dict
# def get_dict(self):
def generate_text(self, word_dict):
if not self.first_word:
            chain = [np.random.choice(open(self.text_path).read().split())]
else:
chain = [self.first_word]
while True:
curr_word = np.random.choice(word_dict[chain[-1]])
chain.append(curr_word)
if any(x in curr_word for x in ('!', '?', '.')) and not any(x in curr_word for x in ('Mr.', 'Ms.', 'Mrs.')):
return chain
def generate_sentences(self, word_dict, num_sentences):
sentences = []
for i in range(num_sentences):
if self.first_word == 'NullNull':
                chain = [np.random.choice(open(self.text_path).read().split())]
else:
chain = [self.first_word]
while True:
curr_word = np.random.choice(word_dict[chain[-1]])
chain.append(curr_word)
if any(x in curr_word for x in ('!', '?', '.')) and not any(
x in curr_word for x in ('Mr.', 'Ms.', 'Mrs.')):
sentences.append(chain)
break
return sentences
| [
"os.listdir",
"os.makedirs",
"pathlib.Path",
"numpy.random.choice",
"os.getenv"
] | [((670, 696), 'os.listdir', 'os.listdir', (['self.dict_path'], {}), '(self.dict_path)\n', (680, 696), False, 'import os\n'), ((887, 914), 'os.makedirs', 'os.makedirs', (['self.dict_path'], {}), '(self.dict_path)\n', (898, 914), False, 'import os\n'), ((1649, 1687), 'numpy.random.choice', 'np.random.choice', (['word_dict[chain[-1]]'], {}), '(word_dict[chain[-1]])\n', (1665, 1687), True, 'import numpy as np\n'), ((326, 346), 'os.getenv', 'os.getenv', (['"""APPDATA"""'], {}), "('APPDATA')\n", (335, 346), False, 'import os\n'), ((2234, 2272), 'numpy.random.choice', 'np.random.choice', (['word_dict[chain[-1]]'], {}), '(word_dict[chain[-1]])\n', (2250, 2272), True, 'import numpy as np\n'), ((823, 830), 'pathlib.Path', 'Path', (['i'], {}), '(i)\n', (827, 830), False, 'from pathlib import Path\n')] |
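An illustrative use of the MarkovText class above; the corpus path and seed word are assumptions, and the seed word must actually occur in the corpus for the chain lookup to succeed.

mt = MarkovText('corpus.txt', first_word='The')   # hypothetical plain-text corpus
word_dict = mt.make_dict()
sentence = mt.generate_text(word_dict)            # list of words ending at ., ! or ?
print(' '.join(sentence))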
import json
import boto3
import os
ddb = boto3.resource('dynamodb')
table = ddb.Table(os.environ['CHARACTER_TABLE'])
def handler(event, context):
print('request: {}'.format(json.dumps(event)))
response = table.scan(Select='ALL_ATTRIBUTES')
return {
'statusCode': 200,
'headers': {
'Content-Type': 'text/plain'
},
'body': json.dumps(response['Items'])
}
| [
"boto3.resource",
"json.dumps"
] | [((42, 68), 'boto3.resource', 'boto3.resource', (['"""dynamodb"""'], {}), "('dynamodb')\n", (56, 68), False, 'import boto3\n'), ((379, 408), 'json.dumps', 'json.dumps', (["response['Items']"], {}), "(response['Items'])\n", (389, 408), False, 'import json\n'), ((179, 196), 'json.dumps', 'json.dumps', (['event'], {}), '(event)\n', (189, 196), False, 'import json\n')] |
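A hedged sketch of invoking the Lambda handler above outside of AWS; the module name app, the table name and the event shape are assumptions, and real AWS credentials plus an existing DynamoDB table are required because the boto3 resource is created at import time.

import os
os.environ.setdefault('CHARACTER_TABLE', 'characters')  # must be set before the import below
import app                                               # hypothetical module containing the handler
result = app.handler({'httpMethod': 'GET'}, None)       # event/context values are illustrative
print(result['statusCode'], result['body'])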
import pygame
import os
import src
white = pygame.Color(255, 255, 255)
black = pygame.Color(0, 0, 0)
red = pygame.Color(255, 0, 0)
button_colour = pygame.Color(75, 0, 130)
hover_colour = pygame.Color(255,105,180)
def Button(name, x, y, w, dist, action=None):
'''
    draws a button labelled `name` at (x, y) with width w, highlights it on hover
    and calls `action` when the left mouse button is clicked on it
'''
    pos = pygame.mouse.get_pos()  # current (x, y) position of the mouse cursor
    click = pygame.mouse.get_pressed()  # state of the mouse buttons (index 0 is the left button)
    if x < pos[0] < x+w and y < pos[1] < y+40:  # the mouse is hovering over the button rectangle
# the pygame.draw.rect x,y,w, and 40 are the dimensions.
pygame.draw.rect(src.display, hover_colour, [x, y, w, 40]) # change the hover colour
# if someone clicked and there is an action associated with that button
if click[0] == 1 and action != None:
action() # do that action
else: # or
pygame.draw.rect(src.display, button_colour, [x, y, w, 40]) # just draw and hold it
font = pygame.font.Font(src.bitterfont, 15) # font object
    # renders the button label on the screen
text = font.render(name, True, white)
text_rect = text.get_rect(center=((x+ (x+w)) /2, (y + (y+40))/2))
src.display.blit(text, text_rect)
def ButtonWithReturn(name, x, y, w, dist, val=None):
'''
    same as Button above, but returns `val` when the button is clicked
    instead of calling an action
'''
pos = pygame.mouse.get_pos() # pygame method for mouse. it gets the mouse's position (x,y) on the screen
click = pygame.mouse.get_pressed() # pygame method to see if it is pressed
if x < pos[0] < x+w and y < pos[1] < y+40: # if mouse is on the button
pygame.draw.rect(src.display, hover_colour, [x, y, w, 40]) # change the hover colour
if click[0] == 1: # if clicked while it is on
return val # do what the button calls for
else: # if not
pygame.draw.rect(src.display, button_colour, [x, y, w, 40]) # just draw it
font = pygame.font.Font(src.bitterfont, 14) # font object
    # renders the button label on the screen
text = font.render(name, True, white)
text_rect = text.get_rect(center=((x+ (x+w)) /2, (y + (y+40))/2))
src.display.blit(text, text_rect) # shows the text
return 0 # return 0 from the button function
def AddText(text, pos, color=white):
'''
add texts to the screen
takes text which is a string object as an argument
'''
font = pygame.font.Font(src.bitterfont, 16) # creating font with size
# creating font pygame text object with size, colour and text
renderedText = font.render(text, True, color)
# displaying text on the screen, pos is the position of where it should appear
src.display.blit(renderedText, pos)
def InsertSecret(text):
'''
    reads a string typed by the user, used as input for the algorithm efficiency function.
'''
pygame.display.set_caption(text)
inpText = "" # empty placeholder
enter = True # enable enter
while enter: # starting the algorithm
        # clear the screen by filling it with a solid colour before redrawing
src.display.fill(black)
src.display.blit(src.bg,(0,0))
        # iterate over the pending pygame events to decide what to do with each one
        for event in pygame.event.get():
if event.type == pygame.QUIT: # this one quits
pygame.quit() # putting the quit pygame method
exit() # takes the user from GUI to the script for exiting
            if event.type == pygame.KEYUP: # recognise when a keyboard key is released
if event.key == pygame.K_ESCAPE: # if that keyboard key is ESC
exit() # call for the exit function.
if event.type == pygame.KEYDOWN: # if a key is pressed
if event.key == pygame.K_RETURN: # and if this key is enter
enter = False # enter changes the status of true to false and ends the loop, you entered what you wanted
elif event.key == pygame.K_BACKSPACE: # if backspace is pressed
# backspace deletes the last letter of the input. this [:-1] called slicing
inpText = inpText[:-1]
else: # if none of this happened
inpText += event.unicode # takes care of capslocks and shiftkeys
AddText("Press \"ENTER\" to continue...!", (128, 270), white)
AddText(text, (128, 220), white) # displaying the text
pygame.draw.rect(src.display, white, (290, 215, 250, 40)) # displaying the text
AddText(inpText, (295, 225), black) # displaying the text
# updates the screen every turn
pygame.display.flip()
        # will not run more than 60 frames per second
        src.clock.tick(60)  # cap the loop at 60 frames per second
return inpText
| [
"src.clock.tick",
"pygame.mouse.get_pressed",
"pygame.quit",
"pygame.event.get",
"pygame.display.flip",
"pygame.mouse.get_pos",
"pygame.draw.rect",
"src.display.fill",
"pygame.display.set_caption",
"pygame.Color",
"pygame.font.Font",
"src.display.blit"
] | [((44, 71), 'pygame.Color', 'pygame.Color', (['(255)', '(255)', '(255)'], {}), '(255, 255, 255)\n', (56, 71), False, 'import pygame\n'), ((80, 101), 'pygame.Color', 'pygame.Color', (['(0)', '(0)', '(0)'], {}), '(0, 0, 0)\n', (92, 101), False, 'import pygame\n'), ((108, 131), 'pygame.Color', 'pygame.Color', (['(255)', '(0)', '(0)'], {}), '(255, 0, 0)\n', (120, 131), False, 'import pygame\n'), ((148, 172), 'pygame.Color', 'pygame.Color', (['(75)', '(0)', '(130)'], {}), '(75, 0, 130)\n', (160, 172), False, 'import pygame\n'), ((188, 215), 'pygame.Color', 'pygame.Color', (['(255)', '(105)', '(180)'], {}), '(255, 105, 180)\n', (200, 215), False, 'import pygame\n'), ((421, 443), 'pygame.mouse.get_pos', 'pygame.mouse.get_pos', ([], {}), '()\n', (441, 443), False, 'import pygame\n'), ((513, 539), 'pygame.mouse.get_pressed', 'pygame.mouse.get_pressed', ([], {}), '()\n', (537, 539), False, 'import pygame\n'), ((1131, 1167), 'pygame.font.Font', 'pygame.font.Font', (['src.bitterfont', '(15)'], {}), '(src.bitterfont, 15)\n', (1147, 1167), False, 'import pygame\n'), ((1337, 1370), 'src.display.blit', 'src.display.blit', (['text', 'text_rect'], {}), '(text, text_rect)\n', (1353, 1370), False, 'import src\n'), ((1570, 1592), 'pygame.mouse.get_pos', 'pygame.mouse.get_pos', ([], {}), '()\n', (1590, 1592), False, 'import pygame\n'), ((1682, 1708), 'pygame.mouse.get_pressed', 'pygame.mouse.get_pressed', ([], {}), '()\n', (1706, 1708), False, 'import pygame\n'), ((2145, 2181), 'pygame.font.Font', 'pygame.font.Font', (['src.bitterfont', '(14)'], {}), '(src.bitterfont, 14)\n', (2161, 2181), False, 'import pygame\n'), ((2351, 2384), 'src.display.blit', 'src.display.blit', (['text', 'text_rect'], {}), '(text, text_rect)\n', (2367, 2384), False, 'import src\n'), ((2605, 2641), 'pygame.font.Font', 'pygame.font.Font', (['src.bitterfont', '(16)'], {}), '(src.bitterfont, 16)\n', (2621, 2641), False, 'import pygame\n'), ((2872, 2907), 'src.display.blit', 'src.display.blit', (['renderedText', 'pos'], {}), '(renderedText, pos)\n', (2888, 2907), False, 'import src\n'), ((3035, 3067), 'pygame.display.set_caption', 'pygame.display.set_caption', (['text'], {}), '(text)\n', (3061, 3067), False, 'import pygame\n'), ((761, 819), 'pygame.draw.rect', 'pygame.draw.rect', (['src.display', 'hover_colour', '[x, y, w, 40]'], {}), '(src.display, hover_colour, [x, y, w, 40])\n', (777, 819), False, 'import pygame\n'), ((1035, 1094), 'pygame.draw.rect', 'pygame.draw.rect', (['src.display', 'button_colour', '[x, y, w, 40]'], {}), '(src.display, button_colour, [x, y, w, 40])\n', (1051, 1094), False, 'import pygame\n'), ((1834, 1892), 'pygame.draw.rect', 'pygame.draw.rect', (['src.display', 'hover_colour', '[x, y, w, 40]'], {}), '(src.display, hover_colour, [x, y, w, 40])\n', (1850, 1892), False, 'import pygame\n'), ((2058, 2117), 'pygame.draw.rect', 'pygame.draw.rect', (['src.display', 'button_colour', '[x, y, w, 40]'], {}), '(src.display, button_colour, [x, y, w, 40])\n', (2074, 2117), False, 'import pygame\n'), ((3269, 3292), 'src.display.fill', 'src.display.fill', (['black'], {}), '(black)\n', (3285, 3292), False, 'import src\n'), ((3301, 3333), 'src.display.blit', 'src.display.blit', (['src.bg', '(0, 0)'], {}), '(src.bg, (0, 0))\n', (3317, 3333), False, 'import src\n'), ((3461, 3479), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (3477, 3479), False, 'import pygame\n'), ((4773, 4830), 'pygame.draw.rect', 'pygame.draw.rect', (['src.display', 'white', '(290, 215, 250, 40)'], {}), '(src.display, white, (290, 215, 250, 
40))\n', (4789, 4830), False, 'import pygame\n'), ((4969, 4990), 'pygame.display.flip', 'pygame.display.flip', ([], {}), '()\n', (4988, 4990), False, 'import pygame\n'), ((5053, 5071), 'src.clock.tick', 'src.clock.tick', (['(60)'], {}), '(60)\n', (5067, 5071), False, 'import src\n'), ((3627, 3640), 'pygame.quit', 'pygame.quit', ([], {}), '()\n', (3638, 3640), False, 'import pygame\n')] |
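An illustrative event loop built on the Button helpers above; it assumes the src module has already created src.display, src.clock and the font path during initialization, as the sample's helpers require.

def say_hello():
    print('button clicked')

running = True
while running:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            running = False
    src.display.fill(black)
    Button('Hello', 100, 100, 120, 0, action=say_hello)          # calls say_hello when clicked
    if ButtonWithReturn('Quit', 100, 160, 120, 0, val=1) == 1:    # returns val when clicked
        running = False
    pygame.display.flip()
    src.clock.tick(60)
pygame.quit()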
from django.shortcuts import redirect
from django.views.generic import CreateView
import users.forms
from django.contrib.auth import login
from core.views import TitleMixin
from django.contrib.auth.views import LoginView, LogoutView
from django.urls import reverse
class UserLoginView(TitleMixin, LoginView):
title = 'Вход'
template_name = 'users/login.html'
def get_success_url(self):
return reverse('core:home')
class UserLogoutView(LogoutView):
next_page = 'core:home'
class UserRegisterView(TitleMixin, CreateView):
title = 'Регистрация'
form_class = users.forms.UserRegisterForm
template_name = 'users/register.html'
def dispatch(self, request, *args, **kwargs):
if request.user.is_authenticated:
return redirect('core:home')
return super(UserRegisterView, self).dispatch(request, *args, **kwargs)
def form_valid(self, form):
user = form.save(commit=False)
user.set_password(form.cleaned_data['<PASSWORD>'])
user.save()
login(self.request, user)
return redirect('core:home')
| [
"django.shortcuts.redirect",
"django.contrib.auth.login",
"django.urls.reverse"
] | [((416, 436), 'django.urls.reverse', 'reverse', (['"""core:home"""'], {}), "('core:home')\n", (423, 436), False, 'from django.urls import reverse\n'), ((1038, 1063), 'django.contrib.auth.login', 'login', (['self.request', 'user'], {}), '(self.request, user)\n', (1043, 1063), False, 'from django.contrib.auth import login\n'), ((1079, 1100), 'django.shortcuts.redirect', 'redirect', (['"""core:home"""'], {}), "('core:home')\n", (1087, 1100), False, 'from django.shortcuts import redirect\n'), ((777, 798), 'django.shortcuts.redirect', 'redirect', (['"""core:home"""'], {}), "('core:home')\n", (785, 798), False, 'from django.shortcuts import redirect\n')] |
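An illustrative urls.py wiring for the views above; the module path users.views, the route paths and the URL names are assumptions.

from django.urls import path
from users import views

app_name = 'users'
urlpatterns = [
    path('login/', views.UserLoginView.as_view(), name='login'),
    path('logout/', views.UserLogoutView.as_view(), name='logout'),
    path('register/', views.UserRegisterView.as_view(), name='register'),
]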
"""
This file contains the code for commands that target a bonsai imported model in version 2 of the bonsai command line.
"""
__author__ = "<NAME>"
__copyright__ = "Copyright 2021, Microsoft Corp."
from typing import Any, Dict, List
import click
import os
import time
from json import dumps
from bonsai_cli.exceptions import AuthenticationError, BrainServerError
from bonsai_cli.utils import (
api,
get_version_checker,
raise_204_click_exception,
raise_as_click_exception,
raise_brain_server_error_as_click_exception,
raise_not_found_as_click_exception,
raise_unique_constraint_violation_as_click_exception,
)
@click.group(hidden=True)
def importedmodel():
"""Imported model operations."""
pass
@click.command("create", short_help="Create a imported model.")
@click.option("--name", "-n", help="[Required] Name of the imported model.")
@click.option("--modelfilepath", "-m", help="[Required] ModelFilePath on local system.")
@click.option("--display-name", help="Display name of the imported model.")
@click.option("--description", help="Description for the imported model.")
@click.option(
"--workspace-id",
"-w",
help="Please provide the workspace id if you would like to override the default target workspace. If your current Azure Active Directory login does not have access to this workspace, you will need to configure the workspace using bonsai configure.",
)
@click.option(
"--no-wait",
default=False,
is_flag=True,
help="If set to true, do not wait for the imported model operation that may be long-running to complete.",
)
@click.option(
"--debug", default=False, is_flag=True, help="Verbose logging for request."
)
@click.option("--output", "-o", help="Set output, only json supported.")
@click.option(
"--test",
default=False,
is_flag=True,
help="Enhanced response for testing.",
hidden=True,
)
@click.pass_context
def create_importedmodel(
ctx: click.Context,
name: str,
modelfilepath: str,
display_name: str,
description: str,
workspace_id: str,
no_wait: bool,
debug: bool,
output: str,
test: bool,
):
version_checker = get_version_checker(ctx, interactive=not output)
error_msg = ""
required_options_provided = True
if not name:
required_options_provided = False
error_msg += "\nImported model name is required"
if not modelfilepath:
required_options_provided = False
error_msg += "\nModelfilepath is required"
if not required_options_provided:
raise_as_click_exception(error_msg)
try:
tic = time.perf_counter()
response = api(use_aad=True).upload_importedmodel(
name, modelfilepath, debug=debug
)
toc = time.perf_counter()
size = os.path.getsize(modelfilepath)
print(
f"step 1: Uploading {modelfilepath} of size:{size*0.000001} MB is successful in {toc - tic:0.4f} seconds."
)
response = api(use_aad=True).create_importedmodel(
name=name,
uploaded_file_path=response["modelFileStoragePath"],
display_name=display_name,
description=description,
workspace=workspace_id,
debug=debug,
output=output,
)
if not no_wait:
print("step 2: Imported model creation may take a while. Please wait...")
while response["operationStatus"] not in ["Succeeded", "Failed"]:
print(
"step 3: Finalizing the imported model. The current status is "
+ response["operationStatus"]
+ ". Please wait..."
)
response = api(use_aad=True).get_importedmodel(
name=name, workspace=workspace_id
)
time.sleep(10)
if response["operationStatus"] == "Succeeded":
statusMessage = "Created new imported model {} successfully.".format(
response["name"]
)
else:
raise_as_click_exception(response["operationStatusMessage"])
if output == "json":
json_response = {
"status": response["operationStatus"],
"statusCode": response["statusCode"],
"statusMessage": statusMessage,
}
click.echo(dumps(json_response, indent=4))
else:
click.echo(statusMessage)
else:
click.echo(
"imported model creation is still in progress. Please use bonsai importedmodel show -n <importedmodelname> to check the status."
)
except BrainServerError as e:
if "Unique index constraint violation" in str(e):
raise_unique_constraint_violation_as_click_exception(
debug, output, "Imported model", name, test, e
)
else:
raise_as_click_exception(e)
except AuthenticationError as e:
raise_as_click_exception(e)
version_checker.check_cli_version(wait=True, print_up_to_date=False)
@click.command("show", short_help="Show information about imported model.")
@click.option(
"--name",
"-n",
help="[Required] The name of the imported model to show.",
)
@click.option(
"--workspace-id",
"-w",
help="Please provide the workspace id if you would like to override the default target workspace. If your current Azure Active Directory login does not have access to this workspace, you will need to configure the workspace using bonsai configure.",
)
@click.option(
"--debug", default=False, is_flag=True, help="Verbose logging for request."
)
@click.option("--output", "-o", help="Set output, only json supported.")
@click.option(
"--test",
default=False,
is_flag=True,
help="Enhanced response for testing.",
hidden=True,
)
@click.pass_context
def show_importedmodel(
ctx: click.Context,
name: str,
workspace_id: str,
debug: bool,
output: str,
test: bool,
):
version_checker = get_version_checker(ctx, interactive=not output)
if not name:
raise_as_click_exception("\nName of the imported model is required")
try:
response = api(use_aad=True).get_importedmodel(
name, workspace=workspace_id, debug=debug, output=output
)
except BrainServerError as e:
if e.exception["statusCode"] == 404:
raise_not_found_as_click_exception(
debug,
output,
"Show imported model",
"Imported model",
name,
test,
e,
)
else:
raise_brain_server_error_as_click_exception(debug, output, test, e)
except AuthenticationError as e:
raise_as_click_exception(e)
if output == "json":
json_response = {
"name": response["name"],
"displayName": response["displayName"],
"description": response["description"],
"importedModelType": response["importedModelType"],
"createdOn": response["createdTimeStamp"],
"modifiedOn": response["createdTimeStamp"],
"Status": response["operationStatus"],
"StatusMessage": response["operationStatusMessage"],
}
click.echo(dumps(json_response, indent=4))
else:
click.echo("Name: {}".format(response["name"]))
click.echo("Display Name: {}".format(response["displayName"]))
click.echo("Description: {}".format(response["description"]))
click.echo("Imported Model Type: {}".format(response["importedModelType"]))
click.echo("Created On: {}".format(response["createdTimeStamp"]))
click.echo("Modified On: {}".format(response["createdTimeStamp"]))
click.echo("Status: {}".format(response["operationStatus"]))
version_checker.check_cli_version(wait=True, print_up_to_date=False)
@click.command("update", short_help="Update information about a imported model")
@click.option("--name", "-n", help="[Required] Name of the imported model.")
@click.option("--display-name", help="Display name of the imported model.")
@click.option("--description", help="Description for the imported model.")
@click.option(
"--workspace-id",
"-w",
help="Please provide the workspace id if you would like to override the default target workspace. If your current Azure Active Directory login does not have access to this workspace, you will need to configure the workspace using bonsai configure.",
)
@click.option(
"--debug", default=False, is_flag=True, help="Verbose logging for request."
)
@click.option("--output", "-o", help="Set output, only json supported.")
@click.option(
"--test",
default=False,
is_flag=True,
help="Enhanced response for testing.",
hidden=True,
)
@click.pass_context
def update_importedmodel(
ctx: click.Context,
name: str,
display_name: str,
description: str,
workspace_id: str,
debug: bool,
output: str,
test: bool,
):
version_checker = get_version_checker(ctx, interactive=not output)
if not name:
raise_as_click_exception("\nName of the imported model is required")
if not (display_name or description):
raise_as_click_exception(
"\nDisplay Name or description for the imported model must be updated."
)
try:
response = api(use_aad=True).update_importedmodel(
name=name,
display_name=display_name,
description=description,
workspace=workspace_id,
debug=debug,
output=output,
)
except BrainServerError as e:
if e.exception["statusCode"] == 404:
raise_not_found_as_click_exception(
debug,
output,
"Update imported model",
"ImportedModel",
name,
test,
e,
)
else:
raise_brain_server_error_as_click_exception(debug, output, test, e)
except AuthenticationError as e:
raise_as_click_exception(e)
status_message = "Updated {}.".format(response["name"])
if output == "json":
json_response = {
"status": response["operationStatus"],
"statusCode": response["statusCode"],
"statusMessage": status_message,
}
if test:
json_response["elapsed"] = str(response["elapsed"])
json_response["timeTaken"] = str(response["timeTaken"])
click.echo(dumps(json_response, indent=4))
else:
click.echo(status_message)
version_checker.check_cli_version(wait=True, print_up_to_date=False)
@click.command("list", short_help="Lists imported model owned by current user.")
@click.option(
"--workspace-id",
"-w",
help="Please provide the workspace id if you would like to override the default target workspace. If your current Azure Active Directory login does not have access to this workspace, you will need to configure the workspace using bonsai configure.",
)
@click.option(
"--debug", default=False, is_flag=True, help="Verbose logging for request."
)
@click.option("--output", "-o", help="Set output, only json supported.")
@click.option(
"--test",
default=False,
is_flag=True,
help="Enhanced response for testing.",
hidden=True,
)
@click.pass_context
def list_importedmodel(
ctx: click.Context, workspace_id: str, debug: bool, output: str, test: bool
):
version_checker = get_version_checker(ctx, interactive=not output)
try:
response = api(use_aad=True).list_importedmodels(
workspace=workspace_id, debug=debug
)
except BrainServerError as e:
raise_brain_server_error_as_click_exception(debug, output, test, e)
except AuthenticationError as e:
raise_as_click_exception(e)
if len(response["value"]) == 0:
click.echo("No imported models exist for the current user")
ctx.exit()
if output == "json":
dict_rows: List[Dict[str, Any]] = []
for imported_model in response["value"]:
dict_rows.append(imported_model["name"])
json_response = {
"value": dict_rows,
"status": response["operationStatus"],
"statusCode": response["statusCode"],
"statusMessage": "",
}
if test:
json_response["elapsed"] = str(response["elapsed"])
json_response["timeTaken"] = str(response["timeTaken"])
click.echo(dumps(json_response, indent=4))
else:
for imported_model in response["value"]:
click.echo(imported_model["name"])
version_checker.check_cli_version(wait=True, print_up_to_date=False)
@click.command("delete", short_help="Delete a imported model.")
@click.option(
"--name",
"-n",
help="[Required] The name of the imported model to delete.",
)
@click.option(
"--yes", "-y", default=False, is_flag=True, help="Do not prompt for confirmation."
)
@click.option(
"--workspace-id",
"-w",
help="Please provide the workspace id if you would like to override the default target workspace. If your current Azure Active Directory login does not have access to this workspace, you will need to configure the workspace using bonsai configure.",
)
@click.option(
"--debug",
default=False,
is_flag=True,
help="Increase logging verbosity to show all logs.",
)
@click.option("--output", "-o", help="Set output, only json supported.")
@click.option(
"--test",
default=False,
is_flag=True,
help="Enhanced response for testing.",
hidden=True,
)
@click.pass_context
def delete_importedmodel(
ctx: click.Context,
name: str,
yes: bool,
workspace_id: str,
debug: bool,
output: str,
test: bool,
):
version_checker = get_version_checker(ctx, interactive=True)
if not name:
raise_as_click_exception("\nName of the imported model is required")
is_delete = False
if yes:
is_delete = True
if not yes:
click.echo(
"Are you sure you want to delete imported model {} (y/n?).".format(name)
)
choice = input().lower()
yes_set = {"yes", "y"}
no_set = {"no", "n"}
if choice in yes_set:
is_delete = True
elif choice in no_set:
is_delete = False
else:
raise_as_click_exception("\nPlease respond with 'y' or 'n'")
if is_delete:
try:
response = api(use_aad=True).delete_importedmodel(
name, workspace=workspace_id, debug=debug
)
if response["statusCode"] == 204:
raise_204_click_exception(
debug,
output,
test,
204,
"Imported model '{}' not found".format(name),
response,
)
except BrainServerError as e:
raise_brain_server_error_as_click_exception(debug, output, test, e)
except AuthenticationError as e:
raise_as_click_exception(e)
status_message = "Deleted {}.".format(name)
if output == "json":
json_response = {
"status": response["operationStatus"],
"statusCode": response["statusCode"],
"statusMessage": status_message,
}
if test:
json_response["elapsed"] = str(response["elapsed"])
json_response["timeTaken"] = str(response["timeTaken"])
click.echo(dumps(json_response, indent=4))
else:
click.echo(status_message)
version_checker.check_cli_version(wait=True, print_up_to_date=False)
importedmodel.add_command(create_importedmodel)
importedmodel.add_command(show_importedmodel)
importedmodel.add_command(update_importedmodel)
importedmodel.add_command(list_importedmodel)
importedmodel.add_command(delete_importedmodel)
| [
"os.path.getsize",
"click.group",
"click.option",
"bonsai_cli.utils.api",
"json.dumps",
"bonsai_cli.utils.raise_not_found_as_click_exception",
"time.perf_counter",
"time.sleep",
"bonsai_cli.utils.raise_unique_constraint_violation_as_click_exception",
"bonsai_cli.utils.raise_as_click_exception",
"click.echo",
"bonsai_cli.utils.raise_brain_server_error_as_click_exception",
"bonsai_cli.utils.get_version_checker",
"click.command"
] | [((644, 668), 'click.group', 'click.group', ([], {'hidden': '(True)'}), '(hidden=True)\n', (655, 668), False, 'import click\n'), ((739, 801), 'click.command', 'click.command', (['"""create"""'], {'short_help': '"""Create a imported model."""'}), "('create', short_help='Create a imported model.')\n", (752, 801), False, 'import click\n'), ((803, 878), 'click.option', 'click.option', (['"""--name"""', '"""-n"""'], {'help': '"""[Required] Name of the imported model."""'}), "('--name', '-n', help='[Required] Name of the imported model.')\n", (815, 878), False, 'import click\n'), ((880, 972), 'click.option', 'click.option', (['"""--modelfilepath"""', '"""-m"""'], {'help': '"""[Required] ModelFilePath on local system."""'}), "('--modelfilepath', '-m', help=\n '[Required] ModelFilePath on local system.')\n", (892, 972), False, 'import click\n'), ((969, 1043), 'click.option', 'click.option', (['"""--display-name"""'], {'help': '"""Display name of the imported model."""'}), "('--display-name', help='Display name of the imported model.')\n", (981, 1043), False, 'import click\n'), ((1045, 1118), 'click.option', 'click.option', (['"""--description"""'], {'help': '"""Description for the imported model."""'}), "('--description', help='Description for the imported model.')\n", (1057, 1118), False, 'import click\n'), ((1120, 1416), 'click.option', 'click.option', (['"""--workspace-id"""', '"""-w"""'], {'help': '"""Please provide the workspace id if you would like to override the default target workspace. If your current Azure Active Directory login does not have access to this workspace, you will need to configure the workspace using bonsai configure."""'}), "('--workspace-id', '-w', help=\n 'Please provide the workspace id if you would like to override the default target workspace. 
If your current Azure Active Directory login does not have access to this workspace, you will need to configure the workspace using bonsai configure.'\n )\n", (1132, 1416), False, 'import click\n'), ((1423, 1594), 'click.option', 'click.option', (['"""--no-wait"""'], {'default': '(False)', 'is_flag': '(True)', 'help': '"""If set to true, do not wait for the imported model operation that may be long-running to complete."""'}), "('--no-wait', default=False, is_flag=True, help=\n 'If set to true, do not wait for the imported model operation that may be long-running to complete.'\n )\n", (1435, 1594), False, 'import click\n'), ((1605, 1699), 'click.option', 'click.option', (['"""--debug"""'], {'default': '(False)', 'is_flag': '(True)', 'help': '"""Verbose logging for request."""'}), "('--debug', default=False, is_flag=True, help=\n 'Verbose logging for request.')\n", (1617, 1699), False, 'import click\n'), ((1702, 1773), 'click.option', 'click.option', (['"""--output"""', '"""-o"""'], {'help': '"""Set output, only json supported."""'}), "('--output', '-o', help='Set output, only json supported.')\n", (1714, 1773), False, 'import click\n'), ((1775, 1883), 'click.option', 'click.option', (['"""--test"""'], {'default': '(False)', 'is_flag': '(True)', 'help': '"""Enhanced response for testing."""', 'hidden': '(True)'}), "('--test', default=False, is_flag=True, help=\n 'Enhanced response for testing.', hidden=True)\n", (1787, 1883), False, 'import click\n'), ((5185, 5259), 'click.command', 'click.command', (['"""show"""'], {'short_help': '"""Show information about imported model."""'}), "('show', short_help='Show information about imported model.')\n", (5198, 5259), False, 'import click\n'), ((5261, 5353), 'click.option', 'click.option', (['"""--name"""', '"""-n"""'], {'help': '"""[Required] The name of the imported model to show."""'}), "('--name', '-n', help=\n '[Required] The name of the imported model to show.')\n", (5273, 5353), False, 'import click\n'), ((5365, 5661), 'click.option', 'click.option', (['"""--workspace-id"""', '"""-w"""'], {'help': '"""Please provide the workspace id if you would like to override the default target workspace. If your current Azure Active Directory login does not have access to this workspace, you will need to configure the workspace using bonsai configure."""'}), "('--workspace-id', '-w', help=\n 'Please provide the workspace id if you would like to override the default target workspace. 
If your current Azure Active Directory login does not have access to this workspace, you will need to configure the workspace using bonsai configure.'\n )\n", (5377, 5661), False, 'import click\n'), ((5668, 5762), 'click.option', 'click.option', (['"""--debug"""'], {'default': '(False)', 'is_flag': '(True)', 'help': '"""Verbose logging for request."""'}), "('--debug', default=False, is_flag=True, help=\n 'Verbose logging for request.')\n", (5680, 5762), False, 'import click\n'), ((5765, 5836), 'click.option', 'click.option', (['"""--output"""', '"""-o"""'], {'help': '"""Set output, only json supported."""'}), "('--output', '-o', help='Set output, only json supported.')\n", (5777, 5836), False, 'import click\n'), ((5838, 5946), 'click.option', 'click.option', (['"""--test"""'], {'default': '(False)', 'is_flag': '(True)', 'help': '"""Enhanced response for testing."""', 'hidden': '(True)'}), "('--test', default=False, is_flag=True, help=\n 'Enhanced response for testing.', hidden=True)\n", (5850, 5946), False, 'import click\n'), ((8060, 8139), 'click.command', 'click.command', (['"""update"""'], {'short_help': '"""Update information about a imported model"""'}), "('update', short_help='Update information about a imported model')\n", (8073, 8139), False, 'import click\n'), ((8141, 8216), 'click.option', 'click.option', (['"""--name"""', '"""-n"""'], {'help': '"""[Required] Name of the imported model."""'}), "('--name', '-n', help='[Required] Name of the imported model.')\n", (8153, 8216), False, 'import click\n'), ((8218, 8292), 'click.option', 'click.option', (['"""--display-name"""'], {'help': '"""Display name of the imported model."""'}), "('--display-name', help='Display name of the imported model.')\n", (8230, 8292), False, 'import click\n'), ((8294, 8367), 'click.option', 'click.option', (['"""--description"""'], {'help': '"""Description for the imported model."""'}), "('--description', help='Description for the imported model.')\n", (8306, 8367), False, 'import click\n'), ((8369, 8665), 'click.option', 'click.option', (['"""--workspace-id"""', '"""-w"""'], {'help': '"""Please provide the workspace id if you would like to override the default target workspace. If your current Azure Active Directory login does not have access to this workspace, you will need to configure the workspace using bonsai configure."""'}), "('--workspace-id', '-w', help=\n 'Please provide the workspace id if you would like to override the default target workspace. 
If your current Azure Active Directory login does not have access to this workspace, you will need to configure the workspace using bonsai configure.'\n )\n", (8381, 8665), False, 'import click\n'), ((8672, 8766), 'click.option', 'click.option', (['"""--debug"""'], {'default': '(False)', 'is_flag': '(True)', 'help': '"""Verbose logging for request."""'}), "('--debug', default=False, is_flag=True, help=\n 'Verbose logging for request.')\n", (8684, 8766), False, 'import click\n'), ((8769, 8840), 'click.option', 'click.option', (['"""--output"""', '"""-o"""'], {'help': '"""Set output, only json supported."""'}), "('--output', '-o', help='Set output, only json supported.')\n", (8781, 8840), False, 'import click\n'), ((8842, 8950), 'click.option', 'click.option', (['"""--test"""'], {'default': '(False)', 'is_flag': '(True)', 'help': '"""Enhanced response for testing."""', 'hidden': '(True)'}), "('--test', default=False, is_flag=True, help=\n 'Enhanced response for testing.', hidden=True)\n", (8854, 8950), False, 'import click\n'), ((10866, 10945), 'click.command', 'click.command', (['"""list"""'], {'short_help': '"""Lists imported model owned by current user."""'}), "('list', short_help='Lists imported model owned by current user.')\n", (10879, 10945), False, 'import click\n'), ((10947, 11243), 'click.option', 'click.option', (['"""--workspace-id"""', '"""-w"""'], {'help': '"""Please provide the workspace id if you would like to override the default target workspace. If your current Azure Active Directory login does not have access to this workspace, you will need to configure the workspace using bonsai configure."""'}), "('--workspace-id', '-w', help=\n 'Please provide the workspace id if you would like to override the default target workspace. If your current Azure Active Directory login does not have access to this workspace, you will need to configure the workspace using bonsai configure.'\n )\n", (10959, 11243), False, 'import click\n'), ((11250, 11344), 'click.option', 'click.option', (['"""--debug"""'], {'default': '(False)', 'is_flag': '(True)', 'help': '"""Verbose logging for request."""'}), "('--debug', default=False, is_flag=True, help=\n 'Verbose logging for request.')\n", (11262, 11344), False, 'import click\n'), ((11347, 11418), 'click.option', 'click.option', (['"""--output"""', '"""-o"""'], {'help': '"""Set output, only json supported."""'}), "('--output', '-o', help='Set output, only json supported.')\n", (11359, 11418), False, 'import click\n'), ((11420, 11528), 'click.option', 'click.option', (['"""--test"""'], {'default': '(False)', 'is_flag': '(True)', 'help': '"""Enhanced response for testing."""', 'hidden': '(True)'}), "('--test', default=False, is_flag=True, help=\n 'Enhanced response for testing.', hidden=True)\n", (11432, 11528), False, 'import click\n'), ((12942, 13004), 'click.command', 'click.command', (['"""delete"""'], {'short_help': '"""Delete a imported model."""'}), "('delete', short_help='Delete a imported model.')\n", (12955, 13004), False, 'import click\n'), ((13006, 13100), 'click.option', 'click.option', (['"""--name"""', '"""-n"""'], {'help': '"""[Required] The name of the imported model to delete."""'}), "('--name', '-n', help=\n '[Required] The name of the imported model to delete.')\n", (13018, 13100), False, 'import click\n'), ((13112, 13213), 'click.option', 'click.option', (['"""--yes"""', '"""-y"""'], {'default': '(False)', 'is_flag': '(True)', 'help': '"""Do not prompt for confirmation."""'}), "('--yes', '-y', default=False, is_flag=True, help=\n 
'Do not prompt for confirmation.')\n", (13124, 13213), False, 'import click\n'), ((13216, 13512), 'click.option', 'click.option', (['"""--workspace-id"""', '"""-w"""'], {'help': '"""Please provide the workspace id if you would like to override the default target workspace. If your current Azure Active Directory login does not have access to this workspace, you will need to configure the workspace using bonsai configure."""'}), "('--workspace-id', '-w', help=\n 'Please provide the workspace id if you would like to override the default target workspace. If your current Azure Active Directory login does not have access to this workspace, you will need to configure the workspace using bonsai configure.'\n )\n", (13228, 13512), False, 'import click\n'), ((13519, 13629), 'click.option', 'click.option', (['"""--debug"""'], {'default': '(False)', 'is_flag': '(True)', 'help': '"""Increase logging verbosity to show all logs."""'}), "('--debug', default=False, is_flag=True, help=\n 'Increase logging verbosity to show all logs.')\n", (13531, 13629), False, 'import click\n'), ((13645, 13716), 'click.option', 'click.option', (['"""--output"""', '"""-o"""'], {'help': '"""Set output, only json supported."""'}), "('--output', '-o', help='Set output, only json supported.')\n", (13657, 13716), False, 'import click\n'), ((13718, 13826), 'click.option', 'click.option', (['"""--test"""'], {'default': '(False)', 'is_flag': '(True)', 'help': '"""Enhanced response for testing."""', 'hidden': '(True)'}), "('--test', default=False, is_flag=True, help=\n 'Enhanced response for testing.', hidden=True)\n", (13730, 13826), False, 'import click\n'), ((2173, 2221), 'bonsai_cli.utils.get_version_checker', 'get_version_checker', (['ctx'], {'interactive': '(not output)'}), '(ctx, interactive=not output)\n', (2192, 2221), False, 'from bonsai_cli.utils import api, get_version_checker, raise_204_click_exception, raise_as_click_exception, raise_brain_server_error_as_click_exception, raise_not_found_as_click_exception, raise_unique_constraint_violation_as_click_exception\n'), ((6146, 6194), 'bonsai_cli.utils.get_version_checker', 'get_version_checker', (['ctx'], {'interactive': '(not output)'}), '(ctx, interactive=not output)\n', (6165, 6194), False, 'from bonsai_cli.utils import api, get_version_checker, raise_204_click_exception, raise_as_click_exception, raise_brain_server_error_as_click_exception, raise_not_found_as_click_exception, raise_unique_constraint_violation_as_click_exception\n'), ((9197, 9245), 'bonsai_cli.utils.get_version_checker', 'get_version_checker', (['ctx'], {'interactive': '(not output)'}), '(ctx, interactive=not output)\n', (9216, 9245), False, 'from bonsai_cli.utils import api, get_version_checker, raise_204_click_exception, raise_as_click_exception, raise_brain_server_error_as_click_exception, raise_not_found_as_click_exception, raise_unique_constraint_violation_as_click_exception\n'), ((11696, 11744), 'bonsai_cli.utils.get_version_checker', 'get_version_checker', (['ctx'], {'interactive': '(not output)'}), '(ctx, interactive=not output)\n', (11715, 11744), False, 'from bonsai_cli.utils import api, get_version_checker, raise_204_click_exception, raise_as_click_exception, raise_brain_server_error_as_click_exception, raise_not_found_as_click_exception, raise_unique_constraint_violation_as_click_exception\n'), ((14043, 14085), 'bonsai_cli.utils.get_version_checker', 'get_version_checker', (['ctx'], {'interactive': '(True)'}), '(ctx, interactive=True)\n', (14062, 14085), False, 'from bonsai_cli.utils import 
api, get_version_checker, raise_204_click_exception, raise_as_click_exception, raise_brain_server_error_as_click_exception, raise_not_found_as_click_exception, raise_unique_constraint_violation_as_click_exception\n'), ((2563, 2598), 'bonsai_cli.utils.raise_as_click_exception', 'raise_as_click_exception', (['error_msg'], {}), '(error_msg)\n', (2587, 2598), False, 'from bonsai_cli.utils import api, get_version_checker, raise_204_click_exception, raise_as_click_exception, raise_brain_server_error_as_click_exception, raise_not_found_as_click_exception, raise_unique_constraint_violation_as_click_exception\n'), ((2623, 2642), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (2640, 2642), False, 'import time\n'), ((2771, 2790), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (2788, 2790), False, 'import time\n'), ((2806, 2836), 'os.path.getsize', 'os.path.getsize', (['modelfilepath'], {}), '(modelfilepath)\n', (2821, 2836), False, 'import os\n'), ((6221, 6292), 'bonsai_cli.utils.raise_as_click_exception', 'raise_as_click_exception', (['"""\nName of the imported model is required"""'], {}), '("""\nName of the imported model is required""")\n', (6245, 6292), False, 'from bonsai_cli.utils import api, get_version_checker, raise_204_click_exception, raise_as_click_exception, raise_brain_server_error_as_click_exception, raise_not_found_as_click_exception, raise_unique_constraint_violation_as_click_exception\n'), ((9272, 9343), 'bonsai_cli.utils.raise_as_click_exception', 'raise_as_click_exception', (['"""\nName of the imported model is required"""'], {}), '("""\nName of the imported model is required""")\n', (9296, 9343), False, 'from bonsai_cli.utils import api, get_version_checker, raise_204_click_exception, raise_as_click_exception, raise_brain_server_error_as_click_exception, raise_not_found_as_click_exception, raise_unique_constraint_violation_as_click_exception\n'), ((9392, 9497), 'bonsai_cli.utils.raise_as_click_exception', 'raise_as_click_exception', (['"""\nDisplay Name or description for the imported model must be updated."""'], {}), '(\n """\nDisplay Name or description for the imported model must be updated.""")\n', (9416, 9497), False, 'from bonsai_cli.utils import api, get_version_checker, raise_204_click_exception, raise_as_click_exception, raise_brain_server_error_as_click_exception, raise_not_found_as_click_exception, raise_unique_constraint_violation_as_click_exception\n'), ((10762, 10788), 'click.echo', 'click.echo', (['status_message'], {}), '(status_message)\n', (10772, 10788), False, 'import click\n'), ((12101, 12160), 'click.echo', 'click.echo', (['"""No imported models exist for the current user"""'], {}), "('No imported models exist for the current user')\n", (12111, 12160), False, 'import click\n'), ((14112, 14183), 'bonsai_cli.utils.raise_as_click_exception', 'raise_as_click_exception', (['"""\nName of the imported model is required"""'], {}), '("""\nName of the imported model is required""")\n', (14136, 14183), False, 'from bonsai_cli.utils import api, get_version_checker, raise_204_click_exception, raise_as_click_exception, raise_brain_server_error_as_click_exception, raise_not_found_as_click_exception, raise_unique_constraint_violation_as_click_exception\n'), ((4573, 4723), 'click.echo', 'click.echo', (['"""imported model creation is still in progress. Please use bonsai importedmodel show -n <importedmodelname> to check the status."""'], {}), "(\n 'imported model creation is still in progress. 
Please use bonsai importedmodel show -n <importedmodelname> to check the status.'\n )\n", (4583, 4723), False, 'import click\n'), ((5080, 5107), 'bonsai_cli.utils.raise_as_click_exception', 'raise_as_click_exception', (['e'], {}), '(e)\n', (5104, 5107), False, 'from bonsai_cli.utils import api, get_version_checker, raise_204_click_exception, raise_as_click_exception, raise_brain_server_error_as_click_exception, raise_not_found_as_click_exception, raise_unique_constraint_violation_as_click_exception\n'), ((6899, 6926), 'bonsai_cli.utils.raise_as_click_exception', 'raise_as_click_exception', (['e'], {}), '(e)\n', (6923, 6926), False, 'from bonsai_cli.utils import api, get_version_checker, raise_204_click_exception, raise_as_click_exception, raise_brain_server_error_as_click_exception, raise_not_found_as_click_exception, raise_unique_constraint_violation_as_click_exception\n'), ((7442, 7472), 'json.dumps', 'dumps', (['json_response'], {'indent': '(4)'}), '(json_response, indent=4)\n', (7447, 7472), False, 'from json import dumps\n'), ((10244, 10271), 'bonsai_cli.utils.raise_as_click_exception', 'raise_as_click_exception', (['e'], {}), '(e)\n', (10268, 10271), False, 'from bonsai_cli.utils import api, get_version_checker, raise_204_click_exception, raise_as_click_exception, raise_brain_server_error_as_click_exception, raise_not_found_as_click_exception, raise_unique_constraint_violation_as_click_exception\n'), ((10711, 10741), 'json.dumps', 'dumps', (['json_response'], {'indent': '(4)'}), '(json_response, indent=4)\n', (10716, 10741), False, 'from json import dumps\n'), ((11914, 11981), 'bonsai_cli.utils.raise_brain_server_error_as_click_exception', 'raise_brain_server_error_as_click_exception', (['debug', 'output', 'test', 'e'], {}), '(debug, output, test, e)\n', (11957, 11981), False, 'from bonsai_cli.utils import api, get_version_checker, raise_204_click_exception, raise_as_click_exception, raise_brain_server_error_as_click_exception, raise_not_found_as_click_exception, raise_unique_constraint_violation_as_click_exception\n'), ((12028, 12055), 'bonsai_cli.utils.raise_as_click_exception', 'raise_as_click_exception', (['e'], {}), '(e)\n', (12052, 12055), False, 'from bonsai_cli.utils import api, get_version_checker, raise_204_click_exception, raise_as_click_exception, raise_brain_server_error_as_click_exception, raise_not_found_as_click_exception, raise_unique_constraint_violation_as_click_exception\n'), ((12726, 12756), 'json.dumps', 'dumps', (['json_response'], {'indent': '(4)'}), '(json_response, indent=4)\n', (12731, 12756), False, 'from json import dumps\n'), ((12830, 12864), 'click.echo', 'click.echo', (["imported_model['name']"], {}), "(imported_model['name'])\n", (12840, 12864), False, 'import click\n'), ((15884, 15910), 'click.echo', 'click.echo', (['status_message'], {}), '(status_message)\n', (15894, 15910), False, 'import click\n'), ((2662, 2679), 'bonsai_cli.utils.api', 'api', ([], {'use_aad': '(True)'}), '(use_aad=True)\n', (2665, 2679), False, 'from bonsai_cli.utils import api, get_version_checker, raise_204_click_exception, raise_as_click_exception, raise_brain_server_error_as_click_exception, raise_not_found_as_click_exception, raise_unique_constraint_violation_as_click_exception\n'), ((3002, 3019), 'bonsai_cli.utils.api', 'api', ([], {'use_aad': '(True)'}), '(use_aad=True)\n', (3005, 3019), False, 'from bonsai_cli.utils import api, get_version_checker, raise_204_click_exception, raise_as_click_exception, raise_brain_server_error_as_click_exception, 
raise_not_found_as_click_exception, raise_unique_constraint_violation_as_click_exception\n'), ((3862, 3876), 'time.sleep', 'time.sleep', (['(10)'], {}), '(10)\n', (3872, 3876), False, 'import time\n'), ((4112, 4172), 'bonsai_cli.utils.raise_as_click_exception', 'raise_as_click_exception', (["response['operationStatusMessage']"], {}), "(response['operationStatusMessage'])\n", (4136, 4172), False, 'from bonsai_cli.utils import api, get_version_checker, raise_204_click_exception, raise_as_click_exception, raise_brain_server_error_as_click_exception, raise_not_found_as_click_exception, raise_unique_constraint_violation_as_click_exception\n'), ((4521, 4546), 'click.echo', 'click.echo', (['statusMessage'], {}), '(statusMessage)\n', (4531, 4546), False, 'import click\n'), ((4849, 4953), 'bonsai_cli.utils.raise_unique_constraint_violation_as_click_exception', 'raise_unique_constraint_violation_as_click_exception', (['debug', 'output', '"""Imported model"""', 'name', 'test', 'e'], {}), "(debug, output,\n 'Imported model', name, test, e)\n", (4901, 4953), False, 'from bonsai_cli.utils import api, get_version_checker, raise_204_click_exception, raise_as_click_exception, raise_brain_server_error_as_click_exception, raise_not_found_as_click_exception, raise_unique_constraint_violation_as_click_exception\n'), ((5006, 5033), 'bonsai_cli.utils.raise_as_click_exception', 'raise_as_click_exception', (['e'], {}), '(e)\n', (5030, 5033), False, 'from bonsai_cli.utils import api, get_version_checker, raise_204_click_exception, raise_as_click_exception, raise_brain_server_error_as_click_exception, raise_not_found_as_click_exception, raise_unique_constraint_violation_as_click_exception\n'), ((6319, 6336), 'bonsai_cli.utils.api', 'api', ([], {'use_aad': '(True)'}), '(use_aad=True)\n', (6322, 6336), False, 'from bonsai_cli.utils import api, get_version_checker, raise_204_click_exception, raise_as_click_exception, raise_brain_server_error_as_click_exception, raise_not_found_as_click_exception, raise_unique_constraint_violation_as_click_exception\n'), ((6527, 6636), 'bonsai_cli.utils.raise_not_found_as_click_exception', 'raise_not_found_as_click_exception', (['debug', 'output', '"""Show imported model"""', '"""Imported model"""', 'name', 'test', 'e'], {}), "(debug, output, 'Show imported model',\n 'Imported model', name, test, e)\n", (6561, 6636), False, 'from bonsai_cli.utils import api, get_version_checker, raise_204_click_exception, raise_as_click_exception, raise_brain_server_error_as_click_exception, raise_not_found_as_click_exception, raise_unique_constraint_violation_as_click_exception\n'), ((6786, 6853), 'bonsai_cli.utils.raise_brain_server_error_as_click_exception', 'raise_brain_server_error_as_click_exception', (['debug', 'output', 'test', 'e'], {}), '(debug, output, test, e)\n', (6829, 6853), False, 'from bonsai_cli.utils import api, get_version_checker, raise_204_click_exception, raise_as_click_exception, raise_brain_server_error_as_click_exception, raise_not_found_as_click_exception, raise_unique_constraint_violation_as_click_exception\n'), ((9541, 9558), 'bonsai_cli.utils.api', 'api', ([], {'use_aad': '(True)'}), '(use_aad=True)\n', (9544, 9558), False, 'from bonsai_cli.utils import api, get_version_checker, raise_204_click_exception, raise_as_click_exception, raise_brain_server_error_as_click_exception, raise_not_found_as_click_exception, raise_unique_constraint_violation_as_click_exception\n'), ((9870, 9980), 'bonsai_cli.utils.raise_not_found_as_click_exception', 'raise_not_found_as_click_exception', 
(['debug', 'output', '"""Update imported model"""', '"""ImportedModel"""', 'name', 'test', 'e'], {}), "(debug, output, 'Update imported model',\n 'ImportedModel', name, test, e)\n", (9904, 9980), False, 'from bonsai_cli.utils import api, get_version_checker, raise_204_click_exception, raise_as_click_exception, raise_brain_server_error_as_click_exception, raise_not_found_as_click_exception, raise_unique_constraint_violation_as_click_exception\n'), ((10130, 10197), 'bonsai_cli.utils.raise_brain_server_error_as_click_exception', 'raise_brain_server_error_as_click_exception', (['debug', 'output', 'test', 'e'], {}), '(debug, output, test, e)\n', (10173, 10197), False, 'from bonsai_cli.utils import api, get_version_checker, raise_204_click_exception, raise_as_click_exception, raise_brain_server_error_as_click_exception, raise_not_found_as_click_exception, raise_unique_constraint_violation_as_click_exception\n'), ((11774, 11791), 'bonsai_cli.utils.api', 'api', ([], {'use_aad': '(True)'}), '(use_aad=True)\n', (11777, 11791), False, 'from bonsai_cli.utils import api, get_version_checker, raise_204_click_exception, raise_as_click_exception, raise_brain_server_error_as_click_exception, raise_not_found_as_click_exception, raise_unique_constraint_violation_as_click_exception\n'), ((14615, 14678), 'bonsai_cli.utils.raise_as_click_exception', 'raise_as_click_exception', (['"""\nPlease respond with \'y\' or \'n\'"""'], {}), '("""\nPlease respond with \'y\' or \'n\'""")\n', (14639, 14678), False, 'from bonsai_cli.utils import api, get_version_checker, raise_204_click_exception, raise_as_click_exception, raise_brain_server_error_as_click_exception, raise_not_found_as_click_exception, raise_unique_constraint_violation_as_click_exception\n'), ((15204, 15271), 'bonsai_cli.utils.raise_brain_server_error_as_click_exception', 'raise_brain_server_error_as_click_exception', (['debug', 'output', 'test', 'e'], {}), '(debug, output, test, e)\n', (15247, 15271), False, 'from bonsai_cli.utils import api, get_version_checker, raise_204_click_exception, raise_as_click_exception, raise_brain_server_error_as_click_exception, raise_not_found_as_click_exception, raise_unique_constraint_violation_as_click_exception\n'), ((15326, 15353), 'bonsai_cli.utils.raise_as_click_exception', 'raise_as_click_exception', (['e'], {}), '(e)\n', (15350, 15353), False, 'from bonsai_cli.utils import api, get_version_checker, raise_204_click_exception, raise_as_click_exception, raise_brain_server_error_as_click_exception, raise_not_found_as_click_exception, raise_unique_constraint_violation_as_click_exception\n'), ((15825, 15855), 'json.dumps', 'dumps', (['json_response'], {'indent': '(4)'}), '(json_response, indent=4)\n', (15830, 15855), False, 'from json import dumps\n'), ((4455, 4485), 'json.dumps', 'dumps', (['json_response'], {'indent': '(4)'}), '(json_response, indent=4)\n', (4460, 4485), False, 'from json import dumps\n'), ((14731, 14748), 'bonsai_cli.utils.api', 'api', ([], {'use_aad': '(True)'}), '(use_aad=True)\n', (14734, 14748), False, 'from bonsai_cli.utils import api, get_version_checker, raise_204_click_exception, raise_as_click_exception, raise_brain_server_error_as_click_exception, raise_not_found_as_click_exception, raise_unique_constraint_violation_as_click_exception\n'), ((3737, 3754), 'bonsai_cli.utils.api', 'api', ([], {'use_aad': '(True)'}), '(use_aad=True)\n', (3740, 3754), False, 'from bonsai_cli.utils import api, get_version_checker, raise_204_click_exception, raise_as_click_exception, 
raise_brain_server_error_as_click_exception, raise_not_found_as_click_exception, raise_unique_constraint_violation_as_click_exception\n')] |
#!/usr/bin/env python
from __future__ import annotations
import unittest
from pytheos import utils
from pytheos.utils import CHARACTER_REPLACE_MAP
class TestUtils(unittest.TestCase):
def test_extract_ip(self):
self.assertEqual(utils.extract_host('http://127.0.0.1'), '127.0.0.1')
self.assertEqual(utils.extract_host('http://127.0.0.1/'), '127.0.0.1')
self.assertEqual(utils.extract_host('http://127.0.0.1/testing'), '127.0.0.1')
self.assertEqual(utils.extract_host('http://127.0.0.1:1234'), '127.0.0.1')
self.assertEqual(utils.extract_host('http://127.0.0.1:1234/'), '127.0.0.1')
self.assertEqual(utils.extract_host('http://127.0.0.1:1234/testing'), '127.0.0.1')
self.assertEqual(utils.extract_host('https://127.0.0.1'), '127.0.0.1')
self.assertIsNone(utils.extract_host('hxxp://127.0.0.1'))
def test_build_command_string(self):
self.assertEqual(utils.build_command_string("group", "command"), "heos://group/command\n")
self.assertEqual(utils.build_command_string("group", "command", arg1="hello", other_arg="world", stop=True),
"heos://group/command?arg1=hello&other_arg=world&stop=True\n")
def test_build_command_string_replacers(self):
input_str = ''
output_str = ''
for invalid_char, replacement_char in CHARACTER_REPLACE_MAP.items():
input_str += invalid_char
output_str += replacement_char
self.assertEqual(utils.build_command_string("group", "command", test=input_str),
f"heos://group/command?test={output_str}\n")
def test_parse_var_string(self):
self.assertEqual(utils.parse_var_string('pid=12345678&un=someone'), {'pid': '12345678', 'un': 'someone'})
self.assertEqual(utils.parse_var_string('signed_in&un=username'), {'signed_in': 'signed_in', 'un': 'username'})
def test_parse_var_string_replacers(self):
input_str = ''
output_str = ''
for invalid_char, replacement_char in CHARACTER_REPLACE_MAP.items():
input_str += replacement_char
output_str += invalid_char
self.assertEqual(utils.parse_var_string(f'test={input_str}'), {'test': output_str})
if __name__ == '__main__':
unittest.main()
| [
"pytheos.utils.parse_var_string",
"pytheos.utils.CHARACTER_REPLACE_MAP.items",
"unittest.main",
"pytheos.utils.extract_host",
"pytheos.utils.build_command_string"
] | [((2278, 2293), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2291, 2293), False, 'import unittest\n'), ((1355, 1384), 'pytheos.utils.CHARACTER_REPLACE_MAP.items', 'CHARACTER_REPLACE_MAP.items', ([], {}), '()\n', (1382, 1384), False, 'from pytheos.utils import CHARACTER_REPLACE_MAP\n'), ((2040, 2069), 'pytheos.utils.CHARACTER_REPLACE_MAP.items', 'CHARACTER_REPLACE_MAP.items', ([], {}), '()\n', (2067, 2069), False, 'from pytheos.utils import CHARACTER_REPLACE_MAP\n'), ((243, 281), 'pytheos.utils.extract_host', 'utils.extract_host', (['"""http://127.0.0.1"""'], {}), "('http://127.0.0.1')\n", (261, 281), False, 'from pytheos import utils\n'), ((321, 360), 'pytheos.utils.extract_host', 'utils.extract_host', (['"""http://127.0.0.1/"""'], {}), "('http://127.0.0.1/')\n", (339, 360), False, 'from pytheos import utils\n'), ((400, 446), 'pytheos.utils.extract_host', 'utils.extract_host', (['"""http://127.0.0.1/testing"""'], {}), "('http://127.0.0.1/testing')\n", (418, 446), False, 'from pytheos import utils\n'), ((486, 529), 'pytheos.utils.extract_host', 'utils.extract_host', (['"""http://127.0.0.1:1234"""'], {}), "('http://127.0.0.1:1234')\n", (504, 529), False, 'from pytheos import utils\n'), ((569, 613), 'pytheos.utils.extract_host', 'utils.extract_host', (['"""http://127.0.0.1:1234/"""'], {}), "('http://127.0.0.1:1234/')\n", (587, 613), False, 'from pytheos import utils\n'), ((653, 704), 'pytheos.utils.extract_host', 'utils.extract_host', (['"""http://127.0.0.1:1234/testing"""'], {}), "('http://127.0.0.1:1234/testing')\n", (671, 704), False, 'from pytheos import utils\n'), ((744, 783), 'pytheos.utils.extract_host', 'utils.extract_host', (['"""https://127.0.0.1"""'], {}), "('https://127.0.0.1')\n", (762, 783), False, 'from pytheos import utils\n'), ((824, 862), 'pytheos.utils.extract_host', 'utils.extract_host', (['"""hxxp://127.0.0.1"""'], {}), "('hxxp://127.0.0.1')\n", (842, 862), False, 'from pytheos import utils\n'), ((931, 977), 'pytheos.utils.build_command_string', 'utils.build_command_string', (['"""group"""', '"""command"""'], {}), "('group', 'command')\n", (957, 977), False, 'from pytheos import utils\n'), ((1030, 1125), 'pytheos.utils.build_command_string', 'utils.build_command_string', (['"""group"""', '"""command"""'], {'arg1': '"""hello"""', 'other_arg': '"""world"""', 'stop': '(True)'}), "('group', 'command', arg1='hello', other_arg=\n 'world', stop=True)\n", (1056, 1125), False, 'from pytheos import utils\n'), ((1493, 1555), 'pytheos.utils.build_command_string', 'utils.build_command_string', (['"""group"""', '"""command"""'], {'test': 'input_str'}), "('group', 'command', test=input_str)\n", (1519, 1555), False, 'from pytheos import utils\n'), ((1690, 1739), 'pytheos.utils.parse_var_string', 'utils.parse_var_string', (['"""pid=12345678&un=someone"""'], {}), "('pid=12345678&un=someone')\n", (1712, 1739), False, 'from pytheos import utils\n'), ((1804, 1851), 'pytheos.utils.parse_var_string', 'utils.parse_var_string', (['"""signed_in&un=username"""'], {}), "('signed_in&un=username')\n", (1826, 1851), False, 'from pytheos import utils\n'), ((2178, 2221), 'pytheos.utils.parse_var_string', 'utils.parse_var_string', (['f"""test={input_str}"""'], {}), "(f'test={input_str}')\n", (2200, 2221), False, 'from pytheos import utils\n')] |
import card_repository
from card import Card
from new_card import new_card
from user_colors import print_info
def new_cards(conn, cursor, two_way_card, single_line_mode, editor_mode):
while True:
card, two_way_card, single_line_mode, editor_mode = \
new_card(two_way_card, single_line_mode, editor_mode)
if card == None:
break
card_repository.insert(conn, cursor, card)
if two_way_card:
flipped_card = Card(
card.back,
card.front,
card.score,
card.last_viewed
)
card_repository.insert(conn, cursor, flipped_card)
print_info('Saved.')
| [
"card_repository.insert",
"card.Card",
"new_card.new_card",
"user_colors.print_info"
] | [((277, 330), 'new_card.new_card', 'new_card', (['two_way_card', 'single_line_mode', 'editor_mode'], {}), '(two_way_card, single_line_mode, editor_mode)\n', (285, 330), False, 'from new_card import new_card\n'), ((384, 426), 'card_repository.insert', 'card_repository.insert', (['conn', 'cursor', 'card'], {}), '(conn, cursor, card)\n', (406, 426), False, 'import card_repository\n'), ((690, 710), 'user_colors.print_info', 'print_info', (['"""Saved."""'], {}), "('Saved.')\n", (700, 710), False, 'from user_colors import print_info\n'), ((481, 538), 'card.Card', 'Card', (['card.back', 'card.front', 'card.score', 'card.last_viewed'], {}), '(card.back, card.front, card.score, card.last_viewed)\n', (485, 538), False, 'from card import Card\n'), ((630, 680), 'card_repository.insert', 'card_repository.insert', (['conn', 'cursor', 'flipped_card'], {}), '(conn, cursor, flipped_card)\n', (652, 680), False, 'import card_repository\n')] |
"""
Redis Instances module
"""
import logging
import re
import redis
# Constants
ip_pattern = '(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])'
hostname_pattern = '(([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])\.)*([A-Za-z]|[A-Za-z][A-Za-z\-]*[A-Za-z])'
port_pattern = '([0-9]{1,4}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])'
endpoint_pattern = '^({}|{}):{}$'.format(ip_pattern, hostname_pattern, port_pattern)
class RedisInstance(object):
"""
Redis instance
"""
def __init__(self, endpoint):
"""
Initialize a Redis instance
"""
# Validate redis endpoint format
if re.match(endpoint_pattern, endpoint):
self._endpoint = endpoint
hostname, port = self._endpoint.split(':')
self._hostname = hostname
self._port = port
logging.debug('Redis hostname: {0}'.format(self._hostname))
logging.debug('Redis port: {0}'.format(self._port))
self._redis = redis.StrictRedis(host=hostname, port=port, db=0)
else:
logging.info('ERROR: {} is not a valid endpoint (ip:port or fqdn:port)'.format(endpoint))
exit(1)
def get_endpoint(self):
"""
Get Redis endpoint
"""
logging.debug('Redis endpoint: {0}'.format(self._endpoint))
return self._endpoint
def list_keys(self, namespace):
"""
List Redis keys matching specified namespace
"""
logging.info('Get redis keys starting by namespace "{0}" for redis endpoint "{1}'.format(
namespace, self._endpoint))
# Create iterator to scan all keys matching namespace
keys_iter = self._redis.scan_iter(match=namespace)
# Using set type for list of keys will help us to compare list of keys more efficiently
keys = set()
for k in keys_iter:
logging.debug('Add key {0} to list'.format(k))
keys.add(k)
logging.debug('Keys: {0}'.format(keys))
return keys
def get_keys(self, keys):
"""
Get Redis keys detail
"""
keys_detailed = []
for k in keys:
# Get key name, value and ttl
key = {'key': k, 'value': self._redis.get(k), 'ttl': self._redis.ttl(k)}
logging.debug('Key: {}, Value: {}, TTL: {}'.format(key['key'], key['value'], key['ttl']))
keys_detailed.append(key)
logging.debug('Keys: {0}'.format(keys_detailed))
return keys_detailed
def set_keys(self, keys):
"""
Insert Redis keys from list
"""
# Use pipeline for performance (https://github.com/andymccurdy/redis-py/blob/master/README.rst#pipelines)
pipe = self._redis.pipeline()
for k in keys:
# Get key name, value and ttl
name, value, ttl = k.values()
# Add key to pipeline
logging.debug('Add to pipeline: key: {}, value: {}'.format(name, value))
pipe.set(name, value)
# Add expire to pipeline
if ttl > 0:
logging.debug('Add to pipeline: key: {}, ttl: {}'.format(name, ttl))
pipe.expire(name, ttl)
# Execute pipeline
if pipe.__len__() > 0:
logging.info('Execute pipeline with {0} tasks'.format(pipe.__len__()))
pipe.execute()
else:
logging.info('Pipeline empty, nothing to execute')
| [
"logging.info",
"re.match",
"redis.StrictRedis"
] | [((715, 751), 're.match', 're.match', (['endpoint_pattern', 'endpoint'], {}), '(endpoint_pattern, endpoint)\n', (723, 751), False, 'import re\n'), ((1078, 1127), 'redis.StrictRedis', 'redis.StrictRedis', ([], {'host': 'hostname', 'port': 'port', 'db': '(0)'}), '(host=hostname, port=port, db=0)\n', (1095, 1127), False, 'import redis\n'), ((3495, 3545), 'logging.info', 'logging.info', (['"""Pipeline empty, nothing to execute"""'], {}), "('Pipeline empty, nothing to execute')\n", (3507, 3545), False, 'import logging\n')] |
import time
import torch
import pandas as pd
from sklearn.metrics import classification_report
from model import VanillaClassifier
from utils import create_data_loader, train_epoch, eval_model, epoch_time
import torch.nn as nn
from transformers import AdamW, AutoTokenizer, get_linear_schedule_with_warmup
from collections import defaultdict
from get_predictions import get_predictions
# Set the path of the Kannada Hope speech dataset here
# Reading the previously split train, test, and validation dataframes
train = pd.read_csv('train_hope.csv')
val = pd.read_csv('val_hope.csv')
test = pd.read_csv('test_hope.csv')
pretrained_models = ['distilbert-base-multilingual-cased', 'bert-base-multilingual-cased', 'xlm-roberta-base']
# pretrained_model_kan_mal = 'bert-base-multilingual-cased'
device = 'cuda' if torch.cuda.is_available() else 'cpu'
tokenizers = [AutoTokenizer.from_pretrained(model_name) for model_name in pretrained_models]
BATCH_SIZE = 32
MAX_LEN = 128
loss_fn = nn.CrossEntropyLoss.to(device)
classification_reports = []
for tokenizer, pretrained_model in zip(tokenizers, pretrained_models):
train_data_loader = create_data_loader(train, tokenizer, MAX_LEN, BATCH_SIZE)
val_data_loader = create_data_loader(val, tokenizer, MAX_LEN, BATCH_SIZE)
test_data_loader = create_data_loader(test, tokenizer, MAX_LEN, BATCH_SIZE)
model = VanillaClassifier(pretrained_model, 1)
model = model.to(device)
EPOCHS = 5
optimizer = AdamW(model.parameters(), lr=2e-5, correct_bias=False)
scheduler = get_linear_schedule_with_warmup()
history = defaultdict(list)
best_accuracy = 0
for epoch in range(EPOCHS):
start_time = time.time()
train_acc, train_loss = train_epoch(
model,
train_data_loader,
loss_fn,
optimizer,
scheduler,
train.shape[0]
)
end_time = time.time()
epoch_min, epoch_secs = epoch_time(start_time, end_time)
print(f'Epoch: {epoch + 1:02} | Epoch Time: {epoch_min}m {epoch_secs}s')
print(f'Train Acc1 {train_acc} Train loss {train_loss}')
print()
history['train_acc'].append(train_acc)
history['train_loss'].append(train_loss)
val_acc, val_loss = eval_model(
model,
val_data_loader,
loss_fn,
val.shape[0]
)
print(f'Val Acc1 {val_acc} Val Loss {val_loss}')
y_review_texts, y_pred1, y_pred_probs1, y_test1 = get_predictions(
model,
test_data_loader
)
classes = ['Not-hope', 'Hope']
classification_reports.append(classification_report(y_test1, y_pred1, target_names=classes, zero_division=0))
| [
"torch.nn.CrossEntropyLoss.to",
"utils.create_data_loader",
"pandas.read_csv",
"sklearn.metrics.classification_report",
"transformers.get_linear_schedule_with_warmup",
"utils.train_epoch",
"torch.cuda.is_available",
"collections.defaultdict",
"utils.eval_model",
"transformers.AutoTokenizer.from_pretrained",
"get_predictions.get_predictions",
"utils.epoch_time",
"time.time",
"model.VanillaClassifier"
] | [((520, 549), 'pandas.read_csv', 'pd.read_csv', (['"""train_hope.csv"""'], {}), "('train_hope.csv')\n", (531, 549), True, 'import pandas as pd\n'), ((556, 583), 'pandas.read_csv', 'pd.read_csv', (['"""val_hope.csv"""'], {}), "('val_hope.csv')\n", (567, 583), True, 'import pandas as pd\n'), ((591, 619), 'pandas.read_csv', 'pd.read_csv', (['"""test_hope.csv"""'], {}), "('test_hope.csv')\n", (602, 619), True, 'import pandas as pd\n'), ((983, 1013), 'torch.nn.CrossEntropyLoss.to', 'nn.CrossEntropyLoss.to', (['device'], {}), '(device)\n', (1005, 1013), True, 'import torch.nn as nn\n'), ((812, 837), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (835, 837), False, 'import torch\n'), ((863, 904), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (['model_name'], {}), '(model_name)\n', (892, 904), False, 'from transformers import AdamW, AutoTokenizer, get_linear_schedule_with_warmup\n'), ((1137, 1194), 'utils.create_data_loader', 'create_data_loader', (['train', 'tokenizer', 'MAX_LEN', 'BATCH_SIZE'], {}), '(train, tokenizer, MAX_LEN, BATCH_SIZE)\n', (1155, 1194), False, 'from utils import create_data_loader, train_epoch, eval_model, epoch_time\n'), ((1217, 1272), 'utils.create_data_loader', 'create_data_loader', (['val', 'tokenizer', 'MAX_LEN', 'BATCH_SIZE'], {}), '(val, tokenizer, MAX_LEN, BATCH_SIZE)\n', (1235, 1272), False, 'from utils import create_data_loader, train_epoch, eval_model, epoch_time\n'), ((1296, 1352), 'utils.create_data_loader', 'create_data_loader', (['test', 'tokenizer', 'MAX_LEN', 'BATCH_SIZE'], {}), '(test, tokenizer, MAX_LEN, BATCH_SIZE)\n', (1314, 1352), False, 'from utils import create_data_loader, train_epoch, eval_model, epoch_time\n'), ((1366, 1404), 'model.VanillaClassifier', 'VanillaClassifier', (['pretrained_model', '(1)'], {}), '(pretrained_model, 1)\n', (1383, 1404), False, 'from model import VanillaClassifier\n'), ((1538, 1571), 'transformers.get_linear_schedule_with_warmup', 'get_linear_schedule_with_warmup', ([], {}), '()\n', (1569, 1571), False, 'from transformers import AdamW, AutoTokenizer, get_linear_schedule_with_warmup\n'), ((1586, 1603), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (1597, 1603), False, 'from collections import defaultdict\n'), ((2503, 2543), 'get_predictions.get_predictions', 'get_predictions', (['model', 'test_data_loader'], {}), '(model, test_data_loader)\n', (2518, 2543), False, 'from get_predictions import get_predictions\n'), ((1679, 1690), 'time.time', 'time.time', ([], {}), '()\n', (1688, 1690), False, 'import time\n'), ((1723, 1812), 'utils.train_epoch', 'train_epoch', (['model', 'train_data_loader', 'loss_fn', 'optimizer', 'scheduler', 'train.shape[0]'], {}), '(model, train_data_loader, loss_fn, optimizer, scheduler, train.\n shape[0])\n', (1734, 1812), False, 'from utils import create_data_loader, train_epoch, eval_model, epoch_time\n'), ((1910, 1921), 'time.time', 'time.time', ([], {}), '()\n', (1919, 1921), False, 'import time\n'), ((1954, 1986), 'utils.epoch_time', 'epoch_time', (['start_time', 'end_time'], {}), '(start_time, end_time)\n', (1964, 1986), False, 'from utils import create_data_loader, train_epoch, eval_model, epoch_time\n'), ((2275, 2332), 'utils.eval_model', 'eval_model', (['model', 'val_data_loader', 'loss_fn', 'val.shape[0]'], {}), '(model, val_data_loader, loss_fn, val.shape[0])\n', (2285, 2332), False, 'from utils import create_data_loader, train_epoch, eval_model, epoch_time\n'), ((2635, 2713), 
'sklearn.metrics.classification_report', 'classification_report', (['y_test1', 'y_pred1'], {'target_names': 'classes', 'zero_division': '(0)'}), '(y_test1, y_pred1, target_names=classes, zero_division=0)\n', (2656, 2713), False, 'from sklearn.metrics import classification_report\n')] |
# Softmax classifier loss function
import numpy as np
from random import shuffle
import linear_classifier
def softmax_loss_naive(W, X, y, reg):
"""Softmax loss function, naive implementation (with loops)
Important dimensions: K is number of classes we classify samples to. D is
the dimensionality of data (for example, 32x32x3 images have D=3072). Note
that bias is often folded into the sample as "1", so the actual
dimensionality may be +1 (or 3073 for those images).
N is simply the number of samples we're working with.
Inputs:
- W: K x D array of weights.
- X: D x N array of data. Each datum is a (D-dimensional) column.
- y: 1-dimensional array of length N with labels 0...K-1, for K classes.
y[i] is the correct classification of sample i.
- reg: (float) regularization strength
Returns a tuple of:
- loss as single float
- gradient with respect to weights W; an array of same shape as W
"""
# Note: this code is from the internet, since I couldn't find an explanation
# of how to compute the softmax gradient in lecture notes.
# Initialize the loss and gradient to zero.
loss = 0.0
dW = np.zeros_like(W)
for i in range(X.shape[1]):
scores = W.dot(X[:, i])
# Shift down by max to improve numerical stability -- now the highest
# number is 0.
scores -= np.max(scores)
prob = 0.0
loss -= scores[y[i]]
for curr_score in scores:
prob += np.exp(curr_score)
for j in range(W.shape[0]):
prob_ji = np.exp(scores[j]) / prob
margin = -prob_ji * X[:, i].T
if j == y[i]:
margin = (1 - prob_ji) * X[:, i].T
dW[j, :] += -margin
loss += np.log(prob)
loss /= X.shape[1]
dW /= X.shape[1]
# Regularization
loss += 0.5 * reg * np.sum(W * W)
dW += reg * W
return loss, dW
class Softmax(linear_classifier.LinearClassifier):
""" A subclass that uses the Softmax + cross entropy loss function """
def loss(self, X_batch, y_batch, reg):
return softmax_loss_naive(self.W, X_batch, y_batch, reg)
| [
"numpy.log",
"numpy.max",
"numpy.exp",
"numpy.sum",
"numpy.zeros_like"
] | [((1206, 1222), 'numpy.zeros_like', 'np.zeros_like', (['W'], {}), '(W)\n', (1219, 1222), True, 'import numpy as np\n'), ((1408, 1422), 'numpy.max', 'np.max', (['scores'], {}), '(scores)\n', (1414, 1422), True, 'import numpy as np\n'), ((1798, 1810), 'numpy.log', 'np.log', (['prob'], {}), '(prob)\n', (1804, 1810), True, 'import numpy as np\n'), ((1902, 1915), 'numpy.sum', 'np.sum', (['(W * W)'], {}), '(W * W)\n', (1908, 1915), True, 'import numpy as np\n'), ((1526, 1544), 'numpy.exp', 'np.exp', (['curr_score'], {}), '(curr_score)\n', (1532, 1544), True, 'import numpy as np\n'), ((1604, 1621), 'numpy.exp', 'np.exp', (['scores[j]'], {}), '(scores[j])\n', (1610, 1621), True, 'import numpy as np\n')] |
from dd import run
if __name__=="__main__":
run.main() | [
"dd.run.main"
] | [((50, 60), 'dd.run.main', 'run.main', ([], {}), '()\n', (58, 60), False, 'from dd import run\n')] |
# coding: utf-8
"""
Created on Mon May 17 00:00:00 2017
@author: DIP
"""
# # Import necessary dependencies and settings
# In[1]:
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
import scipy.stats as spstats
get_ipython().magic('matplotlib inline')
mpl.style.reload_library()
mpl.style.use('classic')
mpl.rcParams['figure.facecolor'] = (1, 1, 1, 0)
mpl.rcParams['figure.figsize'] = [6.0, 4.0]
mpl.rcParams['figure.dpi'] = 100
# # Raw Measures
# ## Values
# In[2]:
poke_df = pd.read_csv('datasets/Pokemon.csv', encoding='utf-8')
poke_df.head()
# In[3]:
poke_df[['HP', 'Attack', 'Defense']].head()
# In[4]:
poke_df[['HP', 'Attack', 'Defense']].describe()
# ## Counts
# In[5]:
popsong_df = pd.read_csv('datasets/song_views.csv', encoding='utf-8')
popsong_df.head(10)
# # Binarization
# In[6]:
watched = np.array(popsong_df['listen_count'])
watched[watched >= 1] = 1
popsong_df['watched'] = watched
popsong_df.head(10)
# In[7]:
from sklearn.preprocessing import Binarizer
bn = Binarizer(threshold=0.9)
pd_watched = bn.transform([popsong_df['listen_count']])[0]
popsong_df['pd_watched'] = pd_watched
popsong_df.head(11)
# # Rounding
# In[8]:
items_popularity = pd.read_csv('datasets/item_popularity.csv', encoding='utf-8')
items_popularity
# In[9]:
items_popularity['popularity_scale_10'] = np.array(np.round((items_popularity['pop_percent'] * 10)), dtype='int')
items_popularity['popularity_scale_100'] = np.array(np.round((items_popularity['pop_percent'] * 100)), dtype='int')
items_popularity
# # Interactions
# In[10]:
atk_def = poke_df[['Attack', 'Defense']]
atk_def.head()
# In[11]:
from sklearn.preprocessing import PolynomialFeatures
pf = PolynomialFeatures(degree=2, interaction_only=False, include_bias=False)
res = pf.fit_transform(atk_def)
res
# In[12]:
pd.DataFrame(pf.powers_, columns=['Attack_degree', 'Defense_degree'])
# In[13]:
intr_features = pd.DataFrame(res, columns=['Attack', 'Defense', 'Attack^2', 'Attack x Defense', 'Defense^2'])
intr_features.head(5)
# ## Transforming new data in the future (during predictions)
# In[14]:
new_df = pd.DataFrame([[95, 75],[121, 120], [77, 60]],
columns=['Attack', 'Defense'])
new_df
# In[15]:
new_res = pf.transform(new_df)
new_intr_features = pd.DataFrame(new_res,
columns=['Attack', 'Defense',
'Attack^2', 'Attack x Defense', 'Defense^2'])
new_intr_features
# # Binning
# In[16]:
fcc_survey_df = pd.read_csv('datasets/fcc_2016_coder_survey_subset.csv', encoding='utf-8')
fcc_survey_df[['ID.x', 'EmploymentField', 'Age', 'Income']].head()
# ## Fixed-width binning
# ### Developer age distribution
# In[17]:
fig, ax = plt.subplots()
fcc_survey_df['Age'].hist(color='#A9C5D3')
ax.set_title('Developer Age Histogram', fontsize=12)
ax.set_xlabel('Age', fontsize=12)
ax.set_ylabel('Frequency', fontsize=12)
# ### Binning based on rounding
#
# ```
# Age Range: Bin
# ---------------
# 0 - 9 : 0
# 10 - 19 : 1
# 20 - 29 : 2
# 30 - 39 : 3
# 40 - 49 : 4
# 50 - 59 : 5
# 60 - 69 : 6
# ... and so on
# ```
# In[18]:
fcc_survey_df['Age_bin_round'] = np.array(np.floor(np.array(fcc_survey_df['Age']) / 10.))
fcc_survey_df[['ID.x', 'Age', 'Age_bin_round']].iloc[1071:1076]
# ### Binning based on custom ranges
#
# ```
# Age Range : Bin
# ---------------
# 0 - 15 : 1
# 16 - 30 : 2
# 31 - 45 : 3
# 46 - 60 : 4
# 61 - 75 : 5
# 75 - 100 : 6
# ```
# In[19]:
bin_ranges = [0, 15, 30, 45, 60, 75, 100]
bin_names = [1, 2, 3, 4, 5, 6]
fcc_survey_df['Age_bin_custom_range'] = pd.cut(np.array(fcc_survey_df['Age']),
bins=bin_ranges)
fcc_survey_df['Age_bin_custom_label'] = pd.cut(np.array(fcc_survey_df['Age']),
bins=bin_ranges, labels=bin_names)
fcc_survey_df[['ID.x', 'Age', 'Age_bin_round',
'Age_bin_custom_range', 'Age_bin_custom_label']].iloc[1071:1076]
# ## Quantile based binning
# In[20]:
fcc_survey_df[['ID.x', 'Age', 'Income']].iloc[4:9]
# In[21]:
fig, ax = plt.subplots()
fcc_survey_df['Income'].hist(bins=30, color='#A9C5D3')
ax.set_title('Developer Income Histogram', fontsize=12)
ax.set_xlabel('Developer Income', fontsize=12)
ax.set_ylabel('Frequency', fontsize=12)
# In[22]:
quantile_list = [0, .25, .5, .75, 1.]
quantiles = fcc_survey_df['Income'].quantile(quantile_list)
quantiles
# In[23]:
fig, ax = plt.subplots()
fcc_survey_df['Income'].hist(bins=30, color='#A9C5D3')
for quantile in quantiles:
qvl = plt.axvline(quantile, color='r')
ax.legend([qvl], ['Quantiles'], fontsize=10)
ax.set_title('Developer Income Histogram with Quantiles', fontsize=12)
ax.set_xlabel('Developer Income', fontsize=12)
ax.set_ylabel('Frequency', fontsize=12)
# In[24]:
quantile_labels = ['0-25Q', '25-50Q', '50-75Q', '75-100Q']
fcc_survey_df['Income_quantile_range'] = pd.qcut(fcc_survey_df['Income'],
q=quantile_list)
fcc_survey_df['Income_quantile_label'] = pd.qcut(fcc_survey_df['Income'],
q=quantile_list, labels=quantile_labels)
fcc_survey_df[['ID.x', 'Age', 'Income',
'Income_quantile_range', 'Income_quantile_label']].iloc[4:9]
# # Mathematical Transformations
# ## Log transform
# In[25]:
fcc_survey_df['Income_log'] = np.log((1+ fcc_survey_df['Income']))
fcc_survey_df[['ID.x', 'Age', 'Income', 'Income_log']].iloc[4:9]
# In[26]:
income_log_mean = np.round(np.mean(fcc_survey_df['Income_log']), 2)
fig, ax = plt.subplots()
fcc_survey_df['Income_log'].hist(bins=30, color='#A9C5D3')
plt.axvline(income_log_mean, color='r')
ax.set_title('Developer Income Histogram after Log Transform', fontsize=12)
ax.set_xlabel('Developer Income (log scale)', fontsize=12)
ax.set_ylabel('Frequency', fontsize=12)
ax.text(11.5, 450, r'$\mu$='+str(income_log_mean), fontsize=10)
# ## Box–Cox transform
# In[27]:
# get optimal lambda value from non null income values
income = np.array(fcc_survey_df['Income'])
income_clean = income[~np.isnan(income)]
l, opt_lambda = spstats.boxcox(income_clean)
print('Optimal lambda value:', opt_lambda)
# In[28]:
fcc_survey_df['Income_boxcox_lambda_0'] = spstats.boxcox((1+fcc_survey_df['Income']),
lmbda=0)
fcc_survey_df['Income_boxcox_lambda_opt'] = spstats.boxcox(fcc_survey_df['Income'],
lmbda=opt_lambda)
fcc_survey_df[['ID.x', 'Age', 'Income', 'Income_log',
'Income_boxcox_lambda_0', 'Income_boxcox_lambda_opt']].iloc[4:9]
# In[29]:
income_boxcox_mean = np.round(np.mean(fcc_survey_df['Income_boxcox_lambda_opt']), 2)
fig, ax = plt.subplots()
fcc_survey_df['Income_boxcox_lambda_opt'].hist(bins=30, color='#A9C5D3')
plt.axvline(income_boxcox_mean, color='r')
ax.set_title('Developer Income Histogram after Box–Cox Transform', fontsize=12)
ax.set_xlabel('Developer Income (Box–Cox transform)', fontsize=12)
ax.set_ylabel('Frequency', fontsize=12)
ax.text(24, 450, r'$\mu$='+str(income_boxcox_mean), fontsize=10)
| [
"numpy.mean",
"sklearn.preprocessing.PolynomialFeatures",
"matplotlib.style.reload_library",
"sklearn.preprocessing.Binarizer",
"pandas.read_csv",
"pandas.qcut",
"scipy.stats.boxcox",
"numpy.log",
"numpy.array",
"matplotlib.pyplot.axvline",
"matplotlib.style.use",
"numpy.isnan",
"pandas.DataFrame",
"matplotlib.pyplot.subplots",
"numpy.round"
] | [((302, 328), 'matplotlib.style.reload_library', 'mpl.style.reload_library', ([], {}), '()\n', (326, 328), True, 'import matplotlib as mpl\n'), ((329, 353), 'matplotlib.style.use', 'mpl.style.use', (['"""classic"""'], {}), "('classic')\n", (342, 353), True, 'import matplotlib as mpl\n'), ((532, 585), 'pandas.read_csv', 'pd.read_csv', (['"""datasets/Pokemon.csv"""'], {'encoding': '"""utf-8"""'}), "('datasets/Pokemon.csv', encoding='utf-8')\n", (543, 585), True, 'import pandas as pd\n'), ((755, 811), 'pandas.read_csv', 'pd.read_csv', (['"""datasets/song_views.csv"""'], {'encoding': '"""utf-8"""'}), "('datasets/song_views.csv', encoding='utf-8')\n", (766, 811), True, 'import pandas as pd\n'), ((872, 908), 'numpy.array', 'np.array', (["popsong_df['listen_count']"], {}), "(popsong_df['listen_count'])\n", (880, 908), True, 'import numpy as np\n'), ((1050, 1074), 'sklearn.preprocessing.Binarizer', 'Binarizer', ([], {'threshold': '(0.9)'}), '(threshold=0.9)\n', (1059, 1074), False, 'from sklearn.preprocessing import Binarizer\n'), ((1237, 1298), 'pandas.read_csv', 'pd.read_csv', (['"""datasets/item_popularity.csv"""'], {'encoding': '"""utf-8"""'}), "('datasets/item_popularity.csv', encoding='utf-8')\n", (1248, 1298), True, 'import pandas as pd\n'), ((1734, 1806), 'sklearn.preprocessing.PolynomialFeatures', 'PolynomialFeatures', ([], {'degree': '(2)', 'interaction_only': '(False)', 'include_bias': '(False)'}), '(degree=2, interaction_only=False, include_bias=False)\n', (1752, 1806), False, 'from sklearn.preprocessing import PolynomialFeatures\n'), ((1856, 1925), 'pandas.DataFrame', 'pd.DataFrame', (['pf.powers_'], {'columns': "['Attack_degree', 'Defense_degree']"}), "(pf.powers_, columns=['Attack_degree', 'Defense_degree'])\n", (1868, 1925), True, 'import pandas as pd\n'), ((1955, 2052), 'pandas.DataFrame', 'pd.DataFrame', (['res'], {'columns': "['Attack', 'Defense', 'Attack^2', 'Attack x Defense', 'Defense^2']"}), "(res, columns=['Attack', 'Defense', 'Attack^2',\n 'Attack x Defense', 'Defense^2'])\n", (1967, 2052), True, 'import pandas as pd\n'), ((2158, 2235), 'pandas.DataFrame', 'pd.DataFrame', (['[[95, 75], [121, 120], [77, 60]]'], {'columns': "['Attack', 'Defense']"}), "([[95, 75], [121, 120], [77, 60]], columns=['Attack', 'Defense'])\n", (2170, 2235), True, 'import pandas as pd\n'), ((2329, 2430), 'pandas.DataFrame', 'pd.DataFrame', (['new_res'], {'columns': "['Attack', 'Defense', 'Attack^2', 'Attack x Defense', 'Defense^2']"}), "(new_res, columns=['Attack', 'Defense', 'Attack^2',\n 'Attack x Defense', 'Defense^2'])\n", (2341, 2430), True, 'import pandas as pd\n'), ((2564, 2638), 'pandas.read_csv', 'pd.read_csv', (['"""datasets/fcc_2016_coder_survey_subset.csv"""'], {'encoding': '"""utf-8"""'}), "('datasets/fcc_2016_coder_survey_subset.csv', encoding='utf-8')\n", (2575, 2638), True, 'import pandas as pd\n'), ((2789, 2803), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (2801, 2803), True, 'import matplotlib.pyplot as plt\n'), ((4170, 4184), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (4182, 4184), True, 'import matplotlib.pyplot as plt\n'), ((4527, 4541), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (4539, 4541), True, 'import matplotlib.pyplot as plt\n'), ((4985, 5034), 'pandas.qcut', 'pd.qcut', (["fcc_survey_df['Income']"], {'q': 'quantile_list'}), "(fcc_survey_df['Income'], q=quantile_list)\n", (4992, 5034), True, 'import pandas as pd\n'), ((5126, 5199), 'pandas.qcut', 'pd.qcut', (["fcc_survey_df['Income']"], {'q': 
'quantile_list', 'labels': 'quantile_labels'}), "(fcc_survey_df['Income'], q=quantile_list, labels=quantile_labels)\n", (5133, 5199), True, 'import pandas as pd\n'), ((5464, 5499), 'numpy.log', 'np.log', (["(1 + fcc_survey_df['Income'])"], {}), "(1 + fcc_survey_df['Income'])\n", (5470, 5499), True, 'import numpy as np\n'), ((5658, 5672), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (5670, 5672), True, 'import matplotlib.pyplot as plt\n'), ((5732, 5771), 'matplotlib.pyplot.axvline', 'plt.axvline', (['income_log_mean'], {'color': '"""r"""'}), "(income_log_mean, color='r')\n", (5743, 5771), True, 'import matplotlib.pyplot as plt\n'), ((6112, 6145), 'numpy.array', 'np.array', (["fcc_survey_df['Income']"], {}), "(fcc_survey_df['Income'])\n", (6120, 6145), True, 'import numpy as np\n'), ((6203, 6231), 'scipy.stats.boxcox', 'spstats.boxcox', (['income_clean'], {}), '(income_clean)\n', (6217, 6231), True, 'import scipy.stats as spstats\n'), ((6330, 6382), 'scipy.stats.boxcox', 'spstats.boxcox', (["(1 + fcc_survey_df['Income'])"], {'lmbda': '(0)'}), "(1 + fcc_survey_df['Income'], lmbda=0)\n", (6344, 6382), True, 'import scipy.stats as spstats\n'), ((6485, 6542), 'scipy.stats.boxcox', 'spstats.boxcox', (["fcc_survey_df['Income']"], {'lmbda': 'opt_lambda'}), "(fcc_survey_df['Income'], lmbda=opt_lambda)\n", (6499, 6542), True, 'import scipy.stats as spstats\n'), ((6847, 6861), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (6859, 6861), True, 'import matplotlib.pyplot as plt\n'), ((6935, 6977), 'matplotlib.pyplot.axvline', 'plt.axvline', (['income_boxcox_mean'], {'color': '"""r"""'}), "(income_boxcox_mean, color='r')\n", (6946, 6977), True, 'import matplotlib.pyplot as plt\n'), ((1379, 1425), 'numpy.round', 'np.round', (["(items_popularity['pop_percent'] * 10)"], {}), "(items_popularity['pop_percent'] * 10)\n", (1387, 1425), True, 'import numpy as np\n'), ((1494, 1541), 'numpy.round', 'np.round', (["(items_popularity['pop_percent'] * 100)"], {}), "(items_popularity['pop_percent'] * 100)\n", (1502, 1541), True, 'import numpy as np\n'), ((3667, 3697), 'numpy.array', 'np.array', (["fcc_survey_df['Age']"], {}), "(fcc_survey_df['Age'])\n", (3675, 3697), True, 'import numpy as np\n'), ((3811, 3841), 'numpy.array', 'np.array', (["fcc_survey_df['Age']"], {}), "(fcc_survey_df['Age'])\n", (3819, 3841), True, 'import numpy as np\n'), ((4635, 4667), 'matplotlib.pyplot.axvline', 'plt.axvline', (['quantile'], {'color': '"""r"""'}), "(quantile, color='r')\n", (4646, 4667), True, 'import matplotlib.pyplot as plt\n'), ((5606, 5642), 'numpy.mean', 'np.mean', (["fcc_survey_df['Income_log']"], {}), "(fcc_survey_df['Income_log'])\n", (5613, 5642), True, 'import numpy as np\n'), ((6781, 6831), 'numpy.mean', 'np.mean', (["fcc_survey_df['Income_boxcox_lambda_opt']"], {}), "(fcc_survey_df['Income_boxcox_lambda_opt'])\n", (6788, 6831), True, 'import numpy as np\n'), ((6169, 6185), 'numpy.isnan', 'np.isnan', (['income'], {}), '(income)\n', (6177, 6185), True, 'import numpy as np\n'), ((3245, 3275), 'numpy.array', 'np.array', (["fcc_survey_df['Age']"], {}), "(fcc_survey_df['Age'])\n", (3253, 3275), True, 'import numpy as np\n')] |