code (stringlengths 22-1.05M) | apis (listlengths 1-3.31k) | extract_api (stringlengths 75-3.25M) |
---|---|---|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Functions used for data handling
"""
__author__ = "<NAME>, GIScience Research Group, Heidelberg University"
__email__ = "<EMAIL>"
import os
import yaml
from shapely.geometry import box
import numpy as np
import pandas as pd
import geopandas as gpd
import json
from nb_utils.utils import create_bbox, reproject_to_utm
CONTEXT_NAMES = {"area": "Area", "building_density": "Building density", "age": "Days since creation",
"n_tags": "Number of tags", "changes": "Number of changes", "max_version": "Version number",
"user_count_inner": "Inner user count", "user_density_inner": "Inner user density",
"user_count_outer": "Outer user count", "user_density_outer": "Outer user density",
"feature_count": "Feature count", "random": "Random"}
rules_colnames = ['antecedents', 'consequents', 'antecedent support',
'consequent support', 'support', 'confidence', 'lift', 'leverage',
'conviction', "context", "context_min", "context_max", "context_p_min", "context_p_max", "nfeatures", "rule"]
pretty_names_units = {"area": "Area [ha]", "building_density": "Building density", "feature_count": "Feature count", "age": "Days since creation", "n_tags": "Number of tags", "changes": "Number of changes", "max_version": "Version number", "user_count_inner": "Inner user count", "user_density_inner": "Inner user density", "user_count_outer": "Outer user count",
"user_density_outer": "Outer user density", "random": "Random"}
def load_config(config_file, cities):
"""
Load config parameters from file
    :param config_file: Path to the YAML configuration file
    :param cities: City names to extract from the "locations" section
    :return: Dict mapping each city name to its configuration
"""
    if not os.path.exists(config_file):
        print("ERROR: Config file {} does not exist.".format(config_file))
        raise FileNotFoundError(config_file)
    else:
with open(config_file, 'r') as src:
config = yaml.load(src, Loader=yaml.FullLoader)
config_cities = config["locations"]
config_cities = {city: config_cities[city] for city in cities}
return config_cities
def load_data(cities, data_dir):
"""
Load data into notebook from file
:return:
"""
loaded_tags_dfs = []
loaded_context_dfs = []
for city in cities:
print("Loading {}...".format(city))
# Check paths
tags_file = os.path.join(data_dir, city, "{}_tags.json".format(city))
context_file = os.path.join(data_dir, city, "{}_context.geojson".format(city))
if (not os.path.exists(tags_file)) or (not os.path.exists(context_file)):
print("{}: Input files not found.".format(city))
            return None
# Read data and set index
tags_df = pd.read_json(tags_file).set_index("@osmId")
context_df = gpd.read_file(context_file).set_index("@osmId")
# Calculate area (should be moved to data_extraction)
context_df["area"] = reproject_to_utm(context_df).area #/ 10000. # conversion to ha
# Add column holding the city name
context_df["city"] = city
loaded_tags_dfs.append(tags_df)
loaded_context_dfs.append(context_df)
# Convert list of dataframes to dataframe
all_tags_df = pd.concat(loaded_tags_dfs, axis=0)
all_tags_df = all_tags_df.fillna(False)
all_context_df = pd.concat(loaded_context_dfs, axis=0)
all_features = all_context_df.join(all_tags_df, sort=False)
# Add dummy columns for "no antecedent" and random context variable
all_features["none"] = True
all_features["random"] = np.random.rand(len(all_features))
    # The park itself is always counted as an object inside of it. Therefore, subtract 1.
all_features["feature_count"] = all_features["feature_count"] - 1
# Delete unnecessary columns
unnecessary_cols = list(filter(lambda x: x.startswith("gt:"), all_features.columns)) + ["leisure=park"]
all_features.drop(unnecessary_cols, axis=1, inplace=True)
return all_features
def create_city_bboxes(config_cities):
"""
    Create bboxes of cities
:return:
"""
bboxes = {c: box(*create_bbox(config_cities[c]["center"], config_cities[c]["width"])) for c in config_cities.keys()}
bbox_df = pd.DataFrame().from_dict(bboxes, orient="index", columns=["geometry"])
return gpd.GeoDataFrame(bbox_df)
def dump_city_rules(city_rules, interim_dir):
"""
Write results from context based association rule analysis to file
:param city_rules:
:param interim_dir:
:return:
"""
city_rules_dir = os.path.join(interim_dir, "city_rules")
if not os.path.exists(city_rules_dir):
os.mkdir(city_rules_dir)
for k, v in city_rules.items():
print(k)
v["heatmap"].to_json(os.path.join(city_rules_dir, "{}_heatmap.json".format(k)))
v["valid_rules"].reset_index().to_json(os.path.join(city_rules_dir, "{}_valid_rules.json".format(k)))
with open(os.path.join(city_rules_dir, "{}_sel_features.json".format(k)), "w") as dst:
json.dump(list(v["sel_features"].index), dst)
def load_city_rules(cities, interim_dir, all_features):
"""
    Load results of context based association rule analysis from file
:param cities:
:param interim_dir:
:param all_features:
:return:
"""
city_rules = {}
for city in cities:
with open(os.path.join(interim_dir, "city_rules", "{}_sel_features.json".format(city))) as dst:
selected_ids = json.load(dst)
sel_features = all_features.loc[selected_ids]
city_rules[city] = {
"heatmap": pd.read_json(os.path.join(interim_dir, "city_rules", "{}_heatmap.json".format(city))),
"valid_rules": pd.read_json(
os.path.join(interim_dir, "city_rules", "{}_valid_rules.json".format(city))).set_index("index"),
"sel_features": sel_features}
return city_rules
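# --- Editor's usage sketch (added, not part of the original module) ---
# A minimal, hypothetical example of how the functions above might be chained.
# It assumes a YAML config with a "locations" section containing "heidelberg",
# and a data directory laid out as <data_dir>/<city>/<city>_tags.json plus
# <city>_context.geojson; the file names and city are illustrative only.
if __name__ == "__main__":
    config_cities = load_config("config.yml", cities=["heidelberg"])
    all_features = load_data(["heidelberg"], data_dir="data")
    bboxes = create_city_bboxes(config_cities)
    print(bboxes.head())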
| [
"os.path.exists",
"pandas.read_json",
"geopandas.read_file",
"os.path.join",
"yaml.load",
"nb_utils.utils.create_bbox",
"json.load",
"nb_utils.utils.reproject_to_utm",
"os.mkdir",
"pandas.DataFrame",
"pandas.concat",
"geopandas.GeoDataFrame"
]
| [((3221, 3255), 'pandas.concat', 'pd.concat', (['loaded_tags_dfs'], {'axis': '(0)'}), '(loaded_tags_dfs, axis=0)\n', (3230, 3255), True, 'import pandas as pd\n'), ((3321, 3358), 'pandas.concat', 'pd.concat', (['loaded_context_dfs'], {'axis': '(0)'}), '(loaded_context_dfs, axis=0)\n', (3330, 3358), True, 'import pandas as pd\n'), ((4295, 4320), 'geopandas.GeoDataFrame', 'gpd.GeoDataFrame', (['bbox_df'], {}), '(bbox_df)\n', (4311, 4320), True, 'import geopandas as gpd\n'), ((4537, 4576), 'os.path.join', 'os.path.join', (['interim_dir', '"""city_rules"""'], {}), "(interim_dir, 'city_rules')\n", (4549, 4576), False, 'import os\n'), ((1723, 1750), 'os.path.exists', 'os.path.exists', (['config_file'], {}), '(config_file)\n', (1737, 1750), False, 'import os\n'), ((4588, 4618), 'os.path.exists', 'os.path.exists', (['city_rules_dir'], {}), '(city_rules_dir)\n', (4602, 4618), False, 'import os\n'), ((4628, 4652), 'os.mkdir', 'os.mkdir', (['city_rules_dir'], {}), '(city_rules_dir)\n', (4636, 4652), False, 'import os\n'), ((1902, 1940), 'yaml.load', 'yaml.load', (['src'], {'Loader': 'yaml.FullLoader'}), '(src, Loader=yaml.FullLoader)\n', (1911, 1940), False, 'import yaml\n'), ((2929, 2957), 'nb_utils.utils.reproject_to_utm', 'reproject_to_utm', (['context_df'], {}), '(context_df)\n', (2945, 2957), False, 'from nb_utils.utils import create_bbox, reproject_to_utm\n'), ((4213, 4227), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (4225, 4227), True, 'import pandas as pd\n'), ((5457, 5471), 'json.load', 'json.load', (['dst'], {}), '(dst)\n', (5466, 5471), False, 'import json\n'), ((2508, 2533), 'os.path.exists', 'os.path.exists', (['tags_file'], {}), '(tags_file)\n', (2522, 2533), False, 'import os\n'), ((2543, 2571), 'os.path.exists', 'os.path.exists', (['context_file'], {}), '(context_file)\n', (2557, 2571), False, 'import os\n'), ((2724, 2747), 'pandas.read_json', 'pd.read_json', (['tags_file'], {}), '(tags_file)\n', (2736, 2747), True, 'import pandas as pd\n'), ((2789, 2816), 'geopandas.read_file', 'gpd.read_file', (['context_file'], {}), '(context_file)\n', (2802, 2816), True, 'import geopandas as gpd\n'), ((4100, 4166), 'nb_utils.utils.create_bbox', 'create_bbox', (["config_cities[c]['center']", "config_cities[c]['width']"], {}), "(config_cities[c]['center'], config_cities[c]['width'])\n", (4111, 4166), False, 'from nb_utils.utils import create_bbox, reproject_to_utm\n')] |
from warnings import warn
from tables.utilsextension import *
_warnmsg = ("utilsExtension is pending deprecation, import utilsextension instead. "
"You may use the pt2to3 tool to update your source code.")
warn(_warnmsg, DeprecationWarning, stacklevel=2)
| [
"warnings.warn"
]
| [((224, 272), 'warnings.warn', 'warn', (['_warnmsg', 'DeprecationWarning'], {'stacklevel': '(2)'}), '(_warnmsg, DeprecationWarning, stacklevel=2)\n', (228, 272), False, 'from warnings import warn\n')] |
from functions import get_df, write_df
import geopy
from geopy import distance
"""
The function question3 takes in the latitude and longitude of potential distress locations,
and returns the nearest port with essential provisions such as water, fuel_oil and diesel.
"""
def question3(dataset_name, latitude, longitude):
df = get_df()
distress_location = (latitude, longitude)
ports_with_provisions = df[(df['provisions'] == True) & (df['water'] == True) & (df['fuel_oil'] == True) & (df['diesel'] == True)]
results = []
for each in ports_with_provisions.itertuples(index=False):
each_coords = (float(each[4]), float(each[5]))
dist = geopy.distance.geodesic(distress_location, each_coords)
results.append(dist.km)
ports_with_provisions['dist'] = results
answer3 = ports_with_provisions.sort_values(by='dist', ascending=True)[['country', 'port_name', 'port_latitude', 'port_longitude']].head(1)
write_df(answer3, dataset_name, 'Table for Question 3')
if __name__ == "__main__":
question3("foodpanda_tables", 32.610982, -38.706256)
| [
"geopy.distance.geodesic",
"functions.write_df",
"functions.get_df"
]
| [((332, 340), 'functions.get_df', 'get_df', ([], {}), '()\n', (338, 340), False, 'from functions import get_df, write_df\n'), ((956, 1011), 'functions.write_df', 'write_df', (['answer3', 'dataset_name', '"""Table for Question 3"""'], {}), "(answer3, dataset_name, 'Table for Question 3')\n", (964, 1011), False, 'from functions import get_df, write_df\n'), ((674, 729), 'geopy.distance.geodesic', 'geopy.distance.geodesic', (['distress_location', 'each_coords'], {}), '(distress_location, each_coords)\n', (697, 729), False, 'import geopy\n')] |
import pybullet as p
#p.connect(p.UDP,"192.168.86.100")
p.connect(p.SHARED_MEMORY)
p.resetSimulation()
objects = [p.loadURDF("plane.urdf", 0.000000,0.000000,0.000000,0.000000,0.000000,0.000000,1.000000)]
objects = [p.loadURDF("samurai.urdf", 0.000000,0.000000,0.000000,0.000000,0.000000,0.000000,1.000000)]
objects = [p.loadURDF("pr2_gripper.urdf", 0.500000,0.300006,0.700000,-0.000000,-0.000000,-0.000031,1.000000)]
pr2_gripper = objects[0]
print ("pr2_gripper=")
print (pr2_gripper)
jointPositions=[ 0.550569, 0.000000, 0.549657, 0.000000 ]
for jointIndex in range (p.getNumJoints(pr2_gripper)):
p.resetJointState(pr2_gripper,jointIndex,jointPositions[jointIndex])
pr2_cid = p.createConstraint(pr2_gripper,-1,-1,-1,p.JOINT_FIXED,[0,0,0],[0.2,0,0],[0.500000,0.300006,0.700000])
print ("pr2_cid")
print (pr2_cid)
objects = [p.loadURDF("kuka_iiwa/model_vr_limits.urdf", 1.400000,-0.200000,0.600000,0.000000,0.000000,0.000000,1.000000)]
kuka = objects[0]
jointPositions=[ -0.000000, -0.000000, 0.000000, 1.570793, 0.000000, -1.036725, 0.000001 ]
for jointIndex in range (p.getNumJoints(kuka)):
p.resetJointState(kuka,jointIndex,jointPositions[jointIndex])
p.setJointMotorControl2(kuka,jointIndex,p.POSITION_CONTROL,jointPositions[jointIndex],0)
objects = [p.loadURDF("lego/lego.urdf", 1.000000,-0.200000,0.700000,0.000000,0.000000,0.000000,1.000000)]
objects = [p.loadURDF("lego/lego.urdf", 1.000000,-0.200000,0.800000,0.000000,0.000000,0.000000,1.000000)]
objects = [p.loadURDF("lego/lego.urdf", 1.000000,-0.200000,0.900000,0.000000,0.000000,0.000000,1.000000)]
objects = p.loadSDF("gripper/wsg50_one_motor_gripper_new_free_base.sdf")
kuka_gripper = objects[0]
print ("kuka gripper=")
print(kuka_gripper)
p.resetBasePositionAndOrientation(kuka_gripper,[0.923103,-0.200000,1.250036],[-0.000000,0.964531,-0.000002,-0.263970])
jointPositions=[ 0.000000, -0.011130, -0.206421, 0.205143, -0.009999, 0.000000, -0.010055, 0.000000 ]
for jointIndex in range (p.getNumJoints(kuka_gripper)):
p.resetJointState(kuka_gripper,jointIndex,jointPositions[jointIndex])
p.setJointMotorControl2(kuka_gripper,jointIndex,p.POSITION_CONTROL,jointPositions[jointIndex],0)
kuka_cid = p.createConstraint(kuka, 6, kuka_gripper,0,p.JOINT_FIXED, [0,0,0], [0,0,0.05],[0,0,0])
objects = [p.loadURDF("jenga/jenga.urdf", 1.300000,-0.700000,0.750000,0.000000,0.707107,0.000000,0.707107)]
objects = [p.loadURDF("jenga/jenga.urdf", 1.200000,-0.700000,0.750000,0.000000,0.707107,0.000000,0.707107)]
objects = [p.loadURDF("jenga/jenga.urdf", 1.100000,-0.700000,0.750000,0.000000,0.707107,0.000000,0.707107)]
objects = [p.loadURDF("jenga/jenga.urdf", 1.000000,-0.700000,0.750000,0.000000,0.707107,0.000000,0.707107)]
objects = [p.loadURDF("jenga/jenga.urdf", 0.900000,-0.700000,0.750000,0.000000,0.707107,0.000000,0.707107)]
objects = [p.loadURDF("jenga/jenga.urdf", 0.800000,-0.700000,0.750000,0.000000,0.707107,0.000000,0.707107)]
objects = [p.loadURDF("table/table.urdf", 1.000000,-0.200000,0.000000,0.000000,0.000000,0.707107,0.707107)]
objects = [p.loadURDF("teddy_vhacd.urdf", 1.050000,-0.500000,0.700000,0.000000,0.000000,0.707107,0.707107)]
objects = [p.loadURDF("cube_small.urdf", 0.950000,-0.100000,0.700000,0.000000,0.000000,0.707107,0.707107)]
objects = [p.loadURDF("sphere_small.urdf", 0.850000,-0.400000,0.700000,0.000000,0.000000,0.707107,0.707107)]
objects = [p.loadURDF("duck_vhacd.urdf", 0.850000,-0.400000,0.900000,0.000000,0.000000,0.707107,0.707107)]
objects = p.loadSDF("kiva_shelf/model.sdf")
ob = objects[0]
p.resetBasePositionAndOrientation(ob,[0.000000,1.000000,1.204500],[0.000000,0.000000,0.000000,1.000000])
objects = [p.loadURDF("teddy_vhacd.urdf", -0.100000,0.600000,0.850000,0.000000,0.000000,0.000000,1.000000)]
objects = [p.loadURDF("sphere_small.urdf", -0.100000,0.955006,1.169706,0.633232,-0.000000,-0.000000,0.773962)]
objects = [p.loadURDF("cube_small.urdf", 0.300000,0.600000,0.850000,0.000000,0.000000,0.000000,1.000000)]
objects = [p.loadURDF("table_square/table_square.urdf", -1.000000,0.000000,0.000000,0.000000,0.000000,0.000000,1.000000)]
ob = objects[0]
jointPositions=[ 0.000000 ]
for jointIndex in range (p.getNumJoints(ob)):
p.resetJointState(ob,jointIndex,jointPositions[jointIndex])
objects = [p.loadURDF("husky/husky.urdf", 2.000000,-5.000000,1.000000,0.000000,0.000000,0.000000,1.000000)]
ob = objects[0]
jointPositions=[ 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000 ]
for jointIndex in range (p.getNumJoints(ob)):
p.resetJointState(ob,jointIndex,jointPositions[jointIndex])
p.setGravity(0.000000,0.000000,0.000000)
p.setGravity(0,0,-10)
p.stepSimulation()
p.disconnect()
| [
"pybullet.resetJointState",
"pybullet.resetSimulation",
"pybullet.loadSDF",
"pybullet.connect",
"pybullet.createConstraint",
"pybullet.getNumJoints",
"pybullet.setGravity",
"pybullet.disconnect",
"pybullet.stepSimulation",
"pybullet.resetBasePositionAndOrientation",
"pybullet.loadURDF",
"pybullet.setJointMotorControl2"
]
| [((56, 82), 'pybullet.connect', 'p.connect', (['p.SHARED_MEMORY'], {}), '(p.SHARED_MEMORY)\n', (65, 82), True, 'import pybullet as p\n'), ((83, 102), 'pybullet.resetSimulation', 'p.resetSimulation', ([], {}), '()\n', (100, 102), True, 'import pybullet as p\n'), ((681, 789), 'pybullet.createConstraint', 'p.createConstraint', (['pr2_gripper', '(-1)', '(-1)', '(-1)', 'p.JOINT_FIXED', '[0, 0, 0]', '[0.2, 0, 0]', '[0.5, 0.300006, 0.7]'], {}), '(pr2_gripper, -1, -1, -1, p.JOINT_FIXED, [0, 0, 0], [0.2,\n 0, 0], [0.5, 0.300006, 0.7])\n', (699, 789), True, 'import pybullet as p\n'), ((1579, 1641), 'pybullet.loadSDF', 'p.loadSDF', (['"""gripper/wsg50_one_motor_gripper_new_free_base.sdf"""'], {}), "('gripper/wsg50_one_motor_gripper_new_free_base.sdf')\n", (1588, 1641), True, 'import pybullet as p\n'), ((1713, 1828), 'pybullet.resetBasePositionAndOrientation', 'p.resetBasePositionAndOrientation', (['kuka_gripper', '[0.923103, -0.2, 1.250036]', '[-0.0, 0.964531, -2e-06, -0.26397]'], {}), '(kuka_gripper, [0.923103, -0.2, 1.250036],\n [-0.0, 0.964531, -2e-06, -0.26397])\n', (1746, 1828), True, 'import pybullet as p\n'), ((2172, 2272), 'pybullet.createConstraint', 'p.createConstraint', (['kuka', '(6)', 'kuka_gripper', '(0)', 'p.JOINT_FIXED', '[0, 0, 0]', '[0, 0, 0.05]', '[0, 0, 0]'], {}), '(kuka, 6, kuka_gripper, 0, p.JOINT_FIXED, [0, 0, 0], [0, \n 0, 0.05], [0, 0, 0])\n', (2190, 2272), True, 'import pybullet as p\n'), ((3460, 3493), 'pybullet.loadSDF', 'p.loadSDF', (['"""kiva_shelf/model.sdf"""'], {}), "('kiva_shelf/model.sdf')\n", (3469, 3493), True, 'import pybullet as p\n'), ((3510, 3589), 'pybullet.resetBasePositionAndOrientation', 'p.resetBasePositionAndOrientation', (['ob', '[0.0, 1.0, 1.2045]', '[0.0, 0.0, 0.0, 1.0]'], {}), '(ob, [0.0, 1.0, 1.2045], [0.0, 0.0, 0.0, 1.0])\n', (3543, 3589), True, 'import pybullet as p\n'), ((4564, 4591), 'pybullet.setGravity', 'p.setGravity', (['(0.0)', '(0.0)', '(0.0)'], {}), '(0.0, 0.0, 0.0)\n', (4576, 4591), True, 'import pybullet as p\n'), ((4605, 4628), 'pybullet.setGravity', 'p.setGravity', (['(0)', '(0)', '(-10)'], {}), '(0, 0, -10)\n', (4617, 4628), True, 'import pybullet as p\n'), ((4628, 4646), 'pybullet.stepSimulation', 'p.stepSimulation', ([], {}), '()\n', (4644, 4646), True, 'import pybullet as p\n'), ((4648, 4662), 'pybullet.disconnect', 'p.disconnect', ([], {}), '()\n', (4660, 4662), True, 'import pybullet as p\n'), ((115, 174), 'pybullet.loadURDF', 'p.loadURDF', (['"""plane.urdf"""', '(0.0)', '(0.0)', '(0.0)', '(0.0)', '(0.0)', '(0.0)', '(1.0)'], {}), "('plane.urdf', 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0)\n", (125, 174), True, 'import pybullet as p\n'), ((216, 277), 'pybullet.loadURDF', 'p.loadURDF', (['"""samurai.urdf"""', '(0.0)', '(0.0)', '(0.0)', '(0.0)', '(0.0)', '(0.0)', '(1.0)'], {}), "('samurai.urdf', 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0)\n", (226, 277), True, 'import pybullet as p\n'), ((319, 396), 'pybullet.loadURDF', 'p.loadURDF', (['"""pr2_gripper.urdf"""', '(0.5)', '(0.300006)', '(0.7)', '(-0.0)', '(-0.0)', '(-3.1e-05)', '(1.0)'], {}), "('pr2_gripper.urdf', 0.5, 0.300006, 0.7, -0.0, -0.0, -3.1e-05, 1.0)\n", (329, 396), True, 'import pybullet as p\n'), ((570, 597), 'pybullet.getNumJoints', 'p.getNumJoints', (['pr2_gripper'], {}), '(pr2_gripper)\n', (584, 597), True, 'import pybullet as p\n'), ((601, 671), 'pybullet.resetJointState', 'p.resetJointState', (['pr2_gripper', 'jointIndex', 'jointPositions[jointIndex]'], {}), '(pr2_gripper, jointIndex, jointPositions[jointIndex])\n', (618, 671), True, 'import pybullet as p\n'), ((829, 914), 
'pybullet.loadURDF', 'p.loadURDF', (['"""kuka_iiwa/model_vr_limits.urdf"""', '(1.4)', '(-0.2)', '(0.6)', '(0.0)', '(0.0)', '(0.0)', '(1.0)'], {}), "('kuka_iiwa/model_vr_limits.urdf', 1.4, -0.2, 0.6, 0.0, 0.0, 0.0, 1.0\n )\n", (839, 914), True, 'import pybullet as p\n'), ((1074, 1094), 'pybullet.getNumJoints', 'p.getNumJoints', (['kuka'], {}), '(kuka)\n', (1088, 1094), True, 'import pybullet as p\n'), ((1098, 1161), 'pybullet.resetJointState', 'p.resetJointState', (['kuka', 'jointIndex', 'jointPositions[jointIndex]'], {}), '(kuka, jointIndex, jointPositions[jointIndex])\n', (1115, 1161), True, 'import pybullet as p\n'), ((1161, 1257), 'pybullet.setJointMotorControl2', 'p.setJointMotorControl2', (['kuka', 'jointIndex', 'p.POSITION_CONTROL', 'jointPositions[jointIndex]', '(0)'], {}), '(kuka, jointIndex, p.POSITION_CONTROL,\n jointPositions[jointIndex], 0)\n', (1184, 1257), True, 'import pybullet as p\n'), ((1262, 1326), 'pybullet.loadURDF', 'p.loadURDF', (['"""lego/lego.urdf"""', '(1.0)', '(-0.2)', '(0.7)', '(0.0)', '(0.0)', '(0.0)', '(1.0)'], {}), "('lego/lego.urdf', 1.0, -0.2, 0.7, 0.0, 0.0, 0.0, 1.0)\n", (1272, 1326), True, 'import pybullet as p\n'), ((1368, 1432), 'pybullet.loadURDF', 'p.loadURDF', (['"""lego/lego.urdf"""', '(1.0)', '(-0.2)', '(0.8)', '(0.0)', '(0.0)', '(0.0)', '(1.0)'], {}), "('lego/lego.urdf', 1.0, -0.2, 0.8, 0.0, 0.0, 0.0, 1.0)\n", (1378, 1432), True, 'import pybullet as p\n'), ((1474, 1538), 'pybullet.loadURDF', 'p.loadURDF', (['"""lego/lego.urdf"""', '(1.0)', '(-0.2)', '(0.9)', '(0.0)', '(0.0)', '(0.0)', '(1.0)'], {}), "('lego/lego.urdf', 1.0, -0.2, 0.9, 0.0, 0.0, 0.0, 1.0)\n", (1484, 1538), True, 'import pybullet as p\n'), ((1959, 1987), 'pybullet.getNumJoints', 'p.getNumJoints', (['kuka_gripper'], {}), '(kuka_gripper)\n', (1973, 1987), True, 'import pybullet as p\n'), ((1991, 2062), 'pybullet.resetJointState', 'p.resetJointState', (['kuka_gripper', 'jointIndex', 'jointPositions[jointIndex]'], {}), '(kuka_gripper, jointIndex, jointPositions[jointIndex])\n', (2008, 2062), True, 'import pybullet as p\n'), ((2062, 2166), 'pybullet.setJointMotorControl2', 'p.setJointMotorControl2', (['kuka_gripper', 'jointIndex', 'p.POSITION_CONTROL', 'jointPositions[jointIndex]', '(0)'], {}), '(kuka_gripper, jointIndex, p.POSITION_CONTROL,\n jointPositions[jointIndex], 0)\n', (2085, 2166), True, 'import pybullet as p\n'), ((2274, 2351), 'pybullet.loadURDF', 'p.loadURDF', (['"""jenga/jenga.urdf"""', '(1.3)', '(-0.7)', '(0.75)', '(0.0)', '(0.707107)', '(0.0)', '(0.707107)'], {}), "('jenga/jenga.urdf', 1.3, -0.7, 0.75, 0.0, 0.707107, 0.0, 0.707107)\n", (2284, 2351), True, 'import pybullet as p\n'), ((2382, 2459), 'pybullet.loadURDF', 'p.loadURDF', (['"""jenga/jenga.urdf"""', '(1.2)', '(-0.7)', '(0.75)', '(0.0)', '(0.707107)', '(0.0)', '(0.707107)'], {}), "('jenga/jenga.urdf', 1.2, -0.7, 0.75, 0.0, 0.707107, 0.0, 0.707107)\n", (2392, 2459), True, 'import pybullet as p\n'), ((2490, 2567), 'pybullet.loadURDF', 'p.loadURDF', (['"""jenga/jenga.urdf"""', '(1.1)', '(-0.7)', '(0.75)', '(0.0)', '(0.707107)', '(0.0)', '(0.707107)'], {}), "('jenga/jenga.urdf', 1.1, -0.7, 0.75, 0.0, 0.707107, 0.0, 0.707107)\n", (2500, 2567), True, 'import pybullet as p\n'), ((2598, 2675), 'pybullet.loadURDF', 'p.loadURDF', (['"""jenga/jenga.urdf"""', '(1.0)', '(-0.7)', '(0.75)', '(0.0)', '(0.707107)', '(0.0)', '(0.707107)'], {}), "('jenga/jenga.urdf', 1.0, -0.7, 0.75, 0.0, 0.707107, 0.0, 0.707107)\n", (2608, 2675), True, 'import pybullet as p\n'), ((2706, 2783), 'pybullet.loadURDF', 'p.loadURDF', 
(['"""jenga/jenga.urdf"""', '(0.9)', '(-0.7)', '(0.75)', '(0.0)', '(0.707107)', '(0.0)', '(0.707107)'], {}), "('jenga/jenga.urdf', 0.9, -0.7, 0.75, 0.0, 0.707107, 0.0, 0.707107)\n", (2716, 2783), True, 'import pybullet as p\n'), ((2814, 2891), 'pybullet.loadURDF', 'p.loadURDF', (['"""jenga/jenga.urdf"""', '(0.8)', '(-0.7)', '(0.75)', '(0.0)', '(0.707107)', '(0.0)', '(0.707107)'], {}), "('jenga/jenga.urdf', 0.8, -0.7, 0.75, 0.0, 0.707107, 0.0, 0.707107)\n", (2824, 2891), True, 'import pybullet as p\n'), ((2922, 2998), 'pybullet.loadURDF', 'p.loadURDF', (['"""table/table.urdf"""', '(1.0)', '(-0.2)', '(0.0)', '(0.0)', '(0.0)', '(0.707107)', '(0.707107)'], {}), "('table/table.urdf', 1.0, -0.2, 0.0, 0.0, 0.0, 0.707107, 0.707107)\n", (2932, 2998), True, 'import pybullet as p\n'), ((3030, 3107), 'pybullet.loadURDF', 'p.loadURDF', (['"""teddy_vhacd.urdf"""', '(1.05)', '(-0.5)', '(0.7)', '(0.0)', '(0.0)', '(0.707107)', '(0.707107)'], {}), "('teddy_vhacd.urdf', 1.05, -0.5, 0.7, 0.0, 0.0, 0.707107, 0.707107)\n", (3040, 3107), True, 'import pybullet as p\n'), ((3138, 3214), 'pybullet.loadURDF', 'p.loadURDF', (['"""cube_small.urdf"""', '(0.95)', '(-0.1)', '(0.7)', '(0.0)', '(0.0)', '(0.707107)', '(0.707107)'], {}), "('cube_small.urdf', 0.95, -0.1, 0.7, 0.0, 0.0, 0.707107, 0.707107)\n", (3148, 3214), True, 'import pybullet as p\n'), ((3245, 3323), 'pybullet.loadURDF', 'p.loadURDF', (['"""sphere_small.urdf"""', '(0.85)', '(-0.4)', '(0.7)', '(0.0)', '(0.0)', '(0.707107)', '(0.707107)'], {}), "('sphere_small.urdf', 0.85, -0.4, 0.7, 0.0, 0.0, 0.707107, 0.707107)\n", (3255, 3323), True, 'import pybullet as p\n'), ((3354, 3430), 'pybullet.loadURDF', 'p.loadURDF', (['"""duck_vhacd.urdf"""', '(0.85)', '(-0.4)', '(0.9)', '(0.0)', '(0.0)', '(0.707107)', '(0.707107)'], {}), "('duck_vhacd.urdf', 0.85, -0.4, 0.9, 0.0, 0.0, 0.707107, 0.707107)\n", (3364, 3430), True, 'import pybullet as p\n'), ((3626, 3693), 'pybullet.loadURDF', 'p.loadURDF', (['"""teddy_vhacd.urdf"""', '(-0.1)', '(0.6)', '(0.85)', '(0.0)', '(0.0)', '(0.0)', '(1.0)'], {}), "('teddy_vhacd.urdf', -0.1, 0.6, 0.85, 0.0, 0.0, 0.0, 1.0)\n", (3636, 3693), True, 'import pybullet as p\n'), ((3734, 3828), 'pybullet.loadURDF', 'p.loadURDF', (['"""sphere_small.urdf"""', '(-0.1)', '(0.955006)', '(1.169706)', '(0.633232)', '(-0.0)', '(-0.0)', '(0.773962)'], {}), "('sphere_small.urdf', -0.1, 0.955006, 1.169706, 0.633232, -0.0, -\n 0.0, 0.773962)\n", (3744, 3828), True, 'import pybullet as p\n'), ((3845, 3910), 'pybullet.loadURDF', 'p.loadURDF', (['"""cube_small.urdf"""', '(0.3)', '(0.6)', '(0.85)', '(0.0)', '(0.0)', '(0.0)', '(1.0)'], {}), "('cube_small.urdf', 0.3, 0.6, 0.85, 0.0, 0.0, 0.0, 1.0)\n", (3855, 3910), True, 'import pybullet as p\n'), ((3951, 4036), 'pybullet.loadURDF', 'p.loadURDF', (['"""table_square/table_square.urdf"""', '(-1.0)', '(0.0)', '(0.0)', '(0.0)', '(0.0)', '(0.0)', '(1.0)'], {}), "('table_square/table_square.urdf', -1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0\n )\n", (3961, 4036), True, 'import pybullet as p\n'), ((4131, 4149), 'pybullet.getNumJoints', 'p.getNumJoints', (['ob'], {}), '(ob)\n', (4145, 4149), True, 'import pybullet as p\n'), ((4153, 4214), 'pybullet.resetJointState', 'p.resetJointState', (['ob', 'jointIndex', 'jointPositions[jointIndex]'], {}), '(ob, jointIndex, jointPositions[jointIndex])\n', (4170, 4214), True, 'import pybullet as p\n'), ((4225, 4291), 'pybullet.loadURDF', 'p.loadURDF', (['"""husky/husky.urdf"""', '(2.0)', '(-5.0)', '(1.0)', '(0.0)', '(0.0)', '(0.0)', '(1.0)'], {}), "('husky/husky.urdf', 2.0, -5.0, 1.0, 0.0, 0.0, 
0.0, 1.0)\n", (4235, 4291), True, 'import pybullet as p\n'), ((4481, 4499), 'pybullet.getNumJoints', 'p.getNumJoints', (['ob'], {}), '(ob)\n', (4495, 4499), True, 'import pybullet as p\n'), ((4503, 4564), 'pybullet.resetJointState', 'p.resetJointState', (['ob', 'jointIndex', 'jointPositions[jointIndex]'], {}), '(ob, jointIndex, jointPositions[jointIndex])\n', (4520, 4564), True, 'import pybullet as p\n')] |
from __future__ import annotations
from typing import Optional, Dict, List, Union, Type, TYPE_CHECKING
from datetime import date, datetime
import pandas as pd
import numpy as np
import re
import locale
try:
locale.setlocale(locale.LC_ALL, "en_US.UTF-8")
except locale.Error:
# Readthedocs has a problem, but difficult to replicate
locale.setlocale(locale.LC_ALL, "")
from . import CoreScript
from ..models import ColumnModel
from ..types import MimeType
if TYPE_CHECKING:
from ..schema import Schema
from ..models import DataSourceModel
class WranglingScript:
"""Get, review and restructure tabular data."""
def __init__(self):
self.check_source = CoreScript().check_source
self.core = CoreScript()
self.DATE_FORMATS = {
"date": {"fmt": ["%Y-%m-%d"], "txt": ["YYYY-MM-DD"]},
"datetime": {
"fmt": ["%Y-%m-%d %H:%M:%S", "%Y-%m-%d %H:%M:%S %Z%z"],
"txt": ["YYYY-MM-DD hh:mm:ss", "YYYY-MM-DD hh:mm:ss UTC+0000"],
},
"year": {"fmt": ["%Y"], "txt": ["YYYY"]},
}
def get_dataframe(
self,
source: str,
preserve: Union[str, List[str]] = None,
filetype: MimeType = MimeType.CSV,
names: Optional[List[str]] = None,
nrows: Optional[int] = None,
) -> Union[Dict[str, pd.DataFrame], pd.DataFrame]:
"""Return a Pandas dataframe from a given source.
        Accepts default pandas parameters for Excel and CSV, but the objective is to preserve the source data with
        little data conversion outside of the data wrangling process. With this in mind, columns listed in `preserve`
        are read as strings so that their original values are kept untouched.
Parameters
----------
source: str
Source filename.
preserve: str or list of str, default None
Column names where variable type guessing must be prevented and the original data preserved.
Critical for foreign key references with weird formats, like integers with leading `0`.
filetype: MimeType, default MimeType.CSV
Pandas can read a diversity of filetypes, but whyqd has only been tested on `xls`, `xlsx` and `csv`.
names: list of str, default None
If the source data has no header row, explicitly pass a list of names - in the correct order - to address
the data.
nrows: int, default None
A specified number of rows to return. For review, it is faster to load only a small number.
Returns
-------
DataFrame or dict of DataFrame
"""
self.check_source(source)
# If the dtypes have not been set, then ensure that any provided preserved columns remain untouched
# i.e. no forcing of text to numbers
# defaulting to `dtype = object` ...
kwargs = {}
if preserve:
if not isinstance(preserve, list):
preserve = [preserve]
# kwargs["dtype"] = {k: object for k in preserve}
kwargs["dtype"] = {k: pd.StringDtype() for k in preserve}
if names:
kwargs["header"] = None
kwargs["names"] = names
if nrows:
kwargs["nrows"] = nrows
# Check filetype
if filetype in [MimeType.XLS, MimeType.XLSX]:
# This will default to returning a dictionary of dataframes for each sheet
kwargs["sheet_name"] = None
df = pd.read_excel(source, **kwargs)
keys = list(df.keys())
for k in keys:
if df[k].empty:
del df[k]
if len(df.keys()) == 1:
df = df[keys[0]]
if filetype == MimeType.CSV:
# New in pandas 1.3: will ignore encoding errors - perfect for this initial wrangling process
kwargs["encoding_errors"] = "ignore"
            # Supposed to help with fruity separator guessing
kwargs["engine"] = "python"
if not nrows:
df = pd.read_csv(source, **kwargs)
else:
kwargs["iterator"] = True
kwargs["chunksize"] = 10000
df_iterator = pd.read_csv(source, **kwargs)
df = pd.concat(df_iterator, ignore_index=True)
return df
def get_dataframe_from_datasource(self, data: DataSourceModel) -> pd.DataFrame:
"""Return the dataframe for a data source.
Parameters
----------
data: DataSourceModel
Returns
-------
pd.DataFrame
"""
path = data.path
try:
self.core.check_source(path)
except FileNotFoundError:
path = str(self.directory / data.source)
self.core.check_source(path)
df_columns = [d.name for d in data.columns]
names = [d.name for d in data.names] if data.names else None
df = self.get_dataframe(
source=path,
filetype=data.mime,
names=names,
preserve=[d.name for d in data.preserve if d.name in df_columns],
)
if isinstance(df, dict):
if df:
df = df[data.sheet_name]
else:
# It's an empty df for some reason. Maybe excessive filtering.
df = pd.DataFrame()
if df.empty:
raise ValueError(
f"Data source contains no data ({data.path}). Review actions to see if any were more destructive than expected."
)
return df
    def get_dataframe_columns(self, df: pd.DataFrame) -> List[ColumnModel]:
"""Returns a list of ColumnModels from a source DataFrame.
Parameters
----------
df: pd.DataFrame
Should be derived from `get_dataframe` with a sensible default for `nrows` being 50.
Returns
-------
List of ColumnModel
"""
# Prepare summary
columns = [
{"name": k, "type": "number"}
if v in ["float64", "int64"]
else {"name": k, "type": "date"}
if v in ["datetime64[ns]"]
else {"name": k, "type": "string"}
for k, v in df.dtypes.apply(lambda x: x.name).to_dict().items()
]
return [ColumnModel(**c) for c in columns]
def deduplicate_columns(self, df: pd.DataFrame, schema: Type[Schema]) -> pd.Index:
"""
Source: https://stackoverflow.com/a/65254771/295606
Source: https://stackoverflow.com/a/55405151
Returns a new column list permitting deduplication of dataframes which may result from merge.
Parameters
----------
df: pd.DataFrame
fields: list of FieldModel
Destination Schema fields
Returns
-------
pd.Index
Updated column names
"""
column_index = pd.Series(df.columns.tolist())
if df.columns.has_duplicates:
duplicates = column_index[column_index.duplicated()].unique()
for name in duplicates:
dups = column_index == name
replacements = [f"{name}{i}" if i != 0 else name for i in range(dups.sum())]
column_index.loc[dups] = replacements
# Fix any fields with the same name as any of the target fields
# Do this to 'force' schema assignment
for name in [f.name for f in schema.get.fields]:
dups = column_index == name
replacements = [f"{name}{i}__dd" if i != 0 else f"{name}__dd" for i in range(dups.sum())]
column_index.loc[dups] = replacements
return pd.Index(column_index)
# def check_column_unique(self, source: str, key: str) -> bool:
# """
# Test a column in a dataframe to ensure all values are unique.
# Parameters
# ----------
# source: Source filename
# key: Column name of field where data are to be tested for uniqueness
# Raises
# ------
# ValueError if not unique
# Returns
# -------
# bool, True if unique
# """
# df = self.get_dataframe(source, key)
# if len(df[key]) != len(df[key].unique()):
# import warnings
# filename = source.split("/")[-1] # Obfuscate the path
# e = "'{}' contains non-unique rows in column `{}`".format(filename, key)
# # raise ValueError(e)
# warnings.warn(e)
# return True
# def check_date_format(self, date_type: str, date_value: str) -> bool:
# # https://stackoverflow.com/a/37045601
# # https://www.saltycrane.com/blog/2009/05/converting-time-zones-datetime-objects-python/
# for fmt in self.DATE_FORMATS[date_type]["fmt"]:
# try:
# if date_value == datetime.strptime(date_value, fmt).strftime(fmt):
# return True
# except ValueError:
# continue
# raise ValueError(f"Incorrect date format, should be: `{self.DATE_FORMATS[date_type]['txt']}`")
###################################################################################################
### Pandas type parsers
###################################################################################################
def parse_dates(self, x: Union[None, str]) -> Union[pd.NaT, date.isoformat]:
"""
This is the hard-won 'trust nobody', certainly not Americans, date parser.
TODO: Replace with https://github.com/scrapinghub/dateparser
The only concern is that dateparser.parse(x).date().isoformat() will coerce *any* string to a date,
no matter *what* it is.
"""
if pd.isnull(x):
return pd.NaT
# Check if to_datetime can handle things
if not pd.isnull(pd.to_datetime(x, errors="coerce", dayfirst=True)):
return date.isoformat(pd.to_datetime(x, errors="coerce", dayfirst=True))
# Manually see if coersion will work
x = str(x).strip()[:10]
x = re.sub(r"[\\/,\.]", "-", x)
try:
y, m, d = x.split("-")
except ValueError:
return pd.NaT
if len(y) < 4:
# Swap the day and year positions
# Ignore US dates
d, m, y = x.split("-")
# Fat finger on 1999 ... not going to check for other date errors as no way to figure out
if y[0] == "9":
y = "1" + y[1:]
x = "{}-{}-{}".format(y, m, d)
try:
x = datetime.strptime(x, "%Y-%m-%d")
except ValueError:
return pd.NaT
x = date.isoformat(x)
try:
pd.Timestamp(x)
return x
except pd.errors.OutOfBoundsDatetime:
return pd.NaT
def parse_float(self, x: Union[str, int, float]) -> Union[np.nan, float]:
"""
Regex to extract wrecked floats: https://stackoverflow.com/a/385597
Checked against: https://regex101.com/
"""
try:
return float(x)
except ValueError:
re_float = re.compile(
r"""(?x)
^
\D* # first, match an optional sign *and space*
( # then match integers or f.p. mantissas:
\d+ # start out with a ...
(
\.\d* # mantissa of the form a.b or a.
)? # ? takes care of integers of the form a
|\.\d+ # mantissa of the form .b
)
([eE][+-]?\d+)? # finally, optionally match an exponent
$"""
)
try:
x = re_float.match(x).group(1)
x = re.sub(r"[^e0-9,-\.]", "", str(x))
return locale.atof(x)
except (ValueError, AttributeError):
return np.nan
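# --- Editor's usage sketch (added, not part of the original module) ---
# A minimal, hypothetical illustration of the class above. It assumes a local CSV
# file "survey.csv" with a "postcode" column whose leading zeros must be preserved;
# the file name and column name are illustrative, and outputs depend on the data.
if __name__ == "__main__":
    wrangle = WranglingScript()
    df = wrangle.get_dataframe("survey.csv", preserve="postcode", filetype=MimeType.CSV, nrows=50)
    print(wrangle.get_dataframe_columns(df))  # list of ColumnModel summaries
    print(wrangle.parse_dates("31.12.1999"))  # ISO 8601 date string or pd.NaT
    print(wrangle.parse_float("~ 3.14"))      # float or np.nan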
| [
"pandas.isnull",
"locale.atof",
"locale.setlocale",
"pandas.read_csv",
"re.compile",
"datetime.datetime.strptime",
"pandas.StringDtype",
"typing.List",
"pandas.Index",
"pandas.read_excel",
"pandas.DataFrame",
"re.sub",
"pandas.Timestamp",
"pandas.concat",
"datetime.date.isoformat",
"pandas.to_datetime"
]
| [((212, 258), 'locale.setlocale', 'locale.setlocale', (['locale.LC_ALL', '"""en_US.UTF-8"""'], {}), "(locale.LC_ALL, 'en_US.UTF-8')\n", (228, 258), False, 'import locale\n'), ((5556, 5573), 'typing.List', 'List', (['ColumnModel'], {}), '(ColumnModel)\n', (5560, 5573), False, 'from typing import Optional, Dict, List, Union, Type, TYPE_CHECKING\n'), ((344, 379), 'locale.setlocale', 'locale.setlocale', (['locale.LC_ALL', '""""""'], {}), "(locale.LC_ALL, '')\n", (360, 379), False, 'import locale\n'), ((7596, 7618), 'pandas.Index', 'pd.Index', (['column_index'], {}), '(column_index)\n', (7604, 7618), True, 'import pandas as pd\n'), ((9702, 9714), 'pandas.isnull', 'pd.isnull', (['x'], {}), '(x)\n', (9711, 9714), True, 'import pandas as pd\n'), ((10042, 10071), 're.sub', 're.sub', (['"""[\\\\\\\\/,\\\\.]"""', '"""-"""', 'x'], {}), "('[\\\\\\\\/,\\\\.]', '-', x)\n", (10048, 10071), False, 'import re\n'), ((10621, 10638), 'datetime.date.isoformat', 'date.isoformat', (['x'], {}), '(x)\n', (10635, 10638), False, 'from datetime import date, datetime\n'), ((3417, 3448), 'pandas.read_excel', 'pd.read_excel', (['source'], {}), '(source, **kwargs)\n', (3430, 3448), True, 'import pandas as pd\n'), ((10523, 10555), 'datetime.datetime.strptime', 'datetime.strptime', (['x', '"""%Y-%m-%d"""'], {}), "(x, '%Y-%m-%d')\n", (10540, 10555), False, 'from datetime import date, datetime\n'), ((10664, 10679), 'pandas.Timestamp', 'pd.Timestamp', (['x'], {}), '(x)\n', (10676, 10679), True, 'import pandas as pd\n'), ((3014, 3030), 'pandas.StringDtype', 'pd.StringDtype', ([], {}), '()\n', (3028, 3030), True, 'import pandas as pd\n'), ((3983, 4012), 'pandas.read_csv', 'pd.read_csv', (['source'], {}), '(source, **kwargs)\n', (3994, 4012), True, 'import pandas as pd\n'), ((4147, 4176), 'pandas.read_csv', 'pd.read_csv', (['source'], {}), '(source, **kwargs)\n', (4158, 4176), True, 'import pandas as pd\n'), ((4198, 4239), 'pandas.concat', 'pd.concat', (['df_iterator'], {'ignore_index': '(True)'}), '(df_iterator, ignore_index=True)\n', (4207, 4239), True, 'import pandas as pd\n'), ((5271, 5285), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (5283, 5285), True, 'import pandas as pd\n'), ((9816, 9865), 'pandas.to_datetime', 'pd.to_datetime', (['x'], {'errors': '"""coerce"""', 'dayfirst': '(True)'}), "(x, errors='coerce', dayfirst=True)\n", (9830, 9865), True, 'import pandas as pd\n'), ((9902, 9951), 'pandas.to_datetime', 'pd.to_datetime', (['x'], {'errors': '"""coerce"""', 'dayfirst': '(True)'}), "(x, errors='coerce', dayfirst=True)\n", (9916, 9951), True, 'import pandas as pd\n'), ((11090, 11655), 're.compile', 're.compile', (['"""(?x)\n ^\n \\\\D* \t\t# first, match an optional sign *and space*\n ( # then match integers or f.p. mantissas:\n \\\\d+ # start out with a ...\n (\n \\\\.\\\\d* # mantissa of the form a.b or a.\n )? # ? takes care of integers of the form a\n |\\\\.\\\\d+ # mantissa of the form .b\n )\n ([eE][+-]?\\\\d+)? # finally, optionally match an exponent\n $"""'], {}), '(\n """(?x)\n ^\n \\\\D* \t\t# first, match an optional sign *and space*\n ( # then match integers or f.p. mantissas:\n \\\\d+ # start out with a ...\n (\n \\\\.\\\\d* # mantissa of the form a.b or a.\n )? # ? takes care of integers of the form a\n |\\\\.\\\\d+ # mantissa of the form .b\n )\n ([eE][+-]?\\\\d+)? # finally, optionally match an exponent\n $"""\n )\n', (11100, 11655), False, 'import re\n'), ((11812, 11826), 'locale.atof', 'locale.atof', (['x'], {}), '(x)\n', (11823, 11826), False, 'import locale\n')] |
from __future__ import print_function
import json
from os.path import join, dirname
from watson_developer_cloud import ToneAnalyzerV3
from watson_developer_cloud.tone_analyzer_v3 import ToneInput
from pprint import pprint
# If service instance provides API key authentication
# service = ToneAnalyzerV3(
# ## url is optional, and defaults to the URL below. Use the correct URL for your region.
# url='https://gateway.watsonplatform.net/tone-analyzer/api',
# version='2017-09-21',
# iam_apikey='your_apikey')
service = ToneAnalyzerV3(
## url is optional, and defaults to the URL below. Use the correct URL for your region.
# url='https://gateway.watsonplatform.net/tone-analyzer/api',
username='f0ec47cc-5191-4421-8fca-<PASSWORD>917e1<PASSWORD>',
password='<PASSWORD>',
version='2017-09-21')
# print("\ntone_chat() example 1:\n")
# utterances = [{
# 'text': 'I am very happy.',
# 'user': 'glenn'
# }, {
# 'text': 'It is a good day.',
# 'user': 'glenn'
# }]
# tone_chat = service.tone_chat(utterances).get_result()
# print(json.dumps(tone_chat, indent=2))
# print("\ntone() example 1:\n")
# print(
# json.dumps(
# service.tone(
# tone_input='I am very happy. It is a good day.',
# content_type="text/plain").get_result(),
# indent=2))
# print("\ntone() example 2:\n")
# with open(join(dirname(__file__),
# '../resources/tone-example.json')) as tone_json:
# tone = service.tone(json.load(tone_json)['text'], "text/plain").get_result()
# print(json.dumps(tone, indent=2))
# print("\ntone() example 3:\n")
# with open(join(dirname(__file__),
# '../resources/tone-example.json')) as tone_json:
# tone = service.tone(
# tone_input=json.load(tone_json)['text'],
# content_type='text/plain',
# sentences=True).get_result()
# print(json.dumps(tone, indent=2))
# print("\ntone() example 4:\n")
# with open(join(dirname(__file__),
# '../resources/tone-example.json')) as tone_json:
# tone = service.tone(
# tone_input=json.load(tone_json),
# content_type='application/json').get_result()
# print(json.dumps(tone, indent=2))
# print("\ntone() example 5:\n")
# with open(join(dirname(__file__),
# '../resources/tone-example-html.json')) as tone_html:
# tone = service.tone(
# json.load(tone_html)['text'], content_type='text/html').get_result()
# print(json.dumps(tone, indent=2))
# print("\ntone() example 6 with GDPR support:\n")
# service.set_detailed_response(True)
# with open(join(dirname(__file__),
# '../resources/tone-example-html.json')) as tone_html:
# tone = service.tone(
# json.load(tone_html)['text'],
# content_type='text/html',
# headers={
# 'Custom-Header': 'custom_value'
# })
# print(tone)
# print(tone.get_headers())
# print(tone.get_result())
# print(tone.get_status_code())
# service.set_detailed_response(False)
# print("\ntone() example 7:\n")
test_tone="Hi Team, The times are difficult! Our sales have been disappointing for the past three quarters for our data analytics product suite. We have a competitive data analytics product suite in the industry. However, we are not doing a good job at selling it, and this is really frustrating.We are missing critical sales opportunities. We cannot blame the economy for our lack of execution. Our clients need analytical tools to change their current business outcomes. In fact, it is in times such as this, our clients want to get the insights they need to turn their businesses around. It is disheartening to see that we are failing at closing deals, in such a hungry market. Let's buckle up and execute.<NAME>akerSales Leader, North-East region"
tone_input = ToneInput(test_tone)
result = service.tone(tone_input=tone_input, content_type="application/json").get_result()
# print(type(json.dumps(tone, indent=2)))
pprint(result)
| [
"watson_developer_cloud.ToneAnalyzerV3",
"watson_developer_cloud.tone_analyzer_v3.ToneInput",
"pprint.pprint"
]
| [((537, 662), 'watson_developer_cloud.ToneAnalyzerV3', 'ToneAnalyzerV3', ([], {'username': '"""f0ec47cc-5191-4421-8fca-<PASSWORD>917e1<PASSWORD>"""', 'password': '"""<PASSWORD>"""', 'version': '"""2017-09-21"""'}), "(username='f0ec47cc-5191-4421-8fca-<PASSWORD>917e1<PASSWORD>',\n password='<PASSWORD>', version='2017-09-21')\n", (551, 662), False, 'from watson_developer_cloud import ToneAnalyzerV3\n'), ((3816, 3836), 'watson_developer_cloud.tone_analyzer_v3.ToneInput', 'ToneInput', (['test_tone'], {}), '(test_tone)\n', (3825, 3836), False, 'from watson_developer_cloud.tone_analyzer_v3 import ToneInput\n'), ((3970, 3984), 'pprint.pprint', 'pprint', (['result'], {}), '(result)\n', (3976, 3984), False, 'from pprint import pprint\n')] |
import os
# Restrict the script to run on CPU
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = ""
# Import Keras Tensoflow Backend
# from keras import backend as K
import tensorflow as tf
# Configure it to use only specific CPU Cores
config = tf.ConfigProto(intra_op_parallelism_threads=4,
inter_op_parallelism_threads=4,
device_count={"CPU": 1, "GPU": 0},
allow_soft_placement=True)
# import tensorflow as tf
import numpy as np
from IEOMAP_dataset_AC import dataset, IeomapSentenceIterator
from sklearn.metrics import confusion_matrix
from models_AC import SentenceModel
import json
def emotion_recognition(n_run, epochs, batch_size, embedding_size, first_rnn_size, dropout, embedding, num_speakers):
########################################################################################################################
# Hyper-parameters
########################################################################################################################
split_size = 0.8 # Split proportion of train and test data
#log_dir = './logs_AC/RNN_without_ID/1'
log_dir = './logs_AC/RNN_' \
+ str(num_speakers) + '/' + str(n_run) + '/'
#log_dir = './logs_AC/RNN_' + embedding + 'Emb' + str(embedding_size) + '_1layer' + str(2*first_rnn_size) + '/' + str(n_run)
train_log_dir = log_dir + 'train'
val_log_dir = log_dir + 'val'
########################################################################################################################
# Initialize the Data set
########################################################################################################################
sentences, targets, data_info, speakers = dataset(mode='sentences', embedding=embedding, embedding_size=embedding_size)
train_data = IeomapSentenceIterator(sentences[0], targets[0], data_info['sentences_length'][0], speakers[0])
val_data = IeomapSentenceIterator(sentences[1], targets[1], data_info['sentences_length'][1], speakers[1])
test_data = IeomapSentenceIterator(sentences[2], targets[2], data_info['sentences_length'][2], speakers[2])
########################################################################################################################
# Initialize the model
########################################################################################################################
g = SentenceModel(vocab_size=(data_info['vocabulary_size'] + 1),
embedding_size=embedding_size,
first_rnn_size=first_rnn_size,
num_classes=data_info['num_classes'],
dropout=dropout,
embedding=embedding,
num_speakers=num_speakers)
# Store model setup
model_setup = {'vocab_size': (data_info['vocabulary_size'] + 1),
'embedding_size': embedding_size,
'first_rnn_size': first_rnn_size,
'num_classes': data_info['num_classes'],
'dropout': dropout,
'embedding': embedding,
'num_speakers': num_speakers}
dirname = os.path.dirname(log_dir)
if not os.path.exists(dirname):
os.makedirs(dirname)
with open(log_dir + 'model_setup.p', 'w') as file:
json.dump(model_setup, file, indent=4)
########################################################################################################################
# Initialize the parameters
########################################################################################################################
sess = tf.Session(config=config)
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
saver = tf.train.Saver()
epoch = 0
best_epoch = 0
train_conf_matrix = 0
val_conf_matrix = 0
test_conf_matrix = 0
best_acc = 0
########################################################################################################################
# Performance Indicators
########################################################################################################################
writer_train = tf.summary.FileWriter(train_log_dir, sess.graph)
writer_val = tf.summary.FileWriter(val_log_dir)
accuracy_tf = tf.placeholder(tf.float32, [])
precision_tf = tf.placeholder(tf.float32, [])
recall_tf = tf.placeholder(tf.float32, [])
summary_op = tf.summary.scalar('accuracy', accuracy_tf)
summary_op = tf.summary.scalar('precision', precision_tf)
summary_op = tf.summary.scalar('recall', recall_tf)
########################################################################################################################
# Model training procedure
########################################################################################################################
while train_data.epoch < epochs: # and train_data.epoch < best_epoch + 20:
sentences_batch, sentences_length_batch, targets_batch, speakers_batch = train_data.next_batch(batch_size)
preds, _ = sess.run([g['preds'],
g['ts']],
feed_dict={g['x']: np.array(sentences_batch),
g['y']: np.array(targets_batch).reshape(len(targets_batch)),
g['speaker']: np.array(speakers_batch),
g['seqlen']: np.array(sentences_length_batch).reshape(len(targets_batch))})
####################################################################################################################
# Calculate the Train data Confusion Matrix
####################################################################################################################
train_conf_matrix += confusion_matrix(targets_batch, preds, labels=range(data_info['num_classes']))
####################################################################################################################
# Add the end of each training epoch compute the validation results and store the relevant information
####################################################################################################################
if train_data.epoch != epoch:
while val_data.epoch == epoch:
sentences_batch, sentences_length_batch, targets_batch, speakers_batch = val_data.next_batch(batch_size)
preds = sess.run([g['preds']],
feed_dict={g['x']: np.array(sentences_batch),
g['y']: np.array(targets_batch).reshape(len(targets_batch)),
g['speaker']: np.array(speakers_batch),
g['seqlen']: np.array(sentences_length_batch).reshape(
len(targets_batch))})
############################################################################################################
# Calculate the Test data Confusion Matrix
############################################################################################################
val_conf_matrix += confusion_matrix(targets_batch, preds[0], labels=range(data_info['num_classes']))
################################################################################################################
# Compute Accuracy, Precision and Recall
################################################################################################################
train_CM_size = len(train_conf_matrix)
total_train = sum(sum(train_conf_matrix))
train_TP = np.diagonal(train_conf_matrix)
train_FP = [sum(train_conf_matrix[:, i]) - train_TP[i] for i in range(train_CM_size)]
train_FN = [sum(train_conf_matrix[i, :]) - train_TP[i] for i in range(train_CM_size)]
            train_TN = total_train - train_TP - train_FP - train_FN
train_precision = train_TP / (train_TP + train_FP) # aka True Positive Rate
train_recall = train_TP / (train_TP + train_FN)
total_train_correct = sum(train_TP)
total_train_accuracy = total_train_correct / total_train
total_train_precision = sum(train_precision) / train_CM_size
total_train_recall = sum(train_recall) / train_CM_size
val_CM_size = len(val_conf_matrix)
total_val = sum(sum(val_conf_matrix))
val_TP = np.diagonal(val_conf_matrix)
val_FP = [sum(val_conf_matrix[:, i]) - val_TP[i] for i in range(val_CM_size)]
val_FN = [sum(val_conf_matrix[i, :]) - val_TP[i] for i in range(val_CM_size)]
            val_TN = total_val - val_TP - val_FP - val_FN
val_precision = val_TP / (val_TP + val_FP)
val_recall = val_TP / (val_TP + val_FN)
total_val_correct = sum(val_TP)
total_val_accuracy = total_val_correct / total_val
total_val_precision = sum(val_precision) / val_CM_size
total_val_recall = sum(val_recall) / val_CM_size
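            # Editor's note (added): the per-class metrics above follow the usual
            # confusion-matrix definitions. For example, for a 2-class matrix
            # [[3, 1], [2, 4]] (rows = true labels, columns = predictions):
            #   TP = [3, 4], FP = [2, 1], FN = [1, 2]
            #   precision = [3/5, 4/5], recall = [3/4, 4/6]
            #   accuracy = (3 + 4) / 10 = 0.70, macro precision = 0.70, macro recall ~= 0.71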
################################################################################################################
# Store Accuracy Precision Recall
################################################################################################################
train_acc_summary = tf.Summary(
value=[tf.Summary.Value(tag="accuracy", simple_value=total_train_accuracy), ])
train_prec_summary = tf.Summary(
value=[tf.Summary.Value(tag="precision", simple_value=total_train_precision), ])
train_rec_summary = tf.Summary(value=[tf.Summary.Value(tag="recall", simple_value=total_train_recall), ])
val_acc_summary = tf.Summary(value=[tf.Summary.Value(tag="accuracy", simple_value=total_val_accuracy), ])
val_prec_summary = tf.Summary(
value=[tf.Summary.Value(tag="precision", simple_value=total_val_precision), ])
val_rec_summary = tf.Summary(value=[tf.Summary.Value(tag="recall", simple_value=total_val_recall), ])
writer_train.add_summary(train_acc_summary, epoch)
writer_train.add_summary(train_prec_summary, epoch)
writer_train.add_summary(train_rec_summary, epoch)
writer_val.add_summary(val_acc_summary, epoch)
writer_val.add_summary(val_prec_summary, epoch)
writer_val.add_summary(val_rec_summary, epoch)
writer_train.flush()
writer_val.flush()
################################################################################################################
# Print the confusion matrix and store important information
################################################################################################################
print(train_conf_matrix)
print(val_conf_matrix)
if best_acc < total_val_accuracy:
saver.save(sess, log_dir + "acc_best_validation_model.ckpt")
best_acc = total_val_accuracy
best_epoch = epoch
store_info = {'epoch': best_epoch,
'train_conf_matrix': list([list(x) for x in train_conf_matrix]),
'train_accuracy': total_train_accuracy,
'train_precision': list(train_precision),
'total_train_precision': total_train_precision,
'train_recall': list(train_recall),
'total_train_recall': total_train_recall,
'val_conf_matrix': list([list(x) for x in val_conf_matrix]),
'val_accuracy': total_val_accuracy,
'val_precision': list(val_precision),
'total_val_precision': total_val_precision,
'val_recall': list(val_recall),
'total_val_recall': total_val_recall}
store_convergence_info = {'epoch': train_data.epoch,
'train_conf_matrix': list([list(x) for x in train_conf_matrix]),
'train_accuracy': total_train_accuracy,
'train_precision': list(train_precision),
'total_train_precision': total_train_precision,
'train_recall': list(train_recall),
'total_train_recall': total_train_recall,
'val_conf_matrix': list([list(x) for x in val_conf_matrix]),
'val_accuracy': total_val_accuracy,
'val_precision': list(val_precision),
'total_val_precision': total_val_precision,
'val_recall': list(val_recall),
'total_val_recall': total_val_recall}
################################################################################################################
# Get ready for the next epoch
################################################################################################################
epoch += 1
train_conf_matrix = 0
val_conf_matrix = 0
################################################################################################################
####################################################################################################################
# Add the end of training compute the test results and store the relevant information
####################################################################################################################
while test_data.epoch == 0:
sentences_batch, sentences_length_batch, targets_batch, speakers_batch = test_data.next_batch(batch_size)
preds = sess.run([g['preds']],
feed_dict={g['x']: np.array(sentences_batch),
g['y']: np.array(targets_batch).reshape(len(targets_batch)),
g['speaker']: np.array(speakers_batch),
g['seqlen']: np.array(sentences_length_batch).reshape(
len(targets_batch))})
############################################################################################################
# Calculate the Test data Confusion Matrix
############################################################################################################
test_conf_matrix += confusion_matrix(targets_batch, preds[0], labels=range(data_info['num_classes']))
################################################################################################################
# Compute Accuracy, Precision and Recall
################################################################################################################
test_CM_size = len(test_conf_matrix)
total_test = sum(sum(test_conf_matrix))
test_TP = np.diagonal(test_conf_matrix)
test_FP = [sum(test_conf_matrix[:, i]) - test_TP[i] for i in range(test_CM_size)]
test_FN = [sum(test_conf_matrix[i, :]) - test_TP[i] for i in range(test_CM_size)]
    test_TN = total_test - test_TP - test_FP - test_FN
test_precision = test_TP / (test_TP + test_FP)
test_recall = test_TP / (test_TP + test_FN)
total_test_correct = sum(test_TP)
total_test_accuracy = total_test_correct / total_test
total_test_precision = sum(test_precision) / test_CM_size
total_test_recall = sum(test_recall) / test_CM_size
################################################################################################################
# Print the confusion matrix and store important information
################################################################################################################
print(test_conf_matrix)
store_convergence_info['test_conf_matrix'] = list([list(x) for x in test_conf_matrix])
store_convergence_info['test_accuracy'] = total_test_accuracy
store_convergence_info['test_precision'] = list(test_precision)
store_convergence_info['total_test_precision'] = total_test_precision
store_convergence_info['test_recall'] = list(test_recall)
store_convergence_info['total_test_recall'] = total_test_recall
# trick to be able to save numpy.int64 into json
def default(o):
if isinstance(o, np.int64): return int(o)
raise TypeError
with open(log_dir + 'convergence_results.p', 'w') as file:
json.dump(store_convergence_info, file, default=default, indent=4)
saver.save(sess, log_dir + "convergence_model.ckpt")
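    # The metrics above are stored as JSON (despite the ".p" suffix) next to the final
    # TensorFlow checkpoint; a minimal reload sketch for later analysis would be:
    #   with open(log_dir + 'convergence_results.p') as f:
    #       history = json.load(f)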
####################################################################################################################
    # At the end of training, compute the test results of the best validation model and store the relevant information
####################################################################################################################
saver.restore(sess, log_dir + "acc_best_validation_model.ckpt")
test_conf_matrix = 0
while test_data.epoch == 1:
sentences_batch, sentences_length_batch, targets_batch, speakers_batch = test_data.next_batch(batch_size)
preds = sess.run([g['preds']],
feed_dict={g['x']: np.array(sentences_batch),
g['y']: np.array(targets_batch).reshape(len(targets_batch)),
g['speaker']: np.array(speakers_batch),
g['seqlen']: np.array(sentences_length_batch).reshape(
len(targets_batch))})
############################################################################################################
# Calculate the Test data Confusion Matrix
############################################################################################################
test_conf_matrix += confusion_matrix(targets_batch, preds[0], labels=range(data_info['num_classes']))
################################################################################################################
# Compute Accuracy, Precision and Recall
################################################################################################################
test_CM_size = len(test_conf_matrix)
total_test = sum(sum(test_conf_matrix))
test_TP = np.diagonal(test_conf_matrix)
test_FP = [sum(test_conf_matrix[:, i]) - test_TP[i] for i in range(test_CM_size)]
test_FN = [sum(test_conf_matrix[i, :]) - test_TP[i] for i in range(test_CM_size)]
    test_TN = total_test - test_TP - test_FP - test_FN  # per-class true negatives
test_precision = test_TP / (test_TP + test_FP)
test_recall = test_TP / (test_TP + test_FN)
total_test_correct = sum(test_TP)
total_test_accuracy = total_test_correct / total_test
total_test_precision = sum(test_precision) / test_CM_size
total_test_recall = sum(test_recall) / test_CM_size
################################################################################################################
# Print the confusion matrix and store important information
################################################################################################################
print(test_conf_matrix)
store_info['test_conf_matrix'] = list([list(x) for x in test_conf_matrix])
store_info['test_accuracy'] = total_test_accuracy
store_info['test_precision'] = list(test_precision)
store_info['total_test_precision'] = total_test_precision
store_info['test_recall'] = list(test_recall)
store_info['total_test_recall'] = total_test_recall
with open(log_dir + 'acc_best_validation_results.p', 'w') as file:
json.dump(store_info, file, default=default, indent=4)
| [
"tensorflow.local_variables_initializer",
"numpy.diagonal",
"os.path.exists",
"os.makedirs",
"tensorflow.Session",
"tensorflow.train.Saver",
"tensorflow.placeholder",
"tensorflow.global_variables_initializer",
"IEOMAP_dataset_AC.dataset",
"os.path.dirname",
"IEOMAP_dataset_AC.IeomapSentenceIterator",
"numpy.array",
"tensorflow.Summary.Value",
"models_AC.SentenceModel",
"tensorflow.ConfigProto",
"tensorflow.summary.scalar",
"tensorflow.summary.FileWriter",
"json.dump"
]
| [((280, 425), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'intra_op_parallelism_threads': '(4)', 'inter_op_parallelism_threads': '(4)', 'device_count': "{'CPU': 1, 'GPU': 0}", 'allow_soft_placement': '(True)'}), "(intra_op_parallelism_threads=4, inter_op_parallelism_threads\n =4, device_count={'CPU': 1, 'GPU': 0}, allow_soft_placement=True)\n", (294, 425), True, 'import tensorflow as tf\n'), ((1837, 1914), 'IEOMAP_dataset_AC.dataset', 'dataset', ([], {'mode': '"""sentences"""', 'embedding': 'embedding', 'embedding_size': 'embedding_size'}), "(mode='sentences', embedding=embedding, embedding_size=embedding_size)\n", (1844, 1914), False, 'from IEOMAP_dataset_AC import dataset, IeomapSentenceIterator\n'), ((1933, 2033), 'IEOMAP_dataset_AC.IeomapSentenceIterator', 'IeomapSentenceIterator', (['sentences[0]', 'targets[0]', "data_info['sentences_length'][0]", 'speakers[0]'], {}), "(sentences[0], targets[0], data_info[\n 'sentences_length'][0], speakers[0])\n", (1955, 2033), False, 'from IEOMAP_dataset_AC import dataset, IeomapSentenceIterator\n'), ((2044, 2144), 'IEOMAP_dataset_AC.IeomapSentenceIterator', 'IeomapSentenceIterator', (['sentences[1]', 'targets[1]', "data_info['sentences_length'][1]", 'speakers[1]'], {}), "(sentences[1], targets[1], data_info[\n 'sentences_length'][1], speakers[1])\n", (2066, 2144), False, 'from IEOMAP_dataset_AC import dataset, IeomapSentenceIterator\n'), ((2156, 2256), 'IEOMAP_dataset_AC.IeomapSentenceIterator', 'IeomapSentenceIterator', (['sentences[2]', 'targets[2]', "data_info['sentences_length'][2]", 'speakers[2]'], {}), "(sentences[2], targets[2], data_info[\n 'sentences_length'][2], speakers[2])\n", (2178, 2256), False, 'from IEOMAP_dataset_AC import dataset, IeomapSentenceIterator\n'), ((2538, 2776), 'models_AC.SentenceModel', 'SentenceModel', ([], {'vocab_size': "(data_info['vocabulary_size'] + 1)", 'embedding_size': 'embedding_size', 'first_rnn_size': 'first_rnn_size', 'num_classes': "data_info['num_classes']", 'dropout': 'dropout', 'embedding': 'embedding', 'num_speakers': 'num_speakers'}), "(vocab_size=data_info['vocabulary_size'] + 1, embedding_size=\n embedding_size, first_rnn_size=first_rnn_size, num_classes=data_info[\n 'num_classes'], dropout=dropout, embedding=embedding, num_speakers=\n num_speakers)\n", (2551, 2776), False, 'from models_AC import SentenceModel\n'), ((3308, 3332), 'os.path.dirname', 'os.path.dirname', (['log_dir'], {}), '(log_dir)\n', (3323, 3332), False, 'import os\n'), ((3795, 3820), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (3805, 3820), True, 'import tensorflow as tf\n'), ((3928, 3944), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (3942, 3944), True, 'import tensorflow as tf\n'), ((4369, 4417), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (['train_log_dir', 'sess.graph'], {}), '(train_log_dir, sess.graph)\n', (4390, 4417), True, 'import tensorflow as tf\n'), ((4435, 4469), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (['val_log_dir'], {}), '(val_log_dir)\n', (4456, 4469), True, 'import tensorflow as tf\n'), ((4489, 4519), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[]'], {}), '(tf.float32, [])\n', (4503, 4519), True, 'import tensorflow as tf\n'), ((4539, 4569), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[]'], {}), '(tf.float32, [])\n', (4553, 4569), True, 'import tensorflow as tf\n'), ((4586, 4616), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[]'], {}), '(tf.float32, [])\n', (4600, 
4616), True, 'import tensorflow as tf\n'), ((4635, 4677), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""accuracy"""', 'accuracy_tf'], {}), "('accuracy', accuracy_tf)\n", (4652, 4677), True, 'import tensorflow as tf\n'), ((4695, 4739), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""precision"""', 'precision_tf'], {}), "('precision', precision_tf)\n", (4712, 4739), True, 'import tensorflow as tf\n'), ((4757, 4795), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""recall"""', 'recall_tf'], {}), "('recall', recall_tf)\n", (4774, 4795), True, 'import tensorflow as tf\n'), ((15755, 15784), 'numpy.diagonal', 'np.diagonal', (['test_conf_matrix'], {}), '(test_conf_matrix)\n', (15766, 15784), True, 'import numpy as np\n'), ((19245, 19274), 'numpy.diagonal', 'np.diagonal', (['test_conf_matrix'], {}), '(test_conf_matrix)\n', (19256, 19274), True, 'import numpy as np\n'), ((3344, 3367), 'os.path.exists', 'os.path.exists', (['dirname'], {}), '(dirname)\n', (3358, 3367), False, 'import os\n'), ((3377, 3397), 'os.makedirs', 'os.makedirs', (['dirname'], {}), '(dirname)\n', (3388, 3397), False, 'import os\n'), ((3462, 3500), 'json.dump', 'json.dump', (['model_setup', 'file'], {'indent': '(4)'}), '(model_setup, file, indent=4)\n', (3471, 3500), False, 'import json\n'), ((3834, 3867), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (3865, 3867), True, 'import tensorflow as tf\n'), ((3882, 3914), 'tensorflow.local_variables_initializer', 'tf.local_variables_initializer', ([], {}), '()\n', (3912, 3914), True, 'import tensorflow as tf\n'), ((17307, 17373), 'json.dump', 'json.dump', (['store_convergence_info', 'file'], {'default': 'default', 'indent': '(4)'}), '(store_convergence_info, file, default=default, indent=4)\n', (17316, 17373), False, 'import json\n'), ((20585, 20639), 'json.dump', 'json.dump', (['store_info', 'file'], {'default': 'default', 'indent': '(4)'}), '(store_info, file, default=default, indent=4)\n', (20594, 20639), False, 'import json\n'), ((8039, 8069), 'numpy.diagonal', 'np.diagonal', (['train_conf_matrix'], {}), '(train_conf_matrix)\n', (8050, 8069), True, 'import numpy as np\n'), ((8864, 8892), 'numpy.diagonal', 'np.diagonal', (['val_conf_matrix'], {}), '(val_conf_matrix)\n', (8875, 8892), True, 'import numpy as np\n'), ((5400, 5425), 'numpy.array', 'np.array', (['sentences_batch'], {}), '(sentences_batch)\n', (5408, 5425), True, 'import numpy as np\n'), ((5580, 5604), 'numpy.array', 'np.array', (['speakers_batch'], {}), '(speakers_batch)\n', (5588, 5604), True, 'import numpy as np\n'), ((14627, 14652), 'numpy.array', 'np.array', (['sentences_batch'], {}), '(sentences_batch)\n', (14635, 14652), True, 'import numpy as np\n'), ((14801, 14825), 'numpy.array', 'np.array', (['speakers_batch'], {}), '(speakers_batch)\n', (14809, 14825), True, 'import numpy as np\n'), ((18117, 18142), 'numpy.array', 'np.array', (['sentences_batch'], {}), '(sentences_batch)\n', (18125, 18142), True, 'import numpy as np\n'), ((18291, 18315), 'numpy.array', 'np.array', (['speakers_batch'], {}), '(speakers_batch)\n', (18299, 18315), True, 'import numpy as np\n'), ((9841, 9908), 'tensorflow.Summary.Value', 'tf.Summary.Value', ([], {'tag': '"""accuracy"""', 'simple_value': 'total_train_accuracy'}), "(tag='accuracy', simple_value=total_train_accuracy)\n", (9857, 9908), True, 'import tensorflow as tf\n'), ((9981, 10050), 'tensorflow.Summary.Value', 'tf.Summary.Value', ([], {'tag': '"""precision"""', 'simple_value': 'total_train_precision'}), 
"(tag='precision', simple_value=total_train_precision)\n", (9997, 10050), True, 'import tensorflow as tf\n'), ((10105, 10168), 'tensorflow.Summary.Value', 'tf.Summary.Value', ([], {'tag': '"""recall"""', 'simple_value': 'total_train_recall'}), "(tag='recall', simple_value=total_train_recall)\n", (10121, 10168), True, 'import tensorflow as tf\n'), ((10222, 10287), 'tensorflow.Summary.Value', 'tf.Summary.Value', ([], {'tag': '"""accuracy"""', 'simple_value': 'total_val_accuracy'}), "(tag='accuracy', simple_value=total_val_accuracy)\n", (10238, 10287), True, 'import tensorflow as tf\n'), ((10358, 10425), 'tensorflow.Summary.Value', 'tf.Summary.Value', ([], {'tag': '"""precision"""', 'simple_value': 'total_val_precision'}), "(tag='precision', simple_value=total_val_precision)\n", (10374, 10425), True, 'import tensorflow as tf\n'), ((10478, 10539), 'tensorflow.Summary.Value', 'tf.Summary.Value', ([], {'tag': '"""recall"""', 'simple_value': 'total_val_recall'}), "(tag='recall', simple_value=total_val_recall)\n", (10494, 10539), True, 'import tensorflow as tf\n'), ((5474, 5497), 'numpy.array', 'np.array', (['targets_batch'], {}), '(targets_batch)\n', (5482, 5497), True, 'import numpy as np\n'), ((5658, 5690), 'numpy.array', 'np.array', (['sentences_length_batch'], {}), '(sentences_length_batch)\n', (5666, 5690), True, 'import numpy as np\n'), ((6795, 6820), 'numpy.array', 'np.array', (['sentences_batch'], {}), '(sentences_batch)\n', (6803, 6820), True, 'import numpy as np\n'), ((6985, 7009), 'numpy.array', 'np.array', (['speakers_batch'], {}), '(speakers_batch)\n', (6993, 7009), True, 'import numpy as np\n'), ((14698, 14721), 'numpy.array', 'np.array', (['targets_batch'], {}), '(targets_batch)\n', (14706, 14721), True, 'import numpy as np\n'), ((14876, 14908), 'numpy.array', 'np.array', (['sentences_length_batch'], {}), '(sentences_length_batch)\n', (14884, 14908), True, 'import numpy as np\n'), ((18188, 18211), 'numpy.array', 'np.array', (['targets_batch'], {}), '(targets_batch)\n', (18196, 18211), True, 'import numpy as np\n'), ((18366, 18398), 'numpy.array', 'np.array', (['sentences_length_batch'], {}), '(sentences_length_batch)\n', (18374, 18398), True, 'import numpy as np\n'), ((6874, 6897), 'numpy.array', 'np.array', (['targets_batch'], {}), '(targets_batch)\n', (6882, 6897), True, 'import numpy as np\n'), ((7068, 7100), 'numpy.array', 'np.array', (['sentences_length_batch'], {}), '(sentences_length_batch)\n', (7076, 7100), True, 'import numpy as np\n')] |
import torch
DEVICE = 'cuda'
import math
import numpy as np  # used below (np.inf)
import torch.nn as nn  # used below (nn.CrossEntropyLoss)
import torch.optim as optim
from model import *
import os
import copy, gzip, pickle, time
data_dir = './drive/MyDrive/music_classification/Data'
classes = os.listdir(data_dir+'/images_original')
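# Note: os.listdir() returns entries in arbitrary order; wrapping it in sorted()
# would keep the class-to-index mapping reproducible across runs and machines.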
def fit(model, train_loader, train_len, optimizer, criterion):
model.train()
batch_size = train_loader.batch_size
n_batches = math.ceil(train_len/batch_size)
#print('Batch Size:', batch_size,'Number of Batches:', n_batches)
train_running_loss = 0.0
train_running_correct = 0
counter = 0
total = 0
#prog_bar = tqdm(enumerate(train_loader), total=int(train_len/batch_size))
for i, data in enumerate(train_loader):
counter += 1
data, target = data[0].to(DEVICE), data[1].to(DEVICE)
total += target.size(0)
optimizer.zero_grad()
outputs = model(data)
loss = criterion(outputs, target)
train_running_loss += loss.item()
_, preds = torch.max(outputs.data, 1)
train_running_correct += (preds == target).sum().item()
loss.backward()
optimizer.step()
train_loss = train_running_loss / counter
train_accuracy = 100. * train_running_correct / total
return train_loss, train_accuracy
def validate(model, val_loader, val_len, criterion):
model.eval()
val_running_loss = 0.0
val_running_correct = 0
counter = 0
total = 0
batch_size = val_len
#prog_bar = tqdm(enumerate(val_loader), total=int(val_len/batch_size))
with torch.no_grad():
for i, data in enumerate(val_loader):
counter += 1
data, target = data[0].to(DEVICE), data[1].to(DEVICE)
total += target.size(0)
outputs = model(data)
loss = criterion(outputs, target)
val_running_loss += loss.item()
_, preds = torch.max(outputs.data, 1)
val_running_correct += (preds == target).sum().item()
val_loss = val_running_loss / counter
val_accuracy = 100. * val_running_correct / total
return val_loss, val_accuracy
def train(hparams, train_loader, val_loader, train_len, val_len, checkpoint_path=None, **kwargs):
model = CRNN_Base(len(classes), hparams['c'], hparams['h'], hparams['w'], hparams['k'], hparams['filters'],\
hparams['poolings'], hparams['dropout_rate'], gru_units=hparams['gru_units'])
model.to(DEVICE)
optimizer = optim.Adam(model.parameters(), lr=hparams['lr'])
try:
path = kwargs['path']
stream = gzip.open(path, "rb")
checkpoint = pickle.load(stream)
stream.close()
train_loss = checkpoint['train_loss']
train_accuracy = checkpoint['train_accuracy']
val_loss = checkpoint['val_loss']
val_accuracy = checkpoint['val_accuracy']
model.load_state_dict(checkpoint['model_state_dict'])
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
epoch_load = checkpoint['epoch']
print(f'Checkpoint found! Training will resume from epoch {epoch_load+1}')
print('Last epoch results: ')
print(f"Train Loss: {train_loss[-1]:.4f}, Train Acc: {train_accuracy[-1]:.2f}")
print(f'Val Loss: {val_loss[-1]:.4f}, Val Acc: {val_accuracy[-1]:.2f}')
if 'lr_scheduler' in kwargs.keys() and 'scheduler_state_dict' in checkpoint.keys():
if kwargs['lr_scheduler'] == True:
                print('Learning rate scheduler is active.\n')
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=20, gamma=0.1, last_epoch=-1, verbose=True)
scheduler.load_state_dict(checkpoint['scheduler_state_dict'])
else:
scheduler = False
else:
scheduler = False
    except Exception:  # no usable checkpoint found; start training from scratch
print('No checkpoints found! Training will start from the beginning.\n')
train_loss, train_accuracy = [], []
val_loss, val_accuracy = [], []
epoch_load = 0
scheduler = None
es = False
if 'lr_scheduler' in kwargs.keys():
if kwargs['lr_scheduler'] == True:
                print('Learning rate scheduler is active.\n')
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=20, gamma=0.1, last_epoch=-1, verbose=True)
else:
scheduler = False
else:
scheduler = False
es = False
if 'early_stopping' in kwargs.keys():
print('Early stopping is active.')
print()
es = True
min_val_loss = np.inf
patience = 30
epochs_no_improve = 0
best_model = None
criterion = nn.CrossEntropyLoss()
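    # nn.CrossEntropyLoss expects raw (unnormalised) logits and applies log-softmax
    # internally, so the model should not add its own softmax layer.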
start = time.time()
for epoch in range(hparams['epochs']-epoch_load):
print(f"Epoch {epoch+epoch_load+1} of {hparams['epochs']}")
train_epoch_loss, train_epoch_accuracy = fit(
model, train_loader, train_len, optimizer, criterion
)
val_epoch_loss, val_epoch_accuracy = validate(
model, val_loader, val_len, criterion
)
if scheduler:
scheduler.step()
train_loss.append(train_epoch_loss)
train_accuracy.append(train_epoch_accuracy)
val_loss.append(val_epoch_loss)
val_accuracy.append(val_epoch_accuracy)
if es:
if val_epoch_loss < min_val_loss:
#Saving the model
min_val_loss = val_epoch_loss
best_model = copy.deepcopy(model.state_dict())
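                # state_dict() holds references to the live parameter tensors, so a
                # deep copy is needed to freeze a snapshot of the best weights.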
epochs_no_improve = 0
else:
epochs_no_improve += 1
# Check early stopping condition
if epochs_no_improve == patience:
print(f'Early stopping after {epoch+epoch_load+1} epochs!')
model.load_state_dict(best_model)
break
print(f"Train Loss: {train_epoch_loss:.4f}, Train Acc: {train_epoch_accuracy:.2f}")
print(f'Val Loss: {val_epoch_loss:.4f}, Val Acc: {val_epoch_accuracy:.2f}')
checkpoint_to_save = {'model_state_dict': model.state_dict(),
'optimizer_state_dict': optimizer.state_dict(),
'epoch': epoch+epoch_load,
'train_loss': train_loss,
'val_loss': val_loss,
'train_accuracy': train_accuracy,
'val_accuracy': val_accuracy
}
if scheduler:
checkpoint_to_save['scheduler_state_dict'] = scheduler.state_dict()
        ## Saving the checkpoint (model/optimizer state plus the metric history)
        if checkpoint_path is not None:
stream = gzip.open(checkpoint_path, "wb")
pickle.dump(checkpoint_to_save, stream)
stream.close()
end = time.time()
print(f"Training time: {(end-start)/60:.3f} minutes")
    return model, train_loss, train_accuracy, val_loss, val_accuracy
 | [
"os.listdir",
"math.ceil",
"pickle.dump",
"gzip.open",
"torch.max",
"pickle.load",
"torch.optim.lr_scheduler.StepLR",
"torch.no_grad",
"time.time"
]
| [((196, 237), 'os.listdir', 'os.listdir', (["(data_dir + '/images_original')"], {}), "(data_dir + '/images_original')\n", (206, 237), False, 'import os\n'), ((376, 409), 'math.ceil', 'math.ceil', (['(train_len / batch_size)'], {}), '(train_len / batch_size)\n', (385, 409), False, 'import math\n'), ((4727, 4738), 'time.time', 'time.time', ([], {}), '()\n', (4736, 4738), False, 'import copy, gzip, pickle, time\n'), ((6800, 6811), 'time.time', 'time.time', ([], {}), '()\n', (6809, 6811), False, 'import copy, gzip, pickle, time\n'), ((986, 1012), 'torch.max', 'torch.max', (['outputs.data', '(1)'], {}), '(outputs.data, 1)\n', (995, 1012), False, 'import torch\n'), ((1542, 1557), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1555, 1557), False, 'import torch\n'), ((2588, 2609), 'gzip.open', 'gzip.open', (['path', '"""rb"""'], {}), "(path, 'rb')\n", (2597, 2609), False, 'import copy, gzip, pickle, time\n'), ((2631, 2650), 'pickle.load', 'pickle.load', (['stream'], {}), '(stream)\n', (2642, 2650), False, 'import copy, gzip, pickle, time\n'), ((1892, 1918), 'torch.max', 'torch.max', (['outputs.data', '(1)'], {}), '(outputs.data, 1)\n', (1901, 1918), False, 'import torch\n'), ((6678, 6710), 'gzip.open', 'gzip.open', (['checkpoint_path', '"""wb"""'], {}), "(checkpoint_path, 'wb')\n", (6687, 6710), False, 'import copy, gzip, pickle, time\n'), ((6723, 6762), 'pickle.dump', 'pickle.dump', (['checkpoint_to_save', 'stream'], {}), '(checkpoint_to_save, stream)\n', (6734, 6762), False, 'import copy, gzip, pickle, time\n'), ((3556, 3650), 'torch.optim.lr_scheduler.StepLR', 'optim.lr_scheduler.StepLR', (['optimizer'], {'step_size': '(20)', 'gamma': '(0.1)', 'last_epoch': '(-1)', 'verbose': '(True)'}), '(optimizer, step_size=20, gamma=0.1, last_epoch=-1,\n verbose=True)\n', (3581, 3650), True, 'import torch.optim as optim\n'), ((4245, 4339), 'torch.optim.lr_scheduler.StepLR', 'optim.lr_scheduler.StepLR', (['optimizer'], {'step_size': '(20)', 'gamma': '(0.1)', 'last_epoch': '(-1)', 'verbose': '(True)'}), '(optimizer, step_size=20, gamma=0.1, last_epoch=-1,\n verbose=True)\n', (4270, 4339), True, 'import torch.optim as optim\n')] |
# Copyright 2017 ProjectQ-Framework (www.projectq.ch)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tests for projectq.backends._sim._simulator.py, using both the Python
and the C++ simulator as backends.
"""
import copy
import math
import cmath
import numpy
import pytest
import random
import scipy
import scipy.sparse
import scipy.sparse.linalg
from projectq import MainEngine
from projectq.cengines import (BasicEngine, BasicMapperEngine, DummyEngine,
LocalOptimizer, NotYetMeasuredError)
from projectq.ops import (All, Allocate, BasicGate, BasicMathGate, CNOT, C,
Command, H, Measure, QubitOperator, Rx, Ry, Rz, S,
TimeEvolution, Toffoli, X, Y, Z, Swap, SqrtSwap,
UniformlyControlledRy, UniformlyControlledRz)
from projectq.libs.math import (AddConstant,
AddConstantModN,
SubConstant,
SubConstantModN,
MultiplyByConstantModN)
from projectq.meta import Compute, Uncompute, Control, Dagger, LogicalQubitIDTag
from projectq.types import WeakQubitRef
from projectq.backends import Simulator
tolerance = 1e-6
def test_is_qrack_simulator_present():
_qracksim = pytest.importorskip("projectq.backends._qracksim._qracksim")
import projectq.backends._qracksim._qracksim as _
def get_available_simulators():
result = []
try:
test_is_qrack_simulator_present()
result.append("qrack_simulator_qengine")
result.append("qrack_simulator_qunit")
except:
pass
return result
@pytest.fixture(params=get_available_simulators())
def sim(request):
if request.param == "qrack_simulator_qengine":
from projectq.backends._qracksim._qracksim import QrackSimulator as QrackSim
sim = Simulator()
sim._simulator = QrackSim(1, -1, 1)
elif request.param == "qrack_simulator_qunit":
from projectq.backends._qracksim._qracksim import QrackSimulator as QrackSim
sim = Simulator()
sim._simulator = QrackSim(1, -1, 2)
return sim
@pytest.fixture(params=["mapper", "no_mapper"])
def mapper(request):
"""
Adds a mapper which changes qubit ids by adding 1
"""
if request.param == "mapper":
class TrivialMapper(BasicMapperEngine):
def __init__(self):
BasicEngine.__init__(self)
self.current_mapping = dict()
def receive(self, command_list):
for cmd in command_list:
for qureg in cmd.all_qubits:
for qubit in qureg:
if qubit.id == -1:
continue
elif qubit.id not in self.current_mapping:
previous_map = self.current_mapping
previous_map[qubit.id] = qubit.id + 1
self.current_mapping = previous_map
self._send_cmd_with_mapped_ids(cmd)
return TrivialMapper()
if request.param == "no_mapper":
return None
class Mock1QubitGate(BasicGate):
def __init__(self):
BasicGate.__init__(self)
self.cnt = 0
@property
def matrix(self):
self.cnt += 1
return numpy.matrix([[0, 1],
[1, 0]])
class Mock6QubitGate(BasicGate):
def __init__(self):
BasicGate.__init__(self)
self.cnt = 0
@property
def matrix(self):
self.cnt += 1
return numpy.eye(2 ** 6)
class MockNoMatrixGate(BasicGate):
def __init__(self):
BasicGate.__init__(self)
self.cnt = 0
@property
def matrix(self):
self.cnt += 1
raise AttributeError
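# The mock gates above exercise Simulator.is_available(): a single-qubit matrix is
# simulatable, while a 6-qubit matrix or a gate without a matrix must be rejected.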
def test_simulator_is_available(sim):
backend = DummyEngine(save_commands=True)
eng = MainEngine(backend, [])
qubit = eng.allocate_qubit()
Measure | qubit
qubit[0].__del__()
assert len(backend.received_commands) == 3
# Test that allocate, measure, basic math, and deallocate are available.
for cmd in backend.received_commands:
assert sim.is_available(cmd)
new_cmd = backend.received_commands[-1]
new_cmd.gate = Mock6QubitGate()
assert not sim.is_available(new_cmd)
new_cmd.gate = MockNoMatrixGate()
assert not sim.is_available(new_cmd)
new_cmd.gate = Mock1QubitGate()
assert sim.is_available(new_cmd)
new_cmd = backend.received_commands[-2]
assert len(new_cmd.qubits) == 1
new_cmd.gate = AddConstantModN(1, 2)
assert sim.is_available(new_cmd)
new_cmd.gate = MultiplyByConstantModN(1, 2)
assert sim.is_available(new_cmd)
#new_cmd.gate = DivideByConstantModN(1, 2)
#assert sim.is_available(new_cmd)
def test_simulator_cheat(sim):
# cheat function should return a tuple
assert isinstance(sim.cheat(), tuple)
# first entry is the qubit mapping.
# should be empty:
assert len(sim.cheat()[0]) == 0
# state vector should only have 1 entry:
assert len(sim.cheat()[1]) == 1
eng = MainEngine(sim, [])
qubit = eng.allocate_qubit()
# one qubit has been allocated
assert len(sim.cheat()[0]) == 1
assert sim.cheat()[0][0] == 0
assert len(sim.cheat()[1]) == 2
assert 1. == pytest.approx(abs(sim.cheat()[1][0]))
qubit[0].__del__()
# should be empty:
assert len(sim.cheat()[0]) == 0
# state vector should only have 1 entry:
assert len(sim.cheat()[1]) == 1
def test_simulator_functional_measurement(sim):
eng = MainEngine(sim, [])
qubits = eng.allocate_qureg(5)
# entangle all qubits:
H | qubits[0]
for qb in qubits[1:]:
CNOT | (qubits[0], qb)
All(Measure) | qubits
bit_value_sum = sum([int(qubit) for qubit in qubits])
assert bit_value_sum == 0 or bit_value_sum == 5
def test_simulator_measure_mapped_qubit(sim):
eng = MainEngine(sim, [])
qb1 = WeakQubitRef(engine=eng, idx=1)
qb2 = WeakQubitRef(engine=eng, idx=2)
cmd0 = Command(engine=eng, gate=Allocate, qubits=([qb1],))
cmd1 = Command(engine=eng, gate=X, qubits=([qb1],))
cmd2 = Command(engine=eng, gate=Measure, qubits=([qb1],), controls=[],
tags=[LogicalQubitIDTag(2)])
with pytest.raises(NotYetMeasuredError):
int(qb1)
with pytest.raises(NotYetMeasuredError):
int(qb2)
eng.send([cmd0, cmd1, cmd2])
eng.flush()
with pytest.raises(NotYetMeasuredError):
int(qb1)
assert int(qb2) == 1
def test_simulator_kqubit_exception(sim):
m1 = Rx(0.3).matrix
m2 = Rx(0.8).matrix
m3 = Ry(0.1).matrix
m4 = Rz(0.9).matrix.dot(Ry(-0.1).matrix)
m = numpy.kron(m4, numpy.kron(m3, numpy.kron(m2, m1)))
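    # m is the Kronecker product of four 2x2 matrices, i.e. a 16x16 (4-qubit) gate;
    # applying it to the 3-qubit register below must therefore raise.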
class KQubitGate(BasicGate):
@property
def matrix(self):
return m
eng = MainEngine(sim, [])
qureg = eng.allocate_qureg(3)
with pytest.raises(Exception):
KQubitGate() | qureg
with pytest.raises(Exception):
H | qureg
def test_simulator_swap(sim):
eng = MainEngine(sim, [])
qubits1 = eng.allocate_qureg(1)
qubits2 = eng.allocate_qureg(1)
X | qubits1
Swap | (qubits1, qubits2)
All(Measure) | qubits1
All(Measure) | qubits2
assert (int(qubits1[0]) == 0) and (int(qubits2[0]) == 1)
SqrtSwap | (qubits1, qubits2)
SqrtSwap | (qubits1, qubits2)
All(Measure) | qubits1
All(Measure) | qubits2
assert (int(qubits1[0]) == 1) and (int(qubits2[0]) == 0)
def test_simulator_math(sim):
eng = MainEngine(sim, [])
qubits = eng.allocate_qureg(8)
AddConstant(1) | qubits;
All(Measure) | qubits
value = 0
for i in range(len(qubits)):
value += int(qubits[i]) << i
assert value == 1
AddConstantModN(10, 256) | qubits;
All(Measure) | qubits
value = 0
for i in range(len(qubits)):
value += int(qubits[i]) << i
assert value == 11
controls = eng.allocate_qureg(1)
# Control is off
C(AddConstantModN(10, 256)) | (controls, qubits)
All(Measure) | qubits
value = 0
for i in range(len(qubits)):
value += int(qubits[i]) << i
assert value == 11
# Turn control on
X | controls
C(AddConstantModN(10, 256)) | (controls, qubits)
All(Measure) | qubits
value = 0
for i in range(len(qubits)):
value += int(qubits[i]) << i
assert value == 21
SubConstant(5) | qubits;
All(Measure) | qubits
value = 0
for i in range(len(qubits)):
value += int(qubits[i]) << i
assert value == 16
C(SubConstantModN(10, 256)) | (controls, qubits)
All(Measure) | qubits
value = 0
for i in range(len(qubits)):
value += int(qubits[i]) << i
assert value == 6
# Turn control off
X | controls
C(SubConstantModN(10, 256)) | (controls, qubits)
All(Measure) | qubits
value = 0
for i in range(len(qubits)):
value += int(qubits[i]) << i
assert value == 6
MultiplyByConstantModN(2, 256) | qubits;
All(Measure) | qubits
value = 0
for i in range(len(qubits)):
value += int(qubits[i]) << i
assert value == 12
# Control is off
C(MultiplyByConstantModN(2, 256)) | (controls, qubits)
All(Measure) | qubits
value = 0
for i in range(len(qubits)):
value += int(qubits[i]) << i
assert value == 12
# Turn control on
X | controls
C(MultiplyByConstantModN(10, 256)) | (controls, qubits)
All(Measure) | qubits
value = 0
for i in range(len(qubits)):
value += int(qubits[i]) << i
assert value == 120
def test_simulator_probability(sim, mapper):
engine_list = [LocalOptimizer()]
if mapper is not None:
engine_list.append(mapper)
eng = MainEngine(sim, engine_list=engine_list)
qubits = eng.allocate_qureg(6)
All(H) | qubits
eng.flush()
bits = [0, 0, 1, 0, 1, 0]
for i in range(6):
assert (eng.backend.get_probability(bits[:i], qubits[:i]) ==
pytest.approx(0.5**i))
extra_qubit = eng.allocate_qubit()
with pytest.raises(RuntimeError):
eng.backend.get_probability([0], extra_qubit)
del extra_qubit
All(H) | qubits
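    # The second All(H) undoes the first, so each qubit is back in |0>;
    # Ry(2*acos(sqrt(p))) then leaves probability p on |0> (p = 0.3 and 0.4 below).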
Ry(2 * math.acos(math.sqrt(0.3))) | qubits[0]
eng.flush()
assert eng.backend.get_probability([0], [qubits[0]]) == pytest.approx(0.3)
Ry(2 * math.acos(math.sqrt(0.4))) | qubits[2]
eng.flush()
assert eng.backend.get_probability([0], [qubits[2]]) == pytest.approx(0.4)
assert (numpy.isclose(0.12, eng.backend.get_probability([0, 0], qubits[:3:2]), rtol=tolerance, atol=tolerance))
assert (numpy.isclose(0.18, eng.backend.get_probability([0, 1], qubits[:3:2]), rtol=tolerance, atol=tolerance))
assert (numpy.isclose(0.28, eng.backend.get_probability([1, 0], qubits[:3:2]), rtol=tolerance, atol=tolerance))
All(Measure) | qubits
def test_simulator_amplitude(sim, mapper):
engine_list = [LocalOptimizer()]
if mapper is not None:
engine_list.append(mapper)
eng = MainEngine(sim, engine_list=engine_list)
qubits = eng.allocate_qureg(6)
All(X) | qubits
All(H) | qubits
eng.flush()
bits = [0, 0, 1, 0, 1, 0]
polR, polPhi = cmath.polar(eng.backend.get_amplitude(bits, qubits))
while polPhi < 0:
polPhi += 2 * math.pi
assert polR == pytest.approx(1. / 8.)
bits = [0, 0, 0, 0, 1, 0]
polR2, polPhi2 = cmath.polar(eng.backend.get_amplitude(bits, qubits))
while polPhi2 < math.pi:
polPhi2 += 2 * math.pi
assert polR2 == pytest.approx(polR)
assert (polPhi2 - math.pi) == pytest.approx(polPhi)
bits = [0, 1, 1, 0, 1, 0]
polR3, polPhi3 = cmath.polar(eng.backend.get_amplitude(bits, qubits))
while polPhi3 < math.pi:
polPhi3 += 2 * math.pi
assert polR3 == pytest.approx(polR)
assert (polPhi3 - math.pi) == pytest.approx(polPhi)
All(H) | qubits
All(X) | qubits
Ry(2 * math.acos(0.3)) | qubits[0]
eng.flush()
bits = [0] * 6
polR, polPhi = cmath.polar(eng.backend.get_amplitude(bits, qubits))
assert polR == pytest.approx(0.3)
bits[0] = 1
polR, polPhi = cmath.polar(eng.backend.get_amplitude(bits, qubits))
assert (polR ==
pytest.approx(math.sqrt(0.91)))
All(Measure) | qubits
# raises if not all qubits are in the list:
with pytest.raises(RuntimeError):
eng.backend.get_amplitude(bits, qubits[:-1])
# doesn't just check for length:
with pytest.raises(RuntimeError):
eng.backend.get_amplitude(bits, qubits[:-1] + [qubits[0]])
extra_qubit = eng.allocate_qubit()
eng.flush()
# there is a new qubit now!
with pytest.raises(RuntimeError):
eng.backend.get_amplitude(bits, qubits)
def test_simulator_set_wavefunction(sim, mapper):
engine_list = [LocalOptimizer()]
if mapper is not None:
engine_list.append(mapper)
eng = MainEngine(sim, engine_list=engine_list)
qubits = eng.allocate_qureg(2)
wf = [0., 0., math.sqrt(0.2), math.sqrt(0.8)]
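    # Amplitude indices are bit strings with qubits[0] as the least-significant bit,
    # so this state gives P(qubits[0]=1) = 0.8 and P(qubits[1]=1) = 1, as asserted below.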
with pytest.raises(RuntimeError):
eng.backend.set_wavefunction(wf, qubits)
eng.flush()
eng.backend.set_wavefunction(wf, qubits)
assert pytest.approx(eng.backend.get_probability('1', [qubits[0]])) == .8
assert pytest.approx(eng.backend.get_probability('01', qubits)) == .2
assert pytest.approx(eng.backend.get_probability('1', [qubits[1]])) == 1.
All(Measure) | qubits
def test_simulator_set_wavefunction_always_complex(sim):
""" Checks that wavefunction is always complex """
eng = MainEngine(sim)
qubit = eng.allocate_qubit()
eng.flush()
wf = [1., 0]
eng.backend.set_wavefunction(wf, qubit)
Y | qubit
eng.flush()
amplitude = eng.backend.get_amplitude('1', qubit)
assert amplitude == pytest.approx(1j) or amplitude == pytest.approx(-1j)
def test_simulator_collapse_wavefunction(sim, mapper):
engine_list = [LocalOptimizer()]
if mapper is not None:
engine_list.append(mapper)
eng = MainEngine(sim, engine_list=engine_list)
qubits = eng.allocate_qureg(4)
# unknown qubits: raises
with pytest.raises(RuntimeError):
eng.backend.collapse_wavefunction(qubits, [0] * 4)
eng.flush()
eng.backend.collapse_wavefunction(qubits, [0] * 4)
assert pytest.approx(eng.backend.get_probability([0] * 4, qubits)) == 1.
All(H) | qubits[1:]
eng.flush()
assert pytest.approx(eng.backend.get_probability([0] * 4, qubits)) == .125
# impossible outcome: raises
with pytest.raises(RuntimeError):
eng.backend.collapse_wavefunction(qubits, [1] + [0] * 3)
eng.backend.collapse_wavefunction(qubits[:-1], [0, 1, 0])
probability = eng.backend.get_probability([0, 1, 0, 1], qubits)
assert probability == pytest.approx(.5)
eng.backend.set_wavefunction([1.] + [0.] * 15, qubits)
H | qubits[0]
CNOT | (qubits[0], qubits[1])
eng.flush()
eng.backend.collapse_wavefunction([qubits[0]], [1])
probability = eng.backend.get_probability([1, 1], qubits[0:2])
assert probability == pytest.approx(1.)
def test_simulator_no_uncompute_exception(sim):
eng = MainEngine(sim, [])
qubit = eng.allocate_qubit()
H | qubit
with pytest.raises(RuntimeError):
qubit[0].__del__()
# If you wanted to keep using the qubit, you shouldn't have deleted it.
assert qubit[0].id == -1
def test_simulator_functional_entangle(sim):
eng = MainEngine(sim, [])
qubits = eng.allocate_qureg(5)
# entangle all qubits:
H | qubits[0]
for qb in qubits[1:]:
CNOT | (qubits[0], qb)
# check the state vector:
assert .5 == pytest.approx(abs(sim.cheat()[1][0])**2, rel=tolerance, abs=tolerance)
assert .5 == pytest.approx(abs(sim.cheat()[1][31])**2, rel=tolerance, abs=tolerance)
for i in range(1, 31):
assert 0. == pytest.approx(abs(sim.cheat()[1][i]), rel=tolerance, abs=tolerance)
# unentangle all except the first 2
for qb in qubits[2:]:
CNOT | (qubits[0], qb)
# entangle using Toffolis
for qb in qubits[2:]:
Toffoli | (qubits[0], qubits[1], qb)
# check the state vector:
assert .5 == pytest.approx(abs(sim.cheat()[1][0])**2, rel=tolerance, abs=tolerance)
assert .5 == pytest.approx(abs(sim.cheat()[1][31])**2, rel=tolerance, abs=tolerance)
for i in range(1, 31):
assert 0. == pytest.approx(abs(sim.cheat()[1][i]), rel=tolerance, abs=tolerance)
# uncompute using multi-controlled NOTs
with Control(eng, qubits[0:-1]):
X | qubits[-1]
with Control(eng, qubits[0:-2]):
X | qubits[-2]
with Control(eng, qubits[0:-3]):
X | qubits[-3]
CNOT | (qubits[0], qubits[1])
H | qubits[0]
# check the state vector:
assert 1. == pytest.approx(abs(sim.cheat()[1][0])**2, rel=tolerance, abs=tolerance)
for i in range(1, 32):
assert 0. == pytest.approx(abs(sim.cheat()[1][i]), rel=tolerance, abs=tolerance)
All(Measure) | qubits
def test_simulator_convert_logical_to_mapped_qubits(sim):
mapper = BasicMapperEngine()
def receive(command_list):
pass
mapper.receive = receive
eng = MainEngine(sim, [mapper])
qubit0 = eng.allocate_qubit()
qubit1 = eng.allocate_qubit()
mapper.current_mapping = {qubit0[0].id: qubit1[0].id,
qubit1[0].id: qubit0[0].id}
assert (sim._convert_logical_to_mapped_qureg(qubit0 + qubit1) ==
qubit1 + qubit0)
def slow_implementation(angles, control_qubits, target_qubit, eng, gate_class):
"""
Assumption is that control_qubits[0] is lowest order bit
We apply angles[0] to state |0>
"""
assert len(angles) == 2**len(control_qubits)
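    # For each index, temporarily flip every control whose bit is 0 so that the
    # fully-controlled rotation fires only on the basis state |index>, then uncompute.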
for index in range(2**len(control_qubits)):
with Compute(eng):
for bit_pos in range(len(control_qubits)):
if not (index >> bit_pos) & 1:
X | control_qubits[bit_pos]
with Control(eng, control_qubits):
gate_class(angles[index]) | target_qubit
Uncompute(eng)
@pytest.mark.parametrize("gate_classes", [(Ry, UniformlyControlledRy),
(Rz, UniformlyControlledRz)])
def test_uniformly_controlled_r(sim, gate_classes):
n = 2
random_angles = [3.0, 0.8, 1.2, 0.7]
basis_state_index = 2
basis_state = [0] * 2**(n+1)
basis_state[basis_state_index] = 1.
correct_eng = MainEngine(backend=Simulator())
test_eng = MainEngine(backend=sim)
correct_sim = correct_eng.backend
correct_qb = correct_eng.allocate_qubit()
correct_ctrl_qureg = correct_eng.allocate_qureg(n)
correct_eng.flush()
test_sim = test_eng.backend
test_qb = test_eng.allocate_qubit()
test_ctrl_qureg = test_eng.allocate_qureg(n)
test_eng.flush()
correct_sim.set_wavefunction(basis_state, correct_qb + correct_ctrl_qureg)
test_sim.set_wavefunction(basis_state, test_qb + test_ctrl_qureg)
test_eng.flush()
correct_eng.flush()
gate_classes[1](random_angles) | (test_ctrl_qureg, test_qb)
slow_implementation(angles=random_angles,
control_qubits=correct_ctrl_qureg,
target_qubit=correct_qb,
eng=correct_eng,
gate_class=gate_classes[0])
test_eng.flush()
correct_eng.flush()
for fstate in range(2**(n+1)):
binary_state = format(fstate, '0' + str(n+1) + 'b')
test = test_sim.get_amplitude(binary_state,
test_qb + test_ctrl_qureg)
correct = correct_sim.get_amplitude(binary_state, correct_qb +
correct_ctrl_qureg)
print(test, "==", correct)
assert correct == pytest.approx(test, rel=tolerance, abs=tolerance)
All(Measure) | test_qb + test_ctrl_qureg
All(Measure) | correct_qb + correct_ctrl_qureg
test_eng.flush(deallocate_qubits=True)
correct_eng.flush(deallocate_qubits=True)
def test_qubit_operator(sim):
test_eng = MainEngine(sim)
test_qureg = test_eng.allocate_qureg(1)
test_eng.flush()
qubit_op = QubitOperator("X0 X1", 1)
with pytest.raises(Exception):
sim.get_expectation_value(qubit_op, test_qureg)
test_eng.backend.set_wavefunction([1, 0],
test_qureg)
test_eng.flush()
qubit_op = QubitOperator("X0", 1)
qubit_op | test_qureg[0]
test_eng.flush()
amplitude = test_eng.backend.get_amplitude('0', test_qureg)
assert amplitude == pytest.approx(0.)
amplitude = test_eng.backend.get_amplitude('1', test_qureg)
assert amplitude == pytest.approx(1.)
def test_get_expectation_value(sim):
num_qubits = 2
test_eng = MainEngine(sim)
test_qureg = test_eng.allocate_qureg(num_qubits)
test_eng.flush()
qubit_op = QubitOperator("X0 X1 X2", 1)
with pytest.raises(Exception):
sim.get_expectation_value(qubit_op, test_qureg)
qubit_op = QubitOperator("X0", 1)
test_eng.backend.set_wavefunction([1 / math.sqrt(2), 1 / math.sqrt(2), 0, 0],
test_qureg)
test_eng.flush()
assert(sim.get_expectation_value(qubit_op, test_qureg) == pytest.approx(1, rel=tolerance, abs=tolerance))
test_eng.backend.set_wavefunction([1 / math.sqrt(2), -1 / math.sqrt(2), 0, 0],
test_qureg)
test_eng.flush()
assert(sim.get_expectation_value(qubit_op, test_qureg) == pytest.approx(-1, rel=tolerance, abs=tolerance))
qubit_op = QubitOperator("Y0", 1)
test_eng.backend.set_wavefunction([1 / math.sqrt(2), 1j / math.sqrt(2), 0, 0],
test_qureg)
test_eng.flush()
assert(sim.get_expectation_value(qubit_op, test_qureg) == pytest.approx(1, rel=tolerance, abs=tolerance))
test_eng.backend.set_wavefunction([1 / math.sqrt(2), -1j / math.sqrt(2), 0, 0],
test_qureg)
test_eng.flush()
assert(sim.get_expectation_value(qubit_op, test_qureg) == pytest.approx(-1, rel=tolerance, abs=tolerance))
qubit_op = QubitOperator("Z0", 1)
test_eng.backend.set_wavefunction([1, 0, 0, 0],
test_qureg)
test_eng.flush()
assert(sim.get_expectation_value(qubit_op, test_qureg) == pytest.approx(1, rel=tolerance, abs=tolerance))
test_eng.backend.set_wavefunction([0, 1, 0, 0],
test_qureg)
test_eng.flush()
assert(sim.get_expectation_value(qubit_op, test_qureg) == pytest.approx(-1, rel=tolerance, abs=tolerance))
qubit_op = QubitOperator("Z0", 0.25)
test_eng.backend.set_wavefunction([1, 0, 0, 0],
test_qureg)
test_eng.flush()
assert(sim.get_expectation_value(qubit_op, test_qureg) == pytest.approx(0.25, rel=tolerance, abs=tolerance))
test_eng.backend.set_wavefunction([0, 1, 0, 0],
test_qureg)
test_eng.flush()
assert(sim.get_expectation_value(qubit_op, test_qureg) == pytest.approx(-0.25, rel=tolerance, abs=tolerance))
qubit_op = QubitOperator("Z0 Z1", 1)
test_eng.backend.set_wavefunction([1, 0, 0, 0],
test_qureg)
test_eng.flush()
assert(sim.get_expectation_value(qubit_op, test_qureg) == pytest.approx(1, rel=tolerance, abs=tolerance))
X | test_qureg[0]
test_eng.flush()
assert(sim.get_expectation_value(qubit_op, test_qureg) == pytest.approx(-1, rel=tolerance, abs=tolerance))
X | test_qureg[1]
test_eng.flush()
assert(sim.get_expectation_value(qubit_op, test_qureg) == pytest.approx(1, rel=tolerance, abs=tolerance))
X | test_qureg[0]
test_eng.flush()
assert(sim.get_expectation_value(qubit_op, test_qureg) == pytest.approx(-1, rel=tolerance, abs=tolerance))
| [
"projectq.types.WeakQubitRef",
"math.acos",
"projectq.MainEngine",
"projectq.cengines.DummyEngine",
"math.sqrt",
"projectq.ops.All",
"pytest.fixture",
"projectq.libs.math.SubConstantModN",
"projectq.libs.math.AddConstantModN",
"projectq.ops.Rx",
"projectq.libs.math.SubConstant",
"numpy.eye",
"projectq.meta.Compute",
"projectq.ops.Ry",
"projectq.meta.LogicalQubitIDTag",
"projectq.cengines.BasicMapperEngine",
"numpy.kron",
"projectq.libs.math.AddConstant",
"pytest.raises",
"projectq.libs.math.MultiplyByConstantModN",
"projectq.meta.Control",
"projectq.ops.Rz",
"projectq.backends._qracksim._qracksim.QrackSimulator",
"pytest.approx",
"projectq.ops.QubitOperator",
"projectq.ops.Command",
"projectq.cengines.BasicEngine.__init__",
"projectq.cengines.LocalOptimizer",
"projectq.backends.Simulator",
"pytest.mark.parametrize",
"pytest.importorskip",
"projectq.ops.BasicGate.__init__",
"numpy.matrix",
"projectq.meta.Uncompute"
]
| [((2688, 2734), 'pytest.fixture', 'pytest.fixture', ([], {'params': "['mapper', 'no_mapper']"}), "(params=['mapper', 'no_mapper'])\n", (2702, 2734), False, 'import pytest\n'), ((18744, 18847), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""gate_classes"""', '[(Ry, UniformlyControlledRy), (Rz, UniformlyControlledRz)]'], {}), "('gate_classes', [(Ry, UniformlyControlledRy), (Rz,\n UniformlyControlledRz)])\n", (18767, 18847), False, 'import pytest\n'), ((1832, 1892), 'pytest.importorskip', 'pytest.importorskip', (['"""projectq.backends._qracksim._qracksim"""'], {}), "('projectq.backends._qracksim._qracksim')\n", (1851, 1892), False, 'import pytest\n'), ((4517, 4548), 'projectq.cengines.DummyEngine', 'DummyEngine', ([], {'save_commands': '(True)'}), '(save_commands=True)\n', (4528, 4548), False, 'from projectq.cengines import BasicEngine, BasicMapperEngine, DummyEngine, LocalOptimizer, NotYetMeasuredError\n'), ((4559, 4582), 'projectq.MainEngine', 'MainEngine', (['backend', '[]'], {}), '(backend, [])\n', (4569, 4582), False, 'from projectq import MainEngine\n'), ((5241, 5262), 'projectq.libs.math.AddConstantModN', 'AddConstantModN', (['(1)', '(2)'], {}), '(1, 2)\n', (5256, 5262), False, 'from projectq.libs.math import AddConstant, AddConstantModN, SubConstant, SubConstantModN, MultiplyByConstantModN\n'), ((5320, 5348), 'projectq.libs.math.MultiplyByConstantModN', 'MultiplyByConstantModN', (['(1)', '(2)'], {}), '(1, 2)\n', (5342, 5348), False, 'from projectq.libs.math import AddConstant, AddConstantModN, SubConstant, SubConstantModN, MultiplyByConstantModN\n'), ((5781, 5800), 'projectq.MainEngine', 'MainEngine', (['sim', '[]'], {}), '(sim, [])\n', (5791, 5800), False, 'from projectq import MainEngine\n'), ((6255, 6274), 'projectq.MainEngine', 'MainEngine', (['sim', '[]'], {}), '(sim, [])\n', (6265, 6274), False, 'from projectq import MainEngine\n'), ((6608, 6627), 'projectq.MainEngine', 'MainEngine', (['sim', '[]'], {}), '(sim, [])\n', (6618, 6627), False, 'from projectq import MainEngine\n'), ((6638, 6669), 'projectq.types.WeakQubitRef', 'WeakQubitRef', ([], {'engine': 'eng', 'idx': '(1)'}), '(engine=eng, idx=1)\n', (6650, 6669), False, 'from projectq.types import WeakQubitRef\n'), ((6680, 6711), 'projectq.types.WeakQubitRef', 'WeakQubitRef', ([], {'engine': 'eng', 'idx': '(2)'}), '(engine=eng, idx=2)\n', (6692, 6711), False, 'from projectq.types import WeakQubitRef\n'), ((6723, 6774), 'projectq.ops.Command', 'Command', ([], {'engine': 'eng', 'gate': 'Allocate', 'qubits': '([qb1],)'}), '(engine=eng, gate=Allocate, qubits=([qb1],))\n', (6730, 6774), False, 'from projectq.ops import All, Allocate, BasicGate, BasicMathGate, CNOT, C, Command, H, Measure, QubitOperator, Rx, Ry, Rz, S, TimeEvolution, Toffoli, X, Y, Z, Swap, SqrtSwap, UniformlyControlledRy, UniformlyControlledRz\n'), ((6786, 6830), 'projectq.ops.Command', 'Command', ([], {'engine': 'eng', 'gate': 'X', 'qubits': '([qb1],)'}), '(engine=eng, gate=X, qubits=([qb1],))\n', (6793, 6830), False, 'from projectq.ops import All, Allocate, BasicGate, BasicMathGate, CNOT, C, Command, H, Measure, QubitOperator, Rx, Ry, Rz, S, TimeEvolution, Toffoli, X, Y, Z, Swap, SqrtSwap, UniformlyControlledRy, UniformlyControlledRz\n'), ((7544, 7563), 'projectq.MainEngine', 'MainEngine', (['sim', '[]'], {}), '(sim, [])\n', (7554, 7563), False, 'from projectq import MainEngine\n'), ((7757, 7776), 'projectq.MainEngine', 'MainEngine', (['sim', '[]'], {}), '(sim, [])\n', (7767, 7776), False, 'from projectq import MainEngine\n'), ((8238, 8257), 
'projectq.MainEngine', 'MainEngine', (['sim', '[]'], {}), '(sim, [])\n', (8248, 8257), False, 'from projectq import MainEngine\n'), ((10462, 10502), 'projectq.MainEngine', 'MainEngine', (['sim'], {'engine_list': 'engine_list'}), '(sim, engine_list=engine_list)\n', (10472, 10502), False, 'from projectq import MainEngine\n'), ((11724, 11764), 'projectq.MainEngine', 'MainEngine', (['sim'], {'engine_list': 'engine_list'}), '(sim, engine_list=engine_list)\n', (11734, 11764), False, 'from projectq import MainEngine\n'), ((13589, 13629), 'projectq.MainEngine', 'MainEngine', (['sim'], {'engine_list': 'engine_list'}), '(sim, engine_list=engine_list)\n', (13599, 13629), False, 'from projectq import MainEngine\n'), ((14243, 14258), 'projectq.MainEngine', 'MainEngine', (['sim'], {}), '(sim)\n', (14253, 14258), False, 'from projectq import MainEngine\n'), ((14696, 14736), 'projectq.MainEngine', 'MainEngine', (['sim'], {'engine_list': 'engine_list'}), '(sim, engine_list=engine_list)\n', (14706, 14736), False, 'from projectq import MainEngine\n'), ((15829, 15848), 'projectq.MainEngine', 'MainEngine', (['sim', '[]'], {}), '(sim, [])\n', (15839, 15848), False, 'from projectq import MainEngine\n'), ((16123, 16142), 'projectq.MainEngine', 'MainEngine', (['sim', '[]'], {}), '(sim, [])\n', (16133, 16142), False, 'from projectq import MainEngine\n'), ((17740, 17759), 'projectq.cengines.BasicMapperEngine', 'BasicMapperEngine', ([], {}), '()\n', (17757, 17759), False, 'from projectq.cengines import BasicEngine, BasicMapperEngine, DummyEngine, LocalOptimizer, NotYetMeasuredError\n'), ((17845, 17870), 'projectq.MainEngine', 'MainEngine', (['sim', '[mapper]'], {}), '(sim, [mapper])\n', (17855, 17870), False, 'from projectq import MainEngine\n'), ((19155, 19178), 'projectq.MainEngine', 'MainEngine', ([], {'backend': 'sim'}), '(backend=sim)\n', (19165, 19178), False, 'from projectq import MainEngine\n'), ((20729, 20744), 'projectq.MainEngine', 'MainEngine', (['sim'], {}), '(sim)\n', (20739, 20744), False, 'from projectq import MainEngine\n'), ((20826, 20851), 'projectq.ops.QubitOperator', 'QubitOperator', (['"""X0 X1"""', '(1)'], {}), "('X0 X1', 1)\n", (20839, 20851), False, 'from projectq.ops import All, Allocate, BasicGate, BasicMathGate, CNOT, C, Command, H, Measure, QubitOperator, Rx, Ry, Rz, S, TimeEvolution, Toffoli, X, Y, Z, Swap, SqrtSwap, UniformlyControlledRy, UniformlyControlledRz\n'), ((21076, 21098), 'projectq.ops.QubitOperator', 'QubitOperator', (['"""X0"""', '(1)'], {}), "('X0', 1)\n", (21089, 21098), False, 'from projectq.ops import All, Allocate, BasicGate, BasicMathGate, CNOT, C, Command, H, Measure, QubitOperator, Rx, Ry, Rz, S, TimeEvolution, Toffoli, X, Y, Z, Swap, SqrtSwap, UniformlyControlledRy, UniformlyControlledRz\n'), ((21434, 21449), 'projectq.MainEngine', 'MainEngine', (['sim'], {}), '(sim)\n', (21444, 21449), False, 'from projectq import MainEngine\n'), ((21540, 21568), 'projectq.ops.QubitOperator', 'QubitOperator', (['"""X0 X1 X2"""', '(1)'], {}), "('X0 X1 X2', 1)\n", (21553, 21568), False, 'from projectq.ops import All, Allocate, BasicGate, BasicMathGate, CNOT, C, Command, H, Measure, QubitOperator, Rx, Ry, Rz, S, TimeEvolution, Toffoli, X, Y, Z, Swap, SqrtSwap, UniformlyControlledRy, UniformlyControlledRz\n'), ((21676, 21698), 'projectq.ops.QubitOperator', 'QubitOperator', (['"""X0"""', '(1)'], {}), "('X0', 1)\n", (21689, 21698), False, 'from projectq.ops import All, Allocate, BasicGate, BasicMathGate, CNOT, C, Command, H, Measure, QubitOperator, Rx, Ry, Rz, S, TimeEvolution, Toffoli, 
X, Y, Z, Swap, SqrtSwap, UniformlyControlledRy, UniformlyControlledRz\n'), ((22243, 22265), 'projectq.ops.QubitOperator', 'QubitOperator', (['"""Y0"""', '(1)'], {}), "('Y0', 1)\n", (22256, 22265), False, 'from projectq.ops import All, Allocate, BasicGate, BasicMathGate, CNOT, C, Command, H, Measure, QubitOperator, Rx, Ry, Rz, S, TimeEvolution, Toffoli, X, Y, Z, Swap, SqrtSwap, UniformlyControlledRy, UniformlyControlledRz\n'), ((22812, 22834), 'projectq.ops.QubitOperator', 'QubitOperator', (['"""Z0"""', '(1)'], {}), "('Z0', 1)\n", (22825, 22834), False, 'from projectq.ops import All, Allocate, BasicGate, BasicMathGate, CNOT, C, Command, H, Measure, QubitOperator, Rx, Ry, Rz, S, TimeEvolution, Toffoli, X, Y, Z, Swap, SqrtSwap, UniformlyControlledRy, UniformlyControlledRz\n'), ((23318, 23343), 'projectq.ops.QubitOperator', 'QubitOperator', (['"""Z0"""', '(0.25)'], {}), "('Z0', 0.25)\n", (23331, 23343), False, 'from projectq.ops import All, Allocate, BasicGate, BasicMathGate, CNOT, C, Command, H, Measure, QubitOperator, Rx, Ry, Rz, S, TimeEvolution, Toffoli, X, Y, Z, Swap, SqrtSwap, UniformlyControlledRy, UniformlyControlledRz\n'), ((23833, 23858), 'projectq.ops.QubitOperator', 'QubitOperator', (['"""Z0 Z1"""', '(1)'], {}), "('Z0 Z1', 1)\n", (23846, 23858), False, 'from projectq.ops import All, Allocate, BasicGate, BasicMathGate, CNOT, C, Command, H, Measure, QubitOperator, Rx, Ry, Rz, S, TimeEvolution, Toffoli, X, Y, Z, Swap, SqrtSwap, UniformlyControlledRy, UniformlyControlledRz\n'), ((2408, 2419), 'projectq.backends.Simulator', 'Simulator', ([], {}), '()\n', (2417, 2419), False, 'from projectq.backends import Simulator\n'), ((2445, 2463), 'projectq.backends._qracksim._qracksim.QrackSimulator', 'QrackSim', (['(1)', '(-1)', '(1)'], {}), '(1, -1, 1)\n', (2453, 2463), True, 'from projectq.backends._qracksim._qracksim import QrackSimulator as QrackSim\n'), ((3795, 3819), 'projectq.ops.BasicGate.__init__', 'BasicGate.__init__', (['self'], {}), '(self)\n', (3813, 3819), False, 'from projectq.ops import All, Allocate, BasicGate, BasicMathGate, CNOT, C, Command, H, Measure, QubitOperator, Rx, Ry, Rz, S, TimeEvolution, Toffoli, X, Y, Z, Swap, SqrtSwap, UniformlyControlledRy, UniformlyControlledRz\n'), ((3935, 3965), 'numpy.matrix', 'numpy.matrix', (['[[0, 1], [1, 0]]'], {}), '([[0, 1], [1, 0]])\n', (3947, 3965), False, 'import numpy\n'), ((4074, 4098), 'projectq.ops.BasicGate.__init__', 'BasicGate.__init__', (['self'], {}), '(self)\n', (4092, 4098), False, 'from projectq.ops import All, Allocate, BasicGate, BasicMathGate, CNOT, C, Command, H, Measure, QubitOperator, Rx, Ry, Rz, S, TimeEvolution, Toffoli, X, Y, Z, Swap, SqrtSwap, UniformlyControlledRy, UniformlyControlledRz\n'), ((4214, 4231), 'numpy.eye', 'numpy.eye', (['(2 ** 6)'], {}), '(2 ** 6)\n', (4223, 4231), False, 'import numpy\n'), ((4309, 4333), 'projectq.ops.BasicGate.__init__', 'BasicGate.__init__', (['self'], {}), '(self)\n', (4327, 4333), False, 'from projectq.ops import All, Allocate, BasicGate, BasicMathGate, CNOT, C, Command, H, Measure, QubitOperator, Rx, Ry, Rz, S, TimeEvolution, Toffoli, X, Y, Z, Swap, SqrtSwap, UniformlyControlledRy, UniformlyControlledRz\n'), ((6417, 6429), 'projectq.ops.All', 'All', (['Measure'], {}), '(Measure)\n', (6420, 6429), False, 'from projectq.ops import All, Allocate, BasicGate, BasicMathGate, CNOT, C, Command, H, Measure, QubitOperator, Rx, Ry, Rz, S, TimeEvolution, Toffoli, X, Y, Z, Swap, SqrtSwap, UniformlyControlledRy, UniformlyControlledRz\n'), ((6963, 6997), 'pytest.raises', 'pytest.raises', 
(['NotYetMeasuredError'], {}), '(NotYetMeasuredError)\n', (6976, 6997), False, 'import pytest\n'), ((7025, 7059), 'pytest.raises', 'pytest.raises', (['NotYetMeasuredError'], {}), '(NotYetMeasuredError)\n', (7038, 7059), False, 'import pytest\n'), ((7136, 7170), 'pytest.raises', 'pytest.raises', (['NotYetMeasuredError'], {}), '(NotYetMeasuredError)\n', (7149, 7170), False, 'import pytest\n'), ((7267, 7274), 'projectq.ops.Rx', 'Rx', (['(0.3)'], {}), '(0.3)\n', (7269, 7274), False, 'from projectq.ops import All, Allocate, BasicGate, BasicMathGate, CNOT, C, Command, H, Measure, QubitOperator, Rx, Ry, Rz, S, TimeEvolution, Toffoli, X, Y, Z, Swap, SqrtSwap, UniformlyControlledRy, UniformlyControlledRz\n'), ((7291, 7298), 'projectq.ops.Rx', 'Rx', (['(0.8)'], {}), '(0.8)\n', (7293, 7298), False, 'from projectq.ops import All, Allocate, BasicGate, BasicMathGate, CNOT, C, Command, H, Measure, QubitOperator, Rx, Ry, Rz, S, TimeEvolution, Toffoli, X, Y, Z, Swap, SqrtSwap, UniformlyControlledRy, UniformlyControlledRz\n'), ((7315, 7322), 'projectq.ops.Ry', 'Ry', (['(0.1)'], {}), '(0.1)\n', (7317, 7322), False, 'from projectq.ops import All, Allocate, BasicGate, BasicMathGate, CNOT, C, Command, H, Measure, QubitOperator, Rx, Ry, Rz, S, TimeEvolution, Toffoli, X, Y, Z, Swap, SqrtSwap, UniformlyControlledRy, UniformlyControlledRz\n'), ((7607, 7631), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (7620, 7631), False, 'import pytest\n'), ((7671, 7695), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (7684, 7695), False, 'import pytest\n'), ((7901, 7913), 'projectq.ops.All', 'All', (['Measure'], {}), '(Measure)\n', (7904, 7913), False, 'from projectq.ops import All, Allocate, BasicGate, BasicMathGate, CNOT, C, Command, H, Measure, QubitOperator, Rx, Ry, Rz, S, TimeEvolution, Toffoli, X, Y, Z, Swap, SqrtSwap, UniformlyControlledRy, UniformlyControlledRz\n'), ((7928, 7940), 'projectq.ops.All', 'All', (['Measure'], {}), '(Measure)\n', (7931, 7940), False, 'from projectq.ops import All, Allocate, BasicGate, BasicMathGate, CNOT, C, Command, H, Measure, QubitOperator, Rx, Ry, Rz, S, TimeEvolution, Toffoli, X, Y, Z, Swap, SqrtSwap, UniformlyControlledRy, UniformlyControlledRz\n'), ((8085, 8097), 'projectq.ops.All', 'All', (['Measure'], {}), '(Measure)\n', (8088, 8097), False, 'from projectq.ops import All, Allocate, BasicGate, BasicMathGate, CNOT, C, Command, H, Measure, QubitOperator, Rx, Ry, Rz, S, TimeEvolution, Toffoli, X, Y, Z, Swap, SqrtSwap, UniformlyControlledRy, UniformlyControlledRz\n'), ((8112, 8124), 'projectq.ops.All', 'All', (['Measure'], {}), '(Measure)\n', (8115, 8124), False, 'from projectq.ops import All, Allocate, BasicGate, BasicMathGate, CNOT, C, Command, H, Measure, QubitOperator, Rx, Ry, Rz, S, TimeEvolution, Toffoli, X, Y, Z, Swap, SqrtSwap, UniformlyControlledRy, UniformlyControlledRz\n'), ((8298, 8312), 'projectq.libs.math.AddConstant', 'AddConstant', (['(1)'], {}), '(1)\n', (8309, 8312), False, 'from projectq.libs.math import AddConstant, AddConstantModN, SubConstant, SubConstantModN, MultiplyByConstantModN\n'), ((8327, 8339), 'projectq.ops.All', 'All', (['Measure'], {}), '(Measure)\n', (8330, 8339), False, 'from projectq.ops import All, Allocate, BasicGate, BasicMathGate, CNOT, C, Command, H, Measure, QubitOperator, Rx, Ry, Rz, S, TimeEvolution, Toffoli, X, Y, Z, Swap, SqrtSwap, UniformlyControlledRy, UniformlyControlledRz\n'), ((8460, 8484), 'projectq.libs.math.AddConstantModN', 'AddConstantModN', (['(10)', '(256)'], {}), '(10, 256)\n', 
(8475, 8484), False, 'from projectq.libs.math import AddConstant, AddConstantModN, SubConstant, SubConstantModN, MultiplyByConstantModN\n'), ((8499, 8511), 'projectq.ops.All', 'All', (['Measure'], {}), '(Measure)\n', (8502, 8511), False, 'from projectq.ops import All, Allocate, BasicGate, BasicMathGate, CNOT, C, Command, H, Measure, QubitOperator, Rx, Ry, Rz, S, TimeEvolution, Toffoli, X, Y, Z, Swap, SqrtSwap, UniformlyControlledRy, UniformlyControlledRz\n'), ((8745, 8757), 'projectq.ops.All', 'All', (['Measure'], {}), '(Measure)\n', (8748, 8757), False, 'from projectq.ops import All, Allocate, BasicGate, BasicMathGate, CNOT, C, Command, H, Measure, QubitOperator, Rx, Ry, Rz, S, TimeEvolution, Toffoli, X, Y, Z, Swap, SqrtSwap, UniformlyControlledRy, UniformlyControlledRz\n'), ((8972, 8984), 'projectq.ops.All', 'All', (['Measure'], {}), '(Measure)\n', (8975, 8984), False, 'from projectq.ops import All, Allocate, BasicGate, BasicMathGate, CNOT, C, Command, H, Measure, QubitOperator, Rx, Ry, Rz, S, TimeEvolution, Toffoli, X, Y, Z, Swap, SqrtSwap, UniformlyControlledRy, UniformlyControlledRz\n'), ((9106, 9120), 'projectq.libs.math.SubConstant', 'SubConstant', (['(5)'], {}), '(5)\n', (9117, 9120), False, 'from projectq.libs.math import AddConstant, AddConstantModN, SubConstant, SubConstantModN, MultiplyByConstantModN\n'), ((9135, 9147), 'projectq.ops.All', 'All', (['Measure'], {}), '(Measure)\n', (9138, 9147), False, 'from projectq.ops import All, Allocate, BasicGate, BasicMathGate, CNOT, C, Command, H, Measure, QubitOperator, Rx, Ry, Rz, S, TimeEvolution, Toffoli, X, Y, Z, Swap, SqrtSwap, UniformlyControlledRy, UniformlyControlledRz\n'), ((9322, 9334), 'projectq.ops.All', 'All', (['Measure'], {}), '(Measure)\n', (9325, 9334), False, 'from projectq.ops import All, Allocate, BasicGate, BasicMathGate, CNOT, C, Command, H, Measure, QubitOperator, Rx, Ry, Rz, S, TimeEvolution, Toffoli, X, Y, Z, Swap, SqrtSwap, UniformlyControlledRy, UniformlyControlledRz\n'), ((9549, 9561), 'projectq.ops.All', 'All', (['Measure'], {}), '(Measure)\n', (9552, 9561), False, 'from projectq.ops import All, Allocate, BasicGate, BasicMathGate, CNOT, C, Command, H, Measure, QubitOperator, Rx, Ry, Rz, S, TimeEvolution, Toffoli, X, Y, Z, Swap, SqrtSwap, UniformlyControlledRy, UniformlyControlledRz\n'), ((9682, 9712), 'projectq.libs.math.MultiplyByConstantModN', 'MultiplyByConstantModN', (['(2)', '(256)'], {}), '(2, 256)\n', (9704, 9712), False, 'from projectq.libs.math import AddConstant, AddConstantModN, SubConstant, SubConstantModN, MultiplyByConstantModN\n'), ((9727, 9739), 'projectq.ops.All', 'All', (['Measure'], {}), '(Measure)\n', (9730, 9739), False, 'from projectq.ops import All, Allocate, BasicGate, BasicMathGate, CNOT, C, Command, H, Measure, QubitOperator, Rx, Ry, Rz, S, TimeEvolution, Toffoli, X, Y, Z, Swap, SqrtSwap, UniformlyControlledRy, UniformlyControlledRz\n'), ((9942, 9954), 'projectq.ops.All', 'All', (['Measure'], {}), '(Measure)\n', (9945, 9954), False, 'from projectq.ops import All, Allocate, BasicGate, BasicMathGate, CNOT, C, Command, H, Measure, QubitOperator, Rx, Ry, Rz, S, TimeEvolution, Toffoli, X, Y, Z, Swap, SqrtSwap, UniformlyControlledRy, UniformlyControlledRz\n'), ((10176, 10188), 'projectq.ops.All', 'All', (['Measure'], {}), '(Measure)\n', (10179, 10188), False, 'from projectq.ops import All, Allocate, BasicGate, BasicMathGate, CNOT, C, Command, H, Measure, QubitOperator, Rx, Ry, Rz, S, TimeEvolution, Toffoli, X, Y, Z, Swap, SqrtSwap, UniformlyControlledRy, UniformlyControlledRz\n'), ((10372, 
10388), 'projectq.cengines.LocalOptimizer', 'LocalOptimizer', ([], {}), '()\n', (10386, 10388), False, 'from projectq.cengines import BasicEngine, BasicMapperEngine, DummyEngine, LocalOptimizer, NotYetMeasuredError\n'), ((10542, 10548), 'projectq.ops.All', 'All', (['H'], {}), '(H)\n', (10545, 10548), False, 'from projectq.ops import All, Allocate, BasicGate, BasicMathGate, CNOT, C, Command, H, Measure, QubitOperator, Rx, Ry, Rz, S, TimeEvolution, Toffoli, X, Y, Z, Swap, SqrtSwap, UniformlyControlledRy, UniformlyControlledRz\n'), ((10783, 10810), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {}), '(RuntimeError)\n', (10796, 10810), False, 'import pytest\n'), ((10890, 10896), 'projectq.ops.All', 'All', (['H'], {}), '(H)\n', (10893, 10896), False, 'from projectq.ops import All, Allocate, BasicGate, BasicMathGate, CNOT, C, Command, H, Measure, QubitOperator, Rx, Ry, Rz, S, TimeEvolution, Toffoli, X, Y, Z, Swap, SqrtSwap, UniformlyControlledRy, UniformlyControlledRz\n'), ((11032, 11050), 'pytest.approx', 'pytest.approx', (['(0.3)'], {}), '(0.3)\n', (11045, 11050), False, 'import pytest\n'), ((11177, 11195), 'pytest.approx', 'pytest.approx', (['(0.4)'], {}), '(0.4)\n', (11190, 11195), False, 'import pytest\n'), ((11548, 11560), 'projectq.ops.All', 'All', (['Measure'], {}), '(Measure)\n', (11551, 11560), False, 'from projectq.ops import All, Allocate, BasicGate, BasicMathGate, CNOT, C, Command, H, Measure, QubitOperator, Rx, Ry, Rz, S, TimeEvolution, Toffoli, X, Y, Z, Swap, SqrtSwap, UniformlyControlledRy, UniformlyControlledRz\n'), ((11634, 11650), 'projectq.cengines.LocalOptimizer', 'LocalOptimizer', ([], {}), '()\n', (11648, 11650), False, 'from projectq.cengines import BasicEngine, BasicMapperEngine, DummyEngine, LocalOptimizer, NotYetMeasuredError\n'), ((11804, 11810), 'projectq.ops.All', 'All', (['X'], {}), '(X)\n', (11807, 11810), False, 'from projectq.ops import All, Allocate, BasicGate, BasicMathGate, CNOT, C, Command, H, Measure, QubitOperator, Rx, Ry, Rz, S, TimeEvolution, Toffoli, X, Y, Z, Swap, SqrtSwap, UniformlyControlledRy, UniformlyControlledRz\n'), ((11824, 11830), 'projectq.ops.All', 'All', (['H'], {}), '(H)\n', (11827, 11830), False, 'from projectq.ops import All, Allocate, BasicGate, BasicMathGate, CNOT, C, Command, H, Measure, QubitOperator, Rx, Ry, Rz, S, TimeEvolution, Toffoli, X, Y, Z, Swap, SqrtSwap, UniformlyControlledRy, UniformlyControlledRz\n'), ((12029, 12053), 'pytest.approx', 'pytest.approx', (['(1.0 / 8.0)'], {}), '(1.0 / 8.0)\n', (12042, 12053), False, 'import pytest\n'), ((12236, 12255), 'pytest.approx', 'pytest.approx', (['polR'], {}), '(polR)\n', (12249, 12255), False, 'import pytest\n'), ((12290, 12311), 'pytest.approx', 'pytest.approx', (['polPhi'], {}), '(polPhi)\n', (12303, 12311), False, 'import pytest\n'), ((12496, 12515), 'pytest.approx', 'pytest.approx', (['polR'], {}), '(polR)\n', (12509, 12515), False, 'import pytest\n'), ((12550, 12571), 'pytest.approx', 'pytest.approx', (['polPhi'], {}), '(polPhi)\n', (12563, 12571), False, 'import pytest\n'), ((12576, 12582), 'projectq.ops.All', 'All', (['H'], {}), '(H)\n', (12579, 12582), False, 'from projectq.ops import All, Allocate, BasicGate, BasicMathGate, CNOT, C, Command, H, Measure, QubitOperator, Rx, Ry, Rz, S, TimeEvolution, Toffoli, X, Y, Z, Swap, SqrtSwap, UniformlyControlledRy, UniformlyControlledRz\n'), ((12596, 12602), 'projectq.ops.All', 'All', (['X'], {}), '(X)\n', (12599, 12602), False, 'from projectq.ops import All, Allocate, BasicGate, BasicMathGate, CNOT, C, Command, H, Measure, 
QubitOperator, Rx, Ry, Rz, S, TimeEvolution, Toffoli, X, Y, Z, Swap, SqrtSwap, UniformlyControlledRy, UniformlyControlledRz\n'), ((12777, 12795), 'pytest.approx', 'pytest.approx', (['(0.3)'], {}), '(0.3)\n', (12790, 12795), False, 'import pytest\n'), ((12952, 12964), 'projectq.ops.All', 'All', (['Measure'], {}), '(Measure)\n', (12955, 12964), False, 'from projectq.ops import All, Allocate, BasicGate, BasicMathGate, CNOT, C, Command, H, Measure, QubitOperator, Rx, Ry, Rz, S, TimeEvolution, Toffoli, X, Y, Z, Swap, SqrtSwap, UniformlyControlledRy, UniformlyControlledRz\n'), ((13031, 13058), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {}), '(RuntimeError)\n', (13044, 13058), False, 'import pytest\n'), ((13159, 13186), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {}), '(RuntimeError)\n', (13172, 13186), False, 'import pytest\n'), ((13351, 13378), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {}), '(RuntimeError)\n', (13364, 13378), False, 'import pytest\n'), ((13499, 13515), 'projectq.cengines.LocalOptimizer', 'LocalOptimizer', ([], {}), '()\n', (13513, 13515), False, 'from projectq.cengines import BasicEngine, BasicMapperEngine, DummyEngine, LocalOptimizer, NotYetMeasuredError\n'), ((13683, 13697), 'math.sqrt', 'math.sqrt', (['(0.2)'], {}), '(0.2)\n', (13692, 13697), False, 'import math\n'), ((13699, 13713), 'math.sqrt', 'math.sqrt', (['(0.8)'], {}), '(0.8)\n', (13708, 13713), False, 'import math\n'), ((13724, 13751), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {}), '(RuntimeError)\n', (13737, 13751), False, 'import pytest\n'), ((14097, 14109), 'projectq.ops.All', 'All', (['Measure'], {}), '(Measure)\n', (14100, 14109), False, 'from projectq.ops import All, Allocate, BasicGate, BasicMathGate, CNOT, C, Command, H, Measure, QubitOperator, Rx, Ry, Rz, S, TimeEvolution, Toffoli, X, Y, Z, Swap, SqrtSwap, UniformlyControlledRy, UniformlyControlledRz\n'), ((14606, 14622), 'projectq.cengines.LocalOptimizer', 'LocalOptimizer', ([], {}), '()\n', (14620, 14622), False, 'from projectq.cengines import BasicEngine, BasicMapperEngine, DummyEngine, LocalOptimizer, NotYetMeasuredError\n'), ((14810, 14837), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {}), '(RuntimeError)\n', (14823, 14837), False, 'import pytest\n'), ((15050, 15056), 'projectq.ops.All', 'All', (['H'], {}), '(H)\n', (15053, 15056), False, 'from projectq.ops import All, Allocate, BasicGate, BasicMathGate, CNOT, C, Command, H, Measure, QubitOperator, Rx, Ry, Rz, S, TimeEvolution, Toffoli, X, Y, Z, Swap, SqrtSwap, UniformlyControlledRy, UniformlyControlledRz\n'), ((15207, 15234), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {}), '(RuntimeError)\n', (15220, 15234), False, 'import pytest\n'), ((15457, 15475), 'pytest.approx', 'pytest.approx', (['(0.5)'], {}), '(0.5)\n', (15470, 15475), False, 'import pytest\n'), ((15751, 15769), 'pytest.approx', 'pytest.approx', (['(1.0)'], {}), '(1.0)\n', (15764, 15769), False, 'import pytest\n'), ((15905, 15932), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {}), '(RuntimeError)\n', (15918, 15932), False, 'import pytest\n'), ((17182, 17208), 'projectq.meta.Control', 'Control', (['eng', 'qubits[0:-1]'], {}), '(eng, qubits[0:-1])\n', (17189, 17208), False, 'from projectq.meta import Compute, Uncompute, Control, Dagger, LogicalQubitIDTag\n'), ((17242, 17268), 'projectq.meta.Control', 'Control', (['eng', 'qubits[0:-2]'], {}), '(eng, qubits[0:-2])\n', (17249, 17268), False, 'from projectq.meta import Compute, Uncompute, Control, Dagger, LogicalQubitIDTag\n'), 
((17302, 17328), 'projectq.meta.Control', 'Control', (['eng', 'qubits[0:-3]'], {}), '(eng, qubits[0:-3])\n', (17309, 17328), False, 'from projectq.meta import Compute, Uncompute, Control, Dagger, LogicalQubitIDTag\n'), ((17645, 17657), 'projectq.ops.All', 'All', (['Measure'], {}), '(Measure)\n', (17648, 17657), False, 'from projectq.ops import All, Allocate, BasicGate, BasicMathGate, CNOT, C, Command, H, Measure, QubitOperator, Rx, Ry, Rz, S, TimeEvolution, Toffoli, X, Y, Z, Swap, SqrtSwap, UniformlyControlledRy, UniformlyControlledRz\n'), ((18726, 18740), 'projectq.meta.Uncompute', 'Uncompute', (['eng'], {}), '(eng)\n', (18735, 18740), False, 'from projectq.meta import Compute, Uncompute, Control, Dagger, LogicalQubitIDTag\n'), ((20502, 20514), 'projectq.ops.All', 'All', (['Measure'], {}), '(Measure)\n', (20505, 20514), False, 'from projectq.ops import All, Allocate, BasicGate, BasicMathGate, CNOT, C, Command, H, Measure, QubitOperator, Rx, Ry, Rz, S, TimeEvolution, Toffoli, X, Y, Z, Swap, SqrtSwap, UniformlyControlledRy, UniformlyControlledRz\n'), ((20547, 20559), 'projectq.ops.All', 'All', (['Measure'], {}), '(Measure)\n', (20550, 20559), False, 'from projectq.ops import All, Allocate, BasicGate, BasicMathGate, CNOT, C, Command, H, Measure, QubitOperator, Rx, Ry, Rz, S, TimeEvolution, Toffoli, X, Y, Z, Swap, SqrtSwap, UniformlyControlledRy, UniformlyControlledRz\n'), ((20861, 20885), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (20874, 20885), False, 'import pytest\n'), ((21238, 21256), 'pytest.approx', 'pytest.approx', (['(0.0)'], {}), '(0.0)\n', (21251, 21256), False, 'import pytest\n'), ((21344, 21362), 'pytest.approx', 'pytest.approx', (['(1.0)'], {}), '(1.0)\n', (21357, 21362), False, 'import pytest\n'), ((21578, 21602), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (21591, 21602), False, 'import pytest\n'), ((21914, 21960), 'pytest.approx', 'pytest.approx', (['(1)'], {'rel': 'tolerance', 'abs': 'tolerance'}), '(1, rel=tolerance, abs=tolerance)\n', (21927, 21960), False, 'import pytest\n'), ((22178, 22225), 'pytest.approx', 'pytest.approx', (['(-1)'], {'rel': 'tolerance', 'abs': 'tolerance'}), '(-1, rel=tolerance, abs=tolerance)\n', (22191, 22225), False, 'import pytest\n'), ((22482, 22528), 'pytest.approx', 'pytest.approx', (['(1)'], {'rel': 'tolerance', 'abs': 'tolerance'}), '(1, rel=tolerance, abs=tolerance)\n', (22495, 22528), False, 'import pytest\n'), ((22747, 22794), 'pytest.approx', 'pytest.approx', (['(-1)'], {'rel': 'tolerance', 'abs': 'tolerance'}), '(-1, rel=tolerance, abs=tolerance)\n', (22760, 22794), False, 'import pytest\n'), ((23020, 23066), 'pytest.approx', 'pytest.approx', (['(1)'], {'rel': 'tolerance', 'abs': 'tolerance'}), '(1, rel=tolerance, abs=tolerance)\n', (23033, 23066), False, 'import pytest\n'), ((23253, 23300), 'pytest.approx', 'pytest.approx', (['(-1)'], {'rel': 'tolerance', 'abs': 'tolerance'}), '(-1, rel=tolerance, abs=tolerance)\n', (23266, 23300), False, 'import pytest\n'), ((23529, 23578), 'pytest.approx', 'pytest.approx', (['(0.25)'], {'rel': 'tolerance', 'abs': 'tolerance'}), '(0.25, rel=tolerance, abs=tolerance)\n', (23542, 23578), False, 'import pytest\n'), ((23765, 23815), 'pytest.approx', 'pytest.approx', (['(-0.25)'], {'rel': 'tolerance', 'abs': 'tolerance'}), '(-0.25, rel=tolerance, abs=tolerance)\n', (23778, 23815), False, 'import pytest\n'), ((24044, 24090), 'pytest.approx', 'pytest.approx', (['(1)'], {'rel': 'tolerance', 'abs': 'tolerance'}), '(1, rel=tolerance, abs=tolerance)\n', 
(24057, 24090), False, 'import pytest\n'), ((24197, 24244), 'pytest.approx', 'pytest.approx', (['(-1)'], {'rel': 'tolerance', 'abs': 'tolerance'}), '(-1, rel=tolerance, abs=tolerance)\n', (24210, 24244), False, 'import pytest\n'), ((24351, 24397), 'pytest.approx', 'pytest.approx', (['(1)'], {'rel': 'tolerance', 'abs': 'tolerance'}), '(1, rel=tolerance, abs=tolerance)\n', (24364, 24397), False, 'import pytest\n'), ((24504, 24551), 'pytest.approx', 'pytest.approx', (['(-1)'], {'rel': 'tolerance', 'abs': 'tolerance'}), '(-1, rel=tolerance, abs=tolerance)\n', (24517, 24551), False, 'import pytest\n'), ((2614, 2625), 'projectq.backends.Simulator', 'Simulator', ([], {}), '()\n', (2623, 2625), False, 'from projectq.backends import Simulator\n'), ((2651, 2669), 'projectq.backends._qracksim._qracksim.QrackSimulator', 'QrackSim', (['(1)', '(-1)', '(2)'], {}), '(1, -1, 2)\n', (2659, 2669), True, 'from projectq.backends._qracksim._qracksim import QrackSimulator as QrackSim\n'), ((7358, 7366), 'projectq.ops.Ry', 'Ry', (['(-0.1)'], {}), '(-0.1)\n', (7360, 7366), False, 'from projectq.ops import All, Allocate, BasicGate, BasicMathGate, CNOT, C, Command, H, Measure, QubitOperator, Rx, Ry, Rz, S, TimeEvolution, Toffoli, X, Y, Z, Swap, SqrtSwap, UniformlyControlledRy, UniformlyControlledRz\n'), ((7413, 7431), 'numpy.kron', 'numpy.kron', (['m2', 'm1'], {}), '(m2, m1)\n', (7423, 7431), False, 'import numpy\n'), ((8694, 8718), 'projectq.libs.math.AddConstantModN', 'AddConstantModN', (['(10)', '(256)'], {}), '(10, 256)\n', (8709, 8718), False, 'from projectq.libs.math import AddConstant, AddConstantModN, SubConstant, SubConstantModN, MultiplyByConstantModN\n'), ((8921, 8945), 'projectq.libs.math.AddConstantModN', 'AddConstantModN', (['(10)', '(256)'], {}), '(10, 256)\n', (8936, 8945), False, 'from projectq.libs.math import AddConstant, AddConstantModN, SubConstant, SubConstantModN, MultiplyByConstantModN\n'), ((9271, 9295), 'projectq.libs.math.SubConstantModN', 'SubConstantModN', (['(10)', '(256)'], {}), '(10, 256)\n', (9286, 9295), False, 'from projectq.libs.math import AddConstant, AddConstantModN, SubConstant, SubConstantModN, MultiplyByConstantModN\n'), ((9498, 9522), 'projectq.libs.math.SubConstantModN', 'SubConstantModN', (['(10)', '(256)'], {}), '(10, 256)\n', (9513, 9522), False, 'from projectq.libs.math import AddConstant, AddConstantModN, SubConstant, SubConstantModN, MultiplyByConstantModN\n'), ((9885, 9915), 'projectq.libs.math.MultiplyByConstantModN', 'MultiplyByConstantModN', (['(2)', '(256)'], {}), '(2, 256)\n', (9907, 9915), False, 'from projectq.libs.math import AddConstant, AddConstantModN, SubConstant, SubConstantModN, MultiplyByConstantModN\n'), ((10118, 10149), 'projectq.libs.math.MultiplyByConstantModN', 'MultiplyByConstantModN', (['(10)', '(256)'], {}), '(10, 256)\n', (10140, 10149), False, 'from projectq.libs.math import AddConstant, AddConstantModN, SubConstant, SubConstantModN, MultiplyByConstantModN\n'), ((10712, 10735), 'pytest.approx', 'pytest.approx', (['(0.5 ** i)'], {}), '(0.5 ** i)\n', (10725, 10735), False, 'import pytest\n'), ((12930, 12945), 'math.sqrt', 'math.sqrt', (['(0.91)'], {}), '(0.91)\n', (12939, 12945), False, 'import math\n'), ((14477, 14496), 'pytest.approx', 'pytest.approx', (['(1.0j)'], {}), '(1.0j)\n', (14490, 14496), False, 'import pytest\n'), ((14511, 14531), 'pytest.approx', 'pytest.approx', (['(-1.0j)'], {}), '(-1.0j)\n', (14524, 14531), False, 'import pytest\n'), ((18458, 18470), 'projectq.meta.Compute', 'Compute', (['eng'], {}), '(eng)\n', (18465, 18470), 
False, 'from projectq.meta import Compute, Uncompute, Control, Dagger, LogicalQubitIDTag\n'), ((18635, 18663), 'projectq.meta.Control', 'Control', (['eng', 'control_qubits'], {}), '(eng, control_qubits)\n', (18642, 18663), False, 'from projectq.meta import Compute, Uncompute, Control, Dagger, LogicalQubitIDTag\n'), ((19127, 19138), 'projectq.backends.Simulator', 'Simulator', ([], {}), '()\n', (19136, 19138), False, 'from projectq.backends import Simulator\n'), ((20447, 20496), 'pytest.approx', 'pytest.approx', (['test'], {'rel': 'tolerance', 'abs': 'tolerance'}), '(test, rel=tolerance, abs=tolerance)\n', (20460, 20496), False, 'import pytest\n'), ((2957, 2983), 'projectq.cengines.BasicEngine.__init__', 'BasicEngine.__init__', (['self'], {}), '(self)\n', (2977, 2983), False, 'from projectq.cengines import BasicEngine, BasicMapperEngine, DummyEngine, LocalOptimizer, NotYetMeasuredError\n'), ((6931, 6951), 'projectq.meta.LogicalQubitIDTag', 'LogicalQubitIDTag', (['(2)'], {}), '(2)\n', (6948, 6951), False, 'from projectq.meta import Compute, Uncompute, Control, Dagger, LogicalQubitIDTag\n'), ((7339, 7346), 'projectq.ops.Rz', 'Rz', (['(0.9)'], {}), '(0.9)\n', (7341, 7346), False, 'from projectq.ops import All, Allocate, BasicGate, BasicMathGate, CNOT, C, Command, H, Measure, QubitOperator, Rx, Ry, Rz, S, TimeEvolution, Toffoli, X, Y, Z, Swap, SqrtSwap, UniformlyControlledRy, UniformlyControlledRz\n'), ((12623, 12637), 'math.acos', 'math.acos', (['(0.3)'], {}), '(0.3)\n', (12632, 12637), False, 'import math\n'), ((21742, 21754), 'math.sqrt', 'math.sqrt', (['(2)'], {}), '(2)\n', (21751, 21754), False, 'import math\n'), ((21760, 21772), 'math.sqrt', 'math.sqrt', (['(2)'], {}), '(2)\n', (21769, 21772), False, 'import math\n'), ((22005, 22017), 'math.sqrt', 'math.sqrt', (['(2)'], {}), '(2)\n', (22014, 22017), False, 'import math\n'), ((22024, 22036), 'math.sqrt', 'math.sqrt', (['(2)'], {}), '(2)\n', (22033, 22036), False, 'import math\n'), ((22309, 22321), 'math.sqrt', 'math.sqrt', (['(2)'], {}), '(2)\n', (22318, 22321), False, 'import math\n'), ((22328, 22340), 'math.sqrt', 'math.sqrt', (['(2)'], {}), '(2)\n', (22337, 22340), False, 'import math\n'), ((22573, 22585), 'math.sqrt', 'math.sqrt', (['(2)'], {}), '(2)\n', (22582, 22585), False, 'import math\n'), ((22593, 22605), 'math.sqrt', 'math.sqrt', (['(2)'], {}), '(2)\n', (22602, 22605), False, 'import math\n'), ((10927, 10941), 'math.sqrt', 'math.sqrt', (['(0.3)'], {}), '(0.3)\n', (10936, 10941), False, 'import math\n'), ((11072, 11086), 'math.sqrt', 'math.sqrt', (['(0.4)'], {}), '(0.4)\n', (11081, 11086), False, 'import math\n')] |
# from wx.lib.pubsub import pub
from pubsub import pub
import serial
import threading
import queue
import time
class ComReaderThread(threading.Thread):
    '''
    Thread that continuously reads from the serial connection.
    Each successful read is published on the 'serial.data' pubsub topic as a
    (timestamp, data) tuple; serial errors are put on the error queue as
    (timestamp, message) tuples while the thread tries to reconnect.
    '''
def __init__(self, ser, error_que):
threading.Thread.__init__(self)
self.ser = ser
self.error_que = error_que
self.alive = threading.Event()
self.alive.set()
def run(self):
        while self.alive.is_set():
try:
if self.ser.in_waiting > 0:
timestamp = time.time()
data = self.ser.read(self.ser.in_waiting)
pub.sendMessage('serial.data', data=(timestamp, data))
except serial.SerialException as e:
reconnected = False
print('Serial connection lost, trying to reconnect.')
ts = time.time()
self.error_que.put((ts, str(e)))
                while not reconnected and self.alive.is_set():
try:
# if ser still thinks it's open close it
if self.ser.is_open:
self.ser.close()
self.ser.open()
except Exception as e:
# if reconnection failed let some time pass
time.sleep(0.1)
else:
reconnected = True
print('Reconnected')
def stop(self, timeout=0.5):
self.alive.clear()
threading.Thread.join(self, timeout)
| [
"threading.Thread.__init__",
"pubsub.pub.sendMessage",
"time.sleep",
"threading.Event",
"threading.Thread.join",
"time.time"
]
| [((347, 378), 'threading.Thread.__init__', 'threading.Thread.__init__', (['self'], {}), '(self)\n', (372, 378), False, 'import threading\n'), ((459, 476), 'threading.Event', 'threading.Event', ([], {}), '()\n', (474, 476), False, 'import threading\n'), ((1656, 1692), 'threading.Thread.join', 'threading.Thread.join', (['self', 'timeout'], {}), '(self, timeout)\n', (1677, 1692), False, 'import threading\n'), ((649, 660), 'time.time', 'time.time', ([], {}), '()\n', (658, 660), False, 'import time\n'), ((745, 799), 'pubsub.pub.sendMessage', 'pub.sendMessage', (['"""serial.data"""'], {'data': '(timestamp, data)'}), "('serial.data', data=(timestamp, data))\n", (760, 799), False, 'from pubsub import pub\n'), ((976, 987), 'time.time', 'time.time', ([], {}), '()\n', (985, 987), False, 'import time\n'), ((1456, 1471), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (1466, 1471), False, 'import time\n')] |
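A minimal usage sketch for the ComReaderThread record above, reusing the class exactly as defined there; the serial port name and baud rate are placeholders, and the listener simply prints whatever arrives on the 'serial.data' topic that the thread publishes to.

import queue
import serial
from pubsub import pub

def on_serial_data(data):
    # 'data' is the (timestamp, bytes) tuple published by ComReaderThread.run()
    timestamp, payload = data
    print(timestamp, payload)

pub.subscribe(on_serial_data, 'serial.data')

error_que = queue.Queue()  # receives (timestamp, error message) tuples when the connection drops
ser = serial.Serial('/dev/ttyUSB0', 115200, timeout=0)  # placeholder port and baud rate
reader = ComReaderThread(ser, error_que)
reader.start()
# ... run the application / GUI main loop ...
reader.stop()
ser.close()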
from unittest.mock import patch
import pytest
from just_bin_it.endpoints.sources import HistogramSource
from tests.doubles.consumer import StubConsumer
TEST_MESSAGE = b"this is a byte message"
INVALID_FB = b"this is an invalid fb message"
class TestHistogramSource:
@pytest.fixture(autouse=True)
def prepare(self):
pass
def test_if_no_consumer_supplied_then_raises(self):
with pytest.raises(Exception):
HistogramSource(None)
def test_if_no_new_messages_then_no_data(self):
mock_consumer = StubConsumer(["broker1"], ["topic1"])
mock_consumer.add_messages([])
hs = HistogramSource(mock_consumer)
data = hs.get_new_data()
assert len(data) == 0
@patch("just_bin_it.endpoints.sources.deserialise_hs00", return_value=TEST_MESSAGE)
def test_if_five_new_messages_on_one_topic_then_data_has_five_items(
self, mock_method
):
mock_consumer = StubConsumer(["broker1"], ["topic1"])
mock_consumer.add_messages([TEST_MESSAGE] * 5)
hs = HistogramSource(mock_consumer)
data = hs.get_new_data()
_, _, message = data[0]
assert len(data) == 5
assert message == TEST_MESSAGE
def test_deserialising_invalid_fb_does_not_throw(self):
mock_consumer = StubConsumer(["broker1"], ["topic1"])
mock_consumer.add_messages([INVALID_FB])
hs = HistogramSource(mock_consumer)
hs.get_new_data()
| [
"tests.doubles.consumer.StubConsumer",
"pytest.raises",
"pytest.fixture",
"just_bin_it.endpoints.sources.HistogramSource",
"unittest.mock.patch"
]
| [((276, 304), 'pytest.fixture', 'pytest.fixture', ([], {'autouse': '(True)'}), '(autouse=True)\n', (290, 304), False, 'import pytest\n'), ((738, 825), 'unittest.mock.patch', 'patch', (['"""just_bin_it.endpoints.sources.deserialise_hs00"""'], {'return_value': 'TEST_MESSAGE'}), "('just_bin_it.endpoints.sources.deserialise_hs00', return_value=\n TEST_MESSAGE)\n", (743, 825), False, 'from unittest.mock import patch\n'), ((548, 585), 'tests.doubles.consumer.StubConsumer', 'StubConsumer', (["['broker1']", "['topic1']"], {}), "(['broker1'], ['topic1'])\n", (560, 585), False, 'from tests.doubles.consumer import StubConsumer\n'), ((638, 668), 'just_bin_it.endpoints.sources.HistogramSource', 'HistogramSource', (['mock_consumer'], {}), '(mock_consumer)\n', (653, 668), False, 'from just_bin_it.endpoints.sources import HistogramSource\n'), ((951, 988), 'tests.doubles.consumer.StubConsumer', 'StubConsumer', (["['broker1']", "['topic1']"], {}), "(['broker1'], ['topic1'])\n", (963, 988), False, 'from tests.doubles.consumer import StubConsumer\n'), ((1057, 1087), 'just_bin_it.endpoints.sources.HistogramSource', 'HistogramSource', (['mock_consumer'], {}), '(mock_consumer)\n', (1072, 1087), False, 'from just_bin_it.endpoints.sources import HistogramSource\n'), ((1309, 1346), 'tests.doubles.consumer.StubConsumer', 'StubConsumer', (["['broker1']", "['topic1']"], {}), "(['broker1'], ['topic1'])\n", (1321, 1346), False, 'from tests.doubles.consumer import StubConsumer\n'), ((1409, 1439), 'just_bin_it.endpoints.sources.HistogramSource', 'HistogramSource', (['mock_consumer'], {}), '(mock_consumer)\n', (1424, 1439), False, 'from just_bin_it.endpoints.sources import HistogramSource\n'), ((411, 435), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (424, 435), False, 'import pytest\n'), ((449, 470), 'just_bin_it.endpoints.sources.HistogramSource', 'HistogramSource', (['None'], {}), '(None)\n', (464, 470), False, 'from just_bin_it.endpoints.sources import HistogramSource\n')] |
#!/usr/bin/env python
"""
Copyright 2010-2019 University Of Southern California
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import division, print_function
# Import Python modules
import os
import sys
import math
import shutil
# Import Broadband modules
import plot_srf
import bband_utils
from irikura_gen_srf_cfg import IrikuraGenSrfCfg
from install_cfg import InstallCfg
class IrikuraGenSrf(object):
"""
Implements Arben's gen_srf.csh script in Python
"""
def __init__(self, i_r_velmodel, i_r_srcfile,
o_r_srffile, i_vmodel_name, sim_id=0,
**kwargs):
self.sim_id = sim_id
self.r_velmodel = i_r_velmodel
self.r_srcfile = i_r_srcfile
self.r_srffile = o_r_srffile
self.vmodel_name = i_vmodel_name
self.r_srcfiles = []
# Get all src files that were passed to us
if kwargs is not None and len(kwargs) > 0:
for idx in range(len(kwargs)):
self.r_srcfiles.append(kwargs['src%d' % (idx)])
else:
# Not a multisegment run, just use the single src file
self.r_srcfiles.append(i_r_srcfile)
def run(self):
"""
        This function prepares the parameters for Irikura's gen_srf and then calls it.
"""
print("IrikuraGenSrf".center(80, '-'))
# Load configuration, set sim_id
install = InstallCfg.getInstance()
sim_id = self.sim_id
# Build directory paths
a_tmpdir = os.path.join(install.A_TMP_DATA_DIR, str(sim_id))
a_indir = os.path.join(install.A_IN_DATA_DIR, str(sim_id))
a_outdir = os.path.join(install.A_OUT_DATA_DIR, str(sim_id))
a_logdir = os.path.join(install.A_OUT_LOG_DIR, str(sim_id))
a_param_outdir = os.path.join(a_outdir, "param_files")
# Make sure the output and tmp directories exist
bband_utils.mkdirs([a_tmpdir, a_indir, a_outdir,
a_logdir, a_param_outdir])
# Now, file paths
self.log = os.path.join(a_logdir, "%d.gen_srf.log" % (sim_id))
a_srcfiles = [os.path.join(a_indir,
srcfile) for srcfile in self.r_srcfiles]
# Read src file
cfg = IrikuraGenSrfCfg(a_srcfiles)
# Define location of input velocity model and output srf file
if cfg.num_srcfiles > 1:
a_srffile = os.path.join(a_tmpdir, self.r_srffile)
a_final_srffile = os.path.join(a_indir, self.r_srffile)
else:
a_srffile = os.path.join(a_indir, self.r_srffile)
a_velmod = os.path.join(install.A_IN_DATA_DIR, str(sim_id),
self.r_velmodel)
# Run in tmpdir subdir to isolate temp fortran files
# Save cwd, change back to it at the end
old_cwd = os.getcwd()
os.chdir(a_tmpdir)
# Read parameters from the src(s) file(s)
# The following parameters should be common to all SRC files
# So we just read from the first one
simulation_seed = int(cfg.CFGDICT[0]['seed'])
dip = cfg.CFGDICT[0]['dip']
rake = cfg.CFGDICT[0]['rake']
dlen = cfg.CFGDICT[0]['dlen']
dwid = cfg.CFGDICT[0]['dwid']
lon_top_center = cfg.CFGDICT[0]['lon_top_center']
lat_top_center = cfg.CFGDICT[0]['lat_top_center']
depth_to_top = cfg.CFGDICT[0]['depth_to_top']
if cfg.num_srcfiles > 1:
fault_len = cfg.CFGDICT[0]['max_fault_length']
else:
fault_len = cfg.CFGDICT[0]['fault_length']
fault_width = cfg.CFGDICT[0]['fault_width']
# Average strike of all SRC files
strike = 0.0
for segment in range(cfg.num_srcfiles):
strike = strike + cfg.CFGDICT[segment]['strike']
strike = math.ceil(strike / cfg.num_srcfiles)
# Hypocenter (down_dip is common to all src files)
hypo_down_dip = cfg.CFGDICT[0]['hypo_down_dip']
if cfg.num_srcfiles > 1:
hypo_along_stk = 0.0
for segment in range(cfg.num_srcfiles):
current_fault_len = cfg.CFGDICT[segment]['fault_length']
current_hypo_along_stk = cfg.CFGDICT[segment]['hypo_along_stk']
if abs(current_hypo_along_stk) <= current_fault_len:
# Hypocenter in this segment!
hypo_along_stk = hypo_along_stk + (current_fault_len / 2.0) + current_hypo_along_stk
break
else:
# Not here yet, just add the total length of this segment
hypo_along_stk = hypo_along_stk + current_fault_len
# Now convert hypo_along_stk so that 0.0 is the middle of the fault
hypo_along_stk = hypo_along_stk - (fault_len / 2.0)
else:
hypo_along_stk = cfg.CFGDICT[0]['hypo_along_stk']
#
# Run gen_srf code
#
progstring = ("%s >> %s 2>&1 << END\n" %
(os.path.join(install.A_IRIKURA_BIN_DIR, cfg.GENSRF),
self.log) +
"%s\n" % a_srffile +
"%f %f %f %f %f\n" %
(fault_len, fault_width,
strike, dip, rake) +
"%f %f %f\n" %
(lon_top_center, lat_top_center, depth_to_top) +
"%f %f\n" % (dlen, dwid) +
"%f %f %f %f\n" %
(hypo_along_stk, hypo_down_dip,
cfg.DENS, cfg.VS) +
"%f\n" % (cfg.DT) +
"%d\n" % (simulation_seed) +
"%s\n" % (a_velmod) +
"%f\n" % (cfg.VEL_RUP_FRAC) +
"END")
bband_utils.runprog(progstring)
if cfg.num_srcfiles > 1:
# Assign the slip from the planar fault to each segment's SRF file
a_segs_file = os.path.join(a_tmpdir, "segments.midpoint.txt")
# Write segments' file
seg_file = open(a_segs_file, 'w')
seg_file.write("segm lon lat depth fleng fwidth shypo zhypo strike dip rake\n")
seg_file.write("%d\n" % (cfg.num_srcfiles))
total_length = 0.0
for segment in range(cfg.num_srcfiles):
if abs(cfg.CFGDICT[segment]['hypo_along_stk']) <= cfg.CFGDICT[segment]['fault_length']:
hypo_along_stk = cfg.CFGDICT[segment]['hypo_along_stk']
hypo_down_dip = cfg.CFGDICT[segment]['hypo_down_dip']
else:
hypo_along_stk = 999.0
hypo_down_dip = 999.0
seg_file.write("seg%d %.6f %.6f %.1f %.1f %.1f %.1f %.1f %.1f %d %d %d\n" %
(segment + 1,
cfg.CFGDICT[segment]['lon_top_center'],
cfg.CFGDICT[segment]['lat_top_center'],
cfg.CFGDICT[segment]['depth_to_top'],
total_length,
(total_length + cfg.CFGDICT[segment]['fault_length']),
cfg.CFGDICT[segment]['fault_width'],
hypo_along_stk, hypo_down_dip,
cfg.CFGDICT[segment]['strike'],
cfg.CFGDICT[segment]['dip'],
cfg.CFGDICT[segment]['rake']))
total_length = total_length + cfg.CFGDICT[segment]['fault_length']
seg_file.close()
#
# Run gen_srf_segment code
#
for segment in range(cfg.num_srcfiles):
progstring = ("%s >> %s 2>&1 << END\n" %
(os.path.join(install.A_IRIKURA_BIN_DIR,
cfg.GENSRFSEGMENT), self.log) +
".\n" +
"%s\n" % (self.r_srffile) +
"./segments.midpoint.txt\n" +
"%d\n" % (segment + 1) +
"%f %f\n" % (dlen, dwid) +
"END")
# Run code
bband_utils.runprog(progstring)
#
# Now add the segments together
#
progstring = ("%s >> %s 2>&1 << END\n" %
(os.path.join(install.A_IRIKURA_BIN_DIR,
cfg.SUMSEG), self.log) +
".\n" +
"%s\n" % (self.r_srffile) +
"./segments.midpoint.txt\n" +
"%d\n" % (cfg.num_srcfiles) +
"%f %f\n" % (dlen, dwid) +
"END")
# Run code
bband_utils.runprog(progstring)
# Copy file to final location
progstring = "cp %s %s" % (os.path.join(a_tmpdir,
"all_seg.%s" %
(self.r_srffile)),
a_final_srffile)
bband_utils.runprog(progstring)
# Use copied file from now on
a_srffile = a_final_srffile
# Restore working directory
os.chdir(old_cwd)
#
# Move results to outputfile
#
progstring = "cp %s %s" % (a_srffile,
os.path.join(a_tmpdir, self.r_srffile))
bband_utils.runprog(progstring)
progstring = "cp %s %s" % (a_srffile,
os.path.join(a_outdir, self.r_srffile))
bband_utils.runprog(progstring)
shutil.copy2(os.path.join(a_tmpdir, "stress_drop.out"),
os.path.join(a_param_outdir,
"stress_drop.out"))
# Plot SRF
plot_srf.run(self.r_srffile, sim_id=self.sim_id)
print("IrikuraGenSrf Completed".center(80, '-'))
if __name__ == "__main__":
print("Testing Module: %s" % os.path.basename((sys.argv[0])))
ME = IrikuraGenSrf(sys.argv[1], sys.argv[2], sys.argv[3],
sys.argv[4], sim_id=int(sys.argv[5]))
ME.run()
| [
"math.ceil",
"bband_utils.runprog",
"os.path.join",
"os.getcwd",
"os.chdir",
"os.path.basename",
"bband_utils.mkdirs",
"plot_srf.run",
"install_cfg.InstallCfg.getInstance",
"irikura_gen_srf_cfg.IrikuraGenSrfCfg"
]
| [((1889, 1913), 'install_cfg.InstallCfg.getInstance', 'InstallCfg.getInstance', ([], {}), '()\n', (1911, 1913), False, 'from install_cfg import InstallCfg\n'), ((2274, 2311), 'os.path.join', 'os.path.join', (['a_outdir', '"""param_files"""'], {}), "(a_outdir, 'param_files')\n", (2286, 2311), False, 'import os\n'), ((2378, 2453), 'bband_utils.mkdirs', 'bband_utils.mkdirs', (['[a_tmpdir, a_indir, a_outdir, a_logdir, a_param_outdir]'], {}), '([a_tmpdir, a_indir, a_outdir, a_logdir, a_param_outdir])\n', (2396, 2453), False, 'import bband_utils\n'), ((2528, 2577), 'os.path.join', 'os.path.join', (['a_logdir', "('%d.gen_srf.log' % sim_id)"], {}), "(a_logdir, '%d.gen_srf.log' % sim_id)\n", (2540, 2577), False, 'import os\n'), ((2739, 2767), 'irikura_gen_srf_cfg.IrikuraGenSrfCfg', 'IrikuraGenSrfCfg', (['a_srcfiles'], {}), '(a_srcfiles)\n', (2755, 2767), False, 'from irikura_gen_srf_cfg import IrikuraGenSrfCfg\n'), ((3325, 3336), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (3334, 3336), False, 'import os\n'), ((3345, 3363), 'os.chdir', 'os.chdir', (['a_tmpdir'], {}), '(a_tmpdir)\n', (3353, 3363), False, 'import os\n'), ((4305, 4341), 'math.ceil', 'math.ceil', (['(strike / cfg.num_srcfiles)'], {}), '(strike / cfg.num_srcfiles)\n', (4314, 4341), False, 'import math\n'), ((6274, 6305), 'bband_utils.runprog', 'bband_utils.runprog', (['progstring'], {}), '(progstring)\n', (6293, 6305), False, 'import bband_utils\n'), ((9885, 9902), 'os.chdir', 'os.chdir', (['old_cwd'], {}), '(old_cwd)\n', (9893, 9902), False, 'import os\n'), ((10090, 10121), 'bband_utils.runprog', 'bband_utils.runprog', (['progstring'], {}), '(progstring)\n', (10109, 10121), False, 'import bband_utils\n'), ((10251, 10282), 'bband_utils.runprog', 'bband_utils.runprog', (['progstring'], {}), '(progstring)\n', (10270, 10282), False, 'import bband_utils\n'), ((10480, 10528), 'plot_srf.run', 'plot_srf.run', (['self.r_srffile'], {'sim_id': 'self.sim_id'}), '(self.r_srffile, sim_id=self.sim_id)\n', (10492, 10528), False, 'import plot_srf\n'), ((2602, 2632), 'os.path.join', 'os.path.join', (['a_indir', 'srcfile'], {}), '(a_indir, srcfile)\n', (2614, 2632), False, 'import os\n'), ((2896, 2934), 'os.path.join', 'os.path.join', (['a_tmpdir', 'self.r_srffile'], {}), '(a_tmpdir, self.r_srffile)\n', (2908, 2934), False, 'import os\n'), ((2965, 3002), 'os.path.join', 'os.path.join', (['a_indir', 'self.r_srffile'], {}), '(a_indir, self.r_srffile)\n', (2977, 3002), False, 'import os\n'), ((3041, 3078), 'os.path.join', 'os.path.join', (['a_indir', 'self.r_srffile'], {}), '(a_indir, self.r_srffile)\n', (3053, 3078), False, 'import os\n'), ((6445, 6492), 'os.path.join', 'os.path.join', (['a_tmpdir', '"""segments.midpoint.txt"""'], {}), "(a_tmpdir, 'segments.midpoint.txt')\n", (6457, 6492), False, 'import os\n'), ((9382, 9413), 'bband_utils.runprog', 'bband_utils.runprog', (['progstring'], {}), '(progstring)\n', (9401, 9413), False, 'import bband_utils\n'), ((9725, 9756), 'bband_utils.runprog', 'bband_utils.runprog', (['progstring'], {}), '(progstring)\n', (9744, 9756), False, 'import bband_utils\n'), ((10304, 10345), 'os.path.join', 'os.path.join', (['a_tmpdir', '"""stress_drop.out"""'], {}), "(a_tmpdir, 'stress_drop.out')\n", (10316, 10345), False, 'import os\n'), ((10368, 10415), 'os.path.join', 'os.path.join', (['a_param_outdir', '"""stress_drop.out"""'], {}), "(a_param_outdir, 'stress_drop.out')\n", (10380, 10415), False, 'import os\n'), ((10648, 10677), 'os.path.basename', 'os.path.basename', (['sys.argv[0]'], {}), '(sys.argv[0])\n', (10664, 10677), 
False, 'import os\n'), ((8770, 8801), 'bband_utils.runprog', 'bband_utils.runprog', (['progstring'], {}), '(progstring)\n', (8789, 8801), False, 'import bband_utils\n'), ((10042, 10080), 'os.path.join', 'os.path.join', (['a_tmpdir', 'self.r_srffile'], {}), '(a_tmpdir, self.r_srffile)\n', (10054, 10080), False, 'import os\n'), ((10203, 10241), 'os.path.join', 'os.path.join', (['a_outdir', 'self.r_srffile'], {}), '(a_outdir, self.r_srffile)\n', (10215, 10241), False, 'import os\n'), ((9496, 9549), 'os.path.join', 'os.path.join', (['a_tmpdir', "('all_seg.%s' % self.r_srffile)"], {}), "(a_tmpdir, 'all_seg.%s' % self.r_srffile)\n", (9508, 9549), False, 'import os\n'), ((8955, 9006), 'os.path.join', 'os.path.join', (['install.A_IRIKURA_BIN_DIR', 'cfg.SUMSEG'], {}), '(install.A_IRIKURA_BIN_DIR, cfg.SUMSEG)\n', (8967, 9006), False, 'import os\n'), ((8305, 8363), 'os.path.join', 'os.path.join', (['install.A_IRIKURA_BIN_DIR', 'cfg.GENSRFSEGMENT'], {}), '(install.A_IRIKURA_BIN_DIR, cfg.GENSRFSEGMENT)\n', (8317, 8363), False, 'import os\n'), ((5489, 5540), 'os.path.join', 'os.path.join', (['install.A_IRIKURA_BIN_DIR', 'cfg.GENSRF'], {}), '(install.A_IRIKURA_BIN_DIR, cfg.GENSRF)\n', (5501, 5540), False, 'import os\n')] |
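The multi-segment hypocenter bookkeeping in IrikuraGenSrf.run() above is the least obvious part of that module, so here is a small standalone sketch of the same conversion with made-up numbers; it assumes, as the segment handling above does, that segments which do not contain the hypocenter carry a sentinel offset larger than their own length (999.0), and it takes the combined fault length as the sum of the segment lengths rather than the max_fault_length value read from the SRC files.

def hypo_along_strike(segment_lengths, segment_hypo_offsets):
    """Along-strike hypocenter position measured from the middle of the whole fault.

    Each per-segment offset is measured from that segment's midpoint; segments that
    do not contain the hypocenter are expected to carry an offset larger than their
    own length (999.0 in the segments file written above).
    """
    total_length = sum(segment_lengths)
    position = 0.0
    for seg_len, seg_hypo in zip(segment_lengths, segment_hypo_offsets):
        if abs(seg_hypo) <= seg_len:
            # Hypocenter lies in this segment: segment midpoint plus the local offset.
            position += seg_len / 2.0 + seg_hypo
            break
        # Hypocenter is further along strike; skip over this whole segment.
        position += seg_len
    # Re-center so that 0.0 is the middle of the combined fault.
    return position - total_length / 2.0

# Two segments of 30 km and 20 km, hypocenter 5 km past the midpoint of the second one.
print(hypo_along_strike([30.0, 20.0], [999.0, 5.0]))  # prints 20.0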
"""test_models.py: runs tests on the models for digit."""
import pytest
from core.models import (Grade,
Subject,
Question,
Comment,
Option,
Topic,
Block,
Syllabus,
StateException,
)
from django.test import TestCase
from django.contrib.auth.models import User
class TestQuestion(TestCase):
"""Test the Question Model."""
def setUp(self):
"""Create questions for testing."""
grade_test = Grade(name="Grade Example")
grade_test.save()
subject_test = Subject(name="addition",
grade=grade_test)
subject_test.save()
question1 = Question(question_content='what is 1 + 1?',
answer_content='This is an addition question',
subject=subject_test)
question1.save()
def test_question_default_state(self):
"""Confirm that default state is Incomplete."""
question1 = Question.objects.all()[0]
assert(question1.state == question1.INCOMPLETE)
def test_question_state_from_incomplete(self):
"""Check that question state.
Confirm that state can only go from 'incomplete' to
'ready for review'.
"""
question1 = Question.objects.all()[0]
with pytest.raises(StateException) as exception_info:
question1.change_to_needs_reworking()
assert(exception_info.value.__str__() ==
"Incorrect state change. Current state is 0")
assert(question1.state == question1.INCOMPLETE)
with pytest.raises(StateException) as exception_info:
question1.change_to_complete()
assert(exception_info.value.__str__() ==
"Incorrect state change. Current state is 0")
assert(question1.state == question1.INCOMPLETE)
with pytest.raises(StateException) as exception_info:
question1.change_to_flagged()
assert(exception_info.value.__str__() ==
"Incorrect state change. Current state is 0")
assert(question1.state == question1.INCOMPLETE)
question1.change_to_review_ready()
assert(question1.state == question1.REVIEW_READY)
def test_question_state_from_ready_for_review(self):
"""Check that question state.
Confirm that state can only go from 'ready to review' to
'complete' or 'needs reworking'.
"""
question1 = Question.objects.all()[0]
question1.state = question1.REVIEW_READY
with pytest.raises(StateException) as exception_info:
question1.change_to_review_ready()
assert(exception_info.value.__str__() ==
"Incorrect state change. Current state is 1")
with pytest.raises(StateException) as exception_info:
question1.change_to_flagged()
assert(exception_info.value.__str__() ==
"Incorrect state change. Current state is 1")
assert(question1.state == question1.REVIEW_READY)
question1.change_to_complete()
assert(question1.state == question1.COMPLETE)
question1.state = question1.REVIEW_READY
question1.change_to_needs_reworking()
assert(question1.state == question1.NEEDS_REWORKING)
def test_question_state_from_needs_reworking(self):
"""Check that question state.
Confirm that state can only go from 'needs reworking' to
'ready for review'.
"""
question1 = Question.objects.all()[0]
question1.state = question1.NEEDS_REWORKING
with pytest.raises(StateException) as exception_info:
question1.change_to_needs_reworking()
assert(exception_info.value.__str__() ==
"Incorrect state change. Current state is 2")
assert(question1.state == question1.NEEDS_REWORKING)
with pytest.raises(StateException) as exception_info:
question1.change_to_complete()
assert(exception_info.value.__str__() ==
"Incorrect state change. Current state is 2")
assert(question1.state == question1.NEEDS_REWORKING)
with pytest.raises(StateException) as exception_info:
question1.change_to_flagged()
assert(exception_info.value.__str__() ==
"Incorrect state change. Current state is 2")
assert(question1.state == question1.NEEDS_REWORKING)
question1.change_to_review_ready()
assert(question1.state == question1.REVIEW_READY)
def test_question_state_from_complete(self):
"""Check that question state.
Confirm that state can only go from 'complete' to
'flagged for review'.
"""
question1 = Question.objects.all()[0]
question1.state = question1.COMPLETE
with pytest.raises(StateException) as exception_info:
question1.change_to_review_ready()
assert(exception_info.value.__str__() ==
"Incorrect state change. Current state is 3")
assert(question1.state == question1.COMPLETE)
with pytest.raises(StateException) as exception_info:
question1.change_to_complete()
assert(exception_info.value.__str__() ==
"Incorrect state change. Current state is 3")
assert(question1.state == question1.COMPLETE)
with pytest.raises(StateException) as exception_info:
question1.change_to_needs_reworking()
assert(exception_info.value.__str__() ==
"Incorrect state change. Current state is 3")
assert(question1.state == question1.COMPLETE)
question1.change_to_flagged()
assert(question1.state == question1.FLAGGED)
def test_question_state_from_flagged_for_review(self):
"""Check that question state.
Confirm that state can only go from 'flagged for review' to
'complete'.
"""
question1 = Question.objects.all()[0]
question1.state = question1.FLAGGED
with pytest.raises(StateException) as exception_info:
question1.change_to_review_ready()
assert(exception_info.value.__str__() ==
"Incorrect state change. Current state is 4")
assert(question1.state == question1.FLAGGED)
with pytest.raises(StateException) as exception_info:
question1.change_to_needs_reworking()
assert(exception_info.value.__str__() ==
"Incorrect state change. Current state is 4")
assert(question1.state == question1.FLAGGED)
with pytest.raises(StateException) as exception_info:
question1.change_to_flagged()
assert(exception_info.value.__str__() ==
"Incorrect state change. Current state is 4")
assert(question1.state == question1.FLAGGED)
question1.change_to_complete()
assert(question1.state == question1.COMPLETE)
def test_question_option_save(self):
"""Test that question cannot have option with correct answer."""
question1 = Question.objects.all()[0]
option = Option.objects.first()
option.correct = True
option.save()
assert(len(question1.option_set.all()) == 3)
assert(len(Option.objects.all()) == 3)
def test_get_comments(self):
"""
Test that the get_comments() function returns all comments
relating to a question.
"""
user = User.objects.create(username="testuser")
question1 = Question.objects.all()[0]
Comment.objects.create(text="Test comment!", question=question1, user=user)
Comment.objects.create(text="Another comment!", question=question1, user=user)
assert(len(question1.get_comments()) == 2)
assert(question1.get_comments()[0].text == "Test comment!")
assert(question1.get_comments()[0].created_at < question1.get_comments()[1].created_at)
def test_get_options(self):
"""
Test that the get_options() function returns all options
relating to a question.
"""
question1 = Question.objects.all()[0]
assert(question1.get_number_of_options() == 3)
def test_get_state(self):
question1 = Question.objects.all()[0]
assert(question1.state == question1.INCOMPLETE)
assert(question1.get_state() == "Incomplete")
class TestTopic(TestCase):
"""Test the Topic Model."""
def setUp(self):
"""Create Topic for testing."""
grade_test = Grade.objects.create(name="Grade Example")
syllabus_test = Syllabus.objects.create(grade=grade_test)
Topic.objects.create(name="Financial Mathematics",
description="Topic that involves sinking funds "
"and loan calculations",
syllabus=syllabus_test, week_start=1,
duration=3)
def test_topic_creates_blocks(self):
"""
Confirm that blocks are created automatically and associated with the
topic.
"""
blocks = Block.objects.all()
assert(len(blocks) == 3)
assert(blocks[0].topic.name == "Financial Mathematics")
def test_topic_creates_questions(self):
"""
Confirm that questions are created automatically and associated with the
correct block and topic.
"""
questions = Question.objects.all()
assert(len(questions) == 3 * 15)
assert(questions[0].block.topic.name == "Financial Mathematics")
def test_topic_number_of_questions(self):
"""
Confirm that the correct number of questions is returned by the helper
function.
"""
questions = Question.objects.all()
topics = Topic.objects.all()
assert(len(questions) == topics[0].get_number_of_questions())
def test_topic_number_of_blocks(self):
"""
Confirm that the correct number of blocks is returned by the helper
function.
"""
blocks = Block.objects.all()
topics = Topic.objects.all()
assert(len(blocks) == topics[0].get_number_of_blocks())
def test_topic_save_does_not_duplicate_questions(self):
already_created_topic = Topic.objects.get(name="Financial Mathematics")
count = 0
for block in Block.objects.filter(topic=already_created_topic):
for question in Question.objects.filter(block=block):
count += 1
assert(count == 45)
new_description = "This is a new description"
already_created_topic.description = new_description
already_created_topic.save()
edited_topic = Topic.objects.get(name="Financial Mathematics")
count = 0
for block in Block.objects.filter(topic=edited_topic):
for question in Question.objects.filter(block=block):
count += 1
assert(count == 45)
| [
"core.models.Option.objects.all",
"core.models.Topic.objects.all",
"core.models.Question.objects.filter",
"core.models.Block.objects.all",
"core.models.Question.objects.all",
"core.models.Grade.objects.create",
"core.models.Block.objects.filter",
"core.models.Topic.objects.create",
"core.models.Option.objects.first",
"core.models.Topic.objects.get",
"core.models.Subject",
"pytest.raises",
"core.models.Grade",
"core.models.Question",
"django.contrib.auth.models.User.objects.create",
"core.models.Comment.objects.create",
"core.models.Syllabus.objects.create"
]
| [((638, 665), 'core.models.Grade', 'Grade', ([], {'name': '"""Grade Example"""'}), "(name='Grade Example')\n", (643, 665), False, 'from core.models import Grade, Subject, Question, Comment, Option, Topic, Block, Syllabus, StateException\n'), ((715, 757), 'core.models.Subject', 'Subject', ([], {'name': '"""addition"""', 'grade': 'grade_test'}), "(name='addition', grade=grade_test)\n", (722, 757), False, 'from core.models import Grade, Subject, Question, Comment, Option, Topic, Block, Syllabus, StateException\n'), ((837, 954), 'core.models.Question', 'Question', ([], {'question_content': '"""what is 1 + 1?"""', 'answer_content': '"""This is an addition question"""', 'subject': 'subject_test'}), "(question_content='what is 1 + 1?', answer_content=\n 'This is an addition question', subject=subject_test)\n", (845, 954), False, 'from core.models import Grade, Subject, Question, Comment, Option, Topic, Block, Syllabus, StateException\n'), ((7261, 7283), 'core.models.Option.objects.first', 'Option.objects.first', ([], {}), '()\n', (7281, 7283), False, 'from core.models import Grade, Subject, Question, Comment, Option, Topic, Block, Syllabus, StateException\n'), ((7609, 7649), 'django.contrib.auth.models.User.objects.create', 'User.objects.create', ([], {'username': '"""testuser"""'}), "(username='testuser')\n", (7628, 7649), False, 'from django.contrib.auth.models import User\n'), ((7704, 7779), 'core.models.Comment.objects.create', 'Comment.objects.create', ([], {'text': '"""Test comment!"""', 'question': 'question1', 'user': 'user'}), "(text='Test comment!', question=question1, user=user)\n", (7726, 7779), False, 'from core.models import Grade, Subject, Question, Comment, Option, Topic, Block, Syllabus, StateException\n'), ((7788, 7866), 'core.models.Comment.objects.create', 'Comment.objects.create', ([], {'text': '"""Another comment!"""', 'question': 'question1', 'user': 'user'}), "(text='Another comment!', question=question1, user=user)\n", (7810, 7866), False, 'from core.models import Grade, Subject, Question, Comment, Option, Topic, Block, Syllabus, StateException\n'), ((8672, 8714), 'core.models.Grade.objects.create', 'Grade.objects.create', ([], {'name': '"""Grade Example"""'}), "(name='Grade Example')\n", (8692, 8714), False, 'from core.models import Grade, Subject, Question, Comment, Option, Topic, Block, Syllabus, StateException\n'), ((8739, 8780), 'core.models.Syllabus.objects.create', 'Syllabus.objects.create', ([], {'grade': 'grade_test'}), '(grade=grade_test)\n', (8762, 8780), False, 'from core.models import Grade, Subject, Question, Comment, Option, Topic, Block, Syllabus, StateException\n'), ((8789, 8970), 'core.models.Topic.objects.create', 'Topic.objects.create', ([], {'name': '"""Financial Mathematics"""', 'description': '"""Topic that involves sinking funds and loan calculations"""', 'syllabus': 'syllabus_test', 'week_start': '(1)', 'duration': '(3)'}), "(name='Financial Mathematics', description=\n 'Topic that involves sinking funds and loan calculations', syllabus=\n syllabus_test, week_start=1, duration=3)\n", (8809, 8970), False, 'from core.models import Grade, Subject, Question, Comment, Option, Topic, Block, Syllabus, StateException\n'), ((9268, 9287), 'core.models.Block.objects.all', 'Block.objects.all', ([], {}), '()\n', (9285, 9287), False, 'from core.models import Grade, Subject, Question, Comment, Option, Topic, Block, Syllabus, StateException\n'), ((9588, 9610), 'core.models.Question.objects.all', 'Question.objects.all', ([], {}), '()\n', (9608, 9610), False, 
'from core.models import Grade, Subject, Question, Comment, Option, Topic, Block, Syllabus, StateException\n'), ((9913, 9935), 'core.models.Question.objects.all', 'Question.objects.all', ([], {}), '()\n', (9933, 9935), False, 'from core.models import Grade, Subject, Question, Comment, Option, Topic, Block, Syllabus, StateException\n'), ((9953, 9972), 'core.models.Topic.objects.all', 'Topic.objects.all', ([], {}), '()\n', (9970, 9972), False, 'from core.models import Grade, Subject, Question, Comment, Option, Topic, Block, Syllabus, StateException\n'), ((10222, 10241), 'core.models.Block.objects.all', 'Block.objects.all', ([], {}), '()\n', (10239, 10241), False, 'from core.models import Grade, Subject, Question, Comment, Option, Topic, Block, Syllabus, StateException\n'), ((10259, 10278), 'core.models.Topic.objects.all', 'Topic.objects.all', ([], {}), '()\n', (10276, 10278), False, 'from core.models import Grade, Subject, Question, Comment, Option, Topic, Block, Syllabus, StateException\n'), ((10436, 10483), 'core.models.Topic.objects.get', 'Topic.objects.get', ([], {'name': '"""Financial Mathematics"""'}), "(name='Financial Mathematics')\n", (10453, 10483), False, 'from core.models import Grade, Subject, Question, Comment, Option, Topic, Block, Syllabus, StateException\n'), ((10523, 10572), 'core.models.Block.objects.filter', 'Block.objects.filter', ([], {'topic': 'already_created_topic'}), '(topic=already_created_topic)\n', (10543, 10572), False, 'from core.models import Grade, Subject, Question, Comment, Option, Topic, Block, Syllabus, StateException\n'), ((10870, 10917), 'core.models.Topic.objects.get', 'Topic.objects.get', ([], {'name': '"""Financial Mathematics"""'}), "(name='Financial Mathematics')\n", (10887, 10917), False, 'from core.models import Grade, Subject, Question, Comment, Option, Topic, Block, Syllabus, StateException\n'), ((10957, 10997), 'core.models.Block.objects.filter', 'Block.objects.filter', ([], {'topic': 'edited_topic'}), '(topic=edited_topic)\n', (10977, 10997), False, 'from core.models import Grade, Subject, Question, Comment, Option, Topic, Block, Syllabus, StateException\n'), ((1153, 1175), 'core.models.Question.objects.all', 'Question.objects.all', ([], {}), '()\n', (1173, 1175), False, 'from core.models import Grade, Subject, Question, Comment, Option, Topic, Block, Syllabus, StateException\n'), ((1446, 1468), 'core.models.Question.objects.all', 'Question.objects.all', ([], {}), '()\n', (1466, 1468), False, 'from core.models import Grade, Subject, Question, Comment, Option, Topic, Block, Syllabus, StateException\n'), ((1486, 1515), 'pytest.raises', 'pytest.raises', (['StateException'], {}), '(StateException)\n', (1499, 1515), False, 'import pytest\n'), ((1765, 1794), 'pytest.raises', 'pytest.raises', (['StateException'], {}), '(StateException)\n', (1778, 1794), False, 'import pytest\n'), ((2037, 2066), 'pytest.raises', 'pytest.raises', (['StateException'], {}), '(StateException)\n', (2050, 2066), False, 'import pytest\n'), ((2631, 2653), 'core.models.Question.objects.all', 'Question.objects.all', ([], {}), '()\n', (2651, 2653), False, 'from core.models import Grade, Subject, Question, Comment, Option, Topic, Block, Syllabus, StateException\n'), ((2720, 2749), 'pytest.raises', 'pytest.raises', (['StateException'], {}), '(StateException)\n', (2733, 2749), False, 'import pytest\n'), ((2940, 2969), 'pytest.raises', 'pytest.raises', (['StateException'], {}), '(StateException)\n', (2953, 2969), False, 'import pytest\n'), ((3672, 3694), 
'core.models.Question.objects.all', 'Question.objects.all', ([], {}), '()\n', (3692, 3694), False, 'from core.models import Grade, Subject, Question, Comment, Option, Topic, Block, Syllabus, StateException\n'), ((3764, 3793), 'pytest.raises', 'pytest.raises', (['StateException'], {}), '(StateException)\n', (3777, 3793), False, 'import pytest\n'), ((4048, 4077), 'pytest.raises', 'pytest.raises', (['StateException'], {}), '(StateException)\n', (4061, 4077), False, 'import pytest\n'), ((4325, 4354), 'pytest.raises', 'pytest.raises', (['StateException'], {}), '(StateException)\n', (4338, 4354), False, 'import pytest\n'), ((4898, 4920), 'core.models.Question.objects.all', 'Question.objects.all', ([], {}), '()\n', (4918, 4920), False, 'from core.models import Grade, Subject, Question, Comment, Option, Topic, Block, Syllabus, StateException\n'), ((4983, 5012), 'pytest.raises', 'pytest.raises', (['StateException'], {}), '(StateException)\n', (4996, 5012), False, 'import pytest\n'), ((5257, 5286), 'pytest.raises', 'pytest.raises', (['StateException'], {}), '(StateException)\n', (5270, 5286), False, 'import pytest\n'), ((5527, 5556), 'pytest.raises', 'pytest.raises', (['StateException'], {}), '(StateException)\n', (5540, 5556), False, 'import pytest\n'), ((6101, 6123), 'core.models.Question.objects.all', 'Question.objects.all', ([], {}), '()\n', (6121, 6123), False, 'from core.models import Grade, Subject, Question, Comment, Option, Topic, Block, Syllabus, StateException\n'), ((6185, 6214), 'pytest.raises', 'pytest.raises', (['StateException'], {}), '(StateException)\n', (6198, 6214), False, 'import pytest\n'), ((6458, 6487), 'pytest.raises', 'pytest.raises', (['StateException'], {}), '(StateException)\n', (6471, 6487), False, 'import pytest\n'), ((6734, 6763), 'pytest.raises', 'pytest.raises', (['StateException'], {}), '(StateException)\n', (6747, 6763), False, 'import pytest\n'), ((7217, 7239), 'core.models.Question.objects.all', 'Question.objects.all', ([], {}), '()\n', (7237, 7239), False, 'from core.models import Grade, Subject, Question, Comment, Option, Topic, Block, Syllabus, StateException\n'), ((7670, 7692), 'core.models.Question.objects.all', 'Question.objects.all', ([], {}), '()\n', (7690, 7692), False, 'from core.models import Grade, Subject, Question, Comment, Option, Topic, Block, Syllabus, StateException\n'), ((8257, 8279), 'core.models.Question.objects.all', 'Question.objects.all', ([], {}), '()\n', (8277, 8279), False, 'from core.models import Grade, Subject, Question, Comment, Option, Topic, Block, Syllabus, StateException\n'), ((8390, 8412), 'core.models.Question.objects.all', 'Question.objects.all', ([], {}), '()\n', (8410, 8412), False, 'from core.models import Grade, Subject, Question, Comment, Option, Topic, Block, Syllabus, StateException\n'), ((10602, 10638), 'core.models.Question.objects.filter', 'Question.objects.filter', ([], {'block': 'block'}), '(block=block)\n', (10625, 10638), False, 'from core.models import Grade, Subject, Question, Comment, Option, Topic, Block, Syllabus, StateException\n'), ((11027, 11063), 'core.models.Question.objects.filter', 'Question.objects.filter', ([], {'block': 'block'}), '(block=block)\n', (11050, 11063), False, 'from core.models import Grade, Subject, Question, Comment, Option, Topic, Block, Syllabus, StateException\n'), ((7409, 7429), 'core.models.Option.objects.all', 'Option.objects.all', ([], {}), '()\n', (7427, 7429), False, 'from core.models import Grade, Subject, Question, Comment, Option, Topic, Block, Syllabus, 
StateException\n')] |
from django.shortcuts import render, redirect
from django.http import HttpResponse
from django.contrib.auth.decorators import login_required
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.core.mail import EmailMessage
from django.conf import settings
from django.template.loader import render_to_string
from django.contrib.auth.models import User
from django.contrib import messages
from django.contrib.auth import logout, login, authenticate
from django.contrib.auth.forms import UserCreationForm
from .decorators import *
from .forms import PostForm, CustomUserCreationForm, ProfileForm, UserForm
from .filters import PostFilter
from .models import *
# Create your views here.
def home(request):
posts = Post.objects.filter(active=True, featured=True)[0:3]
context = {'posts':posts}
return render(request, 'base/index.html', context)
def posts(request):
posts = Post.objects.filter(active=True)
myFilter = PostFilter(request.GET, queryset=posts)
posts = myFilter.qs
page = request.GET.get('page')
paginator = Paginator(posts, 5)
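    # Show 5 posts per page; invalid or out-of-range page numbers fall back to
    # the first or last page in the exception handlers below.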
try:
posts = paginator.page(page)
except PageNotAnInteger:
posts = paginator.page(1)
except EmptyPage:
posts = paginator.page(paginator.num_pages)
context = {'posts':posts, 'myFilter':myFilter}
return render(request, 'base/posts.html', context)
def post(request, slug):
post = Post.objects.get(slug=slug)
if request.method == 'POST':
PostComment.objects.create(
author=request.user.profile,
post=post,
body=request.POST['comment']
)
messages.success(request, "Your comment has been posted successfully!")
return redirect('post', slug=post.slug)
context = {'post':post}
return render(request, 'base/post.html', context)
def profile(request):
return render(request, 'base/profile.html')
#CRUD VIEWS
@admin_only
@login_required(login_url="home")
def createPost(request):
form = PostForm()
if request.method == 'POST':
form = PostForm(request.POST, request.FILES)
if form.is_valid():
form.save()
return redirect('posts')
context = {'form':form}
return render(request, 'base/post_form.html', context)
@admin_only
@login_required(login_url="home")
def updatePost(request, slug):
post = Post.objects.get(slug=slug)
form = PostForm(instance=post)
if request.method == 'POST':
form = PostForm(request.POST, request.FILES, instance=post)
if form.is_valid():
form.save()
return redirect('posts')
context = {'form':form}
return render(request, 'base/post_form.html', context)
@admin_only
@login_required(login_url="home")
def deletePost(request, slug):
post = Post.objects.get(slug=slug)
if request.method == 'POST':
post.delete()
return redirect('posts')
context = {'item':post}
return render(request, 'base/delete.html', context)
def sendEmail(request):
if request.method == 'POST':
template = render_to_string('base/email_template.html', {
'name':request.POST['name'],
'email':request.POST['email'],
'message':request.POST['message'],
})
email = EmailMessage(
request.POST['subject'],
template,
settings.EMAIL_HOST_USER,
['<EMAIL>']
)
email.fail_silently=False
email.send()
return render(request, 'base/email_sent.html')
def loginPage(request):
if request.user.is_authenticated:
return redirect('home')
if request.method == 'POST':
email = request.POST.get('email')
        password = request.POST.get('password')
#Little Hack to work around re-building the usermodel
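        # The default auth backend authenticates by username, so look the user
        # up by email first and then authenticate with the matching username.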
try:
user = User.objects.get(email=email)
user = authenticate(request, username=user.username, password=password)
except:
            messages.error(request, 'User with this email does not exist')
return redirect('login')
if user is not None:
login(request, user)
return redirect('home')
else:
messages.error(request, 'Email OR password is incorrect')
context = {}
return render(request, 'base/login.html', context)
def registerPage(request):
form = CustomUserCreationForm()
if request.method == 'POST':
form = CustomUserCreationForm(request.POST)
if form.is_valid():
user = form.save(commit=False)
user.save()
            messages.success(request, 'Account successfully created!')
user = authenticate(request, username=user.username, password=request.POST['<PASSWORD>'])
if user is not None:
login(request, user)
next_url = request.GET.get('next')
if next_url == '' or next_url == None:
next_url = 'home'
return redirect(next_url)
else:
            messages.error(request, 'An error has occurred with registration')
context = {'form':form}
return render(request, 'base/register.html', context)
def logoutUser(request):
logout(request)
return redirect('home')
@admin_only
@login_required(login_url="home")
def userAccount(request):
profile = request.user.profile
context = {'profile':profile}
return render(request, 'base/account.html', context)
@login_required(login_url="home")
def updateProfile(request):
user = request.user
profile = user.profile
form = ProfileForm(instance=profile)
if request.method == 'POST':
user_form = UserForm(request.POST, instance=user)
if user_form.is_valid():
user_form.save()
form = ProfileForm(request.POST, request.FILES, instance=profile)
if form.is_valid():
form.save()
return redirect('account')
context = {'form':form}
return render(request, 'base/profile_form.html', context)
def myEducation(request):
return render(request, 'base/education.html')
def myExperience(request):
return render(request, 'base/experience.html')
def myAchievements(request):
return render(request, 'base/achievements.html')
def myAbout(request):
return render(request, 'base/about.html')
def myContact(request):
return render(request, 'base/contact.html')
def mySkills(request):
return render(request, 'base/skills.html')
| [
"django.shortcuts.render",
"django.contrib.auth.authenticate",
"django.contrib.messages.error",
"django.contrib.auth.login",
"django.shortcuts.redirect",
"django.template.loader.render_to_string",
"django.contrib.auth.decorators.login_required",
"django.contrib.messages.success",
"django.core.mail.EmailMessage",
"django.contrib.auth.models.User.objects.get",
"django.contrib.auth.logout",
"django.core.paginator.Paginator"
]
| [((1835, 1867), 'django.contrib.auth.decorators.login_required', 'login_required', ([], {'login_url': '"""home"""'}), "(login_url='home')\n", (1849, 1867), False, 'from django.contrib.auth.decorators import login_required\n'), ((2151, 2183), 'django.contrib.auth.decorators.login_required', 'login_required', ([], {'login_url': '"""home"""'}), "(login_url='home')\n", (2165, 2183), False, 'from django.contrib.auth.decorators import login_required\n'), ((2536, 2568), 'django.contrib.auth.decorators.login_required', 'login_required', ([], {'login_url': '"""home"""'}), "(login_url='home')\n", (2550, 2568), False, 'from django.contrib.auth.decorators import login_required\n'), ((4701, 4733), 'django.contrib.auth.decorators.login_required', 'login_required', ([], {'login_url': '"""home"""'}), "(login_url='home')\n", (4715, 4733), False, 'from django.contrib.auth.decorators import login_required\n'), ((4880, 4912), 'django.contrib.auth.decorators.login_required', 'login_required', ([], {'login_url': '"""home"""'}), "(login_url='home')\n", (4894, 4912), False, 'from django.contrib.auth.decorators import login_required\n'), ((836, 879), 'django.shortcuts.render', 'render', (['request', '"""base/index.html"""', 'context'], {}), "(request, 'base/index.html', context)\n", (842, 879), False, 'from django.shortcuts import render, redirect\n'), ((1062, 1081), 'django.core.paginator.Paginator', 'Paginator', (['posts', '(5)'], {}), '(posts, 5)\n', (1071, 1081), False, 'from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\n'), ((1296, 1339), 'django.shortcuts.render', 'render', (['request', '"""base/posts.html"""', 'context'], {}), "(request, 'base/posts.html', context)\n", (1302, 1339), False, 'from django.shortcuts import render, redirect\n'), ((1698, 1740), 'django.shortcuts.render', 'render', (['request', '"""base/post.html"""', 'context'], {}), "(request, 'base/post.html', context)\n", (1704, 1740), False, 'from django.shortcuts import render, redirect\n'), ((1772, 1808), 'django.shortcuts.render', 'render', (['request', '"""base/profile.html"""'], {}), "(request, 'base/profile.html')\n", (1778, 1808), False, 'from django.shortcuts import render, redirect\n'), ((2088, 2135), 'django.shortcuts.render', 'render', (['request', '"""base/post_form.html"""', 'context'], {}), "(request, 'base/post_form.html', context)\n", (2094, 2135), False, 'from django.shortcuts import render, redirect\n'), ((2474, 2521), 'django.shortcuts.render', 'render', (['request', '"""base/post_form.html"""', 'context'], {}), "(request, 'base/post_form.html', context)\n", (2480, 2521), False, 'from django.shortcuts import render, redirect\n'), ((2743, 2787), 'django.shortcuts.render', 'render', (['request', '"""base/delete.html"""', 'context'], {}), "(request, 'base/delete.html', context)\n", (2749, 2787), False, 'from django.shortcuts import render, redirect\n'), ((3185, 3224), 'django.shortcuts.render', 'render', (['request', '"""base/email_sent.html"""'], {}), "(request, 'base/email_sent.html')\n", (3191, 3224), False, 'from django.shortcuts import render, redirect\n'), ((3873, 3916), 'django.shortcuts.render', 'render', (['request', '"""base/login.html"""', 'context'], {}), "(request, 'base/login.html', context)\n", (3879, 3916), False, 'from django.shortcuts import render, redirect\n'), ((4572, 4618), 'django.shortcuts.render', 'render', (['request', '"""base/register.html"""', 'context'], {}), "(request, 'base/register.html', context)\n", (4578, 4618), False, 'from django.shortcuts import render, 
redirect\n'), ((4646, 4661), 'django.contrib.auth.logout', 'logout', (['request'], {}), '(request)\n', (4652, 4661), False, 'from django.contrib.auth import logout, login, authenticate\n'), ((4670, 4686), 'django.shortcuts.redirect', 'redirect', (['"""home"""'], {}), "('home')\n", (4678, 4686), False, 'from django.shortcuts import render, redirect\n'), ((4832, 4877), 'django.shortcuts.render', 'render', (['request', '"""base/account.html"""', 'context'], {}), "(request, 'base/account.html', context)\n", (4838, 4877), False, 'from django.shortcuts import render, redirect\n'), ((5324, 5374), 'django.shortcuts.render', 'render', (['request', '"""base/profile_form.html"""', 'context'], {}), "(request, 'base/profile_form.html', context)\n", (5330, 5374), False, 'from django.shortcuts import render, redirect\n'), ((5410, 5448), 'django.shortcuts.render', 'render', (['request', '"""base/education.html"""'], {}), "(request, 'base/education.html')\n", (5416, 5448), False, 'from django.shortcuts import render, redirect\n'), ((5485, 5524), 'django.shortcuts.render', 'render', (['request', '"""base/experience.html"""'], {}), "(request, 'base/experience.html')\n", (5491, 5524), False, 'from django.shortcuts import render, redirect\n'), ((5563, 5604), 'django.shortcuts.render', 'render', (['request', '"""base/achievements.html"""'], {}), "(request, 'base/achievements.html')\n", (5569, 5604), False, 'from django.shortcuts import render, redirect\n'), ((5639, 5673), 'django.shortcuts.render', 'render', (['request', '"""base/about.html"""'], {}), "(request, 'base/about.html')\n", (5645, 5673), False, 'from django.shortcuts import render, redirect\n'), ((5710, 5746), 'django.shortcuts.render', 'render', (['request', '"""base/contact.html"""'], {}), "(request, 'base/contact.html')\n", (5716, 5746), False, 'from django.shortcuts import render, redirect\n'), ((5782, 5817), 'django.shortcuts.render', 'render', (['request', '"""base/skills.html"""'], {}), "(request, 'base/skills.html')\n", (5788, 5817), False, 'from django.shortcuts import render, redirect\n'), ((1548, 1619), 'django.contrib.messages.success', 'messages.success', (['request', '"""Your comment has been posted successfully!"""'], {}), "(request, 'Your comment has been posted successfully!')\n", (1564, 1619), False, 'from django.contrib import messages\n'), ((1630, 1662), 'django.shortcuts.redirect', 'redirect', (['"""post"""'], {'slug': 'post.slug'}), "('post', slug=post.slug)\n", (1638, 1662), False, 'from django.shortcuts import render, redirect\n'), ((2036, 2053), 'django.shortcuts.redirect', 'redirect', (['"""posts"""'], {}), "('posts')\n", (2044, 2053), False, 'from django.shortcuts import render, redirect\n'), ((2422, 2439), 'django.shortcuts.redirect', 'redirect', (['"""posts"""'], {}), "('posts')\n", (2430, 2439), False, 'from django.shortcuts import render, redirect\n'), ((2692, 2709), 'django.shortcuts.redirect', 'redirect', (['"""posts"""'], {}), "('posts')\n", (2700, 2709), False, 'from django.shortcuts import render, redirect\n'), ((2860, 3008), 'django.template.loader.render_to_string', 'render_to_string', (['"""base/email_template.html"""', "{'name': request.POST['name'], 'email': request.POST['email'], 'message':\n request.POST['message']}"], {}), "('base/email_template.html', {'name': request.POST['name'],\n 'email': request.POST['email'], 'message': request.POST['message']})\n", (2876, 3008), False, 'from django.template.loader import render_to_string\n'), ((3028, 3119), 'django.core.mail.EmailMessage', 'EmailMessage', 
(["request.POST['subject']", 'template', 'settings.EMAIL_HOST_USER', "['<EMAIL>']"], {}), "(request.POST['subject'], template, settings.EMAIL_HOST_USER, [\n '<EMAIL>'])\n", (3040, 3119), False, 'from django.core.mail import EmailMessage\n'), ((3294, 3310), 'django.shortcuts.redirect', 'redirect', (['"""home"""'], {}), "('home')\n", (3302, 3310), False, 'from django.shortcuts import render, redirect\n'), ((3493, 3522), 'django.contrib.auth.models.User.objects.get', 'User.objects.get', ([], {'email': 'email'}), '(email=email)\n', (3509, 3522), False, 'from django.contrib.auth.models import User\n'), ((3533, 3597), 'django.contrib.auth.authenticate', 'authenticate', (['request'], {'username': 'user.username', 'password': 'password'}), '(request, username=user.username, password=password)\n', (3545, 3597), False, 'from django.contrib.auth import logout, login, authenticate\n'), ((3733, 3753), 'django.contrib.auth.login', 'login', (['request', 'user'], {}), '(request, user)\n', (3738, 3753), False, 'from django.contrib.auth import logout, login, authenticate\n'), ((3764, 3780), 'django.shortcuts.redirect', 'redirect', (['"""home"""'], {}), "('home')\n", (3772, 3780), False, 'from django.shortcuts import render, redirect\n'), ((3792, 3849), 'django.contrib.messages.error', 'messages.error', (['request', '"""Email OR password is incorrect"""'], {}), "(request, 'Email OR password is incorrect')\n", (3806, 3849), False, 'from django.contrib import messages\n'), ((4128, 4185), 'django.contrib.messages.success', 'messages.success', (['request', '"""Account successfuly created!"""'], {}), "(request, 'Account successfuly created!')\n", (4144, 4185), False, 'from django.contrib import messages\n'), ((4197, 4284), 'django.contrib.auth.authenticate', 'authenticate', (['request'], {'username': 'user.username', 'password': "request.POST['<PASSWORD>']"}), "(request, username=user.username, password=request.POST[\n '<PASSWORD>'])\n", (4209, 4284), False, 'from django.contrib.auth import logout, login, authenticate\n'), ((4443, 4461), 'django.shortcuts.redirect', 'redirect', (['next_url'], {}), '(next_url)\n', (4451, 4461), False, 'from django.shortcuts import render, redirect\n'), ((4473, 4538), 'django.contrib.messages.error', 'messages.error', (['request', '"""An error has occured with registration"""'], {}), "(request, 'An error has occured with registration')\n", (4487, 4538), False, 'from django.contrib import messages\n'), ((5269, 5288), 'django.shortcuts.redirect', 'redirect', (['"""account"""'], {}), "('account')\n", (5277, 5288), False, 'from django.shortcuts import render, redirect\n'), ((3611, 3674), 'django.contrib.messages.error', 'messages.error', (['request', '"""User with this email does not exists"""'], {}), "(request, 'User with this email does not exists')\n", (3625, 3674), False, 'from django.contrib import messages\n'), ((3685, 3702), 'django.shortcuts.redirect', 'redirect', (['"""login"""'], {}), "('login')\n", (3693, 3702), False, 'from django.shortcuts import render, redirect\n'), ((4309, 4329), 'django.contrib.auth.login', 'login', (['request', 'user'], {}), '(request, user)\n', (4314, 4329), False, 'from django.contrib.auth import logout, login, authenticate\n')] |
import socket, datetime, os
from direct.distributed.DistributedObjectGlobal import DistributedObjectGlobal
from direct.distributed.DistributedObject import DistributedObject
from toontown.toonbase import ToontownGlobals
from toontown.uberdog import InGameNewsResponses
class DistributedInGameNewsMgr(DistributedObject):
notify = directNotify.newCategory('InGameNewsMgr')
neverDisable = 1
def __init__(self, cr):
DistributedObject.__init__(self, cr)
base.cr.inGameNewsMgr = self
def delete(self):
DistributedObject.delete(self)
self.cr.inGameNewsMgr = None
return
def disable(self):
        self.notify.debug("I'm disabling InGameNewsMgr right now.")
DistributedObject.disable(self)
def generate(self):
self.notify.debug('BASE: generate')
DistributedObject.generate(self)
def setLatestIssueStr(self, issueStr):
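        # Convert the UTC issue timestamp into Toontown time, then broadcast
        # 'newIssueOut' so listeners can react to the new issue.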
self.latestIssueStr = issueStr
self.latestIssue = base.cr.toontownTimeManager.convertUtcStrToToontownTime(issueStr)
messenger.send('newIssueOut')
self.notify.info('latestIssue=%s' % self.latestIssue)
def getLatestIssueStr(self):
pass
def getLatestIssue(self):
return self.latestIssue
| [
"direct.distributed.DistributedObject.DistributedObject.disable",
"direct.distributed.DistributedObject.DistributedObject.generate",
"direct.distributed.DistributedObject.DistributedObject.delete",
"direct.distributed.DistributedObject.DistributedObject.__init__"
]
| [((434, 470), 'direct.distributed.DistributedObject.DistributedObject.__init__', 'DistributedObject.__init__', (['self', 'cr'], {}), '(self, cr)\n', (460, 470), False, 'from direct.distributed.DistributedObject import DistributedObject\n'), ((539, 569), 'direct.distributed.DistributedObject.DistributedObject.delete', 'DistributedObject.delete', (['self'], {}), '(self)\n', (563, 569), False, 'from direct.distributed.DistributedObject import DistributedObject\n'), ((722, 753), 'direct.distributed.DistributedObject.DistributedObject.disable', 'DistributedObject.disable', (['self'], {}), '(self)\n', (747, 753), False, 'from direct.distributed.DistributedObject import DistributedObject\n'), ((831, 863), 'direct.distributed.DistributedObject.DistributedObject.generate', 'DistributedObject.generate', (['self'], {}), '(self)\n', (857, 863), False, 'from direct.distributed.DistributedObject import DistributedObject\n')] |
#!/usr/bin/env python
# coding=utf-8
# vim:ts=4:sts=4:sw=4:et
#
# Author: <NAME>
# Date: 2017-11-24 21:10:35 +0100 (Fri, 24 Nov 2017)
#
# https://github.com/harisekhon/nagios-plugins
#
# License: see accompanying Hari Sekhon LICENSE file
#
# If you're using my code you're welcome to connect with me on LinkedIn
# and optionally send me feedback to help steer this or other code I publish
#
# https://www.linkedin.com/in/harisekhon
#
"""
Nagios Plugin to check a Logstash pipeline is online via the Logstash Rest API
The API is only available from Logstash 5.x onwards; older versions will refuse the connection
Optional thresholds apply to the number of pipeline workers
Ensure Logstash options:
--http.host should be set to 0.0.0.0 if querying remotely
--http.port should be set to the same port that you are querying via this plugin's --port switch
Tested on Logstash 5.0, 5.1, 5.2, 5.3, 5.4, 5.5, 5.6, 6.0, 6.1
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import sys
import traceback
srcdir = os.path.abspath(os.path.dirname(__file__))
libdir = os.path.join(srcdir, 'pylib')
sys.path.append(libdir)
try:
# pylint: disable=wrong-import-position
#from harisekhon.utils import log
from harisekhon.utils import ERRORS, UnknownError, support_msg_api
from harisekhon.utils import validate_chars
from harisekhon import RestNagiosPlugin
except ImportError as _:
print(traceback.format_exc(), end='')
sys.exit(4)
__author__ = '<NAME>'
__version__ = '0.6'
class CheckLogstashPipeline(RestNagiosPlugin):
def __init__(self):
# Python 2.x
super(CheckLogstashPipeline, self).__init__()
# Python 3.x
# super().__init__()
self.name = 'Logstash'
self.default_port = 9600
        # could append the pipeline name to this endpoint, but the resulting bare 404 Not Found would be a less informative error
# Logstash 5.x /_node/pipeline <= use -5 switch for older Logstash
# Logstash 6.x /_node/pipelines
self.path = '/_node/pipelines'
self.auth = False
self.json = True
self.msg = 'Logstash piplines msg not defined yet'
self.pipeline = None
def add_options(self):
super(CheckLogstashPipeline, self).add_options()
self.add_opt('-i', '--pipeline', default='main', help='Pipeline to expect is configured (default: main)')
self.add_opt('-d', '--dead-letter-queue-enabled', action='store_true',
help='Check dead letter queue is enabled on pipeline (optional, only applies to Logstash 6+)')
self.add_opt('-5', '--logstash-5', action='store_true',
help='Logstash 5.x (has a slightly different API endpoint to 6.x)')
self.add_opt('-l', '--list', action='store_true', help='List pipelines and exit (only for Logstash 6+)')
self.add_thresholds()
def process_options(self):
super(CheckLogstashPipeline, self).process_options()
self.pipeline = self.get_opt('pipeline')
validate_chars(self.pipeline, 'pipeline', 'A-Za-z0-9_-')
# slightly more efficient to not return the potential list of other pipelines but the error is less informative
#self.path += '/{}'.format(self.pipeline)
if self.get_opt('logstash_5'):
if self.pipeline != 'main':
self.usage("--pipeline can only be 'main' for --logstash-5")
if self.get_opt('list'):
self.usage('can only --list pipelines for Logstash 6+')
if self.get_opt('dead_letter_queue_enabled'):
self.usage('--dead-letter-queue-enabled only available with Logstash 6+')
self.path = self.path.rstrip('s')
self.validate_thresholds(simple='lower', optional=True)
def parse_json(self, json_data):
if self.get_opt('logstash_5'):
pipeline = json_data['pipeline']
else:
pipelines = json_data['pipelines']
if self.get_opt('list'):
print('Logstash Pipelines:\n')
for pipeline in pipelines:
print(pipeline)
sys.exit(ERRORS['UNKNOWN'])
pipeline = None
if self.pipeline in pipelines:
pipeline = pipelines[self.pipeline]
self.msg = "Logstash pipeline '{}' ".format(self.pipeline)
if pipeline:
self.msg += 'exists'
if 'workers' not in pipeline:
raise UnknownError('workers field not found, Logstash may still be initializing' + \
'. If problem persists {}'.format(support_msg_api()))
workers = pipeline['workers']
self.msg += ' with {} workers'.format(workers)
self.check_thresholds(workers)
if not self.get_opt('logstash_5'):
dead_letter_queue_enabled = pipeline['dead_letter_queue_enabled']
self.msg += ', dead letter queue enabled: {}'.format(dead_letter_queue_enabled)
if self.get_opt('dead_letter_queue_enabled') and not dead_letter_queue_enabled:
self.warning()
self.msg += ' (expected True)'
batch_delay = pipeline['batch_delay']
batch_size = pipeline['batch_size']
self.msg += ', batch delay: {}, batch size: {}'.format(batch_delay, batch_size)
else:
self.critical()
self.msg += 'does not exist!'
if __name__ == '__main__':
CheckLogstashPipeline().main()
| [
"harisekhon.utils.support_msg_api",
"traceback.format_exc",
"os.path.join",
"os.path.dirname",
"harisekhon.utils.validate_chars",
"sys.exit",
"sys.path.append"
]
| [((1187, 1216), 'os.path.join', 'os.path.join', (['srcdir', '"""pylib"""'], {}), "(srcdir, 'pylib')\n", (1199, 1216), False, 'import os\n'), ((1217, 1240), 'sys.path.append', 'sys.path.append', (['libdir'], {}), '(libdir)\n', (1232, 1240), False, 'import sys\n'), ((1151, 1176), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1166, 1176), False, 'import os\n'), ((1562, 1573), 'sys.exit', 'sys.exit', (['(4)'], {}), '(4)\n', (1570, 1573), False, 'import sys\n'), ((3114, 3170), 'harisekhon.utils.validate_chars', 'validate_chars', (['self.pipeline', '"""pipeline"""', '"""A-Za-z0-9_-"""'], {}), "(self.pipeline, 'pipeline', 'A-Za-z0-9_-')\n", (3128, 3170), False, 'from harisekhon.utils import validate_chars\n'), ((1526, 1548), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (1546, 1548), False, 'import traceback\n'), ((4226, 4253), 'sys.exit', 'sys.exit', (["ERRORS['UNKNOWN']"], {}), "(ERRORS['UNKNOWN'])\n", (4234, 4253), False, 'import sys\n'), ((4710, 4727), 'harisekhon.utils.support_msg_api', 'support_msg_api', ([], {}), '()\n', (4725, 4727), False, 'from harisekhon.utils import ERRORS, UnknownError, support_msg_api\n')] |
from django.http import HttpResponse
from django.core.mail import send_mail
import json
from django.shortcuts import render
from django.views.decorators.csrf import csrf_exempt
from GestiRED.models import User
from GestiRED.models import QualityControl, Phase, Resource, ResourceType,PhaseType
from django.core import serializers
from django.db.models import Q
# Create your views here.
def index(request):
return HttpResponse("GestiRED app UP")
@csrf_exempt
def quality_review_notification(request):
if request.method == 'POST':
data = json.loads(request.body)
qualityControl_id = data["qualityControl_id"]
resource_name = data["resource_name"]
responsible_name = data["responsible_name"]
qualityControl = QualityControl.objects.get(pk=qualityControl_id)
user = qualityControl.responsible
send_mail('Revision Calidad',
'Recurso: ' + resource_name + '\n Observaciones: Se ha asignado para control de calidad a: ' + responsible_name,
'<EMAIL>',
[user.email],
fail_silently=False)
res = {"status": "Ok", "Content:": "Email enviado"}
return HttpResponse(json.dumps(res), content_type="application/json")
@csrf_exempt
def resources_filters(request):
qs_json={}
if request.method == 'GET':
phaseType = request.GET.get('phaseType')
if phaseType != None : phaseType= phaseType.split(',')
resourceType = request.GET.get('resourceType')
if resourceType != None : resourceType = resourceType.split(',')
responsible = request.GET.get('responsible')
if responsible != None: responsible = responsible.split(',')
labels = request.GET.get('labels')
my_dict = {'phase__phaseType__in':phaseType,
'resourceType__in': resourceType,
'responsibles__in':responsible,
'labels__icontains': labels} # Your dict with fields
or_condition = Q()
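        # Note: despite the variable name, filters are combined with Q.AND below,
        # so a resource must match every filter supplied in the query string.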
for key, value in my_dict.items():
if value != None:
or_condition.add(Q(**{key: value}), Q.AND)
lp = set()
lp=Resource.objects.filter(or_condition).all().distinct()
data = list([res.json() for res in lp])
qs_json =json.dumps({'objects':data})
return HttpResponse( qs_json, content_type='application/json')
| [
"json.loads",
"GestiRED.models.Resource.objects.filter",
"django.core.mail.send_mail",
"django.http.HttpResponse",
"json.dumps",
"GestiRED.models.QualityControl.objects.get",
"django.db.models.Q"
]
| [((420, 451), 'django.http.HttpResponse', 'HttpResponse', (['"""GestiRED app UP"""'], {}), "('GestiRED app UP')\n", (432, 451), False, 'from django.http import HttpResponse\n'), ((2333, 2387), 'django.http.HttpResponse', 'HttpResponse', (['qs_json'], {'content_type': '"""application/json"""'}), "(qs_json, content_type='application/json')\n", (2345, 2387), False, 'from django.http import HttpResponse\n'), ((557, 581), 'json.loads', 'json.loads', (['request.body'], {}), '(request.body)\n', (567, 581), False, 'import json\n'), ((759, 807), 'GestiRED.models.QualityControl.objects.get', 'QualityControl.objects.get', ([], {'pk': 'qualityControl_id'}), '(pk=qualityControl_id)\n', (785, 807), False, 'from GestiRED.models import QualityControl, Phase, Resource, ResourceType, PhaseType\n'), ((859, 1058), 'django.core.mail.send_mail', 'send_mail', (['"""Revision Calidad"""', '(\'Recurso: \' + resource_name +\n """\n Observaciones: Se ha asignado para control de calidad a: """ +\n responsible_name)', '"""<EMAIL>"""', '[user.email]'], {'fail_silently': '(False)'}), '(\'Revision Calidad\', \'Recurso: \' + resource_name +\n """\n Observaciones: Se ha asignado para control de calidad a: """ +\n responsible_name, \'<EMAIL>\', [user.email], fail_silently=False)\n', (868, 1058), False, 'from django.core.mail import send_mail\n'), ((1201, 1216), 'json.dumps', 'json.dumps', (['res'], {}), '(res)\n', (1211, 1216), False, 'import json\n'), ((2007, 2010), 'django.db.models.Q', 'Q', ([], {}), '()\n', (2008, 2010), False, 'from django.db.models import Q\n'), ((2293, 2322), 'json.dumps', 'json.dumps', (["{'objects': data}"], {}), "({'objects': data})\n", (2303, 2322), False, 'import json\n'), ((2117, 2134), 'django.db.models.Q', 'Q', ([], {}), '(**{key: value})\n', (2118, 2134), False, 'from django.db.models import Q\n'), ((2173, 2210), 'GestiRED.models.Resource.objects.filter', 'Resource.objects.filter', (['or_condition'], {}), '(or_condition)\n', (2196, 2210), False, 'from GestiRED.models import QualityControl, Phase, Resource, ResourceType, PhaseType\n')] |
from dataclasses import dataclass, field
from datetime import date, datetime, time, timezone
from pathlib import Path
from typing import Any, Dict, Optional, Union
import ciso8601
import pytest
from mashumaro import DataClassDictMixin
from mashumaro.exceptions import UnserializableField
from mashumaro.types import SerializationStrategy
from .entities import (
MutableString,
MyList,
ThirdPartyType,
TypedDictRequiredKeys,
)
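# These tests exercise per-field hooks configured via field metadata: the
# "deserialize"/"serialize" keys accept engine names ("ciso8601", "pendulum"),
# arbitrary callables, or lambdas, and "serialization_strategy" accepts a
# SerializationStrategy instance.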
def test_ciso8601_datetime_parser():
@dataclass
class DataClass(DataClassDictMixin):
x: datetime = field(metadata={"deserialize": "ciso8601"})
should_be = DataClass(x=datetime(2021, 1, 2, 3, 4, 5, tzinfo=timezone.utc))
instance = DataClass.from_dict({"x": "2021-01-02T03:04:05Z"})
assert instance == should_be
def test_ciso8601_date_parser():
@dataclass
class DataClass(DataClassDictMixin):
x: date = field(metadata={"deserialize": "ciso8601"})
should_be = DataClass(x=date(2021, 1, 2))
instance = DataClass.from_dict({"x": "2021-01-02T03:04:05Z"})
assert instance == should_be
def test_ciso8601_time_parser():
@dataclass
class DataClass(DataClassDictMixin):
x: time = field(metadata={"deserialize": "ciso8601"})
should_be = DataClass(x=time(3, 4, 5))
instance = DataClass.from_dict({"x": "2021-01-02T03:04:05Z"})
assert instance == should_be
def test_pendulum_datetime_parser():
@dataclass
class DataClass(DataClassDictMixin):
x: datetime = field(metadata={"deserialize": "pendulum"})
should_be = DataClass(x=datetime(2008, 12, 29, 7, tzinfo=timezone.utc))
instance = DataClass.from_dict({"x": "2009-W01 0700"})
assert instance == should_be
def test_pendulum_date_parser():
@dataclass
class DataClass(DataClassDictMixin):
x: date = field(metadata={"deserialize": "pendulum"})
should_be = DataClass(x=date(2008, 12, 29))
instance = DataClass.from_dict({"x": "2009-W01"})
assert instance == should_be
def test_pendulum_time_parser():
@dataclass
class DataClass(DataClassDictMixin):
x: time = field(metadata={"deserialize": "pendulum"})
should_be = DataClass(x=time(3, 4, 5))
instance = DataClass.from_dict({"x": "2009-W01 030405"})
assert instance == should_be
def test_unsupported_datetime_parser_engine():
with pytest.raises(UnserializableField):
@dataclass
class DataClass(DataClassDictMixin):
x: datetime = field(metadata={"deserialize": "unsupported"})
def test_global_function_datetime_parser():
@dataclass
class DataClass(DataClassDictMixin):
x: datetime = field(
metadata={"deserialize": ciso8601.parse_datetime_as_naive}
)
should_be = DataClass(x=datetime(2021, 1, 2, 3, 4, 5))
instance = DataClass.from_dict({"x": "2021-01-02T03:04:05+03:00"})
assert instance == should_be
def test_local_function_datetime_parser():
def parse_dt(s):
return ciso8601.parse_datetime_as_naive(s)
@dataclass
class DataClass(DataClassDictMixin):
x: datetime = field(metadata={"deserialize": parse_dt})
should_be = DataClass(x=datetime(2021, 1, 2, 3, 4, 5))
instance = DataClass.from_dict({"x": "2021-01-02T03:04:05+03:00"})
assert instance == should_be
def test_class_method_datetime_parser():
class DateTimeParser:
@classmethod
def parse_dt(cls, s: str) -> datetime:
return datetime.fromisoformat(s)
@dataclass
class DataClass(DataClassDictMixin):
x: datetime = field(metadata={"deserialize": DateTimeParser.parse_dt})
should_be = DataClass(x=datetime(2021, 1, 2, 3, 4, 5))
instance = DataClass.from_dict({"x": "2021-01-02T03:04:05"})
assert instance == should_be
def test_class_instance_method_datetime_parser():
class DateTimeParser:
def __call__(self, s: str) -> datetime:
return datetime.fromisoformat(s)
@dataclass
class DataClass(DataClassDictMixin):
x: datetime = field(metadata={"deserialize": DateTimeParser()})
should_be = DataClass(x=datetime(2021, 1, 2, 3, 4, 5))
instance = DataClass.from_dict({"x": "2021-01-02T03:04:05"})
assert instance == should_be
def test_callable_class_instance_datetime_parser():
class CallableDateTimeParser:
def __call__(self, s):
return ciso8601.parse_datetime(s)
@dataclass
class DataClass(DataClassDictMixin):
x: datetime = field(metadata={"deserialize": CallableDateTimeParser()})
should_be = DataClass(x=datetime(2021, 1, 2, 3, 4, 5, tzinfo=timezone.utc))
instance = DataClass.from_dict({"x": "2021-01-02T03:04:05Z"})
assert instance == should_be
def test_lambda_datetime_parser():
@dataclass
class DataClass(DataClassDictMixin):
x: datetime = field(
metadata={"deserialize": lambda s: ciso8601.parse_datetime(s)}
)
should_be = DataClass(x=datetime(2021, 1, 2, 3, 4, 5, tzinfo=timezone.utc))
instance = DataClass.from_dict({"x": "2021-01-02T03:04:05Z"})
assert instance == should_be
def test_derived_dataclass_metadata_deserialize_option():
@dataclass
class A:
x: datetime = field(metadata={"deserialize": ciso8601.parse_datetime})
@dataclass
class B(A, DataClassDictMixin):
y: datetime = field(metadata={"deserialize": ciso8601.parse_datetime})
should_be = B(
x=datetime(2021, 1, 2, 3, 4, 5, tzinfo=timezone.utc),
y=datetime(2021, 1, 2, 3, 4, 5, tzinfo=timezone.utc),
)
instance = B.from_dict(
{"x": "2021-01-02T03:04:05Z", "y": "2021-01-02T03:04:05Z"}
)
assert instance == should_be
def test_bytearray_overridden():
@dataclass
class DataClass(DataClassDictMixin):
x: bytearray = field(
metadata={"deserialize": lambda s: s.upper().encode()}
)
should_be = DataClass(x=bytearray(b"ABC"))
instance = DataClass.from_dict({"x": "abc"})
assert instance == should_be
def test_path_like_overridden():
@dataclass
class DataClass(DataClassDictMixin):
x: Path = field(
metadata={"deserialize": lambda s: Path(str(s).upper())}
)
should_be = DataClass(x=Path("/ABC"))
instance = DataClass.from_dict({"x": "/abc"})
assert instance == should_be
def test_datetime_serialize_option():
@dataclass
class DataClass(DataClassDictMixin):
x: datetime = field(
metadata={"serialize": lambda v: v.strftime("%Y-%m-%d %H:%M:%S")}
)
should_be = {"x": "2021-01-02 03:04:05"}
instance = DataClass(x=datetime(2021, 1, 2, 3, 4, 5, tzinfo=timezone.utc))
assert instance.to_dict() == should_be
def test_third_party_type_overridden():
@dataclass
class DataClass(DataClassDictMixin):
x: ThirdPartyType = field(
metadata={
"deserialize": lambda v: ThirdPartyType(v),
"serialize": lambda v: v.value,
}
)
should_be = DataClass(x=ThirdPartyType(123))
instance = DataClass.from_dict({"x": 123})
assert instance == should_be
assert instance.to_dict() == {"x": 123}
def test_serializable_type_overridden():
@dataclass
class DataClass(DataClassDictMixin):
x: MutableString = field(
metadata={
"deserialize": lambda s: MutableString(s.upper()),
"serialize": lambda v: str(v).lower(),
}
)
should_be = DataClass(x=MutableString("ABC"))
instance = DataClass.from_dict({"x": "abc"})
assert instance == should_be
assert instance.to_dict() == {"x": "abc"}
def test_optional_overridden():
@dataclass
class DataClass(DataClassDictMixin):
x: Optional[ThirdPartyType] = field(
metadata={
"deserialize": lambda v: ThirdPartyType(v),
"serialize": lambda v: v.value,
}
)
instance = DataClass.from_dict({"x": 123})
assert instance
assert instance.x.value == 123
dct = instance.to_dict()
assert dct["x"] == 123
def test_union_overridden():
@dataclass
class DataClass(DataClassDictMixin):
x: Union[int, str, float, ThirdPartyType] = field(
metadata={
"deserialize": lambda v: ThirdPartyType(v),
"serialize": lambda v: v.value,
}
)
instance = DataClass.from_dict({"x": 1})
assert instance == DataClass(x=ThirdPartyType(value=1))
assert instance.to_dict() == {"x": 1}
def test_serialization_strategy():
class TestSerializationStrategy(SerializationStrategy):
def serialize(self, value):
return [value]
def deserialize(self, value):
return value[0]
@dataclass
class DataClass(DataClassDictMixin):
x: int = field(
metadata={"serialization_strategy": TestSerializationStrategy()}
)
instance = DataClass(x=123)
assert DataClass.from_dict({"x": [123]}) == instance
assert instance.to_dict() == {"x": [123]}
def test_collection_derived_custom_class():
@dataclass
class DataClass(DataClassDictMixin):
x: MyList = field(
metadata={"serialize": lambda v: v, "deserialize": lambda v: v}
)
instance = DataClass(x=[1, 2, 3])
assert DataClass.from_dict({"x": [1, 2, 3]}) == instance
assert instance.to_dict() == {"x": [1, 2, 3]}
def test_dataclass_with_typed_dict_overridden():
def serialize_x(x: TypedDictRequiredKeys) -> Dict[str, Any]:
return {"int": int(x["int"]), "float": float(x["float"])}
def deserialize_x(x: Dict[str, Any]) -> TypedDictRequiredKeys:
return TypedDictRequiredKeys(int=x["int"], float=x["float"])
@dataclass
class DataClass(DataClassDictMixin):
x: TypedDictRequiredKeys = field(
metadata={"serialize": serialize_x, "deserialize": deserialize_x}
)
obj = DataClass(x=TypedDictRequiredKeys(int=1, float=2.0))
data = {"x": {"int": 1, "float": 2.0}}
assert DataClass.from_dict(data) == obj
assert obj.to_dict() == data
| [
"datetime.datetime",
"datetime.time",
"ciso8601.parse_datetime",
"pathlib.Path",
"pytest.raises",
"datetime.date",
"datetime.datetime.fromisoformat",
"ciso8601.parse_datetime_as_naive",
"dataclasses.field"
]
| [((562, 605), 'dataclasses.field', 'field', ([], {'metadata': "{'deserialize': 'ciso8601'}"}), "(metadata={'deserialize': 'ciso8601'})\n", (567, 605), False, 'from dataclasses import dataclass, field\n'), ((895, 938), 'dataclasses.field', 'field', ([], {'metadata': "{'deserialize': 'ciso8601'}"}), "(metadata={'deserialize': 'ciso8601'})\n", (900, 938), False, 'from dataclasses import dataclass, field\n'), ((1194, 1237), 'dataclasses.field', 'field', ([], {'metadata': "{'deserialize': 'ciso8601'}"}), "(metadata={'deserialize': 'ciso8601'})\n", (1199, 1237), False, 'from dataclasses import dataclass, field\n'), ((1498, 1541), 'dataclasses.field', 'field', ([], {'metadata': "{'deserialize': 'pendulum'}"}), "(metadata={'deserialize': 'pendulum'})\n", (1503, 1541), False, 'from dataclasses import dataclass, field\n'), ((1820, 1863), 'dataclasses.field', 'field', ([], {'metadata': "{'deserialize': 'pendulum'}"}), "(metadata={'deserialize': 'pendulum'})\n", (1825, 1863), False, 'from dataclasses import dataclass, field\n'), ((2109, 2152), 'dataclasses.field', 'field', ([], {'metadata': "{'deserialize': 'pendulum'}"}), "(metadata={'deserialize': 'pendulum'})\n", (2114, 2152), False, 'from dataclasses import dataclass, field\n'), ((2349, 2383), 'pytest.raises', 'pytest.raises', (['UnserializableField'], {}), '(UnserializableField)\n', (2362, 2383), False, 'import pytest\n'), ((2647, 2712), 'dataclasses.field', 'field', ([], {'metadata': "{'deserialize': ciso8601.parse_datetime_as_naive}"}), "(metadata={'deserialize': ciso8601.parse_datetime_as_naive})\n", (2652, 2712), False, 'from dataclasses import dataclass, field\n'), ((2980, 3015), 'ciso8601.parse_datetime_as_naive', 'ciso8601.parse_datetime_as_naive', (['s'], {}), '(s)\n', (3012, 3015), False, 'import ciso8601\n'), ((3095, 3136), 'dataclasses.field', 'field', ([], {'metadata': "{'deserialize': parse_dt}"}), "(metadata={'deserialize': parse_dt})\n", (3100, 3136), False, 'from dataclasses import dataclass, field\n'), ((3562, 3618), 'dataclasses.field', 'field', ([], {'metadata': "{'deserialize': DateTimeParser.parse_dt}"}), "(metadata={'deserialize': DateTimeParser.parse_dt})\n", (3567, 3618), False, 'from dataclasses import dataclass, field\n'), ((5214, 5270), 'dataclasses.field', 'field', ([], {'metadata': "{'deserialize': ciso8601.parse_datetime}"}), "(metadata={'deserialize': ciso8601.parse_datetime})\n", (5219, 5270), False, 'from dataclasses import dataclass, field\n'), ((5345, 5401), 'dataclasses.field', 'field', ([], {'metadata': "{'deserialize': ciso8601.parse_datetime}"}), "(metadata={'deserialize': ciso8601.parse_datetime})\n", (5350, 5401), False, 'from dataclasses import dataclass, field\n'), ((9210, 9280), 'dataclasses.field', 'field', ([], {'metadata': "{'serialize': lambda v: v, 'deserialize': lambda v: v}"}), "(metadata={'serialize': lambda v: v, 'deserialize': lambda v: v})\n", (9215, 9280), False, 'from dataclasses import dataclass, field\n'), ((9864, 9936), 'dataclasses.field', 'field', ([], {'metadata': "{'serialize': serialize_x, 'deserialize': deserialize_x}"}), "(metadata={'serialize': serialize_x, 'deserialize': deserialize_x})\n", (9869, 9936), False, 'from dataclasses import dataclass, field\n'), ((635, 685), 'datetime.datetime', 'datetime', (['(2021)', '(1)', '(2)', '(3)', '(4)', '(5)'], {'tzinfo': 'timezone.utc'}), '(2021, 1, 2, 3, 4, 5, tzinfo=timezone.utc)\n', (643, 685), False, 'from datetime import date, datetime, time, timezone\n'), ((968, 984), 'datetime.date', 'date', (['(2021)', '(1)', '(2)'], {}), '(2021, 
1, 2)\n', (972, 984), False, 'from datetime import date, datetime, time, timezone\n'), ((1267, 1280), 'datetime.time', 'time', (['(3)', '(4)', '(5)'], {}), '(3, 4, 5)\n', (1271, 1280), False, 'from datetime import date, datetime, time, timezone\n'), ((1571, 1617), 'datetime.datetime', 'datetime', (['(2008)', '(12)', '(29)', '(7)'], {'tzinfo': 'timezone.utc'}), '(2008, 12, 29, 7, tzinfo=timezone.utc)\n', (1579, 1617), False, 'from datetime import date, datetime, time, timezone\n'), ((1893, 1911), 'datetime.date', 'date', (['(2008)', '(12)', '(29)'], {}), '(2008, 12, 29)\n', (1897, 1911), False, 'from datetime import date, datetime, time, timezone\n'), ((2182, 2195), 'datetime.time', 'time', (['(3)', '(4)', '(5)'], {}), '(3, 4, 5)\n', (2186, 2195), False, 'from datetime import date, datetime, time, timezone\n'), ((2476, 2522), 'dataclasses.field', 'field', ([], {'metadata': "{'deserialize': 'unsupported'}"}), "(metadata={'deserialize': 'unsupported'})\n", (2481, 2522), False, 'from dataclasses import dataclass, field\n'), ((2764, 2793), 'datetime.datetime', 'datetime', (['(2021)', '(1)', '(2)', '(3)', '(4)', '(5)'], {}), '(2021, 1, 2, 3, 4, 5)\n', (2772, 2793), False, 'from datetime import date, datetime, time, timezone\n'), ((3166, 3195), 'datetime.datetime', 'datetime', (['(2021)', '(1)', '(2)', '(3)', '(4)', '(5)'], {}), '(2021, 1, 2, 3, 4, 5)\n', (3174, 3195), False, 'from datetime import date, datetime, time, timezone\n'), ((3457, 3482), 'datetime.datetime.fromisoformat', 'datetime.fromisoformat', (['s'], {}), '(s)\n', (3479, 3482), False, 'from datetime import date, datetime, time, timezone\n'), ((3648, 3677), 'datetime.datetime', 'datetime', (['(2021)', '(1)', '(2)', '(3)', '(4)', '(5)'], {}), '(2021, 1, 2, 3, 4, 5)\n', (3656, 3677), False, 'from datetime import date, datetime, time, timezone\n'), ((3922, 3947), 'datetime.datetime.fromisoformat', 'datetime.fromisoformat', (['s'], {}), '(s)\n', (3944, 3947), False, 'from datetime import date, datetime, time, timezone\n'), ((4106, 4135), 'datetime.datetime', 'datetime', (['(2021)', '(1)', '(2)', '(3)', '(4)', '(5)'], {}), '(2021, 1, 2, 3, 4, 5)\n', (4114, 4135), False, 'from datetime import date, datetime, time, timezone\n'), ((4373, 4399), 'ciso8601.parse_datetime', 'ciso8601.parse_datetime', (['s'], {}), '(s)\n', (4396, 4399), False, 'import ciso8601\n'), ((4566, 4616), 'datetime.datetime', 'datetime', (['(2021)', '(1)', '(2)', '(3)', '(4)', '(5)'], {'tzinfo': 'timezone.utc'}), '(2021, 1, 2, 3, 4, 5, tzinfo=timezone.utc)\n', (4574, 4616), False, 'from datetime import date, datetime, time, timezone\n'), ((4953, 5003), 'datetime.datetime', 'datetime', (['(2021)', '(1)', '(2)', '(3)', '(4)', '(5)'], {'tzinfo': 'timezone.utc'}), '(2021, 1, 2, 3, 4, 5, tzinfo=timezone.utc)\n', (4961, 5003), False, 'from datetime import date, datetime, time, timezone\n'), ((5432, 5482), 'datetime.datetime', 'datetime', (['(2021)', '(1)', '(2)', '(3)', '(4)', '(5)'], {'tzinfo': 'timezone.utc'}), '(2021, 1, 2, 3, 4, 5, tzinfo=timezone.utc)\n', (5440, 5482), False, 'from datetime import date, datetime, time, timezone\n'), ((5494, 5544), 'datetime.datetime', 'datetime', (['(2021)', '(1)', '(2)', '(3)', '(4)', '(5)'], {'tzinfo': 'timezone.utc'}), '(2021, 1, 2, 3, 4, 5, tzinfo=timezone.utc)\n', (5502, 5544), False, 'from datetime import date, datetime, time, timezone\n'), ((6238, 6250), 'pathlib.Path', 'Path', (['"""/ABC"""'], {}), "('/ABC')\n", (6242, 6250), False, 'from pathlib import Path\n'), ((6621, 6671), 'datetime.datetime', 'datetime', (['(2021)', '(1)', 
'(2)', '(3)', '(4)', '(5)'], {'tzinfo': 'timezone.utc'}), '(2021, 1, 2, 3, 4, 5, tzinfo=timezone.utc)\n', (6629, 6671), False, 'from datetime import date, datetime, time, timezone\n'), ((4886, 4912), 'ciso8601.parse_datetime', 'ciso8601.parse_datetime', (['s'], {}), '(s)\n', (4909, 4912), False, 'import ciso8601\n')] |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ops to manipulate lists of tensors."""
# pylint: disable=g-bad-name
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_list_ops
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_list_ops import *
# pylint: enable=wildcard-import
ops.NotDifferentiable("TensorListConcatLists")
ops.NotDifferentiable("TensorListElementShape")
ops.NotDifferentiable("TensorListLength")
ops.NotDifferentiable("TensorListPushBackBatch")
def empty_tensor_list(element_shape,
element_dtype,
max_num_elements=None,
name=None):
if max_num_elements is None:
max_num_elements = -1
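  # A max_num_elements of -1 is the sentinel for an unbounded list (no preset capacity).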
return gen_list_ops.empty_tensor_list(
element_shape=_build_element_shape(element_shape),
element_dtype=element_dtype,
max_num_elements=max_num_elements,
name=name)
def tensor_list_reserve(element_shape, num_elements, element_dtype, name=None):
return gen_list_ops.tensor_list_reserve(
element_shape=_build_element_shape(element_shape),
num_elements=num_elements,
element_dtype=element_dtype,
name=name)
def tensor_list_from_tensor(tensor, element_shape, name=None):
return gen_list_ops.tensor_list_from_tensor(
tensor=tensor,
element_shape=_build_element_shape(element_shape),
name=name)
def tensor_list_concat(input_handle, element_dtype, name=None):
# Ignore the lengths output of TensorListConcat. It is only used during
# gradient computation.
return gen_list_ops.tensor_list_concat(
input_handle=input_handle, element_dtype=element_dtype, name=name)[0]
def tensor_list_split(tensor, element_shape, lengths, name=None):
return gen_list_ops.tensor_list_split(
tensor=tensor,
element_shape=_build_element_shape(element_shape),
lengths=lengths,
name=name)
@ops.RegisterGradient("TensorListPushBack")
def _PushBackGrad(op, dresult):
return gen_list_ops.tensor_list_pop_back(
dresult, element_dtype=op.get_attr("element_dtype"))
@ops.RegisterGradient("TensorListPopBack")
def _PopBackGrad(op, dlist, delement):
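  # If there is no incoming gradient for the list, synthesize an empty list of
  # the popped element's dtype and shape so delement can be pushed back below.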
if dlist is None:
dlist = empty_tensor_list(
element_dtype=delement.dtype,
element_shape=gen_list_ops.tensor_list_element_shape(
op.outputs[0], shape_type=dtypes.int32))
return gen_list_ops.tensor_list_push_back(dlist, delement)
@ops.RegisterGradient("TensorListStack")
def _TensorListStackGrad(unused_op, dtensor):
return tensor_list_from_tensor(dtensor, element_shape=dtensor.shape[1:])
@ops.RegisterGradient("TensorListConcat")
def _TensorListConcatGrad(op, dtensor, unused_dlengths):
# TODO(srbs): We lose the element_shape information in tensor_list_concat.
# Consider providing that as an output of TensorListConcat?
if dtensor.shape.rank is None:
element_shape = None
else:
element_shape = [None] + dtensor.shape.as_list()[1:]
return tensor_list_split(
dtensor,
element_shape=_build_element_shape(element_shape),
lengths=op.outputs[1])
@ops.RegisterGradient("TensorListSplit")
def _TensorListSplitGrad(op, dlist):
return tensor_list_concat(dlist, element_dtype=op.inputs[0].dtype), None, None
@ops.RegisterGradient("TensorListFromTensor")
def _TensorListFromTensorGrad(op, dlist):
"""Gradient for TensorListFromTensor."""
if op.inputs[0].shape.dims and op.inputs[0].shape.dims[0].value is not None:
num_elements = op.inputs[0].shape.dims[0].value
else:
num_elements = None
if dlist is None:
dlist = empty_tensor_list(
element_dtype=op.inputs[0].dtype,
element_shape=gen_list_ops.tensor_list_element_shape(
op.outputs[0], shape_type=dtypes.int32))
tensor_grad = gen_list_ops.tensor_list_stack(
dlist, element_dtype=op.inputs[0].dtype, num_elements=num_elements)
shape_grad = None
return tensor_grad, shape_grad
@ops.RegisterGradient("TensorListGetItem")
def _TensorListGetItemGrad(op, ditem):
"""Gradient for TensorListGetItem."""
list_size = gen_list_ops.tensor_list_length(op.inputs[0])
list_grad = gen_list_ops.tensor_list_set_item(
gen_list_ops.tensor_list_reserve(
gen_list_ops.tensor_list_element_shape(op.inputs[0],
shape_type=dtypes.int32),
list_size, element_dtype=ditem.dtype),
index=op.inputs[1],
item=ditem)
index_grad = None
return list_grad, index_grad
@ops.RegisterGradient("TensorListSetItem")
def _TensorListSetItemGrad(op, dlist):
_, index, item = op.inputs
list_grad = gen_list_ops.tensor_list_set_item(
dlist, index=index, item=array_ops.zeros_like(item))
index_grad = None
element_grad = gen_list_ops.tensor_list_get_item(
dlist, index, element_dtype=item.dtype)
return list_grad, index_grad, element_grad
@ops.RegisterGradient("TensorListGather")
def _TensorListGatherGrad(op, dtensor):
_, indices = op.inputs
return gen_list_ops.tensor_list_scatter(
tensor=dtensor, indices=indices,
element_shape=ops.convert_to_tensor(-1, dtype=dtypes.int32)), None
@ops.RegisterGradient("TensorListScatter")
def _TensorListScatterGrad(op, dlist):
t, indices, _ = op.inputs
return gen_list_ops.tensor_list_gather(
dlist, indices, element_dtype=t.dtype), None
def _build_element_shape(shape):
"""Converts shape to a format understood by list_ops for element_shape.
If `shape` is already a `Tensor` it is returned as-is. We do not perform a
type check here.
If shape is None or a TensorShape with unknown rank, -1 is returned.
  If shape is a scalar, an int32 tensor with empty list is returned. Note we
  do not directly return an empty list since ops.convert_to_tensor would
  convert it to a float32 which is not a valid type for element_shape.
If shape is a sequence of dims, None's in the list are replaced with -1. We
do not check the dtype of the other dims.
Args:
shape: Could be None, Tensor, TensorShape or a list of dims (each dim could
be a None, scalar or Tensor).
Returns:
A None-free shape that can be converted to a tensor.
"""
if isinstance(shape, ops.Tensor):
return shape
if isinstance(shape, tensor_shape.TensorShape):
# `TensorShape.as_list` requires rank to be known.
shape = shape.as_list() if shape else None
# Shape is unknown.
if shape is None:
return -1
# Shape is a scalar.
if not shape:
return ops.convert_to_tensor(shape, dtype=dtypes.int32)
# Shape is a sequence of dimensions. Convert None dims to -1.
return [d if d is not None else -1 for d in shape]
| [
"tensorflow.python.framework.ops.RegisterGradient",
"tensorflow.python.ops.gen_list_ops.tensor_list_push_back",
"tensorflow.python.ops.gen_list_ops.tensor_list_concat",
"tensorflow.python.ops.gen_list_ops.tensor_list_get_item",
"tensorflow.python.ops.gen_list_ops.tensor_list_gather",
"tensorflow.python.framework.ops.NotDifferentiable",
"tensorflow.python.ops.gen_list_ops.tensor_list_stack",
"tensorflow.python.ops.gen_list_ops.tensor_list_element_shape",
"tensorflow.python.ops.gen_list_ops.tensor_list_length",
"tensorflow.python.framework.ops.convert_to_tensor",
"tensorflow.python.ops.array_ops.zeros_like"
]
| [((1248, 1294), 'tensorflow.python.framework.ops.NotDifferentiable', 'ops.NotDifferentiable', (['"""TensorListConcatLists"""'], {}), "('TensorListConcatLists')\n", (1269, 1294), False, 'from tensorflow.python.framework import ops\n'), ((1295, 1342), 'tensorflow.python.framework.ops.NotDifferentiable', 'ops.NotDifferentiable', (['"""TensorListElementShape"""'], {}), "('TensorListElementShape')\n", (1316, 1342), False, 'from tensorflow.python.framework import ops\n'), ((1343, 1384), 'tensorflow.python.framework.ops.NotDifferentiable', 'ops.NotDifferentiable', (['"""TensorListLength"""'], {}), "('TensorListLength')\n", (1364, 1384), False, 'from tensorflow.python.framework import ops\n'), ((1385, 1433), 'tensorflow.python.framework.ops.NotDifferentiable', 'ops.NotDifferentiable', (['"""TensorListPushBackBatch"""'], {}), "('TensorListPushBackBatch')\n", (1406, 1433), False, 'from tensorflow.python.framework import ops\n'), ((2826, 2868), 'tensorflow.python.framework.ops.RegisterGradient', 'ops.RegisterGradient', (['"""TensorListPushBack"""'], {}), "('TensorListPushBack')\n", (2846, 2868), False, 'from tensorflow.python.framework import ops\n'), ((3007, 3048), 'tensorflow.python.framework.ops.RegisterGradient', 'ops.RegisterGradient', (['"""TensorListPopBack"""'], {}), "('TensorListPopBack')\n", (3027, 3048), False, 'from tensorflow.python.framework import ops\n'), ((3356, 3395), 'tensorflow.python.framework.ops.RegisterGradient', 'ops.RegisterGradient', (['"""TensorListStack"""'], {}), "('TensorListStack')\n", (3376, 3395), False, 'from tensorflow.python.framework import ops\n'), ((3520, 3560), 'tensorflow.python.framework.ops.RegisterGradient', 'ops.RegisterGradient', (['"""TensorListConcat"""'], {}), "('TensorListConcat')\n", (3540, 3560), False, 'from tensorflow.python.framework import ops\n'), ((4012, 4051), 'tensorflow.python.framework.ops.RegisterGradient', 'ops.RegisterGradient', (['"""TensorListSplit"""'], {}), "('TensorListSplit')\n", (4032, 4051), False, 'from tensorflow.python.framework import ops\n'), ((4173, 4217), 'tensorflow.python.framework.ops.RegisterGradient', 'ops.RegisterGradient', (['"""TensorListFromTensor"""'], {}), "('TensorListFromTensor')\n", (4193, 4217), False, 'from tensorflow.python.framework import ops\n'), ((4852, 4893), 'tensorflow.python.framework.ops.RegisterGradient', 'ops.RegisterGradient', (['"""TensorListGetItem"""'], {}), "('TensorListGetItem')\n", (4872, 4893), False, 'from tensorflow.python.framework import ops\n'), ((5407, 5448), 'tensorflow.python.framework.ops.RegisterGradient', 'ops.RegisterGradient', (['"""TensorListSetItem"""'], {}), "('TensorListSetItem')\n", (5427, 5448), False, 'from tensorflow.python.framework import ops\n'), ((5791, 5831), 'tensorflow.python.framework.ops.RegisterGradient', 'ops.RegisterGradient', (['"""TensorListGather"""'], {}), "('TensorListGather')\n", (5811, 5831), False, 'from tensorflow.python.framework import ops\n'), ((6055, 6096), 'tensorflow.python.framework.ops.RegisterGradient', 'ops.RegisterGradient', (['"""TensorListScatter"""'], {}), "('TensorListScatter')\n", (6075, 6096), False, 'from tensorflow.python.framework import ops\n'), ((3301, 3352), 'tensorflow.python.ops.gen_list_ops.tensor_list_push_back', 'gen_list_ops.tensor_list_push_back', (['dlist', 'delement'], {}), '(dlist, delement)\n', (3335, 3352), False, 'from tensorflow.python.ops import gen_list_ops\n'), ((4690, 4792), 'tensorflow.python.ops.gen_list_ops.tensor_list_stack', 'gen_list_ops.tensor_list_stack', (['dlist'], {'element_dtype': 
'op.inputs[0].dtype', 'num_elements': 'num_elements'}), '(dlist, element_dtype=op.inputs[0].dtype,\n num_elements=num_elements)\n', (4720, 4792), False, 'from tensorflow.python.ops import gen_list_ops\n'), ((4987, 5032), 'tensorflow.python.ops.gen_list_ops.tensor_list_length', 'gen_list_ops.tensor_list_length', (['op.inputs[0]'], {}), '(op.inputs[0])\n', (5018, 5032), False, 'from tensorflow.python.ops import gen_list_ops\n'), ((5662, 5735), 'tensorflow.python.ops.gen_list_ops.tensor_list_get_item', 'gen_list_ops.tensor_list_get_item', (['dlist', 'index'], {'element_dtype': 'item.dtype'}), '(dlist, index, element_dtype=item.dtype)\n', (5695, 5735), False, 'from tensorflow.python.ops import gen_list_ops\n'), ((2487, 2590), 'tensorflow.python.ops.gen_list_ops.tensor_list_concat', 'gen_list_ops.tensor_list_concat', ([], {'input_handle': 'input_handle', 'element_dtype': 'element_dtype', 'name': 'name'}), '(input_handle=input_handle, element_dtype=\n element_dtype, name=name)\n', (2518, 2590), False, 'from tensorflow.python.ops import gen_list_ops\n'), ((6173, 6243), 'tensorflow.python.ops.gen_list_ops.tensor_list_gather', 'gen_list_ops.tensor_list_gather', (['dlist', 'indices'], {'element_dtype': 't.dtype'}), '(dlist, indices, element_dtype=t.dtype)\n', (6204, 6243), False, 'from tensorflow.python.ops import gen_list_ops\n'), ((7386, 7434), 'tensorflow.python.framework.ops.convert_to_tensor', 'ops.convert_to_tensor', (['shape'], {'dtype': 'dtypes.int32'}), '(shape, dtype=dtypes.int32)\n', (7407, 7434), False, 'from tensorflow.python.framework import ops\n'), ((5132, 5209), 'tensorflow.python.ops.gen_list_ops.tensor_list_element_shape', 'gen_list_ops.tensor_list_element_shape', (['op.inputs[0]'], {'shape_type': 'dtypes.int32'}), '(op.inputs[0], shape_type=dtypes.int32)\n', (5170, 5209), False, 'from tensorflow.python.ops import gen_list_ops\n'), ((5597, 5623), 'tensorflow.python.ops.array_ops.zeros_like', 'array_ops.zeros_like', (['item'], {}), '(item)\n', (5617, 5623), False, 'from tensorflow.python.ops import array_ops\n'), ((3199, 3277), 'tensorflow.python.ops.gen_list_ops.tensor_list_element_shape', 'gen_list_ops.tensor_list_element_shape', (['op.outputs[0]'], {'shape_type': 'dtypes.int32'}), '(op.outputs[0], shape_type=dtypes.int32)\n', (3237, 3277), False, 'from tensorflow.python.ops import gen_list_ops\n'), ((4581, 4659), 'tensorflow.python.ops.gen_list_ops.tensor_list_element_shape', 'gen_list_ops.tensor_list_element_shape', (['op.outputs[0]'], {'shape_type': 'dtypes.int32'}), '(op.outputs[0], shape_type=dtypes.int32)\n', (4619, 4659), False, 'from tensorflow.python.ops import gen_list_ops\n'), ((5999, 6044), 'tensorflow.python.framework.ops.convert_to_tensor', 'ops.convert_to_tensor', (['(-1)'], {'dtype': 'dtypes.int32'}), '(-1, dtype=dtypes.int32)\n', (6020, 6044), False, 'from tensorflow.python.framework import ops\n')] |
import ast
import re
import sys
if sys.version_info < (2, 7):
import unittest2 as unittest
else:
import unittest
import astunparse
from tests.common import AstunparseCommonTestCase
class DumpTestCase(AstunparseCommonTestCase, unittest.TestCase):
def assertASTEqual(self, dump1, dump2):
# undo the pretty-printing
dump1 = re.sub(r"(?<=[\(\[])\n\s+", "", dump1)
dump1 = re.sub(r"\n\s+", " ", dump1)
self.assertEqual(dump1, dump2)
def check_roundtrip(self, code1, filename="internal", mode="exec"):
ast_ = compile(str(code1), filename, mode, ast.PyCF_ONLY_AST)
dump1 = astunparse.dump(ast_)
dump2 = ast.dump(ast_)
self.assertASTEqual(dump1, dump2)
| [
"re.sub",
"ast.dump",
"astunparse.dump"
]
| [((352, 393), 're.sub', 're.sub', (['"""(?<=[\\\\(\\\\[])\\\\n\\\\s+"""', '""""""', 'dump1'], {}), "('(?<=[\\\\(\\\\[])\\\\n\\\\s+', '', dump1)\n", (358, 393), False, 'import re\n'), ((407, 436), 're.sub', 're.sub', (['"""\\\\n\\\\s+"""', '""" """', 'dump1'], {}), "('\\\\n\\\\s+', ' ', dump1)\n", (413, 436), False, 'import re\n'), ((634, 655), 'astunparse.dump', 'astunparse.dump', (['ast_'], {}), '(ast_)\n', (649, 655), False, 'import astunparse\n'), ((672, 686), 'ast.dump', 'ast.dump', (['ast_'], {}), '(ast_)\n', (680, 686), False, 'import ast\n')] |
from django.urls import include, path
from .views import home, bike
urlpatterns = [
path("", home),
path("bike/<int:number>", bike)
] | [
"django.urls.path"
]
| [((89, 103), 'django.urls.path', 'path', (['""""""', 'home'], {}), "('', home)\n", (93, 103), False, 'from django.urls import include, path\n'), ((109, 140), 'django.urls.path', 'path', (['"""bike/<int:number>"""', 'bike'], {}), "('bike/<int:number>', bike)\n", (113, 140), False, 'from django.urls import include, path\n')] |
"""
Remove Fragments not in Knowledgebase
"""
__author__ = "<NAME>"
__email__ = "<EMAIL>"
__copyright__ = "Copyright 2019, Hong Kong University of Science and Technology"
__license__ = "3-clause BSD"
from argparse import ArgumentParser
import numpy as np
import pickle
parser = ArgumentParser(description="Build Files")
parser.add_argument("--datadir", type=str, default="Data", help="input - XXX.YYY ")
parser.add_argument("--envNewAcronym", type=str, default="PRT.SNW", help="input - XXX.YYY ")
args = parser.parse_args()
# Check the Bound Fragments
BoundFrags = np.loadtxt("../%s/%s/%s.Homogenised.boundfrags_zeros.txt" %(args.datadir, args.envNewAcronym, args.envNewAcronym), delimiter=',')
normalDF = pickle.load(open("../%s/GrandCID.dict" %(args.datadir), "rb"))
binding = np.full(BoundFrags.shape,-1)
mlength = 0
for r, i in enumerate(BoundFrags):
for c, j in enumerate(i[i!=0]):
try:
# Checks whether the Fragment can be found in the 59k Fragment Base
binding[r,c]=normalDF.index.get_loc(int(j))
except:
continue
temp = binding[r]
if temp[temp!=-1].shape[0] > mlength:
mlength = temp[temp!=-1].shape[0]
print(mlength) #Finds the maximum number of Fragments per environment -> 705
indices = np.empty(binding.shape[0])
red_binding = np.full((binding.shape[0], mlength), -1)
for j, i in enumerate(binding):
indices[j] = i[i!=-1].shape[0]
red_binding[j][:int(indices[j])] = i[i!=-1]
red_binding = np.delete(red_binding, np.where(indices==0), axis=0)
pickle.dump(red_binding, open("../%s/%s/%s.binding.mtr" %(args.datadir, args.envNewAcronym, args.envNewAcronym), "wb"))
# Removes environments without binding Fragments
Features_all = pickle.load(open("../%s/%s/%s.Homogenised.property.pvar" %(args.datadir, args.envNewAcronym, args.envNewAcronym), "rb"))
Features_all = np.delete(Features_all, np.where(indices==0), axis=0)
pickle.dump(Features_all, open("../%s/%s/%s.Homogenised.property.pvar" %(args.datadir, args.envNewAcronym, args.envNewAcronym), "wb"))
# Removes environment annotation without binding fragments
with open("../%s/%s/%s.Homogenised.annotation.txt" %(args.datadir, args.envNewAcronym, args.envNewAcronym), "r+") as f:
lines = f.readlines()
for i in np.where(indices==0)[0][::-1]:
del lines[i]
f.seek(0)
f.truncate()
f.writelines(lines)
| [
"argparse.ArgumentParser",
"numpy.where",
"numpy.empty",
"numpy.full",
"numpy.loadtxt"
]
| [((281, 322), 'argparse.ArgumentParser', 'ArgumentParser', ([], {'description': '"""Build Files"""'}), "(description='Build Files')\n", (295, 322), False, 'from argparse import ArgumentParser\n'), ((572, 706), 'numpy.loadtxt', 'np.loadtxt', (["('../%s/%s/%s.Homogenised.boundfrags_zeros.txt' % (args.datadir, args.\n envNewAcronym, args.envNewAcronym))"], {'delimiter': '""","""'}), "('../%s/%s/%s.Homogenised.boundfrags_zeros.txt' % (args.datadir,\n args.envNewAcronym, args.envNewAcronym), delimiter=',')\n", (582, 706), True, 'import numpy as np\n'), ((788, 817), 'numpy.full', 'np.full', (['BoundFrags.shape', '(-1)'], {}), '(BoundFrags.shape, -1)\n', (795, 817), True, 'import numpy as np\n'), ((1296, 1322), 'numpy.empty', 'np.empty', (['binding.shape[0]'], {}), '(binding.shape[0])\n', (1304, 1322), True, 'import numpy as np\n'), ((1337, 1377), 'numpy.full', 'np.full', (['(binding.shape[0], mlength)', '(-1)'], {}), '((binding.shape[0], mlength), -1)\n', (1344, 1377), True, 'import numpy as np\n'), ((1530, 1552), 'numpy.where', 'np.where', (['(indices == 0)'], {}), '(indices == 0)\n', (1538, 1552), True, 'import numpy as np\n'), ((1908, 1930), 'numpy.where', 'np.where', (['(indices == 0)'], {}), '(indices == 0)\n', (1916, 1930), True, 'import numpy as np\n'), ((2294, 2316), 'numpy.where', 'np.where', (['(indices == 0)'], {}), '(indices == 0)\n', (2302, 2316), True, 'import numpy as np\n')] |
"""Tests for core.billing.
Run this test from the project root
$ nosetests core.tests.billing_tests
Copyright (c) 2016-present, Facebook, Inc.
All rights reserved.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree. An additional grant
of patent rights can be found in the PATENTS file in the same directory.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import unittest
import random
import math
from core.billing import get_call_cost
from core.billing import get_prefix_from_number
from core.billing import get_sms_cost
from core.billing import process_prices
from core.billing import round_to_billable_unit
from core.billing import round_up_to_nearest_100
from core import config_database
TARIFF = 100
class GetCostTest(unittest.TestCase):
"""Testing core.billing.get_call_cost."""
@classmethod
def setUpClass(cls):
# Setup the config db.
cls.config_db = config_database.ConfigDB()
cls.config_db['bts_secret'] = 'hokay'
cls.config_db['free_seconds'] = '5'
cls.config_db['billable_unit'] = '1'
# Setup some price data like what would be sent back from the cloud.
price_data = [
{
'directionality': 'off_network_send',
'prefix': '509',
'country_name': 'Haiti',
'country_code': 'HT',
'cost_to_subscriber_per_sms': 900,
'cost_to_subscriber_per_min': 1100,
'billable_unit': 1,
}, {
'directionality': 'off_network_send',
'prefix': '56',
'country_name': 'Chile',
'country_code': 'CL',
'cost_to_subscriber_per_sms': 1000,
'cost_to_subscriber_per_min': 800,
'billable_unit': 1,
}, {
'directionality': 'off_network_send',
'prefix': '63',
'country_name': 'Philippines',
'country_code': 'PH',
'cost_to_subscriber_per_sms': 100,
'cost_to_subscriber_per_min': 600,
'billable_unit': 30,
}, {
'directionality': 'off_network_receive',
'cost_to_subscriber_per_sms': 200,
'cost_to_subscriber_per_min': 100,
'billable_unit': 1,
}, {
'directionality': 'on_network_send',
'cost_to_subscriber_per_sms': 400,
'cost_to_subscriber_per_min': 300,
'billable_unit': 1,
}, {
'directionality': 'on_network_receive',
'cost_to_subscriber_per_sms': 500,
'cost_to_subscriber_per_min': 200,
'billable_unit': 1,
}
]
# Populate the config db with prices
process_prices(price_data, cls.config_db)
def test_on_receive_call(self):
"""We can get the subscriber price for an on-network received call."""
billable_seconds = 170
# Recall that the expected cost is rounded to the nearest value of 100.
expected_cost = 600
self.assertEqual(expected_cost,
get_call_cost(billable_seconds, 'on_network_receive'))
def test_on_receive_sms(self):
"""We can get the subscriber price for an on-network received SMS."""
expected_cost = 500
self.assertEqual(expected_cost, get_sms_cost('on_network_receive'))
def test_off_receive_call(self):
"""We can get the subscriber price for an off-network received call."""
billable_seconds = 700
expected_cost = 1200
self.assertEqual(
expected_cost,
get_call_cost(billable_seconds, 'off_network_receive'))
def test_off_receive_sms(self):
"""We can get the subscriber price for an off-network received SMS."""
expected_cost = 200
self.assertEqual(expected_cost, get_sms_cost('off_network_receive'))
def test_on_send_call(self):
"""We can get the subscriber price for an on-network sent call."""
billable_seconds = 190
expected_cost = 1000
self.assertEqual(expected_cost,
get_call_cost(billable_seconds, 'on_network_send'))
def test_on_send_sms(self):
"""We can get the subscriber price for an on-network sent SMS."""
expected_cost = 400
self.assertEqual(expected_cost, get_sms_cost('on_network_send'))
def test_call_to_chile(self):
"""We can get the cost of a call to Chile."""
billable_seconds = 830
expected_cost = 11000
number = ''.join(['56', '1235554567'])
actual_cost = get_call_cost(billable_seconds, 'off_network_send',
destination_number=number)
self.assertEqual(expected_cost, actual_cost)
def test_sms_to_chile(self):
"""We can get the price to a subscriber of an SMS sent to Chile."""
expected_cost = 1000
number = ''.join(['56', '1235554567'])
actual_cost = get_sms_cost('off_network_send',
destination_number=number)
self.assertEqual(expected_cost, actual_cost)
def test_call_to_ph(self):
""" We bill for calls to PH correctly. """
billable_seconds = 70
expected_cost = 900
number = ''.join(['63', '5551234567'])
actual_cost = get_call_cost(billable_seconds, 'off_network_send',
destination_number=number)
self.assertEqual(expected_cost, actual_cost)
def test_nonexistent_prefix(self):
"""If the prefix doesn't exist, it's free.
The prefix price key might not exist if, say, the billing tier data
has not yet been loaded.
"""
expected_cost = 0
number = ''.join(['9999', '1235554567'])
actual_cost = get_sms_cost('off_network_send',
destination_number=number)
self.assertEqual(expected_cost, actual_cost)
class GetPrefixFromNumberTest(unittest.TestCase):
"""Testing core.billing.get_prefix_from_number."""
@classmethod
def setUpClass(cls):
# Setup the config db.
cls.config_db = config_database.ConfigDB()
cls.config_db['bts_secret'] = 'yup'
# Load up some pricing data into the config db. We use this data to
# determine what prefixes are available.
# 2015dec9(shasan): This is a legacy billing response, lacking billable
# units. This also tests we can handle that case.
price_data = [
{
'directionality': 'off_network_send',
'prefix': '789',
'country_name': 'Ocenaia',
'country_code': 'OC',
'cost_to_subscriber_per_sms': 300,
'cost_to_subscriber_per_min': 20,
}, {
'directionality': 'off_network_send',
'prefix': '78',
'country_name': 'Eurasia',
'country_code': 'EU',
'cost_to_subscriber_per_sms': 400,
'cost_to_subscriber_per_min': 10,
}, {
'directionality': 'off_network_send',
'prefix': '7',
'country_name': 'Eastasia',
'country_code': 'EA',
'cost_to_subscriber_per_sms': 500,
'cost_to_subscriber_per_min': 30,
}, {
'directionality': 'off_network_send',
'prefix': '3',
'country_name': 'London',
'country_code': 'LN',
'cost_to_subscriber_per_sms': 5000,
'cost_to_subscriber_per_min': 3000,
}
]
# Populate the config db with prices
process_prices(price_data, cls.config_db)
def test_get_one_digit_prefix(self):
"""We can get a one digit prefix."""
number = ''.join(['7', '1235557890'])
self.assertEqual('7', get_prefix_from_number(number))
def test_get_two_digit_prefix(self):
"""We can get a two digit prefix."""
number = ''.join(['78', '1235557890'])
self.assertEqual('78', get_prefix_from_number(number))
def test_get_three_digit_prefix(self):
"""We can get a three digit prefix."""
number = ''.join(['789', '1235557890'])
self.assertEqual('789', get_prefix_from_number(number))
def test_get_one_digit_uncommon_prefix(self):
"""We can get a one digit uncommon prefix."""
number = ''.join(['3', '1235557890'])
self.assertEqual('3', get_prefix_from_number(number))
class RoundCostToBillableUnit(unittest.TestCase):
"""Testing core.billing.round_to_billable_unit."""
def test_billable_unit_rounding_sans_free_seconds(self):
for i in range(100):
billsec = random.randint(1, 5000)
expected_cost = int(billsec * (TARIFF / 60.0))
print('%s seconds should cost %s' % (billsec, expected_cost))
self.assertEqual(expected_cost,
round_to_billable_unit(billsec, TARIFF))
def test_billable_unit_rounding_with_free_seconds(self):
for i in range(100):
billsec = random.randint(100, 5000)
free = random.randint(1, 100)
expected_cost = int((billsec - free) * (TARIFF / 60.0))
print('%s seconds with %s free should cost %s' %
(billsec, free, expected_cost))
self.assertEqual(expected_cost,
round_to_billable_unit(billsec, TARIFF, free))
def test_billable_unit_rounding_with_units(self):
"""Test the "rows" of this table: (billsec, expected_cost)."""
tests = [
# base case
(0, 60, 0, 30, 0),
# call too short
(5, 60, 0, 30, 30),
# changing the units
(5, 60, 0, 60, 60),
# call slightly too long
(61, 60, 0, 60, 120),
# weird non-uniform per minute
(61, 72, 0, 30, 108),
# including free seconds
(61, 60, 10, 60, 60)
]
for test in tests:
billsec = test[0]
rate = test[1]
free = test[2]
unit = test[3]
expected_cost = test[4]
actual_cost = round_to_billable_unit(billsec, rate, free, unit)
print('%s sec with %s free and a unit of %s sec '
'expected cost %s, actual cost %s' %
(billsec, free, unit, expected_cost, actual_cost))
self.assertEqual(expected_cost, actual_cost)
class RoundCostUpToNearest100(unittest.TestCase):
"""Testing core.billing.round_up_to_nearest_100."""
def test_round_negatives(self):
# test negatives
for i in [-10000, -100, -1]:
self.assertEqual(0, round_up_to_nearest_100(i))
def test_round_positives(self):
for i in range(0, 5000):
self.assertEqual(int(math.ceil(i / float(100))) * 100,
round_up_to_nearest_100(i))
| [
"core.billing.get_call_cost",
"core.billing.get_prefix_from_number",
"core.billing.get_sms_cost",
"core.billing.process_prices",
"core.billing.round_up_to_nearest_100",
"core.config_database.ConfigDB",
"core.billing.round_to_billable_unit",
"random.randint"
]
| [((1081, 1107), 'core.config_database.ConfigDB', 'config_database.ConfigDB', ([], {}), '()\n', (1105, 1107), False, 'from core import config_database\n'), ((3018, 3059), 'core.billing.process_prices', 'process_prices', (['price_data', 'cls.config_db'], {}), '(price_data, cls.config_db)\n', (3032, 3059), False, 'from core.billing import process_prices\n'), ((4886, 4964), 'core.billing.get_call_cost', 'get_call_cost', (['billable_seconds', '"""off_network_send"""'], {'destination_number': 'number'}), "(billable_seconds, 'off_network_send', destination_number=number)\n", (4899, 4964), False, 'from core.billing import get_call_cost\n'), ((5262, 5321), 'core.billing.get_sms_cost', 'get_sms_cost', (['"""off_network_send"""'], {'destination_number': 'number'}), "('off_network_send', destination_number=number)\n", (5274, 5321), False, 'from core.billing import get_sms_cost\n'), ((5620, 5698), 'core.billing.get_call_cost', 'get_call_cost', (['billable_seconds', '"""off_network_send"""'], {'destination_number': 'number'}), "(billable_seconds, 'off_network_send', destination_number=number)\n", (5633, 5698), False, 'from core.billing import get_call_cost\n'), ((6098, 6157), 'core.billing.get_sms_cost', 'get_sms_cost', (['"""off_network_send"""'], {'destination_number': 'number'}), "('off_network_send', destination_number=number)\n", (6110, 6157), False, 'from core.billing import get_sms_cost\n'), ((6450, 6476), 'core.config_database.ConfigDB', 'config_database.ConfigDB', ([], {}), '()\n', (6474, 6476), False, 'from core import config_database\n'), ((8024, 8065), 'core.billing.process_prices', 'process_prices', (['price_data', 'cls.config_db'], {}), '(price_data, cls.config_db)\n', (8038, 8065), False, 'from core.billing import process_prices\n'), ((3380, 3433), 'core.billing.get_call_cost', 'get_call_cost', (['billable_seconds', '"""on_network_receive"""'], {}), "(billable_seconds, 'on_network_receive')\n", (3393, 3433), False, 'from core.billing import get_call_cost\n'), ((3617, 3651), 'core.billing.get_sms_cost', 'get_sms_cost', (['"""on_network_receive"""'], {}), "('on_network_receive')\n", (3629, 3651), False, 'from core.billing import get_sms_cost\n'), ((3896, 3950), 'core.billing.get_call_cost', 'get_call_cost', (['billable_seconds', '"""off_network_receive"""'], {}), "(billable_seconds, 'off_network_receive')\n", (3909, 3950), False, 'from core.billing import get_call_cost\n'), ((4136, 4171), 'core.billing.get_sms_cost', 'get_sms_cost', (['"""off_network_receive"""'], {}), "('off_network_receive')\n", (4148, 4171), False, 'from core.billing import get_sms_cost\n'), ((4407, 4457), 'core.billing.get_call_cost', 'get_call_cost', (['billable_seconds', '"""on_network_send"""'], {}), "(billable_seconds, 'on_network_send')\n", (4420, 4457), False, 'from core.billing import get_call_cost\n'), ((4634, 4665), 'core.billing.get_sms_cost', 'get_sms_cost', (['"""on_network_send"""'], {}), "('on_network_send')\n", (4646, 4665), False, 'from core.billing import get_sms_cost\n'), ((8229, 8259), 'core.billing.get_prefix_from_number', 'get_prefix_from_number', (['number'], {}), '(number)\n', (8251, 8259), False, 'from core.billing import get_prefix_from_number\n'), ((8426, 8456), 'core.billing.get_prefix_from_number', 'get_prefix_from_number', (['number'], {}), '(number)\n', (8448, 8456), False, 'from core.billing import get_prefix_from_number\n'), ((8629, 8659), 'core.billing.get_prefix_from_number', 'get_prefix_from_number', (['number'], {}), '(number)\n', (8651, 8659), False, 'from core.billing import 
get_prefix_from_number\n'), ((8842, 8872), 'core.billing.get_prefix_from_number', 'get_prefix_from_number', (['number'], {}), '(number)\n', (8864, 8872), False, 'from core.billing import get_prefix_from_number\n'), ((9094, 9117), 'random.randint', 'random.randint', (['(1)', '(5000)'], {}), '(1, 5000)\n', (9108, 9117), False, 'import random\n'), ((9478, 9503), 'random.randint', 'random.randint', (['(100)', '(5000)'], {}), '(100, 5000)\n', (9492, 9503), False, 'import random\n'), ((9523, 9545), 'random.randint', 'random.randint', (['(1)', '(100)'], {}), '(1, 100)\n', (9537, 9545), False, 'import random\n'), ((10598, 10647), 'core.billing.round_to_billable_unit', 'round_to_billable_unit', (['billsec', 'rate', 'free', 'unit'], {}), '(billsec, rate, free, unit)\n', (10620, 10647), False, 'from core.billing import round_to_billable_unit\n'), ((9324, 9363), 'core.billing.round_to_billable_unit', 'round_to_billable_unit', (['billsec', 'TARIFF'], {}), '(billsec, TARIFF)\n', (9346, 9363), False, 'from core.billing import round_to_billable_unit\n'), ((9798, 9843), 'core.billing.round_to_billable_unit', 'round_to_billable_unit', (['billsec', 'TARIFF', 'free'], {}), '(billsec, TARIFF, free)\n', (9820, 9843), False, 'from core.billing import round_to_billable_unit\n'), ((11130, 11156), 'core.billing.round_up_to_nearest_100', 'round_up_to_nearest_100', (['i'], {}), '(i)\n', (11153, 11156), False, 'from core.billing import round_up_to_nearest_100\n'), ((11324, 11350), 'core.billing.round_up_to_nearest_100', 'round_up_to_nearest_100', (['i'], {}), '(i)\n', (11347, 11350), False, 'from core.billing import round_up_to_nearest_100\n')] |
from django import forms
from django.forms import ModelForm
from django.contrib.auth import get_user_model
from django.core.exceptions import ValidationError
from .choices import ActionChoice
from .choices import StatusApproval
from .models import GreencheckIp
from .models import GreencheckIpApprove
from .models import GreencheckASN, GreencheckASNapprove
User = get_user_model()
class ApprovalMixin:
ApprovalModel = None
def _save_approval(self):
"""
        Save the approval request, be it an IP Range or an AS Network.
"""
if self.ApprovalModel is None:
raise NotImplementedError("Approval model missing")
model_name = self.ApprovalModel._meta.model_name
if not self.cleaned_data["is_staff"]:
hosting_provider = self.instance.hostingprovider
            # changed indicates whether this is an update to an existing record or a new one
action = ActionChoice.update if self.changed else ActionChoice.new
status = StatusApproval.update if self.changed else StatusApproval.new
kwargs = {
"action": action,
"status": status,
"hostingprovider": hosting_provider,
}
if model_name == "greencheckasnapprove":
self.instance = GreencheckASNapprove(asn=self.instance.asn, **kwargs)
else:
self.instance = GreencheckIpApprove(
ip_end=self.instance.ip_end,
ip_start=self.instance.ip_start,
**kwargs
)
hosting_provider.mark_as_pending_review(self.instance)
def clean_is_staff(self):
try:
# when using this form `is_staff` should always be available
# or else something has gone wrong...
return self.data["is_staff"]
except KeyError:
raise ValidationError("Alert staff: a bug has occurred.")
class GreencheckAsnForm(ModelForm, ApprovalMixin):
ApprovalModel = GreencheckASNapprove
is_staff = forms.BooleanField(
label="user_is_staff", required=False, widget=forms.HiddenInput()
)
class Meta:
model = GreencheckASN
fields = (
"active",
"asn",
)
def save(self, commit=True):
self._save_approval()
return super().save(commit=True)
class GreencheckIpForm(ModelForm, ApprovalMixin):
"""This form is meant for admin
If a non staff user fills in the form it would return
an unsaved approval record instead of greencheckip record
"""
ApprovalModel = GreencheckIpApprove
is_staff = forms.BooleanField(
label="user_is_staff", required=False, widget=forms.HiddenInput()
)
class Meta:
model = GreencheckIp
fields = (
"active",
"ip_start",
"ip_end",
)
def save(self, commit=True):
"""
If a non-staff user creates an ip, instead of saving
the ip record directly, it will save an approval record.
        Only when it has been approved will the record actually
        be created.
        So we return an approval instance instead of a Greencheck instance,
        which in turn will get saved a bit later.
"""
self._save_approval()
return super().save(commit=commit)
class GreencheckAsnApprovalForm(ModelForm):
class Meta:
model = GreencheckASNapprove
fields = ("action", "asn", "status")
def save(self, commit=True):
instance = self.instance.greencheck_asn
if commit is True:
if instance:
instance.asn = self.instance.asn
instance.save()
else:
instance = GreencheckASN.objects.create(
active=True,
asn=self.instance.asn,
hostingprovider=self.instance.hostingprovider,
)
self.instance.greencheck_asn = instance
return super().save(commit=commit)
class GreecheckIpApprovalForm(ModelForm):
field_order = ("ip_start", "ip_end")
class Meta:
model = GreencheckIpApprove
fields = "__all__"
def save(self, commit=True):
ip_instance = self.instance.greencheck_ip
if commit is True:
if ip_instance:
ip_instance.ip_end = self.instance.ip_end
ip_instance.ip_end = self.instance.ip_start
ip_instance.save()
else:
ip_instance = GreencheckIp.objects.create(
active=True,
ip_end=self.instance.ip_end,
ip_start=self.instance.ip_start,
hostingprovider=self.instance.hostingprovider,
)
self.instance.greencheck_ip = ip_instance
return super().save(commit=commit)
| [
"django.forms.HiddenInput",
"django.contrib.auth.get_user_model",
"django.core.exceptions.ValidationError"
]
| [((366, 382), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (380, 382), False, 'from django.contrib.auth import get_user_model\n'), ((2116, 2135), 'django.forms.HiddenInput', 'forms.HiddenInput', ([], {}), '()\n', (2133, 2135), False, 'from django import forms\n'), ((2711, 2730), 'django.forms.HiddenInput', 'forms.HiddenInput', ([], {}), '()\n', (2728, 2730), False, 'from django import forms\n'), ((1880, 1931), 'django.core.exceptions.ValidationError', 'ValidationError', (['"""Alert staff: a bug has occurred."""'], {}), "('Alert staff: a bug has occurred.')\n", (1895, 1931), False, 'from django.core.exceptions import ValidationError\n')] |
#!/usr/bin/env python
# -*-coding:utf-8-*-
from tld import get_tld
__author__ = "<NAME>"
def get_domain(url):
'''
    Get the full domain name from the url
:param url:
:return:
'''
res = get_tld(url, as_object=True)
return "{}.{}".format(res.subdomain, res.tld) | [
"tld.get_tld"
]
| [((182, 210), 'tld.get_tld', 'get_tld', (['url'], {'as_object': '(True)'}), '(url, as_object=True)\n', (189, 210), False, 'from tld import get_tld\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# SPDX-License-Identifier: BSD-3-Clause
# Copyright © 2017-2019, GoodData Corporation. All rights reserved.
"""
FreeIPA Manager - entity module
Object representations of the entities configured in FreeIPA.
"""
import os
import re
import voluptuous
import yaml
from abc import ABCMeta, abstractproperty
import schemas
from command import Command
from core import FreeIPAManagerCore
from errors import ConfigError, ManagerError, IntegrityError
class FreeIPAEntity(FreeIPAManagerCore):
"""
General FreeIPA entity (user, group etc.) representation.
Can only be used via subclasses, not directly.
"""
__metaclass__ = ABCMeta
    entity_id_type = 'cn' # entity name identifier in FreeIPA
key_mapping = {} # attribute name mapping between local config and FreeIPA
ignored = [] # list of ignored entities for each entity type
allowed_members = []
def __init__(self, name, data, path=None):
"""
:param str name: entity name (user login, group name etc.)
:param dict data: dictionary of entity configuration values
:param str path: path to file the entity was parsed from;
if None, indicates creation of entity from FreeIPA
"""
super(FreeIPAEntity, self).__init__()
if not data: # may be None; we want to ensure dictionary
data = dict()
self.name = name
self.path = path
self.metaparams = data.pop('metaparams', dict())
if self.path: # created from local config
try:
self.validation_schema(data)
except voluptuous.Error as e:
raise ConfigError('Error validating %s: %s' % (name, e))
if not path.endswith('.yaml'): # created from template tool
path, name = os.path.split(self.path)
self.path = '%s.yaml' % os.path.join(
path, name.replace('-', '_'))
self.data_ipa = self._convert_to_ipa(data)
self.data_repo = data
else: # created from FreeIPA
self.data_ipa = data
self.data_repo = self._convert_to_repo(data)
def _convert_to_ipa(self, data):
"""
Convert entity data to IPA format.
:param dict data: entity data in repository format
:returns: dictionary of data in IPA format
:rtype: dict
"""
result = dict()
for key, value in data.iteritems():
new_key = self.key_mapping.get(key, key).lower()
if new_key == 'memberof':
self._check_memberof(value)
result[new_key] = value
elif isinstance(value, bool):
result[new_key] = value
elif isinstance(value, list):
result[new_key] = tuple(unicode(i) for i in value)
else:
result[new_key] = (unicode(value),)
return result
def _convert_to_repo(self, data):
"""
Convert entity data to repo format.
:param dict data: entity data in IPA format
:returns: dictionary of data in repository format
:rtype: dict
"""
result = dict()
for attr in self.managed_attributes_pull:
if attr.lower() in data:
key = attr
# find reverse (IPA -> repo) attribute name mapping
for k, v in self.key_mapping.iteritems():
if v == attr:
key = k
break
value = data[attr.lower()]
if isinstance(value, tuple):
if len(value) > 1:
result[key] = list(value)
else:
result[key] = value[0]
else:
result[key] = value
return result
def _check_memberof(self, member_of):
for entity_type in member_of:
try:
self.get_entity_class(entity_type)
except KeyError:
raise ConfigError(
'Cannot be a member of non-existent entity type %s'
% entity_type)
def create_commands(self, remote_entity=None):
"""
Create commands to execute in order
to sync entity with its FreeIPA counterpart.
:param FreeIPAEntity remote_entity: remote entity
:returns: list of Command objects to execute
:rtype: list(Command)
"""
diff = dict()
for key in self.managed_attributes_push:
local_value = self.data_ipa.get(key.lower(), ())
if not remote_entity:
if local_value:
diff[key.lower()] = local_value
else:
remote_value = remote_entity.data_ipa.get(key.lower(), ())
if sorted(local_value) != sorted(remote_value):
diff[key.lower()] = local_value
if diff or not remote_entity: # create entity even without params
if remote_entity: # modify existing entity
command = '%s_mod' % self.entity_name
else: # add new entity
command = '%s_add' % self.entity_name
return [Command(command, diff, self.name, self.entity_id_type)]
return []
def update_repo_data(self, additional):
"""
Update repo-format data with additional attributes.
Used for adding membership attributes to data.
:param dict additional: dictionary to update entity data with
:rtype: None
"""
self.data_repo.update(additional or {})
def normalize(self):
"""
Re-structure entity's data in such a way that it can be stored
into the configuration file in a normalized format. This is used
when round-trip loading and saving a configuration.
"""
memberof = self.data_repo.pop('memberOf', None)
if memberof:
for target_type, target_list in memberof.iteritems():
memberof[target_type] = sorted(target_list)
self.data_repo['memberOf'] = memberof
def write_to_file(self):
if not self.path:
raise ManagerError(
'%s has no file path, nowhere to write.' % repr(self))
if self.metaparams:
self.data_repo.update({'metaparams': self.metaparams})
# don't write default attributes into file
for key in self.default_attributes:
self.data_repo.pop(key, None)
try:
with open(self.path, 'w') as target:
data = {self.name: self.data_repo or None}
yaml.dump(data, stream=target, Dumper=EntityDumper,
default_flow_style=False, explicit_start=True)
self.lg.debug('%s written to file', repr(self))
except (IOError, OSError, yaml.YAMLError) as e:
raise ConfigError(
'Cannot write %s to %s: %s' % (repr(self), self.path, e))
def delete_file(self):
if not self.path:
raise ManagerError(
'%s has no file path, cannot delete.' % repr(self))
try:
os.unlink(self.path)
self.lg.debug('%s config file deleted', repr(self))
except OSError as e:
raise ConfigError(
'Cannot delete %s at %s: %s' % (repr(self), self.path, e))
@staticmethod
def get_entity_class(name):
for entity_class in [
FreeIPAHBACRule, FreeIPAHBACService,
FreeIPAHBACServiceGroup, FreeIPAHostGroup, FreeIPAPermission,
FreeIPAPrivilege, FreeIPARole, FreeIPAService,
FreeIPASudoRule, FreeIPAUser, FreeIPAUserGroup]:
if entity_class.entity_name == name:
return entity_class
raise KeyError(name)
@abstractproperty
def validation_schema(self):
"""
:returns: entity validation schema
:rtype: voluptuous.Schema
"""
@abstractproperty
def managed_attributes_push(self):
"""
Return a list of properties that are managed for given entity type
when pushing configuration from local repo to FreeIPA.
NOTE: the list should NOT include attributes that are managed via
separate commands, like memberOf/memberHost/memberUser or ipasudoopt.
:returns: list of entity's managed attributes
:rtype: list(str)
"""
@property
def managed_attributes_pull(self):
"""
Return a list of properties that are managed for given entity type.
when pulling configuration from FreeIPA to local repository.
:returns: list of entity's managed attributes
:rtype: list(str)
"""
return self.managed_attributes_push
@property
def default_attributes(self):
"""
Return a list of default attributes for each entity of the given type.
These attributes will not be written into the YAML file when pulling.
:returns: list of entity's attributes that have single default value
:rtype: list(str)
"""
return []
def __repr__(self):
return '%s %s' % (self.entity_name, self.name)
def __str__(self):
return self.name
def __eq__(self, other):
return type(self) is type(other) and self.name == other.name
def __ne__(self, other):
return not (self == other)
def __gt__(self, other):
return self.name > other.name
def __lt__(self, other):
return self.name < other.name
class FreeIPAGroup(FreeIPAEntity):
"""Abstract representation a FreeIPA group entity (host/user group)."""
managed_attributes_push = ['description']
@abstractproperty
def allowed_members(self):
"""
:returns: list of entity types that can be members of this entity
:rtype: list(FreeIPAEntity)
"""
class FreeIPAHostGroup(FreeIPAGroup):
"""Representation of a FreeIPA host group entity."""
entity_name = 'hostgroup'
allowed_members = ['hostgroup']
validation_schema = voluptuous.Schema(schemas.schema_hostgroups)
class FreeIPAUserGroup(FreeIPAGroup):
"""Representation of a FreeIPA user group entity."""
entity_name = 'group'
managed_attributes_pull = ['description', 'posix']
allowed_members = ['user', 'group']
validation_schema = voluptuous.Schema(schemas.schema_usergroups)
def __init__(self, name, data, path=None):
"""
:param str name: entity name (user login, group name etc.)
:param dict data: dictionary of entity configuration values
:param str path: path to file the entity was parsed from;
if None, indicates creation of entity from FreeIPA
"""
if not path: # entity created from FreeIPA, not from config
data['posix'] = u'posixgroup' in data.get(u'objectclass', [])
super(FreeIPAUserGroup, self).__init__(name, data, path)
self.posix = self.data_repo.get('posix', True)
def can_contain_users(self, pattern):
"""
Check whether the group can contain users directly.
If the pattern is None, no restrictions are applied.
:param str pattern: regex to check name by (not enforced if empty)
"""
return not pattern or re.match(pattern, self.name)
def cannot_contain_users(self, pattern):
"""
Check whether the group can not contain users directly.
Used for determining if the group can be a member of a sudo/HBAC rule.
If the pattern is None, no restrictions are applied.
:param str pattern: regex to check name by (not enforced if empty)
"""
return not pattern or not re.match(pattern, self.name)
def _process_posix_setting(self, remote_entity):
posix_diff = dict()
description = None
if remote_entity:
if self.posix and not remote_entity.posix:
posix_diff = {u'posix': True}
description = 'group_mod %s (make POSIX)' % self.name
elif not self.posix and remote_entity.posix:
posix_diff = {'setattr': (u'gidnumber=',),
'delattr': (u'objectclass=posixgroup',)}
description = 'group_mod %s (make non-POSIX)' % self.name
elif not self.posix: # creation of new non-POSIX group
posix_diff = {u'nonposix': True}
return (posix_diff, description)
def create_commands(self, remote_entity=None):
"""
Create commands to execute in order to update the rule.
Extends the basic command creation with POSIX/non-POSIX setting.
:param dict remote_entity: remote rule data
:returns: list of commands to execute
:rtype: list(Command)
"""
commands = super(FreeIPAUserGroup, self).create_commands(remote_entity)
posix_diff, description = self._process_posix_setting(remote_entity)
if posix_diff:
if not commands: # no diff but POSIX setting, new command needed
cmd = Command('group_mod', posix_diff,
self.name, self.entity_id_type)
cmd.description = description
return [cmd]
else: # update POSIX setting as part of existing command
commands[0].update(posix_diff)
return commands
class FreeIPAUser(FreeIPAEntity):
"""Representation of a FreeIPA user entity."""
entity_name = 'user'
entity_id_type = 'uid'
managed_attributes_push = ['givenName', 'sn', 'initials', 'mail',
'ou', 'manager', 'carLicense', 'title']
key_mapping = {
'emailAddress': 'mail',
'firstName': 'givenName',
'lastName': 'sn',
'organizationUnit': 'ou',
'githubLogin': 'carLicense'
}
validation_schema = voluptuous.Schema(schemas.schema_users)
class FreeIPARule(FreeIPAEntity):
"""Abstract class covering HBAC and sudo rules."""
def create_commands(self, remote_entity=None):
"""
Create commands to execute in order to update the rule.
Extends the basic command creation
to account for adding/removing rule members.
:param dict remote_entity: remote rule data
:returns: list of commands to execute
:rtype: list(Command)
"""
result = super(FreeIPARule, self).create_commands(remote_entity)
result.extend(self._process_rule_membership(remote_entity))
return result
def _process_rule_membership(self, remote_entity):
"""
Prepare a command for a hbac/sudo rule membership update.
If the rule previously had any members, these are removed
as a rule can only have one usergroup and one hostgroup as members.
:param FreeIPArule remote_entity: remote entity data (may be None)
"""
commands = []
for key, member_type, cmd_key in (
('memberhost', 'hostgroup', 'host'),
('memberuser', 'group', 'user'),
('memberservice', 'hbacsvc', 'service')):
local_members = set(self.data_ipa.get(key, []))
if remote_entity:
search_key = '%s_%s' % (key, member_type)
remote_members = set(
remote_entity.data_ipa.get(search_key, []))
else:
remote_members = set()
command = '%s_add_%s' % (self.entity_name, cmd_key)
for member in local_members - remote_members:
diff = {member_type: member}
commands.append(
Command(command, diff, self.name, self.entity_id_type))
command = '%s_remove_%s' % (self.entity_name, cmd_key)
for member in remote_members - local_members:
diff = {member_type: member}
commands.append(
Command(command, diff, self.name, self.entity_id_type))
return commands
class FreeIPAHBACRule(FreeIPARule):
"""Representation of a FreeIPA HBAC (host-based access control) rule."""
entity_name = 'hbacrule'
default_attributes = ['serviceCategory']
managed_attributes_push = ['description', 'serviceCategory']
validation_schema = voluptuous.Schema(schemas.schema_hbac)
def __init__(self, name, data, path=None):
"""
Create a HBAC rule instance.
This override is needed to set the servicecat parameter.
"""
if path: # only edit local entities
if not data: # may be None; we want to ensure dictionary
data = dict()
if 'memberService' not in data:
data.update({'serviceCategory': 'all'})
elif 'serviceCategory' in data:
raise IntegrityError(
'%s cannot contain both memberService and serviceCategory'
% name)
super(FreeIPAHBACRule, self).__init__(name, data, path)
class FreeIPASudoRule(FreeIPARule):
"""Representation of a FreeIPA sudo rule."""
entity_name = 'sudorule'
default_attributes = [
'cmdCategory', 'options', 'runAsGroupCategory', 'runAsUserCategory']
managed_attributes_push = [
'cmdCategory', 'description',
'ipaSudoRunAsGroupCategory', 'ipaSudoRunAsUserCategory']
managed_attributes_pull = managed_attributes_push + ['ipaSudoOpt']
key_mapping = {
'options': 'ipaSudoOpt',
'runAsGroupCategory': 'ipaSudoRunAsGroupCategory',
'runAsUserCategory': 'ipaSudoRunAsUserCategory'
}
validation_schema = voluptuous.Schema(schemas.schema_sudo)
def __init__(self, name, data, path=None):
"""
Create a sudorule instance.
This override is needed to set the options & runAs params.
"""
if path: # only edit local entities
if not data: # may be None; we want to ensure dictionary
data = dict()
data.update({'options': ['!authenticate', '!requiretty'],
'cmdCategory': 'all',
'runAsUserCategory': 'all',
'runAsGroupCategory': 'all'})
super(FreeIPASudoRule, self).__init__(name, data, path)
def _convert_to_repo(self, data):
result = super(FreeIPASudoRule, self)._convert_to_repo(data)
if isinstance(result.get('options'), unicode):
result['options'] = [result['options']]
return result
def create_commands(self, remote_entity=None):
"""
Create commands to execute in order to update the rule.
Extends the basic command creation with sudorule option update.
:param dict remote_entity: remote rule data
:returns: list of commands to execute
:rtype: list(Command)
"""
result = super(FreeIPASudoRule, self).create_commands(remote_entity)
result.extend(self._parse_sudo_options(remote_entity))
return result
def _parse_sudo_options(self, remote_entity):
"""
Prepare commands for sudo rule options update. This includes
deletion of old options that are no longer in configuration
as well as addition of new options.
:param dict remote_entity: remote entity data (can be None)
:returns: list of sudorule option update commands to execute
:rtype: list(Command)
"""
commands = []
local_options = set(self.data_repo.get('options', []))
if remote_entity:
remote_options = set(remote_entity.data_ipa.get('ipasudoopt', []))
else:
remote_options = set()
command = 'sudorule_add_option'
for opt in local_options - remote_options:
diff = {'ipasudoopt': [opt]}
commands.append(
Command(command, diff, self.name, self.entity_id_type))
command = 'sudorule_remove_option'
for opt in remote_options - local_options:
diff = {'ipasudoopt': [opt]}
commands.append(
Command(command, diff, self.name, self.entity_id_type))
return commands
class FreeIPAHBACService(FreeIPAEntity):
"""Entity to hold the info about FreeIPA HBACServices"""
entity_name = 'hbacsvc'
managed_attributes_push = ['description']
managed_attributes_pull = managed_attributes_push
validation_schema = voluptuous.Schema(schemas.schema_hbacservices)
class FreeIPAHBACServiceGroup(FreeIPAEntity):
"""Entity to hold the info about FreeIPA HBACServiceGroups"""
entity_name = 'hbacsvcgroup'
managed_attributes_push = ['description']
managed_attributes_pull = managed_attributes_push
allowed_members = ['hbacsvc']
validation_schema = voluptuous.Schema(schemas.schema_hbacsvcgroups)
class FreeIPARole(FreeIPAEntity):
"""Entity to hold the info about FreeIPA Roles"""
entity_name = 'role'
managed_attributes_pull = ['description']
managed_attributes_push = managed_attributes_pull
allowed_members = ['user', 'group', 'service', 'hostgroup']
validation_schema = voluptuous.Schema(schemas.schema_roles)
class FreeIPAPrivilege(FreeIPAEntity):
"""Entity to hold the info about FreeIPA Privilege"""
entity_name = 'privilege'
managed_attributes_pull = ['description']
managed_attributes_push = managed_attributes_pull
allowed_members = ['role']
validation_schema = voluptuous.Schema(schemas.schema_privileges)
class FreeIPAPermission(FreeIPAEntity):
"""Entity to hold the info about FreeIPA Permission"""
entity_name = 'permission'
managed_attributes_pull = ['description', 'subtree', 'attrs',
'ipapermlocation', 'ipapermright',
'ipapermdefaultattr']
managed_attributes_push = managed_attributes_pull
key_mapping = {
'grantedRights': 'ipapermright',
'attributes': 'attrs',
'location': 'ipapermlocation',
'defaultAttr': 'ipapermdefaultattr'
}
allowed_members = ['privilege']
validation_schema = voluptuous.Schema(schemas.schema_permissions)
class FreeIPAService(FreeIPAEntity):
"""
Entity to hold the info about FreeIPA Services
PUSH NOT SUPPORTED yet
"""
entity_name = 'service'
entity_id_type = 'krbcanonicalname'
managed_attributes_push = [] # Empty because we don't support push
managed_attributes_pull = ['managedby_host', 'description']
key_mapping = {
'managedBy': 'managedby_host',
}
validation_schema = voluptuous.Schema(schemas.schema_services)
def write_to_file(self):
"""
Converts the file name format from xyz/hostname.int.na.intgdc.com
to xyz-hostname_int_na_intgdc_com.yaml
"""
path, file_name = os.path.split(self.path)
service_name, _ = file_name.split('@')
self.path = ('%s-%s.yaml' % (path, service_name.replace('.', '_')))
super(FreeIPAService, self).write_to_file()
class EntityDumper(yaml.SafeDumper):
"""YAML dumper subclass used to fix under-indent of lists when dumping."""
def __init__(self, *args, **kwargs):
super(EntityDumper, self).__init__(*args, **kwargs)
self.add_representer(type(None), self._none_representer())
def increase_indent(self, flow=False, indentless=False):
return super(EntityDumper, self).increase_indent(flow, False)
def _none_representer(self):
"""
Enable correct representation of empty values in config
by representing None as empty string instead of 'null'.
"""
def representer(dumper, value):
return dumper.represent_scalar(u'tag:yaml.org,2002:null', '')
return representer
| [
"yaml.dump",
"command.Command",
"re.match",
"voluptuous.Schema",
"os.path.split",
"os.unlink",
"errors.IntegrityError",
"errors.ConfigError"
]
| [((10197, 10241), 'voluptuous.Schema', 'voluptuous.Schema', (['schemas.schema_hostgroups'], {}), '(schemas.schema_hostgroups)\n', (10214, 10241), False, 'import voluptuous\n'), ((10484, 10528), 'voluptuous.Schema', 'voluptuous.Schema', (['schemas.schema_usergroups'], {}), '(schemas.schema_usergroups)\n', (10501, 10528), False, 'import voluptuous\n'), ((14016, 14055), 'voluptuous.Schema', 'voluptuous.Schema', (['schemas.schema_users'], {}), '(schemas.schema_users)\n', (14033, 14055), False, 'import voluptuous\n'), ((16426, 16464), 'voluptuous.Schema', 'voluptuous.Schema', (['schemas.schema_hbac'], {}), '(schemas.schema_hbac)\n', (16443, 16464), False, 'import voluptuous\n'), ((17761, 17799), 'voluptuous.Schema', 'voluptuous.Schema', (['schemas.schema_sudo'], {}), '(schemas.schema_sudo)\n', (17778, 17799), False, 'import voluptuous\n'), ((20559, 20605), 'voluptuous.Schema', 'voluptuous.Schema', (['schemas.schema_hbacservices'], {}), '(schemas.schema_hbacservices)\n', (20576, 20605), False, 'import voluptuous\n'), ((20911, 20958), 'voluptuous.Schema', 'voluptuous.Schema', (['schemas.schema_hbacsvcgroups'], {}), '(schemas.schema_hbacsvcgroups)\n', (20928, 20958), False, 'import voluptuous\n'), ((21262, 21301), 'voluptuous.Schema', 'voluptuous.Schema', (['schemas.schema_roles'], {}), '(schemas.schema_roles)\n', (21279, 21301), False, 'import voluptuous\n'), ((21586, 21630), 'voluptuous.Schema', 'voluptuous.Schema', (['schemas.schema_privileges'], {}), '(schemas.schema_privileges)\n', (21603, 21630), False, 'import voluptuous\n'), ((22243, 22288), 'voluptuous.Schema', 'voluptuous.Schema', (['schemas.schema_permissions'], {}), '(schemas.schema_permissions)\n', (22260, 22288), False, 'import voluptuous\n'), ((22715, 22757), 'voluptuous.Schema', 'voluptuous.Schema', (['schemas.schema_services'], {}), '(schemas.schema_services)\n', (22732, 22757), False, 'import voluptuous\n'), ((22959, 22983), 'os.path.split', 'os.path.split', (['self.path'], {}), '(self.path)\n', (22972, 22983), False, 'import os\n'), ((7249, 7269), 'os.unlink', 'os.unlink', (['self.path'], {}), '(self.path)\n', (7258, 7269), False, 'import os\n'), ((11434, 11462), 're.match', 're.match', (['pattern', 'self.name'], {}), '(pattern, self.name)\n', (11442, 11462), False, 'import re\n'), ((1852, 1876), 'os.path.split', 'os.path.split', (['self.path'], {}), '(self.path)\n', (1865, 1876), False, 'import os\n'), ((5288, 5342), 'command.Command', 'Command', (['command', 'diff', 'self.name', 'self.entity_id_type'], {}), '(command, diff, self.name, self.entity_id_type)\n', (5295, 5342), False, 'from command import Command\n'), ((6720, 6823), 'yaml.dump', 'yaml.dump', (['data'], {'stream': 'target', 'Dumper': 'EntityDumper', 'default_flow_style': '(False)', 'explicit_start': '(True)'}), '(data, stream=target, Dumper=EntityDumper, default_flow_style=\n False, explicit_start=True)\n', (6729, 6823), False, 'import yaml\n'), ((11846, 11874), 're.match', 're.match', (['pattern', 'self.name'], {}), '(pattern, self.name)\n', (11854, 11874), False, 'import re\n'), ((13213, 13277), 'command.Command', 'Command', (['"""group_mod"""', 'posix_diff', 'self.name', 'self.entity_id_type'], {}), "('group_mod', posix_diff, self.name, self.entity_id_type)\n", (13220, 13277), False, 'from command import Command\n'), ((19987, 20041), 'command.Command', 'Command', (['command', 'diff', 'self.name', 'self.entity_id_type'], {}), '(command, diff, self.name, self.entity_id_type)\n', (19994, 20041), False, 'from command import Command\n'), ((20223, 20277), 
'command.Command', 'Command', (['command', 'diff', 'self.name', 'self.entity_id_type'], {}), '(command, diff, self.name, self.entity_id_type)\n', (20230, 20277), False, 'from command import Command\n'), ((1699, 1749), 'errors.ConfigError', 'ConfigError', (["('Error validating %s: %s' % (name, e))"], {}), "('Error validating %s: %s' % (name, e))\n", (1710, 1749), False, 'from errors import ConfigError, ManagerError, IntegrityError\n'), ((4100, 4178), 'errors.ConfigError', 'ConfigError', (["('Cannot be a member of non-existent entity type %s' % entity_type)"], {}), "('Cannot be a member of non-existent entity type %s' % entity_type)\n", (4111, 4178), False, 'from errors import ConfigError, ManagerError, IntegrityError\n'), ((15789, 15843), 'command.Command', 'Command', (['command', 'diff', 'self.name', 'self.entity_id_type'], {}), '(command, diff, self.name, self.entity_id_type)\n', (15796, 15843), False, 'from command import Command\n'), ((16068, 16122), 'command.Command', 'Command', (['command', 'diff', 'self.name', 'self.entity_id_type'], {}), '(command, diff, self.name, self.entity_id_type)\n', (16075, 16122), False, 'from command import Command\n'), ((16950, 17035), 'errors.IntegrityError', 'IntegrityError', (["('%s cannot contain both memberService and serviceCategory' % name)"], {}), "('%s cannot contain both memberService and serviceCategory' %\n name)\n", (16964, 17035), False, 'from errors import ConfigError, ManagerError, IntegrityError\n')] |
# pylint: skip-file
from athena_glue_service_logs.catalog_manager import BaseCatalogManager
def test_class_init(mocker):
mocker.patch.multiple(BaseCatalogManager, __abstractmethods__=set())
base_catalog = BaseCatalogManager('us-west-2', 'dbname', 'tablename', 's3://somewhere')
assert base_catalog.database_name == 'dbname'
assert base_catalog.s3_location == 's3://somewhere'
assert base_catalog.table_name == 'tablename'
def test_init_with_partitions(mocker):
mocker.patch.multiple(BaseCatalogManager, __abstractmethods__=set())
mocker.patch('athena_glue_service_logs.catalog_manager.BaseCatalogManager.does_database_exist', return_value=True)
mocker.patch('athena_glue_service_logs.catalog_manager.BaseCatalogManager.create_database')
mocker.patch('athena_glue_service_logs.catalog_manager.BaseCatalogManager.create_table')
mocker.patch('athena_glue_service_logs.catalog_manager.BaseCatalogManager.create_partitions')
base_catalog = BaseCatalogManager('us-west-2', 'dbname', 'tablename', 's3://somewhere')
base_catalog.initialize_with_partitions(['a', 'b', 'c'])
assert BaseCatalogManager.create_database.call_count == 0
BaseCatalogManager.create_table.assert_called_once()
BaseCatalogManager.create_partitions.assert_called_once_with(partition_list=['a', 'b', 'c'])
mocker.patch('athena_glue_service_logs.catalog_manager.BaseCatalogManager.does_database_exist', return_value=False)
base_catalog.initialize_with_partitions(['a', 'b', 'c'])
assert BaseCatalogManager.create_database.call_count == 1
| [
"athena_glue_service_logs.catalog_manager.BaseCatalogManager.create_table.assert_called_once",
"athena_glue_service_logs.catalog_manager.BaseCatalogManager.create_partitions.assert_called_once_with",
"athena_glue_service_logs.catalog_manager.BaseCatalogManager"
]
| [((216, 288), 'athena_glue_service_logs.catalog_manager.BaseCatalogManager', 'BaseCatalogManager', (['"""us-west-2"""', '"""dbname"""', '"""tablename"""', '"""s3://somewhere"""'], {}), "('us-west-2', 'dbname', 'tablename', 's3://somewhere')\n", (234, 288), False, 'from athena_glue_service_logs.catalog_manager import BaseCatalogManager\n'), ((985, 1057), 'athena_glue_service_logs.catalog_manager.BaseCatalogManager', 'BaseCatalogManager', (['"""us-west-2"""', '"""dbname"""', '"""tablename"""', '"""s3://somewhere"""'], {}), "('us-west-2', 'dbname', 'tablename', 's3://somewhere')\n", (1003, 1057), False, 'from athena_glue_service_logs.catalog_manager import BaseCatalogManager\n'), ((1186, 1238), 'athena_glue_service_logs.catalog_manager.BaseCatalogManager.create_table.assert_called_once', 'BaseCatalogManager.create_table.assert_called_once', ([], {}), '()\n', (1236, 1238), False, 'from athena_glue_service_logs.catalog_manager import BaseCatalogManager\n'), ((1243, 1340), 'athena_glue_service_logs.catalog_manager.BaseCatalogManager.create_partitions.assert_called_once_with', 'BaseCatalogManager.create_partitions.assert_called_once_with', ([], {'partition_list': "['a', 'b', 'c']"}), "(partition_list\n =['a', 'b', 'c'])\n", (1303, 1340), False, 'from athena_glue_service_logs.catalog_manager import BaseCatalogManager\n')] |
from twisted.internet import reactor
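# NOTE: `factory` (a twisted protocol factory, e.g. protocol.ServerFactory) is assumed
# to be defined elsewhere in the original source; it is not created in this snippet.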
reactor.listenTCP(8789, factory)
reactor.run() | [
"twisted.internet.reactor.listenTCP",
"twisted.internet.reactor.run"
]
| [((37, 69), 'twisted.internet.reactor.listenTCP', 'reactor.listenTCP', (['(8789)', 'factory'], {}), '(8789, factory)\n', (54, 69), False, 'from twisted.internet import reactor\n'), ((70, 83), 'twisted.internet.reactor.run', 'reactor.run', ([], {}), '()\n', (81, 83), False, 'from twisted.internet import reactor\n')] |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
def visualize(dataframe, balltype):
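    # Scatter-plots pitch locations (plate_x vs plate_z) for one pitch type,
    # colouring swinging misses grey and contact green, then overlays the
    # average strike zone for the filtered pitches.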
df = dataframe
#Filter by balltype
res = df[df["pitch_type"] == balltype]
#Group by results
groups = res.groupby("description")
for name, group in groups:
if name == "miss":
plt.plot(group["plate_x"], group["plate_z"], marker="o", linestyle="", color="none", ms = 3, mec="#9A9A9A", label=name)
else:
plt.plot(group["plate_x"], group["plate_z"], marker="o", linestyle="", color="none", ms = 3, mec="#03A77F", label=name)
#Fixing the viewpoint of the plot
axes = plt.gca()
axes.set_xlim([-2.50,2.50])
axes.set_ylim([0.00,5.00])
#Setting strike zone
sz_top_avg = res["sz_top"].mean()
sz_bottom_avg = res["sz_bot"].mean()
sz_left = -0.85
sz_right = 0.85
#Drawing strike zone
plt.plot((sz_left, sz_right), (sz_top_avg, sz_top_avg), 'k-')
plt.plot((sz_left, sz_right), (sz_bottom_avg, sz_bottom_avg), 'k-')
plt.plot((sz_left, sz_left), (sz_top_avg, sz_bottom_avg), 'k-')
plt.plot((sz_right, sz_right), (sz_top_avg, sz_bottom_avg), 'k-')
#Setting labels
plt.xlabel("Horizontal Location")
plt.ylabel("Vertical Location")
plt.title(f"{player_name} 2018\n {ballname_dict.get(balltype, balltype)}")
plt.legend()
plt.show()
#Setting up Name and CSV location
player_name = "Put player name"
file_src = "Put target csv"
raw = pd.read_csv(file_src)
df = pd.DataFrame(raw)
#For filtering cases
replace_dict = {"description": {"hit_into_play_no_out": "contact", "hit_into_play": "contact", "hit_into_play_score": "contact", "swinging_strike": "miss", "swinging_strike_blocked": "miss"}}
ballname_dict = {"FF": "4-Seam Fastball", "CH": "Changeup", "CU": "Curveball", "SL": "Slider", "FT": "2-Seam Fastball", "AB": "Automatic Ball",
"AS": "Automatic Strike", "EP": "Eephus", "FC": "Cutter", "FO": "Forkball", "FS": "Splitter", "GY": "Gyroball", "IN": "Intentional Ball",
"KC": "Knuckle Curve", "NP": "No Pitch", "PO": "Pitchout", "SC": "Screwball", "SI": "Sinker", "UN": "Unknown"}
df = df.replace(replace_dict)
df = df[df["description"].isin(["contact", "miss"])]
for i in df["pitch_type"].unique():
visualize(df, i)
| [
"pandas.read_csv",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"pandas.DataFrame",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
]
| [((1524, 1545), 'pandas.read_csv', 'pd.read_csv', (['file_src'], {}), '(file_src)\n', (1535, 1545), True, 'import pandas as pd\n'), ((1552, 1569), 'pandas.DataFrame', 'pd.DataFrame', (['raw'], {}), '(raw)\n', (1564, 1569), True, 'import pandas as pd\n'), ((665, 674), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (672, 674), True, 'import matplotlib.pyplot as plt\n'), ((924, 985), 'matplotlib.pyplot.plot', 'plt.plot', (['(sz_left, sz_right)', '(sz_top_avg, sz_top_avg)', '"""k-"""'], {}), "((sz_left, sz_right), (sz_top_avg, sz_top_avg), 'k-')\n", (932, 985), True, 'import matplotlib.pyplot as plt\n'), ((991, 1058), 'matplotlib.pyplot.plot', 'plt.plot', (['(sz_left, sz_right)', '(sz_bottom_avg, sz_bottom_avg)', '"""k-"""'], {}), "((sz_left, sz_right), (sz_bottom_avg, sz_bottom_avg), 'k-')\n", (999, 1058), True, 'import matplotlib.pyplot as plt\n'), ((1064, 1127), 'matplotlib.pyplot.plot', 'plt.plot', (['(sz_left, sz_left)', '(sz_top_avg, sz_bottom_avg)', '"""k-"""'], {}), "((sz_left, sz_left), (sz_top_avg, sz_bottom_avg), 'k-')\n", (1072, 1127), True, 'import matplotlib.pyplot as plt\n'), ((1133, 1198), 'matplotlib.pyplot.plot', 'plt.plot', (['(sz_right, sz_right)', '(sz_top_avg, sz_bottom_avg)', '"""k-"""'], {}), "((sz_right, sz_right), (sz_top_avg, sz_bottom_avg), 'k-')\n", (1141, 1198), True, 'import matplotlib.pyplot as plt\n'), ((1227, 1260), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Horizontal Location"""'], {}), "('Horizontal Location')\n", (1237, 1260), True, 'import matplotlib.pyplot as plt\n'), ((1266, 1297), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Vertical Location"""'], {}), "('Vertical Location')\n", (1276, 1297), True, 'import matplotlib.pyplot as plt\n'), ((1385, 1397), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1395, 1397), True, 'import matplotlib.pyplot as plt\n'), ((1403, 1413), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1411, 1413), True, 'import matplotlib.pyplot as plt\n'), ((342, 463), 'matplotlib.pyplot.plot', 'plt.plot', (["group['plate_x']", "group['plate_z']"], {'marker': '"""o"""', 'linestyle': '""""""', 'color': '"""none"""', 'ms': '(3)', 'mec': '"""#9A9A9A"""', 'label': 'name'}), "(group['plate_x'], group['plate_z'], marker='o', linestyle='',\n color='none', ms=3, mec='#9A9A9A', label=name)\n", (350, 463), True, 'import matplotlib.pyplot as plt\n'), ((490, 611), 'matplotlib.pyplot.plot', 'plt.plot', (["group['plate_x']", "group['plate_z']"], {'marker': '"""o"""', 'linestyle': '""""""', 'color': '"""none"""', 'ms': '(3)', 'mec': '"""#03A77F"""', 'label': 'name'}), "(group['plate_x'], group['plate_z'], marker='o', linestyle='',\n color='none', ms=3, mec='#03A77F', label=name)\n", (498, 611), True, 'import matplotlib.pyplot as plt\n')] |
# -*- coding: utf-8 -*-
"""
Created on Sat May 25 13:17:49 2019
@author: Toonw
"""
import numpy as np
def vlen(a):
    # Euclidean length of a 2D vector
    return (a[0]**2 + a[1]**2)**0.5
def add(v1,v2):
    # component-wise sum of two 2D vectors
    return (v1[0]+v2[0], v1[1]+v2[1])
def sub(v1,v2):
    # component-wise difference of two 2D vectors
    return (v1[0]-v2[0], v1[1]-v2[1])
def unit_vector(v):
    # scale a vector to unit length
    vu = v / np.linalg.norm(v)
    return (vu[0], vu[1])
def angle_between(v1, v2):
    # angle between two vectors in radians, via the dot-product identity
    angle = np.arccos(np.dot(v1,v2)/(vlen(v1)*vlen(v2)))
    return angle
# Similarity measure of article
## https://pdfs.semanticscholar.org/60b5/aca20ba34d424f4236359bd5e6aa30487682.pdf
def sim_measure(A, B): # similarity between two shapes A and B
# print(A)
# print(B)
return 1 - (sum([(vlen(unit_vector(a))+vlen(unit_vector(b)))*angle_between(a,b) for a,b in zip(A,B)]))/(np.pi*(len(A)+len(B))) | [
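# A quick illustrative check of sim_measure (the shapes below are made up for the
# example; the helpers above index and divide their inputs, so numpy arrays are used).
if __name__ == '__main__':
    square = [np.array(v, dtype=float) for v in [(1, 0), (0, 1), (-1, 0), (0, -1)]]
    skewed = [np.array(v, dtype=float) for v in [(1, 0), (0, 1), (-1, 0.2), (0, -1)]]
    print(sim_measure(square, square))  # identical shapes -> 1.0
    print(sim_measure(square, skewed))  # slightly deformed shape -> just under 1.0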
"numpy.dot",
"numpy.linalg.norm"
]
| [((300, 317), 'numpy.linalg.norm', 'np.linalg.norm', (['v'], {}), '(v)\n', (314, 317), True, 'import numpy as np\n'), ((394, 408), 'numpy.dot', 'np.dot', (['v1', 'v2'], {}), '(v1, v2)\n', (400, 408), True, 'import numpy as np\n')] |
import unittest
#import tempfile
try:
from StringIO import StringIO
except:
from io import StringIO
import pyx12.error_handler
from pyx12.errors import EngineError # , X12PathError
import pyx12.x12context
import pyx12.params
from pyx12.test.x12testdata import datafiles
class X12fileTestCase(unittest.TestCase):
def setUp(self):
self.param = pyx12.params.params()
def _makeFd(self, x12str=None):
try:
if x12str:
fd = StringIO(x12str)
else:
fd = StringIO()
except:
if x12str:
fd = StringIO(x12str, encoding='ascii')
else:
fd = StringIO(encoding='ascii')
fd.seek(0)
return fd
class Delimiters(X12fileTestCase):
def test_arbitrary_delimiters(self):
str1 = 'ISA&00& &00& &ZZ&ZZ000 &ZZ&ZZ001 &030828&1128&U&00401&000010121&0&T&!+\n'
str1 += 'GS&HC&ZZ000&ZZ001&20030828&1128&17&X&004010X098A1+\n'
str1 += 'ST&837&11280001+\n'
str1 += 'REF&87&004010X098A1+\n'
str1 += 'SE&3&11280001+\n'
str1 += 'GE&1&17+\n'
str1 += 'IEA&1&000010121+\n'
fd = self._makeFd(str1)
errh = pyx12.error_handler.errh_null()
src = pyx12.x12context.X12ContextReader(self.param, errh, fd)
for datatree in src.iter_segments():
pass
self.assertEqual(src.subele_term, '!')
self.assertEqual(src.ele_term, '&')
self.assertEqual(src.seg_term, '+')
def test_binary_delimiters(self):
str1 = 'ISA&00& &00& &ZZ&ZZ000 &ZZ&ZZ001 &030828&1128&U&00401&000010121&0&T&!+\n'
str1 += 'GS&HC&ZZ000&ZZ001&20030828&1128&17&X&004010X098A1+\n'
str1 += 'ST&837&11280001+\n'
str1 += 'REF&87&004010X098A1+\n'
str1 += 'SE&3&11280001+\n'
str1 += 'GE&1&17+\n'
str1 += 'IEA&1&000010121+\n'
str1 = str1.replace('&', chr(0x1C))
str1 = str1.replace('+', chr(0x1D))
str1 = str1.replace('!', chr(0x1E))
fd = self._makeFd(str1)
errors = []
errh = pyx12.error_handler.errh_null()
src = pyx12.x12context.X12ContextReader(self.param, errh, fd)
for datatree in src.iter_segments():
pass
self.assertEqual(src.subele_term, chr(0x1E))
self.assertEqual(src.ele_term, chr(0x1C))
self.assertEqual(src.seg_term, chr(0x1D))
class TreeGetValue(X12fileTestCase):
def setUp(self):
fd = self._makeFd(datafiles['simple_837p']['source'])
param = pyx12.params.params()
errh = pyx12.error_handler.errh_null()
self.src = pyx12.x12context.X12ContextReader(param, errh, fd)
for datatree in self.src.iter_segments('2300'):
if datatree.id == '2300':
self.loop2300 = datatree
break
def test_get_line_numbers_2200(self):
loop2400 = self.loop2300.first('2400')
self.assertEqual(self.loop2300.seg_count, 19)
self.assertEqual(self.loop2300.cur_line_number, 21)
for seg in loop2400.select('CLM'):
self.assertEqual(seg.seg_count, 25)
self.assertEqual(seg.cur_line_number, 2271)
break
def test_get_line_numbers_2400(self):
loop2400 = self.loop2300.first('2400')
self.assertEqual(loop2400.seg_count, 35)
self.assertEqual(loop2400.cur_line_number, 37)
for svc in loop2400.select('SV1'):
self.assertEqual(svc.seg_count, 36)
self.assertEqual(svc.cur_line_number, 38)
break
def test_get_seg_value(self):
self.assertEqual(self.loop2300.get_value('CLM02'), '21')
self.assertEqual(self.loop2300.get_value('CLM99'), None)
def test_get_seg_value_fail_no_element_index(self):
self.assertRaises(IndexError, self.loop2300.get_value, 'CLM')
def test_get_parent_value(self):
loop2400 = self.loop2300.first('2400')
self.assertEqual(loop2400.get_value('../CLM01'), '3215338')
self.assertEqual(loop2400.get_value('../2310B/NM109'), '222185735')
def test_get_seg_value_idx(self):
for clm in self.loop2300.select('CLM'):
self.assertEqual(clm.get_value('02'), '21')
self.assertEqual(clm.get_value('05-3'), '1')
def test_get_first_value(self):
self.assertEqual(self.loop2300.get_value('2400/SV101'), 'HC:H2015:TT')
self.assertEqual(self.loop2300.get_value('2400/SV101-2'), 'H2015')
self.assertEqual(self.loop2300.get_value('2400/REF[6R]02'), '1057296')
self.assertEqual(self.loop2300.get_value('2400/2430/SVD02'), '21')
self.assertEqual(self.loop2300.get_value('2400/AMT[AAE]02'), '21')
def test_get_first_value_2400(self):
loop2400 = self.loop2300.first('2400')
self.assertEqual(loop2400.get_value('AMT[AAE]02'), '21')
self.assertEqual(loop2400.get_value('2430/AMT[AAE]02'), None)
def test_get_no_value(self):
self.assertEqual(self.loop2300.get_value('2400/SV199'), None)
self.assertEqual(self.loop2300.get_value('2400'), None)
def test_get_parent_no_value(self):
loop2400 = self.loop2300.first('2400')
self.assertEqual(loop2400.get_value('../2310E/NM109'), None)
def test_get_specific_qual(self):
self.assertEqual(self.loop2300.get_value('2400/REF[6R]02'), '1057296')
self.assertEqual(self.loop2300.get_value('2400/REF[G1]02'), None)
self.assertEqual(self.loop2300.get_value('2400/REF[XX]02'), None)
class TreeSetValue(X12fileTestCase):
def setUp(self):
fd = self._makeFd(datafiles['simple_837p']['source'])
param = pyx12.params.params()
errh = pyx12.error_handler.errh_null()
self.src = pyx12.x12context.X12ContextReader(param, errh, fd)
for datatree in self.src.iter_segments('2300'):
if datatree.id == '2300':
self.loop2300 = datatree
break
def test_set_seg_value(self):
self.loop2300.set_value('CLM02', '50')
self.assertEqual(self.loop2300.get_value('CLM02'), '50')
def test_set_first_value_2400(self):
loop2400 = self.loop2300.first('2400')
loop2400.set_value('AMT[AAE]02', '25')
self.assertEqual(loop2400.get_value('AMT[AAE]02'), '25')
class TreeSelect(X12fileTestCase):
def setUp(self):
fd = self._makeFd(datafiles['simple_837p']['source'])
self.param = pyx12.params.params()
errh = pyx12.error_handler.errh_null()
src = pyx12.x12context.X12ContextReader(self.param, errh, fd)
for datatree in src.iter_segments('2300'):
if datatree.id == '2300':
self.loop2300 = datatree
break
#def test_select_loop_and_parent(self):
# loop2400 = self.loop2300.first('2400')
# assert loop2400.id == '2400', 'Not in 2400'
# ct = 0
# newtree = loop2400.parent
# for newtree in loop2400.select('../'):
# self.assertEqual(newtree.id, '2300')
# ct += 1
# self.assertEqual(ct, 1)
def test_select_loops(self):
ct = 0
for newtree in self.loop2300.select('2400'):
self.assertEqual(newtree.id, '2400')
ct += 1
self.assertEqual(ct, 2)
def test_select_seg(self):
ct = 0
for newtree in self.loop2300.select('2400/SV1'):
self.assertEqual(newtree.id, 'SV1')
self.assertEqual(newtree.get_value('SV102'), '21')
ct += 1
self.assertEqual(ct, 2)
def test_select_parent_seg(self):
loop2400 = self.loop2300.first('2400')
assert loop2400.id == '2400', 'Not in 2400'
ct = 0
for newtree in loop2400.select('../CLM'):
self.assertEqual(newtree.id, 'CLM')
self.assertEqual(newtree.get_value('CLM01'), '3215338')
ct += 1
self.assertEqual(ct, 1)
def test_select_from_st(self):
fd = self._makeFd(datafiles['835id']['source'])
errh = pyx12.error_handler.errh_null()
src = pyx12.x12context.X12ContextReader(self.param, errh, fd)
ct = 0
for datatree in src.iter_segments('ST_LOOP'):
if datatree.id == 'ST_LOOP':
for claim in datatree.select('DETAIL/2000/2100'):
self.assertEqual(claim.id, '2100')
ct += 1
self.assertEqual(
ct, 3, 'Found %i 2100 loops. Should have %i' % (ct, 3))
def test_select_from_gs(self):
fd = self._makeFd(datafiles['simple_837i']['source'])
errh = pyx12.error_handler.errh_null()
src = pyx12.x12context.X12ContextReader(self.param, errh, fd)
ct = 0
for datatree in src.iter_segments('GS_LOOP'):
if datatree.id == 'GS_LOOP':
for sub in datatree.select('ST_LOOP/DETAIL/2000A/2000B/2300/2400'):
self.assertEqual(sub.id, '2400')
ct += 1
self.assertEqual(
ct, 6, 'Found %i 2400 loops. Should have %i' % (ct, 6))
class TreeSelectFromSegment(X12fileTestCase):
def test_select_from_seg_fail(self):
fd = self._makeFd(datafiles['835id']['source'])
param = pyx12.params.params()
errh = pyx12.error_handler.errh_null()
src = pyx12.x12context.X12ContextReader(param, errh, fd)
for datatree in src.iter_segments('ST_LOOP'):
if datatree.id == 'GS':
#self.assertFalseRaises(AttributeError, datatree.select, 'DETAIL/2000/2100')
for claim in datatree.select('DETAIL/2000/2100'):
pass
class TreeAddSegment(X12fileTestCase):
def setUp(self):
fd = self._makeFd(datafiles['simple_837p']['source'])
param = pyx12.params.params()
errh = pyx12.error_handler.errh_null()
self.src = pyx12.x12context.X12ContextReader(param, errh, fd)
for datatree in self.src.iter_segments('2300'):
if datatree.id == '2300':
self.loop2300 = datatree
break
def test_add_new_plain(self):
seg_data = pyx12.segment.Segment('HCP*00*7.11~', '~', '*', ':')
new_node = self.loop2300.add_segment(seg_data)
self.assertNotEqual(new_node, None)
def test_add_new_id(self):
seg_data = pyx12.segment.Segment('REF*F5*6.11~', '~', '*', ':')
new_node = self.loop2300.add_segment(seg_data)
self.assertNotEqual(new_node, None)
def test_add_new_not_exists(self):
seg_data = pyx12.segment.Segment('ZZZ*00~', '~', '*', ':')
self.assertRaises(pyx12.errors.X12PathError,
self.loop2300.add_segment, seg_data)
class TreeAddSegmentString(X12fileTestCase):
def setUp(self):
fd = self._makeFd(datafiles['simple_837p']['source'])
param = pyx12.params.params()
errh = pyx12.error_handler.errh_null()
self.src = pyx12.x12context.X12ContextReader(param, errh, fd)
for datatree in self.src.iter_segments('2300'):
if datatree.id == '2300':
self.loop2300 = datatree
break
def test_add_new_plain(self):
new_node = self.loop2300.add_segment('HCP*00*7.11~')
self.assertNotEqual(new_node, None)
def test_add_new_id(self):
new_node = self.loop2300.add_segment('REF*F5*6.11')
self.assertNotEqual(new_node, None)
def test_add_new_not_exists(self):
self.assertRaises(pyx12.errors.X12PathError,
self.loop2300.add_segment, 'ZZZ*00~')
class SegmentExists(X12fileTestCase):
def setUp(self):
fd = self._makeFd(datafiles['simple_837p']['source'])
self.param = pyx12.params.params()
errh = pyx12.error_handler.errh_null()
self.src = pyx12.x12context.X12ContextReader(self.param, errh, fd)
for datatree in self.src.iter_segments('2300'):
if datatree.id == '2300':
self.loop2300 = datatree
break
def test_qual_segment(self):
self.assertTrue(self.loop2300.exists('2310B'))
self.assertTrue(self.loop2300.exists('2310B/NM1[82]'))
for loop2310b in self.loop2300.select('2310B'):
self.assertTrue(loop2310b.exists('NM1'))
self.assertTrue(loop2310b.exists('NM1[82]'))
def test_qual_segment_sub_loop(self):
self.assertTrue(self.loop2300.exists('2400/2430'))
self.assertTrue(self.loop2300.exists('2400/2430/DTP[573]'))
self.assertFalse(self.loop2300.exists('2400/2430/DTP[111]'))
self.assertTrue(self.loop2300.exists('2400/2430/DTP[573]03'))
def test_qual_segment_select_sub_loop(self):
loop2430 = self.loop2300.first('2400/2430')
self.assertTrue(loop2430.exists('DTP'))
self.assertTrue(loop2430.exists('DTP[573]'))
self.assertTrue(loop2430.exists('DTP[573]03'))
def test_qual_834_dtp(self):
fd = self._makeFd(datafiles['834_lui_id']['source'])
errh = pyx12.error_handler.errh_null()
src = pyx12.x12context.X12ContextReader(self.param, errh, fd)
for datatree in src.iter_segments('2300'):
if datatree.id == '2300':
loop2300 = datatree
break
self.assertTrue(loop2300.exists('DTP[348]'))
self.assertFalse(loop2300.exists('DTP[349]'))
class TreeAddLoop(X12fileTestCase):
def setUp(self):
fd = self._makeFd(datafiles['simple_837p']['source'])
param = pyx12.params.params()
errh = pyx12.error_handler.errh_null()
self.src = pyx12.x12context.X12ContextReader(param, errh, fd)
for datatree in self.src.iter_segments('2300'):
if datatree.id == '2300':
self.loop2300 = datatree
break
def test_add_new_plain(self):
seg_data = pyx12.segment.Segment(
'NM1*82*2*Provider 1*****ZZ*9898798~', '~', '*', ':')
new_node = self.loop2300.add_loop(seg_data)
self.assertNotEqual(new_node, None)
self.assertTrue(self.loop2300.exists('2310B'))
for loop2310b in self.loop2300.select('2310B'):
self.assertTrue(loop2310b.exists('NM1'))
self.assertTrue(loop2310b.exists('NM1[82]'))
def test_add_new_string_seg(self):
old_ct = self.loop2300.count('2400')
new_node = self.loop2300.add_loop('LX*5~')
self.assertNotEqual(new_node, None)
self.assertTrue(self.loop2300.exists('2400'))
self.assertEqual(old_ct + 1, self.loop2300.count('2400'))
for loop2400 in self.loop2300.select('2400'):
self.assertTrue(loop2400.exists('LX'))
class TreeAddLoopDetail(X12fileTestCase):
def test_add_loops_under_detail(self):
str1 = 'ISA&00& &00& &ZZ&ZZ000 &ZZ&ZZ001 &030828&1128&U&00401&000010121&0&T&!+\n'
str1 += 'GS&BE&ZZ000&ZZ001&20030828&1128&17&X&004010X095A1+\n'
str1 += 'ST&834&11280001+\n'
str1 += 'BGN&+\n'
str1 += 'INS&Y&18&30&XN&AE&RT+\n'
str1 += 'SE&4&11280001+\n'
str1 += 'GE&1&17+\n'
str1 += 'IEA&1&000010121+\n'
fd = self._makeFd(str1)
errors = []
param = pyx12.params.params()
errh = pyx12.error_handler.errh_null()
src = pyx12.x12context.X12ContextReader(param, errh, fd)
for st_loop in src.iter_segments('ST_LOOP'):
if st_loop.id == 'ST_LOOP' and st_loop.exists('DETAIL'):
detail = st_loop.first('DETAIL')
self.assertTrue(detail.exists('2000'))
detail.first('2000').delete()
self.assertFalse(detail.exists('2000'))
detail.add_loop('INS&Y&18&30&XN&AE&RT+')
self.assertTrue(detail.exists('2000'))
class TreeAddNode(X12fileTestCase):
def setUp(self):
self.param = pyx12.params.params()
def test_add_loop(self):
fd = self._makeFd(datafiles['simple_837p']['source'])
errh = pyx12.error_handler.errh_null()
self.src = pyx12.x12context.X12ContextReader(self.param, errh, fd)
for datatree in self.src.iter_segments('2300'):
if datatree.id == '2300':
loop2300 = datatree
break
self.assertEqual(self._get_count(loop2300, '2400'), 2)
for node in loop2300.select('2400'):
loop2300.add_node(node)
self.assertEqual(self._get_count(loop2300, '2400'), 4)
def test_add_segment(self):
fd = self._makeFd(datafiles['simple_837p']['source'])
errh = pyx12.error_handler.errh_null()
self.src = pyx12.x12context.X12ContextReader(self.param, errh, fd)
for datatree in self.src.iter_segments('2300'):
if datatree.id == '2300':
loop2300 = datatree
break
self.assertEqual(self._get_count(loop2300, 'CN1'), 1)
for node in loop2300.select('CN1'):
loop2300.add_node(node)
self.assertEqual(self._get_count(loop2300, 'CN1'), 2)
def test_fail(self):
fd = self._makeFd(datafiles['simple_837p']['source'])
errh = pyx12.error_handler.errh_null()
self.src = pyx12.x12context.X12ContextReader(self.param, errh, fd)
for datatree in self.src.iter_segments('2300'):
if datatree.id == '2300':
loop2300 = datatree
break
for node in loop2300.select('CN1'):
cn1 = node
break
n2400 = None
for node in loop2300.select('2400'):
n2400 = node
break
assert n2400 is not None, 'Loop 2400 was not matched'
self.assertRaises(pyx12.errors.X12PathError, n2400.add_node, cn1)
def _get_count(self, node, loop_id):
ct = 0
for n in node.select(loop_id):
ct += 1
return ct
class CountRepeatingLoop(X12fileTestCase):
def setUp(self):
fd = self._makeFd(datafiles['simple_837p']['source'])
param = pyx12.params.params()
errh = pyx12.error_handler.errh_null()
self.src = pyx12.x12context.X12ContextReader(param, errh, fd)
for datatree in self.src.iter_segments('2300'):
if datatree.id == '2300' and datatree.get_value('CLM01') == '5555':
self.loop2300 = datatree
break
def test_repeat_2400(self):
ct = 0
for loop_2400 in self.loop2300.select('2400'):
ct += 1
self.assertEqual(
ct, 3, 'Found %i 2400 loops. Should have %i' % (ct, 3))
def test_repeat_2430(self):
ct = 0
for loop_2430 in self.loop2300.select('2400/2430'):
ct += 1
self.assertEqual(
ct, 0, 'Found %i 2430 loops. Should have %i' % (ct, 0))
class IterateTree(X12fileTestCase):
def setUp(self):
fd = self._makeFd(datafiles['simple_837p']['source'])
param = pyx12.params.params()
errh = pyx12.error_handler.errh_null()
self.src = pyx12.x12context.X12ContextReader(param, errh, fd)
def test_iterate_all(self):
ct_2000a = 0
ct_other = 0
for datatree in self.src.iter_segments('2000A'):
if datatree.id == '2000A':
ct_2000a += 1
else:
ct_other += 1
self.assertEqual(ct_2000a, 1,
'Found %i 2000A loops. Should have %i' % (ct_2000a, 1))
self.assertEqual(ct_other, 11, 'Found %i external segments. Should have %i' % (ct_other, 11))
class TreeDeleteSegment(X12fileTestCase):
def setUp(self):
fd = self._makeFd(datafiles['simple_837p']['source'])
param = pyx12.params.params()
errh = pyx12.error_handler.errh_null()
self.src = pyx12.x12context.X12ContextReader(param, errh, fd)
for datatree in self.src.iter_segments('2300'):
if datatree.id == '2300':
self.loop2300 = datatree
break
def test_delete(self):
assert self.loop2300.get_value('CN101') == '05'
seg_data = pyx12.segment.Segment('CN1*05~', '~', '*', ':')
self.assertTrue(self.loop2300.delete_segment(seg_data))
self.assertEqual(self.loop2300.get_value('CN101'), None)
def test_delete_fail(self):
seg_data = pyx12.segment.Segment('HCP*00*7.11~', '~', '*', ':')
self.assertFalse(self.loop2300.delete_segment(seg_data))
class TreeDeleteLoop(X12fileTestCase):
def setUp(self):
fd = self._makeFd(datafiles['simple_837p']['source'])
param = pyx12.params.params()
errh = pyx12.error_handler.errh_null()
self.src = pyx12.x12context.X12ContextReader(param, errh, fd)
for datatree in self.src.iter_segments('2300'):
if datatree.id == '2300':
self.loop2300 = datatree
break
def test_delete(self):
self.assertEqual(self.loop2300.get_value('2400/LX01'), '1')
self.assertTrue(self.loop2300.delete_node('2400'))
self.assertEqual(self.loop2300.get_value('2400/LX01'), '2')
def test_delete_fail(self):
self.assertFalse(self.loop2300.delete_node('2500'))
class NodeDeleteSelf(X12fileTestCase):
def setUp(self):
fd = self._makeFd(datafiles['simple_837p']['source'])
param = pyx12.params.params()
errh = pyx12.error_handler.errh_null()
self.src = pyx12.x12context.X12ContextReader(param, errh, fd)
for datatree in self.src.iter_segments('2300'):
if datatree.id == '2300':
self.loop2300 = datatree
break
def test_delete(self):
cn1 = self.loop2300.first('CN1')
assert cn1.id == 'CN1'
cn1.delete()
try:
a = cn1.id
except EngineError:
pass
except:
a = cn1.id
#self.assertRaises(EngineError, cn1.id)
class TreeCopy(X12fileTestCase):
def setUp(self):
self.param = pyx12.params.params()
def test_add_node(self):
fd = self._makeFd(datafiles['835id']['source'])
errh = pyx12.error_handler.errh_null()
src = pyx12.x12context.X12ContextReader(self.param, errh, fd)
for datatree in src.iter_segments('2100'):
if datatree.id == '2100':
for svc in datatree.select('2110'):
new_svc = svc.copy()
new_svc.set_value('SVC01', 'XX:AAAAA')
self.assertTrue(not svc is new_svc)
datatree.add_node(new_svc)
#for svc in datatree.select('2110'):
# print svc.get_value('SVC01')
break
def test_copy_seg(self):
fd = self._makeFd(datafiles['835id']['source'])
errh = pyx12.error_handler.errh_null()
src = pyx12.x12context.X12ContextReader(self.param, errh, fd)
for datatree in src.iter_segments('2100'):
if datatree.id == '2100':
for svc in datatree.select('2110'):
new_svc = svc.copy()
self.assertFalse(svc is new_svc)
self.assertEqual(svc.get_value('SVC01'),
new_svc.get_value('SVC01'))
new_svc.set_value('SVC01', 'XX:AAAAA')
self.assertFalse(svc is new_svc)
self.assertNotEqual(svc.get_value('SVC01'),
new_svc.get_value('SVC01'))
break
| [
"io.StringIO"
]
| [((482, 498), 'io.StringIO', 'StringIO', (['x12str'], {}), '(x12str)\n', (490, 498), False, 'from io import StringIO\n'), ((538, 548), 'io.StringIO', 'StringIO', ([], {}), '()\n', (546, 548), False, 'from io import StringIO\n'), ((609, 643), 'io.StringIO', 'StringIO', (['x12str'], {'encoding': '"""ascii"""'}), "(x12str, encoding='ascii')\n", (617, 643), False, 'from io import StringIO\n'), ((683, 709), 'io.StringIO', 'StringIO', ([], {'encoding': '"""ascii"""'}), "(encoding='ascii')\n", (691, 709), False, 'from io import StringIO\n')] |
#!/usr/bin/env python
import os
import json
import tornado.ioloop
import tornado.log
import tornado.web
from google.oauth2 import id_token
from google.auth.transport import requests as google_requests
import jwt
import requests
API_KEY = os.environ.get('OPEN_WEATHER_MAP_KEY', None)
PROJECT_ID = os.environ.get('PROJECT_ID', None)
class WeatherHandler(tornado.web.RequestHandler):
def start_conversation (self):
response = {
'expectUserResponse': True,
'expectedInputs': [
{
'possibleIntents': {'intent': 'actions.intent.TEXT'},
'inputPrompt': {
'richInitialPrompt': {
'items': [
{
'simpleResponse': {
'ssml': '<speak>What city would you like the weather for?</speak>'
}
}
]
}
}
}
]
}
self.set_header("Content-Type", 'application/json')
self.set_header('Google-Assistant-API-Version', 'v2')
self.write(json.dumps(response, indent=2))
def get_weather (self, city):
api_response = requests.get(
'http://api.openweathermap.org/data/2.5/weather',
params={'q': city, 'APPID': API_KEY}
)
data = api_response.json()
if 'main' not in data:
response = {
'expectUserResponse': False,
'finalResponse': {
'richResponse': {
'items': [
{
'simpleResponse': {
'ssml': '<speak>City not found - meow!</speak>'
}
}
]
}
}
}
else:
      temp = round(1.8 * (data['main']['temp'] - 273) + 32)  # convert Kelvin to Fahrenheit (approximate 273 offset)
response = {
'expectUserResponse': False,
'finalResponse': {
'richResponse': {
'items': [
{
'simpleResponse': {
'ssml': '<speak>The temperature in {} is {} degrees.</speak>'.format(city, temp)
}
}
]
}
}
}
self.set_header("Content-Type", 'application/json')
self.set_header('Google-Assistant-API-Version', 'v2')
self.write(json.dumps(response, indent=2))
def get (self):
city = self.get_query_argument('city', '')
if city:
self.get_weather(city)
else:
self.start_conversation()
def post (self):
token = self.request.headers.get("Authorization")
jwt_data = jwt.decode(token, verify=False)
if jwt_data['aud'] != PROJECT_ID:
self.set_status(401)
self.write('Token Mismatch')
else:
request = google_requests.Request()
try:
# Makes external request, remove if not needed to speed things up
id_info = id_token.verify_oauth2_token(token, request, PROJECT_ID)
except:
self.set_status(401)
self.write('Token Mismatch')
data = json.loads(self.request.body.decode('utf-8'))
intent = data['inputs'][0]['intent']
print(intent)
print(data['conversation']['conversationId'])
if intent == 'actions.intent.MAIN':
self.start_conversation()
else:
city = data['inputs'][0]['arguments'][0]['textValue']
self.get_weather(city)
def make_app():
return tornado.web.Application([
(r"/weather-app", WeatherHandler),
], autoreload=True)
if __name__ == "__main__":
tornado.log.enable_pretty_logging()
app = make_app()
app.listen(int(os.environ.get('PORT', '8000')))
tornado.ioloop.IOLoop.current().start()
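# Example local usage (assumed values): with OPEN_WEATHER_MAP_KEY and PROJECT_ID set,
#   curl 'http://localhost:8000/weather-app?city=Portland'
# returns the JSON built in get_weather(); omitting ?city= returns the
# start_conversation() prompt, and POST requests expect a Google Assistant JWT.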
| [
"jwt.decode",
"google.auth.transport.requests.Request",
"json.dumps",
"os.environ.get",
"requests.get",
"google.oauth2.id_token.verify_oauth2_token"
]
| [((243, 287), 'os.environ.get', 'os.environ.get', (['"""OPEN_WEATHER_MAP_KEY"""', 'None'], {}), "('OPEN_WEATHER_MAP_KEY', None)\n", (257, 287), False, 'import os\n'), ((301, 335), 'os.environ.get', 'os.environ.get', (['"""PROJECT_ID"""', 'None'], {}), "('PROJECT_ID', None)\n", (315, 335), False, 'import os\n'), ((1119, 1223), 'requests.get', 'requests.get', (['"""http://api.openweathermap.org/data/2.5/weather"""'], {'params': "{'q': city, 'APPID': API_KEY}"}), "('http://api.openweathermap.org/data/2.5/weather', params={'q':\n city, 'APPID': API_KEY})\n", (1131, 1223), False, 'import requests\n'), ((2463, 2494), 'jwt.decode', 'jwt.decode', (['token'], {'verify': '(False)'}), '(token, verify=False)\n', (2473, 2494), False, 'import jwt\n'), ((1035, 1065), 'json.dumps', 'json.dumps', (['response'], {'indent': '(2)'}), '(response, indent=2)\n', (1045, 1065), False, 'import json\n'), ((2191, 2221), 'json.dumps', 'json.dumps', (['response'], {'indent': '(2)'}), '(response, indent=2)\n', (2201, 2221), False, 'import json\n'), ((2622, 2647), 'google.auth.transport.requests.Request', 'google_requests.Request', ([], {}), '()\n', (2645, 2647), True, 'from google.auth.transport import requests as google_requests\n'), ((3444, 3474), 'os.environ.get', 'os.environ.get', (['"""PORT"""', '"""8000"""'], {}), "('PORT', '8000')\n", (3458, 3474), False, 'import os\n'), ((2751, 2807), 'google.oauth2.id_token.verify_oauth2_token', 'id_token.verify_oauth2_token', (['token', 'request', 'PROJECT_ID'], {}), '(token, request, PROJECT_ID)\n', (2779, 2807), False, 'from google.oauth2 import id_token\n')] |
from typing import Iterator, NamedTuple, Tuple
from cached_property import cached_property
from cv2 import Rodrigues
from pyquaternion import Quaternion
class Coordinates(NamedTuple):
"""
:param float x: X coordinate
:param float y: Y coordinate
"""
x: float
y: float
class ThreeDCoordinates(NamedTuple):
"""
:param float x: X coordinate
:param float y: Y coordinate
:param float z: Z coordinate
"""
x: float
y: float
z: float
class Spherical(NamedTuple):
"""
:param float rot_x: Rotation around the X-axis, in radians
:param float rot_y: Rotation around the Y-axis, in radians
:param float dist: Distance
"""
rot_x: float
rot_y: float
dist: int
ThreeTuple = Tuple[float, float, float]
RotationMatrix = Tuple[ThreeTuple, ThreeTuple, ThreeTuple]
class Orientation:
"""The orientation of an object in 3-D space."""
def __init__(self, e_x: float, e_y: float, e_z: float):
"""
Construct a quaternion given the components of a rotation vector.
More information: https://w.wiki/Fci
"""
rotation_matrix, _ = Rodrigues((e_x, e_y, e_z))
self._quaternion = Quaternion(matrix=rotation_matrix)
@property
def rot_x(self) -> float:
"""Get rotation angle around x axis in radians."""
return self.roll
@property
def rot_y(self) -> float:
"""Get rotation angle around y axis in radians."""
return self.pitch
@property
def rot_z(self) -> float:
"""Get rotation angle around z axis in radians."""
return self.yaw
@property
def yaw(self) -> float:
"""Get rotation angle around z axis in radians."""
return self.yaw_pitch_roll[0]
@property
def pitch(self) -> float:
"""Get rotation angle around y axis in radians."""
return self.yaw_pitch_roll[1]
@property
def roll(self) -> float:
"""Get rotation angle around x axis in radians."""
return self.yaw_pitch_roll[2]
@cached_property
def yaw_pitch_roll(self) -> ThreeTuple:
"""
Get the equivalent yaw-pitch-roll angles.
Specifically intrinsic Tait-Bryan angles following the z-y'-x'' convention.
"""
return self._quaternion.yaw_pitch_roll
def __iter__(self) -> Iterator[float]:
"""
Get an iterator over the rotation angles.
Returns:
An iterator of floating point angles in order x, y, z.
"""
return iter([self.rot_x, self.rot_y, self.rot_z])
@cached_property
def rotation_matrix(self) -> RotationMatrix:
"""
Get the rotation matrix represented by this orientation.
Returns:
A 3x3 rotation matrix as a tuple of tuples.
"""
r_m = self._quaternion.rotation_matrix
return (
(r_m[0][0], r_m[0][1], r_m[0][2]),
(r_m[1][0], r_m[1][1], r_m[1][2]),
(r_m[2][0], r_m[2][1], r_m[2][2]),
)
@property
def quaternion(self) -> Quaternion:
"""Get the quaternion represented by this orientation."""
return self._quaternion
def __repr__(self) -> str:
return "Orientation(rot_x={},rot_y={},rot_z={})".format(
self.rot_x, self.rot_y, self.rot_z
)
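# A small illustrative usage (values chosen for demonstration): a rotation vector
# of pi/2 about the z axis should yield a yaw of roughly pi/2 and near-zero
# pitch and roll under the z-y'-x'' convention described above.
if __name__ == "__main__":
    import math
    orientation = Orientation(0.0, 0.0, math.pi / 2)
    print(orientation.yaw, orientation.pitch, orientation.roll)
    print(orientation.rotation_matrix)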
| [
"pyquaternion.Quaternion",
"cv2.Rodrigues"
]
| [((1153, 1179), 'cv2.Rodrigues', 'Rodrigues', (['(e_x, e_y, e_z)'], {}), '((e_x, e_y, e_z))\n', (1162, 1179), False, 'from cv2 import Rodrigues\n'), ((1207, 1241), 'pyquaternion.Quaternion', 'Quaternion', ([], {'matrix': 'rotation_matrix'}), '(matrix=rotation_matrix)\n', (1217, 1241), False, 'from pyquaternion import Quaternion\n')] |
import sqlite3
import os
import datetime
__all__ = ['DMARCStorage', 'totimestamp']
def totimestamp(datetime_object):
if datetime_object.utcoffset() is not None:
utc_naive = datetime_object.replace(tzinfo=None) - datetime_object.utcoffset()
else:
utc_naive = datetime_object
return (utc_naive - datetime.datetime(1970, 1, 1)).total_seconds()
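# e.g. totimestamp(datetime.datetime(1970, 1, 2)) == 86400.0; offset-aware datetimes
# are first normalised to naive UTC before the epoch is subtracted.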
class DMARCStorage(object):
def __init__(self, database_filename='dmarc.sqlite', database_directory="./results"):
# Create or connect to the database:
database_path = os.path.join(database_directory, database_filename)
if not os.path.exists(database_directory):
os.makedirs(database_directory)
self._conn = sqlite3.connect(database_path)
        # Set autocommit to true and initialise the cursor:
self._conn.isolation_level = None
self._cur = self._conn.cursor()
# Create the tables if they don't exist already:
self._init_database()
def __del__(self):
if self._conn is not None:
self._close_connection()
def _init_database(self):
self._cur.execute("PRAGMA foreign_keys = ON;")
self._cur.execute("""CREATE TABLE IF NOT EXISTS dmarc_reports (
report_id TEXT PRIMARY KEY,
receiver TEXT,
report_filename TEXT,
report_start INTEGER,
report_end INTEGER
);""")
self._cur.execute("""CREATE TABLE IF NOT EXISTS dmarc_records (
report_id TEXT REFERENCES dmarc_reports(report_id) ON DELETE CASCADE,
record_id INTEGER,
ip_address TEXT,
hostname TEXT,
disposition TEXT,
reason TEXT,
spf_pass INTEGER,
dkim_pass INTEGER,
header_from TEXT,
envelope_from TEXT,
count INTEGER,
PRIMARY KEY (report_id, record_id)
);""")
self._cur.execute("""CREATE TABLE IF NOT EXISTS spf_results (
report_id TEXT,
record_id INTEGER,
spf_id INTEGER,
domain TEXT,
result TEXT,
PRIMARY KEY (report_id, record_id, spf_id),
FOREIGN KEY (report_id, record_id)
REFERENCES dmarc_records(report_id, record_id)
ON DELETE CASCADE
);""")
self._cur.execute("""CREATE TABLE IF NOT EXISTS dkim_signatures (
report_id TEXT,
record_id INTEGER,
signature_id INTEGER,
domain TEXT,
result TEXT,
selector TEXT,
PRIMARY KEY (report_id, record_id, signature_id),
FOREIGN KEY (report_id, record_id)
REFERENCES dmarc_records(report_id, record_id)
ON DELETE CASCADE,
CONSTRAINT unique_dkim_sig
UNIQUE (report_id, record_id, domain, result, selector)
);""")
def _delete_all_data(self):
# Drop the tables in the right order:
self._cur.execute("DROP TABLE dkim_signatures;")
self._cur.execute("DROP TABLE spf_results;")
self._cur.execute("DROP TABLE dmarc_records;")
self._cur.execute("DROP TABLE dmarc_reports;")
# Recreate them again, empty:
self._init_database()
def _close_connection(self):
self._conn.close()
self._conn = None
def report_already_exists(self, report_filename):
# Check if a report with that filename already exists:
self._cur.execute("SELECT report_filename FROM dmarc_reports WHERE report_filename=?;", (report_filename,))
already_exists = self._cur.fetchone() is not None
return already_exists
def save_new_report(self, report):
# Persist the report itself:
self._cur.execute("INSERT INTO dmarc_reports VALUES (?,?,?,?,?);",
[report.id, report.receiver, report.filename,
totimestamp(report.start_date), totimestamp(report.end_date)])
# Persist each record of that report with a generated ID:
for rec_id, rec in enumerate(report.records):
self._cur.execute("INSERT INTO dmarc_records VALUES (?,?,?,?,?,?,?,?,?,?,?);",
[report.id, rec_id, rec.ip, rec.host, rec.disposition, rec.reason,
rec.spf_pass, rec.dkim_pass, rec.header_from, rec.envelope_from,
rec.count])
# Persist the SPF data:
for spf_id, spf_result in enumerate(rec.spf_results):
self._cur.execute("INSERT INTO spf_results VALUES (?,?,?,?,?);",
[report.id, rec_id, spf_id, spf_result["domain"], spf_result["result"]])
# Persist all the DKIM signatures with generated IDs
for sig_id, sig in enumerate(rec.dkim_signatures):
self._cur.execute("INSERT INTO dkim_signatures VALUES (?,?,?,?,?,?);",
[report.id, rec_id, sig_id, sig["domain"], sig["result"], sig["selector"]])
def get_reporting_start_date(self):
self._cur.execute("SELECT min(report_start) FROM dmarc_reports;")
return datetime.datetime.utcfromtimestamp(self._cur.fetchone()[0])
def get_reporting_end_date(self):
self._cur.execute("SELECT max(report_start) FROM dmarc_reports;")
return datetime.datetime.utcfromtimestamp(self._cur.fetchone()[0])
def get_number_reports(self):
self._cur.execute("SELECT count(*) FROM dmarc_reports;")
return self._cur.fetchone()[0]
def get_count_by_disposition(self):
self._cur.execute("SELECT disposition, sum(count) FROM dmarc_records GROUP BY disposition;")
return {str(r[0]): r[1] for r in self._cur.fetchall()}
def get_count_by_hostnames(self):
self._cur.execute("SELECT hostname, ip_address, sum(count) FROM dmarc_records GROUP BY hostname, ip_address;")
return {str(r[0]) if r[0] is not None else str(r[1]): r[2] for r in self._cur.fetchall()}
def get_count_by_receiver(self):
self._cur.execute("SELECT receiver, sum(count) FROM dmarc_reports JOIN dmarc_records " +
"ON dmarc_reports.report_id=dmarc_records.report_id GROUP BY receiver;")
return {str(r[0]): r[1] for r in self._cur.fetchall()}
def get_count_by_dkim_domain(self):
self._cur.execute("SELECT domain, sum(count) FROM dmarc_records JOIN dkim_signatures " +
"ON dmarc_records.report_id=dkim_signatures.report_id AND " +
"dmarc_records.record_id=dkim_signatures.record_id GROUP BY domain;")
return {str(r[0]): r[1] for r in self._cur.fetchall()}
def get_count_by_status_string(self):
self._cur.execute("SELECT spf_pass, dkim_pass, sum(count) FROM dmarc_records GROUP BY spf_pass, dkim_pass;")
status = {1: "pass", 0: "fail", None: "n/a"}
return {"SPF:%s, DKIM:%s" % (status[r[0]], status[r[1]]): r[2] for r in self._cur.fetchall()}
def get_raw_spf_status_count_by_timestamp(self):
self._cur.execute("SELECT report_start, spf_pass, count FROM dmarc_reports JOIN dmarc_records " +
"ON dmarc_reports.report_id=dmarc_records.report_id;")
return self._cur.fetchall()
def get_raw_dkim_status_count_by_timestamp(self):
self._cur.execute("SELECT report_start, dkim_pass, count FROM dmarc_reports JOIN dmarc_records " +
"ON dmarc_reports.report_id=dmarc_records.report_id;")
return self._cur.fetchall()
def get_raw_dmarc_status_count_by_timestamp(self):
self._cur.execute("SELECT report_start, spf_pass + dkim_pass, count " +
"FROM dmarc_reports JOIN dmarc_records " +
"ON dmarc_reports.report_id=dmarc_records.report_id;")
return self._cur.fetchall()
def execute_query(self, sql, values=None):
if values is not None:
self._cur.execute(sql, values)
else:
self._cur.execute(sql)
return self._cur.fetchall()
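# Minimal illustrative usage with the default paths used above: open (or create)
# the SQLite store, then read a couple of aggregates.
if __name__ == "__main__":
    storage = DMARCStorage(database_filename="dmarc.sqlite", database_directory="./results")
    print(storage.get_number_reports())
    print(storage.get_count_by_disposition())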
| [
"datetime.datetime",
"os.path.exists",
"sqlite3.connect",
"os.makedirs",
"os.path.join"
]
| [((562, 613), 'os.path.join', 'os.path.join', (['database_directory', 'database_filename'], {}), '(database_directory, database_filename)\n', (574, 613), False, 'import os\n'), ((730, 760), 'sqlite3.connect', 'sqlite3.connect', (['database_path'], {}), '(database_path)\n', (745, 760), False, 'import sqlite3\n'), ((629, 663), 'os.path.exists', 'os.path.exists', (['database_directory'], {}), '(database_directory)\n', (643, 663), False, 'import os\n'), ((677, 708), 'os.makedirs', 'os.makedirs', (['database_directory'], {}), '(database_directory)\n', (688, 708), False, 'import os\n'), ((325, 354), 'datetime.datetime', 'datetime.datetime', (['(1970)', '(1)', '(1)'], {}), '(1970, 1, 1)\n', (342, 354), False, 'import datetime\n')] |
from setuptools import setup, find_packages
setup(
name="sumologic-sdk",
version="0.1.9",
packages=find_packages(),
install_requires=['requests>=2.2.1'],
# PyPI metadata
author="<NAME>, <NAME>",
author_email="<EMAIL>, <EMAIL>",
description="Sumo Logic Python SDK",
license="PSF",
keywords="sumologic python sdk rest api log management analytics logreduce splunk security siem collector forwarder",
url="https://github.com/SumoLogic/sumologic-python-sdk",
zip_safe=True
)
| [
"setuptools.find_packages"
]
| [((112, 127), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (125, 127), False, 'from setuptools import setup, find_packages\n')] |
import copy
from django.conf import settings
from django.test.utils import override_settings
from rest_framework import status, test
class PermissionsTest(test.APITransactionTestCase):
"""
Abstract class for permissions tests.
Methods `get_urls_configs`, `get_users_with_permission`,
`get_users_without_permissions` have to be overridden.
Logical example:
class ExamplePermissionsTest(PermissionsTest):
def get_users_with_permission(self, url, method):
if is_unreachable(url):
                    # no one has access to an unreachable url
return []
return [user_with_permission]
def get_users_without_permissions(self, url, method):
if is_unreachable(url):
                    # everybody is denied access to an unreachable url
return [user_with_permission, user_without_permission]
return [user_without_permission]
def get_urls_configs(self):
                yield {'url': 'http://testserver/some/url', 'method': 'GET'}
yield {'url': 'http://testserver/some/unreachable/url', 'method': 'POST'}
...
"""
def get_urls_configs(self):
"""
        Return a list or generator of url configs.
        Each url config is a dictionary with the following keys:
         - url: the url itself
         - method: the request method
         - data: the data that will be sent with the request
         url config example:
{
'url': 'http://testserver/api/backup/',
'method': 'POST',
'data': {'backup_source': 'backup/source/url'}
}
"""
raise NotImplementedError()
def get_users_with_permission(self, url, method):
"""
        Return a list of users who can access the given url with the given method
"""
raise NotImplementedError()
def get_users_without_permissions(self, url, method):
"""
        Return a list of users who cannot access the given url with the given method
"""
raise NotImplementedError()
def test_permissions(self):
"""
        Go through all url configs and check that users with permissions
        can request them and users without permissions cannot.
"""
for conf in self.get_urls_configs():
url, method = conf['url'], conf['method']
data = conf['data'] if 'data' in conf else {}
for user in self.get_users_with_permission(url, method):
self.client.force_authenticate(user=user)
response = getattr(self.client, method.lower())(url, data=data)
self.assertFalse(
response.status_code
in (status.HTTP_403_FORBIDDEN, status.HTTP_404_NOT_FOUND),
'Error. User %s can not reach url: %s (method:%s). (Response status code %s, data %s)'
% (user, url, method, response.status_code, response.data),
)
for user in self.get_users_without_permissions(url, method):
self.client.force_authenticate(user=user)
response = getattr(self.client, method.lower())(url, data=data)
unreachable_statuses = (
status.HTTP_403_FORBIDDEN,
status.HTTP_404_NOT_FOUND,
status.HTTP_409_CONFLICT,
)
self.assertTrue(
response.status_code in unreachable_statuses,
'Error. User %s can reach url: %s (method:%s). (Response status code %s, data %s)'
% (user, url, method, response.status_code, response.data),
)
class ListPermissionsTest(test.APITransactionTestCase):
"""
    Abstract class that tests which objects a user receives in a list.
    Method `get_users_and_expected_results` has to be overridden.
    Method `get_url` has to be defined.
"""
def get_url(self):
return None
def get_users_and_expected_results(self):
"""
        Return a list or generator of dictionaries with the following keys:
        - user - the user we want to test
        - expected_results - list of dictionaries with the fields the user is
          expected to receive in the server response
"""
pass
def test_list_permissions(self):
for user_and_expected_result in self.get_users_and_expected_results():
user = user_and_expected_result['user']
expected_results = user_and_expected_result['expected_results']
self.client.force_authenticate(user=user)
response = self.client.get(self.get_url())
self.assertEqual(
len(expected_results),
len(response.data),
'User %s receive wrong number of objects. Expected: %s, received %s'
% (user, len(expected_results), len(response.data)),
)
for actual, expected in zip(response.data, expected_results):
for key, value in expected.items():
self.assertEqual(actual[key], value)
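# A hypothetical subclass of ListPermissionsTest, mirroring the "Logical example"
# style used for PermissionsTest above (user objects and result fields are illustrative):
#
#     class ProjectListPermissionsTest(ListPermissionsTest):
#         def get_url(self):
#             return 'http://testserver/api/projects/'
#
#         def get_users_and_expected_results(self):
#             return [
#                 {'user': staff_user, 'expected_results': [{'name': 'Project A'}]},
#                 {'user': regular_user, 'expected_results': []},
#             ]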
def override_waldur_core_settings(**kwargs):
waldur_settings = copy.deepcopy(settings.WALDUR_CORE)
waldur_settings.update(kwargs)
return override_settings(WALDUR_CORE=waldur_settings)
| [
"django.test.utils.override_settings",
"copy.deepcopy"
]
| [((5168, 5203), 'copy.deepcopy', 'copy.deepcopy', (['settings.WALDUR_CORE'], {}), '(settings.WALDUR_CORE)\n', (5181, 5203), False, 'import copy\n'), ((5250, 5296), 'django.test.utils.override_settings', 'override_settings', ([], {'WALDUR_CORE': 'waldur_settings'}), '(WALDUR_CORE=waldur_settings)\n', (5267, 5296), False, 'from django.test.utils import override_settings\n')] |
import io
import time
import todoist
def test_stats_get(api_endpoint, api_token):
api = todoist.api.TodoistAPI(api_token, api_endpoint)
response = api.completed.get_stats()
assert 'days_items' in response
assert 'week_items' in response
assert 'karma_trend' in response
assert 'karma_last_update' in response
def test_user_update(api_endpoint, api_token):
api = todoist.api.TodoistAPI(api_token, api_endpoint)
api.sync()
date_format = api.state['user']['date_format']
date_format_new = 1 - date_format
api.user.update(date_format=date_format_new)
api.commit()
assert date_format_new == api.state['user']['date_format']
api.user.update_goals(vacation_mode=1)
api.commit()
api.user.update_goals(vacation_mode=0)
api.commit()
def test_user_settings_update(api_endpoint, api_token):
api = todoist.api.TodoistAPI(api_token, api_endpoint)
api.sync()
reminder_email = api.state['user_settings']['reminder_email']
    reminder_email = not reminder_email
api.user_settings.update(reminder_email=reminder_email)
api.commit()
assert reminder_email == api.state['user_settings']['reminder_email']
def test_project_add(cleanup, api_endpoint, api_token):
api = todoist.api.TodoistAPI(api_token, api_endpoint)
api.sync()
project1 = api.projects.add('Project1')
response = api.commit()
assert response['projects'][0]['name'] == 'Project1'
assert 'Project1' in [p['name'] for p in api.state['projects']]
assert api.projects.get_by_id(project1['id']) == project1
project1.delete()
api.commit()
def test_project_delete(cleanup, api_endpoint, api_token):
api = todoist.api.TodoistAPI(api_token, api_endpoint)
api.sync()
project1 = api.projects.add('Project1')
api.commit()
project1.delete()
response = api.commit()
assert response['projects'][0]['id'] == project1['id']
assert response['projects'][0]['is_deleted'] == 1
assert 'Project1' not in [p['name'] for p in api.state['projects']]
def test_project_update(cleanup, api_endpoint, api_token):
api = todoist.api.TodoistAPI(api_token, api_endpoint)
api.sync()
project1 = api.projects.add('Project1')
api.commit()
project1.update(name='UpdatedProject1')
response = api.commit()
assert response['projects'][0]['name'] == 'UpdatedProject1'
assert 'UpdatedProject1' in [p['name'] for p in api.state['projects']]
assert api.projects.get_by_id(project1['id']) == project1
project1.delete()
api.commit()
def test_project_archive(cleanup, api_endpoint, api_token):
api = todoist.api.TodoistAPI(api_token, api_endpoint)
api.sync()
project1 = api.projects.add('Project1')
api.commit()
project1.archive()
response = api.commit()
assert response['projects'][0]['name'] == 'Project1'
assert response['projects'][0]['is_archived'] == 1
assert 'Project1' in [p['name'] for p in api.state['projects']]
assert 1 in [
p['is_archived'] for p in api.state['projects']
if p['id'] == project1['id']
]
project1.delete()
api.commit()
def test_project_unarchive(cleanup, api_endpoint, api_token):
api = todoist.api.TodoistAPI(api_token, api_endpoint)
api.sync()
project1 = api.projects.add('Project1')
api.commit()
project1.archive()
api.commit()
project1.unarchive()
response = api.commit()
assert response['projects'][0]['name'] == 'Project1'
assert response['projects'][0]['is_archived'] == 0
assert 0 in [
p['is_archived'] for p in api.state['projects']
if p['id'] == project1['id']
]
project1.delete()
api.commit()
def test_project_move_to_parent(cleanup, api_endpoint, api_token):
api = todoist.api.TodoistAPI(api_token, api_endpoint)
api.sync()
project1 = api.projects.add('Project1')
api.commit()
project2 = api.projects.add('Project2')
api.commit()
project2.move(project1['id'])
response = api.commit()
assert response['projects'][0]['name'] == 'Project2'
assert response['projects'][0]['parent_id'] == project1['id']
assert project1['id'] in [
i['parent_id'] for i in api.state['projects'] if i['id'] == project2['id']
]
project2.delete()
api.commit()
project1.delete()
api.commit()
def test_project_reorder(cleanup, api_endpoint, api_token):
api = todoist.api.TodoistAPI(api_token, api_endpoint)
api.sync()
project1 = api.projects.add('Project1')
api.commit()
project2 = api.projects.add('Project2')
api.commit()
api.projects.reorder(projects=[
{'id': project1['id'], 'child_order': 2},
{'id': project2['id'], 'child_order': 1},
])
response = api.commit()
for project in response['projects']:
if project['id'] == project1['id']:
assert project['child_order'] == 2
if project['id'] == project2['id']:
assert project['child_order'] == 1
assert 2 in [
p['child_order'] for p in api.state['projects']
if p['id'] == project1['id']
]
assert 1 in [
p['child_order'] for p in api.state['projects']
if p['id'] == project2['id']
]
project1.delete()
api.commit()
project2.delete()
api.commit()
def test_item_add(cleanup, api_endpoint, api_token):
api = todoist.api.TodoistAPI(api_token, api_endpoint)
api.sync()
response = api.add_item('Item1')
assert response['content'] == 'Item1'
api.sync()
assert 'Item1' in [i['content'] for i in api.state['items']]
item1 = [i for i in api.state['items'] if i['content'] == 'Item1'][0]
assert api.items.get_by_id(item1['id']) == item1
item1.delete()
api.commit()
def test_item_delete(cleanup, api_endpoint, api_token):
api = todoist.api.TodoistAPI(api_token, api_endpoint)
api.sync()
item1 = api.items.add('Item1')
api.sync()
item1.delete()
response = api.commit()
assert response['items'][0]['id'] == item1['id']
assert response['items'][0]['is_deleted'] == 1
assert 'Item1' not in [i['content'] for i in api.state['items']]
def test_item_update(cleanup, api_endpoint, api_token):
api = todoist.api.TodoistAPI(api_token, api_endpoint)
api.sync()
item1 = api.items.add('Item1')
api.commit()
item1.update(content='UpdatedItem1')
response = api.commit()
assert response['items'][0]['content'] == 'UpdatedItem1'
assert 'UpdatedItem1' in [i['content'] for i in api.state['items']]
assert api.items.get_by_id(item1['id']) == item1
item1.delete()
api.commit()
def test_item_complete(cleanup, api_endpoint, api_token):
api = todoist.api.TodoistAPI(api_token, api_endpoint)
api.sync()
item1 = api.items.add('Item1')
api.commit()
item2 = api.items.add('Item2', parent_id=item1['id'])
api.commit()
item2.complete()
response = api.commit()
assert response['items'][0]['content'] == 'Item2'
assert response['items'][0]['checked'] == 1
assert 1 in [
i['checked'] for i in api.state['items'] if i['id'] == item2['id']
]
item1.delete()
api.commit()
item2.delete()
api.commit()
def test_item_uncomplete(cleanup, api_endpoint, api_token):
api = todoist.api.TodoistAPI(api_token, api_endpoint)
api.sync()
item1 = api.items.add('Item1')
api.commit()
item2 = api.items.add('Item2', parent_id=item1['id'])
api.commit()
item2.complete()
api.commit()
item2.uncomplete()
response = api.commit()
assert response['items'][0]['content'] == 'Item2'
assert response['items'][0]['checked'] == 0
assert 0 in [
i['checked'] for i in api.state['items'] if i['id'] == item1['id']
]
item1.delete()
api.commit()
item2.delete()
api.commit()
def test_item_archive(cleanup, api_endpoint, api_token):
api = todoist.api.TodoistAPI(api_token, api_endpoint)
api.sync()
item1 = api.items.add('Item1')
api.commit()
item2 = api.items.add('Item2', parent_id=item1['id'])
api.commit()
item2.complete()
api.commit()
item2.archive()
response = api.commit()
assert response['items'][0]['content'] == 'Item2'
assert response['items'][0]['in_history'] == 1
assert 1 in [
i['in_history'] for i in api.state['items'] if i['id'] == item2['id']
]
item1.delete()
api.commit()
item2.delete()
api.commit()
def test_item_unarchive(cleanup, api_endpoint, api_token):
api = todoist.api.TodoistAPI(api_token, api_endpoint)
api.sync()
item1 = api.items.add('Item1')
api.commit()
item2 = api.items.add('Item2', parent_id=item1['id'])
api.commit()
item2.complete()
api.commit()
item2.archive()
api.commit()
item2.unarchive()
response = api.commit()
assert response['items'][0]['content'] == 'Item2'
assert response['items'][0]['in_history'] == 0
assert 0 in [
i['in_history'] for i in api.state['items'] if i['id'] == item2['id']
]
item1.delete()
api.commit()
item2.delete()
api.commit()
def test_item_move_to_project(cleanup, api_endpoint, api_token):
api = todoist.api.TodoistAPI(api_token, api_endpoint)
api.sync()
item1 = api.items.add('Item1')
api.commit()
project1 = api.projects.add('Project1')
api.commit()
item1.move(project_id=project1['id'])
response = api.commit()
assert response['items'][0]['content'] == 'Item1'
assert response['items'][0]['project_id'] == project1['id']
assert project1['id'] in [
i['project_id'] for i in api.state['items'] if i['id'] == item1['id']
]
item1.delete()
api.commit()
project1.delete()
api.commit()
def test_item_move_to_parent(cleanup, api_endpoint, api_token):
api = todoist.api.TodoistAPI(api_token, api_endpoint)
api.sync()
item1 = api.items.add('Item1')
api.commit()
item2 = api.items.add('Item2')
api.commit()
item2.move(parent_id=item1['id'])
response = api.commit()
assert response['items'][0]['content'] == 'Item2'
assert response['items'][0]['parent_id'] == item1['id']
assert item1['id'] in [
i['parent_id'] for i in api.state['items'] if i['id'] == item2['id']
]
item1.delete()
api.commit()
item2.delete()
api.commit()
def test_item_update_date_complete(cleanup, api_endpoint, api_token):
api = todoist.api.TodoistAPI(api_token, api_endpoint)
api.sync()
item1 = api.items.add('Item1', due={'string': 'every day'})
api.commit()
now = time.time()
tomorrow = time.gmtime(now + 24 * 3600)
new_date_utc = time.strftime("%Y-%m-%dT%H:%M:%SZ", tomorrow)
due = {
'date': new_date_utc,
'string': 'every day',
}
api.items.update_date_complete(item1['id'], due=due)
response = api.commit()
assert response['items'][0]['due']['string'] == 'every day'
assert 'every day' in [
i['due']['string'] for i in api.state['items'] if i['id'] == item1['id']
]
item1.delete()
api.commit()
def test_item_reorder(cleanup, api_endpoint, api_token):
api = todoist.api.TodoistAPI(api_token, api_endpoint)
api.sync()
item1 = api.items.add('Item1')
api.commit()
item2 = api.items.add('Item2')
api.commit()
api.items.reorder(items=[
{'id': item1['id'], 'child_order': 2},
{'id': item2['id'], 'child_order': 1},
])
response = api.commit()
for item in response['items']:
if item['id'] == item1['id']:
assert item['child_order'] == 2
if item['id'] == item2['id']:
assert item['child_order'] == 1
assert 2 in [
p['child_order'] for p in api.state['items']
if p['id'] == item1['id']
]
assert 1 in [
p['child_order'] for p in api.state['items']
if p['id'] == item2['id']
]
item1.delete()
api.commit()
item2.delete()
api.commit()
def test_item_update_day_orders(cleanup, api_endpoint, api_token):
api = todoist.api.TodoistAPI(api_token, api_endpoint)
api.sync()
item1 = api.items.add('Item1')
api.commit()
item2 = api.items.add('Item2')
api.commit()
api.items.update_day_orders({item1['id']: 1, item2['id']: 2})
response = api.commit()
for item in response['items']:
if item['id'] == item1['id']:
assert item['day_order'] == 1
if item['id'] == item2['id']:
assert item['day_order'] == 2
assert 1 == api.state['day_orders'][str(item1['id'])]
assert 2 == api.state['day_orders'][str(item2['id'])]
item1.delete()
api.commit()
item2.delete()
api.commit()
def test_label_add(cleanup, api_endpoint, api_token):
api = todoist.api.TodoistAPI(api_token, api_endpoint)
api.sync()
label1 = api.labels.add('Label1')
response = api.commit()
assert response['labels'][0]['name'] == 'Label1'
assert 'Label1' in [l['name'] for l in api.state['labels']]
assert api.labels.get_by_id(label1['id']) == label1
label1.delete()
api.commit()
def test_label_delete(cleanup, api_endpoint, api_token):
api = todoist.api.TodoistAPI(api_token, api_endpoint)
api.sync()
label1 = api.labels.add('Label1')
api.commit()
label1.delete()
response = api.commit()
assert response['labels'][0]['id'] == label1['id']
assert response['labels'][0]['is_deleted'] == 1
assert 'UpdatedLabel1' not in [l['name'] for l in api.state['labels']]
def test_label_update(cleanup, api_endpoint, api_token):
api = todoist.api.TodoistAPI(api_token, api_endpoint)
api.sync()
label1 = api.labels.add('Label1')
api.commit()
label1.update(name='UpdatedLabel1')
response = api.commit()
assert response['labels'][0]['name'] == 'UpdatedLabel1'
assert 'UpdatedLabel1' in [l['name'] for l in api.state['labels']]
assert api.labels.get_by_id(label1['id']) == label1
label1.delete()
api.commit()
def test_label_update_orders(cleanup, api_endpoint, api_token):
api = todoist.api.TodoistAPI(api_token, api_endpoint)
api.sync()
label1 = api.labels.add('Label1')
api.commit()
label2 = api.labels.add('Label2')
api.commit()
api.labels.update_orders({label1['id']: 1, label2['id']: 2})
response = api.commit()
for label in response['labels']:
if label['id'] == label1['id']:
assert label['item_order'] == 1
if label['id'] == label2['id']:
assert label['item_order'] == 2
assert 1 in [
l['item_order'] for l in api.state['labels'] if l['id'] == label1['id']
]
assert 2 in [
l['item_order'] for l in api.state['labels'] if l['id'] == label2['id']
]
label1.delete()
api.commit()
label2.delete()
api.commit()
def test_note_add(cleanup, api_endpoint, api_token):
api = todoist.api.TodoistAPI(api_token, api_endpoint)
api.sync()
item1 = api.items.add('Item1')
api.commit()
note1 = api.notes.add(item1['id'], 'Note1')
response = api.commit()
assert response['notes'][0]['content'] == 'Note1'
assert 'Note1' in [n['content'] for n in api.state['notes']]
assert api.notes.get_by_id(note1['id']) == note1
note1.delete()
api.commit()
item1.delete()
api.commit()
def test_note_delete(cleanup, api_endpoint, api_token):
api = todoist.api.TodoistAPI(api_token, api_endpoint)
api.sync()
item1 = api.items.add('Item1')
api.commit()
note1 = api.notes.add(item1['id'], 'Note1')
api.commit()
note1.delete()
response = api.commit()
assert response['notes'][0]['id'] == note1['id']
assert response['notes'][0]['is_deleted'] == 1
assert 'UpdatedNote1' not in [n['content'] for n in api.state['notes']]
note1.delete()
api.commit()
item1.delete()
api.commit()
def test_note_update(cleanup, api_endpoint, api_token):
api = todoist.api.TodoistAPI(api_token, api_endpoint)
api.sync()
item1 = api.items.add('Item1')
api.commit()
note1 = api.notes.add(item1['id'], 'Note1')
api.commit()
note1.update(content='UpdatedNote1')
response = api.commit()
assert response['notes'][0]['content'] == 'UpdatedNote1'
assert 'UpdatedNote1' in [n['content'] for n in api.state['notes']]
assert api.notes.get_by_id(note1['id']) == note1
note1.delete()
api.commit()
item1.delete()
api.commit()
def test_projectnote_add(cleanup, api_endpoint, api_token):
api = todoist.api.TodoistAPI(api_token, api_endpoint)
api.sync()
project1 = api.projects.add('Project1')
api.commit()
note1 = api.project_notes.add(project1['id'], 'Note1')
response = api.commit()
assert response['project_notes'][0]['content'] == 'Note1'
assert 'Note1' in [n['content'] for n in api.state['project_notes']]
assert api.project_notes.get_by_id(note1['id']) == note1
note1.delete()
api.commit()
project1.delete()
api.commit()
def test_projectnote_delete(cleanup, api_endpoint, api_token):
api = todoist.api.TodoistAPI(api_token, api_endpoint)
api.sync()
project1 = api.projects.add('Project1')
api.commit()
note1 = api.project_notes.add(project1['id'], 'Note1')
api.commit()
note1.delete()
response = api.commit()
assert response['project_notes'][0]['id'] == note1['id']
assert response['project_notes'][0]['is_deleted'] == 1
assert 'UpdatedNote1' not in [
n['content'] for n in api.state['project_notes']
]
project1.delete()
api.commit()
def test_projectnote_update(cleanup, api_endpoint, api_token):
api = todoist.api.TodoistAPI(api_token, api_endpoint)
api.sync()
project1 = api.projects.add('Project1')
api.commit()
note1 = api.project_notes.add(project1['id'], 'Note1')
api.commit()
note1.update(content='UpdatedNote1')
response = api.commit()
assert response['project_notes'][0]['content'] == 'UpdatedNote1'
assert 'UpdatedNote1' in [n['content'] for n in api.state['project_notes']]
assert api.project_notes.get_by_id(note1['id']) == note1
note1.delete()
api.commit()
project1.delete()
api.commit()
def test_filter_add(cleanup, api_endpoint, api_token):
api = todoist.api.TodoistAPI(api_token, api_endpoint)
api.sync()
filter1 = api.filters.add('Filter1', 'no due date')
response = api.commit()
assert response['filters'][0]['name'] == 'Filter1'
assert 'Filter1' in [f['name'] for f in api.state['filters']]
assert api.filters.get_by_id(filter1['id']) == filter1
filter1.delete()
api.commit()
def test_filter_delete(cleanup, api_endpoint, api_token):
api = todoist.api.TodoistAPI(api_token, api_endpoint)
api.sync()
filter1 = api.filters.add('Filter1', 'no due date')
api.commit()
filter1.delete()
response = api.commit()
assert response['filters'][0]['id'] == filter1['id']
assert response['filters'][0]['is_deleted'] == 1
assert 'Filter1' not in [p['name'] for p in api.state['filters']]
def test_filter_update(cleanup, api_endpoint, api_token):
api = todoist.api.TodoistAPI(api_token, api_endpoint)
api.sync()
filter1 = api.filters.add('Filter1', 'no due date')
api.commit()
filter1.update(name='UpdatedFilter1')
response = api.commit()
assert response['filters'][0]['name'] == 'UpdatedFilter1'
assert 'UpdatedFilter1' in [f['name'] for f in api.state['filters']]
assert api.filters.get_by_id(filter1['id']) == filter1
filter1.delete()
api.commit()
def test_filter_update_orders(cleanup, api_endpoint, api_token):
api = todoist.api.TodoistAPI(api_token, api_endpoint)
api.sync()
filter1 = api.filters.add('Filter1', 'no due date')
api.commit()
filter2 = api.filters.add('Filter2', 'today')
api.commit()
api.filters.update_orders({filter1['id']: 2, filter2['id']: 1})
response = api.commit()
for filter in response['filters']:
if filter['id'] == filter1['id']:
assert filter['item_order'] == 2
if filter['id'] == filter2['id']:
assert filter['item_order'] == 1
assert 2 in [
f['item_order'] for f in api.state['filters']
if f['id'] == filter1['id']
]
assert 1 in [
f['item_order'] for f in api.state['filters']
if f['id'] == filter2['id']
]
filter1.delete()
api.commit()
filter2.delete()
api.commit()
def test_reminder_relative_add(cleanup, api_endpoint, api_token):
api = todoist.api.TodoistAPI(api_token, api_endpoint)
api.sync()
item1 = api.items.add('Item1', due={'string': 'tomorrow 5pm'})
api.commit()
reminder1 = api.reminders.add(item1['id'], minute_offset=30)
response = api.commit()
assert response['reminders'][0]['minute_offset'] == 30
assert reminder1['id'] in [p['id'] for p in api.state['reminders']]
assert api.reminders.get_by_id(reminder1['id']) == reminder1
reminder1.delete()
api.commit()
item1.delete()
api.commit()
def test_reminder_relative_delete(cleanup, api_endpoint, api_token):
api = todoist.api.TodoistAPI(api_token, api_endpoint)
api.sync()
item1 = api.items.add('Item1', due={'string': 'tomorrow 5pm'})
api.commit()
reminder1 = api.reminders.add(item1['id'], minute_offset=30)
api.commit()
reminder1.delete()
response = api.commit()
assert response['reminders'][0]['is_deleted'] == 1
assert reminder1['id'] not in [p['id'] for p in api.state['reminders']]
item1.delete()
api.commit()
def test_reminder_relative_update(cleanup, api_endpoint, api_token):
api = todoist.api.TodoistAPI(api_token, api_endpoint)
api.sync()
item1 = api.items.add('Item1', due={'string': 'tomorrow 5pm'})
api.commit()
reminder1 = api.reminders.add(item1['id'], minute_offset=30)
api.commit()
reminder1.update(minute_offset=str(15))
response = api.commit()
assert response['reminders'][0]['minute_offset'] == 15
assert reminder1['id'] in [p['id'] for p in api.state['reminders']]
assert api.reminders.get_by_id(reminder1['id']) == reminder1
reminder1.delete()
api.commit()
item1.delete()
api.commit()
def test_reminder_absolute_add(cleanup, api_endpoint, api_token):
api = todoist.api.TodoistAPI(api_token, api_endpoint)
api.sync()
item1 = api.items.add('Item1', due={'string': 'tomorrow 5pm'})
api.commit()
now = time.time()
tomorrow = time.gmtime(now + 24 * 3600)
due_date_utc = time.strftime("%Y-%m-%dT%H:%M:%SZ", tomorrow)
reminder1 = api.reminders.add(item1['id'], due={'date': due_date_utc})
response = api.commit()
assert response['reminders'][0]['due']['date'] == due_date_utc
tomorrow = time.gmtime(time.time() + 24 * 3600)
assert reminder1['id'] in [p['id'] for p in api.state['reminders']]
assert api.reminders.get_by_id(reminder1['id']) == reminder1
reminder1.delete()
api.commit()
item1.delete()
api.commit()
def test_reminder_absolute_delete(cleanup, api_endpoint, api_token):
api = todoist.api.TodoistAPI(api_token, api_endpoint)
api.sync()
item1 = api.items.add('Item1', due={'string': 'tomorrow 5pm'})
api.commit()
now = time.time()
tomorrow = time.gmtime(now + 24 * 3600)
due_date_utc = time.strftime("%Y-%m-%dT%H:%M:%SZ", tomorrow)
reminder1 = api.reminders.add(item1['id'], due={'date': due_date_utc})
api.commit()
api.reminders.delete(reminder1['id'])
response = api.commit()
assert response['reminders'][0]['is_deleted'] == 1
assert reminder1['id'] not in [p['id'] for p in api.state['reminders']]
item1.delete()
response = api.commit()
def test_reminder_absolute_update(cleanup, api_endpoint, api_token):
api = todoist.api.TodoistAPI(api_token, api_endpoint)
api.sync()
item1 = api.items.add('Item1', due={'string': 'tomorrow 5pm'})
api.commit()
now = time.time()
tomorrow = time.gmtime(now + 24 * 3600)
due_date_utc = time.strftime("%Y-%m-%dT%H:%M:%SZ", tomorrow)
reminder1 = api.reminders.add(item1['id'], due={'date': due_date_utc})
api.commit()
tomorrow = time.gmtime(now + 24 * 3600 + 60)
due_date_utc = time.strftime("%Y-%m-%dT%H:%M:%SZ", tomorrow)
api.reminders.update(reminder1['id'], due_date_utc=due_date_utc)
response = api.commit()
assert response['reminders'][0]['due']['date'] == due_date_utc
assert reminder1['id'] in [p['id'] for p in api.state['reminders']]
assert api.reminders.get_by_id(reminder1['id']) == reminder1
reminder1.delete()
api.commit()
item1.delete()
api.commit()
def test_locations(api_endpoint, api_token):
api = todoist.api.TodoistAPI(api_token, api_endpoint)
api.sync()
api.locations.clear()
api.commit()
assert api.state['locations'] == []
def test_live_notifications(api_endpoint, api_token):
api = todoist.api.TodoistAPI(api_token, api_endpoint)
api.sync()
api.live_notifications.set_last_read(
api.state['live_notifications_last_read_id'])
response = api.commit()
assert response['live_notifications_last_read_id'] == \
api.state['live_notifications_last_read_id']
def test_share_accept(cleanup, cleanup2, api_endpoint, api_token, api_token2):
api = todoist.api.TodoistAPI(api_token, api_endpoint)
api2 = todoist.api.TodoistAPI(api_token2, api_endpoint)
api.user.update(auto_invite_disabled=1)
api.commit()
api.sync()
api2.user.update(auto_invite_disabled=1)
api2.commit()
api2.sync()
project1 = api.projects.add('Project1')
api.commit()
api.projects.share(project1['id'], api2.state['user']['email'])
response = api.commit()
assert response['projects'][0]['name'] == project1['name']
assert response['projects'][0]['shared']
response2 = api2.sync()
invitation1 = next((ln for ln in response2['live_notifications']
if ln['notification_type'] == 'share_invitation_sent'),
None)
assert invitation1 is not None
assert invitation1['project_name'] == project1['name']
assert invitation1['from_user']['email'] == api.state['user']['email']
api2.invitations.accept(invitation1['id'],
invitation1['invitation_secret'])
response2 = api2.commit()
assert api2.state['user']['id'] in \
[p['user_id'] for p in api2.state['collaborator_states']]
api.sync()
project1 = [p for p in api.state['projects'] if p['name'] == 'Project1'][0]
project1.delete()
api.commit()
def test_share_reject(cleanup, cleanup2, api_endpoint, api_token, api_token2):
api = todoist.api.TodoistAPI(api_token, api_endpoint)
api2 = todoist.api.TodoistAPI(api_token2, api_endpoint)
api.user.update(auto_invite_disabled=1)
api.commit()
api.sync()
api2.user.update(auto_invite_disabled=1)
api2.commit()
api2.sync()
project1 = api.projects.add('Project1')
api.commit()
api.projects.share(project1['id'], api2.state['user']['email'])
response = api.commit()
assert response['projects'][0]['name'] == project1['name']
assert response['projects'][0]['shared']
response2 = api2.sync()
invitation2 = next((ln for ln in response2['live_notifications']
if ln['notification_type'] == 'share_invitation_sent'),
None)
assert invitation2 is not None
assert invitation2['project_name'] == project1['name']
assert invitation2['from_user']['email'] == api.state['user']['email']
api2.invitations.reject(invitation2['id'],
invitation2['invitation_secret'])
response2 = api2.commit()
assert len(response2['projects']) == 0
assert len(response2['collaborator_states']) == 0
project1 = [p for p in api.state['projects'] if p['name'] == 'Project1'][0]
project1.delete()
api.commit()
def test_share_delete(cleanup, cleanup2, api_endpoint, api_token, api_token2):
api = todoist.api.TodoistAPI(api_token, api_endpoint)
api2 = todoist.api.TodoistAPI(api_token2, api_endpoint)
api.user.update(auto_invite_disabled=1)
api.commit()
api.sync()
api2.user.update(auto_invite_disabled=1)
api2.commit()
api2.sync()
project1 = api.projects.add('Project1')
api.commit()
api.projects.share(project1['id'], api2.state['user']['email'])
response = api.commit()
assert response['projects'][0]['name'] == project1['name']
assert response['projects'][0]['shared']
response2 = api2.sync()
invitation3 = next((ln for ln in response2['live_notifications']
if ln['notification_type'] == 'share_invitation_sent'),
None)
assert invitation3 is not None
assert invitation3['project_name'] == project1['name']
assert invitation3['from_user']['email'] == api.state['user']['email']
api.invitations.delete(invitation3['id'])
api.commit()
project1 = [p for p in api.state['projects'] if p['name'] == 'Project1'][0]
project1.delete()
api.commit()
def test_templates(cleanup, api_endpoint, api_token):
api = todoist.api.TodoistAPI(api_token, api_endpoint)
api.sync()
project1 = api.projects.add('Project1')
project2 = api.projects.add('Project2')
api.commit()
item1 = api.items.add('Item1', project_id=project1['id'])
api.commit()
template = api.templates.export_as_file(project1['id'])
assert 'task,Item1,4,1' in template
with io.open('/tmp/example.csv', 'w', encoding='utf-8') as example:
example.write(template)
result = api.templates.import_into_project(project1['id'],
'/tmp/example.csv')
assert result == {'status': u'ok'}
item1.delete()
api.commit()
project1.delete()
api.commit()
project2.delete()
api.commit()
| [
"time.strftime",
"io.open",
"time.gmtime",
"time.time",
"todoist.api.TodoistAPI"
]
| [((95, 142), 'todoist.api.TodoistAPI', 'todoist.api.TodoistAPI', (['api_token', 'api_endpoint'], {}), '(api_token, api_endpoint)\n', (117, 142), False, 'import todoist\n'), ((395, 442), 'todoist.api.TodoistAPI', 'todoist.api.TodoistAPI', (['api_token', 'api_endpoint'], {}), '(api_token, api_endpoint)\n', (417, 442), False, 'import todoist\n'), ((864, 911), 'todoist.api.TodoistAPI', 'todoist.api.TodoistAPI', (['api_token', 'api_endpoint'], {}), '(api_token, api_endpoint)\n', (886, 911), False, 'import todoist\n'), ((1306, 1353), 'todoist.api.TodoistAPI', 'todoist.api.TodoistAPI', (['api_token', 'api_endpoint'], {}), '(api_token, api_endpoint)\n', (1328, 1353), False, 'import todoist\n'), ((1740, 1787), 'todoist.api.TodoistAPI', 'todoist.api.TodoistAPI', (['api_token', 'api_endpoint'], {}), '(api_token, api_endpoint)\n', (1762, 1787), False, 'import todoist\n'), ((2172, 2219), 'todoist.api.TodoistAPI', 'todoist.api.TodoistAPI', (['api_token', 'api_endpoint'], {}), '(api_token, api_endpoint)\n', (2194, 2219), False, 'import todoist\n'), ((2683, 2730), 'todoist.api.TodoistAPI', 'todoist.api.TodoistAPI', (['api_token', 'api_endpoint'], {}), '(api_token, api_endpoint)\n', (2705, 2730), False, 'import todoist\n'), ((3271, 3318), 'todoist.api.TodoistAPI', 'todoist.api.TodoistAPI', (['api_token', 'api_endpoint'], {}), '(api_token, api_endpoint)\n', (3293, 3318), False, 'import todoist\n'), ((3839, 3886), 'todoist.api.TodoistAPI', 'todoist.api.TodoistAPI', (['api_token', 'api_endpoint'], {}), '(api_token, api_endpoint)\n', (3861, 3886), False, 'import todoist\n'), ((4483, 4530), 'todoist.api.TodoistAPI', 'todoist.api.TodoistAPI', (['api_token', 'api_endpoint'], {}), '(api_token, api_endpoint)\n', (4505, 4530), False, 'import todoist\n'), ((5443, 5490), 'todoist.api.TodoistAPI', 'todoist.api.TodoistAPI', (['api_token', 'api_endpoint'], {}), '(api_token, api_endpoint)\n', (5465, 5490), False, 'import todoist\n'), ((5898, 5945), 'todoist.api.TodoistAPI', 'todoist.api.TodoistAPI', (['api_token', 'api_endpoint'], {}), '(api_token, api_endpoint)\n', (5920, 5945), False, 'import todoist\n'), ((6301, 6348), 'todoist.api.TodoistAPI', 'todoist.api.TodoistAPI', (['api_token', 'api_endpoint'], {}), '(api_token, api_endpoint)\n', (6323, 6348), False, 'import todoist\n'), ((6780, 6827), 'todoist.api.TodoistAPI', 'todoist.api.TodoistAPI', (['api_token', 'api_endpoint'], {}), '(api_token, api_endpoint)\n', (6802, 6827), False, 'import todoist\n'), ((7367, 7414), 'todoist.api.TodoistAPI', 'todoist.api.TodoistAPI', (['api_token', 'api_endpoint'], {}), '(api_token, api_endpoint)\n', (7389, 7414), False, 'import todoist\n'), ((7991, 8038), 'todoist.api.TodoistAPI', 'todoist.api.TodoistAPI', (['api_token', 'api_endpoint'], {}), '(api_token, api_endpoint)\n', (8013, 8038), False, 'import todoist\n'), ((8620, 8667), 'todoist.api.TodoistAPI', 'todoist.api.TodoistAPI', (['api_token', 'api_endpoint'], {}), '(api_token, api_endpoint)\n', (8642, 8667), False, 'import todoist\n'), ((9294, 9341), 'todoist.api.TodoistAPI', 'todoist.api.TodoistAPI', (['api_token', 'api_endpoint'], {}), '(api_token, api_endpoint)\n', (9316, 9341), False, 'import todoist\n'), ((9927, 9974), 'todoist.api.TodoistAPI', 'todoist.api.TodoistAPI', (['api_token', 'api_endpoint'], {}), '(api_token, api_endpoint)\n', (9949, 9974), False, 'import todoist\n'), ((10542, 10589), 'todoist.api.TodoistAPI', 'todoist.api.TodoistAPI', (['api_token', 'api_endpoint'], {}), '(api_token, api_endpoint)\n', (10564, 10589), False, 'import todoist\n'), ((10698, 10709), 
'time.time', 'time.time', ([], {}), '()\n', (10707, 10709), False, 'import time\n'), ((10725, 10753), 'time.gmtime', 'time.gmtime', (['(now + 24 * 3600)'], {}), '(now + 24 * 3600)\n', (10736, 10753), False, 'import time\n'), ((10773, 10818), 'time.strftime', 'time.strftime', (['"""%Y-%m-%dT%H:%M:%SZ"""', 'tomorrow'], {}), "('%Y-%m-%dT%H:%M:%SZ', tomorrow)\n", (10786, 10818), False, 'import time\n'), ((11268, 11315), 'todoist.api.TodoistAPI', 'todoist.api.TodoistAPI', (['api_token', 'api_endpoint'], {}), '(api_token, api_endpoint)\n', (11290, 11315), False, 'import todoist\n'), ((12169, 12216), 'todoist.api.TodoistAPI', 'todoist.api.TodoistAPI', (['api_token', 'api_endpoint'], {}), '(api_token, api_endpoint)\n', (12191, 12216), False, 'import todoist\n'), ((12883, 12930), 'todoist.api.TodoistAPI', 'todoist.api.TodoistAPI', (['api_token', 'api_endpoint'], {}), '(api_token, api_endpoint)\n', (12905, 12930), False, 'import todoist\n'), ((13293, 13340), 'todoist.api.TodoistAPI', 'todoist.api.TodoistAPI', (['api_token', 'api_endpoint'], {}), '(api_token, api_endpoint)\n', (13315, 13340), False, 'import todoist\n'), ((13712, 13759), 'todoist.api.TodoistAPI', 'todoist.api.TodoistAPI', (['api_token', 'api_endpoint'], {}), '(api_token, api_endpoint)\n', (13734, 13759), False, 'import todoist\n'), ((14201, 14248), 'todoist.api.TodoistAPI', 'todoist.api.TodoistAPI', (['api_token', 'api_endpoint'], {}), '(api_token, api_endpoint)\n', (14223, 14248), False, 'import todoist\n'), ((15022, 15069), 'todoist.api.TodoistAPI', 'todoist.api.TodoistAPI', (['api_token', 'api_endpoint'], {}), '(api_token, api_endpoint)\n', (15044, 15069), False, 'import todoist\n'), ((15528, 15575), 'todoist.api.TodoistAPI', 'todoist.api.TodoistAPI', (['api_token', 'api_endpoint'], {}), '(api_token, api_endpoint)\n', (15550, 15575), False, 'import todoist\n'), ((16078, 16125), 'todoist.api.TodoistAPI', 'todoist.api.TodoistAPI', (['api_token', 'api_endpoint'], {}), '(api_token, api_endpoint)\n', (16100, 16125), False, 'import todoist\n'), ((16660, 16707), 'todoist.api.TodoistAPI', 'todoist.api.TodoistAPI', (['api_token', 'api_endpoint'], {}), '(api_token, api_endpoint)\n', (16682, 16707), False, 'import todoist\n'), ((17220, 17267), 'todoist.api.TodoistAPI', 'todoist.api.TodoistAPI', (['api_token', 'api_endpoint'], {}), '(api_token, api_endpoint)\n', (17242, 17267), False, 'import todoist\n'), ((17802, 17849), 'todoist.api.TodoistAPI', 'todoist.api.TodoistAPI', (['api_token', 'api_endpoint'], {}), '(api_token, api_endpoint)\n', (17824, 17849), False, 'import todoist\n'), ((18426, 18473), 'todoist.api.TodoistAPI', 'todoist.api.TodoistAPI', (['api_token', 'api_endpoint'], {}), '(api_token, api_endpoint)\n', (18448, 18473), False, 'import todoist\n'), ((18863, 18910), 'todoist.api.TodoistAPI', 'todoist.api.TodoistAPI', (['api_token', 'api_endpoint'], {}), '(api_token, api_endpoint)\n', (18885, 18910), False, 'import todoist\n'), ((19300, 19347), 'todoist.api.TodoistAPI', 'todoist.api.TodoistAPI', (['api_token', 'api_endpoint'], {}), '(api_token, api_endpoint)\n', (19322, 19347), False, 'import todoist\n'), ((19818, 19865), 'todoist.api.TodoistAPI', 'todoist.api.TodoistAPI', (['api_token', 'api_endpoint'], {}), '(api_token, api_endpoint)\n', (19840, 19865), False, 'import todoist\n'), ((20716, 20763), 'todoist.api.TodoistAPI', 'todoist.api.TodoistAPI', (['api_token', 'api_endpoint'], {}), '(api_token, api_endpoint)\n', (20738, 20763), False, 'import todoist\n'), ((21312, 21359), 'todoist.api.TodoistAPI', 'todoist.api.TodoistAPI', 
(['api_token', 'api_endpoint'], {}), '(api_token, api_endpoint)\n', (21334, 21359), False, 'import todoist\n'), ((21843, 21890), 'todoist.api.TodoistAPI', 'todoist.api.TodoistAPI', (['api_token', 'api_endpoint'], {}), '(api_token, api_endpoint)\n', (21865, 21890), False, 'import todoist\n'), ((22497, 22544), 'todoist.api.TodoistAPI', 'todoist.api.TodoistAPI', (['api_token', 'api_endpoint'], {}), '(api_token, api_endpoint)\n', (22519, 22544), False, 'import todoist\n'), ((22656, 22667), 'time.time', 'time.time', ([], {}), '()\n', (22665, 22667), False, 'import time\n'), ((22683, 22711), 'time.gmtime', 'time.gmtime', (['(now + 24 * 3600)'], {}), '(now + 24 * 3600)\n', (22694, 22711), False, 'import time\n'), ((22731, 22776), 'time.strftime', 'time.strftime', (['"""%Y-%m-%dT%H:%M:%SZ"""', 'tomorrow'], {}), "('%Y-%m-%dT%H:%M:%SZ', tomorrow)\n", (22744, 22776), False, 'import time\n'), ((23294, 23341), 'todoist.api.TodoistAPI', 'todoist.api.TodoistAPI', (['api_token', 'api_endpoint'], {}), '(api_token, api_endpoint)\n', (23316, 23341), False, 'import todoist\n'), ((23453, 23464), 'time.time', 'time.time', ([], {}), '()\n', (23462, 23464), False, 'import time\n'), ((23480, 23508), 'time.gmtime', 'time.gmtime', (['(now + 24 * 3600)'], {}), '(now + 24 * 3600)\n', (23491, 23508), False, 'import time\n'), ((23528, 23573), 'time.strftime', 'time.strftime', (['"""%Y-%m-%dT%H:%M:%SZ"""', 'tomorrow'], {}), "('%Y-%m-%dT%H:%M:%SZ', tomorrow)\n", (23541, 23573), False, 'import time\n'), ((23997, 24044), 'todoist.api.TodoistAPI', 'todoist.api.TodoistAPI', (['api_token', 'api_endpoint'], {}), '(api_token, api_endpoint)\n', (24019, 24044), False, 'import todoist\n'), ((24156, 24167), 'time.time', 'time.time', ([], {}), '()\n', (24165, 24167), False, 'import time\n'), ((24183, 24211), 'time.gmtime', 'time.gmtime', (['(now + 24 * 3600)'], {}), '(now + 24 * 3600)\n', (24194, 24211), False, 'import time\n'), ((24231, 24276), 'time.strftime', 'time.strftime', (['"""%Y-%m-%dT%H:%M:%SZ"""', 'tomorrow'], {}), "('%Y-%m-%dT%H:%M:%SZ', tomorrow)\n", (24244, 24276), False, 'import time\n'), ((24385, 24418), 'time.gmtime', 'time.gmtime', (['(now + 24 * 3600 + 60)'], {}), '(now + 24 * 3600 + 60)\n', (24396, 24418), False, 'import time\n'), ((24438, 24483), 'time.strftime', 'time.strftime', (['"""%Y-%m-%dT%H:%M:%SZ"""', 'tomorrow'], {}), "('%Y-%m-%dT%H:%M:%SZ', tomorrow)\n", (24451, 24483), False, 'import time\n'), ((24919, 24966), 'todoist.api.TodoistAPI', 'todoist.api.TodoistAPI', (['api_token', 'api_endpoint'], {}), '(api_token, api_endpoint)\n', (24941, 24966), False, 'import todoist\n'), ((25134, 25181), 'todoist.api.TodoistAPI', 'todoist.api.TodoistAPI', (['api_token', 'api_endpoint'], {}), '(api_token, api_endpoint)\n', (25156, 25181), False, 'import todoist\n'), ((25527, 25574), 'todoist.api.TodoistAPI', 'todoist.api.TodoistAPI', (['api_token', 'api_endpoint'], {}), '(api_token, api_endpoint)\n', (25549, 25574), False, 'import todoist\n'), ((25586, 25634), 'todoist.api.TodoistAPI', 'todoist.api.TodoistAPI', (['api_token2', 'api_endpoint'], {}), '(api_token2, api_endpoint)\n', (25608, 25634), False, 'import todoist\n'), ((26908, 26955), 'todoist.api.TodoistAPI', 'todoist.api.TodoistAPI', (['api_token', 'api_endpoint'], {}), '(api_token, api_endpoint)\n', (26930, 26955), False, 'import todoist\n'), ((26967, 27015), 'todoist.api.TodoistAPI', 'todoist.api.TodoistAPI', (['api_token2', 'api_endpoint'], {}), '(api_token2, api_endpoint)\n', (26989, 27015), False, 'import todoist\n'), ((28264, 28311), 
'todoist.api.TodoistAPI', 'todoist.api.TodoistAPI', (['api_token', 'api_endpoint'], {}), '(api_token, api_endpoint)\n', (28286, 28311), False, 'import todoist\n'), ((28323, 28371), 'todoist.api.TodoistAPI', 'todoist.api.TodoistAPI', (['api_token2', 'api_endpoint'], {}), '(api_token2, api_endpoint)\n', (28345, 28371), False, 'import todoist\n'), ((29422, 29469), 'todoist.api.TodoistAPI', 'todoist.api.TodoistAPI', (['api_token', 'api_endpoint'], {}), '(api_token, api_endpoint)\n', (29444, 29469), False, 'import todoist\n'), ((29782, 29832), 'io.open', 'io.open', (['"""/tmp/example.csv"""', '"""w"""'], {'encoding': '"""utf-8"""'}), "('/tmp/example.csv', 'w', encoding='utf-8')\n", (29789, 29832), False, 'import io\n'), ((22974, 22985), 'time.time', 'time.time', ([], {}), '()\n', (22983, 22985), False, 'import time\n')] |
from setuptools import setup
import iotio
with open("README.md", "r") as fh:
long_description = fh.read()
setup(
name="iot.io",
version=iotio.__version__,
packages=["iotio"],
author="<NAME>",
author_email="<EMAIL>",
license="MIT",
description="A management API for connecting and managing Clients via websocket connections.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/dylancrockett/iot.io",
project_urls={
"Documentation": "https://iotio.readthedocs.io/",
"Source Code": "https://github.com/dylancrockett/iot.io"
},
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent"
],
install_requires=[
'gevent',
'gevent-websocket',
'flask',
'flask-sockets',
],
python_requires='>=3.7'
)
| [
"setuptools.setup"
]
| [((112, 860), 'setuptools.setup', 'setup', ([], {'name': '"""iot.io"""', 'version': 'iotio.__version__', 'packages': "['iotio']", 'author': '"""<NAME>"""', 'author_email': '"""<EMAIL>"""', 'license': '"""MIT"""', 'description': '"""A management API for connecting and managing Clients via websocket connections."""', 'long_description': 'long_description', 'long_description_content_type': '"""text/markdown"""', 'url': '"""https://github.com/dylancrockett/iot.io"""', 'project_urls': "{'Documentation': 'https://iotio.readthedocs.io/', 'Source Code':\n 'https://github.com/dylancrockett/iot.io'}", 'classifiers': "['Programming Language :: Python :: 3',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: OS Independent']", 'install_requires': "['gevent', 'gevent-websocket', 'flask', 'flask-sockets']", 'python_requires': '""">=3.7"""'}), "(name='iot.io', version=iotio.__version__, packages=['iotio'], author=\n '<NAME>', author_email='<EMAIL>', license='MIT', description=\n 'A management API for connecting and managing Clients via websocket connections.'\n , long_description=long_description, long_description_content_type=\n 'text/markdown', url='https://github.com/dylancrockett/iot.io',\n project_urls={'Documentation': 'https://iotio.readthedocs.io/',\n 'Source Code': 'https://github.com/dylancrockett/iot.io'}, classifiers=\n ['Programming Language :: Python :: 3',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: OS Independent'], install_requires=['gevent',\n 'gevent-websocket', 'flask', 'flask-sockets'], python_requires='>=3.7')\n", (117, 860), False, 'from setuptools import setup\n')] |
import numpy as np
import tensorflow as tf
import os
from scipy.io import savemat
from scipy.io import loadmat
from scipy.misc import imread
from scipy.misc import imsave
from alexnet_face_classifier import *
import matplotlib.pyplot as plt
plt.switch_backend('agg')
class backprop_graph:
def __init__(self, num_classes, nhid, cnn):
self.num_classes = num_classes
self.inputs = tf.placeholder(tf.float32, shape = [None, 227, 227, 3], name='input')
self.labels_1hot = tf.placeholder(tf.float32, shape=[None, self.num_classes])
self.cnn = cnn(self.inputs, None, self.num_classes)
self.cnn.preprocess()
self.cnn.convlayers()
self.cnn.fc_layers(transfer_learning=False, nhid=nhid)
def classifier_graph(self, temp=3.0):
self.probabilities = tf.nn.softmax(self.cnn.fc2/temp)
self.probability = tf.tensordot(self.probabilities, self.labels_1hot, axes=[[1],[1]])
self.log_probability = tf.log(self.probability)
def guided_backprop_graph(self):
self.grad_fc2 = tf.nn.relu(tf.gradients(self.probability, self.cnn.fc2)[0])
self.grad_fc1 = tf.nn.relu(tf.gradients(self.cnn.fc2, self.cnn.fc1, grad_ys=self.grad_fc2)[0])
self.grad_conv5 = tf.nn.relu(tf.gradients(self.cnn.fc1, self.cnn.conv5, grad_ys=self.grad_fc1)[0])
self.grad_conv4 = tf.nn.relu(tf.gradients(self.cnn.conv5, self.cnn.conv4, grad_ys=self.grad_conv5)[0])
self.grad_conv3 = tf.nn.relu(tf.gradients(self.cnn.conv4, self.cnn.conv3, grad_ys=self.grad_conv4)[0])
self.grad_conv2 = tf.nn.relu(tf.gradients(self.cnn.conv3, self.cnn.conv2, grad_ys=self.grad_conv3)[0])
self.grad_conv1 = tf.nn.relu(tf.gradients(self.cnn.conv2, self.cnn.conv1, grad_ys=self.grad_conv2)[0])
self.grad_image = tf.nn.relu(tf.gradients(self.cnn.conv1, self.inputs, grad_ys=self.grad_conv1)[0])
###
def guided_backprop(graph, image, one_hot, sess):
image = np.expand_dims(image, 0)
one_hot = np.expand_dims(one_hot, 0)
saliency_map = sess.run(graph.grad_image, feed_dict={graph.inputs:image, graph.labels_1hot:one_hot})[0]
scaling_adjustment = 1E-20
saliency_map_scaled = saliency_map/(np.max(saliency_map)+scaling_adjustment)
return saliency_map_scaled
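# Illustrative usage of the graph above (a sketch, not part of the original
# file): the classifier class name, hidden size, checkpoint path and the image
# `img` / one-hot label `onehot` below are all placeholders.
#
#     graph = backprop_graph(num_classes=10, nhid=512, cnn=AlexNetFaceClassifier)
#     graph.classifier_graph()
#     graph.guided_backprop_graph()
#     with tf.Session() as sess:
#         tf.train.Saver().restore(sess, checkpoint_path)
#         saliency = guided_backprop(graph, img, onehot, sess)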
| [
"tensorflow.tensordot",
"tensorflow.placeholder",
"matplotlib.pyplot.switch_backend",
"numpy.max",
"tensorflow.gradients",
"tensorflow.nn.softmax",
"numpy.expand_dims",
"tensorflow.log"
]
| [((243, 268), 'matplotlib.pyplot.switch_backend', 'plt.switch_backend', (['"""agg"""'], {}), "('agg')\n", (261, 268), True, 'import matplotlib.pyplot as plt\n'), ((1967, 1991), 'numpy.expand_dims', 'np.expand_dims', (['image', '(0)'], {}), '(image, 0)\n', (1981, 1991), True, 'import numpy as np\n'), ((2006, 2032), 'numpy.expand_dims', 'np.expand_dims', (['one_hot', '(0)'], {}), '(one_hot, 0)\n', (2020, 2032), True, 'import numpy as np\n'), ((402, 469), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, 227, 227, 3]', 'name': '"""input"""'}), "(tf.float32, shape=[None, 227, 227, 3], name='input')\n", (416, 469), True, 'import tensorflow as tf\n'), ((499, 557), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, self.num_classes]'}), '(tf.float32, shape=[None, self.num_classes])\n', (513, 557), True, 'import tensorflow as tf\n'), ((819, 853), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['(self.cnn.fc2 / temp)'], {}), '(self.cnn.fc2 / temp)\n', (832, 853), True, 'import tensorflow as tf\n'), ((879, 946), 'tensorflow.tensordot', 'tf.tensordot', (['self.probabilities', 'self.labels_1hot'], {'axes': '[[1], [1]]'}), '(self.probabilities, self.labels_1hot, axes=[[1], [1]])\n', (891, 946), True, 'import tensorflow as tf\n'), ((977, 1001), 'tensorflow.log', 'tf.log', (['self.probability'], {}), '(self.probability)\n', (983, 1001), True, 'import tensorflow as tf\n'), ((2216, 2236), 'numpy.max', 'np.max', (['saliency_map'], {}), '(saliency_map)\n', (2222, 2236), True, 'import numpy as np\n'), ((1079, 1123), 'tensorflow.gradients', 'tf.gradients', (['self.probability', 'self.cnn.fc2'], {}), '(self.probability, self.cnn.fc2)\n', (1091, 1123), True, 'import tensorflow as tf\n'), ((1163, 1226), 'tensorflow.gradients', 'tf.gradients', (['self.cnn.fc2', 'self.cnn.fc1'], {'grad_ys': 'self.grad_fc2'}), '(self.cnn.fc2, self.cnn.fc1, grad_ys=self.grad_fc2)\n', (1175, 1226), True, 'import tensorflow as tf\n'), ((1268, 1333), 'tensorflow.gradients', 'tf.gradients', (['self.cnn.fc1', 'self.cnn.conv5'], {'grad_ys': 'self.grad_fc1'}), '(self.cnn.fc1, self.cnn.conv5, grad_ys=self.grad_fc1)\n', (1280, 1333), True, 'import tensorflow as tf\n'), ((1375, 1444), 'tensorflow.gradients', 'tf.gradients', (['self.cnn.conv5', 'self.cnn.conv4'], {'grad_ys': 'self.grad_conv5'}), '(self.cnn.conv5, self.cnn.conv4, grad_ys=self.grad_conv5)\n', (1387, 1444), True, 'import tensorflow as tf\n'), ((1486, 1555), 'tensorflow.gradients', 'tf.gradients', (['self.cnn.conv4', 'self.cnn.conv3'], {'grad_ys': 'self.grad_conv4'}), '(self.cnn.conv4, self.cnn.conv3, grad_ys=self.grad_conv4)\n', (1498, 1555), True, 'import tensorflow as tf\n'), ((1597, 1666), 'tensorflow.gradients', 'tf.gradients', (['self.cnn.conv3', 'self.cnn.conv2'], {'grad_ys': 'self.grad_conv3'}), '(self.cnn.conv3, self.cnn.conv2, grad_ys=self.grad_conv3)\n', (1609, 1666), True, 'import tensorflow as tf\n'), ((1708, 1777), 'tensorflow.gradients', 'tf.gradients', (['self.cnn.conv2', 'self.cnn.conv1'], {'grad_ys': 'self.grad_conv2'}), '(self.cnn.conv2, self.cnn.conv1, grad_ys=self.grad_conv2)\n', (1720, 1777), True, 'import tensorflow as tf\n'), ((1819, 1885), 'tensorflow.gradients', 'tf.gradients', (['self.cnn.conv1', 'self.inputs'], {'grad_ys': 'self.grad_conv1'}), '(self.cnn.conv1, self.inputs, grad_ys=self.grad_conv1)\n', (1831, 1885), True, 'import tensorflow as tf\n')] |
import pytest
from nesta.packages.misc_utils.guess_sql_type import guess_sql_type
@pytest.fixture
def int_data():
return [1,2,4,False]
@pytest.fixture
def text_data():
return ['a', True, 2,
('A very long sentence A very long sentence A '
'very long sentence A very long sentence'), 'd']
@pytest.fixture
def float_data():
return [1,2.3,True,None]
@pytest.fixture
def bool_data():
return [True,False,None]
def test_guess_sql_type_int(int_data):
assert guess_sql_type(int_data) == 'INTEGER'
def test_guess_sql_type_float(float_data):
assert guess_sql_type(float_data) == 'FLOAT'
def test_guess_sql_type_bool(bool_data):
assert guess_sql_type(bool_data) == 'BOOLEAN'
def test_guess_sql_type_str(text_data):
assert guess_sql_type(text_data, text_len=10) == 'TEXT'
assert guess_sql_type(text_data, text_len=100).startswith('VARCHAR(')
| [
"nesta.packages.misc_utils.guess_sql_type.guess_sql_type"
]
| [((502, 526), 'nesta.packages.misc_utils.guess_sql_type.guess_sql_type', 'guess_sql_type', (['int_data'], {}), '(int_data)\n', (516, 526), False, 'from nesta.packages.misc_utils.guess_sql_type import guess_sql_type\n'), ((595, 621), 'nesta.packages.misc_utils.guess_sql_type.guess_sql_type', 'guess_sql_type', (['float_data'], {}), '(float_data)\n', (609, 621), False, 'from nesta.packages.misc_utils.guess_sql_type import guess_sql_type\n'), ((686, 711), 'nesta.packages.misc_utils.guess_sql_type.guess_sql_type', 'guess_sql_type', (['bool_data'], {}), '(bool_data)\n', (700, 711), False, 'from nesta.packages.misc_utils.guess_sql_type import guess_sql_type\n'), ((777, 815), 'nesta.packages.misc_utils.guess_sql_type.guess_sql_type', 'guess_sql_type', (['text_data'], {'text_len': '(10)'}), '(text_data, text_len=10)\n', (791, 815), False, 'from nesta.packages.misc_utils.guess_sql_type import guess_sql_type\n'), ((837, 876), 'nesta.packages.misc_utils.guess_sql_type.guess_sql_type', 'guess_sql_type', (['text_data'], {'text_len': '(100)'}), '(text_data, text_len=100)\n', (851, 876), False, 'from nesta.packages.misc_utils.guess_sql_type import guess_sql_type\n')] |
# -*- coding: utf-8 -*-
'''Activity management API endpoints.'''
from flask import request
from model.db import database, Activity, ActivityMember, Demand, ActivityBase, ProjectMember, User
from model.role import identity
from flask_jwt_extended import (fresh_jwt_required)
def demand_activity_add(activity_id, data):
    '''Attach the given demands to an activity.'''
for demand_id in data:
demand = Demand.get(Demand.id == demand_id)
if not demand.activityId:
demand.activityId = activity_id
# Demand.update(activityId=activity_id).where(Demand.id == demand_id).execute()
demand.save()
def demand_activity_del(activity_id, data):
    '''Detach the given demands from an activity.'''
for demand_id in data:
demand = Demand.get(Demand.id == demand_id)
if demand.activityId == activity_id:
demand.activityId = None
# Demand.update(activityId=activity_id).where(Demand.id == demand_id).execute()
demand.save()
def demand_activity_done(activity_id, data):
    '''Mark the given demands of an activity as done.'''
for demand_id in data:
demand = Demand.get(Demand.id == demand_id)
if demand.activityId == activity_id:
demand.status = 1
# Demand.update(activityId=activity_id).where(Demand.id == demand_id).execute()
demand.save()
@fresh_jwt_required
@identity.check_permission("create", 'task')
def activity_add():
    '''Create a project activity.'''
data = request.json
if 'memberId' in data and data['memberId']:
data['status'] = 'dev-ing'
with database.atomic():
activity_id = ActivityBase.create(**data).id
if 'memberId' in data and data['memberId']:
for member_id in data['memberId']:
role = ProjectMember.get(
ProjectMember.projectId == data['projectId'],
ProjectMember.memberId == member_id).role
ActivityMember.create(**{
'activityId': activity_id,
'memberId': member_id,
'role': role
})
demand_activity_add(activity_id, data['demand'])
return {"msg": 'ok'}
@fresh_jwt_required
@identity.check_permission("update", 'task')
def activity_update():
    '''Update a project activity.'''
data = request.json
activity_id = data.pop('activityId')
with database.atomic():
if 'del_memberId' in data:
for member_id in data.pop('del_memberId'):
ActivityMember.delete().where(
(ActivityMember.activityId == activity_id) &
(ActivityMember.memberId == member_id)).execute()
if 'memberId' in data:
if not 'status' in data or not data['status']:
data['status'] = 'dev-ing'
for member_id in data.pop('memberId'):
ActivityMember.get_or_create(
activityId=activity_id,
memberId=member_id,
role=ProjectMember.get(
(ProjectMember.projectId == data['projectId'])
& (ProjectMember.memberId == member_id)).role)
if 'done_demand' in data:
demand_activity_done(activity_id, data.pop('done_demand'))
if 'demand' in data:
demand_activity_add(activity_id, data.pop('demand'))
if 'del_demand' in data:
demand_activity_del(activity_id, data.pop('del_demand'))
Activity.update(**data).where(Activity.id == activity_id).execute()
return {"msg": 'ok'}
@fresh_jwt_required
def activity_detail(activity_id):
    '''Query activity details.
GET /api/activity/<int:activity_id>
'''
activity = Activity.findOne(Activity.id == activity_id)
activity['member'] = list(
ActivityMember.find(ActivityMember.role, User.username,
User.email, User.id).join(User)
.where(ActivityMember.activityId == activity_id))
activity['demand'] = list(
Demand.find().where(Demand.activityId == activity_id))
return activity
@fresh_jwt_required
def project_user(project_id):
    '''Query the members of a project.'''
return {
"data":
list(
ProjectMember.find(
ProjectMember.role,
User).join(User).where(ProjectMember.projectId == project_id))
}
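# Illustrative request payloads for the handlers above (hypothetical values;
# the field names are the ones read from request.json in this module):
#   activity_add:    {"projectId": 1, "memberId": [2, 3], "demand": [10, 11]}
#   activity_update: {"activityId": 5, "done_demand": [10], "del_memberId": [3]}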
| [
"model.db.ActivityMember.delete",
"model.db.ActivityMember.create",
"model.db.Demand.find",
"model.db.Activity.update",
"model.db.database.atomic",
"model.db.Activity.findOne",
"model.role.identity.check_permission",
"model.db.ProjectMember.get",
"model.db.Demand.get",
"model.db.ActivityBase.create",
"model.db.ActivityMember.find",
"model.db.ProjectMember.find"
]
| [((1287, 1330), 'model.role.identity.check_permission', 'identity.check_permission', (['"""create"""', '"""task"""'], {}), "('create', 'task')\n", (1312, 1330), False, 'from model.role import identity\n'), ((2114, 2157), 'model.role.identity.check_permission', 'identity.check_permission', (['"""update"""', '"""task"""'], {}), "('update', 'task')\n", (2139, 2157), False, 'from model.role import identity\n'), ((3602, 3646), 'model.db.Activity.findOne', 'Activity.findOne', (['(Activity.id == activity_id)'], {}), '(Activity.id == activity_id)\n', (3618, 3646), False, 'from model.db import database, Activity, ActivityMember, Demand, ActivityBase, ProjectMember, User\n'), ((355, 389), 'model.db.Demand.get', 'Demand.get', (['(Demand.id == demand_id)'], {}), '(Demand.id == demand_id)\n', (365, 389), False, 'from model.db import database, Activity, ActivityMember, Demand, ActivityBase, ProjectMember, User\n'), ((693, 727), 'model.db.Demand.get', 'Demand.get', (['(Demand.id == demand_id)'], {}), '(Demand.id == demand_id)\n', (703, 727), False, 'from model.db import database, Activity, ActivityMember, Demand, ActivityBase, ProjectMember, User\n'), ((1036, 1070), 'model.db.Demand.get', 'Demand.get', (['(Demand.id == demand_id)'], {}), '(Demand.id == demand_id)\n', (1046, 1070), False, 'from model.db import database, Activity, ActivityMember, Demand, ActivityBase, ProjectMember, User\n'), ((1484, 1501), 'model.db.database.atomic', 'database.atomic', ([], {}), '()\n', (1499, 1501), False, 'from model.db import database, Activity, ActivityMember, Demand, ActivityBase, ProjectMember, User\n'), ((2272, 2289), 'model.db.database.atomic', 'database.atomic', ([], {}), '()\n', (2287, 2289), False, 'from model.db import database, Activity, ActivityMember, Demand, ActivityBase, ProjectMember, User\n'), ((1525, 1552), 'model.db.ActivityBase.create', 'ActivityBase.create', ([], {}), '(**data)\n', (1544, 1552), False, 'from model.db import database, Activity, ActivityMember, Demand, ActivityBase, ProjectMember, User\n'), ((1841, 1934), 'model.db.ActivityMember.create', 'ActivityMember.create', ([], {}), "(**{'activityId': activity_id, 'memberId': member_id,\n 'role': role})\n", (1862, 1934), False, 'from model.db import database, Activity, ActivityMember, Demand, ActivityBase, ProjectMember, User\n'), ((3899, 3912), 'model.db.Demand.find', 'Demand.find', ([], {}), '()\n', (3910, 3912), False, 'from model.db import database, Activity, ActivityMember, Demand, ActivityBase, ProjectMember, User\n'), ((1678, 1783), 'model.db.ProjectMember.get', 'ProjectMember.get', (["(ProjectMember.projectId == data['projectId'])", '(ProjectMember.memberId == member_id)'], {}), "(ProjectMember.projectId == data['projectId'], \n ProjectMember.memberId == member_id)\n", (1695, 1783), False, 'from model.db import database, Activity, ActivityMember, Demand, ActivityBase, ProjectMember, User\n'), ((3372, 3395), 'model.db.Activity.update', 'Activity.update', ([], {}), '(**data)\n', (3387, 3395), False, 'from model.db import database, Activity, ActivityMember, Demand, ActivityBase, ProjectMember, User\n'), ((3686, 3762), 'model.db.ActivityMember.find', 'ActivityMember.find', (['ActivityMember.role', 'User.username', 'User.email', 'User.id'], {}), '(ActivityMember.role, User.username, User.email, User.id)\n', (3705, 3762), False, 'from model.db import database, Activity, ActivityMember, Demand, ActivityBase, ProjectMember, User\n'), ((2902, 3012), 'model.db.ProjectMember.get', 'ProjectMember.get', (["((ProjectMember.projectId == 
data['projectId']) & (ProjectMember.memberId ==\n member_id))"], {}), "((ProjectMember.projectId == data['projectId']) & (\n ProjectMember.memberId == member_id))\n", (2919, 3012), False, 'from model.db import database, Activity, ActivityMember, Demand, ActivityBase, ProjectMember, User\n'), ((4098, 4142), 'model.db.ProjectMember.find', 'ProjectMember.find', (['ProjectMember.role', 'User'], {}), '(ProjectMember.role, User)\n', (4116, 4142), False, 'from model.db import database, Activity, ActivityMember, Demand, ActivityBase, ProjectMember, User\n'), ((2397, 2420), 'model.db.ActivityMember.delete', 'ActivityMember.delete', ([], {}), '()\n', (2418, 2420), False, 'from model.db import database, Activity, ActivityMember, Demand, ActivityBase, ProjectMember, User\n')] |
import tensorflow as tf
from tensorflow.python.training.session_run_hook import SessionRunArgs
# Define data loaders #####################################
# See https://gist.github.com/peterroelants/9956ec93a07ca4e9ba5bc415b014bcca
class IteratorInitializerHook(tf.train.SessionRunHook):
"""Hook to initialise data iterator after Session is created."""
def __init__(self, func=None):
super(IteratorInitializerHook, self).__init__()
self.iterator_initializer_func = func
def after_create_session(self, session, coord):
"""Initialise the iterator after the session has been created."""
self.iterator_initializer_func(session)
# redefine summarysaverhook (for more accurate saving)
class CustomSummarySaverHook(tf.train.SummarySaverHook):
"""Saves summaries every N steps."""
def __init__(self,save_steps,*args,**kwargs):
super(CustomSummarySaverHook, self).__init__(*args,save_steps=save_steps,**kwargs)
def begin(self):
super().begin()
self._timer.reset()
self._iter_count = 0
def before_run(self, run_context): # pylint: disable=unused-argument
self._request_summary = ((self._iter_count + 1) % self.save_steps == 0)
requests = {"global_step": self._global_step_tensor}
if self._request_summary:
if self._get_summary_op() is not None:
# print(self._iter_count)
requests["summary"] = self._get_summary_op()
return SessionRunArgs(requests)
def after_run(self, run_context, run_values):
super().after_run(run_context,run_values)
self._iter_count += 1
class OneTimeSummarySaverHook(tf.train.SummarySaverHook):
"""One-Time SummarySaver
    Saves the requested summaries exactly once, on the first run.
E.g. can be used for saving the source code as text.
"""
def __init__(self, output_dir=None, summary_writer=None, scaffold=None, summary_op=None):
self._summary_op = summary_op
self._summary_writer = summary_writer
self._output_dir = output_dir
self._scaffold = scaffold
class emptytimer():
def update_last_triggered_step(*args,**kwargs):
pass
self._timer = emptytimer()
def begin(self):
super().begin()
self._done = False
def before_run(self, run_context): # pylint: disable=unused-argument
self._request_summary = not self._done
requests = {"global_step": self._global_step_tensor}
if self._request_summary:
if self._get_summary_op() is not None:
# print(self._iter_count)
requests["summary"] = self._get_summary_op()
return SessionRunArgs(requests)
def after_run(self, run_context, run_values):
super().after_run(run_context,run_values)
self._done = True
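# Illustrative wiring of the hooks above into a TF1 Estimator training call
# (sketch only; `model_dir`, `estimator` and `train_input_fn` are assumed to be
# defined elsewhere):
#
#     summary_hook = CustomSummarySaverHook(
#         save_steps=100, output_dir=model_dir, scaffold=tf.train.Scaffold())
#     estimator.train(train_input_fn, hooks=[summary_hook])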
def ExperimentTemplate() -> str:
"""A template with Markdown syntax.
:return: str with Markdown template
"""
return """
Experiment
==========
Any [markdown code](https://github.com/adam-p/markdown-here/wiki/Markdown-Cheatsheet) can be used to describe this experiment.
For instance, you can find the automatically generated used settings of this run below.
Current Settings
----------------
| Argument | Value |
| -------- | ----- |
"""
| [
"tensorflow.python.training.session_run_hook.SessionRunArgs"
]
| [((1490, 1514), 'tensorflow.python.training.session_run_hook.SessionRunArgs', 'SessionRunArgs', (['requests'], {}), '(requests)\n', (1504, 1514), False, 'from tensorflow.python.training.session_run_hook import SessionRunArgs\n'), ((2691, 2715), 'tensorflow.python.training.session_run_hook.SessionRunArgs', 'SessionRunArgs', (['requests'], {}), '(requests)\n', (2705, 2715), False, 'from tensorflow.python.training.session_run_hook import SessionRunArgs\n')] |
'''
Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
Code taken from: https://github.com/facebookresearch/wsd-biencoders/blob/master/wsd_models/util.py
'''
import os
import re
import torch
import subprocess
from transformers import *
import random
pos_converter = {'NOUN':'n', 'PROPN':'n', 'VERB':'v', 'AUX':'v', 'ADJ':'a', 'ADV':'r'}
def generate_key(lemma, pos):
if pos in pos_converter.keys():
pos = pos_converter[pos]
key = '{}+{}'.format(lemma, pos)
return key
def load_pretrained_model(name):
if name == 'roberta-base':
model = RobertaModel.from_pretrained('roberta-base')
hdim = 768
elif name == 'roberta-large':
model = RobertaModel.from_pretrained('roberta-large')
hdim = 1024
elif name == 'bert-large':
model = BertModel.from_pretrained('bert-large-uncased')
hdim = 1024
else: #bert base
model = BertModel.from_pretrained('bert-base-uncased')
hdim = 768
return model, hdim
def load_tokenizer(name):
if name == 'roberta-base':
tokenizer = RobertaTokenizer.from_pretrained('roberta-base')
elif name == 'roberta-large':
tokenizer = RobertaTokenizer.from_pretrained('roberta-large')
elif name == 'bert-large':
tokenizer = BertTokenizer.from_pretrained('bert-large-uncased')
else: #bert base
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
return tokenizer
def load_wn_senses(path):
wn_senses = {}
with open(path, 'r', encoding="utf8") as f:
for line in f:
line = line.strip().split('\t')
lemma = line[0]
pos = line[1]
senses = line[2:]
key = generate_key(lemma, pos)
wn_senses[key] = senses
return wn_senses
def get_label_space(data):
#get set of labels from dataset
labels = set()
for sent in data:
for _, _, _, _, label in sent:
if label != -1:
labels.add(label)
labels = list(labels)
labels.sort()
labels.append('n/a')
label_map = {}
for sent in data:
for _, lemma, pos, _, label in sent:
if label != -1:
key = generate_key(lemma, pos)
label_idx = labels.index(label)
if key not in label_map: label_map[key] = set()
label_map[key].add(label_idx)
return labels, label_map
def process_encoder_outputs(output, mask, as_tensor=False):
combined_outputs = []
position = -1
avg_arr = []
for idx, rep in zip(mask, torch.split(output, 1, dim=0)):
#ignore unlabeled words
if idx == -1: continue
#average representations for units in same example
elif position < idx:
position=idx
if len(avg_arr) > 0: combined_outputs.append(torch.mean(torch.stack(avg_arr, dim=-1), dim=-1))
avg_arr = [rep]
else:
assert position == idx
avg_arr.append(rep)
#get last example from avg_arr
if len(avg_arr) > 0: combined_outputs.append(torch.mean(torch.stack(avg_arr, dim=-1), dim=-1))
if as_tensor: return torch.cat(combined_outputs, dim=0)
else: return combined_outputs
#run WSD Evaluation Framework scorer within python
def evaluate_output(scorer_path, gold_filepath, out_filepath):
eval_cmd = ['java','-cp', scorer_path, 'Scorer', gold_filepath, out_filepath]
output = subprocess.Popen(eval_cmd, stdout=subprocess.PIPE ).communicate()[0]
output = [x.decode("utf-8") for x in output.splitlines()]
p,r,f1 = [float(output[i].split('=')[-1].strip()[:-1]) for i in range(3)]
return p, r, f1
def load_data(datapath, name):
text_path = os.path.join(datapath, '{}.data.xml'.format(name))
gold_path = os.path.join(datapath, '{}.gold.key.txt'.format(name))
#load gold labels
gold_labels = {}
with open(gold_path, 'r', encoding="utf8") as f:
for line in f:
line = line.strip().split(' ')
instance = line[0]
#this means we are ignoring other senses if labeled with more than one
#(happens at least in SemCor data)
key = line[1]
gold_labels[instance] = key
#load train examples + annotate sense instances with gold labels
sentences = []
s = []
with open(text_path, 'r', encoding="utf8") as f:
for line in f:
line = line.strip()
if line == '</sentence>':
sentences.append(s)
s=[]
elif line.startswith('<instance') or line.startswith('<wf'):
word = re.search('>(.+?)<', line).group(1)
lemma = re.search('lemma="(.+?)"', line).group(1)
pos = re.search('pos="(.+?)"', line).group(1)
#clean up data
word = re.sub(''', '\'', word)
lemma = re.sub(''', '\'', lemma)
sense_inst = -1
sense_label = -1
if line.startswith('<instance'):
sense_inst = re.search('instance id="(.+?)"', line).group(1)
#annotate sense instance with gold label
sense_label = gold_labels[sense_inst]
s.append((word, lemma, pos, sense_inst, sense_label))
return sentences
#normalize ids list, masks to whatever the passed in length is
def normalize_length(ids, attn_mask, o_mask, max_len, pad_id):
if max_len == -1:
return ids, attn_mask, o_mask
else:
if len(ids) < max_len:
while len(ids) < max_len:
ids.append(torch.tensor([[pad_id]]))
attn_mask.append(0)
o_mask.append(-1)
else:
ids = ids[:max_len-1]+[ids[-1]]
attn_mask = attn_mask[:max_len]
o_mask = o_mask[:max_len]
assert len(ids) == max_len
assert len(attn_mask) == max_len
assert len(o_mask) == max_len
return ids, attn_mask, o_mask
#filters down training dataset to (up to) k examples per sense
#for few-shot learning of the model
def filter_k_examples(data, k):
#shuffle data so we don't only get examples for (common) senses from beginning
random.shuffle(data)
#track number of times sense from data is used
sense_dict = {}
#store filtered data
filtered_data = []
example_count = 0
for sent in data:
filtered_sent = []
for form, lemma, pos, inst, sense in sent:
#treat unlabeled words normally
if sense == -1:
x = (form, lemma, pos, inst, sense)
elif sense in sense_dict:
if sense_dict[sense] < k:
#increment sense count and add example to filtered data
sense_dict[sense] += 1
x = (form, lemma, pos, inst, sense)
example_count += 1
else: #if the data already has k examples of this sense
#add example with no instance or sense label to data
x = (form, lemma, pos, -1, -1)
else:
#add labeled example to filtered data and sense dict
sense_dict[sense] = 1
x = (form, lemma, pos, inst, sense)
example_count += 1
filtered_sent.append(x)
filtered_data.append(filtered_sent)
print("k={}, training on {} sense examples...".format(k, example_count))
return filtered_data
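# Composing the helpers above for a few-shot run (illustrative sketch; the
# SemCor path below is an assumption about the WSD Evaluation Framework layout):
#   train_data = load_data('WSD_Evaluation_Framework/Training_Corpora/SemCor', 'semcor')
#   train_data = filter_k_examples(train_data, k=5)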
#EOF
| [
"torch.split",
"random.shuffle",
"subprocess.Popen",
"torch.stack",
"torch.tensor",
"re.sub",
"torch.cat",
"re.search"
]
| [((5585, 5605), 'random.shuffle', 'random.shuffle', (['data'], {}), '(data)\n', (5599, 5605), False, 'import random\n'), ((2451, 2480), 'torch.split', 'torch.split', (['output', '(1)'], {'dim': '(0)'}), '(output, 1, dim=0)\n', (2462, 2480), False, 'import torch\n'), ((2952, 2986), 'torch.cat', 'torch.cat', (['combined_outputs'], {'dim': '(0)'}), '(combined_outputs, dim=0)\n', (2961, 2986), False, 'import torch\n'), ((2891, 2919), 'torch.stack', 'torch.stack', (['avg_arr'], {'dim': '(-1)'}), '(avg_arr, dim=-1)\n', (2902, 2919), False, 'import torch\n'), ((3222, 3272), 'subprocess.Popen', 'subprocess.Popen', (['eval_cmd'], {'stdout': 'subprocess.PIPE'}), '(eval_cmd, stdout=subprocess.PIPE)\n', (3238, 3272), False, 'import subprocess\n'), ((4426, 4453), 're.sub', 're.sub', (['"""'"""', '"""\'"""', 'word'], {}), '(\''\', "\'", word)\n', (4432, 4453), False, 'import re\n'), ((4467, 4495), 're.sub', 're.sub', (['"""'"""', '"""\'"""', 'lemma'], {}), '(\''\', "\'", lemma)\n', (4473, 4495), False, 'import re\n'), ((5062, 5086), 'torch.tensor', 'torch.tensor', (['[[pad_id]]'], {}), '([[pad_id]])\n', (5074, 5086), False, 'import torch\n'), ((2686, 2714), 'torch.stack', 'torch.stack', (['avg_arr'], {'dim': '(-1)'}), '(avg_arr, dim=-1)\n', (2697, 2714), False, 'import torch\n'), ((4253, 4279), 're.search', 're.search', (['""">(.+?)<"""', 'line'], {}), "('>(.+?)<', line)\n", (4262, 4279), False, 'import re\n'), ((4301, 4333), 're.search', 're.search', (['"""lemma="(.+?)\\""""', 'line'], {}), '(\'lemma="(.+?)"\', line)\n', (4310, 4333), False, 'import re\n'), ((4355, 4385), 're.search', 're.search', (['"""pos="(.+?)\\""""', 'line'], {}), '(\'pos="(.+?)"\', line)\n', (4364, 4385), False, 'import re\n'), ((4594, 4632), 're.search', 're.search', (['"""instance id="(.+?)\\""""', 'line'], {}), '(\'instance id="(.+?)"\', line)\n', (4603, 4632), False, 'import re\n')] |
#!/usr/bin/env python3
from typing import Dict, AnyStr
from pathlib import Path
from ontopy import get_ontology
import dlite
from dlite.mappings import make_instance
# Setup dlite paths
thisdir = Path(__file__).parent.absolute()
rootdir = thisdir.parent.parent
workflow1dir = rootdir / '1-simple-workflow'
entitiesdir = rootdir / 'entities'
atomdata = workflow1dir / 'atomscaledata.json'
dlite.storage_path.append(f'{entitiesdir}/*.json')
# Define the calculation
def get_energy(reaction):
"""Calculates reaction energies with data from Substance entity
data is harvested from collection and mapped to Substance according to
mappings.
Args:
        reaction: dict with names of reactants and products as keys
        and stoichiometric coefficients as values.
        Negative stoichiometric coefficients for reactants.
        Positive stoichiometric coefficients for products.
Returns:
reaction energy
"""
energy = 0
for label, n in reaction.items():
inst = make_instance(Substance, coll[label], mappings,
mapsTo=mapsTo)
energy+=n*inst.molecule_energy
return energy
# Import ontologies with mappings
molecules_onto = get_ontology(f'{thisdir}/mapping_mols.ttl').load()
reaction_onto = get_ontology(f'{thisdir}/mapping_substance.ttl').load()
# Convert the mappings to a single list of triples
mappings = list(molecules_onto.get_unabbreviated_triples())
mappings.extend(list(reaction_onto.get_unabbreviated_triples()))
# Obtain the Metadata to be mapped to each other
Molecule = dlite.get_instance('http://onto-ns.com/meta/0.1/Molecule')
Substance = dlite.get_instance('http://onto-ns.com/meta/0.1/Substance')
# Find mapping relation
# TODO: investigate what to do if the two cases
# use a different mappings relation. As of now it is a
# hard requirement that they use the same.
mapsTo = molecules_onto.mapsTo.iri
# Define where the molecule data is obtained from
# This is a dlite collection
coll = dlite.Collection(f'json://{atomdata}?mode=r#molecules', 0)
# input from chemical engineer, e.g. what are reactants and products
# reactants (left side of equation) have negative stoichiometric coefficients
# products (right side of equation) have positive stoichiometric coefficients
reaction1 = {'C2H6':-1, 'C2H4':1,'H2':1}
reaction_energy = get_energy(reaction1)
print('Reaction energy 1', reaction_energy)
reaction2 = {'C3H8':-1, 'H2': -2,'CH4':3}
reaction_energy2 = get_energy(reaction2)
print('Reaction energy 2', reaction_energy2)
# Map instance Molecule with label 'H2' to Substance
#inst = make_instance(Substance, coll['H2'], mappings)
#print(inst)
# Map instance Molecule with label 'H2' to itself
#inst2 = make_instance(Molecule, coll['H2'], mappings, strict=False)
#print(inst2)
| [
"dlite.mappings.make_instance",
"dlite.storage_path.append",
"ontopy.get_ontology",
"pathlib.Path",
"dlite.Collection",
"dlite.get_instance"
]
| [((391, 441), 'dlite.storage_path.append', 'dlite.storage_path.append', (['f"""{entitiesdir}/*.json"""'], {}), "(f'{entitiesdir}/*.json')\n", (416, 441), False, 'import dlite\n'), ((1602, 1660), 'dlite.get_instance', 'dlite.get_instance', (['"""http://onto-ns.com/meta/0.1/Molecule"""'], {}), "('http://onto-ns.com/meta/0.1/Molecule')\n", (1620, 1660), False, 'import dlite\n'), ((1673, 1732), 'dlite.get_instance', 'dlite.get_instance', (['"""http://onto-ns.com/meta/0.1/Substance"""'], {}), "('http://onto-ns.com/meta/0.1/Substance')\n", (1691, 1732), False, 'import dlite\n'), ((2028, 2086), 'dlite.Collection', 'dlite.Collection', (['f"""json://{atomdata}?mode=r#molecules"""', '(0)'], {}), "(f'json://{atomdata}?mode=r#molecules', 0)\n", (2044, 2086), False, 'import dlite\n'), ((1039, 1101), 'dlite.mappings.make_instance', 'make_instance', (['Substance', 'coll[label]', 'mappings'], {'mapsTo': 'mapsTo'}), '(Substance, coll[label], mappings, mapsTo=mapsTo)\n', (1052, 1101), False, 'from dlite.mappings import make_instance\n'), ((1242, 1285), 'ontopy.get_ontology', 'get_ontology', (['f"""{thisdir}/mapping_mols.ttl"""'], {}), "(f'{thisdir}/mapping_mols.ttl')\n", (1254, 1285), False, 'from ontopy import get_ontology\n'), ((1309, 1357), 'ontopy.get_ontology', 'get_ontology', (['f"""{thisdir}/mapping_substance.ttl"""'], {}), "(f'{thisdir}/mapping_substance.ttl')\n", (1321, 1357), False, 'from ontopy import get_ontology\n'), ((199, 213), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (203, 213), False, 'from pathlib import Path\n')] |
"""
Period benchmarks that rely only on tslibs. See benchmarks.period for
Period benchmarks that rely on other parts of pandas.
"""
from pandas import Period
from pandas.tseries.frequencies import to_offset
class PeriodProperties:
params = (
["M", "min"],
[
"year",
"month",
"day",
"hour",
"minute",
"second",
"is_leap_year",
"quarter",
"qyear",
"week",
"daysinmonth",
"dayofweek",
"dayofyear",
"start_time",
"end_time",
],
)
param_names = ["freq", "attr"]
def setup(self, freq, attr):
self.per = Period("2012-06-01", freq=freq)
def time_property(self, freq, attr):
getattr(self.per, attr)
class PeriodUnaryMethods:
params = ["M", "min"]
param_names = ["freq"]
def setup(self, freq):
self.per = Period("2012-06-01", freq=freq)
def time_to_timestamp(self, freq):
self.per.to_timestamp()
def time_now(self, freq):
self.per.now(freq)
def time_asfreq(self, freq):
self.per.asfreq("A")
class PeriodConstructor:
params = [["D"], [True, False]]
param_names = ["freq", "is_offset"]
def setup(self, freq, is_offset):
if is_offset:
self.freq = to_offset(freq)
else:
self.freq = freq
def time_period_constructor(self, freq, is_offset):
Period("2012-06-01", freq=freq)
| [
"pandas.tseries.frequencies.to_offset",
"pandas.Period"
]
| [((731, 762), 'pandas.Period', 'Period', (['"""2012-06-01"""'], {'freq': 'freq'}), "('2012-06-01', freq=freq)\n", (737, 762), False, 'from pandas import Period\n'), ((966, 997), 'pandas.Period', 'Period', (['"""2012-06-01"""'], {'freq': 'freq'}), "('2012-06-01', freq=freq)\n", (972, 997), False, 'from pandas import Period\n'), ((1503, 1534), 'pandas.Period', 'Period', (['"""2012-06-01"""'], {'freq': 'freq'}), "('2012-06-01', freq=freq)\n", (1509, 1534), False, 'from pandas import Period\n'), ((1379, 1394), 'pandas.tseries.frequencies.to_offset', 'to_offset', (['freq'], {}), '(freq)\n', (1388, 1394), False, 'from pandas.tseries.frequencies import to_offset\n')] |
# -*- coding: utf-8 -
#
# This file is part of couchdbkit released under the MIT license.
# See the NOTICE for more information.
import os
import sys
if not hasattr(sys, 'version_info') or sys.version_info < (2, 5, 0, 'final'):
raise SystemExit("couchdbkit requires Python 2.5 or later.")
from setuptools import setup, find_packages
from couchdbkit import __version__
setup(
name = 'couchdbkit',
version = __version__,
description = 'Python couchdb kit',
long_description = file(
os.path.join(
os.path.dirname(__file__),
'README.rst'
)
).read(),
author = '<NAME>',
author_email = '<EMAIL>',
license = 'Apache License 2',
url = 'http://couchdbkit.org',
classifiers = [
'Development Status :: 4 - Beta',
'Environment :: Other Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Database',
'Topic :: Utilities',
'Topic :: Software Development :: Libraries :: Python Modules',
],
packages = find_packages(exclude=['tests']),
zip_safe = False,
install_requires = [
'restkit>=3.2',
],
entry_points="""
[couchdbkit.consumers]
sync=couchdbkit.consumer.sync:SyncConsumer
eventlet=couchdbkit.consumer.ceventlet:EventletConsumer
gevent=couchdbkit.consumer.cgevent:GeventConsumer
""",
test_suite='noses',
)
| [
"os.path.dirname",
"setuptools.find_packages"
]
| [((1191, 1223), 'setuptools.find_packages', 'find_packages', ([], {'exclude': "['tests']"}), "(exclude=['tests'])\n", (1204, 1223), False, 'from setuptools import setup, find_packages\n'), ((540, 565), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (555, 565), False, 'import os\n')] |
""" $lic$
Copyright (C) 2016-2020 by Tsinghua University and The Board of Trustees of
Stanford University
This program is free software: you can redistribute it and/or modify it under
the terms of the Modified BSD-3 License as published by the Open Source
Initiative.
This program is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
PARTICULAR PURPOSE. See the BSD-3 License for more details.
You should have received a copy of the Modified BSD-3 License along with this
program. If not, see <https://opensource.org/licenses/BSD-3-Clause>.
"""
import unittest
from nn_dataflow.core import Network
from nn_dataflow.core import Layer, InputLayer, ConvLayer, FCLayer, \
PoolingLayer, EltwiseLayer
class TestNetwork(unittest.TestCase):
''' Tests for Network. '''
# pylint: disable=too-many-public-methods
def setUp(self):
''' Set up. '''
self.network = Network('test_net')
self.network.set_input_layer(InputLayer(3, 224))
self.network.add('c1', ConvLayer(3, 64, 224, 3))
self.network.add('p1', PoolingLayer(64, 7, 32))
self.network.add('f1', FCLayer(64, 1000, 7))
def test_set_input_layer(self):
''' Modifier set_input_layer. '''
network = Network('test_net')
network.set_input_layer(InputLayer(3, 24))
self.assertIsInstance(network.input_layer(), InputLayer)
self.assertEqual(network.input_layer().nofm, 3)
self.assertEqual(network.input_layer().hofm, 24)
self.assertEqual(network.input_layer().wofm, 24)
self.assertEqual(len(network), 0)
def test_set_input_layer_type(self):
''' Modifier set_input_layer type. '''
network = Network('test_net')
with self.assertRaisesRegex(TypeError, 'Network: .*input_layer.*'):
network.set_input_layer(Layer(3, 24))
with self.assertRaisesRegex(TypeError, 'Network: .*input_layer.*'):
network.set_input_layer(ConvLayer(3, 8, 24, 3))
def test_set_input_layer_duplicate(self):
''' Modifier set_input_layer duplicate. '''
network = Network('test_net')
network.set_input_layer(InputLayer(3, 24))
with self.assertRaisesRegex(KeyError, 'Network: .*input.*'):
network.set_input_layer(InputLayer(3, 24))
def test_add(self):
''' Modifier add. '''
self.assertEqual(len(self.network), 3)
self.network.add('f2', FCLayer(64, 2000, 7), prevs='p1')
self.network.add('f3', FCLayer(3000, 1000), prevs=('f1', 'f2'))
self.network.add('e4', EltwiseLayer(1000, 1, 2), prevs=('f1', 'f3'))
self.network.add('f4', FCLayer(1000, 1000), prevs='e4')
self.assertEqual(len(self.network), 7)
def test_add_same_key(self):
''' Modifier add same key. '''
network = Network('test_net')
network.set_input_layer(InputLayer(3, 224))
network.add('c1', ConvLayer(3, 64, 224, 3))
with self.assertRaisesRegex(KeyError, 'Network: .*c1.*'):
network.add('c1', ConvLayer(64, 128, 224, 3))
def test_add_no_input(self):
''' Modifier add no input. '''
network = Network('test_net')
with self.assertRaisesRegex(RuntimeError, 'Network: .*input.*'):
network.add('c1', ConvLayer(3, 64, 224, 3))
def test_add_no_prev(self):
''' Modifier add no prevs. '''
network = Network('test_net')
network.set_input_layer(InputLayer(3, 224))
network.add('c1', ConvLayer(3, 64, 224, 3))
with self.assertRaisesRegex(KeyError, 'Network: .*prev.*p1.*'):
network.add('p1', PoolingLayer(64, 7, 32), prevs='p1')
def test_add_invalid_type(self):
''' Modifier add invalid type. '''
network = Network('test_net')
network.set_input_layer(InputLayer(3, 224))
with self.assertRaisesRegex(TypeError, 'Network: .*Layer.*'):
network.add('c1', (3, 64, 224, 3))
def test_add_unmatch_prev(self):
''' Modifier add unmatch prevs. '''
network = Network('test_net')
network.set_input_layer(InputLayer(3, 224))
network.add('c1', ConvLayer(3, 64, 224, 3))
with self.assertRaisesRegex(ValueError,
'Network: .*c1.*p1.*mismatch fmap.*'):
network.add('p1', PoolingLayer(64, 7, 2))
self.assertEqual(len(network), 1)
with self.assertRaisesRegex(ValueError,
'Network: .*c1.*c2.*mismatch fmap.*'):
network.add('c2', ConvLayer(64, 128, 220, 3))
self.assertEqual(len(network), 1)
with self.assertRaisesRegex(ValueError, 'Network: .*c1.*prev.*p1.*'):
network.add('p1', PoolingLayer(32, 7, 32))
self.assertEqual(len(network), 1)
with self.assertRaisesRegex(ValueError, 'Network: .*c1.*prev.*c2.*'):
network.add('c2', ConvLayer(32, 128, 224, 3))
self.assertEqual(len(network), 1)
network.add('c2', ConvLayer(64, 128, 224, 3))
with self.assertRaisesRegex(ValueError,
r'Network: .*c1 | c2.*prev.*p1.*'):
network.add('p1', PoolingLayer(128, 7, 32), prevs=('c1', 'c2'))
self.assertEqual(len(network), 2)
def test_add_ext(self):
''' Modifier add_ext. '''
self.assertEqual(len(self.network), 3)
self.network.add_ext('e0', InputLayer(3, 24))
self.assertIsInstance(self.network['e0'], InputLayer)
self.assertEqual(self.network['e0'].nofm, 3)
self.assertEqual(self.network['e0'].hofm, 24)
self.assertEqual(self.network['e0'].wofm, 24)
self.network.add_ext('e1', InputLayer(5, (16, 20)))
self.assertIsInstance(self.network['e1'], InputLayer)
self.assertEqual(self.network['e1'].nofm, 5)
self.assertEqual(self.network['e1'].hofm, 16)
self.assertEqual(self.network['e1'].wofm, 20)
self.assertEqual(len(self.network), 3)
def test_add_ext_same_key(self):
''' Modifier add_ext same key. '''
network = Network('test_net')
network.add_ext('e0', InputLayer(3, 24))
with self.assertRaisesRegex(KeyError, 'Network: .*ext.*'):
network.add_ext('e0', InputLayer(3, 24))
def test_add_ext_invalid_type(self):
''' Modifier add_ext invalid type. '''
network = Network('test_net')
with self.assertRaisesRegex(TypeError, 'Network: .*external layer.*'):
network.add_ext('e0', Layer(3, 24))
with self.assertRaisesRegex(TypeError, 'Network: .*external layer.*'):
network.add_ext('e0', ConvLayer(3, 8, 24, 3))
def test_prevs(self):
''' Get prevs. '''
self.network.add('f2', FCLayer(64, 2000, 7), prevs='p1')
self.network.add('f3', FCLayer(3000, 1000), prevs=('f1', 'f2'))
prevs = self.network.prevs('f1')
self.assertTupleEqual(prevs, ('p1',))
prevs = self.network.prevs('f2')
self.assertTupleEqual(prevs, ('p1',))
prevs = self.network.prevs('f3')
self.assertTupleEqual(prevs, ('f1', 'f2'))
def test_prevs_first(self):
''' Get prevs first layer. '''
self.network.add('c2', ConvLayer(3, 3, 224, 1),
prevs=self.network.INPUT_LAYER_KEY)
prevs = self.network.prevs('c1')
self.assertTupleEqual(prevs, (None,))
prevs = self.network.prevs('c2')
self.assertTupleEqual(prevs, (None,))
def test_prevs_input(self):
''' Get prevs input layer. '''
with self.assertRaisesRegex(ValueError, 'Network: .*input.*'):
_ = self.network.prevs(self.network.INPUT_LAYER_KEY)
def test_prevs_ext_next(self):
''' Get prevs next layer of an external layer. '''
self.network.add_ext('e0', InputLayer(3, 224))
self.network.add('n', ConvLayer(6, 3, 224, 1),
prevs=(self.network.INPUT_LAYER_KEY, 'e0'))
prevs = self.network.prevs('n')
self.assertTupleEqual(prevs, (None, 'e0'))
def test_prevs_ext(self):
''' Get prevs external layer. '''
self.network.add_ext('e0', InputLayer(3, 3))
with self.assertRaisesRegex(ValueError, 'Network: .*ext.*'):
_ = self.network.prevs('e0')
def test_nexts(self):
''' Get nexts. '''
self.network.add('f2', FCLayer(64, 2000, 7), prevs='p1')
self.network.add('f3', FCLayer(3000, 1000), prevs=('f1', 'f2'))
self.network.add('e4', EltwiseLayer(1000, 1, 2), prevs=('f1', 'f3'))
self.network.add('f4', FCLayer(1000, 1000), prevs='e4')
nexts = self.network.nexts('p1')
self.assertTupleEqual(nexts, ('f1', 'f2'))
nexts = self.network.nexts('f1')
self.assertTupleEqual(nexts, ('f3', 'e4'))
nexts = self.network.nexts('f2')
self.assertTupleEqual(nexts, ('f3',))
nexts = self.network.nexts('f3')
self.assertTupleEqual(nexts, ('e4',))
def test_nexts_last(self):
''' Get nexts first layer. '''
nexts = self.network.nexts('f1')
self.assertTupleEqual(nexts, (None,))
self.network.add('f2', FCLayer(64, 2000, 7), prevs='p1')
nexts = self.network.nexts('f1')
self.assertTupleEqual(nexts, (None,))
nexts = self.network.nexts('f2')
self.assertTupleEqual(nexts, (None,))
def test_nexts_input(self):
''' Get nexts input layer. '''
nexts = self.network.nexts(self.network.INPUT_LAYER_KEY)
self.assertTupleEqual(nexts, ('c1',))
self.network.add('c2', ConvLayer(3, 3, 224, 1),
prevs=self.network.INPUT_LAYER_KEY)
self.network.add('c3', ConvLayer(6, 4, 224, 1),
prevs=(self.network.INPUT_LAYER_KEY, 'c2'))
nexts = self.network.nexts(self.network.INPUT_LAYER_KEY)
self.assertTupleEqual(nexts, ('c1', 'c2', 'c3'))
def test_firsts(self):
''' Get firsts. '''
firsts = self.network.firsts()
self.assertTupleEqual(firsts, ('c1',))
self.network.add('c2', ConvLayer(3, 3, 224, 1),
prevs=self.network.INPUT_LAYER_KEY)
self.network.add('c3', ConvLayer(6, 4, 224, 1),
prevs=(self.network.INPUT_LAYER_KEY, 'c2'))
firsts = self.network.firsts()
self.assertTupleEqual(firsts, ('c1', 'c2'))
self.assertIn('c1', firsts)
self.assertNotIn('c3', firsts)
def test_firsts_ext(self):
''' Get firsts with external layers. '''
self.network.add_ext('e0', InputLayer(3, 224))
self.network.add('c2', ConvLayer(3, 3, 224, 1), prevs=('e0',))
self.network.add('c3', ConvLayer(67, 3, 224, 1), prevs=('e0', 'c1'))
self.network.add('c4', ConvLayer(6, 3, 224, 1),
prevs=(self.network.INPUT_LAYER_KEY, 'e0',))
firsts = self.network.firsts()
self.assertIn('c2', firsts)
self.assertNotIn('c3', firsts)
self.assertIn('c4', firsts)
def test_lasts(self):
''' Get lasts. '''
lasts = self.network.lasts()
self.assertTupleEqual(lasts, ('f1',))
self.network.add('f2', FCLayer(64, 2000, 7), prevs='p1')
lasts = self.network.lasts()
self.assertTupleEqual(lasts, ('f1', 'f2'))
def test_ext_layers(self):
''' Get external layers. '''
self.assertTupleEqual(self.network.ext_layers(), tuple())
self.network.add_ext('e0', InputLayer(3, 224))
self.assertTupleEqual(self.network.ext_layers(), ('e0',))
self.network.add_ext('e1', InputLayer(3, 224))
self.assertTupleEqual(self.network.ext_layers(), ('e0', 'e1'))
def test_contains(self):
''' Whether contains. '''
self.assertIn('c1', self.network)
self.assertIn('p1', self.network)
self.assertIn('f1', self.network)
self.assertNotIn('f2', self.network)
self.network.add('f2', FCLayer(64, 2000, 7), prevs='p1')
self.assertIn('f2', self.network)
def test_len(self):
''' Accessor len. '''
self.assertEqual(len(self.network), 3)
network = Network('test_net')
self.assertEqual(len(network), 0)
network.set_input_layer(InputLayer(3, 224))
self.assertEqual(len(network), 0)
network.add('c1', ConvLayer(3, 4, 224, 1))
self.assertEqual(len(network), 1)
self.network.add('f2', FCLayer(64, 2000, 7), prevs='p1')
self.assertEqual(len(self.network), 4)
self.network.add('f3', FCLayer(3000, 1000), prevs=('f1', 'f2'))
self.assertEqual(len(self.network), 5)
self.network.add('e4', EltwiseLayer(1000, 1, 2), prevs=('f1', 'f3'))
self.assertEqual(len(self.network), 6)
self.network.add('f4', FCLayer(1000, 1000), prevs='e4')
self.assertEqual(len(self.network), 7)
def test_iter(self):
''' Accessor iter. '''
num = 0
for layer in self.network:
self.assertIn(layer, self.network)
self.assertIsInstance(self.network[layer], Layer)
num += 1
self.assertEqual(len(self.network), num)
network = Network('test_net')
network.set_input_layer(InputLayer(3, 224))
with self.assertRaises(StopIteration):
_ = next(iter(network))
def test_contains_ext(self):
''' Whether contains external layer. '''
self.assertNotIn('e0', self.network)
self.network.add_ext('e0', InputLayer(3, 224))
self.assertIn('e0', self.network)
def test_len_ext(self):
''' Accessor len external layer. '''
self.assertEqual(len(self.network), 3)
self.network.add_ext('e0', InputLayer(3, 224))
self.assertEqual(len(self.network), 3)
def test_iter_ext(self):
''' Accessor iter external layer. '''
self.network.add_ext('e0', InputLayer(3, 224))
for layer in self.network:
self.assertNotEqual(layer, 'e0')
def test_getitem(self):
''' Accessor getitem. '''
self.assertIsInstance(self.network['c1'], ConvLayer)
self.assertIsInstance(self.network['p1'], PoolingLayer)
self.assertIsInstance(self.network['f1'], FCLayer)
def test_getitem_error(self):
''' Accessor getitem. '''
with self.assertRaisesRegex(KeyError, 'Network: .*c2.*'):
_ = self.network['c2']
def test_str(self):
''' Accessor str. '''
string = str(self.network)
for layer in self.network:
self.assertIn(layer, string)
| [
"nn_dataflow.core.Network",
"nn_dataflow.core.Layer",
"nn_dataflow.core.ConvLayer",
"nn_dataflow.core.InputLayer",
"nn_dataflow.core.PoolingLayer",
"nn_dataflow.core.EltwiseLayer",
"nn_dataflow.core.FCLayer"
]
| [((987, 1006), 'nn_dataflow.core.Network', 'Network', (['"""test_net"""'], {}), "('test_net')\n", (994, 1006), False, 'from nn_dataflow.core import Network\n'), ((1327, 1346), 'nn_dataflow.core.Network', 'Network', (['"""test_net"""'], {}), "('test_net')\n", (1334, 1346), False, 'from nn_dataflow.core import Network\n'), ((1782, 1801), 'nn_dataflow.core.Network', 'Network', (['"""test_net"""'], {}), "('test_net')\n", (1789, 1801), False, 'from nn_dataflow.core import Network\n'), ((2181, 2200), 'nn_dataflow.core.Network', 'Network', (['"""test_net"""'], {}), "('test_net')\n", (2188, 2200), False, 'from nn_dataflow.core import Network\n'), ((2895, 2914), 'nn_dataflow.core.Network', 'Network', (['"""test_net"""'], {}), "('test_net')\n", (2902, 2914), False, 'from nn_dataflow.core import Network\n'), ((3235, 3254), 'nn_dataflow.core.Network', 'Network', (['"""test_net"""'], {}), "('test_net')\n", (3242, 3254), False, 'from nn_dataflow.core import Network\n'), ((3475, 3494), 'nn_dataflow.core.Network', 'Network', (['"""test_net"""'], {}), "('test_net')\n", (3482, 3494), False, 'from nn_dataflow.core import Network\n'), ((3838, 3857), 'nn_dataflow.core.Network', 'Network', (['"""test_net"""'], {}), "('test_net')\n", (3845, 3857), False, 'from nn_dataflow.core import Network\n'), ((4128, 4147), 'nn_dataflow.core.Network', 'Network', (['"""test_net"""'], {}), "('test_net')\n", (4135, 4147), False, 'from nn_dataflow.core import Network\n'), ((6162, 6181), 'nn_dataflow.core.Network', 'Network', (['"""test_net"""'], {}), "('test_net')\n", (6169, 6181), False, 'from nn_dataflow.core import Network\n'), ((6459, 6478), 'nn_dataflow.core.Network', 'Network', (['"""test_net"""'], {}), "('test_net')\n", (6466, 6478), False, 'from nn_dataflow.core import Network\n'), ((12274, 12293), 'nn_dataflow.core.Network', 'Network', (['"""test_net"""'], {}), "('test_net')\n", (12281, 12293), False, 'from nn_dataflow.core import Network\n'), ((13296, 13315), 'nn_dataflow.core.Network', 'Network', (['"""test_net"""'], {}), "('test_net')\n", (13303, 13315), False, 'from nn_dataflow.core import Network\n'), ((1044, 1062), 'nn_dataflow.core.InputLayer', 'InputLayer', (['(3)', '(224)'], {}), '(3, 224)\n', (1054, 1062), False, 'from nn_dataflow.core import Layer, InputLayer, ConvLayer, FCLayer, PoolingLayer, EltwiseLayer\n'), ((1095, 1119), 'nn_dataflow.core.ConvLayer', 'ConvLayer', (['(3)', '(64)', '(224)', '(3)'], {}), '(3, 64, 224, 3)\n', (1104, 1119), False, 'from nn_dataflow.core import Layer, InputLayer, ConvLayer, FCLayer, PoolingLayer, EltwiseLayer\n'), ((1152, 1175), 'nn_dataflow.core.PoolingLayer', 'PoolingLayer', (['(64)', '(7)', '(32)'], {}), '(64, 7, 32)\n', (1164, 1175), False, 'from nn_dataflow.core import Layer, InputLayer, ConvLayer, FCLayer, PoolingLayer, EltwiseLayer\n'), ((1208, 1228), 'nn_dataflow.core.FCLayer', 'FCLayer', (['(64)', '(1000)', '(7)'], {}), '(64, 1000, 7)\n', (1215, 1228), False, 'from nn_dataflow.core import Layer, InputLayer, ConvLayer, FCLayer, PoolingLayer, EltwiseLayer\n'), ((1379, 1396), 'nn_dataflow.core.InputLayer', 'InputLayer', (['(3)', '(24)'], {}), '(3, 24)\n', (1389, 1396), False, 'from nn_dataflow.core import Layer, InputLayer, ConvLayer, FCLayer, PoolingLayer, EltwiseLayer\n'), ((2233, 2250), 'nn_dataflow.core.InputLayer', 'InputLayer', (['(3)', '(24)'], {}), '(3, 24)\n', (2243, 2250), False, 'from nn_dataflow.core import Layer, InputLayer, ConvLayer, FCLayer, PoolingLayer, EltwiseLayer\n'), ((2510, 2530), 'nn_dataflow.core.FCLayer', 'FCLayer', (['(64)', '(2000)', '(7)'], 
{}), '(64, 2000, 7)\n', (2517, 2530), False, 'from nn_dataflow.core import Layer, InputLayer, ConvLayer, FCLayer, PoolingLayer, EltwiseLayer\n'), ((2575, 2594), 'nn_dataflow.core.FCLayer', 'FCLayer', (['(3000)', '(1000)'], {}), '(3000, 1000)\n', (2582, 2594), False, 'from nn_dataflow.core import Layer, InputLayer, ConvLayer, FCLayer, PoolingLayer, EltwiseLayer\n'), ((2647, 2671), 'nn_dataflow.core.EltwiseLayer', 'EltwiseLayer', (['(1000)', '(1)', '(2)'], {}), '(1000, 1, 2)\n', (2659, 2671), False, 'from nn_dataflow.core import Layer, InputLayer, ConvLayer, FCLayer, PoolingLayer, EltwiseLayer\n'), ((2724, 2743), 'nn_dataflow.core.FCLayer', 'FCLayer', (['(1000)', '(1000)'], {}), '(1000, 1000)\n', (2731, 2743), False, 'from nn_dataflow.core import Layer, InputLayer, ConvLayer, FCLayer, PoolingLayer, EltwiseLayer\n'), ((2947, 2965), 'nn_dataflow.core.InputLayer', 'InputLayer', (['(3)', '(224)'], {}), '(3, 224)\n', (2957, 2965), False, 'from nn_dataflow.core import Layer, InputLayer, ConvLayer, FCLayer, PoolingLayer, EltwiseLayer\n'), ((2994, 3018), 'nn_dataflow.core.ConvLayer', 'ConvLayer', (['(3)', '(64)', '(224)', '(3)'], {}), '(3, 64, 224, 3)\n', (3003, 3018), False, 'from nn_dataflow.core import Layer, InputLayer, ConvLayer, FCLayer, PoolingLayer, EltwiseLayer\n'), ((3527, 3545), 'nn_dataflow.core.InputLayer', 'InputLayer', (['(3)', '(224)'], {}), '(3, 224)\n', (3537, 3545), False, 'from nn_dataflow.core import Layer, InputLayer, ConvLayer, FCLayer, PoolingLayer, EltwiseLayer\n'), ((3574, 3598), 'nn_dataflow.core.ConvLayer', 'ConvLayer', (['(3)', '(64)', '(224)', '(3)'], {}), '(3, 64, 224, 3)\n', (3583, 3598), False, 'from nn_dataflow.core import Layer, InputLayer, ConvLayer, FCLayer, PoolingLayer, EltwiseLayer\n'), ((3890, 3908), 'nn_dataflow.core.InputLayer', 'InputLayer', (['(3)', '(224)'], {}), '(3, 224)\n', (3900, 3908), False, 'from nn_dataflow.core import Layer, InputLayer, ConvLayer, FCLayer, PoolingLayer, EltwiseLayer\n'), ((4180, 4198), 'nn_dataflow.core.InputLayer', 'InputLayer', (['(3)', '(224)'], {}), '(3, 224)\n', (4190, 4198), False, 'from nn_dataflow.core import Layer, InputLayer, ConvLayer, FCLayer, PoolingLayer, EltwiseLayer\n'), ((4226, 4250), 'nn_dataflow.core.ConvLayer', 'ConvLayer', (['(3)', '(64)', '(224)', '(3)'], {}), '(3, 64, 224, 3)\n', (4235, 4250), False, 'from nn_dataflow.core import Layer, InputLayer, ConvLayer, FCLayer, PoolingLayer, EltwiseLayer\n'), ((5076, 5102), 'nn_dataflow.core.ConvLayer', 'ConvLayer', (['(64)', '(128)', '(224)', '(3)'], {}), '(64, 128, 224, 3)\n', (5085, 5102), False, 'from nn_dataflow.core import Layer, InputLayer, ConvLayer, FCLayer, PoolingLayer, EltwiseLayer\n'), ((5489, 5506), 'nn_dataflow.core.InputLayer', 'InputLayer', (['(3)', '(24)'], {}), '(3, 24)\n', (5499, 5506), False, 'from nn_dataflow.core import Layer, InputLayer, ConvLayer, FCLayer, PoolingLayer, EltwiseLayer\n'), ((5767, 5790), 'nn_dataflow.core.InputLayer', 'InputLayer', (['(5)', '(16, 20)'], {}), '(5, (16, 20))\n', (5777, 5790), False, 'from nn_dataflow.core import Layer, InputLayer, ConvLayer, FCLayer, PoolingLayer, EltwiseLayer\n'), ((6213, 6230), 'nn_dataflow.core.InputLayer', 'InputLayer', (['(3)', '(24)'], {}), '(3, 24)\n', (6223, 6230), False, 'from nn_dataflow.core import Layer, InputLayer, ConvLayer, FCLayer, PoolingLayer, EltwiseLayer\n'), ((6829, 6849), 'nn_dataflow.core.FCLayer', 'FCLayer', (['(64)', '(2000)', '(7)'], {}), '(64, 2000, 7)\n', (6836, 6849), False, 'from nn_dataflow.core import Layer, InputLayer, ConvLayer, FCLayer, PoolingLayer, 
EltwiseLayer\n'), ((6894, 6913), 'nn_dataflow.core.FCLayer', 'FCLayer', (['(3000)', '(1000)'], {}), '(3000, 1000)\n', (6901, 6913), False, 'from nn_dataflow.core import Layer, InputLayer, ConvLayer, FCLayer, PoolingLayer, EltwiseLayer\n'), ((7306, 7329), 'nn_dataflow.core.ConvLayer', 'ConvLayer', (['(3)', '(3)', '(224)', '(1)'], {}), '(3, 3, 224, 1)\n', (7315, 7329), False, 'from nn_dataflow.core import Layer, InputLayer, ConvLayer, FCLayer, PoolingLayer, EltwiseLayer\n'), ((7906, 7924), 'nn_dataflow.core.InputLayer', 'InputLayer', (['(3)', '(224)'], {}), '(3, 224)\n', (7916, 7924), False, 'from nn_dataflow.core import Layer, InputLayer, ConvLayer, FCLayer, PoolingLayer, EltwiseLayer\n'), ((7957, 7980), 'nn_dataflow.core.ConvLayer', 'ConvLayer', (['(6)', '(3)', '(224)', '(1)'], {}), '(6, 3, 224, 1)\n', (7966, 7980), False, 'from nn_dataflow.core import Layer, InputLayer, ConvLayer, FCLayer, PoolingLayer, EltwiseLayer\n'), ((8251, 8267), 'nn_dataflow.core.InputLayer', 'InputLayer', (['(3)', '(3)'], {}), '(3, 3)\n', (8261, 8267), False, 'from nn_dataflow.core import Layer, InputLayer, ConvLayer, FCLayer, PoolingLayer, EltwiseLayer\n'), ((8464, 8484), 'nn_dataflow.core.FCLayer', 'FCLayer', (['(64)', '(2000)', '(7)'], {}), '(64, 2000, 7)\n', (8471, 8484), False, 'from nn_dataflow.core import Layer, InputLayer, ConvLayer, FCLayer, PoolingLayer, EltwiseLayer\n'), ((8529, 8548), 'nn_dataflow.core.FCLayer', 'FCLayer', (['(3000)', '(1000)'], {}), '(3000, 1000)\n', (8536, 8548), False, 'from nn_dataflow.core import Layer, InputLayer, ConvLayer, FCLayer, PoolingLayer, EltwiseLayer\n'), ((8601, 8625), 'nn_dataflow.core.EltwiseLayer', 'EltwiseLayer', (['(1000)', '(1)', '(2)'], {}), '(1000, 1, 2)\n', (8613, 8625), False, 'from nn_dataflow.core import Layer, InputLayer, ConvLayer, FCLayer, PoolingLayer, EltwiseLayer\n'), ((8678, 8697), 'nn_dataflow.core.FCLayer', 'FCLayer', (['(1000)', '(1000)'], {}), '(1000, 1000)\n', (8685, 8697), False, 'from nn_dataflow.core import Layer, InputLayer, ConvLayer, FCLayer, PoolingLayer, EltwiseLayer\n'), ((9263, 9283), 'nn_dataflow.core.FCLayer', 'FCLayer', (['(64)', '(2000)', '(7)'], {}), '(64, 2000, 7)\n', (9270, 9283), False, 'from nn_dataflow.core import Layer, InputLayer, ConvLayer, FCLayer, PoolingLayer, EltwiseLayer\n'), ((9687, 9710), 'nn_dataflow.core.ConvLayer', 'ConvLayer', (['(3)', '(3)', '(224)', '(1)'], {}), '(3, 3, 224, 1)\n', (9696, 9710), False, 'from nn_dataflow.core import Layer, InputLayer, ConvLayer, FCLayer, PoolingLayer, EltwiseLayer\n'), ((9804, 9827), 'nn_dataflow.core.ConvLayer', 'ConvLayer', (['(6)', '(4)', '(224)', '(1)'], {}), '(6, 4, 224, 1)\n', (9813, 9827), False, 'from nn_dataflow.core import Layer, InputLayer, ConvLayer, FCLayer, PoolingLayer, EltwiseLayer\n'), ((10194, 10217), 'nn_dataflow.core.ConvLayer', 'ConvLayer', (['(3)', '(3)', '(224)', '(1)'], {}), '(3, 3, 224, 1)\n', (10203, 10217), False, 'from nn_dataflow.core import Layer, InputLayer, ConvLayer, FCLayer, PoolingLayer, EltwiseLayer\n'), ((10311, 10334), 'nn_dataflow.core.ConvLayer', 'ConvLayer', (['(6)', '(4)', '(224)', '(1)'], {}), '(6, 4, 224, 1)\n', (10320, 10334), False, 'from nn_dataflow.core import Layer, InputLayer, ConvLayer, FCLayer, PoolingLayer, EltwiseLayer\n'), ((10688, 10706), 'nn_dataflow.core.InputLayer', 'InputLayer', (['(3)', '(224)'], {}), '(3, 224)\n', (10698, 10706), False, 'from nn_dataflow.core import Layer, InputLayer, ConvLayer, FCLayer, PoolingLayer, EltwiseLayer\n'), ((10740, 10763), 'nn_dataflow.core.ConvLayer', 'ConvLayer', (['(3)', '(3)', 
'(224)', '(1)'], {}), '(3, 3, 224, 1)\n', (10749, 10763), False, 'from nn_dataflow.core import Layer, InputLayer, ConvLayer, FCLayer, PoolingLayer, EltwiseLayer\n'), ((10811, 10835), 'nn_dataflow.core.ConvLayer', 'ConvLayer', (['(67)', '(3)', '(224)', '(1)'], {}), '(67, 3, 224, 1)\n', (10820, 10835), False, 'from nn_dataflow.core import Layer, InputLayer, ConvLayer, FCLayer, PoolingLayer, EltwiseLayer\n'), ((10888, 10911), 'nn_dataflow.core.ConvLayer', 'ConvLayer', (['(6)', '(3)', '(224)', '(1)'], {}), '(6, 3, 224, 1)\n', (10897, 10911), False, 'from nn_dataflow.core import Layer, InputLayer, ConvLayer, FCLayer, PoolingLayer, EltwiseLayer\n'), ((11303, 11323), 'nn_dataflow.core.FCLayer', 'FCLayer', (['(64)', '(2000)', '(7)'], {}), '(64, 2000, 7)\n', (11310, 11323), False, 'from nn_dataflow.core import Layer, InputLayer, ConvLayer, FCLayer, PoolingLayer, EltwiseLayer\n'), ((11597, 11615), 'nn_dataflow.core.InputLayer', 'InputLayer', (['(3)', '(224)'], {}), '(3, 224)\n', (11607, 11615), False, 'from nn_dataflow.core import Layer, InputLayer, ConvLayer, FCLayer, PoolingLayer, EltwiseLayer\n'), ((11719, 11737), 'nn_dataflow.core.InputLayer', 'InputLayer', (['(3)', '(224)'], {}), '(3, 224)\n', (11729, 11737), False, 'from nn_dataflow.core import Layer, InputLayer, ConvLayer, FCLayer, PoolingLayer, EltwiseLayer\n'), ((12077, 12097), 'nn_dataflow.core.FCLayer', 'FCLayer', (['(64)', '(2000)', '(7)'], {}), '(64, 2000, 7)\n', (12084, 12097), False, 'from nn_dataflow.core import Layer, InputLayer, ConvLayer, FCLayer, PoolingLayer, EltwiseLayer\n'), ((12368, 12386), 'nn_dataflow.core.InputLayer', 'InputLayer', (['(3)', '(224)'], {}), '(3, 224)\n', (12378, 12386), False, 'from nn_dataflow.core import Layer, InputLayer, ConvLayer, FCLayer, PoolingLayer, EltwiseLayer\n'), ((12456, 12479), 'nn_dataflow.core.ConvLayer', 'ConvLayer', (['(3)', '(4)', '(224)', '(1)'], {}), '(3, 4, 224, 1)\n', (12465, 12479), False, 'from nn_dataflow.core import Layer, InputLayer, ConvLayer, FCLayer, PoolingLayer, EltwiseLayer\n'), ((12555, 12575), 'nn_dataflow.core.FCLayer', 'FCLayer', (['(64)', '(2000)', '(7)'], {}), '(64, 2000, 7)\n', (12562, 12575), False, 'from nn_dataflow.core import Layer, InputLayer, ConvLayer, FCLayer, PoolingLayer, EltwiseLayer\n'), ((12667, 12686), 'nn_dataflow.core.FCLayer', 'FCLayer', (['(3000)', '(1000)'], {}), '(3000, 1000)\n', (12674, 12686), False, 'from nn_dataflow.core import Layer, InputLayer, ConvLayer, FCLayer, PoolingLayer, EltwiseLayer\n'), ((12786, 12810), 'nn_dataflow.core.EltwiseLayer', 'EltwiseLayer', (['(1000)', '(1)', '(2)'], {}), '(1000, 1, 2)\n', (12798, 12810), False, 'from nn_dataflow.core import Layer, InputLayer, ConvLayer, FCLayer, PoolingLayer, EltwiseLayer\n'), ((12910, 12929), 'nn_dataflow.core.FCLayer', 'FCLayer', (['(1000)', '(1000)'], {}), '(1000, 1000)\n', (12917, 12929), False, 'from nn_dataflow.core import Layer, InputLayer, ConvLayer, FCLayer, PoolingLayer, EltwiseLayer\n'), ((13348, 13366), 'nn_dataflow.core.InputLayer', 'InputLayer', (['(3)', '(224)'], {}), '(3, 224)\n', (13358, 13366), False, 'from nn_dataflow.core import Layer, InputLayer, ConvLayer, FCLayer, PoolingLayer, EltwiseLayer\n'), ((13614, 13632), 'nn_dataflow.core.InputLayer', 'InputLayer', (['(3)', '(224)'], {}), '(3, 224)\n', (13624, 13632), False, 'from nn_dataflow.core import Layer, InputLayer, ConvLayer, FCLayer, PoolingLayer, EltwiseLayer\n'), ((13832, 13850), 'nn_dataflow.core.InputLayer', 'InputLayer', (['(3)', '(224)'], {}), '(3, 224)\n', (13842, 13850), False, 'from nn_dataflow.core import 
Layer, InputLayer, ConvLayer, FCLayer, PoolingLayer, EltwiseLayer\n'), ((14010, 14028), 'nn_dataflow.core.InputLayer', 'InputLayer', (['(3)', '(224)'], {}), '(3, 224)\n', (14020, 14028), False, 'from nn_dataflow.core import Layer, InputLayer, ConvLayer, FCLayer, PoolingLayer, EltwiseLayer\n'), ((1914, 1926), 'nn_dataflow.core.Layer', 'Layer', (['(3)', '(24)'], {}), '(3, 24)\n', (1919, 1926), False, 'from nn_dataflow.core import Layer, InputLayer, ConvLayer, FCLayer, PoolingLayer, EltwiseLayer\n'), ((2040, 2062), 'nn_dataflow.core.ConvLayer', 'ConvLayer', (['(3)', '(8)', '(24)', '(3)'], {}), '(3, 8, 24, 3)\n', (2049, 2062), False, 'from nn_dataflow.core import Layer, InputLayer, ConvLayer, FCLayer, PoolingLayer, EltwiseLayer\n'), ((2357, 2374), 'nn_dataflow.core.InputLayer', 'InputLayer', (['(3)', '(24)'], {}), '(3, 24)\n', (2367, 2374), False, 'from nn_dataflow.core import Layer, InputLayer, ConvLayer, FCLayer, PoolingLayer, EltwiseLayer\n'), ((3116, 3142), 'nn_dataflow.core.ConvLayer', 'ConvLayer', (['(64)', '(128)', '(224)', '(3)'], {}), '(64, 128, 224, 3)\n', (3125, 3142), False, 'from nn_dataflow.core import Layer, InputLayer, ConvLayer, FCLayer, PoolingLayer, EltwiseLayer\n'), ((3359, 3383), 'nn_dataflow.core.ConvLayer', 'ConvLayer', (['(3)', '(64)', '(224)', '(3)'], {}), '(3, 64, 224, 3)\n', (3368, 3383), False, 'from nn_dataflow.core import Layer, InputLayer, ConvLayer, FCLayer, PoolingLayer, EltwiseLayer\n'), ((3702, 3725), 'nn_dataflow.core.PoolingLayer', 'PoolingLayer', (['(64)', '(7)', '(32)'], {}), '(64, 7, 32)\n', (3714, 3725), False, 'from nn_dataflow.core import Layer, InputLayer, ConvLayer, FCLayer, PoolingLayer, EltwiseLayer\n'), ((4406, 4428), 'nn_dataflow.core.PoolingLayer', 'PoolingLayer', (['(64)', '(7)', '(2)'], {}), '(64, 7, 2)\n', (4418, 4428), False, 'from nn_dataflow.core import Layer, InputLayer, ConvLayer, FCLayer, PoolingLayer, EltwiseLayer\n'), ((4625, 4651), 'nn_dataflow.core.ConvLayer', 'ConvLayer', (['(64)', '(128)', '(220)', '(3)'], {}), '(64, 128, 220, 3)\n', (4634, 4651), False, 'from nn_dataflow.core import Layer, InputLayer, ConvLayer, FCLayer, PoolingLayer, EltwiseLayer\n'), ((4804, 4827), 'nn_dataflow.core.PoolingLayer', 'PoolingLayer', (['(32)', '(7)', '(32)'], {}), '(32, 7, 32)\n', (4816, 4827), False, 'from nn_dataflow.core import Layer, InputLayer, ConvLayer, FCLayer, PoolingLayer, EltwiseLayer\n'), ((4979, 5005), 'nn_dataflow.core.ConvLayer', 'ConvLayer', (['(32)', '(128)', '(224)', '(3)'], {}), '(32, 128, 224, 3)\n', (4988, 5005), False, 'from nn_dataflow.core import Layer, InputLayer, ConvLayer, FCLayer, PoolingLayer, EltwiseLayer\n'), ((5255, 5279), 'nn_dataflow.core.PoolingLayer', 'PoolingLayer', (['(128)', '(7)', '(32)'], {}), '(128, 7, 32)\n', (5267, 5279), False, 'from nn_dataflow.core import Layer, InputLayer, ConvLayer, FCLayer, PoolingLayer, EltwiseLayer\n'), ((6333, 6350), 'nn_dataflow.core.InputLayer', 'InputLayer', (['(3)', '(24)'], {}), '(3, 24)\n', (6343, 6350), False, 'from nn_dataflow.core import Layer, InputLayer, ConvLayer, FCLayer, PoolingLayer, EltwiseLayer\n'), ((6593, 6605), 'nn_dataflow.core.Layer', 'Layer', (['(3)', '(24)'], {}), '(3, 24)\n', (6598, 6605), False, 'from nn_dataflow.core import Layer, InputLayer, ConvLayer, FCLayer, PoolingLayer, EltwiseLayer\n'), ((6720, 6742), 'nn_dataflow.core.ConvLayer', 'ConvLayer', (['(3)', '(8)', '(24)', '(3)'], {}), '(3, 8, 24, 3)\n', (6729, 6742), False, 'from nn_dataflow.core import Layer, InputLayer, ConvLayer, FCLayer, PoolingLayer, EltwiseLayer\n')] |
import torch
import torch.nn.functional as F
import pandas as pd
import numpy as np
from torch_geometric.data import Data
from torch_geometric.nn import GCNConv, PairNorm
from torch_geometric.utils.undirected import to_undirected
import random
import matplotlib.pyplot as plt
data_name = 'citeseer' # 'cora' or 'citeseer'
data_edge_path = f'datasets/{data_name}/{data_name}.cites'
data_content_path = f'datasets/{data_name}/{data_name}.content'
raw_content = pd.read_table(data_content_path, header=None, dtype={0:np.str})
raw_edge = pd.read_table(data_edge_path, header=None, dtype=np.str)
paper_ids = raw_content[0]
paper_id_map = {}
for i, pp_id in enumerate(paper_ids):
paper_id_map[pp_id] = i
edge_index = torch.from_numpy(raw_edge.apply(lambda col: col.map(paper_id_map)).dropna().values).long().t().contiguous()
x = torch.from_numpy(raw_content.values[:, 1:-1].astype(np.float)).float()
labels = np.unique(raw_content[raw_content.keys()[-1]]).tolist()
y = torch.from_numpy(raw_content[raw_content.keys()[-1]].map(lambda x: labels.index(x)).values).long()
def get_mask(y:torch.tensor):
train_mask = torch.tensor([False] * y.shape[0])
for i in torch.unique(y).unbind():
temp = torch.arange(0, y.shape[0])[y == i].tolist()
random.shuffle(temp)
train_mask[temp[:30]] = True
train_mask = torch.tensor(train_mask)
test_mask = train_mask == False
return train_mask, test_mask
train_mask, test_mask = get_mask(y)
data = Data(x=x, edge_index=edge_index, y=y, train_mask=train_mask, test_mask=test_mask)
def drop_edge(edge_index, keep_ratio:float=1.):
num_keep = int(keep_ratio * edge_index.shape[1])
temp = [True] * num_keep + [False] * (edge_index.shape[1] - num_keep)
random.shuffle(temp)
return edge_index[:, temp]
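# Added note (not in the original): drop_edge implements DropEdge-style regularisation;
# with keep_ratio=0.75 on a [2, 8] edge_index it returns a [2, 6] tensor holding a
# random subset of the original columns (relative order of the kept edges is preserved).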
class GCNNodeClassifier(torch.nn.Module):
def __init__(self,
dim_features,
num_classes,
num_layers,
add_self_loops:bool=True,
use_pairnorm:bool=False,
drop_edge:float=1.,
activation:str='relu',
undirected:bool=False
):
super(GCNNodeClassifier, self).__init__()
dim_hidden = 32
self.gconvs = torch.nn.ModuleList(
[GCNConv(in_channels=dim_features, out_channels=dim_hidden, add_self_loops=add_self_loops)]
+ [GCNConv(in_channels=dim_hidden, out_channels=dim_hidden, add_self_loops=add_self_loops) for i in range(num_layers - 2)]
)
self.final_conv = GCNConv(in_channels=dim_hidden, out_channels=num_classes, add_self_loops=add_self_loops)
self.use_pairnorm = use_pairnorm
if self.use_pairnorm:
self.pairnorm = PairNorm()
self.drop_edge = drop_edge
activations_map = {'relu':torch.relu, 'tanh':torch.tanh, 'sigmoid':torch.sigmoid, 'leaky_relu':torch.nn.LeakyReLU(0.1)}
self.activation_fn = activations_map[activation]
def forward(self, x, edge_index):
for l in self.gconvs:
edges = drop_edge(edge_index, self.drop_edge)
x = l(x, edges)
if self.use_pairnorm:
x = self.pairnorm(x)
x = self.activation_fn(x)
x = self.final_conv(x, edge_index)
return x
def eval_acc(y_pred, y):
return ((torch.argmax(y_pred, dim=-1) == y).float().sum() / y.shape[0]).item()
num_epochs = 100
test_cases = [
{'num_layers':2, 'add_self_loops':True, 'use_pairnorm':False, 'drop_edge':1., 'activation':'relu', 'undirected':False},
# num layers
{'num_layers':4, 'add_self_loops':True, 'use_pairnorm':False, 'drop_edge':1., 'activation':'relu', 'undirected':False},
{'num_layers':6, 'add_self_loops':True, 'use_pairnorm':False, 'drop_edge':1., 'activation':'relu', 'undirected':False},
# self loop
{'num_layers':2, 'add_self_loops':False, 'use_pairnorm':False, 'drop_edge':1., 'activation':'relu', 'undirected':False},
# pair norm
{'num_layers':2, 'add_self_loops':True, 'use_pairnorm':True, 'drop_edge':1., 'activation':'relu', 'undirected':False},
{'num_layers':4, 'add_self_loops':True, 'use_pairnorm':True, 'drop_edge':1., 'activation':'relu', 'undirected':False},
{'num_layers':6, 'add_self_loops':True, 'use_pairnorm':True, 'drop_edge':1., 'activation':'relu', 'undirected':False},
# drop edge
{'num_layers':2, 'add_self_loops':True, 'use_pairnorm':False, 'drop_edge':0.6, 'activation':'relu', 'undirected':False},
{'num_layers':4, 'add_self_loops':True, 'use_pairnorm':False, 'drop_edge':0.6, 'activation':'relu', 'undirected':False},
# activation fn
{'num_layers':2, 'add_self_loops':True, 'use_pairnorm':False, 'drop_edge':1., 'activation':'tanh', 'undirected':False},
{'num_layers':2, 'add_self_loops':True, 'use_pairnorm':False, 'drop_edge':1., 'activation':'leaky_relu', 'undirected':False},
# undirected
{'num_layers':2, 'add_self_loops':True, 'use_pairnorm':False, 'drop_edge':1., 'activation':'relu', 'undirected':True},
{'num_layers':4, 'add_self_loops':True, 'use_pairnorm':True, 'drop_edge':1., 'activation':'relu', 'undirected':True},
{'num_layers':4, 'add_self_loops':True, 'use_pairnorm':False, 'drop_edge':0.8, 'activation':'relu', 'undirected':True},
]
for i_case, kwargs in enumerate(test_cases):
print(f'Test Case {i_case:>2}')
model = GCNNodeClassifier(x.shape[1], len(labels), **kwargs)
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
history_test_acc = []
input_edge_index = to_undirected(edge_index) if kwargs['undirected'] else edge_index
for i_epoch in range(0, num_epochs):
print(f'Epoch {i_epoch:>3} ', end='')
y_pred = model(x, input_edge_index)
train_acc = eval_acc(y_pred[train_mask], y[train_mask])
# Train
loss = F.cross_entropy(y_pred[train_mask], y[train_mask])
optimizer.zero_grad()
loss.backward()
optimizer.step()
# Test
test_acc = eval_acc(y_pred[test_mask], y[test_mask])
history_test_acc.append(test_acc)
print(f'Train Acc = {train_acc}. Test Acc = {test_acc}')
kwargs['best_acc'] = max(history_test_acc)
plt.plot(list(range(num_epochs)), history_test_acc, label=f'case_{str(i_case).zfill(2)}')
plt.legend()
plt.savefig(f'{data_name}-HistoryAcc.jpg')
pd.DataFrame(test_cases).to_csv(f'{data_name}-Result.csv')
| [
"torch.unique",
"matplotlib.pyplot.savefig",
"random.shuffle",
"pandas.DataFrame",
"torch.nn.LeakyReLU",
"torch_geometric.nn.GCNConv",
"torch_geometric.nn.PairNorm",
"torch.tensor",
"torch.arange",
"pandas.read_table",
"torch_geometric.utils.undirected.to_undirected",
"torch.nn.functional.cross_entropy",
"torch_geometric.data.Data",
"matplotlib.pyplot.legend",
"torch.argmax"
]
| [((466, 532), 'pandas.read_table', 'pd.read_table', (['data_content_path'], {'header': 'None', 'dtype': '{(0): np.str}'}), '(data_content_path, header=None, dtype={(0): np.str})\n', (479, 532), True, 'import pandas as pd\n'), ((541, 597), 'pandas.read_table', 'pd.read_table', (['data_edge_path'], {'header': 'None', 'dtype': 'np.str'}), '(data_edge_path, header=None, dtype=np.str)\n', (554, 597), True, 'import pandas as pd\n'), ((1484, 1570), 'torch_geometric.data.Data', 'Data', ([], {'x': 'x', 'edge_index': 'edge_index', 'y': 'y', 'train_mask': 'train_mask', 'test_mask': 'test_mask'}), '(x=x, edge_index=edge_index, y=y, train_mask=train_mask, test_mask=\n test_mask)\n', (1488, 1570), False, 'from torch_geometric.data import Data\n'), ((6257, 6269), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (6267, 6269), True, 'import matplotlib.pyplot as plt\n'), ((6270, 6312), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""{data_name}-HistoryAcc.jpg"""'], {}), "(f'{data_name}-HistoryAcc.jpg')\n", (6281, 6312), True, 'import matplotlib.pyplot as plt\n'), ((1124, 1158), 'torch.tensor', 'torch.tensor', (['([False] * y.shape[0])'], {}), '([False] * y.shape[0])\n', (1136, 1158), False, 'import torch\n'), ((1346, 1370), 'torch.tensor', 'torch.tensor', (['train_mask'], {}), '(train_mask)\n', (1358, 1370), False, 'import torch\n'), ((1746, 1766), 'random.shuffle', 'random.shuffle', (['temp'], {}), '(temp)\n', (1760, 1766), False, 'import random\n'), ((1266, 1286), 'random.shuffle', 'random.shuffle', (['temp'], {}), '(temp)\n', (1280, 1286), False, 'import random\n'), ((2492, 2585), 'torch_geometric.nn.GCNConv', 'GCNConv', ([], {'in_channels': 'dim_hidden', 'out_channels': 'num_classes', 'add_self_loops': 'add_self_loops'}), '(in_channels=dim_hidden, out_channels=num_classes, add_self_loops=\n add_self_loops)\n', (2499, 2585), False, 'from torch_geometric.nn import GCNConv, PairNorm\n'), ((5503, 5528), 'torch_geometric.utils.undirected.to_undirected', 'to_undirected', (['edge_index'], {}), '(edge_index)\n', (5516, 5528), False, 'from torch_geometric.utils.undirected import to_undirected\n'), ((5801, 5851), 'torch.nn.functional.cross_entropy', 'F.cross_entropy', (['y_pred[train_mask]', 'y[train_mask]'], {}), '(y_pred[train_mask], y[train_mask])\n', (5816, 5851), True, 'import torch.nn.functional as F\n'), ((6313, 6337), 'pandas.DataFrame', 'pd.DataFrame', (['test_cases'], {}), '(test_cases)\n', (6325, 6337), True, 'import pandas as pd\n'), ((1172, 1187), 'torch.unique', 'torch.unique', (['y'], {}), '(y)\n', (1184, 1187), False, 'import torch\n'), ((2681, 2691), 'torch_geometric.nn.PairNorm', 'PairNorm', ([], {}), '()\n', (2689, 2691), False, 'from torch_geometric.nn import GCNConv, PairNorm\n'), ((2830, 2853), 'torch.nn.LeakyReLU', 'torch.nn.LeakyReLU', (['(0.1)'], {}), '(0.1)\n', (2848, 2853), False, 'import torch\n'), ((1213, 1240), 'torch.arange', 'torch.arange', (['(0)', 'y.shape[0]'], {}), '(0, y.shape[0])\n', (1225, 1240), False, 'import torch\n'), ((2229, 2323), 'torch_geometric.nn.GCNConv', 'GCNConv', ([], {'in_channels': 'dim_features', 'out_channels': 'dim_hidden', 'add_self_loops': 'add_self_loops'}), '(in_channels=dim_features, out_channels=dim_hidden, add_self_loops=\n add_self_loops)\n', (2236, 2323), False, 'from torch_geometric.nn import GCNConv, PairNorm\n'), ((2336, 2428), 'torch_geometric.nn.GCNConv', 'GCNConv', ([], {'in_channels': 'dim_hidden', 'out_channels': 'dim_hidden', 'add_self_loops': 'add_self_loops'}), '(in_channels=dim_hidden, out_channels=dim_hidden, 
add_self_loops=\n add_self_loops)\n', (2343, 2428), False, 'from torch_geometric.nn import GCNConv, PairNorm\n'), ((3284, 3312), 'torch.argmax', 'torch.argmax', (['y_pred'], {'dim': '(-1)'}), '(y_pred, dim=-1)\n', (3296, 3312), False, 'import torch\n')] |
import datetime
import json
from django.conf import settings
from django.http import Http404
from django.utils import timezone
from django.views import generic
from .models import Event, FlatPage, News
class HomeView(generic.ListView):
"""
View for the first page called 'Home'.
"""
context_object_name = 'event_list'
model = Event
template_name = 'home.html'
def get_queryset(self):
"""
Returns a queryset of all future events that should appear on home.
Uses settings.EVENT_DELAY_IN_MINUTES to determine the range.
"""
time_to_hide = timezone.now() - datetime.timedelta(
minutes=settings.EVENT_DELAY_IN_MINUTES)
queryset = super().get_queryset().filter(begin__gte=time_to_hide)
result = []
for event in queryset:
time_to_show = timezone.now() + datetime.timedelta(
days=event.on_home_before_begin)
if event.on_home_before_begin > 0 and event.begin <= time_to_show:
result.append(event)
return result
def get_context_data(self, **context):
"""
Adds all news to the context.
"""
news_list = News.objects.all()
return super().get_context_data(news_list=news_list, **context)
class CalendarView(generic.ListView):
"""
View for a calendar with all events.
"""
model = Event
template_name = 'calendar.html'
def get_context_data(self, **context):
"""
Returns the template context. Adds event data as JSON for use in
Javascript calendar.
"""
context = super().get_context_data(**context)
event_list = []
for event in context['event_list']:
event_dict = {
'title': event.title,
'start': event.begin.isoformat(),
'description': event.content,
'className': event.css_class_name}
if event.duration:
event_dict['end'] = event.end.isoformat()
event_list.append(event_dict)
context['event_list_json'] = json.dumps(event_list)
return context
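# Added illustration (values are made up): one entry of context['event_list_json']
# as consumed by the JavaScript calendar, built by the loop above:
#   {"title": "Spring meeting", "start": "2016-05-01T19:00:00+02:00",
#    "description": "<p>...</p>", "className": "event-default",
#    "end": "2016-05-01T21:00:00+02:00"}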
class FlatPageView(generic.DetailView):
"""
View for static pages.
"""
model = FlatPage
def get_object(self, queryset=None):
"""
        Returns the flatpage instance. Raises Http404 if it does not exist.
"""
queryset = queryset or self.get_queryset()
url = self.kwargs.get('url')
for flatpage in queryset.filter(slug=url.split('/')[-1]):
if flatpage.get_absolute_url().strip('/') == url:
obj = flatpage
break
else:
raise Http404
return obj
def get_template_names(self):
"""
Returns the template names for the view as list. The name
'flatpage_default.html' is always appended.
"""
template_names = []
if self.object.template_name:
template_names.append(self.object.template_name)
template_names.append('flatpage_default.html')
return template_names
def get_context_data(self, **context):
"""
        Returns the template context. Adds breadcrumb to it if necessary.
"""
context = super().get_context_data(**context)
parent = context['flatpage'].parent
if parent is None:
breadcrumb_list = []
else:
breadcrumb_list = [context['flatpage']]
while parent is not None:
breadcrumb_list.append(parent)
parent = parent.parent
breadcrumb_list.reverse()
context['breadcrumb_list'] = breadcrumb_list
return context
| [
"django.utils.timezone.now",
"datetime.timedelta",
"json.dumps"
]
| [((2115, 2137), 'json.dumps', 'json.dumps', (['event_list'], {}), '(event_list)\n', (2125, 2137), False, 'import json\n'), ((609, 623), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (621, 623), False, 'from django.utils import timezone\n'), ((626, 685), 'datetime.timedelta', 'datetime.timedelta', ([], {'minutes': 'settings.EVENT_DELAY_IN_MINUTES'}), '(minutes=settings.EVENT_DELAY_IN_MINUTES)\n', (644, 685), False, 'import datetime\n'), ((851, 865), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (863, 865), False, 'from django.utils import timezone\n'), ((868, 919), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': 'event.on_home_before_begin'}), '(days=event.on_home_before_begin)\n', (886, 919), False, 'import datetime\n')] |
import random
import matplotlib.pyplot as plt
import wandb
import hydra
import torch
import torch.utils.data as data_utils
from model import ChessPiecePredictor
from torch import nn, optim
from google.cloud import storage
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision.datasets import ImageFolder
@hydra.main(config_path="../conf", config_name="config")
def train(cfg):
print(f"Training started with parameters: {cfg}")
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
wandb.init()
torch.manual_seed(cfg.seed)
model = ChessPiecePredictor(
image_size=cfg.image_size,
patch_size=cfg.patch_size,
in_channels=cfg.in_channels,
embed_dim=cfg.embed_dim,
num_heads=cfg.num_heads,
)
wandb.watch(model)
t = transforms.Compose(
[
transforms.Resize((cfg.image_size, cfg.image_size)),
transforms.Grayscale(num_output_channels=cfg.in_channels),
transforms.ToTensor(),
]
)
train_data = ImageFolder(f"{cfg.data_path}/train", transform=t)
validation_data = ImageFolder(f"{cfg.data_path}/test", transform=t)
indices_train = random.sample(range(1, 60000), 5000)
indices_valid = random.sample(range(1, 30000), 1000)
train_data = data_utils.Subset(train_data, indices_train)
validation_data = data_utils.Subset(validation_data, indices_valid)
train_loader = DataLoader(train_data, batch_size=cfg.batch_size, shuffle=True)
validation_loader = DataLoader(validation_data, batch_size=cfg.batch_size, shuffle=True)
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=cfg.lr)
print("Training started...")
train_losses = []
validation_losses = []
batch_count = len(train_loader)
epochs = 2
for e in range(epochs):
train_loss = 0
train_correct = 0
validation_loss = 0
validation_correct = 0
i = 0
for images, labels in train_loader:
# in case we use cuda to train on gpu
images = images.to(device)
labels = labels.to(device)
optimizer.zero_grad()
preds = model(images)
loss = criterion(preds, labels)
loss.backward()
optimizer.step()
train_loss += loss.item()
# accuracy
_, preds_indices = torch.max(preds, dim=1)
train_correct += (preds_indices == labels).sum()
i += 1
if i % 100 == 0:
print(
f"Epoch: {e+1} / {epochs}"
f" - progress: {i} / {batch_count}"
f" - loss: {loss.data.mean()}"
)
for images, labels in validation_loader:
images = images.to(device)
labels = labels.to(device)
preds = model(images)
loss = criterion(preds, labels)
validation_loss += loss.item()
# accuracy
_, preds_indices = torch.max(preds, dim=1)
validation_correct += (preds_indices == labels).sum()
train_accuracy = float(train_correct / (len(train_loader) * cfg.batch_size))
validation_accuracy = float(validation_correct / (len(validation_loader) * cfg.batch_size))
wandb.log({
"train_loss": train_loss,
"validation_loss": validation_loss,
"train_accuracy": train_accuracy,
"validation_accuracy": validation_accuracy,
})
train_losses.append(train_loss / len(train_loader))
validation_losses.append(validation_loss / len(validation_loader))
# plotting
plt.plot(list(range(1, len(train_losses) + 1)), train_losses, label="Training loss")
print("Train losses:", train_losses)
plt.plot(list(range(1, len(validation_losses) + 1)), validation_losses, label="Validation loss")
print("Validation losses:", validation_losses)
plt.xlabel("epoch")
plt.ylabel("loss")
plt.legend()
fig_path = "training_run.png"
plt.savefig(fig_path)
print(f"Saved training loss figure to {fig_path}")
model_path = "trained_model.pth"
torch.save(model.state_dict(), model_path)
print(f"Saved trained model to {model_path}")
storage_client = storage.Client()
bucket = storage_client.bucket("chess_predictor")
blob = bucket.blob("model_blob")
    blob.upload_from_filename(model_path)  # upload the model file saved above
if __name__ == "__main__":
train()
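# Added sketch (not part of the original repo): the Hydra config loaded from
# ../conf/config.yaml must provide at least the keys read above; these values are
# illustrative placeholders, not the project's actual settings.
#
#   seed: 42
#   image_size: 224
#   patch_size: 16
#   in_channels: 1
#   embed_dim: 64
#   num_heads: 4
#   data_path: /path/to/chess/data   # expects train/ and test/ ImageFolder directories
#   batch_size: 32
#   lr: 1e-3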
| [
"wandb.log",
"torch.nn.CrossEntropyLoss",
"matplotlib.pyplot.ylabel",
"torch.max",
"torchvision.transforms.Grayscale",
"wandb.init",
"torch.cuda.is_available",
"hydra.main",
"matplotlib.pyplot.xlabel",
"torchvision.datasets.ImageFolder",
"torchvision.transforms.ToTensor",
"matplotlib.pyplot.savefig",
"torchvision.transforms.Resize",
"matplotlib.pyplot.legend",
"torch.manual_seed",
"google.cloud.storage.Client",
"torch.utils.data.Subset",
"wandb.watch",
"torch.utils.data.DataLoader",
"model.ChessPiecePredictor"
]
| [((346, 401), 'hydra.main', 'hydra.main', ([], {'config_path': '"""../conf"""', 'config_name': '"""config"""'}), "(config_path='../conf', config_name='config')\n", (356, 401), False, 'import hydra\n'), ((552, 564), 'wandb.init', 'wandb.init', ([], {}), '()\n', (562, 564), False, 'import wandb\n'), ((570, 597), 'torch.manual_seed', 'torch.manual_seed', (['cfg.seed'], {}), '(cfg.seed)\n', (587, 597), False, 'import torch\n'), ((611, 772), 'model.ChessPiecePredictor', 'ChessPiecePredictor', ([], {'image_size': 'cfg.image_size', 'patch_size': 'cfg.patch_size', 'in_channels': 'cfg.in_channels', 'embed_dim': 'cfg.embed_dim', 'num_heads': 'cfg.num_heads'}), '(image_size=cfg.image_size, patch_size=cfg.patch_size,\n in_channels=cfg.in_channels, embed_dim=cfg.embed_dim, num_heads=cfg.\n num_heads)\n', (630, 772), False, 'from model import ChessPiecePredictor\n'), ((815, 833), 'wandb.watch', 'wandb.watch', (['model'], {}), '(model)\n', (826, 833), False, 'import wandb\n'), ((1078, 1128), 'torchvision.datasets.ImageFolder', 'ImageFolder', (['f"""{cfg.data_path}/train"""'], {'transform': 't'}), "(f'{cfg.data_path}/train', transform=t)\n", (1089, 1128), False, 'from torchvision.datasets import ImageFolder\n'), ((1151, 1200), 'torchvision.datasets.ImageFolder', 'ImageFolder', (['f"""{cfg.data_path}/test"""'], {'transform': 't'}), "(f'{cfg.data_path}/test', transform=t)\n", (1162, 1200), False, 'from torchvision.datasets import ImageFolder\n'), ((1335, 1379), 'torch.utils.data.Subset', 'data_utils.Subset', (['train_data', 'indices_train'], {}), '(train_data, indices_train)\n', (1352, 1379), True, 'import torch.utils.data as data_utils\n'), ((1402, 1451), 'torch.utils.data.Subset', 'data_utils.Subset', (['validation_data', 'indices_valid'], {}), '(validation_data, indices_valid)\n', (1419, 1451), True, 'import torch.utils.data as data_utils\n'), ((1472, 1535), 'torch.utils.data.DataLoader', 'DataLoader', (['train_data'], {'batch_size': 'cfg.batch_size', 'shuffle': '(True)'}), '(train_data, batch_size=cfg.batch_size, shuffle=True)\n', (1482, 1535), False, 'from torch.utils.data import DataLoader\n'), ((1560, 1628), 'torch.utils.data.DataLoader', 'DataLoader', (['validation_data'], {'batch_size': 'cfg.batch_size', 'shuffle': '(True)'}), '(validation_data, batch_size=cfg.batch_size, shuffle=True)\n', (1570, 1628), False, 'from torch.utils.data import DataLoader\n'), ((1646, 1667), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (1665, 1667), False, 'from torch import nn, optim\n'), ((4020, 4039), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epoch"""'], {}), "('epoch')\n", (4030, 4039), True, 'import matplotlib.pyplot as plt\n'), ((4044, 4062), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""loss"""'], {}), "('loss')\n", (4054, 4062), True, 'import matplotlib.pyplot as plt\n'), ((4067, 4079), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (4077, 4079), True, 'import matplotlib.pyplot as plt\n'), ((4119, 4140), 'matplotlib.pyplot.savefig', 'plt.savefig', (['fig_path'], {}), '(fig_path)\n', (4130, 4140), True, 'import matplotlib.pyplot as plt\n'), ((4353, 4369), 'google.cloud.storage.Client', 'storage.Client', ([], {}), '()\n', (4367, 4369), False, 'from google.cloud import storage\n'), ((3369, 3528), 'wandb.log', 'wandb.log', (["{'train_loss': train_loss, 'validation_loss': validation_loss,\n 'train_accuracy': train_accuracy, 'validation_accuracy':\n validation_accuracy}"], {}), "({'train_loss': train_loss, 'validation_loss': validation_loss,\n 'train_accuracy': 
train_accuracy, 'validation_accuracy':\n validation_accuracy})\n", (3378, 3528), False, 'import wandb\n'), ((509, 534), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (532, 534), False, 'import torch\n'), ((885, 936), 'torchvision.transforms.Resize', 'transforms.Resize', (['(cfg.image_size, cfg.image_size)'], {}), '((cfg.image_size, cfg.image_size))\n', (902, 936), False, 'from torchvision import transforms\n'), ((950, 1007), 'torchvision.transforms.Grayscale', 'transforms.Grayscale', ([], {'num_output_channels': 'cfg.in_channels'}), '(num_output_channels=cfg.in_channels)\n', (970, 1007), False, 'from torchvision import transforms\n'), ((1021, 1042), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1040, 1042), False, 'from torchvision import transforms\n'), ((2449, 2472), 'torch.max', 'torch.max', (['preds'], {'dim': '(1)'}), '(preds, dim=1)\n', (2458, 2472), False, 'import torch\n'), ((3084, 3107), 'torch.max', 'torch.max', (['preds'], {'dim': '(1)'}), '(preds, dim=1)\n', (3093, 3107), False, 'import torch\n')] |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import importlib
import os
from abc import ABC, abstractmethod
from fairseq import registry
from omegaconf import DictConfig
class BaseScorer(ABC):
def __init__(self, cfg):
self.cfg = cfg
self.ref = []
self.pred = []
def add_string(self, ref, pred):
self.ref.append(ref)
self.pred.append(pred)
@abstractmethod
def score(self) -> float:
pass
@abstractmethod
def result_string(self) -> str:
pass
_build_scorer, register_scorer, SCORER_REGISTRY, _ = registry.setup_registry(
"--scoring", default="bleu"
)
def build_scorer(choice, tgt_dict):
if isinstance(choice, DictConfig):
choice = choice._name
if choice == "bleu":
from fairseq.scoring import bleu
return bleu.Scorer(
bleu.BleuConfig(pad=tgt_dict.pad(), eos=tgt_dict.eos(), unk=tgt_dict.unk())
)
return _build_scorer(choice)
# automatically import any Python files in the current directory
for file in os.listdir(os.path.dirname(__file__)):
if file.endswith(".py") and not file.startswith("_"):
module = file[: file.find(".py")]
importlib.import_module("fairseq.scoring." + module)
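# Added usage sketch: scorers obtained through this registry are expected to follow the
# BaseScorer interface above; assuming the chosen scorer implements add_string(), e.g.
#
#   scorer = build_scorer("chrf", tgt_dict)   # any registered --scoring choice
#   scorer.add_string("the reference sentence", "the hypothesis sentence")
#   print(scorer.result_string())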
| [
"os.path.dirname",
"fairseq.registry.setup_registry",
"importlib.import_module"
]
| [((714, 766), 'fairseq.registry.setup_registry', 'registry.setup_registry', (['"""--scoring"""'], {'default': '"""bleu"""'}), "('--scoring', default='bleu')\n", (737, 766), False, 'from fairseq import registry\n'), ((1197, 1222), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1212, 1222), False, 'import os\n'), ((1333, 1385), 'importlib.import_module', 'importlib.import_module', (["('fairseq.scoring.' + module)"], {}), "('fairseq.scoring.' + module)\n", (1356, 1385), False, 'import importlib\n')] |
import copy
import unittest
import networkx as nx
import numpy as np
from scipy.special import erf
from dfn import Fluid, FractureNetworkThermal
class TestFractureNetworkThermal(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(TestFractureNetworkThermal, self).__init__(*args, **kwargs)
# fluid properties
cp_w = 4300.0
rho_w = 1000.0
mu_w = 1E-3
self.fluid = Fluid(density=rho_w, viscosity=mu_w, heat_capacity=cp_w)
# reservoir properties
k_r = 2.9
cp_r = 1050.0
rho_r = 2700.0
alpha_r = k_r / (rho_r * cp_r)
# first network
conn_1 = [(0, 1), (1, 2), (1, 3), (2, 4), (3, 4), (4, 5)]
L_1 = [100, 500, 500, 500, 500, 100]
H_1 = [500, 500, 500, 500, 500, 500]
w_1 = [1E-3, 1E-3, 1E-3, 1E-3, 1E-3, 1E-3]
self.network_1 = FractureNetworkThermal(conn_1, L_1, H_1, w_1, k_r,
alpha_r)
# second network
conn_2 = [(0, 1), (1, 2), (2, 3), (1, 4), (2, 5), (3, 6), (4, 5),
(5, 6), (4, 7), (5, 8), (6, 9), (7, 8), (8, 9), (9, 10)]
L_2 = 250 * np.ones(len(conn_2))
L_2[0] = 100
L_2[-1] = 100
H_2 = 500 * np.ones(len(conn_2))
w_2 = 1E-3 * np.ones(len(conn_2))
self.network_2 = FractureNetworkThermal(conn_2, L_2, H_2, w_2, k_r,
alpha_r)
def copy_networks(self):
"""Return a copy of the fracture networks."""
return copy.copy(self.network_1), copy.copy(self.network_2)
def networks_with_flow(self):
"""Return networks with the mass flow calculated."""
network_1, network_2 = self.copy_networks()
P_0 = 0.0
m_inj = 50.0
network_1.calculate_flow(self.fluid, {0: P_0}, {5: -m_inj})
network_2.calculate_flow(self.fluid, {0: P_0}, {10: -m_inj})
return network_1, network_2
def reverse_nodes(self, network, segments):
"""Reverse the node order for given segments."""
conn = network.connectivity
for seg in segments:
inlet, outlet = conn[seg]
conn[seg, :] = outlet, inlet
network.connectivity = conn
return network
def test_no_mass_flow(self):
"""Test if TypeError is raised for networks without flow calculated."""
with self.assertRaises(TypeError):
self.network_1._check_if_calculated()
with self.assertRaises(TypeError):
self.network_2._check_if_calculated()
def test_neg_mass_flow(self):
"""Test if valueError is raised for networks with negative flow."""
network_1, network_2 = self.networks_with_flow()
network_1 = self.reverse_nodes(network_1, [1])
network_2 = self.reverse_nodes(network_2, [1])
network_1.calculate_flow(self.fluid, {0: 0}, {5: -1.0})
network_2.calculate_flow(self.fluid, {0: 0}, {10: -1.0})
with self.assertRaises(ValueError):
network_1.calculate_temperature(self.fluid, 0, [0], [1])
with self.assertRaises(ValueError):
network_2.calculate_temperature(self.fluid, 0, [0], [1])
def test_construct_graph(self):
"""Test _construct_graph method."""
network_1, network_2 = self.networks_with_flow()
network_1._construct_graph()
network_2._construct_graph()
# construct graph for network 1
G_1 = nx.MultiDiGraph()
edge_data_1 = [(0, 1, {'index': 0}), (1, 2, {'index': 1}),
(1, 3, {'index': 2}), (2, 4, {'index': 3}),
(3, 4, {'index': 4}), (4, 5, {'index': 5})]
G_1.add_edges_from(edge_data_1)
# construct graph for network 2
G_2 = nx.MultiDiGraph()
edge_data_2 = [(0, 1, {'index': 0}), (1, 2, {'index': 1}),
(2, 3, {'index': 2}), (1, 4, {'index': 3}),
(2, 5, {'index': 4}), (3, 6, {'index': 5}),
(4, 5, {'index': 6}), (5, 6, {'index': 7}),
(4, 7, {'index': 8}), (5, 8, {'index': 9}),
(6, 9, {'index': 10}), (7, 8, {'index': 11}),
(8, 9, {'index': 12}), (9, 10, {'index': 13})]
G_2.add_edges_from(edge_data_2)
# return True if graphs are the same
is_isomorphic_1 = nx.is_isomorphic(network_1.graph, G_1)
is_isomorphic_2 = nx.is_isomorphic(network_2.graph, G_2)
self.assertTrue(is_isomorphic_1)
self.assertTrue(is_isomorphic_2)
def test_find_injection_nodes(self):
"""Test _find_injection_nodes method."""
network_1, network_2 = self.networks_with_flow()
network_1._construct_graph()
network_2._construct_graph()
self.assertEqual(network_1._find_injection_nodes(), [0])
self.assertEqual(network_2._find_injection_nodes(), [0])
def test_mass_contribution(self):
"""Test _mass_contribution method."""
network_1, network_2 = self.networks_with_flow()
chi_1 = network_1._mass_contribution()
chi_2 = network_2._mass_contribution()
# first network
for i in (0, 1, 2, 5):
self.assertAlmostEqual(chi_1[i], 1.0, 12)
self.assertAlmostEqual(chi_1[3] + chi_1[4], 1.0, 12)
# second network
for i in (0, 1, 2, 3, 8, 13):
self.assertAlmostEqual(chi_2[i], 1.0, 12)
for i, j in [(4, 6), (5, 7), (9, 11), (10, 12)]:
self.assertAlmostEqual(chi_2[i] + chi_2[j], 1.0, 12)
def test_find_paths(self):
"""Test find_paths method."""
# .find_paths method calls .construct_graph if needed. Manually call
# .construct_graph() on one network for testing both True and False
# conditions
network_1, network_2 = self.networks_with_flow()
network_1._construct_graph()
path_1 = {(0, 1, 3), (0, 2, 4)}
path_2 = {(0, 1, 2, 5, 10), (0, 1, 4, 7, 10), (0, 3, 6, 7, 10),
(0, 3, 6, 9, 12), (0, 3, 8, 11, 12), (0, 1, 4, 9, 12)}
self.assertEqual(path_1, set(network_1.find_paths(0, 4)))
self.assertEqual(path_2, set(network_2.find_paths(0, 9)))
def test_calculate_temperature_inlet_segment(self):
"""Test calculate_temperature ability to handle the inlet segment."""
# operational parameters for temperature
t_end = 86400 * 365.25 * 20
time = t_end * np.linspace(1.0 / 100, 1.0, 100)
distance = np.linspace(0.0, 100.0, 100)
z, t = np.meshgrid(distance, time)
network_1, network_2 = self.networks_with_flow()
# create parameters for temperature manually
m_1 = network_1.mass_flow[0]
m_2 = network_2.mass_flow[0]
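        # beta = 2 * k_r * H / (m * c_f) for the inlet segment (conductivity, thickness, mass flow, fluid heat capacity)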
beta_1 = 2 * network_1.thermal_cond * network_1.thickness[0] / \
(m_1 * network_1.fluid.c_f)
beta_2 = 2 * network_2.thermal_cond * network_2.thickness[0] / \
(m_2 * network_2.fluid.c_f)
xi_1 = beta_1 * z / (2 * np.sqrt(network_1.thermal_diff * t))
xi_2 = beta_2 * z / (2 * np.sqrt(network_2.thermal_diff * t))
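        # expected dimensionless temperature of a single segment: Theta = erf(xi)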
Theta_1 = erf(xi_1)
Theta_2 = erf(xi_2)
# difference between manual and automatic construction
diff_1 = Theta_1 - network_1.calculate_temperature(self.fluid, 0,
distance, time)
diff_2 = Theta_2 - network_2.calculate_temperature(self.fluid, 0,
distance, time)
self.assertAlmostEqual((diff_1**2).sum() / (Theta_1**2).sum(), 0, 12)
self.assertAlmostEqual((diff_2**2).sum() / (Theta_2**2).sum(), 0, 12)
def test_calculate_temperature(self):
"""Test calculate_temperature by constructing manual the equations."""
# operational parameters for temperature
t_end = 86400 * 365.25 * 20
time = t_end * np.linspace(1.0 / 100, 1.0, 100)
distance = np.linspace(0.0, 100.0, 100)
z, t = np.meshgrid(distance, time)
network_1, network_2 = self.networks_with_flow()
# create parameters for temperature manually
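        # hard-code the mass-contribution factors: merging segment pairs contribute 0.5 each (cf. test_mass_contribution)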
chi_1 = np.array([1.0, 1.0, 1.0, 0.5, 0.5, 1.0])
chi_2 = np.ones(network_2.n_segments)
chi_2[4:8] = 0.5
chi_2[9:13] = 0.5
m_1 = network_1.mass_flow
m_2 = network_2.mass_flow
beta_1 = 2 * network_1.thermal_cond * network_1.thickness / \
(m_1 * network_1.fluid.c_f)
beta_2 = 2 * network_2.thermal_cond * network_2.thickness / \
(m_2 * network_2.fluid.c_f)
xi_1 = np.einsum('i,jk->ijk', beta_1 * network_1.length,
1 / (2 * np.sqrt(network_1.thermal_diff * t)))
xi_2 = np.einsum('i,jk->ijk', beta_2 * network_2.length,
1 / (2 * np.sqrt(network_2.thermal_diff * t)))
a = xi_1[[0, 2, 4], :, :].sum(axis=0)
b = xi_1[[0, 1, 3], :, :].sum(axis=0)
xi_seg = beta_1[-1] * z / (2 * np.sqrt(network_1.thermal_diff * t))
Theta_1 = chi_1[0] * chi_1[2] * chi_1[4] * erf(a + xi_seg) + \
chi_1[0] * chi_1[1] * chi_1[3] * erf(b + xi_seg)
a = xi_2[[0, 1, 2, 5, 10], :, :].sum(axis=0)
b = xi_2[[0, 1, 4, 7, 10], :, :].sum(axis=0)
c = xi_2[[0, 3, 6, 7, 10], :, :].sum(axis=0)
d = xi_2[[0, 3, 6, 9, 12], :, :].sum(axis=0)
e = xi_2[[0, 3, 8, 11, 12], :, :].sum(axis=0)
f = xi_2[[0, 1, 4, 9, 12], :, :].sum(axis=0)
C_1 = chi_2[0] * chi_2[1] * chi_2[2] * chi_2[5] * chi_2[10]
C_2 = chi_2[0] * chi_2[1] * chi_2[4] * chi_2[7] * chi_2[10]
C_3 = chi_2[0] * chi_2[3] * chi_2[6] * chi_2[7] * chi_2[10]
C_4 = chi_2[0] * chi_2[3] * chi_2[6] * chi_2[9] * chi_2[12]
C_5 = chi_2[0] * chi_2[3] * chi_2[8] * chi_2[11] * chi_2[12]
C_6 = chi_2[0] * chi_2[1] * chi_2[4] * chi_2[9] * chi_2[12]
xi_seg = beta_2[-1] * z / (2 * np.sqrt(network_2.thermal_diff * t))
Theta_2 = C_1 * erf(a + xi_seg) + C_2 * erf(b + xi_seg) + \
C_3 * erf(c + xi_seg) + C_4 * erf(d + xi_seg) + \
C_5 * erf(e + xi_seg) + C_6 * erf(f + xi_seg)
# difference between manual and automatic construction
diff_1 = Theta_1 - network_1.calculate_temperature(self.fluid, 5,
distance, time)
diff_2 = Theta_2 - network_2.calculate_temperature(self.fluid, 13,
distance, time)
self.assertAlmostEqual((diff_1**2).sum() / (Theta_1**2).sum(), 0, 12)
self.assertAlmostEqual((diff_2**2).sum() / (Theta_2**2).sum(), 0, 12)
if __name__ == '__main__':
unittest.main()
| [
"networkx.MultiDiGraph",
"networkx.is_isomorphic",
"numpy.sqrt",
"numpy.ones",
"dfn.FractureNetworkThermal",
"numpy.array",
"numpy.linspace",
"scipy.special.erf",
"dfn.Fluid",
"unittest.main",
"numpy.meshgrid",
"copy.copy"
]
| [((10801, 10816), 'unittest.main', 'unittest.main', ([], {}), '()\n', (10814, 10816), False, 'import unittest\n'), ((432, 488), 'dfn.Fluid', 'Fluid', ([], {'density': 'rho_w', 'viscosity': 'mu_w', 'heat_capacity': 'cp_w'}), '(density=rho_w, viscosity=mu_w, heat_capacity=cp_w)\n', (437, 488), False, 'from dfn import Fluid, FractureNetworkThermal\n'), ((880, 939), 'dfn.FractureNetworkThermal', 'FractureNetworkThermal', (['conn_1', 'L_1', 'H_1', 'w_1', 'k_r', 'alpha_r'], {}), '(conn_1, L_1, H_1, w_1, k_r, alpha_r)\n', (902, 939), False, 'from dfn import Fluid, FractureNetworkThermal\n'), ((1355, 1414), 'dfn.FractureNetworkThermal', 'FractureNetworkThermal', (['conn_2', 'L_2', 'H_2', 'w_2', 'k_r', 'alpha_r'], {}), '(conn_2, L_2, H_2, w_2, k_r, alpha_r)\n', (1377, 1414), False, 'from dfn import Fluid, FractureNetworkThermal\n'), ((3500, 3517), 'networkx.MultiDiGraph', 'nx.MultiDiGraph', ([], {}), '()\n', (3515, 3517), True, 'import networkx as nx\n'), ((3814, 3831), 'networkx.MultiDiGraph', 'nx.MultiDiGraph', ([], {}), '()\n', (3829, 3831), True, 'import networkx as nx\n'), ((4418, 4456), 'networkx.is_isomorphic', 'nx.is_isomorphic', (['network_1.graph', 'G_1'], {}), '(network_1.graph, G_1)\n', (4434, 4456), True, 'import networkx as nx\n'), ((4483, 4521), 'networkx.is_isomorphic', 'nx.is_isomorphic', (['network_2.graph', 'G_2'], {}), '(network_2.graph, G_2)\n', (4499, 4521), True, 'import networkx as nx\n'), ((6565, 6593), 'numpy.linspace', 'np.linspace', (['(0.0)', '(100.0)', '(100)'], {}), '(0.0, 100.0, 100)\n', (6576, 6593), True, 'import numpy as np\n'), ((6609, 6636), 'numpy.meshgrid', 'np.meshgrid', (['distance', 'time'], {}), '(distance, time)\n', (6620, 6636), True, 'import numpy as np\n'), ((7209, 7218), 'scipy.special.erf', 'erf', (['xi_1'], {}), '(xi_1)\n', (7212, 7218), False, 'from scipy.special import erf\n'), ((7237, 7246), 'scipy.special.erf', 'erf', (['xi_2'], {}), '(xi_2)\n', (7240, 7246), False, 'from scipy.special import erf\n'), ((8050, 8078), 'numpy.linspace', 'np.linspace', (['(0.0)', '(100.0)', '(100)'], {}), '(0.0, 100.0, 100)\n', (8061, 8078), True, 'import numpy as np\n'), ((8094, 8121), 'numpy.meshgrid', 'np.meshgrid', (['distance', 'time'], {}), '(distance, time)\n', (8105, 8121), True, 'import numpy as np\n'), ((8250, 8290), 'numpy.array', 'np.array', (['[1.0, 1.0, 1.0, 0.5, 0.5, 1.0]'], {}), '([1.0, 1.0, 1.0, 0.5, 0.5, 1.0])\n', (8258, 8290), True, 'import numpy as np\n'), ((8307, 8336), 'numpy.ones', 'np.ones', (['network_2.n_segments'], {}), '(network_2.n_segments)\n', (8314, 8336), True, 'import numpy as np\n'), ((1562, 1587), 'copy.copy', 'copy.copy', (['self.network_1'], {}), '(self.network_1)\n', (1571, 1587), False, 'import copy\n'), ((1589, 1614), 'copy.copy', 'copy.copy', (['self.network_2'], {}), '(self.network_2)\n', (1598, 1614), False, 'import copy\n'), ((6512, 6544), 'numpy.linspace', 'np.linspace', (['(1.0 / 100)', '(1.0)', '(100)'], {}), '(1.0 / 100, 1.0, 100)\n', (6523, 6544), True, 'import numpy as np\n'), ((7997, 8029), 'numpy.linspace', 'np.linspace', (['(1.0 / 100)', '(1.0)', '(100)'], {}), '(1.0 / 100, 1.0, 100)\n', (8008, 8029), True, 'import numpy as np\n'), ((7083, 7118), 'numpy.sqrt', 'np.sqrt', (['(network_1.thermal_diff * t)'], {}), '(network_1.thermal_diff * t)\n', (7090, 7118), True, 'import numpy as np\n'), ((7153, 7188), 'numpy.sqrt', 'np.sqrt', (['(network_2.thermal_diff * t)'], {}), '(network_2.thermal_diff * t)\n', (7160, 7188), True, 'import numpy as np\n'), ((9083, 9118), 'numpy.sqrt', 'np.sqrt', (['(network_1.thermal_diff * 
t)'], {}), '(network_1.thermal_diff * t)\n', (9090, 9118), True, 'import numpy as np\n'), ((9172, 9187), 'scipy.special.erf', 'erf', (['(a + xi_seg)'], {}), '(a + xi_seg)\n', (9175, 9187), False, 'from scipy.special import erf\n'), ((9237, 9252), 'scipy.special.erf', 'erf', (['(b + xi_seg)'], {}), '(b + xi_seg)\n', (9240, 9252), False, 'from scipy.special import erf\n'), ((10022, 10057), 'numpy.sqrt', 'np.sqrt', (['(network_2.thermal_diff * t)'], {}), '(network_2.thermal_diff * t)\n', (10029, 10057), True, 'import numpy as np\n'), ((10233, 10248), 'scipy.special.erf', 'erf', (['(f + xi_seg)'], {}), '(f + xi_seg)\n', (10236, 10248), False, 'from scipy.special import erf\n'), ((8777, 8812), 'numpy.sqrt', 'np.sqrt', (['(network_1.thermal_diff * t)'], {}), '(network_1.thermal_diff * t)\n', (8784, 8812), True, 'import numpy as np\n'), ((8914, 8949), 'numpy.sqrt', 'np.sqrt', (['(network_2.thermal_diff * t)'], {}), '(network_2.thermal_diff * t)\n', (8921, 8949), True, 'import numpy as np\n'), ((10209, 10224), 'scipy.special.erf', 'erf', (['(e + xi_seg)'], {}), '(e + xi_seg)\n', (10212, 10224), False, 'from scipy.special import erf\n'), ((10171, 10186), 'scipy.special.erf', 'erf', (['(d + xi_seg)'], {}), '(d + xi_seg)\n', (10174, 10186), False, 'from scipy.special import erf\n'), ((10147, 10162), 'scipy.special.erf', 'erf', (['(c + xi_seg)'], {}), '(c + xi_seg)\n', (10150, 10162), False, 'from scipy.special import erf\n'), ((10085, 10100), 'scipy.special.erf', 'erf', (['(a + xi_seg)'], {}), '(a + xi_seg)\n', (10088, 10100), False, 'from scipy.special import erf\n'), ((10109, 10124), 'scipy.special.erf', 'erf', (['(b + xi_seg)'], {}), '(b + xi_seg)\n', (10112, 10124), False, 'from scipy.special import erf\n')] |
"""
Author: <NAME>
"""
import numpy as np
import pandas as pd
from datetime import datetime
class TrackerFeeder(object):
"""
Feeder for the trackers of the FinanceHub database.
"""
def __init__(self, db_connect):
"""
Feeder construction
:param db_connect: sql connection engine from sqlalchemy
"""
self.conn = db_connect.connection
def fetch(self, fh_ticker):
"""
grabs trackers from the FH database
:param fh_ticker: str or list with the tickers from the database trackers
:return: pandas DataFrame with tickers on the columns
"""
assert type(fh_ticker) is str or type(fh_ticker) is list or type(fh_ticker) is dict, \
"'tickers' must be a string, list or dict"
sql_query = 'SELECT time_stamp, fh_ticker, value FROM "trackers" WHERE '
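        # build an IN (...) filter from a single ticker, a list of tickers, or a dict (dict keys are tickers, values are rename targets)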
if type(fh_ticker) is str:
sql_query = sql_query + "fh_ticker IN ('" + fh_ticker + "')"
elif type(fh_ticker) is list:
sql_query = sql_query + "fh_ticker IN ('" + "', '".join(fh_ticker) + "')"
elif type(fh_ticker) is dict:
sql_query = sql_query + "fh_ticker IN ('" + "', '".join(list(fh_ticker.keys())) + "')"
df = pd.read_sql(sql=sql_query, con=self.conn)
df = df.pivot(index='time_stamp', columns='fh_ticker', values='value')
if type(fh_ticker) is dict:
df = df.rename(fh_ticker, axis=1)
df.index = pd.to_datetime(df.index)
df = df.dropna(how='all')
df = df.sort_index()
return df
def fetch_metadata(self):
"""
        Returns the full metadata table of the FH trackers, which is useful for building custom filters and inspecting what
is in the database.
:return: pandas Dataframe
"""
sql_query = 'SELECT * FROM "trackers_description"'
df = pd.read_sql(sql=sql_query, con=self.conn)
return df
def filter_fetch(self, filter_dict, ret='series'):
"""
Grabs the trackers from the FH database that satisfy the criteria given by 'filter_dict'.
:param filter_dict: dict. Keys must be column names from the metadata table. Values must be
either str or list of str
        :param ret: If 'series', returns a dataframe with the tracker series that satisfy the conditions.
                    If 'tickers', returns a list of the tickers that satisfy the conditions.
:return: list or pandas DataFrame
"""
assert type(filter_dict) is dict, "'filter_dict' must be a dict"
assert len(filter_dict) > 0, "'filter_dict' is empty"
        assert ret.lower() in ['series', 'tickers'], "'ret' must be either 'series' or 'tickers'"
desc_query = 'SELECT fh_ticker FROM trackers_description WHERE '
for col in filter_dict.keys():
if type(filter_dict[col]) is list:
desc_query = desc_query + col + " IN ('" + "', '".join(filter_dict[col]) + "')"
else:
desc_query = desc_query + col + f" IN ('{filter_dict[col]}')"
desc_query = desc_query + ' and '
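        # strip the trailing " and " left over after the last condition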
desc_query = desc_query[:-5]
df = pd.read_sql(sql=desc_query, con=self.conn)
tickers = df.values.flatten().tolist()
if ret == 'tickers':
return tickers
df = self.fetch(tickers)
return df
def filter_parameters(self):
"""
Grabs the possible columns and their respective unique values from the metadata table.
        :return: dict. Keys are the column names, values are lists of the column's unique values.
"""
df = self.fetch_metadata()
param_dict = {}
for col in df.columns:
param_dict[col] = df[col].unique().tolist()
return param_dict
def fetch_everything(self):
sql_query = 'SELECT time_stamp, fh_ticker, value FROM "trackers"'
df = pd.read_sql(sql=sql_query, con=self.conn)
df = df.pivot(index='time_stamp', columns='fh_ticker', values='value')
df.index = pd.to_datetime(df.index)
df = df.dropna(how='all')
df = df.sort_index()
return df
class FocusFeeder(object):
def __init__(self, db_connect):
"""
Feeder construction
:param db_connect: sql connection engine from sqlalchemy
"""
self.conn = db_connect.connection
def fetch(self, index='ipca', frequency='yearly', prediction_scope=None,
dt_ini=None, dt_end=None):
"""
        Grabs data from the database and pivots the results into a dataframe. To ensure consistency, the function can
        only take one index at a time and one frequency at a time. Only 'prediction_scope' can be a list.
If no prediction scope is passed, all available prediction scopes are returned.
:param index: String containing the name of the index.
:param frequency: String. 'yearly', 'monthly' or 'quarterly' (availability depends on the index)
:param prediction_scope: string, float or list. Years that the forecasts are for.
:param dt_ini: string. Initial date for the series
:param dt_end: string. End date for the series
:return: pandas DataFrame with the pivoted data.
"""
# Error Checking
self._basic_assertions(index, frequency, prediction_scope)
# Handle formats
index, frequency, prediction_scope, dt_ini, dt_end, pivot \
= self._map_inputs(index, frequency, prediction_scope, dt_ini, dt_end)
# build sql query
sql_query = self._build_sql_query(index, frequency, prediction_scope, dt_ini, dt_end)
# get data
df = pd.read_sql(sql=sql_query, con=self.conn)
df = df.drop_duplicates()
# pivoting
df = df.pivot(index='date', columns=pivot, values='value')
df.index = pd.to_datetime(df.index)
return df
def years_ahead(self, index='IPCA', years=1, dt_ini=None, dt_end=None):
"""
        The metric attribute is set to 'mean' by default because further projections change smoothly
"""
# Error checking
self._basic_assertions_years_ahead(index, years)
# Handle formats
index, dt_ini, dt_end = self._map_inputs_years_ahead(index, dt_ini, dt_end)
# grabs the index for all available years for each date
df = self.fetch(index=index, frequency='yearly', prediction_scope=None,
dt_ini=dt_ini, dt_end=dt_end)
# creates the new dataframe
df_weighted = pd.DataFrame(index=df.index)
df_weighted[index + ' ' + str(years) + ' year ahead'] = np.nan
# days until year end
df_weighted['D2YE'] = ((df_weighted.index + pd.offsets.YearEnd()) -
pd.to_datetime(df_weighted.index.tolist())).days
for ind in df_weighted.index:
if ind.day == 31 and ind.month == 12:
df_weighted.loc[ind, 'D2YE'] = 0
# loops on each date
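        # time-weight the forecasts for the two relevant calendar years by the days remaining until year end (D2YE)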
for date in df_weighted.index:
df_weighted.loc[date, index + ' ' + str(years) + ' year ahead'] = \
(df.loc[date, str(date.year + years - 1)] * df_weighted.loc[date, 'D2YE'] +
df.loc[date, str(date.year + years)] * (365 - df_weighted.loc[date, 'D2YE'])) / 365
df = df_weighted[[index + ' ' + str(years) + ' year ahead']].interpolate()
df.index = pd.to_datetime(df.index)
return df
@staticmethod
def _basic_assertions(index, frequency, prediction_scope):
"""Check basic assertions"""
assert type(index) is str, 'index must be a string'
assert type(frequency) is str, 'frequency must be a string'
@staticmethod
def _map_inputs(index, frequency, prediction_scope, dt_ini, dt_end):
"""Handle formats of the inputs"""
# index
if type(index) is str:
index = index.lower()
elif type(index) is list:
index = [x.lower() for x in index]
# frequency
frequency = frequency.lower()
# prediction_scope
if type(prediction_scope) is str:
prediction_scope = prediction_scope.lower()
elif type(prediction_scope) is list:
prediction_scope = [str(x).lower() for x in prediction_scope]
elif prediction_scope is None:
prediction_scope = None
else:
prediction_scope = str(prediction_scope).lower()
# dates
if dt_ini is None:
dt_ini = '1900-01-01'
if dt_end is None:
dt_end = datetime.now().strftime('%Y-%m-%d')
        # pivot variable (while we have no metrics, it's always the prediction scope)
pivot = 'prediction_scope'
return index, frequency, prediction_scope, dt_ini, dt_end, pivot
@staticmethod
def _build_sql_query(index, frequency, prediction_scope, dt_ini, dt_end):
sql_query = 'SELECT DATE, VALUE, PREDICTION_SCOPE FROM "focus_survey" WHERE '
# index (must not be None)
if type(index) is str:
sql_query = sql_query + "lower(INDEX) IN ('" + index + "')"
elif type(index) is list:
sql_query = sql_query + "lower(INDEX) IN ('" + "', '".join(index) + "')"
# frequency
if type(frequency) is str:
sql_query = sql_query + " AND lower(FREQUENCY) IN ('" + frequency + "')"
elif type(frequency) is list:
sql_query = sql_query + " AND lower(FREQUENCY) IN ('" + "', '".join(frequency) + "')"
# prediction scope
if type(prediction_scope) is str:
sql_query = sql_query + " AND lower(PREDICTION_SCOPE) IN ('" + prediction_scope + "')"
elif type(prediction_scope) is list:
sql_query = sql_query + " AND lower(PREDICTION_SCOPE) IN ('" + "', '".join(prediction_scope) + "')"
sql_query = sql_query + " AND DATE BETWEEN '" + dt_ini + "' AND '" + dt_end + "'"
sql_query = sql_query + ' ORDER BY DATE;'
return sql_query
@staticmethod
def _basic_assertions_years_ahead(index, years):
"""Check basic assertions"""
assert type(index) is str, 'index must be a string'
assert (type(years) is int) and (years <= 4), 'number of years must be an intger between 1 and 4'
@staticmethod
def _map_inputs_years_ahead(index, dt_ini, dt_end):
"""Handles the format of the inputs of the years_ahead method"""
index = index.lower()
# dates
if dt_ini is None:
dt_ini = '1900-01-01'
if dt_end is None:
dt_end = datetime.now().strftime('%Y-%m-%d')
return index, dt_ini, dt_end
| [
"pandas.offsets.YearEnd",
"pandas.DataFrame",
"datetime.datetime.now",
"pandas.read_sql",
"pandas.to_datetime"
]
| [((1256, 1297), 'pandas.read_sql', 'pd.read_sql', ([], {'sql': 'sql_query', 'con': 'self.conn'}), '(sql=sql_query, con=self.conn)\n', (1267, 1297), True, 'import pandas as pd\n'), ((1480, 1504), 'pandas.to_datetime', 'pd.to_datetime', (['df.index'], {}), '(df.index)\n', (1494, 1504), True, 'import pandas as pd\n'), ((1890, 1931), 'pandas.read_sql', 'pd.read_sql', ([], {'sql': 'sql_query', 'con': 'self.conn'}), '(sql=sql_query, con=self.conn)\n', (1901, 1931), True, 'import pandas as pd\n'), ((3214, 3256), 'pandas.read_sql', 'pd.read_sql', ([], {'sql': 'desc_query', 'con': 'self.conn'}), '(sql=desc_query, con=self.conn)\n', (3225, 3256), True, 'import pandas as pd\n'), ((3962, 4003), 'pandas.read_sql', 'pd.read_sql', ([], {'sql': 'sql_query', 'con': 'self.conn'}), '(sql=sql_query, con=self.conn)\n', (3973, 4003), True, 'import pandas as pd\n'), ((4103, 4127), 'pandas.to_datetime', 'pd.to_datetime', (['df.index'], {}), '(df.index)\n', (4117, 4127), True, 'import pandas as pd\n'), ((5743, 5784), 'pandas.read_sql', 'pd.read_sql', ([], {'sql': 'sql_query', 'con': 'self.conn'}), '(sql=sql_query, con=self.conn)\n', (5754, 5784), True, 'import pandas as pd\n'), ((5925, 5949), 'pandas.to_datetime', 'pd.to_datetime', (['df.index'], {}), '(df.index)\n', (5939, 5949), True, 'import pandas as pd\n'), ((6621, 6649), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'df.index'}), '(index=df.index)\n', (6633, 6649), True, 'import pandas as pd\n'), ((7491, 7515), 'pandas.to_datetime', 'pd.to_datetime', (['df.index'], {}), '(df.index)\n', (7505, 7515), True, 'import pandas as pd\n'), ((6804, 6824), 'pandas.offsets.YearEnd', 'pd.offsets.YearEnd', ([], {}), '()\n', (6822, 6824), True, 'import pandas as pd\n'), ((8663, 8677), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (8675, 8677), False, 'from datetime import datetime\n'), ((10690, 10704), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (10702, 10704), False, 'from datetime import datetime\n')] |
# Copyright (c) 2018 European Organization for Nuclear Research.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
reaper_group = cfg.OptGroup(
'reaper',
title='Aardvark Service Options',
help="Configuration options for Aardvark service")
reaper_opts = [
cfg.StrOpt('reaper_driver',
default='chance_driver',
help="""
The driver that the reaper will use
Possible choices:
* strict_driver: The purpose of the preemptibles existence is to eliminate the
idling resources. This driver gets all the possible offers
from the relevant hosts and tries to find the best matching
for the requested resources. The best matching offer is the
combination of preemptible servers that leave the least
possible resources unused.
* chance_driver: A valid host is selected randomly and in a number of
preconfigured retries, the driver tries to find the instances
that have to be culled in order to have the requested
resources available.
"""
),
cfg.IntOpt('alternatives',
default=1,
help="""
The number of alternative slots that the the reaper will try to free up for
each requested slot.
"""
),
cfg.IntOpt('max_attempts',
default=5,
help="""
The number of alternative slots that the the reaper will try to free up for
each requested slot.
"""
),
cfg.ListOpt('watched_aggregates',
default=[],
help="""
The list of aggregate names that the reaper will try to make space to
Each element of the list can be an aggregate or a combination of aggregates.
Combination of aggregates is a single string with a vertical-line-separated
aggregate names.
e.g. watched_aggregates={agg_name1},{agg_name2}|{agg_name3}',....
For each element in the list, a reaper thread will be spawned and the request
will be forwarded to the responsible worker.
If the provided list is empty, only one worker will be spawned, responsible for
the whole system.
"""
),
cfg.StrOpt('job_backend',
default='redis',
choices=('redis', 'zookeeper'),
help="""
The backend to use for distributed task management.
For this purpose the Reaper uses OpenStack Taskflow. The two supported
backends are redis and zookeper.
"""
),
cfg.StrOpt('backend_host',
default='localhost',
help="""
Specifies the host where the job board backend can be found.
"""
),
]
def register_opts(conf):
conf.register_group(reaper_group)
conf.register_opts(reaper_opts, group=reaper_group)
| [
"oslo_config.cfg.ListOpt",
"oslo_config.cfg.OptGroup",
"oslo_config.cfg.StrOpt",
"oslo_config.cfg.IntOpt"
]
| [((708, 820), 'oslo_config.cfg.OptGroup', 'cfg.OptGroup', (['"""reaper"""'], {'title': '"""Aardvark Service Options"""', 'help': '"""Configuration options for Aardvark service"""'}), "('reaper', title='Aardvark Service Options', help=\n 'Configuration options for Aardvark service')\n", (720, 820), False, 'from oslo_config import cfg\n'), ((851, 1668), 'oslo_config.cfg.StrOpt', 'cfg.StrOpt', (['"""reaper_driver"""'], {'default': '"""chance_driver"""', 'help': '"""\nThe driver that the reaper will use\n\nPossible choices:\n\n* strict_driver: The purpose of the preemptibles existence is to eliminate the\n idling resources. This driver gets all the possible offers\n from the relevant hosts and tries to find the best matching\n for the requested resources. The best matching offer is the\n combination of preemptible servers that leave the least\n possible resources unused.\n\n* chance_driver: A valid host is selected randomly and in a number of\n preconfigured retries, the driver tries to find the instances\n that have to be culled in order to have the requested\n resources available.\n"""'}), '(\'reaper_driver\', default=\'chance_driver\', help=\n """\nThe driver that the reaper will use\n\nPossible choices:\n\n* strict_driver: The purpose of the preemptibles existence is to eliminate the\n idling resources. This driver gets all the possible offers\n from the relevant hosts and tries to find the best matching\n for the requested resources. The best matching offer is the\n combination of preemptible servers that leave the least\n possible resources unused.\n\n* chance_driver: A valid host is selected randomly and in a number of\n preconfigured retries, the driver tries to find the instances\n that have to be culled in order to have the requested\n resources available.\n"""\n )\n', (861, 1668), False, 'from oslo_config import cfg\n'), ((1699, 1857), 'oslo_config.cfg.IntOpt', 'cfg.IntOpt', (['"""alternatives"""'], {'default': '(1)', 'help': '"""\nThe number of alternative slots that the the reaper will try to free up for\neach requested slot.\n"""'}), '(\'alternatives\', default=1, help=\n """\nThe number of alternative slots that the the reaper will try to free up for\neach requested slot.\n"""\n )\n', (1709, 1857), False, 'from oslo_config import cfg\n'), ((1888, 2046), 'oslo_config.cfg.IntOpt', 'cfg.IntOpt', (['"""max_attempts"""'], {'default': '(5)', 'help': '"""\nThe number of alternative slots that the the reaper will try to free up for\neach requested slot.\n"""'}), '(\'max_attempts\', default=5, help=\n """\nThe number of alternative slots that the the reaper will try to free up for\neach requested slot.\n"""\n )\n', (1898, 2046), False, 'from oslo_config import cfg\n'), ((2077, 2677), 'oslo_config.cfg.ListOpt', 'cfg.ListOpt', (['"""watched_aggregates"""'], {'default': '[]', 'help': '"""\nThe list of aggregate names that the reaper will try to make space to\n\nEach element of the list can be an aggregate or a combination of aggregates.\nCombination of aggregates is a single string with a vertical-line-separated\naggregate names.\n\ne.g. 
watched_aggregates={agg_name1},{agg_name2}|{agg_name3}\',....\n\nFor each element in the list, a reaper thread will be spawned and the request\nwill be forwarded to the responsible worker.\n\nIf the provided list is empty, only one worker will be spawned, responsible for\nthe whole system.\n"""'}), '(\'watched_aggregates\', default=[], help=\n """\nThe list of aggregate names that the reaper will try to make space to\n\nEach element of the list can be an aggregate or a combination of aggregates.\nCombination of aggregates is a single string with a vertical-line-separated\naggregate names.\n\ne.g. watched_aggregates={agg_name1},{agg_name2}|{agg_name3}\',....\n\nFor each element in the list, a reaper thread will be spawned and the request\nwill be forwarded to the responsible worker.\n\nIf the provided list is empty, only one worker will be spawned, responsible for\nthe whole system.\n"""\n )\n', (2088, 2677), False, 'from oslo_config import cfg\n'), ((2708, 2967), 'oslo_config.cfg.StrOpt', 'cfg.StrOpt', (['"""job_backend"""'], {'default': '"""redis"""', 'choices': "('redis', 'zookeeper')", 'help': '"""\nThe backend to use for distributed task management.\n\nFor this purpose the Reaper uses OpenStack Taskflow. The two supported\nbackends are redis and zookeper.\n"""'}), '(\'job_backend\', default=\'redis\', choices=(\'redis\', \'zookeeper\'),\n help=\n """\nThe backend to use for distributed task management.\n\nFor this purpose the Reaper uses OpenStack Taskflow. The two supported\nbackends are redis and zookeper.\n"""\n )\n', (2718, 2967), False, 'from oslo_config import cfg\n'), ((3009, 3136), 'oslo_config.cfg.StrOpt', 'cfg.StrOpt', (['"""backend_host"""'], {'default': '"""localhost"""', 'help': '"""\nSpecifies the host where the job board backend can be found.\n"""'}), '(\'backend_host\', default=\'localhost\', help=\n """\nSpecifies the host where the job board backend can be found.\n""")\n', (3019, 3136), False, 'from oslo_config import cfg\n')] |
"""
This file is part of LiberaForms.
# SPDX-FileCopyrightText: 2020 LiberaForms.org
# SPDX-License-Identifier: AGPL-3.0-or-later
"""
import os, json
from flask import g, request, render_template, redirect
from flask import session, flash, Blueprint
from flask import send_file, after_this_request
from flask_babel import gettext as _
from liberaforms.models.user import User
from liberaforms.models.form import Form
from liberaforms.models.site import Site
from liberaforms.models.invite import Invite
from liberaforms.utils.wraps import *
from liberaforms.utils import utils
from liberaforms.utils.utils import make_url_for, JsonResponse
from liberaforms.utils.dispatcher import Dispatcher
from liberaforms.utils import wtf
from pprint import pprint
admin_bp = Blueprint('admin_bp', __name__,
template_folder='../templates/admin')
@admin_bp.route('/admin', methods=['GET'])
@admin_required
def site_admin():
return render_template('admin-panel.html',
user=g.current_user,
app_version=utils.get_app_version(),
site=g.site)
""" User management """
@admin_bp.route('/admin/users', methods=['GET'])
@admin_required
def list_users():
return render_template('list-users.html',
users=User.find_all(),
invites=Invite.find_all())
@admin_bp.route('/admin/users/<int:id>', methods=['GET'])
@admin_required
def inspect_user(id):
user=User.find(id=id)
if not user:
flash(_("User not found"), 'warning')
return redirect(make_url_for('admin_bp.list_users'))
return render_template('inspect-user.html', user=user)
@admin_bp.route('/admin/users/toggle-blocked/<int:id>', methods=['POST'])
@admin_required
def toggle_user_blocked(id):
user=User.find(id=id)
if not user:
return JsonResponse(json.dumps())
if user.id == g.current_user.id:
# current_user cannot disable themself
blocked=user.blocked
else:
blocked=user.toggle_blocked()
return JsonResponse(json.dumps({'blocked':blocked}))
@admin_bp.route('/admin/users/toggle-admin/<int:id>', methods=['POST'])
@admin_required
def toggle_admin(id):
user=User.find(id=id)
if not user:
return JsonResponse(json.dumps())
if user.username == g.current_user.username:
# current_user cannot remove their own admin permission
is_admin=True
else:
is_admin=user.toggle_admin()
return JsonResponse(json.dumps({'admin':is_admin}))
@admin_bp.route('/admin/users/toggle-uploads-enabled/<int:id>', methods=['POST'])
@admin_required
def toggle_uploads_enabled(id):
user=User.find(id=id)
if not user:
return JsonResponse(json.dumps())
uploads_enabled=user.toggle_uploads_enabled()
return JsonResponse(json.dumps({'uploads_enabled':uploads_enabled}))
@admin_bp.route('/admin/users/delete/<int:id>', methods=['GET', 'POST'])
@admin_required
def delete_user(id):
user=User.find(id=id)
if not user:
flash(_("User not found"), 'warning')
return redirect(make_url_for('admin_bp.list_users'))
if request.method == 'POST' and 'username' in request.form:
if user.is_root_user():
flash(_("Cannot delete root user"), 'warning')
return redirect(make_url_for('admin_bp.inspect_user', id=user.id))
if user.id == g.current_user.id:
flash(_("Cannot delete yourself"), 'warning')
return redirect(make_url_for('admin_bp.inspect_user',
username=user.username))
if user.username == request.form['username']:
user.delete_user()
flash(_("Deleted user '%s'" % (user.username)), 'success')
return redirect(make_url_for('admin_bp.list_users'))
else:
flash(_("Username does not match"), 'warning')
return render_template('delete-user.html', user=user)
@admin_bp.route('/admin/users/csv', methods=['GET'])
@admin_required
def csv_users():
csv_file = g.site.write_users_csv()
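    # the exported file is removed from disk once the response has been sent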
@after_this_request
def remove_file(response):
os.remove(csv_file)
return response
return send_file(csv_file, mimetype="text/csv", as_attachment=True)
""" Form management """
@admin_bp.route('/admin/forms', methods=['GET'])
@admin_required
def list_forms():
return render_template('list-forms.html', forms=Form.find_all())
@admin_bp.route('/admin/forms/toggle-public/<int:id>', methods=['GET'])
@admin_required
def toggle_form_public_admin_prefs(id):
queriedForm = Form.find(id=id)
if not queriedForm:
flash(_("Can't find that form"), 'warning')
return redirect(make_url_for('form_bp.my_forms'))
queriedForm.toggle_admin_form_public()
return redirect(make_url_for('form_bp.inspect_form', form_id=id))
""" Invitations """
@admin_bp.route('/admin/invites', methods=['GET'])
@admin_required
def list_invites():
return render_template('list-invites.html', invites=Invite.find_all())
@admin_bp.route('/admin/invites/new', methods=['GET', 'POST'])
@admin_required
def new_invite():
wtform=wtf.NewInvite()
if wtform.validate_on_submit():
message=wtform.message.data
token = utils.create_token(Invite)
#pprint(token)
new_invite=Invite( email=wtform.email.data,
message=message,
token=token,
admin=wtform.admin.data)
new_invite.save()
status = Dispatcher().send_invitation(new_invite)
if status['email_sent'] == True:
flash_text = _("We have sent an invitation to %s" % new_invite.email)
flash(flash_text, 'success')
else:
flash(status['msg'], 'warning')
return redirect(make_url_for('admin_bp.list_invites'))
wtform.message.data=Invite.default_message()
return render_template('new-invite.html',
wtform=wtform,
total_invites=Invite.find_all().count())
@admin_bp.route('/admin/invites/delete/<int:id>', methods=['GET'])
@admin_required
def delete_invite(id):
invite=Invite.find(id=id)
if invite:
invite.delete()
# i18n: Invitation to <EMAIL> deleted OK
flash(_("Invitation to %s deleted OK" % invite.email), 'success')
else:
flash(_("Opps! We can't find that invitation"), 'error')
return redirect(make_url_for('admin_bp.list_invites'))
""" Personal Admin preferences """
@admin_bp.route('/admin/toggle-newuser-notification', methods=['POST'])
@admin_required
def toggle_newUser_notification():
return json.dumps({'notify': g.current_user.toggle_new_user_notification()})
@admin_bp.route('/admin/toggle-newform-notification', methods=['POST'])
@admin_required
def toggle_newForm_notification():
return json.dumps({'notify': g.current_user.toggle_new_form_notification()})
""" ROOT_USERS functions
"""
@admin_bp.route('/admin/forms/change-author/<int:form_id>', methods=['GET', 'POST'])
@rootuser_required
def change_author(form_id):
queriedForm = Form.find(id=form_id)
if not queriedForm:
flash(_("Can't find that form"), 'warning')
return redirect(make_url_for('user_bp.my_forms'))
if request.method == 'POST':
author = queriedForm.author
if not ('old_author_username' in request.form and \
request.form['old_author_username']==author.username):
flash(_("Current author incorrect"), 'warning')
return render_template('change-author.html', form=queriedForm)
if 'new_author_username' in request.form:
new_author=User.find(username=request.form['new_author_username'])
if new_author:
if new_author.enabled:
old_author=author
if queriedForm.change_author(new_author):
log_text = _("Changed author from %s to %s" % (
old_author.username,
new_author.username))
queriedForm.add_log(log_text)
flash(_("Changed author OK"), 'success')
return redirect(make_url_for('form_bp.inspect_form',
form_id=queriedForm.id))
else:
flash(_("Cannot use %s. The user is not enabled" % (
request.form['new_author_username']),
), 'warning')
else:
flash(_("Can't find username %s" % (
request.form['new_author_username'])
), 'warning')
return render_template('change-author.html', form=queriedForm)
| [
"flask.render_template",
"liberaforms.models.invite.Invite.find_all",
"flask.g.current_user.toggle_new_form_notification",
"liberaforms.models.invite.Invite",
"liberaforms.models.form.Form.find",
"liberaforms.utils.utils.get_app_version",
"os.remove",
"liberaforms.models.user.User.find_all",
"flask.flash",
"json.dumps",
"liberaforms.models.user.User.find",
"liberaforms.utils.utils.make_url_for",
"flask_babel.gettext",
"liberaforms.utils.dispatcher.Dispatcher",
"liberaforms.models.invite.Invite.default_message",
"flask.send_file",
"liberaforms.utils.wtf.NewInvite",
"flask.g.site.write_users_csv",
"liberaforms.models.form.Form.find_all",
"liberaforms.models.invite.Invite.find",
"liberaforms.utils.utils.create_token",
"flask.Blueprint",
"flask.g.current_user.toggle_new_user_notification"
]
| [((768, 837), 'flask.Blueprint', 'Blueprint', (['"""admin_bp"""', '__name__'], {'template_folder': '"""../templates/admin"""'}), "('admin_bp', __name__, template_folder='../templates/admin')\n", (777, 837), False, 'from flask import session, flash, Blueprint\n'), ((1506, 1522), 'liberaforms.models.user.User.find', 'User.find', ([], {'id': 'id'}), '(id=id)\n', (1515, 1522), False, 'from liberaforms.models.user import User\n'), ((1658, 1705), 'flask.render_template', 'render_template', (['"""inspect-user.html"""'], {'user': 'user'}), "('inspect-user.html', user=user)\n", (1673, 1705), False, 'from flask import g, request, render_template, redirect\n'), ((1836, 1852), 'liberaforms.models.user.User.find', 'User.find', ([], {'id': 'id'}), '(id=id)\n', (1845, 1852), False, 'from liberaforms.models.user import User\n'), ((2251, 2267), 'liberaforms.models.user.User.find', 'User.find', ([], {'id': 'id'}), '(id=id)\n', (2260, 2267), False, 'from liberaforms.models.user import User\n'), ((2705, 2721), 'liberaforms.models.user.User.find', 'User.find', ([], {'id': 'id'}), '(id=id)\n', (2714, 2721), False, 'from liberaforms.models.user import User\n'), ((3024, 3040), 'liberaforms.models.user.User.find', 'User.find', ([], {'id': 'id'}), '(id=id)\n', (3033, 3040), False, 'from liberaforms.models.user import User\n'), ((3936, 3982), 'flask.render_template', 'render_template', (['"""delete-user.html"""'], {'user': 'user'}), "('delete-user.html', user=user)\n", (3951, 3982), False, 'from flask import g, request, render_template, redirect\n'), ((4086, 4110), 'flask.g.site.write_users_csv', 'g.site.write_users_csv', ([], {}), '()\n', (4108, 4110), False, 'from flask import g, request, render_template, redirect\n'), ((4229, 4289), 'flask.send_file', 'send_file', (['csv_file'], {'mimetype': '"""text/csv"""', 'as_attachment': '(True)'}), "(csv_file, mimetype='text/csv', as_attachment=True)\n", (4238, 4289), False, 'from flask import send_file, after_this_request\n'), ((4617, 4633), 'liberaforms.models.form.Form.find', 'Form.find', ([], {'id': 'id'}), '(id=id)\n', (4626, 4633), False, 'from liberaforms.models.form import Form\n'), ((5176, 5191), 'liberaforms.utils.wtf.NewInvite', 'wtf.NewInvite', ([], {}), '()\n', (5189, 5191), False, 'from liberaforms.utils import wtf\n'), ((5915, 5939), 'liberaforms.models.invite.Invite.default_message', 'Invite.default_message', ([], {}), '()\n', (5937, 5939), False, 'from liberaforms.models.invite import Invite\n'), ((6217, 6235), 'liberaforms.models.invite.Invite.find', 'Invite.find', ([], {'id': 'id'}), '(id=id)\n', (6228, 6235), False, 'from liberaforms.models.invite import Invite\n'), ((7162, 7183), 'liberaforms.models.form.Form.find', 'Form.find', ([], {'id': 'form_id'}), '(id=form_id)\n', (7171, 7183), False, 'from liberaforms.models.form import Form\n'), ((8843, 8898), 'flask.render_template', 'render_template', (['"""change-author.html"""'], {'form': 'queriedForm'}), "('change-author.html', form=queriedForm)\n", (8858, 8898), False, 'from flask import g, request, render_template, redirect\n'), ((2097, 2129), 'json.dumps', 'json.dumps', (["{'blocked': blocked}"], {}), "({'blocked': blocked})\n", (2107, 2129), False, 'import os, json\n'), ((2533, 2564), 'json.dumps', 'json.dumps', (["{'admin': is_admin}"], {}), "({'admin': is_admin})\n", (2543, 2564), False, 'import os, json\n'), ((2855, 2903), 'json.dumps', 'json.dumps', (["{'uploads_enabled': uploads_enabled}"], {}), "({'uploads_enabled': uploads_enabled})\n", (2865, 2903), False, 'import os, json\n'), ((4174, 4193), 
'os.remove', 'os.remove', (['csv_file'], {}), '(csv_file)\n', (4183, 4193), False, 'import os, json\n'), ((4831, 4879), 'liberaforms.utils.utils.make_url_for', 'make_url_for', (['"""form_bp.inspect_form"""'], {'form_id': 'id'}), "('form_bp.inspect_form', form_id=id)\n", (4843, 4879), False, 'from liberaforms.utils.utils import make_url_for, JsonResponse\n'), ((5280, 5306), 'liberaforms.utils.utils.create_token', 'utils.create_token', (['Invite'], {}), '(Invite)\n', (5298, 5306), False, 'from liberaforms.utils import utils\n'), ((5349, 5440), 'liberaforms.models.invite.Invite', 'Invite', ([], {'email': 'wtform.email.data', 'message': 'message', 'token': 'token', 'admin': 'wtform.admin.data'}), '(email=wtform.email.data, message=message, token=token, admin=wtform.\n admin.data)\n', (5355, 5440), False, 'from liberaforms.models.invite import Invite\n'), ((6493, 6530), 'liberaforms.utils.utils.make_url_for', 'make_url_for', (['"""admin_bp.list_invites"""'], {}), "('admin_bp.list_invites')\n", (6505, 6530), False, 'from liberaforms.utils.utils import make_url_for, JsonResponse\n'), ((1073, 1096), 'liberaforms.utils.utils.get_app_version', 'utils.get_app_version', ([], {}), '()\n', (1094, 1096), False, 'from liberaforms.utils import utils\n'), ((1328, 1343), 'liberaforms.models.user.User.find_all', 'User.find_all', ([], {}), '()\n', (1341, 1343), False, 'from liberaforms.models.user import User\n'), ((1381, 1398), 'liberaforms.models.invite.Invite.find_all', 'Invite.find_all', ([], {}), '()\n', (1396, 1398), False, 'from liberaforms.models.invite import Invite\n'), ((1554, 1573), 'flask_babel.gettext', '_', (['"""User not found"""'], {}), "('User not found')\n", (1555, 1573), True, 'from flask_babel import gettext as _\n'), ((1610, 1645), 'liberaforms.utils.utils.make_url_for', 'make_url_for', (['"""admin_bp.list_users"""'], {}), "('admin_bp.list_users')\n", (1622, 1645), False, 'from liberaforms.utils.utils import make_url_for, JsonResponse\n'), ((1898, 1910), 'json.dumps', 'json.dumps', ([], {}), '()\n', (1908, 1910), False, 'import os, json\n'), ((2313, 2325), 'json.dumps', 'json.dumps', ([], {}), '()\n', (2323, 2325), False, 'import os, json\n'), ((2767, 2779), 'json.dumps', 'json.dumps', ([], {}), '()\n', (2777, 2779), False, 'import os, json\n'), ((3072, 3091), 'flask_babel.gettext', '_', (['"""User not found"""'], {}), "('User not found')\n", (3073, 3091), True, 'from flask_babel import gettext as _\n'), ((3128, 3163), 'liberaforms.utils.utils.make_url_for', 'make_url_for', (['"""admin_bp.list_users"""'], {}), "('admin_bp.list_users')\n", (3140, 3163), False, 'from liberaforms.utils.utils import make_url_for, JsonResponse\n'), ((4452, 4467), 'liberaforms.models.form.Form.find_all', 'Form.find_all', ([], {}), '()\n', (4465, 4467), False, 'from liberaforms.models.form import Form\n'), ((4672, 4697), 'flask_babel.gettext', '_', (['"""Can\'t find that form"""'], {}), '("Can\'t find that form")\n', (4673, 4697), True, 'from flask_babel import gettext as _\n'), ((4734, 4766), 'liberaforms.utils.utils.make_url_for', 'make_url_for', (['"""form_bp.my_forms"""'], {}), "('form_bp.my_forms')\n", (4746, 4766), False, 'from liberaforms.utils.utils import make_url_for, JsonResponse\n'), ((5047, 5064), 'liberaforms.models.invite.Invite.find_all', 'Invite.find_all', ([], {}), '()\n', (5062, 5064), False, 'from liberaforms.models.invite import Invite\n'), ((5672, 5728), 'flask_babel.gettext', '_', (["('We have sent an invitation to %s' % new_invite.email)"], {}), "('We have sent an invitation to %s' % 
new_invite.email)\n", (5673, 5728), True, 'from flask_babel import gettext as _\n'), ((5741, 5769), 'flask.flash', 'flash', (['flash_text', '"""success"""'], {}), "(flash_text, 'success')\n", (5746, 5769), False, 'from flask import session, flash, Blueprint\n'), ((5796, 5827), 'flask.flash', 'flash', (["status['msg']", '"""warning"""'], {}), "(status['msg'], 'warning')\n", (5801, 5827), False, 'from flask import session, flash, Blueprint\n'), ((5852, 5889), 'liberaforms.utils.utils.make_url_for', 'make_url_for', (['"""admin_bp.list_invites"""'], {}), "('admin_bp.list_invites')\n", (5864, 5889), False, 'from liberaforms.utils.utils import make_url_for, JsonResponse\n'), ((6338, 6385), 'flask_babel.gettext', '_', (["('Invitation to %s deleted OK' % invite.email)"], {}), "('Invitation to %s deleted OK' % invite.email)\n", (6339, 6385), True, 'from flask_babel import gettext as _\n'), ((6422, 6462), 'flask_babel.gettext', '_', (['"""Opps! We can\'t find that invitation"""'], {}), '("Opps! We can\'t find that invitation")\n', (6423, 6462), True, 'from flask_babel import gettext as _\n'), ((6726, 6771), 'flask.g.current_user.toggle_new_user_notification', 'g.current_user.toggle_new_user_notification', ([], {}), '()\n', (6769, 6771), False, 'from flask import g, request, render_template, redirect\n'), ((6932, 6977), 'flask.g.current_user.toggle_new_form_notification', 'g.current_user.toggle_new_form_notification', ([], {}), '()\n', (6975, 6977), False, 'from flask import g, request, render_template, redirect\n'), ((7222, 7247), 'flask_babel.gettext', '_', (['"""Can\'t find that form"""'], {}), '("Can\'t find that form")\n', (7223, 7247), True, 'from flask_babel import gettext as _\n'), ((7284, 7316), 'liberaforms.utils.utils.make_url_for', 'make_url_for', (['"""user_bp.my_forms"""'], {}), "('user_bp.my_forms')\n", (7296, 7316), False, 'from liberaforms.utils.utils import make_url_for, JsonResponse\n'), ((7597, 7652), 'flask.render_template', 'render_template', (['"""change-author.html"""'], {'form': 'queriedForm'}), "('change-author.html', form=queriedForm)\n", (7612, 7652), False, 'from flask import g, request, render_template, redirect\n'), ((7726, 7781), 'liberaforms.models.user.User.find', 'User.find', ([], {'username': "request.form['new_author_username']"}), "(username=request.form['new_author_username'])\n", (7735, 7781), False, 'from liberaforms.models.user import User\n'), ((3280, 3308), 'flask_babel.gettext', '_', (['"""Cannot delete root user"""'], {}), "('Cannot delete root user')\n", (3281, 3308), True, 'from flask_babel import gettext as _\n'), ((3349, 3398), 'liberaforms.utils.utils.make_url_for', 'make_url_for', (['"""admin_bp.inspect_user"""'], {'id': 'user.id'}), "('admin_bp.inspect_user', id=user.id)\n", (3361, 3398), False, 'from liberaforms.utils.utils import make_url_for, JsonResponse\n'), ((3459, 3486), 'flask_babel.gettext', '_', (['"""Cannot delete yourself"""'], {}), "('Cannot delete yourself')\n", (3460, 3486), True, 'from flask_babel import gettext as _\n'), ((3527, 3588), 'liberaforms.utils.utils.make_url_for', 'make_url_for', (['"""admin_bp.inspect_user"""'], {'username': 'user.username'}), "('admin_bp.inspect_user', username=user.username)\n", (3539, 3588), False, 'from liberaforms.utils.utils import make_url_for, JsonResponse\n'), ((3734, 3772), 'flask_babel.gettext', '_', (['("Deleted user \'%s\'" % user.username)'], {}), '("Deleted user \'%s\'" % user.username)\n', (3735, 3772), True, 'from flask_babel import gettext as _\n'), ((3815, 3850), 
'liberaforms.utils.utils.make_url_for', 'make_url_for', (['"""admin_bp.list_users"""'], {}), "('admin_bp.list_users')\n", (3827, 3850), False, 'from liberaforms.utils.utils import make_url_for, JsonResponse\n'), ((3884, 3912), 'flask_babel.gettext', '_', (['"""Username does not match"""'], {}), "('Username does not match')\n", (3885, 3912), True, 'from flask_babel import gettext as _\n'), ((5565, 5577), 'liberaforms.utils.dispatcher.Dispatcher', 'Dispatcher', ([], {}), '()\n', (5575, 5577), False, 'from liberaforms.utils.dispatcher import Dispatcher\n'), ((7536, 7565), 'flask_babel.gettext', '_', (['"""Current author incorrect"""'], {}), "('Current author incorrect')\n", (7537, 7565), True, 'from flask_babel import gettext as _\n'), ((6071, 6088), 'liberaforms.models.invite.Invite.find_all', 'Invite.find_all', ([], {}), '()\n', (6086, 6088), False, 'from liberaforms.models.invite import Invite\n'), ((8697, 8762), 'flask_babel.gettext', '_', (['("Can\'t find username %s" % request.form[\'new_author_username\'])'], {}), '("Can\'t find username %s" % request.form[\'new_author_username\'])\n', (8698, 8762), True, 'from flask_babel import gettext as _\n'), ((7983, 8061), 'flask_babel.gettext', '_', (["('Changed author from %s to %s' % (old_author.username, new_author.username))"], {}), "('Changed author from %s to %s' % (old_author.username, new_author.username))\n", (7984, 8061), True, 'from flask_babel import gettext as _\n'), ((8497, 8583), 'flask_babel.gettext', '_', (["('Cannot use %s. The user is not enabled' % request.form['new_author_username']\n )"], {}), "('Cannot use %s. The user is not enabled' % request.form[\n 'new_author_username'])\n", (8498, 8583), True, 'from flask_babel import gettext as _\n'), ((8259, 8281), 'flask_babel.gettext', '_', (['"""Changed author OK"""'], {}), "('Changed author OK')\n", (8260, 8281), True, 'from flask_babel import gettext as _\n'), ((8334, 8394), 'liberaforms.utils.utils.make_url_for', 'make_url_for', (['"""form_bp.inspect_form"""'], {'form_id': 'queriedForm.id'}), "('form_bp.inspect_form', form_id=queriedForm.id)\n", (8346, 8394), False, 'from liberaforms.utils.utils import make_url_for, JsonResponse\n')] |
from Roteiro7.Roteiro7__funcoes import GrafoComPesos
# .:: Test file for Dijkstra's algorithm ::. #
# --------------------------------------------------------------------------- #
grafo_aula = GrafoComPesos(
['E', 'A', 'B', 'C', 'D'],
{
'E-A': 1,
'E-C': 10,
'A-B': 2,
'B-C': 4,
'C-D': 3
}
)
print(grafo_aula)
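# expected result: the cheapest E -> D route is E-A-B-C-D with total weight 10 (the direct E-C edge alone costs 10)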
print('Shortest path by Dijkstra: ', grafo_aula.dijkstra('E', 'D'))
print("-------------------------")
grafo_aula2 = GrafoComPesos(
['A', 'B', 'C', 'D', 'E', 'F', 'G'],
{
'A-B': 1, 'A-F': 3, 'A-G': 2,
'B-F': 1,
'C-B': 2,
'C-D': 5,
'D-E': 2,
'F-D': 4,
'F-G': 2,
'G-E': 7,
}
)
print(grafo_aula2)
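# expected result: the cheapest A -> E route is A-B-F-D-E with total weight 8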
print('Shortest path by Dijkstra: ', grafo_aula2.dijkstra('A', 'E'))
| [
"Roteiro7.Roteiro7__funcoes.GrafoComPesos"
]
| [((203, 300), 'Roteiro7.Roteiro7__funcoes.GrafoComPesos', 'GrafoComPesos', (["['E', 'A', 'B', 'C', 'D']", "{'E-A': 1, 'E-C': 10, 'A-B': 2, 'B-C': 4, 'C-D': 3}"], {}), "(['E', 'A', 'B', 'C', 'D'], {'E-A': 1, 'E-C': 10, 'A-B': 2,\n 'B-C': 4, 'C-D': 3})\n", (216, 300), False, 'from Roteiro7.Roteiro7__funcoes import GrafoComPesos\n'), ((491, 651), 'Roteiro7.Roteiro7__funcoes.GrafoComPesos', 'GrafoComPesos', (["['A', 'B', 'C', 'D', 'E', 'F', 'G']", "{'A-B': 1, 'A-F': 3, 'A-G': 2, 'B-F': 1, 'C-B': 2, 'C-D': 5, 'D-E': 2,\n 'F-D': 4, 'F-G': 2, 'G-E': 7}"], {}), "(['A', 'B', 'C', 'D', 'E', 'F', 'G'], {'A-B': 1, 'A-F': 3,\n 'A-G': 2, 'B-F': 1, 'C-B': 2, 'C-D': 5, 'D-E': 2, 'F-D': 4, 'F-G': 2,\n 'G-E': 7})\n", (504, 651), False, 'from Roteiro7.Roteiro7__funcoes import GrafoComPesos\n')] |
import pytest
from rest_framework import status
from rest_framework.test import APIClient
class TestBase:
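    # shared defaults overridden by the view-specific test classes below;
    # __test__ = False keeps pytest from collecting these reusable bases directly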
__test__ = False
path = None
get_data = {}
put_data = {}
post_data = {}
delete_data = {}
requires_auth = True
implements_retrieve = False
implements_create = False
implements_update = False
implements_destroy = False
client = APIClient()
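    # unauthenticated client by default; the authenticate fixture swaps in the admin client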
@pytest.fixture
def setup(self, setup_method=None):
return setup_method
@pytest.fixture
def authenticate(self, api_client_admin):
self.client = api_client_admin
class TestGet(TestBase):
@pytest.fixture
def get_response(self):
return self.client.get(f"/{self.path}", self.get_data, format="json",)
def test_get_without_authentication(self, setup, get_response):
if not self.requires_auth:
if not self.implements_retrieve:
returns_status_code_http_405_not_allowed(get_response)
else:
returns_status_code_http_200_ok(get_response)
response_has_etag(get_response)
else:
returns_status_code_http_401_unauthorized(get_response)
def test_get_with_authentication(self, setup, authenticate, get_response):
if not self.implements_retrieve:
returns_status_code_http_405_not_allowed(get_response)
else:
returns_status_code_http_200_ok(get_response)
response_has_etag(get_response)
class TestPost(TestBase):
@pytest.fixture
def post_response(self):
return self.client.post(
path=f"/{self.path}", data=self.post_data, format="json",
)
def test_post_without_authentication(self, setup, post_response):
returns_status_code_http_401_unauthorized(post_response)
def test_post_with_authentication(self, setup, authenticate, post_response):
if self.implements_create:
returns_status_code_http_201_created(post_response)
else:
returns_status_code_http_405_not_allowed(post_response)
class TestPut(TestBase):
@pytest.fixture
def put_response(self):
return self.client.put(f"/{self.path}", self.put_data, format="json",)
def test_put_without_authentication(self, setup, put_response):
if not self.requires_auth:
if self.implements_update:
returns_status_code_http_200_ok(put_response)
else:
returns_status_code_http_405_not_allowed(put_response)
else:
returns_status_code_http_401_unauthorized(put_response)
def test_put_with_authentication(self, setup, authenticate, put_response):
if not self.implements_update:
returns_status_code_http_405_not_allowed(put_response)
elif self.requires_auth:
returns_status_code_http_200_ok(put_response)
else:
returns_status_code_http_401_unauthorized(put_response)
class TestDelete(TestBase):
@pytest.fixture
def delete_response(self):
return self.client.delete(f"/{self.path}", self.delete_data, format="json")
def test_delete_without_authentication(self, setup, delete_response):
if not self.requires_auth:
if self.implements_destroy:
returns_status_code_http_204_no_content(delete_response)
else:
returns_status_code_http_405_not_allowed(delete_response)
else:
returns_status_code_http_401_unauthorized(delete_response)
def test_delete_with_authentication(self, setup, authenticate, delete_response):
if not self.implements_destroy:
returns_status_code_http_405_not_allowed(delete_response)
elif self.requires_auth:
returns_status_code_http_204_no_content(delete_response)
else:
returns_status_code_http_401_unauthorized(delete_response)
class TestView(TestGet, TestPost, TestPut, TestDelete):
__test__ = False
requires_auth = True
class TestListCreateAPIView(TestView):
__test__ = False
implements_retrieve = True
implements_create = True
requires_auth = True
class TestRetrieveAPIView(TestView):
__test__ = False
implements_retrieve = True
requires_auth = True
class TestUnauthenticatedRetrieveAPIView(TestView):
__test__ = False
implements_retrieve = True
requires_auth = False
def returns_status_code_http_200_ok(response):
assert response.status_code == status.HTTP_200_OK
def returns_status_code_http_401_unauthorized(response):
assert response.status_code == status.HTTP_401_UNAUTHORIZED
def returns_status_code_http_201_created(response):
assert response.status_code == status.HTTP_201_CREATED
def returns_status_code_http_204_no_content(response):
assert response.status_code == status.HTTP_204_NO_CONTENT
def returns_status_code_http_405_not_allowed(response):
assert response.status_code == status.HTTP_405_METHOD_NOT_ALLOWED
def response_has_etag(response):
assert response.get("ETag")
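# Hedged usage sketch (assumption, not part of the original module): a concrete test module
# would subclass one of the view test classes and fill in the class attributes, e.g.
#
# class TestWidgetView(TestListCreateAPIView):
#     __test__ = True
#     path = "widgets/"                      # hypothetical endpoint
#     post_data = {"name": "example"}        # hypothetical payload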
| [
"rest_framework.test.APIClient"
]
| [((383, 394), 'rest_framework.test.APIClient', 'APIClient', ([], {}), '()\n', (392, 394), False, 'from rest_framework.test import APIClient\n')] |
from torchvision.datasets import ImageFolder
from torchvision import transforms
import random
import os
import torch
from torch.utils.data.dataloader import DataLoader
from utils import constants, get_default_device
from image_folder_with_path import ImageFolderWithPaths
def to_device(data, device):
"""Move tensor(s) to chosen device"""
if isinstance(data, (list, tuple)):
return [to_device(x, device) for x in data]
return data.to(device, non_blocking=True)
class DeviceDataLoader():
""" wrap a Dataloader to move data to a device """
def __init__(self, dl, device):
self.dl = dl
self.device = device
def __iter__(self):
""" yield a batch of data after moving it to device """
for b in self.dl:
yield to_device(b, self.device)
def __len__(self):
""" return number of batch size """
return len(self.dl)
default_device = get_default_device.default_device
train_transforms = transforms.Compose([
transforms.RandomHorizontalFlip(p=0.5),
transforms.RandomRotation(degrees=random.uniform(5, 10)),
transforms.Resize((512, 512)),
transforms.ToTensor(),
])
test_transforms = transforms.Compose([
transforms.Resize((512, 512)),
transforms.ToTensor(),
])
classes = os.listdir(constants.DATA_PATH + constants.TRAIN_PATH)
training_dataset = ImageFolder(constants.DATA_PATH + constants.TRAIN_PATH, transform=train_transforms)
valid_dataset = ImageFolder(constants.DATA_PATH + constants.VAL_PATH, transform=test_transforms)
# testing_dataset = ImageFolder(constants.DATA_PATH + constants.TEST_PATH, transform=test_transforms)
# training_dataset = ImageFolderWithPaths(constants.DATA_PATH + constants.TRAIN_PATH, transform=train_transforms)
# valid_dataset = ImageFolderWithPaths(constants.DATA_PATH + constants.VAL_PATH, transform=test_transforms)
testing_dataset = ImageFolderWithPaths(constants.DATA_PATH + constants.TEST_PATH, transform=test_transforms)
torch.manual_seed(constants.RANDOM_SEED)
train_dl = DataLoader(training_dataset, constants.BATCH_SIZE, shuffle=True, num_workers=8, pin_memory=True)
val_dl = DataLoader(valid_dataset, constants.BATCH_SIZE, num_workers=8, pin_memory=True)
test_dl = DataLoader(testing_dataset, constants.BATCH_SIZE, num_workers=8, pin_memory=True)
"""
Now we can wrap our training and validation data loaders using DeviceDataLoader for automatically transferring batches
of data to GPU (if available), and use to_device to move our model to GPU (if available)
"""
train_dl = DeviceDataLoader(train_dl, default_device)
val_dl = DeviceDataLoader(val_dl, default_device)
test_dl = DeviceDataLoader(test_dl, default_device) | [
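# Hedged usage sketch (assumption, not from the original file): batches yielded by the wrapped
# loaders already live on `default_device`, so a training loop needs no explicit .to(device):
#
# for images, labels in train_dl:
#     ...  # forward/backward pass here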
"torch.manual_seed",
"random.uniform",
"os.listdir",
"image_folder_with_path.ImageFolderWithPaths",
"torch.utils.data.dataloader.DataLoader",
"torchvision.transforms.RandomHorizontalFlip",
"torchvision.datasets.ImageFolder",
"torchvision.transforms.Resize",
"torchvision.transforms.ToTensor"
]
| [((1288, 1342), 'os.listdir', 'os.listdir', (['(constants.DATA_PATH + constants.TRAIN_PATH)'], {}), '(constants.DATA_PATH + constants.TRAIN_PATH)\n', (1298, 1342), False, 'import os\n'), ((1363, 1451), 'torchvision.datasets.ImageFolder', 'ImageFolder', (['(constants.DATA_PATH + constants.TRAIN_PATH)'], {'transform': 'train_transforms'}), '(constants.DATA_PATH + constants.TRAIN_PATH, transform=\n train_transforms)\n', (1374, 1451), False, 'from torchvision.datasets import ImageFolder\n'), ((1463, 1548), 'torchvision.datasets.ImageFolder', 'ImageFolder', (['(constants.DATA_PATH + constants.VAL_PATH)'], {'transform': 'test_transforms'}), '(constants.DATA_PATH + constants.VAL_PATH, transform=test_transforms\n )\n', (1474, 1548), False, 'from torchvision.datasets import ImageFolder\n'), ((1887, 1982), 'image_folder_with_path.ImageFolderWithPaths', 'ImageFolderWithPaths', (['(constants.DATA_PATH + constants.TEST_PATH)'], {'transform': 'test_transforms'}), '(constants.DATA_PATH + constants.TEST_PATH, transform=\n test_transforms)\n', (1907, 1982), False, 'from image_folder_with_path import ImageFolderWithPaths\n'), ((1980, 2020), 'torch.manual_seed', 'torch.manual_seed', (['constants.RANDOM_SEED'], {}), '(constants.RANDOM_SEED)\n', (1997, 2020), False, 'import torch\n'), ((2033, 2133), 'torch.utils.data.dataloader.DataLoader', 'DataLoader', (['training_dataset', 'constants.BATCH_SIZE'], {'shuffle': '(True)', 'num_workers': '(8)', 'pin_memory': '(True)'}), '(training_dataset, constants.BATCH_SIZE, shuffle=True,\n num_workers=8, pin_memory=True)\n', (2043, 2133), False, 'from torch.utils.data.dataloader import DataLoader\n'), ((2139, 2218), 'torch.utils.data.dataloader.DataLoader', 'DataLoader', (['valid_dataset', 'constants.BATCH_SIZE'], {'num_workers': '(8)', 'pin_memory': '(True)'}), '(valid_dataset, constants.BATCH_SIZE, num_workers=8, pin_memory=True)\n', (2149, 2218), False, 'from torch.utils.data.dataloader import DataLoader\n'), ((2229, 2315), 'torch.utils.data.dataloader.DataLoader', 'DataLoader', (['testing_dataset', 'constants.BATCH_SIZE'], {'num_workers': '(8)', 'pin_memory': '(True)'}), '(testing_dataset, constants.BATCH_SIZE, num_workers=8, pin_memory\n =True)\n', (2239, 2315), False, 'from torch.utils.data.dataloader import DataLoader\n'), ((1005, 1043), 'torchvision.transforms.RandomHorizontalFlip', 'transforms.RandomHorizontalFlip', ([], {'p': '(0.5)'}), '(p=0.5)\n', (1036, 1043), False, 'from torchvision import transforms\n'), ((1111, 1140), 'torchvision.transforms.Resize', 'transforms.Resize', (['(512, 512)'], {}), '((512, 512))\n', (1128, 1140), False, 'from torchvision import transforms\n'), ((1146, 1167), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1165, 1167), False, 'from torchvision import transforms\n'), ((1216, 1245), 'torchvision.transforms.Resize', 'transforms.Resize', (['(512, 512)'], {}), '((512, 512))\n', (1233, 1245), False, 'from torchvision import transforms\n'), ((1251, 1272), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1270, 1272), False, 'from torchvision import transforms\n'), ((1083, 1104), 'random.uniform', 'random.uniform', (['(5)', '(10)'], {}), '(5, 10)\n', (1097, 1104), False, 'import random\n')] |
# import matplotlib
# matplotlib.use('Qt5Agg') # Prevents `Invalid DISPLAY variable` errors
import pytest
import tempfile
from calliope import Model
from calliope.utils import AttrDict
from calliope import analysis
from . import common
from .common import assert_almost_equal, solver, solver_io
import matplotlib.pyplot as plt
plt.switch_backend('agg') # Prevents `Invalid DISPLAY variable` errors
class TestModel:
@pytest.fixture(scope='module')
def model(self):
locations = """
locations:
1:
techs: ['ccgt', 'demand_power']
override:
ccgt:
constraints:
e_cap.max: 100
demand_power:
constraints:
r: -50
metadata:
map_boundary: [-10, 35, 5, 45]
location_coordinates:
1: [40, -2]
links:
"""
config_run = """
mode: plan
model: ['{techs}', '{locations}']
subset_t: ['2005-01-01', '2005-01-02']
"""
with tempfile.NamedTemporaryFile(delete=False) as f:
f.write(locations.encode('utf-8'))
f.read()
override_dict = AttrDict({
'solver': solver,
'solver_io': solver_io,
})
model = common.simple_model(config_run=config_run,
config_locations=f.name,
override=override_dict)
model.run()
return model
@pytest.fixture(scope='module')
def builtin_model(self):
model = Model()
model.run()
return model
def test_plot_carrier_production(self, model):
# Just make sure this doesn't raise any exceptions
analysis.plot_carrier_production(model.solution)
def test_plot_timeseries(self, model):
# Just make sure this doesn't raise any exceptions
analysis.plot_timeseries(model.solution,
model.solution['e'].loc[dict(c='power')].sum(dim='x'),
carrier='power', demand='demand_power')
def test_plot_installed_capacities(self, model):
# Just make sure this doesn't raise any exceptions
analysis.plot_installed_capacities(model.solution)
def test_plot_transmission(self, model):
# Just make sure this doesn't raise any exceptions
analysis.plot_transmission(model.solution, map_resolution='c')
def test_get_delivered_cost(self, model):
# TODO this should be tested with a more complex model
assert_almost_equal(analysis.get_delivered_cost(model.solution), 0.1)
def test_get_levelized_cost(self, model):
lcoe = analysis.get_levelized_cost(model.solution)
assert_almost_equal(lcoe.at['ccgt'], 0.1)
def test_get_group_share(self, model):
# TODO this should be tested with a more complex model
share = analysis.get_group_share(model.solution, techs=['ccgt'])
assert share == 1.0
def test_get_unmet_demand_hours(self, builtin_model):
# TODO this should be tested with a more complex model
unmet = analysis.get_unmet_demand_hours(builtin_model.solution)
assert unmet == 1
def test_recompute_levelized_costs(self, model):
# Cost in solution
sol = model.solution
assert_almost_equal(sol['summary'].to_pandas().loc['ccgt', 'levelized_cost_monetary'], 0.1)
# Recomputed cost must be the same
dm = analysis.SolutionModel(model.solution)
recomputed = dm.recompute_levelized_costs('ccgt')
assert_almost_equal(recomputed['total'], 0.1)
def test_recompute_levelized_costs_after_changes(self, model):
# Make changes
dm = analysis.SolutionModel(model.solution)
dm.config_model.techs.ccgt.costs.monetary.e_cap = 50
dm.config_model.techs.ccgt.costs.monetary.om_fuel = 1.0
# Recomputed cost
recomputed = dm.recompute_levelized_costs('ccgt')
assert_almost_equal(recomputed['total'], 1.0, tolerance=0.001)
| [
"calliope.utils.AttrDict",
"calliope.analysis.plot_installed_capacities",
"calliope.analysis.plot_transmission",
"calliope.Model",
"calliope.analysis.get_group_share",
"calliope.analysis.get_unmet_demand_hours",
"calliope.analysis.plot_carrier_production",
"tempfile.NamedTemporaryFile",
"calliope.analysis.SolutionModel",
"calliope.analysis.get_delivered_cost",
"matplotlib.pyplot.switch_backend",
"pytest.fixture",
"calliope.analysis.get_levelized_cost"
]
| [((333, 358), 'matplotlib.pyplot.switch_backend', 'plt.switch_backend', (['"""agg"""'], {}), "('agg')\n", (351, 358), True, 'import matplotlib.pyplot as plt\n'), ((429, 459), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (443, 459), False, 'import pytest\n'), ((1688, 1718), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (1702, 1718), False, 'import pytest\n'), ((1764, 1771), 'calliope.Model', 'Model', ([], {}), '()\n', (1769, 1771), False, 'from calliope import Model\n'), ((1932, 1980), 'calliope.analysis.plot_carrier_production', 'analysis.plot_carrier_production', (['model.solution'], {}), '(model.solution)\n', (1964, 1980), False, 'from calliope import analysis\n'), ((2415, 2465), 'calliope.analysis.plot_installed_capacities', 'analysis.plot_installed_capacities', (['model.solution'], {}), '(model.solution)\n', (2449, 2465), False, 'from calliope import analysis\n'), ((2579, 2641), 'calliope.analysis.plot_transmission', 'analysis.plot_transmission', (['model.solution'], {'map_resolution': '"""c"""'}), "(model.solution, map_resolution='c')\n", (2605, 2641), False, 'from calliope import analysis\n'), ((2892, 2935), 'calliope.analysis.get_levelized_cost', 'analysis.get_levelized_cost', (['model.solution'], {}), '(model.solution)\n', (2919, 2935), False, 'from calliope import analysis\n'), ((3109, 3165), 'calliope.analysis.get_group_share', 'analysis.get_group_share', (['model.solution'], {'techs': "['ccgt']"}), "(model.solution, techs=['ccgt'])\n", (3133, 3165), False, 'from calliope import analysis\n'), ((3332, 3387), 'calliope.analysis.get_unmet_demand_hours', 'analysis.get_unmet_demand_hours', (['builtin_model.solution'], {}), '(builtin_model.solution)\n', (3363, 3387), False, 'from calliope import analysis\n'), ((3680, 3718), 'calliope.analysis.SolutionModel', 'analysis.SolutionModel', (['model.solution'], {}), '(model.solution)\n', (3702, 3718), False, 'from calliope import analysis\n'), ((3935, 3973), 'calliope.analysis.SolutionModel', 'analysis.SolutionModel', (['model.solution'], {}), '(model.solution)\n', (3957, 3973), False, 'from calliope import analysis\n'), ((1205, 1246), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'delete': '(False)'}), '(delete=False)\n', (1232, 1246), False, 'import tempfile\n'), ((1349, 1401), 'calliope.utils.AttrDict', 'AttrDict', (["{'solver': solver, 'solver_io': solver_io}"], {}), "({'solver': solver, 'solver_io': solver_io})\n", (1357, 1401), False, 'from calliope.utils import AttrDict\n'), ((2780, 2823), 'calliope.analysis.get_delivered_cost', 'analysis.get_delivered_cost', (['model.solution'], {}), '(model.solution)\n', (2807, 2823), False, 'from calliope import analysis\n')] |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import json
from unittest import mock
from cinder import test
from cinder.volume import configuration as conf
from cinder.volume.targets import spdknvmf as spdknvmf_driver
BDEVS = [{
"num_blocks": 4096000,
"name": "Nvme0n1",
"driver_specific": {
"nvme": {
"trid": {
"trtype": "PCIe",
"traddr": "0000:00:04.0"
},
"ns_data": {
"id": 1
},
"pci_address": "0000:00:04.0",
"vs": {
"nvme_version": "1.1"
},
"ctrlr_data": {
"firmware_revision": "1.0",
"serial_number": "deadbeef",
"oacs": {
"ns_manage": 0,
"security": 0,
"firmware": 0,
"format": 0
},
"vendor_id": "0x8086",
"model_number": "QEMU NVMe Ctrl"
},
"csts": {
"rdy": 1,
"cfs": 0
}
}
},
"supported_io_types": {
"reset": True,
"nvme_admin": True,
"unmap": False,
"read": True,
"write_zeroes": False,
"write": True,
"flush": True,
"nvme_io": True
},
"claimed": False,
"block_size": 512,
"product_name": "NVMe disk",
"aliases": ["Nvme0n1"]
}, {
"num_blocks": 8192,
"uuid": "70efd305-4e66-49bd-99ff-faeda5c3052d",
"aliases": [
"Nvme0n1p0"
],
"driver_specific": {
"lvol": {
"base_bdev": "Nvme0n1",
"lvol_store_uuid": "58b17014-d4a1-4f85-9761-093643ed18f1",
"thin_provision": False
}
},
"supported_io_types": {
"reset": True,
"nvme_admin": False,
"unmap": True,
"read": True,
"write_zeroes": True,
"write": True,
"flush": False,
"nvme_io": False
},
"claimed": False,
"block_size": 4096,
"product_name": "Split Disk",
"name": "Nvme0n1p0"
}, {
"num_blocks": 8192,
"uuid": "70efd305-4e66-49bd-99ff-faeda5c3052d",
"aliases": [
"Nvme0n1p1"
],
"driver_specific": {
"lvol": {
"base_bdev": "Nvme0n1",
"lvol_store_uuid": "58b17014-d4a1-4f85-9761-093643ed18f1",
"thin_provision": False
}
},
"supported_io_types": {
"reset": True,
"nvme_admin": False,
"unmap": True,
"read": True,
"write_zeroes": True,
"write": True,
"flush": False,
"nvme_io": False
},
"claimed": False,
"block_size": 4096,
"product_name": "Split Disk",
"name": "Nvme0n1p1"
}, {
"num_blocks": 8192,
"uuid": "70efd305-4e66-49bd-99ff-faeda5c3052d",
"aliases": [
"lvs_test/lvol0"
],
"driver_specific": {
"lvol": {
"base_bdev": "Malloc0",
"lvol_store_uuid": "58b17014-d4a1-4f85-9761-093643ed18f1",
"thin_provision": False
}
},
"supported_io_types": {
"reset": True,
"nvme_admin": False,
"unmap": True,
"read": True,
"write_zeroes": True,
"write": True,
"flush": False,
"nvme_io": False
},
"claimed": False,
"block_size": 4096,
"product_name": "Logical Volume",
"name": "58b17014-d4a1-4f85-9761-093643ed18f1_4294967297"
}, {
"num_blocks": 8192,
"uuid": "8dec1964-d533-41df-bea7-40520efdb416",
"aliases": [
"lvs_test/lvol1"
],
"driver_specific": {
"lvol": {
"base_bdev": "Malloc0",
"lvol_store_uuid": "58b17014-d4a1-4f85-9761-093643ed18f1",
"thin_provision": True
}
},
"supported_io_types": {
"reset": True,
"nvme_admin": False,
"unmap": True,
"read": True,
"write_zeroes": True,
"write": True,
"flush": False,
"nvme_io": False
},
"claimed": False,
"block_size": 4096,
"product_name": "Logical Volume",
"name": "58b17014-d4a1-4f85-9761-093643ed18f1_4294967298"
}]
NVMF_SUBSYSTEMS = [{
"listen_addresses": [],
"subtype": "Discovery",
"nqn": "nqn.2014-08.org.nvmexpress.discovery",
"hosts": [],
"allow_any_host": True
}, {
"listen_addresses": [],
"subtype": "NVMe",
"hosts": [{
"nqn": "nqn.2016-06.io.spdk:init"
}],
"namespaces": [{
"bdev_name": "Nvme0n1p0",
"nsid": 1,
"name": "Nvme0n1p0"
}],
"allow_any_host": False,
"serial_number": "SPDK00000000000001",
"nqn": "nqn.2016-06.io.spdk:cnode1"
}, {
"listen_addresses": [],
"subtype": "NVMe",
"hosts": [],
"namespaces": [{
"bdev_name": "Nvme1n1p0",
"nsid": 1,
"name": "Nvme1n1p0"
}],
"allow_any_host": True,
"serial_number": "SPDK00000000000002",
"nqn": "nqn.2016-06.io.spdk:cnode2"
}]
class JSONRPCException(Exception):
def __init__(self, message):
self.message = message
class JSONRPCClient(object):
def __init__(self, addr=None, port=None):
self.methods = {"bdev_get_bdevs": self.get_bdevs,
"construct_nvmf_subsystem":
self.construct_nvmf_subsystem,
"nvmf_delete_subsystem": self.delete_nvmf_subsystem,
"nvmf_create_subsystem": self.nvmf_subsystem_create,
"nvmf_subsystem_add_listener":
self.nvmf_subsystem_add_listener,
"nvmf_subsystem_add_ns":
self.nvmf_subsystem_add_ns,
"nvmf_get_subsystems": self.get_nvmf_subsystems}
self.bdevs = copy.deepcopy(BDEVS)
self.nvmf_subsystems = copy.deepcopy(NVMF_SUBSYSTEMS)
def __del__(self):
pass
def get_bdevs(self, params=None):
if params and 'name' in params:
for bdev in self.bdevs:
for alias in bdev['aliases']:
if params['name'] in alias:
return json.dumps({"result": [bdev]})
if bdev['name'] == params['name']:
return json.dumps({"result": [bdev]})
return json.dumps({"error": "Not found"})
return json.dumps({"result": self.bdevs})
def get_nvmf_subsystems(self, params=None):
return json.dumps({"result": self.nvmf_subsystems})
def construct_nvmf_subsystem(self, params=None):
nvmf_subsystem = {
"listen_addresses": [],
"subtype": "NVMe",
"hosts": [],
"namespaces": [{
"bdev_name": "Nvme1n1p0",
"nsid": 1,
"name": "Nvme1n1p0"
}],
"allow_any_host": True,
"serial_number": params['serial_number'],
"nqn": params['nqn']
}
self.nvmf_subsystems.append(nvmf_subsystem)
return json.dumps({"result": nvmf_subsystem})
def delete_nvmf_subsystem(self, params=None):
found_id = -1
i = 0
for nvmf_subsystem in self.nvmf_subsystems:
if nvmf_subsystem['nqn'] == params['nqn']:
found_id = i
i += 1
if found_id != -1:
del self.nvmf_subsystems[found_id]
return json.dumps({"result": {}})
def nvmf_subsystem_create(self, params=None):
nvmf_subsystem = {
"namespaces": [],
"nqn": params['nqn'],
"serial_number": "S0000000000000000001",
"allow_any_host": False,
"subtype": "NVMe",
"hosts": [],
"listen_addresses": []
}
self.nvmf_subsystems.append(nvmf_subsystem)
return json.dumps({"result": nvmf_subsystem})
def nvmf_subsystem_add_listener(self, params=None):
for nvmf_subsystem in self.nvmf_subsystems:
if nvmf_subsystem['nqn'] == params['nqn']:
nvmf_subsystem['listen_addresses'].append(
params['listen_address']
)
return json.dumps({"result": ""})
def nvmf_subsystem_add_ns(self, params=None):
for nvmf_subsystem in self.nvmf_subsystems:
if nvmf_subsystem['nqn'] == params['nqn']:
nvmf_subsystem['namespaces'].append(
params['namespace']
)
return json.dumps({"result": ""})
def call(self, method, params=None):
req = {}
req['jsonrpc'] = '2.0'
req['method'] = method
req['id'] = 1
if (params):
req['params'] = params
response = json.loads(self.methods[method](params))
if not response:
return {}
if 'error' in response:
msg = "\n".join(["Got JSON-RPC error response",
"request:",
json.dumps(req, indent=2),
"response:",
json.dumps(response['error'], indent=2)])
raise JSONRPCException(msg)
return response['result']
class Target(object):
def __init__(self, name="Nvme0n1p0"):
self.name = name
class SpdkNvmfDriverTestCase(test.TestCase):
def setUp(self):
super(SpdkNvmfDriverTestCase, self).setUp()
self.configuration = mock.Mock(conf.Configuration)
self.configuration.target_ip_address = '192.168.0.1'
self.configuration.target_port = '4420'
self.configuration.target_prefix = ""
self.configuration.nvmet_port_id = "1"
self.configuration.nvmet_ns_id = "fake_id"
self.configuration.nvmet_subsystem_name = "nqn.2014-08.io.spdk"
self.configuration.target_protocol = "nvmet_rdma"
self.configuration.spdk_rpc_ip = "127.0.0.1"
self.configuration.spdk_rpc_port = 8000
self.driver = spdknvmf_driver.SpdkNvmf(configuration=
self.configuration)
self.jsonrpcclient = JSONRPCClient()
def test__get_spdk_volume_name(self):
with mock.patch.object(self.driver, "_rpc_call",
self.jsonrpcclient.call):
bdevs = self.driver._rpc_call("bdev_get_bdevs")
bdev_name = bdevs[0]['name']
volume_name = self.driver._get_spdk_volume_name(bdev_name)
self.assertEqual(bdev_name, volume_name)
volume_name = self.driver._get_spdk_volume_name("fake")
self.assertIsNone(volume_name)
def test__get_nqn_with_volume_name(self):
with mock.patch.object(self.driver, "_rpc_call",
self.jsonrpcclient.call):
nqn = self.driver._get_nqn_with_volume_name("Nvme0n1p0")
nqn_tmp = self.driver._rpc_call("nvmf_get_subsystems")[1]['nqn']
self.assertEqual(nqn, nqn_tmp)
nqn = self.driver._get_nqn_with_volume_name("fake")
self.assertIsNone(nqn)
def test__get_first_free_node(self):
with mock.patch.object(self.driver, "_rpc_call",
self.jsonrpcclient.call):
free_node = self.driver._get_first_free_node()
self.assertEqual(3, free_node)
def test_create_nvmeof_target(self):
with mock.patch.object(self.driver, "_rpc_call",
self.jsonrpcclient.call):
subsystems_first = self.driver._rpc_call("nvmf_get_subsystems")
self.driver.create_nvmeof_target("Nvme0n1p1",
"nqn.2016-06.io.spdk",
"192.168.0.1",
4420, "rdma", -1, -1, "")
subsystems_last = self.driver._rpc_call("nvmf_get_subsystems")
self.assertEqual(len(subsystems_first) + 1, len(subsystems_last))
def test_delete_nvmeof_target(self):
with mock.patch.object(self.driver, "_rpc_call",
self.jsonrpcclient.call):
subsystems_first = self.driver._rpc_call("nvmf_get_subsystems")
target = Target()
self.driver.delete_nvmeof_target(target)
subsystems_last = self.driver._rpc_call("nvmf_get_subsystems")
self.assertEqual(len(subsystems_first) - 1, len(subsystems_last))
target.name = "fake"
self.driver.delete_nvmeof_target(target)
self.assertEqual(len(subsystems_first) - 1, len(subsystems_last))
| [
"unittest.mock.Mock",
"copy.deepcopy",
"json.dumps",
"unittest.mock.patch.object",
"cinder.volume.targets.spdknvmf.SpdkNvmf"
]
| [((6393, 6413), 'copy.deepcopy', 'copy.deepcopy', (['BDEVS'], {}), '(BDEVS)\n', (6406, 6413), False, 'import copy\n'), ((6445, 6475), 'copy.deepcopy', 'copy.deepcopy', (['NVMF_SUBSYSTEMS'], {}), '(NVMF_SUBSYSTEMS)\n', (6458, 6475), False, 'import copy\n'), ((6963, 6997), 'json.dumps', 'json.dumps', (["{'result': self.bdevs}"], {}), "({'result': self.bdevs})\n", (6973, 6997), False, 'import json\n'), ((7062, 7106), 'json.dumps', 'json.dumps', (["{'result': self.nvmf_subsystems}"], {}), "({'result': self.nvmf_subsystems})\n", (7072, 7106), False, 'import json\n'), ((7631, 7669), 'json.dumps', 'json.dumps', (["{'result': nvmf_subsystem}"], {}), "({'result': nvmf_subsystem})\n", (7641, 7669), False, 'import json\n'), ((8003, 8029), 'json.dumps', 'json.dumps', (["{'result': {}}"], {}), "({'result': {}})\n", (8013, 8029), False, 'import json\n'), ((8432, 8470), 'json.dumps', 'json.dumps', (["{'result': nvmf_subsystem}"], {}), "({'result': nvmf_subsystem})\n", (8442, 8470), False, 'import json\n'), ((8773, 8799), 'json.dumps', 'json.dumps', (["{'result': ''}"], {}), "({'result': ''})\n", (8783, 8799), False, 'import json\n'), ((9085, 9111), 'json.dumps', 'json.dumps', (["{'result': ''}"], {}), "({'result': ''})\n", (9095, 9111), False, 'import json\n'), ((10036, 10065), 'unittest.mock.Mock', 'mock.Mock', (['conf.Configuration'], {}), '(conf.Configuration)\n', (10045, 10065), False, 'from unittest import mock\n'), ((10572, 10630), 'cinder.volume.targets.spdknvmf.SpdkNvmf', 'spdknvmf_driver.SpdkNvmf', ([], {'configuration': 'self.configuration'}), '(configuration=self.configuration)\n', (10596, 10630), True, 'from cinder.volume.targets import spdknvmf as spdknvmf_driver\n'), ((6912, 6946), 'json.dumps', 'json.dumps', (["{'error': 'Not found'}"], {}), "({'error': 'Not found'})\n", (6922, 6946), False, 'import json\n'), ((10780, 10848), 'unittest.mock.patch.object', 'mock.patch.object', (['self.driver', '"""_rpc_call"""', 'self.jsonrpcclient.call'], {}), "(self.driver, '_rpc_call', self.jsonrpcclient.call)\n", (10797, 10848), False, 'from unittest import mock\n'), ((11277, 11345), 'unittest.mock.patch.object', 'mock.patch.object', (['self.driver', '"""_rpc_call"""', 'self.jsonrpcclient.call'], {}), "(self.driver, '_rpc_call', self.jsonrpcclient.call)\n", (11294, 11345), False, 'from unittest import mock\n'), ((11721, 11789), 'unittest.mock.patch.object', 'mock.patch.object', (['self.driver', '"""_rpc_call"""', 'self.jsonrpcclient.call'], {}), "(self.driver, '_rpc_call', self.jsonrpcclient.call)\n", (11738, 11789), False, 'from unittest import mock\n'), ((11979, 12047), 'unittest.mock.patch.object', 'mock.patch.object', (['self.driver', '"""_rpc_call"""', 'self.jsonrpcclient.call'], {}), "(self.driver, '_rpc_call', self.jsonrpcclient.call)\n", (11996, 12047), False, 'from unittest import mock\n'), ((12621, 12689), 'unittest.mock.patch.object', 'mock.patch.object', (['self.driver', '"""_rpc_call"""', 'self.jsonrpcclient.call'], {}), "(self.driver, '_rpc_call', self.jsonrpcclient.call)\n", (12638, 12689), False, 'from unittest import mock\n'), ((6862, 6892), 'json.dumps', 'json.dumps', (["{'result': [bdev]}"], {}), "({'result': [bdev]})\n", (6872, 6892), False, 'import json\n'), ((9581, 9606), 'json.dumps', 'json.dumps', (['req'], {'indent': '(2)'}), '(req, indent=2)\n', (9591, 9606), False, 'import json\n'), ((9679, 9718), 'json.dumps', 'json.dumps', (["response['error']"], {'indent': '(2)'}), "(response['error'], indent=2)\n", (9689, 9718), False, 'import json\n'), ((6753, 6783), 'json.dumps', 
'json.dumps', (["{'result': [bdev]}"], {}), "({'result': [bdev]})\n", (6763, 6783), False, 'import json\n')] |
""" This is algos.euler.transformer module.
This module is responsible for transforming raw candle data into training
samples usable to the Euler algorithm.
"""
import datetime
import decimal
from algos.euler.models import training_samples as ts
from core.models import instruments
from datasource.models import candles
TWO_PLACES = decimal.Decimal('0.01')
def extract_features(day_candle):
""" Extract the features for the learning algorithm from a daily candle.
The Features are:
high_bid, low_bid, close_bid, open_ask, high_ask, low_ask,
and close_ask (all relative to open_bid) in pips.
Args:
day_candle: candles.Candle object representing a daily candle.
Returns:
features: List of Decimals. The features described above, all in two
decimal places.
"""
multiplier = day_candle.instrument.multiplier
features = [
day_candle.high_bid,
day_candle.low_bid,
day_candle.close_bid,
day_candle.open_ask,
day_candle.high_ask,
day_candle.low_ask,
day_candle.close_ask,
]
features = [multiplier * (x - day_candle.open_bid) for x in features]
features = [decimal.Decimal(x).quantize(TWO_PLACES) for x in features]
return features
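# Worked example (hypothetical numbers, not from the source data): with multiplier = 10000,
# open_bid = 1.2000 and high_bid = 1.2010, the high_bid feature is
# 10000 * (1.2010 - 1.2000) = 10.00 pips after quantizing to two decimal places.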
def get_profitable_change(day_candle):
""" Get the potential daily profitable price change in pips.
If prices rise enough, we have: close_bid - open_ask (> 0), buy.
If prices fall enough, we have: close_ask - open_bid (< 0), sell.
if prices stay relatively still, we don't buy or sell. It's 0.
Args:
day_candle: candles.Candle object representing a daily candle.
Returns:
profitable_change: Decimal. The profitable rate change described
above, in two decimal places.
"""
multiplier = day_candle.instrument.multiplier
change = 0
if day_candle.close_bid > day_candle.open_ask:
change = multiplier * (day_candle.close_bid - day_candle.open_ask)
elif day_candle.close_ask < day_candle.open_bid:
change = multiplier * (day_candle.close_ask - day_candle.open_bid)
return decimal.Decimal(change).quantize(TWO_PLACES)
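# Worked example (hypothetical numbers): with multiplier = 10000, open_ask = 1.10010 and
# close_bid = 1.10100, prices rose enough to buy and the target is
# 10000 * (1.10100 - 1.10010) = 9.00 pips; if instead close_ask < open_bid the result is
# negative (sell), and otherwise it stays 0.00 (no trade).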
def build_sample_row(candle_previous, candle_next):
""" Build one training sample from two consecutive days of candles.
Args:
candle_previous: candles.Candle object. Candle of first day.
candle_next: candles.Candle object. Candle of second day.
Returns:
sample: TrainingSample object. One training sample for learning.
"""
return ts.create_one(
instrument=candle_next.instrument,
date=candle_next.start_time.date() + datetime.timedelta(1),
features=extract_features(candle_previous),
target=get_profitable_change(candle_next))
def get_start_time(instrument):
""" Get the start time for retrieving candles of the given instrument.
This is determined by the last training sample in the database.
Args:
instrument: Instrument object. The given instrument.
Returns:
start_time: Datetime object. The datetime from which to query
candles from to fill the rest of the training samples.
"""
last_sample = ts.get_last(instrument)
if last_sample is not None:
start_date = last_sample.date - datetime.timedelta(1)
return datetime.datetime.combine(start_date, datetime.time())
return datetime.datetime(2005, 1, 1)
def run():
""" Update the training samples in the database from the latest candles.
This should be run daily to ensure the training set is up-to-date.
Args:
None.
"""
all_new_samples = []
for instrument in instruments.get_all():
start_time = get_start_time(instrument)
new_candles = candles.get_candles(
instrument=instrument, start=start_time, order_by='start_time')
for i in range(len(new_candles) - 1):
all_new_samples.append(
build_sample_row(new_candles[i], new_candles[i + 1]))
ts.insert_many(all_new_samples)
| [
"datetime.datetime",
"datetime.time",
"core.models.instruments.get_all",
"datasource.models.candles.get_candles",
"algos.euler.models.training_samples.insert_many",
"algos.euler.models.training_samples.get_last",
"datetime.timedelta",
"decimal.Decimal"
]
| [((343, 366), 'decimal.Decimal', 'decimal.Decimal', (['"""0.01"""'], {}), "('0.01')\n", (358, 366), False, 'import decimal\n'), ((3342, 3365), 'algos.euler.models.training_samples.get_last', 'ts.get_last', (['instrument'], {}), '(instrument)\n', (3353, 3365), True, 'from algos.euler.models import training_samples as ts\n'), ((3542, 3571), 'datetime.datetime', 'datetime.datetime', (['(2005)', '(1)', '(1)'], {}), '(2005, 1, 1)\n', (3559, 3571), False, 'import datetime\n'), ((3825, 3846), 'core.models.instruments.get_all', 'instruments.get_all', ([], {}), '()\n', (3844, 3846), False, 'from core.models import instruments\n'), ((4172, 4203), 'algos.euler.models.training_samples.insert_many', 'ts.insert_many', (['all_new_samples'], {}), '(all_new_samples)\n', (4186, 4203), True, 'from algos.euler.models import training_samples as ts\n'), ((3918, 4006), 'datasource.models.candles.get_candles', 'candles.get_candles', ([], {'instrument': 'instrument', 'start': 'start_time', 'order_by': '"""start_time"""'}), "(instrument=instrument, start=start_time, order_by=\n 'start_time')\n", (3937, 4006), False, 'from datasource.models import candles\n'), ((2220, 2243), 'decimal.Decimal', 'decimal.Decimal', (['change'], {}), '(change)\n', (2235, 2243), False, 'import decimal\n'), ((3438, 3459), 'datetime.timedelta', 'datetime.timedelta', (['(1)'], {}), '(1)\n', (3456, 3459), False, 'import datetime\n'), ((3513, 3528), 'datetime.time', 'datetime.time', ([], {}), '()\n', (3526, 3528), False, 'import datetime\n'), ((1234, 1252), 'decimal.Decimal', 'decimal.Decimal', (['x'], {}), '(x)\n', (1249, 1252), False, 'import decimal\n'), ((2766, 2787), 'datetime.timedelta', 'datetime.timedelta', (['(1)'], {}), '(1)\n', (2784, 2787), False, 'import datetime\n')] |
__doc__ = \
"""
=======================================================================================
Main-driver :obj:`LogStream` variables (:mod:`mango.application.main_driver.logstream`)
=======================================================================================
.. currentmodule:: mango.application.main_driver.logstream
Logging objects/attributes for :obj:`mango.application.main_driver.MainDriverFilter` filters.
Classes
=======
.. autosummary::
:toctree: generated/
LogStream - Message logging for :obj:`mango.application.main_driver.MainDriverFilter` filters.
Attributes
==========
.. autodata:: log
.. autodata:: mstLog
.. autodata:: mstOut
.. autodata:: warnLog
.. autodata:: errLog
"""
import mango
import mango.mpi as mpi
import os
import os.path
import sys
if sys.platform.startswith('linux'):
import DLFCN as dl
_flags = sys.getdlopenflags()
sys.setdlopenflags(dl.RTLD_NOW|dl.RTLD_GLOBAL)
from . import _mango_main_driver as _mango_main_driver_so
sys.setdlopenflags(_flags)
else:
from . import _mango_main_driver as _mango_main_driver_so
from mango.core import LogStream
#: Messages sent to stdout, prefixed with :samp:`'P<RANK>'`, where :samp:`<RANK>` is MPI process world rank.
log = _mango_main_driver_so._log
#: Messages sent to stdout, prefixed with :samp:`'MST'`, and messages also saved to history-meta-data.
mstLog = _mango_main_driver_so._mstLog
#: Messages sent to stdout, prefixed with :samp:`'OUT'`.
mstOut = _mango_main_driver_so._mstOut
#: Messages sent to stderr, prefixed with :samp:`'WARNING'`.
warnLog = _mango_main_driver_so._warnLog
#: Messages sent to stderr, prefixed with :samp:`'ERROR'`.
errLog = _mango_main_driver_so._errLog
__all__ = [s for s in dir() if not s.startswith('_')]
| [
"sys.setdlopenflags",
"sys.platform.startswith",
"sys.getdlopenflags"
]
| [((812, 844), 'sys.platform.startswith', 'sys.platform.startswith', (['"""linux"""'], {}), "('linux')\n", (835, 844), False, 'import sys\n'), ((882, 902), 'sys.getdlopenflags', 'sys.getdlopenflags', ([], {}), '()\n', (900, 902), False, 'import sys\n'), ((907, 955), 'sys.setdlopenflags', 'sys.setdlopenflags', (['(dl.RTLD_NOW | dl.RTLD_GLOBAL)'], {}), '(dl.RTLD_NOW | dl.RTLD_GLOBAL)\n', (925, 955), False, 'import sys\n'), ((1020, 1046), 'sys.setdlopenflags', 'sys.setdlopenflags', (['_flags'], {}), '(_flags)\n', (1038, 1046), False, 'import sys\n')] |
# -*- coding: utf-8-unix -*-
import platform
######################################################################
# Platform specific headers
######################################################################
if platform.system() == 'Linux':
src = """
typedef bool BOOL;
"""
######################################################################
# Common headers
######################################################################
src += """
#define CY_STRING_DESCRIPTOR_SIZE 256
#define CY_MAX_DEVICE_INTERFACE 5
#define CY_US_VERSION_MAJOR 1
#define CY_US_VERSION_MINOR 0
#define CY_US_VERSION_PATCH 0
#define CY_US_VERSION 1
#define CY_US_VERSION_BUILD 74
typedef unsigned int UINT32;
typedef unsigned char UINT8;
typedef unsigned short UINT16;
typedef char CHAR;
typedef unsigned char UCHAR;
typedef void* CY_HANDLE;
typedef void (*CY_EVENT_NOTIFICATION_CB_FN)(UINT16 eventsNotified);
typedef struct _CY_VID_PID {
UINT16 vid;
UINT16 pid;
} CY_VID_PID, *PCY_VID_PID;
typedef struct _CY_LIBRARY_VERSION {
UINT8 majorVersion;
UINT8 minorVersion;
UINT16 patch;
UINT8 buildNumber;
} CY_LIBRARY_VERSION, *PCY_LIBRARY_VERSION;
typedef struct _CY_FIRMWARE_VERSION {
UINT8 majorVersion;
UINT8 minorVersion;
UINT16 patchNumber;
UINT32 buildNumber;
} CY_FIRMWARE_VERSION, *PCY_FIRMWARE_VERSION;
typedef enum _CY_DEVICE_CLASS{
CY_CLASS_DISABLED = 0,
CY_CLASS_CDC = 0x02,
CY_CLASS_PHDC = 0x0F,
CY_CLASS_VENDOR = 0xFF
} CY_DEVICE_CLASS;
typedef enum _CY_DEVICE_TYPE {
CY_TYPE_DISABLED = 0,
CY_TYPE_UART,
CY_TYPE_SPI,
CY_TYPE_I2C,
CY_TYPE_JTAG,
CY_TYPE_MFG
} CY_DEVICE_TYPE;
typedef enum _CY_DEVICE_SERIAL_BLOCK
{
SerialBlock_SCB0 = 0,
SerialBlock_SCB1,
SerialBlock_MFG
} CY_DEVICE_SERIAL_BLOCK;
typedef struct _CY_DEVICE_INFO {
CY_VID_PID vidPid;
UCHAR numInterfaces;
UCHAR manufacturerName [256];
UCHAR productName [256];
UCHAR serialNum [256];
UCHAR deviceFriendlyName [256];
CY_DEVICE_TYPE deviceType [5];
CY_DEVICE_CLASS deviceClass [5];
CY_DEVICE_SERIAL_BLOCK deviceBlock;
} CY_DEVICE_INFO,*PCY_DEVICE_INFO;
typedef struct _CY_DATA_BUFFER {
UCHAR *buffer;
UINT32 length;
UINT32 transferCount;
} CY_DATA_BUFFER,*PCY_DATA_BUFFER;
typedef enum _CY_RETURN_STATUS{
CY_SUCCESS = 0,
CY_ERROR_ACCESS_DENIED,
CY_ERROR_DRIVER_INIT_FAILED,
CY_ERROR_DEVICE_INFO_FETCH_FAILED,
CY_ERROR_DRIVER_OPEN_FAILED,
CY_ERROR_INVALID_PARAMETER,
CY_ERROR_REQUEST_FAILED,
CY_ERROR_DOWNLOAD_FAILED,
CY_ERROR_FIRMWARE_INVALID_SIGNATURE,
CY_ERROR_INVALID_FIRMWARE,
CY_ERROR_DEVICE_NOT_FOUND,
CY_ERROR_IO_TIMEOUT,
CY_ERROR_PIPE_HALTED,
CY_ERROR_BUFFER_OVERFLOW,
CY_ERROR_INVALID_HANDLE,
CY_ERROR_ALLOCATION_FAILED,
CY_ERROR_I2C_DEVICE_BUSY,
CY_ERROR_I2C_NAK_ERROR,
CY_ERROR_I2C_ARBITRATION_ERROR,
CY_ERROR_I2C_BUS_ERROR,
CY_ERROR_I2C_BUS_BUSY,
CY_ERROR_I2C_STOP_BIT_SET,
CY_ERROR_STATUS_MONITOR_EXIST
} CY_RETURN_STATUS;
typedef struct _CY_I2C_CONFIG{
UINT32 frequency;
UINT8 slaveAddress;
BOOL isMaster;
BOOL isClockStretch;
} CY_I2C_CONFIG,*PCY_I2C_CONFIG;
typedef struct _CY_I2C_DATA_CONFIG
{
UCHAR slaveAddress;
BOOL isStopBit;
BOOL isNakBit;
} CY_I2C_DATA_CONFIG, *PCY_I2C_DATA_CONFIG;
typedef enum _CY_SPI_PROTOCOL {
CY_SPI_MOTOROLA = 0,
CY_SPI_TI,
CY_SPI_NS
} CY_SPI_PROTOCOL;
typedef struct _CY_SPI_CONFIG
{
UINT32 frequency;
UCHAR dataWidth;
CY_SPI_PROTOCOL protocol ;
BOOL isMsbFirst;
BOOL isMaster;
BOOL isContinuousMode;
BOOL isSelectPrecede;
BOOL isCpha;
BOOL isCpol;
}CY_SPI_CONFIG,*PCY_SPI_CONFIG;
typedef enum _CY_UART_BAUD_RATE
{
CY_UART_BAUD_300 = 300,
CY_UART_BAUD_600 = 600,
CY_UART_BAUD_1200 = 1200,
CY_UART_BAUD_2400 = 2400,
CY_UART_BAUD_4800 = 4800,
CY_UART_BAUD_9600 = 9600,
CY_UART_BAUD_14400 = 14400,
CY_UART_BAUD_19200 = 19200,
CY_UART_BAUD_38400 = 38400,
CY_UART_BAUD_56000 = 56000,
CY_UART_BAUD_57600 = 57600,
CY_UART_BAUD_115200 = 115200,
CY_UART_BAUD_230400 = 230400,
CY_UART_BAUD_460800 = 460800,
CY_UART_BAUD_921600 = 921600,
CY_UART_BAUD_1000000 = 1000000,
CY_UART_BAUD_3000000 = 3000000,
}CY_UART_BAUD_RATE;
typedef enum _CY_UART_PARITY_MODE {
CY_DATA_PARITY_DISABLE = 0,
CY_DATA_PARITY_ODD,
CY_DATA_PARITY_EVEN,
CY_DATA_PARITY_MARK,
CY_DATA_PARITY_SPACE
} CY_UART_PARITY_MODE;
typedef enum _CY_UART_STOP_BIT {
CY_UART_ONE_STOP_BIT = 1,
CY_UART_TWO_STOP_BIT
} CY_UART_STOP_BIT;
typedef enum _CY_FLOW_CONTROL_MODES {
CY_UART_FLOW_CONTROL_DISABLE = 0,
CY_UART_FLOW_CONTROL_DSR,
CY_UART_FLOW_CONTROL_RTS_CTS,
CY_UART_FLOW_CONTROL_ALL
} CY_FLOW_CONTROL_MODES;
typedef struct _CY_UART_CONFIG {
CY_UART_BAUD_RATE baudRate;
UINT8 dataWidth;
CY_UART_STOP_BIT stopBits;
CY_UART_PARITY_MODE parityMode;
BOOL isDropOnRxErrors;
} CY_UART_CONFIG,*PCY_UART_CONFIG;
typedef enum _CY_CALLBACK_EVENTS {
CY_UART_CTS_BIT = 0x01,
CY_UART_DSR_BIT = 0x02,
CY_UART_BREAK_BIT = 0x04,
CY_UART_RING_SIGNAL_BIT = 0x08,
CY_UART_FRAME_ERROR_BIT = 0x10,
CY_UART_PARITY_ERROR_BIT = 0x20,
CY_UART_DATA_OVERRUN_BIT = 0x40,
CY_UART_DCD_BIT = 0x100,
CY_SPI_TX_UNDERFLOW_BIT = 0x200,
CY_SPI_BUS_ERROR_BIT = 0x400,
CY_ERROR_EVENT_FAILED_BIT = 0x800
} CY_CALLBACK_EVENTS;
CY_RETURN_STATUS CyLibraryInit ();
CY_RETURN_STATUS CyLibraryExit ();
CY_RETURN_STATUS CyGetListofDevices (
UINT8* numDevices
);
CY_RETURN_STATUS CyGetDeviceInfo(
UINT8 deviceNumber,
CY_DEVICE_INFO *deviceInfo
);
CY_RETURN_STATUS CyGetDeviceInfoVidPid (
CY_VID_PID vidPid,
UINT8 *deviceIdList,
CY_DEVICE_INFO *deviceInfoList,
UINT8 *deviceCount,
UINT8 infoListLength
);
CY_RETURN_STATUS CyOpen (
UINT8 deviceNumber,
UINT8 interfaceNum,
CY_HANDLE *handle
);
CY_RETURN_STATUS CyClose (
CY_HANDLE handle
);
CY_RETURN_STATUS CyCyclePort (
CY_HANDLE handle
);
CY_RETURN_STATUS CySetGpioValue (
CY_HANDLE handle,
UINT8 gpioNumber,
UINT8 value
);
CY_RETURN_STATUS CyGetGpioValue (
CY_HANDLE handle,
UINT8 gpioNumber,
UINT8 *value
);
CY_RETURN_STATUS CySetEventNotification(
CY_HANDLE handle,
CY_EVENT_NOTIFICATION_CB_FN notificationCbFn
);
CY_RETURN_STATUS CyAbortEventNotification(
CY_HANDLE handle
);
CY_RETURN_STATUS CyGetLibraryVersion (
CY_HANDLE handle,
PCY_LIBRARY_VERSION version
);
CY_RETURN_STATUS CyGetFirmwareVersion (
CY_HANDLE handle,
PCY_FIRMWARE_VERSION firmwareVersion
);
CY_RETURN_STATUS CyResetDevice (
CY_HANDLE handle
);
CY_RETURN_STATUS CyProgUserFlash (
CY_HANDLE handle,
CY_DATA_BUFFER *progBuffer,
UINT32 flashAddress,
UINT32 timeout
);
CY_RETURN_STATUS CyReadUserFlash (
CY_HANDLE handle,
CY_DATA_BUFFER *readBuffer,
UINT32 flashAddress,
UINT32 timeout
);
CY_RETURN_STATUS CyGetSignature (
CY_HANDLE handle,
UCHAR *pSignature
);
CY_RETURN_STATUS CyGetUartConfig (
CY_HANDLE handle,
CY_UART_CONFIG *uartConfig
);
CY_RETURN_STATUS CySetUartConfig (
CY_HANDLE handle,
CY_UART_CONFIG *uartConfig
);
CY_RETURN_STATUS CyUartRead (
CY_HANDLE handle,
CY_DATA_BUFFER* readBuffer,
UINT32 timeout
);
CY_RETURN_STATUS CyUartWrite (
CY_HANDLE handle,
CY_DATA_BUFFER* writeBuffer,
UINT32 timeout
);
CY_RETURN_STATUS CyUartSetHwFlowControl(
CY_HANDLE handle,
CY_FLOW_CONTROL_MODES mode
);
CY_RETURN_STATUS CyUartGetHwFlowControl(
CY_HANDLE handle,
CY_FLOW_CONTROL_MODES *mode
);
CY_RETURN_STATUS CyUartSetRts(
CY_HANDLE handle
);
CY_RETURN_STATUS CyUartClearRts(
CY_HANDLE handle
);
CY_RETURN_STATUS CyUartSetDtr(
CY_HANDLE handle
);
CY_RETURN_STATUS CyUartClearDtr(
CY_HANDLE handle
);
CY_RETURN_STATUS CyUartSetBreak(
CY_HANDLE handle,
UINT16 timeout
);
CY_RETURN_STATUS CyGetI2cConfig (
CY_HANDLE handle,
CY_I2C_CONFIG *i2cConfig
);
CY_RETURN_STATUS CySetI2cConfig (
CY_HANDLE handle,
CY_I2C_CONFIG *i2cConfig
);
CY_RETURN_STATUS CyI2cRead (
CY_HANDLE handle,
CY_I2C_DATA_CONFIG *dataConfig,
CY_DATA_BUFFER *readBuffer,
UINT32 timeout
);
CY_RETURN_STATUS CyI2cWrite (
CY_HANDLE handle,
CY_I2C_DATA_CONFIG *dataConfig,
CY_DATA_BUFFER *writeBuffer,
UINT32 timeout
);
CY_RETURN_STATUS CyI2cReset(
CY_HANDLE handle,
BOOL resetMode
);
CY_RETURN_STATUS CyGetSpiConfig (
CY_HANDLE handle,
CY_SPI_CONFIG *spiConfig
);
CY_RETURN_STATUS CySetSpiConfig (
CY_HANDLE handle,
CY_SPI_CONFIG *spiConfig
);
CY_RETURN_STATUS CySpiReadWrite (
CY_HANDLE handle,
CY_DATA_BUFFER* readBuffer,
CY_DATA_BUFFER* writeBuffer,
UINT32 timeout
);
CY_RETURN_STATUS CyJtagEnable (
CY_HANDLE handle
);
CY_RETURN_STATUS CyJtagDisable (
CY_HANDLE handle
);
CY_RETURN_STATUS CyJtagWrite (
CY_HANDLE handle,
CY_DATA_BUFFER *writeBuffer,
UINT32 timeout
);
CY_RETURN_STATUS CyJtagRead (
CY_HANDLE handle,
CY_DATA_BUFFER *readBuffer,
UINT32 timeout
);
CY_RETURN_STATUS CyPhdcClrFeature (
CY_HANDLE handle
);
CY_RETURN_STATUS CyPhdcSetFeature (
CY_HANDLE handle
);
CY_RETURN_STATUS CyPhdcGetStatus (
CY_HANDLE handle,
UINT16 *dataStatus
);
"""
| [
"platform.system"
]
| [((221, 238), 'platform.system', 'platform.system', ([], {}), '()\n', (236, 238), False, 'import platform\n')] |
import numpy as np
import math
from scipy.optimize import curve_fit
def calc_lorentzian(CestCurveS, x_calcentires, mask, config):
(rows, colums, z_slices, entires) = CestCurveS.shape
lorenzian = {key: np.zeros((rows, colums, z_slices), dtype=float) for key in config.lorenzian_keys}
for k in range(z_slices):
for i in range(rows):
for j in range(colums):
if mask[i, j, k] != 0:
params = calc_lorenzian_pixel(CestCurveS[i, j, k, :], x_calcentires, config.Lorenzian['MT_f'],
config.Lorenzian['NOE1_f'], config.Lorenzian['NOE2_f'],
config.Lorenzian['OH_f'], config.Lorenzian['NH_f'])
if params is None:
continue
dic = {
'OH_a': params[3],
'OH_w': params[4],
'NH_a': params[5],
'NH_w': params[6],
'NOE1_a': params[7],
'NOE1_w': params[8],
'NOE2_a': params[9],
'NOE2_w': params[10],
'MT_a': params[11],
'MT_w': params[12],
}
for key in config.lorenzian_keys:
lorenzian[key][i, j, k] = dic[key]
return lorenzian
def calc_lorenzian_pixel(values, x_calcentires, MT_f, NOE1_f, NOE2_f, OH_f, NH_f):
# wassr_offset, da die Z-Spektren vorher korrigiert wurden
fit = lorenz_like_matlab(wassr_offset=0, MT_f=MT_f, NOE1_f=NOE1_f, NOE2_f=NOE2_f, OH_f=OH_f, NH_f=NH_f)
try:
param, param_cov = curve_fit(fit, x_calcentires, values, bounds=([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
10,
10]))
except RuntimeError:
param = None
return param
def lorenz_like_matlab(wassr_offset, MT_f: float = - 2.43, NOE1_f: float = - 1, NOE2_f: float = - 2.6,
                       OH_f: float = + 1.4, NH_f: float = + 3.2):
    # X_f = frequency of pool X
    # Reconstructed body (assumption): the original held only a commented-out formula and `pass`.
    # The model takes 13 parameters (baseline, water amplitude/width, then amplitude/width per
    # pool), matching the curve_fit bounds and the params[3:13] unpacking in calc_lorenzian_pixel.
    # ret = (a + ak) - (a * ((b ** 2) / 4) / (((b ** 2) / 4) + (x - wassr_offset) ** 2))
    def fit(x, a, w_a, w_w, OH_a, OH_w, NH_a, NH_w, NOE1_a, NOE1_w, NOE2_a, NOE2_w, MT_a, MT_w):
        peaks = ((w_a, w_w, 0), (OH_a, OH_w, OH_f), (NH_a, NH_w, NH_f),
                 (NOE1_a, NOE1_w, NOE1_f), (NOE2_a, NOE2_w, NOE2_f), (MT_a, MT_w, MT_f))
        return a - sum(one_lorenz(x, amp, wid, wassr_offset, f) for amp, wid, f in peaks)
    return fit
def one_lorenz(x, amplitude, width, wassr_offset, frequenz):
return amplitude * ((width ** 2) / 4) / (((width ** 2) / 4) + (x - (wassr_offset + frequenz)) ** 2)
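# Note: at x == wassr_offset + frequenz the expression equals `amplitude` (the peak height),
# and `width` is the full width at half maximum of the Lorentzian.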
| [
"scipy.optimize.curve_fit",
"numpy.zeros"
]
| [((212, 259), 'numpy.zeros', 'np.zeros', (['(rows, colums, z_slices)'], {'dtype': 'float'}), '((rows, colums, z_slices), dtype=float)\n', (220, 259), True, 'import numpy as np\n'), ((1741, 1886), 'scipy.optimize.curve_fit', 'curve_fit', (['fit', 'x_calcentires', 'values'], {'bounds': '([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [10, 10, 10, 10, 10, 10, 10, 10, \n 10, 10, 10, 10, 10])'}), '(fit, x_calcentires, values, bounds=([0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0], [10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10]))\n', (1750, 1886), False, 'from scipy.optimize import curve_fit\n')] |
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
#############################################################
# File: network_models_LSTU.py
# Created Date: Tuesday February 25th 2020
# Author: <NAME>
# Email: <EMAIL>
# Last Modified: Tuesday, 25th February 2020 9:57:06 pm
# Modified By: <NAME>
# Copyright (c) 2020 Shanghai Jiao Tong University
#############################################################
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from functools import partial
import tensorflow as tf
import tensorflow.contrib.slim as slim
import tflib as tl
conv = partial(slim.conv2d, activation_fn=None)
dconv = partial(slim.conv2d_transpose, activation_fn=None)
fc = partial(tl.flatten_fully_connected, activation_fn=None)
relu = tf.nn.relu
lrelu = tf.nn.leaky_relu
sigmoid = tf.nn.sigmoid
tanh = tf.nn.tanh
batch_norm = partial(slim.batch_norm, scale=True, updates_collections=None)
instance_norm = slim.instance_norm
MAX_DIM = 64 * 16
def Genc(x, dim=64, n_layers=5, multi_inputs=1, is_training=True):
bn = partial(batch_norm, is_training=is_training)
conv_bn_lrelu = partial(conv, normalizer_fn=bn, activation_fn=lrelu)
with tf.variable_scope('Genc', reuse=tf.AUTO_REUSE):
h, w = x.shape[1:3]
z = x
zs = []
for i in range(n_layers):
d = min(dim * 2**i, MAX_DIM)
if multi_inputs > i and i > 0:
z = tf.concat([z, tf.image.resize_bicubic(x, (h//(2**i), w//(2**i)))], 3)
z = conv_bn_lrelu(z, d, 4, 2)
zs.append(z)
return zs
def LSTU(in_data, state, out_channel, is_training=True, kernel_size=3, norm='none', pass_state='lstate'):
if norm == 'bn':
norm_fn = partial(batch_norm, is_training=is_training)
elif norm == 'in':
norm_fn = instance_norm
else:
norm_fn = None
gate = partial(conv, normalizer_fn=norm_fn, activation_fn=sigmoid)
info = partial(conv, normalizer_fn=norm_fn, activation_fn=tanh)
with tf.name_scope('ConvGRUCell'):
state_ = dconv(state, out_channel, 4, 2) # upsample and make `channel` identical to `out_channel`
reset_gate = gate(tf.concat([in_data, state_], axis=3), 1, kernel_size)
update_gate = gate(tf.concat([in_data, state_], axis=3), 1, kernel_size)
new_state = reset_gate * state_
new_info = info(tf.concat([in_data, new_state], axis=3), out_channel, kernel_size)
output = (1-update_gate)*state_ + update_gate*new_info
if pass_state == 'gru':
return output, output
elif pass_state == 'direct':
return output, state_
else: # 'stu'
return output, new_state
# state_hat = dconv(old_state, outdim, 4, 2)
# tmp_concat= _concat(x, state_hat, None)
# channelpool1=tf.concat([tf.reduce_max(tmp_concat,3,True), tf.reduce_mean(tmp_concat,3,True)], axis=3)
# r_channel=conv(channelpool1,1,7,1,normalizer_fn=None,activation_fn=sigmoid)
# new_state = r_channel * state_hat
# tmp_concat= _concat(x, new_state, None)
# hidden_info = conv(tmp_concat,outdim,3,1,normalizer_fn=None,activation_fn=tanh)
# tmp_concat= _concat(x, state_hat, None)
# channelpool2=tf.concat([tf.reduce_max(tmp_concat,3,True), tf.reduce_mean(tmp_concat,3,True)], axis=3)
# z=conv(channelpool2,1,7,1,normalizer_fn=None,activation_fn=sigmoid)
# output =z *hidden_info +(1-z)*state_hat
# return output,new_state
def Gstu(zs, _a, dim=64, n_layers=1, inject_layers=0, is_training=True, kernel_size=3, norm='none', pass_state='stu'):
def _concat(z, z_, _a):
feats = [z]
if z_ is not None:
feats.append(z_)
if _a is not None:
_a = tf.reshape(_a, [-1, 1, 1, tl.shape(_a)[-1]])
_a = tf.tile(_a, [1, tl.shape(z)[1], tl.shape(z)[2], 1])
feats.append(_a)
return tf.concat(feats, axis=3)
with tf.variable_scope('Gstu', reuse=tf.AUTO_REUSE):
zs_ = [zs[-1]]
state = _concat(zs[-1], None, _a)
for i in range(n_layers): # n_layers <= 4
d = min(dim * 2**(n_layers - 1 - i), MAX_DIM)
output = LSTU(zs[n_layers - 1 - i],state,d,is_training=is_training,
kernel_size=kernel_size, norm=norm, pass_state=pass_state)
zs_.insert(0, output[0])
if inject_layers > i:
state = _concat(output[1], None, _a)
else:
state = output[1]
return zs_
def Gdec(zs, _a, dim=64, n_layers=5, shortcut_layers=1, inject_layers=0, is_training=True, one_more_conv=0):
bn = partial(batch_norm, is_training=is_training)
dconv_bn_relu = partial(dconv, normalizer_fn=bn, activation_fn=relu)
shortcut_layers = min(shortcut_layers, n_layers - 1)
inject_layers = min(inject_layers, n_layers - 1)
def _concat(z, z_, _a):
feats = [z]
if z_ is not None:
feats.append(z_)
if _a is not None:
_a = tf.reshape(_a, [-1, 1, 1, tl.shape(_a)[-1]])
_a = tf.tile(_a, [1, tl.shape(z)[1], tl.shape(z)[2], 1])
feats.append(_a)
return tf.concat(feats, axis=3)
with tf.variable_scope('Gdec', reuse=tf.AUTO_REUSE):
z = _concat(zs[-1], None, _a)
for i in range(n_layers):
if i < n_layers - 1:
d = min(dim * 2**(n_layers - 1 - i), MAX_DIM)
z = dconv_bn_relu(z, d, 4, 2)
if shortcut_layers > i:
z = _concat(z, zs[n_layers - 2 - i], None)
if inject_layers > i:
z = _concat(z, None, _a)
else:
if one_more_conv: # add one more conv after the decoder
z = dconv_bn_relu(z, dim//4, 4, 2)
x = tf.nn.tanh(dconv(z, 3, one_more_conv))
else:
x = z = tf.nn.tanh(dconv(z, 3, 4, 2))
return x
def D(x, n_att, dim=64, fc_dim=MAX_DIM, n_layers=5):
conv_in_lrelu = partial(conv, normalizer_fn=instance_norm, activation_fn=lrelu)
with tf.variable_scope('D', reuse=tf.AUTO_REUSE):
y = x
for i in range(n_layers):
d = min(dim * 2**i, MAX_DIM)
y = conv_in_lrelu(y, d, 4, 2)
logit_gan = lrelu(fc(y, fc_dim))
logit_gan = fc(logit_gan, 1)
logit_att = lrelu(fc(y, fc_dim))
logit_att = fc(logit_att, n_att)
return logit_gan, logit_att
def gradient_penalty(f, real, fake=None):
def _interpolate(a, b=None):
with tf.name_scope('interpolate'):
if b is None: # interpolation in DRAGAN
beta = tf.random_uniform(shape=tf.shape(a), minval=0., maxval=1.)
_, variance = tf.nn.moments(a, range(a.shape.ndims))
b = a + 0.5 * tf.sqrt(variance) * beta
shape = [tf.shape(a)[0]] + [1] * (a.shape.ndims - 1)
alpha = tf.random_uniform(shape=shape, minval=0., maxval=1.)
inter = a + alpha * (b - a)
inter.set_shape(a.get_shape().as_list())
return inter
with tf.name_scope('gradient_penalty'):
x = _interpolate(real, fake)
pred = f(x)
if isinstance(pred, tuple):
pred = pred[0]
grad = tf.gradients(pred, x)[0]
norm = tf.norm(slim.flatten(grad), axis=1)
gp = tf.reduce_mean((norm - 1.)**2)
return gp
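# Hedged usage sketch (assumption, not part of the original file):
#   d_fn = partial(D, n_att=13)                      # n_att value is hypothetical
#   gp = gradient_penalty(d_fn, xa_real, xb_fake)    # WGAN-GP interpolation between real/fake
#   gp = gradient_penalty(d_fn, xa_real)             # DRAGAN-style perturbation when fake is None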
| [
"tensorflow.contrib.slim.flatten",
"tensorflow.variable_scope",
"tflib.shape",
"tensorflow.shape",
"tensorflow.image.resize_bicubic",
"tensorflow.gradients",
"tensorflow.concat",
"tensorflow.random_uniform",
"tensorflow.sqrt",
"tensorflow.name_scope",
"functools.partial",
"tensorflow.reduce_mean"
]
| [((642, 682), 'functools.partial', 'partial', (['slim.conv2d'], {'activation_fn': 'None'}), '(slim.conv2d, activation_fn=None)\n', (649, 682), False, 'from functools import partial\n'), ((691, 741), 'functools.partial', 'partial', (['slim.conv2d_transpose'], {'activation_fn': 'None'}), '(slim.conv2d_transpose, activation_fn=None)\n', (698, 741), False, 'from functools import partial\n'), ((747, 802), 'functools.partial', 'partial', (['tl.flatten_fully_connected'], {'activation_fn': 'None'}), '(tl.flatten_fully_connected, activation_fn=None)\n', (754, 802), False, 'from functools import partial\n'), ((901, 963), 'functools.partial', 'partial', (['slim.batch_norm'], {'scale': '(True)', 'updates_collections': 'None'}), '(slim.batch_norm, scale=True, updates_collections=None)\n', (908, 963), False, 'from functools import partial\n'), ((1096, 1140), 'functools.partial', 'partial', (['batch_norm'], {'is_training': 'is_training'}), '(batch_norm, is_training=is_training)\n', (1103, 1140), False, 'from functools import partial\n'), ((1161, 1213), 'functools.partial', 'partial', (['conv'], {'normalizer_fn': 'bn', 'activation_fn': 'lrelu'}), '(conv, normalizer_fn=bn, activation_fn=lrelu)\n', (1168, 1213), False, 'from functools import partial\n'), ((1913, 1972), 'functools.partial', 'partial', (['conv'], {'normalizer_fn': 'norm_fn', 'activation_fn': 'sigmoid'}), '(conv, normalizer_fn=norm_fn, activation_fn=sigmoid)\n', (1920, 1972), False, 'from functools import partial\n'), ((1984, 2040), 'functools.partial', 'partial', (['conv'], {'normalizer_fn': 'norm_fn', 'activation_fn': 'tanh'}), '(conv, normalizer_fn=norm_fn, activation_fn=tanh)\n', (1991, 2040), False, 'from functools import partial\n'), ((4737, 4781), 'functools.partial', 'partial', (['batch_norm'], {'is_training': 'is_training'}), '(batch_norm, is_training=is_training)\n', (4744, 4781), False, 'from functools import partial\n'), ((4802, 4854), 'functools.partial', 'partial', (['dconv'], {'normalizer_fn': 'bn', 'activation_fn': 'relu'}), '(dconv, normalizer_fn=bn, activation_fn=relu)\n', (4809, 4854), False, 'from functools import partial\n'), ((6135, 6198), 'functools.partial', 'partial', (['conv'], {'normalizer_fn': 'instance_norm', 'activation_fn': 'lrelu'}), '(conv, normalizer_fn=instance_norm, activation_fn=lrelu)\n', (6142, 6198), False, 'from functools import partial\n'), ((1224, 1270), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""Genc"""'], {'reuse': 'tf.AUTO_REUSE'}), "('Genc', reuse=tf.AUTO_REUSE)\n", (1241, 1270), True, 'import tensorflow as tf\n'), ((1769, 1813), 'functools.partial', 'partial', (['batch_norm'], {'is_training': 'is_training'}), '(batch_norm, is_training=is_training)\n', (1776, 1813), False, 'from functools import partial\n'), ((2050, 2078), 'tensorflow.name_scope', 'tf.name_scope', (['"""ConvGRUCell"""'], {}), "('ConvGRUCell')\n", (2063, 2078), True, 'import tensorflow as tf\n'), ((3990, 4014), 'tensorflow.concat', 'tf.concat', (['feats'], {'axis': '(3)'}), '(feats, axis=3)\n', (3999, 4014), True, 'import tensorflow as tf\n'), ((4029, 4075), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""Gstu"""'], {'reuse': 'tf.AUTO_REUSE'}), "('Gstu', reuse=tf.AUTO_REUSE)\n", (4046, 4075), True, 'import tensorflow as tf\n'), ((5273, 5297), 'tensorflow.concat', 'tf.concat', (['feats'], {'axis': '(3)'}), '(feats, axis=3)\n', (5282, 5297), True, 'import tensorflow as tf\n'), ((5308, 5354), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""Gdec"""'], {'reuse': 'tf.AUTO_REUSE'}), "('Gdec', 
reuse=tf.AUTO_REUSE)\n", (5325, 5354), True, 'import tensorflow as tf\n'), ((6209, 6252), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""D"""'], {'reuse': 'tf.AUTO_REUSE'}), "('D', reuse=tf.AUTO_REUSE)\n", (6226, 6252), True, 'import tensorflow as tf\n'), ((7230, 7263), 'tensorflow.name_scope', 'tf.name_scope', (['"""gradient_penalty"""'], {}), "('gradient_penalty')\n", (7243, 7263), True, 'import tensorflow as tf\n'), ((7489, 7522), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['((norm - 1.0) ** 2)'], {}), '((norm - 1.0) ** 2)\n', (7503, 7522), True, 'import tensorflow as tf\n'), ((2219, 2255), 'tensorflow.concat', 'tf.concat', (['[in_data, state_]'], {'axis': '(3)'}), '([in_data, state_], axis=3)\n', (2228, 2255), True, 'import tensorflow as tf\n'), ((2300, 2336), 'tensorflow.concat', 'tf.concat', (['[in_data, state_]'], {'axis': '(3)'}), '([in_data, state_], axis=3)\n', (2309, 2336), True, 'import tensorflow as tf\n'), ((2423, 2462), 'tensorflow.concat', 'tf.concat', (['[in_data, new_state]'], {'axis': '(3)'}), '([in_data, new_state], axis=3)\n', (2432, 2462), True, 'import tensorflow as tf\n'), ((6674, 6702), 'tensorflow.name_scope', 'tf.name_scope', (['"""interpolate"""'], {}), "('interpolate')\n", (6687, 6702), True, 'import tensorflow as tf\n'), ((7049, 7103), 'tensorflow.random_uniform', 'tf.random_uniform', ([], {'shape': 'shape', 'minval': '(0.0)', 'maxval': '(1.0)'}), '(shape=shape, minval=0.0, maxval=1.0)\n', (7066, 7103), True, 'import tensorflow as tf\n'), ((7400, 7421), 'tensorflow.gradients', 'tf.gradients', (['pred', 'x'], {}), '(pred, x)\n', (7412, 7421), True, 'import tensorflow as tf\n'), ((7448, 7466), 'tensorflow.contrib.slim.flatten', 'slim.flatten', (['grad'], {}), '(grad)\n', (7460, 7466), True, 'import tensorflow.contrib.slim as slim\n'), ((1482, 1536), 'tensorflow.image.resize_bicubic', 'tf.image.resize_bicubic', (['x', '(h // 2 ** i, w // 2 ** i)'], {}), '(x, (h // 2 ** i, w // 2 ** i))\n', (1505, 1536), True, 'import tensorflow as tf\n'), ((3858, 3870), 'tflib.shape', 'tl.shape', (['_a'], {}), '(_a)\n', (3866, 3870), True, 'import tflib as tl\n'), ((3910, 3921), 'tflib.shape', 'tl.shape', (['z'], {}), '(z)\n', (3918, 3921), True, 'import tflib as tl\n'), ((3926, 3937), 'tflib.shape', 'tl.shape', (['z'], {}), '(z)\n', (3934, 3937), True, 'import tflib as tl\n'), ((5141, 5153), 'tflib.shape', 'tl.shape', (['_a'], {}), '(_a)\n', (5149, 5153), True, 'import tflib as tl\n'), ((5193, 5204), 'tflib.shape', 'tl.shape', (['z'], {}), '(z)\n', (5201, 5204), True, 'import tflib as tl\n'), ((5209, 5220), 'tflib.shape', 'tl.shape', (['z'], {}), '(z)\n', (5217, 5220), True, 'import tflib as tl\n'), ((6805, 6816), 'tensorflow.shape', 'tf.shape', (['a'], {}), '(a)\n', (6813, 6816), True, 'import tensorflow as tf\n'), ((6985, 6996), 'tensorflow.shape', 'tf.shape', (['a'], {}), '(a)\n', (6993, 6996), True, 'import tensorflow as tf\n'), ((6939, 6956), 'tensorflow.sqrt', 'tf.sqrt', (['variance'], {}), '(variance)\n', (6946, 6956), True, 'import tensorflow as tf\n')] |
import backend as F
import numpy as np
import scipy as sp
import dgl
from dgl import utils
import unittest
from numpy.testing import assert_array_equal
np.random.seed(42)
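# Helper used throughout: a random read-only DGLGraph with ~10% edge density.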
def generate_rand_graph(n):
arr = (sp.sparse.random(n, n, density=0.1, format='coo') != 0).astype(np.int64)
return dgl.DGLGraph(arr, readonly=True)
def test_create_full():
g = generate_rand_graph(100)
full_nf = dgl.contrib.sampling.sampler.create_full_nodeflow(g, 5)
assert full_nf.number_of_nodes() == g.number_of_nodes() * 6
assert full_nf.number_of_edges() == g.number_of_edges() * 5
def test_1neighbor_sampler_all():
g = generate_rand_graph(100)
# In this case, NeighborSampling simply gets the neighborhood of a single vertex.
for i, subg in enumerate(dgl.contrib.sampling.NeighborSampler(
g, 1, g.number_of_nodes(), neighbor_type='in', num_workers=4)):
seed_ids = subg.layer_parent_nid(-1)
assert len(seed_ids) == 1
src, dst, eid = g.in_edges(seed_ids, form='all')
assert subg.number_of_nodes() == len(src) + 1
assert subg.number_of_edges() == len(src)
assert seed_ids == subg.layer_parent_nid(-1)
child_src, child_dst, child_eid = subg.in_edges(subg.layer_nid(-1), form='all')
assert F.array_equal(child_src, subg.layer_nid(0))
src1 = subg.map_to_parent_nid(child_src)
assert F.array_equal(src1, src)
def is_sorted(arr):
return np.sum(np.sort(arr) == arr, 0) == len(arr)
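# Consistency check for a sampled subgraph around seed_id: the seed must appear in the
# last layer, and its sampled in-neighbors must be unique, sorted, and present in the parent graph.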
def verify_subgraph(g, subg, seed_id):
seed_id = F.asnumpy(seed_id)
seeds = F.asnumpy(subg.map_to_parent_nid(subg.layer_nid(-1)))
assert seed_id in seeds
child_seed = F.asnumpy(subg.layer_nid(-1))[seeds == seed_id]
src, dst, eid = g.in_edges(seed_id, form='all')
child_src, child_dst, child_eid = subg.in_edges(child_seed, form='all')
child_src = F.asnumpy(child_src)
# We don't allow duplicate elements in the neighbor list.
assert(len(np.unique(child_src)) == len(child_src))
# The neighbor list also needs to be sorted.
assert(is_sorted(child_src))
# a neighbor in the subgraph must also exist in parent graph.
src = F.asnumpy(src)
for i in subg.map_to_parent_nid(child_src):
assert F.asnumpy(i) in src
def test_1neighbor_sampler():
g = generate_rand_graph(100)
# In this case, NeighborSampling simply gets the neighborhood of a single vertex.
for subg in dgl.contrib.sampling.NeighborSampler(g, 1, 5, neighbor_type='in',
num_workers=4):
seed_ids = subg.layer_parent_nid(-1)
assert len(seed_ids) == 1
assert subg.number_of_nodes() <= 6
assert subg.number_of_edges() <= 5
verify_subgraph(g, subg, seed_ids)
def test_prefetch_neighbor_sampler():
g = generate_rand_graph(100)
# In this case, NeighborSampling simply gets the neighborhood of a single vertex.
for subg in dgl.contrib.sampling.NeighborSampler(g, 1, 5, neighbor_type='in',
num_workers=4, prefetch=True):
seed_ids = subg.layer_parent_nid(-1)
assert len(seed_ids) == 1
assert subg.number_of_nodes() <= 6
assert subg.number_of_edges() <= 5
verify_subgraph(g, subg, seed_ids)
def test_10neighbor_sampler_all():
g = generate_rand_graph(100)
# In this case, NeighborSampling simply gets the neighborhood of a single vertex.
for subg in dgl.contrib.sampling.NeighborSampler(g, 10, g.number_of_nodes(),
neighbor_type='in', num_workers=4):
seed_ids = subg.layer_parent_nid(-1)
assert F.array_equal(seed_ids, subg.map_to_parent_nid(subg.layer_nid(-1)))
src, dst, eid = g.in_edges(seed_ids, form='all')
child_src, child_dst, child_eid = subg.in_edges(subg.layer_nid(-1), form='all')
src1 = subg.map_to_parent_nid(child_src)
assert F.array_equal(src1, src)
def check_10neighbor_sampler(g, seeds):
# In this case, NeighborSampling simply gets the neighborhood of a single vertex.
for subg in dgl.contrib.sampling.NeighborSampler(g, 10, 5, neighbor_type='in',
num_workers=4, seed_nodes=seeds):
seed_ids = subg.layer_parent_nid(-1)
assert subg.number_of_nodes() <= 6 * len(seed_ids)
assert subg.number_of_edges() <= 5 * len(seed_ids)
for seed_id in seed_ids:
verify_subgraph(g, subg, seed_id)
def test_10neighbor_sampler():
g = generate_rand_graph(100)
check_10neighbor_sampler(g, None)
check_10neighbor_sampler(g, seeds=np.unique(np.random.randint(0, g.number_of_nodes(),
size=int(g.number_of_nodes() / 10))))
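# LayerSampler test: draw NodeFlows with three sampled layers from fixed seed batches and
# verify that layer/block node and edge ids map back correctly onto the parent graph.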
def _test_layer_sampler(prefetch=False):
g = generate_rand_graph(100)
nid = g.nodes()
src, dst, eid = g.all_edges(form='all', order='eid')
n_batches = 5
batch_size = 50
seed_batches = [np.sort(np.random.choice(F.asnumpy(nid), batch_size, replace=False))
for i in range(n_batches)]
seed_nodes = np.hstack(seed_batches)
layer_sizes = [50] * 3
LayerSampler = getattr(dgl.contrib.sampling, 'LayerSampler')
sampler = LayerSampler(g, batch_size, layer_sizes, 'in',
seed_nodes=seed_nodes, num_workers=4, prefetch=prefetch)
for sub_g in sampler:
assert all(sub_g.layer_size(i) < size for i, size in enumerate(layer_sizes))
sub_nid = F.arange(0, sub_g.number_of_nodes())
assert all(np.all(np.isin(F.asnumpy(sub_g.layer_nid(i)), F.asnumpy(sub_nid)))
for i in range(sub_g.num_layers))
assert np.all(np.isin(F.asnumpy(sub_g.map_to_parent_nid(sub_nid)),
F.asnumpy(nid)))
sub_eid = F.arange(0, sub_g.number_of_edges())
assert np.all(np.isin(F.asnumpy(sub_g.map_to_parent_eid(sub_eid)),
F.asnumpy(eid)))
assert any(np.all(np.sort(F.asnumpy(sub_g.layer_parent_nid(-1))) == seed_batch)
for seed_batch in seed_batches)
sub_src, sub_dst = sub_g.all_edges(order='eid')
for i in range(sub_g.num_blocks):
block_eid = sub_g.block_eid(i)
block_src = sub_g.map_to_parent_nid(F.gather_row(sub_src, block_eid))
block_dst = sub_g.map_to_parent_nid(F.gather_row(sub_dst, block_eid))
block_parent_eid = sub_g.block_parent_eid(i)
block_parent_src = F.gather_row(src, block_parent_eid)
block_parent_dst = F.gather_row(dst, block_parent_eid)
assert np.all(F.asnumpy(block_src == block_parent_src))
n_layers = sub_g.num_layers
sub_n = sub_g.number_of_nodes()
assert sum(F.shape(sub_g.layer_nid(i))[0] for i in range(n_layers)) == sub_n
n_blocks = sub_g.num_blocks
sub_m = sub_g.number_of_edges()
assert sum(F.shape(sub_g.block_eid(i))[0] for i in range(n_blocks)) == sub_m
def test_layer_sampler():
_test_layer_sampler()
_test_layer_sampler(prefetch=True)
@unittest.skipIf(dgl.backend.backend_name == "tensorflow", reason="Error occured when multiprocessing")
def test_nonuniform_neighbor_sampler():
# Construct a graph with
# (1) A path (0, 1, ..., 99) with weight 1
# (2) A bunch of random edges with weight 0.
edges = []
for i in range(99):
edges.append((i, i + 1))
for i in range(1000):
edge = (np.random.randint(100), np.random.randint(100))
if edge not in edges:
edges.append(edge)
src, dst = zip(*edges)
g = dgl.DGLGraph()
g.add_nodes(100)
g.add_edges(src, dst)
g.readonly()
g.edata['w'] = F.cat([
F.ones((99,), F.float64, F.cpu()),
F.zeros((len(edges) - 99,), F.float64, F.cpu())], 0)
# Test 1-neighbor NodeFlow with 99 as target node.
# The generated NodeFlow should only contain node i on layer i.
sampler = dgl.contrib.sampling.NeighborSampler(
g, 1, 1, 99, 'in', transition_prob='w', seed_nodes=[99])
nf = next(iter(sampler))
assert nf.num_layers == 100
for i in range(nf.num_layers):
assert nf.layer_size(i) == 1
assert F.asnumpy(nf.layer_parent_nid(i)[0]) == i
# Test the reverse direction
sampler = dgl.contrib.sampling.NeighborSampler(
g, 1, 1, 99, 'out', transition_prob='w', seed_nodes=[0])
nf = next(iter(sampler))
assert nf.num_layers == 100
for i in range(nf.num_layers):
assert nf.layer_size(i) == 1
assert F.asnumpy(nf.layer_parent_nid(i)[0]) == 99 - i
def test_setseed():
g = generate_rand_graph(100)
nids = []
dgl.random.seed(42)
for subg in dgl.contrib.sampling.NeighborSampler(
g, 5, 3, num_hops=2, neighbor_type='in', num_workers=1):
nids.append(
tuple(tuple(F.asnumpy(subg.layer_parent_nid(i))) for i in range(3)))
# reinitialize
dgl.random.seed(42)
for i, subg in enumerate(dgl.contrib.sampling.NeighborSampler(
g, 5, 3, num_hops=2, neighbor_type='in', num_workers=1)):
item = tuple(tuple(F.asnumpy(subg.layer_parent_nid(i))) for i in range(3))
assert item == nids[i]
for i, subg in enumerate(dgl.contrib.sampling.NeighborSampler(
g, 5, 3, num_hops=2, neighbor_type='in', num_workers=4)):
pass
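# The head/tail node ids of a (negative) edge subgraph must coincide with the unique
# source/destination nodes of its edges.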
def check_head_tail(g):
lsrc, ldst, leid = g.all_edges(form='all', order='eid')
lsrc = np.unique(F.asnumpy(lsrc))
head_nid = np.unique(F.asnumpy(g.head_nid))
assert len(head_nid) == len(g.head_nid)
np.testing.assert_equal(lsrc, head_nid)
ldst = np.unique(F.asnumpy(ldst))
tail_nid = np.unique(F.asnumpy(g.tail_nid))
assert len(tail_nid) == len(g.tail_nid)
np.testing.assert_equal(tail_nid, ldst)
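# Uniform negative sampling: verify the corrupted edges, the 'false_neg' flags, and the
# replacement/reset combinations on both the homogeneous graph and the knowledge graph.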
def check_negative_sampler(mode, exclude_positive, neg_size):
g = generate_rand_graph(100)
num_edges = g.number_of_edges()
etype = np.random.randint(0, 10, size=g.number_of_edges(), dtype=np.int64)
g.edata['etype'] = F.copy_to(F.tensor(etype), F.cpu())
pos_gsrc, pos_gdst, pos_geid = g.all_edges(form='all', order='eid')
pos_map = {}
for i in range(len(pos_geid)):
pos_d = int(F.asnumpy(pos_gdst[i]))
pos_e = int(F.asnumpy(pos_geid[i]))
pos_map[(pos_d, pos_e)] = int(F.asnumpy(pos_gsrc[i]))
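    # pos_map maps each (destination, edge id) pair to its true source node so that, when
    # exclude_positive is set, sampled negative sources can be checked against it.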
EdgeSampler = getattr(dgl.contrib.sampling, 'EdgeSampler')
# Test the homogeneous graph.
batch_size = 50
total_samples = 0
for pos_edges, neg_edges in EdgeSampler(g, batch_size,
negative_mode=mode,
reset=False,
neg_sample_size=neg_size,
exclude_positive=exclude_positive,
return_false_neg=True):
pos_lsrc, pos_ldst, pos_leid = pos_edges.all_edges(form='all', order='eid')
assert_array_equal(F.asnumpy(F.gather_row(pos_edges.parent_eid, pos_leid)),
F.asnumpy(g.edge_ids(F.gather_row(pos_edges.parent_nid, pos_lsrc),
F.gather_row(pos_edges.parent_nid, pos_ldst))))
neg_lsrc, neg_ldst, neg_leid = neg_edges.all_edges(form='all', order='eid')
neg_src = F.gather_row(neg_edges.parent_nid, neg_lsrc)
neg_dst = F.gather_row(neg_edges.parent_nid, neg_ldst)
neg_eid = F.gather_row(neg_edges.parent_eid, neg_leid)
for i in range(len(neg_eid)):
neg_d = int(F.asnumpy(neg_dst)[i])
neg_e = int(F.asnumpy(neg_eid)[i])
assert (neg_d, neg_e) in pos_map
if exclude_positive:
assert int(F.asnumpy(neg_src[i])) != pos_map[(neg_d, neg_e)]
check_head_tail(neg_edges)
pos_tails = F.gather_row(pos_edges.parent_nid, pos_edges.tail_nid)
neg_tails = F.gather_row(neg_edges.parent_nid, neg_edges.tail_nid)
pos_tails = np.sort(F.asnumpy(pos_tails))
neg_tails = np.sort(F.asnumpy(neg_tails))
np.testing.assert_equal(pos_tails, neg_tails)
exist = neg_edges.edata['false_neg']
if exclude_positive:
assert np.sum(F.asnumpy(exist) == 0) == len(exist)
else:
assert F.array_equal(g.has_edges_between(neg_src, neg_dst), exist)
total_samples += batch_size
assert total_samples <= num_edges
# check replacement = True
# with reset = False (default setting)
total_samples = 0
for pos_edges, neg_edges in EdgeSampler(g, batch_size,
replacement=True,
reset=False,
negative_mode=mode,
neg_sample_size=neg_size,
exclude_positive=exclude_positive,
return_false_neg=True):
_, _, pos_leid = pos_edges.all_edges(form='all', order='eid')
assert len(pos_leid) == batch_size
total_samples += len(pos_leid)
assert total_samples == num_edges
# check replacement = False
# with reset = False (default setting)
total_samples = 0
for pos_edges, neg_edges in EdgeSampler(g, batch_size,
replacement=False,
reset=False,
negative_mode=mode,
neg_sample_size=neg_size,
exclude_positive=exclude_positive,
return_false_neg=True):
_, _, pos_leid = pos_edges.all_edges(form='all', order='eid')
assert len(pos_leid) == batch_size
total_samples += len(pos_leid)
assert total_samples == num_edges
# check replacement = True
# with reset = True
total_samples = 0
max_samples = 2 * num_edges
for pos_edges, neg_edges in EdgeSampler(g, batch_size,
replacement=True,
reset=True,
negative_mode=mode,
neg_sample_size=neg_size,
exclude_positive=exclude_positive,
return_false_neg=True):
_, _, pos_leid = pos_edges.all_edges(form='all', order='eid')
assert len(pos_leid) <= batch_size
total_samples += len(pos_leid)
if (total_samples >= max_samples):
break
assert total_samples >= max_samples
# check replacement = False
# with reset = True
total_samples = 0
max_samples = 2 * num_edges
for pos_edges, neg_edges in EdgeSampler(g, batch_size,
replacement=False,
reset=True,
negative_mode=mode,
neg_sample_size=neg_size,
exclude_positive=exclude_positive,
return_false_neg=True):
_, _, pos_leid = pos_edges.all_edges(form='all', order='eid')
assert len(pos_leid) <= batch_size
total_samples += len(pos_leid)
if (total_samples >= max_samples):
break
assert total_samples >= max_samples
# Test the knowledge graph.
total_samples = 0
for _, neg_edges in EdgeSampler(g, batch_size,
negative_mode=mode,
reset=False,
neg_sample_size=neg_size,
exclude_positive=exclude_positive,
relations=g.edata['etype'],
return_false_neg=True):
neg_lsrc, neg_ldst, neg_leid = neg_edges.all_edges(form='all', order='eid')
neg_src = F.gather_row(neg_edges.parent_nid, neg_lsrc)
neg_dst = F.gather_row(neg_edges.parent_nid, neg_ldst)
neg_eid = F.gather_row(neg_edges.parent_eid, neg_leid)
exists = neg_edges.edata['false_neg']
neg_edges.edata['etype'] = F.gather_row(g.edata['etype'], neg_eid)
for i in range(len(neg_eid)):
u, v = F.asnumpy(neg_src[i]), F.asnumpy(neg_dst[i])
if g.has_edge_between(u, v):
eid = g.edge_id(u, v)
etype = g.edata['etype'][eid]
exist = neg_edges.edata['etype'][i] == etype
assert F.asnumpy(exists[i]) == F.asnumpy(exist)
total_samples += batch_size
assert total_samples <= num_edges
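# Weighted negative sampling: repeat the correctness checks with edge/node weights, then
# verify that heavily weighted edges and nodes are sampled proportionally more often.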
def check_weighted_negative_sampler(mode, exclude_positive, neg_size):
g = generate_rand_graph(100)
num_edges = g.number_of_edges()
num_nodes = g.number_of_nodes()
edge_weight = F.copy_to(F.tensor(np.full((num_edges,), 1, dtype=np.float32)), F.cpu())
node_weight = F.copy_to(F.tensor(np.full((num_nodes,), 1, dtype=np.float32)), F.cpu())
etype = np.random.randint(0, 10, size=num_edges, dtype=np.int64)
g.edata['etype'] = F.copy_to(F.tensor(etype), F.cpu())
pos_gsrc, pos_gdst, pos_geid = g.all_edges(form='all', order='eid')
pos_map = {}
for i in range(len(pos_geid)):
pos_d = int(F.asnumpy(pos_gdst[i]))
pos_e = int(F.asnumpy(pos_geid[i]))
pos_map[(pos_d, pos_e)] = int(F.asnumpy(pos_gsrc[i]))
EdgeSampler = getattr(dgl.contrib.sampling, 'EdgeSampler')
# Correctness check
# Test the homogeneous graph.
batch_size = 50
    # Test the homogeneous graph with edge weight provided.
total_samples = 0
for pos_edges, neg_edges in EdgeSampler(g, batch_size,
reset=False,
edge_weight=edge_weight,
negative_mode=mode,
neg_sample_size=neg_size,
exclude_positive=exclude_positive,
return_false_neg=True):
pos_lsrc, pos_ldst, pos_leid = pos_edges.all_edges(form='all', order='eid')
assert_array_equal(F.asnumpy(F.gather_row(pos_edges.parent_eid, pos_leid)),
F.asnumpy(g.edge_ids(F.gather_row(pos_edges.parent_nid, pos_lsrc),
F.gather_row(pos_edges.parent_nid, pos_ldst))))
neg_lsrc, neg_ldst, neg_leid = neg_edges.all_edges(form='all', order='eid')
neg_src = F.gather_row(neg_edges.parent_nid, neg_lsrc)
neg_dst = F.gather_row(neg_edges.parent_nid, neg_ldst)
neg_eid = F.gather_row(neg_edges.parent_eid, neg_leid)
for i in range(len(neg_eid)):
neg_d = int(F.asnumpy(neg_dst[i]))
neg_e = int(F.asnumpy(neg_eid[i]))
assert (neg_d, neg_e) in pos_map
if exclude_positive:
assert int(F.asnumpy(neg_src[i])) != pos_map[(neg_d, neg_e)]
check_head_tail(neg_edges)
pos_tails = F.gather_row(pos_edges.parent_nid, pos_edges.tail_nid)
neg_tails = F.gather_row(neg_edges.parent_nid, neg_edges.tail_nid)
pos_tails = np.sort(F.asnumpy(pos_tails))
neg_tails = np.sort(F.asnumpy(neg_tails))
np.testing.assert_equal(pos_tails, neg_tails)
exist = neg_edges.edata['false_neg']
if exclude_positive:
assert np.sum(F.asnumpy(exist) == 0) == len(exist)
else:
assert F.array_equal(g.has_edges_between(neg_src, neg_dst), exist)
total_samples += batch_size
assert total_samples <= num_edges
    # Test the knowledge graph with edge weight provided.
total_samples = 0
for pos_edges, neg_edges in EdgeSampler(g, batch_size,
reset=False,
edge_weight=edge_weight,
negative_mode=mode,
neg_sample_size=neg_size,
exclude_positive=exclude_positive,
relations=g.edata['etype'],
return_false_neg=True):
neg_lsrc, neg_ldst, neg_leid = neg_edges.all_edges(form='all', order='eid')
neg_src = F.gather_row(neg_edges.parent_nid, neg_lsrc)
neg_dst = F.gather_row(neg_edges.parent_nid, neg_ldst)
neg_eid = F.gather_row(neg_edges.parent_eid, neg_leid)
exists = neg_edges.edata['false_neg']
neg_edges.edata['etype'] = F.gather_row(g.edata['etype'], neg_eid)
for i in range(len(neg_eid)):
u, v = F.asnumpy(neg_src[i]), F.asnumpy(neg_dst[i])
if g.has_edge_between(u, v):
eid = g.edge_id(u, v)
etype = g.edata['etype'][eid]
exist = neg_edges.edata['etype'][i] == etype
assert F.asnumpy(exists[i]) == F.asnumpy(exist)
total_samples += batch_size
assert total_samples <= num_edges
    # Test the knowledge graph with edge/node weight provided.
total_samples = 0
for pos_edges, neg_edges in EdgeSampler(g, batch_size,
reset=False,
edge_weight=edge_weight,
node_weight=node_weight,
negative_mode=mode,
neg_sample_size=neg_size,
exclude_positive=exclude_positive,
relations=g.edata['etype'],
return_false_neg=True):
neg_lsrc, neg_ldst, neg_leid = neg_edges.all_edges(form='all', order='eid')
neg_src = F.gather_row(neg_edges.parent_nid, neg_lsrc)
neg_dst = F.gather_row(neg_edges.parent_nid, neg_ldst)
neg_eid = F.gather_row(neg_edges.parent_eid, neg_leid)
exists = neg_edges.edata['false_neg']
neg_edges.edata['etype'] = F.gather_row(g.edata['etype'], neg_eid)
for i in range(len(neg_eid)):
u, v = F.asnumpy(neg_src[i]), F.asnumpy(neg_dst[i])
if g.has_edge_between(u, v):
eid = g.edge_id(u, v)
etype = g.edata['etype'][eid]
exist = neg_edges.edata['etype'][i] == etype
assert F.asnumpy(exists[i]) == F.asnumpy(exist)
total_samples += batch_size
assert total_samples <= num_edges
    # check replacement = True with pos edges non-uniform sample
# with reset = False
total_samples = 0
for pos_edges, neg_edges in EdgeSampler(g, batch_size,
replacement=True,
reset=False,
edge_weight=edge_weight,
negative_mode=mode,
neg_sample_size=neg_size,
exclude_positive=exclude_positive,
return_false_neg=True):
_, _, pos_leid = pos_edges.all_edges(form='all', order='eid')
assert len(pos_leid) == batch_size
total_samples += len(pos_leid)
assert total_samples == num_edges
    # check replacement = True with pos edges non-uniform sample
# with reset = True
total_samples = 0
max_samples = 4 * num_edges
for pos_edges, neg_edges in EdgeSampler(g, batch_size,
replacement=True,
reset=True,
edge_weight=edge_weight,
negative_mode=mode,
neg_sample_size=neg_size,
exclude_positive=exclude_positive,
return_false_neg=True):
_, _, pos_leid = pos_edges.all_edges(form='all', order='eid')
assert len(pos_leid) == batch_size
total_samples += len(pos_leid)
if total_samples >= max_samples:
break
assert total_samples == max_samples
    # check replacement = False with pos/neg edges non-uniform sample
# reset = False
total_samples = 0
for pos_edges, neg_edges in EdgeSampler(g, batch_size,
replacement=False,
reset=False,
edge_weight=edge_weight,
node_weight=node_weight,
negative_mode=mode,
neg_sample_size=neg_size,
exclude_positive=exclude_positive,
relations=g.edata['etype'],
return_false_neg=True):
_, _, pos_leid = pos_edges.all_edges(form='all', order='eid')
assert len(pos_leid) == batch_size
total_samples += len(pos_leid)
assert total_samples == num_edges
    # check replacement = False with pos/neg edges non-uniform sample
# reset = True
total_samples = 0
for pos_edges, neg_edges in EdgeSampler(g, batch_size,
replacement=False,
reset=True,
edge_weight=edge_weight,
node_weight=node_weight,
negative_mode=mode,
neg_sample_size=neg_size,
exclude_positive=exclude_positive,
relations=g.edata['etype'],
return_false_neg=True):
_, _, pos_leid = pos_edges.all_edges(form='all', order='eid')
assert len(pos_leid) == batch_size
total_samples += len(pos_leid)
if total_samples >= max_samples:
break
assert total_samples == max_samples
# Check Rate
dgl.random.seed(0)
g = generate_rand_graph(1000)
num_edges = g.number_of_edges()
num_nodes = g.number_of_nodes()
edge_weight = F.copy_to(F.tensor(np.full((num_edges,), 1, dtype=np.float32)), F.cpu())
edge_weight[0] = F.sum(edge_weight, dim=0)
node_weight = F.copy_to(F.tensor(np.full((num_nodes,), 1, dtype=np.float32)), F.cpu())
node_weight[-1] = F.sum(node_weight, dim=0) / 200
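    # Edge 0 now carries about half of the total edge weight, and the last node is weighted
    # 5x the others; the rate checks below verify these proportions in the drawn samples.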
etype = np.random.randint(0, 20, size=num_edges, dtype=np.int64)
g.edata['etype'] = F.copy_to(F.tensor(etype), F.cpu())
# Test w/o node weight.
max_samples = num_edges // 5
total_samples = 0
    # Test the knowledge graph with edge weight provided.
edge_sampled = np.full((num_edges,), 0, dtype=np.int32)
node_sampled = np.full((num_nodes,), 0, dtype=np.int32)
for pos_edges, neg_edges in EdgeSampler(g, batch_size,
replacement=True,
edge_weight=edge_weight,
shuffle=True,
negative_mode=mode,
neg_sample_size=neg_size,
exclude_positive=False,
relations=g.edata['etype'],
return_false_neg=True):
_, _, pos_leid = pos_edges.all_edges(form='all', order='eid')
neg_lsrc, neg_ldst, _ = neg_edges.all_edges(form='all', order='eid')
if 'head' in mode:
neg_src = neg_edges.parent_nid[neg_lsrc]
np.add.at(node_sampled, F.asnumpy(neg_src), 1)
else:
neg_dst = neg_edges.parent_nid[neg_ldst]
np.add.at(node_sampled, F.asnumpy(neg_dst), 1)
np.add.at(edge_sampled, F.asnumpy(pos_edges.parent_eid[pos_leid]), 1)
total_samples += batch_size
if total_samples > max_samples:
break
# Check rate here
edge_rate_0 = edge_sampled[0] / edge_sampled.sum()
edge_tail_half_cnt = edge_sampled[edge_sampled.shape[0] // 2:-1].sum()
edge_rate_tail_half = edge_tail_half_cnt / edge_sampled.sum()
assert np.allclose(edge_rate_0, 0.5, atol=0.05)
assert np.allclose(edge_rate_tail_half, 0.25, atol=0.05)
node_rate_0 = node_sampled[0] / node_sampled.sum()
node_tail_half_cnt = node_sampled[node_sampled.shape[0] // 2:-1].sum()
node_rate_tail_half = node_tail_half_cnt / node_sampled.sum()
assert node_rate_0 < 0.02
assert np.allclose(node_rate_tail_half, 0.5, atol=0.02)
    # Test the knowledge graph with edge/node weight provided.
edge_sampled = np.full((num_edges,), 0, dtype=np.int32)
node_sampled = np.full((num_nodes,), 0, dtype=np.int32)
total_samples = 0
for pos_edges, neg_edges in EdgeSampler(g, batch_size,
replacement=True,
edge_weight=edge_weight,
node_weight=node_weight,
shuffle=True,
negative_mode=mode,
neg_sample_size=neg_size,
exclude_positive=False,
relations=g.edata['etype'],
return_false_neg=True):
_, _, pos_leid = pos_edges.all_edges(form='all', order='eid')
neg_lsrc, neg_ldst, _ = neg_edges.all_edges(form='all', order='eid')
if 'head' in mode:
neg_src = F.gather_row(neg_edges.parent_nid, neg_lsrc)
np.add.at(node_sampled, F.asnumpy(neg_src), 1)
else:
neg_dst = F.gather_row(neg_edges.parent_nid, neg_ldst)
np.add.at(node_sampled, F.asnumpy(neg_dst), 1)
np.add.at(edge_sampled, F.asnumpy(pos_edges.parent_eid[pos_leid]), 1)
total_samples += batch_size
if total_samples > max_samples:
break
# Check rate here
edge_rate_0 = edge_sampled[0] / edge_sampled.sum()
edge_tail_half_cnt = edge_sampled[edge_sampled.shape[0] // 2:-1].sum()
edge_rate_tail_half = edge_tail_half_cnt / edge_sampled.sum()
assert np.allclose(edge_rate_0, 0.5, atol=0.05)
assert np.allclose(edge_rate_tail_half, 0.25, atol=0.05)
node_rate = node_sampled[-1] / node_sampled.sum()
node_rate_a = np.average(node_sampled[:50]) / node_sampled.sum()
node_rate_b = np.average(node_sampled[50:100]) / node_sampled.sum()
    # As negative sampling does not produce duplicate nodes,
    # this test allows some acceptable variation in the sample rate.
assert np.allclose(node_rate, node_rate_a * 5, atol=0.002)
assert np.allclose(node_rate_a, node_rate_b, atol=0.0002)
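# Positive-edge sampling with reset=False must visit every edge exactly once, with and
# without shuffling, despite the heavily skewed edge weights.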
def check_positive_edge_sampler():
g = generate_rand_graph(1000)
num_edges = g.number_of_edges()
edge_weight = F.copy_to(F.tensor(np.full((num_edges,), 1, dtype=np.float32)), F.cpu())
edge_weight[num_edges-1] = num_edges ** 3
EdgeSampler = getattr(dgl.contrib.sampling, 'EdgeSampler')
# Correctness check
# Test the homogeneous graph.
batch_size = 128
edge_sampled = np.full((num_edges,), 0, dtype=np.int32)
for pos_edges in EdgeSampler(g, batch_size,
reset=False,
edge_weight=edge_weight):
_, _, pos_leid = pos_edges.all_edges(form='all', order='eid')
np.add.at(edge_sampled, F.asnumpy(pos_edges.parent_eid[pos_leid]), 1)
truth = np.full((num_edges,), 1, dtype=np.int32)
edge_sampled = edge_sampled[:num_edges]
assert np.array_equal(truth, edge_sampled)
edge_sampled = np.full((num_edges,), 0, dtype=np.int32)
for pos_edges in EdgeSampler(g, batch_size,
reset=False,
shuffle=True,
edge_weight=edge_weight):
_, _, pos_leid = pos_edges.all_edges(form='all', order='eid')
np.add.at(edge_sampled, F.asnumpy(pos_edges.parent_eid[pos_leid]), 1)
truth = np.full((num_edges,), 1, dtype=np.int32)
edge_sampled = edge_sampled[:num_edges]
assert np.array_equal(truth, edge_sampled)
@unittest.skipIf(dgl.backend.backend_name == "tensorflow", reason="TF doesn't support item assignment")
def test_negative_sampler():
check_negative_sampler('chunk-head', False, 10)
check_negative_sampler('head', True, 10)
check_negative_sampler('head', False, 10)
check_weighted_negative_sampler('chunk-head', False, 10)
check_weighted_negative_sampler('head', True, 10)
check_weighted_negative_sampler('head', False, 10)
check_positive_edge_sampler()
#disable this check for now. It might take too long time.
#check_negative_sampler('head', False, 100)
if __name__ == '__main__':
test_create_full()
test_1neighbor_sampler_all()
test_10neighbor_sampler_all()
test_1neighbor_sampler()
test_10neighbor_sampler()
test_layer_sampler()
test_nonuniform_neighbor_sampler()
test_setseed()
test_negative_sampler()
| [
"numpy.testing.assert_equal",
"numpy.hstack",
"unittest.skipIf",
"backend.array_equal",
"backend.sum",
"dgl.random.seed",
"numpy.sort",
"scipy.sparse.random",
"numpy.random.seed",
"dgl.DGLGraph",
"dgl.contrib.sampling.NeighborSampler",
"backend.tensor",
"dgl.contrib.sampling.sampler.create_full_nodeflow",
"numpy.allclose",
"backend.asnumpy",
"numpy.average",
"numpy.unique",
"numpy.random.randint",
"numpy.array_equal",
"backend.gather_row",
"numpy.full",
"backend.cpu"
]
| [((153, 171), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (167, 171), True, 'import numpy as np\n'), ((7157, 7264), 'unittest.skipIf', 'unittest.skipIf', (["(dgl.backend.backend_name == 'tensorflow')"], {'reason': '"""Error occured when multiprocessing"""'}), "(dgl.backend.backend_name == 'tensorflow', reason=\n 'Error occured when multiprocessing')\n", (7172, 7264), False, 'import unittest\n'), ((32993, 33100), 'unittest.skipIf', 'unittest.skipIf', (["(dgl.backend.backend_name == 'tensorflow')"], {'reason': '"""TF doesn\'t support item assignment"""'}), '(dgl.backend.backend_name == \'tensorflow\', reason=\n "TF doesn\'t support item assignment")\n', (33008, 33100), False, 'import unittest\n'), ((296, 328), 'dgl.DGLGraph', 'dgl.DGLGraph', (['arr'], {'readonly': '(True)'}), '(arr, readonly=True)\n', (308, 328), False, 'import dgl\n'), ((401, 456), 'dgl.contrib.sampling.sampler.create_full_nodeflow', 'dgl.contrib.sampling.sampler.create_full_nodeflow', (['g', '(5)'], {}), '(g, 5)\n', (450, 456), False, 'import dgl\n'), ((1542, 1560), 'backend.asnumpy', 'F.asnumpy', (['seed_id'], {}), '(seed_id)\n', (1551, 1560), True, 'import backend as F\n'), ((1865, 1885), 'backend.asnumpy', 'F.asnumpy', (['child_src'], {}), '(child_src)\n', (1874, 1885), True, 'import backend as F\n'), ((2163, 2177), 'backend.asnumpy', 'F.asnumpy', (['src'], {}), '(src)\n', (2172, 2177), True, 'import backend as F\n'), ((2427, 2512), 'dgl.contrib.sampling.NeighborSampler', 'dgl.contrib.sampling.NeighborSampler', (['g', '(1)', '(5)'], {'neighbor_type': '"""in"""', 'num_workers': '(4)'}), "(g, 1, 5, neighbor_type='in', num_workers=4\n )\n", (2463, 2512), False, 'import dgl\n'), ((2944, 3043), 'dgl.contrib.sampling.NeighborSampler', 'dgl.contrib.sampling.NeighborSampler', (['g', '(1)', '(5)'], {'neighbor_type': '"""in"""', 'num_workers': '(4)', 'prefetch': '(True)'}), "(g, 1, 5, neighbor_type='in',\n num_workers=4, prefetch=True)\n", (2980, 3043), False, 'import dgl\n'), ((4133, 4236), 'dgl.contrib.sampling.NeighborSampler', 'dgl.contrib.sampling.NeighborSampler', (['g', '(10)', '(5)'], {'neighbor_type': '"""in"""', 'num_workers': '(4)', 'seed_nodes': 'seeds'}), "(g, 10, 5, neighbor_type='in',\n num_workers=4, seed_nodes=seeds)\n", (4169, 4236), False, 'import dgl\n'), ((5169, 5192), 'numpy.hstack', 'np.hstack', (['seed_batches'], {}), '(seed_batches)\n', (5178, 5192), True, 'import numpy as np\n'), ((7683, 7697), 'dgl.DGLGraph', 'dgl.DGLGraph', ([], {}), '()\n', (7695, 7697), False, 'import dgl\n'), ((8032, 8129), 'dgl.contrib.sampling.NeighborSampler', 'dgl.contrib.sampling.NeighborSampler', (['g', '(1)', '(1)', '(99)', '"""in"""'], {'transition_prob': '"""w"""', 'seed_nodes': '[99]'}), "(g, 1, 1, 99, 'in', transition_prob='w',\n seed_nodes=[99])\n", (8068, 8129), False, 'import dgl\n'), ((8374, 8472), 'dgl.contrib.sampling.NeighborSampler', 'dgl.contrib.sampling.NeighborSampler', (['g', '(1)', '(1)', '(99)', '"""out"""'], {'transition_prob': '"""w"""', 'seed_nodes': '[0]'}), "(g, 1, 1, 99, 'out', transition_prob=\n 'w', seed_nodes=[0])\n", (8410, 8472), False, 'import dgl\n'), ((8747, 8766), 'dgl.random.seed', 'dgl.random.seed', (['(42)'], {}), '(42)\n', (8762, 8766), False, 'import dgl\n'), ((8783, 8880), 'dgl.contrib.sampling.NeighborSampler', 'dgl.contrib.sampling.NeighborSampler', (['g', '(5)', '(3)'], {'num_hops': '(2)', 'neighbor_type': '"""in"""', 'num_workers': '(1)'}), "(g, 5, 3, num_hops=2, neighbor_type=\n 'in', num_workers=1)\n", (8819, 8880), False, 'import dgl\n'), ((9016, 9035), 
'dgl.random.seed', 'dgl.random.seed', (['(42)'], {}), '(42)\n', (9031, 9035), False, 'import dgl\n'), ((9658, 9697), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['lsrc', 'head_nid'], {}), '(lsrc, head_nid)\n', (9681, 9697), True, 'import numpy as np\n'), ((9833, 9872), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['tail_nid', 'ldst'], {}), '(tail_nid, ldst)\n', (9856, 9872), True, 'import numpy as np\n'), ((17319, 17375), 'numpy.random.randint', 'np.random.randint', (['(0)', '(10)'], {'size': 'num_edges', 'dtype': 'np.int64'}), '(0, 10, size=num_edges, dtype=np.int64)\n', (17336, 17375), True, 'import numpy as np\n'), ((26687, 26705), 'dgl.random.seed', 'dgl.random.seed', (['(0)'], {}), '(0)\n', (26702, 26705), False, 'import dgl\n'), ((26924, 26949), 'backend.sum', 'F.sum', (['edge_weight'], {'dim': '(0)'}), '(edge_weight, dim=0)\n', (26929, 26949), True, 'import backend as F\n'), ((27107, 27163), 'numpy.random.randint', 'np.random.randint', (['(0)', '(20)'], {'size': 'num_edges', 'dtype': 'np.int64'}), '(0, 20, size=num_edges, dtype=np.int64)\n', (27124, 27163), True, 'import numpy as np\n'), ((27383, 27423), 'numpy.full', 'np.full', (['(num_edges,)', '(0)'], {'dtype': 'np.int32'}), '((num_edges,), 0, dtype=np.int32)\n', (27390, 27423), True, 'import numpy as np\n'), ((27443, 27483), 'numpy.full', 'np.full', (['(num_nodes,)', '(0)'], {'dtype': 'np.int32'}), '((num_nodes,), 0, dtype=np.int32)\n', (27450, 27483), True, 'import numpy as np\n'), ((28888, 28928), 'numpy.allclose', 'np.allclose', (['edge_rate_0', '(0.5)'], {'atol': '(0.05)'}), '(edge_rate_0, 0.5, atol=0.05)\n', (28899, 28928), True, 'import numpy as np\n'), ((28940, 28989), 'numpy.allclose', 'np.allclose', (['edge_rate_tail_half', '(0.25)'], {'atol': '(0.05)'}), '(edge_rate_tail_half, 0.25, atol=0.05)\n', (28951, 28989), True, 'import numpy as np\n'), ((29228, 29276), 'numpy.allclose', 'np.allclose', (['node_rate_tail_half', '(0.5)'], {'atol': '(0.02)'}), '(node_rate_tail_half, 0.5, atol=0.02)\n', (29239, 29276), True, 'import numpy as np\n'), ((29359, 29399), 'numpy.full', 'np.full', (['(num_edges,)', '(0)'], {'dtype': 'np.int32'}), '((num_edges,), 0, dtype=np.int32)\n', (29366, 29399), True, 'import numpy as np\n'), ((29419, 29459), 'numpy.full', 'np.full', (['(num_nodes,)', '(0)'], {'dtype': 'np.int32'}), '((num_nodes,), 0, dtype=np.int32)\n', (29426, 29459), True, 'import numpy as np\n'), ((30983, 31023), 'numpy.allclose', 'np.allclose', (['edge_rate_0', '(0.5)'], {'atol': '(0.05)'}), '(edge_rate_0, 0.5, atol=0.05)\n', (30994, 31023), True, 'import numpy as np\n'), ((31035, 31084), 'numpy.allclose', 'np.allclose', (['edge_rate_tail_half', '(0.25)'], {'atol': '(0.05)'}), '(edge_rate_tail_half, 0.25, atol=0.05)\n', (31046, 31084), True, 'import numpy as np\n'), ((31416, 31467), 'numpy.allclose', 'np.allclose', (['node_rate', '(node_rate_a * 5)'], {'atol': '(0.002)'}), '(node_rate, node_rate_a * 5, atol=0.002)\n', (31427, 31467), True, 'import numpy as np\n'), ((31479, 31529), 'numpy.allclose', 'np.allclose', (['node_rate_a', 'node_rate_b'], {'atol': '(0.0002)'}), '(node_rate_a, node_rate_b, atol=0.0002)\n', (31490, 31529), True, 'import numpy as np\n'), ((31936, 31976), 'numpy.full', 'np.full', (['(num_edges,)', '(0)'], {'dtype': 'np.int32'}), '((num_edges,), 0, dtype=np.int32)\n', (31943, 31976), True, 'import numpy as np\n'), ((32296, 32336), 'numpy.full', 'np.full', (['(num_edges,)', '(1)'], {'dtype': 'np.int32'}), '((num_edges,), 1, dtype=np.int32)\n', (32303, 32336), True, 'import numpy as 
np\n'), ((32392, 32427), 'numpy.array_equal', 'np.array_equal', (['truth', 'edge_sampled'], {}), '(truth, edge_sampled)\n', (32406, 32427), True, 'import numpy as np\n'), ((32448, 32488), 'numpy.full', 'np.full', (['(num_edges,)', '(0)'], {'dtype': 'np.int32'}), '((num_edges,), 0, dtype=np.int32)\n', (32455, 32488), True, 'import numpy as np\n'), ((32858, 32898), 'numpy.full', 'np.full', (['(num_edges,)', '(1)'], {'dtype': 'np.int32'}), '((num_edges,), 1, dtype=np.int32)\n', (32865, 32898), True, 'import numpy as np\n'), ((32954, 32989), 'numpy.array_equal', 'np.array_equal', (['truth', 'edge_sampled'], {}), '(truth, edge_sampled)\n', (32968, 32989), True, 'import numpy as np\n'), ((1388, 1412), 'backend.array_equal', 'F.array_equal', (['src1', 'src'], {}), '(src1, src)\n', (1401, 1412), True, 'import backend as F\n'), ((3965, 3989), 'backend.array_equal', 'F.array_equal', (['src1', 'src'], {}), '(src1, src)\n', (3978, 3989), True, 'import backend as F\n'), ((9065, 9162), 'dgl.contrib.sampling.NeighborSampler', 'dgl.contrib.sampling.NeighborSampler', (['g', '(5)', '(3)'], {'num_hops': '(2)', 'neighbor_type': '"""in"""', 'num_workers': '(1)'}), "(g, 5, 3, num_hops=2, neighbor_type=\n 'in', num_workers=1)\n", (9101, 9162), False, 'import dgl\n'), ((9317, 9414), 'dgl.contrib.sampling.NeighborSampler', 'dgl.contrib.sampling.NeighborSampler', (['g', '(5)', '(3)'], {'num_hops': '(2)', 'neighbor_type': '"""in"""', 'num_workers': '(4)'}), "(g, 5, 3, num_hops=2, neighbor_type=\n 'in', num_workers=4)\n", (9353, 9414), False, 'import dgl\n'), ((9545, 9560), 'backend.asnumpy', 'F.asnumpy', (['lsrc'], {}), '(lsrc)\n', (9554, 9560), True, 'import backend as F\n'), ((9587, 9608), 'backend.asnumpy', 'F.asnumpy', (['g.head_nid'], {}), '(g.head_nid)\n', (9596, 9608), True, 'import backend as F\n'), ((9720, 9735), 'backend.asnumpy', 'F.asnumpy', (['ldst'], {}), '(ldst)\n', (9729, 9735), True, 'import backend as F\n'), ((9762, 9783), 'backend.asnumpy', 'F.asnumpy', (['g.tail_nid'], {}), '(g.tail_nid)\n', (9771, 9783), True, 'import backend as F\n'), ((10118, 10133), 'backend.tensor', 'F.tensor', (['etype'], {}), '(etype)\n', (10126, 10133), True, 'import backend as F\n'), ((10135, 10142), 'backend.cpu', 'F.cpu', ([], {}), '()\n', (10140, 10142), True, 'import backend as F\n'), ((11419, 11463), 'backend.gather_row', 'F.gather_row', (['neg_edges.parent_nid', 'neg_lsrc'], {}), '(neg_edges.parent_nid, neg_lsrc)\n', (11431, 11463), True, 'import backend as F\n'), ((11482, 11526), 'backend.gather_row', 'F.gather_row', (['neg_edges.parent_nid', 'neg_ldst'], {}), '(neg_edges.parent_nid, neg_ldst)\n', (11494, 11526), True, 'import backend as F\n'), ((11545, 11589), 'backend.gather_row', 'F.gather_row', (['neg_edges.parent_eid', 'neg_leid'], {}), '(neg_edges.parent_eid, neg_leid)\n', (11557, 11589), True, 'import backend as F\n'), ((11933, 11987), 'backend.gather_row', 'F.gather_row', (['pos_edges.parent_nid', 'pos_edges.tail_nid'], {}), '(pos_edges.parent_nid, pos_edges.tail_nid)\n', (11945, 11987), True, 'import backend as F\n'), ((12008, 12062), 'backend.gather_row', 'F.gather_row', (['neg_edges.parent_nid', 'neg_edges.tail_nid'], {}), '(neg_edges.parent_nid, neg_edges.tail_nid)\n', (12020, 12062), True, 'import backend as F\n'), ((12171, 12216), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['pos_tails', 'neg_tails'], {}), '(pos_tails, neg_tails)\n', (12194, 12216), True, 'import numpy as np\n'), ((16230, 16274), 'backend.gather_row', 'F.gather_row', (['neg_edges.parent_nid', 'neg_lsrc'], {}), 
'(neg_edges.parent_nid, neg_lsrc)\n', (16242, 16274), True, 'import backend as F\n'), ((16293, 16337), 'backend.gather_row', 'F.gather_row', (['neg_edges.parent_nid', 'neg_ldst'], {}), '(neg_edges.parent_nid, neg_ldst)\n', (16305, 16337), True, 'import backend as F\n'), ((16356, 16400), 'backend.gather_row', 'F.gather_row', (['neg_edges.parent_eid', 'neg_leid'], {}), '(neg_edges.parent_eid, neg_leid)\n', (16368, 16400), True, 'import backend as F\n'), ((16482, 16521), 'backend.gather_row', 'F.gather_row', (["g.edata['etype']", 'neg_eid'], {}), "(g.edata['etype'], neg_eid)\n", (16494, 16521), True, 'import backend as F\n'), ((17207, 17214), 'backend.cpu', 'F.cpu', ([], {}), '()\n', (17212, 17214), True, 'import backend as F\n'), ((17298, 17305), 'backend.cpu', 'F.cpu', ([], {}), '()\n', (17303, 17305), True, 'import backend as F\n'), ((17409, 17424), 'backend.tensor', 'F.tensor', (['etype'], {}), '(etype)\n', (17417, 17424), True, 'import backend as F\n'), ((17426, 17433), 'backend.cpu', 'F.cpu', ([], {}), '()\n', (17431, 17433), True, 'import backend as F\n'), ((18858, 18902), 'backend.gather_row', 'F.gather_row', (['neg_edges.parent_nid', 'neg_lsrc'], {}), '(neg_edges.parent_nid, neg_lsrc)\n', (18870, 18902), True, 'import backend as F\n'), ((18921, 18965), 'backend.gather_row', 'F.gather_row', (['neg_edges.parent_nid', 'neg_ldst'], {}), '(neg_edges.parent_nid, neg_ldst)\n', (18933, 18965), True, 'import backend as F\n'), ((18984, 19028), 'backend.gather_row', 'F.gather_row', (['neg_edges.parent_eid', 'neg_leid'], {}), '(neg_edges.parent_eid, neg_leid)\n', (18996, 19028), True, 'import backend as F\n'), ((19372, 19426), 'backend.gather_row', 'F.gather_row', (['pos_edges.parent_nid', 'pos_edges.tail_nid'], {}), '(pos_edges.parent_nid, pos_edges.tail_nid)\n', (19384, 19426), True, 'import backend as F\n'), ((19447, 19501), 'backend.gather_row', 'F.gather_row', (['neg_edges.parent_nid', 'neg_edges.tail_nid'], {}), '(neg_edges.parent_nid, neg_edges.tail_nid)\n', (19459, 19501), True, 'import backend as F\n'), ((19610, 19655), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['pos_tails', 'neg_tails'], {}), '(pos_tails, neg_tails)\n', (19633, 19655), True, 'import numpy as np\n'), ((20681, 20725), 'backend.gather_row', 'F.gather_row', (['neg_edges.parent_nid', 'neg_lsrc'], {}), '(neg_edges.parent_nid, neg_lsrc)\n', (20693, 20725), True, 'import backend as F\n'), ((20744, 20788), 'backend.gather_row', 'F.gather_row', (['neg_edges.parent_nid', 'neg_ldst'], {}), '(neg_edges.parent_nid, neg_ldst)\n', (20756, 20788), True, 'import backend as F\n'), ((20807, 20851), 'backend.gather_row', 'F.gather_row', (['neg_edges.parent_eid', 'neg_leid'], {}), '(neg_edges.parent_eid, neg_leid)\n', (20819, 20851), True, 'import backend as F\n'), ((20933, 20972), 'backend.gather_row', 'F.gather_row', (["g.edata['etype']", 'neg_eid'], {}), "(g.edata['etype'], neg_eid)\n", (20945, 20972), True, 'import backend as F\n'), ((22193, 22237), 'backend.gather_row', 'F.gather_row', (['neg_edges.parent_nid', 'neg_lsrc'], {}), '(neg_edges.parent_nid, neg_lsrc)\n', (22205, 22237), True, 'import backend as F\n'), ((22256, 22300), 'backend.gather_row', 'F.gather_row', (['neg_edges.parent_nid', 'neg_ldst'], {}), '(neg_edges.parent_nid, neg_ldst)\n', (22268, 22300), True, 'import backend as F\n'), ((22319, 22363), 'backend.gather_row', 'F.gather_row', (['neg_edges.parent_eid', 'neg_leid'], {}), '(neg_edges.parent_eid, neg_leid)\n', (22331, 22363), True, 'import backend as F\n'), ((22445, 22484), 'backend.gather_row', 
'F.gather_row', (["g.edata['etype']", 'neg_eid'], {}), "(g.edata['etype'], neg_eid)\n", (22457, 22484), True, 'import backend as F\n'), ((26894, 26901), 'backend.cpu', 'F.cpu', ([], {}), '()\n', (26899, 26901), True, 'import backend as F\n'), ((27032, 27039), 'backend.cpu', 'F.cpu', ([], {}), '()\n', (27037, 27039), True, 'import backend as F\n'), ((27063, 27088), 'backend.sum', 'F.sum', (['node_weight'], {'dim': '(0)'}), '(node_weight, dim=0)\n', (27068, 27088), True, 'import backend as F\n'), ((27197, 27212), 'backend.tensor', 'F.tensor', (['etype'], {}), '(etype)\n', (27205, 27212), True, 'import backend as F\n'), ((27214, 27221), 'backend.cpu', 'F.cpu', ([], {}), '()\n', (27219, 27221), True, 'import backend as F\n'), ((31158, 31187), 'numpy.average', 'np.average', (['node_sampled[:50]'], {}), '(node_sampled[:50])\n', (31168, 31187), True, 'import numpy as np\n'), ((31227, 31259), 'numpy.average', 'np.average', (['node_sampled[50:100]'], {}), '(node_sampled[50:100])\n', (31237, 31259), True, 'import numpy as np\n'), ((31718, 31725), 'backend.cpu', 'F.cpu', ([], {}), '()\n', (31723, 31725), True, 'import backend as F\n'), ((1963, 1983), 'numpy.unique', 'np.unique', (['child_src'], {}), '(child_src)\n', (1972, 1983), True, 'import numpy as np\n'), ((2241, 2253), 'backend.asnumpy', 'F.asnumpy', (['i'], {}), '(i)\n', (2250, 2253), True, 'import backend as F\n'), ((6568, 6603), 'backend.gather_row', 'F.gather_row', (['src', 'block_parent_eid'], {}), '(src, block_parent_eid)\n', (6580, 6603), True, 'import backend as F\n'), ((6635, 6670), 'backend.gather_row', 'F.gather_row', (['dst', 'block_parent_eid'], {}), '(dst, block_parent_eid)\n', (6647, 6670), True, 'import backend as F\n'), ((7539, 7561), 'numpy.random.randint', 'np.random.randint', (['(100)'], {}), '(100)\n', (7556, 7561), True, 'import numpy as np\n'), ((7563, 7585), 'numpy.random.randint', 'np.random.randint', (['(100)'], {}), '(100)\n', (7580, 7585), True, 'import numpy as np\n'), ((10289, 10311), 'backend.asnumpy', 'F.asnumpy', (['pos_gdst[i]'], {}), '(pos_gdst[i])\n', (10298, 10311), True, 'import backend as F\n'), ((10333, 10355), 'backend.asnumpy', 'F.asnumpy', (['pos_geid[i]'], {}), '(pos_geid[i])\n', (10342, 10355), True, 'import backend as F\n'), ((10395, 10417), 'backend.asnumpy', 'F.asnumpy', (['pos_gsrc[i]'], {}), '(pos_gsrc[i])\n', (10404, 10417), True, 'import backend as F\n'), ((12091, 12111), 'backend.asnumpy', 'F.asnumpy', (['pos_tails'], {}), '(pos_tails)\n', (12100, 12111), True, 'import backend as F\n'), ((12141, 12161), 'backend.asnumpy', 'F.asnumpy', (['neg_tails'], {}), '(neg_tails)\n', (12150, 12161), True, 'import backend as F\n'), ((17162, 17204), 'numpy.full', 'np.full', (['(num_edges,)', '(1)'], {'dtype': 'np.float32'}), '((num_edges,), 1, dtype=np.float32)\n', (17169, 17204), True, 'import numpy as np\n'), ((17253, 17295), 'numpy.full', 'np.full', (['(num_nodes,)', '(1)'], {'dtype': 'np.float32'}), '((num_nodes,), 1, dtype=np.float32)\n', (17260, 17295), True, 'import numpy as np\n'), ((17580, 17602), 'backend.asnumpy', 'F.asnumpy', (['pos_gdst[i]'], {}), '(pos_gdst[i])\n', (17589, 17602), True, 'import backend as F\n'), ((17624, 17646), 'backend.asnumpy', 'F.asnumpy', (['pos_geid[i]'], {}), '(pos_geid[i])\n', (17633, 17646), True, 'import backend as F\n'), ((17686, 17708), 'backend.asnumpy', 'F.asnumpy', (['pos_gsrc[i]'], {}), '(pos_gsrc[i])\n', (17695, 17708), True, 'import backend as F\n'), ((19530, 19550), 'backend.asnumpy', 'F.asnumpy', (['pos_tails'], {}), '(pos_tails)\n', (19539, 19550), True, 
'import backend as F\n'), ((19580, 19600), 'backend.asnumpy', 'F.asnumpy', (['neg_tails'], {}), '(neg_tails)\n', (19589, 19600), True, 'import backend as F\n'), ((26849, 26891), 'numpy.full', 'np.full', (['(num_edges,)', '(1)'], {'dtype': 'np.float32'}), '((num_edges,), 1, dtype=np.float32)\n', (26856, 26891), True, 'import numpy as np\n'), ((26987, 27029), 'numpy.full', 'np.full', (['(num_nodes,)', '(1)'], {'dtype': 'np.float32'}), '((num_nodes,), 1, dtype=np.float32)\n', (26994, 27029), True, 'import numpy as np\n'), ((28518, 28559), 'backend.asnumpy', 'F.asnumpy', (['pos_edges.parent_eid[pos_leid]'], {}), '(pos_edges.parent_eid[pos_leid])\n', (28527, 28559), True, 'import backend as F\n'), ((30337, 30381), 'backend.gather_row', 'F.gather_row', (['neg_edges.parent_nid', 'neg_lsrc'], {}), '(neg_edges.parent_nid, neg_lsrc)\n', (30349, 30381), True, 'import backend as F\n'), ((30477, 30521), 'backend.gather_row', 'F.gather_row', (['neg_edges.parent_nid', 'neg_ldst'], {}), '(neg_edges.parent_nid, neg_ldst)\n', (30489, 30521), True, 'import backend as F\n'), ((30613, 30654), 'backend.asnumpy', 'F.asnumpy', (['pos_edges.parent_eid[pos_leid]'], {}), '(pos_edges.parent_eid[pos_leid])\n', (30622, 30654), True, 'import backend as F\n'), ((31673, 31715), 'numpy.full', 'np.full', (['(num_edges,)', '(1)'], {'dtype': 'np.float32'}), '((num_edges,), 1, dtype=np.float32)\n', (31680, 31715), True, 'import numpy as np\n'), ((32238, 32279), 'backend.asnumpy', 'F.asnumpy', (['pos_edges.parent_eid[pos_leid]'], {}), '(pos_edges.parent_eid[pos_leid])\n', (32247, 32279), True, 'import backend as F\n'), ((32800, 32841), 'backend.asnumpy', 'F.asnumpy', (['pos_edges.parent_eid[pos_leid]'], {}), '(pos_edges.parent_eid[pos_leid])\n', (32809, 32841), True, 'import backend as F\n'), ((212, 261), 'scipy.sparse.random', 'sp.sparse.random', (['n', 'n'], {'density': '(0.1)', 'format': '"""coo"""'}), "(n, n, density=0.1, format='coo')\n", (228, 261), True, 'import scipy as sp\n'), ((1452, 1464), 'numpy.sort', 'np.sort', (['arr'], {}), '(arr)\n', (1459, 1464), True, 'import numpy as np\n'), ((5061, 5075), 'backend.asnumpy', 'F.asnumpy', (['nid'], {}), '(nid)\n', (5070, 5075), True, 'import backend as F\n'), ((5840, 5854), 'backend.asnumpy', 'F.asnumpy', (['nid'], {}), '(nid)\n', (5849, 5854), True, 'import backend as F\n'), ((6017, 6031), 'backend.asnumpy', 'F.asnumpy', (['eid'], {}), '(eid)\n', (6026, 6031), True, 'import backend as F\n'), ((6363, 6395), 'backend.gather_row', 'F.gather_row', (['sub_src', 'block_eid'], {}), '(sub_src, block_eid)\n', (6375, 6395), True, 'import backend as F\n'), ((6445, 6477), 'backend.gather_row', 'F.gather_row', (['sub_dst', 'block_eid'], {}), '(sub_dst, block_eid)\n', (6457, 6477), True, 'import backend as F\n'), ((6698, 6738), 'backend.asnumpy', 'F.asnumpy', (['(block_src == block_parent_src)'], {}), '(block_src == block_parent_src)\n', (6707, 6738), True, 'import backend as F\n'), ((7823, 7830), 'backend.cpu', 'F.cpu', ([], {}), '()\n', (7828, 7830), True, 'import backend as F\n'), ((7880, 7887), 'backend.cpu', 'F.cpu', ([], {}), '()\n', (7885, 7887), True, 'import backend as F\n'), ((11078, 11122), 'backend.gather_row', 'F.gather_row', (['pos_edges.parent_eid', 'pos_leid'], {}), '(pos_edges.parent_eid, pos_leid)\n', (11090, 11122), True, 'import backend as F\n'), ((16579, 16600), 'backend.asnumpy', 'F.asnumpy', (['neg_src[i]'], {}), '(neg_src[i])\n', (16588, 16600), True, 'import backend as F\n'), ((16602, 16623), 'backend.asnumpy', 'F.asnumpy', (['neg_dst[i]'], {}), '(neg_dst[i])\n', 
(16611, 16623), True, 'import backend as F\n'), ((18518, 18562), 'backend.gather_row', 'F.gather_row', (['pos_edges.parent_eid', 'pos_leid'], {}), '(pos_edges.parent_eid, pos_leid)\n', (18530, 18562), True, 'import backend as F\n'), ((19091, 19112), 'backend.asnumpy', 'F.asnumpy', (['neg_dst[i]'], {}), '(neg_dst[i])\n', (19100, 19112), True, 'import backend as F\n'), ((19138, 19159), 'backend.asnumpy', 'F.asnumpy', (['neg_eid[i]'], {}), '(neg_eid[i])\n', (19147, 19159), True, 'import backend as F\n'), ((21030, 21051), 'backend.asnumpy', 'F.asnumpy', (['neg_src[i]'], {}), '(neg_src[i])\n', (21039, 21051), True, 'import backend as F\n'), ((21053, 21074), 'backend.asnumpy', 'F.asnumpy', (['neg_dst[i]'], {}), '(neg_dst[i])\n', (21062, 21074), True, 'import backend as F\n'), ((22542, 22563), 'backend.asnumpy', 'F.asnumpy', (['neg_src[i]'], {}), '(neg_src[i])\n', (22551, 22563), True, 'import backend as F\n'), ((22565, 22586), 'backend.asnumpy', 'F.asnumpy', (['neg_dst[i]'], {}), '(neg_dst[i])\n', (22574, 22586), True, 'import backend as F\n'), ((28337, 28355), 'backend.asnumpy', 'F.asnumpy', (['neg_src'], {}), '(neg_src)\n', (28346, 28355), True, 'import backend as F\n'), ((28463, 28481), 'backend.asnumpy', 'F.asnumpy', (['neg_dst'], {}), '(neg_dst)\n', (28472, 28481), True, 'import backend as F\n'), ((30418, 30436), 'backend.asnumpy', 'F.asnumpy', (['neg_src'], {}), '(neg_src)\n', (30427, 30436), True, 'import backend as F\n'), ((30558, 30576), 'backend.asnumpy', 'F.asnumpy', (['neg_dst'], {}), '(neg_dst)\n', (30567, 30576), True, 'import backend as F\n'), ((11173, 11217), 'backend.gather_row', 'F.gather_row', (['pos_edges.parent_nid', 'pos_lsrc'], {}), '(pos_edges.parent_nid, pos_lsrc)\n', (11185, 11217), True, 'import backend as F\n'), ((11267, 11311), 'backend.gather_row', 'F.gather_row', (['pos_edges.parent_nid', 'pos_ldst'], {}), '(pos_edges.parent_nid, pos_ldst)\n', (11279, 11311), True, 'import backend as F\n'), ((11652, 11670), 'backend.asnumpy', 'F.asnumpy', (['neg_dst'], {}), '(neg_dst)\n', (11661, 11670), True, 'import backend as F\n'), ((11699, 11717), 'backend.asnumpy', 'F.asnumpy', (['neg_eid'], {}), '(neg_eid)\n', (11708, 11717), True, 'import backend as F\n'), ((16833, 16853), 'backend.asnumpy', 'F.asnumpy', (['exists[i]'], {}), '(exists[i])\n', (16842, 16853), True, 'import backend as F\n'), ((16857, 16873), 'backend.asnumpy', 'F.asnumpy', (['exist'], {}), '(exist)\n', (16866, 16873), True, 'import backend as F\n'), ((18613, 18657), 'backend.gather_row', 'F.gather_row', (['pos_edges.parent_nid', 'pos_lsrc'], {}), '(pos_edges.parent_nid, pos_lsrc)\n', (18625, 18657), True, 'import backend as F\n'), ((18707, 18751), 'backend.gather_row', 'F.gather_row', (['pos_edges.parent_nid', 'pos_ldst'], {}), '(pos_edges.parent_nid, pos_ldst)\n', (18719, 18751), True, 'import backend as F\n'), ((21284, 21304), 'backend.asnumpy', 'F.asnumpy', (['exists[i]'], {}), '(exists[i])\n', (21293, 21304), True, 'import backend as F\n'), ((21308, 21324), 'backend.asnumpy', 'F.asnumpy', (['exist'], {}), '(exist)\n', (21317, 21324), True, 'import backend as F\n'), ((22796, 22816), 'backend.asnumpy', 'F.asnumpy', (['exists[i]'], {}), '(exists[i])\n', (22805, 22816), True, 'import backend as F\n'), ((22820, 22836), 'backend.asnumpy', 'F.asnumpy', (['exist'], {}), '(exist)\n', (22829, 22836), True, 'import backend as F\n'), ((5661, 5679), 'backend.asnumpy', 'F.asnumpy', (['sub_nid'], {}), '(sub_nid)\n', (5670, 5679), True, 'import backend as F\n'), ((11827, 11848), 'backend.asnumpy', 'F.asnumpy', 
(['neg_src[i]'], {}), '(neg_src[i])\n', (11836, 11848), True, 'import backend as F\n'), ((12318, 12334), 'backend.asnumpy', 'F.asnumpy', (['exist'], {}), '(exist)\n', (12327, 12334), True, 'import backend as F\n'), ((19266, 19287), 'backend.asnumpy', 'F.asnumpy', (['neg_src[i]'], {}), '(neg_src[i])\n', (19275, 19287), True, 'import backend as F\n'), ((19757, 19773), 'backend.asnumpy', 'F.asnumpy', (['exist'], {}), '(exist)\n', (19766, 19773), True, 'import backend as F\n')] |
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
from typing import TYPE_CHECKING, Any, Dict, Optional
import aws_orbit
from aws_orbit.plugins import hooks
from aws_orbit.remote_files import helm
if TYPE_CHECKING:
from aws_orbit.models.context import Context, TeamContext
_logger: logging.Logger = logging.getLogger("aws_orbit")
CHART_PATH = os.path.join(os.path.dirname(__file__))
@hooks.deploy
def deploy(
plugin_id: str,
context: "Context",
team_context: "TeamContext",
parameters: Dict[str, Any],
) -> None:
_logger.debug("Team Env name: %s | Team name: %s", context.name, team_context.name)
plugin_id = plugin_id.replace("_", "-")
_logger.debug("plugin_id: %s", plugin_id)
chart_path = helm.create_team_charts_copy(team_context=team_context, path=CHART_PATH, target_path=plugin_id)
vars: Dict[str, Optional[str]] = dict(
team=team_context.name,
region=context.region,
account_id=context.account_id,
env_name=context.name,
restart_policy=parameters["restartPolicy"] if "restartPolicy" in parameters else "Always",
path=parameters["path"] if "path" in parameters else "/home/jovyan/shared/voila",
options=parameters["options"] if "options" in parameters else "",
plugin_id=plugin_id,
toolkit_s3_bucket=context.toolkit.s3_bucket,
image_pull_policy="Always" if aws_orbit.__version__.endswith(".dev0") else "IfNotPresent",
image=parameters["image"] if "image" in parameters else team_context.final_image_address,
sts_ep="legacy" if context.networking.data.internet_accessible else "regional",
)
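    # Package the chart with the rendered values and install it into the team namespace;
    # this only happens when the team has a Helm repository configured.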
repo_location = team_context.team_helm_repository
if repo_location:
repo = team_context.name
helm.add_repo(repo=repo, repo_location=repo_location)
chart_name, chart_version, chart_package = helm.package_chart(repo=repo, chart_path=chart_path, values=vars)
helm.install_chart(
repo=repo,
namespace=team_context.name,
name=f"{team_context.name}-{plugin_id}",
chart_name=chart_name,
chart_version=chart_version,
)
@hooks.destroy
def destroy(
plugin_id: str,
context: "Context",
team_context: "TeamContext",
parameters: Dict[str, Any],
) -> None:
_logger.debug(
"Delete Plugin %s of Team Env name: %s | Team name: %s",
plugin_id,
context.name,
team_context.name,
)
helm.uninstall_chart(f"{team_context.name}-{plugin_id}", namespace=team_context.name)
| [
"logging.getLogger",
"aws_orbit.remote_files.helm.install_chart",
"aws_orbit.__version__.endswith",
"os.path.dirname",
"aws_orbit.remote_files.helm.uninstall_chart",
"aws_orbit.remote_files.helm.create_team_charts_copy",
"aws_orbit.remote_files.helm.package_chart",
"aws_orbit.remote_files.helm.add_repo"
]
| [((924, 954), 'logging.getLogger', 'logging.getLogger', (['"""aws_orbit"""'], {}), "('aws_orbit')\n", (941, 954), False, 'import logging\n'), ((981, 1006), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (996, 1006), False, 'import os\n'), ((1351, 1450), 'aws_orbit.remote_files.helm.create_team_charts_copy', 'helm.create_team_charts_copy', ([], {'team_context': 'team_context', 'path': 'CHART_PATH', 'target_path': 'plugin_id'}), '(team_context=team_context, path=CHART_PATH,\n target_path=plugin_id)\n', (1379, 1450), False, 'from aws_orbit.remote_files import helm\n'), ((3092, 3182), 'aws_orbit.remote_files.helm.uninstall_chart', 'helm.uninstall_chart', (['f"""{team_context.name}-{plugin_id}"""'], {'namespace': 'team_context.name'}), "(f'{team_context.name}-{plugin_id}', namespace=\n team_context.name)\n", (3112, 3182), False, 'from aws_orbit.remote_files import helm\n'), ((2378, 2431), 'aws_orbit.remote_files.helm.add_repo', 'helm.add_repo', ([], {'repo': 'repo', 'repo_location': 'repo_location'}), '(repo=repo, repo_location=repo_location)\n', (2391, 2431), False, 'from aws_orbit.remote_files import helm\n'), ((2483, 2548), 'aws_orbit.remote_files.helm.package_chart', 'helm.package_chart', ([], {'repo': 'repo', 'chart_path': 'chart_path', 'values': 'vars'}), '(repo=repo, chart_path=chart_path, values=vars)\n', (2501, 2548), False, 'from aws_orbit.remote_files import helm\n'), ((2557, 2717), 'aws_orbit.remote_files.helm.install_chart', 'helm.install_chart', ([], {'repo': 'repo', 'namespace': 'team_context.name', 'name': 'f"""{team_context.name}-{plugin_id}"""', 'chart_name': 'chart_name', 'chart_version': 'chart_version'}), "(repo=repo, namespace=team_context.name, name=\n f'{team_context.name}-{plugin_id}', chart_name=chart_name,\n chart_version=chart_version)\n", (2575, 2717), False, 'from aws_orbit.remote_files import helm\n'), ((2007, 2046), 'aws_orbit.__version__.endswith', 'aws_orbit.__version__.endswith', (['""".dev0"""'], {}), "('.dev0')\n", (2037, 2046), False, 'import aws_orbit\n')] |
#! /usr/bin/env python
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Generate list of cinder drivers"""
import argparse
import os
from cinder.interface import util
parser = argparse.ArgumentParser(prog="generate_driver_list")
parser.add_argument("--format", default='str', choices=['str', 'dict'],
help="Output format type")
# Keep backwards compatibility with the gate-docs test
# The tests pass ['docs'] on the cmdln, but it's never been used.
parser.add_argument("output_list", default=None, nargs='?')
CI_WIKI_ROOT = "https://wiki.openstack.org/wiki/ThirdPartySystems/"
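# Output writes to doc/source/drivers.rst when an output list is requested, otherwise it prints to stdout.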
class Output(object):
def __init__(self, base_dir, output_list):
# At this point we don't care what was passed in, just a trigger
# to write this out to the doc tree for now
self.driver_file = None
if output_list:
self.driver_file = open(
'%s/doc/source/drivers.rst' % base_dir, 'w+')
self.driver_file.write('===================\n')
self.driver_file.write('Available Drivers\n')
self.driver_file.write('===================\n\n')
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
if self.driver_file:
self.driver_file.close()
def write(self, text):
if self.driver_file:
self.driver_file.write('%s\n' % text)
else:
print(text)
def format_description(desc, output):
desc = desc or '<None>'
lines = desc.rstrip('\n').split('\n')
for line in lines:
output.write(' %s' % line)
def print_drivers(drivers, config_name, output):
for driver in sorted(drivers, key=lambda x: x.class_fqn):
output.write(driver.class_name)
output.write('-' * len(driver.class_name))
if driver.version:
output.write('* Version: %s' % driver.version)
output.write('* %s=%s' % (config_name, driver.class_fqn))
if driver.ci_wiki_name:
output.write('* CI info: %s%s' % (CI_WIKI_ROOT,
driver.ci_wiki_name))
output.write('* Description:')
format_description(driver.desc, output)
output.write('')
output.write('')
def output_str(cinder_root, args):
with Output(cinder_root, args.output_list) as output:
output.write('Volume Drivers')
output.write('==============')
print_drivers(util.get_volume_drivers(), 'volume_driver', output)
output.write('Backup Drivers')
output.write('==============')
print_drivers(util.get_backup_drivers(), 'backup_driver', output)
output.write('FC Zone Manager Drivers')
output.write('=======================')
print_drivers(util.get_fczm_drivers(), 'zone_driver', output)
def collect_driver_info(driver):
"""Build the dictionary that describes this driver."""
info = {'name': driver.class_name,
'version': driver.version,
'fqn': driver.class_fqn,
'description': driver.desc,
'ci_wiki_name': driver.ci_wiki_name}
return info
def output_dict():
import pprint
driver_list = []
drivers = util.get_volume_drivers()
for driver in drivers:
driver_list.append(collect_driver_info(driver))
pprint.pprint(driver_list)
def main():
tools_dir = os.path.dirname(os.path.abspath(__file__))
cinder_root = os.path.dirname(tools_dir)
cur_dir = os.getcwd()
os.chdir(cinder_root)
args = parser.parse_args()
try:
if args.format == 'str':
output_str(cinder_root, args)
elif args.format == 'dict':
output_dict()
finally:
os.chdir(cur_dir)
if __name__ == '__main__':
main()
| [
"argparse.ArgumentParser",
"cinder.interface.util.get_backup_drivers",
"os.getcwd",
"os.chdir",
"os.path.dirname",
"cinder.interface.util.get_volume_drivers",
"cinder.interface.util.get_fczm_drivers",
"os.path.abspath",
"pprint.pprint"
]
| [((709, 761), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'prog': '"""generate_driver_list"""'}), "(prog='generate_driver_list')\n", (732, 761), False, 'import argparse\n'), ((3748, 3773), 'cinder.interface.util.get_volume_drivers', 'util.get_volume_drivers', ([], {}), '()\n', (3771, 3773), False, 'from cinder.interface import util\n'), ((3862, 3888), 'pprint.pprint', 'pprint.pprint', (['driver_list'], {}), '(driver_list)\n', (3875, 3888), False, 'import pprint\n'), ((3980, 4006), 'os.path.dirname', 'os.path.dirname', (['tools_dir'], {}), '(tools_dir)\n', (3995, 4006), False, 'import os\n'), ((4021, 4032), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (4030, 4032), False, 'import os\n'), ((4037, 4058), 'os.chdir', 'os.chdir', (['cinder_root'], {}), '(cinder_root)\n', (4045, 4058), False, 'import os\n'), ((3935, 3960), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (3950, 3960), False, 'import os\n'), ((4259, 4276), 'os.chdir', 'os.chdir', (['cur_dir'], {}), '(cur_dir)\n', (4267, 4276), False, 'import os\n'), ((2985, 3010), 'cinder.interface.util.get_volume_drivers', 'util.get_volume_drivers', ([], {}), '()\n', (3008, 3010), False, 'from cinder.interface import util\n'), ((3138, 3163), 'cinder.interface.util.get_backup_drivers', 'util.get_backup_drivers', ([], {}), '()\n', (3161, 3163), False, 'from cinder.interface import util\n'), ((3309, 3332), 'cinder.interface.util.get_fczm_drivers', 'util.get_fczm_drivers', ([], {}), '()\n', (3330, 3332), False, 'from cinder.interface import util\n')] |
""" econ/fred_view.py tests """
import unittest
from unittest import mock
from io import StringIO
import pandas as pd
# pylint: disable=unused-import
from gamestonk_terminal.econ.fred_view import get_fred_data # noqa: F401
fred_data_mock = """
,GDP
2019-01-01,21115.309
2019-04-01,21329.877
2019-07-01,21540.325
2019-10-01,21747.394
2020-01-01,21561.139
2020-04-01,19520.114
2020-07-01,21170.252
2020-10-01,21494.731
"""
class TestFredFredView(unittest.TestCase):
@mock.patch("gamestonk_terminal.econ.fred_view.Fred.get_series")
def test_get_fred_data(self, mock_get_series):
fred_data = pd.read_csv(StringIO(fred_data_mock), header=0, index_col=0)
mock_get_series.return_value = fred_data
get_fred_data(["--noplot"], "gdp")
| [
"gamestonk_terminal.econ.fred_view.get_fred_data",
"io.StringIO",
"unittest.mock.patch"
]
| [((474, 537), 'unittest.mock.patch', 'mock.patch', (['"""gamestonk_terminal.econ.fred_view.Fred.get_series"""'], {}), "('gamestonk_terminal.econ.fred_view.Fred.get_series')\n", (484, 537), False, 'from unittest import mock\n'), ((729, 763), 'gamestonk_terminal.econ.fred_view.get_fred_data', 'get_fred_data', (["['--noplot']", '"""gdp"""'], {}), "(['--noplot'], 'gdp')\n", (742, 763), False, 'from gamestonk_terminal.econ.fred_view import get_fred_data\n'), ((621, 645), 'io.StringIO', 'StringIO', (['fred_data_mock'], {}), '(fred_data_mock)\n', (629, 645), False, 'from io import StringIO\n')] |
import os
import configparser
from warnings import warn
def read_control_file(control_file):
# Initialize ConfigParser object
config = configparser.ConfigParser(
strict=True,
comment_prefixes=('/*', ';', '#'),
inline_comment_prefixes=('/*', ';', '#')
)
# Parse control file
paths = config.read(control_file)
# Check number of read control files.
if len(paths) == 0:
raise FileNotFoundError(
f'Specified control file, {control_file}, is not found.')
elif len(paths) > 1:
raise TypeError(f'Iterable {type(control_file)} is given as a control '\
'file. Only one control file is supported.')
# Check sections. Only 'REQUIRED' and 'OPTIONAL' sections will be used.
assert 'REQUIRED' in config.sections(), \
f'REQUIRED section is not found in {control_file}.'
expected_sections = ['REQUIRED', 'OPTIONAL']
not_expected_sections = [
s for s in config.sections() if s not in expected_sections]
if len(not_expected_sections) >= 1:
msg = f'Unexpected sections, {", ".join(not_expected_sections)}, '\
'were found. These are not used in '\
'the analysis. If you wish to include in the analysis, please '\
'specify in "REQUIRED" or "OPTIONAL" sections.'
warn(msg)
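    # Converters cast the raw option strings parsed by configparser to their expected Python types.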
converters_d = {
'pop_size': int,
'ns': float,
'init_mut_num': int,
'generation_num': int,
'total_site_num': int,
'var_site_num': int,
'poly_site_num': int,
'fix_site_num': int,
'output_only_fixation': lambda s: True if s == 'True' else (False if s == 'False' else -9)
}
flattened = [
(opt, converters_d[opt](v))
if opt in converters_d.keys() else (opt, v)
for s in expected_sections
for opt, v in config[s].items()
]
return dict(flattened)
def write_info_to_file(file_handle, separator, *args, **kw_args):
""" Write arguments or keyword arguments to a file. Values will be
separated by a given separator.
"""
output_lines = []
if len(args) > 0:
output_lines.append(separator.join(args))
if len(kw_args) > 0:
for k, v in kw_args.items():
output_lines.append(f'{k}{separator}{v}')
print('\n'.join(output_lines), file=file_handle)
def write_settings(file_handle, **kw_args):
print('[Setting]', file=file_handle)
write_info_to_file(file_handle, separator=' = ', **kw_args)
| [
"warnings.warn",
"configparser.ConfigParser"
]
| [((145, 264), 'configparser.ConfigParser', 'configparser.ConfigParser', ([], {'strict': '(True)', 'comment_prefixes': "('/*', ';', '#')", 'inline_comment_prefixes': "('/*', ';', '#')"}), "(strict=True, comment_prefixes=('/*', ';', '#'),\n inline_comment_prefixes=('/*', ';', '#'))\n", (170, 264), False, 'import configparser\n'), ((1332, 1341), 'warnings.warn', 'warn', (['msg'], {}), '(msg)\n', (1336, 1341), False, 'from warnings import warn\n')] |
# This file is part of the pyMOR project (http://www.pymor.org).
# Copyright 2013-2017 pyMOR developers and contributors. All rights reserved.
# License: BSD 2-Clause License (http://opensource.org/licenses/BSD-2-Clause)
import numpy as np
import pytest
from pymor.core.pickle import dumps, loads
from pymor.functions.basic import ConstantFunction, GenericFunction
from pymortests.fixtures.function import function, picklable_function, function_argument
from pymortests.fixtures.parameter import parameters_of_type
from pymortests.pickling import assert_picklable, assert_picklable_without_dumps_function
# monkey np.testing.assert_allclose to behave the same as np.allclose
# for some reason, the default atol of np.testing.assert_allclose is 0
# while it is 1e-8 for np.allclose
real_assert_allclose = np.testing.assert_allclose
def monkey_allclose(a, b, rtol=1.e-5, atol=1.e-8):
real_assert_allclose(a, b, rtol=rtol, atol=atol)
np.testing.assert_allclose = monkey_allclose
def test_evaluate(function):
f = function
mus = parameters_of_type(f.parameter_type, 4711)
for count in [0, 1, 5, (0, 1), (2, 2, 2)]:
arg = function_argument(f, count, 454)
result = f.evaluate(arg, next(mus))
assert result.shape == arg.shape[:-1] + f.shape_range
def test_lincomb_function():
for steps in (1, 10):
x = np.linspace(0, 1, num=steps)
zero = ConstantFunction(0.0, dim_domain=steps)
for zero in (ConstantFunction(0.0, dim_domain=steps),
GenericFunction(lambda X: np.zeros(X.shape[:-1]), dim_domain=steps)):
for one in (ConstantFunction(1.0, dim_domain=steps),
GenericFunction(lambda X: np.ones(X.shape[:-1]), dim_domain=steps), 1.0):
add = (zero + one) + 0
sub = (zero - one) + np.zeros(())
neg = - zero
assert np.allclose(sub(x), [-1])
assert np.allclose(add(x), [1.0])
assert np.allclose(neg(x), [0.0])
(repr(add), str(add), repr(one), str(one)) # just to cover the respective special funcs too
mul = neg * 1.
assert np.allclose(mul(x), [0.0])
with pytest.raises(AssertionError):
zero + ConstantFunction(dim_domain=steps + 1)
with pytest.raises(AssertionError):
zero * ConstantFunction(dim_domain=steps)
with pytest.raises(AssertionError):
ConstantFunction(dim_domain=0)
def test_pickle(function):
assert_picklable(function)
def test_pickle_without_dumps_function(picklable_function):
assert_picklable_without_dumps_function(picklable_function)
def test_pickle_by_evaluation(function):
f = function
f2 = loads(dumps(f))
mus = parameters_of_type(f.parameter_type, 47)
for arg in function_argument(f, 10, 42):
mu = next(mus)
assert np.all(f.evaluate(arg, mu) == f2.evaluate(arg, mu))
| [
"pymortests.pickling.assert_picklable_without_dumps_function",
"pymortests.fixtures.function.function_argument",
"pymor.core.pickle.dumps",
"numpy.ones",
"pymortests.fixtures.parameter.parameters_of_type",
"numpy.linspace",
"pymor.functions.basic.ConstantFunction",
"pytest.raises",
"numpy.zeros",
"pymortests.pickling.assert_picklable"
]
| [((1045, 1087), 'pymortests.fixtures.parameter.parameters_of_type', 'parameters_of_type', (['f.parameter_type', '(4711)'], {}), '(f.parameter_type, 4711)\n', (1063, 1087), False, 'from pymortests.fixtures.parameter import parameters_of_type\n'), ((2526, 2552), 'pymortests.pickling.assert_picklable', 'assert_picklable', (['function'], {}), '(function)\n', (2542, 2552), False, 'from pymortests.pickling import assert_picklable, assert_picklable_without_dumps_function\n'), ((2619, 2678), 'pymortests.pickling.assert_picklable_without_dumps_function', 'assert_picklable_without_dumps_function', (['picklable_function'], {}), '(picklable_function)\n', (2658, 2678), False, 'from pymortests.pickling import assert_picklable, assert_picklable_without_dumps_function\n'), ((2774, 2814), 'pymortests.fixtures.parameter.parameters_of_type', 'parameters_of_type', (['f.parameter_type', '(47)'], {}), '(f.parameter_type, 47)\n', (2792, 2814), False, 'from pymortests.fixtures.parameter import parameters_of_type\n'), ((2830, 2858), 'pymortests.fixtures.function.function_argument', 'function_argument', (['f', '(10)', '(42)'], {}), '(f, 10, 42)\n', (2847, 2858), False, 'from pymortests.fixtures.function import function, picklable_function, function_argument\n'), ((1149, 1181), 'pymortests.fixtures.function.function_argument', 'function_argument', (['f', 'count', '(454)'], {}), '(f, count, 454)\n', (1166, 1181), False, 'from pymortests.fixtures.function import function, picklable_function, function_argument\n'), ((1357, 1385), 'numpy.linspace', 'np.linspace', (['(0)', '(1)'], {'num': 'steps'}), '(0, 1, num=steps)\n', (1368, 1385), True, 'import numpy as np\n'), ((1401, 1440), 'pymor.functions.basic.ConstantFunction', 'ConstantFunction', (['(0.0)'], {'dim_domain': 'steps'}), '(0.0, dim_domain=steps)\n', (1417, 1440), False, 'from pymor.functions.basic import ConstantFunction, GenericFunction\n'), ((2423, 2452), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (2436, 2452), False, 'import pytest\n'), ((2462, 2492), 'pymor.functions.basic.ConstantFunction', 'ConstantFunction', ([], {'dim_domain': '(0)'}), '(dim_domain=0)\n', (2478, 2492), False, 'from pymor.functions.basic import ConstantFunction, GenericFunction\n'), ((2754, 2762), 'pymor.core.pickle.dumps', 'dumps', (['f'], {}), '(f)\n', (2759, 2762), False, 'from pymor.core.pickle import dumps, loads\n'), ((1462, 1501), 'pymor.functions.basic.ConstantFunction', 'ConstantFunction', (['(0.0)'], {'dim_domain': 'steps'}), '(0.0, dim_domain=steps)\n', (1478, 1501), False, 'from pymor.functions.basic import ConstantFunction, GenericFunction\n'), ((2227, 2256), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (2240, 2256), False, 'import pytest\n'), ((2329, 2358), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (2342, 2358), False, 'import pytest\n'), ((1618, 1657), 'pymor.functions.basic.ConstantFunction', 'ConstantFunction', (['(1.0)'], {'dim_domain': 'steps'}), '(1.0, dim_domain=steps)\n', (1634, 1657), False, 'from pymor.functions.basic import ConstantFunction, GenericFunction\n'), ((2277, 2315), 'pymor.functions.basic.ConstantFunction', 'ConstantFunction', ([], {'dim_domain': '(steps + 1)'}), '(dim_domain=steps + 1)\n', (2293, 2315), False, 'from pymor.functions.basic import ConstantFunction, GenericFunction\n'), ((2379, 2413), 'pymor.functions.basic.ConstantFunction', 'ConstantFunction', ([], {'dim_domain': 'steps'}), '(dim_domain=steps)\n', (2395, 2413), False, 
'from pymor.functions.basic import ConstantFunction, GenericFunction\n'), ((1550, 1572), 'numpy.zeros', 'np.zeros', (['X.shape[:-1]'], {}), '(X.shape[:-1])\n', (1558, 1572), True, 'import numpy as np\n'), ((1833, 1845), 'numpy.zeros', 'np.zeros', (['()'], {}), '(())\n', (1841, 1845), True, 'import numpy as np\n'), ((1709, 1730), 'numpy.ones', 'np.ones', (['X.shape[:-1]'], {}), '(X.shape[:-1])\n', (1716, 1730), True, 'import numpy as np\n')] |
import asyncio
from collections import defaultdict
from datetime import timedelta
import pytest
from yui.api import SlackAPI
from yui.bot import Bot
from yui.box import Box
from yui.types.slack.response import APIResponse
from yui.utils import json
from .util import FakeImportLib
def test_bot_init(event_loop, monkeypatch, bot_config):
importlib = FakeImportLib()
monkeypatch.setattr('importlib.import_module', importlib.import_module)
bot_config.APPS = ['yui.app1', 'yui.app2']
box = Box()
bot = Bot(bot_config, event_loop, using_box=box)
assert bot.config == bot_config
assert bot.channels == []
assert bot.ims == []
assert bot.groups == []
assert bot.restart is False
assert isinstance(bot.api, SlackAPI)
assert bot.box is box
assert isinstance(bot.queue, asyncio.Queue)
assert importlib.import_queue == [
'yui.app1',
'yui.app2',
]
@pytest.mark.asyncio
async def test_call(event_loop, bot_config, response_mock):
token = 'asdf<PASSWORD>'
response_mock.post(
'https://slack.com/api/test11',
body=json.dumps({'res': 'hello world!'}),
headers={'content-type': 'application/json'},
status=200,
)
response_mock.post(
'https://slack.com/api/test12',
body=json.dumps({'res': 'hello world!', 'data': {'extra': 'wow'}}),
headers={'content-type': 'application/json'},
status=200,
)
response_mock.post(
'https://slack.com/api/test21',
body=json.dumps({'error': 'aaa'}),
headers={'content-type': 'application/json'},
status=404,
)
response_mock.post(
'https://slack.com/api/test22',
body=json.dumps({'error': 'aaa'}),
headers={'content-type': 'application/json'},
status=404,
)
response_mock.post(
'https://slack.com/api/test3',
body=json.dumps({'res': 'hello world!'}),
headers={'content-type': 'application/json'},
status=200,
)
box = Box()
bot = Bot(bot_config, event_loop, using_box=box)
bot.api.throttle_interval = defaultdict(lambda: timedelta(0))
res = await bot.call('test11')
assert res == APIResponse(
body={'res': 'hello world!'},
status=200,
headers={'content-type': 'application/json'},
)
res = await bot.call('test12', data={'extra': 'wow'})
assert res == APIResponse(
body={'res': 'hello world!', 'data': {'extra': 'wow'}},
status=200,
headers={'content-type': 'application/json'},
)
res = await bot.call('test21')
assert res == APIResponse(
body={'error': 'aaa'},
status=404,
headers={'content-type': 'application/json'},
)
res = await bot.call('test22', data={'extra': 'wow'})
assert res == APIResponse(
body={'error': 'aaa'},
status=404,
headers={'content-type': 'application/json'},
)
res = await bot.call('test3', token=token)
assert res == APIResponse(
body={'res': 'hello world!'},
status=200,
headers={'content-type': 'application/json'},
)
| [
"yui.types.slack.response.APIResponse",
"yui.utils.json.dumps",
"yui.bot.Bot",
"datetime.timedelta",
"yui.box.Box"
]
| [((509, 514), 'yui.box.Box', 'Box', ([], {}), '()\n', (512, 514), False, 'from yui.box import Box\n'), ((525, 567), 'yui.bot.Bot', 'Bot', (['bot_config', 'event_loop'], {'using_box': 'box'}), '(bot_config, event_loop, using_box=box)\n', (528, 567), False, 'from yui.bot import Bot\n'), ((2026, 2031), 'yui.box.Box', 'Box', ([], {}), '()\n', (2029, 2031), False, 'from yui.box import Box\n'), ((2042, 2084), 'yui.bot.Bot', 'Bot', (['bot_config', 'event_loop'], {'using_box': 'box'}), '(bot_config, event_loop, using_box=box)\n', (2045, 2084), False, 'from yui.bot import Bot\n'), ((2205, 2309), 'yui.types.slack.response.APIResponse', 'APIResponse', ([], {'body': "{'res': 'hello world!'}", 'status': '(200)', 'headers': "{'content-type': 'application/json'}"}), "(body={'res': 'hello world!'}, status=200, headers={\n 'content-type': 'application/json'})\n", (2216, 2309), False, 'from yui.types.slack.response import APIResponse\n'), ((2413, 2543), 'yui.types.slack.response.APIResponse', 'APIResponse', ([], {'body': "{'res': 'hello world!', 'data': {'extra': 'wow'}}", 'status': '(200)', 'headers': "{'content-type': 'application/json'}"}), "(body={'res': 'hello world!', 'data': {'extra': 'wow'}}, status=\n 200, headers={'content-type': 'application/json'})\n", (2424, 2543), False, 'from yui.types.slack.response import APIResponse\n'), ((2624, 2720), 'yui.types.slack.response.APIResponse', 'APIResponse', ([], {'body': "{'error': 'aaa'}", 'status': '(404)', 'headers': "{'content-type': 'application/json'}"}), "(body={'error': 'aaa'}, status=404, headers={'content-type':\n 'application/json'})\n", (2635, 2720), False, 'from yui.types.slack.response import APIResponse\n'), ((2825, 2921), 'yui.types.slack.response.APIResponse', 'APIResponse', ([], {'body': "{'error': 'aaa'}", 'status': '(404)', 'headers': "{'content-type': 'application/json'}"}), "(body={'error': 'aaa'}, status=404, headers={'content-type':\n 'application/json'})\n", (2836, 2921), False, 'from yui.types.slack.response import APIResponse\n'), ((3015, 3119), 'yui.types.slack.response.APIResponse', 'APIResponse', ([], {'body': "{'res': 'hello world!'}", 'status': '(200)', 'headers': "{'content-type': 'application/json'}"}), "(body={'res': 'hello world!'}, status=200, headers={\n 'content-type': 'application/json'})\n", (3026, 3119), False, 'from yui.types.slack.response import APIResponse\n'), ((1110, 1145), 'yui.utils.json.dumps', 'json.dumps', (["{'res': 'hello world!'}"], {}), "({'res': 'hello world!'})\n", (1120, 1145), False, 'from yui.utils import json\n'), ((1304, 1365), 'yui.utils.json.dumps', 'json.dumps', (["{'res': 'hello world!', 'data': {'extra': 'wow'}}"], {}), "({'res': 'hello world!', 'data': {'extra': 'wow'}})\n", (1314, 1365), False, 'from yui.utils import json\n'), ((1525, 1553), 'yui.utils.json.dumps', 'json.dumps', (["{'error': 'aaa'}"], {}), "({'error': 'aaa'})\n", (1535, 1553), False, 'from yui.utils import json\n'), ((1712, 1740), 'yui.utils.json.dumps', 'json.dumps', (["{'error': 'aaa'}"], {}), "({'error': 'aaa'})\n", (1722, 1740), False, 'from yui.utils import json\n'), ((1898, 1933), 'yui.utils.json.dumps', 'json.dumps', (["{'res': 'hello world!'}"], {}), "({'res': 'hello world!'})\n", (1908, 1933), False, 'from yui.utils import json\n'), ((2137, 2149), 'datetime.timedelta', 'timedelta', (['(0)'], {}), '(0)\n', (2146, 2149), False, 'from datetime import timedelta\n')] |
import requests
from bs4 import BeautifulSoup
def recursiveUrl(url, link, depth):
if depth == 5:
return url
else:
print(link['href'])
page = requests.get(url + link['href'])
soup = BeautifulSoup(page.text, 'html.parser')
newlink = soup.find('a')
        if newlink is None:
return link
else:
return link, recursiveUrl(url, newlink, depth + 1)
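# Collect every anchor on the start page and follow each one recursively (to a maximum depth of 5).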
def getLinks(url):
page = requests.get(url)
soup = BeautifulSoup(page.text, 'html.parser')
    links = soup.find_all('a')
    results = []
    for link in links:
        results.append(recursiveUrl(url, link, 0))
    return results
links = getLinks("http://www.reddit.com/")
print(links)
| [
"bs4.BeautifulSoup",
"requests.get"
]
| [((457, 474), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (469, 474), False, 'import requests\n'), ((486, 525), 'bs4.BeautifulSoup', 'BeautifulSoup', (['page.text', '"""html.parser"""'], {}), "(page.text, 'html.parser')\n", (499, 525), False, 'from bs4 import BeautifulSoup\n'), ((174, 206), 'requests.get', 'requests.get', (["(url + link['href'])"], {}), "(url + link['href'])\n", (186, 206), False, 'import requests\n'), ((222, 261), 'bs4.BeautifulSoup', 'BeautifulSoup', (['page.text', '"""html.parser"""'], {}), "(page.text, 'html.parser')\n", (235, 261), False, 'from bs4 import BeautifulSoup\n')] |
"""Tests for SEIR model in this repo
* Compares conserved quantities
* Compares the SEIR model without social policies against SIR in the limit where they coincide
"""
from pandas import Series
from pandas.testing import assert_frame_equal, assert_series_equal
from bayes_chime.normal.models import SEIRModel, SIRModel
from pytest import fixture
from tests.normal.models.sir_test import ( # pylint: disable=W0611
fixture_penn_chime_raw_df_no_policy,
fixture_penn_chime_setup,
fixture_sir_data_wo_policy,
)
COLS_TO_COMPARE = [
"susceptible",
"infected",
"recovered",
# Does not compare census as this repo uses the exponential distribution
]
PENN_CHIME_COMMIT = "<PASSWORD>"
@fixture(name="seir_data")
def fixture_seir_data(sir_data_wo_policy):
"""Returns data for the SIHR model
"""
x, p = sir_data_wo_policy
pp = p.copy()
xx = x.copy()
pp["alpha"] = 0.5
pp["nu"] = 1
pp["initial_exposed"] = 0
return xx, pp
def test_conserved_n(seir_data):
"""Checks if S + E + I + R is conserved for SEIR
"""
x, pars = seir_data
n_total = 0
for key in SEIRModel.compartments:
n_total += pars[f"initial_{key}"]
seir_model = SEIRModel()
predictions = seir_model.propagate_uncertainties(x, pars)
n_computed = predictions[SEIRModel.compartments].sum(axis=1)
n_expected = Series(data=[n_total] * len(n_computed), index=n_computed.index)
assert_series_equal(n_expected, n_computed)
def test_compare_sir_vs_seir(sir_data_wo_policy, seir_data, monkeypatch):
"""Checks if SEIR and SIR return same results if the code enforces
* alpha = gamma
* E = 0
* dI = dE
"""
x_sir, pars_sir = sir_data_wo_policy
x_seir, pars_seir = seir_data
pars_seir["alpha"] = pars_sir["gamma"] # will be done by hand
def mocked_seir_step(data, **pars):
data["exposed"] = 0
new_data = SEIRModel.simulation_step(data, **pars)
new_data["infected"] += new_data["exposed_new"]
return new_data
seir_model = SEIRModel()
monkeypatch.setattr(seir_model, "simulation_step", mocked_seir_step)
sir_model = SIRModel()
predictions_sir = sir_model.propagate_uncertainties(x_sir, pars_sir)
predictions_seir = seir_model.propagate_uncertainties(x_seir, pars_seir)
assert_frame_equal(
predictions_sir[COLS_TO_COMPARE], predictions_seir[COLS_TO_COMPARE],
)
| [
"bayes_chime.normal.models.SEIRModel.simulation_step",
"bayes_chime.normal.models.SIRModel",
"bayes_chime.normal.models.SEIRModel",
"pytest.fixture",
"pandas.testing.assert_frame_equal",
"pandas.testing.assert_series_equal"
]
| [((679, 704), 'pytest.fixture', 'fixture', ([], {'name': '"""seir_data"""'}), "(name='seir_data')\n", (686, 704), False, 'from pytest import fixture\n'), ((1185, 1196), 'bayes_chime.normal.models.SEIRModel', 'SEIRModel', ([], {}), '()\n', (1194, 1196), False, 'from bayes_chime.normal.models import SEIRModel, SIRModel\n'), ((1412, 1455), 'pandas.testing.assert_series_equal', 'assert_series_equal', (['n_expected', 'n_computed'], {}), '(n_expected, n_computed)\n', (1431, 1455), False, 'from pandas.testing import assert_frame_equal, assert_series_equal\n'), ((2027, 2038), 'bayes_chime.normal.models.SEIRModel', 'SEIRModel', ([], {}), '()\n', (2036, 2038), False, 'from bayes_chime.normal.models import SEIRModel, SIRModel\n'), ((2129, 2139), 'bayes_chime.normal.models.SIRModel', 'SIRModel', ([], {}), '()\n', (2137, 2139), False, 'from bayes_chime.normal.models import SEIRModel, SIRModel\n'), ((2295, 2387), 'pandas.testing.assert_frame_equal', 'assert_frame_equal', (['predictions_sir[COLS_TO_COMPARE]', 'predictions_seir[COLS_TO_COMPARE]'], {}), '(predictions_sir[COLS_TO_COMPARE], predictions_seir[\n COLS_TO_COMPARE])\n', (2313, 2387), False, 'from pandas.testing import assert_frame_equal, assert_series_equal\n'), ((1889, 1928), 'bayes_chime.normal.models.SEIRModel.simulation_step', 'SEIRModel.simulation_step', (['data'], {}), '(data, **pars)\n', (1914, 1928), False, 'from bayes_chime.normal.models import SEIRModel, SIRModel\n')] |
import numpy as np
from scipy.interpolate import interp1d
from pyTools import *
################################################################################
#~~~~~~~~~Log ops
################################################################################
def logPolyVal(p,x):
ord = p.order()
logs = []
for idx in xrange(ord+1):
logs.append( np.log( p[idx] ) + (ord-idx)*np.log(x) )
return logs
################################################################################
#~~~~~~~~~Symmeterize data
################################################################################
def symmeterize( x, y, interp_type='cubic' ):
if x.min() <= 0:
raise ValueError('x.min() must be greater than zero.')
xs = np.array([-x,x]).flatten()
xs.sort()
f = interp1d( x , y , kind=interp_type )
return { 'x':xs , 'y':f(np.abs(xs)) }
################################################################################
#~~~~~~~~~3D Shapes
################################################################################
def makeSphere(x0=0,y0=0,z0=0,r=1,ntheta=30,nphi=30):
u = np.linspace(0, np.pi, ntheta)
v = np.linspace(0, 2 * np.pi, nphi)
x = np.outer(np.sin(u), np.sin(v))*r
y = np.outer(np.sin(u), np.cos(v))*r
z = np.outer(np.cos(u), np.ones_like(v))*r
return x+x0, y+y0, z+z0
def makeCylinder(x0=0,y0=0,z0=0,r=1,h=10,ntheta=30,nz=30):
u = np.linspace(0, 2*np.pi, ntheta)
z = np.linspace(0, h, nz)
UU,ZZ = np.meshgrid(u,z)
XX = np.cos(UU)*r
YY = np.sin(UU)*r
# ax.plot_wireframe(x, y, z)
return XX+x0, YY+y0, ZZ+z0
def generateLine3D( x0=0, x1=1, y0=0, y1=1, z0=0, z1=0, N=2 ):
return {'line':{'xData':np.linspace(x0,x1,N),
'yData':np.linspace(y0,y1,N),
'zData':np.linspace(z0,z1,N),
'cData':np.ones((N,1))}}
################################################################################
#~~~~~~~~~2D Shapes
################################################################################
def generateCircle(R=1, X0=0, Y0=0, N = 60, thetaMin = 0, thetaMax = 2*np.pi ):
thetas = np.linspace( thetaMin , thetaMax , N)
uY = np.sin( thetas )*R
uX = np.cos( thetas )*R
return {'circle':{'xData':uX+X0, 'yData':uY+Y0}}
def generateEllipse( RX=2, RY=1, X0=0, Y0=0, N = 60, thetaMin = 0, thetaMax = 2*np.pi ):
thetas = np.linspace( thetaMin , thetaMax , N)
uY = np.sin( thetas )*RY
uX = np.cos( thetas )*RX
return {'ellipse':{'xData':uX+X0, 'yData':uY+Y0}}
def makeCylinder2D( L = 10., R = 1., N=60, view_degrees=30. ):
yFac = np.cos(view_degrees * np.pi/180.)
zFac = np.sin(view_degrees * np.pi/180.)
xL = np.ones((2,1))*-R
xR = -xL
y = np.array([0,L])*yFac
cylinder = { 'leftSide':{'xData':xL, 'yData':y},
'rightSide':{'xData':xR, 'yData':y},
'upperEllipse':generateEllipse(RX = R, RY=R*zFac, Y0=L*yFac,N=N)['ellipse'],
'lowerHalfEllipse':generateEllipse(RX = R, RY=R*zFac, thetaMin=np.pi, thetaMax=2*np.pi, N=int(N/2.))['ellipse']}
return cylinder
################################################################################
#~~~~~~~~~Rotations
################################################################################
def rotateObject(x,y,z,ax=None,ay=None,az=None):
if ax is not None:
y,z = rotateAt(y,z,ax)
if ay is not None:
x,z = rotateAt(x,z,-ay)
if az is not None:
x,y = rotateAt(x,y,az)
return x,y,z
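# Rotate the 2D points (x, y) by angle a (in radians) about the origin.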
def rotateAt(x,y,a):
xp = np.cos(a)*x-np.sin(a)*y
yp = np.cos(a)*y+np.sin(a)*x
return xp, yp
def rotateObj2D( obj_in, degrees ):
obj = obj_in.copy()
keys = obj.keys()
for key in keys:
obj[key] = rotate2D( degrees=degrees, **obj[key] )
return obj
def rotate2D( xData, yData, degrees ):
x = xData.flatten()
y = yData.flatten()
z = np.zeros_like(x)
x,y,z = rotateObject( x, y, z, az=float(degrees)/180.*np.pi )
return {'xData':x, 'yData':y}
def rotateObj3D( obj_in, gamma, theta, phi ):
obj = obj_in.copy()
keys = obj.keys()
for key in keys:
obj[key] = rotate3D( gamma=gamma, theta=theta, phi=phi, **obj[key] )
return obj
def rotate3D( xData, yData, zData, gamma, theta, phi, kwargs_toggle=True, **kwargs ):
ignore_kwargs(kwargs, toggle=kwargs_toggle)
x = xData.flatten()
y = yData.flatten()
z = zData.flatten()
x,y,z = rotateObject( x, y, z, az=float(gamma)/180.*np.pi )
x,y,z = rotateObject( x, y, z, ay=float(theta)/180.*np.pi )
x,y,z = rotateObject( x, y, z, az=float(phi)/180.*np.pi )
return {'xData':x, 'yData':y, 'zData':z}
| [
"numpy.abs",
"numpy.ones_like",
"numpy.ones",
"numpy.log",
"scipy.interpolate.interp1d",
"numpy.array",
"numpy.linspace",
"numpy.cos",
"numpy.sin",
"numpy.meshgrid",
"numpy.zeros_like"
]
| [((804, 836), 'scipy.interpolate.interp1d', 'interp1d', (['x', 'y'], {'kind': 'interp_type'}), '(x, y, kind=interp_type)\n', (812, 836), False, 'from scipy.interpolate import interp1d\n'), ((1129, 1158), 'numpy.linspace', 'np.linspace', (['(0)', 'np.pi', 'ntheta'], {}), '(0, np.pi, ntheta)\n', (1140, 1158), True, 'import numpy as np\n'), ((1167, 1198), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', 'nphi'], {}), '(0, 2 * np.pi, nphi)\n', (1178, 1198), True, 'import numpy as np\n'), ((1425, 1458), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', 'ntheta'], {}), '(0, 2 * np.pi, ntheta)\n', (1436, 1458), True, 'import numpy as np\n'), ((1466, 1487), 'numpy.linspace', 'np.linspace', (['(0)', 'h', 'nz'], {}), '(0, h, nz)\n', (1477, 1487), True, 'import numpy as np\n'), ((1501, 1518), 'numpy.meshgrid', 'np.meshgrid', (['u', 'z'], {}), '(u, z)\n', (1512, 1518), True, 'import numpy as np\n'), ((2139, 2173), 'numpy.linspace', 'np.linspace', (['thetaMin', 'thetaMax', 'N'], {}), '(thetaMin, thetaMax, N)\n', (2150, 2173), True, 'import numpy as np\n'), ((2389, 2423), 'numpy.linspace', 'np.linspace', (['thetaMin', 'thetaMax', 'N'], {}), '(thetaMin, thetaMax, N)\n', (2400, 2423), True, 'import numpy as np\n'), ((2615, 2651), 'numpy.cos', 'np.cos', (['(view_degrees * np.pi / 180.0)'], {}), '(view_degrees * np.pi / 180.0)\n', (2621, 2651), True, 'import numpy as np\n'), ((2660, 2696), 'numpy.sin', 'np.sin', (['(view_degrees * np.pi / 180.0)'], {}), '(view_degrees * np.pi / 180.0)\n', (2666, 2696), True, 'import numpy as np\n'), ((3911, 3927), 'numpy.zeros_like', 'np.zeros_like', (['x'], {}), '(x)\n', (3924, 3927), True, 'import numpy as np\n'), ((1528, 1538), 'numpy.cos', 'np.cos', (['UU'], {}), '(UU)\n', (1534, 1538), True, 'import numpy as np\n'), ((1550, 1560), 'numpy.sin', 'np.sin', (['UU'], {}), '(UU)\n', (1556, 1560), True, 'import numpy as np\n'), ((2186, 2200), 'numpy.sin', 'np.sin', (['thetas'], {}), '(thetas)\n', (2192, 2200), True, 'import numpy as np\n'), ((2214, 2228), 'numpy.cos', 'np.cos', (['thetas'], {}), '(thetas)\n', (2220, 2228), True, 'import numpy as np\n'), ((2436, 2450), 'numpy.sin', 'np.sin', (['thetas'], {}), '(thetas)\n', (2442, 2450), True, 'import numpy as np\n'), ((2465, 2479), 'numpy.cos', 'np.cos', (['thetas'], {}), '(thetas)\n', (2471, 2479), True, 'import numpy as np\n'), ((2704, 2719), 'numpy.ones', 'np.ones', (['(2, 1)'], {}), '((2, 1))\n', (2711, 2719), True, 'import numpy as np\n'), ((2744, 2760), 'numpy.array', 'np.array', (['[0, L]'], {}), '([0, L])\n', (2752, 2760), True, 'import numpy as np\n'), ((754, 771), 'numpy.array', 'np.array', (['[-x, x]'], {}), '([-x, x])\n', (762, 771), True, 'import numpy as np\n'), ((870, 880), 'numpy.abs', 'np.abs', (['xs'], {}), '(xs)\n', (876, 880), True, 'import numpy as np\n'), ((1217, 1226), 'numpy.sin', 'np.sin', (['u'], {}), '(u)\n', (1223, 1226), True, 'import numpy as np\n'), ((1228, 1237), 'numpy.sin', 'np.sin', (['v'], {}), '(v)\n', (1234, 1237), True, 'import numpy as np\n'), ((1258, 1267), 'numpy.sin', 'np.sin', (['u'], {}), '(u)\n', (1264, 1267), True, 'import numpy as np\n'), ((1269, 1278), 'numpy.cos', 'np.cos', (['v'], {}), '(v)\n', (1275, 1278), True, 'import numpy as np\n'), ((1299, 1308), 'numpy.cos', 'np.cos', (['u'], {}), '(u)\n', (1305, 1308), True, 'import numpy as np\n'), ((1310, 1325), 'numpy.ones_like', 'np.ones_like', (['v'], {}), '(v)\n', (1322, 1325), True, 'import numpy as np\n'), ((1720, 1742), 'numpy.linspace', 'np.linspace', (['x0', 'x1', 'N'], {}), '(x0, x1, N)\n', (1731, 1742), 
True, 'import numpy as np\n'), ((1762, 1784), 'numpy.linspace', 'np.linspace', (['y0', 'y1', 'N'], {}), '(y0, y1, N)\n', (1773, 1784), True, 'import numpy as np\n'), ((1804, 1826), 'numpy.linspace', 'np.linspace', (['z0', 'z1', 'N'], {}), '(z0, z1, N)\n', (1815, 1826), True, 'import numpy as np\n'), ((1846, 1861), 'numpy.ones', 'np.ones', (['(N, 1)'], {}), '((N, 1))\n', (1853, 1861), True, 'import numpy as np\n'), ((3560, 3569), 'numpy.cos', 'np.cos', (['a'], {}), '(a)\n', (3566, 3569), True, 'import numpy as np\n'), ((3572, 3581), 'numpy.sin', 'np.sin', (['a'], {}), '(a)\n', (3578, 3581), True, 'import numpy as np\n'), ((3593, 3602), 'numpy.cos', 'np.cos', (['a'], {}), '(a)\n', (3599, 3602), True, 'import numpy as np\n'), ((3605, 3614), 'numpy.sin', 'np.sin', (['a'], {}), '(a)\n', (3611, 3614), True, 'import numpy as np\n'), ((367, 381), 'numpy.log', 'np.log', (['p[idx]'], {}), '(p[idx])\n', (373, 381), True, 'import numpy as np\n'), ((396, 405), 'numpy.log', 'np.log', (['x'], {}), '(x)\n', (402, 405), True, 'import numpy as np\n')] |
from setuptools import setup
setup(
name="ambient_api",
version="1.5.6",
packages=["ambient_api"],
url="https://github.com/avryhof/ambient_api",
license="MIT",
author="<NAME>",
author_email="<EMAIL>",
description="A Python class for accessing the Ambient Weather API.",
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3",
],
install_requires=["requests", "urllib3"],
)
| [
"setuptools.setup"
]
| [((30, 524), 'setuptools.setup', 'setup', ([], {'name': '"""ambient_api"""', 'version': '"""1.5.6"""', 'packages': "['ambient_api']", 'url': '"""https://github.com/avryhof/ambient_api"""', 'license': '"""MIT"""', 'author': '"""<NAME>"""', 'author_email': '"""<EMAIL>"""', 'description': '"""A Python class for accessing the Ambient Weather API."""', 'classifiers': "['Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 3']", 'install_requires': "['requests', 'urllib3']"}), "(name='ambient_api', version='1.5.6', packages=['ambient_api'], url=\n 'https://github.com/avryhof/ambient_api', license='MIT', author=\n '<NAME>', author_email='<EMAIL>', description=\n 'A Python class for accessing the Ambient Weather API.', classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 3'], install_requires=['requests',\n 'urllib3'])\n", (35, 524), False, 'from setuptools import setup\n')] |
from collections import deque
class Solution:
"""
@param n: a positive integer
@return: the minimum number of replacements
"""
def integerReplacement(self, n):
# Write your code here
steps = 0
if n == 1:
return steps
queue = deque([n])
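        # BFS over integer values: every element at the current queue level is `steps` replacements away from n.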
while queue:
size = len(queue)
print(queue, steps)
for _ in range(size):
num = queue.popleft()
if num == 1:
return steps
if num % 2 == 0:
queue.append(num // 2)
else:
queue.append(num + 1)
queue.append(num - 1)
steps += 1
return 0
| [
"collections.deque"
]
| [((294, 304), 'collections.deque', 'deque', (['[n]'], {}), '([n])\n', (299, 304), False, 'from collections import deque\n')] |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib.auth.models import AbstractUser, UserManager
from django.db import models
from django.utils import timezone
# Create your models here.
# Create our new user class
class AccountUserManager(UserManager):
def _create_user(self, username, email, password, is_staff, is_supervisor, **extra_fields):
"""
Creates and saves a User with the given username, email and password.
:param username:
:param email:
:param password:
:param is_staff:
:param is_supervisor:
:param extra_fields:
:return:
"""
now = timezone.now()
if not email:
raise ValueError('The given username must be set')
email = self.normalize_email(email)
user = self.model(username=email, email=email,
is_staff=is_staff, is_active=True,
is_supervisor=is_supervisor,
date_joined=now, **extra_fields)
user.set_password(password)
user.save(using=self.db)
return user
class User(AbstractUser):
# now that we've abstracted this class we can add any
# number of custom attribute to our user class
# in later units we'll be adding things like payment details!
    objects = AccountUserManager()
| [
"django.utils.timezone.now"
]
| [((673, 687), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (685, 687), False, 'from django.utils import timezone\n')] |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.10 on 2016-10-05 18:52
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("careeropportunity", "0002_careeropportunity_job_type")]
operations = [
migrations.AddField(
model_name="careeropportunity",
name="deadline",
field=models.DateField(blank=True, null=True, verbose_name="søknadsfrist"),
)
]
| [
"django.db.models.DateField"
]
| [((415, 483), 'django.db.models.DateField', 'models.DateField', ([], {'blank': '(True)', 'null': '(True)', 'verbose_name': '"""søknadsfrist"""'}), "(blank=True, null=True, verbose_name='søknadsfrist')\n", (431, 483), False, 'from django.db import migrations, models\n')] |
#!/usr/bin/env python3
import json
import argparse
import re
import datetime
import paramiko
import requests
# cmd ['ssh', 'smart',
# 'mkdir -p /home/levabd/smart-home-temp-humidity-monitor;
# cat - > /home/levabd/smart-home-temp-humidity-monitor/lr.json']
from miio import chuangmi_plug
from btlewrap import available_backends, BluepyBackend
from mitemp_bt.mitemp_bt_poller import MiTempBtPoller, \
MI_TEMPERATURE, MI_HUMIDITY, MI_BATTERY
state = {}
f = open('/home/pi/smart-climat-daemon/ac_state.json')
state = json.load(f)
plug_type = 'chuangmi.plug.m1'
def valid_mitemp_mac(mac, pat=re.compile(r"[0-9A-F]{2}:[0-9A-F]{2}:[0-9A-F]{2}:[0-9A-F]{2}:[0-9A-F]{2}:[0-9A-F]{2}")):
"""Check for valid mac addresses."""
if not pat.match(mac.upper()):
raise argparse.ArgumentTypeError(
'The MAC address "{}" seems to be in the wrong format'.format(mac))
return mac
def turn_on_humidifier():
"""Turn on humidifier on a first floor."""
hummidifier_plug = chuangmi_plug.ChuangmiPlug(
ip='192.168.19.59',
token='14f5b868a58ef4ffaef6fece61c65b16',
start_id=0,
debug=1,
lazy_discover=True,
model=plug_type)
hummidifier_plug.on()
def turn_off_humidifier():
"""Turn off humidifier on a first floor."""
hummidifier_plug = chuangmi_plug.ChuangmiPlug(
ip='192.168.19.59',
token='14f5b868a58ef4ffaef6fece61c65b16',
start_id=0,
debug=1,
lazy_discover=True,
model=plug_type)
hummidifier_plug.off()
def check_if_ac_off():
"""Check if AC is turned off."""
status_url = 'http://smart.levabd.pp.ua:2002/status-bedroom?key=<KEY>'
response = requests.get(status_url, timeout=(20, 30))
if ('address' not in response.json()) and ('name' not in response.json()):
return None
if ((response.json()['name'] == "08bc20043df8") and (response.json()['address'] == "192.168.19.54")):
if response.json()['props']['boot'] == 0:
return True
return False
return None
def check_if_ac_cool():
"""Check if AC is turned for a automate cooling."""
status_url = 'http://smart.levabd.pp.ua:2002/status-bedroom?key=<KEY>'
response = requests.get(status_url, timeout=(20, 30))
if ('address' not in response.json()) or ('name' not in response.json()):
return None
if ((response.json()['name'] == "08bc20043df8") and (response.json()['address'] == "192.168.19.54")):
if not response.json()['props']['boot'] == 1:
return False
if not response.json()['props']['runMode'] == '001':
return False
if not response.json()['props']['wdNumber'] == 25:
return False
if not response.json()['props']['windLevel'] == '001':
return False
return True
return None
def check_if_ac_heat():
"""Check if AC is turned for a automate heating."""
status_url = 'http://smart.levabd.pp.ua:2003/status/key/27fbc501b51b47663e77c46816a'
response = requests.get(status_url, timeout=(20, 30))
if ('address' not in response.json()) and ('name' not in response.json()):
return None
if ((response.json()['name'] == "08bc20043df8") and (response.json()['address'] == "192.168.19.54")):
if not response.json()['props']['boot'] == 1:
return False
if not response.json()['props']['runMode'] == '100':
return False
if not response.json()['props']['wdNumber'] == 23:
return False
if not response.json()['props']['windLevel'] == '001':
return False
return True
return None
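# The tried*/was* flags persisted in ac_state.json debounce AC commands: a request keeps being
# re-sent only until a later poll confirms the unit actually reached the requested mode.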
def turn_on_heat_ac():
"""Turn on AC on a first floor for a heating if it was not."""
if (state['wasTurnedHeat'] == 1) and not state['triedTurnedHeat'] == 1:
return
heat_url = 'http://smart.levabd.pp.ua:2003/heat/key/27fbc501b51b47663e77c46816a'
ac_heat = check_if_ac_heat()
if ac_heat is not None:
if not ac_heat:
state['triedTurnedHeat'] = 1
state['wasTurnedHeat'] = 0
with open('/home/pi/smart-climat-daemon/ac_state.json', 'w') as file:
json.dump(state, file)
response = requests.get(heat_url)
print(response.json())
else:
if state['triedTurnedHeat'] == 1:
state['triedTurnedOff'] = 0
state['wasTurnedOff'] = 0
state['triedTurnedCool'] = 0
state['wasTurnedCool'] = 0
state['triedTurnedHeat'] = 0
state['wasTurnedHeat'] = 1
with open('/home/pi/smart-climat-daemon/ac_state.json', 'w') as file:
json.dump(state, file)
def turn_on_cool_ac():
"""Turn on AC on a first floor for a cooling if it was not."""
if (state['wasTurnedCool'] == 1) and not state['triedTurnedCool'] == 1:
return
cool_url = 'http://smart.levabd.pp.ua:2003/cool/key/<KEY>'
ac_cool = check_if_ac_cool()
if ac_cool is not None:
if not ac_cool:
state['triedTurnedCool'] = 1
state['wasTurnedCool'] = 0
with open('/home/pi/smart-climat-daemon/ac_state.json', 'w') as file:
json.dump(state, file)
response = requests.get(cool_url)
print(response.json())
else:
if state['triedTurnedCool'] == 1:
state['triedTurnedOff'] = 0
state['wasTurnedOff'] = 0
state['triedTurnedCool'] = 0
state['wasTurnedCool'] = 1
state['triedTurnedHeat'] = 0
state['wasTurnedHeat'] = 0
with open('/home/pi/smart-climat-daemon/ac_state.json', 'w') as file:
json.dump(state, file)
def turn_off_ac():
"""Turn off AC on a first floor."""
if (state['wasTurnedOff'] == 1) and not state['triedTurnedOff'] == 1:
return
turn_url = 'http://smart.levabd.pp.ua:2003/power-off/key/27fbc501b51b47663e77c46816a'
ac_off = check_if_ac_off()
if ac_off is not None:
if not ac_off:
state['triedTurnedOff'] = 1
state['wasTurnedOff'] = 0
with open('/home/pi/smart-climat-daemon/ac_state.json', 'w') as file:
json.dump(state, file)
response = requests.get(turn_url)
print(response.json())
else:
if state['triedTurnedOff'] == 1:
state['triedTurnedOff'] = 0
state['wasTurnedOff'] = 1
state['triedTurnedCool'] = 0
state['wasTurnedCool'] = 0
state['triedTurnedHeat'] = 0
state['wasTurnedHeat'] = 0
with open('/home/pi/smart-climat-daemon/ac_state.json', 'w') as file:
json.dump(state, file)
def record_temp_humid(temperature, humidity):
"""Record temperature and humidity data for web interface monitor"""
dicty = {
"temperature": temperature,
"humidity": humidity
}
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect('smart.levabd.pp.ua', port = 2001, username='levabd', password='<PASSWORD>.')
sftp = ssh.open_sftp()
with sftp.open('smart-home-temp-humidity-monitor/lr.json', 'w') as outfile:
json.dump(dicty, outfile)
ssh.close()
def poll_temp_humidity():
"""Poll data frstate['triedTurnedOff']om the sensor."""
today = datetime.datetime.today()
backend = BluepyBackend
poller = MiTempBtPoller('58:2d:34:38:c0:91', backend)
temperature = poller.parameter_value(MI_TEMPERATURE)
humidity = poller.parameter_value(MI_HUMIDITY)
print("Month: {}".format(today.month))
print("Getting data from Mi Temperature and Humidity Sensor")
print("FW: {}".format(poller.firmware_version()))
print("Name: {}".format(poller.name()))
print("Battery: {}".format(poller.parameter_value(MI_BATTERY)))
print("Temperature: {}".format(poller.parameter_value(MI_TEMPERATURE)))
print("Humidity: {}".format(poller.parameter_value(MI_HUMIDITY)))
return (today, temperature, humidity)
# scan(args):
# """Scan for sensors."""
# backend = _get_backend(args)
# print('Scanning for 10 seconds...')
# devices = mitemp_scanner.scan(backend, 10)
# devices = []
# print('Found {} devices:'.format(len(devices)))
# for device in devices:
# print(' {}'.format(device))
def list_backends(_):
"""List all available backends."""
backends = [b.__name__ for b in available_backends()]
print('\n'.join(backends))
def main():
"""Main function."""
# check_if_ac_cool()
(today, temperature, humidity) = poll_temp_humidity()
# Record temperature and humidity for monitor
record_temp_humid(temperature, humidity)
try:
if (humidity > 49) and (today.month < 10) and (today.month > 4):
turn_off_humidifier()
if (humidity < 31) and (today.month < 10) and (today.month > 4):
turn_on_humidifier()
if (humidity < 31) and ((today.month > 9) or (today.month < 5)):
turn_on_humidifier()
if (humidity > 49) and ((today.month > 9) or (today.month < 5)):
turn_off_humidifier()
# Prevent Sleep of Xiaomi Smart Plug
hummidifier_plug = chuangmi_plug.ChuangmiPlug(
ip='192.168.19.59',
token='14f5b868a58ef4ffaef6fece61c65b16',
start_id=0,
debug=0,
lazy_discover=True,
model='chuangmi.plug.m1')
print(hummidifier_plug.status())
except Exception:
print("Can not connect to humidifier")
# clear env at night
if today.hour == 4:
state['triedTurnedOff'] = 0
state['wasTurnedOff'] = 0
state['triedTurnedCool'] = 0
state['wasTurnedCool'] = 0
state['triedTurnedHeat'] = 0
state['wasTurnedHeat'] = 0
with open('/home/pi/smart-climat-daemon/ac_state.json', 'w') as file:
json.dump(state, file)
if (today.hour > -1) and (today.hour < 7):
turn_off_ac()
if (temperature > 26.4) and (today.month < 6) and (today.month > 4) and (today.hour < 24) and (today.hour > 10):
turn_on_cool_ac()
if (temperature > 26.4) and (today.month < 10) and (today.month > 8) and (today.hour < 24) and (today.hour > 10):
turn_on_cool_ac()
if (temperature > 27.3) and (today.month < 9) and (today.month > 5) and (today.hour < 24) and (today.hour > 10):
turn_on_cool_ac()
if (temperature < 23.5) and (today.month < 10) and (today.month > 4):
turn_off_ac()
# _if (temperature < 20) and ((today.month > 9) or (today.month < 5)) and (today.hour < 24) and (today.hour > 9):
# turn_on_heat_ac()
if (temperature > 22) and ((today.month > 9) or (today.month < 5)):
turn_off_ac()
if __name__ == '__main__':
main()
| [
"re.compile",
"paramiko.AutoAddPolicy",
"btlewrap.available_backends",
"requests.get",
"datetime.datetime.today",
"miio.chuangmi_plug.ChuangmiPlug",
"json.load",
"paramiko.SSHClient",
"json.dump",
"mitemp_bt.mitemp_bt_poller.MiTempBtPoller"
]
| [((522, 534), 'json.load', 'json.load', (['f'], {}), '(f)\n', (531, 534), False, 'import json\n'), ((597, 687), 're.compile', 're.compile', (['"""[0-9A-F]{2}:[0-9A-F]{2}:[0-9A-F]{2}:[0-9A-F]{2}:[0-9A-F]{2}:[0-9A-F]{2}"""'], {}), "(\n '[0-9A-F]{2}:[0-9A-F]{2}:[0-9A-F]{2}:[0-9A-F]{2}:[0-9A-F]{2}:[0-9A-F]{2}')\n", (607, 687), False, 'import re\n'), ((996, 1152), 'miio.chuangmi_plug.ChuangmiPlug', 'chuangmi_plug.ChuangmiPlug', ([], {'ip': '"""192.168.19.59"""', 'token': '"""14f5b868a58ef4ffaef6fece61c65b16"""', 'start_id': '(0)', 'debug': '(1)', 'lazy_discover': '(True)', 'model': 'plug_type'}), "(ip='192.168.19.59', token=\n '14f5b868a58ef4ffaef6fece61c65b16', start_id=0, debug=1, lazy_discover=\n True, model=plug_type)\n", (1022, 1152), False, 'from miio import chuangmi_plug\n'), ((1318, 1474), 'miio.chuangmi_plug.ChuangmiPlug', 'chuangmi_plug.ChuangmiPlug', ([], {'ip': '"""192.168.19.59"""', 'token': '"""14f5b868a58ef4ffaef6fece61c65b16"""', 'start_id': '(0)', 'debug': '(1)', 'lazy_discover': '(True)', 'model': 'plug_type'}), "(ip='192.168.19.59', token=\n '14f5b868a58ef4ffaef6fece61c65b16', start_id=0, debug=1, lazy_discover=\n True, model=plug_type)\n", (1344, 1474), False, 'from miio import chuangmi_plug\n'), ((1693, 1735), 'requests.get', 'requests.get', (['status_url'], {'timeout': '(20, 30)'}), '(status_url, timeout=(20, 30))\n', (1705, 1735), False, 'import requests\n'), ((2223, 2265), 'requests.get', 'requests.get', (['status_url'], {'timeout': '(20, 30)'}), '(status_url, timeout=(20, 30))\n', (2235, 2265), False, 'import requests\n'), ((3029, 3071), 'requests.get', 'requests.get', (['status_url'], {'timeout': '(20, 30)'}), '(status_url, timeout=(20, 30))\n', (3041, 3071), False, 'import requests\n'), ((7071, 7091), 'paramiko.SSHClient', 'paramiko.SSHClient', ([], {}), '()\n', (7089, 7091), False, 'import paramiko\n'), ((7507, 7532), 'datetime.datetime.today', 'datetime.datetime.today', ([], {}), '()\n', (7530, 7532), False, 'import datetime\n'), ((7574, 7618), 'mitemp_bt.mitemp_bt_poller.MiTempBtPoller', 'MiTempBtPoller', (['"""58:2d:34:38:c0:91"""', 'backend'], {}), "('58:2d:34:38:c0:91', backend)\n", (7588, 7618), False, 'from mitemp_bt.mitemp_bt_poller import MiTempBtPoller, MI_TEMPERATURE, MI_HUMIDITY, MI_BATTERY\n'), ((7128, 7152), 'paramiko.AutoAddPolicy', 'paramiko.AutoAddPolicy', ([], {}), '()\n', (7150, 7152), False, 'import paramiko\n'), ((7364, 7389), 'json.dump', 'json.dump', (['dicty', 'outfile'], {}), '(dicty, outfile)\n', (7373, 7389), False, 'import json\n'), ((9381, 9546), 'miio.chuangmi_plug.ChuangmiPlug', 'chuangmi_plug.ChuangmiPlug', ([], {'ip': '"""192.168.19.59"""', 'token': '"""14f5b868a58ef4ffaef6fece61c65b16"""', 'start_id': '(0)', 'debug': '(0)', 'lazy_discover': '(True)', 'model': '"""chuangmi.plug.m1"""'}), "(ip='192.168.19.59', token=\n '14f5b868a58ef4ffaef6fece61c65b16', start_id=0, debug=0, lazy_discover=\n True, model='chuangmi.plug.m1')\n", (9407, 9546), False, 'from miio import chuangmi_plug\n'), ((4227, 4249), 'requests.get', 'requests.get', (['heat_url'], {}), '(heat_url)\n', (4239, 4249), False, 'import requests\n'), ((5291, 5313), 'requests.get', 'requests.get', (['cool_url'], {}), '(cool_url)\n', (5303, 5313), False, 'import requests\n'), ((6343, 6365), 'requests.get', 'requests.get', (['turn_url'], {}), '(turn_url)\n', (6355, 6365), False, 'import requests\n'), ((8601, 8621), 'btlewrap.available_backends', 'available_backends', ([], {}), '()\n', (8619, 8621), False, 'from btlewrap import available_backends, BluepyBackend\n'), ((10074, 
10096), 'json.dump', 'json.dump', (['state', 'file'], {}), '(state, file)\n', (10083, 10096), False, 'import json\n'), ((4181, 4203), 'json.dump', 'json.dump', (['state', 'file'], {}), '(state, file)\n', (4190, 4203), False, 'import json\n'), ((5245, 5267), 'json.dump', 'json.dump', (['state', 'file'], {}), '(state, file)\n', (5254, 5267), False, 'import json\n'), ((6297, 6319), 'json.dump', 'json.dump', (['state', 'file'], {}), '(state, file)\n', (6306, 6319), False, 'import json\n'), ((4713, 4735), 'json.dump', 'json.dump', (['state', 'file'], {}), '(state, file)\n', (4722, 4735), False, 'import json\n'), ((5777, 5799), 'json.dump', 'json.dump', (['state', 'file'], {}), '(state, file)\n', (5786, 5799), False, 'import json\n'), ((6828, 6850), 'json.dump', 'json.dump', (['state', 'file'], {}), '(state, file)\n', (6837, 6850), False, 'import json\n')] |
# --------------
# Importing header files
import numpy as np
import warnings
warnings.filterwarnings('ignore')
#New record to be appended to the census data
new_record=[[50, 9, 4, 1, 0, 0, 40, 0]]
#Reading file ('path' is expected to be defined beforehand and point to the census CSV)
data = np.genfromtxt(path, delimiter=",", skip_header=1)
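#Assumed column layout (an inference from how the array is indexed below,
#not documented in the original script):
#  0=age, 1=education-num, 2=race, 6=hours-per-week, 7=income/pay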
data.shape
cenus=np.concatenate((new_record,data),axis=0)
cenus.shape
print(cenus)
age=cenus[:,0]
max_age=age.max()
print(max_age)
min_age=age.min()
mean_age=np.mean(age)
age_std=np.std(age)
race=cenus[:,2]
print(race)
#Count observations for each race category (0-4)
race_0=(race==0)
len_0=len(race[race_0])
print(len_0)
race_1=(race==1)
len_1=len(race[race_1])
race_2=(race==2)
race_3=(race==3)
race_4=(race==4)
len_2=len(race[race_2])
len_3=len(race[race_3])
len_4=len(race[race_4])
#Minority race is the category with the fewest observations
race_counts=[len_0,len_1,len_2,len_3,len_4]
minority_race=race_counts.index(min(race_counts))
print(minority_race)
senior_citizen=(age>60)
working_hour_sum=sum(cenus[:,6][senior_citizen])
print(working_hour_sum)
senior_citizen_len=len(age[senior_citizen])
avg_working_hours=working_hour_sum/senior_citizen_len
avg_working_hours=round(avg_working_hours,2)
education_num=cenus[:,1]
print(education_num)
high=education_num>10
#high=education_num[high]
print(high)
low=education_num<=10
#low=education_num[low]
print(low)
INCOME=cenus[:,7][high]
print(INCOME)
print(np.mean(INCOME))
avg_pay_high=round(np.mean(INCOME),2)
print(avg_pay_high)
LOW_AVG=cenus[:,7][low]
avg_pay_low=round(np.mean(LOW_AVG),2)
print(avg_pay_low)
#Code starts here
| [
"numpy.mean",
"numpy.concatenate",
"numpy.std",
"numpy.genfromtxt",
"warnings.filterwarnings"
]
| [((82, 115), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (105, 115), False, 'import warnings\n'), ((203, 252), 'numpy.genfromtxt', 'np.genfromtxt', (['path'], {'delimiter': '""","""', 'skip_header': '(1)'}), "(path, delimiter=',', skip_header=1)\n", (216, 252), True, 'import numpy as np\n'), ((272, 314), 'numpy.concatenate', 'np.concatenate', (['(new_record, data)'], {'axis': '(0)'}), '((new_record, data), axis=0)\n', (286, 314), True, 'import numpy as np\n'), ((420, 432), 'numpy.mean', 'np.mean', (['age'], {}), '(age)\n', (427, 432), True, 'import numpy as np\n'), ((442, 453), 'numpy.std', 'np.std', (['age'], {}), '(age)\n', (448, 453), True, 'import numpy as np\n'), ((1220, 1235), 'numpy.mean', 'np.mean', (['INCOME'], {}), '(INCOME)\n', (1227, 1235), True, 'import numpy as np\n'), ((1257, 1272), 'numpy.mean', 'np.mean', (['INCOME'], {}), '(INCOME)\n', (1264, 1272), True, 'import numpy as np\n'), ((1341, 1357), 'numpy.mean', 'np.mean', (['LOW_AVG'], {}), '(LOW_AVG)\n', (1348, 1357), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('scls', '0003_other_repos'),
]
operations = [
migrations.AlterField(
model_name='otherrepo',
name='arch',
field=models.CharField(default='', blank=True, verbose_name='Architecture', max_length=20),
),
migrations.AlterField(
model_name='otherrepo',
name='command',
field=models.TextField(default='', blank=True, verbose_name='Command'),
),
migrations.AlterField(
model_name='otherrepo',
name='icon',
field=models.CharField(default='', blank=True, verbose_name='Icon', choices=[('centos', 'centos'), ('epel', 'epel'), ('fedora', 'fedora'), ('rhel', 'rhel')], max_length=20),
),
migrations.AlterField(
model_name='otherrepo',
name='version',
field=models.CharField(default='', blank=True, verbose_name='Distribution version', max_length=20),
),
]
| [
"django.db.models.TextField",
"django.db.models.CharField"
]
| [((343, 431), 'django.db.models.CharField', 'models.CharField', ([], {'default': '""""""', 'blank': '(True)', 'verbose_name': '"""Architecture"""', 'max_length': '(20)'}), "(default='', blank=True, verbose_name='Architecture',\n max_length=20)\n", (359, 431), False, 'from django.db import migrations, models\n'), ((553, 617), 'django.db.models.TextField', 'models.TextField', ([], {'default': '""""""', 'blank': '(True)', 'verbose_name': '"""Command"""'}), "(default='', blank=True, verbose_name='Command')\n", (569, 617), False, 'from django.db import migrations, models\n'), ((740, 915), 'django.db.models.CharField', 'models.CharField', ([], {'default': '""""""', 'blank': '(True)', 'verbose_name': '"""Icon"""', 'choices': "[('centos', 'centos'), ('epel', 'epel'), ('fedora', 'fedora'), ('rhel', 'rhel')\n ]", 'max_length': '(20)'}), "(default='', blank=True, verbose_name='Icon', choices=[(\n 'centos', 'centos'), ('epel', 'epel'), ('fedora', 'fedora'), ('rhel',\n 'rhel')], max_length=20)\n", (756, 915), False, 'from django.db import migrations, models\n'), ((1032, 1129), 'django.db.models.CharField', 'models.CharField', ([], {'default': '""""""', 'blank': '(True)', 'verbose_name': '"""Distribution version"""', 'max_length': '(20)'}), "(default='', blank=True, verbose_name=\n 'Distribution version', max_length=20)\n", (1048, 1129), False, 'from django.db import migrations, models\n')] |
#import external libraries used in code
import requests, json
import pycountry
print('Currency Exchange')
currencies = []
def findCurrency():
    #Finds all available currencies
    allCurrency = list(pycountry.currencies)
    for x in allCurrency:
        #Adds each currency's three-letter ISO code to the "currencies" list
        currencies.append(x.alpha_3)
#Organizes all values in "currency" list
currecyDisplay = ''
inline = 0
for cs in currencies:
currecyDisplay += cs + ' | '
inline += 1
#Allows up to 26 ISOs to be in one line
if inline >= 26:
currecyDisplay += '\n '
inline = 0
#Displays all currency ISOs to user
    print('Available Currencies:\n',currecyDisplay)
def help():
#Ask user if they need help
questions = input('Type ? for help or Enter to continue: ')
#If user inputs "?" run help procedure
if questions == '?':
#Display information order
print('--------\nCurrency Exchange Help\nISO currency codes are three-letter alphabetic codes that represent the various currencies\n\nCurrency ISO:\nCurrency Name:\n--------')
#Obtains information of all currencies
        allCurrency = list(pycountry.currencies)
        #For each currency obtain the ISO code and the currency name
        #Display ISO code and name
        for x in allCurrency:
            print(x.alpha_3)
            print(x.name + '\n')
print('--------\n')
#Else user does not input "?" continue program
else:
pass
def userInput():
#Program try asking user for data input
try:
fromCurrency = input('From (ISO): ').upper()
toCurrency = input('To (ISO): ').upper()
currencyAmount = input('Amount: ')
currencyAmount = int(currencyAmount.replace(',', ''))
    #If the data entered is not the correct type, inform the user
    except ValueError:
        print('Amount Must Be A Number')
    #Return entered data
    return currencyAmount, fromCurrency, toCurrency
def checkInfo(fromC, toC, currencyA, check):
#"validCurrency" value increses as data inputed if verified
validCurrency = 0
    #Check if the entered ISO codes are valid
    #If values are valid the value of "validCurrency" is increased
for givenCurrencies in currencies:
if fromC == givenCurrencies:
validCurrency += 1
for givenCurrencies in currencies:
if toC == givenCurrencies:
validCurrency += 1
#Check if "validCurrency" meets necessary verification value
#Check if "validCurrency" is not 2 (Data is not valid) or inputed amount data is not the correct value
if validCurrency != 2 or type(currencyA) != int:
#Let user know data is invalid
print('Information Invalid\n')
#Ask user if they need help
help()
#Reset "validCurrency"
validCurrency = 0
#Set "check" as False
checks = False
#If type of data is correct and valid "check" is set to True
else:
checks = True
return fromC, toC, currencyA, checks
def dataInput():
#Data has not been checked yet, therefore "check" is False
check = False
#While the data is not valid or not checked repeat data input and data check
while check == False:
currencyAmount, fromCurrency, toCurrency = userInput()
fromC, toC, currencyA, check = checkInfo(fromCurrency, toCurrency, currencyAmount, check)
#Once data is valid and checked return values
return fromC, toC, currencyA
def userData():
#No data if the information provided is correct
correctInfo = ''
#While the user does not approve of data, repeat data input and data check
while correctInfo != 'y':
fromC, toC, currencyA = dataInput()
        #Display the data the user has entered after it has been checked and validated
print('\nFrom:',fromC)
print('To:',toC)
print('Amount:', currencyA)
#Ask user if the data provided is correct
correctInfo = input('Is the information correct (y/n)?: ').lower()
print('')
help()
#Once data is approved by user, return values
return currencyA, fromC, toC
def realTimeRate(from_currency, to_currency):
#API key provided by Alpha Vanatage
api_key = "<KEY>"
#Define "url" where data is stored
#"url" varies from user selected data
url = ('https://www.alphavantage.co/query?function=CURRENCY_EXCHANGE_RATE&from_currency=%s&to_currency=%s&apikey=%s' % (from_currency, to_currency, api_key))
    #Get the response from the request to "url"
req = requests.get(url)
#Obtain json format and set data for python to read
#"Result" has nested dictionaries
result = req.json()
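    # Rough sketch (illustrative, an assumption about the payload) of the nested
    # structure this code relies on; only the keys accessed below are shown and
    # the live Alpha Vantage response contains additional fields:
    # result = {"Realtime Currency Exchange Rate": {
    #     "2. From_Currency Name": "...",
    #     "4. To_Currency Name": "...",
    #     "5. Exchange Rate": "..."}}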
#Display exchange rate information to user
print("Realtime Currency Exchange Rate for",
result["Realtime Currency Exchange Rate"]
["2. From_Currency Name"], "to",
result["Realtime Currency Exchange Rate"]
["4. To_Currency Name"], "is",
result["Realtime Currency Exchange Rate"]
['5. Exchange Rate'], to_currency)
#Return the value of exchange
return float(result["Realtime Currency Exchange Rate"]
['5. Exchange Rate'])
def completeExchange(rate, cAmount, fCurrency, tCurrency):
#Total of the "to" currency is the rate times the amount of the "from" currency
total = rate * cAmount
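    # Illustrative arithmetic (made-up numbers): exchanging 100 units at a rate
    # of 0.92 gives total = 0.92 * 100 = 92.00 in the target currency.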
end = ' '
    #Keep the program running until the user presses the Enter key
while end == ' ':
print('\n%s %s is %.2f %s' % (cAmount, fCurrency, total, tCurrency))
end = input('Press Enter To Close')
if __name__ == "__main__":
findCurrency()
help()
currencyAmount, fromCurrency, toCurrency = userData()
rate = realTimeRate(fromCurrency, toCurrency)
completeExchange(rate, currencyAmount, fromCurrency, toCurrency)
| [
"requests.get"
]
| [((4912, 4929), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (4924, 4929), False, 'import requests, json\n')] |
# Copyright (C) 2018 <NAME>, <NAME>
# All rights reserved.
#
# This file is part of yambopy
#
from __future__ import print_function
from builtins import range
from yambopy import *
from qepy import *
import json
import matplotlib.pyplot as plt
import numpy as np
import sys
import argparse
import operator
def analyse_bse( folder, var, exc_n, exc_int, exc_degen, exc_max_E, pack, text, draw ):
"""
Using ypp, you can study the convergence of BSE calculations in 2 ways:
Create a .png of all absorption spectra relevant to the variable you study
Look at the eigenvalues of the first n "bright" excitons (given a threshold intensity)
The script reads from <folder> all results from <variable> calculations for processing.
    The resulting pictures and data files are saved in the ./analyse_<folder>/ directory.
By default, the graphical interface is deactivated (assuming you run on a cluster because of ypp calls).
See line 2 inside the script.
"""
# Packing results (o-* files) from the calculations into yambopy-friendly .json files
if pack: # True by default, False if -np used
print('Packing ...')
pack_files_in_folder(folder,mask=var)
pack_files_in_folder(folder,mask='reference')
print('Packing done.')
else:
print('Packing skipped.')
# importing data from .json files in <folder>
print('Importing...')
data = YamboAnalyser(folder)
# extract data according to relevant var
invars = data.get_inputfiles_tag(var)
# Get only files related to the convergence study of the variable,
# ordered to have a smooth plot
keys=[]
sorted_invars = sorted(list(invars.items()), key=operator.itemgetter(1))
for i in range(0,len(sorted_invars)):
key=sorted_invars[i][0]
if key.startswith(var) or key=='reference.json':
keys.append(key)
print('Files detected: ',keys)
# unit of the input value
unit = invars[keys[0]]['variables'][var][1]
######################
# Output-file filename
######################
    os.system('mkdir -p analyse_%s'%folder)
outname = './analyse_%s/%s_%s'%(folder,folder,var)
# Array that will contain the output
excitons = []
# Loop over all calculations
for key in keys:
jobname=key.replace('.json','')
print(jobname)
# input value
# BndsRn__ is a special case
if var.startswith('BndsRnX'):
# format : [1, nband, ...]
inp = invars[key]['variables'][var][0][1]
else:
inp = invars[key]['variables'][var][0]
print('Preparing JSON file. Calling ypp if necessary.')
### Creating the 'absorptionspectra.json' file
# It will contain the exciton energies
y = YamboOut(folder=folder,save_folder=folder)
# Args : name of job, SAVE folder path, folder where job was run path
a = YamboBSEAbsorptionSpectra(jobname,path=folder)
# Get excitons values (runs ypp once)
a.get_excitons(min_intensity=exc_int,max_energy=exc_max_E,Degen_Step=exc_degen)
# Write .json file with spectra and eigenenergies
a.write_json(filename=outname)
### Loading data from .json file
f = open(outname+'.json')
data = json.load(f)
f.close()
print('JSON file prepared and loaded.')
### Plotting the absorption spectra
# BSE spectra
plt.plot(data['E/ev[1]'], data['EPS-Im[2]'],label=jobname,lw=2)
# # Axes : lines for exciton energies (disabled, would make a mess)
# for n,exciton in enumerate(data['excitons']):
# plt.axvline(exciton['energy'])
### Creating array with exciton values (according to settings)
l = [inp]
for n,exciton in enumerate(data['excitons']):
if n <= exc_n-1:
l.append(exciton['energy'])
excitons.append(l)
if text:
header = 'Columns : '+var+' (in '+unit+') and "bright" excitons eigenenergies in order.'
print(excitons)
np.savetxt(outname+'.dat',excitons,header=header)
#np.savetxt(outname,excitons,header=header,fmt='%1f')
print(outname+'.dat')
else:
print('-nt flag : no text produced.')
if draw:
        plt.xlabel(r'$\omega$ (eV)')
plt.gca().yaxis.set_major_locator(plt.NullLocator())
plt.legend()
#plt.draw()
#plt.show()
plt.savefig(outname+'.png', bbox_inches='tight')
print(outname+'.png')
else:
print('-nd flag : no plot produced.')
print('Done.')
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Study convergence on BS calculations using ypp calls.')
pa = parser.add_argument
pa('folder', help='Folder containing SAVE and convergence runs.' )
pa('variable', help='Variable tested (e.g. FFTGvecs)' )
pa('-ne','--numbexc', help='Number of excitons to read beyond threshold', default=2,type=int)
pa('-ie','--intexc', help='Minimum intensity for excitons to be considered bright', default=0.05,type=float)
pa('-de','--degenexc', help='Energy threshold under which different peaks are merged (eV)', default=0.01,type=float)
pa('-me','--maxexc', help='Energy threshold after which excitons are not read anymore (eV)', default=8.0,type=float)
pa('-np','--nopack', help='Skips packing o- files into .json files', action='store_false')
pa('-nt','--notext', help='Skips writing the .dat file', action='store_false')
pa('-nd','--nodraw', help='Skips drawing (plotting) the abs spectra', action='store_false')
if len(sys.argv)==1:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
folder = args.folder
var = args.variable
exc_n = args.numbexc
exc_int = args.intexc
exc_degen = args.degenexc
exc_max_E = args.maxexc
pack = args.nopack
    text      = args.notext
    draw      = args.nodraw
analyse_bse( folder, var, exc_n, exc_int, exc_degen, exc_max_E, pack=pack, text=text, draw=draw )
| [
"matplotlib.pyplot.savefig",
"argparse.ArgumentParser",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.savetxt",
"sys.exit",
"json.load",
"operator.itemgetter",
"matplotlib.pyplot.NullLocator",
"matplotlib.pyplot.legend"
]
| [((4633, 4730), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Study convergence on BS calculations using ypp calls."""'}), "(description=\n 'Study convergence on BS calculations using ypp calls.')\n", (4656, 4730), False, 'import argparse\n'), ((3281, 3293), 'json.load', 'json.load', (['f'], {}), '(f)\n', (3290, 3293), False, 'import json\n'), ((3435, 3500), 'matplotlib.pyplot.plot', 'plt.plot', (["data['E/ev[1]']", "data['EPS-Im[2]']"], {'label': 'jobname', 'lw': '(2)'}), "(data['E/ev[1]'], data['EPS-Im[2]'], label=jobname, lw=2)\n", (3443, 3500), True, 'import matplotlib.pyplot as plt\n'), ((4058, 4111), 'numpy.savetxt', 'np.savetxt', (["(outname + '.dat')", 'excitons'], {'header': 'header'}), "(outname + '.dat', excitons, header=header)\n", (4068, 4111), True, 'import numpy as np\n'), ((4278, 4306), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\omega$ (eV)"""'], {}), "('$\\\\omega$ (eV)')\n", (4288, 4306), True, 'import matplotlib.pyplot as plt\n'), ((4375, 4387), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (4385, 4387), True, 'import matplotlib.pyplot as plt\n'), ((4436, 4486), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(outname + '.png')"], {'bbox_inches': '"""tight"""'}), "(outname + '.png', bbox_inches='tight')\n", (4447, 4486), True, 'import matplotlib.pyplot as plt\n'), ((5704, 5715), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (5712, 5715), False, 'import sys\n'), ((1696, 1718), 'operator.itemgetter', 'operator.itemgetter', (['(1)'], {}), '(1)\n', (1715, 1718), False, 'import operator\n'), ((4348, 4365), 'matplotlib.pyplot.NullLocator', 'plt.NullLocator', ([], {}), '()\n', (4363, 4365), True, 'import matplotlib.pyplot as plt\n'), ((4314, 4323), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (4321, 4323), True, 'import matplotlib.pyplot as plt\n')] |
from part1 import (
gamma_board,
gamma_busy_fields,
gamma_delete,
gamma_free_fields,
gamma_golden_move,
gamma_golden_possible,
gamma_move,
gamma_new,
)
"""
scenario: test_random_actions
uuid: 554539540
"""
"""
random actions, total chaos
"""
board = gamma_new(6, 8, 3, 17)
assert board is not None
assert gamma_move(board, 1, 7, 4) == 0
assert gamma_move(board, 1, 4, 3) == 1
assert gamma_busy_fields(board, 1) == 1
assert gamma_move(board, 2, 5, 1) == 1
assert gamma_move(board, 2, 1, 7) == 1
assert gamma_busy_fields(board, 2) == 2
assert gamma_golden_possible(board, 2) == 1
assert gamma_move(board, 3, 1, 0) == 1
assert gamma_golden_move(board, 3, 3, 4) == 0
assert gamma_busy_fields(board, 2) == 2
assert gamma_move(board, 3, 1, 3) == 1
assert gamma_move(board, 1, 3, 5) == 1
assert gamma_move(board, 1, 2, 3) == 1
assert gamma_golden_possible(board, 1) == 1
assert gamma_move(board, 2, 1, 0) == 0
assert gamma_move(board, 3, 2, 2) == 1
assert gamma_golden_possible(board, 3) == 1
assert gamma_move(board, 1, 0, 2) == 1
assert gamma_move(board, 1, 1, 1) == 1
assert gamma_move(board, 2, 5, 4) == 1
assert gamma_move(board, 3, 0, 4) == 1
assert gamma_golden_possible(board, 3) == 1
assert gamma_move(board, 1, 1, 2) == 1
assert gamma_move(board, 2, 1, 4) == 1
assert gamma_move(board, 2, 1, 6) == 1
assert gamma_move(board, 3, 1, 2) == 0
assert gamma_move(board, 1, 0, 3) == 1
assert gamma_move(board, 1, 4, 2) == 1
board251673140 = gamma_board(board)
assert board251673140 is not None
assert board251673140 == (".2....\n"
".2....\n"
"...1..\n"
"32...2\n"
"131.1.\n"
"113.1.\n"
".1...2\n"
".3....\n")
del board251673140
board251673140 = None
assert gamma_move(board, 2, 4, 3) == 0
assert gamma_move(board, 2, 5, 1) == 0
assert gamma_move(board, 3, 4, 5) == 1
assert gamma_move(board, 3, 3, 0) == 1
assert gamma_free_fields(board, 3) == 29
assert gamma_move(board, 2, 1, 7) == 0
assert gamma_move(board, 2, 3, 5) == 0
assert gamma_move(board, 3, 0, 5) == 1
assert gamma_move(board, 3, 0, 1) == 1
assert gamma_golden_possible(board, 3) == 1
assert gamma_move(board, 1, 3, 0) == 0
assert gamma_move(board, 1, 0, 7) == 1
board281476409 = gamma_board(board)
assert board281476409 is not None
assert board281476409 == ("12....\n"
".2....\n"
"3..13.\n"
"32...2\n"
"131.1.\n"
"113.1.\n"
"31...2\n"
".3.3..\n")
del board281476409
board281476409 = None
assert gamma_move(board, 2, 5, 1) == 0
assert gamma_move(board, 2, 5, 4) == 0
assert gamma_golden_possible(board, 2) == 1
assert gamma_move(board, 3, 7, 3) == 0
assert gamma_move(board, 3, 5, 1) == 0
assert gamma_busy_fields(board, 3) == 8
assert gamma_move(board, 1, 5, 4) == 0
assert gamma_move(board, 1, 0, 0) == 1
assert gamma_move(board, 2, 6, 3) == 0
assert gamma_move(board, 2, 4, 4) == 1
assert gamma_move(board, 3, 0, 5) == 0
assert gamma_move(board, 3, 0, 1) == 0
assert gamma_free_fields(board, 3) == 24
assert gamma_move(board, 1, 1, 7) == 0
assert gamma_move(board, 1, 2, 1) == 1
board412285252 = gamma_board(board)
assert board412285252 is not None
assert board412285252 == ("12....\n"
".2....\n"
"3..13.\n"
"32..22\n"
"131.1.\n"
"113.1.\n"
"311..2\n"
"13.3..\n")
del board412285252
board412285252 = None
assert gamma_move(board, 2, 1, 6) == 0
assert gamma_move(board, 2, 2, 1) == 0
assert gamma_move(board, 3, 1, 2) == 0
assert gamma_free_fields(board, 3) == 23
assert gamma_golden_move(board, 3, 4, 4) == 1
assert gamma_move(board, 1, 0, 2) == 0
assert gamma_move(board, 1, 3, 6) == 1
assert gamma_golden_possible(board, 1) == 1
assert gamma_move(board, 2, 7, 4) == 0
assert gamma_free_fields(board, 2) == 22
assert gamma_move(board, 3, 5, 5) == 1
assert gamma_move(board, 3, 5, 5) == 0
assert gamma_free_fields(board, 3) == 21
assert gamma_move(board, 1, 0, 5) == 0
assert gamma_move(board, 1, 5, 7) == 1
assert gamma_move(board, 2, 0, 6) == 1
assert gamma_move(board, 2, 5, 6) == 1
assert gamma_move(board, 3, 2, 2) == 0
assert gamma_move(board, 1, 5, 2) == 1
assert gamma_move(board, 2, 7, 4) == 0
assert gamma_move(board, 3, 2, 3) == 0
assert gamma_move(board, 3, 3, 1) == 1
assert gamma_move(board, 1, 5, 1) == 0
assert gamma_free_fields(board, 1) == 16
assert gamma_move(board, 2, 4, 2) == 0
assert gamma_move(board, 3, 4, 1) == 1
assert gamma_move(board, 3, 5, 2) == 0
assert gamma_move(board, 1, 7, 4) == 0
assert gamma_move(board, 1, 4, 1) == 0
assert gamma_move(board, 2, 0, 2) == 0
assert gamma_move(board, 2, 0, 5) == 0
assert gamma_busy_fields(board, 2) == 7
assert gamma_move(board, 3, 5, 2) == 0
assert gamma_move(board, 1, 1, 5) == 1
assert gamma_move(board, 2, 3, 5) == 0
assert gamma_move(board, 2, 4, 1) == 0
assert gamma_move(board, 3, 0, 3) == 0
assert gamma_move(board, 3, 1, 5) == 0
assert gamma_move(board, 1, 2, 4) == 1
assert gamma_move(board, 1, 3, 0) == 0
assert gamma_busy_fields(board, 1) == 16
assert gamma_move(board, 2, 3, 5) == 0
assert gamma_move(board, 2, 3, 1) == 0
assert gamma_move(board, 3, 5, 2) == 0
assert gamma_move(board, 1, 0, 4) == 0
assert gamma_move(board, 1, 0, 6) == 0
assert gamma_move(board, 2, 5, 5) == 0
assert gamma_golden_move(board, 2, 2, 2) == 1
assert gamma_move(board, 1, 5, 5) == 0
assert gamma_free_fields(board, 1) == 13
assert gamma_move(board, 2, 2, 6) == 1
assert gamma_move(board, 2, 5, 6) == 0
assert gamma_move(board, 3, 4, 3) == 0
assert gamma_move(board, 1, 4, 3) == 0
assert gamma_move(board, 1, 3, 5) == 0
assert gamma_move(board, 2, 2, 0) == 1
assert gamma_move(board, 3, 0, 4) == 0
assert gamma_move(board, 1, 7, 3) == 0
assert gamma_move(board, 2, 7, 3) == 0
assert gamma_move(board, 2, 3, 1) == 0
assert gamma_move(board, 3, 7, 3) == 0
assert gamma_move(board, 3, 0, 2) == 0
assert gamma_move(board, 1, 3, 3) == 1
assert gamma_move(board, 2, 7, 2) == 0
assert gamma_move(board, 2, 2, 3) == 0
assert gamma_free_fields(board, 2) == 10
assert gamma_move(board, 3, 7, 3) == 0
assert gamma_move(board, 3, 5, 1) == 0
assert gamma_move(board, 1, 7, 2) == 0
board481507094 = gamma_board(board)
assert board481507094 is not None
assert board481507094 == ("12...1\n"
"2221.2\n"
"31.133\n"
"321.32\n"
"13111.\n"
"112.11\n"
"311332\n"
"1323..\n")
del board481507094
board481507094 = None
assert gamma_move(board, 2, 2, 4) == 0
assert gamma_move(board, 2, 5, 4) == 0
assert gamma_busy_fields(board, 2) == 10
assert gamma_move(board, 1, 7, 2) == 0
assert gamma_move(board, 2, 7, 4) == 0
assert gamma_move(board, 3, 0, 4) == 0
assert gamma_busy_fields(board, 3) == 11
assert gamma_golden_possible(board, 3) == 0
assert gamma_move(board, 2, 7, 2) == 0
assert gamma_move(board, 2, 1, 4) == 0
assert gamma_free_fields(board, 2) == 10
assert gamma_move(board, 3, 0, 5) == 0
assert gamma_busy_fields(board, 3) == 11
assert gamma_move(board, 1, 7, 2) == 0
assert gamma_move(board, 1, 1, 6) == 0
assert gamma_move(board, 2, 2, 0) == 0
assert gamma_move(board, 2, 1, 7) == 0
assert gamma_move(board, 3, 3, 1) == 0
assert gamma_move(board, 1, 6, 4) == 0
assert gamma_move(board, 2, 0, 4) == 0
assert gamma_move(board, 2, 2, 7) == 1
board984249076 = gamma_board(board)
assert board984249076 is not None
assert board984249076 == ("122..1\n"
"2221.2\n"
"31.133\n"
"321.32\n"
"13111.\n"
"112.11\n"
"311332\n"
"1323..\n")
del board984249076
board984249076 = None
assert gamma_move(board, 1, 4, 1) == 0
assert gamma_golden_possible(board, 1) == 1
board492321582 = gamma_board(board)
assert board492321582 is not None
assert board492321582 == ("122..1\n"
"2221.2\n"
"31.133\n"
"321.32\n"
"13111.\n"
"112.11\n"
"311332\n"
"1323..\n")
del board492321582
board492321582 = None
assert gamma_move(board, 2, 2, 3) == 0
assert gamma_move(board, 2, 2, 4) == 0
assert gamma_golden_possible(board, 2) == 0
assert gamma_move(board, 3, 2, 3) == 0
assert gamma_move(board, 1, 7, 3) == 0
assert gamma_move(board, 1, 4, 3) == 0
assert gamma_move(board, 2, 2, 4) == 0
assert gamma_move(board, 1, 0, 4) == 0
assert gamma_move(board, 2, 0, 4) == 0
assert gamma_move(board, 2, 2, 6) == 0
assert gamma_move(board, 3, 5, 2) == 0
assert gamma_move(board, 1, 0, 5) == 0
assert gamma_move(board, 2, 3, 2) == 1
assert gamma_move(board, 3, 0, 5) == 0
assert gamma_move(board, 1, 0, 5) == 0
assert gamma_move(board, 1, 2, 3) == 0
assert gamma_golden_possible(board, 1) == 1
assert gamma_move(board, 2, 2, 0) == 0
assert gamma_move(board, 3, 5, 6) == 0
assert gamma_move(board, 3, 2, 1) == 0
gamma_delete(board)
| [
"part1.gamma_move",
"part1.gamma_board",
"part1.gamma_new",
"part1.gamma_golden_move",
"part1.gamma_delete",
"part1.gamma_busy_fields",
"part1.gamma_golden_possible",
"part1.gamma_free_fields"
]
| [((283, 305), 'part1.gamma_new', 'gamma_new', (['(6)', '(8)', '(3)', '(17)'], {}), '(6, 8, 3, 17)\n', (292, 305), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((1502, 1520), 'part1.gamma_board', 'gamma_board', (['board'], {}), '(board)\n', (1513, 1520), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((2217, 2235), 'part1.gamma_board', 'gamma_board', (['board'], {}), '(board)\n', (2228, 2235), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((3053, 3071), 'part1.gamma_board', 'gamma_board', (['board'], {}), '(board)\n', (3064, 3071), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((6075, 6093), 'part1.gamma_board', 'gamma_board', (['board'], {}), '(board)\n', (6086, 6093), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((7156, 7174), 'part1.gamma_board', 'gamma_board', (['board'], {}), '(board)\n', (7167, 7174), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((7469, 7487), 'part1.gamma_board', 'gamma_board', (['board'], {}), '(board)\n', (7480, 7487), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((8490, 8509), 'part1.gamma_delete', 'gamma_delete', (['board'], {}), '(board)\n', (8502, 8509), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((340, 366), 'part1.gamma_move', 'gamma_move', (['board', '(1)', '(7)', '(4)'], {}), '(board, 1, 7, 4)\n', (350, 366), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((380, 406), 'part1.gamma_move', 'gamma_move', (['board', '(1)', '(4)', '(3)'], {}), '(board, 1, 4, 3)\n', (390, 406), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((420, 447), 'part1.gamma_busy_fields', 'gamma_busy_fields', (['board', '(1)'], {}), '(board, 1)\n', (437, 447), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((461, 487), 'part1.gamma_move', 'gamma_move', (['board', '(2)', '(5)', '(1)'], {}), '(board, 2, 5, 1)\n', (471, 487), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((501, 527), 'part1.gamma_move', 'gamma_move', (['board', '(2)', '(1)', '(7)'], {}), '(board, 2, 1, 7)\n', (511, 527), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((541, 568), 'part1.gamma_busy_fields', 'gamma_busy_fields', (['board', '(2)'], {}), '(board, 2)\n', 
(558, 568), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((582, 613), 'part1.gamma_golden_possible', 'gamma_golden_possible', (['board', '(2)'], {}), '(board, 2)\n', (603, 613), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((627, 653), 'part1.gamma_move', 'gamma_move', (['board', '(3)', '(1)', '(0)'], {}), '(board, 3, 1, 0)\n', (637, 653), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((667, 700), 'part1.gamma_golden_move', 'gamma_golden_move', (['board', '(3)', '(3)', '(4)'], {}), '(board, 3, 3, 4)\n', (684, 700), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((714, 741), 'part1.gamma_busy_fields', 'gamma_busy_fields', (['board', '(2)'], {}), '(board, 2)\n', (731, 741), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((755, 781), 'part1.gamma_move', 'gamma_move', (['board', '(3)', '(1)', '(3)'], {}), '(board, 3, 1, 3)\n', (765, 781), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((795, 821), 'part1.gamma_move', 'gamma_move', (['board', '(1)', '(3)', '(5)'], {}), '(board, 1, 3, 5)\n', (805, 821), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((835, 861), 'part1.gamma_move', 'gamma_move', (['board', '(1)', '(2)', '(3)'], {}), '(board, 1, 2, 3)\n', (845, 861), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((875, 906), 'part1.gamma_golden_possible', 'gamma_golden_possible', (['board', '(1)'], {}), '(board, 1)\n', (896, 906), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((920, 946), 'part1.gamma_move', 'gamma_move', (['board', '(2)', '(1)', '(0)'], {}), '(board, 2, 1, 0)\n', (930, 946), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((960, 986), 'part1.gamma_move', 'gamma_move', (['board', '(3)', '(2)', '(2)'], {}), '(board, 3, 2, 2)\n', (970, 986), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((1000, 1031), 'part1.gamma_golden_possible', 'gamma_golden_possible', (['board', '(3)'], {}), '(board, 3)\n', (1021, 1031), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((1045, 1071), 'part1.gamma_move', 'gamma_move', (['board', '(1)', '(0)', '(2)'], {}), '(board, 1, 0, 2)\n', (1055, 1071), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((1085, 
1111), 'part1.gamma_move', 'gamma_move', (['board', '(1)', '(1)', '(1)'], {}), '(board, 1, 1, 1)\n', (1095, 1111), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((1125, 1151), 'part1.gamma_move', 'gamma_move', (['board', '(2)', '(5)', '(4)'], {}), '(board, 2, 5, 4)\n', (1135, 1151), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((1165, 1191), 'part1.gamma_move', 'gamma_move', (['board', '(3)', '(0)', '(4)'], {}), '(board, 3, 0, 4)\n', (1175, 1191), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((1205, 1236), 'part1.gamma_golden_possible', 'gamma_golden_possible', (['board', '(3)'], {}), '(board, 3)\n', (1226, 1236), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((1250, 1276), 'part1.gamma_move', 'gamma_move', (['board', '(1)', '(1)', '(2)'], {}), '(board, 1, 1, 2)\n', (1260, 1276), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((1290, 1316), 'part1.gamma_move', 'gamma_move', (['board', '(2)', '(1)', '(4)'], {}), '(board, 2, 1, 4)\n', (1300, 1316), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((1330, 1356), 'part1.gamma_move', 'gamma_move', (['board', '(2)', '(1)', '(6)'], {}), '(board, 2, 1, 6)\n', (1340, 1356), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((1370, 1396), 'part1.gamma_move', 'gamma_move', (['board', '(3)', '(1)', '(2)'], {}), '(board, 3, 1, 2)\n', (1380, 1396), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((1410, 1436), 'part1.gamma_move', 'gamma_move', (['board', '(1)', '(0)', '(3)'], {}), '(board, 1, 0, 3)\n', (1420, 1436), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((1450, 1476), 'part1.gamma_move', 'gamma_move', (['board', '(1)', '(4)', '(2)'], {}), '(board, 1, 4, 2)\n', (1460, 1476), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((1718, 1744), 'part1.gamma_move', 'gamma_move', (['board', '(2)', '(4)', '(3)'], {}), '(board, 2, 4, 3)\n', (1728, 1744), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((1758, 1784), 'part1.gamma_move', 'gamma_move', (['board', '(2)', '(5)', '(1)'], {}), '(board, 2, 5, 1)\n', (1768, 1784), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((1798, 1824), 'part1.gamma_move', 'gamma_move', (['board', '(3)', '(4)', '(5)'], {}), '(board, 3, 4, 5)\n', (1808, 1824), False, 'from part1 import gamma_board, 
gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((1838, 1864), 'part1.gamma_move', 'gamma_move', (['board', '(3)', '(3)', '(0)'], {}), '(board, 3, 3, 0)\n', (1848, 1864), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((1878, 1905), 'part1.gamma_free_fields', 'gamma_free_fields', (['board', '(3)'], {}), '(board, 3)\n', (1895, 1905), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((1920, 1946), 'part1.gamma_move', 'gamma_move', (['board', '(2)', '(1)', '(7)'], {}), '(board, 2, 1, 7)\n', (1930, 1946), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((1960, 1986), 'part1.gamma_move', 'gamma_move', (['board', '(2)', '(3)', '(5)'], {}), '(board, 2, 3, 5)\n', (1970, 1986), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((2000, 2026), 'part1.gamma_move', 'gamma_move', (['board', '(3)', '(0)', '(5)'], {}), '(board, 3, 0, 5)\n', (2010, 2026), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((2040, 2066), 'part1.gamma_move', 'gamma_move', (['board', '(3)', '(0)', '(1)'], {}), '(board, 3, 0, 1)\n', (2050, 2066), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((2080, 2111), 'part1.gamma_golden_possible', 'gamma_golden_possible', (['board', '(3)'], {}), '(board, 3)\n', (2101, 2111), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((2125, 2151), 'part1.gamma_move', 'gamma_move', (['board', '(1)', '(3)', '(0)'], {}), '(board, 1, 3, 0)\n', (2135, 2151), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((2165, 2191), 'part1.gamma_move', 'gamma_move', (['board', '(1)', '(0)', '(7)'], {}), '(board, 1, 0, 7)\n', (2175, 2191), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((2433, 2459), 'part1.gamma_move', 'gamma_move', (['board', '(2)', '(5)', '(1)'], {}), '(board, 2, 5, 1)\n', (2443, 2459), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((2473, 2499), 'part1.gamma_move', 'gamma_move', (['board', '(2)', '(5)', '(4)'], {}), '(board, 2, 5, 4)\n', (2483, 2499), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((2513, 2544), 'part1.gamma_golden_possible', 'gamma_golden_possible', (['board', '(2)'], {}), '(board, 2)\n', (2534, 2544), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((2558, 2584), 'part1.gamma_move', 
'gamma_move', (['board', '(3)', '(7)', '(3)'], {}), '(board, 3, 7, 3)\n', (2568, 2584), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((2598, 2624), 'part1.gamma_move', 'gamma_move', (['board', '(3)', '(5)', '(1)'], {}), '(board, 3, 5, 1)\n', (2608, 2624), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((2638, 2665), 'part1.gamma_busy_fields', 'gamma_busy_fields', (['board', '(3)'], {}), '(board, 3)\n', (2655, 2665), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((2679, 2705), 'part1.gamma_move', 'gamma_move', (['board', '(1)', '(5)', '(4)'], {}), '(board, 1, 5, 4)\n', (2689, 2705), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((2719, 2745), 'part1.gamma_move', 'gamma_move', (['board', '(1)', '(0)', '(0)'], {}), '(board, 1, 0, 0)\n', (2729, 2745), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((2759, 2785), 'part1.gamma_move', 'gamma_move', (['board', '(2)', '(6)', '(3)'], {}), '(board, 2, 6, 3)\n', (2769, 2785), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((2799, 2825), 'part1.gamma_move', 'gamma_move', (['board', '(2)', '(4)', '(4)'], {}), '(board, 2, 4, 4)\n', (2809, 2825), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((2839, 2865), 'part1.gamma_move', 'gamma_move', (['board', '(3)', '(0)', '(5)'], {}), '(board, 3, 0, 5)\n', (2849, 2865), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((2879, 2905), 'part1.gamma_move', 'gamma_move', (['board', '(3)', '(0)', '(1)'], {}), '(board, 3, 0, 1)\n', (2889, 2905), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((2919, 2946), 'part1.gamma_free_fields', 'gamma_free_fields', (['board', '(3)'], {}), '(board, 3)\n', (2936, 2946), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((2961, 2987), 'part1.gamma_move', 'gamma_move', (['board', '(1)', '(1)', '(7)'], {}), '(board, 1, 1, 7)\n', (2971, 2987), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((3001, 3027), 'part1.gamma_move', 'gamma_move', (['board', '(1)', '(2)', '(1)'], {}), '(board, 1, 2, 1)\n', (3011, 3027), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((3269, 3295), 'part1.gamma_move', 'gamma_move', (['board', '(2)', '(1)', '(6)'], {}), '(board, 2, 1, 6)\n', (3279, 3295), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, 
gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((3309, 3335), 'part1.gamma_move', 'gamma_move', (['board', '(2)', '(2)', '(1)'], {}), '(board, 2, 2, 1)\n', (3319, 3335), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((3349, 3375), 'part1.gamma_move', 'gamma_move', (['board', '(3)', '(1)', '(2)'], {}), '(board, 3, 1, 2)\n', (3359, 3375), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((3389, 3416), 'part1.gamma_free_fields', 'gamma_free_fields', (['board', '(3)'], {}), '(board, 3)\n', (3406, 3416), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((3431, 3464), 'part1.gamma_golden_move', 'gamma_golden_move', (['board', '(3)', '(4)', '(4)'], {}), '(board, 3, 4, 4)\n', (3448, 3464), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((3478, 3504), 'part1.gamma_move', 'gamma_move', (['board', '(1)', '(0)', '(2)'], {}), '(board, 1, 0, 2)\n', (3488, 3504), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((3518, 3544), 'part1.gamma_move', 'gamma_move', (['board', '(1)', '(3)', '(6)'], {}), '(board, 1, 3, 6)\n', (3528, 3544), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((3558, 3589), 'part1.gamma_golden_possible', 'gamma_golden_possible', (['board', '(1)'], {}), '(board, 1)\n', (3579, 3589), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((3603, 3629), 'part1.gamma_move', 'gamma_move', (['board', '(2)', '(7)', '(4)'], {}), '(board, 2, 7, 4)\n', (3613, 3629), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((3643, 3670), 'part1.gamma_free_fields', 'gamma_free_fields', (['board', '(2)'], {}), '(board, 2)\n', (3660, 3670), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((3685, 3711), 'part1.gamma_move', 'gamma_move', (['board', '(3)', '(5)', '(5)'], {}), '(board, 3, 5, 5)\n', (3695, 3711), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((3725, 3751), 'part1.gamma_move', 'gamma_move', (['board', '(3)', '(5)', '(5)'], {}), '(board, 3, 5, 5)\n', (3735, 3751), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((3765, 3792), 'part1.gamma_free_fields', 'gamma_free_fields', (['board', '(3)'], {}), '(board, 3)\n', (3782, 3792), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((3807, 3833), 'part1.gamma_move', 'gamma_move', (['board', '(1)', 
'(0)', '(5)'], {}), '(board, 1, 0, 5)\n', (3817, 3833), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((3847, 3873), 'part1.gamma_move', 'gamma_move', (['board', '(1)', '(5)', '(7)'], {}), '(board, 1, 5, 7)\n', (3857, 3873), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((3887, 3913), 'part1.gamma_move', 'gamma_move', (['board', '(2)', '(0)', '(6)'], {}), '(board, 2, 0, 6)\n', (3897, 3913), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((3927, 3953), 'part1.gamma_move', 'gamma_move', (['board', '(2)', '(5)', '(6)'], {}), '(board, 2, 5, 6)\n', (3937, 3953), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((3967, 3993), 'part1.gamma_move', 'gamma_move', (['board', '(3)', '(2)', '(2)'], {}), '(board, 3, 2, 2)\n', (3977, 3993), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((4007, 4033), 'part1.gamma_move', 'gamma_move', (['board', '(1)', '(5)', '(2)'], {}), '(board, 1, 5, 2)\n', (4017, 4033), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((4047, 4073), 'part1.gamma_move', 'gamma_move', (['board', '(2)', '(7)', '(4)'], {}), '(board, 2, 7, 4)\n', (4057, 4073), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((4087, 4113), 'part1.gamma_move', 'gamma_move', (['board', '(3)', '(2)', '(3)'], {}), '(board, 3, 2, 3)\n', (4097, 4113), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((4127, 4153), 'part1.gamma_move', 'gamma_move', (['board', '(3)', '(3)', '(1)'], {}), '(board, 3, 3, 1)\n', (4137, 4153), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((4167, 4193), 'part1.gamma_move', 'gamma_move', (['board', '(1)', '(5)', '(1)'], {}), '(board, 1, 5, 1)\n', (4177, 4193), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((4207, 4234), 'part1.gamma_free_fields', 'gamma_free_fields', (['board', '(1)'], {}), '(board, 1)\n', (4224, 4234), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((4249, 4275), 'part1.gamma_move', 'gamma_move', (['board', '(2)', '(4)', '(2)'], {}), '(board, 2, 4, 2)\n', (4259, 4275), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((4289, 4315), 'part1.gamma_move', 'gamma_move', (['board', '(3)', '(4)', '(1)'], {}), '(board, 3, 4, 1)\n', (4299, 4315), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, 
gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((4329, 4355), 'part1.gamma_move', 'gamma_move', (['board', '(3)', '(5)', '(2)'], {}), '(board, 3, 5, 2)\n', (4339, 4355), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((4369, 4395), 'part1.gamma_move', 'gamma_move', (['board', '(1)', '(7)', '(4)'], {}), '(board, 1, 7, 4)\n', (4379, 4395), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((4409, 4435), 'part1.gamma_move', 'gamma_move', (['board', '(1)', '(4)', '(1)'], {}), '(board, 1, 4, 1)\n', (4419, 4435), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((4449, 4475), 'part1.gamma_move', 'gamma_move', (['board', '(2)', '(0)', '(2)'], {}), '(board, 2, 0, 2)\n', (4459, 4475), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((4489, 4515), 'part1.gamma_move', 'gamma_move', (['board', '(2)', '(0)', '(5)'], {}), '(board, 2, 0, 5)\n', (4499, 4515), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((4529, 4556), 'part1.gamma_busy_fields', 'gamma_busy_fields', (['board', '(2)'], {}), '(board, 2)\n', (4546, 4556), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((4570, 4596), 'part1.gamma_move', 'gamma_move', (['board', '(3)', '(5)', '(2)'], {}), '(board, 3, 5, 2)\n', (4580, 4596), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((4610, 4636), 'part1.gamma_move', 'gamma_move', (['board', '(1)', '(1)', '(5)'], {}), '(board, 1, 1, 5)\n', (4620, 4636), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((4650, 4676), 'part1.gamma_move', 'gamma_move', (['board', '(2)', '(3)', '(5)'], {}), '(board, 2, 3, 5)\n', (4660, 4676), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((4690, 4716), 'part1.gamma_move', 'gamma_move', (['board', '(2)', '(4)', '(1)'], {}), '(board, 2, 4, 1)\n', (4700, 4716), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((4730, 4756), 'part1.gamma_move', 'gamma_move', (['board', '(3)', '(0)', '(3)'], {}), '(board, 3, 0, 3)\n', (4740, 4756), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((4770, 4796), 'part1.gamma_move', 'gamma_move', (['board', '(3)', '(1)', '(5)'], {}), '(board, 3, 1, 5)\n', (4780, 4796), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((4810, 4836), 'part1.gamma_move', 'gamma_move', (['board', '(1)', '(2)', '(4)'], {}), '(board, 
1, 2, 4)\n', (4820, 4836), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((4850, 4876), 'part1.gamma_move', 'gamma_move', (['board', '(1)', '(3)', '(0)'], {}), '(board, 1, 3, 0)\n', (4860, 4876), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((4890, 4917), 'part1.gamma_busy_fields', 'gamma_busy_fields', (['board', '(1)'], {}), '(board, 1)\n', (4907, 4917), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((4932, 4958), 'part1.gamma_move', 'gamma_move', (['board', '(2)', '(3)', '(5)'], {}), '(board, 2, 3, 5)\n', (4942, 4958), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((4972, 4998), 'part1.gamma_move', 'gamma_move', (['board', '(2)', '(3)', '(1)'], {}), '(board, 2, 3, 1)\n', (4982, 4998), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((5012, 5038), 'part1.gamma_move', 'gamma_move', (['board', '(3)', '(5)', '(2)'], {}), '(board, 3, 5, 2)\n', (5022, 5038), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((5052, 5078), 'part1.gamma_move', 'gamma_move', (['board', '(1)', '(0)', '(4)'], {}), '(board, 1, 0, 4)\n', (5062, 5078), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((5092, 5118), 'part1.gamma_move', 'gamma_move', (['board', '(1)', '(0)', '(6)'], {}), '(board, 1, 0, 6)\n', (5102, 5118), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((5132, 5158), 'part1.gamma_move', 'gamma_move', (['board', '(2)', '(5)', '(5)'], {}), '(board, 2, 5, 5)\n', (5142, 5158), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((5172, 5205), 'part1.gamma_golden_move', 'gamma_golden_move', (['board', '(2)', '(2)', '(2)'], {}), '(board, 2, 2, 2)\n', (5189, 5205), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((5219, 5245), 'part1.gamma_move', 'gamma_move', (['board', '(1)', '(5)', '(5)'], {}), '(board, 1, 5, 5)\n', (5229, 5245), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((5259, 5286), 'part1.gamma_free_fields', 'gamma_free_fields', (['board', '(1)'], {}), '(board, 1)\n', (5276, 5286), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((5301, 5327), 'part1.gamma_move', 'gamma_move', (['board', '(2)', '(2)', '(6)'], {}), '(board, 2, 2, 6)\n', (5311, 5327), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, 
gamma_golden_possible, gamma_move, gamma_new\n'), ((5341, 5367), 'part1.gamma_move', 'gamma_move', (['board', '(2)', '(5)', '(6)'], {}), '(board, 2, 5, 6)\n', (5351, 5367), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((5381, 5407), 'part1.gamma_move', 'gamma_move', (['board', '(3)', '(4)', '(3)'], {}), '(board, 3, 4, 3)\n', (5391, 5407), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((5421, 5447), 'part1.gamma_move', 'gamma_move', (['board', '(1)', '(4)', '(3)'], {}), '(board, 1, 4, 3)\n', (5431, 5447), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((5461, 5487), 'part1.gamma_move', 'gamma_move', (['board', '(1)', '(3)', '(5)'], {}), '(board, 1, 3, 5)\n', (5471, 5487), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((5501, 5527), 'part1.gamma_move', 'gamma_move', (['board', '(2)', '(2)', '(0)'], {}), '(board, 2, 2, 0)\n', (5511, 5527), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((5541, 5567), 'part1.gamma_move', 'gamma_move', (['board', '(3)', '(0)', '(4)'], {}), '(board, 3, 0, 4)\n', (5551, 5567), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((5581, 5607), 'part1.gamma_move', 'gamma_move', (['board', '(1)', '(7)', '(3)'], {}), '(board, 1, 7, 3)\n', (5591, 5607), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((5621, 5647), 'part1.gamma_move', 'gamma_move', (['board', '(2)', '(7)', '(3)'], {}), '(board, 2, 7, 3)\n', (5631, 5647), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((5661, 5687), 'part1.gamma_move', 'gamma_move', (['board', '(2)', '(3)', '(1)'], {}), '(board, 2, 3, 1)\n', (5671, 5687), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((5701, 5727), 'part1.gamma_move', 'gamma_move', (['board', '(3)', '(7)', '(3)'], {}), '(board, 3, 7, 3)\n', (5711, 5727), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((5741, 5767), 'part1.gamma_move', 'gamma_move', (['board', '(3)', '(0)', '(2)'], {}), '(board, 3, 0, 2)\n', (5751, 5767), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((5781, 5807), 'part1.gamma_move', 'gamma_move', (['board', '(1)', '(3)', '(3)'], {}), '(board, 1, 3, 3)\n', (5791, 5807), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((5821, 5847), 'part1.gamma_move', 'gamma_move', (['board', '(2)', '(7)', '(2)'], {}), '(board, 2, 7, 2)\n', 
(5831, 5847), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((5861, 5887), 'part1.gamma_move', 'gamma_move', (['board', '(2)', '(2)', '(3)'], {}), '(board, 2, 2, 3)\n', (5871, 5887), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((5901, 5928), 'part1.gamma_free_fields', 'gamma_free_fields', (['board', '(2)'], {}), '(board, 2)\n', (5918, 5928), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((5943, 5969), 'part1.gamma_move', 'gamma_move', (['board', '(3)', '(7)', '(3)'], {}), '(board, 3, 7, 3)\n', (5953, 5969), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((5983, 6009), 'part1.gamma_move', 'gamma_move', (['board', '(3)', '(5)', '(1)'], {}), '(board, 3, 5, 1)\n', (5993, 6009), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((6023, 6049), 'part1.gamma_move', 'gamma_move', (['board', '(1)', '(7)', '(2)'], {}), '(board, 1, 7, 2)\n', (6033, 6049), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((6291, 6317), 'part1.gamma_move', 'gamma_move', (['board', '(2)', '(2)', '(4)'], {}), '(board, 2, 2, 4)\n', (6301, 6317), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((6331, 6357), 'part1.gamma_move', 'gamma_move', (['board', '(2)', '(5)', '(4)'], {}), '(board, 2, 5, 4)\n', (6341, 6357), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((6371, 6398), 'part1.gamma_busy_fields', 'gamma_busy_fields', (['board', '(2)'], {}), '(board, 2)\n', (6388, 6398), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((6413, 6439), 'part1.gamma_move', 'gamma_move', (['board', '(1)', '(7)', '(2)'], {}), '(board, 1, 7, 2)\n', (6423, 6439), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((6453, 6479), 'part1.gamma_move', 'gamma_move', (['board', '(2)', '(7)', '(4)'], {}), '(board, 2, 7, 4)\n', (6463, 6479), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((6493, 6519), 'part1.gamma_move', 'gamma_move', (['board', '(3)', '(0)', '(4)'], {}), '(board, 3, 0, 4)\n', (6503, 6519), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((6533, 6560), 'part1.gamma_busy_fields', 'gamma_busy_fields', (['board', '(3)'], {}), '(board, 3)\n', (6550, 6560), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), 
((6575, 6606), 'part1.gamma_golden_possible', 'gamma_golden_possible', (['board', '(3)'], {}), '(board, 3)\n', (6596, 6606), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((6620, 6646), 'part1.gamma_move', 'gamma_move', (['board', '(2)', '(7)', '(2)'], {}), '(board, 2, 7, 2)\n', (6630, 6646), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((6660, 6686), 'part1.gamma_move', 'gamma_move', (['board', '(2)', '(1)', '(4)'], {}), '(board, 2, 1, 4)\n', (6670, 6686), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((6700, 6727), 'part1.gamma_free_fields', 'gamma_free_fields', (['board', '(2)'], {}), '(board, 2)\n', (6717, 6727), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((6742, 6768), 'part1.gamma_move', 'gamma_move', (['board', '(3)', '(0)', '(5)'], {}), '(board, 3, 0, 5)\n', (6752, 6768), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((6782, 6809), 'part1.gamma_busy_fields', 'gamma_busy_fields', (['board', '(3)'], {}), '(board, 3)\n', (6799, 6809), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((6824, 6850), 'part1.gamma_move', 'gamma_move', (['board', '(1)', '(7)', '(2)'], {}), '(board, 1, 7, 2)\n', (6834, 6850), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((6864, 6890), 'part1.gamma_move', 'gamma_move', (['board', '(1)', '(1)', '(6)'], {}), '(board, 1, 1, 6)\n', (6874, 6890), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((6904, 6930), 'part1.gamma_move', 'gamma_move', (['board', '(2)', '(2)', '(0)'], {}), '(board, 2, 2, 0)\n', (6914, 6930), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((6944, 6970), 'part1.gamma_move', 'gamma_move', (['board', '(2)', '(1)', '(7)'], {}), '(board, 2, 1, 7)\n', (6954, 6970), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((6984, 7010), 'part1.gamma_move', 'gamma_move', (['board', '(3)', '(3)', '(1)'], {}), '(board, 3, 3, 1)\n', (6994, 7010), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((7024, 7050), 'part1.gamma_move', 'gamma_move', (['board', '(1)', '(6)', '(4)'], {}), '(board, 1, 6, 4)\n', (7034, 7050), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((7064, 7090), 'part1.gamma_move', 'gamma_move', (['board', '(2)', '(0)', '(4)'], {}), '(board, 2, 0, 4)\n', (7074, 7090), False, 'from part1 import gamma_board, 
gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((7104, 7130), 'part1.gamma_move', 'gamma_move', (['board', '(2)', '(2)', '(7)'], {}), '(board, 2, 2, 7)\n', (7114, 7130), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((7372, 7398), 'part1.gamma_move', 'gamma_move', (['board', '(1)', '(4)', '(1)'], {}), '(board, 1, 4, 1)\n', (7382, 7398), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((7412, 7443), 'part1.gamma_golden_possible', 'gamma_golden_possible', (['board', '(1)'], {}), '(board, 1)\n', (7433, 7443), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((7685, 7711), 'part1.gamma_move', 'gamma_move', (['board', '(2)', '(2)', '(3)'], {}), '(board, 2, 2, 3)\n', (7695, 7711), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((7725, 7751), 'part1.gamma_move', 'gamma_move', (['board', '(2)', '(2)', '(4)'], {}), '(board, 2, 2, 4)\n', (7735, 7751), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((7765, 7796), 'part1.gamma_golden_possible', 'gamma_golden_possible', (['board', '(2)'], {}), '(board, 2)\n', (7786, 7796), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((7810, 7836), 'part1.gamma_move', 'gamma_move', (['board', '(3)', '(2)', '(3)'], {}), '(board, 3, 2, 3)\n', (7820, 7836), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((7850, 7876), 'part1.gamma_move', 'gamma_move', (['board', '(1)', '(7)', '(3)'], {}), '(board, 1, 7, 3)\n', (7860, 7876), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((7890, 7916), 'part1.gamma_move', 'gamma_move', (['board', '(1)', '(4)', '(3)'], {}), '(board, 1, 4, 3)\n', (7900, 7916), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((7930, 7956), 'part1.gamma_move', 'gamma_move', (['board', '(2)', '(2)', '(4)'], {}), '(board, 2, 2, 4)\n', (7940, 7956), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((7970, 7996), 'part1.gamma_move', 'gamma_move', (['board', '(1)', '(0)', '(4)'], {}), '(board, 1, 0, 4)\n', (7980, 7996), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((8010, 8036), 'part1.gamma_move', 'gamma_move', (['board', '(2)', '(0)', '(4)'], {}), '(board, 2, 0, 4)\n', (8020, 8036), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((8050, 8076), 
'part1.gamma_move', 'gamma_move', (['board', '(2)', '(2)', '(6)'], {}), '(board, 2, 2, 6)\n', (8060, 8076), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((8090, 8116), 'part1.gamma_move', 'gamma_move', (['board', '(3)', '(5)', '(2)'], {}), '(board, 3, 5, 2)\n', (8100, 8116), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((8130, 8156), 'part1.gamma_move', 'gamma_move', (['board', '(1)', '(0)', '(5)'], {}), '(board, 1, 0, 5)\n', (8140, 8156), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((8170, 8196), 'part1.gamma_move', 'gamma_move', (['board', '(2)', '(3)', '(2)'], {}), '(board, 2, 3, 2)\n', (8180, 8196), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((8210, 8236), 'part1.gamma_move', 'gamma_move', (['board', '(3)', '(0)', '(5)'], {}), '(board, 3, 0, 5)\n', (8220, 8236), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((8250, 8276), 'part1.gamma_move', 'gamma_move', (['board', '(1)', '(0)', '(5)'], {}), '(board, 1, 0, 5)\n', (8260, 8276), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((8290, 8316), 'part1.gamma_move', 'gamma_move', (['board', '(1)', '(2)', '(3)'], {}), '(board, 1, 2, 3)\n', (8300, 8316), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((8330, 8361), 'part1.gamma_golden_possible', 'gamma_golden_possible', (['board', '(1)'], {}), '(board, 1)\n', (8351, 8361), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((8375, 8401), 'part1.gamma_move', 'gamma_move', (['board', '(2)', '(2)', '(0)'], {}), '(board, 2, 2, 0)\n', (8385, 8401), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((8415, 8441), 'part1.gamma_move', 'gamma_move', (['board', '(3)', '(5)', '(6)'], {}), '(board, 3, 5, 6)\n', (8425, 8441), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((8455, 8481), 'part1.gamma_move', 'gamma_move', (['board', '(3)', '(2)', '(1)'], {}), '(board, 3, 2, 1)\n', (8465, 8481), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n')] |
from django.contrib.gis.gdal import DataSource
from django.contrib.gis.utils import LayerMapping
from django.core.management.base import BaseCommand
from envergo.geodata.models import Zone
class Command(BaseCommand):
help = "Importe des zones à partir de shapefiles."
def add_arguments(self, parser):
parser.add_argument("shapefile", type=str)
    def handle(self, *args, **options):
        shapefile = options["shapefile"]
        # Open the shapefile through GDAL/OGR
        ds = DataSource(shapefile)
        # Map Zone model fields to the shapefile's attribute and geometry columns
        mapping = {"code": "CODEZONE", "polygon": "POLYGON"}
        lm = LayerMapping(Zone, ds, mapping)
        self.stdout.write(self.style.SUCCESS("Importing"))
        # Save every feature as a Zone row, logging each object as it is written
        lm.save(verbose=True)
| [
"django.contrib.gis.gdal.DataSource",
"django.contrib.gis.utils.LayerMapping"
]
| [((459, 480), 'django.contrib.gis.gdal.DataSource', 'DataSource', (['shapefile'], {}), '(shapefile)\n', (469, 480), False, 'from django.contrib.gis.gdal import DataSource\n'), ((555, 586), 'django.contrib.gis.utils.LayerMapping', 'LayerMapping', (['Zone', 'ds', 'mapping'], {}), '(Zone, ds, mapping)\n', (567, 586), False, 'from django.contrib.gis.utils import LayerMapping\n')] |
import discord
from jshbot import utilities, data, configurations, plugins, logger
from jshbot.exceptions import BotException, ConfiguredBotException
from jshbot.commands import (
Command, SubCommand, Shortcut, ArgTypes, Attachment, Arg, Opt, MessageTypes, Response)
__version__ = '0.1.0'
CBException = ConfiguredBotException('0.3 to 0.4 plugin')
@plugins.command_spawner
def get_commands(bot):
return [Command('convertdata', hidden=True, elevated_level=3)]
async def get_response(bot, context):
for guild in bot.guilds:
convert_core(bot, guild)
if 'tags.py' in bot.plugins:
convert_tags(bot, guild)
return Response("Converted.")
def convert_core(bot, guild):
if data.get(bot, 'core', None, guild_id=guild.id):
logger.warn("Guild %s (%s) already had core converted", guild.name, guild.id)
return
base_data = data.get(bot, 'base', None, guild_id=guild.id, default={})
if 'disabled' in base_data:
# TODO: Iterate through toggled commands
pass
if 'blocked' in base_data:
replacement = []
for entry in base_data['blocked']:
replacement.append(int(entry))
base_data['blocked'] = replacement
if 'muted_channels' in base_data:
replacement = []
for entry in base_data['muted_channels']:
replacement.append(int(entry))
base_data['muted_channels'] = replacement
if 'moderators' in base_data:
del base_data['moderators']
if base_data:
for key, value in base_data.items():
data.add(bot, 'core', key, value, guild_id=guild.id)
data.remove(bot, 'base', None, guild_id=guild.id)
def convert_tags(bot, guild):
if not data.get(bot, 'tags.py', 'tags', guild_id=guild.id):
logger.warn("Guild %s (%s) already had tags converted", guild.name, guild.id)
return
tags = data.get(bot, 'tags.py', 'tags', guild_id=guild.id, default={})
add_tag = bot.plugins['tags.py']._add_tag
    # Column order expected by _add_tag: key, value, length, volume, name, flags, author, hits, created, last_used, last_used_by, complex, extra
for key, tag in tags.items():
to_insert = [
key, # key
tag['value'], # value
tag['length'], # length
tag['volume'], # volume
tag['name'], # name
tag['flags'], # flags
int(tag['author']), # author
tag['hits'], # hits
int(tag['created']), # created
int(tag['last_used']), # last_used
None, # last_used_by
{}, # complex
{} # extra
]
add_tag(bot, to_insert, guild.id)
data.remove(bot, 'tags.py', 'tags', guild_id=guild.id, safe=True)
| [
"jshbot.logger.warn",
"jshbot.data.get",
"jshbot.data.add",
"jshbot.commands.Response",
"jshbot.commands.Command",
"jshbot.exceptions.ConfiguredBotException",
"jshbot.data.remove"
]
| [((309, 352), 'jshbot.exceptions.ConfiguredBotException', 'ConfiguredBotException', (['"""0.3 to 0.4 plugin"""'], {}), "('0.3 to 0.4 plugin')\n", (331, 352), False, 'from jshbot.exceptions import BotException, ConfiguredBotException\n'), ((655, 677), 'jshbot.commands.Response', 'Response', (['"""Converted."""'], {}), "('Converted.')\n", (663, 677), False, 'from jshbot.commands import Command, SubCommand, Shortcut, ArgTypes, Attachment, Arg, Opt, MessageTypes, Response\n'), ((717, 763), 'jshbot.data.get', 'data.get', (['bot', '"""core"""', 'None'], {'guild_id': 'guild.id'}), "(bot, 'core', None, guild_id=guild.id)\n", (725, 763), False, 'from jshbot import utilities, data, configurations, plugins, logger\n'), ((882, 940), 'jshbot.data.get', 'data.get', (['bot', '"""base"""', 'None'], {'guild_id': 'guild.id', 'default': '{}'}), "(bot, 'base', None, guild_id=guild.id, default={})\n", (890, 940), False, 'from jshbot import utilities, data, configurations, plugins, logger\n'), ((1891, 1954), 'jshbot.data.get', 'data.get', (['bot', '"""tags.py"""', '"""tags"""'], {'guild_id': 'guild.id', 'default': '{}'}), "(bot, 'tags.py', 'tags', guild_id=guild.id, default={})\n", (1899, 1954), False, 'from jshbot import utilities, data, configurations, plugins, logger\n'), ((2848, 2913), 'jshbot.data.remove', 'data.remove', (['bot', '"""tags.py"""', '"""tags"""'], {'guild_id': 'guild.id', 'safe': '(True)'}), "(bot, 'tags.py', 'tags', guild_id=guild.id, safe=True)\n", (2859, 2913), False, 'from jshbot import utilities, data, configurations, plugins, logger\n'), ((414, 467), 'jshbot.commands.Command', 'Command', (['"""convertdata"""'], {'hidden': '(True)', 'elevated_level': '(3)'}), "('convertdata', hidden=True, elevated_level=3)\n", (421, 467), False, 'from jshbot.commands import Command, SubCommand, Shortcut, ArgTypes, Attachment, Arg, Opt, MessageTypes, Response\n'), ((773, 850), 'jshbot.logger.warn', 'logger.warn', (['"""Guild %s (%s) already had core converted"""', 'guild.name', 'guild.id'], {}), "('Guild %s (%s) already had core converted', guild.name, guild.id)\n", (784, 850), False, 'from jshbot import utilities, data, configurations, plugins, logger\n'), ((1632, 1681), 'jshbot.data.remove', 'data.remove', (['bot', '"""base"""', 'None'], {'guild_id': 'guild.id'}), "(bot, 'base', None, guild_id=guild.id)\n", (1643, 1681), False, 'from jshbot import utilities, data, configurations, plugins, logger\n'), ((1725, 1776), 'jshbot.data.get', 'data.get', (['bot', '"""tags.py"""', '"""tags"""'], {'guild_id': 'guild.id'}), "(bot, 'tags.py', 'tags', guild_id=guild.id)\n", (1733, 1776), False, 'from jshbot import utilities, data, configurations, plugins, logger\n'), ((1786, 1863), 'jshbot.logger.warn', 'logger.warn', (['"""Guild %s (%s) already had tags converted"""', 'guild.name', 'guild.id'], {}), "('Guild %s (%s) already had tags converted', guild.name, guild.id)\n", (1797, 1863), False, 'from jshbot import utilities, data, configurations, plugins, logger\n'), ((1571, 1623), 'jshbot.data.add', 'data.add', (['bot', '"""core"""', 'key', 'value'], {'guild_id': 'guild.id'}), "(bot, 'core', key, value, guild_id=guild.id)\n", (1579, 1623), False, 'from jshbot import utilities, data, configurations, plugins, logger\n')] |
from tkinter import *
from PIL import Image, ImageTk
# PIL = Python Imaging Library (Pillow)
# ImageTk supports jpg images
a1 = Tk()
a1.geometry("455x244")
# for a png image the stock Tk PhotoImage would be enough:
# photo = PhotoImage(file="filename.png")
# a2 = Label(image=photo)
# a2.pack()
image = Image.open("PJXlVd.jpg")
photo = ImageTk.PhotoImage(image)
a2 = Label(image=photo)
a2.pack()
a1.mainloop() | [
"PIL.Image.open",
"PIL.ImageTk.PhotoImage"
]
| [((256, 280), 'PIL.Image.open', 'Image.open', (['"""PJXlVd.jpg"""'], {}), "('PJXlVd.jpg')\n", (266, 280), False, 'from PIL import Image, ImageTk\n'), ((290, 315), 'PIL.ImageTk.PhotoImage', 'ImageTk.PhotoImage', (['image'], {}), '(image)\n', (308, 315), False, 'from PIL import Image, ImageTk\n')] |
import os
import torch
from PIL import Image
from read_csv import csv_to_label_and_bbx
import numpy as np
from torch.utils.data import Subset, random_split, ConcatDataset
class NBIDataset(object):
def __init__(self, root, transforms, nob3=False):
self.root = root
self.transforms = transforms
# load all image files, sorting them to ensure that they are aligned
self.imgs = list(sorted(os.listdir(os.path.join(root, "images"))))
self.boxes = csv_to_label_and_bbx(os.path.join(self.root, "annotations.csv"), nob3)
def __getitem__(self, idx):
img_path = os.path.join(self.root, "images", self.imgs[idx])
img = Image.open(img_path).convert("RGB")
annotations = self.boxes[self.imgs[idx]]
boxes = annotations['bbx']
labels = annotations['labels']
# FloatTensor[N, 4]
boxes = torch.as_tensor(boxes, dtype=torch.float32)
# Int64Tensor[N]
labels = torch.as_tensor(labels, dtype=torch.int64)
image_id = torch.tensor([idx])
area = (boxes[:, 3] - boxes[:, 1]) * (boxes[:, 2] - boxes[:, 0])
iscrowd = torch.zeros((labels.size()[0],), dtype=torch.int64)
target = {}
target["boxes"] = boxes
target["labels"] = labels
target["image_id"] = image_id
# target["image_path"] = img_path
target["area"] = area
target["iscrowd"] = iscrowd
if self.transforms is not None:
img = self.transforms(img)
# target = self.transforms(target)
return img, target
def __len__(self):
return len(self.imgs)
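# Note: NBIDataset, NBINewDataset and NBIFullDataset below differ only in the directory
# layout they read from and in which annotation CSV they pass to csv_to_label_and_bbx;
# all of them return (image, target) pairs in the torchvision detection format
# (target["boxes"] as FloatTensor[N, 4], target["labels"] as Int64Tensor[N], plus
# image_id, area and iscrowd), so they can be consumed by the same detection model.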
class NBINewDataset(object):
def __init__(self, root, transforms, train=True):
self.root = root
self.transforms = transforms
if train:
self.path = os.path.join(root, "train")
else:
self.path = os.path.join(root, "test")
self.imgs = list(sorted(os.listdir(self.path)))
self.boxes = csv_to_label_and_bbx(os.path.join(self.root, "annotations_all.csv"), img_names=self.imgs)
def __getitem__(self, idx):
img_path = os.path.join(self.path, self.imgs[idx])
img = Image.open(img_path).convert("RGB")
annotations = self.boxes[self.imgs[idx]]
boxes = annotations['bbx']
labels = annotations['labels']
# FloatTensor[N, 4]
boxes = torch.as_tensor(boxes, dtype=torch.float32)
# Int64Tensor[N]
labels = torch.as_tensor(labels, dtype=torch.int64)
image_id = torch.tensor([idx])
area = (boxes[:, 3] - boxes[:, 1]) * (boxes[:, 2] - boxes[:, 0])
iscrowd = torch.zeros((labels.size()[0],), dtype=torch.int64)
target = {}
target["boxes"] = boxes
target["labels"] = labels
target["image_id"] = image_id
# target["image_path"] = img_path
target["area"] = area
target["iscrowd"] = iscrowd
if self.transforms is not None:
img = self.transforms(img)
# target = self.transforms(target)
return img, target
def __len__(self):
return len(self.imgs)
class NBIFullDataset(object):
def __init__(self, root, transforms):
self.root = root
self.transforms = transforms
self.path = os.path.join(root, "all")
self.imgs = list(sorted(os.listdir(self.path)))
self.boxes = csv_to_label_and_bbx(os.path.join(self.root, "annotations.csv"), img_names=self.imgs)
def __getitem__(self, idx):
img_path = os.path.join(self.path, self.imgs[idx])
img = Image.open(img_path).convert("RGB")
annotations = self.boxes[self.imgs[idx]]
boxes = annotations['bbx']
labels = annotations['labels']
# FloatTensor[N, 4]
boxes = torch.as_tensor(boxes, dtype=torch.float32)
# Int64Tensor[N]
labels = torch.as_tensor(labels, dtype=torch.int64)
image_id = torch.tensor([idx])
area = (boxes[:, 3] - boxes[:, 1]) * (boxes[:, 2] - boxes[:, 0])
iscrowd = torch.zeros((labels.size()[0],), dtype=torch.int64)
target = {}
target["boxes"] = boxes
target["labels"] = labels
target["image_id"] = image_id
# target["image_path"] = img_path
target["area"] = area
target["iscrowd"] = iscrowd
if self.transforms is not None:
img = self.transforms(img)
# target = self.transforms(target)
return img, target
def __len__(self):
return len(self.imgs)
class NBIDenseDataset(object):
def __init__(self, root, transforms):
self.root = root
self.transforms = transforms
# load all image files, sorting them to ensure that they are aligned
self.imgs = list(sorted(os.listdir(os.path.join(root, "images"))))
def __getitem__(self, idx):
img_path = os.path.join(self.root, "images", self.imgs[idx])
img = Image.open(img_path).convert("RGB")
density_path = os.path.join(self.root, "density_maps")
density_map = np.load(os.path.join(density_path, self.imgs[idx][:-4] + ".npy"))
density_map = torch.from_numpy(density_map)
if self.transforms is not None:
img = self.transforms(img)
# target = self.transforms(target)
return img, density_map
def __len__(self):
return len(self.imgs)
class NBIPatchDataset(object):
def __init__(self, root, transforms):
self.root = root
self.transforms = transforms
# load all image files, sorting them to ensure that they are aligned
self.imgs = [x for x in list(sorted(os.listdir(root))) if x[-3:] == "png"]
self.ans = np.load(os.path.join(root, "ans.npy"), allow_pickle=True).item()
def __getitem__(self, idx):
# img_path = os.path.join(self.root, "images", self.imgs[idx])
# img = Image.open(img_path).convert("RGB")
# density_path = os.path.join(self.root, "density_maps")
# density_map = np.load(os.path.join(density_path, self.imgs[idx][:-4] + ".npy"))
# density_map = torch.from_numpy(density_map)
#
# if self.transforms is not None:
# img = self.transforms(img)
# # target = self.transforms(target)
return self.imgs[idx]
def __len__(self):
return len(self.imgs)
def split_index(K=5, len=100):
idx = list(range(len))
final_list = []
for i in range(K):
final_list.append(idx[(i*len)//K:((i+1)*len)//K])
return final_list
def k_fold_index(K=5, len=100, fold=0):
split = split_index(K, len)
val = split[fold]
train = []
for i in range(K):
if i != fold:
train = train + split[i]
return train, val
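# A quick illustration of the two helpers above (a sketch, not executed here):
# split_index(K=5, len=10)        -> [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9]]
# k_fold_index(K=5, len=10, fold=0) -> train=[2, 3, 4, 5, 6, 7, 8, 9], val=[0, 1]
# i.e. fold `fold` becomes the validation slice and the remaining K-1 slices are
# concatenated for training.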
def stat_dataset(dataset):
class_ids = {1: "A", 2: "B1", 3: "B2", 4: "B3"}
stats = {"A": 0, "B1": 0, "B2": 0, "B3": 0}
for img, target in dataset:
for k in target['labels']:
stats[class_ids[int(k)]] += 1
print(stats)
def NBIFiveFoldDataset(transforms):
ds = NBIFullDataset(root="./NBI_full_dataset/", transforms=transforms)
# n = len(ds)
# for i in range(5):
# train_idx, val_idx = k_fold_index(5, n, i)
# train_subset = Subset(ds, train_idx)
# val_subset = Subset(ds, val_idx)
# print("Fold: %d" % i, len(train_subset), len(val_subset))
# stat_dataset(train_subset)
# stat_dataset(val_subset)
torch.manual_seed(13)
all_subsets = random_split(ds, [46, 46, 46, 45, 45])
fold_i_subsets = []
for i in range(5):
val_subset = all_subsets[i]
train_subset = ConcatDataset([all_subsets[j] for j in range(5) if j != i])
fold_i_subsets.append({"train": train_subset, "val": val_subset})
# print("Fold: %d" % i, len(train_subset), len(val_subset))
# stat_dataset(train_subset)
# stat_dataset(val_subset)
return fold_i_subsets
if __name__ == '__main__':
    # Smoke test: build the five-fold splits (no transforms) and report their sizes.
    folds = NBIFiveFoldDataset(None)
    for i, fold in enumerate(folds):
        print("Fold %d: train=%d, val=%d" % (i, len(fold["train"]), len(fold["val"])))
| [
"torch.manual_seed",
"torch.as_tensor",
"PIL.Image.open",
"os.listdir",
"torch.utils.data.random_split",
"os.path.join",
"torch.from_numpy",
"torch.tensor"
]
| [((7492, 7513), 'torch.manual_seed', 'torch.manual_seed', (['(13)'], {}), '(13)\n', (7509, 7513), False, 'import torch\n'), ((7532, 7570), 'torch.utils.data.random_split', 'random_split', (['ds', '[46, 46, 46, 45, 45]'], {}), '(ds, [46, 46, 46, 45, 45])\n', (7544, 7570), False, 'from torch.utils.data import Subset, random_split, ConcatDataset\n'), ((611, 660), 'os.path.join', 'os.path.join', (['self.root', '"""images"""', 'self.imgs[idx]'], {}), "(self.root, 'images', self.imgs[idx])\n", (623, 660), False, 'import os\n'), ((880, 923), 'torch.as_tensor', 'torch.as_tensor', (['boxes'], {'dtype': 'torch.float32'}), '(boxes, dtype=torch.float32)\n', (895, 923), False, 'import torch\n'), ((967, 1009), 'torch.as_tensor', 'torch.as_tensor', (['labels'], {'dtype': 'torch.int64'}), '(labels, dtype=torch.int64)\n', (982, 1009), False, 'import torch\n'), ((1030, 1049), 'torch.tensor', 'torch.tensor', (['[idx]'], {}), '([idx])\n', (1042, 1049), False, 'import torch\n'), ((2139, 2178), 'os.path.join', 'os.path.join', (['self.path', 'self.imgs[idx]'], {}), '(self.path, self.imgs[idx])\n', (2151, 2178), False, 'import os\n'), ((2398, 2441), 'torch.as_tensor', 'torch.as_tensor', (['boxes'], {'dtype': 'torch.float32'}), '(boxes, dtype=torch.float32)\n', (2413, 2441), False, 'import torch\n'), ((2485, 2527), 'torch.as_tensor', 'torch.as_tensor', (['labels'], {'dtype': 'torch.int64'}), '(labels, dtype=torch.int64)\n', (2500, 2527), False, 'import torch\n'), ((2548, 2567), 'torch.tensor', 'torch.tensor', (['[idx]'], {}), '([idx])\n', (2560, 2567), False, 'import torch\n'), ((3309, 3334), 'os.path.join', 'os.path.join', (['root', '"""all"""'], {}), "(root, 'all')\n", (3321, 3334), False, 'import os\n'), ((3550, 3589), 'os.path.join', 'os.path.join', (['self.path', 'self.imgs[idx]'], {}), '(self.path, self.imgs[idx])\n', (3562, 3589), False, 'import os\n'), ((3809, 3852), 'torch.as_tensor', 'torch.as_tensor', (['boxes'], {'dtype': 'torch.float32'}), '(boxes, dtype=torch.float32)\n', (3824, 3852), False, 'import torch\n'), ((3896, 3938), 'torch.as_tensor', 'torch.as_tensor', (['labels'], {'dtype': 'torch.int64'}), '(labels, dtype=torch.int64)\n', (3911, 3938), False, 'import torch\n'), ((3959, 3978), 'torch.tensor', 'torch.tensor', (['[idx]'], {}), '([idx])\n', (3971, 3978), False, 'import torch\n'), ((4905, 4954), 'os.path.join', 'os.path.join', (['self.root', '"""images"""', 'self.imgs[idx]'], {}), "(self.root, 'images', self.imgs[idx])\n", (4917, 4954), False, 'import os\n'), ((5028, 5067), 'os.path.join', 'os.path.join', (['self.root', '"""density_maps"""'], {}), "(self.root, 'density_maps')\n", (5040, 5067), False, 'import os\n'), ((5178, 5207), 'torch.from_numpy', 'torch.from_numpy', (['density_map'], {}), '(density_map)\n', (5194, 5207), False, 'import torch\n'), ((509, 551), 'os.path.join', 'os.path.join', (['self.root', '"""annotations.csv"""'], {}), "(self.root, 'annotations.csv')\n", (521, 551), False, 'import os\n'), ((1824, 1851), 'os.path.join', 'os.path.join', (['root', '"""train"""'], {}), "(root, 'train')\n", (1836, 1851), False, 'import os\n'), ((1890, 1916), 'os.path.join', 'os.path.join', (['root', '"""test"""'], {}), "(root, 'test')\n", (1902, 1916), False, 'import os\n'), ((2017, 2063), 'os.path.join', 'os.path.join', (['self.root', '"""annotations_all.csv"""'], {}), "(self.root, 'annotations_all.csv')\n", (2029, 2063), False, 'import os\n'), ((3433, 3475), 'os.path.join', 'os.path.join', (['self.root', '"""annotations.csv"""'], {}), "(self.root, 'annotations.csv')\n", (3445, 3475), 
False, 'import os\n'), ((5098, 5154), 'os.path.join', 'os.path.join', (['density_path', "(self.imgs[idx][:-4] + '.npy')"], {}), "(density_path, self.imgs[idx][:-4] + '.npy')\n", (5110, 5154), False, 'import os\n'), ((675, 695), 'PIL.Image.open', 'Image.open', (['img_path'], {}), '(img_path)\n', (685, 695), False, 'from PIL import Image\n'), ((1950, 1971), 'os.listdir', 'os.listdir', (['self.path'], {}), '(self.path)\n', (1960, 1971), False, 'import os\n'), ((2193, 2213), 'PIL.Image.open', 'Image.open', (['img_path'], {}), '(img_path)\n', (2203, 2213), False, 'from PIL import Image\n'), ((3367, 3388), 'os.listdir', 'os.listdir', (['self.path'], {}), '(self.path)\n', (3377, 3388), False, 'import os\n'), ((3604, 3624), 'PIL.Image.open', 'Image.open', (['img_path'], {}), '(img_path)\n', (3614, 3624), False, 'from PIL import Image\n'), ((4969, 4989), 'PIL.Image.open', 'Image.open', (['img_path'], {}), '(img_path)\n', (4979, 4989), False, 'from PIL import Image\n'), ((435, 463), 'os.path.join', 'os.path.join', (['root', '"""images"""'], {}), "(root, 'images')\n", (447, 463), False, 'import os\n'), ((4821, 4849), 'os.path.join', 'os.path.join', (['root', '"""images"""'], {}), "(root, 'images')\n", (4833, 4849), False, 'import os\n'), ((5746, 5775), 'os.path.join', 'os.path.join', (['root', '"""ans.npy"""'], {}), "(root, 'ans.npy')\n", (5758, 5775), False, 'import os\n'), ((5680, 5696), 'os.listdir', 'os.listdir', (['root'], {}), '(root)\n', (5690, 5696), False, 'import os\n')] |
# coding: utf-8
import functools
def memoize(fn):
known = dict()
@functools.wraps(fn)
def memoizer(*args):
if args not in known:
known[args] = fn(*args)
return known[args]
return memoizer
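# The `known` dict lives in the closure of `memoizer`, so results are cached for the
# lifetime of the decorated function; positional arguments must be hashable because
# they are used directly as dictionary keys.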
@memoize
def nsum(n):
    '''Return the sum of the first n numbers'''
assert(n >= 0), 'n must be >= 0'
return 0 if n == 0 else n + nsum(n-1)
@memoize
def fibonacci(n):
    '''Return the nth Fibonacci number'''
assert(n >= 0), 'n must be >= 0'
return n if n in (0, 1) else fibonacci(n-1) + fibonacci(n-2)
if __name__ == '__main__':
from timeit import Timer
measure = [{'exec': 'fibonacci(100)', 'import': 'fibonacci',
'func': fibonacci}, {'exec': 'nsum(200)', 'import': 'nsum',
'func': nsum}]
for m in measure:
t = Timer('{}'.format(m['exec']), 'from __main__ import \
{}'.format(m['import']))
print('name: {}, doc: {}, executing: {}, time: \
{}'.format(m['func'].__name__, m['func'].__doc__,
m['exec'], t.timeit()))
| [
"functools.wraps"
]
| [((78, 97), 'functools.wraps', 'functools.wraps', (['fn'], {}), '(fn)\n', (93, 97), False, 'import functools\n')] |
# -*- coding: utf-8 -*-
import time
from datetime import datetime
import warnings
from textwrap import dedent, fill
import numpy as np
import pandas as pd
from numpy.linalg import norm, inv
from scipy.linalg import solve as spsolve, LinAlgError
from scipy.integrate import trapz
from scipy import stats
from lifelines.fitters import BaseFitter, Printer
from lifelines.plotting import set_kwargs_drawstyle
from lifelines.statistics import chisq_test, proportional_hazard_test, TimeTransformers, StatisticalResult
from lifelines.utils.lowess import lowess
from lifelines.utils.concordance import _concordance_summary_statistics, _concordance_ratio
from lifelines.utils import (
_get_index,
_to_list,
_to_tuple,
_to_1d_array,
inv_normal_cdf,
normalize,
qth_survival_times,
coalesce,
check_for_numeric_dtypes_or_raise,
check_low_var,
check_complete_separation,
check_nans_or_infs,
StatError,
ConvergenceWarning,
StatisticalWarning,
StepSizer,
ConvergenceError,
string_justify,
interpolate_at_times_and_return_pandas,
CensoringType,
interpolate_at_times,
format_p_value,
)
__all__ = ["CoxPHFitter"]
class BatchVsSingle:
@staticmethod
def decide(batch_mode, n_unique, n_total, n_vars):
frac_dups = n_unique / n_total
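        # frac_dups is the ratio of distinct event times to total rows: values near 1 mean
        # few tied durations, while small values mean heavy ties, which favours the batch algorithm.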
if batch_mode or (
# https://github.com/CamDavidsonPilon/lifelines/issues/591 for original issue.
# new values from from perf/batch_vs_single script.
(batch_mode is None)
and (
(
6.876218e-01
+ -1.796993e-06 * n_total
+ -1.204271e-11 * n_total ** 2
+ 1.912500e00 * frac_dups
+ -8.121036e-01 * frac_dups ** 2
+ 4.916605e-06 * n_total * frac_dups
+ -5.888875e-03 * n_vars
+ 5.473434e-09 * n_vars * n_total
)
< 1
)
):
return "batch"
return "single"
class CoxPHFitter(BaseFitter):
r"""
This class implements fitting Cox's proportional hazard model:
.. math:: h(t|x) = h_0(t) \exp((x - \overline{x})' \beta)
Parameters
----------
alpha: float, optional (default=0.05)
the level in the confidence intervals.
tie_method: string, optional
specify how the fitter should deal with ties. Currently only
'Efron' is available.
penalizer: float, optional (default=0.0)
Attach an L2 penalizer to the size of the coefficients during regression. This improves
stability of the estimates and controls for high correlation between covariates.
For example, this shrinks the absolute value of :math:`\beta_i`.
The penalty is :math:`\frac{1}{2} \text{penalizer} ||\beta||^2`.
strata: list, optional
specify a list of columns to use in stratification. This is useful if a
categorical covariate does not obey the proportional hazard assumption. This
is used similar to the `strata` expression in R.
See http://courses.washington.edu/b515/l17.pdf.
Examples
--------
>>> from lifelines.datasets import load_rossi
>>> from lifelines import CoxPHFitter
>>> rossi = load_rossi()
>>> cph = CoxPHFitter()
>>> cph.fit(rossi, 'week', 'arrest')
>>> cph.print_summary()
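    >>> # an L2-penalized fit re-uses the same API, for example:
    >>> # CoxPHFitter(penalizer=0.1).fit(rossi, 'week', 'arrest')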
Attributes
----------
params_ : Series
The estimated coefficients. Changed in version 0.22.0: use to be ``.hazards_``
hazard_ratios_ : Series
The exp(coefficients)
confidence_intervals_ : DataFrame
The lower and upper confidence intervals for the hazard coefficients
durations: Series
The durations provided
event_observed: Series
The event_observed variable provided
weights: Series
The event_observed variable provided
variance_matrix_ : numpy array
The variance matrix of the coefficients
strata: list
the strata provided
standard_errors_: Series
the standard errors of the estimates
score_: float
the concordance index of the model.
baseline_hazard_: DataFrame
baseline_cumulative_hazard_: DataFrame
baseline_survival_: DataFrame
"""
_KNOWN_MODEL = True
def __init__(self, alpha=0.05, tie_method="Efron", penalizer=0.0, strata=None):
super(CoxPHFitter, self).__init__(alpha=alpha)
if penalizer < 0:
raise ValueError("penalizer parameter must be >= 0.")
if tie_method != "Efron":
raise NotImplementedError("Only Efron is available at the moment.")
self.alpha = alpha
self.tie_method = tie_method
self.penalizer = penalizer
self.strata = strata
@CensoringType.right_censoring
def fit(
self,
df,
duration_col=None,
event_col=None,
show_progress=False,
initial_point=None,
strata=None,
step_size=None,
weights_col=None,
cluster_col=None,
robust=False,
batch_mode=None,
):
"""
Fit the Cox proportional hazard model to a dataset.
Parameters
----------
df: DataFrame
a Pandas DataFrame with necessary columns `duration_col` and
            `event_col` (see below), covariate columns, and special columns (weights, strata).
`duration_col` refers to
the lifetimes of the subjects. `event_col` refers to whether
the 'death' events was observed: 1 if observed, 0 else (censored).
duration_col: string
the name of the column in DataFrame that contains the subjects'
lifetimes.
event_col: string, optional
            the name of the column in the DataFrame that contains the subjects' death
observation. If left as None, assume all individuals are uncensored.
weights_col: string, optional
an optional column in the DataFrame, df, that denotes the weight per subject.
This column is expelled and not used as a covariate, but as a weight in the
final regression. Default weight is 1.
This can be used for case-weights. For example, a weight of 2 means there were two subjects with
identical observations.
This can be used for sampling weights. In that case, use `robust=True` to get more accurate standard errors.
show_progress: boolean, optional (default=False)
since the fitter is iterative, show convergence
diagnostics. Useful if convergence is failing.
initial_point: (d,) numpy array, optional
initialize the starting point of the iterative
algorithm. Default is the zero vector.
strata: list or string, optional
            specify a column or list of columns to use in stratification. This is useful if a
categorical covariate does not obey the proportional hazard assumption. This
is used similar to the `strata` expression in R.
See http://courses.washington.edu/b515/l17.pdf.
step_size: float, optional
set an initial step size for the fitting algorithm. Setting to 1.0 may improve performance, but could also hurt convergence.
robust: boolean, optional (default=False)
Compute the robust errors using the Huber sandwich estimator, aka Wei-Lin estimate. This does not handle
            ties, so if there is a high number of ties, results may differ significantly. See
            "The Robust Inference for the Cox Proportional Hazards Model", Journal of the American Statistical Association, Vol. 84, No. 408 (Dec., 1989), pp. 1074-1078
cluster_col: string, optional
specifies what column has unique identifiers for clustering covariances. Using this forces the sandwich estimator (robust variance estimator) to
be used.
batch_mode: bool, optional
enabling batch_mode can be faster for datasets with a large number of ties. If left as None, lifelines will choose the best option.
Returns
-------
self: CoxPHFitter
self with additional new properties: ``print_summary``, ``hazards_``, ``confidence_intervals_``, ``baseline_survival_``, etc.
Note
----
Tied survival times are handled using Efron's tie-method.
Examples
--------
>>> from lifelines import CoxPHFitter
>>>
>>> df = pd.DataFrame({
>>> 'T': [5, 3, 9, 8, 7, 4, 4, 3, 2, 5, 6, 7],
>>> 'E': [1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0],
>>> 'var': [0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2],
>>> 'age': [4, 3, 9, 8, 7, 4, 4, 3, 2, 5, 6, 7],
>>> })
>>>
>>> cph = CoxPHFitter()
>>> cph.fit(df, 'T', 'E')
>>> cph.print_summary()
>>> cph.predict_median(df)
>>> from lifelines import CoxPHFitter
>>>
>>> df = pd.DataFrame({
>>> 'T': [5, 3, 9, 8, 7, 4, 4, 3, 2, 5, 6, 7],
>>> 'E': [1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0],
>>> 'var': [0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2],
>>> 'weights': [1.1, 0.5, 2.0, 1.6, 1.2, 4.3, 1.4, 4.5, 3.0, 3.2, 0.4, 6.2],
>>> 'month': [10, 3, 9, 8, 7, 4, 4, 3, 2, 5, 6, 7],
>>> 'age': [4, 3, 9, 8, 7, 4, 4, 3, 2, 5, 6, 7],
>>> })
>>>
>>> cph = CoxPHFitter()
>>> cph.fit(df, 'T', 'E', strata=['month', 'age'], robust=True, weights_col='weights')
>>> cph.print_summary()
>>> cph.predict_median(df)
"""
if duration_col is None:
raise TypeError("duration_col cannot be None.")
self._time_fit_was_called = datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S") + " UTC"
self.duration_col = duration_col
self.event_col = event_col
self.robust = robust
self.cluster_col = cluster_col
self.weights_col = weights_col
self._n_examples = df.shape[0]
self._batch_mode = batch_mode
self.strata = coalesce(strata, self.strata)
X, T, E, weights, original_index, self._clusters = self._preprocess_dataframe(df)
self.durations = T.copy()
self.event_observed = E.copy()
self.weights = weights.copy()
if self.strata is not None:
self.durations.index = original_index
self.event_observed.index = original_index
self.weights.index = original_index
self._norm_mean = X.mean(0)
self._norm_std = X.std(0)
X_norm = normalize(X, self._norm_mean, self._norm_std)
params_ = self._fit_model(
X_norm, T, E, weights=weights, initial_point=initial_point, show_progress=show_progress, step_size=step_size
)
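        # _fit_model worked on the normalized covariates, so the coefficients are scaled
        # back to the original units by dividing by the per-column standard deviation,
        # and the (negative inverse Hessian) variance estimate is rescaled accordingly.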
self.params_ = pd.Series(params_, index=X.columns, name="coef") / self._norm_std
self.hazard_ratios_ = pd.Series(np.exp(self.params_), index=X.columns, name="exp(coef)")
self.variance_matrix_ = -inv(self._hessian_) / np.outer(self._norm_std, self._norm_std)
self.standard_errors_ = self._compute_standard_errors(X_norm, T, E, weights)
self.confidence_intervals_ = self._compute_confidence_intervals()
self._predicted_partial_hazards_ = (
self.predict_partial_hazard(X)
.rename(columns={0: "P"})
.assign(T=self.durations.values, E=self.event_observed.values, W=self.weights.values)
.set_index(X.index)
)
self.baseline_hazard_ = self._compute_baseline_hazards()
self.baseline_cumulative_hazard_ = self._compute_baseline_cumulative_hazard()
self.baseline_survival_ = self._compute_baseline_survival()
if hasattr(self, "_concordance_score_"):
# we have already fit the model.
del self._concordance_score_
return self
def _preprocess_dataframe(self, df):
# this should be a pure function
df = df.copy()
if self.strata is not None:
df = df.sort_values(by=_to_list(self.strata) + [self.duration_col])
original_index = df.index.copy()
df = df.set_index(self.strata)
else:
df = df.sort_values(by=self.duration_col)
original_index = df.index.copy()
# Extract time and event
T = df.pop(self.duration_col)
E = (
df.pop(self.event_col)
if (self.event_col is not None)
else pd.Series(np.ones(self._n_examples), index=df.index, name="E")
)
W = (
df.pop(self.weights_col)
if (self.weights_col is not None)
else pd.Series(np.ones((self._n_examples,)), index=df.index, name="weights")
)
_clusters = df.pop(self.cluster_col).values if self.cluster_col else None
X = df.astype(float)
T = T.astype(float)
# we check nans here because converting to bools maps NaNs to True..
check_nans_or_infs(E)
E = E.astype(bool)
self._check_values(X, T, E, W)
return X, T, E, W, original_index, _clusters
def _check_values(self, X, T, E, W):
check_for_numeric_dtypes_or_raise(X)
check_nans_or_infs(T)
check_nans_or_infs(X)
check_low_var(X)
check_complete_separation(X, E, T, self.event_col)
# check to make sure their weights are okay
if self.weights_col:
if (W.astype(int) != W).any() and not self.robust:
warnings.warn(
"""It appears your weights are not integers, possibly propensity or sampling scores then?
It's important to know that the naive variance estimates of the coefficients are biased. Instead a) set `robust=True` in the call to `fit`, or b) use Monte Carlo to
estimate the variances. See paper "Variance estimation when using inverse probability of treatment weighting (IPTW) with survival analysis"
""",
StatisticalWarning,
)
if (W <= 0).any():
raise ValueError("values in weight column %s must be positive." % self.weights_col)
def _fit_model(
self,
X,
T,
E,
weights=None,
initial_point=None,
step_size=None,
precision=1e-07,
show_progress=True,
max_steps=50,
): # pylint: disable=too-many-statements,too-many-branches
"""
        Newton-Raphson algorithm for fitting the CPH model.
Note
----
The data is assumed to be sorted on T!
Parameters
----------
X: (n,d) Pandas DataFrame of observations.
T: (n) Pandas Series representing observed durations.
E: (n) Pandas Series representing death events.
weights: (n) an iterable representing weights per observation.
initial_point: (d,) numpy array of initial starting point for
NR algorithm. Default 0.
step_size: float, optional
> 0.001 to determine a starting step size in NR algorithm.
precision: float, optional
the convergence halts if the norm of delta between
successive positions is less than epsilon.
show_progress: boolean, optional
since the fitter is iterative, show convergence
diagnostics.
max_steps: int, optional
            the maximum number of iterations of the Newton-Raphson algorithm.
Returns
-------
beta: (1,d) numpy array.
"""
self.path = []
assert precision <= 1.0, "precision must be less than or equal to 1."
_, d = X.shape
# make sure betas are correct size.
if initial_point is not None:
assert initial_point.shape == (d,)
beta = initial_point
else:
beta = np.zeros((d,))
step_sizer = StepSizer(step_size)
step_size = step_sizer.next()
# Method of choice is just efron right now
if self.tie_method == "Efron":
decision = BatchVsSingle.decide(self._batch_mode, T.nunique(), X.shape[0], X.shape[1])
get_gradients = getattr(self, "_get_efron_values_%s" % decision)
self._batch_mode = decision == "batch"
else:
raise NotImplementedError("Only Efron is available.")
i = 0
converging = True
ll, previous_ll = 0, 0
start = time.time()
while converging:
self.path.append(beta.copy())
i += 1
if self.strata is None:
h, g, ll = get_gradients(X.values, T.values, E.values, weights.values, beta)
else:
g = np.zeros_like(beta)
h = np.zeros((beta.shape[0], beta.shape[0]))
ll = 0
for _h, _g, _ll in self._partition_by_strata_and_apply(X, T, E, weights, get_gradients, beta):
g += _g
h += _h
ll += _ll
if i == 1 and np.all(beta == 0):
# this is a neat optimization, the null partial likelihood
# is the same as the full partial but evaluated at zero.
# if the user supplied a non-trivial initial point, we need to delay this.
self._ll_null_ = ll
if self.penalizer > 0:
# add the gradient and hessian of the l2 term
g -= self.penalizer * beta
h.flat[:: d + 1] -= self.penalizer
# reusing a piece to make g * inv(h) * g.T faster later
try:
inv_h_dot_g_T = spsolve(-h, g, assume_a="pos", check_finite=False)
except ValueError as e:
if "infs or NaNs" in str(e):
raise ConvergenceError(
"""Hessian or gradient contains nan or inf value(s). Convergence halted. Please see the following tips in the lifelines documentation:
https://lifelines.readthedocs.io/en/latest/Examples.html#problems-with-convergence-in-the-cox-proportional-hazard-model
""",
e,
)
else:
# something else?
raise e
except LinAlgError as e:
raise ConvergenceError(
"""Convergence halted due to matrix inversion problems. Suspicion is high collinearity. Please see the following tips in the lifelines documentation:
https://lifelines.readthedocs.io/en/latest/Examples.html#problems-with-convergence-in-the-cox-proportional-hazard-model
""",
e,
)
delta = inv_h_dot_g_T
if np.any(np.isnan(delta)):
raise ConvergenceError(
"""delta contains nan value(s). Convergence halted. Please see the following tips in the lifelines documentation:
https://lifelines.readthedocs.io/en/latest/Examples.html#problems-with-convergence-in-the-cox-proportional-hazard-model
"""
)
# Save these as pending result
hessian, gradient = h, g
norm_delta = norm(delta)
# reusing an above piece to make g * inv(h) * g.T faster.
newton_decrement = g.dot(inv_h_dot_g_T) / 2
if show_progress:
print(
"\rIteration %d: norm_delta = %.5f, step_size = %.4f, ll = %.5f, newton_decrement = %.5f, seconds_since_start = %.1f"
% (i, norm_delta, step_size, ll, newton_decrement, time.time() - start),
end="",
)
# convergence criteria
if norm_delta < precision:
converging, completed = False, True
elif previous_ll != 0 and abs(ll - previous_ll) / (-previous_ll) < 1e-09:
# this is what R uses by default
converging, completed = False, True
elif newton_decrement < precision:
converging, completed = False, True
elif i >= max_steps:
# 50 iterations steps with N-R is a lot.
# Expected convergence is ~10 steps
converging, completed = False, False
elif step_size <= 0.00001:
converging, completed = False, False
elif abs(ll) < 0.0001 and norm_delta > 1.0:
warnings.warn(
"The log-likelihood is getting suspiciously close to 0 and the delta is still large. There may be complete separation in the dataset. This may result in incorrect inference of coefficients. \
See https://stats.stackexchange.com/q/11109/11867 for more.\n",
ConvergenceWarning,
)
converging, completed = False, False
beta += step_size * delta
previous_ll = ll
step_size = step_sizer.update(norm_delta).next()
self._hessian_ = hessian
self._score_ = gradient
self.log_likelihood_ = ll
if show_progress and completed:
print("Convergence completed after %d iterations." % (i))
elif show_progress and not completed:
print("Convergence failed. See any warning messages.")
# report to the user problems that we detect.
if completed and norm_delta > 0.1:
warnings.warn(
"Newton-Rhaphson convergence completed but norm(delta) is still high, %.3f. This may imply non-unique solutions to the maximum likelihood. Perhaps there is collinearity or complete separation in the dataset?\n"
% norm_delta,
ConvergenceWarning,
)
elif not completed:
warnings.warn(
"Newton-Rhaphson failed to converge sufficiently in %d steps.\n" % max_steps, ConvergenceWarning
)
return beta
def _get_efron_values_single(self, X, T, E, weights, beta):
"""
Calculates the first and second order vector differentials, with respect to beta.
Note that X, T, E are assumed to be sorted on T!
        A good intuition for Efron's method: consider three of five subjects who fail at the same time.
        Since it is not known a priori which of the three fails first, one-third of
        (φ1 + φ2 + φ3) is subtracted from Σ_{j=1}^{5} φj after the first failure. Similarly, two-thirds
        of (φ1 + φ2 + φ3) is subtracted after the first two failures, etc.
From https://cran.r-project.org/web/packages/survival/survival.pdf:
"Setting all weights to 2 for instance will give the same coefficient estimate but halve the variance. When
the Efron approximation for ties (default) is employed replication of the data will not give exactly the same coefficients as the
weights option, and in this case the weighted fit is arguably the correct one."
Parameters
----------
X: array
(n,d) numpy array of observations.
T: array
(n) numpy array representing observed durations.
E: array
(n) numpy array representing death events.
weights: array
(n) an array representing weights per observation.
beta: array
(1, d) numpy array of coefficients.
Returns
-------
hessian:
(d, d) numpy array,
gradient:
(1, d) numpy array
log_likelihood: float
"""
n, d = X.shape
hessian = np.zeros((d, d))
gradient = np.zeros((d,))
log_lik = 0
# Init risk and tie sums to zero
x_death_sum = np.zeros((d,))
risk_phi, tie_phi = 0, 0
risk_phi_x, tie_phi_x = np.zeros((d,)), np.zeros((d,))
risk_phi_x_x, tie_phi_x_x = np.zeros((d, d)), np.zeros((d, d))
# Init number of ties and weights
weight_count = 0.0
tied_death_counts = 0
scores = weights * np.exp(np.dot(X, beta))
phi_x_is = scores[:, None] * X
phi_x_x_i = np.empty((d, d))
# Iterate backwards to utilize recursive relationship
for i in range(n - 1, -1, -1):
# Doing it like this to preserve shape
ti = T[i]
ei = E[i]
xi = X[i]
w = weights[i]
# Calculate phi values
phi_i = scores[i]
phi_x_i = phi_x_is[i]
# https://stackoverflow.com/a/51481295/1895939
phi_x_x_i = np.multiply.outer(xi, phi_x_i)
# Calculate sums of Risk set
risk_phi = risk_phi + phi_i
risk_phi_x = risk_phi_x + phi_x_i
risk_phi_x_x = risk_phi_x_x + phi_x_x_i
# Calculate sums of Ties, if this is an event
if ei:
x_death_sum = x_death_sum + w * xi
tie_phi = tie_phi + phi_i
tie_phi_x = tie_phi_x + phi_x_i
tie_phi_x_x = tie_phi_x_x + phi_x_x_i
# Keep track of count
tied_death_counts += 1
weight_count += w
if i > 0 and T[i - 1] == ti:
# There are more ties/members of the risk set
continue
elif tied_death_counts == 0:
# Only censored with current time, move on
continue
# There was at least one event and no more ties remain. Time to sum.
# This code is near identical to the _batch algorithm below. In fact, see _batch for comments.
weighted_average = weight_count / tied_death_counts
if tied_death_counts > 1:
increasing_proportion = np.arange(tied_death_counts) / tied_death_counts
denom = 1.0 / (risk_phi - increasing_proportion * tie_phi)
numer = risk_phi_x - np.outer(increasing_proportion, tie_phi_x)
a1 = np.einsum("ab,i->ab", risk_phi_x_x, denom) - np.einsum(
"ab,i->ab", tie_phi_x_x, increasing_proportion * denom
)
else:
denom = 1.0 / np.array([risk_phi])
numer = risk_phi_x
a1 = risk_phi_x_x * denom
summand = numer * denom[:, None]
a2 = summand.T.dot(summand)
gradient = gradient + x_death_sum - weighted_average * summand.sum(0)
log_lik = log_lik + np.dot(x_death_sum, beta) + weighted_average * np.log(denom).sum()
hessian = hessian + weighted_average * (a2 - a1)
# reset tie values
tied_death_counts = 0
weight_count = 0.0
x_death_sum = np.zeros((d,))
tie_phi = 0
tie_phi_x = np.zeros((d,))
tie_phi_x_x = np.zeros((d, d))
return hessian, gradient, log_lik
@staticmethod
def _trivial_log_likelihood_batch(T, E, weights):
# used for log-likelihood test
n = T.shape[0]
log_lik = 0
_, counts = np.unique(-T, return_counts=True)
risk_phi = 0
pos = n
for count_of_removals in counts:
slice_ = slice(pos - count_of_removals, pos)
weights_at_t = weights[slice_]
phi_i = weights_at_t
# Calculate sums of Risk set
risk_phi = risk_phi + phi_i.sum()
# Calculate the sums of Tie set
deaths = E[slice_]
tied_death_counts = deaths.astype(int).sum()
if tied_death_counts == 0:
# no deaths, can continue
pos -= count_of_removals
continue
weights_deaths = weights_at_t[deaths]
weight_count = weights_deaths.sum()
if tied_death_counts > 1:
tie_phi = phi_i[deaths].sum()
factor = np.log(risk_phi - np.arange(tied_death_counts) * tie_phi / tied_death_counts).sum()
else:
factor = np.log(risk_phi)
log_lik = log_lik - weight_count / tied_death_counts * factor
pos -= count_of_removals
return log_lik
@staticmethod
def _trivial_log_likelihood_single(T, E, weights):
# assumes sorted on T!
log_lik = 0
n = T.shape[0]
# Init risk and tie sums to zero
risk_phi, tie_phi = 0, 0
# Init number of ties and weights
weight_count = 0.0
tied_death_counts = 0
# Iterate backwards to utilize recursive relationship
for i in range(n - 1, -1, -1):
# Doing it like this to preserve shape
ti = T[i]
ei = E[i]
# Calculate phi values
phi_i = weights[i]
w = weights[i]
# Calculate sums of Risk set
risk_phi = risk_phi + phi_i
# Calculate sums of Ties, if this is an event
if ei:
tie_phi = tie_phi + phi_i
# Keep track of count
tied_death_counts += 1
weight_count += w
if i > 0 and T[i - 1] == ti:
# There are more ties/members of the risk set
continue
elif tied_death_counts == 0:
# Only censored with current time, move on
continue
if tied_death_counts > 1:
factor = np.log(risk_phi - np.arange(tied_death_counts) * tie_phi / tied_death_counts).sum()
else:
factor = np.log(risk_phi)
log_lik = log_lik - weight_count / tied_death_counts * factor
# reset tie values
tied_death_counts = 0
weight_count = 0.0
tie_phi = 0
return log_lik
def _get_efron_values_batch(self, X, T, E, weights, beta): # pylint: disable=too-many-locals
"""
Assumes sorted ascending on T
Calculates the first and second order vector differentials, with respect to beta.
A good explanation for how Efron handles ties: consider three of five subjects who fail at the same time.
Since it is not known a priori who is the first to fail, one-third of
(φ1 + φ2 + φ3) is subtracted from sum_j^{5} φj after one fails. Similarly, two-thirds
of (φ1 + φ2 + φ3) is subtracted after the first two individuals fail, etc.
Returns
-------
hessian: (d, d) numpy array,
gradient: (1, d) numpy array
log_likelihood: float
"""
n, d = X.shape
hessian = np.zeros((d, d))
gradient = np.zeros((d,))
log_lik = 0
# weights = weights[:, None]
# Init risk and tie sums to zero
risk_phi, tie_phi = 0, 0
risk_phi_x, tie_phi_x = np.zeros((d,)), np.zeros((d,))
risk_phi_x_x, tie_phi_x_x = np.zeros((d, d)), np.zeros((d, d))
# counts are sorted by -T
_, counts = np.unique(-T, return_counts=True)
scores = weights * np.exp(np.dot(X, beta))
pos = n
ZERO_TO_N = np.arange(counts.max())
for count_of_removals in counts:
slice_ = slice(pos - count_of_removals, pos)
X_at_t = X[slice_]
weights_at_t = weights[slice_]
deaths = E[slice_]
phi_i = scores[slice_, None]
phi_x_i = phi_i * X_at_t
phi_x_x_i = np.dot(X_at_t.T, phi_x_i)
# Calculate sums of Risk set
risk_phi = risk_phi + phi_i.sum()
risk_phi_x = risk_phi_x + (phi_x_i).sum(0)
risk_phi_x_x = risk_phi_x_x + phi_x_x_i
# Calculate the sums of Tie set
tied_death_counts = deaths.sum()
if tied_death_counts == 0:
# no deaths, can continue
pos -= count_of_removals
continue
"""
I think there is another optimization that can be made if we sort on
T and E. Using some accounting, we can skip all the [death] indexing below.
"""
xi_deaths = X_at_t[deaths]
weights_deaths = weights_at_t[deaths]
x_death_sum = np.einsum("a,ab->b", weights_deaths, xi_deaths)
weight_count = weights_deaths.sum()
weighted_average = weight_count / tied_death_counts
if tied_death_counts > 1:
# a lot of this is now in Einstein notation for performance, but see original "expanded" code here
# https://github.com/CamDavidsonPilon/lifelines/blob/e7056e7817272eb5dff5983556954f56c33301b1/lifelines/fitters/coxph_fitter.py#L755-L789
# it's faster if we can skip computing these when we don't need to.
phi_x_i_deaths = phi_x_i[deaths]
tie_phi = phi_i[deaths].sum()
tie_phi_x = (phi_x_i_deaths).sum(0)
tie_phi_x_x = np.dot(xi_deaths.T, phi_x_i_deaths)
increasing_proportion = ZERO_TO_N[:tied_death_counts] / tied_death_counts
denom = 1.0 / (risk_phi - increasing_proportion * tie_phi)
numer = risk_phi_x - np.outer(increasing_proportion, tie_phi_x)
# computes outer products and sums them together.
# Naive approach is to
# 1) broadcast tie_phi_x_x and increasing_proportion into a (tied_death_counts, d, d) matrix
# 2) broadcast risk_phi_x_x and denom into a (tied_death_counts, d, d) matrix
# 3) subtract them, and then sum to (d, d)
# Alternatively, we can sum earlier without having to explicitly create (_, d, d) matrices. This is used here.
#
a1 = np.einsum("ab,i->ab", risk_phi_x_x, denom) - np.einsum(
"ab,i->ab", tie_phi_x_x, increasing_proportion * denom
)
else:
# no tensors here, but do some casting to make it easier in the converging step next.
denom = 1.0 / np.array([risk_phi])
numer = risk_phi_x
a1 = risk_phi_x_x * denom
summand = numer * denom[:, None]
# This is a batch outer product.
# given a matrix t, for each row, m, compute its outer product: m.dot(m.T), and stack these new matrices together.
# which would be: np.einsum("Bi, Bj->Bij", t, t)
a2 = summand.T.dot(summand)
gradient = gradient + x_death_sum - weighted_average * summand.sum(0)
log_lik = log_lik + np.dot(x_death_sum, beta) + weighted_average * np.log(denom).sum()
hessian = hessian + weighted_average * (a2 - a1)
pos -= count_of_removals
return hessian, gradient, log_lik
def _partition_by_strata(self, X, T, E, weights, as_dataframes=False):
for stratum, stratified_X in X.groupby(self.strata):
stratified_E, stratified_T, stratified_W = (E.loc[[stratum]], T.loc[[stratum]], weights.loc[[stratum]])
if not as_dataframes:
yield (stratified_X.values, stratified_T.values, stratified_E.values, stratified_W.values), stratum
else:
yield (stratified_X, stratified_T, stratified_E, stratified_W), stratum
def _partition_by_strata_and_apply(self, X, T, E, weights, function, *args):
for (stratified_X, stratified_T, stratified_E, stratified_W), _ in self._partition_by_strata(X, T, E, weights):
yield function(stratified_X, stratified_T, stratified_E, stratified_W, *args)
def _compute_martingale(self, X, T, E, _weights, index=None):
# TODO: _weights unused
partial_hazard = self.predict_partial_hazard(X)[0].values
if not self.strata:
baseline_at_T = self.baseline_cumulative_hazard_.loc[T, "baseline cumulative hazard"].values
else:
baseline_at_T = np.empty(0)
for name, T_ in T.groupby(by=self.strata):
baseline_at_T = np.append(baseline_at_T, self.baseline_cumulative_hazard_[name].loc[T_])
martingale = E - (partial_hazard * baseline_at_T)
return pd.DataFrame(
{self.duration_col: T.values, self.event_col: E.values, "martingale": martingale.values}, index=index
)
def _compute_deviance(self, X, T, E, weights, index=None):
df = self._compute_martingale(X, T, E, weights, index)
rmart = df.pop("martingale")
with np.warnings.catch_warnings():
np.warnings.filterwarnings("ignore")
log_term = np.where((E.values - rmart.values) <= 0, 0, E.values * np.log(E.values - rmart.values))
deviance = np.sign(rmart) * np.sqrt(-2 * (rmart + log_term))
df["deviance"] = deviance
return df
def _compute_scaled_schoenfeld(self, X, T, E, weights, index=None):
r"""
Let s_k be the kth Schoenfeld residual. Then E[s_k] = 0.
For tests of proportionality, we want to test if \beta_i(t) is \beta_i (constant) or not.
Let V_k be the contribution to the information matrix at time t_k. A main result from Grambsch and Therneau is that
\beta(t) = E[s_k*V_k^{-1} + \hat{beta}]
so define s_k^* = s_k*V_k^{-1} + \hat{beta} as the scaled schoenfeld residuals.
We can approximate V_k with Hessian/d, so the inverse of Hessian/d is (d * variance_matrix_)
Notes
-------
lifelines does not add the coefficients to the final results, but R does when you call residuals(c, "scaledsch")
"""
n_deaths = self.event_observed.sum()
scaled_schoenfeld_resids = n_deaths * self._compute_schoenfeld(X, T, E, weights, index).dot(
self.variance_matrix_
)
scaled_schoenfeld_resids.columns = self.params_.index
return scaled_schoenfeld_resids
def _compute_schoenfeld(self, X, T, E, weights, index=None):
# TODO: should the index be times, i.e. T[E]?
# Assumes sorted on T and on strata
# cluster does nothing to this, as expected.
_, d = X.shape
if self.strata is not None:
schoenfeld_residuals = np.empty((0, d))
for schoenfeld_residuals_in_strata in self._partition_by_strata_and_apply(
X, T, E, weights, self._compute_schoenfeld_within_strata
):
schoenfeld_residuals = np.append(schoenfeld_residuals, schoenfeld_residuals_in_strata, axis=0)
else:
schoenfeld_residuals = self._compute_schoenfeld_within_strata(X.values, T.values, E.values, weights.values)
# schoenfeld residuals are only defined for subjects with a non-zero event.
df = pd.DataFrame(schoenfeld_residuals[E, :], columns=self.params_.index, index=index[E])
return df
def _compute_schoenfeld_within_strata(self, X, T, E, weights):
"""
A positive value of the residual shows an X value that is higher than expected at that death time.
"""
# TODO: the diff_against is gross
# This uses Efron ties.
n, d = X.shape
if not np.any(E):
# sometimes strata have no deaths. This means nothing is returned
# in the below code.
return np.zeros((n, d))
# Init risk and tie sums to zero
risk_phi, tie_phi = 0, 0
risk_phi_x, tie_phi_x = np.zeros((1, d)), np.zeros((1, d))
# Init number of ties and weights
weight_count = 0.0
tie_count = 0
scores = weights * np.exp(np.dot(X, self.params_))
diff_against = []
schoenfeld_residuals = np.empty((0, d))
# Iterate backwards to utilize recursive relationship
for i in range(n - 1, -1, -1):
# Doing it like this to preserve shape
ti = T[i]
ei = E[i]
xi = X[i : i + 1]
score = scores[i : i + 1]
w = weights[i]
# Calculate phi values
phi_i = score
phi_x_i = phi_i * xi
# Calculate sums of Risk set
risk_phi = risk_phi + phi_i
risk_phi_x = risk_phi_x + phi_x_i
# Calculate sums of Ties, if this is an event
diff_against.append((xi, ei))
if ei:
tie_phi = tie_phi + phi_i
tie_phi_x = tie_phi_x + phi_x_i
# Keep track of count
tie_count += 1 # aka death counts
weight_count += w
if i > 0 and T[i - 1] == ti:
# There are more ties/members of the risk set
continue
elif tie_count == 0:
for _ in diff_against:
schoenfeld_residuals = np.append(schoenfeld_residuals, np.zeros((1, d)), axis=0)
diff_against = []
continue
# There was at least one event and no more ties remain. Time to sum.
weighted_mean = np.zeros((1, d))
for l in range(tie_count):
numer = risk_phi_x - l * tie_phi_x / tie_count
denom = risk_phi - l * tie_phi / tie_count
weighted_mean += numer / (denom * tie_count)
for xi, ei in diff_against:
schoenfeld_residuals = np.append(schoenfeld_residuals, ei * (xi - weighted_mean), axis=0)
# reset tie values
tie_count = 0
weight_count = 0.0
tie_phi = 0
tie_phi_x = np.zeros((1, d))
diff_against = []
return schoenfeld_residuals[::-1]
def _compute_delta_beta(self, X, T, E, weights, index=None):
"""
Approximate change in betas as a result of excluding the ith row. Good for finding outliers / specific
subjects that influence the model disproportionately. Good advice: don't drop these outliers, model them.
"""
score_residuals = self._compute_score(X, T, E, weights, index=index)
d = X.shape[1]
scaled_variance_matrix = self.variance_matrix_ * np.tile(self._norm_std.values, (d, 1)).T
delta_betas = score_residuals.dot(scaled_variance_matrix)
delta_betas.columns = self.params_.index
return delta_betas
def _compute_score(self, X, T, E, weights, index=None):
_, d = X.shape
if self.strata is not None:
score_residuals = np.empty((0, d))
for score_residuals_in_strata in self._partition_by_strata_and_apply(
X, T, E, weights, self._compute_score_within_strata
):
score_residuals = np.append(score_residuals, score_residuals_in_strata, axis=0)
else:
score_residuals = self._compute_score_within_strata(X.values, T, E.values, weights.values)
return pd.DataFrame(score_residuals, columns=self.params_.index, index=index)
def _compute_score_within_strata(self, X, _T, E, weights):
# https://www.stat.tamu.edu/~carroll/ftp/gk001.pdf
# lin1989
# https://www.ics.uci.edu/~dgillen/STAT255/Handouts/lecture10.pdf
# Assumes X already sorted by T with strata
# TODO: doesn't handle ties.
# TODO: _T unused
n, d = X.shape
# we already unnormalized the betas in `fit`, so we need to normalize them again since X is
# normalized.
beta = self.params_.values * self._norm_std
E = E.astype(int)
score_residuals = np.zeros((n, d))
phi_s = np.exp(np.dot(X, beta))
# need to store these histories, as we access them often
# this is a reverse cumulative sum. See original code in https://github.com/CamDavidsonPilon/lifelines/pull/496/files#diff-81ee0759dbae0770e1a02cf17f4cfbb1R431
risk_phi_x_history = (X * (weights * phi_s)[:, None])[::-1].cumsum(0)[::-1]
risk_phi_history = (weights * phi_s)[::-1].cumsum()[::-1][:, None]
# Iterate forwards
for i in range(0, n):
xi = X[i : i + 1]
phi_i = phi_s[i]
score = -phi_i * (
(
E[: i + 1] * weights[: i + 1] / risk_phi_history[: i + 1].T
).T # this is constant-ish, and could be cached
* (xi - risk_phi_x_history[: i + 1] / risk_phi_history[: i + 1])
).sum(0)
if E[i]:
score = score + (xi - risk_phi_x_history[i] / risk_phi_history[i])
score_residuals[i, :] = score
return score_residuals * weights[:, None]
def compute_residuals(self, training_dataframe, kind):
"""
Parameters
----------
training_dataframe : pandas DataFrame
the same training DataFrame given in `fit`
kind : string
{'schoenfeld', 'score', 'delta_beta', 'deviance', 'martingale', 'scaled_schoenfeld'}
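Examples
---------
A minimal usage sketch, following the ``load_rossi`` pattern used elsewhere in this file:
>>> from lifelines import datasets, CoxPHFitter
>>> rossi = datasets.load_rossi()
>>> cph = CoxPHFitter().fit(rossi, 'week', 'arrest')
>>> cph.compute_residuals(rossi, kind='martingale')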
"""
ALLOWED_RESIDUALS = {"schoenfeld", "score", "delta_beta", "deviance", "martingale", "scaled_schoenfeld"}
assert kind in ALLOWED_RESIDUALS, "kind must be in %s" % ALLOWED_RESIDUALS
warnings.filterwarnings("ignore", category=ConvergenceWarning)
X, T, E, weights, shuffled_original_index, _ = self._preprocess_dataframe(training_dataframe)
resids = getattr(self, "_compute_%s" % kind)(X, T, E, weights, index=shuffled_original_index)
return resids
def _compute_confidence_intervals(self):
ci = 100 * (1 - self.alpha)
z = inv_normal_cdf(1 - self.alpha / 2)
se = self.standard_errors_
hazards = self.params_.values
return pd.DataFrame(
np.c_[hazards - z * se, hazards + z * se],
columns=["%g%% lower-bound" % ci, "%g%% upper-bound" % ci],
index=self.params_.index,
)
def _compute_standard_errors(self, X, T, E, weights):
if self.robust or self.cluster_col:
se = np.sqrt(self._compute_sandwich_estimator(X, T, E, weights).diagonal())
else:
se = np.sqrt(self.variance_matrix_.diagonal())
return pd.Series(se, name="se", index=self.params_.index)
def _compute_sandwich_estimator(self, X, T, E, weights):
delta_betas = self._compute_delta_beta(X, T, E, weights)
if self.cluster_col:
delta_betas = delta_betas.groupby(self._clusters).sum()
sandwich_estimator = delta_betas.T.dot(delta_betas)
return sandwich_estimator.values
def _compute_z_values(self):
return self.params_ / self.standard_errors_
def _compute_p_values(self):
U = self._compute_z_values() ** 2
return stats.chi2.sf(U, 1)
@property
def summary(self):
"""Summary statistics describing the fit.
Set alpha property in the object before calling.
Returns
-------
df : DataFrame
Contains columns coef, np.exp(coef), se(coef), z, p, lower, upper"""
ci = 100 * (1 - self.alpha)
z = inv_normal_cdf(1 - self.alpha / 2)
with np.errstate(invalid="ignore", divide="ignore", over="ignore", under="ignore"):
df = pd.DataFrame(index=self.params_.index)
df["coef"] = self.params_
df["exp(coef)"] = self.hazard_ratios_
df["se(coef)"] = self.standard_errors_
df["coef lower %g%%" % ci] = self.confidence_intervals_["%g%% lower-bound" % ci]
df["coef upper %g%%" % ci] = self.confidence_intervals_["%g%% upper-bound" % ci]
df["exp(coef) lower %g%%" % ci] = self.hazard_ratios_ * np.exp(-z * self.standard_errors_)
df["exp(coef) upper %g%%" % ci] = self.hazard_ratios_ * np.exp(z * self.standard_errors_)
df["z"] = self._compute_z_values()
df["p"] = self._compute_p_values()
df["-log2(p)"] = -np.log2(df["p"])
return df
def print_summary(self, decimals=2, **kwargs):
"""
Print summary statistics describing the fit, the coefficients, and the error bounds.
Parameters
-----------
decimals: int, optional (default=2)
specify the number of decimal places to show
kwargs:
print additional metadata in the output (useful to provide model names, dataset names, etc.) when comparing
multiple outputs.
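Examples
---------
A minimal usage sketch, following the ``load_rossi`` pattern used elsewhere in this file; extra kwargs are echoed as metadata:
>>> from lifelines import datasets, CoxPHFitter
>>> rossi = datasets.load_rossi()
>>> cph = CoxPHFitter().fit(rossi, 'week', 'arrest')
>>> cph.print_summary(decimals=3, model="rossi baseline model")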
"""
# Print information about data first
justify = string_justify(25)
headers = []
headers.append(("duration col", "'%s'" % self.duration_col))
if self.event_col:
headers.append(("event col", "'%s'" % self.event_col))
if self.weights_col:
headers.append(("weights col", "'%s'" % self.weights_col))
if self.cluster_col:
headers.append(("cluster col", "'%s'" % self.cluster_col))
if self.penalizer > 0:
headers.append(("penalizer", self.penalizer))
if self.robust or self.cluster_col:
headers.append(("robust variance", True))
if self.strata:
headers.append(("strata", self.strata))
headers.extend(
[
("number of observations", "{:g}".format(self.weights.sum())),
("number of events observed", "{:g}".format(self.weights[self.event_observed > 0].sum())),
("partial log-likelihood", "{:.{prec}f}".format(self.log_likelihood_, prec=decimals)),
("time fit was run", self._time_fit_was_called),
]
)
p = Printer(headers, self, justify, decimals, kwargs)
p.print()
def log_likelihood_ratio_test(self):
"""
This function computes the likelihood ratio test for the Cox model. We
compare the existing model (with all the covariates) to the trivial model
of no covariates.
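Examples
---------
A minimal usage sketch, following the ``load_rossi`` pattern used elsewhere in this file:
>>> from lifelines import datasets, CoxPHFitter
>>> rossi = datasets.load_rossi()
>>> cph = CoxPHFitter().fit(rossi, 'week', 'arrest')
>>> result = cph.log_likelihood_ratio_test()
>>> result.p_value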
"""
if hasattr(self, "_ll_null_"):
ll_null = self._ll_null_
else:
if self._batch_mode:
ll_null = self._trivial_log_likelihood_batch(
self.durations.values, self.event_observed.values, self.weights.values
)
else:
ll_null = self._trivial_log_likelihood_single(
self.durations.values, self.event_observed.values, self.weights.values
)
ll_alt = self.log_likelihood_
test_stat = 2 * ll_alt - 2 * ll_null
degrees_freedom = self.params_.shape[0]
p_value = chisq_test(test_stat, degrees_freedom=degrees_freedom)
return StatisticalResult(
p_value,
test_stat,
name="log-likelihood ratio test",
null_distribution="chi squared",
degrees_freedom=degrees_freedom,
)
def predict_partial_hazard(self, X):
r"""
Parameters
----------
X: numpy array or DataFrame
a (n,d) covariate numpy array or DataFrame. If a DataFrame, columns
can be in any order. If a numpy array, columns must be in the
same order as the training data.
Returns
-------
partial_hazard: DataFrame
Returns the partial hazard for the individuals, partial since the
baseline hazard is not included. Equal to :math:`\exp{(x - mean(x_{train}))'\beta}`
Notes
-----
If X is a DataFrame, the order of the columns does not matter. But
if X is an array, then the column ordering is assumed to be the
same as the training dataset.
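Examples
---------
A minimal usage sketch, following the ``load_rossi`` pattern used elsewhere in this file (extra DataFrame columns are dropped by the column reindexing step):
>>> from lifelines import datasets, CoxPHFitter
>>> rossi = datasets.load_rossi()
>>> cph = CoxPHFitter().fit(rossi, 'week', 'arrest')
>>> cph.predict_partial_hazard(rossi)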
"""
return np.exp(self.predict_log_partial_hazard(X))
def predict_log_partial_hazard(self, X):
r"""
This is equivalent to R's linear.predictors.
Returns the log of the partial hazard for the individuals, partial since the
baseline hazard is not included. Equal to :math:`(x - \text{mean}(x_{\text{train}})) \beta`
Parameters
----------
X: numpy array or DataFrame
a (n,d) covariate numpy array or DataFrame. If a DataFrame, columns
can be in any order. If a numpy array, columns must be in the
same order as the training data.
Returns
-------
log_partial_hazard: DataFrame
Notes
-----
If X is a DataFrame, the order of the columns does not matter. But
if X is an array, then the column ordering is assumed to be the
same as the training dataset.
"""
hazard_names = self.params_.index
if isinstance(X, pd.Series) and ((X.shape[0] == len(hazard_names) + 2) or (X.shape[0] == len(hazard_names))):
X = X.to_frame().T
return self.predict_log_partial_hazard(X)
elif isinstance(X, pd.Series):
assert len(hazard_names) == 1, "Series not the correct argument"
X = X.to_frame().T
return self.predict_log_partial_hazard(X)
index = _get_index(X)
if isinstance(X, pd.DataFrame):
order = hazard_names
X = X.reindex(order, axis="columns")
X = X.astype(float)
X = X.values
X = X.astype(float)
X = normalize(X, self._norm_mean.values, 1)
return pd.DataFrame(np.dot(X, self.params_), index=index)
def predict_cumulative_hazard(self, X, times=None, conditional_after=None):
"""
Parameters
----------
X: numpy array or DataFrame
a (n,d) covariate numpy array or DataFrame. If a DataFrame, columns
can be in any order. If a numpy array, columns must be in the
same order as the training data.
times: iterable, optional
an iterable of increasing times to predict the cumulative hazard at. Default
is the set of all durations (observed and unobserved). Uses a linear interpolation if
points in time are not in the index.
conditional_after: iterable, optional
Must be equal in size to X.shape[0] (denoted `n` above). An iterable (array, list, series) of possibly non-zero values that represent how long the
subject has already lived for. Ex: if :math:`T` is the unknown event time, then this represents
:math:`T | T > s`. This is useful for knowing the *remaining* hazard/survival of censored subjects.
The new timeline is the remaining duration of the subject, i.e. reset back to starting at 0.
Returns
-------
cumulative_hazard_ : DataFrame
the cumulative hazard of individuals over the timeline
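Examples
---------
A minimal usage sketch, following the ``load_rossi`` pattern used elsewhere in this file:
>>> from lifelines import datasets, CoxPHFitter
>>> rossi = datasets.load_rossi()
>>> cph = CoxPHFitter().fit(rossi, 'week', 'arrest')
>>> cph.predict_cumulative_hazard(rossi, times=[5, 25, 50])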
"""
if isinstance(X, pd.Series):
return self.predict_cumulative_hazard(X.to_frame().T, times=times, conditional_after=conditional_after)
n = X.shape[0]
if times is not None:
times = np.atleast_1d(times).astype(float)
if conditional_after is not None:
conditional_after = _to_1d_array(conditional_after).reshape(n, 1)
if self.strata:
cumulative_hazard_ = pd.DataFrame()
for stratum, stratified_X in X.groupby(self.strata):
try:
strata_c_0 = self.baseline_cumulative_hazard_[[stratum]]
except KeyError:
raise StatError(
dedent(
"""The stratum %s was not found in the original training data. For example, try
the following on the original dataset, df: `df.groupby(%s).size()`. The expectation is that %s is not present in the output."""
% (stratum, self.strata, stratum)
)
)
col = _get_index(stratified_X)
v = self.predict_partial_hazard(stratified_X)
times_ = coalesce(times, self.baseline_cumulative_hazard_.index)
n_ = stratified_X.shape[0]
if conditional_after is not None:
times_to_evaluate_at = np.tile(times_, (n_, 1)) + conditional_after
c_0_ = interpolate_at_times(strata_c_0, times_to_evaluate_at)
c_0_conditional_after = interpolate_at_times(strata_c_0, conditional_after)
c_0_ = np.clip((c_0_ - c_0_conditional_after).T, 0, np.inf)
else:
times_to_evaluate_at = np.tile(times_, (n_, 1))
c_0_ = interpolate_at_times(strata_c_0, times_to_evaluate_at).T
cumulative_hazard_ = cumulative_hazard_.merge(
pd.DataFrame(c_0_ * v.values[:, 0], columns=col, index=times_),
how="outer",
right_index=True,
left_index=True,
)
else:
v = self.predict_partial_hazard(X)
col = _get_index(v)
times_ = coalesce(times, self.baseline_cumulative_hazard_.index)
if conditional_after is not None:
times_to_evaluate_at = np.tile(times_, (n, 1)) + conditional_after
c_0 = interpolate_at_times(self.baseline_cumulative_hazard_, times_to_evaluate_at)
c_0_conditional_after = interpolate_at_times(self.baseline_cumulative_hazard_, conditional_after)
c_0 = np.clip((c_0 - c_0_conditional_after).T, 0, np.inf)
else:
times_to_evaluate_at = np.tile(times_, (n, 1))
c_0 = interpolate_at_times(self.baseline_cumulative_hazard_, times_to_evaluate_at).T
cumulative_hazard_ = pd.DataFrame(c_0 * v.values[:, 0], columns=col, index=times_)
return cumulative_hazard_
def predict_survival_function(self, X, times=None, conditional_after=None):
"""
Predict the survival function for individuals, given their covariates. This assumes that the individual
just entered the study (that is, we do not condition on how long they have already lived for.)
Parameters
----------
X: numpy array or DataFrame
a (n,d) covariate numpy array or DataFrame. If a DataFrame, columns
can be in any order. If a numpy array, columns must be in the
same order as the training data.
times: iterable, optional
an iterable of increasing times to predict the cumulative hazard at. Default
is the set of all durations (observed and unobserved). Uses a linear interpolation if
points in time are not in the index.
conditional_after: iterable, optional
Must be equal in size to X.shape[0] (denoted `n` above). An iterable (array, list, series) of possibly non-zero values that represent how long the
subject has already lived for. Ex: if :math:`T` is the unknown event time, then this represents
:math:`T | T > s`. This is useful for knowing the *remaining* hazard/survival of censored subjects.
The new timeline is the remaining duration of the subject, i.e. normalized back to starting at 0.
Returns
-------
survival_function : DataFrame
the survival probabilities of individuals over the timeline
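Examples
---------
A minimal usage sketch, following the ``load_rossi`` pattern used elsewhere in this file:
>>> from lifelines import datasets, CoxPHFitter
>>> rossi = datasets.load_rossi()
>>> cph = CoxPHFitter().fit(rossi, 'week', 'arrest')
>>> cph.predict_survival_function(rossi, times=[5, 25, 50])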
"""
return np.exp(-self.predict_cumulative_hazard(X, times=times, conditional_after=conditional_after))
def predict_percentile(self, X, p=0.5, conditional_after=None):
"""
Returns the median lifetimes for the individuals, by default. If the survival curve of an
individual does not cross 0.5, then the result is infinity.
http://stats.stackexchange.com/questions/102986/percentile-loss-functions
Parameters
----------
X: numpy array or DataFrame
a (n,d) covariate numpy array or DataFrame. If a DataFrame, columns
can be in any order. If a numpy array, columns must be in the
same order as the training data.
p: float, optional (default=0.5)
the percentile, must be between 0 and 1.
conditional_after: iterable, optional
Must be equal in size to X.shape[0] (denoted `n` above). An iterable (array, list, series) of possibly non-zero values that represent how long the
subject has already lived for. Ex: if :math:`T` is the unknown event time, then this represents
:math:`T | T > s`. This is useful for knowing the *remaining* hazard/survival of censored subjects.
The new timeline is the remaining duration of the subject, i.e. normalized back to starting at 0.
Returns
-------
percentiles: DataFrame
See Also
--------
predict_median
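Examples
---------
A minimal usage sketch, following the ``load_rossi`` pattern used elsewhere in this file:
>>> from lifelines import datasets, CoxPHFitter
>>> rossi = datasets.load_rossi()
>>> cph = CoxPHFitter().fit(rossi, 'week', 'arrest')
>>> cph.predict_percentile(rossi, p=0.75)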
"""
subjects = _get_index(X)
return qth_survival_times(p, self.predict_survival_function(X, conditional_after=conditional_after)[subjects]).T
def predict_median(self, X, conditional_after=None):
"""
Predict the median lifetimes for the individuals. If the survival curve of an
individual does not cross 0.5, then the result is infinity.
Parameters
----------
X: numpy array or DataFrame
a (n,d) covariate numpy array or DataFrame. If a DataFrame, columns
can be in any order. If a numpy array, columns must be in the
same order as the training data.
Returns
-------
percentiles: DataFrame
the median lifetimes for the individuals. If the survival curve of an
individual does not cross 0.5, then the result is infinity.
See Also
--------
predict_percentile
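Examples
---------
A minimal usage sketch, following the ``load_rossi`` pattern used elsewhere in this file:
>>> from lifelines import datasets, CoxPHFitter
>>> rossi = datasets.load_rossi()
>>> cph = CoxPHFitter().fit(rossi, 'week', 'arrest')
>>> cph.predict_median(rossi)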
"""
return self.predict_percentile(X, 0.5, conditional_after=conditional_after)
def predict_expectation(self, X):
r"""
Compute the expected lifetime, :math:`E[T]`, using covariates X. The expectation is computed
using the fact that :math:`E[T] = \int_0^\infty P(T > t) dt = \int_0^\infty S(t) dt`. To compute the integral, we use the trapezoidal rule to approximate it.
Caution
--------
If the survival function doesn't converge to 0, then the expectation is really infinity and the returned
values are meaningless/too large. In that case, using ``predict_median`` or ``predict_percentile`` would be better.
Parameters
----------
X: numpy array or DataFrame
a (n,d) covariate numpy array or DataFrame. If a DataFrame, columns
can be in any order. If a numpy array, columns must be in the
same order as the training data.
Returns
-------
expectations : DataFrame
Notes
-----
If X is a DataFrame, the order of the columns does not matter. But
if X is an array, then the column ordering is assumed to be the
same as the training dataset.
See Also
--------
predict_median
predict_percentile
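Examples
---------
A minimal usage sketch, following the ``load_rossi`` pattern used elsewhere in this file:
>>> from lifelines import datasets, CoxPHFitter
>>> rossi = datasets.load_rossi()
>>> cph = CoxPHFitter().fit(rossi, 'week', 'arrest')
>>> cph.predict_expectation(rossi)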
"""
subjects = _get_index(X)
v = self.predict_survival_function(X)[subjects]
return pd.DataFrame(trapz(v.values.T, v.index), index=subjects)
def _compute_baseline_hazard(self, partial_hazards, name):
# https://stats.stackexchange.com/questions/46532/cox-baseline-hazard
ind_hazards = partial_hazards.copy()
ind_hazards["P"] *= ind_hazards["W"]
ind_hazards["E"] *= ind_hazards["W"]
ind_hazards_summed_over_durations = ind_hazards.groupby("T")[["P", "E"]].sum()
ind_hazards_summed_over_durations["P"] = ind_hazards_summed_over_durations["P"].loc[::-1].cumsum()
baseline_hazard = pd.DataFrame(
ind_hazards_summed_over_durations["E"] / ind_hazards_summed_over_durations["P"], columns=[name]
)
baseline_hazard.index.name = None
return baseline_hazard
def _compute_baseline_hazards(self):
if self.strata:
index = self.durations.unique()
baseline_hazards_ = pd.DataFrame(index=index).sort_index()
for name, stratum_predicted_partial_hazards_ in self._predicted_partial_hazards_.groupby(self.strata):
baseline_hazards_ = baseline_hazards_.merge(
self._compute_baseline_hazard(stratum_predicted_partial_hazards_, name),
left_index=True,
right_index=True,
how="left",
)
return baseline_hazards_.fillna(0)
return self._compute_baseline_hazard(self._predicted_partial_hazards_, name="baseline hazard")
def _compute_baseline_cumulative_hazard(self):
cumulative = self.baseline_hazard_.cumsum()
if not self.strata:
cumulative = cumulative.rename(columns={"baseline hazard": "baseline cumulative hazard"})
return cumulative
def _compute_baseline_survival(self):
"""
Importantly, this agrees with what the KaplanMeierFitter produces. Ex:
Example
-------
>>> from lifelines.datasets import load_rossi
>>> from lifelines import CoxPHFitter, KaplanMeierFitter
>>> rossi = load_rossi()
>>> kmf = KaplanMeierFitter()
>>> kmf.fit(rossi['week'], rossi['arrest'])
>>> rossi2 = rossi[['week', 'arrest']].copy()
>>> rossi2['var1'] = np.random.randn(432)
>>> cph = CoxPHFitter()
>>> cph.fit(rossi2, 'week', 'arrest')
>>> ax = cph.baseline_survival_.plot()
>>> kmf.plot(ax=ax)
"""
survival_df = np.exp(-self.baseline_cumulative_hazard_)
if not self.strata:
survival_df = survival_df.rename(columns={"baseline cumulative hazard": "baseline survival"})
return survival_df
def plot(self, columns=None, hazard_ratios=False, ax=None, **errorbar_kwargs):
"""
Produces a visual representation of the coefficients (i.e. log hazard ratios), including their standard errors and magnitudes.
Parameters
----------
columns : list, optional
specify a subset of the columns to plot
hazard_ratios: bool, optional
by default, `plot` will present the log-hazard ratios (the coefficients). However, by turning this flag to True, the hazard ratios are presented instead.
errorbar_kwargs:
pass in additional plotting commands to matplotlib errorbar command
Examples
---------
>>> from lifelines import datasets, CoxPHFitter
>>> rossi = datasets.load_rossi()
>>> cph = CoxPHFitter().fit(rossi, 'week', 'arrest')
>>> cph.plot(hazard_ratios=True)
Returns
-------
ax: matplotlib axis
the matplotlib axis that can be edited.
"""
from matplotlib import pyplot as plt
if ax is None:
ax = plt.gca()
errorbar_kwargs.setdefault("c", "k")
errorbar_kwargs.setdefault("fmt", "s")
errorbar_kwargs.setdefault("markerfacecolor", "white")
errorbar_kwargs.setdefault("markeredgewidth", 1.25)
errorbar_kwargs.setdefault("elinewidth", 1.25)
errorbar_kwargs.setdefault("capsize", 3)
z = inv_normal_cdf(1 - self.alpha / 2)
user_supplied_columns = True
if columns is None:
user_supplied_columns = False
columns = self.params_.index
yaxis_locations = list(range(len(columns)))
log_hazards = self.params_.loc[columns].values.copy()
order = list(range(len(columns) - 1, -1, -1)) if user_supplied_columns else np.argsort(log_hazards)
if hazard_ratios:
exp_log_hazards = np.exp(log_hazards)
upper_errors = exp_log_hazards * (np.exp(z * self.standard_errors_[columns].values) - 1)
lower_errors = exp_log_hazards * (1 - np.exp(-z * self.standard_errors_[columns].values))
ax.errorbar(
exp_log_hazards[order],
yaxis_locations,
xerr=np.vstack([lower_errors[order], upper_errors[order]]),
**errorbar_kwargs
)
ax.set_xlabel("HR (%g%% CI)" % ((1 - self.alpha) * 100))
else:
symmetric_errors = z * self.standard_errors_[columns].values
ax.errorbar(log_hazards[order], yaxis_locations, xerr=symmetric_errors[order], **errorbar_kwargs)
ax.set_xlabel("log(HR) (%g%% CI)" % ((1 - self.alpha) * 100))
best_ylim = ax.get_ylim()
ax.vlines(1 if hazard_ratios else 0, -2, len(columns) + 1, linestyles="dashed", linewidths=1, alpha=0.65)
ax.set_ylim(best_ylim)
tick_labels = [columns[i] for i in order]
ax.set_yticks(yaxis_locations)
ax.set_yticklabels(tick_labels)
return ax
def plot_covariate_groups(self, covariates, values, plot_baseline=True, **kwargs):
"""
Produces a plot comparing the baseline survival curve of the model versus
what happens when a covariate(s) is varied over values in a group. This is useful to compare
subjects' survival as we vary covariate(s), all else being held equal. The baseline survival
curve is equal to the predicted survival curve at all average values in the original dataset.
Parameters
----------
covariates: string or list
a string (or list of strings) of the covariate(s) in the original dataset that we wish to vary.
values: 1d or 2d iterable
an iterable of the specific values we wish the covariate(s) to take on.
plot_baseline: bool
also display the baseline survival, defined as the survival at the mean of the original dataset.
kwargs:
pass in additional plotting commands.
Returns
-------
ax: matplotlib axis, or list of axis'
the matplotlib axis that can be edited.
Examples
---------
>>> from lifelines import datasets, CoxPHFitter
>>> rossi = datasets.load_rossi()
>>> cph = CoxPHFitter().fit(rossi, 'week', 'arrest')
>>> cph.plot_covariate_groups('prio', values=np.arange(0, 15, 3), cmap='coolwarm')
.. image:: images/plot_covariate_example1.png
>>> # multiple variables at once
>>> cph.plot_covariate_groups(['prio', 'paro'], values=[
>>> [0, 0],
>>> [5, 0],
>>> [10, 0],
>>> [0, 1],
>>> [5, 1],
>>> [10, 1]
>>> ], cmap='coolwarm')
.. image:: images/plot_covariate_example2.png
>>> # if you have categorical variables, you can do the following to see the
>>> # effect of all the categories on one plot.
>>> cph.plot_covariate_groups(['dummy1', 'dummy2', 'dummy3'], values=[[1, 0, 0], [0, 1, 0], [0, 0, 1]])
>>> # same as:
>>> cph.plot_covariate_groups(['dummy1', 'dummy2', 'dummy3'], values=np.eye(3))
"""
from matplotlib import pyplot as plt
covariates = _to_list(covariates)
n_covariates = len(covariates)
values = np.asarray(values)
if len(values.shape) == 1:
values = values[None, :].T
if n_covariates != values.shape[1]:
raise ValueError("The number of covariates must equal to second dimension of the values array.")
for covariate in covariates:
if covariate not in self.params_.index:
raise KeyError("covariate `%s` is not present in the original dataset" % covariate)
set_kwargs_drawstyle(kwargs, "steps-post")
if self.strata is None:
axes = kwargs.pop("ax", None) or plt.figure().add_subplot(111)
x_bar = self._norm_mean.to_frame().T
X = pd.concat([x_bar] * values.shape[0])
if np.array_equal(np.eye(n_covariates), values):
X.index = ["%s=1" % c for c in covariates]
else:
X.index = [", ".join("%s=%g" % (c, v) for (c, v) in zip(covariates, row)) for row in values]
for covariate, value in zip(covariates, values.T):
X[covariate] = value
self.predict_survival_function(X).plot(ax=axes, **kwargs)
if plot_baseline:
self.baseline_survival_.plot(ax=axes, ls=":", color="k", drawstyle="steps-post")
else:
axes = []
for stratum, baseline_survival_ in self.baseline_survival_.iteritems():
ax = plt.figure().add_subplot(1, 1, 1)
x_bar = self._norm_mean.to_frame().T
for name, value in zip(_to_list(self.strata), _to_tuple(stratum)):
x_bar[name] = value
X = pd.concat([x_bar] * values.shape[0])
if np.array_equal(np.eye(len(covariates)), values):
X.index = ["%s=1" % c for c in covariates]
else:
X.index = [", ".join("%s=%g" % (c, v) for (c, v) in zip(covariates, row)) for row in values]
for covariate, value in zip(covariates, values.T):
X[covariate] = value
self.predict_survival_function(X).plot(ax=ax, **kwargs)
if plot_baseline:
baseline_survival_.plot(
ax=ax, ls=":", label="stratum %s baseline survival" % str(stratum), drawstyle="steps-post"
)
plt.legend()
axes.append(ax)
return axes
def check_assumptions(
self, training_df, advice=True, show_plots=False, p_value_threshold=0.01, plot_n_bootstraps=10, columns=None
):
"""
Use this function to test the proportional hazards assumption. See usage example at
https://lifelines.readthedocs.io/en/latest/jupyter_notebooks/Proportional%20hazard%20assumption.html
Parameters
-----------
training_df: DataFrame
the original DataFrame used in the call to ``fit(...)`` or a sub-sampled version.
advice: boolean, optional
display advice as output to the user's screen
show_plots: boolean, optional
display plots of the scaled schoenfeld residuals and loess curves. This is an eyeball test for violations.
This will slow down the function significantly.
p_value_threshold: float, optional
the threshold to use to alert the user of violations. See note below.
plot_n_bootstraps: int, optional
in the plots displayed, also display plot_n_bootstraps bootstrapped loess curves. This will slow down
the function significantly.
columns: list, optional
specify a subset of columns to test.
Examples
----------
>>> from lifelines.datasets import load_rossi
>>> from lifelines import CoxPHFitter
>>>
>>> rossi = load_rossi()
>>> cph = CoxPHFitter().fit(rossi, 'week', 'arrest')
>>>
>>> cph.check_assumptions(rossi)
Notes
-------
The ``p_value_threshold`` is arbitrarily set at 0.01. Under the null, some covariates
will be below the threshold (i.e. by chance). This is compounded when there are many covariates.
Similarly, when there are lots of observations, even minor deviances from the proportional hazard
assumption will be flagged.
With that in mind, it's best to use a combination of statistical tests and eyeball tests to
determine the most serious violations.
References
-----------
section 5 in https://socialsciences.mcmaster.ca/jfox/Books/Companion/appendices/Appendix-Cox-Regression.pdf,
http://www.mwsug.org/proceedings/2006/stats/MWSUG-2006-SD08.pdf,
http://eprints.lse.ac.uk/84988/1/06_ParkHendry2015-ReassessingSchoenfeldTests_Final.pdf
"""
if not training_df.index.is_unique:
raise IndexError(
"`training_df` index should be unique for this exercise. Please make it unique or use `.reset_index(drop=True)` to force a unique index"
)
residuals = self.compute_residuals(training_df, kind="scaled_schoenfeld")
test_results = proportional_hazard_test(
self, training_df, time_transform=["rank", "km"], precomputed_residuals=residuals
)
residuals_and_duration = residuals.join(training_df[self.duration_col])
counter = 0
n = residuals_and_duration.shape[0]
for variable in self.params_.index.intersection(columns or self.params_.index):
minimum_observed_p_value = test_results.summary.loc[variable, "p"].min()
if np.round(minimum_observed_p_value, 2) > p_value_threshold:
continue
counter += 1
if counter == 1:
if advice:
print(
fill(
"""The ``p_value_threshold`` is set at %g. Even under the null hypothesis of no violations, some covariates will be below the threshold by chance. This is compounded when there are many covariates. Similarly, when there are lots of observations, even minor deviances from the proportional hazard assumption will be flagged."""
% p_value_threshold,
width=100,
)
)
print()
print(
fill(
"""With that in mind, it's best to use a combination of statistical tests and visual tests to determine the most serious violations. Produce visual plots using ``check_assumptions(..., show_plots=True)`` and looking for non-constant lines. See link [A] below for a full example.""",
width=100,
)
)
print()
test_results.print_summary()
print()
print()
print(
"%d. Variable '%s' failed the non-proportional test: p-value is %s."
% (counter, variable, format_p_value(4)(minimum_observed_p_value)),
end="\n\n",
)
if advice:
values = training_df[variable]
value_counts = values.value_counts()
n_uniques = value_counts.shape[0]
# Arbitrarily chosen 10 and 4 to check for ability to use strata col.
# This should capture dichotomous / low cardinality values.
if n_uniques <= 10 and value_counts.min() >= 5:
print(
fill(
" Advice: with so few unique values (only {0}), you can include `strata=['{1}', ...]` in the call in `.fit`. See documentation in link [E] below.".format(
n_uniques, variable
),
width=100,
)
)
else:
print(
fill(
""" Advice 1: the functional form of the variable '{var}' might be incorrect. That is, there may be non-linear terms missing. The proportional hazard test used is very sensitive to incorrect functional forms. See documentation in link [D] below on how to specify a functional form.""".format(
var=variable
),
width=100,
),
end="\n\n",
)
print(
fill(
""" Advice 2: try binning the variable '{var}' using pd.cut, and then specify it in `strata=['{var}', ...]` in the call in `.fit`. See documentation in link [B] below.""".format(
var=variable
),
width=100,
),
end="\n\n",
)
print(
fill(
""" Advice 3: try adding an interaction term with your time variable. See documentation in link [C] below.""",
width=100,
),
end="\n\n",
)
if show_plots:
from matplotlib import pyplot as plt
fig = plt.figure()
# plot variable against all time transformations.
for i, (transform_name, transformer) in enumerate(TimeTransformers().iter(["rank", "km"]), start=1):
p_value = test_results.summary.loc[(variable, transform_name), "p"]
ax = fig.add_subplot(1, 2, i)
y = residuals_and_duration[variable]
tt = transformer(self.durations, self.event_observed, self.weights)[self.event_observed.values]
ax.scatter(tt, y, alpha=0.75)
y_lowess = lowess(tt.values, y.values)
ax.plot(tt, y_lowess, color="k", alpha=1.0, linewidth=2)
# bootstrap some possible other lowess lines. This is an approximation of the 100% confidence intervals
for _ in range(plot_n_bootstraps):
ix = sorted(np.random.choice(n, n))
tt_ = tt.values[ix]
y_lowess = lowess(tt_, y.values[ix])
ax.plot(tt_, y_lowess, color="k", alpha=0.30)
best_xlim = ax.get_xlim()
ax.hlines(0, 0, tt.max(), linestyles="dashed", linewidths=1)
ax.set_xlim(best_xlim)
ax.set_xlabel("%s-transformed time\n(p=%.4f)" % (transform_name, p_value), fontsize=10)
fig.suptitle("Scaled Schoenfeld residuals of '%s'" % variable, fontsize=14)
plt.tight_layout()
plt.subplots_adjust(top=0.90)
if advice and counter > 0:
print(
dedent(
r"""
---
[A] https://lifelines.readthedocs.io/en/latest/jupyter_notebooks/Proportional%20hazard%20assumption.html
[B] https://lifelines.readthedocs.io/en/latest/jupyter_notebooks/Proportional%20hazard%20assumption.html#Bin-variable-and-stratify-on-it
[C] https://lifelines.readthedocs.io/en/latest/jupyter_notebooks/Proportional%20hazard%20assumption.html#Introduce-time-varying-covariates
[D] https://lifelines.readthedocs.io/en/latest/jupyter_notebooks/Proportional%20hazard%20assumption.html#Modify-the-functional-form
[E] https://lifelines.readthedocs.io/en/latest/jupyter_notebooks/Proportional%20hazard%20assumption.html#Stratification
"""
)
)
if counter == 0:
print("Proportional hazard assumption looks okay.")
@property
def score_(self):
"""
The concordance score (also known as the c-index) of the fit. The c-index is a generalization of the ROC AUC
to survival data, including censoring.
For this purpose, the ``score_`` is a measure of the predictive accuracy of the fitted model
on the training dataset.
References
----------
https://stats.stackexchange.com/questions/133817/stratified-concordance-index-survivalsurvconcordance
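Examples
---------
A minimal usage sketch, following the ``load_rossi`` pattern used elsewhere in this file:
>>> from lifelines import datasets, CoxPHFitter
>>> rossi = datasets.load_rossi()
>>> cph = CoxPHFitter().fit(rossi, 'week', 'arrest')
>>> cph.score_  # concordance index of the fit on the training data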
"""
# pylint: disable=access-member-before-definition
if not hasattr(self, "_concordance_score_"):
if self.strata:
# https://stats.stackexchange.com/questions/133817/stratified-concordance-index-survivalsurvconcordance
num_correct, num_tied, num_pairs = 0, 0, 0
for _, _df in self._predicted_partial_hazards_.groupby(self.strata):
if _df.shape[0] == 1:
continue
_num_correct, _num_tied, _num_pairs = _concordance_summary_statistics(
_df["T"].values, -_df["P"].values, _df["E"].values
)
num_correct += _num_correct
num_tied += _num_tied
num_pairs += _num_pairs
else:
df = self._predicted_partial_hazards_
num_correct, num_tied, num_pairs = _concordance_summary_statistics(
df["T"].values, -df["P"].values, df["E"].values
)
self._concordance_score_ = _concordance_ratio(num_correct, num_tied, num_pairs)
return self._concordance_score_
return self._concordance_score_
| [
"lifelines.utils.normalize",
"lifelines.plotting.set_kwargs_drawstyle",
"numpy.clip",
"lifelines.statistics.proportional_hazard_test",
"lifelines.utils.check_complete_separation",
"numpy.sqrt",
"lifelines.utils.check_low_var",
"lifelines.utils.string_justify",
"numpy.log",
"numpy.warnings.catch_warnings",
"numpy.argsort",
"lifelines.utils.ConvergenceError",
"numpy.array",
"numpy.einsum",
"textwrap.fill",
"lifelines.utils.format_p_value",
"numpy.linalg.norm",
"numpy.arange",
"textwrap.dedent",
"scipy.integrate.trapz",
"numpy.asarray",
"numpy.zeros_like",
"lifelines.fitters.Printer",
"numpy.multiply.outer",
"numpy.exp",
"numpy.dot",
"numpy.empty",
"lifelines.utils.check_for_numeric_dtypes_or_raise",
"numpy.vstack",
"pandas.DataFrame",
"warnings.warn",
"numpy.round",
"numpy.tile",
"numpy.eye",
"lifelines.utils.concordance._concordance_ratio",
"lifelines.statistics.StatisticalResult",
"numpy.ones",
"lifelines.utils._to_1d_array",
"numpy.random.choice",
"matplotlib.pyplot.gca",
"lifelines.utils.concordance._concordance_summary_statistics",
"numpy.warnings.filterwarnings",
"lifelines.statistics.TimeTransformers",
"numpy.any",
"scipy.stats.chi2.sf",
"numpy.outer",
"numpy.isnan",
"numpy.sign",
"lifelines.utils._to_list",
"lifelines.utils.inv_normal_cdf",
"lifelines.utils.StepSizer",
"lifelines.utils.interpolate_at_times",
"numpy.log2",
"lifelines.utils._to_tuple",
"time.time",
"warnings.filterwarnings",
"matplotlib.pyplot.legend",
"lifelines.utils._get_index",
"pandas.Series",
"matplotlib.pyplot.subplots_adjust",
"numpy.atleast_1d",
"numpy.unique",
"lifelines.utils.lowess.lowess",
"datetime.datetime.utcnow",
"lifelines.utils.coalesce",
"scipy.linalg.solve",
"numpy.append",
"numpy.errstate",
"numpy.zeros",
"numpy.linalg.inv",
"matplotlib.pyplot.figure",
"lifelines.statistics.chisq_test",
"matplotlib.pyplot.tight_layout",
"numpy.all",
"lifelines.utils.check_nans_or_infs",
"pandas.concat"
]
| [((10197, 10226), 'lifelines.utils.coalesce', 'coalesce', (['strata', 'self.strata'], {}), '(strata, self.strata)\n', (10205, 10226), False, 'from lifelines.utils import _get_index, _to_list, _to_tuple, _to_1d_array, inv_normal_cdf, normalize, qth_survival_times, coalesce, check_for_numeric_dtypes_or_raise, check_low_var, check_complete_separation, check_nans_or_infs, StatError, ConvergenceWarning, StatisticalWarning, StepSizer, ConvergenceError, string_justify, interpolate_at_times_and_return_pandas, CensoringType, interpolate_at_times, format_p_value\n'), ((10708, 10753), 'lifelines.utils.normalize', 'normalize', (['X', 'self._norm_mean', 'self._norm_std'], {}), '(X, self._norm_mean, self._norm_std)\n', (10717, 10753), False, 'from lifelines.utils import _get_index, _to_list, _to_tuple, _to_1d_array, inv_normal_cdf, normalize, qth_survival_times, coalesce, check_for_numeric_dtypes_or_raise, check_low_var, check_complete_separation, check_nans_or_infs, StatError, ConvergenceWarning, StatisticalWarning, StepSizer, ConvergenceError, string_justify, interpolate_at_times_and_return_pandas, CensoringType, interpolate_at_times, format_p_value\n'), ((13110, 13131), 'lifelines.utils.check_nans_or_infs', 'check_nans_or_infs', (['E'], {}), '(E)\n', (13128, 13131), False, 'from lifelines.utils import _get_index, _to_list, _to_tuple, _to_1d_array, inv_normal_cdf, normalize, qth_survival_times, coalesce, check_for_numeric_dtypes_or_raise, check_low_var, check_complete_separation, check_nans_or_infs, StatError, ConvergenceWarning, StatisticalWarning, StepSizer, ConvergenceError, string_justify, interpolate_at_times_and_return_pandas, CensoringType, interpolate_at_times, format_p_value\n'), ((13303, 13339), 'lifelines.utils.check_for_numeric_dtypes_or_raise', 'check_for_numeric_dtypes_or_raise', (['X'], {}), '(X)\n', (13336, 13339), False, 'from lifelines.utils import _get_index, _to_list, _to_tuple, _to_1d_array, inv_normal_cdf, normalize, qth_survival_times, coalesce, check_for_numeric_dtypes_or_raise, check_low_var, check_complete_separation, check_nans_or_infs, StatError, ConvergenceWarning, StatisticalWarning, StepSizer, ConvergenceError, string_justify, interpolate_at_times_and_return_pandas, CensoringType, interpolate_at_times, format_p_value\n'), ((13348, 13369), 'lifelines.utils.check_nans_or_infs', 'check_nans_or_infs', (['T'], {}), '(T)\n', (13366, 13369), False, 'from lifelines.utils import _get_index, _to_list, _to_tuple, _to_1d_array, inv_normal_cdf, normalize, qth_survival_times, coalesce, check_for_numeric_dtypes_or_raise, check_low_var, check_complete_separation, check_nans_or_infs, StatError, ConvergenceWarning, StatisticalWarning, StepSizer, ConvergenceError, string_justify, interpolate_at_times_and_return_pandas, CensoringType, interpolate_at_times, format_p_value\n'), ((13378, 13399), 'lifelines.utils.check_nans_or_infs', 'check_nans_or_infs', (['X'], {}), '(X)\n', (13396, 13399), False, 'from lifelines.utils import _get_index, _to_list, _to_tuple, _to_1d_array, inv_normal_cdf, normalize, qth_survival_times, coalesce, check_for_numeric_dtypes_or_raise, check_low_var, check_complete_separation, check_nans_or_infs, StatError, ConvergenceWarning, StatisticalWarning, StepSizer, ConvergenceError, string_justify, interpolate_at_times_and_return_pandas, CensoringType, interpolate_at_times, format_p_value\n'), ((13408, 13424), 'lifelines.utils.check_low_var', 'check_low_var', (['X'], {}), '(X)\n', (13421, 13424), False, 'from lifelines.utils import _get_index, _to_list, _to_tuple, 
_to_1d_array, inv_normal_cdf, normalize, qth_survival_times, coalesce, check_for_numeric_dtypes_or_raise, check_low_var, check_complete_separation, check_nans_or_infs, StatError, ConvergenceWarning, StatisticalWarning, StepSizer, ConvergenceError, string_justify, interpolate_at_times_and_return_pandas, CensoringType, interpolate_at_times, format_p_value\n'), ((13433, 13483), 'lifelines.utils.check_complete_separation', 'check_complete_separation', (['X', 'E', 'T', 'self.event_col'], {}), '(X, E, T, self.event_col)\n', (13458, 13483), False, 'from lifelines.utils import _get_index, _to_list, _to_tuple, _to_1d_array, inv_normal_cdf, normalize, qth_survival_times, coalesce, check_for_numeric_dtypes_or_raise, check_low_var, check_complete_separation, check_nans_or_infs, StatError, ConvergenceWarning, StatisticalWarning, StepSizer, ConvergenceError, string_justify, interpolate_at_times_and_return_pandas, CensoringType, interpolate_at_times, format_p_value\n'), ((16025, 16045), 'lifelines.utils.StepSizer', 'StepSizer', (['step_size'], {}), '(step_size)\n', (16034, 16045), False, 'from lifelines.utils import _get_index, _to_list, _to_tuple, _to_1d_array, inv_normal_cdf, normalize, qth_survival_times, coalesce, check_for_numeric_dtypes_or_raise, check_low_var, check_complete_separation, check_nans_or_infs, StatError, ConvergenceWarning, StatisticalWarning, StepSizer, ConvergenceError, string_justify, interpolate_at_times_and_return_pandas, CensoringType, interpolate_at_times, format_p_value\n'), ((16570, 16581), 'time.time', 'time.time', ([], {}), '()\n', (16579, 16581), False, 'import time\n'), ((23623, 23639), 'numpy.zeros', 'np.zeros', (['(d, d)'], {}), '((d, d))\n', (23631, 23639), True, 'import numpy as np\n'), ((23659, 23673), 'numpy.zeros', 'np.zeros', (['(d,)'], {}), '((d,))\n', (23667, 23673), True, 'import numpy as np\n'), ((23758, 23772), 'numpy.zeros', 'np.zeros', (['(d,)'], {}), '((d,))\n', (23766, 23772), True, 'import numpy as np\n'), ((24151, 24167), 'numpy.empty', 'np.empty', (['(d, d)'], {}), '((d, d))\n', (24159, 24167), True, 'import numpy as np\n'), ((27090, 27123), 'numpy.unique', 'np.unique', (['(-T)'], {'return_counts': '(True)'}), '(-T, return_counts=True)\n', (27099, 27123), True, 'import numpy as np\n'), ((30598, 30614), 'numpy.zeros', 'np.zeros', (['(d, d)'], {}), '((d, d))\n', (30606, 30614), True, 'import numpy as np\n'), ((30634, 30648), 'numpy.zeros', 'np.zeros', (['(d,)'], {}), '((d,))\n', (30642, 30648), True, 'import numpy as np\n'), ((30970, 31003), 'numpy.unique', 'np.unique', (['(-T)'], {'return_counts': '(True)'}), '(-T, return_counts=True)\n', (30979, 31003), True, 'import numpy as np\n'), ((36176, 36295), 'pandas.DataFrame', 'pd.DataFrame', (["{self.duration_col: T.values, self.event_col: E.values, 'martingale':\n martingale.values}"], {'index': 'index'}), "({self.duration_col: T.values, self.event_col: E.values,\n 'martingale': martingale.values}, index=index)\n", (36188, 36295), True, 'import pandas as pd\n'), ((38730, 38819), 'pandas.DataFrame', 'pd.DataFrame', (['schoenfeld_residuals[E, :]'], {'columns': 'self.params_.index', 'index': 'index[E]'}), '(schoenfeld_residuals[E, :], columns=self.params_.index, index=\n index[E])\n', (38742, 38819), True, 'import pandas as pd\n'), ((39657, 39673), 'numpy.empty', 'np.empty', (['(0, d)'], {}), '((0, d))\n', (39665, 39673), True, 'import numpy as np\n'), ((42829, 42899), 'pandas.DataFrame', 'pd.DataFrame', (['score_residuals'], {'columns': 'self.params_.index', 'index': 'index'}), '(score_residuals, 
columns=self.params_.index, index=index)\n', (42841, 42899), True, 'import pandas as pd\n'), ((43479, 43495), 'numpy.zeros', 'np.zeros', (['(n, d)'], {}), '((n, d))\n', (43487, 43495), True, 'import numpy as np\n'), ((45093, 45155), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'category': 'ConvergenceWarning'}), "('ignore', category=ConvergenceWarning)\n", (45116, 45155), False, 'import warnings\n'), ((45477, 45511), 'lifelines.utils.inv_normal_cdf', 'inv_normal_cdf', (['(1 - self.alpha / 2)'], {}), '(1 - self.alpha / 2)\n', (45491, 45511), False, 'from lifelines.utils import _get_index, _to_list, _to_tuple, _to_1d_array, inv_normal_cdf, normalize, qth_survival_times, coalesce, check_for_numeric_dtypes_or_raise, check_low_var, check_complete_separation, check_nans_or_infs, StatError, ConvergenceWarning, StatisticalWarning, StepSizer, ConvergenceError, string_justify, interpolate_at_times_and_return_pandas, CensoringType, interpolate_at_times, format_p_value\n'), ((45600, 45751), 'pandas.DataFrame', 'pd.DataFrame', (['np.c_[hazards - z * se, hazards + z * se]'], {'columns': "['%g%% lower-bound' % ci, '%g%% upper-bound' % ci]", 'index': 'self.params_.index'}), "(np.c_[hazards - z * se, hazards + z * se], columns=[\n '%g%% lower-bound' % ci, '%g%% upper-bound' % ci], index=self.params_.index\n )\n", (45612, 45751), True, 'import pandas as pd\n'), ((46068, 46118), 'pandas.Series', 'pd.Series', (['se'], {'name': '"""se"""', 'index': 'self.params_.index'}), "(se, name='se', index=self.params_.index)\n", (46077, 46118), True, 'import pandas as pd\n'), ((46624, 46643), 'scipy.stats.chi2.sf', 'stats.chi2.sf', (['U', '(1)'], {}), '(U, 1)\n', (46637, 46643), False, 'from scipy import stats\n'), ((46974, 47008), 'lifelines.utils.inv_normal_cdf', 'inv_normal_cdf', (['(1 - self.alpha / 2)'], {}), '(1 - self.alpha / 2)\n', (46988, 47008), False, 'from lifelines.utils import _get_index, _to_list, _to_tuple, _to_1d_array, inv_normal_cdf, normalize, qth_survival_times, coalesce, check_for_numeric_dtypes_or_raise, check_low_var, check_complete_separation, check_nans_or_infs, StatError, ConvergenceWarning, StatisticalWarning, StepSizer, ConvergenceError, string_justify, interpolate_at_times_and_return_pandas, CensoringType, interpolate_at_times, format_p_value\n'), ((48391, 48409), 'lifelines.utils.string_justify', 'string_justify', (['(25)'], {}), '(25)\n', (48405, 48409), False, 'from lifelines.utils import _get_index, _to_list, _to_tuple, _to_1d_array, inv_normal_cdf, normalize, qth_survival_times, coalesce, check_for_numeric_dtypes_or_raise, check_low_var, check_complete_separation, check_nans_or_infs, StatError, ConvergenceWarning, StatisticalWarning, StepSizer, ConvergenceError, string_justify, interpolate_at_times_and_return_pandas, CensoringType, interpolate_at_times, format_p_value\n'), ((49489, 49538), 'lifelines.fitters.Printer', 'Printer', (['headers', 'self', 'justify', 'decimals', 'kwargs'], {}), '(headers, self, justify, decimals, kwargs)\n', (49496, 49538), False, 'from lifelines.fitters import BaseFitter, Printer\n'), ((50444, 50498), 'lifelines.statistics.chisq_test', 'chisq_test', (['test_stat'], {'degrees_freedom': 'degrees_freedom'}), '(test_stat, degrees_freedom=degrees_freedom)\n', (50454, 50498), False, 'from lifelines.statistics import chisq_test, proportional_hazard_test, TimeTransformers, StatisticalResult\n'), ((50514, 50655), 'lifelines.statistics.StatisticalResult', 'StatisticalResult', (['p_value', 'test_stat'], {'name': '"""log-likelihood ratio 
test"""', 'null_distribution': '"""chi squared"""', 'degrees_freedom': 'degrees_freedom'}), "(p_value, test_stat, name='log-likelihood ratio test',\n null_distribution='chi squared', degrees_freedom=degrees_freedom)\n", (50531, 50655), False, 'from lifelines.statistics import chisq_test, proportional_hazard_test, TimeTransformers, StatisticalResult\n'), ((52907, 52920), 'lifelines.utils._get_index', '_get_index', (['X'], {}), '(X)\n', (52917, 52920), False, 'from lifelines.utils import _get_index, _to_list, _to_tuple, _to_1d_array, inv_normal_cdf, normalize, qth_survival_times, coalesce, check_for_numeric_dtypes_or_raise, check_low_var, check_complete_separation, check_nans_or_infs, StatError, ConvergenceWarning, StatisticalWarning, StepSizer, ConvergenceError, string_justify, interpolate_at_times_and_return_pandas, CensoringType, interpolate_at_times, format_p_value\n'), ((53143, 53182), 'lifelines.utils.normalize', 'normalize', (['X', 'self._norm_mean.values', '(1)'], {}), '(X, self._norm_mean.values, 1)\n', (53152, 53182), False, 'from lifelines.utils import _get_index, _to_list, _to_tuple, _to_1d_array, inv_normal_cdf, normalize, qth_survival_times, coalesce, check_for_numeric_dtypes_or_raise, check_low_var, check_complete_separation, check_nans_or_infs, StatError, ConvergenceWarning, StatisticalWarning, StepSizer, ConvergenceError, string_justify, interpolate_at_times_and_return_pandas, CensoringType, interpolate_at_times, format_p_value\n'), ((60676, 60689), 'lifelines.utils._get_index', '_get_index', (['X'], {}), '(X)\n', (60686, 60689), False, 'from lifelines.utils import _get_index, _to_list, _to_tuple, _to_1d_array, inv_normal_cdf, normalize, qth_survival_times, coalesce, check_for_numeric_dtypes_or_raise, check_low_var, check_complete_separation, check_nans_or_infs, StatError, ConvergenceWarning, StatisticalWarning, StepSizer, ConvergenceError, string_justify, interpolate_at_times_and_return_pandas, CensoringType, interpolate_at_times, format_p_value\n'), ((62979, 62992), 'lifelines.utils._get_index', '_get_index', (['X'], {}), '(X)\n', (62989, 62992), False, 'from lifelines.utils import _get_index, _to_list, _to_tuple, _to_1d_array, inv_normal_cdf, normalize, qth_survival_times, coalesce, check_for_numeric_dtypes_or_raise, check_low_var, check_complete_separation, check_nans_or_infs, StatError, ConvergenceWarning, StatisticalWarning, StepSizer, ConvergenceError, string_justify, interpolate_at_times_and_return_pandas, CensoringType, interpolate_at_times, format_p_value\n'), ((63618, 63731), 'pandas.DataFrame', 'pd.DataFrame', (["(ind_hazards_summed_over_durations['E'] / ind_hazards_summed_over_durations\n ['P'])"], {'columns': '[name]'}), "(ind_hazards_summed_over_durations['E'] /\n ind_hazards_summed_over_durations['P'], columns=[name])\n", (63630, 63731), True, 'import pandas as pd\n'), ((65511, 65552), 'numpy.exp', 'np.exp', (['(-self.baseline_cumulative_hazard_)'], {}), '(-self.baseline_cumulative_hazard_)\n', (65517, 65552), True, 'import numpy as np\n'), ((67166, 67200), 'lifelines.utils.inv_normal_cdf', 'inv_normal_cdf', (['(1 - self.alpha / 2)'], {}), '(1 - self.alpha / 2)\n', (67180, 67200), False, 'from lifelines.utils import _get_index, _to_list, _to_tuple, _to_1d_array, inv_normal_cdf, normalize, qth_survival_times, coalesce, check_for_numeric_dtypes_or_raise, check_low_var, check_complete_separation, check_nans_or_infs, StatError, ConvergenceWarning, StatisticalWarning, StepSizer, ConvergenceError, string_justify, interpolate_at_times_and_return_pandas, 
CensoringType, interpolate_at_times, format_p_value\n'), ((70977, 70997), 'lifelines.utils._to_list', '_to_list', (['covariates'], {}), '(covariates)\n', (70985, 70997), False, 'from lifelines.utils import _get_index, _to_list, _to_tuple, _to_1d_array, inv_normal_cdf, normalize, qth_survival_times, coalesce, check_for_numeric_dtypes_or_raise, check_low_var, check_complete_separation, check_nans_or_infs, StatError, ConvergenceWarning, StatisticalWarning, StepSizer, ConvergenceError, string_justify, interpolate_at_times_and_return_pandas, CensoringType, interpolate_at_times, format_p_value\n'), ((71054, 71072), 'numpy.asarray', 'np.asarray', (['values'], {}), '(values)\n', (71064, 71072), True, 'import numpy as np\n'), ((71500, 71542), 'lifelines.plotting.set_kwargs_drawstyle', 'set_kwargs_drawstyle', (['kwargs', '"""steps-post"""'], {}), "(kwargs, 'steps-post')\n", (71520, 71542), False, 'from lifelines.plotting import set_kwargs_drawstyle\n'), ((76189, 76300), 'lifelines.statistics.proportional_hazard_test', 'proportional_hazard_test', (['self', 'training_df'], {'time_transform': "['rank', 'km']", 'precomputed_residuals': 'residuals'}), "(self, training_df, time_transform=['rank', 'km'],\n precomputed_residuals=residuals)\n", (76213, 76300), False, 'from lifelines.statistics import chisq_test, proportional_hazard_test, TimeTransformers, StatisticalResult\n'), ((10945, 10993), 'pandas.Series', 'pd.Series', (['params_'], {'index': 'X.columns', 'name': '"""coef"""'}), "(params_, index=X.columns, name='coef')\n", (10954, 10993), True, 'import pandas as pd\n'), ((11051, 11071), 'numpy.exp', 'np.exp', (['self.params_'], {}), '(self.params_)\n', (11057, 11071), True, 'import numpy as np\n'), ((11164, 11204), 'numpy.outer', 'np.outer', (['self._norm_std', 'self._norm_std'], {}), '(self._norm_std, self._norm_std)\n', (11172, 11204), True, 'import numpy as np\n'), ((15988, 16002), 'numpy.zeros', 'np.zeros', (['(d,)'], {}), '((d,))\n', (15996, 16002), True, 'import numpy as np\n'), ((19281, 19292), 'numpy.linalg.norm', 'norm', (['delta'], {}), '(delta)\n', (19285, 19292), False, 'from numpy.linalg import norm, inv\n'), ((21489, 21760), 'warnings.warn', 'warnings.warn', (['("""Newton-Rhaphson convergence completed but norm(delta) is still high, %.3f. This may imply non-unique solutions to the maximum likelihood. Perhaps there is collinearity or complete separation in the dataset?\n"""\n % norm_delta)', 'ConvergenceWarning'], {}), '(\n """Newton-Rhaphson convergence completed but norm(delta) is still high, %.3f. This may imply non-unique solutions to the maximum likelihood. 
Perhaps there is collinearity or complete separation in the dataset?\n"""\n % norm_delta, ConvergenceWarning)\n', (21502, 21760), False, 'import warnings\n'), ((23838, 23852), 'numpy.zeros', 'np.zeros', (['(d,)'], {}), '((d,))\n', (23846, 23852), True, 'import numpy as np\n'), ((23854, 23868), 'numpy.zeros', 'np.zeros', (['(d,)'], {}), '((d,))\n', (23862, 23868), True, 'import numpy as np\n'), ((23905, 23921), 'numpy.zeros', 'np.zeros', (['(d, d)'], {}), '((d, d))\n', (23913, 23921), True, 'import numpy as np\n'), ((23923, 23939), 'numpy.zeros', 'np.zeros', (['(d, d)'], {}), '((d, d))\n', (23931, 23939), True, 'import numpy as np\n'), ((24597, 24627), 'numpy.multiply.outer', 'np.multiply.outer', (['xi', 'phi_x_i'], {}), '(xi, phi_x_i)\n', (24614, 24627), True, 'import numpy as np\n'), ((26751, 26765), 'numpy.zeros', 'np.zeros', (['(d,)'], {}), '((d,))\n', (26759, 26765), True, 'import numpy as np\n'), ((26814, 26828), 'numpy.zeros', 'np.zeros', (['(d,)'], {}), '((d,))\n', (26822, 26828), True, 'import numpy as np\n'), ((26855, 26871), 'numpy.zeros', 'np.zeros', (['(d, d)'], {}), '((d, d))\n', (26863, 26871), True, 'import numpy as np\n'), ((30813, 30827), 'numpy.zeros', 'np.zeros', (['(d,)'], {}), '((d,))\n', (30821, 30827), True, 'import numpy as np\n'), ((30829, 30843), 'numpy.zeros', 'np.zeros', (['(d,)'], {}), '((d,))\n', (30837, 30843), True, 'import numpy as np\n'), ((30880, 30896), 'numpy.zeros', 'np.zeros', (['(d, d)'], {}), '((d, d))\n', (30888, 30896), True, 'import numpy as np\n'), ((30898, 30914), 'numpy.zeros', 'np.zeros', (['(d, d)'], {}), '((d, d))\n', (30906, 30914), True, 'import numpy as np\n'), ((31424, 31449), 'numpy.dot', 'np.dot', (['X_at_t.T', 'phi_x_i'], {}), '(X_at_t.T, phi_x_i)\n', (31430, 31449), True, 'import numpy as np\n'), ((32200, 32247), 'numpy.einsum', 'np.einsum', (['"""a,ab->b"""', 'weights_deaths', 'xi_deaths'], {}), "('a,ab->b', weights_deaths, xi_deaths)\n", (32209, 32247), True, 'import numpy as np\n'), ((35930, 35941), 'numpy.empty', 'np.empty', (['(0)'], {}), '(0)\n', (35938, 35941), True, 'import numpy as np\n'), ((36492, 36520), 'numpy.warnings.catch_warnings', 'np.warnings.catch_warnings', ([], {}), '()\n', (36518, 36520), True, 'import numpy as np\n'), ((36534, 36570), 'numpy.warnings.filterwarnings', 'np.warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (36560, 36570), True, 'import numpy as np\n'), ((36702, 36716), 'numpy.sign', 'np.sign', (['rmart'], {}), '(rmart)\n', (36709, 36716), True, 'import numpy as np\n'), ((36719, 36751), 'numpy.sqrt', 'np.sqrt', (['(-2 * (rmart + log_term))'], {}), '(-2 * (rmart + log_term))\n', (36726, 36751), True, 'import numpy as np\n'), ((38193, 38209), 'numpy.empty', 'np.empty', (['(0, d)'], {}), '((0, d))\n', (38201, 38209), True, 'import numpy as np\n'), ((39146, 39155), 'numpy.any', 'np.any', (['E'], {}), '(E)\n', (39152, 39155), True, 'import numpy as np\n'), ((39287, 39303), 'numpy.zeros', 'np.zeros', (['(n, d)'], {}), '((n, d))\n', (39295, 39303), True, 'import numpy as np\n'), ((39411, 39427), 'numpy.zeros', 'np.zeros', (['(1, d)'], {}), '((1, d))\n', (39419, 39427), True, 'import numpy as np\n'), ((39429, 39445), 'numpy.zeros', 'np.zeros', (['(1, d)'], {}), '((1, d))\n', (39437, 39445), True, 'import numpy as np\n'), ((40994, 41010), 'numpy.zeros', 'np.zeros', (['(1, d)'], {}), '((1, d))\n', (41002, 41010), True, 'import numpy as np\n'), ((41520, 41536), 'numpy.zeros', 'np.zeros', (['(1, d)'], {}), '((1, d))\n', (41528, 41536), True, 'import numpy as np\n'), ((42416, 42432), 
'numpy.empty', 'np.empty', (['(0, d)'], {}), '((0, d))\n', (42424, 42432), True, 'import numpy as np\n'), ((43520, 43535), 'numpy.dot', 'np.dot', (['X', 'beta'], {}), '(X, beta)\n', (43526, 43535), True, 'import numpy as np\n'), ((47022, 47099), 'numpy.errstate', 'np.errstate', ([], {'invalid': '"""ignore"""', 'divide': '"""ignore"""', 'over': '"""ignore"""', 'under': '"""ignore"""'}), "(invalid='ignore', divide='ignore', over='ignore', under='ignore')\n", (47033, 47099), True, 'import numpy as np\n'), ((47118, 47156), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'self.params_.index'}), '(index=self.params_.index)\n', (47130, 47156), True, 'import pandas as pd\n'), ((53211, 53234), 'numpy.dot', 'np.dot', (['X', 'self.params_'], {}), '(X, self.params_)\n', (53217, 53234), True, 'import numpy as np\n'), ((55008, 55022), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (55020, 55022), True, 'import pandas as pd\n'), ((56814, 56827), 'lifelines.utils._get_index', '_get_index', (['v'], {}), '(v)\n', (56824, 56827), False, 'from lifelines.utils import _get_index, _to_list, _to_tuple, _to_1d_array, inv_normal_cdf, normalize, qth_survival_times, coalesce, check_for_numeric_dtypes_or_raise, check_low_var, check_complete_separation, check_nans_or_infs, StatError, ConvergenceWarning, StatisticalWarning, StepSizer, ConvergenceError, string_justify, interpolate_at_times_and_return_pandas, CensoringType, interpolate_at_times, format_p_value\n'), ((56849, 56904), 'lifelines.utils.coalesce', 'coalesce', (['times', 'self.baseline_cumulative_hazard_.index'], {}), '(times, self.baseline_cumulative_hazard_.index)\n', (56857, 56904), False, 'from lifelines.utils import _get_index, _to_list, _to_tuple, _to_1d_array, inv_normal_cdf, normalize, qth_survival_times, coalesce, check_for_numeric_dtypes_or_raise, check_low_var, check_complete_separation, check_nans_or_infs, StatError, ConvergenceWarning, StatisticalWarning, StepSizer, ConvergenceError, string_justify, interpolate_at_times_and_return_pandas, CensoringType, interpolate_at_times, format_p_value\n'), ((57540, 57601), 'pandas.DataFrame', 'pd.DataFrame', (['(c_0 * v.values[:, 0])'], {'columns': 'col', 'index': 'times_'}), '(c_0 * v.values[:, 0], columns=col, index=times_)\n', (57552, 57601), True, 'import pandas as pd\n'), ((63077, 63103), 'scipy.integrate.trapz', 'trapz', (['v.values.T', 'v.index'], {}), '(v.values.T, v.index)\n', (63082, 63103), False, 'from scipy.integrate import trapz\n'), ((66823, 66832), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (66830, 66832), True, 'from matplotlib import pyplot as plt\n'), ((67550, 67573), 'numpy.argsort', 'np.argsort', (['log_hazards'], {}), '(log_hazards)\n', (67560, 67573), True, 'import numpy as np\n'), ((67631, 67650), 'numpy.exp', 'np.exp', (['log_hazards'], {}), '(log_hazards)\n', (67637, 67650), True, 'import numpy as np\n'), ((71716, 71752), 'pandas.concat', 'pd.concat', (['([x_bar] * values.shape[0])'], {}), '([x_bar] * values.shape[0])\n', (71725, 71752), True, 'import pandas as pd\n'), ((84650, 84702), 'lifelines.utils.concordance._concordance_ratio', '_concordance_ratio', (['num_correct', 'num_tied', 'num_pairs'], {}), '(num_correct, num_tied, num_pairs)\n', (84668, 84702), False, 'from lifelines.utils.concordance import _concordance_summary_statistics, _concordance_ratio\n'), ((11142, 11161), 'numpy.linalg.inv', 'inv', (['self._hessian_'], {}), '(self._hessian_)\n', (11145, 11161), False, 'from numpy.linalg import norm, inv\n'), ((12624, 12649), 'numpy.ones', 'np.ones', 
(['self._n_examples'], {}), '(self._n_examples)\n', (12631, 12649), True, 'import numpy as np\n'), ((12811, 12839), 'numpy.ones', 'np.ones', (['(self._n_examples,)'], {}), '((self._n_examples,))\n', (12818, 12839), True, 'import numpy as np\n'), ((13644, 14088), 'warnings.warn', 'warnings.warn', (['"""It appears your weights are not integers, possibly propensity or sampling scores then?\nIt\'s important to know that the naive variance estimates of the coefficients are biased. Instead a) set `robust=True` in the call to `fit`, or b) use Monte Carlo to\nestimate the variances. See paper "Variance estimation when using inverse probability of treatment weighting (IPTW) with survival analysis\\"\n"""', 'StatisticalWarning'], {}), '(\n """It appears your weights are not integers, possibly propensity or sampling scores then?\nIt\'s important to know that the naive variance estimates of the coefficients are biased. Instead a) set `robust=True` in the call to `fit`, or b) use Monte Carlo to\nestimate the variances. See paper "Variance estimation when using inverse probability of treatment weighting (IPTW) with survival analysis\\"\n"""\n , StatisticalWarning)\n', (13657, 14088), False, 'import warnings\n'), ((16841, 16860), 'numpy.zeros_like', 'np.zeros_like', (['beta'], {}), '(beta)\n', (16854, 16860), True, 'import numpy as np\n'), ((16881, 16921), 'numpy.zeros', 'np.zeros', (['(beta.shape[0], beta.shape[0])'], {}), '((beta.shape[0], beta.shape[0]))\n', (16889, 16921), True, 'import numpy as np\n'), ((17169, 17186), 'numpy.all', 'np.all', (['(beta == 0)'], {}), '(beta == 0)\n', (17175, 17186), True, 'import numpy as np\n'), ((17773, 17823), 'scipy.linalg.solve', 'spsolve', (['(-h)', 'g'], {'assume_a': '"""pos"""', 'check_finite': '(False)'}), "(-h, g, assume_a='pos', check_finite=False)\n", (17780, 17823), True, 'from scipy.linalg import solve as spsolve, LinAlgError\n'), ((18841, 18856), 'numpy.isnan', 'np.isnan', (['delta'], {}), '(delta)\n', (18849, 18856), True, 'import numpy as np\n'), ((18881, 19146), 'lifelines.utils.ConvergenceError', 'ConvergenceError', (['"""delta contains nan value(s). Convergence halted. Please see the following tips in the lifelines documentation:\nhttps://lifelines.readthedocs.io/en/latest/Examples.html#problems-with-convergence-in-the-cox-proportional-hazard-model\n"""'], {}), '(\n """delta contains nan value(s). Convergence halted. 
Please see the following tips in the lifelines documentation:\nhttps://lifelines.readthedocs.io/en/latest/Examples.html#problems-with-convergence-in-the-cox-proportional-hazard-model\n"""\n )\n', (18897, 19146), False, 'from lifelines.utils import _get_index, _to_list, _to_tuple, _to_1d_array, inv_normal_cdf, normalize, qth_survival_times, coalesce, check_for_numeric_dtypes_or_raise, check_low_var, check_complete_separation, check_nans_or_infs, StatError, ConvergenceWarning, StatisticalWarning, StepSizer, ConvergenceError, string_justify, interpolate_at_times_and_return_pandas, CensoringType, interpolate_at_times, format_p_value\n'), ((21851, 21971), 'warnings.warn', 'warnings.warn', (["('Newton-Rhaphson failed to converge sufficiently in %d steps.\\n' % max_steps)", 'ConvergenceWarning'], {}), "(\n 'Newton-Rhaphson failed to converge sufficiently in %d steps.\\n' %\n max_steps, ConvergenceWarning)\n", (21864, 21971), False, 'import warnings\n'), ((24074, 24089), 'numpy.dot', 'np.dot', (['X', 'beta'], {}), '(X, beta)\n', (24080, 24089), True, 'import numpy as np\n'), ((28044, 28060), 'numpy.log', 'np.log', (['risk_phi'], {}), '(risk_phi)\n', (28050, 28060), True, 'import numpy as np\n'), ((29570, 29586), 'numpy.log', 'np.log', (['risk_phi'], {}), '(risk_phi)\n', (29576, 29586), True, 'import numpy as np\n'), ((31038, 31053), 'numpy.dot', 'np.dot', (['X', 'beta'], {}), '(X, beta)\n', (31044, 31053), True, 'import numpy as np\n'), ((32932, 32967), 'numpy.dot', 'np.dot', (['xi_deaths.T', 'phi_x_i_deaths'], {}), '(xi_deaths.T, phi_x_i_deaths)\n', (32938, 32967), True, 'import numpy as np\n'), ((36029, 36101), 'numpy.append', 'np.append', (['baseline_at_T', 'self.baseline_cumulative_hazard_[name].loc[T_]'], {}), '(baseline_at_T, self.baseline_cumulative_hazard_[name].loc[T_])\n', (36038, 36101), True, 'import numpy as np\n'), ((38425, 38496), 'numpy.append', 'np.append', (['schoenfeld_residuals', 'schoenfeld_residuals_in_strata'], {'axis': '(0)'}), '(schoenfeld_residuals, schoenfeld_residuals_in_strata, axis=0)\n', (38434, 38496), True, 'import numpy as np\n'), ((39573, 39596), 'numpy.dot', 'np.dot', (['X', 'self.params_'], {}), '(X, self.params_)\n', (39579, 39596), True, 'import numpy as np\n'), ((41316, 41382), 'numpy.append', 'np.append', (['schoenfeld_residuals', '(ei * (xi - weighted_mean))'], {'axis': '(0)'}), '(schoenfeld_residuals, ei * (xi - weighted_mean), axis=0)\n', (41325, 41382), True, 'import numpy as np\n'), ((42079, 42117), 'numpy.tile', 'np.tile', (['self._norm_std.values', '(d, 1)'], {}), '(self._norm_std.values, (d, 1))\n', (42086, 42117), True, 'import numpy as np\n'), ((42633, 42694), 'numpy.append', 'np.append', (['score_residuals', 'score_residuals_in_strata'], {'axis': '(0)'}), '(score_residuals, score_residuals_in_strata, axis=0)\n', (42642, 42694), True, 'import numpy as np\n'), ((47550, 47584), 'numpy.exp', 'np.exp', (['(-z * self.standard_errors_)'], {}), '(-z * self.standard_errors_)\n', (47556, 47584), True, 'import numpy as np\n'), ((47653, 47686), 'numpy.exp', 'np.exp', (['(z * self.standard_errors_)'], {}), '(z * self.standard_errors_)\n', (47659, 47686), True, 'import numpy as np\n'), ((47811, 47827), 'numpy.log2', 'np.log2', (["df['p']"], {}), "(df['p'])\n", (47818, 47827), True, 'import numpy as np\n'), ((55677, 55701), 'lifelines.utils._get_index', '_get_index', (['stratified_X'], {}), '(stratified_X)\n', (55687, 55701), False, 'from lifelines.utils import _get_index, _to_list, _to_tuple, _to_1d_array, inv_normal_cdf, normalize, qth_survival_times, 
coalesce, check_for_numeric_dtypes_or_raise, check_low_var, check_complete_separation, check_nans_or_infs, StatError, ConvergenceWarning, StatisticalWarning, StepSizer, ConvergenceError, string_justify, interpolate_at_times_and_return_pandas, CensoringType, interpolate_at_times, format_p_value\n'), ((55789, 55844), 'lifelines.utils.coalesce', 'coalesce', (['times', 'self.baseline_cumulative_hazard_.index'], {}), '(times, self.baseline_cumulative_hazard_.index)\n', (55797, 55844), False, 'from lifelines.utils import _get_index, _to_list, _to_tuple, _to_1d_array, inv_normal_cdf, normalize, qth_survival_times, coalesce, check_for_numeric_dtypes_or_raise, check_low_var, check_complete_separation, check_nans_or_infs, StatError, ConvergenceWarning, StatisticalWarning, StepSizer, ConvergenceError, string_justify, interpolate_at_times_and_return_pandas, CensoringType, interpolate_at_times, format_p_value\n'), ((57058, 57134), 'lifelines.utils.interpolate_at_times', 'interpolate_at_times', (['self.baseline_cumulative_hazard_', 'times_to_evaluate_at'], {}), '(self.baseline_cumulative_hazard_, times_to_evaluate_at)\n', (57078, 57134), False, 'from lifelines.utils import _get_index, _to_list, _to_tuple, _to_1d_array, inv_normal_cdf, normalize, qth_survival_times, coalesce, check_for_numeric_dtypes_or_raise, check_low_var, check_complete_separation, check_nans_or_infs, StatError, ConvergenceWarning, StatisticalWarning, StepSizer, ConvergenceError, string_justify, interpolate_at_times_and_return_pandas, CensoringType, interpolate_at_times, format_p_value\n'), ((57175, 57248), 'lifelines.utils.interpolate_at_times', 'interpolate_at_times', (['self.baseline_cumulative_hazard_', 'conditional_after'], {}), '(self.baseline_cumulative_hazard_, conditional_after)\n', (57195, 57248), False, 'from lifelines.utils import _get_index, _to_list, _to_tuple, _to_1d_array, inv_normal_cdf, normalize, qth_survival_times, coalesce, check_for_numeric_dtypes_or_raise, check_low_var, check_complete_separation, check_nans_or_infs, StatError, ConvergenceWarning, StatisticalWarning, StepSizer, ConvergenceError, string_justify, interpolate_at_times_and_return_pandas, CensoringType, interpolate_at_times, format_p_value\n'), ((57271, 57322), 'numpy.clip', 'np.clip', (['(c_0 - c_0_conditional_after).T', '(0)', 'np.inf'], {}), '((c_0 - c_0_conditional_after).T, 0, np.inf)\n', (57278, 57322), True, 'import numpy as np\n'), ((57381, 57404), 'numpy.tile', 'np.tile', (['times_', '(n, 1)'], {}), '(times_, (n, 1))\n', (57388, 57404), True, 'import numpy as np\n'), ((71784, 71804), 'numpy.eye', 'np.eye', (['n_covariates'], {}), '(n_covariates)\n', (71790, 71804), True, 'import numpy as np\n'), ((72673, 72709), 'pandas.concat', 'pd.concat', (['([x_bar] * values.shape[0])'], {}), '([x_bar] * values.shape[0])\n', (72682, 72709), True, 'import pandas as pd\n'), ((73389, 73401), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (73399, 73401), True, 'from matplotlib import pyplot as plt\n'), ((76654, 76691), 'numpy.round', 'np.round', (['minumum_observed_p_value', '(2)'], {}), '(minumum_observed_p_value, 2)\n', (76662, 76691), True, 'import numpy as np\n'), ((80506, 80518), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (80516, 80518), True, 'from matplotlib import pyplot as plt\n'), ((82008, 82026), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (82024, 82026), True, 'from matplotlib import pyplot as plt\n'), ((82043, 82071), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], 
{'top': '(0.9)'}), '(top=0.9)\n', (82062, 82071), True, 'from matplotlib import pyplot as plt\n'), ((82144, 82919), 'textwrap.dedent', 'dedent', (['"""\n ---\n [A] https://lifelines.readthedocs.io/en/latest/jupyter_notebooks/Proportional%20hazard%20assumption.html\n [B] https://lifelines.readthedocs.io/en/latest/jupyter_notebooks/Proportional%20hazard%20assumption.html#Bin-variable-and-stratify-on-it\n [C] https://lifelines.readthedocs.io/en/latest/jupyter_notebooks/Proportional%20hazard%20assumption.html#Introduce-time-varying-covariates\n [D] https://lifelines.readthedocs.io/en/latest/jupyter_notebooks/Proportional%20hazard%20assumption.html#Modify-the-functional-form\n [E] https://lifelines.readthedocs.io/en/latest/jupyter_notebooks/Proportional%20hazard%20assumption.html#Stratification\n """'], {}), '(\n """\n ---\n [A] https://lifelines.readthedocs.io/en/latest/jupyter_notebooks/Proportional%20hazard%20assumption.html\n [B] https://lifelines.readthedocs.io/en/latest/jupyter_notebooks/Proportional%20hazard%20assumption.html#Bin-variable-and-stratify-on-it\n [C] https://lifelines.readthedocs.io/en/latest/jupyter_notebooks/Proportional%20hazard%20assumption.html#Introduce-time-varying-covariates\n [D] https://lifelines.readthedocs.io/en/latest/jupyter_notebooks/Proportional%20hazard%20assumption.html#Modify-the-functional-form\n [E] https://lifelines.readthedocs.io/en/latest/jupyter_notebooks/Proportional%20hazard%20assumption.html#Stratification\n """\n )\n', (82150, 82919), False, 'from textwrap import dedent, fill\n'), ((84491, 84576), 'lifelines.utils.concordance._concordance_summary_statistics', '_concordance_summary_statistics', (["df['T'].values", "(-df['P'].values)", "df['E'].values"], {}), "(df['T'].values, -df['P'].values, df['E'].values\n )\n", (84522, 84576), False, 'from lifelines.utils.concordance import _concordance_summary_statistics, _concordance_ratio\n'), ((9858, 9875), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (9873, 9875), False, 'from datetime import datetime\n'), ((18429, 18733), 'lifelines.utils.ConvergenceError', 'ConvergenceError', (['"""Convergence halted due to matrix inversion problems. Suspicion is high collinearity. Please see the following tips in the lifelines documentation:\nhttps://lifelines.readthedocs.io/en/latest/Examples.html#problems-with-convergence-in-the-cox-proportional-hazard-model\n"""', 'e'], {}), '(\n """Convergence halted due to matrix inversion problems. Suspicion is high collinearity. 
Please see the following tips in the lifelines documentation:\nhttps://lifelines.readthedocs.io/en/latest/Examples.html#problems-with-convergence-in-the-cox-proportional-hazard-model\n"""\n , e)\n', (18445, 18733), False, 'from lifelines.utils import _get_index, _to_list, _to_tuple, _to_1d_array, inv_normal_cdf, normalize, qth_survival_times, coalesce, check_for_numeric_dtypes_or_raise, check_low_var, check_complete_separation, check_nans_or_infs, StatError, ConvergenceWarning, StatisticalWarning, StepSizer, ConvergenceError, string_justify, interpolate_at_times_and_return_pandas, CensoringType, interpolate_at_times, format_p_value\n'), ((25778, 25806), 'numpy.arange', 'np.arange', (['tied_death_counts'], {}), '(tied_death_counts)\n', (25787, 25806), True, 'import numpy as np\n'), ((25939, 25981), 'numpy.outer', 'np.outer', (['increasing_proportion', 'tie_phi_x'], {}), '(increasing_proportion, tie_phi_x)\n', (25947, 25981), True, 'import numpy as np\n'), ((26003, 26045), 'numpy.einsum', 'np.einsum', (['"""ab,i->ab"""', 'risk_phi_x_x', 'denom'], {}), "('ab,i->ab', risk_phi_x_x, denom)\n", (26012, 26045), True, 'import numpy as np\n'), ((26048, 26113), 'numpy.einsum', 'np.einsum', (['"""ab,i->ab"""', 'tie_phi_x_x', '(increasing_proportion * denom)'], {}), "('ab,i->ab', tie_phi_x_x, increasing_proportion * denom)\n", (26057, 26113), True, 'import numpy as np\n'), ((26200, 26220), 'numpy.array', 'np.array', (['[risk_phi]'], {}), '([risk_phi])\n', (26208, 26220), True, 'import numpy as np\n'), ((26500, 26525), 'numpy.dot', 'np.dot', (['x_death_sum', 'beta'], {}), '(x_death_sum, beta)\n', (26506, 26525), True, 'import numpy as np\n'), ((33171, 33213), 'numpy.outer', 'np.outer', (['increasing_proportion', 'tie_phi_x'], {}), '(increasing_proportion, tie_phi_x)\n', (33179, 33213), True, 'import numpy as np\n'), ((33748, 33790), 'numpy.einsum', 'np.einsum', (['"""ab,i->ab"""', 'risk_phi_x_x', 'denom'], {}), "('ab,i->ab', risk_phi_x_x, denom)\n", (33757, 33790), True, 'import numpy as np\n'), ((33793, 33858), 'numpy.einsum', 'np.einsum', (['"""ab,i->ab"""', 'tie_phi_x_x', '(increasing_proportion * denom)'], {}), "('ab,i->ab', tie_phi_x_x, increasing_proportion * denom)\n", (33802, 33858), True, 'import numpy as np\n'), ((34047, 34067), 'numpy.array', 'np.array', (['[risk_phi]'], {}), '([risk_phi])\n', (34055, 34067), True, 'import numpy as np\n'), ((34580, 34605), 'numpy.dot', 'np.dot', (['x_death_sum', 'beta'], {}), '(x_death_sum, beta)\n', (34586, 34605), True, 'import numpy as np\n'), ((36649, 36680), 'numpy.log', 'np.log', (['(E.values - rmart.values)'], {}), '(E.values - rmart.values)\n', (36655, 36680), True, 'import numpy as np\n'), ((54795, 54815), 'numpy.atleast_1d', 'np.atleast_1d', (['times'], {}), '(times)\n', (54808, 54815), True, 'import numpy as np\n'), ((54904, 54935), 'lifelines.utils._to_1d_array', '_to_1d_array', (['conditional_after'], {}), '(conditional_after)\n', (54916, 54935), False, 'from lifelines.utils import _get_index, _to_list, _to_tuple, _to_1d_array, inv_normal_cdf, normalize, qth_survival_times, coalesce, check_for_numeric_dtypes_or_raise, check_low_var, check_complete_separation, check_nans_or_infs, StatError, ConvergenceWarning, StatisticalWarning, StepSizer, ConvergenceError, string_justify, interpolate_at_times_and_return_pandas, CensoringType, interpolate_at_times, format_p_value\n'), ((56054, 56108), 'lifelines.utils.interpolate_at_times', 'interpolate_at_times', (['strata_c_0', 'times_to_evaluate_at'], {}), '(strata_c_0, times_to_evaluate_at)\n', (56074, 
56108), False, 'from lifelines.utils import _get_index, _to_list, _to_tuple, _to_1d_array, inv_normal_cdf, normalize, qth_survival_times, coalesce, check_for_numeric_dtypes_or_raise, check_low_var, check_complete_separation, check_nans_or_infs, StatError, ConvergenceWarning, StatisticalWarning, StepSizer, ConvergenceError, string_justify, interpolate_at_times_and_return_pandas, CensoringType, interpolate_at_times, format_p_value\n'), ((56153, 56204), 'lifelines.utils.interpolate_at_times', 'interpolate_at_times', (['strata_c_0', 'conditional_after'], {}), '(strata_c_0, conditional_after)\n', (56173, 56204), False, 'from lifelines.utils import _get_index, _to_list, _to_tuple, _to_1d_array, inv_normal_cdf, normalize, qth_survival_times, coalesce, check_for_numeric_dtypes_or_raise, check_low_var, check_complete_separation, check_nans_or_infs, StatError, ConvergenceWarning, StatisticalWarning, StepSizer, ConvergenceError, string_justify, interpolate_at_times_and_return_pandas, CensoringType, interpolate_at_times, format_p_value\n'), ((56232, 56284), 'numpy.clip', 'np.clip', (['(c_0_ - c_0_conditional_after).T', '(0)', 'np.inf'], {}), '((c_0_ - c_0_conditional_after).T, 0, np.inf)\n', (56239, 56284), True, 'import numpy as np\n'), ((56351, 56375), 'numpy.tile', 'np.tile', (['times_', '(n_, 1)'], {}), '(times_, (n_, 1))\n', (56358, 56375), True, 'import numpy as np\n'), ((56544, 56606), 'pandas.DataFrame', 'pd.DataFrame', (['(c_0_ * v.values[:, 0])'], {'columns': 'col', 'index': 'times_'}), '(c_0_ * v.values[:, 0], columns=col, index=times_)\n', (56556, 56606), True, 'import pandas as pd\n'), ((56991, 57014), 'numpy.tile', 'np.tile', (['times_', '(n, 1)'], {}), '(times_, (n, 1))\n', (56998, 57014), True, 'import numpy as np\n'), ((57427, 57503), 'lifelines.utils.interpolate_at_times', 'interpolate_at_times', (['self.baseline_cumulative_hazard_', 'times_to_evaluate_at'], {}), '(self.baseline_cumulative_hazard_, times_to_evaluate_at)\n', (57447, 57503), False, 'from lifelines.utils import _get_index, _to_list, _to_tuple, _to_1d_array, inv_normal_cdf, normalize, qth_survival_times, coalesce, check_for_numeric_dtypes_or_raise, check_low_var, check_complete_separation, check_nans_or_infs, StatError, ConvergenceWarning, StatisticalWarning, StepSizer, ConvergenceError, string_justify, interpolate_at_times_and_return_pandas, CensoringType, interpolate_at_times, format_p_value\n'), ((63966, 63991), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'index'}), '(index=index)\n', (63978, 63991), True, 'import pandas as pd\n'), ((67697, 67746), 'numpy.exp', 'np.exp', (['(z * self.standard_errors_[columns].values)'], {}), '(z * self.standard_errors_[columns].values)\n', (67703, 67746), True, 'import numpy as np\n'), ((67802, 67852), 'numpy.exp', 'np.exp', (['(-z * self.standard_errors_[columns].values)'], {}), '(-z * self.standard_errors_[columns].values)\n', (67808, 67852), True, 'import numpy as np\n'), ((67973, 68026), 'numpy.vstack', 'np.vstack', (['[lower_errors[order], upper_errors[order]]'], {}), '([lower_errors[order], upper_errors[order]])\n', (67982, 68026), True, 'import numpy as np\n'), ((72568, 72589), 'lifelines.utils._to_list', '_to_list', (['self.strata'], {}), '(self.strata)\n', (72576, 72589), False, 'from lifelines.utils import _get_index, _to_list, _to_tuple, _to_1d_array, inv_normal_cdf, normalize, qth_survival_times, coalesce, check_for_numeric_dtypes_or_raise, check_low_var, check_complete_separation, check_nans_or_infs, StatError, ConvergenceWarning, StatisticalWarning, StepSizer, 
ConvergenceError, string_justify, interpolate_at_times_and_return_pandas, CensoringType, interpolate_at_times, format_p_value\n'), ((72591, 72609), 'lifelines.utils._to_tuple', '_to_tuple', (['stratum'], {}), '(stratum)\n', (72600, 72609), False, 'from lifelines.utils import _get_index, _to_list, _to_tuple, _to_1d_array, inv_normal_cdf, normalize, qth_survival_times, coalesce, check_for_numeric_dtypes_or_raise, check_low_var, check_complete_separation, check_nans_or_infs, StatError, ConvergenceWarning, StatisticalWarning, StepSizer, ConvergenceError, string_justify, interpolate_at_times_and_return_pandas, CensoringType, interpolate_at_times, format_p_value\n'), ((81099, 81126), 'lifelines.utils.lowess.lowess', 'lowess', (['tt.values', 'y.values'], {}), '(tt.values, y.values)\n', (81105, 81126), False, 'from lifelines.utils.lowess import lowess\n'), ((84104, 84192), 'lifelines.utils.concordance._concordance_summary_statistics', '_concordance_summary_statistics', (["_df['T'].values", "(-_df['P'].values)", "_df['E'].values"], {}), "(_df['T'].values, -_df['P'].values, _df['E']\n .values)\n", (84135, 84192), False, 'from lifelines.utils.concordance import _concordance_summary_statistics, _concordance_ratio\n'), ((12186, 12207), 'lifelines.utils._to_list', '_to_list', (['self.strata'], {}), '(self.strata)\n', (12194, 12207), False, 'from lifelines.utils import _get_index, _to_list, _to_tuple, _to_1d_array, inv_normal_cdf, normalize, qth_survival_times, coalesce, check_for_numeric_dtypes_or_raise, check_low_var, check_complete_separation, check_nans_or_infs, StatError, ConvergenceWarning, StatisticalWarning, StepSizer, ConvergenceError, string_justify, interpolate_at_times_and_return_pandas, CensoringType, interpolate_at_times, format_p_value\n'), ((17931, 18220), 'lifelines.utils.ConvergenceError', 'ConvergenceError', (['"""Hessian or gradient contains nan or inf value(s). Convergence halted. Please see the following tips in the lifelines documentation:\nhttps://lifelines.readthedocs.io/en/latest/Examples.html#problems-with-convergence-in-the-cox-proportional-hazard-model\n"""', 'e'], {}), '(\n """Hessian or gradient contains nan or inf value(s). Convergence halted. 
Please see the following tips in the lifelines documentation:\nhttps://lifelines.readthedocs.io/en/latest/Examples.html#problems-with-convergence-in-the-cox-proportional-hazard-model\n"""\n , e)\n', (17947, 18220), False, 'from lifelines.utils import _get_index, _to_list, _to_tuple, _to_1d_array, inv_normal_cdf, normalize, qth_survival_times, coalesce, check_for_numeric_dtypes_or_raise, check_low_var, check_complete_separation, check_nans_or_infs, StatError, ConvergenceWarning, StatisticalWarning, StepSizer, ConvergenceError, string_justify, interpolate_at_times_and_return_pandas, CensoringType, interpolate_at_times, format_p_value\n'), ((55981, 56005), 'numpy.tile', 'np.tile', (['times_', '(n_, 1)'], {}), '(times_, (n_, 1))\n', (55988, 56005), True, 'import numpy as np\n'), ((56403, 56457), 'lifelines.utils.interpolate_at_times', 'interpolate_at_times', (['strata_c_0', 'times_to_evaluate_at'], {}), '(strata_c_0, times_to_evaluate_at)\n', (56423, 56457), False, 'from lifelines.utils import _get_index, _to_list, _to_tuple, _to_1d_array, inv_normal_cdf, normalize, qth_survival_times, coalesce, check_for_numeric_dtypes_or_raise, check_low_var, check_complete_separation, check_nans_or_infs, StatError, ConvergenceWarning, StatisticalWarning, StepSizer, ConvergenceError, string_justify, interpolate_at_times_and_return_pandas, CensoringType, interpolate_at_times, format_p_value\n'), ((71621, 71633), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (71631, 71633), True, 'from matplotlib import pyplot as plt\n'), ((72441, 72453), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (72451, 72453), True, 'from matplotlib import pyplot as plt\n'), ((76872, 77241), 'textwrap.fill', 'fill', (["('The ``p_value_threshold`` is set at %g. Even under the null hypothesis of no violations, some covariates will be below the threshold by chance. This is compounded when there are many covariates. Similarly, when there are lots of observations, even minor deviances from the proportional hazard assumption will be flagged.'\n % p_value_threshold)"], {'width': '(100)'}), "(\n 'The ``p_value_threshold`` is set at %g. Even under the null hypothesis of no violations, some covariates will be below the threshold by chance. This is compounded when there are many covariates. Similarly, when there are lots of observations, even minor deviances from the proportional hazard assumption will be flagged.'\n % p_value_threshold, width=100)\n", (76876, 77241), False, 'from textwrap import dedent, fill\n'), ((77448, 77752), 'textwrap.fill', 'fill', (['"""With that in mind, it\'s best to use a combination of statistical tests and visual tests to determine the most serious violations. Produce visual plots using ``check_assumptions(..., show_plots=True)`` and looking for non-constant lines. See link [A] below for a full example."""'], {'width': '(100)'}), '(\n "With that in mind, it\'s best to use a combination of statistical tests and visual tests to determine the most serious violations. Produce visual plots using ``check_assumptions(..., show_plots=True)`` and looking for non-constant lines. See link [A] below for a full example."\n , width=100)\n', (77452, 77752), False, 'from textwrap import dedent, fill\n'), ((80130, 80264), 'textwrap.fill', 'fill', (['""" Advice 3: try adding an interaction term with your time variable. See documentation in link [C] below."""'], {'width': '(100)'}), "(\n ' Advice 3: try adding an interaction term with your time variable. 
See documentation in link [C] below.'\n , width=100)\n", (80134, 80264), False, 'from textwrap import dedent, fill\n'), ((81523, 81548), 'lifelines.utils.lowess.lowess', 'lowess', (['tt_', 'y.values[ix]'], {}), '(tt_, y.values[ix])\n', (81529, 81548), False, 'from lifelines.utils.lowess import lowess\n'), ((26547, 26560), 'numpy.log', 'np.log', (['denom'], {}), '(denom)\n', (26553, 26560), True, 'import numpy as np\n'), ((34627, 34640), 'numpy.log', 'np.log', (['denom'], {}), '(denom)\n', (34633, 34640), True, 'import numpy as np\n'), ((40800, 40816), 'numpy.zeros', 'np.zeros', (['(1, d)'], {}), '((1, d))\n', (40808, 40816), True, 'import numpy as np\n'), ((55280, 55560), 'textwrap.dedent', 'dedent', (['("""The stratum %s was not found in the original training data. For example, try\n the following on the original dataset, df: `df.groupby(%s).size()`. Expected is that %s is not present in the output."""\n % (stratum, self.strata, stratum))'], {}), '(\n """The stratum %s was not found in the original training data. For example, try\n the following on the original dataset, df: `df.groupby(%s).size()`. Expected is that %s is not present in the output."""\n % (stratum, self.strata, stratum))\n', (55286, 55560), False, 'from textwrap import dedent, fill\n'), ((78112, 78129), 'lifelines.utils.format_p_value', 'format_p_value', (['(4)'], {}), '(4)\n', (78126, 78129), False, 'from lifelines.utils import _get_index, _to_list, _to_tuple, _to_1d_array, inv_normal_cdf, normalize, qth_survival_times, coalesce, check_for_numeric_dtypes_or_raise, check_low_var, check_complete_separation, check_nans_or_infs, StatError, ConvergenceWarning, StatisticalWarning, StepSizer, ConvergenceError, string_justify, interpolate_at_times_and_return_pandas, CensoringType, interpolate_at_times, format_p_value\n'), ((80652, 80670), 'lifelines.statistics.TimeTransformers', 'TimeTransformers', ([], {}), '()\n', (80668, 80670), False, 'from lifelines.statistics import chisq_test, proportional_hazard_test, TimeTransformers, StatisticalResult\n'), ((81420, 81442), 'numpy.random.choice', 'np.random.choice', (['n', 'n'], {}), '(n, n)\n', (81436, 81442), True, 'import numpy as np\n'), ((19683, 19694), 'time.time', 'time.time', ([], {}), '()\n', (19692, 19694), False, 'import time\n'), ((20523, 20823), 'warnings.warn', 'warnings.warn', (['"""The log-likelihood is getting suspiciously close to 0 and the delta is still large. There may be complete separation in the dataset. This may result in incorrect inference of coefficients. See https://stats.stackexchange.com/q/11109/11867 for more.\n"""', 'ConvergenceWarning'], {}), '(\n """The log-likelihood is getting suspiciously close to 0 and the delta is still large. There may be complete separation in the dataset. This may result in incorrect inference of coefficients. See https://stats.stackexchange.com/q/11109/11867 for more.\n"""\n , ConvergenceWarning)\n', (20536, 20823), False, 'import warnings\n'), ((27935, 27963), 'numpy.arange', 'np.arange', (['tied_death_counts'], {}), '(tied_death_counts)\n', (27944, 27963), True, 'import numpy as np\n'), ((29461, 29489), 'numpy.arange', 'np.arange', (['tied_death_counts'], {}), '(tied_death_counts)\n', (29470, 29489), True, 'import numpy as np\n')] |
"""
Test configuration loading
@author aevans
"""
import os
from nlp_server.config import load_config
def test_load_config():
"""
Test loading a configuration
"""
current_dir = os.path.curdir
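    # the fixture path built below is resolved relative to the process working directory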
test_path = os.path.sep.join([current_dir, 'data', 'test_config.json'])
cfg = load_config.load_config(test_path)
assert cfg is not None
assert cfg.use_gpu is False
| [
"os.path.sep.join",
"nlp_server.config.load_config.load_config"
]
| [((229, 288), 'os.path.sep.join', 'os.path.sep.join', (["[current_dir, 'data', 'test_config.json']"], {}), "([current_dir, 'data', 'test_config.json'])\n", (245, 288), False, 'import os\n'), ((299, 333), 'nlp_server.config.load_config.load_config', 'load_config.load_config', (['test_path'], {}), '(test_path)\n', (322, 333), False, 'from nlp_server.config import load_config\n')] |
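A minimal fixture for the test above only needs the one field it asserts on; the sketch below is an illustrative assumption (the real data/test_config.json may define more settings, and the helper name is hypothetical):

# Hypothetical helper to (re)create the minimal fixture test_load_config expects.
import json
import os

def write_minimal_test_config(base_dir="."):
    # use_gpu is the only field the test asserts on
    fixture = {"use_gpu": False}
    target = os.path.join(base_dir, "data", "test_config.json")
    os.makedirs(os.path.dirname(target), exist_ok=True)
    with open(target, "w") as fh:
        json.dump(fixture, fh)
    return target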
import os, json, inspect
import mimetypes
from html2text import html2text
from RestrictedPython import compile_restricted, safe_globals
import RestrictedPython.Guards
import frappe
import frappe.utils
import frappe.utils.data
from frappe.website.utils import (get_shade, get_toc, get_next_link)
from frappe.modules import scrub
from frappe.www.printview import get_visible_columns
import frappe.exceptions
class ServerScriptNotEnabled(frappe.PermissionError): pass
def safe_exec(script, _globals=None, _locals=None):
	# server scripts (and script reports) must be enabled via site_config.json (server_script_enabled)
if not frappe.conf.server_script_enabled:
frappe.msgprint('Please Enable Server Scripts')
raise ServerScriptNotEnabled
# build globals
exec_globals = get_safe_globals()
if _globals:
exec_globals.update(_globals)
# execute script compiled by RestrictedPython
exec(compile_restricted(script), exec_globals, _locals) # pylint: disable=exec-used
def get_safe_globals():
datautils = frappe._dict()
if frappe.db:
date_format = frappe.db.get_default("date_format") or "yyyy-mm-dd"
time_format = frappe.db.get_default("time_format") or "HH:mm:ss"
else:
date_format = "yyyy-mm-dd"
time_format = "HH:mm:ss"
add_module_properties(frappe.utils.data, datautils, lambda obj: hasattr(obj, "__call__"))
if "_" in getattr(frappe.local, 'form_dict', {}):
del frappe.local.form_dict["_"]
user = getattr(frappe.local, "session", None) and frappe.local.session.user or "Guest"
out = frappe._dict(
# make available limited methods of frappe
json=json,
dict=dict,
frappe=frappe._dict(
_=frappe._,
_dict=frappe._dict,
flags=frappe.flags,
format=frappe.format_value,
format_value=frappe.format_value,
date_format=date_format,
time_format=time_format,
format_date=frappe.utils.data.global_date_format,
form_dict=getattr(frappe.local, 'form_dict', {}),
get_meta=frappe.get_meta,
get_doc=frappe.get_doc,
get_cached_doc=frappe.get_cached_doc,
get_list=frappe.get_list,
get_all=frappe.get_all,
get_system_settings=frappe.get_system_settings,
utils=datautils,
get_url=frappe.utils.get_url,
render_template=frappe.render_template,
msgprint=frappe.msgprint,
user=user,
get_fullname=frappe.utils.get_fullname,
get_gravatar=frappe.utils.get_gravatar_url,
full_name=frappe.local.session.data.full_name if getattr(frappe.local, "session", None) else "Guest",
request=getattr(frappe.local, 'request', {}),
session=frappe._dict(
user=user,
csrf_token=frappe.local.session.data.csrf_token if getattr(frappe.local, "session", None) else ''
),
socketio_port=frappe.conf.socketio_port,
get_hooks=frappe.get_hooks,
),
style=frappe._dict(
border_color='#d1d8dd'
),
get_toc=get_toc,
get_next_link=get_next_link,
_=frappe._,
get_shade=get_shade,
scrub=scrub,
guess_mimetype=mimetypes.guess_type,
html2text=html2text,
dev_server=1 if os.environ.get('DEV_SERVER', False) else 0
)
add_module_properties(frappe.exceptions, out.frappe, lambda obj: inspect.isclass(obj) and issubclass(obj, Exception))
if not frappe.flags.in_setup_help:
out.get_visible_columns = get_visible_columns
out.frappe.date_format = date_format
out.frappe.time_format = time_format
out.frappe.db = frappe._dict(
get_list = frappe.get_list,
get_all = frappe.get_all,
get_value = frappe.db.get_value,
set_value = frappe.db.set_value,
get_single_value = frappe.db.get_single_value,
get_default = frappe.db.get_default,
escape = frappe.db.escape,
)
if frappe.response:
out.frappe.response = frappe.response
out.update(safe_globals)
# default writer allows write access
out._write_ = _write
out._getitem_ = _getitem
# allow iterators and list comprehension
out._getiter_ = iter
out._iter_unpack_sequence_ = RestrictedPython.Guards.guarded_iter_unpack_sequence
out.sorted = sorted
return out
def _getitem(obj, key):
# guard function for RestrictedPython
# allow any key to be accessed as long as it does not start with underscore
if isinstance(key, str) and key.startswith('_'):
raise SyntaxError('Key starts with _')
return obj[key]
def _write(obj):
# guard function for RestrictedPython
# allow writing to any object
return obj
def add_module_properties(module, data, filter_method):
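	# copy the public attributes of `module` that pass `filter_method` into `data`;
	# used above to expose frappe.utils.data callables and frappe exception classes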
for key, obj in module.__dict__.items():
if key.startswith("_"):
# ignore
continue
if filter_method(obj):
			# only include objects accepted by the filter (callables or whitelisted exception classes)
data[key] = obj | [
"frappe._dict",
"frappe.db.get_default",
"frappe.msgprint",
"os.environ.get",
"inspect.isclass",
"RestrictedPython.compile_restricted"
]
| [((969, 983), 'frappe._dict', 'frappe._dict', ([], {}), '()\n', (981, 983), False, 'import frappe\n'), ((620, 667), 'frappe.msgprint', 'frappe.msgprint', (['"""Please Enable Server Scripts"""'], {}), "('Please Enable Server Scripts')\n", (635, 667), False, 'import frappe\n'), ((852, 878), 'RestrictedPython.compile_restricted', 'compile_restricted', (['script'], {}), '(script)\n', (870, 878), False, 'from RestrictedPython import compile_restricted, safe_globals\n'), ((3273, 3517), 'frappe._dict', 'frappe._dict', ([], {'get_list': 'frappe.get_list', 'get_all': 'frappe.get_all', 'get_value': 'frappe.db.get_value', 'set_value': 'frappe.db.set_value', 'get_single_value': 'frappe.db.get_single_value', 'get_default': 'frappe.db.get_default', 'escape': 'frappe.db.escape'}), '(get_list=frappe.get_list, get_all=frappe.get_all, get_value=\n frappe.db.get_value, set_value=frappe.db.set_value, get_single_value=\n frappe.db.get_single_value, get_default=frappe.db.get_default, escape=\n frappe.db.escape)\n', (3285, 3517), False, 'import frappe\n'), ((1015, 1051), 'frappe.db.get_default', 'frappe.db.get_default', (['"""date_format"""'], {}), "('date_format')\n", (1036, 1051), False, 'import frappe\n'), ((1084, 1120), 'frappe.db.get_default', 'frappe.db.get_default', (['"""time_format"""'], {}), "('time_format')\n", (1105, 1120), False, 'import frappe\n'), ((2699, 2735), 'frappe._dict', 'frappe._dict', ([], {'border_color': '"""#d1d8dd"""'}), "(border_color='#d1d8dd')\n", (2711, 2735), False, 'import frappe\n'), ((2926, 2961), 'os.environ.get', 'os.environ.get', (['"""DEV_SERVER"""', '(False)'], {}), "('DEV_SERVER', False)\n", (2940, 2961), False, 'import os, json, inspect\n'), ((3039, 3059), 'inspect.isclass', 'inspect.isclass', (['obj'], {}), '(obj)\n', (3054, 3059), False, 'import os, json, inspect\n')] |
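A minimal illustration of exercising the sandbox above (an assumed usage sketch, not part of the Frappe API beyond safe_exec itself; it presumes an initialized site with server_script_enabled set in site_config.json, otherwise ServerScriptNotEnabled is raised):

# Hypothetical usage: run a trivial restricted script and read back its result.
result = {}
safe_exec("answer = 6 * 7", _locals=result)
assert result["answer"] == 42
# Inside the script only the whitelisted globals from get_safe_globals() are
# visible, and subscript keys starting with an underscore are rejected by the
# _getitem_ guard.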
#####################################################################################
#
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# This source code is subject to terms and conditions of the Apache License, Version 2.0. A
# copy of the license can be found in the License.html file at the root of this distribution. If
# you cannot locate the Apache License, Version 2.0, please send an email to
# <EMAIL>. By using this source code in any fashion, you are agreeing to be bound
# by the terms of the Apache License, Version 2.0.
#
# You must not remove this notice, or any other, from this software.
#
#
#####################################################################################
#
# test Microsoft.Scripting.Math
#
from iptest.assert_util import *
skiptest("win32")
from System import *
import clr
#silverlight already has this
if is_cli:
math_assembly = (1).GetType().Assembly
clr.AddReference(math_assembly)
load_iron_python_test()
import IronPythonTest
if is_net40:
from System.Numerics import BigInteger, Complex
else:
from Microsoft.Scripting.Math import BigInteger
from Microsoft.Scripting.Math import Complex64 as Complex
class myFormatProvider(IFormatProvider):
    def ToString(self): pass
p = myFormatProvider()
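# p is passed as the IFormatProvider argument to the To*(provider) conversion calls below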
def test_bigint():
AreEqual(BigInteger.Add(1,99999999999999999999999999999999999999999999999999999999999) ,BigInteger.Subtract(100000000000000000000000000000000000000000000000000000000001,1))
AreEqual(BigInteger.Multiply(400,500) , BigInteger.Divide(1000000,5))
AreEqual(BigInteger.Multiply(400,8) , BigInteger.LeftShift(400,3))
AreEqual(BigInteger.Divide(400,8) , BigInteger.RightShift(400,3))
AreEqual(BigInteger.RightShift(BigInteger.LeftShift(400,100),100) , 400)
AreEqual(BigInteger.RightShift(BigInteger.LeftShift(-12345678987654321,100),100) , -12345678987654321)
if is_net40:
AssertError(ValueError, BigInteger.RightShift, 400, -100)
AssertError(ValueError, BigInteger.LeftShift, 400, -100)
AssertError(ValueError, BigInteger.RightShift, -12345678987654321, -100)
AssertError(ValueError, BigInteger.LeftShift, -12345678987654321, -100)
else:
AreEqual(BigInteger.LeftShift(BigInteger.RightShift(400,-100),-100) , 400)
AreEqual(BigInteger.LeftShift(BigInteger.RightShift(-12345678987654321,-100),-100) , -12345678987654321)
AreEqual(BigInteger(-123456781234567812345678123456781234567812345678123456781234567812345678).OnesComplement().OnesComplement() , -123456781234567812345678123456781234567812345678123456781234567812345678)
AreEqual(BigInteger(-1234567812345678123456781234567812345678123456781234567812345678123456781234567812345678).OnesComplement() , -(-1234567812345678123456781234567812345678123456781234567812345678123456781234567812345678 + 1 ))
Assert(BigInteger.Xor(-1234567812345678123456781234567812345678123456781234567812345678123456781234567812345678,BigInteger(-1234567812345678123456781234567812345678123456781234567812345678123456781234567812345678).OnesComplement()) , -1)
AreEqual(BigInteger.BitwiseAnd(0xff00ff00,BigInteger.BitwiseOr(0x00ff00ff,0xaabbaabb)) , BigInteger(0xaa00aa00))
AreEqual(BigInteger.Mod(BigInteger(-9999999999999999999999999999999999999999),1000000000000000000) , -BigInteger.Mod(9999999999999999999999999999999999999999,BigInteger(-1000000000000000000)))
AreEqual(BigInteger.ToInt64(0x7fffffffffffffff) , 9223372036854775807)
AssertError(OverflowError, BigInteger.ToInt64, 0x8000000000000000)
AreEqual(BigInteger(-0).ToBoolean(p) , False )
AreEqual(BigInteger(-1212321.3213).ToBoolean(p) , True )
AreEqual(BigInteger(1212321384892342394723947).ToBoolean(p) , True )
AreEqual(BigInteger(0).ToChar(p) , Char.MinValue)
AreEqual(BigInteger(65).ToChar(p) , IConvertible.ToChar('A', p))
AreEqual(BigInteger(0xffff).ToChar(p) , Char.MaxValue)
AssertError(OverflowError, BigInteger(-1).ToChar, p)
AreEqual(BigInteger(100).ToDouble(p) , 100.0)
AreEqual(BigInteger(BigInteger(100).ToDouble(p)).ToSingle(p) , BigInteger(100.1213123).ToFloat())
Assert(BigInteger(100) != 100.32)
AreEqual(BigInteger(100) , 100.0)
Assert( 100.32 != BigInteger(100))
AreEqual(100.0 , BigInteger(100) )
def test_big_1():
for (a, m, t,x) in [
(7, "ToSByte", SByte,2),
(8, "ToByte", Byte, 0),
(15, "ToInt16", Int16,2),
(16, "ToUInt16", UInt16,0),
(31, "ToInt32", Int32,2),
(32, "ToUInt32", UInt32,0),
(63, "ToInt64", Int64,2),
(64, "ToUInt64", UInt64,0)
]:
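        # a is the bit width, m the conversion method, t the target CLR type;
        # x == 2 for signed types (range -2**a .. 2**a - 1) and x == 0 for
        # unsigned types (range 0 .. 2**a - 1, since -x ** a is 0 when x == 0)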
b = BigInteger(-x ** a )
left = getattr(b, m)(p)
right = t.MinValue
AreEqual(left, right)
b = BigInteger(2 ** a -1)
left = getattr(b, m)(p)
right = t.MaxValue
AreEqual(left, right)
b = BigInteger(0)
left = getattr(b, m)(p)
right = t.MaxValue - t.MaxValue
AreEqual(left, 0)
AssertError(OverflowError,getattr(BigInteger(2 ** a ), m),p)
AssertError(OverflowError,getattr(BigInteger(-1 - x ** a ), m),p)
def test_big_2():
for (a, m, t,x) in [
(31, "ToInt32",Int32,2),
(32, "ToUInt32",UInt32,0),
(63, "ToInt64",Int64,2),
(64, "ToUInt64",UInt64,0)
]:
b = BigInteger(-x ** a )
left = getattr(b, m)()
right = t.MinValue
AreEqual(left, right)
b = BigInteger(2 ** a -1)
left = getattr(b, m)()
right = t.MaxValue
AreEqual(left, right)
b = BigInteger(0)
left = getattr(b, m)()
right = t.MaxValue - t.MaxValue
AreEqual(left, right)
AssertError(OverflowError,getattr(BigInteger(2 ** a ), m))
AssertError(OverflowError,getattr(BigInteger(-1 - x ** a ), m))
#complex
def test_complex():
AreEqual(
Complex.Add(
Complex(BigInteger(9999), -1234),
Complex.Conjugate(Complex(9999, -1234)) ),
Complex.Multiply(BigInteger(9999), 2) )
AreEqual(
Complex.Add(
Complex(99999.99e-200, 12345.88e+100),
Complex.Negate(Complex(99999.99e-200, 12345.88e+100)) ),
Complex.Subtract(
Complex(99999.99e-200, 12345.88e+100),
Complex(99999.99e-200, 12345.88e+100) ))
AreEqual(
Complex.Divide(4+2j,2),
(2 + 1j) )
Assert(not hasattr(Complex, "Mod")) #IP 1.x had limited support for modulo which has been removed
def test_bool_misc():
if is_net40:
def is_zero(bigint):
return bigint.IsZero
else:
def is_zero(bigint):
return bigint.IsZero()
AreEqual(BigInteger(-1234).Sign, -1)
AreEqual(is_zero(BigInteger(-1234)), False)
AreEqual(BigInteger(-1234).IsNegative(), True)
AreEqual(BigInteger(-1234).IsPositive(), False)
AreEqual(BigInteger(0).Sign, 0)
AreEqual(is_zero(BigInteger(0)), True)
AreEqual(BigInteger(0).IsNegative(), False)
AreEqual(BigInteger(0).IsPositive(), False)
AreEqual(BigInteger(1234).Sign, 1)
AreEqual(is_zero(BigInteger(1234)), False)
AreEqual(BigInteger(1234).IsNegative(), False)
AreEqual(BigInteger(1234).IsPositive(), True)
def test_byte_conversions():
def CheckByteConversions(bigint, bytes):
SequencesAreEqual(bigint.ToByteArray(), bytes)
AreEqual(BigInteger.Create(Array[Byte](bytes)), bigint)
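    # ToByteArray()/Create() use little-endian two's-complement bytes:
    # 0x0201 -> [0x01, 0x02]; -0x81 (0xff7f as 16 bits) -> [0x7f, 0xff];
    # positive values whose high bit is set gain a trailing 0x00 sign byte.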
CheckByteConversions(BigInteger(0x00), [0x00])
CheckByteConversions(BigInteger(-0x01), [0xff])
CheckByteConversions(BigInteger(-0x81), [0x7f, 0xff])
CheckByteConversions(BigInteger(-0x100), [0x00, 0xff])
CheckByteConversions(BigInteger(-0x1000), [0x00, 0xf0])
CheckByteConversions(BigInteger(-0x10000), [0x00, 0x00, 0xff])
CheckByteConversions(BigInteger(-0x100000), [0x00, 0x00, 0xf0])
CheckByteConversions(BigInteger(-0x10000000), [0x00, 0x00, 0x00, 0xf0])
CheckByteConversions(BigInteger(-0x100000000), [0x00, 0x00, 0x00, 0x00, 0xff])
CheckByteConversions(BigInteger(0x7f), [0x7f])
CheckByteConversions(BigInteger(0xff), [0xff, 0x00])
CheckByteConversions(BigInteger(0x0201), [0x01, 0x02])
CheckByteConversions(BigInteger(0xf2f1), [0xf1, 0xf2, 0x00])
CheckByteConversions(BigInteger(0x03020100), [0x00, 0x01, 0x02, 0x03])
CheckByteConversions(BigInteger(0x0403020100), [0x00, 0x01, 0x02, 0x03, 0x04])
CheckByteConversions(BigInteger(0x0706050403020100), [0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07])
CheckByteConversions(BigInteger(0x080706050403020100), [0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08])
def test_dword_conversions():
def CheckDwordConversions(bigint, dwords):
SequencesAreEqual(bigint.GetWords(), dwords)
if bigint == BigInteger.Zero:
AreEqual(
IronPythonTest.System_Scripting_Math.CreateBigInteger(
0,
Array[UInt32](dwords),),
bigint)
else:
AreEqual(
IronPythonTest.System_Scripting_Math.CreateBigInteger(
1,
Array[UInt32](dwords)),
bigint)
AreEqual(
IronPythonTest.System_Scripting_Math.CreateBigInteger(
-1,
Array[UInt32](dwords)),
BigInteger.Negate(bigint))
CheckDwordConversions(BigInteger(0), [0x00000000])
CheckDwordConversions(BigInteger(1), [0x00000001])
CheckDwordConversions(BigInteger((1<<31)), [0x80000000])
CheckDwordConversions(BigInteger(((1<<31) + 9)), [0x80000009])
CheckDwordConversions(BigInteger((1<<32)), [0x00000000, 0x00000001])
def test_misc():
AssertError(ArgumentException, IronPythonTest.System_Scripting_Math.CreateBigInteger, 0, (1, 2, 3))
AssertError(ArgumentNullException, IronPythonTest.System_Scripting_Math.CreateBigInteger, 0, None)
AreEqual(BigInteger(1).CompareTo(None), 1)
if is_net40:
AreEqual(BigInteger(1).CompareTo(True), 0)
else:
AssertError(ArgumentException, BigInteger(1).CompareTo, True)
run_test(__name__)
| [
"Microsoft.Scripting.Math.BigInteger",
"Microsoft.Scripting.Math.BigInteger.Subtract",
"Microsoft.Scripting.Math.BigInteger.RightShift",
"Microsoft.Scripting.Math.BigInteger.BitwiseOr",
"Microsoft.Scripting.Math.BigInteger.Add",
"Microsoft.Scripting.Math.BigInteger.ToInt64",
"Microsoft.Scripting.Math.Complex64.Divide",
"Microsoft.Scripting.Math.Complex64",
"Microsoft.Scripting.Math.BigInteger.LeftShift",
"clr.AddReference",
"Microsoft.Scripting.Math.BigInteger.Negate",
"Microsoft.Scripting.Math.BigInteger.Multiply",
"Microsoft.Scripting.Math.BigInteger.Divide"
]
| [((924, 955), 'clr.AddReference', 'clr.AddReference', (['math_assembly'], {}), '(math_assembly)\n', (940, 955), False, 'import clr\n'), ((1317, 1395), 'Microsoft.Scripting.Math.BigInteger.Add', 'BigInteger.Add', (['(1)', '(99999999999999999999999999999999999999999999999999999999999)'], {}), '(1, 99999999999999999999999999999999999999999999999999999999999)\n', (1331, 1395), False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((1396, 1485), 'Microsoft.Scripting.Math.BigInteger.Subtract', 'BigInteger.Subtract', (['(100000000000000000000000000000000000000000000000000000000001)', '(1)'], {}), '(\n 100000000000000000000000000000000000000000000000000000000001, 1)\n', (1415, 1485), False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((1494, 1523), 'Microsoft.Scripting.Math.BigInteger.Multiply', 'BigInteger.Multiply', (['(400)', '(500)'], {}), '(400, 500)\n', (1513, 1523), False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((1525, 1554), 'Microsoft.Scripting.Math.BigInteger.Divide', 'BigInteger.Divide', (['(1000000)', '(5)'], {}), '(1000000, 5)\n', (1542, 1554), False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((1568, 1595), 'Microsoft.Scripting.Math.BigInteger.Multiply', 'BigInteger.Multiply', (['(400)', '(8)'], {}), '(400, 8)\n', (1587, 1595), False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((1597, 1625), 'Microsoft.Scripting.Math.BigInteger.LeftShift', 'BigInteger.LeftShift', (['(400)', '(3)'], {}), '(400, 3)\n', (1617, 1625), False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((1639, 1664), 'Microsoft.Scripting.Math.BigInteger.Divide', 'BigInteger.Divide', (['(400)', '(8)'], {}), '(400, 8)\n', (1656, 1664), False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((1666, 1695), 'Microsoft.Scripting.Math.BigInteger.RightShift', 'BigInteger.RightShift', (['(400)', '(3)'], {}), '(400, 3)\n', (1687, 1695), False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((3173, 3195), 'Microsoft.Scripting.Math.BigInteger', 'BigInteger', (['(2852170240)'], {}), '(2852170240)\n', (3183, 3195), False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((3412, 3451), 'Microsoft.Scripting.Math.BigInteger.ToInt64', 'BigInteger.ToInt64', (['(9223372036854775807)'], {}), '(9223372036854775807)\n', (3430, 3451), False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((4192, 4207), 'Microsoft.Scripting.Math.BigInteger', 'BigInteger', (['(100)'], {}), '(100)\n', (4202, 4207), False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((4282, 4297), 'Microsoft.Scripting.Math.BigInteger', 'BigInteger', (['(100)'], {}), '(100)\n', (4292, 4297), False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((4792, 4811), 'Microsoft.Scripting.Math.BigInteger', 'BigInteger', (['(-x ** a)'], {}), '(-x ** a)\n', (4802, 4811), False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((4923, 4945), 'Microsoft.Scripting.Math.BigInteger', 'BigInteger', (['(2 ** a - 1)'], {}), '(2 ** a - 1)\n', (4933, 4945), False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((5051, 5064), 'Microsoft.Scripting.Math.BigInteger', 'BigInteger', (['(0)'], {}), '(0)\n', (5061, 5064), False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((5594, 5613), 'Microsoft.Scripting.Math.BigInteger', 'BigInteger', (['(-x ** a)'], {}), '(-x ** a)\n', (5604, 5613), False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((5716, 5738), 'Microsoft.Scripting.Math.BigInteger', 'BigInteger', (['(2 ** a - 1)'], {}), '(2 ** a - 1)\n', (5726, 5738), False, 'from 
Microsoft.Scripting.Math import BigInteger\n'), ((5843, 5856), 'Microsoft.Scripting.Math.BigInteger', 'BigInteger', (['(0)'], {}), '(0)\n', (5853, 5856), False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((6620, 6647), 'Microsoft.Scripting.Math.Complex64.Divide', 'Complex.Divide', (['(4 + 2.0j)', '(2)'], {}), '(4 + 2.0j, 2)\n', (6634, 6647), True, 'from Microsoft.Scripting.Math import Complex64 as Complex\n'), ((7729, 7742), 'Microsoft.Scripting.Math.BigInteger', 'BigInteger', (['(0)'], {}), '(0)\n', (7739, 7742), False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((7781, 7795), 'Microsoft.Scripting.Math.BigInteger', 'BigInteger', (['(-1)'], {}), '(-1)\n', (7791, 7795), False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((7833, 7849), 'Microsoft.Scripting.Math.BigInteger', 'BigInteger', (['(-129)'], {}), '(-129)\n', (7843, 7849), False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((7891, 7907), 'Microsoft.Scripting.Math.BigInteger', 'BigInteger', (['(-256)'], {}), '(-256)\n', (7901, 7907), False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((7950, 7967), 'Microsoft.Scripting.Math.BigInteger', 'BigInteger', (['(-4096)'], {}), '(-4096)\n', (7960, 7967), False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((8010, 8028), 'Microsoft.Scripting.Math.BigInteger', 'BigInteger', (['(-65536)'], {}), '(-65536)\n', (8020, 8028), False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((8077, 8097), 'Microsoft.Scripting.Math.BigInteger', 'BigInteger', (['(-1048576)'], {}), '(-1048576)\n', (8087, 8097), False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((8145, 8167), 'Microsoft.Scripting.Math.BigInteger', 'BigInteger', (['(-268435456)'], {}), '(-268435456)\n', (8155, 8167), False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((8221, 8244), 'Microsoft.Scripting.Math.BigInteger', 'BigInteger', (['(-4294967296)'], {}), '(-4294967296)\n', (8231, 8244), False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((8305, 8320), 'Microsoft.Scripting.Math.BigInteger', 'BigInteger', (['(127)'], {}), '(127)\n', (8315, 8320), False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((8356, 8371), 'Microsoft.Scripting.Math.BigInteger', 'BigInteger', (['(255)'], {}), '(255)\n', (8366, 8371), False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((8413, 8428), 'Microsoft.Scripting.Math.BigInteger', 'BigInteger', (['(513)'], {}), '(513)\n', (8423, 8428), False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((8472, 8489), 'Microsoft.Scripting.Math.BigInteger', 'BigInteger', (['(62193)'], {}), '(62193)\n', (8482, 8489), False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((8537, 8557), 'Microsoft.Scripting.Math.BigInteger', 'BigInteger', (['(50462976)'], {}), '(50462976)\n', (8547, 8557), False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((8612, 8635), 'Microsoft.Scripting.Math.BigInteger', 'BigInteger', (['(17230332160)'], {}), '(17230332160)\n', (8622, 8635), False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((8695, 8725), 'Microsoft.Scripting.Math.BigInteger', 'BigInteger', (['(506097522914230528)'], {}), '(506097522914230528)\n', (8705, 8725), False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((8802, 8835), 'Microsoft.Scripting.Math.BigInteger', 'BigInteger', (['(148080050112590643456)'], {}), '(148080050112590643456)\n', (8812, 8835), False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((9679, 9692), 'Microsoft.Scripting.Math.BigInteger', 'BigInteger', (['(0)'], 
{}), '(0)\n', (9689, 9692), False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((9734, 9747), 'Microsoft.Scripting.Math.BigInteger', 'BigInteger', (['(1)'], {}), '(1)\n', (9744, 9747), False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((9789, 9808), 'Microsoft.Scripting.Math.BigInteger', 'BigInteger', (['(1 << 31)'], {}), '(1 << 31)\n', (9799, 9808), False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((9850, 9875), 'Microsoft.Scripting.Math.BigInteger', 'BigInteger', (['((1 << 31) + 9)'], {}), '((1 << 31) + 9)\n', (9860, 9875), False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((9917, 9936), 'Microsoft.Scripting.Math.BigInteger', 'BigInteger', (['(1 << 32)'], {}), '(1 << 32)\n', (9927, 9936), False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((1731, 1761), 'Microsoft.Scripting.Math.BigInteger.LeftShift', 'BigInteger.LeftShift', (['(400)', '(100)'], {}), '(400, 100)\n', (1751, 1761), False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((1808, 1853), 'Microsoft.Scripting.Math.BigInteger.LeftShift', 'BigInteger.LeftShift', (['(-12345678987654321)', '(100)'], {}), '(-12345678987654321, 100)\n', (1828, 1853), False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((3126, 3168), 'Microsoft.Scripting.Math.BigInteger.BitwiseOr', 'BigInteger.BitwiseOr', (['(16711935)', '(2864425659)'], {}), '(16711935, 2864425659)\n', (3146, 3168), False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((3225, 3278), 'Microsoft.Scripting.Math.BigInteger', 'BigInteger', (['(-9999999999999999999999999999999999999999)'], {}), '(-9999999999999999999999999999999999999999)\n', (3235, 3278), False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((3953, 3967), 'Microsoft.Scripting.Math.BigInteger', 'BigInteger', (['(-1)'], {}), '(-1)\n', (3963, 3967), False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((4152, 4167), 'Microsoft.Scripting.Math.BigInteger', 'BigInteger', (['(100)'], {}), '(100)\n', (4162, 4167), False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((4244, 4259), 'Microsoft.Scripting.Math.BigInteger', 'BigInteger', (['(100)'], {}), '(100)\n', (4254, 4259), False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((6290, 6306), 'Microsoft.Scripting.Math.BigInteger', 'BigInteger', (['(9999)'], {}), '(9999)\n', (6300, 6306), False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((6360, 6397), 'Microsoft.Scripting.Math.Complex64', 'Complex', (['(9.999999e-196)', '(1.234588e+104)'], {}), '(9.999999e-196, 1.234588e+104)\n', (6367, 6397), True, 'from Microsoft.Scripting.Math import Complex64 as Complex\n'), ((6506, 6543), 'Microsoft.Scripting.Math.Complex64', 'Complex', (['(9.999999e-196)', '(1.234588e+104)'], {}), '(9.999999e-196, 1.234588e+104)\n', (6513, 6543), True, 'from Microsoft.Scripting.Math import Complex64 as Complex\n'), ((6557, 6594), 'Microsoft.Scripting.Math.Complex64', 'Complex', (['(9.999999e-196)', '(1.234588e+104)'], {}), '(9.999999e-196, 1.234588e+104)\n', (6564, 6594), True, 'from Microsoft.Scripting.Math import Complex64 as Complex\n'), ((6960, 6977), 'Microsoft.Scripting.Math.BigInteger', 'BigInteger', (['(-1234)'], {}), '(-1234)\n', (6970, 6977), False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((7009, 7026), 'Microsoft.Scripting.Math.BigInteger', 'BigInteger', (['(-1234)'], {}), '(-1234)\n', (7019, 7026), False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((7157, 7170), 'Microsoft.Scripting.Math.BigInteger', 'BigInteger', (['(0)'], {}), '(0)\n', (7167, 7170), 
False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((7201, 7214), 'Microsoft.Scripting.Math.BigInteger', 'BigInteger', (['(0)'], {}), '(0)\n', (7211, 7214), False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((7333, 7349), 'Microsoft.Scripting.Math.BigInteger', 'BigInteger', (['(1234)'], {}), '(1234)\n', (7343, 7349), False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((7380, 7396), 'Microsoft.Scripting.Math.BigInteger', 'BigInteger', (['(1234)'], {}), '(1234)\n', (7390, 7396), False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((2237, 2269), 'Microsoft.Scripting.Math.BigInteger.RightShift', 'BigInteger.RightShift', (['(400)', '(-100)'], {}), '(400, -100)\n', (2258, 2269), False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((2320, 2367), 'Microsoft.Scripting.Math.BigInteger.RightShift', 'BigInteger.RightShift', (['(-12345678987654321)', '(-100)'], {}), '(-12345678987654321, -100)\n', (2341, 2367), False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((2618, 2729), 'Microsoft.Scripting.Math.BigInteger', 'BigInteger', (['(-\n 1234567812345678123456781234567812345678123456781234567812345678123456781234567812345678\n )'], {}), '(-\n 1234567812345678123456781234567812345678123456781234567812345678123456781234567812345678\n )\n', (2628, 2729), False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((3359, 3391), 'Microsoft.Scripting.Math.BigInteger', 'BigInteger', (['(-1000000000000000000)'], {}), '(-1000000000000000000)\n', (3369, 3391), False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((3563, 3577), 'Microsoft.Scripting.Math.BigInteger', 'BigInteger', (['(-0)'], {}), '(-0)\n', (3573, 3577), False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((3614, 3639), 'Microsoft.Scripting.Math.BigInteger', 'BigInteger', (['(-1212321.3213)'], {}), '(-1212321.3213)\n', (3624, 3639), False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((3675, 3712), 'Microsoft.Scripting.Math.BigInteger', 'BigInteger', (['(1212321384892342394723947)'], {}), '(1212321384892342394723947)\n', (3685, 3712), False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((3753, 3766), 'Microsoft.Scripting.Math.BigInteger', 'BigInteger', (['(0)'], {}), '(0)\n', (3763, 3766), False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((3807, 3821), 'Microsoft.Scripting.Math.BigInteger', 'BigInteger', (['(65)'], {}), '(65)\n', (3817, 3821), False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((3876, 3893), 'Microsoft.Scripting.Math.BigInteger', 'BigInteger', (['(65535)'], {}), '(65535)\n', (3886, 3893), False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((3997, 4012), 'Microsoft.Scripting.Math.BigInteger', 'BigInteger', (['(100)'], {}), '(100)\n', (4007, 4012), False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((4101, 4124), 'Microsoft.Scripting.Math.BigInteger', 'BigInteger', (['(100.1213123)'], {}), '(100.1213123)\n', (4111, 4124), False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((5206, 5224), 'Microsoft.Scripting.Math.BigInteger', 'BigInteger', (['(2 ** a)'], {}), '(2 ** a)\n', (5216, 5224), False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((5275, 5298), 'Microsoft.Scripting.Math.BigInteger', 'BigInteger', (['(-1 - x ** a)'], {}), '(-1 - x ** a)\n', (5285, 5298), False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((6001, 6019), 'Microsoft.Scripting.Math.BigInteger', 'BigInteger', (['(2 ** a)'], {}), '(2 ** a)\n', (6011, 6019), False, 'from Microsoft.Scripting.Math import 
BigInteger\n'), ((6068, 6091), 'Microsoft.Scripting.Math.BigInteger', 'BigInteger', (['(-1 - x ** a)'], {}), '(-1 - x ** a)\n', (6078, 6091), False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((6184, 6200), 'Microsoft.Scripting.Math.BigInteger', 'BigInteger', (['(9999)'], {}), '(9999)\n', (6194, 6200), False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((6240, 6260), 'Microsoft.Scripting.Math.Complex64', 'Complex', (['(9999)', '(-1234)'], {}), '(9999, -1234)\n', (6247, 6260), True, 'from Microsoft.Scripting.Math import Complex64 as Complex\n'), ((6426, 6463), 'Microsoft.Scripting.Math.Complex64', 'Complex', (['(9.999999e-196)', '(1.234588e+104)'], {}), '(9.999999e-196, 1.234588e+104)\n', (6433, 6463), True, 'from Microsoft.Scripting.Math import Complex64 as Complex\n'), ((7049, 7066), 'Microsoft.Scripting.Math.BigInteger', 'BigInteger', (['(-1234)'], {}), '(-1234)\n', (7059, 7066), False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((7100, 7117), 'Microsoft.Scripting.Math.BigInteger', 'BigInteger', (['(-1234)'], {}), '(-1234)\n', (7110, 7117), False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((7236, 7249), 'Microsoft.Scripting.Math.BigInteger', 'BigInteger', (['(0)'], {}), '(0)\n', (7246, 7249), False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((7284, 7297), 'Microsoft.Scripting.Math.BigInteger', 'BigInteger', (['(0)'], {}), '(0)\n', (7294, 7297), False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((7419, 7435), 'Microsoft.Scripting.Math.BigInteger', 'BigInteger', (['(1234)'], {}), '(1234)\n', (7429, 7435), False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((7470, 7486), 'Microsoft.Scripting.Math.BigInteger', 'BigInteger', (['(1234)'], {}), '(1234)\n', (7480, 7486), False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((9621, 9646), 'Microsoft.Scripting.Math.BigInteger.Negate', 'BigInteger.Negate', (['bigint'], {}), '(bigint)\n', (9638, 9646), False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((10203, 10216), 'Microsoft.Scripting.Math.BigInteger', 'BigInteger', (['(1)'], {}), '(1)\n', (10213, 10216), False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((10354, 10367), 'Microsoft.Scripting.Math.BigInteger', 'BigInteger', (['(1)'], {}), '(1)\n', (10364, 10367), False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((2954, 3065), 'Microsoft.Scripting.Math.BigInteger', 'BigInteger', (['(-\n 1234567812345678123456781234567812345678123456781234567812345678123456781234567812345678\n )'], {}), '(-\n 1234567812345678123456781234567812345678123456781234567812345678123456781234567812345678\n )\n', (2964, 3065), False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((10271, 10284), 'Microsoft.Scripting.Math.BigInteger', 'BigInteger', (['(1)'], {}), '(1)\n', (10281, 10284), False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((2408, 2498), 'Microsoft.Scripting.Math.BigInteger', 'BigInteger', (['(-123456781234567812345678123456781234567812345678123456781234567812345678)'], {}), '(-\n 123456781234567812345678123456781234567812345678123456781234567812345678)\n', (2418, 2498), False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((4058, 4073), 'Microsoft.Scripting.Math.BigInteger', 'BigInteger', (['(100)'], {}), '(100)\n', (4068, 4073), False, 'from Microsoft.Scripting.Math import BigInteger\n')] |
import sys
import math
import numpy as np
from datetime import datetime
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
class DecAtt(nn.Module):
def __init__(self, num_units, num_classes, embedding_size, dropout, device=0,
training=True, project_input=True,
use_intra_attention=False, distance_biases=10, max_sentence_length=30):
"""
Create the model based on MLP networks.
:param num_units: size of the networks
:param num_classes: number of classes in the problem
:param embedding_size: size of each word embedding
:param use_intra_attention: whether to use intra-attention model
:param training: whether to create training tensors (optimizer)
        :param project_input: whether to project input embeddings to a
different dimensionality
:param distance_biases: number of different distances with biases used
in the intra-attention model
"""
super().__init__()
self.arch = "DecAtt"
self.num_units = num_units
self.num_classes = num_classes
self.project_input = project_input
self.embedding_size = embedding_size
self.distance_biases = distance_biases
self.intra_attention = False
self.max_sentence_length = max_sentence_length
self.device = device
self.bias_embedding = nn.Embedding(max_sentence_length,1)
self.linear_layer_project = nn.Linear(embedding_size, num_units, bias=False)
#self.linear_layer_intra = nn.Sequential(nn.Linear(num_units, num_units), nn.ReLU(), nn.Linear(num_units, num_units), nn.ReLU())
self.linear_layer_attend = nn.Sequential(nn.Dropout(p=dropout), nn.Linear(num_units, num_units), nn.ReLU(),
nn.Dropout(p=dropout), nn.Linear(num_units, num_units), nn.ReLU())
self.linear_layer_compare = nn.Sequential(nn.Dropout(p=dropout), nn.Linear(num_units*2, num_units), nn.ReLU(),
nn.Dropout(p=dropout), nn.Linear(num_units, num_units), nn.ReLU())
self.linear_layer_aggregate = nn.Sequential(nn.Dropout(p=dropout), nn.Linear(num_units*2, num_units), nn.ReLU(),
nn.Dropout(p=dropout), nn.Linear(num_units, num_units), nn.ReLU(),
nn.Linear(num_units, num_classes), nn.LogSoftmax())
self.init_weight()
def init_weight(self):
self.linear_layer_project.weight.data.normal_(0, 0.01)
self.linear_layer_attend[1].weight.data.normal_(0, 0.01)
self.linear_layer_attend[1].bias.data.fill_(0)
self.linear_layer_attend[4].weight.data.normal_(0, 0.01)
self.linear_layer_attend[4].bias.data.fill_(0)
self.linear_layer_compare[1].weight.data.normal_(0, 0.01)
self.linear_layer_compare[1].bias.data.fill_(0)
self.linear_layer_compare[4].weight.data.normal_(0, 0.01)
self.linear_layer_compare[4].bias.data.fill_(0)
self.linear_layer_aggregate[1].weight.data.normal_(0, 0.01)
self.linear_layer_aggregate[1].bias.data.fill_(0)
self.linear_layer_aggregate[4].weight.data.normal_(0, 0.01)
self.linear_layer_aggregate[4].bias.data.fill_(0)
#self.word_embedding.weight.data.copy_(torch.from_numpy(self.pretrained_emb))
def attention_softmax3d(self, raw_attentions):
reshaped_attentions = raw_attentions.view(-1, raw_attentions.size(2))
out = nn.functional.softmax(reshaped_attentions, dim=1)
return out.view(raw_attentions.size(0),raw_attentions.size(1),raw_attentions.size(2))
def _transformation_input(self, embed_sent):
embed_sent = self.linear_layer_project(embed_sent)
result = embed_sent
if self.intra_attention:
f_intra = self.linear_layer_intra(embed_sent)
f_intra_t = torch.transpose(f_intra, 1, 2)
raw_attentions = torch.matmul(f_intra, f_intra_t)
time_steps = embed_sent.size(1)
r = torch.arange(0, time_steps)
r_matrix = r.view(1,-1).expand(time_steps,time_steps)
raw_index = r_matrix-r.view(-1,1)
clipped_index = torch.clamp(raw_index,0,self.distance_biases-1)
clipped_index = Variable(clipped_index.long())
if torch.cuda.is_available():
clipped_index = clipped_index.to(self.device)
bias = self.bias_embedding(clipped_index)
bias = torch.squeeze(bias)
raw_attentions += bias
attentions = self.attention_softmax3d(raw_attentions)
attended = torch.matmul(attentions, embed_sent)
result = torch.cat([embed_sent,attended],2)
return result
def attend(self, sent1, sent2, lsize_list, rsize_list):
"""
Compute inter-sentence attention. This is step 1 (attend) in the paper
:param sent1: tensor in shape (batch, time_steps, num_units),
the projected sentence 1
:param sent2: tensor in shape (batch, time_steps, num_units)
:return: a tuple of 3-d tensors, alfa and beta.
"""
repr1 = self.linear_layer_attend(sent1)
repr2 = self.linear_layer_attend(sent2)
repr2 = torch.transpose(repr2,1,2)
raw_attentions = torch.matmul(repr1, repr2)
#self.mask = generate_mask(lsize_list, rsize_list)
# masked = mask(self.raw_attentions, rsize_list)
#masked = raw_attentions * self.mask
att_sent1 = self.attention_softmax3d(raw_attentions)
beta = torch.matmul(att_sent1, sent2) #input2_soft
raw_attentions_t = torch.transpose(raw_attentions,1,2).contiguous()
#self.mask_t = torch.transpose(self.mask, 1, 2).contiguous()
# masked = mask(raw_attentions_t, lsize_list)
#masked = raw_attentions_t * self.mask_t
att_sent2 = self.attention_softmax3d(raw_attentions_t)
alpha = torch.matmul(att_sent2,sent1) #input1_soft
return alpha, beta
def compare(self, sentence, soft_alignment):
"""
        Apply a feed forward network to compare one sentence to its
soft alignment with the other.
:param sentence: embedded and projected sentence,
shape (batch, time_steps, num_units)
:param soft_alignment: tensor with shape (batch, time_steps, num_units)
:return: a tensor (batch, time_steps, num_units)
"""
sent_alignment = torch.cat([sentence, soft_alignment],2)
out = self.linear_layer_compare(sent_alignment)
#out, (state, _) = self.lstm_compare(out)
return out
def aggregate(self, v1, v2):
"""
Aggregate the representations induced from both sentences and their
representations
:param v1: tensor with shape (batch, time_steps, num_units)
:param v2: tensor with shape (batch, time_steps, num_units)
:return: logits over classes, shape (batch, num_classes)
"""
v1_sum = torch.sum(v1,1)
v2_sum = torch.sum(v2,1)
out = self.linear_layer_aggregate(torch.cat([v1_sum,v2_sum],1))
return out
def forward(self, sent1, sent2, ext_feats=None, word_to_doc_count=None, raw_sent1=None, raw_sent2=None):
lsize_list = [len(s.split(" ")) for s in raw_sent1]
rsize_list = [len(s.split(" ")) for s in raw_sent2]
sent1 = sent1.permute(0, 2, 1)
sent2 = sent2.permute(0, 2, 1)
sent1 = self._transformation_input(sent1)
sent2 = self._transformation_input(sent2)
alpha, beta = self.attend(sent1, sent2, lsize_list, rsize_list)
v1 = self.compare(sent1, beta)
v2 = self.compare(sent2, alpha)
logits = self.aggregate(v1, v2)
return logits
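
# Minimal usage sketch (not part of the original module); all hyperparameters and
# tensor shapes below are assumptions chosen only for illustration. The forward
# pass expects pre-embedded sentences shaped (batch, embedding_size, time_steps)
# together with the raw strings used to derive token counts.
if __name__ == "__main__":
    model = DecAtt(num_units=200, num_classes=2, embedding_size=300, dropout=0.2)
    sent1 = torch.randn(4, 300, 12)  # batch of 4, 300-dim embeddings, 12 tokens
    sent2 = torch.randn(4, 300, 10)
    raw1 = ["a short example sentence"] * 4
    raw2 = ["another example"] * 4
    logits = model(sent1, sent2, raw_sent1=raw1, raw_sent2=raw2)
    print(logits.shape)  # expected: torch.Size([4, 2])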
| [
"torch.squeeze",
"torch.nn.ReLU",
"torch.nn.Dropout",
"torch.clamp",
"torch.transpose",
"torch.cat",
"torch.arange",
"torch.matmul",
"torch.sum",
"torch.nn.Linear",
"torch.nn.LogSoftmax",
"torch.cuda.is_available",
"torch.nn.functional.softmax",
"torch.nn.Embedding"
]
| [((1473, 1509), 'torch.nn.Embedding', 'nn.Embedding', (['max_sentence_length', '(1)'], {}), '(max_sentence_length, 1)\n', (1485, 1509), True, 'import torch.nn as nn\n'), ((1545, 1593), 'torch.nn.Linear', 'nn.Linear', (['embedding_size', 'num_units'], {'bias': '(False)'}), '(embedding_size, num_units, bias=False)\n', (1554, 1593), True, 'import torch.nn as nn\n'), ((3630, 3679), 'torch.nn.functional.softmax', 'nn.functional.softmax', (['reshaped_attentions'], {'dim': '(1)'}), '(reshaped_attentions, dim=1)\n', (3651, 3679), True, 'import torch.nn as nn\n'), ((5399, 5427), 'torch.transpose', 'torch.transpose', (['repr2', '(1)', '(2)'], {}), '(repr2, 1, 2)\n', (5414, 5427), False, 'import torch\n'), ((5451, 5477), 'torch.matmul', 'torch.matmul', (['repr1', 'repr2'], {}), '(repr1, repr2)\n', (5463, 5477), False, 'import torch\n'), ((5716, 5746), 'torch.matmul', 'torch.matmul', (['att_sent1', 'sent2'], {}), '(att_sent1, sent2)\n', (5728, 5746), False, 'import torch\n'), ((6088, 6118), 'torch.matmul', 'torch.matmul', (['att_sent2', 'sent1'], {}), '(att_sent2, sent1)\n', (6100, 6118), False, 'import torch\n'), ((6613, 6653), 'torch.cat', 'torch.cat', (['[sentence, soft_alignment]', '(2)'], {}), '([sentence, soft_alignment], 2)\n', (6622, 6653), False, 'import torch\n'), ((7155, 7171), 'torch.sum', 'torch.sum', (['v1', '(1)'], {}), '(v1, 1)\n', (7164, 7171), False, 'import torch\n'), ((7188, 7204), 'torch.sum', 'torch.sum', (['v2', '(1)'], {}), '(v2, 1)\n', (7197, 7204), False, 'import torch\n'), ((1781, 1802), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': 'dropout'}), '(p=dropout)\n', (1791, 1802), True, 'import torch.nn as nn\n'), ((1804, 1835), 'torch.nn.Linear', 'nn.Linear', (['num_units', 'num_units'], {}), '(num_units, num_units)\n', (1813, 1835), True, 'import torch.nn as nn\n'), ((1837, 1846), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1844, 1846), True, 'import torch.nn as nn\n'), ((1897, 1918), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': 'dropout'}), '(p=dropout)\n', (1907, 1918), True, 'import torch.nn as nn\n'), ((1920, 1951), 'torch.nn.Linear', 'nn.Linear', (['num_units', 'num_units'], {}), '(num_units, num_units)\n', (1929, 1951), True, 'import torch.nn as nn\n'), ((1953, 1962), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1960, 1962), True, 'import torch.nn as nn\n'), ((2015, 2036), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': 'dropout'}), '(p=dropout)\n', (2025, 2036), True, 'import torch.nn as nn\n'), ((2038, 2073), 'torch.nn.Linear', 'nn.Linear', (['(num_units * 2)', 'num_units'], {}), '(num_units * 2, num_units)\n', (2047, 2073), True, 'import torch.nn as nn\n'), ((2073, 2082), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (2080, 2082), True, 'import torch.nn as nn\n'), ((2134, 2155), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': 'dropout'}), '(p=dropout)\n', (2144, 2155), True, 'import torch.nn as nn\n'), ((2157, 2188), 'torch.nn.Linear', 'nn.Linear', (['num_units', 'num_units'], {}), '(num_units, num_units)\n', (2166, 2188), True, 'import torch.nn as nn\n'), ((2190, 2199), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (2197, 2199), True, 'import torch.nn as nn\n'), ((2254, 2275), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': 'dropout'}), '(p=dropout)\n', (2264, 2275), True, 'import torch.nn as nn\n'), ((2277, 2312), 'torch.nn.Linear', 'nn.Linear', (['(num_units * 2)', 'num_units'], {}), '(num_units * 2, num_units)\n', (2286, 2312), True, 'import torch.nn as nn\n'), ((2312, 2321), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (2319, 2321), True, 'import torch.nn as nn\n'), 
((2375, 2396), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': 'dropout'}), '(p=dropout)\n', (2385, 2396), True, 'import torch.nn as nn\n'), ((2398, 2429), 'torch.nn.Linear', 'nn.Linear', (['num_units', 'num_units'], {}), '(num_units, num_units)\n', (2407, 2429), True, 'import torch.nn as nn\n'), ((2431, 2440), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (2438, 2440), True, 'import torch.nn as nn\n'), ((2494, 2527), 'torch.nn.Linear', 'nn.Linear', (['num_units', 'num_classes'], {}), '(num_units, num_classes)\n', (2503, 2527), True, 'import torch.nn as nn\n'), ((2529, 2544), 'torch.nn.LogSoftmax', 'nn.LogSoftmax', ([], {}), '()\n', (2542, 2544), True, 'import torch.nn as nn\n'), ((4026, 4056), 'torch.transpose', 'torch.transpose', (['f_intra', '(1)', '(2)'], {}), '(f_intra, 1, 2)\n', (4041, 4056), False, 'import torch\n'), ((4086, 4118), 'torch.matmul', 'torch.matmul', (['f_intra', 'f_intra_t'], {}), '(f_intra, f_intra_t)\n', (4098, 4118), False, 'import torch\n'), ((4179, 4206), 'torch.arange', 'torch.arange', (['(0)', 'time_steps'], {}), '(0, time_steps)\n', (4191, 4206), False, 'import torch\n'), ((4347, 4398), 'torch.clamp', 'torch.clamp', (['raw_index', '(0)', '(self.distance_biases - 1)'], {}), '(raw_index, 0, self.distance_biases - 1)\n', (4358, 4398), False, 'import torch\n'), ((4469, 4494), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (4492, 4494), False, 'import torch\n'), ((4631, 4650), 'torch.squeeze', 'torch.squeeze', (['bias'], {}), '(bias)\n', (4644, 4650), False, 'import torch\n'), ((4775, 4811), 'torch.matmul', 'torch.matmul', (['attentions', 'embed_sent'], {}), '(attentions, embed_sent)\n', (4787, 4811), False, 'import torch\n'), ((4833, 4869), 'torch.cat', 'torch.cat', (['[embed_sent, attended]', '(2)'], {}), '([embed_sent, attended], 2)\n', (4842, 4869), False, 'import torch\n'), ((7246, 7276), 'torch.cat', 'torch.cat', (['[v1_sum, v2_sum]', '(1)'], {}), '([v1_sum, v2_sum], 1)\n', (7255, 7276), False, 'import torch\n'), ((5788, 5825), 'torch.transpose', 'torch.transpose', (['raw_attentions', '(1)', '(2)'], {}), '(raw_attentions, 1, 2)\n', (5803, 5825), False, 'import torch\n')] |
import torch.nn as nn
import math
import torch.utils.model_zoo as model_zoo
import torch.nn.functional as F
import torch
import numpy as np
import cv2
import pdb
def sigmoid(x):
return 1 / (1 + math.exp(-x))
def norm_angle(angle):
norm_angle = sigmoid(10 * (abs(angle) / 0.7853975 - 1))
return norm_angle
def conv3x3(in_planes, out_planes, stride=1):
"3x3 convolution with padding"
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU()
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * 4)
self.relu = nn.ReLU()
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out = out + residual
out = self.relu(out)
return out
###''' self-attention; relation-attention '''
class ResNet_AT(nn.Module):
def __init__(self, block, layers, num_classes=1000, end2end=True, at_type=''):
self.inplanes = 64
self.end2end = end2end
super(ResNet_AT, self).__init__()
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU()
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
self.avgpool = nn.AdaptiveAvgPool2d(1)
self.dropout = nn.Dropout(0.5)
self.dropout2 = nn.Dropout(0.6)
self.alpha = nn.Sequential(nn.Linear(512, 1),
nn.Sigmoid())
self.beta = nn.Sequential(nn.Linear(1024, 1),
nn.Sigmoid())
self.pred_fc1 = nn.Linear(512, 7)
self.pred_fc2 = nn.Linear(1024, 7)
self.at_type = at_type
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x='', phrase='train', AT_level='first_level',vectors='',vm='',alphas_from1='',index_matrix=''):
vs = []
alphas = []
assert phrase == 'train' or phrase == 'eval'
assert AT_level == 'first_level' or AT_level == 'second_level' or AT_level == 'pred'
if phrase == 'train':
num_pair = 3
for i in range(num_pair):
f = x[:, :, :, :, i] # x[128,3,224,224]
f = self.conv1(f)
f = self.bn1(f)
f = self.relu(f)
f = self.maxpool(f)
f = self.layer1(f)
f = self.layer2(f)
f = self.layer3(f)
f = self.layer4(f)
f = self.avgpool(f)
f = f.squeeze(3).squeeze(2) # f[1, 512, 1, 1] ---> f[1, 512]
# MN_MODEL(first Level)
vs.append(f)
alphas.append(self.alpha(self.dropout(f)))
vs_stack = torch.stack(vs, dim=2)
alphas_stack = torch.stack(alphas, dim=2)
if self.at_type == 'self-attention':
vm1 = vs_stack.mul(alphas_stack).sum(2).div(alphas_stack.sum(2))
if self.at_type == 'self_relation-attention':
vm1 = vs_stack.mul(alphas_stack).sum(2).div(alphas_stack.sum(2))
betas = []
for i in range(len(vs)):
vs[i] = torch.cat([vs[i], vm1], dim=1)
betas.append(self.beta(self.dropout(vs[i])))
cascadeVs_stack = torch.stack(vs, dim=2)
betas_stack = torch.stack(betas, dim=2)
output = cascadeVs_stack.mul(betas_stack * alphas_stack).sum(2).div((betas_stack * alphas_stack).sum(2))
if self.at_type == 'self-attention':
vm1 = self.dropout(vm1)
pred_score = self.pred_fc1(vm1)
if self.at_type == 'self_relation-attention':
output = self.dropout2(output)
pred_score = self.pred_fc2(output)
return pred_score
if phrase == 'eval':
if AT_level == 'first_level':
f = self.conv1(x)
f = self.bn1(f)
f = self.relu(f)
f = self.maxpool(f)
f = self.layer1(f)
f = self.layer2(f)
f = self.layer3(f)
f = self.layer4(f)
f = self.avgpool(f)
f = f.squeeze(3).squeeze(2) # f[1, 512, 1, 1] ---> f[1, 512]
# MN_MODEL(first Level)
alphas = self.alpha(self.dropout(f))
return f, alphas
if AT_level == 'second_level':
assert self.at_type == 'self_relation-attention'
vms = index_matrix.permute(1, 0).mm(vm) # [381, 21783] -> [21783,381] * [381,512] --> [21783, 512]
vs_cate = torch.cat([vectors, vms], dim=1)
betas = self.beta(self.dropout(vs_cate))
''' keywords: mean_fc ; weight_sourcefc; sum_alpha; weightmean_sourcefc '''
''' alpha * beta '''
weight_catefc = vs_cate.mul(alphas_from1) # [21570,512] * [21570,1] --->[21570,512]
alpha_beta = alphas_from1.mul(betas)
sum_alphabetas = index_matrix.mm(alpha_beta) # [380,21570] * [21570,1] -> [380,1]
weightmean_catefc = index_matrix.mm(weight_catefc).div(sum_alphabetas)
weightmean_catefc = self.dropout2(weightmean_catefc)
pred_score = self.pred_fc2(weightmean_catefc)
return pred_score
if AT_level == 'pred':
if self.at_type == 'self-attention':
pred_score = self.pred_fc1(self.dropout(vm))
return pred_score
''' self-attention; relation-attention '''
def resnet18_at(pretrained=False, **kwargs):
    # Constructs a base ResNet-18 model.
model = ResNet_AT(BasicBlock, [2, 2, 2, 2], **kwargs)
return model
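
# Minimal usage sketch (not part of the original module); batch size and input
# layout are assumptions. In the 'train' phrase the network consumes three
# stacked input frames, i.e. a tensor shaped (batch, 3, 224, 224, 3), and the
# classification head always predicts 7 classes.
if __name__ == "__main__":
    net = resnet18_at(at_type='self_relation-attention')
    frames = torch.randn(2, 3, 224, 224, 3)
    scores = net(frames, phrase='train')
    print(scores.shape)  # expected: torch.Size([2, 7])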
| [
"torch.nn.BatchNorm2d",
"torch.nn.ReLU",
"torch.nn.Dropout",
"torch.nn.Sigmoid",
"torch.nn.Sequential",
"torch.stack",
"math.sqrt",
"torch.nn.Conv2d",
"torch.nn.MaxPool2d",
"torch.nn.AdaptiveAvgPool2d",
"torch.nn.Linear",
"math.exp",
"torch.cat"
]
| [((413, 502), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_planes', 'out_planes'], {'kernel_size': '(3)', 'stride': 'stride', 'padding': '(1)', 'bias': '(False)'}), '(in_planes, out_planes, kernel_size=3, stride=stride, padding=1,\n bias=False)\n', (422, 502), True, 'import torch.nn as nn\n'), ((756, 778), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['planes'], {}), '(planes)\n', (770, 778), True, 'import torch.nn as nn\n'), ((799, 808), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (806, 808), True, 'import torch.nn as nn\n'), ((873, 895), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['planes'], {}), '(planes)\n', (887, 895), True, 'import torch.nn as nn\n'), ((1495, 1549), 'torch.nn.Conv2d', 'nn.Conv2d', (['inplanes', 'planes'], {'kernel_size': '(1)', 'bias': '(False)'}), '(inplanes, planes, kernel_size=1, bias=False)\n', (1504, 1549), True, 'import torch.nn as nn\n'), ((1569, 1591), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['planes'], {}), '(planes)\n', (1583, 1591), True, 'import torch.nn as nn\n'), ((1613, 1691), 'torch.nn.Conv2d', 'nn.Conv2d', (['planes', 'planes'], {'kernel_size': '(3)', 'stride': 'stride', 'padding': '(1)', 'bias': '(False)'}), '(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)\n', (1622, 1691), True, 'import torch.nn as nn\n'), ((1742, 1764), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['planes'], {}), '(planes)\n', (1756, 1764), True, 'import torch.nn as nn\n'), ((1786, 1842), 'torch.nn.Conv2d', 'nn.Conv2d', (['planes', '(planes * 4)'], {'kernel_size': '(1)', 'bias': '(False)'}), '(planes, planes * 4, kernel_size=1, bias=False)\n', (1795, 1842), True, 'import torch.nn as nn\n'), ((1862, 1888), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(planes * 4)'], {}), '(planes * 4)\n', (1876, 1888), True, 'import torch.nn as nn\n'), ((1909, 1918), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1916, 1918), True, 'import torch.nn as nn\n'), ((2708, 2772), 'torch.nn.Conv2d', 'nn.Conv2d', (['(3)', '(64)'], {'kernel_size': '(7)', 'stride': '(2)', 'padding': '(3)', 'bias': '(False)'}), '(3, 64, kernel_size=7, stride=2, padding=3, bias=False)\n', (2717, 2772), True, 'import torch.nn as nn\n'), ((2823, 2841), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(64)'], {}), '(64)\n', (2837, 2841), True, 'import torch.nn as nn\n'), ((2862, 2871), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (2869, 2871), True, 'import torch.nn as nn\n'), ((2895, 2943), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', ([], {'kernel_size': '(3)', 'stride': '(2)', 'padding': '(1)'}), '(kernel_size=3, stride=2, padding=1)\n', (2907, 2943), True, 'import torch.nn as nn\n'), ((3244, 3267), 'torch.nn.AdaptiveAvgPool2d', 'nn.AdaptiveAvgPool2d', (['(1)'], {}), '(1)\n', (3264, 3267), True, 'import torch.nn as nn\n'), ((3291, 3306), 'torch.nn.Dropout', 'nn.Dropout', (['(0.5)'], {}), '(0.5)\n', (3301, 3306), True, 'import torch.nn as nn\n'), ((3331, 3346), 'torch.nn.Dropout', 'nn.Dropout', (['(0.6)'], {}), '(0.6)\n', (3341, 3346), True, 'import torch.nn as nn\n'), ((3578, 3595), 'torch.nn.Linear', 'nn.Linear', (['(512)', '(7)'], {}), '(512, 7)\n', (3587, 3595), True, 'import torch.nn as nn\n'), ((3620, 3638), 'torch.nn.Linear', 'nn.Linear', (['(1024)', '(7)'], {}), '(1024, 7)\n', (3629, 3638), True, 'import torch.nn as nn\n'), ((4654, 4676), 'torch.nn.Sequential', 'nn.Sequential', (['*layers'], {}), '(*layers)\n', (4667, 4676), True, 'import torch.nn as nn\n'), ((198, 210), 'math.exp', 'math.exp', (['(-x)'], {}), '(-x)\n', (206, 210), False, 'import math\n'), ((3382, 3399), 'torch.nn.Linear', 'nn.Linear', 
(['(512)', '(1)'], {}), '(512, 1)\n', (3391, 3399), True, 'import torch.nn as nn\n'), ((3436, 3448), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (3446, 3448), True, 'import torch.nn as nn\n'), ((3485, 3503), 'torch.nn.Linear', 'nn.Linear', (['(1024)', '(1)'], {}), '(1024, 1)\n', (3494, 3503), True, 'import torch.nn as nn\n'), ((3539, 3551), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (3549, 3551), True, 'import torch.nn as nn\n'), ((5676, 5698), 'torch.stack', 'torch.stack', (['vs'], {'dim': '(2)'}), '(vs, dim=2)\n', (5687, 5698), False, 'import torch\n'), ((5726, 5752), 'torch.stack', 'torch.stack', (['alphas'], {'dim': '(2)'}), '(alphas, dim=2)\n', (5737, 5752), False, 'import torch\n'), ((4213, 4310), 'torch.nn.Conv2d', 'nn.Conv2d', (['self.inplanes', '(planes * block.expansion)'], {'kernel_size': '(1)', 'stride': 'stride', 'bias': '(False)'}), '(self.inplanes, planes * block.expansion, kernel_size=1, stride=\n stride, bias=False)\n', (4222, 4310), True, 'import torch.nn as nn\n'), ((4349, 4389), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(planes * block.expansion)'], {}), '(planes * block.expansion)\n', (4363, 4389), True, 'import torch.nn as nn\n'), ((6250, 6272), 'torch.stack', 'torch.stack', (['vs'], {'dim': '(2)'}), '(vs, dim=2)\n', (6261, 6272), False, 'import torch\n'), ((6303, 6328), 'torch.stack', 'torch.stack', (['betas'], {'dim': '(2)'}), '(betas, dim=2)\n', (6314, 6328), False, 'import torch\n'), ((7617, 7649), 'torch.cat', 'torch.cat', (['[vectors, vms]'], {'dim': '(1)'}), '([vectors, vms], dim=1)\n', (7626, 7649), False, 'import torch\n'), ((3859, 3877), 'math.sqrt', 'math.sqrt', (['(2.0 / n)'], {}), '(2.0 / n)\n', (3868, 3877), False, 'import math\n'), ((6119, 6149), 'torch.cat', 'torch.cat', (['[vs[i], vm1]'], {'dim': '(1)'}), '([vs[i], vm1], dim=1)\n', (6128, 6149), False, 'import torch\n')] |
"""Serves the single page app web ui."""
import json
import tornado.gen
from ndscheduler import settings
from ndscheduler import utils
from ndscheduler.server.handlers import base
class Handler(base.BaseHandler):
"""Index page request handler."""
@tornado.gen.coroutine
def get(self):
"""Serve up the single page app for scheduler dashboard."""
meta_info = utils.get_all_available_jobs()
self.render(settings.APP_INDEX_PAGE, jobs_meta_info=json.dumps(meta_info))
| [
"ndscheduler.utils.get_all_available_jobs",
"json.dumps"
]
| [((391, 421), 'ndscheduler.utils.get_all_available_jobs', 'utils.get_all_available_jobs', ([], {}), '()\n', (419, 421), False, 'from ndscheduler import utils\n'), ((482, 503), 'json.dumps', 'json.dumps', (['meta_info'], {}), '(meta_info)\n', (492, 503), False, 'import json\n')] |
import spacy
from spacy.scorer import PRFScore
import typer
from pathlib import Path
from wasabi import Printer, table
import operator
import benepar
import clausecat_component
import clausecat_model
import clausecat_reader
import clause_segmentation
import clause_aggregation
msg = Printer()
def main(model_path: Path, eval_path: Path):
"""This script is used to evaluate the clausecat component"""
nlp = spacy.load(model_path)
reader = clausecat_reader.ClausecatCorpus(eval_path)
examples = reader(nlp)
clausecat = nlp.get_pipe("clausecat")
scorer = {
"POSITIVE": PRFScore(),
"NEGATIVE": PRFScore(),
"NEUTRAL": PRFScore(),
"ANAMNESIS": PRFScore(),
}
for i, example in enumerate(examples):
prediction = example.predicted
reference = example.reference
# Prediction
prediction = clausecat(prediction)
# Iterate through prediction and references
for pred_clause, ref_clause in zip(prediction._.clauses, reference._.clauses):
prediction_cats = pred_clause["cats"]
reference_cats = ref_clause["cats"]
prediction_class = max(prediction_cats.items(), key=operator.itemgetter(1))[
0
]
# Add to matrix
for label in prediction_cats:
if label != prediction_class:
prediction = 0
else:
prediction = 1
if prediction == 0 and reference_cats[label] != 0:
scorer[label].fn += 1
elif prediction == 1 and reference_cats[label] != 1:
scorer[label].fp += 1
elif prediction == 1 and reference_cats[label] == 1:
scorer[label].tp += 1
# Printing
textcat_data = []
avg_fscore = 0
avg_recall = 0
avg_precision = 0
for label in scorer:
textcat_data.append(
(
label,
round(scorer[label].fscore, 2),
round(scorer[label].recall, 2),
round(scorer[label].precision, 2),
)
)
avg_fscore += scorer[label].fscore
avg_recall += scorer[label].recall
avg_precision += scorer[label].precision
textcat_data.append(
(
"AVERAGE",
round(avg_fscore / len(scorer), 2),
round(avg_recall / len(scorer), 2),
round(avg_precision / len(scorer), 2),
)
)
header = ("Label", "F-Score", "Recall", "Precision")
print(table(textcat_data, header=header, divider=True))
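
# Illustrative aside (not part of the original script): how a spaCy PRFScore
# turns tp/fp/fn counts like the ones accumulated above into the reported
# metrics. The counts here are made-up numbers for demonstration only.
def _prf_sketch():
    score = PRFScore()
    score.tp, score.fp, score.fn = 8, 2, 4
    return score.precision, score.recall, score.fscore  # 0.8, 0.666..., 0.727...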
if __name__ == "__main__":
typer.run(main)
| [
"wasabi.Printer",
"spacy.load",
"clausecat_reader.ClausecatCorpus",
"wasabi.table",
"operator.itemgetter",
"typer.run",
"spacy.scorer.PRFScore"
]
| [((285, 294), 'wasabi.Printer', 'Printer', ([], {}), '()\n', (292, 294), False, 'from wasabi import Printer, table\n'), ((419, 441), 'spacy.load', 'spacy.load', (['model_path'], {}), '(model_path)\n', (429, 441), False, 'import spacy\n'), ((455, 498), 'clausecat_reader.ClausecatCorpus', 'clausecat_reader.ClausecatCorpus', (['eval_path'], {}), '(eval_path)\n', (487, 498), False, 'import clausecat_reader\n'), ((2677, 2692), 'typer.run', 'typer.run', (['main'], {}), '(main)\n', (2686, 2692), False, 'import typer\n'), ((605, 615), 'spacy.scorer.PRFScore', 'PRFScore', ([], {}), '()\n', (613, 615), False, 'from spacy.scorer import PRFScore\n'), ((637, 647), 'spacy.scorer.PRFScore', 'PRFScore', ([], {}), '()\n', (645, 647), False, 'from spacy.scorer import PRFScore\n'), ((668, 678), 'spacy.scorer.PRFScore', 'PRFScore', ([], {}), '()\n', (676, 678), False, 'from spacy.scorer import PRFScore\n'), ((701, 711), 'spacy.scorer.PRFScore', 'PRFScore', ([], {}), '()\n', (709, 711), False, 'from spacy.scorer import PRFScore\n'), ((2594, 2642), 'wasabi.table', 'table', (['textcat_data'], {'header': 'header', 'divider': '(True)'}), '(textcat_data, header=header, divider=True)\n', (2599, 2642), False, 'from wasabi import Printer, table\n'), ((1207, 1229), 'operator.itemgetter', 'operator.itemgetter', (['(1)'], {}), '(1)\n', (1226, 1229), False, 'import operator\n')] |
import pathlib
print(pathlib.Path(__file__).parent.resolve())
while True:
next_cmd = input("> ")
print(eval(next_cmd))
| [
"pathlib.Path"
]
| [((22, 44), 'pathlib.Path', 'pathlib.Path', (['__file__'], {}), '(__file__)\n', (34, 44), False, 'import pathlib\n')] |
import logging
import os
import aiohttp.web
from connexion import AioHttpApp
from nlp_annotator_api.config.config import conf
from nlp_annotator_api.config.logging import setup_logging
from nlp_annotator_api.server.middleware.statsd_middleware import StatsdMiddleware
from nlp_annotator_api.server.signals.statsd_client import statsd_client_factory
setup_logging()
access_log = logging.getLogger("nlp_annotator_api.access")
_file_dir = os.path.dirname(__file__)
app = AioHttpApp(
__name__, specification_dir=os.path.join(_file_dir, "..", "resources", "schemas"),
server_args=dict(
client_max_size=8 * 1024**2
)
)
app.add_api("openapi.yaml", pass_context_arg_name="request")
aiohttp_app: aiohttp.web.Application = app.app
aiohttp_app.cleanup_ctx.append(statsd_client_factory(conf.statsd))
aiohttp_app.middlewares.append(StatsdMiddleware())
if __name__ == "__main__":
app.run(access_log=access_log)
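
# Generic illustration (not taken from this project): an aiohttp middleware is an
# async callable of (request, handler); StatsdMiddleware is assumed to provide a
# comparable callable when it is appended to aiohttp_app.middlewares above.
@aiohttp.web.middleware
async def _example_middleware(request, handler):
    # pre-processing (e.g. starting a request timer) would go here
    response = await handler(request)
    # post-processing (e.g. emitting a statsd timing metric) would go here
    return response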
| [
"logging.getLogger",
"nlp_annotator_api.config.logging.setup_logging",
"nlp_annotator_api.server.signals.statsd_client.statsd_client_factory",
"os.path.join",
"os.path.dirname",
"nlp_annotator_api.server.middleware.statsd_middleware.StatsdMiddleware"
]
| [((352, 367), 'nlp_annotator_api.config.logging.setup_logging', 'setup_logging', ([], {}), '()\n', (365, 367), False, 'from nlp_annotator_api.config.logging import setup_logging\n'), ((382, 427), 'logging.getLogger', 'logging.getLogger', (['"""nlp_annotator_api.access"""'], {}), "('nlp_annotator_api.access')\n", (399, 427), False, 'import logging\n'), ((441, 466), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (456, 466), False, 'import os\n'), ((783, 817), 'nlp_annotator_api.server.signals.statsd_client.statsd_client_factory', 'statsd_client_factory', (['conf.statsd'], {}), '(conf.statsd)\n', (804, 817), False, 'from nlp_annotator_api.server.signals.statsd_client import statsd_client_factory\n'), ((851, 869), 'nlp_annotator_api.server.middleware.statsd_middleware.StatsdMiddleware', 'StatsdMiddleware', ([], {}), '()\n', (867, 869), False, 'from nlp_annotator_api.server.middleware.statsd_middleware import StatsdMiddleware\n'), ((518, 571), 'os.path.join', 'os.path.join', (['_file_dir', '""".."""', '"""resources"""', '"""schemas"""'], {}), "(_file_dir, '..', 'resources', 'schemas')\n", (530, 571), False, 'import os\n')] |