code | apis | extract_api
---|---|---
stringlengths 22-1.05M | listlengths 1-3.31k | stringlengths 75-3.25M
"""Compile Nim libraries as Python Extension Modules.
If you want your namespace to coexist with your python code, name this ponim.nim
and then your import will look like `from ponim.nim import adder` and
`from ponim import subtractor`. There must be a way to smooth that out in the
__init__.py file somehow.
Note that the file must be in the included source code dir. Currently it is
easiest to just put this in with your python code.
"""
from os import listdir, mkdir
from os.path import join, expanduser
from setuptools import Extension
from shutil import copyfile, rmtree
from typing import Sequence, Dict, List
import subprocess
import sys
import pathlib
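# A hedged sketch of the __init__.py smoothing mentioned in the module
# docstring. The package/module names (ponim, adder) come from the docstring's
# example and are assumptions, not part of this file:
#
#     # ponim/__init__.py
#     from ponim.nim import adder  # re-export symbols from the compiled Nim module
#
# so that callers can simply write `from ponim import adder, subtractor`.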
# class NimLib(TypedDict):
# """Wrapper around a lib name and path for nim code"""
# name: str
# path: str
def nythonize(nimbase: str, modules: Sequence[Dict[str, str]]) -> List[Extension]:
"""Compile a Nim library as a Python Extension Module.
`nimbase` is the path to `nimbase.h` on your system, which is needed to
compile the Nim-generated C code for Python.
This builds a set of Extensions, which are then passed back to setuptools.
"""
extensions = []
# Create a top level working dir
rmtree(join("build", "nim_build"), ignore_errors=True)
pathlib.Path(join("build", "nim_build")).mkdir(parents=True)
for module in modules:
module_dir = join("build", "nim_build", f"{module['name']}_build")
rmtree(module_dir, ignore_errors=True)
mkdir(module_dir)
subprocess.run(
[
"nim",
"compileToC",
"--compileOnly",
"-d:release",
"-d:ssl",
"--app:lib",
"--opt:speed",
"--gc:markAndSweep",
f"--nimcache:{module_dir}",
module["path"],
],
check=True,
stderr=sys.stdout.buffer,
)
copyfile(
nimbase, join(module_dir, "nimbase.h"),
)
sources = []
for c_source_file in listdir(module_dir):
if c_source_file.endswith(".c"):
sources.append(join(module_dir, c_source_file))
extensions.append(
Extension(
name=module["name"],
sources=sources,
extra_compile_args=[
"-flto",
"-ffast-math",
"-march=native",
"-mtune=native",
"-O3",
"-fno-ident",
"-fsingle-precision-constant",
],
extra_link_args=["-s"],
include_dirs=[module_dir],
)
)
return extensions
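
# Hedged usage sketch: passing the returned Extension list to setuptools from a
# setup.py. The nimbase.h location and the module name/path below are
# illustrative assumptions, not values prescribed by this file:
#
#     from setuptools import setup
#
#     setup(
#         name="ponim",
#         packages=["ponim"],
#         ext_modules=nythonize(
#             expanduser("~/.choosenim/toolchains/nim-1.4.8/lib/nimbase.h"),
#             [{"name": "nim", "path": "ponim/ponim.nim"}],
#         ),
#     )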
| [
"os.listdir",
"subprocess.run",
"os.path.join",
"setuptools.Extension",
"os.mkdir",
"shutil.rmtree"
]
| [((1214, 1240), 'os.path.join', 'join', (['"""build"""', '"""nim_build"""'], {}), "('build', 'nim_build')\n", (1218, 1240), False, 'from os.path import join, expanduser\n'), ((1375, 1428), 'os.path.join', 'join', (['"""build"""', '"""nim_build"""', 'f"""{module[\'name\']}_build"""'], {}), '(\'build\', \'nim_build\', f"{module[\'name\']}_build")\n', (1379, 1428), False, 'from os.path import join, expanduser\n'), ((1437, 1475), 'shutil.rmtree', 'rmtree', (['module_dir'], {'ignore_errors': '(True)'}), '(module_dir, ignore_errors=True)\n', (1443, 1475), False, 'from shutil import copyfile, rmtree\n'), ((1484, 1501), 'os.mkdir', 'mkdir', (['module_dir'], {}), '(module_dir)\n', (1489, 1501), False, 'from os import listdir, mkdir\n'), ((1510, 1732), 'subprocess.run', 'subprocess.run', (["['nim', 'compileToC', '--compileOnly', '-d:release', '-d:ssl', '--app:lib',\n '--opt:speed', '--gc:markAndSweep', f'--nimcache:{module_dir}', module[\n 'path']]"], {'check': '(True)', 'stderr': 'sys.stdout.buffer'}), "(['nim', 'compileToC', '--compileOnly', '-d:release',\n '-d:ssl', '--app:lib', '--opt:speed', '--gc:markAndSweep',\n f'--nimcache:{module_dir}', module['path']], check=True, stderr=sys.\n stdout.buffer)\n", (1524, 1732), False, 'import subprocess\n'), ((2072, 2091), 'os.listdir', 'listdir', (['module_dir'], {}), '(module_dir)\n', (2079, 2091), False, 'from os import listdir, mkdir\n'), ((1981, 2010), 'os.path.join', 'join', (['module_dir', '"""nimbase.h"""'], {}), "(module_dir, 'nimbase.h')\n", (1985, 2010), False, 'from os.path import join, expanduser\n'), ((2241, 2483), 'setuptools.Extension', 'Extension', ([], {'name': "module['name']", 'sources': 'sources', 'extra_compile_args': "['-flto', '-ffast-math', '-march=native', '-mtune=native', '-O3',\n '-fno-ident', '-fsingle-precision-constant']", 'extra_link_args': "['-s']", 'include_dirs': '[module_dir]'}), "(name=module['name'], sources=sources, extra_compile_args=['-flto',\n '-ffast-math', '-march=native', '-mtune=native', '-O3', '-fno-ident',\n '-fsingle-precision-constant'], extra_link_args=['-s'], include_dirs=[\n module_dir])\n", (2250, 2483), False, 'from setuptools import Extension\n'), ((1279, 1305), 'os.path.join', 'join', (['"""build"""', '"""nim_build"""'], {}), "('build', 'nim_build')\n", (1283, 1305), False, 'from os.path import join, expanduser\n'), ((2169, 2200), 'os.path.join', 'join', (['module_dir', 'c_source_file'], {}), '(module_dir, c_source_file)\n', (2173, 2200), False, 'from os.path import join, expanduser\n')] |
from __future__ import division
from collections import defaultdict
import numpy as np
from time import time
import random
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
# import tensorflow as tf
class DataModule():
def __init__(self, conf, filename):
self.conf = conf
self.data_dict = {}
self.terminal_flag = 1
self.filename = filename
self.index = 0
####### Initialize Procedures #######
def prepareModelSupplement(self, model):
data_dict = {}
if 'CONSUMED_ITEMS_SPARSE_MATRIX' in model.supply_set:
self.generateConsumedItemsSparseMatrix()
#self.arrangePositiveData()
data_dict['CONSUMED_ITEMS_INDICES_INPUT'] = self.consumed_items_indices_list
data_dict['CONSUMED_ITEMS_VALUES_INPUT'] = self.consumed_items_values_list
data_dict['CONSUMED_ITEMS_VALUES_WEIGHT_AVG_INPUT'] = self.consumed_items_values_weight_avg_list
data_dict['CONSUMED_ITEMS_NUM_INPUT'] = self.consumed_item_num_list
data_dict['CONSUMED_ITEMS_NUM_DICT_INPUT'] = self.user_item_num_dict
data_dict['USER_ITEM_SPARSITY_DICT'] = self.user_item_sparsity_dict
if 'SOCIAL_NEIGHBORS_SPARSE_MATRIX' in model.supply_set:
self.readSocialNeighbors()
self.generateSocialNeighborsSparseMatrix()
data_dict['SOCIAL_NEIGHBORS_INDICES_INPUT'] = self.social_neighbors_indices_list
data_dict['SOCIAL_NEIGHBORS_VALUES_INPUT'] = self.social_neighbors_values_list
data_dict['SOCIAL_NEIGHBORS_VALUES_WEIGHT_AVG_INPUT'] = self.social_neighbors_values_weight_avg_list
data_dict['SOCIAL_NEIGHBORS_NUM_INPUT'] = self.social_neighbor_num_list
data_dict['SOCIAL_NEIGHBORS_NUM_DICT_INPUT'] = self.social_neighbors_num_dict
data_dict['USER_USER_SPARSITY_DICT']= self.user_user_sparsity_dict
if 'ITEM_CUSTOMER_SPARSE_MATRIX' in model.supply_set:
self.generateConsumedItemsSparseMatrixForItemUser()
data_dict['ITEM_CUSTOMER_INDICES_INPUT'] = self.item_customer_indices_list
data_dict['ITEM_CUSTOMER_VALUES_INPUT'] = self.item_customer_values_list
data_dict['ITEM_CUSTOMER_VALUES_WEIGHT_AVG_INPUT'] = self.item_customer_values_weight_avg_list
data_dict['ITEM_CUSTOMER_NUM_INPUT'] = self.item_customer_num_list
data_dict['ITEM_USER_NUM_DICT_INPUT'] = self.item_user_num_dict
return data_dict
def initializeRankingTrain(self):
self.readData()
self.arrangePositiveData()
self.arrangePositiveDataForItemUser()
self.generateTrainNegative()
def initializeRankingVT(self):
self.readData()
self.arrangePositiveData()
self.arrangePositiveDataForItemUser()
self.generateTrainNegative()
def initalizeRankingEva(self):
self.readData()
self.getEvaPositiveBatch()
self.generateEvaNegative()
def linkedMap(self):
self.data_dict['USER_LIST'] = self.user_list
self.data_dict['ITEM_LIST'] = self.item_list
self.data_dict['LABEL_LIST'] = self.labels_list
def linkedRankingEvaMap(self):
self.data_dict['EVA_USER_LIST'] = self.eva_user_list
self.data_dict['EVA_ITEM_LIST'] = self.eva_item_list
####### Data Loading #######
def readData(self):
f = open(self.filename)
total_user_list = set()
hash_data = defaultdict(int)
for _, line in enumerate(f):
arr = line.split("\t")
hash_data[(int(arr[0]), int(arr[1]))] = 1
total_user_list.add(int(arr[0]))
self.total_user_list = list(total_user_list)
self.hash_data = hash_data
def arrangePositiveData(self):
positive_data = defaultdict(set)
user_item_num_dict = defaultdict(set)
total_data = set()
hash_data = self.hash_data
for (u, i) in hash_data:
total_data.add((u, i))
positive_data[u].add(i)
user_list = sorted(list(positive_data.keys()))
for u in range(self.conf.num_users):
user_item_num_dict[u] = len(positive_data[u])+1
self.positive_data = positive_data
self.user_item_num_dict = user_item_num_dict
self.total_data = len(total_data)
def Sparsity_analysis_for_user_item_network(self):
# Stub: user-item sparsity statistics are actually computed in
# generateConsumedItemsSparseMatrix below.
hash_data_for_user_item = self.hash_data
sparsity_user_item_dict = {}
def arrangePositiveDataForItemUser(self):
positive_data_for_item_user = defaultdict(set)
item_user_num_dict = defaultdict(set)
total_data_for_item_user = set()
hash_data_for_item_user = self.hash_data
for (u, i) in hash_data_for_item_user:
total_data_for_item_user.add((i, u))
positive_data_for_item_user[i].add(u)
item_list = sorted(list(positive_data_for_item_user.keys()))
for i in range(self.conf.num_items):
item_user_num_dict[i] = len(positive_data_for_item_user[i])+1
self.item_user_num_dict = item_user_num_dict
self.positive_data_for_item_user = positive_data_for_item_user
self.total_data_for_item_user = len(total_data_for_item_user)
# ----------------------
# This function generates the negative samples for the train/val/test sets
def generateTrainNegative(self):
num_items = self.conf.num_items
num_negatives = self.conf.num_negatives
negative_data = defaultdict(set)
total_data = set()
hash_data = self.hash_data
for (u, i) in hash_data:
total_data.add((u, i))
for _ in range(num_negatives):
j = np.random.randint(num_items)
while (u, j) in hash_data:
j = np.random.randint(num_items)
negative_data[u].add(j)
total_data.add((u, j))
self.negative_data = negative_data
self.terminal_flag = 1
# ----------------------
# This function is designed for the val/test sets, to compute the loss
def getVTRankingOneBatch(self):
positive_data = self.positive_data
negative_data = self.negative_data
total_user_list = self.total_user_list
user_list = []
item_list = []
labels_list = []
for u in total_user_list:
user_list.extend([u] * len(positive_data[u]))
item_list.extend(positive_data[u])
labels_list.extend([1] * len(positive_data[u]))
user_list.extend([u] * len(negative_data[u]))
item_list.extend(negative_data[u])
labels_list.extend([0] * len(negative_data[u]))
self.user_list = np.reshape(user_list, [-1, 1])
self.item_list = np.reshape(item_list, [-1, 1])
self.labels_list = np.reshape(labels_list, [-1, 1])
# ----------------------
# This function is designed for the training process
def getTrainRankingBatch(self):
positive_data = self.positive_data
negative_data = self.negative_data
total_user_list = self.total_user_list
index = self.index
batch_size = self.conf.training_batch_size
user_list, item_list, labels_list = [], [], []
if index + batch_size < len(total_user_list):
target_user_list = total_user_list[index:index+batch_size]
self.index = index + batch_size
else:
target_user_list = total_user_list[index:len(total_user_list)]
self.index = 0
self.terminal_flag = 0
for u in target_user_list:
user_list.extend([u] * len(positive_data[u]))
item_list.extend(list(positive_data[u]))
labels_list.extend([1] * len(positive_data[u]))
user_list.extend([u] * len(negative_data[u]))
item_list.extend(list(negative_data[u]))
labels_list.extend([0] * len(negative_data[u]))
self.user_list = np.reshape(user_list, [-1, 1])
self.item_list = np.reshape(item_list, [-1, 1])
self.labels_list = np.reshape(labels_list, [-1, 1])
# ----------------------
# This function builds the positive evaluation batch
def getEvaPositiveBatch(self):
hash_data = self.hash_data
user_list = []
item_list = []
index_dict = defaultdict(list)
index = 0
for (u, i) in hash_data:
user_list.append(u)
item_list.append(i)
index_dict[u].append(index)
index = index + 1
self.eva_user_list = np.reshape(user_list, [-1, 1])
self.eva_item_list = np.reshape(item_list, [-1, 1])
self.eva_index_dict = index_dict
# ----------------------
# This function generates the negative evaluation data
def generateEvaNegative(self):
hash_data = self.hash_data
total_user_list = self.total_user_list
num_evaluate = self.conf.num_evaluate
num_items = self.conf.num_items
eva_negative_data = defaultdict(list)
for u in total_user_list:
for _ in range(num_evaluate):
j = np.random.randint(num_items)
while (u, j) in hash_data:
j = np.random.randint(num_items)
eva_negative_data[u].append(j)
self.eva_negative_data = eva_negative_data
# ----------------------
# This function generates negative batches for ranking evaluation
def getEvaRankingBatch(self):
batch_size = self.conf.evaluate_batch_size
num_evaluate = self.conf.num_evaluate
eva_negative_data = self.eva_negative_data
total_user_list = self.total_user_list
index = self.index
terminal_flag = 1
total_users = len(total_user_list)
user_list = []
item_list = []
if index + batch_size < total_users:
batch_user_list = total_user_list[index:index+batch_size]
self.index = index + batch_size
else:
terminal_flag = 0
batch_user_list = total_user_list[index:total_users]
self.index = 0
for u in batch_user_list:
user_list.extend([u]*num_evaluate)
item_list.extend(eva_negative_data[u])
self.eva_user_list = np.reshape(user_list, [-1, 1])
self.eva_item_list = np.reshape(item_list, [-1, 1])
return batch_user_list, terminal_flag
# ----------------------
# Read social network information
def readSocialNeighbors(self, friends_flag=1):
social_neighbors = defaultdict(set)
social_neighbors_num_dict = defaultdict(set)
links_file = open(self.conf.links_filename)
for _, line in enumerate(links_file):
tmp = line.split('\t')
u1, u2 = int(tmp[0]), int(tmp[1])
social_neighbors[u1].add(u2)
if friends_flag == 1:
social_neighbors[u2].add(u1)
user_list = sorted(list(social_neighbors.keys()))
for u in range(self.conf.num_users):
social_neighbors_num_dict[u] = len(social_neighbors[u])+1
self.social_neighbors_num_dict = social_neighbors_num_dict
self.social_neighbors = social_neighbors
def arrangePositiveData(self):
positive_data = defaultdict(set)
user_item_num_dict = defaultdict(set)
total_data = set()
hash_data = self.hash_data
for (u, i) in hash_data:
total_data.add((u, i))
positive_data[u].add(i)
user_list = sorted(list(positive_data.keys()))
for u in range(self.conf.num_users):
user_item_num_dict[u] = len(positive_data[u])+1
self.positive_data = positive_data
self.user_item_num_dict = user_item_num_dict
self.total_data = len(total_data)
# ----------------------
#Generate Social Neighbors Sparse Matrix Indices and Values
def generateSocialNeighborsSparseMatrix(self):
social_neighbors = self.social_neighbors
social_neighbors_num_dict = self.social_neighbors_num_dict #weight avg
social_neighbors_indices_list = []
social_neighbors_values_list = []
social_neighbors_values_weight_avg_list = []
social_neighbor_num_list = []
social_neighbors_dict = defaultdict(list)
user_user_num_for_sparsity_dict = defaultdict(set)
user_user_sparsity_dict = {}
user_user_sparsity_dict['0-4'] = []
user_user_sparsity_dict['4-8'] = []
user_user_sparsity_dict['8-16'] = []
user_user_sparsity_dict['16-32'] = []
user_user_sparsity_dict['32-64'] = []
user_user_sparsity_dict['64-'] = []
for u in range(self.conf.num_users):
user_user_num_for_sparsity_dict[u] = len(social_neighbors[u])
for u in social_neighbors:
social_neighbors_dict[u] = sorted(social_neighbors[u])
user_list = sorted(list(social_neighbors.keys()))
#node att
for user in range(self.conf.num_users):
if user in social_neighbors_dict:
social_neighbor_num_list.append(len(social_neighbors_dict[user]))
else:
social_neighbor_num_list.append(1)
for user in user_list:
for friend in social_neighbors_dict[user]:
social_neighbors_indices_list.append([user, friend])
social_neighbors_values_list.append(1.0/len(social_neighbors_dict[user]))
social_neighbors_values_weight_avg_list.append(1.0/(np.sqrt(social_neighbors_num_dict[user])*np.sqrt(social_neighbors_num_dict[friend]))) #weight avg
for u in range(self.conf.num_users):
cur_user_neighbors_num = user_user_num_for_sparsity_dict[u]
if( (cur_user_neighbors_num >=0) & (cur_user_neighbors_num<4) ):
user_user_sparsity_dict['0-4'].append(u)
elif( (cur_user_neighbors_num >=4) & (cur_user_neighbors_num<8) ):
user_user_sparsity_dict['4-8'].append(u)
elif( (cur_user_neighbors_num >=8) & (cur_user_neighbors_num<16) ):
user_user_sparsity_dict['8-16'].append(u)
elif( (cur_user_neighbors_num >=16) & (cur_user_neighbors_num<32) ):
user_user_sparsity_dict['16-32'].append(u)
elif( (cur_user_neighbors_num >=32) & (cur_user_neighbors_num<64) ):
user_user_sparsity_dict['32-64'].append(u)
elif( cur_user_neighbors_num >=64):
user_user_sparsity_dict['64-'].append(u)
self.user_user_sparsity_dict = user_user_sparsity_dict
self.social_neighbors_indices_list = np.array(social_neighbors_indices_list).astype(np.int64)
self.social_neighbors_values_list = np.array(social_neighbors_values_list).astype(np.float32)
self.social_neighbors_values_weight_avg_list = np.array(social_neighbors_values_weight_avg_list).astype(np.float32) # weight avg
self.social_neighbor_num_list = np.array(social_neighbor_num_list).astype(np.int64)
#self.social_neighbors_values_list = tf.Variable(tf.random_normal([len(self.social_neighbors_indices_list)], stddev=0.01))
# ----------------------
#Generate Consumed Items Sparse Matrix Indices and Values
def generateConsumedItemsSparseMatrix(self):
positive_data = self.positive_data
consumed_items_indices_list = []
consumed_items_values_list = []
consumed_items_values_weight_avg_list = []
consumed_item_num_list = []
consumed_items_dict = defaultdict(list)
user_item_num_for_sparsity_dict = defaultdict(set)
user_item_sparsity_dict = {}
user_item_sparsity_dict['0-4'] = []
user_item_sparsity_dict['4-8'] = []
user_item_sparsity_dict['8-16'] = []
user_item_sparsity_dict['16-32'] = []
user_item_sparsity_dict['32-64'] = []
user_item_sparsity_dict['64-'] = []
consumed_items_num_dict = self.user_item_num_dict #weight avg
#social_neighbors_num_dict = self.social_neighbors_num_dict #weight avg
item_user_num_dict = self.item_user_num_dict #weight avg
for u in positive_data:
consumed_items_dict[u] = sorted(positive_data[u])
user_list = sorted(list(positive_data.keys()))
for u in range(self.conf.num_users):
user_item_num_for_sparsity_dict[u] = len(positive_data[u])
for user in range(self.conf.num_users):
if user in consumed_items_dict:
consumed_item_num_list.append(len(consumed_items_dict[user]))
else:
consumed_item_num_list.append(1)
for u in user_list:
for i in consumed_items_dict[u]:
consumed_items_indices_list.append([u, i])
consumed_items_values_list.append(1.0/len(consumed_items_dict[u]))
consumed_items_values_weight_avg_list.append(1.0/( np.sqrt(consumed_items_num_dict[u]) * np.sqrt(item_user_num_dict[i]) )) #weight avg
for u in range(self.conf.num_users):
cur_user_consumed_item_num = user_item_num_for_sparsity_dict[u]
if( (cur_user_consumed_item_num >=0) & (cur_user_consumed_item_num<4) ):
user_item_sparsity_dict['0-4'].append(u)
elif( (cur_user_consumed_item_num >=4) & (cur_user_consumed_item_num<8) ):
user_item_sparsity_dict['4-8'].append(u)
elif( (cur_user_consumed_item_num >=8) & (cur_user_consumed_item_num<16) ):
user_item_sparsity_dict['8-16'].append(u)
elif( (cur_user_consumed_item_num >=16) & (cur_user_consumed_item_num<32) ):
user_item_sparsity_dict['16-32'].append(u)
elif( (cur_user_consumed_item_num >=32) & (cur_user_consumed_item_num<64) ):
user_item_sparsity_dict['32-64'].append(u)
elif( cur_user_consumed_item_num >=64):
user_item_sparsity_dict['64-'].append(u)
self.user_item_sparsity_dict = user_item_sparsity_dict
self.consumed_items_indices_list = np.array(consumed_items_indices_list).astype(np.int64)
self.consumed_items_values_list = np.array(consumed_items_values_list).astype(np.float32)
self.consumed_items_values_weight_avg_list = np.array(consumed_items_values_weight_avg_list).astype(np.float32) #weight avg
self.consumed_item_num_list = np.array(consumed_item_num_list).astype(np.int64)
def generateConsumedItemsSparseMatrixForItemUser(self):
positive_data_for_item_user = self.positive_data_for_item_user
item_customer_indices_list = []
item_customer_values_list = []
item_customer_values_weight_avg_list = []
item_customer_num_list = []
item_customer_dict = defaultdict(list)
consumed_items_num_dict = self.user_item_num_dict #weight avg
#social_neighbors_num_dict = self.social_neighbors_num_dict #weight avg
item_user_num_dict = self.item_user_num_dict #weight avg
for i in positive_data_for_item_user:
item_customer_dict[i] = sorted(positive_data_for_item_user[i])
item_list = sorted(list(positive_data_for_item_user.keys()))
for item in range(self.conf.num_items):
if item in item_customer_dict:
item_customer_num_list.append(len(item_customer_dict[item]))
else:
item_customer_num_list.append(1)
for i in item_list:
for u in item_customer_dict[i]:
item_customer_indices_list.append([i, u])
item_customer_values_list.append(1.0/len(item_customer_dict[i]))
item_customer_values_weight_avg_list.append(1.0/( np.sqrt(consumed_items_num_dict[u]) * np.sqrt(item_user_num_dict[i]) ))
self.item_customer_indices_list = np.array(item_customer_indices_list).astype(np.int64)
self.item_customer_values_list = np.array(item_customer_values_list).astype(np.float32)
self.item_customer_num_list = np.array(item_customer_num_list).astype(np.int64)
self.item_customer_values_weight_avg_list = np.array(item_customer_values_weight_avg_list).astype(np.float32)
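
# Hedged usage sketch. The conf attributes referenced by this class
# (num_users, num_items, num_negatives, training_batch_size, links_filename,
# num_evaluate, evaluate_batch_size) are taken from the code above; the
# filename and driver loop below are illustrative assumptions only:
#
#     d_train = DataModule(conf, "data/yelp.train.rating")
#     d_train.initializeRankingTrain()
#     while d_train.terminal_flag:
#         d_train.getTrainRankingBatch()
#         d_train.linkedMap()
#         feed = d_train.data_dict  # USER_LIST / ITEM_LIST / LABEL_LIST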
| [
"numpy.reshape",
"tensorflow.compat.v1.disable_v2_behavior",
"numpy.sqrt",
"numpy.array",
"numpy.random.randint",
"collections.defaultdict"
]
| [((157, 181), 'tensorflow.compat.v1.disable_v2_behavior', 'tf.disable_v2_behavior', ([], {}), '()\n', (179, 181), True, 'import tensorflow.compat.v1 as tf\n'), ((3493, 3509), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (3504, 3509), False, 'from collections import defaultdict\n'), ((3831, 3847), 'collections.defaultdict', 'defaultdict', (['set'], {}), '(set)\n', (3842, 3847), False, 'from collections import defaultdict\n'), ((3877, 3893), 'collections.defaultdict', 'defaultdict', (['set'], {}), '(set)\n', (3888, 3893), False, 'from collections import defaultdict\n'), ((4666, 4682), 'collections.defaultdict', 'defaultdict', (['set'], {}), '(set)\n', (4677, 4682), False, 'from collections import defaultdict\n'), ((4712, 4728), 'collections.defaultdict', 'defaultdict', (['set'], {}), '(set)\n', (4723, 4728), False, 'from collections import defaultdict\n'), ((5605, 5621), 'collections.defaultdict', 'defaultdict', (['set'], {}), '(set)\n', (5616, 5621), False, 'from collections import defaultdict\n'), ((6828, 6858), 'numpy.reshape', 'np.reshape', (['user_list', '[-1, 1]'], {}), '(user_list, [-1, 1])\n', (6838, 6858), True, 'import numpy as np\n'), ((6884, 6914), 'numpy.reshape', 'np.reshape', (['item_list', '[-1, 1]'], {}), '(item_list, [-1, 1])\n', (6894, 6914), True, 'import numpy as np\n'), ((6942, 6974), 'numpy.reshape', 'np.reshape', (['labels_list', '[-1, 1]'], {}), '(labels_list, [-1, 1])\n', (6952, 6974), True, 'import numpy as np\n'), ((8107, 8137), 'numpy.reshape', 'np.reshape', (['user_list', '[-1, 1]'], {}), '(user_list, [-1, 1])\n', (8117, 8137), True, 'import numpy as np\n'), ((8163, 8193), 'numpy.reshape', 'np.reshape', (['item_list', '[-1, 1]'], {}), '(item_list, [-1, 1])\n', (8173, 8193), True, 'import numpy as np\n'), ((8221, 8253), 'numpy.reshape', 'np.reshape', (['labels_list', '[-1, 1]'], {}), '(labels_list, [-1, 1])\n', (8231, 8253), True, 'import numpy as np\n'), ((8479, 8496), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (8490, 8496), False, 'from collections import defaultdict\n'), ((8711, 8741), 'numpy.reshape', 'np.reshape', (['user_list', '[-1, 1]'], {}), '(user_list, [-1, 1])\n', (8721, 8741), True, 'import numpy as np\n'), ((8771, 8801), 'numpy.reshape', 'np.reshape', (['item_list', '[-1, 1]'], {}), '(item_list, [-1, 1])\n', (8781, 8801), True, 'import numpy as np\n'), ((9170, 9187), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (9181, 9187), False, 'from collections import defaultdict\n'), ((10444, 10474), 'numpy.reshape', 'np.reshape', (['user_list', '[-1, 1]'], {}), '(user_list, [-1, 1])\n', (10454, 10474), True, 'import numpy as np\n'), ((10504, 10534), 'numpy.reshape', 'np.reshape', (['item_list', '[-1, 1]'], {}), '(item_list, [-1, 1])\n', (10514, 10534), True, 'import numpy as np\n'), ((10728, 10744), 'collections.defaultdict', 'defaultdict', (['set'], {}), '(set)\n', (10739, 10744), False, 'from collections import defaultdict\n'), ((10781, 10797), 'collections.defaultdict', 'defaultdict', (['set'], {}), '(set)\n', (10792, 10797), False, 'from collections import defaultdict\n'), ((11448, 11464), 'collections.defaultdict', 'defaultdict', (['set'], {}), '(set)\n', (11459, 11464), False, 'from collections import defaultdict\n'), ((11494, 11510), 'collections.defaultdict', 'defaultdict', (['set'], {}), '(set)\n', (11505, 11510), False, 'from collections import defaultdict\n'), ((12460, 12477), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (12471, 12477), False, 
'from collections import defaultdict\n'), ((12521, 12537), 'collections.defaultdict', 'defaultdict', (['set'], {}), '(set)\n', (12532, 12537), False, 'from collections import defaultdict\n'), ((15772, 15789), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (15783, 15789), False, 'from collections import defaultdict\n'), ((15832, 15848), 'collections.defaultdict', 'defaultdict', (['set'], {}), '(set)\n', (15843, 15848), False, 'from collections import defaultdict\n'), ((19040, 19057), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (19051, 19057), False, 'from collections import defaultdict\n'), ((5815, 5843), 'numpy.random.randint', 'np.random.randint', (['num_items'], {}), '(num_items)\n', (5832, 5843), True, 'import numpy as np\n'), ((9284, 9312), 'numpy.random.randint', 'np.random.randint', (['num_items'], {}), '(num_items)\n', (9301, 9312), True, 'import numpy as np\n'), ((14866, 14905), 'numpy.array', 'np.array', (['social_neighbors_indices_list'], {}), '(social_neighbors_indices_list)\n', (14874, 14905), True, 'import numpy as np\n'), ((14967, 15005), 'numpy.array', 'np.array', (['social_neighbors_values_list'], {}), '(social_neighbors_values_list)\n', (14975, 15005), True, 'import numpy as np\n'), ((15080, 15129), 'numpy.array', 'np.array', (['social_neighbors_values_weight_avg_list'], {}), '(social_neighbors_values_weight_avg_list)\n', (15088, 15129), True, 'import numpy as np\n'), ((15204, 15238), 'numpy.array', 'np.array', (['social_neighbor_num_list'], {}), '(social_neighbor_num_list)\n', (15212, 15238), True, 'import numpy as np\n'), ((18335, 18372), 'numpy.array', 'np.array', (['consumed_items_indices_list'], {}), '(consumed_items_indices_list)\n', (18343, 18372), True, 'import numpy as np\n'), ((18432, 18468), 'numpy.array', 'np.array', (['consumed_items_values_list'], {}), '(consumed_items_values_list)\n', (18440, 18468), True, 'import numpy as np\n'), ((18541, 18588), 'numpy.array', 'np.array', (['consumed_items_values_weight_avg_list'], {}), '(consumed_items_values_weight_avg_list)\n', (18549, 18588), True, 'import numpy as np\n'), ((18660, 18692), 'numpy.array', 'np.array', (['consumed_item_num_list'], {}), '(consumed_item_num_list)\n', (18668, 18692), True, 'import numpy as np\n'), ((20114, 20150), 'numpy.array', 'np.array', (['item_customer_indices_list'], {}), '(item_customer_indices_list)\n', (20122, 20150), True, 'import numpy as np\n'), ((20209, 20244), 'numpy.array', 'np.array', (['item_customer_values_list'], {}), '(item_customer_values_list)\n', (20217, 20244), True, 'import numpy as np\n'), ((20302, 20334), 'numpy.array', 'np.array', (['item_customer_num_list'], {}), '(item_customer_num_list)\n', (20310, 20334), True, 'import numpy as np\n'), ((20404, 20450), 'numpy.array', 'np.array', (['item_customer_values_weight_avg_list'], {}), '(item_customer_values_weight_avg_list)\n', (20412, 20450), True, 'import numpy as np\n'), ((5911, 5939), 'numpy.random.randint', 'np.random.randint', (['num_items'], {}), '(num_items)\n', (5928, 5939), True, 'import numpy as np\n'), ((9380, 9408), 'numpy.random.randint', 'np.random.randint', (['num_items'], {}), '(num_items)\n', (9397, 9408), True, 'import numpy as np\n'), ((13727, 13767), 'numpy.sqrt', 'np.sqrt', (['social_neighbors_num_dict[user]'], {}), '(social_neighbors_num_dict[user])\n', (13734, 13767), True, 'import numpy as np\n'), ((13768, 13810), 'numpy.sqrt', 'np.sqrt', (['social_neighbors_num_dict[friend]'], {}), '(social_neighbors_num_dict[friend])\n', (13775, 13810), True, 
'import numpy as np\n'), ((17182, 17217), 'numpy.sqrt', 'np.sqrt', (['consumed_items_num_dict[u]'], {}), '(consumed_items_num_dict[u])\n', (17189, 17217), True, 'import numpy as np\n'), ((17221, 17251), 'numpy.sqrt', 'np.sqrt', (['item_user_num_dict[i]'], {}), '(item_user_num_dict[i])\n', (17228, 17251), True, 'import numpy as np\n'), ((19991, 20026), 'numpy.sqrt', 'np.sqrt', (['consumed_items_num_dict[u]'], {}), '(consumed_items_num_dict[u])\n', (19998, 20026), True, 'import numpy as np\n'), ((20030, 20060), 'numpy.sqrt', 'np.sqrt', (['item_user_num_dict[i]'], {}), '(item_user_num_dict[i])\n', (20037, 20060), True, 'import numpy as np\n')] |
import torch.nn as nn
import torch.nn.functional as F
import copy
from src.layers.layers import Encoder, EncoderLayer, Decoder, DecoderLayer, PositionwiseFeedForward
from src.layers.preprocessing import Embeddings, PositionalEncoding
from src.layers.attention import MultiHeadedAttention
### Generic EncoderDecoder structure:
class EncoderDecoder(nn.Module):
"""
A standard Encoder-Decoder architecture. Base for this and many
other models.
"""
def __init__(self, encoder, decoder, src_embed, tgt_embed, generator):
super(EncoderDecoder, self).__init__()
self.encoder = encoder
self.decoder = decoder
self.src_embed = src_embed
self.tgt_embed = tgt_embed
self.generator = generator
def forward(self, src, tgt, src_mask, tgt_mask):
"Take in and process masked src and target sequences."
encoded_src = self.encode(src, src_mask)
return self.decode(encoded_src, src_mask, tgt, tgt_mask)
def encode(self, src, src_mask):
embedded_src = self.src_embed(src)
return self.encoder(embedded_src, src_mask)
def decode(self, memory, src_mask, tgt, tgt_mask):
embedded_tgt = self.tgt_embed(tgt)
return self.decoder(embedded_tgt, memory, src_mask, tgt_mask)
class Generator(nn.Module):
"Define standard linear + softmax generation step."
def __init__(self, d_model, vocab):
super(Generator, self).__init__()
self.proj = nn.Linear(d_model, vocab)
def forward(self, x):
return F.log_softmax(self.proj(x), dim=-1)
def make_model(src_vocab, tgt_vocab, N=6, d_model=512, d_ff=2048, h=8, dropout=0.1, alpha=0.5):
"Helper: Construct a model from hyperparameters."
c = copy.deepcopy
attn = MultiHeadedAttention(h, d_model, alpha=alpha)
ff = PositionwiseFeedForward(d_model, d_ff, dropout)
position = PositionalEncoding(d_model, dropout)
model = EncoderDecoder(
Encoder(EncoderLayer(d_model, c(attn), c(ff), dropout), N),
Decoder(DecoderLayer(d_model, c(attn), c(attn), c(ff), dropout), N),
nn.Sequential(Embeddings(d_model, src_vocab), c(position)),
nn.Sequential(Embeddings(d_model, tgt_vocab), c(position)),
Generator(d_model, tgt_vocab)
)
# This was important from their code.
# Initialize parameters with Glorot / fan_avg.
for p in model.parameters():
if p.dim() > 1:
nn.init.xavier_uniform(p)
return model
if __name__ == '__main__':
# Small example model
tmp_model = make_model(10, 10, 2)
print(tmp_model)
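# Hedged sketch of a forward pass; the tensor shapes and mask layout below are
# assumptions for illustration, not asserted by this file:
#
#     import torch
#     src = torch.randint(0, 10, (2, 7))   # (batch, src_len) token ids
#     tgt = torch.randint(0, 10, (2, 5))   # (batch, tgt_len) token ids
#     src_mask = torch.ones(2, 1, 7).bool()
#     tgt_mask = torch.ones(2, 5, 5).bool()
#     out = tmp_model(src, tgt, src_mask, tgt_mask)
#     log_probs = tmp_model.generator(out)  # log-softmax over the target vocab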
| [
"src.layers.preprocessing.PositionalEncoding",
"src.layers.layers.PositionwiseFeedForward",
"src.layers.preprocessing.Embeddings",
"torch.nn.init.xavier_uniform",
"torch.nn.Linear",
"src.layers.attention.MultiHeadedAttention"
]
| [((1828, 1873), 'src.layers.attention.MultiHeadedAttention', 'MultiHeadedAttention', (['h', 'd_model'], {'alpha': 'alpha'}), '(h, d_model, alpha=alpha)\n', (1848, 1873), False, 'from src.layers.attention import MultiHeadedAttention\n'), ((1883, 1930), 'src.layers.layers.PositionwiseFeedForward', 'PositionwiseFeedForward', (['d_model', 'd_ff', 'dropout'], {}), '(d_model, d_ff, dropout)\n', (1906, 1930), False, 'from src.layers.layers import Encoder, EncoderLayer, Decoder, DecoderLayer, PositionwiseFeedForward\n'), ((1946, 1982), 'src.layers.preprocessing.PositionalEncoding', 'PositionalEncoding', (['d_model', 'dropout'], {}), '(d_model, dropout)\n', (1964, 1982), False, 'from src.layers.preprocessing import Embeddings, PositionalEncoding\n'), ((1539, 1564), 'torch.nn.Linear', 'nn.Linear', (['d_model', 'vocab'], {}), '(d_model, vocab)\n', (1548, 1564), True, 'import torch.nn as nn\n'), ((2178, 2208), 'src.layers.preprocessing.Embeddings', 'Embeddings', (['d_model', 'src_vocab'], {}), '(d_model, src_vocab)\n', (2188, 2208), False, 'from src.layers.preprocessing import Embeddings, PositionalEncoding\n'), ((2246, 2276), 'src.layers.preprocessing.Embeddings', 'Embeddings', (['d_model', 'tgt_vocab'], {}), '(d_model, tgt_vocab)\n', (2256, 2276), False, 'from src.layers.preprocessing import Embeddings, PositionalEncoding\n'), ((2499, 2524), 'torch.nn.init.xavier_uniform', 'nn.init.xavier_uniform', (['p'], {}), '(p)\n', (2521, 2524), True, 'import torch.nn as nn\n')] |
import os
import timeit
from typing import List
import numpy as np
from numpy.random import RandomState
from numpy.testing import assert_allclose, assert_almost_equal
import pytest
from scipy.special import gamma
import arch.univariate.recursions_python as recpy
CYTHON_COVERAGE = os.environ.get("ARCH_CYTHON_COVERAGE", "0") in ("true", "1", "True")
try:
import arch.univariate.recursions as rec_cython
missing_extension = False
except ImportError:
missing_extension = True
if missing_extension:
rec = recpy
else:
rec = rec_cython
try:
import numba # noqa
missing_numba = False
except ImportError:
missing_numba = True
pytestmark = pytest.mark.filterwarnings("ignore::arch.compat.numba.PerformanceWarning")
class Timer(object):
def __init__(
self,
first,
first_name,
second,
second_name,
model_name,
setup,
repeat=5,
number=10,
) -> None:
self.first_code = first
self.second_code = second
self.setup = setup
self.first_name = first_name
self.second_name = second_name
self.model_name = model_name
self.repeat = repeat
self.number = number
self._run = False
self.times: List[float] = []
self._codes = [first, second]
self.ratio = np.inf
def display(self):
if not self._run:
self.time()
self.ratio = self.times[0] / self.times[1]
title = self.model_name + " timing"
print("\n" + title)
print("-" * len(title))
print(self.first_name + ": " + "{:0.3f} ms".format(1000 * self.times[0]))
print(self.second_name + ": " + "{:0.3f} ms".format(1000 * self.times[1]))
if self.ratio < 1:
print(
"{0} is {1:0.1f}% faster".format(
self.first_name, 100 * (1 / self.ratio - 1)
)
)
else:
print(
"{0} is {1:0.1f}% faster".format(
self.second_name, 100 * (self.ratio - 1)
)
)
print(
self.first_name
+ "/"
+ self.second_name
+ " Ratio: {:0.3f}\n".format(self.ratio)
)
def time(self):
self.times = []
for code in self._codes:
timer = timeit.Timer(code, setup=self.setup)
self.times.append(min(timer.repeat(self.repeat, self.number)))
class TestRecursions(object):
@classmethod
def setup_class(cls):
cls.nobs = 1000
cls.rng = RandomState(12345)
cls.resids = cls.rng.standard_normal(cls.nobs)
cls.sigma2 = np.zeros_like(cls.resids)
var = cls.resids.var()
var_bounds = np.array([var / 1000000.0, var * 1000000.0])
cls.var_bounds = np.ones((cls.nobs, 2)) * var_bounds
cls.backcast = 1.0
cls.timer_setup = """
import numpy as np
import arch.univariate.recursions as rec
import arch.univariate.recursions_python as recpy
nobs = 10000
resids = np.random.standard_normal(nobs)
sigma2 = np.zeros_like(resids)
var = resids.var()
backcast = 1.0
var_bounds = np.array([var / 1000000.0, var * 1000000.0])
var_bounds = np.ones((nobs, 2)) * var_bounds
"""
def test_garch(self):
nobs, resids = self.nobs, self.resids
sigma2, backcast = self.sigma2, self.backcast
parameters = np.array([0.1, 0.4, 0.3, 0.2])
fresids = resids ** 2.0
sresids = np.sign(resids)
recpy.garch_recursion(
parameters,
fresids,
sresids,
sigma2,
1,
1,
1,
nobs,
backcast,
self.var_bounds,
)
sigma2_numba = sigma2.copy()
recpy.garch_recursion_python(
parameters,
fresids,
sresids,
sigma2,
1,
1,
1,
nobs,
backcast,
self.var_bounds,
)
sigma2_python = sigma2.copy()
rec.garch_recursion(
parameters,
fresids,
sresids,
sigma2,
1,
1,
1,
nobs,
backcast,
self.var_bounds,
)
assert_almost_equal(sigma2_numba, sigma2)
assert_almost_equal(sigma2_python, sigma2)
parameters = np.array([0.1, -0.4, 0.3, 0.2])
recpy.garch_recursion_python(
parameters,
fresids,
sresids,
sigma2,
1,
1,
1,
nobs,
backcast,
self.var_bounds,
)
assert np.all(sigma2 >= self.var_bounds[:, 0])
assert np.all(sigma2 <= 2 * self.var_bounds[:, 1])
parameters = np.array([0.1, 0.4, 3, 2])
recpy.garch_recursion_python(
parameters,
fresids,
sresids,
sigma2,
1,
1,
1,
nobs,
backcast,
self.var_bounds,
)
assert np.all(sigma2 >= self.var_bounds[:, 0])
assert np.all(sigma2 <= 2 * self.var_bounds[:, 1])
parameters = np.array([0.1, 0.4, 0.3, 0.2])
mod_fresids = fresids.copy()
mod_fresids[:1] = np.inf
recpy.garch_recursion_python(
parameters,
mod_fresids,
sresids,
sigma2,
1,
1,
1,
nobs,
backcast,
self.var_bounds,
)
assert np.all(sigma2 >= self.var_bounds[:, 0])
assert np.all(sigma2 <= 2 * self.var_bounds[:, 1])
rec.garch_recursion(
parameters,
mod_fresids,
sresids,
sigma2,
1,
1,
1,
nobs,
backcast,
self.var_bounds,
)
assert np.all(sigma2 >= self.var_bounds[:, 0])
assert np.all(sigma2 <= 2 * self.var_bounds[:, 1])
def test_harch(self):
nobs, resids = self.nobs, self.resids
sigma2, backcast = self.sigma2, self.backcast
parameters = np.array([0.1, 0.4, 0.3, 0.2])
lags = np.array([1, 5, 22], dtype=np.int32)
recpy.harch_recursion_python(
parameters, resids, sigma2, lags, nobs, backcast, self.var_bounds
)
sigma2_python = sigma2.copy()
recpy.harch_recursion(
parameters, resids, sigma2, lags, nobs, backcast, self.var_bounds
)
sigma2_numba = sigma2.copy()
rec.harch_recursion(
parameters, resids, sigma2, lags, nobs, backcast, self.var_bounds
)
assert_almost_equal(sigma2_numba, sigma2)
assert_almost_equal(sigma2_python, sigma2)
parameters = np.array([-0.1, -0.4, 0.3, 0.2])
recpy.harch_recursion_python(
parameters, resids, sigma2, lags, nobs, backcast, self.var_bounds
)
assert np.all(sigma2 >= self.var_bounds[:, 0])
assert np.all(sigma2 <= 2 * self.var_bounds[:, 1])
parameters = np.array([0.1, 4e8, 3, 2])
recpy.harch_recursion_python(
parameters, resids, sigma2, lags, nobs, backcast, self.var_bounds
)
assert np.all(sigma2 >= self.var_bounds[:, 0])
assert np.all(sigma2 <= 2 * self.var_bounds[:, 1])
parameters = np.array([0.1, 4e8, 3, 2])
mod_resids = resids.copy()
mod_resids[:10] = np.inf
recpy.harch_recursion_python(
parameters, mod_resids, sigma2, lags, nobs, backcast, self.var_bounds
)
assert np.all(sigma2 >= self.var_bounds[:, 0])
assert np.all(sigma2 <= 2 * self.var_bounds[:, 1])
rec.harch_recursion(
parameters, mod_resids, sigma2, lags, nobs, backcast, self.var_bounds
)
assert np.all(sigma2 >= self.var_bounds[:, 0])
assert np.all(sigma2 <= 2 * self.var_bounds[:, 1])
def test_arch(self):
nobs, resids = self.nobs, self.resids
sigma2, backcast = self.sigma2, self.backcast
parameters = np.array([0.1, 0.4, 0.3, 0.2])
p = 3
recpy.arch_recursion_python(
parameters, resids, sigma2, p, nobs, backcast, self.var_bounds
)
sigma2_python = sigma2.copy()
recpy.arch_recursion(
parameters, resids, sigma2, p, nobs, backcast, self.var_bounds
)
sigma2_numba = sigma2.copy()
rec.arch_recursion(
parameters, resids, sigma2, p, nobs, backcast, self.var_bounds
)
assert_almost_equal(sigma2_numba, sigma2)
assert_almost_equal(sigma2_python, sigma2)
parameters = np.array([-0.1, -0.4, 0.3, 0.2])
recpy.arch_recursion_python(
parameters, resids, sigma2, p, nobs, backcast, self.var_bounds
)
assert np.all(sigma2 >= self.var_bounds[:, 0])
assert np.all(sigma2 <= 2 * self.var_bounds[:, 1])
parameters = np.array([0.1, 4e8, 3, 2])
recpy.arch_recursion_python(
parameters, resids, sigma2, p, nobs, backcast, self.var_bounds
)
assert np.all(sigma2 >= self.var_bounds[:, 0])
assert np.all(sigma2 <= 2 * self.var_bounds[:, 1])
mod_resids = resids.copy()
mod_resids[:10] = np.inf
recpy.arch_recursion_python(
parameters, mod_resids, sigma2, p, nobs, backcast, self.var_bounds
)
assert np.all(sigma2 >= self.var_bounds[:, 0])
assert np.all(sigma2 <= 2 * self.var_bounds[:, 1])
rec.arch_recursion(
parameters, mod_resids, sigma2, p, nobs, backcast, self.var_bounds
)
assert np.all(sigma2 >= self.var_bounds[:, 0])
assert np.all(sigma2 <= 2 * self.var_bounds[:, 1])
def test_garch_power_1(self):
nobs, resids = self.nobs, self.resids
sigma2, backcast = self.sigma2, self.backcast
parameters = np.array([0.1, 0.4, 0.3, 0.2])
fresids = np.abs(resids) ** 1.0
sresids = np.sign(resids)
recpy.garch_recursion(
parameters,
fresids,
sresids,
sigma2,
1,
1,
1,
nobs,
backcast,
self.var_bounds,
)
sigma2_python = sigma2.copy()
rec.garch_recursion(
parameters,
fresids,
sresids,
sigma2,
1,
1,
1,
nobs,
backcast,
self.var_bounds,
)
assert_almost_equal(sigma2_python, sigma2)
def test_garch_direct(self):
nobs, resids = self.nobs, self.resids
sigma2, backcast = self.sigma2, self.backcast
parameters = np.array([0.1, 0.4, 0.3, 0.2])
fresids = np.abs(resids) ** 2.0
sresids = np.sign(resids)
for t in range(nobs):
if t == 0:
sigma2[t] = parameters.dot(
np.array([1.0, backcast, 0.5 * backcast, backcast])
)
else:
var = np.array(
[
1.0,
resids[t - 1] ** 2.0,
resids[t - 1] ** 2.0 * (resids[t - 1] < 0),
sigma2[t - 1],
]
)
sigma2[t] = parameters.dot(var)
sigma2_python = sigma2.copy()
rec.garch_recursion(
parameters,
fresids,
sresids,
sigma2,
1,
1,
1,
nobs,
backcast,
self.var_bounds,
)
assert_almost_equal(sigma2_python, sigma2)
def test_garch_no_q(self):
nobs, resids = self.nobs, self.resids
sigma2, backcast = self.sigma2, self.backcast
parameters = np.array([0.1, 0.4, 0.3])
fresids = resids ** 2.0
sresids = np.sign(resids)
recpy.garch_recursion(
parameters,
fresids,
sresids,
sigma2,
1,
1,
0,
nobs,
backcast,
self.var_bounds,
)
sigma2_python = sigma2.copy()
rec.garch_recursion(
parameters,
fresids,
sresids,
sigma2,
1,
1,
0,
nobs,
backcast,
self.var_bounds,
)
assert_almost_equal(sigma2_python, sigma2)
def test_garch_no_p(self):
nobs, resids = self.nobs, self.resids
sigma2, backcast = self.sigma2, self.backcast
parameters = np.array([0.1, 0.4, 0.3])
fresids = resids ** 2.0
sresids = np.sign(resids)
recpy.garch_recursion(
parameters,
fresids,
sresids,
sigma2,
0,
1,
1,
nobs,
backcast,
self.var_bounds,
)
sigma2_python = sigma2.copy()
rec.garch_recursion(
parameters,
fresids,
sresids,
sigma2,
0,
1,
1,
nobs,
backcast,
self.var_bounds,
)
assert_almost_equal(sigma2_python, sigma2)
def test_garch_no_o(self):
nobs, resids = self.nobs, self.resids
sigma2, backcast = self.sigma2, self.backcast
parameters = np.array([0.1, 0.4, 0.3, 0.2])
fresids = resids ** 2.0
sresids = np.sign(resids)
recpy.garch_recursion(
parameters,
fresids,
sresids,
sigma2,
1,
0,
1,
nobs,
backcast,
self.var_bounds,
)
sigma2_python = sigma2.copy()
rec.garch_recursion(
parameters,
fresids,
sresids,
sigma2,
1,
0,
1,
nobs,
backcast,
self.var_bounds,
)
assert_almost_equal(sigma2_python, sigma2)
def test_garch_arch(self):
backcast = self.backcast
nobs, resids, sigma2 = self.nobs, self.resids, self.sigma2
parameters = np.array([0.1, 0.4, 0.3, 0.2])
fresids = resids ** 2.0
sresids = np.sign(resids)
rec.garch_recursion(
parameters,
fresids,
sresids,
sigma2,
3,
0,
0,
nobs,
backcast,
self.var_bounds,
)
sigma2_garch = sigma2.copy()
rec.arch_recursion(
parameters, resids, sigma2, 3, nobs, backcast, self.var_bounds
)
assert_almost_equal(sigma2_garch, sigma2)
def test_bounds(self):
nobs, resids = self.nobs, self.resids
sigma2, backcast = self.sigma2, self.backcast
parameters = np.array([1e100, 0.4, 0.3, 0.2])
lags = np.array([1, 5, 22], dtype=np.int32)
recpy.harch_recursion(
parameters, resids, sigma2, lags, nobs, backcast, self.var_bounds
)
sigma2_python = sigma2.copy()
rec.harch_recursion(
parameters, resids, sigma2, lags, nobs, backcast, self.var_bounds
)
assert_almost_equal(sigma2_python, sigma2)
assert np.all(sigma2 >= self.var_bounds[:, 0])
assert np.all(sigma2 <= 2 * self.var_bounds[:, 1])
parameters = np.array([-1e100, 0.4, 0.3, 0.2])
recpy.harch_recursion(
parameters, resids, sigma2, lags, nobs, backcast, self.var_bounds
)
sigma2_python = sigma2.copy()
rec.harch_recursion(
parameters, resids, sigma2, lags, nobs, backcast, self.var_bounds
)
assert_almost_equal(sigma2_python, sigma2)
assert_almost_equal(sigma2, self.var_bounds[:, 0])
parameters = np.array([1e100, 0.4, 0.3, 0.2])
fresids = resids ** 2.0
sresids = np.sign(resids)
recpy.garch_recursion(
parameters,
fresids,
sresids,
sigma2,
1,
1,
1,
nobs,
backcast,
self.var_bounds,
)
sigma2_python = sigma2.copy()
rec.garch_recursion(
parameters,
fresids,
sresids,
sigma2,
1,
1,
1,
nobs,
backcast,
self.var_bounds,
)
assert_almost_equal(sigma2_python, sigma2)
assert np.all(sigma2 >= self.var_bounds[:, 0])
assert np.all(sigma2 <= 2 * self.var_bounds[:, 1])
parameters = np.array([-1e100, 0.4, 0.3, 0.2])
recpy.garch_recursion(
parameters,
fresids,
sresids,
sigma2,
1,
1,
1,
nobs,
backcast,
self.var_bounds,
)
sigma2_python = sigma2.copy()
rec.garch_recursion(
parameters,
fresids,
sresids,
sigma2,
1,
1,
1,
nobs,
backcast,
self.var_bounds,
)
assert_almost_equal(sigma2_python, sigma2)
assert_almost_equal(sigma2, self.var_bounds[:, 0])
parameters = np.array([1e100, 0.4, 0.3, 0.2])
recpy.arch_recursion(
parameters, resids, sigma2, 3, nobs, backcast, self.var_bounds
)
sigma2_python = sigma2.copy()
rec.arch_recursion(
parameters, resids, sigma2, 3, nobs, backcast, self.var_bounds
)
assert_almost_equal(sigma2_python, sigma2)
assert np.all(sigma2 >= self.var_bounds[:, 0])
assert np.all(sigma2 <= 2 * self.var_bounds[:, 1])
parameters = np.array([-1e100, 0.4, 0.3, 0.2])
recpy.arch_recursion(
parameters, resids, sigma2, 3, nobs, backcast, self.var_bounds
)
sigma2_python = sigma2.copy()
rec.arch_recursion(
parameters, resids, sigma2, 3, nobs, backcast, self.var_bounds
)
assert_almost_equal(sigma2_python, sigma2)
assert_almost_equal(sigma2, self.var_bounds[:, 0])
def test_egarch(self):
nobs = self.nobs
parameters = np.array([0.0, 0.1, -0.1, 0.95])
resids, sigma2 = self.resids, self.sigma2
p = o = q = 1
backcast = 0.0
var_bounds = self.var_bounds
lnsigma2 = np.empty_like(sigma2)
std_resids = np.empty_like(sigma2)
abs_std_resids = np.empty_like(sigma2)
recpy.egarch_recursion(
parameters,
resids,
sigma2,
p,
o,
q,
nobs,
backcast,
var_bounds,
lnsigma2,
std_resids,
abs_std_resids,
)
sigma2_numba = sigma2.copy()
recpy.egarch_recursion_python(
parameters,
resids,
sigma2,
p,
o,
q,
nobs,
backcast,
var_bounds,
lnsigma2,
std_resids,
abs_std_resids,
)
sigma2_python = sigma2.copy()
rec.egarch_recursion(
parameters,
resids,
sigma2,
p,
o,
q,
nobs,
backcast,
var_bounds,
lnsigma2,
std_resids,
abs_std_resids,
)
assert_almost_equal(sigma2_numba, sigma2)
assert_almost_equal(sigma2_python, sigma2)
norm_const = np.sqrt(2 / np.pi)
for t in range(nobs):
lnsigma2[t] = parameters[0]
if t == 0:
lnsigma2[t] += parameters[3] * backcast
else:
stdresid = resids[t - 1] / np.sqrt(sigma2[t - 1])
lnsigma2[t] += parameters[1] * (np.abs(stdresid) - norm_const)
lnsigma2[t] += parameters[2] * stdresid
lnsigma2[t] += parameters[3] * lnsigma2[t - 1]
sigma2[t] = np.exp(lnsigma2[t])
assert_almost_equal(sigma2_python, sigma2)
parameters = np.array([-100.0, 0.1, -0.1, 0.95])
recpy.egarch_recursion_python(
parameters,
resids,
sigma2,
p,
o,
q,
nobs,
backcast,
var_bounds,
lnsigma2,
std_resids,
abs_std_resids,
)
assert np.all(sigma2 >= self.var_bounds[:, 0])
assert np.all(sigma2 <= 2 * self.var_bounds[:, 1])
parameters = np.array([0.0, 0.1, -0.1, 9.5])
recpy.egarch_recursion_python(
parameters,
resids,
sigma2,
p,
o,
q,
nobs,
backcast,
var_bounds,
lnsigma2,
std_resids,
abs_std_resids,
)
assert np.all(sigma2 >= self.var_bounds[:, 0])
assert np.all(sigma2 <= 2 * self.var_bounds[:, 1])
parameters = np.array([0.0, 0.1, -0.1, 0.95])
mod_resids = resids.copy()
mod_resids[:1] = np.inf
recpy.egarch_recursion_python(
parameters,
resids,
sigma2,
p,
o,
q,
nobs,
backcast,
var_bounds,
lnsigma2,
std_resids,
abs_std_resids,
)
assert np.all(sigma2 >= self.var_bounds[:, 0])
assert np.all(sigma2 <= 2 * self.var_bounds[:, 1])
def test_midas_hyperbolic(self):
nobs, resids = self.nobs, self.resids
sigma2, backcast = self.sigma2, self.backcast
parameters = np.array([0.1, 0.8, 0])
j = np.arange(1, 22 + 1)
weights = gamma(j + 0.6) / (gamma(j + 1) * gamma(0.6))
weights = weights / weights.sum()
recpy.midas_recursion(
parameters, weights, resids, sigma2, nobs, backcast, self.var_bounds
)
sigma2_numba = sigma2.copy()
recpy.midas_recursion_python(
parameters, weights, resids, sigma2, nobs, backcast, self.var_bounds
)
sigma2_python = sigma2.copy()
rec.midas_recursion(
parameters, weights, resids, sigma2, nobs, backcast, self.var_bounds
)
assert_almost_equal(sigma2_numba, sigma2)
assert_almost_equal(sigma2_python, sigma2)
mod_resids = resids.copy()
mod_resids[:10] = np.inf
recpy.midas_recursion_python(
parameters, weights, mod_resids, sigma2, nobs, backcast, self.var_bounds
)
assert np.all(sigma2 >= self.var_bounds[:, 0])
assert np.all(sigma2 <= 2 * self.var_bounds[:, 1])
parameters = np.array([0.1, 10e10, 0])
j = np.arange(1, 22 + 1)
weights = gamma(j + 0.6) / (gamma(j + 1) * gamma(0.6))
weights = weights / weights.sum()
recpy.midas_recursion_python(
parameters, weights, resids, sigma2, nobs, backcast, self.var_bounds
)
assert np.all(sigma2 >= self.var_bounds[:, 0])
assert np.all(sigma2 <= 2 * self.var_bounds[:, 1])
rec.midas_recursion(
parameters, weights, resids, sigma2, nobs, backcast, self.var_bounds
)
assert np.all(sigma2 >= self.var_bounds[:, 0])
assert np.all(sigma2 <= 2 * self.var_bounds[:, 1])
parameters = np.array([0.1, -0.4, 0])
recpy.midas_recursion_python(
parameters, weights, resids, sigma2, nobs, backcast, self.var_bounds
)
assert np.all(sigma2 >= self.var_bounds[:, 0])
assert np.all(sigma2 <= 2 * self.var_bounds[:, 1])
rec.midas_recursion(
parameters, weights, resids, sigma2, nobs, backcast, self.var_bounds
)
assert np.all(sigma2 >= self.var_bounds[:, 0])
assert np.all(sigma2 <= 2 * self.var_bounds[:, 1])
def test_figarch_recursion(self):
nobs, resids = self.nobs, self.resids
sigma2, backcast = self.sigma2, self.backcast
parameters = np.array([1.0, 0.2, 0.4, 0.3])
fresids = resids ** 2
p = q = 1
trunc_lag = 1000
rec.figarch_recursion(
parameters,
fresids,
sigma2,
p,
q,
nobs,
trunc_lag,
backcast,
self.var_bounds,
)
lam = rec.figarch_weights(parameters[1:], p, q, trunc_lag=trunc_lag)
lam_rev = lam[::-1]
omega_tilde = parameters[0] / (1 - parameters[-1])
sigma2_direct = np.empty_like(sigma2)
for t in range(nobs):
backcasts = trunc_lag - t
sigma2_direct[t] = omega_tilde
if backcasts:
sigma2_direct[t] += backcast * lam_rev[:backcasts].sum()
if t:
sigma2_direct[t] += np.sum(lam_rev[-t:] * fresids[max(0, t - 1000) : t])
assert_almost_equal(sigma2_direct, sigma2)
recpy.figarch_recursion(
parameters,
fresids,
sigma2,
p,
q,
nobs,
trunc_lag,
backcast,
self.var_bounds,
)
sigma2_numba = sigma2.copy()
recpy.figarch_recursion_python(
parameters,
fresids,
sigma2,
p,
q,
nobs,
trunc_lag,
backcast,
self.var_bounds,
)
sigma2_python = sigma2.copy()
rec.figarch_recursion(
parameters,
fresids,
sigma2,
p,
q,
nobs,
trunc_lag,
backcast,
self.var_bounds,
)
assert_almost_equal(sigma2_numba, sigma2)
assert_almost_equal(sigma2_python, sigma2)
def test_figarch_weights(self):
parameters = np.array([1.0, 0.4])
lam = rec.figarch_weights(parameters[1:], 0, 0, trunc_lag=1000)
lam_direct = np.empty_like(lam)
lam_direct[0] = parameters[-1]
for i in range(1, 1000):
lam_direct[i] = (i - parameters[-1]) / (i + 1) * lam_direct[i - 1]
assert_almost_equal(lam, lam_direct)
@pytest.mark.skipif(
missing_numba or missing_extension, reason="numba not installed"
)
def test_garch_performance(self):
garch_setup = """
parameters = np.array([.1, .4, .3, .2])
fresids = resids ** 2.0
sresids = np.sign(resids)
"""
garch_first = """
recpy.garch_recursion(parameters, fresids, sresids, sigma2, 1, 1, 1, nobs,
backcast, var_bounds)
"""
garch_second = """
rec.garch_recursion(parameters, fresids, sresids, sigma2, 1, 1, 1, nobs, backcast,
var_bounds)
"""
timer = Timer(
garch_first,
"Numba",
garch_second,
"Cython",
"GARCH",
self.timer_setup + garch_setup,
)
timer.display()
assert timer.ratio < 10.0
if not (missing_numba or CYTHON_COVERAGE):
assert 0.1 < timer.ratio
@pytest.mark.skipif(
missing_numba or missing_extension, reason="numba not installed"
)
def test_harch_performance(self):
harch_setup = """
parameters = np.array([.1, .4, .3, .2])
lags = np.array([1, 5, 22], dtype=np.int32)
"""
harch_first = """
recpy.harch_recursion(parameters, resids, sigma2, lags, nobs, backcast,
var_bounds)
"""
harch_second = """
rec.harch_recursion(parameters, resids, sigma2, lags, nobs, backcast, var_bounds)
"""
timer = Timer(
harch_first,
"Numba",
harch_second,
"Cython",
"HARCH",
self.timer_setup + harch_setup,
)
timer.display()
assert timer.ratio < 10.0
if not (missing_numba or CYTHON_COVERAGE):
assert 0.1 < timer.ratio
@pytest.mark.skipif(
missing_numba or missing_extension, reason="numba not installed"
)
def test_egarch_performance(self):
egarch_setup = """
parameters = np.array([0.0, 0.1, -0.1, 0.95])
p = o = q = 1
backcast = 0.0
lnsigma2 = np.empty_like(sigma2)
std_resids = np.empty_like(sigma2)
abs_std_resids = np.empty_like(sigma2)
"""
egarch_first = """
recpy.egarch_recursion(parameters, resids, sigma2, p, o, q, nobs, backcast,
var_bounds, lnsigma2, std_resids, abs_std_resids)
"""
egarch_second = """
rec.egarch_recursion(parameters, resids, sigma2, p, o, q, nobs, backcast,
var_bounds, lnsigma2, std_resids, abs_std_resids)
"""
timer = Timer(
egarch_first,
"Numba",
egarch_second,
"Cython",
"EGARCH",
self.timer_setup + egarch_setup,
)
timer.display()
assert timer.ratio < 10.0
if not (missing_numba or CYTHON_COVERAGE):
assert 0.1 < timer.ratio
@pytest.mark.skipif(
missing_numba or missing_extension, reason="numba not installed"
)
def test_midas_performance(self):
midas_setup = """
from scipy.special import gamma
parameters = np.array([.1, 0.8, 0])
j = np.arange(1,22+1)
weights = gamma(j+0.6) / (gamma(j+1) * gamma(0.6))
weights = weights / weights.sum()
"""
midas_first = """
recpy.midas_recursion(parameters, weights, resids, sigma2, nobs, backcast, var_bounds)
"""
midas_second = """
rec.midas_recursion(parameters, weights, resids, sigma2, nobs, backcast, var_bounds)
"""
timer = Timer(
midas_first,
"Numba",
midas_second,
"Cython",
"MIDAS",
self.timer_setup + midas_setup,
)
timer.display()
assert timer.ratio < 10.0
if not (missing_numba or CYTHON_COVERAGE):
assert 0.1 < timer.ratio
@pytest.mark.skipif(
missing_numba or missing_extension, reason="numba not installed"
)
def test_figarch_performance(self):
midas_setup = """
p = q = 1
trunc_lag = 1000
parameters = np.array([1.0, 0.2, 0.2, 0.04])
fresids = resids ** 2.0
"""
midas_first = """
recpy.figarch_recursion(parameters, fresids, sigma2, p, q, nobs, trunc_lag, backcast, var_bounds)
"""
midas_second = """
rec.figarch_recursion(parameters, fresids, sigma2, p, q, nobs, trunc_lag, backcast, var_bounds)
"""
timer = Timer(
midas_first,
"Numba",
midas_second,
"Cython",
"FIGARCH",
self.timer_setup + midas_setup,
)
timer.display()
assert timer.ratio < 10.0
if not (missing_numba or CYTHON_COVERAGE):
assert 0.1 < timer.ratio
def test_garch_aparch_equiv(self):
parameters = np.array([0.1, 0.1, 0.8])
fresids = self.resids ** 2
sresids = np.sign(self.resids)
sigma2 = np.empty(1000)
p = q = 1
o = 0
recpy.garch_recursion_python(
parameters,
fresids,
sresids,
sigma2,
p,
o,
q,
self.nobs,
self.backcast,
self.var_bounds,
)
sigma2_garch = sigma2.copy()
parameters = np.array([0.1, 0.1, 0.8, 2])
sigma2[:] = np.nan
sigma2_delta = np.empty_like(sigma2)
recpy.aparch_recursion_python(
parameters,
self.resids,
np.abs(self.resids),
sigma2,
sigma2_delta,
p,
o,
q,
self.nobs,
self.backcast,
self.var_bounds,
)
assert_allclose(sigma2_garch, sigma2, atol=1e-6)
sigma2[:] = np.nan
recpy.aparch_recursion(
parameters,
self.resids,
np.abs(self.resids),
sigma2,
sigma2_delta,
p,
o,
q,
self.nobs,
self.backcast,
self.var_bounds,
)
assert_allclose(sigma2_garch, sigma2, atol=1e-6)
sigma2[:] = np.nan
rec.aparch_recursion(
parameters,
self.resids,
np.abs(self.resids),
sigma2,
sigma2_delta,
p,
o,
q,
self.nobs,
self.backcast,
self.var_bounds,
)
assert_allclose(sigma2_garch, sigma2, atol=1e-6)
def test_asym_aparch_smoke(self):
sigma2 = np.empty(1000)
p = o = q = 1
parameters = np.array([0.1, 0.1, 0.1, 0.8, 1.3])
sigma2[:] = np.nan
sigma2_delta = np.empty_like(sigma2)
recpy.aparch_recursion_python(
parameters,
self.resids,
np.abs(self.resids),
sigma2,
sigma2_delta,
p,
o,
q,
self.nobs,
self.backcast,
self.var_bounds,
)
assert np.all(np.isfinite(sigma2))
sigma2_py = sigma2.copy()
sigma2[:] = np.nan
recpy.aparch_recursion(
parameters,
self.resids,
np.abs(self.resids),
sigma2,
sigma2_delta,
p,
o,
q,
self.nobs,
self.backcast,
self.var_bounds,
)
assert np.all(np.isfinite(sigma2))
assert_allclose(sigma2_py, sigma2)
sigma2[:] = np.nan
rec.aparch_recursion(
parameters,
self.resids,
np.abs(self.resids),
sigma2,
sigma2_delta,
p,
o,
q,
self.nobs,
self.backcast,
self.var_bounds,
)
assert np.all(np.isfinite(sigma2))
assert_allclose(sigma2_py, sigma2)
def test_bounds_check():
var_bounds = np.array([0.1, 10])
assert_almost_equal(recpy.bounds_check_python(-1.0, var_bounds), 0.1)
assert_almost_equal(
recpy.bounds_check_python(20.0, var_bounds), 10 + np.log(20.0 / 10.0)
)
assert_almost_equal(recpy.bounds_check_python(np.inf, var_bounds), 1010.0)
| [
"pytest.mark.filterwarnings",
"numpy.sqrt",
"arch.univariate.recursions_python.figarch_recursion",
"arch.univariate.recursions_python.figarch_recursion_python",
"numpy.log",
"arch.univariate.recursions_python.harch_recursion",
"arch.univariate.recursions_python.egarch_recursion_python",
"numpy.array",
"arch.univariate.recursions_python.arch_recursion",
"numpy.isfinite",
"arch.univariate.recursions_python.garch_recursion",
"numpy.random.RandomState",
"numpy.arange",
"numpy.testing.assert_allclose",
"arch.univariate.recursions_python.midas_recursion_python",
"numpy.exp",
"numpy.testing.assert_almost_equal",
"numpy.empty",
"pytest.mark.skipif",
"numpy.abs",
"numpy.ones",
"arch.univariate.recursions_python.midas_recursion",
"arch.univariate.recursions_python.egarch_recursion",
"scipy.special.gamma",
"numpy.sign",
"timeit.Timer",
"os.environ.get",
"arch.univariate.recursions_python.harch_recursion_python",
"arch.univariate.recursions_python.arch_recursion_python",
"numpy.empty_like",
"numpy.all",
"arch.univariate.recursions_python.bounds_check_python",
"numpy.zeros_like",
"arch.univariate.recursions_python.garch_recursion_python"
]
| [((674, 748), 'pytest.mark.filterwarnings', 'pytest.mark.filterwarnings', (['"""ignore::arch.compat.numba.PerformanceWarning"""'], {}), "('ignore::arch.compat.numba.PerformanceWarning')\n", (700, 748), False, 'import pytest\n'), ((284, 327), 'os.environ.get', 'os.environ.get', (['"""ARCH_CYTHON_COVERAGE"""', '"""0"""'], {}), "('ARCH_CYTHON_COVERAGE', '0')\n", (298, 327), False, 'import os\n'), ((26657, 26746), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(missing_numba or missing_extension)'], {'reason': '"""numba not installed"""'}), "(missing_numba or missing_extension, reason=\n 'numba not installed')\n", (26675, 26746), False, 'import pytest\n'), ((27536, 27625), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(missing_numba or missing_extension)'], {'reason': '"""numba not installed"""'}), "(missing_numba or missing_extension, reason=\n 'numba not installed')\n", (27554, 27625), False, 'import pytest\n'), ((28385, 28474), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(missing_numba or missing_extension)'], {'reason': '"""numba not installed"""'}), "(missing_numba or missing_extension, reason=\n 'numba not installed')\n", (28403, 28474), False, 'import pytest\n'), ((29407, 29496), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(missing_numba or missing_extension)'], {'reason': '"""numba not installed"""'}), "(missing_numba or missing_extension, reason=\n 'numba not installed')\n", (29425, 29496), False, 'import pytest\n'), ((30343, 30432), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(missing_numba or missing_extension)'], {'reason': '"""numba not installed"""'}), "(missing_numba or missing_extension, reason=\n 'numba not installed')\n", (30361, 30432), False, 'import pytest\n'), ((34434, 34453), 'numpy.array', 'np.array', (['[0.1, 10]'], {}), '([0.1, 10])\n', (34442, 34453), True, 'import numpy as np\n'), ((2600, 2618), 'numpy.random.RandomState', 'RandomState', (['(12345)'], {}), '(12345)\n', (2611, 2618), False, 'from numpy.random import RandomState\n'), ((2695, 2720), 'numpy.zeros_like', 'np.zeros_like', (['cls.resids'], {}), '(cls.resids)\n', (2708, 2720), True, 'import numpy as np\n'), ((2773, 2817), 'numpy.array', 'np.array', (['[var / 1000000.0, var * 1000000.0]'], {}), '([var / 1000000.0, var * 1000000.0])\n', (2781, 2817), True, 'import numpy as np\n'), ((3422, 3452), 'numpy.array', 'np.array', (['[0.1, 0.4, 0.3, 0.2]'], {}), '([0.1, 0.4, 0.3, 0.2])\n', (3430, 3452), True, 'import numpy as np\n'), ((3503, 3518), 'numpy.sign', 'np.sign', (['resids'], {}), '(resids)\n', (3510, 3518), True, 'import numpy as np\n'), ((3528, 3633), 'arch.univariate.recursions_python.garch_recursion', 'recpy.garch_recursion', (['parameters', 'fresids', 'sresids', 'sigma2', '(1)', '(1)', '(1)', 'nobs', 'backcast', 'self.var_bounds'], {}), '(parameters, fresids, sresids, sigma2, 1, 1, 1, nobs,\n backcast, self.var_bounds)\n', (3549, 3633), True, 'import arch.univariate.recursions_python as recpy\n'), ((3806, 3918), 'arch.univariate.recursions_python.garch_recursion_python', 'recpy.garch_recursion_python', (['parameters', 'fresids', 'sresids', 'sigma2', '(1)', '(1)', '(1)', 'nobs', 'backcast', 'self.var_bounds'], {}), '(parameters, fresids, sresids, sigma2, 1, 1, 1,\n nobs, backcast, self.var_bounds)\n', (3834, 3918), True, 'import arch.univariate.recursions_python as recpy\n'), ((4331, 4372), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['sigma2_numba', 'sigma2'], {}), '(sigma2_numba, sigma2)\n', (4350, 4372), False, 'from numpy.testing import assert_allclose, 
assert_almost_equal\n'), ((4381, 4423), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['sigma2_python', 'sigma2'], {}), '(sigma2_python, sigma2)\n', (4400, 4423), False, 'from numpy.testing import assert_allclose, assert_almost_equal\n'), ((4446, 4477), 'numpy.array', 'np.array', (['[0.1, -0.4, 0.3, 0.2]'], {}), '([0.1, -0.4, 0.3, 0.2])\n', (4454, 4477), True, 'import numpy as np\n'), ((4486, 4598), 'arch.univariate.recursions_python.garch_recursion_python', 'recpy.garch_recursion_python', (['parameters', 'fresids', 'sresids', 'sigma2', '(1)', '(1)', '(1)', 'nobs', 'backcast', 'self.var_bounds'], {}), '(parameters, fresids, sresids, sigma2, 1, 1, 1,\n nobs, backcast, self.var_bounds)\n', (4514, 4598), True, 'import arch.univariate.recursions_python as recpy\n'), ((4741, 4780), 'numpy.all', 'np.all', (['(sigma2 >= self.var_bounds[:, 0])'], {}), '(sigma2 >= self.var_bounds[:, 0])\n', (4747, 4780), True, 'import numpy as np\n'), ((4796, 4839), 'numpy.all', 'np.all', (['(sigma2 <= 2 * self.var_bounds[:, 1])'], {}), '(sigma2 <= 2 * self.var_bounds[:, 1])\n', (4802, 4839), True, 'import numpy as np\n'), ((4862, 4888), 'numpy.array', 'np.array', (['[0.1, 0.4, 3, 2]'], {}), '([0.1, 0.4, 3, 2])\n', (4870, 4888), True, 'import numpy as np\n'), ((4897, 5009), 'arch.univariate.recursions_python.garch_recursion_python', 'recpy.garch_recursion_python', (['parameters', 'fresids', 'sresids', 'sigma2', '(1)', '(1)', '(1)', 'nobs', 'backcast', 'self.var_bounds'], {}), '(parameters, fresids, sresids, sigma2, 1, 1, 1,\n nobs, backcast, self.var_bounds)\n', (4925, 5009), True, 'import arch.univariate.recursions_python as recpy\n'), ((5152, 5191), 'numpy.all', 'np.all', (['(sigma2 >= self.var_bounds[:, 0])'], {}), '(sigma2 >= self.var_bounds[:, 0])\n', (5158, 5191), True, 'import numpy as np\n'), ((5207, 5250), 'numpy.all', 'np.all', (['(sigma2 <= 2 * self.var_bounds[:, 1])'], {}), '(sigma2 <= 2 * self.var_bounds[:, 1])\n', (5213, 5250), True, 'import numpy as np\n'), ((5273, 5303), 'numpy.array', 'np.array', (['[0.1, 0.4, 0.3, 0.2]'], {}), '([0.1, 0.4, 0.3, 0.2])\n', (5281, 5303), True, 'import numpy as np\n'), ((5382, 5498), 'arch.univariate.recursions_python.garch_recursion_python', 'recpy.garch_recursion_python', (['parameters', 'mod_fresids', 'sresids', 'sigma2', '(1)', '(1)', '(1)', 'nobs', 'backcast', 'self.var_bounds'], {}), '(parameters, mod_fresids, sresids, sigma2, 1, 1,\n 1, nobs, backcast, self.var_bounds)\n', (5410, 5498), True, 'import arch.univariate.recursions_python as recpy\n'), ((5641, 5680), 'numpy.all', 'np.all', (['(sigma2 >= self.var_bounds[:, 0])'], {}), '(sigma2 >= self.var_bounds[:, 0])\n', (5647, 5680), True, 'import numpy as np\n'), ((5696, 5739), 'numpy.all', 'np.all', (['(sigma2 <= 2 * self.var_bounds[:, 1])'], {}), '(sigma2 <= 2 * self.var_bounds[:, 1])\n', (5702, 5739), True, 'import numpy as np\n'), ((5998, 6037), 'numpy.all', 'np.all', (['(sigma2 >= self.var_bounds[:, 0])'], {}), '(sigma2 >= self.var_bounds[:, 0])\n', (6004, 6037), True, 'import numpy as np\n'), ((6053, 6096), 'numpy.all', 'np.all', (['(sigma2 <= 2 * self.var_bounds[:, 1])'], {}), '(sigma2 <= 2 * self.var_bounds[:, 1])\n', (6059, 6096), True, 'import numpy as np\n'), ((6246, 6276), 'numpy.array', 'np.array', (['[0.1, 0.4, 0.3, 0.2]'], {}), '([0.1, 0.4, 0.3, 0.2])\n', (6254, 6276), True, 'import numpy as np\n'), ((6292, 6328), 'numpy.array', 'np.array', (['[1, 5, 22]'], {'dtype': 'np.int32'}), '([1, 5, 22], dtype=np.int32)\n', (6300, 6328), True, 'import numpy as np\n'), ((6337, 6436), 
'arch.univariate.recursions_python.harch_recursion_python', 'recpy.harch_recursion_python', (['parameters', 'resids', 'sigma2', 'lags', 'nobs', 'backcast', 'self.var_bounds'], {}), '(parameters, resids, sigma2, lags, nobs,\n backcast, self.var_bounds)\n', (6365, 6436), True, 'import arch.univariate.recursions_python as recpy\n'), ((6501, 6593), 'arch.univariate.recursions_python.harch_recursion', 'recpy.harch_recursion', (['parameters', 'resids', 'sigma2', 'lags', 'nobs', 'backcast', 'self.var_bounds'], {}), '(parameters, resids, sigma2, lags, nobs, backcast,\n self.var_bounds)\n', (6522, 6593), True, 'import arch.univariate.recursions_python as recpy\n'), ((6774, 6815), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['sigma2_numba', 'sigma2'], {}), '(sigma2_numba, sigma2)\n', (6793, 6815), False, 'from numpy.testing import assert_allclose, assert_almost_equal\n'), ((6824, 6866), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['sigma2_python', 'sigma2'], {}), '(sigma2_python, sigma2)\n', (6843, 6866), False, 'from numpy.testing import assert_allclose, assert_almost_equal\n'), ((6889, 6921), 'numpy.array', 'np.array', (['[-0.1, -0.4, 0.3, 0.2]'], {}), '([-0.1, -0.4, 0.3, 0.2])\n', (6897, 6921), True, 'import numpy as np\n'), ((6930, 7029), 'arch.univariate.recursions_python.harch_recursion_python', 'recpy.harch_recursion_python', (['parameters', 'resids', 'sigma2', 'lags', 'nobs', 'backcast', 'self.var_bounds'], {}), '(parameters, resids, sigma2, lags, nobs,\n backcast, self.var_bounds)\n', (6958, 7029), True, 'import arch.univariate.recursions_python as recpy\n'), ((7063, 7102), 'numpy.all', 'np.all', (['(sigma2 >= self.var_bounds[:, 0])'], {}), '(sigma2 >= self.var_bounds[:, 0])\n', (7069, 7102), True, 'import numpy as np\n'), ((7118, 7161), 'numpy.all', 'np.all', (['(sigma2 <= 2 * self.var_bounds[:, 1])'], {}), '(sigma2 <= 2 * self.var_bounds[:, 1])\n', (7124, 7161), True, 'import numpy as np\n'), ((7184, 7218), 'numpy.array', 'np.array', (['[0.1, 400000000.0, 3, 2]'], {}), '([0.1, 400000000.0, 3, 2])\n', (7192, 7218), True, 'import numpy as np\n'), ((7219, 7318), 'arch.univariate.recursions_python.harch_recursion_python', 'recpy.harch_recursion_python', (['parameters', 'resids', 'sigma2', 'lags', 'nobs', 'backcast', 'self.var_bounds'], {}), '(parameters, resids, sigma2, lags, nobs,\n backcast, self.var_bounds)\n', (7247, 7318), True, 'import arch.univariate.recursions_python as recpy\n'), ((7352, 7391), 'numpy.all', 'np.all', (['(sigma2 >= self.var_bounds[:, 0])'], {}), '(sigma2 >= self.var_bounds[:, 0])\n', (7358, 7391), True, 'import numpy as np\n'), ((7407, 7450), 'numpy.all', 'np.all', (['(sigma2 <= 2 * self.var_bounds[:, 1])'], {}), '(sigma2 <= 2 * self.var_bounds[:, 1])\n', (7413, 7450), True, 'import numpy as np\n'), ((7473, 7507), 'numpy.array', 'np.array', (['[0.1, 400000000.0, 3, 2]'], {}), '([0.1, 400000000.0, 3, 2])\n', (7481, 7507), True, 'import numpy as np\n'), ((7576, 7679), 'arch.univariate.recursions_python.harch_recursion_python', 'recpy.harch_recursion_python', (['parameters', 'mod_resids', 'sigma2', 'lags', 'nobs', 'backcast', 'self.var_bounds'], {}), '(parameters, mod_resids, sigma2, lags, nobs,\n backcast, self.var_bounds)\n', (7604, 7679), True, 'import arch.univariate.recursions_python as recpy\n'), ((7713, 7752), 'numpy.all', 'np.all', (['(sigma2 >= self.var_bounds[:, 0])'], {}), '(sigma2 >= self.var_bounds[:, 0])\n', (7719, 7752), True, 'import numpy as np\n'), ((7768, 7811), 'numpy.all', 'np.all', (['(sigma2 <= 2 * 
self.var_bounds[:, 1])'], {}), '(sigma2 <= 2 * self.var_bounds[:, 1])\n', (7774, 7811), True, 'import numpy as np\n'), ((7948, 7987), 'numpy.all', 'np.all', (['(sigma2 >= self.var_bounds[:, 0])'], {}), '(sigma2 >= self.var_bounds[:, 0])\n', (7954, 7987), True, 'import numpy as np\n'), ((8003, 8046), 'numpy.all', 'np.all', (['(sigma2 <= 2 * self.var_bounds[:, 1])'], {}), '(sigma2 <= 2 * self.var_bounds[:, 1])\n', (8009, 8046), True, 'import numpy as np\n'), ((8195, 8225), 'numpy.array', 'np.array', (['[0.1, 0.4, 0.3, 0.2]'], {}), '([0.1, 0.4, 0.3, 0.2])\n', (8203, 8225), True, 'import numpy as np\n'), ((8249, 8344), 'arch.univariate.recursions_python.arch_recursion_python', 'recpy.arch_recursion_python', (['parameters', 'resids', 'sigma2', 'p', 'nobs', 'backcast', 'self.var_bounds'], {}), '(parameters, resids, sigma2, p, nobs, backcast,\n self.var_bounds)\n', (8276, 8344), True, 'import arch.univariate.recursions_python as recpy\n'), ((8409, 8498), 'arch.univariate.recursions_python.arch_recursion', 'recpy.arch_recursion', (['parameters', 'resids', 'sigma2', 'p', 'nobs', 'backcast', 'self.var_bounds'], {}), '(parameters, resids, sigma2, p, nobs, backcast, self.\n var_bounds)\n', (8429, 8498), True, 'import arch.univariate.recursions_python as recpy\n'), ((8674, 8715), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['sigma2_numba', 'sigma2'], {}), '(sigma2_numba, sigma2)\n', (8693, 8715), False, 'from numpy.testing import assert_allclose, assert_almost_equal\n'), ((8724, 8766), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['sigma2_python', 'sigma2'], {}), '(sigma2_python, sigma2)\n', (8743, 8766), False, 'from numpy.testing import assert_allclose, assert_almost_equal\n'), ((8789, 8821), 'numpy.array', 'np.array', (['[-0.1, -0.4, 0.3, 0.2]'], {}), '([-0.1, -0.4, 0.3, 0.2])\n', (8797, 8821), True, 'import numpy as np\n'), ((8830, 8925), 'arch.univariate.recursions_python.arch_recursion_python', 'recpy.arch_recursion_python', (['parameters', 'resids', 'sigma2', 'p', 'nobs', 'backcast', 'self.var_bounds'], {}), '(parameters, resids, sigma2, p, nobs, backcast,\n self.var_bounds)\n', (8857, 8925), True, 'import arch.univariate.recursions_python as recpy\n'), ((8959, 8998), 'numpy.all', 'np.all', (['(sigma2 >= self.var_bounds[:, 0])'], {}), '(sigma2 >= self.var_bounds[:, 0])\n', (8965, 8998), True, 'import numpy as np\n'), ((9014, 9057), 'numpy.all', 'np.all', (['(sigma2 <= 2 * self.var_bounds[:, 1])'], {}), '(sigma2 <= 2 * self.var_bounds[:, 1])\n', (9020, 9057), True, 'import numpy as np\n'), ((9080, 9114), 'numpy.array', 'np.array', (['[0.1, 400000000.0, 3, 2]'], {}), '([0.1, 400000000.0, 3, 2])\n', (9088, 9114), True, 'import numpy as np\n'), ((9115, 9210), 'arch.univariate.recursions_python.arch_recursion_python', 'recpy.arch_recursion_python', (['parameters', 'resids', 'sigma2', 'p', 'nobs', 'backcast', 'self.var_bounds'], {}), '(parameters, resids, sigma2, p, nobs, backcast,\n self.var_bounds)\n', (9142, 9210), True, 'import arch.univariate.recursions_python as recpy\n'), ((9244, 9283), 'numpy.all', 'np.all', (['(sigma2 >= self.var_bounds[:, 0])'], {}), '(sigma2 >= self.var_bounds[:, 0])\n', (9250, 9283), True, 'import numpy as np\n'), ((9299, 9342), 'numpy.all', 'np.all', (['(sigma2 <= 2 * self.var_bounds[:, 1])'], {}), '(sigma2 <= 2 * self.var_bounds[:, 1])\n', (9305, 9342), True, 'import numpy as np\n'), ((9420, 9519), 'arch.univariate.recursions_python.arch_recursion_python', 'recpy.arch_recursion_python', (['parameters', 'mod_resids', 'sigma2', 'p', 'nobs', 
'backcast', 'self.var_bounds'], {}), '(parameters, mod_resids, sigma2, p, nobs,\n backcast, self.var_bounds)\n', (9447, 9519), True, 'import arch.univariate.recursions_python as recpy\n'), ((9553, 9592), 'numpy.all', 'np.all', (['(sigma2 >= self.var_bounds[:, 0])'], {}), '(sigma2 >= self.var_bounds[:, 0])\n', (9559, 9592), True, 'import numpy as np\n'), ((9608, 9651), 'numpy.all', 'np.all', (['(sigma2 <= 2 * self.var_bounds[:, 1])'], {}), '(sigma2 <= 2 * self.var_bounds[:, 1])\n', (9614, 9651), True, 'import numpy as np\n'), ((9784, 9823), 'numpy.all', 'np.all', (['(sigma2 >= self.var_bounds[:, 0])'], {}), '(sigma2 >= self.var_bounds[:, 0])\n', (9790, 9823), True, 'import numpy as np\n'), ((9839, 9882), 'numpy.all', 'np.all', (['(sigma2 <= 2 * self.var_bounds[:, 1])'], {}), '(sigma2 <= 2 * self.var_bounds[:, 1])\n', (9845, 9882), True, 'import numpy as np\n'), ((10040, 10070), 'numpy.array', 'np.array', (['[0.1, 0.4, 0.3, 0.2]'], {}), '([0.1, 0.4, 0.3, 0.2])\n', (10048, 10070), True, 'import numpy as np\n'), ((10129, 10144), 'numpy.sign', 'np.sign', (['resids'], {}), '(resids)\n', (10136, 10144), True, 'import numpy as np\n'), ((10154, 10259), 'arch.univariate.recursions_python.garch_recursion', 'recpy.garch_recursion', (['parameters', 'fresids', 'sresids', 'sigma2', '(1)', '(1)', '(1)', 'nobs', 'backcast', 'self.var_bounds'], {}), '(parameters, fresids, sresids, sigma2, 1, 1, 1, nobs,\n backcast, self.var_bounds)\n', (10175, 10259), True, 'import arch.univariate.recursions_python as recpy\n'), ((10672, 10714), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['sigma2_python', 'sigma2'], {}), '(sigma2_python, sigma2)\n', (10691, 10714), False, 'from numpy.testing import assert_allclose, assert_almost_equal\n'), ((10871, 10901), 'numpy.array', 'np.array', (['[0.1, 0.4, 0.3, 0.2]'], {}), '([0.1, 0.4, 0.3, 0.2])\n', (10879, 10901), True, 'import numpy as np\n'), ((10960, 10975), 'numpy.sign', 'np.sign', (['resids'], {}), '(resids)\n', (10967, 10975), True, 'import numpy as np\n'), ((11792, 11834), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['sigma2_python', 'sigma2'], {}), '(sigma2_python, sigma2)\n', (11811, 11834), False, 'from numpy.testing import assert_allclose, assert_almost_equal\n'), ((11989, 12014), 'numpy.array', 'np.array', (['[0.1, 0.4, 0.3]'], {}), '([0.1, 0.4, 0.3])\n', (11997, 12014), True, 'import numpy as np\n'), ((12065, 12080), 'numpy.sign', 'np.sign', (['resids'], {}), '(resids)\n', (12072, 12080), True, 'import numpy as np\n'), ((12090, 12195), 'arch.univariate.recursions_python.garch_recursion', 'recpy.garch_recursion', (['parameters', 'fresids', 'sresids', 'sigma2', '(1)', '(1)', '(0)', 'nobs', 'backcast', 'self.var_bounds'], {}), '(parameters, fresids, sresids, sigma2, 1, 1, 0, nobs,\n backcast, self.var_bounds)\n', (12111, 12195), True, 'import arch.univariate.recursions_python as recpy\n'), ((12608, 12650), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['sigma2_python', 'sigma2'], {}), '(sigma2_python, sigma2)\n', (12627, 12650), False, 'from numpy.testing import assert_allclose, assert_almost_equal\n'), ((12805, 12830), 'numpy.array', 'np.array', (['[0.1, 0.4, 0.3]'], {}), '([0.1, 0.4, 0.3])\n', (12813, 12830), True, 'import numpy as np\n'), ((12881, 12896), 'numpy.sign', 'np.sign', (['resids'], {}), '(resids)\n', (12888, 12896), True, 'import numpy as np\n'), ((12906, 13011), 'arch.univariate.recursions_python.garch_recursion', 'recpy.garch_recursion', (['parameters', 'fresids', 'sresids', 'sigma2', '(0)', '(1)', '(1)', 
'nobs', 'backcast', 'self.var_bounds'], {}), '(parameters, fresids, sresids, sigma2, 0, 1, 1, nobs,\n backcast, self.var_bounds)\n', (12927, 13011), True, 'import arch.univariate.recursions_python as recpy\n'), ((13424, 13466), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['sigma2_python', 'sigma2'], {}), '(sigma2_python, sigma2)\n', (13443, 13466), False, 'from numpy.testing import assert_allclose, assert_almost_equal\n'), ((13621, 13651), 'numpy.array', 'np.array', (['[0.1, 0.4, 0.3, 0.2]'], {}), '([0.1, 0.4, 0.3, 0.2])\n', (13629, 13651), True, 'import numpy as np\n'), ((13702, 13717), 'numpy.sign', 'np.sign', (['resids'], {}), '(resids)\n', (13709, 13717), True, 'import numpy as np\n'), ((13727, 13832), 'arch.univariate.recursions_python.garch_recursion', 'recpy.garch_recursion', (['parameters', 'fresids', 'sresids', 'sigma2', '(1)', '(0)', '(1)', 'nobs', 'backcast', 'self.var_bounds'], {}), '(parameters, fresids, sresids, sigma2, 1, 0, 1, nobs,\n backcast, self.var_bounds)\n', (13748, 13832), True, 'import arch.univariate.recursions_python as recpy\n'), ((14245, 14287), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['sigma2_python', 'sigma2'], {}), '(sigma2_python, sigma2)\n', (14264, 14287), False, 'from numpy.testing import assert_allclose, assert_almost_equal\n'), ((14442, 14472), 'numpy.array', 'np.array', (['[0.1, 0.4, 0.3, 0.2]'], {}), '([0.1, 0.4, 0.3, 0.2])\n', (14450, 14472), True, 'import numpy as np\n'), ((14523, 14538), 'numpy.sign', 'np.sign', (['resids'], {}), '(resids)\n', (14530, 14538), True, 'import numpy as np\n'), ((14938, 14979), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['sigma2_garch', 'sigma2'], {}), '(sigma2_garch, sigma2)\n', (14957, 14979), False, 'from numpy.testing import assert_allclose, assert_almost_equal\n'), ((15130, 15163), 'numpy.array', 'np.array', (['[1e+100, 0.4, 0.3, 0.2]'], {}), '([1e+100, 0.4, 0.3, 0.2])\n', (15138, 15163), True, 'import numpy as np\n'), ((15178, 15214), 'numpy.array', 'np.array', (['[1, 5, 22]'], {'dtype': 'np.int32'}), '([1, 5, 22], dtype=np.int32)\n', (15186, 15214), True, 'import numpy as np\n'), ((15223, 15315), 'arch.univariate.recursions_python.harch_recursion', 'recpy.harch_recursion', (['parameters', 'resids', 'sigma2', 'lags', 'nobs', 'backcast', 'self.var_bounds'], {}), '(parameters, resids, sigma2, lags, nobs, backcast,\n self.var_bounds)\n', (15244, 15315), True, 'import arch.univariate.recursions_python as recpy\n'), ((15497, 15539), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['sigma2_python', 'sigma2'], {}), '(sigma2_python, sigma2)\n', (15516, 15539), False, 'from numpy.testing import assert_allclose, assert_almost_equal\n'), ((15555, 15594), 'numpy.all', 'np.all', (['(sigma2 >= self.var_bounds[:, 0])'], {}), '(sigma2 >= self.var_bounds[:, 0])\n', (15561, 15594), True, 'import numpy as np\n'), ((15610, 15653), 'numpy.all', 'np.all', (['(sigma2 <= 2 * self.var_bounds[:, 1])'], {}), '(sigma2 <= 2 * self.var_bounds[:, 1])\n', (15616, 15653), True, 'import numpy as np\n'), ((15676, 15710), 'numpy.array', 'np.array', (['[-1e+100, 0.4, 0.3, 0.2]'], {}), '([-1e+100, 0.4, 0.3, 0.2])\n', (15684, 15710), True, 'import numpy as np\n'), ((15718, 15810), 'arch.univariate.recursions_python.harch_recursion', 'recpy.harch_recursion', (['parameters', 'resids', 'sigma2', 'lags', 'nobs', 'backcast', 'self.var_bounds'], {}), '(parameters, resids, sigma2, lags, nobs, backcast,\n self.var_bounds)\n', (15739, 15810), True, 'import arch.univariate.recursions_python as 
recpy\n'), ((15992, 16034), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['sigma2_python', 'sigma2'], {}), '(sigma2_python, sigma2)\n', (16011, 16034), False, 'from numpy.testing import assert_allclose, assert_almost_equal\n'), ((16043, 16093), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['sigma2', 'self.var_bounds[:, 0]'], {}), '(sigma2, self.var_bounds[:, 0])\n', (16062, 16093), False, 'from numpy.testing import assert_allclose, assert_almost_equal\n'), ((16116, 16149), 'numpy.array', 'np.array', (['[1e+100, 0.4, 0.3, 0.2]'], {}), '([1e+100, 0.4, 0.3, 0.2])\n', (16124, 16149), True, 'import numpy as np\n'), ((16199, 16214), 'numpy.sign', 'np.sign', (['resids'], {}), '(resids)\n', (16206, 16214), True, 'import numpy as np\n'), ((16224, 16329), 'arch.univariate.recursions_python.garch_recursion', 'recpy.garch_recursion', (['parameters', 'fresids', 'sresids', 'sigma2', '(1)', '(1)', '(1)', 'nobs', 'backcast', 'self.var_bounds'], {}), '(parameters, fresids, sresids, sigma2, 1, 1, 1, nobs,\n backcast, self.var_bounds)\n', (16245, 16329), True, 'import arch.univariate.recursions_python as recpy\n'), ((16742, 16784), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['sigma2_python', 'sigma2'], {}), '(sigma2_python, sigma2)\n', (16761, 16784), False, 'from numpy.testing import assert_allclose, assert_almost_equal\n'), ((16800, 16839), 'numpy.all', 'np.all', (['(sigma2 >= self.var_bounds[:, 0])'], {}), '(sigma2 >= self.var_bounds[:, 0])\n', (16806, 16839), True, 'import numpy as np\n'), ((16855, 16898), 'numpy.all', 'np.all', (['(sigma2 <= 2 * self.var_bounds[:, 1])'], {}), '(sigma2 <= 2 * self.var_bounds[:, 1])\n', (16861, 16898), True, 'import numpy as np\n'), ((16921, 16955), 'numpy.array', 'np.array', (['[-1e+100, 0.4, 0.3, 0.2]'], {}), '([-1e+100, 0.4, 0.3, 0.2])\n', (16929, 16955), True, 'import numpy as np\n'), ((16963, 17068), 'arch.univariate.recursions_python.garch_recursion', 'recpy.garch_recursion', (['parameters', 'fresids', 'sresids', 'sigma2', '(1)', '(1)', '(1)', 'nobs', 'backcast', 'self.var_bounds'], {}), '(parameters, fresids, sresids, sigma2, 1, 1, 1, nobs,\n backcast, self.var_bounds)\n', (16984, 17068), True, 'import arch.univariate.recursions_python as recpy\n'), ((17481, 17523), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['sigma2_python', 'sigma2'], {}), '(sigma2_python, sigma2)\n', (17500, 17523), False, 'from numpy.testing import assert_allclose, assert_almost_equal\n'), ((17532, 17582), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['sigma2', 'self.var_bounds[:, 0]'], {}), '(sigma2, self.var_bounds[:, 0])\n', (17551, 17582), False, 'from numpy.testing import assert_allclose, assert_almost_equal\n'), ((17605, 17638), 'numpy.array', 'np.array', (['[1e+100, 0.4, 0.3, 0.2]'], {}), '([1e+100, 0.4, 0.3, 0.2])\n', (17613, 17638), True, 'import numpy as np\n'), ((17646, 17735), 'arch.univariate.recursions_python.arch_recursion', 'recpy.arch_recursion', (['parameters', 'resids', 'sigma2', '(3)', 'nobs', 'backcast', 'self.var_bounds'], {}), '(parameters, resids, sigma2, 3, nobs, backcast, self.\n var_bounds)\n', (17666, 17735), True, 'import arch.univariate.recursions_python as recpy\n'), ((17912, 17954), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['sigma2_python', 'sigma2'], {}), '(sigma2_python, sigma2)\n', (17931, 17954), False, 'from numpy.testing import assert_allclose, assert_almost_equal\n'), ((17970, 18009), 'numpy.all', 'np.all', (['(sigma2 >= self.var_bounds[:, 0])'], {}), 
'(sigma2 >= self.var_bounds[:, 0])\n', (17976, 18009), True, 'import numpy as np\n'), ((18025, 18068), 'numpy.all', 'np.all', (['(sigma2 <= 2 * self.var_bounds[:, 1])'], {}), '(sigma2 <= 2 * self.var_bounds[:, 1])\n', (18031, 18068), True, 'import numpy as np\n'), ((18091, 18125), 'numpy.array', 'np.array', (['[-1e+100, 0.4, 0.3, 0.2]'], {}), '([-1e+100, 0.4, 0.3, 0.2])\n', (18099, 18125), True, 'import numpy as np\n'), ((18133, 18222), 'arch.univariate.recursions_python.arch_recursion', 'recpy.arch_recursion', (['parameters', 'resids', 'sigma2', '(3)', 'nobs', 'backcast', 'self.var_bounds'], {}), '(parameters, resids, sigma2, 3, nobs, backcast, self.\n var_bounds)\n', (18153, 18222), True, 'import arch.univariate.recursions_python as recpy\n'), ((18399, 18441), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['sigma2_python', 'sigma2'], {}), '(sigma2_python, sigma2)\n', (18418, 18441), False, 'from numpy.testing import assert_allclose, assert_almost_equal\n'), ((18450, 18500), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['sigma2', 'self.var_bounds[:, 0]'], {}), '(sigma2, self.var_bounds[:, 0])\n', (18469, 18500), False, 'from numpy.testing import assert_allclose, assert_almost_equal\n'), ((18575, 18607), 'numpy.array', 'np.array', (['[0.0, 0.1, -0.1, 0.95]'], {}), '([0.0, 0.1, -0.1, 0.95])\n', (18583, 18607), True, 'import numpy as np\n'), ((18759, 18780), 'numpy.empty_like', 'np.empty_like', (['sigma2'], {}), '(sigma2)\n', (18772, 18780), True, 'import numpy as np\n'), ((18802, 18823), 'numpy.empty_like', 'np.empty_like', (['sigma2'], {}), '(sigma2)\n', (18815, 18823), True, 'import numpy as np\n'), ((18849, 18870), 'numpy.empty_like', 'np.empty_like', (['sigma2'], {}), '(sigma2)\n', (18862, 18870), True, 'import numpy as np\n'), ((18879, 19008), 'arch.univariate.recursions_python.egarch_recursion', 'recpy.egarch_recursion', (['parameters', 'resids', 'sigma2', 'p', 'o', 'q', 'nobs', 'backcast', 'var_bounds', 'lnsigma2', 'std_resids', 'abs_std_resids'], {}), '(parameters, resids, sigma2, p, o, q, nobs, backcast,\n var_bounds, lnsigma2, std_resids, abs_std_resids)\n', (18901, 19008), True, 'import arch.univariate.recursions_python as recpy\n'), ((19205, 19341), 'arch.univariate.recursions_python.egarch_recursion_python', 'recpy.egarch_recursion_python', (['parameters', 'resids', 'sigma2', 'p', 'o', 'q', 'nobs', 'backcast', 'var_bounds', 'lnsigma2', 'std_resids', 'abs_std_resids'], {}), '(parameters, resids, sigma2, p, o, q, nobs,\n backcast, var_bounds, lnsigma2, std_resids, abs_std_resids)\n', (19234, 19341), True, 'import arch.univariate.recursions_python as recpy\n'), ((19826, 19867), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['sigma2_numba', 'sigma2'], {}), '(sigma2_numba, sigma2)\n', (19845, 19867), False, 'from numpy.testing import assert_allclose, assert_almost_equal\n'), ((19876, 19918), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['sigma2_python', 'sigma2'], {}), '(sigma2_python, sigma2)\n', (19895, 19918), False, 'from numpy.testing import assert_allclose, assert_almost_equal\n'), ((19941, 19959), 'numpy.sqrt', 'np.sqrt', (['(2 / np.pi)'], {}), '(2 / np.pi)\n', (19948, 19959), True, 'import numpy as np\n'), ((20443, 20485), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['sigma2_python', 'sigma2'], {}), '(sigma2_python, sigma2)\n', (20462, 20485), False, 'from numpy.testing import assert_allclose, assert_almost_equal\n'), ((20508, 20543), 'numpy.array', 'np.array', (['[-100.0, 0.1, -0.1, 0.95]'], {}), 
'([-100.0, 0.1, -0.1, 0.95])\n', (20516, 20543), True, 'import numpy as np\n'), ((20552, 20688), 'arch.univariate.recursions_python.egarch_recursion_python', 'recpy.egarch_recursion_python', (['parameters', 'resids', 'sigma2', 'p', 'o', 'q', 'nobs', 'backcast', 'var_bounds', 'lnsigma2', 'std_resids', 'abs_std_resids'], {}), '(parameters, resids, sigma2, p, o, q, nobs,\n backcast, var_bounds, lnsigma2, std_resids, abs_std_resids)\n', (20581, 20688), True, 'import arch.univariate.recursions_python as recpy\n'), ((20855, 20894), 'numpy.all', 'np.all', (['(sigma2 >= self.var_bounds[:, 0])'], {}), '(sigma2 >= self.var_bounds[:, 0])\n', (20861, 20894), True, 'import numpy as np\n'), ((20910, 20953), 'numpy.all', 'np.all', (['(sigma2 <= 2 * self.var_bounds[:, 1])'], {}), '(sigma2 <= 2 * self.var_bounds[:, 1])\n', (20916, 20953), True, 'import numpy as np\n'), ((20976, 21007), 'numpy.array', 'np.array', (['[0.0, 0.1, -0.1, 9.5]'], {}), '([0.0, 0.1, -0.1, 9.5])\n', (20984, 21007), True, 'import numpy as np\n'), ((21016, 21152), 'arch.univariate.recursions_python.egarch_recursion_python', 'recpy.egarch_recursion_python', (['parameters', 'resids', 'sigma2', 'p', 'o', 'q', 'nobs', 'backcast', 'var_bounds', 'lnsigma2', 'std_resids', 'abs_std_resids'], {}), '(parameters, resids, sigma2, p, o, q, nobs,\n backcast, var_bounds, lnsigma2, std_resids, abs_std_resids)\n', (21045, 21152), True, 'import arch.univariate.recursions_python as recpy\n'), ((21319, 21358), 'numpy.all', 'np.all', (['(sigma2 >= self.var_bounds[:, 0])'], {}), '(sigma2 >= self.var_bounds[:, 0])\n', (21325, 21358), True, 'import numpy as np\n'), ((21374, 21417), 'numpy.all', 'np.all', (['(sigma2 <= 2 * self.var_bounds[:, 1])'], {}), '(sigma2 <= 2 * self.var_bounds[:, 1])\n', (21380, 21417), True, 'import numpy as np\n'), ((21440, 21472), 'numpy.array', 'np.array', (['[0.0, 0.1, -0.1, 0.95]'], {}), '([0.0, 0.1, -0.1, 0.95])\n', (21448, 21472), True, 'import numpy as np\n'), ((21548, 21684), 'arch.univariate.recursions_python.egarch_recursion_python', 'recpy.egarch_recursion_python', (['parameters', 'resids', 'sigma2', 'p', 'o', 'q', 'nobs', 'backcast', 'var_bounds', 'lnsigma2', 'std_resids', 'abs_std_resids'], {}), '(parameters, resids, sigma2, p, o, q, nobs,\n backcast, var_bounds, lnsigma2, std_resids, abs_std_resids)\n', (21577, 21684), True, 'import arch.univariate.recursions_python as recpy\n'), ((21851, 21890), 'numpy.all', 'np.all', (['(sigma2 >= self.var_bounds[:, 0])'], {}), '(sigma2 >= self.var_bounds[:, 0])\n', (21857, 21890), True, 'import numpy as np\n'), ((21906, 21949), 'numpy.all', 'np.all', (['(sigma2 <= 2 * self.var_bounds[:, 1])'], {}), '(sigma2 <= 2 * self.var_bounds[:, 1])\n', (21912, 21949), True, 'import numpy as np\n'), ((22110, 22133), 'numpy.array', 'np.array', (['[0.1, 0.8, 0]'], {}), '([0.1, 0.8, 0])\n', (22118, 22133), True, 'import numpy as np\n'), ((22146, 22166), 'numpy.arange', 'np.arange', (['(1)', '(22 + 1)'], {}), '(1, 22 + 1)\n', (22155, 22166), True, 'import numpy as np\n'), ((22280, 22375), 'arch.univariate.recursions_python.midas_recursion', 'recpy.midas_recursion', (['parameters', 'weights', 'resids', 'sigma2', 'nobs', 'backcast', 'self.var_bounds'], {}), '(parameters, weights, resids, sigma2, nobs, backcast,\n self.var_bounds)\n', (22301, 22375), True, 'import arch.univariate.recursions_python as recpy\n'), ((22439, 22541), 'arch.univariate.recursions_python.midas_recursion_python', 'recpy.midas_recursion_python', (['parameters', 'weights', 'resids', 'sigma2', 'nobs', 'backcast', 
'self.var_bounds'], {}), '(parameters, weights, resids, sigma2, nobs,\n backcast, self.var_bounds)\n', (22467, 22541), True, 'import arch.univariate.recursions_python as recpy\n'), ((22726, 22767), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['sigma2_numba', 'sigma2'], {}), '(sigma2_numba, sigma2)\n', (22745, 22767), False, 'from numpy.testing import assert_allclose, assert_almost_equal\n'), ((22776, 22818), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['sigma2_python', 'sigma2'], {}), '(sigma2_python, sigma2)\n', (22795, 22818), False, 'from numpy.testing import assert_allclose, assert_almost_equal\n'), ((22896, 23002), 'arch.univariate.recursions_python.midas_recursion_python', 'recpy.midas_recursion_python', (['parameters', 'weights', 'mod_resids', 'sigma2', 'nobs', 'backcast', 'self.var_bounds'], {}), '(parameters, weights, mod_resids, sigma2, nobs,\n backcast, self.var_bounds)\n', (22924, 23002), True, 'import arch.univariate.recursions_python as recpy\n'), ((23036, 23075), 'numpy.all', 'np.all', (['(sigma2 >= self.var_bounds[:, 0])'], {}), '(sigma2 >= self.var_bounds[:, 0])\n', (23042, 23075), True, 'import numpy as np\n'), ((23091, 23134), 'numpy.all', 'np.all', (['(sigma2 <= 2 * self.var_bounds[:, 1])'], {}), '(sigma2 <= 2 * self.var_bounds[:, 1])\n', (23097, 23134), True, 'import numpy as np\n'), ((23157, 23191), 'numpy.array', 'np.array', (['[0.1, 100000000000.0, 0]'], {}), '([0.1, 100000000000.0, 0])\n', (23165, 23191), True, 'import numpy as np\n'), ((23195, 23215), 'numpy.arange', 'np.arange', (['(1)', '(22 + 1)'], {}), '(1, 22 + 1)\n', (23204, 23215), True, 'import numpy as np\n'), ((23329, 23431), 'arch.univariate.recursions_python.midas_recursion_python', 'recpy.midas_recursion_python', (['parameters', 'weights', 'resids', 'sigma2', 'nobs', 'backcast', 'self.var_bounds'], {}), '(parameters, weights, resids, sigma2, nobs,\n backcast, self.var_bounds)\n', (23357, 23431), True, 'import arch.univariate.recursions_python as recpy\n'), ((23465, 23504), 'numpy.all', 'np.all', (['(sigma2 >= self.var_bounds[:, 0])'], {}), '(sigma2 >= self.var_bounds[:, 0])\n', (23471, 23504), True, 'import numpy as np\n'), ((23520, 23563), 'numpy.all', 'np.all', (['(sigma2 <= 2 * self.var_bounds[:, 1])'], {}), '(sigma2 <= 2 * self.var_bounds[:, 1])\n', (23526, 23563), True, 'import numpy as np\n'), ((23699, 23738), 'numpy.all', 'np.all', (['(sigma2 >= self.var_bounds[:, 0])'], {}), '(sigma2 >= self.var_bounds[:, 0])\n', (23705, 23738), True, 'import numpy as np\n'), ((23754, 23797), 'numpy.all', 'np.all', (['(sigma2 <= 2 * self.var_bounds[:, 1])'], {}), '(sigma2 <= 2 * self.var_bounds[:, 1])\n', (23760, 23797), True, 'import numpy as np\n'), ((23820, 23844), 'numpy.array', 'np.array', (['[0.1, -0.4, 0]'], {}), '([0.1, -0.4, 0])\n', (23828, 23844), True, 'import numpy as np\n'), ((23853, 23955), 'arch.univariate.recursions_python.midas_recursion_python', 'recpy.midas_recursion_python', (['parameters', 'weights', 'resids', 'sigma2', 'nobs', 'backcast', 'self.var_bounds'], {}), '(parameters, weights, resids, sigma2, nobs,\n backcast, self.var_bounds)\n', (23881, 23955), True, 'import arch.univariate.recursions_python as recpy\n'), ((23989, 24028), 'numpy.all', 'np.all', (['(sigma2 >= self.var_bounds[:, 0])'], {}), '(sigma2 >= self.var_bounds[:, 0])\n', (23995, 24028), True, 'import numpy as np\n'), ((24044, 24087), 'numpy.all', 'np.all', (['(sigma2 <= 2 * self.var_bounds[:, 1])'], {}), '(sigma2 <= 2 * self.var_bounds[:, 1])\n', (24050, 24087), True, 'import numpy as 
np\n'), ((24223, 24262), 'numpy.all', 'np.all', (['(sigma2 >= self.var_bounds[:, 0])'], {}), '(sigma2 >= self.var_bounds[:, 0])\n', (24229, 24262), True, 'import numpy as np\n'), ((24278, 24321), 'numpy.all', 'np.all', (['(sigma2 <= 2 * self.var_bounds[:, 1])'], {}), '(sigma2 <= 2 * self.var_bounds[:, 1])\n', (24284, 24321), True, 'import numpy as np\n'), ((24482, 24512), 'numpy.array', 'np.array', (['[1.0, 0.2, 0.4, 0.3]'], {}), '([1.0, 0.2, 0.4, 0.3])\n', (24490, 24512), True, 'import numpy as np\n'), ((25002, 25023), 'numpy.empty_like', 'np.empty_like', (['sigma2'], {}), '(sigma2)\n', (25015, 25023), True, 'import numpy as np\n'), ((25349, 25391), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['sigma2_direct', 'sigma2'], {}), '(sigma2_direct, sigma2)\n', (25368, 25391), False, 'from numpy.testing import assert_allclose, assert_almost_equal\n'), ((25401, 25507), 'arch.univariate.recursions_python.figarch_recursion', 'recpy.figarch_recursion', (['parameters', 'fresids', 'sigma2', 'p', 'q', 'nobs', 'trunc_lag', 'backcast', 'self.var_bounds'], {}), '(parameters, fresids, sigma2, p, q, nobs, trunc_lag,\n backcast, self.var_bounds)\n', (25424, 25507), True, 'import arch.univariate.recursions_python as recpy\n'), ((25668, 25781), 'arch.univariate.recursions_python.figarch_recursion_python', 'recpy.figarch_recursion_python', (['parameters', 'fresids', 'sigma2', 'p', 'q', 'nobs', 'trunc_lag', 'backcast', 'self.var_bounds'], {}), '(parameters, fresids, sigma2, p, q, nobs,\n trunc_lag, backcast, self.var_bounds)\n', (25698, 25781), True, 'import arch.univariate.recursions_python as recpy\n'), ((26171, 26212), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['sigma2_numba', 'sigma2'], {}), '(sigma2_numba, sigma2)\n', (26190, 26212), False, 'from numpy.testing import assert_allclose, assert_almost_equal\n'), ((26221, 26263), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['sigma2_python', 'sigma2'], {}), '(sigma2_python, sigma2)\n', (26240, 26263), False, 'from numpy.testing import assert_allclose, assert_almost_equal\n'), ((26322, 26342), 'numpy.array', 'np.array', (['[1.0, 0.4]'], {}), '([1.0, 0.4])\n', (26330, 26342), True, 'import numpy as np\n'), ((26436, 26454), 'numpy.empty_like', 'np.empty_like', (['lam'], {}), '(lam)\n', (26449, 26454), True, 'import numpy as np\n'), ((26614, 26650), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['lam', 'lam_direct'], {}), '(lam, lam_direct)\n', (26633, 26650), False, 'from numpy.testing import assert_allclose, assert_almost_equal\n'), ((31281, 31306), 'numpy.array', 'np.array', (['[0.1, 0.1, 0.8]'], {}), '([0.1, 0.1, 0.8])\n', (31289, 31306), True, 'import numpy as np\n'), ((31360, 31380), 'numpy.sign', 'np.sign', (['self.resids'], {}), '(self.resids)\n', (31367, 31380), True, 'import numpy as np\n'), ((31398, 31412), 'numpy.empty', 'np.empty', (['(1000)'], {}), '(1000)\n', (31406, 31412), True, 'import numpy as np\n'), ((31453, 31575), 'arch.univariate.recursions_python.garch_recursion_python', 'recpy.garch_recursion_python', (['parameters', 'fresids', 'sresids', 'sigma2', 'p', 'o', 'q', 'self.nobs', 'self.backcast', 'self.var_bounds'], {}), '(parameters, fresids, sresids, sigma2, p, o, q,\n self.nobs, self.backcast, self.var_bounds)\n', (31481, 31575), True, 'import arch.univariate.recursions_python as recpy\n'), ((31762, 31790), 'numpy.array', 'np.array', (['[0.1, 0.1, 0.8, 2]'], {}), '([0.1, 0.1, 0.8, 2])\n', (31770, 31790), True, 'import numpy as np\n'), ((31841, 31862), 'numpy.empty_like', 
'np.empty_like', (['sigma2'], {}), '(sigma2)\n', (31854, 31862), True, 'import numpy as np\n'), ((32172, 32221), 'numpy.testing.assert_allclose', 'assert_allclose', (['sigma2_garch', 'sigma2'], {'atol': '(1e-06)'}), '(sigma2_garch, sigma2, atol=1e-06)\n', (32187, 32221), False, 'from numpy.testing import assert_allclose, assert_almost_equal\n'), ((32551, 32600), 'numpy.testing.assert_allclose', 'assert_allclose', (['sigma2_garch', 'sigma2'], {'atol': '(1e-06)'}), '(sigma2_garch, sigma2, atol=1e-06)\n', (32566, 32600), False, 'from numpy.testing import assert_allclose, assert_almost_equal\n'), ((32928, 32977), 'numpy.testing.assert_allclose', 'assert_allclose', (['sigma2_garch', 'sigma2'], {'atol': '(1e-06)'}), '(sigma2_garch, sigma2, atol=1e-06)\n', (32943, 32977), False, 'from numpy.testing import assert_allclose, assert_almost_equal\n'), ((33033, 33047), 'numpy.empty', 'np.empty', (['(1000)'], {}), '(1000)\n', (33041, 33047), True, 'import numpy as np\n'), ((33091, 33126), 'numpy.array', 'np.array', (['[0.1, 0.1, 0.1, 0.8, 1.3]'], {}), '([0.1, 0.1, 0.1, 0.8, 1.3])\n', (33099, 33126), True, 'import numpy as np\n'), ((33177, 33198), 'numpy.empty_like', 'np.empty_like', (['sigma2'], {}), '(sigma2)\n', (33190, 33198), True, 'import numpy as np\n'), ((33949, 33983), 'numpy.testing.assert_allclose', 'assert_allclose', (['sigma2_py', 'sigma2'], {}), '(sigma2_py, sigma2)\n', (33964, 33983), False, 'from numpy.testing import assert_allclose, assert_almost_equal\n'), ((34355, 34389), 'numpy.testing.assert_allclose', 'assert_allclose', (['sigma2_py', 'sigma2'], {}), '(sigma2_py, sigma2)\n', (34370, 34389), False, 'from numpy.testing import assert_allclose, assert_almost_equal\n'), ((34478, 34521), 'arch.univariate.recursions_python.bounds_check_python', 'recpy.bounds_check_python', (['(-1.0)', 'var_bounds'], {}), '(-1.0, var_bounds)\n', (34503, 34521), True, 'import arch.univariate.recursions_python as recpy\n'), ((34561, 34604), 'arch.univariate.recursions_python.bounds_check_python', 'recpy.bounds_check_python', (['(20.0)', 'var_bounds'], {}), '(20.0, var_bounds)\n', (34586, 34604), True, 'import arch.univariate.recursions_python as recpy\n'), ((34661, 34706), 'arch.univariate.recursions_python.bounds_check_python', 'recpy.bounds_check_python', (['np.inf', 'var_bounds'], {}), '(np.inf, var_bounds)\n', (34686, 34706), True, 'import arch.univariate.recursions_python as recpy\n'), ((2371, 2407), 'timeit.Timer', 'timeit.Timer', (['code'], {'setup': 'self.setup'}), '(code, setup=self.setup)\n', (2383, 2407), False, 'import timeit\n'), ((2843, 2865), 'numpy.ones', 'np.ones', (['(cls.nobs, 2)'], {}), '((cls.nobs, 2))\n', (2850, 2865), True, 'import numpy as np\n'), ((10089, 10103), 'numpy.abs', 'np.abs', (['resids'], {}), '(resids)\n', (10095, 10103), True, 'import numpy as np\n'), ((10920, 10934), 'numpy.abs', 'np.abs', (['resids'], {}), '(resids)\n', (10926, 10934), True, 'import numpy as np\n'), ((20415, 20434), 'numpy.exp', 'np.exp', (['lnsigma2[t]'], {}), '(lnsigma2[t])\n', (20421, 20434), True, 'import numpy as np\n'), ((22185, 22199), 'scipy.special.gamma', 'gamma', (['(j + 0.6)'], {}), '(j + 0.6)\n', (22190, 22199), False, 'from scipy.special import gamma\n'), ((23234, 23248), 'scipy.special.gamma', 'gamma', (['(j + 0.6)'], {}), '(j + 0.6)\n', (23239, 23248), False, 'from scipy.special import gamma\n'), ((31963, 31982), 'numpy.abs', 'np.abs', (['self.resids'], {}), '(self.resids)\n', (31969, 31982), True, 'import numpy as np\n'), ((32342, 32361), 'numpy.abs', 'np.abs', (['self.resids'], {}), 
'(self.resids)\n', (32348, 32361), True, 'import numpy as np\n'), ((32719, 32738), 'numpy.abs', 'np.abs', (['self.resids'], {}), '(self.resids)\n', (32725, 32738), True, 'import numpy as np\n'), ((33299, 33318), 'numpy.abs', 'np.abs', (['self.resids'], {}), '(self.resids)\n', (33305, 33318), True, 'import numpy as np\n'), ((33522, 33541), 'numpy.isfinite', 'np.isfinite', (['sigma2'], {}), '(sigma2)\n', (33533, 33541), True, 'import numpy as np\n'), ((33697, 33716), 'numpy.abs', 'np.abs', (['self.resids'], {}), '(self.resids)\n', (33703, 33716), True, 'import numpy as np\n'), ((33920, 33939), 'numpy.isfinite', 'np.isfinite', (['sigma2'], {}), '(sigma2)\n', (33931, 33939), True, 'import numpy as np\n'), ((34103, 34122), 'numpy.abs', 'np.abs', (['self.resids'], {}), '(self.resids)\n', (34109, 34122), True, 'import numpy as np\n'), ((34326, 34345), 'numpy.isfinite', 'np.isfinite', (['sigma2'], {}), '(sigma2)\n', (34337, 34345), True, 'import numpy as np\n'), ((34611, 34630), 'numpy.log', 'np.log', (['(20.0 / 10.0)'], {}), '(20.0 / 10.0)\n', (34617, 34630), True, 'import numpy as np\n'), ((11204, 11304), 'numpy.array', 'np.array', (['[1.0, resids[t - 1] ** 2.0, resids[t - 1] ** 2.0 * (resids[t - 1] < 0),\n sigma2[t - 1]]'], {}), '([1.0, resids[t - 1] ** 2.0, resids[t - 1] ** 2.0 * (resids[t - 1] <\n 0), sigma2[t - 1]])\n', (11212, 11304), True, 'import numpy as np\n'), ((22203, 22215), 'scipy.special.gamma', 'gamma', (['(j + 1)'], {}), '(j + 1)\n', (22208, 22215), False, 'from scipy.special import gamma\n'), ((22218, 22228), 'scipy.special.gamma', 'gamma', (['(0.6)'], {}), '(0.6)\n', (22223, 22228), False, 'from scipy.special import gamma\n'), ((23252, 23264), 'scipy.special.gamma', 'gamma', (['(j + 1)'], {}), '(j + 1)\n', (23257, 23264), False, 'from scipy.special import gamma\n'), ((23267, 23277), 'scipy.special.gamma', 'gamma', (['(0.6)'], {}), '(0.6)\n', (23272, 23277), False, 'from scipy.special import gamma\n'), ((11094, 11145), 'numpy.array', 'np.array', (['[1.0, backcast, 0.5 * backcast, backcast]'], {}), '([1.0, backcast, 0.5 * backcast, backcast])\n', (11102, 11145), True, 'import numpy as np\n'), ((20170, 20192), 'numpy.sqrt', 'np.sqrt', (['sigma2[t - 1]'], {}), '(sigma2[t - 1])\n', (20177, 20192), True, 'import numpy as np\n'), ((20241, 20257), 'numpy.abs', 'np.abs', (['stdresid'], {}), '(stdresid)\n', (20247, 20257), True, 'import numpy as np\n')] |
##############################################
# The MIT License (MIT)
# Copyright (c) 2014 <NAME>
# see LICENSE for full details
##############################################
# -*- coding: utf-8 -*-
from math import atan, pi
def fov(w, f):
    """
    Returns the FOV in degrees, given:
        w: image width (or height) in pixels
        f: focal length (fx or fy) in pixels
    """
    return 2*atan(w/2/f) * 180/pi
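# A minimal usage sketch (the pixel values below are illustrative, not from the
# original source): a 640-pixel-wide image with fx = 320 px spans
# 2*atan(320/320) = 2*atan(1), i.e. roughly a 90 degree horizontal FOV.
if __name__ == "__main__":
    print(fov(640, 320))  # ~90.0 degrees
    print(fov(480, 320))  # vertical FOV for a 640x480 sensor, ~73.7 degrees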
| [
"math.atan"
]
| [((426, 441), 'math.atan', 'atan', (['(w / 2 / f)'], {}), '(w / 2 / f)\n', (430, 441), False, 'from math import atan, pi\n')] |
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python Docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import scipy.sparse as sps
import time
RM_train=pd.read_csv('./input/data_train.csv')
R_test=pd.read_csv('./input/data_target_users_test.csv')
URM=pd.read_csv('./input/data_train.csv')
ICM = pd.read_csv('./input/data_ICM_title_abstract.csv')
##### URM
URM_tuples = [tuple(x) for x in URM.to_numpy()]
userList, itemList, ratingList = zip(*URM_tuples)
userList = list(userList)
userList=np.array(userList,dtype=np.int64)
itemList = list(itemList)
itemList=np.array(itemList,dtype=np.int64)
ratingList = list(ratingList) #not needed
ratingList=np.array(ratingList,dtype=np.int64) #not needed
URM_all = sps.coo_matrix((ratingList, (userList, itemList)))
URM_all = URM_all.tocsr()
#### ICM
ICM_tuples = [tuple(x) for x in ICM.to_numpy()]
itemList_icm, featureList_icm, scoreList_icm = zip(*ICM_tuples)
itemList_icm = list(itemList_icm)
itemList_icm = np.array(itemList_icm,dtype=np.int64)
featureList_icm = list(featureList_icm)
featureList_icm = np.array(featureList_icm,dtype=np.int64)
scoreList_icm = list(scoreList_icm)
scoreList_icm = np.array(scoreList_icm,dtype=np.float64)
ICM_all = sps.coo_matrix((scoreList_icm, (itemList_icm, featureList_icm)))
#### Test
userTestList = [x for x in R_test.to_numpy()]
userTestList = zip(*userTestList)
userTestList = [list(a) for a in userTestList][0]
#### make validation and test
from Base.Evaluation.Evaluator import EvaluatorHoldout
from Data_manager.split_functions.split_train_validation_random_holdout import split_train_in_two_percentage_global_sample
URM_train, URM_test = split_train_in_two_percentage_global_sample(URM_all, train_percentage = 0.80)
URM_train, URM_validation = split_train_in_two_percentage_global_sample(URM_train, train_percentage = 0.80)
evaluator_validation = EvaluatorHoldout(URM_validation, cutoff_list=[10])
evaluator_test = EvaluatorHoldout(URM_test, cutoff_list=[10])
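# The two evaluators above are intended for offline tuning; the submission
# pipeline below fits the hybrid model on the full URM_all.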
### hybrid recommender
### Using TF-IDF
ICM_all = ICM_all.tocsr()
num_tot_items = ICM_all.shape[0]
# let's count how many items have a certain feature
items_per_feature = np.ediff1d(ICM_all.indptr) + 1
# print(items_per_feature)
IDF = np.array(np.log(num_tot_items / items_per_feature))
from scipy.sparse import diags
diags(IDF)
ICM_idf = ICM_all.copy()
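# Left-multiplying by diags(IDF) scales each row (item) of the ICM by its weight.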
ICM_idf = diags(IDF)*ICM_idf
############## top pop
item_popularity = np.ediff1d(URM_all.tocsc().indptr)
popular_items = np.argsort(item_popularity)
popular_items = np.flip(popular_items, axis=0)
popular_items = popular_items[0:10]
###########
from HybridRecommender import HybridRecommender
recommender = HybridRecommender(URM_all)
recommender.fit([0.2, 0.3, 0.2], ICM_idf)
recoms = recommender.recommend(userTestList, cutoff=10)
recomList = []
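# Users with no interactions in URM_train fall back to the global top-10 popular items.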
for i in range(len(recoms)):
user_id = userTestList[i]
start_pos = URM_train.indptr[user_id]
end_pos = URM_train.indptr[user_id + 1]
if start_pos == end_pos:
recomList.append(' '.join(str(e) for e in popular_items))
else:
recomList.append(' '.join(str(e) for e in recoms[i]))
# print(recomList)
res = {"user_id": userTestList, "item_list": recomList}
result = pd.DataFrame(res, columns= ['user_id', 'item_list'])
result.to_csv('outputs/hybrid_slim_cbf_rp3v1.csv', index = False, header=True)
| [
"numpy.flip",
"Base.Evaluation.Evaluator.EvaluatorHoldout",
"scipy.sparse.diags",
"pandas.read_csv",
"numpy.ediff1d",
"Data_manager.split_functions.split_train_validation_random_holdout.split_train_in_two_percentage_global_sample",
"HybridRecommender.HybridRecommender",
"numpy.log",
"numpy.argsort",
"numpy.array",
"scipy.sparse.coo_matrix",
"pandas.DataFrame"
]
| [((384, 421), 'pandas.read_csv', 'pd.read_csv', (['"""./input/data_train.csv"""'], {}), "('./input/data_train.csv')\n", (395, 421), True, 'import pandas as pd\n'), ((429, 478), 'pandas.read_csv', 'pd.read_csv', (['"""./input/data_target_users_test.csv"""'], {}), "('./input/data_target_users_test.csv')\n", (440, 478), True, 'import pandas as pd\n'), ((483, 520), 'pandas.read_csv', 'pd.read_csv', (['"""./input/data_train.csv"""'], {}), "('./input/data_train.csv')\n", (494, 520), True, 'import pandas as pd\n'), ((527, 577), 'pandas.read_csv', 'pd.read_csv', (['"""./input/data_ICM_title_abstract.csv"""'], {}), "('./input/data_ICM_title_abstract.csv')\n", (538, 577), True, 'import pandas as pd\n'), ((724, 758), 'numpy.array', 'np.array', (['userList'], {'dtype': 'np.int64'}), '(userList, dtype=np.int64)\n', (732, 758), True, 'import numpy as np\n'), ((793, 827), 'numpy.array', 'np.array', (['itemList'], {'dtype': 'np.int64'}), '(itemList, dtype=np.int64)\n', (801, 827), True, 'import numpy as np\n'), ((904, 940), 'numpy.array', 'np.array', (['ratingList'], {'dtype': 'np.int64'}), '(ratingList, dtype=np.int64)\n', (912, 940), True, 'import numpy as np\n'), ((969, 1019), 'scipy.sparse.coo_matrix', 'sps.coo_matrix', (['(ratingList, (userList, itemList))'], {}), '((ratingList, (userList, itemList)))\n', (983, 1019), True, 'import scipy.sparse as sps\n'), ((1219, 1257), 'numpy.array', 'np.array', (['itemList_icm'], {'dtype': 'np.int64'}), '(itemList_icm, dtype=np.int64)\n', (1227, 1257), True, 'import numpy as np\n'), ((1316, 1357), 'numpy.array', 'np.array', (['featureList_icm'], {'dtype': 'np.int64'}), '(featureList_icm, dtype=np.int64)\n', (1324, 1357), True, 'import numpy as np\n'), ((1410, 1451), 'numpy.array', 'np.array', (['scoreList_icm'], {'dtype': 'np.float64'}), '(scoreList_icm, dtype=np.float64)\n', (1418, 1451), True, 'import numpy as np\n'), ((1462, 1526), 'scipy.sparse.coo_matrix', 'sps.coo_matrix', (['(scoreList_icm, (itemList_icm, featureList_icm))'], {}), '((scoreList_icm, (itemList_icm, featureList_icm)))\n', (1476, 1526), True, 'import scipy.sparse as sps\n'), ((1901, 1975), 'Data_manager.split_functions.split_train_validation_random_holdout.split_train_in_two_percentage_global_sample', 'split_train_in_two_percentage_global_sample', (['URM_all'], {'train_percentage': '(0.8)'}), '(URM_all, train_percentage=0.8)\n', (1944, 1975), False, 'from Data_manager.split_functions.split_train_validation_random_holdout import split_train_in_two_percentage_global_sample\n'), ((2007, 2083), 'Data_manager.split_functions.split_train_validation_random_holdout.split_train_in_two_percentage_global_sample', 'split_train_in_two_percentage_global_sample', (['URM_train'], {'train_percentage': '(0.8)'}), '(URM_train, train_percentage=0.8)\n', (2050, 2083), False, 'from Data_manager.split_functions.split_train_validation_random_holdout import split_train_in_two_percentage_global_sample\n'), ((2111, 2161), 'Base.Evaluation.Evaluator.EvaluatorHoldout', 'EvaluatorHoldout', (['URM_validation'], {'cutoff_list': '[10]'}), '(URM_validation, cutoff_list=[10])\n', (2127, 2161), False, 'from Base.Evaluation.Evaluator import EvaluatorHoldout\n'), ((2179, 2223), 'Base.Evaluation.Evaluator.EvaluatorHoldout', 'EvaluatorHoldout', (['URM_test'], {'cutoff_list': '[10]'}), '(URM_test, cutoff_list=[10])\n', (2195, 2223), False, 'from Base.Evaluation.Evaluator import EvaluatorHoldout\n'), ((2549, 2559), 'scipy.sparse.diags', 'diags', (['IDF'], {}), '(IDF)\n', (2554, 2559), False, 'from scipy.sparse import diags\n'), 
((2710, 2737), 'numpy.argsort', 'np.argsort', (['item_popularity'], {}), '(item_popularity)\n', (2720, 2737), True, 'import numpy as np\n'), ((2754, 2784), 'numpy.flip', 'np.flip', (['popular_items'], {'axis': '(0)'}), '(popular_items, axis=0)\n', (2761, 2784), True, 'import numpy as np\n'), ((2896, 2922), 'HybridRecommender.HybridRecommender', 'HybridRecommender', (['URM_all'], {}), '(URM_all)\n', (2913, 2922), False, 'from HybridRecommender import HybridRecommender\n'), ((3437, 3488), 'pandas.DataFrame', 'pd.DataFrame', (['res'], {'columns': "['user_id', 'item_list']"}), "(res, columns=['user_id', 'item_list'])\n", (3449, 3488), True, 'import pandas as pd\n'), ((2400, 2426), 'numpy.ediff1d', 'np.ediff1d', (['ICM_all.indptr'], {}), '(ICM_all.indptr)\n', (2410, 2426), True, 'import numpy as np\n'), ((2474, 2515), 'numpy.log', 'np.log', (['(num_tot_items / items_per_feature)'], {}), '(num_tot_items / items_per_feature)\n', (2480, 2515), True, 'import numpy as np\n'), ((2597, 2607), 'scipy.sparse.diags', 'diags', (['IDF'], {}), '(IDF)\n', (2602, 2607), False, 'from scipy.sparse import diags\n')] |
import os
import numpy as np
import scipy.io as sio
import tifffile
from sklearn.decomposition import PCA
from sklearn.model_selection import train_test_split
# Load dataset
def loadData(name,data_path):
if name == 'IP':
data = sio.loadmat(os.path.join(data_path, 'Indian_pines_corrected.mat'))['indian_pines_corrected']
labels = sio.loadmat(os.path.join(data_path, 'Indian_pines_gt.mat'))['indian_pines_gt']
elif name == 'SA':
data = sio.loadmat(os.path.join(data_path, 'Salinas_corrected.mat'))['salinas_corrected']
labels = sio.loadmat(os.path.join(data_path, 'Salinas_gt.mat'))['salinas_gt']
elif name == 'PU':
data = sio.loadmat(os.path.join(data_path, 'PaviaU.mat'))['paviaU']
labels = sio.loadmat(os.path.join(data_path, 'PaviaU_gt.mat'))['paviaU_gt']
elif name == 'HU13':
# dict_keys(['__header__', '__version__', '__globals__', 'Houston'])
#dict_values([b'MATLAB 5.0 MAT-file, Platform: PCWIN64, Created on: Wed Jul 17 16:45:01 2019', '1.0', [], array()])
#data = sio.loadmat(os.path.join(data_path, 'Houston.mat'))
#labels = sio.loadmat(os.path.join(data_path,'Houston_gt.mat'))
data = sio.loadmat(os.path.join(data_path, 'Houston.mat'))['Houston']
labels = sio.loadmat(os.path.join(data_path,'Houston_gt.mat'))['Houston_gt']
elif name == 'KSC':
data = sio.loadmat(os.path.join(data_path, 'KSC.mat'))['KSC']
labels = sio.loadmat(os.path.join(data_path,'KSC_gt.mat'))['KSC_gt']
return data, labels
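# Illustrative usage of loadData (a sketch; assumes the .mat files listed above live in ./data):
#   X, y = loadData('IP', './data')
#   # X is the hyperspectral cube with shape (H, W, bands); y is the (H, W) ground-truth label map.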
# Use the tifffile package to read the hyperspectral image.
# Load the .tiff data set and convert it to .mat data.
def loadTifDataTomat(data_path,save_DataPath,name):
if name=='HU13':
totalTif=tifffile.imread(os.path.join(data_path,'2013_IEEE_GRSS_DF_Contest_CASI.tif'))
trainTif=tifffile.imread(os.path.join(data_path,'train_roi.tif'))
valTif=tifffile.imread(os.path.join(data_path,'val_roi.tif'))
print(totalTif.shape,trainTif.shape,valTif.shape)
#spectral.imshow(totalTif)
#spectral.imshow(trainTif)
sio.savemat(os.path.join(save_DataPath,"totalTifHouston13.mat"),{'totalTifHouston13':totalTif})
sio.savemat(os.path.join(save_DataPath,"trainTifHouston13.mat"),{'trainTifHouston13':trainTif})
sio.savemat(os.path.join(save_DataPath,"valTifHouston13.mat"),{'valTifHouston13':valTif})
def loadTifMat(data_path,name):
if name=='HU13':
data=sio.loadmat(os.path.join(data_path, 'totalTifHouston13.mat'))['totalTifHouston13']
train=sio.loadmat(os.path.join(data_path, 'trainTifHouston13.mat'))['trainTifHouston13']
val=sio.loadmat(os.path.join(data_path, 'valTifHouston13.mat'))['valTifHouston13']
return data,train,val
### Use PCA to remove spectral redundancy.
### Reduce the spectral dimension from high-dimensional to low-dimensional.
def applyPCA(X, numComponents=75):
newX = np.reshape(X, (-1, X.shape[2]))
pca = PCA(n_components=numComponents, whiten=True)
newX = pca.fit_transform(newX)
newX = np.reshape(newX, (X.shape[0],X.shape[1], numComponents))
return newX, pca
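# Illustrative usage (a sketch): reduce the spectral axis before patch extraction.
#   X_pca, pca = applyPCA(X, numComponents=30)
#   # X_pca keeps the spatial shape of X but has only 30 spectral channels.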
### Padding zeros
def padWithZeros(X, margin=2):
newX = np.zeros((X.shape[0] + 2 * margin, X.shape[1] + 2* margin, X.shape[2]))
x_offset = margin
y_offset = margin
newX[x_offset:X.shape[0] + x_offset, y_offset:X.shape[1] + y_offset, :] = X
return newX
### Create data cubes (3D patches).
def createImageCubes(X, y, windowSize=5, removeZeroLabels = True):
margin = int((windowSize - 1) / 2)
zeroPaddedX = padWithZeros(X, margin=margin)
# split patches
patchesData = np.zeros((X.shape[0] * X.shape[1], windowSize, windowSize, X.shape[2]))
patchesLabels = np.zeros((X.shape[0] * X.shape[1]))
patchIndex = 0
for r in range(margin, zeroPaddedX.shape[0] - margin):
for c in range(margin, zeroPaddedX.shape[1] - margin):
patch = zeroPaddedX[r - margin:r + margin + 1, c - margin:c + margin + 1]
patchesData[patchIndex, :, :, :] = patch
patchesLabels[patchIndex] = y[r-margin, c-margin]
patchIndex = patchIndex + 1
if removeZeroLabels:
patchesData = patchesData[patchesLabels>0,:,:,:]
patchesLabels = patchesLabels[patchesLabels>0]
patchesLabels -= 1
return patchesData, patchesLabels
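# Typical preprocessing pipeline (a sketch combining the helpers above; the window size and
# test ratio are illustrative choices, not values required by this module):
#   X_pca, _ = applyPCA(X, numComponents=30)
#   patches, labels = createImageCubes(X_pca, y, windowSize=25)
#   X_train, X_test, y_train, y_test = splitTrainTestSet(patches, labels, testRatio=0.3)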
# Dataset split.
def splitTrainTestSet(X, y, testRatio, randomState=345):
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=testRatio, random_state=randomState,
stratify=y)
return X_train, X_test, y_train, y_test | [
"numpy.reshape",
"sklearn.decomposition.PCA",
"sklearn.model_selection.train_test_split",
"os.path.join",
"numpy.zeros"
]
| [((2943, 2974), 'numpy.reshape', 'np.reshape', (['X', '(-1, X.shape[2])'], {}), '(X, (-1, X.shape[2]))\n', (2953, 2974), True, 'import numpy as np\n'), ((2985, 3029), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': 'numComponents', 'whiten': '(True)'}), '(n_components=numComponents, whiten=True)\n', (2988, 3029), False, 'from sklearn.decomposition import PCA\n'), ((3076, 3133), 'numpy.reshape', 'np.reshape', (['newX', '(X.shape[0], X.shape[1], numComponents)'], {}), '(newX, (X.shape[0], X.shape[1], numComponents))\n', (3086, 3133), True, 'import numpy as np\n'), ((3217, 3289), 'numpy.zeros', 'np.zeros', (['(X.shape[0] + 2 * margin, X.shape[1] + 2 * margin, X.shape[2])'], {}), '((X.shape[0] + 2 * margin, X.shape[1] + 2 * margin, X.shape[2]))\n', (3225, 3289), True, 'import numpy as np\n'), ((3656, 3727), 'numpy.zeros', 'np.zeros', (['(X.shape[0] * X.shape[1], windowSize, windowSize, X.shape[2])'], {}), '((X.shape[0] * X.shape[1], windowSize, windowSize, X.shape[2]))\n', (3664, 3727), True, 'import numpy as np\n'), ((3748, 3781), 'numpy.zeros', 'np.zeros', (['(X.shape[0] * X.shape[1])'], {}), '(X.shape[0] * X.shape[1])\n', (3756, 3781), True, 'import numpy as np\n'), ((4482, 4567), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': 'testRatio', 'random_state': 'randomState', 'stratify': 'y'}), '(X, y, test_size=testRatio, random_state=randomState,\n stratify=y)\n', (4498, 4567), False, 'from sklearn.model_selection import train_test_split\n'), ((1751, 1812), 'os.path.join', 'os.path.join', (['data_path', '"""2013_IEEE_GRSS_DF_Contest_CASI.tif"""'], {}), "(data_path, '2013_IEEE_GRSS_DF_Contest_CASI.tif')\n", (1763, 1812), False, 'import os\n'), ((1846, 1886), 'os.path.join', 'os.path.join', (['data_path', '"""train_roi.tif"""'], {}), "(data_path, 'train_roi.tif')\n", (1858, 1886), False, 'import os\n'), ((1918, 1956), 'os.path.join', 'os.path.join', (['data_path', '"""val_roi.tif"""'], {}), "(data_path, 'val_roi.tif')\n", (1930, 1956), False, 'import os\n'), ((2106, 2158), 'os.path.join', 'os.path.join', (['save_DataPath', '"""totalTifHouston13.mat"""'], {}), "(save_DataPath, 'totalTifHouston13.mat')\n", (2118, 2158), False, 'import os\n'), ((2210, 2262), 'os.path.join', 'os.path.join', (['save_DataPath', '"""trainTifHouston13.mat"""'], {}), "(save_DataPath, 'trainTifHouston13.mat')\n", (2222, 2262), False, 'import os\n'), ((2314, 2364), 'os.path.join', 'os.path.join', (['save_DataPath', '"""valTifHouston13.mat"""'], {}), "(save_DataPath, 'valTifHouston13.mat')\n", (2326, 2364), False, 'import os\n'), ((254, 307), 'os.path.join', 'os.path.join', (['data_path', '"""Indian_pines_corrected.mat"""'], {}), "(data_path, 'Indian_pines_corrected.mat')\n", (266, 307), False, 'import os\n'), ((364, 410), 'os.path.join', 'os.path.join', (['data_path', '"""Indian_pines_gt.mat"""'], {}), "(data_path, 'Indian_pines_gt.mat')\n", (376, 410), False, 'import os\n'), ((2473, 2521), 'os.path.join', 'os.path.join', (['data_path', '"""totalTifHouston13.mat"""'], {}), "(data_path, 'totalTifHouston13.mat')\n", (2485, 2521), False, 'import os\n'), ((2570, 2618), 'os.path.join', 'os.path.join', (['data_path', '"""trainTifHouston13.mat"""'], {}), "(data_path, 'trainTifHouston13.mat')\n", (2582, 2618), False, 'import os\n'), ((2665, 2711), 'os.path.join', 'os.path.join', (['data_path', '"""valTifHouston13.mat"""'], {}), "(data_path, 'valTifHouston13.mat')\n", (2677, 2711), False, 'import os\n'), ((481, 529), 'os.path.join', 'os.path.join', (['data_path', 
'"""Salinas_corrected.mat"""'], {}), "(data_path, 'Salinas_corrected.mat')\n", (493, 529), False, 'import os\n'), ((581, 622), 'os.path.join', 'os.path.join', (['data_path', '"""Salinas_gt.mat"""'], {}), "(data_path, 'Salinas_gt.mat')\n", (593, 622), False, 'import os\n'), ((688, 725), 'os.path.join', 'os.path.join', (['data_path', '"""PaviaU.mat"""'], {}), "(data_path, 'PaviaU.mat')\n", (700, 725), False, 'import os\n'), ((766, 806), 'os.path.join', 'os.path.join', (['data_path', '"""PaviaU_gt.mat"""'], {}), "(data_path, 'PaviaU_gt.mat')\n", (778, 806), False, 'import os\n'), ((1214, 1252), 'os.path.join', 'os.path.join', (['data_path', '"""Houston.mat"""'], {}), "(data_path, 'Houston.mat')\n", (1226, 1252), False, 'import os\n'), ((1294, 1335), 'os.path.join', 'os.path.join', (['data_path', '"""Houston_gt.mat"""'], {}), "(data_path, 'Houston_gt.mat')\n", (1306, 1335), False, 'import os\n'), ((1401, 1435), 'os.path.join', 'os.path.join', (['data_path', '"""KSC.mat"""'], {}), "(data_path, 'KSC.mat')\n", (1413, 1435), False, 'import os\n'), ((1473, 1510), 'os.path.join', 'os.path.join', (['data_path', '"""KSC_gt.mat"""'], {}), "(data_path, 'KSC_gt.mat')\n", (1485, 1510), False, 'import os\n')] |
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="marlin_binary_protocol",
version="0.0.7",
author="<NAME>",
author_email="<EMAIL>",
description="Transfer files with Marlin 2.0 firmware using Marlin Binary Protocol Mark II",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/charleswillis3/marlin-binary-protocol",
packages=setuptools.find_packages(),
install_requires=["heatshrink2>=0.9", "pyserial>=3.4", "backports.time_perf_counter; python_version < '3.3'"],
classifiers=[
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=2.6, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, <4',
)
| [
"setuptools.find_packages"
]
| [((478, 504), 'setuptools.find_packages', 'setuptools.find_packages', ([], {}), '()\n', (502, 504), False, 'import setuptools\n')] |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import unittest
from transformers.tokenization_xlnet import (XLNetTokenizer, SPIECE_UNDERLINE)
from .tokenization_tests_commons import CommonTestCases
SAMPLE_VOCAB = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'fixtures/test_sentencepiece.model')
class XLNetTokenizationTest(CommonTestCases.CommonTokenizerTester):
tokenizer_class = XLNetTokenizer
def setUp(self):
super(XLNetTokenizationTest, self).setUp()
# We have a SentencePiece fixture for testing
tokenizer = XLNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
tokenizer.save_pretrained(self.tmpdirname)
def get_tokenizer(self, **kwargs):
return XLNetTokenizer.from_pretrained(self.tmpdirname, **kwargs)
def get_input_output_texts(self):
input_text = u"This is a test"
output_text = u"This is a test"
return input_text, output_text
def test_full_tokenizer(self):
tokenizer = XLNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
tokens = tokenizer.tokenize(u'This is a test')
self.assertListEqual(tokens, [u'▁This', u'▁is', u'▁a', u'▁t', u'est'])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(tokens), [285, 46, 10, 170, 382])
tokens = tokenizer.tokenize(u"I was born in 92000, and this is falsé.")
self.assertListEqual(tokens, [SPIECE_UNDERLINE + u'I', SPIECE_UNDERLINE + u'was', SPIECE_UNDERLINE + u'b',
u'or', u'n', SPIECE_UNDERLINE + u'in', SPIECE_UNDERLINE + u'',
u'9', u'2', u'0', u'0', u'0', u',', SPIECE_UNDERLINE + u'and', SPIECE_UNDERLINE + u'this',
SPIECE_UNDERLINE + u'is', SPIECE_UNDERLINE + u'f', u'al', u's', u'é', u'.'])
ids = tokenizer.convert_tokens_to_ids(tokens)
self.assertListEqual(
ids, [8, 21, 84, 55, 24, 19, 7, 0,
602, 347, 347, 347, 3, 12, 66,
46, 72, 80, 6, 0, 4])
back_tokens = tokenizer.convert_ids_to_tokens(ids)
self.assertListEqual(back_tokens, [SPIECE_UNDERLINE + u'I', SPIECE_UNDERLINE + u'was', SPIECE_UNDERLINE + u'b',
u'or', u'n', SPIECE_UNDERLINE + u'in',
SPIECE_UNDERLINE + u'', u'<unk>', u'2', u'0', u'0', u'0', u',',
SPIECE_UNDERLINE + u'and', SPIECE_UNDERLINE + u'this',
SPIECE_UNDERLINE + u'is', SPIECE_UNDERLINE + u'f', u'al', u's',
u'<unk>', u'.'])
def test_tokenizer_lower(self):
tokenizer = XLNetTokenizer(SAMPLE_VOCAB, do_lower_case=True)
tokens = tokenizer.tokenize(u"I was born in 92000, and this is falsé.")
self.assertListEqual(tokens, [SPIECE_UNDERLINE + u'', u'i', SPIECE_UNDERLINE + u'was', SPIECE_UNDERLINE + u'b',
u'or', u'n', SPIECE_UNDERLINE + u'in', SPIECE_UNDERLINE + u'',
u'9', u'2', u'0', u'0', u'0', u',', SPIECE_UNDERLINE + u'and', SPIECE_UNDERLINE + u'this',
SPIECE_UNDERLINE + u'is', SPIECE_UNDERLINE + u'f', u'al', u'se', u'.'])
self.assertListEqual(tokenizer.tokenize(u"H\u00E9llo"), [u"▁he", u"ll", u"o"])
def test_tokenizer_no_lower(self):
tokenizer = XLNetTokenizer(SAMPLE_VOCAB, do_lower_case=False)
tokens = tokenizer.tokenize(u"I was born in 92000, and this is falsé.")
self.assertListEqual(tokens, [SPIECE_UNDERLINE + u'I', SPIECE_UNDERLINE + u'was', SPIECE_UNDERLINE + u'b', u'or',
u'n', SPIECE_UNDERLINE + u'in', SPIECE_UNDERLINE + u'',
u'9', u'2', u'0', u'0', u'0', u',', SPIECE_UNDERLINE + u'and', SPIECE_UNDERLINE + u'this',
SPIECE_UNDERLINE + u'is', SPIECE_UNDERLINE + u'f', u'al', u'se', u'.'])
def test_sequence_builders(self):
tokenizer = XLNetTokenizer.from_pretrained("xlnet-base-cased")
text = tokenizer.encode("sequence builders")
text_2 = tokenizer.encode("multi-sequence build")
encoded_sentence = tokenizer.add_special_tokens_single_sequence(text)
encoded_pair = tokenizer.add_special_tokens_sequence_pair(text, text_2)
assert encoded_sentence == text + [4, 3]
assert encoded_pair == text + [4] + text_2 + [4, 3]
if __name__ == '__main__':
unittest.main()
| [
"unittest.main",
"os.path.abspath",
"transformers.tokenization_xlnet.XLNetTokenizer.from_pretrained",
"transformers.tokenization_xlnet.XLNetTokenizer"
]
| [((5237, 5252), 'unittest.main', 'unittest.main', ([], {}), '()\n', (5250, 5252), False, 'import unittest\n'), ((907, 932), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (922, 932), False, 'import os\n'), ((1247, 1294), 'transformers.tokenization_xlnet.XLNetTokenizer', 'XLNetTokenizer', (['SAMPLE_VOCAB'], {'keep_accents': '(True)'}), '(SAMPLE_VOCAB, keep_accents=True)\n', (1261, 1294), False, 'from transformers.tokenization_xlnet import XLNetTokenizer, SPIECE_UNDERLINE\n'), ((1401, 1458), 'transformers.tokenization_xlnet.XLNetTokenizer.from_pretrained', 'XLNetTokenizer.from_pretrained', (['self.tmpdirname'], {}), '(self.tmpdirname, **kwargs)\n', (1431, 1458), False, 'from transformers.tokenization_xlnet import XLNetTokenizer, SPIECE_UNDERLINE\n'), ((1673, 1720), 'transformers.tokenization_xlnet.XLNetTokenizer', 'XLNetTokenizer', (['SAMPLE_VOCAB'], {'keep_accents': '(True)'}), '(SAMPLE_VOCAB, keep_accents=True)\n', (1687, 1720), False, 'from transformers.tokenization_xlnet import XLNetTokenizer, SPIECE_UNDERLINE\n'), ((3392, 3440), 'transformers.tokenization_xlnet.XLNetTokenizer', 'XLNetTokenizer', (['SAMPLE_VOCAB'], {'do_lower_case': '(True)'}), '(SAMPLE_VOCAB, do_lower_case=True)\n', (3406, 3440), False, 'from transformers.tokenization_xlnet import XLNetTokenizer, SPIECE_UNDERLINE\n'), ((4128, 4177), 'transformers.tokenization_xlnet.XLNetTokenizer', 'XLNetTokenizer', (['SAMPLE_VOCAB'], {'do_lower_case': '(False)'}), '(SAMPLE_VOCAB, do_lower_case=False)\n', (4142, 4177), False, 'from transformers.tokenization_xlnet import XLNetTokenizer, SPIECE_UNDERLINE\n'), ((4772, 4822), 'transformers.tokenization_xlnet.XLNetTokenizer.from_pretrained', 'XLNetTokenizer.from_pretrained', (['"""xlnet-base-cased"""'], {}), "('xlnet-base-cased')\n", (4802, 4822), False, 'from transformers.tokenization_xlnet import XLNetTokenizer, SPIECE_UNDERLINE\n')] |
# forked from https://github.com/single-cell-genetics/cellSNP
## A Python wrapper around the UCSC liftOver tool for VCF files
## UCSC liftOver binary and hg19 to hg38 chain file:
## https://genome.ucsc.edu/cgi-bin/hgLiftOver
## http://hgdownload.cse.ucsc.edu/admin/exe/linux.x86_64/liftOver
## http://hgdownload.soe.ucsc.edu/goldenPath/hg19/liftOver/hg19ToHg38.over.chain.gz
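## Example invocation (illustrative file names; the option flags come from main() below):
##   python <this_script>.py -i input_hg19.vcf.gz -o output_hg38.vcf.gz \
##       -c hg19ToHg38.over.chain.gz -P /path/to/liftOver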
import sys
import gzip
import subprocess
from optparse import OptionParser
LIFTOVER_INFO = '##INFO=<ID=OLD,Number=1,Type=Integer,'
LIFTOVER_INFO += 'Description="position before liftover">\n'
def vcf_to_bed(vcf_file, out_file, chr_in=True):
if vcf_file[-3:] == ".gz":
is_gzip = True
fid_in = gzip.open(vcf_file, "r")
else:
is_gzip = False
fid_in = open(vcf_file, "r")
fid_out = open(out_file, "w")
for line in fid_in:
if is_gzip:
line = line.decode('utf-8')
if line.startswith("#") == False:
line_val = line.rstrip().split("\t")[:8]
if chr_in and line_val[0].startswith("chr") == False:
line_val[0] = "chr" + line_val[0]
line_val[2] = str(int(line_val[1]) + 1)
fid_out.writelines("\t".join(line_val[:3]) + "\n")
fid_in.close()
fid_out.close()
return None
def update_vcf(vcf_file, bed_new, bed_unmap, out_file):
## unmapped lines
unmap_pos = []
_fid = open(bed_unmap, "r")
for line in _fid:
if not line.startswith("#"):
_pos_id = "_".join(line.rstrip().split("\t")[:2])
unmap_pos.append(_pos_id)
_fid.close()
if vcf_file[-3:] == ".gz":
is_gzip = True
fid_in = gzip.open(vcf_file, "r")
else:
is_gzip = False
fid_in = open(vcf_file, "r")
cnt1 = 0
idx_unmap = 0
fid_bed = open(bed_new, "r")
fid_out = open(out_file, "w")
for line in fid_in:
if is_gzip:
line = line.decode('utf-8')
if line.startswith("#"):
if line.startswith("#CHROM"):
fid_out.writelines(LIFTOVER_INFO)
fid_out.writelines(line)
else:
line_val = line.rstrip().split("\t")
if idx_unmap < len(unmap_pos):
_pos_id = "_".join(line_val[:2])
if line_val[0].startswith("chr") == False:
_pos_id = "chr" + _pos_id
if _pos_id == unmap_pos[idx_unmap]:
idx_unmap += 1
continue
cnt1 += 1
bed_line = fid_bed.readline()
line_val[7] = "OLD=" + line_val[1] + ";" + line_val[7]
line_val[1] = bed_line.rstrip().split("\t")[1]
fid_out.writelines("\t".join(line_val) + "\n")
print(cnt1, idx_unmap)
fid_in.close()
fid_bed.close()
fid_out.close()
return None
def main():
import warnings
warnings.filterwarnings('error')
# parse command line options
parser = OptionParser()
parser.add_option("--chainFile", "-c", dest="chain_file", default=None,
help=("Chain file, full path."))
parser.add_option("--inFile", "-i", dest="in_file", default=None,
help=("Input vcf file, full path."))
parser.add_option("--outFile", "-o", dest="out_file", default=None,
help=("Output VCF file, full path."))
parser.add_option("--liftOverPath", "-P", dest="liftOver_path", default=None,
help=("liftOver_path if it is not in PATH variable."))
(options, args) = parser.parse_args()
if len(sys.argv[1:]) == 0:
print("liftOver-vcf: a wrap of UCSC liftOver for VCF file.\n")
print("use -h or --help for help on argument.")
sys.exit(1)
in_file = options.in_file
bed_file = options.in_file.split(".vcf")[0] + ".bed"
new_bed_file = options.out_file.split(".vcf")[0] + ".bed"
unmap_bed_file = options.out_file.split(".vcf")[0] + ".unmap.bed"
## generate bed file
print("converting vcf to bed file ... ")
vcf_to_bed(in_file, bed_file)
## UCSC liftOver on bed file
chain_file = options.chain_file
if options.liftOver_path is None:
liftOver = "liftOver"
else:
# check if path exists
liftOver = options.liftOver_path
print("liftOver bed file ... ")
bashCommand = "%s %s %s %s %s" %(liftOver, bed_file, chain_file,
new_bed_file, unmap_bed_file)
#print(bashCommand)
pro = subprocess.Popen(bashCommand.split(), stdout=subprocess.PIPE)
pro.communicate()[0]
## update vcf file
out_file = options.out_file
if out_file[-3:] == ".gz":
out_file = out_file[:-3]
print("updating vcf file ... ")
update_vcf(in_file, new_bed_file, unmap_bed_file, out_file)
print("gzip vcf file ... ")
import shutil
if shutil.which("bgzip") is not None:
bashCommand = "bgzip -f %s" %(out_file)
else:
bashCommand = "gzip -f %s" %(out_file)
pro = subprocess.Popen(bashCommand.split(), stdout=subprocess.PIPE)
pro.communicate()[0]
return None
if __name__ == "__main__":
main()
| [
"gzip.open",
"shutil.which",
"optparse.OptionParser",
"sys.exit",
"warnings.filterwarnings"
]
| [((2890, 2922), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""error"""'], {}), "('error')\n", (2913, 2922), False, 'import warnings\n'), ((2970, 2984), 'optparse.OptionParser', 'OptionParser', ([], {}), '()\n', (2982, 2984), False, 'from optparse import OptionParser\n'), ((682, 706), 'gzip.open', 'gzip.open', (['vcf_file', '"""r"""'], {}), "(vcf_file, 'r')\n", (691, 706), False, 'import gzip\n'), ((1671, 1695), 'gzip.open', 'gzip.open', (['vcf_file', '"""r"""'], {}), "(vcf_file, 'r')\n", (1680, 1695), False, 'import gzip\n'), ((3693, 3704), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (3701, 3704), False, 'import sys\n'), ((4856, 4877), 'shutil.which', 'shutil.which', (['"""bgzip"""'], {}), "('bgzip')\n", (4868, 4877), False, 'import shutil\n')] |
import logging
import os
import redis
import moltin_aps
_database = None
db_logger = logging.getLogger('db_logger')
async def get_database_connection():
global _database
if _database is None:
database_password = os.getenv('DB_PASSWORD')
database_host = os.getenv('DB_HOST')
database_port = os.getenv('DB_PORT')
_database = redis.Redis(host=database_host, port=database_port, password=database_password)
db_logger.debug('Got new db connection')
return _database
async def get_moltin_customer_id(customer_key):
db = await get_database_connection()
customer_id = db.get(customer_key)
if customer_id:
customer_id = customer_id.decode('utf-8')
db_logger.debug(f'Got moltin customer id «{customer_id}» from db')
return customer_id
async def update_customer_info(customer_key, customer_info):
db = await get_database_connection()
customer_id = db.get(customer_key).decode('utf-8')
moltin_aps.update_customer_info(customer_id, customer_info)
db_logger.debug(f'Customer «{customer_id}» info was updated')
async def create_customer(customer_key, customer_info):
db = await get_database_connection()
customer_id = moltin_aps.create_customer(customer_info)['data']['id']
db.set(customer_key, customer_id)
db_logger.debug(f'New customer «{customer_key}» was created')
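# Note: the helpers above are coroutines (they await get_database_connection), so callers
# must await them from async code, e.g. (illustrative):
#   customer_id = await get_moltin_customer_id(customer_key)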
| [
"logging.getLogger",
"moltin_aps.update_customer_info",
"os.getenv",
"redis.Redis",
"moltin_aps.create_customer"
]
| [((90, 120), 'logging.getLogger', 'logging.getLogger', (['"""db_logger"""'], {}), "('db_logger')\n", (107, 120), False, 'import logging\n'), ((965, 1024), 'moltin_aps.update_customer_info', 'moltin_aps.update_customer_info', (['customer_id', 'customer_info'], {}), '(customer_id, customer_info)\n', (996, 1024), False, 'import moltin_aps\n'), ((235, 259), 'os.getenv', 'os.getenv', (['"""DB_PASSWORD"""'], {}), "('DB_PASSWORD')\n", (244, 259), False, 'import os\n'), ((284, 304), 'os.getenv', 'os.getenv', (['"""DB_HOST"""'], {}), "('DB_HOST')\n", (293, 304), False, 'import os\n'), ((329, 349), 'os.getenv', 'os.getenv', (['"""DB_PORT"""'], {}), "('DB_PORT')\n", (338, 349), False, 'import os\n'), ((370, 449), 'redis.Redis', 'redis.Redis', ([], {'host': 'database_host', 'port': 'database_port', 'password': 'database_password'}), '(host=database_host, port=database_port, password=database_password)\n', (381, 449), False, 'import redis\n'), ((1202, 1243), 'moltin_aps.create_customer', 'moltin_aps.create_customer', (['customer_info'], {}), '(customer_info)\n', (1228, 1243), False, 'import moltin_aps\n')] |
from pathlib import Path
import plecos
import json
print(plecos.__version__)
#%%
path_to_json_local = Path("~/ocn/plecos/plecos/samples/sample_metadata_local.json").expanduser()
path_to_json_remote = Path("~/ocn/plecos/plecos/samples/sample_metadata_remote.json").expanduser()
path_to_broken_json = Path("~/ocn/plecos/plecos/samples/metadata_local_broken.json").expanduser()
path_to_schema_local = Path("~/ocn/plecos/plecos/schemas/metadata_local_v0_3.json").expanduser()
path_to_schema_remote = Path("~/ocn/plecos/plecos/schemas/metadata_remote_v0_3.json").expanduser()
# Select remote or local metadata
LOCAL=True
if LOCAL:
path_json_file = path_to_json_local
path_schema_file = path_to_schema_local
with open(path_to_json_local) as f:
json_dict = json.load(f)
else:
path_json_file = path_to_json_remote
path_schema_file = path_to_schema_remote
with open(path_to_json_remote) as f:
json_dict = json.load(f)
print("Json file:", path_json_file)
print("Schema file:", path_schema_file)
#%%
del json_dict['base']['files'][0]['index']
# del json_dict['base']['files'][0]['url']
# json_dict['base']['extra'] = 1
plecos.is_valid_dict(json_dict)
# json_dict['base']['files'][0]['url']
# json_dict['base']['EXTRA ATTRIB!'] = 0
# json_dict['base']['files'][0]['EXTRA_ATTR'] = "????"
# json_dict['base']['price'] = "A string is not allowed!"
errors = plecos.list_errors(json_dict, path_schema_file)
if errors:
print("ERRORS:")
for e in errors:
print(e)
else:
print("No errors")
raise
#%%
json_dict = {
"base": {
"name": "10 Monkey Species Small",
"author": "Mario",
"license": "CC0: Public Domain",
"contentType": "jpg/txt",
"price": 5,
"categories": [
"image"
],
"tags": [
"image data",
" animals"
],
"type": "dataset",
"description": "Example description",
"copyrightHolder": "",
"encoding": "",
"compression": "",
"workExample": "",
"inLanguage": "en",
"files": [
{
"url": "https://s3.amazonaws.com/datacommons-seeding-us-east/10_Monkey_Species_Small/assets/training.zip"
},
{
"url": "https://s3.amazonaws.com/datacommons-seeding-us-east/10_Monkey_Species_Small/assets/monkey_labels.txt"
},
{
"url": "https://s3.amazonaws.com/datacommons-seeding-us-east/10_Monkey_Species_Small/assets/validation.zip"
}
],
"links": [
{
"url": "https://s3.amazonaws.com/datacommons-seeding-us-east/10_Monkey_Species_Small/links/sample/sample.zip",
"name": "sample.zip",
"type": "sample"
},
{
"url": "https://github.com/slothkong/CNN_classification_10_monkey_species",
"name": "example code",
"type": "example code"
},
{
"url": "https://s3.amazonaws.com/datacommons-seeding-us-east/10_Monkey_Species_Small/links/discovery/n5151.jpg",
"name": "n5151.jpg",
"type": "discovery"
}
],
"checksum": "0",
},
}
#%%
path_to_schema_local = Path("~/ocn/Plecos/plecos/schemas/metadata_local_190305.json").expanduser()
errors = plecos.list_errors(json_dict, path_to_schema_local)
if errors:
print("ERRORS:")
for e in errors:
print(e)
else:
print("No errors") | [
"json.load",
"plecos.list_errors",
"pathlib.Path",
"plecos.is_valid_dict"
]
| [((1160, 1191), 'plecos.is_valid_dict', 'plecos.is_valid_dict', (['json_dict'], {}), '(json_dict)\n', (1180, 1191), False, 'import plecos\n'), ((1396, 1443), 'plecos.list_errors', 'plecos.list_errors', (['json_dict', 'path_schema_file'], {}), '(json_dict, path_schema_file)\n', (1414, 1443), False, 'import plecos\n'), ((3143, 3194), 'plecos.list_errors', 'plecos.list_errors', (['json_dict', 'path_to_schema_local'], {}), '(json_dict, path_to_schema_local)\n', (3161, 3194), False, 'import plecos\n'), ((102, 164), 'pathlib.Path', 'Path', (['"""~/ocn/plecos/plecos/samples/sample_metadata_local.json"""'], {}), "('~/ocn/plecos/plecos/samples/sample_metadata_local.json')\n", (106, 164), False, 'from pathlib import Path\n'), ((200, 263), 'pathlib.Path', 'Path', (['"""~/ocn/plecos/plecos/samples/sample_metadata_remote.json"""'], {}), "('~/ocn/plecos/plecos/samples/sample_metadata_remote.json')\n", (204, 263), False, 'from pathlib import Path\n'), ((299, 361), 'pathlib.Path', 'Path', (['"""~/ocn/plecos/plecos/samples/metadata_local_broken.json"""'], {}), "('~/ocn/plecos/plecos/samples/metadata_local_broken.json')\n", (303, 361), False, 'from pathlib import Path\n'), ((398, 458), 'pathlib.Path', 'Path', (['"""~/ocn/plecos/plecos/schemas/metadata_local_v0_3.json"""'], {}), "('~/ocn/plecos/plecos/schemas/metadata_local_v0_3.json')\n", (402, 458), False, 'from pathlib import Path\n'), ((496, 557), 'pathlib.Path', 'Path', (['"""~/ocn/plecos/plecos/schemas/metadata_remote_v0_3.json"""'], {}), "('~/ocn/plecos/plecos/schemas/metadata_remote_v0_3.json')\n", (500, 557), False, 'from pathlib import Path\n'), ((773, 785), 'json.load', 'json.load', (['f'], {}), '(f)\n', (782, 785), False, 'import json\n'), ((941, 953), 'json.load', 'json.load', (['f'], {}), '(f)\n', (950, 953), False, 'import json\n'), ((3058, 3120), 'pathlib.Path', 'Path', (['"""~/ocn/Plecos/plecos/schemas/metadata_local_190305.json"""'], {}), "('~/ocn/Plecos/plecos/schemas/metadata_local_190305.json')\n", (3062, 3120), False, 'from pathlib import Path\n')] |
from __future__ import absolute_import, unicode_literals
import itertools
import os
import sys
from copy import copy
import pytest
from virtualenv.discovery.py_spec import PythonSpec
def test_bad_py_spec():
text = "python2.3.4.5"
spec = PythonSpec.from_string_spec(text)
assert text in repr(spec)
assert spec.str_spec == text
assert spec.path == os.path.abspath(text)
content = vars(spec)
del content[str("str_spec")]
del content[str("path")]
assert all(v is None for v in content.values())
def test_py_spec_first_digit_only_major():
spec = PythonSpec.from_string_spec("278")
assert spec.major == 2
assert spec.minor == 78
def test_spec_satisfies_path_ok():
spec = PythonSpec.from_string_spec(sys.executable)
assert spec.satisfies(spec) is True
def test_spec_satisfies_path_nok(tmp_path):
spec = PythonSpec.from_string_spec(sys.executable)
of = PythonSpec.from_string_spec(str(tmp_path))
assert spec.satisfies(of) is False
def test_spec_satisfies_arch():
spec_1 = PythonSpec.from_string_spec("python-32")
spec_2 = PythonSpec.from_string_spec("python-64")
assert spec_1.satisfies(spec_1) is True
assert spec_2.satisfies(spec_1) is False
@pytest.mark.parametrize(
"req, spec",
list(itertools.combinations(["py", "CPython", "python"], 2)) + [("jython", "jython")] + [("CPython", "cpython")],
)
def test_spec_satisfies_implementation_ok(req, spec):
spec_1 = PythonSpec.from_string_spec(req)
spec_2 = PythonSpec.from_string_spec(spec)
assert spec_1.satisfies(spec_1) is True
assert spec_2.satisfies(spec_1) is True
def test_spec_satisfies_implementation_nok():
spec_1 = PythonSpec.from_string_spec("python")
spec_2 = PythonSpec.from_string_spec("jython")
assert spec_2.satisfies(spec_1) is False
assert spec_1.satisfies(spec_2) is False
def _version_satisfies_pairs():
target = set()
version = tuple(str(i) for i in sys.version_info[0:3])
for i in range(len(version) + 1):
req = ".".join(version[0:i])
for j in range(i + 1):
sat = ".".join(version[0:j])
# can be satisfied in both directions
target.add((req, sat))
target.add((sat, req))
return sorted(target)
@pytest.mark.parametrize("req, spec", _version_satisfies_pairs())
def test_version_satisfies_ok(req, spec):
req_spec = PythonSpec.from_string_spec("python{}".format(req))
sat_spec = PythonSpec.from_string_spec("python{}".format(spec))
assert sat_spec.satisfies(req_spec) is True
def _version_not_satisfies_pairs():
target = set()
version = tuple(str(i) for i in sys.version_info[0:3])
for i in range(len(version)):
req = ".".join(version[0 : i + 1])
for j in range(i + 1):
sat_ver = list(sys.version_info[0 : j + 1])
for l in range(j + 1):
for o in [1, -1]:
temp = copy(sat_ver)
temp[l] += o
sat = ".".join(str(i) for i in temp)
target.add((req, sat))
return sorted(target)
@pytest.mark.parametrize("req, spec", _version_not_satisfies_pairs())
def test_version_satisfies_nok(req, spec):
req_spec = PythonSpec.from_string_spec("python{}".format(req))
sat_spec = PythonSpec.from_string_spec("python{}".format(spec))
assert sat_spec.satisfies(req_spec) is False
def test_relative_spec(tmp_path, monkeypatch):
monkeypatch.chdir(tmp_path)
a_relative_path = str((tmp_path / "a" / "b").relative_to(tmp_path))
spec = PythonSpec.from_string_spec(a_relative_path)
assert spec.path == os.path.abspath(str(tmp_path / a_relative_path))
| [
"os.path.abspath",
"copy.copy",
"virtualenv.discovery.py_spec.PythonSpec.from_string_spec",
"itertools.combinations"
]
| [((250, 283), 'virtualenv.discovery.py_spec.PythonSpec.from_string_spec', 'PythonSpec.from_string_spec', (['text'], {}), '(text)\n', (277, 283), False, 'from virtualenv.discovery.py_spec import PythonSpec\n'), ((588, 622), 'virtualenv.discovery.py_spec.PythonSpec.from_string_spec', 'PythonSpec.from_string_spec', (['"""278"""'], {}), "('278')\n", (615, 622), False, 'from virtualenv.discovery.py_spec import PythonSpec\n'), ((726, 769), 'virtualenv.discovery.py_spec.PythonSpec.from_string_spec', 'PythonSpec.from_string_spec', (['sys.executable'], {}), '(sys.executable)\n', (753, 769), False, 'from virtualenv.discovery.py_spec import PythonSpec\n'), ((867, 910), 'virtualenv.discovery.py_spec.PythonSpec.from_string_spec', 'PythonSpec.from_string_spec', (['sys.executable'], {}), '(sys.executable)\n', (894, 910), False, 'from virtualenv.discovery.py_spec import PythonSpec\n'), ((1049, 1089), 'virtualenv.discovery.py_spec.PythonSpec.from_string_spec', 'PythonSpec.from_string_spec', (['"""python-32"""'], {}), "('python-32')\n", (1076, 1089), False, 'from virtualenv.discovery.py_spec import PythonSpec\n'), ((1103, 1143), 'virtualenv.discovery.py_spec.PythonSpec.from_string_spec', 'PythonSpec.from_string_spec', (['"""python-64"""'], {}), "('python-64')\n", (1130, 1143), False, 'from virtualenv.discovery.py_spec import PythonSpec\n'), ((1466, 1498), 'virtualenv.discovery.py_spec.PythonSpec.from_string_spec', 'PythonSpec.from_string_spec', (['req'], {}), '(req)\n', (1493, 1498), False, 'from virtualenv.discovery.py_spec import PythonSpec\n'), ((1512, 1545), 'virtualenv.discovery.py_spec.PythonSpec.from_string_spec', 'PythonSpec.from_string_spec', (['spec'], {}), '(spec)\n', (1539, 1545), False, 'from virtualenv.discovery.py_spec import PythonSpec\n'), ((1695, 1732), 'virtualenv.discovery.py_spec.PythonSpec.from_string_spec', 'PythonSpec.from_string_spec', (['"""python"""'], {}), "('python')\n", (1722, 1732), False, 'from virtualenv.discovery.py_spec import PythonSpec\n'), ((1746, 1783), 'virtualenv.discovery.py_spec.PythonSpec.from_string_spec', 'PythonSpec.from_string_spec', (['"""jython"""'], {}), "('jython')\n", (1773, 1783), False, 'from virtualenv.discovery.py_spec import PythonSpec\n'), ((3584, 3628), 'virtualenv.discovery.py_spec.PythonSpec.from_string_spec', 'PythonSpec.from_string_spec', (['a_relative_path'], {}), '(a_relative_path)\n', (3611, 3628), False, 'from virtualenv.discovery.py_spec import PythonSpec\n'), ((371, 392), 'os.path.abspath', 'os.path.abspath', (['text'], {}), '(text)\n', (386, 392), False, 'import os\n'), ((1288, 1342), 'itertools.combinations', 'itertools.combinations', (["['py', 'CPython', 'python']", '(2)'], {}), "(['py', 'CPython', 'python'], 2)\n", (1310, 1342), False, 'import itertools\n'), ((2948, 2961), 'copy.copy', 'copy', (['sat_ver'], {}), '(sat_ver)\n', (2952, 2961), False, 'from copy import copy\n')] |
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
module_definition = json.loads(
"""{
"family": "software_image_management_swim",
"name": "trigger_image_activation",
"operations": {
"post": [
"trigger_software_image_activation"
]
},
"parameters": {
"trigger_software_image_activation": [
{
"name": "schedule_validate",
"required": false,
"type": "boolean"
},
{
"array_type": "object",
"name": "payload",
"required": true,
"schema": [
{
"name": "activateLowerImageVersion",
"required": false,
"type": "boolean"
},
{
"name": "deviceUpgradeMode",
"required": false,
"type": "string"
},
{
"name": "deviceUuid",
"required": false,
"type": "string"
},
{
"name": "distributeIfNeeded",
"required": false,
"type": "boolean"
},
{
"array_type": "string",
"name": "imageUuidList",
"required": false,
"schema": [],
"type": "array"
},
{
"array_type": "string",
"name": "smuImageUuidList",
"required": false,
"schema": [],
"type": "array"
}
],
"type": "array"
}
]
},
"responses": {
"trigger_software_image_activation": {
"properties": [
"response",
"version"
],
"type": "object"
}
}
}"""
)
| [
"json.loads"
]
| [((121, 2254), 'json.loads', 'json.loads', (['"""{\n "family": "software_image_management_swim",\n "name": "trigger_image_activation",\n "operations": {\n "post": [\n "trigger_software_image_activation"\n ]\n },\n "parameters": {\n "trigger_software_image_activation": [\n {\n "name": "schedule_validate",\n "required": false,\n "type": "boolean"\n },\n {\n "array_type": "object",\n "name": "payload",\n "required": true,\n "schema": [\n {\n "name": "activateLowerImageVersion",\n "required": false,\n "type": "boolean"\n },\n {\n "name": "deviceUpgradeMode",\n "required": false,\n "type": "string"\n },\n {\n "name": "deviceUuid",\n "required": false,\n "type": "string"\n },\n {\n "name": "distributeIfNeeded",\n "required": false,\n "type": "boolean"\n },\n {\n "array_type": "string",\n "name": "imageUuidList",\n "required": false,\n "schema": [],\n "type": "array"\n },\n {\n "array_type": "string",\n "name": "smuImageUuidList",\n "required": false,\n "schema": [],\n "type": "array"\n }\n ],\n "type": "array"\n }\n ]\n },\n "responses": {\n "trigger_software_image_activation": {\n "properties": [\n "response",\n "version"\n ],\n "type": "object"\n }\n }\n}"""'], {}), '(\n """{\n "family": "software_image_management_swim",\n "name": "trigger_image_activation",\n "operations": {\n "post": [\n "trigger_software_image_activation"\n ]\n },\n "parameters": {\n "trigger_software_image_activation": [\n {\n "name": "schedule_validate",\n "required": false,\n "type": "boolean"\n },\n {\n "array_type": "object",\n "name": "payload",\n "required": true,\n "schema": [\n {\n "name": "activateLowerImageVersion",\n "required": false,\n "type": "boolean"\n },\n {\n "name": "deviceUpgradeMode",\n "required": false,\n "type": "string"\n },\n {\n "name": "deviceUuid",\n "required": false,\n "type": "string"\n },\n {\n "name": "distributeIfNeeded",\n "required": false,\n "type": "boolean"\n },\n {\n "array_type": "string",\n "name": "imageUuidList",\n "required": false,\n "schema": [],\n "type": "array"\n },\n {\n "array_type": "string",\n "name": "smuImageUuidList",\n "required": false,\n "schema": [],\n "type": "array"\n }\n ],\n "type": "array"\n }\n ]\n },\n "responses": {\n "trigger_software_image_activation": {\n "properties": [\n "response",\n "version"\n ],\n "type": "object"\n }\n }\n}"""\n )\n', (131, 2254), False, 'import json\n')] |
"""CoinGecko model"""
__docformat__ = "numpy"
# pylint: disable=C0301, E1101
import logging
import re
from typing import Any, List
import numpy as np
import pandas as pd
from pycoingecko import CoinGeckoAPI
from gamestonk_terminal.cryptocurrency.dataframe_helpers import (
create_df_index,
long_number_format_with_type_check,
replace_underscores_in_column_names,
)
from gamestonk_terminal.cryptocurrency.discovery.pycoingecko_model import get_coins
from gamestonk_terminal.decorators import log_start_end
logger = logging.getLogger(__name__)
HOLD_COINS = ["ethereum", "bitcoin"]
NEWS_FILTERS = ["Index", "Title", "Author", "Posted"]
CATEGORIES_FILTERS = [
"Rank",
"Name",
"Change_1h",
"Change_24h",
"Change_7d",
"Market_Cap",
"Volume_24h",
"Coins",
]
STABLES_FILTERS = [
"Rank",
"Name",
"Symbol",
"Price",
"Change_24h",
"Exchanges",
"Market_Cap",
"Change_30d",
]
PRODUCTS_FILTERS = [
"Rank",
"Platform",
"Identifier",
"Supply_Rate",
"Borrow_Rate",
]
PLATFORMS_FILTERS = ["Rank", "Name", "Category", "Centralized"]
EXCHANGES_FILTERS = [
"Rank",
"Trust_Score",
"Id",
"Name",
"Country",
"Year Established",
"Trade_Volume_24h_BTC",
]
EXRATES_FILTERS = ["Index", "Name", "Unit", "Value", "Type"]
INDEXES_FILTERS = ["Rank", "Name", "Id", "Market", "Last", "MultiAsset"]
DERIVATIVES_FILTERS = [
"Rank",
"Market",
"Symbol",
"Price",
"Pct_Change_24h",
"Contract_Type",
"Basis",
"Spread",
"Funding_Rate",
"Volume_24h",
]
COINS_COLUMNS = [
"symbol",
"name",
"current_price",
"market_cap",
"market_cap_rank",
"price_change_percentage_7d_in_currency",
"price_change_percentage_24h_in_currency",
"total_volume",
]
@log_start_end(log=logger)
def get_holdings_overview(endpoint: str = "bitcoin") -> List[Any]:
"""Returns public companies that holds ethereum or bitcoin [Source: CoinGecko]
Parameters
----------
endpoint : str
"bitcoin" or "ethereum"
Returns
-------
List:
- str: Overall statistics
- pandas.DataFrame: Companies holding crypto
"""
cg = CoinGeckoAPI()
data = cg.get_companies_public_treasury_by_coin_id(coin_id=endpoint)
stats_str = f"""{len(data["companies"])} companies hold a total of {long_number_format_with_type_check(data["total_holdings"])} {endpoint} ({data["market_cap_dominance"]}% of market cap dominance) with the current value of {long_number_format_with_type_check(int(data["total_value_usd"]))} USD dollars""" # noqa
df = pd.json_normalize(data, record_path=["companies"])
df.columns = list(
map(
lambda x: replace_underscores_in_column_names(x)
if isinstance(x, str)
else x,
df.columns,
)
)
return [stats_str, df]
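# Illustrative usage (a sketch): the first element is a summary sentence, the second a
# DataFrame of the companies holding the coin.
#   overview, companies_df = get_holdings_overview("bitcoin")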
SORT_VALUES = [
"market_cap_desc",
"market_cap_asc",
"name_desc",
"name_asc",
"market_cap_change_24h_desc",
"market_cap_change_24h_asc",
]
@log_start_end(log=logger)
def coin_formatter(n):
# TODO: can be improved
coins = []
re_str = "small/(.*)(.jpg|.png|.JPG|.PNG)"
for coin in n:
if re.search(re_str, coin):
coin_stripped = re.search(re_str, coin).group(1)
coins.append(coin_stripped)
return ",".join(coins)
@log_start_end(log=logger)
def get_top_crypto_categories(sort_filter: str = SORT_VALUES[0]) -> pd.DataFrame:
"""Returns top crypto categories [Source: CoinGecko]
Returns
-------
pandas.DataFrame
Rank, Name, Change_1h, Change_7d, Market_Cap, Volume_24h,Coins, Url
"""
if sort_filter in SORT_VALUES:
client = CoinGeckoAPI()
data = client.get_coins_categories()
df = pd.DataFrame(data)
del df["id"]
del df["content"]
del df["updated_at"]
df["top_3_coins"] = df["top_3_coins"].apply(coin_formatter)
df.columns = [
replace_underscores_in_column_names(col) if isinstance(col, str) else col
for col in df.columns
]
return df
return pd.DataFrame()
# TODO: add string with overview
@log_start_end(log=logger)
def get_stable_coins(top: int = 20) -> pd.DataFrame:
"""Returns top stable coins [Source: CoinGecko]
Returns
-------
pandas.DataFrame
Rank, Name, Symbol, Price, Change_24h, Exchanges, Market_Cap, Change_30d, Url
"""
df = get_coins(top=top, category="stablecoins")
return df[COINS_COLUMNS]
@log_start_end(log=logger)
def get_exchanges() -> pd.DataFrame:
"""Get list of top exchanges from CoinGecko API [Source: CoinGecko]
Returns
-------
pandas.DataFrame
Trust_Score, Id, Name, Country, Year_Established, Trade_Volume_24h_BTC, Url
"""
client = CoinGeckoAPI()
df = pd.DataFrame(client.get_exchanges_list(per_page=250))
df.replace({float(np.NaN): None}, inplace=True)
df = df[
[
"trust_score",
"id",
"name",
"country",
"year_established",
"trade_volume_24h_btc",
"url",
]
]
df.columns = [
"Trust_Score",
"Id",
"Name",
"Country",
"Year_Established",
"Trade_Volume_24h_BTC",
"Url",
]
create_df_index(df, "Rank")
return df
@log_start_end(log=logger)
def get_financial_platforms() -> pd.DataFrame:
"""Get list of financial platforms from CoinGecko API [Source: CoinGecko]
Returns
-------
pandas.DataFrame
Rank, Name, Category, Centralized, Url
"""
client = CoinGeckoAPI()
df = pd.DataFrame(client.get_finance_platforms())
df.drop("facts", axis=1, inplace=True)
create_df_index(df, "rank")
df.columns = ["Rank", "Name", "Category", "Centralized", "Url"]
return df
@log_start_end(log=logger)
def get_finance_products() -> pd.DataFrame:
"""Get list of financial products from CoinGecko API
Returns
-------
pandas.DataFrame
Rank, Platform, Identifier, Supply_Rate, Borrow_Rate
"""
client = CoinGeckoAPI()
df = pd.DataFrame(
client.get_finance_products(per_page=250),
columns=[
"platform",
"identifier",
"supply_rate_percentage",
"borrow_rate_percentage",
],
)
df.columns = ["Platform", "Identifier", "Supply_Rate", "Borrow_Rate"]
create_df_index(df, "Rank")
return df
@log_start_end(log=logger)
def get_indexes() -> pd.DataFrame:
"""Get list of crypto indexes from CoinGecko API [Source: CoinGecko]
Returns
-------
pandas.DataFrame
Name, Id, Market, Last, MultiAsset
"""
client = CoinGeckoAPI()
df = pd.DataFrame(client.get_indexes(per_page=250))
df.columns = ["Name", "Id", "Market", "Last", "MultiAsset"]
create_df_index(df, "Rank")
return df
@log_start_end(log=logger)
def get_derivatives() -> pd.DataFrame:
"""Get list of crypto derivatives from CoinGecko API [Source: CoinGecko]
Returns
-------
pandas.DataFrame
Rank, Market, Symbol, Price, Pct_Change_24h, Contract_Type, Basis, Spread, Funding_Rate, Volume_24h,
"""
client = CoinGeckoAPI()
df = pd.DataFrame(client.get_derivatives(include_tickers="unexpired"))
df.drop(
["index", "last_traded_at", "expired_at", "index_id", "open_interest"],
axis=1,
inplace=True,
)
df.rename(columns={"price_percentage_change_24h": "pct_change_24h"}, inplace=True)
create_df_index(df, "rank")
df["price"] = df["price"].apply(
lambda x: "" if not x else float(x.strip("$").replace(",", ""))
)
df.columns = [
"Rank",
"Market",
"Symbol",
"Price",
"Pct_Change_24h",
"Contract_Type",
"Basis",
"Spread",
"Funding_Rate",
"Volume_24h",
]
return df
@log_start_end(log=logger)
def get_exchange_rates() -> pd.DataFrame:
"""Get list of crypto, fiats, commodity exchange rates from CoinGecko API [Source: CoinGecko]
Returns
-------
pandas.DataFrame
Index, Name, Unit, Value, Type
"""
client = CoinGeckoAPI()
df = pd.DataFrame(client.get_exchange_rates()["rates"]).T.reset_index()
df.drop("index", axis=1, inplace=True)
create_df_index(df, "index")
df.columns = ["Index", "Name", "Unit", "Value", "Type"]
return df
@log_start_end(log=logger)
def get_global_info() -> pd.DataFrame:
"""Get global statistics about crypto from CoinGecko API like:
- market cap change
- number of markets
- icos
- number of active crypto
[Source: CoinGecko]
Returns
-------
pandas.DataFrame
Metric, Value
"""
client = CoinGeckoAPI()
results = client.get_global()
total_mcap = results.pop("market_cap_percentage")
btc, eth = total_mcap.get("btc"), total_mcap.get("eth")
for key in ["total_market_cap", "total_volume", "updated_at"]:
del results[key]
results["btc_market_cap_in_pct"] = btc
results["eth_market_cap_in_pct"] = eth
results["altcoin_market_cap_in_pct"] = 100 - (float(eth) + float(btc))
df = pd.Series(results).reset_index()
df.columns = ["Metric", "Value"]
df["Metric"] = df["Metric"].apply(
lambda x: replace_underscores_in_column_names(x) if isinstance(x, str) else x
)
return df
@log_start_end(log=logger)
def get_global_markets_info() -> pd.DataFrame:
"""Get global statistics about crypto markets from CoinGecko API like:
Market_Cap, Volume, Market_Cap_Percentage
[Source: CoinGecko]
Returns
-------
pandas.DataFrame
Market_Cap, Volume, Market_Cap_Percentage
"""
    columns = [
        "Market_Cap",
        "Volume",
        "Market_Cap_Percentage",
    ]
    # Keys returned by the CoinGecko /global endpoint that correspond to the display columns above
    api_keys = [
        "total_market_cap",
        "total_volume",
        "market_cap_percentage",
    ]
    data = []
    client = CoinGeckoAPI()
    results = client.get_global()
    for key in api_keys:
        data.append(results.get(key))
    df = pd.DataFrame(data).T
    df.columns = columns
df.replace({float("nan"): None}, inplace=True)
return df.reset_index()
@log_start_end(log=logger)
def get_global_defi_info() -> pd.DataFrame:
"""Get global statistics about Decentralized Finances [Source: CoinGecko]
Returns
-------
pandas.DataFrame
Metric, Value
"""
client = CoinGeckoAPI()
results = client.get_global_decentralized_finance_defi()
for key, value in results.items():
try:
results[key] = round(float(value), 4)
except (ValueError, TypeError):
pass
df = pd.Series(results).reset_index()
df.columns = ["Metric", "Value"]
df["Metric"] = df["Metric"].apply(
lambda x: replace_underscores_in_column_names(x) if isinstance(x, str) else x
)
return df
| [
"logging.getLogger",
"pandas.Series",
"pandas.json_normalize",
"gamestonk_terminal.decorators.log_start_end",
"gamestonk_terminal.cryptocurrency.dataframe_helpers.long_number_format_with_type_check",
"gamestonk_terminal.cryptocurrency.dataframe_helpers.replace_underscores_in_column_names",
"gamestonk_terminal.cryptocurrency.dataframe_helpers.create_df_index",
"pycoingecko.CoinGeckoAPI",
"pandas.DataFrame",
"gamestonk_terminal.cryptocurrency.discovery.pycoingecko_model.get_coins",
"re.search"
]
| [((531, 558), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (548, 558), False, 'import logging\n'), ((1815, 1840), 'gamestonk_terminal.decorators.log_start_end', 'log_start_end', ([], {'log': 'logger'}), '(log=logger)\n', (1828, 1840), False, 'from gamestonk_terminal.decorators import log_start_end\n'), ((3080, 3105), 'gamestonk_terminal.decorators.log_start_end', 'log_start_end', ([], {'log': 'logger'}), '(log=logger)\n', (3093, 3105), False, 'from gamestonk_terminal.decorators import log_start_end\n'), ((3405, 3430), 'gamestonk_terminal.decorators.log_start_end', 'log_start_end', ([], {'log': 'logger'}), '(log=logger)\n', (3418, 3430), False, 'from gamestonk_terminal.decorators import log_start_end\n'), ((4220, 4245), 'gamestonk_terminal.decorators.log_start_end', 'log_start_end', ([], {'log': 'logger'}), '(log=logger)\n', (4233, 4245), False, 'from gamestonk_terminal.decorators import log_start_end\n'), ((4576, 4601), 'gamestonk_terminal.decorators.log_start_end', 'log_start_end', ([], {'log': 'logger'}), '(log=logger)\n', (4589, 4601), False, 'from gamestonk_terminal.decorators import log_start_end\n'), ((5428, 5453), 'gamestonk_terminal.decorators.log_start_end', 'log_start_end', ([], {'log': 'logger'}), '(log=logger)\n', (5441, 5453), False, 'from gamestonk_terminal.decorators import log_start_end\n'), ((5923, 5948), 'gamestonk_terminal.decorators.log_start_end', 'log_start_end', ([], {'log': 'logger'}), '(log=logger)\n', (5936, 5948), False, 'from gamestonk_terminal.decorators import log_start_end\n'), ((6552, 6577), 'gamestonk_terminal.decorators.log_start_end', 'log_start_end', ([], {'log': 'logger'}), '(log=logger)\n', (6565, 6577), False, 'from gamestonk_terminal.decorators import log_start_end\n'), ((6981, 7006), 'gamestonk_terminal.decorators.log_start_end', 'log_start_end', ([], {'log': 'logger'}), '(log=logger)\n', (6994, 7006), False, 'from gamestonk_terminal.decorators import log_start_end\n'), ((8006, 8031), 'gamestonk_terminal.decorators.log_start_end', 'log_start_end', ([], {'log': 'logger'}), '(log=logger)\n', (8019, 8031), False, 'from gamestonk_terminal.decorators import log_start_end\n'), ((8523, 8548), 'gamestonk_terminal.decorators.log_start_end', 'log_start_end', ([], {'log': 'logger'}), '(log=logger)\n', (8536, 8548), False, 'from gamestonk_terminal.decorators import log_start_end\n'), ((9519, 9544), 'gamestonk_terminal.decorators.log_start_end', 'log_start_end', ([], {'log': 'logger'}), '(log=logger)\n', (9532, 9544), False, 'from gamestonk_terminal.decorators import log_start_end\n'), ((10217, 10242), 'gamestonk_terminal.decorators.log_start_end', 'log_start_end', ([], {'log': 'logger'}), '(log=logger)\n', (10230, 10242), False, 'from gamestonk_terminal.decorators import log_start_end\n'), ((2225, 2239), 'pycoingecko.CoinGeckoAPI', 'CoinGeckoAPI', ([], {}), '()\n', (2237, 2239), False, 'from pycoingecko import CoinGeckoAPI\n'), ((2641, 2691), 'pandas.json_normalize', 'pd.json_normalize', (['data'], {'record_path': "['companies']"}), "(data, record_path=['companies'])\n", (2658, 2691), True, 'import pandas as pd\n'), ((4169, 4183), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (4181, 4183), True, 'import pandas as pd\n'), ((4501, 4543), 'gamestonk_terminal.cryptocurrency.discovery.pycoingecko_model.get_coins', 'get_coins', ([], {'top': 'top', 'category': '"""stablecoins"""'}), "(top=top, category='stablecoins')\n", (4510, 4543), False, 'from gamestonk_terminal.cryptocurrency.discovery.pycoingecko_model import 
get_coins\n'), ((4863, 4877), 'pycoingecko.CoinGeckoAPI', 'CoinGeckoAPI', ([], {}), '()\n', (4875, 4877), False, 'from pycoingecko import CoinGeckoAPI\n'), ((5383, 5410), 'gamestonk_terminal.cryptocurrency.dataframe_helpers.create_df_index', 'create_df_index', (['df', '"""Rank"""'], {}), "(df, 'Rank')\n", (5398, 5410), False, 'from gamestonk_terminal.cryptocurrency.dataframe_helpers import create_df_index, long_number_format_with_type_check, replace_underscores_in_column_names\n'), ((5694, 5708), 'pycoingecko.CoinGeckoAPI', 'CoinGeckoAPI', ([], {}), '()\n', (5706, 5708), False, 'from pycoingecko import CoinGeckoAPI\n'), ((5810, 5837), 'gamestonk_terminal.cryptocurrency.dataframe_helpers.create_df_index', 'create_df_index', (['df', '"""rank"""'], {}), "(df, 'rank')\n", (5825, 5837), False, 'from gamestonk_terminal.cryptocurrency.dataframe_helpers import create_df_index, long_number_format_with_type_check, replace_underscores_in_column_names\n'), ((6179, 6193), 'pycoingecko.CoinGeckoAPI', 'CoinGeckoAPI', ([], {}), '()\n', (6191, 6193), False, 'from pycoingecko import CoinGeckoAPI\n'), ((6507, 6534), 'gamestonk_terminal.cryptocurrency.dataframe_helpers.create_df_index', 'create_df_index', (['df', '"""Rank"""'], {}), "(df, 'Rank')\n", (6522, 6534), False, 'from gamestonk_terminal.cryptocurrency.dataframe_helpers import create_df_index, long_number_format_with_type_check, replace_underscores_in_column_names\n'), ((6797, 6811), 'pycoingecko.CoinGeckoAPI', 'CoinGeckoAPI', ([], {}), '()\n', (6809, 6811), False, 'from pycoingecko import CoinGeckoAPI\n'), ((6936, 6963), 'gamestonk_terminal.cryptocurrency.dataframe_helpers.create_df_index', 'create_df_index', (['df', '"""Rank"""'], {}), "(df, 'Rank')\n", (6951, 6963), False, 'from gamestonk_terminal.cryptocurrency.dataframe_helpers import create_df_index, long_number_format_with_type_check, replace_underscores_in_column_names\n'), ((7300, 7314), 'pycoingecko.CoinGeckoAPI', 'CoinGeckoAPI', ([], {}), '()\n', (7312, 7314), False, 'from pycoingecko import CoinGeckoAPI\n'), ((7619, 7646), 'gamestonk_terminal.cryptocurrency.dataframe_helpers.create_df_index', 'create_df_index', (['df', '"""rank"""'], {}), "(df, 'rank')\n", (7634, 7646), False, 'from gamestonk_terminal.cryptocurrency.dataframe_helpers import create_df_index, long_number_format_with_type_check, replace_underscores_in_column_names\n'), ((8279, 8293), 'pycoingecko.CoinGeckoAPI', 'CoinGeckoAPI', ([], {}), '()\n', (8291, 8293), False, 'from pycoingecko import CoinGeckoAPI\n'), ((8417, 8445), 'gamestonk_terminal.cryptocurrency.dataframe_helpers.create_df_index', 'create_df_index', (['df', '"""index"""'], {}), "(df, 'index')\n", (8432, 8445), False, 'from gamestonk_terminal.cryptocurrency.dataframe_helpers import create_df_index, long_number_format_with_type_check, replace_underscores_in_column_names\n'), ((8875, 8889), 'pycoingecko.CoinGeckoAPI', 'CoinGeckoAPI', ([], {}), '()\n', (8887, 8889), False, 'from pycoingecko import CoinGeckoAPI\n'), ((9969, 9983), 'pycoingecko.CoinGeckoAPI', 'CoinGeckoAPI', ([], {}), '()\n', (9981, 9983), False, 'from pycoingecko import CoinGeckoAPI\n'), ((10455, 10469), 'pycoingecko.CoinGeckoAPI', 'CoinGeckoAPI', ([], {}), '()\n', (10467, 10469), False, 'from pycoingecko import CoinGeckoAPI\n'), ((3249, 3272), 're.search', 're.search', (['re_str', 'coin'], {}), '(re_str, coin)\n', (3258, 3272), False, 'import re\n'), ((3751, 3765), 'pycoingecko.CoinGeckoAPI', 'CoinGeckoAPI', ([], {}), '()\n', (3763, 3765), False, 'from pycoingecko import CoinGeckoAPI\n'), ((3824, 
3842), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {}), '(data)\n', (3836, 3842), True, 'import pandas as pd\n'), ((10089, 10107), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {}), '(data)\n', (10101, 10107), True, 'import pandas as pd\n'), ((2386, 2444), 'gamestonk_terminal.cryptocurrency.dataframe_helpers.long_number_format_with_type_check', 'long_number_format_with_type_check', (["data['total_holdings']"], {}), "(data['total_holdings'])\n", (2420, 2444), False, 'from gamestonk_terminal.cryptocurrency.dataframe_helpers import create_df_index, long_number_format_with_type_check, replace_underscores_in_column_names\n'), ((9301, 9319), 'pandas.Series', 'pd.Series', (['results'], {}), '(results)\n', (9310, 9319), True, 'import pandas as pd\n'), ((10700, 10718), 'pandas.Series', 'pd.Series', (['results'], {}), '(results)\n', (10709, 10718), True, 'import pandas as pd\n'), ((4022, 4062), 'gamestonk_terminal.cryptocurrency.dataframe_helpers.replace_underscores_in_column_names', 'replace_underscores_in_column_names', (['col'], {}), '(col)\n', (4057, 4062), False, 'from gamestonk_terminal.cryptocurrency.dataframe_helpers import create_df_index, long_number_format_with_type_check, replace_underscores_in_column_names\n'), ((9428, 9466), 'gamestonk_terminal.cryptocurrency.dataframe_helpers.replace_underscores_in_column_names', 'replace_underscores_in_column_names', (['x'], {}), '(x)\n', (9463, 9466), False, 'from gamestonk_terminal.cryptocurrency.dataframe_helpers import create_df_index, long_number_format_with_type_check, replace_underscores_in_column_names\n'), ((10827, 10865), 'gamestonk_terminal.cryptocurrency.dataframe_helpers.replace_underscores_in_column_names', 'replace_underscores_in_column_names', (['x'], {}), '(x)\n', (10862, 10865), False, 'from gamestonk_terminal.cryptocurrency.dataframe_helpers import create_df_index, long_number_format_with_type_check, replace_underscores_in_column_names\n'), ((2751, 2789), 'gamestonk_terminal.cryptocurrency.dataframe_helpers.replace_underscores_in_column_names', 'replace_underscores_in_column_names', (['x'], {}), '(x)\n', (2786, 2789), False, 'from gamestonk_terminal.cryptocurrency.dataframe_helpers import create_df_index, long_number_format_with_type_check, replace_underscores_in_column_names\n'), ((3302, 3325), 're.search', 're.search', (['re_str', 'coin'], {}), '(re_str, coin)\n', (3311, 3325), False, 'import re\n')] |
#!/usr/bin/env python
import setuptools
MAINTAINER_NAME = '<NAME>'
MAINTAINER_EMAIL = '<EMAIL>'
URL_GIT = 'https://github.com/TransactPRO/gw3-python-client'
try:
import pypandoc
LONG_DESCRIPTION = pypandoc.convert('README.md', 'rst')
except (IOError, ImportError, OSError, RuntimeError):
LONG_DESCRIPTION = ''
CLASSIFIERS = [
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Topic :: Software Development :: Libraries :: Python Modules'
]
required = [
'requests',
]
setuptools.setup(
name='transactpro-gw3-client',
version='1.7.6',
description='Transact PRO Gateway3 implementation in Python.',
long_description=LONG_DESCRIPTION,
long_description_content_type="text/markdown",
author='<NAME>',
author_email='<EMAIL>',
install_requires=required,
url=URL_GIT,
packages=setuptools.find_packages(),
license='MIT',
classifiers=CLASSIFIERS,
keywords='GW3 gateway3 integration gateway TransactPRO python python3',
python_requires='>=3.6',
)
| [
"pypandoc.convert",
"setuptools.find_packages"
]
| [((209, 245), 'pypandoc.convert', 'pypandoc.convert', (['"""README.md"""', '"""rst"""'], {}), "('README.md', 'rst')\n", (225, 245), False, 'import pypandoc\n'), ((1215, 1241), 'setuptools.find_packages', 'setuptools.find_packages', ([], {}), '()\n', (1239, 1241), False, 'import setuptools\n')] |
import pprint
from typing import Optional, List, Tuple, Set, Dict
import numpy as np
from overrides import overrides
from python.handwritten_baseline.pipeline.data.base import Dataset
from python.handwritten_baseline.pipeline.model.feature_extr import DEBUG_EXTR
from python.handwritten_baseline.pipeline.model.feature_extr.base_mixin import FeatureExtractorMixin
class DebugFeatureExtractor(FeatureExtractorMixin):
"""
Returns constant or random feature value for testing purposes.
"""
def __init__(self,
strategy: str,
num_features: int,
use_cache: bool,
features_to_select: Optional[List[str]]):
super(DebugFeatureExtractor, self).__init__(DEBUG_EXTR, use_cache, features_to_select)
self.strategy = strategy
self.num_features = num_features
@overrides
def _transform(self, dataset: Dataset, pairs: List[Tuple[Tuple, Tuple]], unique_mentions: Set[Tuple]):
if self.strategy == "random":
return np.random.normal(0, 1, (len(pairs), self.num_features))
elif self.strategy == "zero":
return np.zeros((len(pairs), self.num_features))
elif self.strategy == "mix":
num_zero_features = self.num_features // 2
print(f"Generating {num_zero_features} zero features and {self.num_features - num_zero_features} random features.")
zero_features = np.zeros((len(pairs), num_zero_features))
random_features = np.random.normal(0, 1, (len(pairs), self.num_features - num_zero_features))
feature_matrix = np.hstack([zero_features, random_features])
np.random.shuffle(np.transpose(feature_matrix))
return feature_matrix
@overrides
def _get_plain_names_of_all_features(self) -> List[str]:
return [str(i) for i in range(self.num_features)]
@classmethod
@overrides
def from_params(cls, config: Dict):
strategy = config.pop("strategy")
num_features = config.pop("num_features")
use_cache = config.pop("use_cache", False)
features_to_select = config.pop("features_to_select", None)
obj = DebugFeatureExtractor(strategy, num_features, use_cache, features_to_select)
if config:
raise ValueError("Leftover configuration: " + pprint.pformat(config))
return obj | [
"numpy.transpose",
"pprint.pformat",
"numpy.hstack"
]
| [((1616, 1659), 'numpy.hstack', 'np.hstack', (['[zero_features, random_features]'], {}), '([zero_features, random_features])\n', (1625, 1659), True, 'import numpy as np\n'), ((2342, 2364), 'pprint.pformat', 'pprint.pformat', (['config'], {}), '(config)\n', (2356, 2364), False, 'import pprint\n'), ((1690, 1718), 'numpy.transpose', 'np.transpose', (['feature_matrix'], {}), '(feature_matrix)\n', (1702, 1718), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
#
# Author: <NAME>, Finland 2014
#
# This file is part of Kunquat.
#
# CC0 1.0 Universal, http://creativecommons.org/publicdomain/zero/1.0/
#
# To the extent possible under law, Kunquat Affirmers have waived all
# copyright and related or neighboring rights to Kunquat.
#
from __future__ import print_function
import sys
import traceback
import os
_ERROR_BRIEF = 'Kunquat Tracker encountered an error.'
_SUBMIT_INFO = \
'''Please submit an issue to Kunquat issue tracker at
https://github.com/kunquat/kunquat/issues with the following
information attached.'''
def get_error_details(eclass, einst, trace):
details_list = traceback.format_exception(eclass, einst, trace)
return ''.join(details_list)
def print_error_msg(eclass, einst, trace):
details = get_error_details(eclass, einst, trace)
print('\n{}\n{}\n\n{}'.format(_ERROR_BRIEF, _SUBMIT_INFO, details),
file=sys.stderr)
def log_error(eclass, einst, trace):
pass # TODO: implement once we decide where to write
def setup_basic_error_handler():
sys.excepthook = _basic_handler
def _basic_handler(eclass, einst, trace):
print_error_msg(eclass, einst, trace)
log_error(eclass, einst, trace)
os.abort()
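# Illustrative usage sketch (not part of the original module): a program would
# normally call setup_basic_error_handler() once at start-up; any uncaught
# exception is then reported through print_error_msg / log_error and the
# process is terminated by _basic_handler via os.abort().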
| [
"os.abort",
"traceback.format_exception"
]
| [((654, 702), 'traceback.format_exception', 'traceback.format_exception', (['eclass', 'einst', 'trace'], {}), '(eclass, einst, trace)\n', (680, 702), False, 'import traceback\n'), ((1229, 1239), 'os.abort', 'os.abort', ([], {}), '()\n', (1237, 1239), False, 'import os\n')] |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(package="google.cloud.aiplatform.v1", manifest={"EnvVar",},)
class EnvVar(proto.Message):
r"""Represents an environment variable present in a Container or
Python Module.
Attributes:
name (str):
Required. Name of the environment variable.
Must be a valid C identifier.
value (str):
Required. Variables that reference a $(VAR_NAME) are
            expanded using the previously defined environment variables in
the container and any service environment variables. If a
variable cannot be resolved, the reference in the input
string will be unchanged. The $(VAR_NAME) syntax can be
            escaped with a double $$, i.e. $$(VAR_NAME). Escaped
references will never be expanded, regardless of whether the
variable exists or not.
"""
name = proto.Field(proto.STRING, number=1)
value = proto.Field(proto.STRING, number=2)
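# Usage sketch (values here are hypothetical, not from the original file): a
# proto-plus message is constructed with plain keyword arguments, e.g.
#     env = EnvVar(name="MODEL_DIR", value="$(BASE_DIR)/model")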
__all__ = tuple(sorted(__protobuf__.manifest))
| [
"proto.Field",
"proto.module"
]
| [((648, 719), 'proto.module', 'proto.module', ([], {'package': '"""google.cloud.aiplatform.v1"""', 'manifest': "{'EnvVar'}"}), "(package='google.cloud.aiplatform.v1', manifest={'EnvVar'})\n", (660, 719), False, 'import proto\n'), ((1534, 1569), 'proto.Field', 'proto.Field', (['proto.STRING'], {'number': '(1)'}), '(proto.STRING, number=1)\n', (1545, 1569), False, 'import proto\n'), ((1583, 1618), 'proto.Field', 'proto.Field', (['proto.STRING'], {'number': '(2)'}), '(proto.STRING, number=2)\n', (1594, 1618), False, 'import proto\n')] |
"""
Utility functions and classes shared by multiple backends
"""
from collections import namedtuple
import logging
from . import symbols
from . import types
LOGGER = logging.getLogger('spc.backend_utils')
# NameContexts encapsulate both the function stack (which holds values) and
# the symbol table context (which binds them)
NameContext = namedtuple('NameContext', ['symbol_ctx', 'func_stack'])
# While loops are identified by two labels - the start label, for re-running
# the condition, and the end label, for exiting when the condition is false
WhileLabels = namedtuple('WhileLabels', ['cond', 'exit'])
# If conditions are identified by two labels - the else label, for when
# the condition is false (to skip the then block) and the end label, for
# when the condition is true (to skip the else block)
IfLabels = namedtuple('IfLabels', ['else_body', 'end'])
# Switch conditionals are handled sort of like if conditionals:
#
# (switch |
# (case T1 B1) | jump-if-not T1, l1prime; ...; jump l4; l1prime:
# (case T2 B2) | jump-if-not T2, l2prime; ...; jump l4; l2prime:
# (else B3)) | ...
# | l4:
class SwitchLabels:
"""
Switch labels are similar to conditionals:
(switch |
(case T1 B1) | jump-if-not T1, case_lbl_1; ...; jump end; case_lbl_1:
(case T2 B2) | jump-if-not T2, case_lbl_2; ...; jump end; case_lbl_2:
(else B3) | ...; end_lbl:
    Since each case is processed in order, only the current case's end label
    and the end-of-switch label are available at any given time.
"""
def __init__(self, end_label):
self.end_label = end_label
self.case_end_label = None
class CoercionContext:
"""
This is used to wrap up all the information needed to coerce values from
one type to another.
"""
def __init__(self, backend, temp_context, code_templates):
self.backend = backend
self.temp_context = temp_context
self.templates = code_templates
def copy_with_context(self, new_context):
"""
Creates a copy of this object, but within a new temporary context.
"""
return CoercionContext(self.backend, new_context, self.templates)
def coerce(self, input_offset, input_type, output_type):
"""
Coerces a value, located on the stack, from the given input type to the
given output type. Returns the stack offset of the converted
variable and the output type.
Raises a TypeError if this is not possible.
"""
if input_type == output_type:
return input_offset, output_type
elif (input_type, output_type) == (types.Integer, types.Byte):
return self._coerce_int_to_byte(input_offset), output_type
elif (input_type, output_type) == (types.Byte, types.Integer):
return self._coerce_byte_to_int(input_offset), output_type
else:
raise TypeError('Cannot coerce {} -> {}'.format(input_type, output_type))
def _coerce_int_to_byte(self, input_offset):
"""
Coerces an integer to a byte, returning the stack offset of the
resulting byte.
"""
byte_size = self.backend._type_size(types.Byte)
byte_align = self.backend._type_alignment(types.Byte)
dest_offset = self.temp_context.add_temp(byte_size, byte_align)
tmp_reg = self.templates.tmp_regs[0]
self.backend._write_comment('Coercing int@{} to byte@{}',
input_offset, dest_offset)
self.templates.emit_load_stack_word(tmp_reg, input_offset)
self.templates.emit_int_to_byte(tmp_reg)
self.templates.emit_save_stack_byte(tmp_reg, dest_offset)
return dest_offset
def _coerce_byte_to_int(self, input_offset):
"""
Coerces a byte to an integer, returning the stack offset of the
resulting integer.
"""
int_size = self.backend._type_size(types.Integer)
int_align = self.backend._type_alignment(types.Integer)
dest_offset = self.temp_context.add_temp(int_size, int_align)
tmp_reg = self.templates.tmp_regs[0]
self.backend._write_comment('Coercing byte@{} to int@{}',
input_offset, dest_offset)
self.templates.emit_load_stack_byte(tmp_reg, input_offset)
self.templates.emit_byte_to_int(tmp_reg)
self.templates.emit_save_stack_word(tmp_reg, dest_offset)
return dest_offset
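    # Sketch of typical use (objects and offsets are hypothetical):
    #     ctx = CoercionContext(backend, temp_context, templates)
    #     new_offset, out_type = ctx.coerce(in_offset, types.Byte, types.Integer)
    # coerce() emits the load/convert/store sequence and returns the stack
    # offset where the converted value now lives.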
class FunctionStack:
"""
Tracks where variables are on the function's stack.
Note that this makes a number of assumptions about how things are stored:
- All arguments are stored on the stack, in reverse order. This goes
against the calling conventions for register rich architectures, like
MIPS, but there are enough corner cases (like copying structs by value)
that ignoring the calling convention is worthwhile for a non-optimizing
compiler like this.
- Locals and temporaries are stored on the stack, in order of creation.
"""
def __init__(self, backend):
self.backend = backend
self.local_offset = self._starting_locals_offset()
self.param_offset = self._starting_param_offset()
self.vars = {}
def _starting_locals_offset(self):
"""
Returns the starting offset of the local variables on the stack.
"""
raise NotImplementedError
def _starting_param_offset(self):
"""
Returns the starting offset of the parameter on the stack.
"""
raise NotImplementedError
def _expand_stack(self, size):
"""
Emits code to expand the stack frame by the given size.
"""
raise NotImplementedError
def _shrink_stack(self, size):
"""
Emits code to reduce the stack frame by the given size.
"""
raise NotImplementedError
def pad_param(self, space):
"""
Adds blank space before the next parameter.
"""
self.param_offset += space
def add_param(self, name, size, alignment):
"""
Adds a new parameter to the stack.
"""
self.param_offset = types.align_address(self.param_offset, alignment)
self.vars[name] = self.param_offset
self.param_offset += size
self.backend._write_comment('Binding param "{}" to offset {}', name, self.vars[name])
def add_local(self, name, size, alignment):
"""
Adds a local variable to the stack.
"""
self.local_offset = (
types.align_address(self.local_offset - size, alignment,
types.Alignment.Down))
self.vars[name] = self.local_offset
self.backend._write_comment('Binding local "{}" to offset {}', name, self.vars[name])
def get_temp_context(self, backend):
"""
Returns a context which can be used for putting temporary values on
the stack. When the context exits, the space used by the temporary
variables is cleaned up.
"""
root = self
class TemporaryContext:
def __init__(self, start_offset):
self.tmp_offset = start_offset
self.total_tmp_size = 0
def __enter__(self):
pass
def __exit__(self, *exc_info):
root._shrink_stack(self.total_tmp_size)
def add_temp(self, size, alignment):
"""
Makes space for a new temporary, returning the $fp offset at
which to write it.
"""
old_tmp_offset = self.tmp_offset
self.tmp_offset = (
types.align_address(self.tmp_offset - size, alignment,
types.Alignment.Down))
size_used = old_tmp_offset - self.tmp_offset
self.total_tmp_size += size_used
root._expand_stack(size_used)
return self.tmp_offset
def get_temp_context(self):
"""
Creates a temporary context, which starts at this temporary context.
"""
return TemporaryContext(self.tmp_offset)
return TemporaryContext(self.local_offset)
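    # Sketch of the intended pattern (caller names are hypothetical):
    #     tmp_ctx = func_stack.get_temp_context(backend)
    #     with tmp_ctx:
    #         offset = tmp_ctx.add_temp(size=4, alignment=4)
    #         ...  # emit code that uses the temporary at `offset`
    #     # leaving the with-block shrinks the stack by the space that was used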
def expand_locals(self):
"""
Makes enough space for the local variables on the stack.
"""
self._expand_stack(self.locals_size())
def cleanup_locals(self):
"""
Cleans up the space used by the local variables on the stack.
"""
self._shrink_stack(self.locals_size())
def locals_size(self):
"""
Gets the size used by all the locals.
"""
return abs(self.local_offset) - abs(self._starting_locals_offset())
def __getitem__(self, name):
"""
Gets the offset to the variable on the stack, or a Register (if the
name was bound to one of the first four parameters)
"""
return self.vars[name]
class VerificationContext:
"""
Used to record all values and types defined all at once (i.e. inside the
same declaration block), so that they can be verified all at once.
"Verification" here means that their types are checked to be valid, which
means different things for different types.
"""
def __init__(self):
self.types = []
self.values = []
def add_value(self, name):
"""
Registers a new value to be verified.
"""
self.values.append(name)
def add_type(self, name):
"""
Registers a new type to be defined.
"""
        self.types.append(name)
def verify(self, backend):
"""
Verifies all the definitions against the backend.
"""
backend._check_valid_types(backend.ctx_types[name] for name in self.types)
backend._check_valid_types(backend.ctx_values[name] for name in self.values)
class ContextMixin:
"""
    Manages the symbol table contexts for this backend (as well as its function stack).
Depends upon the user of this mixin to inherit from BaseBackend in
addition to this one.
"""
def __init__(self):
self.parent_contexts = []
self.current_context = NameContext(symbols.Context(), None)
self.verify_context = VerificationContext()
def _register_file_ns(self, namespace):
"""
Replaces the current context, with one where the symbol context is
expanded to contain the file's namespace.
"""
file_context = self.current_context.symbol_ctx.register(namespace)
self.current_context = self.current_context._replace(symbol_ctx=file_context)
@property
def ctx_namespace(self):
"""
Gets the current namespace
"""
return self.current_context.symbol_ctx.search_path[0]
@property
def ctx_values(self):
"""
Returns the current context's value symbols.
"""
return self.current_context.symbol_ctx.values
@property
def ctx_types(self):
"""
Returns the current context's type symbols.
"""
return self.current_context.symbol_ctx.types
@property
def ctx_stack(self):
"""
Returns the current context's stack information.
"""
return self.current_context.func_stack
def _value_is_defined(self, name):
"""
Returns True if the given variable is defined in the current scope, or
False otherwise.
This is for the static expression processor function, var-def?
"""
return (name in self.ctx_values and
self.ctx_values.is_visible(name))
def _type_is_defined(self, name):
"""
Returns True if the given type is defined in the current scope, or
False otherwise.
This is for the static expression processor function, var-def?
"""
return (name in self.ctx_types and
self.ctx_types.is_visible(name))
def _make_func_stack(self):
raise NotImplementedError
def _push_context(self):
"""
Pushes a new binding context.
"""
old_context = self.current_context
self.parent_contexts.append(old_context)
self.current_context = NameContext(
self.current_context.symbol_ctx.enter(),
self._make_func_stack())
def _pop_context(self):
"""
Loads the previous binding context.
"""
self.current_context = self.parent_contexts.pop()
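    # Note: _push_context/_pop_context are intended to bracket a lexical scope,
    # e.g. (hypothetical caller)
    #     self._push_context()
    #     ...compile the block's declarations and statements...
    #     self._pop_context()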
def _resolve_if_type_name(self, name):
"""
Resolves a type name into a concrete type.
"""
try:
return types.resolve_name(name, self.ctx_types)
except PermissionError as exn:
self.error(self.line, self.col,
'Cannot resolve hidden type "{}"', str(exn))
except RecursionError:
self.error(self.line, self.col,
'Type aliases too deep, when resolving "{}"', name)
except KeyError as exn:
self.error(self.line, self.col,
'Invalid type "{}"', str(exn))
def _verify_types(self):
"""
Verifies all the types across all this current context's symbols.
"""
self.verify_context.verify(self)
self.verify_context = VerificationContext()
class ThirtyTwoMixin:
"""
Defines some information about type sizes and alignment which 32-bit
platforms have in common.
Depends upon the user of this mixin to inherit from ContextMixin.
"""
def _type_alignment(self, type_obj):
"""
Returns alignment of the given type (1 for byte, 4 for word, etc.)
"""
type_obj = self._resolve_if_type_name(type_obj)
if type_obj is types.Integer:
return 4
elif type_obj is types.Byte:
return 1
elif isinstance(type_obj, (types.PointerTo, types.FunctionPointer)):
return 4
elif isinstance(type_obj, types.ArrayOf):
return self._type_alignment(type_obj.type)
elif isinstance(type_obj, types.Struct):
# The alignment only concerns the first element of the struct -
# the struct's internal alignment doesn't come into play
#
            # Also, an OrderedDict's fields are not iterable, for whatever reason
struct_types = list(type_obj.fields.values())
return self._type_alignment(struct_types[0])
else:
raise TypeError('Not a compiler type: {}'.format(type_obj))
def _type_size(self, type_obj, depth=0):
"""
Returns the size of a type object in bytes.
"""
MAX_DEPTH = 100
if depth >= MAX_DEPTH:
self.error(self.line, self.col,
"Type nested too deeply - potential self-referential type")
type_obj = self._resolve_if_type_name(type_obj)
if type_obj is types.Integer:
return 4
elif type_obj is types.Byte:
return 1
elif isinstance(type_obj, (types.PointerTo, types.FunctionPointer)):
return 4
elif isinstance(type_obj, types.ArrayOf):
# To avoid wasting space on the last element, this pads all the
# elements but the last
base_size = self._type_size(type_obj.type, depth + 1)
return self._array_offset(type_obj, type_obj.count - 1) + base_size
elif isinstance(type_obj, types.Struct):
last_field = list(type_obj.fields)[-1]
last_field_type = type_obj.fields[last_field]
last_field_offset = self._field_offset(type_obj, last_field)
return last_field_offset + self._type_size(last_field_type, depth + 1)
else:
raise TypeError('Not a compiler type: {}'.format(type_obj))
class comment_after:
"""
Wraps a method - after the method executes, something is written to
the log.
"""
def __init__(self, fmt, *args, **kwargs):
self.fmt = fmt
self.args = args
self.kwargs = kwargs
def __call__(self, func):
def wrapper(parent, *args, **kwargs):
x = func(parent, *args, **kwargs)
parent._write_comment(self.fmt, *self.args, **self.kwargs)
return x
return wrapper
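# Sketch of use: comment_after decorates backend methods so that a formatted
# comment is written (via parent._write_comment) after the method runs, e.g.
# (the method name below is hypothetical)
#     @comment_after('end of conditional')
#     def _compile_if(self, cond, body):
#         ...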
| [
"logging.getLogger",
"collections.namedtuple"
]
| [((169, 207), 'logging.getLogger', 'logging.getLogger', (['"""spc.backend_utils"""'], {}), "('spc.backend_utils')\n", (186, 207), False, 'import logging\n'), ((345, 400), 'collections.namedtuple', 'namedtuple', (['"""NameContext"""', "['symbol_ctx', 'func_stack']"], {}), "('NameContext', ['symbol_ctx', 'func_stack'])\n", (355, 400), False, 'from collections import namedtuple\n'), ((569, 612), 'collections.namedtuple', 'namedtuple', (['"""WhileLabels"""', "['cond', 'exit']"], {}), "('WhileLabels', ['cond', 'exit'])\n", (579, 612), False, 'from collections import namedtuple\n'), ((824, 868), 'collections.namedtuple', 'namedtuple', (['"""IfLabels"""', "['else_body', 'end']"], {}), "('IfLabels', ['else_body', 'end'])\n", (834, 868), False, 'from collections import namedtuple\n')] |
import math
import torch
import unittest
import gpytorch
from torch import optim
from torch.autograd import Variable
from gpytorch.kernels import RBFKernel
from gpytorch.means import ConstantMean
from gpytorch.likelihoods import GaussianLikelihood
from gpytorch.random_variables import GaussianRandomVariable
# Simple training data: let's try to learn a sine function
train_x = Variable(torch.linspace(0, 1, 11))
train_y = Variable(torch.sin(train_x.data * (2 * math.pi)))
test_x = Variable(torch.linspace(0, 1, 51))
test_y = Variable(torch.sin(test_x.data * (2 * math.pi)))
class ExactGPModel(gpytorch.models.ExactGP):
def __init__(self, train_inputs, train_targets, likelihood):
super(ExactGPModel, self).__init__(train_inputs, train_targets, likelihood)
self.mean_module = ConstantMean(constant_bounds=(-1, 1))
self.covar_module = RBFKernel(log_lengthscale_bounds=(-3, 3))
def forward(self, x):
mean_x = self.mean_module(x)
covar_x = self.covar_module(x)
return GaussianRandomVariable(mean_x, covar_x)
class TestSimpleGPRegression(unittest.TestCase):
def test_posterior_latent_gp_and_likelihood_without_optimization(self):
# We're manually going to set the hyperparameters to be ridiculous
likelihood = GaussianLikelihood(log_noise_bounds=(-3, 3))
gp_model = ExactGPModel(train_x.data, train_y.data, likelihood)
# Update bounds to accommodate extreme parameters
gp_model.covar_module.set_bounds(log_lengthscale=(-10, 10))
likelihood.set_bounds(log_noise=(-10, 10))
# Update parameters
gp_model.covar_module.initialize(log_lengthscale=-10)
gp_model.mean_module.initialize(constant=0)
likelihood.initialize(log_noise=-10)
# Compute posterior distribution
gp_model.eval()
likelihood.eval()
# Let's see how our model does, conditioned with weird hyperparams
# The posterior should fit all the data
function_predictions = likelihood(gp_model(train_x))
self.assertLess(
torch.norm(function_predictions.mean().data - train_y.data),
1e-3,
)
self.assertLess(torch.norm(function_predictions.var().data), 1e-3)
# It shouldn't fit much else though
test_function_predictions = gp_model(Variable(torch.Tensor([1.1])))
self.assertLess(
torch.norm(test_function_predictions.mean().data - 0),
1e-4,
)
self.assertLess(torch.norm(test_function_predictions.var().data - 1), 1e-4)
def test_posterior_latent_gp_and_likelihood_with_optimization(self):
# We're manually going to set the hyperparameters to something they shouldn't be
likelihood = GaussianLikelihood(log_noise_bounds=(-3, 3))
gp_model = ExactGPModel(train_x.data, train_y.data, likelihood)
mll = gpytorch.ExactMarginalLogLikelihood(likelihood, gp_model)
gp_model.covar_module.initialize(log_lengthscale=1)
gp_model.mean_module.initialize(constant=0)
likelihood.initialize(log_noise=1)
# Find optimal model hyperparameters
gp_model.train()
likelihood.train()
optimizer = optim.Adam(
list(gp_model.parameters()) + list(likelihood.parameters()),
lr=0.1,
)
optimizer.n_iter = 0
for _ in range(50):
optimizer.zero_grad()
output = gp_model(train_x)
loss = -mll(output, train_y)
loss.backward()
optimizer.n_iter += 1
optimizer.step()
# Test the model
gp_model.eval()
likelihood.eval()
test_function_predictions = likelihood(gp_model(test_x))
mean_abs_error = torch.mean(
torch.abs(test_y - test_function_predictions.mean())
)
self.assertLess(mean_abs_error.data.squeeze()[0], 0.05)
def test_posterior_latent_gp_and_likelihood_fast_pred_var(self):
with gpytorch.fast_pred_var():
# We're manually going to set the hyperparameters to
# something they shouldn't be
likelihood = GaussianLikelihood(log_noise_bounds=(-3, 3))
gp_model = ExactGPModel(train_x.data, train_y.data, likelihood)
mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, gp_model)
gp_model.covar_module.initialize(log_lengthscale=1)
gp_model.mean_module.initialize(constant=0)
likelihood.initialize(log_noise=1)
# Find optimal model hyperparameters
gp_model.train()
likelihood.train()
optimizer = optim.Adam(
list(gp_model.parameters()) + list(likelihood.parameters()),
lr=0.1,
)
optimizer.n_iter = 0
for _ in range(50):
optimizer.zero_grad()
output = gp_model(train_x)
loss = -mll(output, train_y)
loss.backward()
optimizer.n_iter += 1
optimizer.step()
# Test the model
gp_model.eval()
likelihood.eval()
# Set the cache
test_function_predictions = likelihood(gp_model(train_x))
# Now bump up the likelihood to something huge
# This will make it easy to calculate the variance
likelihood.log_noise.data.fill_(3)
test_function_predictions = likelihood(gp_model(train_x))
noise = likelihood.log_noise.exp()
var_diff = (test_function_predictions.var() - noise).abs()
self.assertLess(torch.max(var_diff.data / noise.data), 0.05)
def test_posterior_latent_gp_and_likelihood_with_optimization_cuda(self):
if torch.cuda.is_available():
# We're manually going to set the hyperparameters to
# something they shouldn't be
likelihood = GaussianLikelihood(log_noise_bounds=(-3, 3)).cuda()
gp_model = ExactGPModel(
train_x.data.cuda(),
train_y.data.cuda(),
likelihood
).cuda()
mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, gp_model)
gp_model.covar_module.initialize(log_lengthscale=1)
gp_model.mean_module.initialize(constant=0)
likelihood.initialize(log_noise=1)
# Find optimal model hyperparameters
gp_model.train()
likelihood.train()
optimizer = optim.Adam(gp_model.parameters(), lr=0.1)
optimizer.n_iter = 0
for _ in range(50):
optimizer.zero_grad()
output = gp_model(train_x.cuda())
loss = -mll(output, train_y.cuda())
loss.backward()
optimizer.n_iter += 1
optimizer.step()
# Test the model
gp_model.eval()
likelihood.eval()
test_function_predictions = likelihood(gp_model(test_x.cuda()))
mean_abs_error = torch.mean(
torch.abs(test_y.cuda() - test_function_predictions.mean())
)
self.assertLess(mean_abs_error.data.squeeze()[0], 0.05)
if __name__ == '__main__':
unittest.main()
| [
"gpytorch.ExactMarginalLogLikelihood",
"gpytorch.mlls.ExactMarginalLogLikelihood",
"gpytorch.kernels.RBFKernel",
"torch.sin",
"torch.Tensor",
"torch.max",
"gpytorch.random_variables.GaussianRandomVariable",
"torch.cuda.is_available",
"gpytorch.means.ConstantMean",
"unittest.main",
"gpytorch.likelihoods.GaussianLikelihood",
"gpytorch.fast_pred_var",
"torch.linspace"
]
| [((389, 413), 'torch.linspace', 'torch.linspace', (['(0)', '(1)', '(11)'], {}), '(0, 1, 11)\n', (403, 413), False, 'import torch\n'), ((434, 473), 'torch.sin', 'torch.sin', (['(train_x.data * (2 * math.pi))'], {}), '(train_x.data * (2 * math.pi))\n', (443, 473), False, 'import torch\n'), ((494, 518), 'torch.linspace', 'torch.linspace', (['(0)', '(1)', '(51)'], {}), '(0, 1, 51)\n', (508, 518), False, 'import torch\n'), ((538, 576), 'torch.sin', 'torch.sin', (['(test_x.data * (2 * math.pi))'], {}), '(test_x.data * (2 * math.pi))\n', (547, 576), False, 'import torch\n'), ((7290, 7305), 'unittest.main', 'unittest.main', ([], {}), '()\n', (7303, 7305), False, 'import unittest\n'), ((801, 838), 'gpytorch.means.ConstantMean', 'ConstantMean', ([], {'constant_bounds': '(-1, 1)'}), '(constant_bounds=(-1, 1))\n', (813, 838), False, 'from gpytorch.means import ConstantMean\n'), ((867, 908), 'gpytorch.kernels.RBFKernel', 'RBFKernel', ([], {'log_lengthscale_bounds': '(-3, 3)'}), '(log_lengthscale_bounds=(-3, 3))\n', (876, 908), False, 'from gpytorch.kernels import RBFKernel\n'), ((1027, 1066), 'gpytorch.random_variables.GaussianRandomVariable', 'GaussianRandomVariable', (['mean_x', 'covar_x'], {}), '(mean_x, covar_x)\n', (1049, 1066), False, 'from gpytorch.random_variables import GaussianRandomVariable\n'), ((1290, 1334), 'gpytorch.likelihoods.GaussianLikelihood', 'GaussianLikelihood', ([], {'log_noise_bounds': '(-3, 3)'}), '(log_noise_bounds=(-3, 3))\n', (1308, 1334), False, 'from gpytorch.likelihoods import GaussianLikelihood\n'), ((2760, 2804), 'gpytorch.likelihoods.GaussianLikelihood', 'GaussianLikelihood', ([], {'log_noise_bounds': '(-3, 3)'}), '(log_noise_bounds=(-3, 3))\n', (2778, 2804), False, 'from gpytorch.likelihoods import GaussianLikelihood\n'), ((2891, 2948), 'gpytorch.ExactMarginalLogLikelihood', 'gpytorch.ExactMarginalLogLikelihood', (['likelihood', 'gp_model'], {}), '(likelihood, gp_model)\n', (2926, 2948), False, 'import gpytorch\n'), ((5791, 5816), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (5814, 5816), False, 'import torch\n'), ((4000, 4024), 'gpytorch.fast_pred_var', 'gpytorch.fast_pred_var', ([], {}), '()\n', (4022, 4024), False, 'import gpytorch\n'), ((4158, 4202), 'gpytorch.likelihoods.GaussianLikelihood', 'GaussianLikelihood', ([], {'log_noise_bounds': '(-3, 3)'}), '(log_noise_bounds=(-3, 3))\n', (4176, 4202), False, 'from gpytorch.likelihoods import GaussianLikelihood\n'), ((4297, 4359), 'gpytorch.mlls.ExactMarginalLogLikelihood', 'gpytorch.mlls.ExactMarginalLogLikelihood', (['likelihood', 'gp_model'], {}), '(likelihood, gp_model)\n', (4337, 4359), False, 'import gpytorch\n'), ((6179, 6241), 'gpytorch.mlls.ExactMarginalLogLikelihood', 'gpytorch.mlls.ExactMarginalLogLikelihood', (['likelihood', 'gp_model'], {}), '(likelihood, gp_model)\n', (6219, 6241), False, 'import gpytorch\n'), ((2349, 2368), 'torch.Tensor', 'torch.Tensor', (['[1.1]'], {}), '([1.1])\n', (2361, 2368), False, 'import torch\n'), ((5656, 5693), 'torch.max', 'torch.max', (['(var_diff.data / noise.data)'], {}), '(var_diff.data / noise.data)\n', (5665, 5693), False, 'import torch\n'), ((5950, 5994), 'gpytorch.likelihoods.GaussianLikelihood', 'GaussianLikelihood', ([], {'log_noise_bounds': '(-3, 3)'}), '(log_noise_bounds=(-3, 3))\n', (5968, 5994), False, 'from gpytorch.likelihoods import GaussianLikelihood\n')] |
from CyberSource import *
import os
import json
from importlib.machinery import SourceFileLoader
config_file = os.path.join(os.getcwd(), "data", "Configuration.py")
configuration = SourceFileLoader("module.name", config_file).load_module()
# To delete None values in Input Request Json body
def del_none(d):
for key, value in list(d.items()):
if value is None:
del d[key]
elif isinstance(value, dict):
del_none(value)
return d
def customer_match_denied_parties_list():
clientReferenceInformationCode = "verification example"
clientReferenceInformationComments = "Export-basic"
clientReferenceInformationPartnerDeveloperId = "7891234"
clientReferenceInformationPartnerSolutionId = "89012345"
clientReferenceInformationPartner = Riskv1decisionsClientReferenceInformationPartner(
developer_id = clientReferenceInformationPartnerDeveloperId,
solution_id = clientReferenceInformationPartnerSolutionId
)
clientReferenceInformation = Riskv1decisionsClientReferenceInformation(
code = clientReferenceInformationCode,
comments = clientReferenceInformationComments,
partner = clientReferenceInformationPartner.__dict__
)
orderInformationBillToAddress1 = "901 Metro Centre Blvd"
orderInformationBillToAdministrativeArea = "CA"
orderInformationBillToCountry = "US"
orderInformationBillToLocality = "Foster City"
orderInformationBillToPostalCode = "94404"
orderInformationBillToCompanyName = "A & C International Trade, Inc"
orderInformationBillToCompany = Riskv1exportcomplianceinquiriesOrderInformationBillToCompany(
name = orderInformationBillToCompanyName
)
orderInformationBillToFirstName = "ANDREE"
orderInformationBillToLastName = "AGNESE"
orderInformationBillToEmail = "<EMAIL>"
orderInformationBillTo = Riskv1exportcomplianceinquiriesOrderInformationBillTo(
address1 = orderInformationBillToAddress1,
administrative_area = orderInformationBillToAdministrativeArea,
country = orderInformationBillToCountry,
locality = orderInformationBillToLocality,
postal_code = orderInformationBillToPostalCode,
company = orderInformationBillToCompany.__dict__,
first_name = orderInformationBillToFirstName,
last_name = orderInformationBillToLastName,
email = orderInformationBillToEmail
)
orderInformationShipToCountry = "IN"
orderInformationShipToFirstName = "DumbelDore"
orderInformationShipToLastName = "Albus"
orderInformationShipTo = Riskv1exportcomplianceinquiriesOrderInformationShipTo(
country = orderInformationShipToCountry,
first_name = orderInformationShipToFirstName,
last_name = orderInformationShipToLastName
)
orderInformationLineItems = []
orderInformationLineItems1 = Riskv1exportcomplianceinquiriesOrderInformationLineItems(
unit_price = "120.50",
quantity = 3,
product_sku = "123456",
product_name = "Qwe",
product_code = "physical_software"
)
orderInformationLineItems.append(orderInformationLineItems1.__dict__)
orderInformation = Riskv1exportcomplianceinquiriesOrderInformation(
bill_to = orderInformationBillTo.__dict__,
ship_to = orderInformationShipTo.__dict__,
line_items = orderInformationLineItems
)
requestObj = ValidateExportComplianceRequest(
client_reference_information = clientReferenceInformation.__dict__,
order_information = orderInformation.__dict__
)
requestObj = del_none(requestObj.__dict__)
requestObj = json.dumps(requestObj)
try:
config_obj = configuration.Configuration()
client_config = config_obj.get_configuration()
api_instance = VerificationApi(client_config)
return_data, status, body = api_instance.validate_export_compliance(requestObj)
print("\nAPI RESPONSE CODE : ", status)
print("\nAPI RESPONSE BODY : ", body)
return return_data
except Exception as e:
print("\nException when calling VerificationApi->validate_export_compliance: %s\n" % e)
if __name__ == "__main__":
customer_match_denied_parties_list()
| [
"importlib.machinery.SourceFileLoader",
"json.dumps",
"os.getcwd"
]
| [((125, 136), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (134, 136), False, 'import os\n'), ((3659, 3681), 'json.dumps', 'json.dumps', (['requestObj'], {}), '(requestObj)\n', (3669, 3681), False, 'import json\n'), ((182, 226), 'importlib.machinery.SourceFileLoader', 'SourceFileLoader', (['"""module.name"""', 'config_file'], {}), "('module.name', config_file)\n", (198, 226), False, 'from importlib.machinery import SourceFileLoader\n')] |
"""
This is where the web application starts running
"""
from app.index import create_app
app = create_app()
if __name__ == "__main__":
app.secret_key = 'mysecret'
app.run(port=8080, host="0.0.0.0", debug=True) | [
"app.index.create_app"
]
| [((96, 108), 'app.index.create_app', 'create_app', ([], {}), '()\n', (106, 108), False, 'from app.index import create_app\n')] |
# This script contains the get_joke() function to generate a new dad joke
import requests
def get_joke():
"""Return new joke string from icanhazdadjoke.com."""
url = "https://icanhazdadjoke.com/"
response = requests.get(url, headers={'Accept': 'application/json'})
raw_joke = response.json()
joke = raw_joke['joke']
return joke
| [
"requests.get"
]
| [((222, 279), 'requests.get', 'requests.get', (['url'], {'headers': "{'Accept': 'application/json'}"}), "(url, headers={'Accept': 'application/json'})\n", (234, 279), False, 'import requests\n')] |
import sys
success = False
in_ironpython = "IronPython" in sys.version
if in_ironpython:
try:
from ironpython_clipboard import GetClipboardText, SetClipboardText
success = True
except ImportError:
pass
else:
try:
from win32_clipboard import GetClipboardText, SetClipboardText
success = True
except ImportError:
raise
def send_data(lists):
SetClipboardText(make_tab(lists))
def set_clipboard_text(toclipboard):
SetClipboardText(str(toclipboard))
def make_tab(lists):
if hasattr(lists, "tolist"):
lists = lists.tolist()
ut = []
for rad in lists:
if type(rad) in [list, tuple]:
ut.append("\t".join(["%s" % x for x in rad]))
else:
ut.append("%s" % rad)
return "\n".join(ut)
def make_list_of_list(txt):
def make_num(x):
try:
return int(x)
except ValueError:
try:
return float(x)
except ValueError:
try:
return complex(x)
except ValueError:
return x
return x
ut = []
flag = False
for rad in [x for x in txt.split("\r\n") if x != ""]:
raden = [make_num(x) for x in rad.split("\t")]
if str in map(type, raden):
flag = True
ut.append(raden)
return ut, flag
def get_clipboard_text_and_convert(paste_list=False):
"""Get txt from clipboard. if paste_list==True the convert tab separated
data to list of lists. Enclose list of list in array() if all elements are
numeric"""
txt = GetClipboardText()
if txt:
if paste_list and "\t" in txt:
array, flag = make_list_of_list(txt)
if flag:
txt = repr(array)
else:
txt = "array(%s)" % repr(array)
txt = "".join([c for c in txt if c not in " \t\r\n"])
return txt
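# Worked example (assuming purely numeric clipboard contents): with
# "1\t2\r\n3\t4" on the clipboard, get_clipboard_text_and_convert(True)
# returns "array([[1,2],[3,4]])": make_list_of_list parses the numbers, the
# result is wrapped in array(...) because no cell was a string, and the final
# join strips all whitespace.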
| [
"win32_clipboard.GetClipboardText"
]
| [((1639, 1657), 'win32_clipboard.GetClipboardText', 'GetClipboardText', ([], {}), '()\n', (1655, 1657), False, 'from win32_clipboard import GetClipboardText, SetClipboardText\n')] |
import logging
logging.disable(logging.CRITICAL)
import math
from tabulate import tabulate
from mjrl.utils.make_train_plots import make_train_plots
from mjrl.utils.gym_env import GymEnv
from mjrl.samplers.core import sample_paths
import numpy as np
import torch
import pickle
import imageio
import time as timer
import os
import copy
import matplotlib.pyplot as plt
try:
import exptools
from colorsys import hsv_to_rgb
import pyvista as pv
except ImportError:
exptools = None
def _load_latest_policy_and_logs(agent, *, policy_dir, logs_dir):
"""Loads the latest policy.
Returns the next step number to begin with.
"""
assert os.path.isdir(policy_dir), str(policy_dir)
assert os.path.isdir(logs_dir), str(logs_dir)
log_csv_path = os.path.join(logs_dir, 'log.csv')
if not os.path.exists(log_csv_path):
return 0 # fresh start
print("Reading: {}".format(log_csv_path))
agent.logger.read_log(log_csv_path)
last_step = agent.logger.max_len - 1
if last_step <= 0:
return 0 # fresh start
# find latest policy/baseline
i = last_step
while i >= 0:
policy_path = os.path.join(policy_dir, 'policy_{}.pickle'.format(i))
baseline_path = os.path.join(policy_dir, 'baseline_{}.pickle'.format(i))
if not os.path.isfile(policy_path):
i = i -1
continue
else:
print("Loaded last saved iteration: {}".format(i))
with open(policy_path, 'rb') as fp:
agent.policy = pickle.load(fp)
with open(baseline_path, 'rb') as fp:
agent.baseline = pickle.load(fp)
# additional
# global_status_path = os.path.join(policy_dir, 'global_status.pickle')
# with open(global_status_path, 'rb') as fp:
# agent.load_global_status( pickle.load(fp) )
agent.logger.shrink_to(i + 1)
assert agent.logger.max_len == i + 1
return agent.logger.max_len
# cannot find any saved policy
raise RuntimeError("Log file exists, but cannot find any saved policy.")
def save_voxel_visualization(obj_name, reset_mode_conf, reward_conf, obj_orientation, obj_relative_position, obj_scale, pc_frame, iternum, is_best_policy):
uniform_gt_data = np.load("/home/jianrenw/prox/tslam/assets/uniform_gt/uniform_{}_o3d.npz".format(obj_name))['pcd']
data_scale = uniform_gt_data * obj_scale
data_rotate = data_scale.copy()
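    # The three blocks below rotate the scaled ground-truth cloud by the
    # object's Euler orientation: about the X axis first, then Y, then Z.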
x = data_rotate[:, 0].copy()
y = data_rotate[:, 1].copy()
z = data_rotate[:, 2].copy()
x_theta = obj_orientation[0]
data_rotate[:, 0] = x
data_rotate[:, 1] = y*math.cos(x_theta) - z*math.sin(x_theta)
data_rotate[:, 2] = y*math.sin(x_theta) + z*math.cos(x_theta)
x = data_rotate[:, 0].copy()
y = data_rotate[:, 1].copy()
z = data_rotate[:, 2].copy()
y_theta = obj_orientation[1]
data_rotate[:, 0] = x * math.cos(y_theta) + z * math.sin(y_theta)
data_rotate[:, 1] = y
data_rotate[:, 2] = z * math.cos(y_theta) - x * math.sin(y_theta)
x = data_rotate[:, 0].copy()
y = data_rotate[:, 1].copy()
z = data_rotate[:, 2].copy()
z_theta = obj_orientation[2]
data_rotate[:, 0] = x * math.cos(z_theta) - y * math.sin(z_theta)
data_rotate[:, 1] = x * math.sin(z_theta) + y * math.cos(z_theta)
data_rotate[:, 2] = z
data_trans = data_rotate.copy()
data_trans[:, 0] += obj_relative_position[0]
data_trans[:, 1] += obj_relative_position[1]
data_trans[:, 2] += obj_relative_position[2]
uniform_gt_data = data_trans.copy()
data = pc_frame
resolution = 0.01
sep_x = math.ceil(0.3 / resolution)
sep_y = math.ceil(0.3 / resolution)
sep_z = math.ceil(0.3 / resolution)
x, y, z = np.indices((sep_x, sep_y, sep_z))
cube1 = (x<0) & (y <1) & (z<1)
gtcube = (x<0) & (y <1) & (z<1)
voxels = cube1
gt_voxels = gtcube
# draw gt
gt_map_list = []
for idx,val in enumerate(uniform_gt_data):
idx_x = math.floor((val[0] + 0.15) / resolution)
idx_y = math.floor((val[1] + 0.15) / resolution)
idx_z = math.floor((val[2]) / resolution)
# if idx_z > 6:
# continue
name = str(idx_x) + '_' + str(idx_y) + '_' + str(idx_z)
if name not in gt_map_list:
gt_map_list.append(name)
cube = (x < idx_x + 1) & (y < idx_y + 1) & (z < idx_z + 1) & (x >= idx_x) & (y >= idx_y) & (z >= idx_z)
# combine the objects into a single boolean array
gt_voxels += cube
# draw cuboids in the top left and bottom right corners, and a link between them
map_list = []
for idx,val in enumerate(data):
idx_x = math.floor((val[0] + 0.15) / resolution)
idx_y = math.floor((val[1] + 0.15) / resolution)
idx_z = math.floor((val[2]) / resolution)
# if idx_z > 6:
# continue
name = str(idx_x) + '_' + str(idx_y) + '_' + str(idx_z)
if name not in map_list and name in gt_map_list:
map_list.append(name)
cube = (x < idx_x + 1) & (y < idx_y + 1) & (z < idx_z + 1) & (x >= idx_x) & (y >= idx_y) & (z >= idx_z)
# combine the objects into a single boolean array
voxels += cube
# gt_obj4:668
occupancy = len(map_list) / len(gt_map_list)
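    # occupancy is the fraction of ground-truth surface voxels (1 cm bins over
    # a 0.3 m cube) that the observed point cloud also reached, i.e.
    # len(map_list) / len(gt_map_list).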
# print(len(map_list) / sep_x / sep_y / sep_z )
is_best_reconstruct = True
files = os.listdir('/home/jianrenw/prox/tslam/data/result/best_eval/{}/{}/{}/'.format(obj_name, reset_mode_conf, reward_conf))
for file in files:
if "overlap" in file and "png" in file:
file_str = str(file)
previous_occup = file_str[(file_str.index("-")+1):file_str.index(".png")]
if occupancy < float(previous_occup):
is_best_reconstruct = False
# obj_name = "obj{}".format(obj_name)
# set the colors of each object
vis_voxel = gt_voxels | voxels
colors = np.empty(vis_voxel.shape, dtype=object)
colors[gt_voxels] = 'white'
colors[voxels] = 'cyan'
# and plot everything
ax = plt.figure().add_subplot(projection='3d')
ax.set_zlim(1,30)
ax.voxels(vis_voxel, facecolors=colors, edgecolor='g', alpha=.4, linewidth=.05)
# plt.savefig('uniform_gtbox_{}.png'.format(step))
if is_best_policy or is_best_reconstruct:
plt.savefig('/home/jianrenw/prox/tslam/data/result/best_eval/{}/{}/{}/bp{}_br{}_overlap-{}.png'.format(obj_name, reset_mode_conf, reward_conf, is_best_policy, is_best_reconstruct, occupancy))
plt.savefig('voxel/iter-{}-{}-overlap-{}.png'.format(iternum, obj_name, occupancy))
plt.close()
ax = plt.figure().add_subplot(projection='3d')
ax.set_zlim(1,30)
ax.voxels(gt_voxels, facecolors=colors, edgecolor='g', alpha=.4, linewidth=.05)
if is_best_policy or is_best_reconstruct:
plt.savefig('/home/jianrenw/prox/tslam/data/result/best_eval/{}/{}/{}/gt.png'.format(obj_name, reset_mode_conf, reward_conf))
plt.savefig('voxel/iter-{}-{}-gt.png'.format(iternum, obj_name))
plt.close()
ax = plt.figure().add_subplot(projection='3d')
ax.set_zlim(1,30)
ax.voxels(voxels, facecolors=colors, edgecolor='g', alpha=.4, linewidth=.05)
if is_best_policy or is_best_reconstruct:
plt.savefig('/home/jianrenw/prox/tslam/data/result/best_eval/{}/{}/{}/bp{}_br{}_exp.png'.format(obj_name, reset_mode_conf, reward_conf, is_best_policy, is_best_reconstruct))
plt.savefig('voxel/iter-{}-{}-exp.png'.format(iternum, obj_name))
plt.close()
return is_best_reconstruct, occupancy
def train_agent(job_name, agent,
seed = 0,
niter = 101,
gamma = 0.995,
gae_lambda = None,
num_cpu = 16,
sample_mode = 'trajectories',
horizon= int(150),
num_traj = 50,
num_samples = 50000, # has precedence, used with sample_mode = 'samples'
save_freq = 10,
evaluation_rollouts = None,
plot_keys = ['stoc_pol_mean'],
env_kwargs= dict(),
visualize_kwargs= dict(),
sample_paths_kwargs= dict(),
):
print("num_cpu{}".format(num_cpu))
np.random.seed(seed)
if os.path.isdir(job_name) == False:
os.mkdir(job_name)
previous_dir = os.getcwd()
obj_name = env_kwargs["obj_name"]
reset_mode_conf = env_kwargs["reset_mode"]
reward_conf = "cf{}knn{}voxel{}".format(env_kwargs["chamfer_r_factor"], env_kwargs["knn_r_factor"], env_kwargs["new_voxel_r_factor"])
os.chdir(job_name) # important! we are now in the directory to save data
if os.path.isdir('iterations') == False: os.mkdir('iterations')
if os.path.isdir('2dpointcloud') == False: os.mkdir('2dpointcloud')
if os.path.isdir('pointcloudnpz') == False: os.mkdir('pointcloudnpz')
if os.path.isdir('/home/jianrenw/prox/tslam/data/result/best_policy/{}/{}/{}'.format(obj_name, reset_mode_conf, reward_conf)) == False: os.makedirs('/home/jianrenw/prox/tslam/data/result/best_policy/{}/{}/{}'.format(obj_name, reset_mode_conf, reward_conf))
if os.path.isdir('/home/jianrenw/prox/tslam/data/result/best_eval/{}/{}/{}'.format(obj_name, reset_mode_conf, reward_conf)) == False: os.makedirs('/home/jianrenw/prox/tslam/data/result/best_eval/{}/{}/{}'.format(obj_name, reset_mode_conf, reward_conf))
if os.path.isdir('voxel') == False: os.mkdir('voxel')
if os.path.isdir('logs') == False and agent.save_logs == True: os.mkdir('logs')
best_policy = copy.deepcopy(agent.policy)
best_perf = -1e8
train_curve = best_perf*np.ones(niter)
mean_pol_perf = 0.0
e = GymEnv(agent.env.env_id, env_kwargs)
# Load from any existing checkpoint, policy, statistics, etc.
# Why no checkpointing.. :(
i_start = _load_latest_policy_and_logs(agent,
policy_dir='iterations',
logs_dir='logs')
if i_start:
print("Resuming from an existing job folder ...")
for i in range(i_start, niter):
print("......................................................................................")
print("ITERATION : %i " % i)
is_best_policy = False
if train_curve[i-1] > best_perf:
if exptools: exptools.logging.logger.log_text("update best_policy")
best_policy = copy.deepcopy(agent.policy)
best_perf = train_curve[i-1]
is_best_policy = True
N = num_traj if sample_mode == 'trajectories' else num_samples
stats = agent.train_step(
N=N,
sample_mode=sample_mode,
horizon= horizon,
gamma=gamma,
gae_lambda=gae_lambda,
num_cpu=num_cpu,
env_kwargs= env_kwargs,
sample_paths_kwargs= sample_paths_kwargs,
)
train_curve[i] = stats[0]
if evaluation_rollouts is not None and evaluation_rollouts > 0:
print("Performing evaluation rollouts ........")
eval_paths = sample_paths(
num_traj=evaluation_rollouts,
env=e.env_id,
policy=agent.policy,
eval_mode=True,
base_seed=seed,
num_cpu=num_cpu,
env_kwargs= env_kwargs,
**sample_paths_kwargs)
mean_pol_perf = np.mean([np.sum(path['rewards']) for path in eval_paths])
if agent.save_logs:
agent.logger.log_kv('eval_score', mean_pol_perf)
if exptools: exptools.logging.logger.log_scalar('eval_score', mean_pol_perf, i)
if exptools:
env_infos = [path["env_infos"] for path in eval_paths] # a list of dict
rewards = dict()
total_points = list()
if env_infos:
# get decomposed reward statistics
keys = [k for k in env_infos[0].keys() if "_p" in k[-2:] or "_r" in k[-2:] or "occupancy" in k]
for k in keys:
rewards[k] = list()
for env_info in env_infos:
rewards[k].append(env_info[k])
for env_info in env_infos:
total_points.append(len(env_info["pointcloud"]))
for k, v in rewards.items():
exptools.logging.logger.log_scalar_batch(k, v, i)
exptools.logging.logger.log_scalar_batch("total_num_points", total_points, i)
print(">>> finish evaluation rollouts")
if (i % save_freq == 0 and i > 0):
if agent.save_logs:
agent.logger.save_log('logs/')
make_train_plots(log=agent.logger.log, keys=plot_keys, save_loc='logs/')
obj_orientation = env_kwargs["obj_orientation"]
obj_relative_position = env_kwargs["obj_relative_position"]
obj_scale = env_kwargs["obj_scale"]
policy_file = 'policy_%i.pickle' % i
baseline_file = 'baseline_%i.pickle' % i
pickle.dump(agent.policy, open('iterations/' + policy_file, 'wb'))
pickle.dump(agent.baseline, open('iterations/' + baseline_file, 'wb'))
pickle.dump(best_policy, open('iterations/best_policy.pickle', 'wb'))
pickle.dump(agent.global_status, open('iterations/global_status.pickle', 'wb'))
# save videos and pointcloud and reconstruted mesh
if exptools:
video, env_infos = e.visualize_policy_offscreen(
policy= agent.policy,
**visualize_kwargs,
) # (T, C, H, W)
video_explore, env_infos_explore = e.visualize_policy_explore(
policy= agent.policy,
**visualize_kwargs,
) # (T, C, H, W)
pc_frame = np.array(env_infos[-1]["pointcloud"] if len(env_infos[-1]["pointcloud"]) > 0 else np.empty((0, 3)))
# 3d voxel visualization
is_best_reconstruct, occupancy = save_voxel_visualization(obj_name, reset_mode_conf, reward_conf, obj_orientation, obj_relative_position, obj_scale, pc_frame, i, is_best_policy)
if is_best_policy or is_best_reconstruct:
pickle.dump(best_policy, open('/home/jianrenw/prox/tslam/data/result/best_policy/{}/{}/{}/bp{}_br{}_best_policy.pickle'.format(obj_name, reset_mode_conf, reward_conf, is_best_policy, is_best_reconstruct), 'wb'))
if is_best_policy or is_best_reconstruct:
np.savez_compressed("pointcloudnpz/alpha_pointcloud_"+str(i)+".npz",pcd=pc_frame)
np.savez_compressed("/home/jianrenw/prox/tslam/data/result/best_eval/{}/{}/{}/bp{}_br{}_alpha_pointcloud_overlap-{}.npz".format(obj_name, reset_mode_conf, reward_conf, is_best_policy, is_best_reconstruct, occupancy), pcd=pc_frame)
# else:
# np.savez_compressed("pointcloudnpz/pointcloud_"+str(i)+".npz",pcd=pc_frame)
# pc_frames.append(pc_frame)
ax = plt.axes()
ax.scatter(pc_frame[:, 0], pc_frame[:, 1], cmap='viridis', linewidth=0.5)
if is_best_policy or is_best_reconstruct:
plt.savefig("2dpointcloud/alpha_{}.png".format('2dpointcloud' + str(i)))
plt.savefig("/home/jianrenw/prox/tslam/data/result/best_eval/{}/{}/{}/bp{}_br{}_alpha_2dpointcloud_overlap-{}.png".format(obj_name, reset_mode_conf, reward_conf, is_best_policy, is_best_reconstruct, occupancy))
# else:
# plt.savefig("2dpointcloud/{}.png".format('2dpointcloud' + str(i)))
plt.close()
# =======================================================
# if obj_name in ["airplane", "apple", "glass", "cup"]:
exptools.logging.logger.record_image("rendered", video[-1], i)
exptools.logging.logger.record_gif("rendered", video, i)
# exptools.logging.logger.record_image("rendered_explore", video_explore[-1], i)
# exptools.logging.logger.record_gif("rendered_explore", video_explore, i)
# print results to console
if i == 0:
result_file = open('results.txt', 'w')
print("Iter | Stoc Pol | Mean Pol | Best (Stoc) \n")
result_file.write("Iter | Sampling Pol | Evaluation Pol | Best (Sampled) \n")
result_file.close()
result_file = open('results.txt', 'a')
result_file.write("%4i %5.2f %5.2f %5.2f \n" % (i, train_curve[i], mean_pol_perf, best_perf))
result_file.close()
if agent.save_logs:
print_data = sorted(filter(lambda v: np.asarray(v[1]).size == 1,
agent.logger.get_current_log().items()))
print(tabulate(print_data))
if exptools:
exptools.logging.logger.log_scalar("Iter", i, i)
exptools.logging.logger.log_scalar("SamplingPol", train_curve[i], i)
exptools.logging.logger.log_scalar("EvaluationPol", mean_pol_perf, i)
exptools.logging.logger.log_scalar("BestSampled", best_perf, i)
exptools.logging.logger.dump_data()
# final save
pickle.dump(best_policy, open('iterations/best_policy.pickle', 'wb'))
if agent.save_logs:
agent.logger.save_log('logs/')
make_train_plots(log=agent.logger.log, keys=plot_keys, save_loc='logs/')
os.chdir(previous_dir)
| [
"math.floor",
"math.cos",
"copy.deepcopy",
"os.path.exists",
"exptools.logging.logger.dump_data",
"numpy.asarray",
"matplotlib.pyplot.close",
"mjrl.utils.gym_env.GymEnv",
"os.path.isdir",
"numpy.empty",
"numpy.random.seed",
"os.mkdir",
"logging.disable",
"tabulate.tabulate",
"exptools.logging.logger.log_text",
"exptools.logging.logger.log_scalar",
"numpy.ones",
"pickle.load",
"mjrl.samplers.core.sample_paths",
"numpy.indices",
"os.path.isfile",
"matplotlib.pyplot.axes",
"exptools.logging.logger.record_gif",
"exptools.logging.logger.log_scalar_batch",
"math.ceil",
"os.path.join",
"os.getcwd",
"os.chdir",
"mjrl.utils.make_train_plots.make_train_plots",
"matplotlib.pyplot.figure",
"numpy.sum",
"math.sin",
"exptools.logging.logger.record_image"
]
| [((15, 48), 'logging.disable', 'logging.disable', (['logging.CRITICAL'], {}), '(logging.CRITICAL)\n', (30, 48), False, 'import logging\n'), ((659, 684), 'os.path.isdir', 'os.path.isdir', (['policy_dir'], {}), '(policy_dir)\n', (672, 684), False, 'import os\n'), ((713, 736), 'os.path.isdir', 'os.path.isdir', (['logs_dir'], {}), '(logs_dir)\n', (726, 736), False, 'import os\n'), ((772, 805), 'os.path.join', 'os.path.join', (['logs_dir', '"""log.csv"""'], {}), "(logs_dir, 'log.csv')\n", (784, 805), False, 'import os\n'), ((3609, 3636), 'math.ceil', 'math.ceil', (['(0.3 / resolution)'], {}), '(0.3 / resolution)\n', (3618, 3636), False, 'import math\n'), ((3649, 3676), 'math.ceil', 'math.ceil', (['(0.3 / resolution)'], {}), '(0.3 / resolution)\n', (3658, 3676), False, 'import math\n'), ((3689, 3716), 'math.ceil', 'math.ceil', (['(0.3 / resolution)'], {}), '(0.3 / resolution)\n', (3698, 3716), False, 'import math\n'), ((3731, 3764), 'numpy.indices', 'np.indices', (['(sep_x, sep_y, sep_z)'], {}), '((sep_x, sep_y, sep_z))\n', (3741, 3764), True, 'import numpy as np\n'), ((5898, 5937), 'numpy.empty', 'np.empty', (['vis_voxel.shape'], {'dtype': 'object'}), '(vis_voxel.shape, dtype=object)\n', (5906, 5937), True, 'import numpy as np\n'), ((6579, 6590), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (6588, 6590), True, 'import matplotlib.pyplot as plt\n'), ((7006, 7017), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (7015, 7017), True, 'import matplotlib.pyplot as plt\n'), ((7479, 7490), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (7488, 7490), True, 'import matplotlib.pyplot as plt\n'), ((8228, 8248), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (8242, 8248), True, 'import numpy as np\n'), ((8336, 8347), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (8345, 8347), False, 'import os\n'), ((8575, 8593), 'os.chdir', 'os.chdir', (['job_name'], {}), '(job_name)\n', (8583, 8593), False, 'import os\n'), ((9540, 9567), 'copy.deepcopy', 'copy.deepcopy', (['agent.policy'], {}), '(agent.policy)\n', (9553, 9567), False, 'import copy\n'), ((9664, 9700), 'mjrl.utils.gym_env.GymEnv', 'GymEnv', (['agent.env.env_id', 'env_kwargs'], {}), '(agent.env.env_id, env_kwargs)\n', (9670, 9700), False, 'from mjrl.utils.gym_env import GymEnv\n'), ((17611, 17633), 'os.chdir', 'os.chdir', (['previous_dir'], {}), '(previous_dir)\n', (17619, 17633), False, 'import os\n'), ((817, 845), 'os.path.exists', 'os.path.exists', (['log_csv_path'], {}), '(log_csv_path)\n', (831, 845), False, 'import os\n'), ((3978, 4018), 'math.floor', 'math.floor', (['((val[0] + 0.15) / resolution)'], {}), '((val[0] + 0.15) / resolution)\n', (3988, 4018), False, 'import math\n'), ((4035, 4075), 'math.floor', 'math.floor', (['((val[1] + 0.15) / resolution)'], {}), '((val[1] + 0.15) / resolution)\n', (4045, 4075), False, 'import math\n'), ((4092, 4123), 'math.floor', 'math.floor', (['(val[2] / resolution)'], {}), '(val[2] / resolution)\n', (4102, 4123), False, 'import math\n'), ((4662, 4702), 'math.floor', 'math.floor', (['((val[0] + 0.15) / resolution)'], {}), '((val[0] + 0.15) / resolution)\n', (4672, 4702), False, 'import math\n'), ((4719, 4759), 'math.floor', 'math.floor', (['((val[1] + 0.15) / resolution)'], {}), '((val[1] + 0.15) / resolution)\n', (4729, 4759), False, 'import math\n'), ((4776, 4807), 'math.floor', 'math.floor', (['(val[2] / resolution)'], {}), '(val[2] / resolution)\n', (4786, 4807), False, 'import math\n'), ((8256, 8279), 'os.path.isdir', 'os.path.isdir', (['job_name'], {}), 
'(job_name)\n', (8269, 8279), False, 'import os\n'), ((8298, 8316), 'os.mkdir', 'os.mkdir', (['job_name'], {}), '(job_name)\n', (8306, 8316), False, 'import os\n'), ((8655, 8682), 'os.path.isdir', 'os.path.isdir', (['"""iterations"""'], {}), "('iterations')\n", (8668, 8682), False, 'import os\n'), ((8693, 8715), 'os.mkdir', 'os.mkdir', (['"""iterations"""'], {}), "('iterations')\n", (8701, 8715), False, 'import os\n'), ((8723, 8752), 'os.path.isdir', 'os.path.isdir', (['"""2dpointcloud"""'], {}), "('2dpointcloud')\n", (8736, 8752), False, 'import os\n'), ((8763, 8787), 'os.mkdir', 'os.mkdir', (['"""2dpointcloud"""'], {}), "('2dpointcloud')\n", (8771, 8787), False, 'import os\n'), ((8795, 8825), 'os.path.isdir', 'os.path.isdir', (['"""pointcloudnpz"""'], {}), "('pointcloudnpz')\n", (8808, 8825), False, 'import os\n'), ((8836, 8861), 'os.mkdir', 'os.mkdir', (['"""pointcloudnpz"""'], {}), "('pointcloudnpz')\n", (8844, 8861), False, 'import os\n'), ((9387, 9409), 'os.path.isdir', 'os.path.isdir', (['"""voxel"""'], {}), "('voxel')\n", (9400, 9409), False, 'import os\n'), ((9420, 9437), 'os.mkdir', 'os.mkdir', (['"""voxel"""'], {}), "('voxel')\n", (9428, 9437), False, 'import os\n'), ((9505, 9521), 'os.mkdir', 'os.mkdir', (['"""logs"""'], {}), "('logs')\n", (9513, 9521), False, 'import os\n'), ((9617, 9631), 'numpy.ones', 'np.ones', (['niter'], {}), '(niter)\n', (9624, 9631), True, 'import numpy as np\n'), ((17534, 17606), 'mjrl.utils.make_train_plots.make_train_plots', 'make_train_plots', ([], {'log': 'agent.logger.log', 'keys': 'plot_keys', 'save_loc': '"""logs/"""'}), "(log=agent.logger.log, keys=plot_keys, save_loc='logs/')\n", (17550, 17606), False, 'from mjrl.utils.make_train_plots import make_train_plots\n'), ((1310, 1337), 'os.path.isfile', 'os.path.isfile', (['policy_path'], {}), '(policy_path)\n', (1324, 1337), False, 'import os\n'), ((1530, 1545), 'pickle.load', 'pickle.load', (['fp'], {}), '(fp)\n', (1541, 1545), False, 'import pickle\n'), ((1621, 1636), 'pickle.load', 'pickle.load', (['fp'], {}), '(fp)\n', (1632, 1636), False, 'import pickle\n'), ((2626, 2643), 'math.cos', 'math.cos', (['x_theta'], {}), '(x_theta)\n', (2634, 2643), False, 'import math\n'), ((2648, 2665), 'math.sin', 'math.sin', (['x_theta'], {}), '(x_theta)\n', (2656, 2665), False, 'import math\n'), ((2692, 2709), 'math.sin', 'math.sin', (['x_theta'], {}), '(x_theta)\n', (2700, 2709), False, 'import math\n'), ((2714, 2731), 'math.cos', 'math.cos', (['x_theta'], {}), '(x_theta)\n', (2722, 2731), False, 'import math\n'), ((2893, 2910), 'math.cos', 'math.cos', (['y_theta'], {}), '(y_theta)\n', (2901, 2910), False, 'import math\n'), ((2917, 2934), 'math.sin', 'math.sin', (['y_theta'], {}), '(y_theta)\n', (2925, 2934), False, 'import math\n'), ((2989, 3006), 'math.cos', 'math.cos', (['y_theta'], {}), '(y_theta)\n', (2997, 3006), False, 'import math\n'), ((3013, 3030), 'math.sin', 'math.sin', (['y_theta'], {}), '(y_theta)\n', (3021, 3030), False, 'import math\n'), ((3192, 3209), 'math.cos', 'math.cos', (['z_theta'], {}), '(z_theta)\n', (3200, 3209), False, 'import math\n'), ((3216, 3233), 'math.sin', 'math.sin', (['z_theta'], {}), '(z_theta)\n', (3224, 3233), False, 'import math\n'), ((3262, 3279), 'math.sin', 'math.sin', (['z_theta'], {}), '(z_theta)\n', (3270, 3279), False, 'import math\n'), ((3286, 3303), 'math.cos', 'math.cos', (['z_theta'], {}), '(z_theta)\n', (3294, 3303), False, 'import math\n'), ((6033, 6045), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (6043, 6045), True, 'import matplotlib.pyplot 
as plt\n'), ((6601, 6613), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (6611, 6613), True, 'import matplotlib.pyplot as plt\n'), ((7028, 7040), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (7038, 7040), True, 'import matplotlib.pyplot as plt\n'), ((9445, 9466), 'os.path.isdir', 'os.path.isdir', (['"""logs"""'], {}), "('logs')\n", (9458, 9466), False, 'import os\n'), ((10408, 10435), 'copy.deepcopy', 'copy.deepcopy', (['agent.policy'], {}), '(agent.policy)\n', (10421, 10435), False, 'import copy\n'), ((11085, 11267), 'mjrl.samplers.core.sample_paths', 'sample_paths', ([], {'num_traj': 'evaluation_rollouts', 'env': 'e.env_id', 'policy': 'agent.policy', 'eval_mode': '(True)', 'base_seed': 'seed', 'num_cpu': 'num_cpu', 'env_kwargs': 'env_kwargs'}), '(num_traj=evaluation_rollouts, env=e.env_id, policy=agent.\n policy, eval_mode=True, base_seed=seed, num_cpu=num_cpu, env_kwargs=\n env_kwargs, **sample_paths_kwargs)\n', (11097, 11267), False, 'from mjrl.samplers.core import sample_paths\n'), ((17035, 17083), 'exptools.logging.logger.log_scalar', 'exptools.logging.logger.log_scalar', (['"""Iter"""', 'i', 'i'], {}), "('Iter', i, i)\n", (17069, 17083), False, 'import exptools\n'), ((17096, 17164), 'exptools.logging.logger.log_scalar', 'exptools.logging.logger.log_scalar', (['"""SamplingPol"""', 'train_curve[i]', 'i'], {}), "('SamplingPol', train_curve[i], i)\n", (17130, 17164), False, 'import exptools\n'), ((17177, 17246), 'exptools.logging.logger.log_scalar', 'exptools.logging.logger.log_scalar', (['"""EvaluationPol"""', 'mean_pol_perf', 'i'], {}), "('EvaluationPol', mean_pol_perf, i)\n", (17211, 17246), False, 'import exptools\n'), ((17259, 17322), 'exptools.logging.logger.log_scalar', 'exptools.logging.logger.log_scalar', (['"""BestSampled"""', 'best_perf', 'i'], {}), "('BestSampled', best_perf, i)\n", (17293, 17322), False, 'import exptools\n'), ((17335, 17370), 'exptools.logging.logger.dump_data', 'exptools.logging.logger.dump_data', ([], {}), '()\n', (17368, 17370), False, 'import exptools\n'), ((10327, 10381), 'exptools.logging.logger.log_text', 'exptools.logging.logger.log_text', (['"""update best_policy"""'], {}), "('update best_policy')\n", (10359, 10381), False, 'import exptools\n'), ((12492, 12569), 'exptools.logging.logger.log_scalar_batch', 'exptools.logging.logger.log_scalar_batch', (['"""total_num_points"""', 'total_points', 'i'], {}), "('total_num_points', total_points, i)\n", (12532, 12569), False, 'import exptools\n'), ((12761, 12833), 'mjrl.utils.make_train_plots.make_train_plots', 'make_train_plots', ([], {'log': 'agent.logger.log', 'keys': 'plot_keys', 'save_loc': '"""logs/"""'}), "(log=agent.logger.log, keys=plot_keys, save_loc='logs/')\n", (12777, 12833), False, 'from mjrl.utils.make_train_plots import make_train_plots\n'), ((15197, 15207), 'matplotlib.pyplot.axes', 'plt.axes', ([], {}), '()\n', (15205, 15207), True, 'import matplotlib.pyplot as plt\n'), ((15809, 15820), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (15818, 15820), True, 'import matplotlib.pyplot as plt\n'), ((15983, 16045), 'exptools.logging.logger.record_image', 'exptools.logging.logger.record_image', (['"""rendered"""', 'video[-1]', 'i'], {}), "('rendered', video[-1], i)\n", (16019, 16045), False, 'import exptools\n'), ((16062, 16118), 'exptools.logging.logger.record_gif', 'exptools.logging.logger.record_gif', (['"""rendered"""', 'video', 'i'], {}), "('rendered', video, i)\n", (16096, 16118), False, 'import exptools\n'), ((16980, 17000), 'tabulate.tabulate', 
'tabulate', (['print_data'], {}), '(print_data)\n', (16988, 17000), False, 'from tabulate import tabulate\n'), ((11425, 11448), 'numpy.sum', 'np.sum', (["path['rewards']"], {}), "(path['rewards'])\n", (11431, 11448), True, 'import numpy as np\n'), ((11600, 11666), 'exptools.logging.logger.log_scalar', 'exptools.logging.logger.log_scalar', (['"""eval_score"""', 'mean_pol_perf', 'i'], {}), "('eval_score', mean_pol_perf, i)\n", (11634, 11666), False, 'import exptools\n'), ((12426, 12475), 'exptools.logging.logger.log_scalar_batch', 'exptools.logging.logger.log_scalar_batch', (['k', 'v', 'i'], {}), '(k, v, i)\n', (12466, 12475), False, 'import exptools\n'), ((14037, 14053), 'numpy.empty', 'np.empty', (['(0, 3)'], {}), '((0, 3))\n', (14045, 14053), True, 'import numpy as np\n'), ((16854, 16870), 'numpy.asarray', 'np.asarray', (['v[1]'], {}), '(v[1])\n', (16864, 16870), True, 'import numpy as np\n')] |
from estimagic.inference.ml_covs import cov_cluster_robust
from estimagic.inference.ml_covs import cov_hessian
from estimagic.inference.ml_covs import cov_jacobian
from estimagic.inference.ml_covs import cov_robust
from estimagic.inference.ml_covs import cov_strata_robust
from estimagic.inference.shared import calculate_inference_quantities
from estimagic.inference.shared import check_is_optimized_and_derivative_case
from estimagic.inference.shared import get_derivative_case
from estimagic.inference.shared import get_internal_first_derivative
from estimagic.inference.shared import transform_covariance
from estimagic.optimization.optimize import maximize
from estimagic.parameters.parameter_conversion import get_derivative_conversion_function
from estimagic.parameters.process_constraints import process_constraints
from estimagic.shared.check_option_dicts import check_numdiff_options
from estimagic.shared.check_option_dicts import check_optimization_options
def estimate_ml(
loglike,
params,
optimize_options,
*,
constraints=None,
logging=False,
log_options=None,
loglike_kwargs=None,
derivative=None,
derivative_kwargs=None,
loglike_and_derivative=None,
loglike_and_derivative_kwargs=None,
numdiff_options=None,
jacobian=None,
jacobian_kwargs=None,
hessian=False,
hessian_kwargs=None,
ci_level=0.95,
n_samples=10_000,
bounds_handling="raise",
design_info=None,
):
"""Do a maximum likelihood (ml) estimation.
This is a high level interface of our lower level functions for maximization,
numerical differentiation and inference. It does the full workflow for maximum
likelihood estimation with just one function call.
While we have good defaults, you can still configure each aspect of each step
via the optional arguments of this function. If you find it easier to do the
"difficult" steps (mainly maximization and calculating numerical derivatives
of a potentially noisy function) separately, you can do so and just provide those
results as ``params``, ``jacobian`` and ``hessian``.
The docstring is aspirational and not all options are supported yet.
Args:
loglike (callable): Likelihood function that takes a params DataFrame (and
potentially other keyword arguments) and returns a dictionary that has at
least the entries "value" (a scalar float) and "contributions" (a 1d numpy
array or pandas Series) with the log likelihood contribution per individual.
params (pd.DataFrame): DataFrame where the "value" column contains the
estimated or start parameters of a likelihood model. See :ref:`params` for
details. If the supplied parameters are estimated parameters, set
optimize_options to False.
optimize_options (dict or False): Keyword arguments that govern the numerical
optimization. Valid entries are all arguments of
:func:`~estimagic.optimization.optimize.minimize` except for criterion,
derivative, criterion_and_derivative and params. If you pass False as
optimize_options you signal that ``params`` are already the optimal
parameters and no numerical optimization is needed.
constraints (list): List with constraint dictionaries.
See .. _link: ../../docs/source/how_to_guides/how_to_use_constraints.ipynb
logging (pathlib.Path, str or False): Path to sqlite3 file (which typically has
the file extension ``.db``. If the file does not exist, it will be created.
The dashboard can only be used when logging is used.
log_options (dict): Additional keyword arguments to configure the logging.
- "fast_logging": A boolean that determines if "unsafe" settings are used
to speed up write processes to the database. This should only be used for
very short running criterion functions where the main purpose of the log
is a real-time dashboard and it would not be catastrophic to get a
corrupted database in case of a sudden system shutdown. If one evaluation
of the criterion function (and gradient if applicable) takes more than
100 ms, the logging overhead is negligible.
- "if_table_exists": (str) One of "extend", "replace", "raise". What to
do if the tables we want to write to already exist. Default "extend".
- "if_database_exists": (str): One of "extend", "replace", "raise". What to
do if the database we want to write to already exists. Default "extend".
loglike_kwargs (dict): Additional keyword arguments for loglike.
derivative (callable): Function takes params and potentially other keyword
arguments and calculates the first derivative of loglike. It can either
return a numpy array or pandas Series/DataFrame with the derivative or
a dictionary with derivatives of each output of loglike. If loglike
returns a dict but derivative does not, it is your responsibility to
make sure that the correct derivative for the numerical optimizers you are
using is returned.
        derivative_kwargs (dict): Additional keyword arguments for derivative.
loglike_and_derivative (callable): Return a tuple consisting of the result
of loglike and the result of derivative. Only use this if you can exploit
synergies in the calculation of loglike and derivative.
loglike_and_derivative_kwargs (dict): Additional keyword arguments for
loglike_and_derivative.
numdiff_options (dict): Keyword arguments for the calculation of numerical
derivatives for the calculation of standard errors. See
:ref:`first_derivative` for details.
jacobian (callable or pandas.DataFrame or False): A function that takes
``params`` and potentially other keyword arguments and returns the jacobian
of loglike["contributions"] with respect to the params. Alternatively, you
can pass a pandas.DataFrame with the Jacobian at the optimal parameters.
This is only possible if you pass ``optimize_options=False``. Note that you
only need to pass a Jacobian function if you have a closed form Jacobian but
decided not to return it as part of ``derivative`` (e.g. because you use
a scalar optimizer and can calculate a gradient in a way that is faster
than calculating and summing the Jacobian). If you pass None, a numerical
Jacobian will be calculated. If you pass ``False``, you signal that no
Jacobian should be calculated. Thus, no result that requires the Jacobian
will be calculated.
jacobian_kwargs (dict): Additional keyword arguments for the Jacobian function.
        hessian (callable or pandas.DataFrame or False): A function that takes
``params`` and potentially other keyword arguments and returns the Hessian
of loglike["value"] with respect to the params. Alternatively, you
can pass a pandas.DataFrame with the Hessian at the optimal parameters.
This is only possible if you pass ``optimize_options=False``. If you pass
None, a numerical Hessian will be calculated. If you pass ``False``, you
signal that no Hessian should be calculated. Thus, no result that requires
the Hessian will be calculated.
hessian_kwargs (dict): Additional keyword arguments for the Hessian function.
ci_level (float): Confidence level for the calculation of confidence intervals.
The default is 0.95.
n_samples (int): Number of samples used to transform the covariance matrix of
the internal parameter vector into the covariance matrix of the external
parameters. For background information about internal and external params
see :ref:`implementation_of_constraints`. This is only used if you have
specified constraints.
bounds_handling (str): One of "clip", "raise", "ignore". Determines how bounds
are handled. If "clip", confidence intervals are clipped at the bounds.
Standard errors are only adjusted if a sampling step is necessary due to
additional constraints. If "raise" and any lower or upper bound is binding,
we raise an Error. If "ignore", boundary problems are simply ignored.
design_info (pandas.DataFrame): DataFrame with one row per observation that
contains some or all of the variables "psu" (primary sampling unit),
"stratum" and "fpc" (finite population corrector). See
:ref:`robust_likelihood_inference` for details.
Returns:
dict: The estimated parameters, standard errors and covariance matrix of the
parameters.
"""
# ==================================================================================
# Check and process inputs
# ==================================================================================
is_optimized = optimize_options is False
check_optimization_options(
optimize_options,
usage="estimate_ml",
algorithm_mandatory=True,
)
jac_case = get_derivative_case(jacobian)
hess_case = get_derivative_case(hessian)
check_is_optimized_and_derivative_case(is_optimized, jac_case)
check_is_optimized_and_derivative_case(is_optimized, hess_case)
cov_cases = _get_cov_cases(jac_case, hess_case, design_info)
check_numdiff_options(numdiff_options, "estimate_ml")
numdiff_options = {} if numdiff_options in (None, False) else numdiff_options
constraints = [] if constraints is None else constraints
processed_constraints, _ = process_constraints(constraints, params)
# ==================================================================================
# Calculate estimates via maximization (if necessary)
# ==================================================================================
if is_optimized:
estimates = params
else:
opt_res = maximize(
criterion=loglike,
criterion_kwargs=loglike_kwargs,
params=params,
constraints=constraints,
derivative=derivative,
derivative_kwargs=derivative_kwargs,
criterion_and_derivative=loglike_and_derivative,
criterion_and_derivative_kwargs=loglike_and_derivative_kwargs,
logging=logging,
log_options=log_options,
**optimize_options,
)
estimates = opt_res["solution_params"]
# ==================================================================================
# Calculate internal jacobian
# ==================================================================================
deriv_to_internal = get_derivative_conversion_function(
params=params, constraints=constraints
)
if jac_case == "pre-calculated":
int_jac = deriv_to_internal(jacobian)
elif jac_case == "closed-form":
jacobian_kwargs = {} if jacobian_kwargs is None else jacobian_kwargs
_jac = jacobian(estimates, **jacobian_kwargs)
int_jac = deriv_to_internal(_jac)
    # For jac_case == "numerical" the Jacobian is obtained by numerical
    # differentiation below; for "skip" it stays None and Jacobian-based results
    # are simply not calculated.
elif jac_case == "numerical":
options = numdiff_options.copy()
options["key"] = "contributions"
deriv_res = get_internal_first_derivative(
func=loglike,
params=estimates,
constraints=constraints,
func_kwargs=loglike_kwargs,
numdiff_options=options,
)
int_jac = deriv_res["derivative"]
jac_numdiff_info = {k: v for k, v in deriv_res.items() if k != "derivative"}
else:
int_jac = None
# ==================================================================================
# Calculate internal Hessian (most of this is not yet implemented)
# ==================================================================================
if hess_case == "skip":
int_hess = None
elif hess_case == "numerical":
raise NotImplementedError("Numerical Hessian calculation is not yet supported.")
hess_numdiff_info = {}
elif hess_case in ("closed-form", "pre-calculated") and constraints:
raise NotImplementedError(
"Closed-form or pre-calculated Hessians are not yet compatible with "
"constraints."
)
else:
int_hess = hessian(estimates, **hessian_kwargs)
# ==================================================================================
# Calculate all available internal cov types
# ==================================================================================
int_covs = {}
if "jacobian" in cov_cases:
int_covs["cov_jacobian"] = cov_jacobian(int_jac)
if "hessian" in cov_cases:
int_covs["cov_hessian"] = cov_hessian(int_hess)
if "robust" in cov_cases:
int_covs["cov_robust"] = cov_robust(jac=int_jac, hess=int_hess)
if "cluster_robust" in cov_cases:
int_covs["cov_cluster_robust"] = cov_cluster_robust(
jac=int_jac, hess=int_hess, design_info=design_info
)
if "strata_robust" in cov_cases:
int_covs["cov_strata_robust"] = cov_strata_robust(
jac=int_jac, hess=int_hess, design_info=design_info
)
# ==================================================================================
# Calculate all available external covs and summaries
# ==================================================================================
covs = {}
summaries = {}
for case in cov_cases:
cov = transform_covariance(
params=estimates,
internal_cov=int_covs[f"cov_{case}"],
constraints=constraints,
n_samples=n_samples,
bounds_handling=bounds_handling,
)
summary = calculate_inference_quantities(
params=estimates,
free_cov=cov,
ci_level=ci_level,
)
covs[f"cov_{case}"] = cov
summaries[f"summary_{case}"] = summary
# ==================================================================================
# Calculate external jac and hess (if no transforming constraints)
# ==================================================================================
if not processed_constraints:
ext_jac = int_jac
ext_hess = int_hess
else:
ext_jac = "No external Jacobian defined due to constraints."
ext_hess = "No external Hessian defined due to constraints."
# ==================================================================================
# Construct output
# ==================================================================================
out = {
**summaries,
**covs,
"jacobian": ext_jac,
"hessian": ext_hess,
}
if not is_optimized:
out["optimize_res"] = opt_res
if jac_case == "numerical":
out["jacobian_numdiff_info"] = jac_numdiff_info
if hess_case == "numerical":
out["hessian_numdiff_info"] = hess_numdiff_info
return out
def _get_cov_cases(jac_case, hess_case, design_info):
if jac_case == "skip" and hess_case == "skip":
raise ValueError("Jacobian and Hessian cannot both be False.")
elif jac_case == "skip" and hess_case != "skip":
cases = ["hessian"]
elif hess_case == "skip" and jac_case != "skip":
cases = ["jacobian"]
else:
cases = ["jacobian", "hessian", "robust"]
if design_info is not None:
if "psu" in design_info:
cases.append("cluster_robust")
if {"strata", "psu", "fpc"}.issubset(design_info):
cases.append("strata_robust")
return cases
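

# Hedged usage sketch (not part of the module above): how estimate_ml could be
# called for a toy normal model. The data, the parameterisation via log_sd and
# the algorithm name "scipy_lbfgsb" are illustrative assumptions.
#
# import numpy as np
# import pandas as pd
# from scipy import stats
#
# rng = np.random.default_rng(0)
# data = rng.normal(loc=1.0, scale=2.0, size=500)
#
# def loglike(params):
#     mean = params.loc["mean", "value"]
#     sd = np.exp(params.loc["log_sd", "value"])
#     contributions = stats.norm.logpdf(data, loc=mean, scale=sd)
#     return {"value": contributions.sum(), "contributions": contributions}
#
# start_params = pd.DataFrame({"value": [0.0, 0.0]}, index=["mean", "log_sd"])
# res = estimate_ml(
#     loglike=loglike,
#     params=start_params,
#     optimize_options={"algorithm": "scipy_lbfgsb"},
# )
# res["summary_jacobian"]  # point estimates, standard errors, confidence intervals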
| [
"estimagic.inference.shared.get_derivative_case",
"estimagic.parameters.process_constraints.process_constraints",
"estimagic.inference.ml_covs.cov_jacobian",
"estimagic.inference.shared.get_internal_first_derivative",
"estimagic.shared.check_option_dicts.check_numdiff_options",
"estimagic.inference.shared.check_is_optimized_and_derivative_case",
"estimagic.inference.ml_covs.cov_hessian",
"estimagic.inference.ml_covs.cov_cluster_robust",
"estimagic.optimization.optimize.maximize",
"estimagic.shared.check_option_dicts.check_optimization_options",
"estimagic.inference.ml_covs.cov_strata_robust",
"estimagic.inference.shared.transform_covariance",
"estimagic.inference.ml_covs.cov_robust",
"estimagic.parameters.parameter_conversion.get_derivative_conversion_function",
"estimagic.inference.shared.calculate_inference_quantities"
]
| [((9264, 9359), 'estimagic.shared.check_option_dicts.check_optimization_options', 'check_optimization_options', (['optimize_options'], {'usage': '"""estimate_ml"""', 'algorithm_mandatory': '(True)'}), "(optimize_options, usage='estimate_ml',\n algorithm_mandatory=True)\n", (9290, 9359), False, 'from estimagic.shared.check_option_dicts import check_optimization_options\n'), ((9403, 9432), 'estimagic.inference.shared.get_derivative_case', 'get_derivative_case', (['jacobian'], {}), '(jacobian)\n', (9422, 9432), False, 'from estimagic.inference.shared import get_derivative_case\n'), ((9449, 9477), 'estimagic.inference.shared.get_derivative_case', 'get_derivative_case', (['hessian'], {}), '(hessian)\n', (9468, 9477), False, 'from estimagic.inference.shared import get_derivative_case\n'), ((9483, 9545), 'estimagic.inference.shared.check_is_optimized_and_derivative_case', 'check_is_optimized_and_derivative_case', (['is_optimized', 'jac_case'], {}), '(is_optimized, jac_case)\n', (9521, 9545), False, 'from estimagic.inference.shared import check_is_optimized_and_derivative_case\n'), ((9550, 9613), 'estimagic.inference.shared.check_is_optimized_and_derivative_case', 'check_is_optimized_and_derivative_case', (['is_optimized', 'hess_case'], {}), '(is_optimized, hess_case)\n', (9588, 9613), False, 'from estimagic.inference.shared import check_is_optimized_and_derivative_case\n'), ((9685, 9738), 'estimagic.shared.check_option_dicts.check_numdiff_options', 'check_numdiff_options', (['numdiff_options', '"""estimate_ml"""'], {}), "(numdiff_options, 'estimate_ml')\n", (9706, 9738), False, 'from estimagic.shared.check_option_dicts import check_numdiff_options\n'), ((9915, 9955), 'estimagic.parameters.process_constraints.process_constraints', 'process_constraints', (['constraints', 'params'], {}), '(constraints, params)\n', (9934, 9955), False, 'from estimagic.parameters.process_constraints import process_constraints\n'), ((11033, 11107), 'estimagic.parameters.parameter_conversion.get_derivative_conversion_function', 'get_derivative_conversion_function', ([], {'params': 'params', 'constraints': 'constraints'}), '(params=params, constraints=constraints)\n', (11067, 11107), False, 'from estimagic.parameters.parameter_conversion import get_derivative_conversion_function\n'), ((10270, 10622), 'estimagic.optimization.optimize.maximize', 'maximize', ([], {'criterion': 'loglike', 'criterion_kwargs': 'loglike_kwargs', 'params': 'params', 'constraints': 'constraints', 'derivative': 'derivative', 'derivative_kwargs': 'derivative_kwargs', 'criterion_and_derivative': 'loglike_and_derivative', 'criterion_and_derivative_kwargs': 'loglike_and_derivative_kwargs', 'logging': 'logging', 'log_options': 'log_options'}), '(criterion=loglike, criterion_kwargs=loglike_kwargs, params=params,\n constraints=constraints, derivative=derivative, derivative_kwargs=\n derivative_kwargs, criterion_and_derivative=loglike_and_derivative,\n criterion_and_derivative_kwargs=loglike_and_derivative_kwargs, logging=\n logging, log_options=log_options, **optimize_options)\n', (10278, 10622), False, 'from estimagic.optimization.optimize import maximize\n'), ((13074, 13095), 'estimagic.inference.ml_covs.cov_jacobian', 'cov_jacobian', (['int_jac'], {}), '(int_jac)\n', (13086, 13095), False, 'from estimagic.inference.ml_covs import cov_jacobian\n'), ((13161, 13182), 'estimagic.inference.ml_covs.cov_hessian', 'cov_hessian', (['int_hess'], {}), '(int_hess)\n', (13172, 13182), False, 'from estimagic.inference.ml_covs import cov_hessian\n'), ((13246, 
13284), 'estimagic.inference.ml_covs.cov_robust', 'cov_robust', ([], {'jac': 'int_jac', 'hess': 'int_hess'}), '(jac=int_jac, hess=int_hess)\n', (13256, 13284), False, 'from estimagic.inference.ml_covs import cov_robust\n'), ((13364, 13435), 'estimagic.inference.ml_covs.cov_cluster_robust', 'cov_cluster_robust', ([], {'jac': 'int_jac', 'hess': 'int_hess', 'design_info': 'design_info'}), '(jac=int_jac, hess=int_hess, design_info=design_info)\n', (13382, 13435), False, 'from estimagic.inference.ml_covs import cov_cluster_robust\n'), ((13535, 13605), 'estimagic.inference.ml_covs.cov_strata_robust', 'cov_strata_robust', ([], {'jac': 'int_jac', 'hess': 'int_hess', 'design_info': 'design_info'}), '(jac=int_jac, hess=int_hess, design_info=design_info)\n', (13552, 13605), False, 'from estimagic.inference.ml_covs import cov_strata_robust\n'), ((13940, 14104), 'estimagic.inference.shared.transform_covariance', 'transform_covariance', ([], {'params': 'estimates', 'internal_cov': "int_covs[f'cov_{case}']", 'constraints': 'constraints', 'n_samples': 'n_samples', 'bounds_handling': 'bounds_handling'}), "(params=estimates, internal_cov=int_covs[f'cov_{case}'],\n constraints=constraints, n_samples=n_samples, bounds_handling=\n bounds_handling)\n", (13960, 14104), False, 'from estimagic.inference.shared import transform_covariance\n'), ((14185, 14271), 'estimagic.inference.shared.calculate_inference_quantities', 'calculate_inference_quantities', ([], {'params': 'estimates', 'free_cov': 'cov', 'ci_level': 'ci_level'}), '(params=estimates, free_cov=cov, ci_level=\n ci_level)\n', (14215, 14271), False, 'from estimagic.inference.shared import calculate_inference_quantities\n'), ((11638, 11782), 'estimagic.inference.shared.get_internal_first_derivative', 'get_internal_first_derivative', ([], {'func': 'loglike', 'params': 'estimates', 'constraints': 'constraints', 'func_kwargs': 'loglike_kwargs', 'numdiff_options': 'options'}), '(func=loglike, params=estimates, constraints=\n constraints, func_kwargs=loglike_kwargs, numdiff_options=options)\n', (11667, 11782), False, 'from estimagic.inference.shared import get_internal_first_derivative\n')] |
import six
import chainer
import numpy as np
import chainer.links as L
import chainer.functions as F
import nutszebra_chainer
import functools
from collections import defaultdict
class Conv(nutszebra_chainer.Model):
def __init__(self, in_channel, out_channel, filter_size=(3, 3), stride=(1, 1), pad=(1, 1)):
super(Conv, self).__init__(
conv=L.Convolution2D(in_channel, out_channel, filter_size, stride, pad),
)
def weight_initialization(self):
self.conv.W.data = self.weight_relu_initialization(self.conv)
self.conv.b.data = self.bias_initialization(self.conv, constant=0)
def __call__(self, x, train=False):
return self.conv(x)
def count_parameters(self):
return functools.reduce(lambda a, b: a * b, self.conv.W.data.shape)
class Conv_ReLU_BN(nutszebra_chainer.Model):
def __init__(self, in_channel, out_channel, filter_size=(3, 3), stride=(1, 1), pad=(1, 1)):
super(Conv_ReLU_BN, self).__init__(
conv=L.Convolution2D(in_channel, out_channel, filter_size, stride, pad),
bn=L.BatchNormalization(out_channel),
)
def weight_initialization(self):
self.conv.W.data = self.weight_relu_initialization(self.conv)
self.conv.b.data = self.bias_initialization(self.conv, constant=0)
def __call__(self, x, train=False):
return self.bn(F.relu(self.conv(x)), test=not train)
def count_parameters(self):
return functools.reduce(lambda a, b: a * b, self.conv.W.data.shape)
class AppendixA(nutszebra_chainer.Model):
def __init__(self, category_num):
super(AppendixA, self).__init__()
out_channels = [36, 48, 36, 36, 48, 48, 48, 36, 36, 36, 36, 48, 48, 48, 48]
# 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14
skip_connections = [[0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0],
[0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1],
[0, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
]
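        # Read column-wise in the loop below: skip_connections[src][i] == 1 means
        # the output of conv `src` is concatenated (after zero padding to a common
        # spatial size) into the input of conv `i + 1`, on top of the direct
        # output of conv `i`.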
filters = [(3, 3), (3, 3), (3, 3), (5, 5), (3, 7), (7, 7), (7, 7), (7, 3), (7, 1), (7, 7), (5, 7), (7, 7), (7, 5), (7, 5), (7, 5)]
modules = []
in_channel = 3
for i in six.moves.range(len(out_channels)):
modules += [('conv{}'.format(i), Conv_ReLU_BN(in_channel, out_channels[i], filters[i], 1, 0))]
in_channel = int(np.sum([out_channels[ii] for ii, s in enumerate(skip_connections) if s[i] == 1])) + out_channels[i]
modules += [('linear', Conv(out_channels[-1], category_num, 1, 1, 0))]
# register layers
[self.add_link(*link) for link in modules]
self.modules = modules
self.category_num = category_num
self.out_channels = out_channels
self.skip_connections = skip_connections
self.filters = filters
self.name = 'appndix_a_{}'.format(category_num)
def weight_initialization(self):
[link.weight_initialization() for _, link in self.modules]
def count_parameters(self):
return int(np.sum([link.count_parameters() for _, link in self.modules]))
@staticmethod
def _zero_pads(x, pad, axis):
if type(x.data) is not np.ndarray:
pad.to_gpu()
return F.concat((x, pad), axis=axis)
@staticmethod
def zero_pads(x, sizes):
batch, channel, height, width = x.data.shape
diff_height = sizes[2] - height
diff_width = sizes[3] - width
# pad along with height
if diff_height >= 1:
pad = chainer.Variable(np.zeros((batch, channel, diff_height, width), dtype=x.dtype), volatile=x.volatile)
x = AppendixA._zero_pads(x, pad, axis=2)
_, _, height, _ = x.data.shape
# pad along with width
if diff_width >= 1:
pad = chainer.Variable(np.zeros((batch, channel, height, diff_width), dtype=x.dtype), volatile=x.volatile)
x = AppendixA._zero_pads(x, pad, axis=3)
return x
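
    # The helpers below allow feature maps from different layers to be
    # concatenated along the channel axis even when their spatial sizes differ:
    # each tensor is zero padded up to the largest height and width among the
    # inputs before F.concat is applied.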
    @staticmethod
    def _max(a, b):
        # Element-wise maximum of two (batch, channel, height, width) shape tuples.
        return (max(a[0], b[0]), max(a[1], b[1]), max(a[2], b[2]), max(a[3], b[3]))
@staticmethod
def concatenate(X):
sizes = (0, 0, 0, 0)
for x in X:
sizes = AppendixA._max(sizes, x.data.shape)
X = [AppendixA.zero_pads(x, sizes) for x in X]
return F.concat(X, axis=1)
def __call__(self, x, train=False):
x = [x]
outputs = []
for i in six.moves.range(len(self.out_channels)):
x = self['conv{}'.format(i)](self.concatenate(x), train=train)
outputs.append(x)
x = [outputs[ii] for ii, s in enumerate(self.skip_connections) if s[i] == 1] + [outputs[i]]
x = outputs[-1]
batch, channels, height, width = x.data.shape
x = F.reshape(F.average_pooling_2d(x, (height, width)), (batch, channels, 1, 1))
return F.reshape(self.linear(x, train), (batch, self.category_num))
def calc_loss(self, y, t):
loss = F.softmax_cross_entropy(y, t)
return loss
def accuracy(self, y, t, xp=np):
y.to_cpu()
t.to_cpu()
indices = np.where((t.data == np.argmax(y.data, axis=1)) == True)[0]
accuracy = defaultdict(int)
for i in indices:
accuracy[t.data[i]] += 1
indices = np.where((t.data == np.argmax(y.data, axis=1)) == False)[0]
false_accuracy = defaultdict(int)
false_y = np.argmax(y.data, axis=1)
for i in indices:
false_accuracy[(t.data[i], false_y[i])] += 1
return accuracy, false_accuracy
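

# Hedged usage sketch (not part of the original file): building the network for
# a 10-class problem and running one forward pass. The input resolution (32x32)
# and the batch size are illustrative assumptions.
#
# model = AppendixA(category_num=10)
# model.weight_initialization()
# x = chainer.Variable(np.random.rand(2, 3, 32, 32).astype(np.float32))
# t = chainer.Variable(np.array([3, 7], dtype=np.int32))
# y = model(x, train=True)
# loss = model.calc_loss(y, t)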
| [
"chainer.functions.softmax_cross_entropy",
"functools.reduce",
"chainer.functions.concat",
"numpy.argmax",
"chainer.functions.average_pooling_2d",
"numpy.zeros",
"collections.defaultdict",
"chainer.links.Convolution2D",
"chainer.links.BatchNormalization"
]
| [((746, 806), 'functools.reduce', 'functools.reduce', (['(lambda a, b: a * b)', 'self.conv.W.data.shape'], {}), '(lambda a, b: a * b, self.conv.W.data.shape)\n', (762, 806), False, 'import functools\n'), ((1473, 1533), 'functools.reduce', 'functools.reduce', (['(lambda a, b: a * b)', 'self.conv.W.data.shape'], {}), '(lambda a, b: a * b, self.conv.W.data.shape)\n', (1489, 1533), False, 'import functools\n'), ((4203, 4232), 'chainer.functions.concat', 'F.concat', (['(x, pad)'], {'axis': 'axis'}), '((x, pad), axis=axis)\n', (4211, 4232), True, 'import chainer.functions as F\n'), ((5259, 5278), 'chainer.functions.concat', 'F.concat', (['X'], {'axis': '(1)'}), '(X, axis=1)\n', (5267, 5278), True, 'import chainer.functions as F\n'), ((5914, 5943), 'chainer.functions.softmax_cross_entropy', 'F.softmax_cross_entropy', (['y', 't'], {}), '(y, t)\n', (5937, 5943), True, 'import chainer.functions as F\n'), ((6136, 6152), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (6147, 6152), False, 'from collections import defaultdict\n'), ((6319, 6335), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (6330, 6335), False, 'from collections import defaultdict\n'), ((6354, 6379), 'numpy.argmax', 'np.argmax', (['y.data'], {'axis': '(1)'}), '(y.data, axis=1)\n', (6363, 6379), True, 'import numpy as np\n'), ((5724, 5764), 'chainer.functions.average_pooling_2d', 'F.average_pooling_2d', (['x', '(height, width)'], {}), '(x, (height, width))\n', (5744, 5764), True, 'import chainer.functions as F\n'), ((368, 434), 'chainer.links.Convolution2D', 'L.Convolution2D', (['in_channel', 'out_channel', 'filter_size', 'stride', 'pad'], {}), '(in_channel, out_channel, filter_size, stride, pad)\n', (383, 434), True, 'import chainer.links as L\n'), ((1012, 1078), 'chainer.links.Convolution2D', 'L.Convolution2D', (['in_channel', 'out_channel', 'filter_size', 'stride', 'pad'], {}), '(in_channel, out_channel, filter_size, stride, pad)\n', (1027, 1078), True, 'import chainer.links as L\n'), ((1095, 1128), 'chainer.links.BatchNormalization', 'L.BatchNormalization', (['out_channel'], {}), '(out_channel)\n', (1115, 1128), True, 'import chainer.links as L\n'), ((4508, 4569), 'numpy.zeros', 'np.zeros', (['(batch, channel, diff_height, width)'], {'dtype': 'x.dtype'}), '((batch, channel, diff_height, width), dtype=x.dtype)\n', (4516, 4569), True, 'import numpy as np\n'), ((4782, 4843), 'numpy.zeros', 'np.zeros', (['(batch, channel, height, diff_width)'], {'dtype': 'x.dtype'}), '((batch, channel, height, diff_width), dtype=x.dtype)\n', (4790, 4843), True, 'import numpy as np\n'), ((6078, 6103), 'numpy.argmax', 'np.argmax', (['y.data'], {'axis': '(1)'}), '(y.data, axis=1)\n', (6087, 6103), True, 'import numpy as np\n'), ((6254, 6279), 'numpy.argmax', 'np.argmax', (['y.data'], {'axis': '(1)'}), '(y.data, axis=1)\n', (6263, 6279), True, 'import numpy as np\n')] |
import numpy as np
class LinearRegression:
def __init__(self, num_features):
self.num_features = num_features
self.W = np.zeros((self.num_features, 1))
def train(self, x, y, epochs, batch_size, lr, optim):
final_loss = None # loss of final epoch
# Training should be done for 'epochs' times with minibatch size of 'batch_size'
# The function 'train' should return the loss of final epoch
# Loss of an epoch is calculated as an average of minibatch losses
# ========================= EDIT HERE ========================
        # Each row x[n] of the design matrix is paired with y[n]: f(x[n]) = y_n.
final_loss=0
num_data=len(y)
k=0
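        # dlossF computes the gradient of the squared-error loss
        # (f(x) - y)^2 / 2 with respect to weight W[j], averaged over the
        # minibatch x[k:k + batch_size] (truncated at the end of the data).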
def dlossF(k, j):
s=0
size = batch_size
for Xi, Yi in zip(x[k:k+batch_size], y[k:k+batch_size]):
fx=np.transpose(Xi).dot(self.W)
s = s + (fx-Yi)*Xi[j]
if (num_data - k) < batch_size:
size = num_data - k
return s/size
for iterative in range(0, epochs):
k = k + batch_size
if k == num_data:
k = batch_size
grad = np.zeros((self.num_features, 1))
for j in range(0, self.num_features):
grad[j] = dlossF(k, j)
self.W = optim.update(self.W, grad, lr)
# ============================================================
return final_loss
def eval(self, x):
pred = None
# Evaluation Function
# Given the input 'x', the function should return prediction for 'x'
# ========================= EDIT HERE ========================
ylist=[]
for xline in x:
y = np.transpose(xline).dot(self.W)
ylist.append(y[0])
pred = np.array(ylist)
# ============================================================
return pred
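

# Hedged usage sketch (not part of the original file): `SimpleSGD` is a stand-in
# for whatever optimizer object the caller supplies; only its
# `update(W, grad, lr)` method is assumed, as used in `train` above.
#
# class SimpleSGD:
#     def update(self, W, grad, lr):
#         return W - lr * grad
#
# x = np.hstack([np.random.rand(100, 1), np.ones((100, 1))])  # feature + bias term
# y = 3.0 * x[:, 0] + 1.0
# model = LinearRegression(num_features=2)
# model.train(x, y, epochs=50, batch_size=10, lr=0.1, optim=SimpleSGD())
# predictions = model.eval(x)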
| [
"numpy.array",
"numpy.zeros",
"numpy.transpose"
]
| [((145, 177), 'numpy.zeros', 'np.zeros', (['(self.num_features, 1)'], {}), '((self.num_features, 1))\n', (153, 177), True, 'import numpy as np\n'), ((1880, 1895), 'numpy.array', 'np.array', (['ylist'], {}), '(ylist)\n', (1888, 1895), True, 'import numpy as np\n'), ((1227, 1259), 'numpy.zeros', 'np.zeros', (['(self.num_features, 1)'], {}), '((self.num_features, 1))\n', (1235, 1259), True, 'import numpy as np\n'), ((1800, 1819), 'numpy.transpose', 'np.transpose', (['xline'], {}), '(xline)\n', (1812, 1819), True, 'import numpy as np\n'), ((881, 897), 'numpy.transpose', 'np.transpose', (['Xi'], {}), '(Xi)\n', (893, 897), True, 'import numpy as np\n')] |
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Admin views for managing volumes and snapshots.
"""
from collections import OrderedDict
from django.conf import settings
from django.urls import reverse
from django.urls import reverse_lazy
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import tables
from horizon.utils import memoized
from openstack_dashboard.api import cinder
from openstack_dashboard.api import keystone
from openstack_dashboard.dashboards.admin.volumes \
import forms as volumes_forms
from openstack_dashboard.dashboards.admin.volumes \
import tables as volumes_tables
from openstack_dashboard.dashboards.admin.volumes \
import tabs as volumes_tabs
from openstack_dashboard.dashboards.project.volumes \
import views as volumes_views
class VolumesView(tables.PagedTableMixin, volumes_views.VolumeTableMixIn,
tables.DataTableView):
table_class = volumes_tables.VolumesTable
page_title = _("Volumes")
FILTERS_MAPPING = {'bootable': {_('yes'): 'true', _('no'): 'false'},
'encrypted': {_('yes'): True, _('no'): False}}
def get_data(self):
default_filters = {'all_tenants': True}
filters = self.get_filters(default_filters.copy())
filter_first = getattr(settings, 'FILTER_DATA_FIRST', {})
volumes = []
self.table.needs_filter_first = False
if filter_first.get('admin.volumes', False) and \
len(filters) == len(default_filters):
self.table.needs_filter_first = True
return volumes
if 'project' in filters:
            # keystone.tenant_list returns a (tenant_list, has_more) tuple; only
            # the tenant list is needed, hence the [0] index below.
tenants = keystone.tenant_list(self.request)[0]
tenant_ids = [t.id for t in tenants
if t.name == filters['project']]
if not tenant_ids:
return []
del filters['project']
for id in tenant_ids:
filters['project_id'] = id
volumes += self._get_volumes(search_opts=filters)
else:
volumes = self._get_volumes(search_opts=filters)
attached_instance_ids = self._get_attached_instance_ids(volumes)
instances = self._get_instances(search_opts={'all_tenants': True},
instance_ids=attached_instance_ids)
volume_ids_with_snapshots = self._get_volumes_ids_with_snapshots(
search_opts={'all_tenants': True})
self._set_volume_attributes(
volumes, instances, volume_ids_with_snapshots)
# Gather our tenants to correlate against IDs
try:
tenants, has_more = keystone.tenant_list(self.request)
except Exception:
tenants = []
msg = _('Unable to retrieve volume project information.')
exceptions.handle(self.request, msg)
tenant_dict = OrderedDict([(t.id, t) for t in tenants])
for volume in volumes:
tenant_id = getattr(volume, "os-vol-tenant-attr:tenant_id", None)
tenant = tenant_dict.get(tenant_id, None)
volume.tenant_name = getattr(tenant, "name", None)
return volumes
def get_filters(self, filters):
self.table = self._tables['volumes']
self.handle_server_filter(self.request, table=self.table)
self.update_server_filter_action(self.request, table=self.table)
filters = super(VolumesView, self).get_filters(filters,
self.FILTERS_MAPPING)
return filters
class DetailView(volumes_views.DetailView):
tab_group_class = volumes_tabs.VolumeDetailTabs
def get_context_data(self, **kwargs):
context = super(DetailView, self).get_context_data(**kwargs)
table = volumes_tables.VolumesTable(self.request)
context["actions"] = table.render_row_actions(context["volume"])
return context
def get_search_opts(self, volume):
search_opts = super(DetailView, self).get_search_opts(volume)
search_opts['all_tenants'] = True
return search_opts
def get_redirect_url(self):
return reverse('horizon:admin:volumes:index')
class ManageVolumeView(forms.ModalFormView):
form_class = volumes_forms.ManageVolume
template_name = 'admin/volumes/manage_volume.html'
form_id = "manage_volume_modal"
submit_label = _("Manage")
success_url = reverse_lazy('horizon:admin:volumes:index')
submit_url = reverse_lazy('horizon:admin:volumes:manage')
cancel_url = reverse_lazy("horizon:admin:volumes:index")
page_title = _("Manage Volume")
def get_context_data(self, **kwargs):
context = super(ManageVolumeView, self).get_context_data(**kwargs)
return context
class UnmanageVolumeView(forms.ModalFormView):
form_class = volumes_forms.UnmanageVolume
template_name = 'admin/volumes/unmanage_volume.html'
form_id = "unmanage_volume_modal"
submit_label = _("Unmanage")
success_url = reverse_lazy('horizon:admin:volumes:index')
submit_url = 'horizon:admin:volumes:unmanage'
cancel_url = reverse_lazy("horizon:admin:volumes:index")
page_title = _("Unmanage Volume")
def get_context_data(self, **kwargs):
context = super(UnmanageVolumeView, self).get_context_data(**kwargs)
args = (self.kwargs['volume_id'],)
context['submit_url'] = reverse(self.submit_url, args=args)
return context
@memoized.memoized_method
def get_data(self):
try:
volume_id = self.kwargs['volume_id']
volume = cinder.volume_get(self.request, volume_id)
except Exception:
exceptions.handle(self.request,
_('Unable to retrieve volume details.'),
redirect=self.success_url)
return volume
def get_initial(self):
volume = self.get_data()
return {'volume_id': self.kwargs["volume_id"],
'name': volume.name,
'host': getattr(volume, "os-vol-host-attr:host")}
class MigrateVolumeView(forms.ModalFormView):
form_class = volumes_forms.MigrateVolume
template_name = 'admin/volumes/migrate_volume.html'
form_id = "migrate_volume_modal"
submit_label = _("Migrate")
success_url = reverse_lazy('horizon:admin:volumes:index')
submit_url = 'horizon:admin:volumes:migrate'
cancel_url = reverse_lazy("horizon:admin:volumes:index")
page_title = _("Migrate Volume")
def get_context_data(self, **kwargs):
context = super(MigrateVolumeView, self).get_context_data(**kwargs)
args = (self.kwargs['volume_id'],)
context['submit_url'] = reverse(self.submit_url, args=args)
return context
@memoized.memoized_method
def get_data(self):
try:
volume_id = self.kwargs['volume_id']
volume = cinder.volume_get(self.request, volume_id)
except Exception:
exceptions.handle(self.request,
_('Unable to retrieve volume details.'),
redirect=self.success_url)
return volume
@memoized.memoized_method
def get_hosts(self):
try:
return cinder.pool_list(self.request)
except Exception:
exceptions.handle(self.request,
_('Unable to retrieve pools information.'),
redirect=self.success_url)
def get_initial(self):
volume = self.get_data()
return {'volume_id': self.kwargs["volume_id"],
'name': volume.name,
'current_host': getattr(volume, "os-vol-host-attr:host"),
'hosts': self.get_hosts()}
class UpdateStatusView(forms.ModalFormView):
form_class = volumes_forms.UpdateStatus
modal_id = "update_volume_status_modal"
template_name = 'admin/volumes/update_status.html'
submit_label = _("Update Status")
submit_url = "horizon:admin:volumes:update_status"
success_url = reverse_lazy('horizon:admin:volumes:index')
page_title = _("Update Volume Status")
def get_context_data(self, **kwargs):
context = super(UpdateStatusView, self).get_context_data(**kwargs)
context["volume_id"] = self.kwargs['volume_id']
args = (self.kwargs['volume_id'],)
context['submit_url'] = reverse(self.submit_url, args=args)
return context
@memoized.memoized_method
def get_data(self):
try:
volume_id = self.kwargs['volume_id']
volume = cinder.volume_get(self.request, volume_id)
except Exception:
exceptions.handle(self.request,
_('Unable to retrieve volume details.'),
redirect=self.success_url)
return volume
def get_initial(self):
volume = self.get_data()
return {'volume_id': self.kwargs["volume_id"],
'status': volume.status}
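

# Hedged sketch (not part of this module): the URL names referenced above, such
# as "horizon:admin:volumes:manage" and "horizon:admin:volumes:unmanage", are
# expected to be registered in the panel's urls.py roughly as follows; the real
# patterns live elsewhere in openstack_dashboard and may differ.
#
# from django.conf.urls import url
#
# urlpatterns = [
#     url(r'^$', VolumesView.as_view(), name='index'),
#     url(r'^manage/$', ManageVolumeView.as_view(), name='manage'),
#     url(r'^(?P<volume_id>[^/]+)/unmanage/$', UnmanageVolumeView.as_view(),
#         name='unmanage'),
# ]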
| [
"collections.OrderedDict",
"django.utils.translation.ugettext_lazy",
"horizon.exceptions.handle",
"openstack_dashboard.api.keystone.tenant_list",
"django.urls.reverse_lazy",
"openstack_dashboard.dashboards.admin.volumes.tables.VolumesTable",
"openstack_dashboard.api.cinder.volume_get",
"django.urls.reverse",
"openstack_dashboard.api.cinder.pool_list"
]
| [((1591, 1603), 'django.utils.translation.ugettext_lazy', '_', (['"""Volumes"""'], {}), "('Volumes')\n", (1592, 1603), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((5132, 5143), 'django.utils.translation.ugettext_lazy', '_', (['"""Manage"""'], {}), "('Manage')\n", (5133, 5143), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((5162, 5205), 'django.urls.reverse_lazy', 'reverse_lazy', (['"""horizon:admin:volumes:index"""'], {}), "('horizon:admin:volumes:index')\n", (5174, 5205), False, 'from django.urls import reverse_lazy\n'), ((5223, 5267), 'django.urls.reverse_lazy', 'reverse_lazy', (['"""horizon:admin:volumes:manage"""'], {}), "('horizon:admin:volumes:manage')\n", (5235, 5267), False, 'from django.urls import reverse_lazy\n'), ((5285, 5328), 'django.urls.reverse_lazy', 'reverse_lazy', (['"""horizon:admin:volumes:index"""'], {}), "('horizon:admin:volumes:index')\n", (5297, 5328), False, 'from django.urls import reverse_lazy\n'), ((5346, 5364), 'django.utils.translation.ugettext_lazy', '_', (['"""Manage Volume"""'], {}), "('Manage Volume')\n", (5347, 5364), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((5715, 5728), 'django.utils.translation.ugettext_lazy', '_', (['"""Unmanage"""'], {}), "('Unmanage')\n", (5716, 5728), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((5747, 5790), 'django.urls.reverse_lazy', 'reverse_lazy', (['"""horizon:admin:volumes:index"""'], {}), "('horizon:admin:volumes:index')\n", (5759, 5790), False, 'from django.urls import reverse_lazy\n'), ((5858, 5901), 'django.urls.reverse_lazy', 'reverse_lazy', (['"""horizon:admin:volumes:index"""'], {}), "('horizon:admin:volumes:index')\n", (5870, 5901), False, 'from django.urls import reverse_lazy\n'), ((5919, 5939), 'django.utils.translation.ugettext_lazy', '_', (['"""Unmanage Volume"""'], {}), "('Unmanage Volume')\n", (5920, 5939), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((7019, 7031), 'django.utils.translation.ugettext_lazy', '_', (['"""Migrate"""'], {}), "('Migrate')\n", (7020, 7031), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((7050, 7093), 'django.urls.reverse_lazy', 'reverse_lazy', (['"""horizon:admin:volumes:index"""'], {}), "('horizon:admin:volumes:index')\n", (7062, 7093), False, 'from django.urls import reverse_lazy\n'), ((7160, 7203), 'django.urls.reverse_lazy', 'reverse_lazy', (['"""horizon:admin:volumes:index"""'], {}), "('horizon:admin:volumes:index')\n", (7172, 7203), False, 'from django.urls import reverse_lazy\n'), ((7221, 7240), 'django.utils.translation.ugettext_lazy', '_', (['"""Migrate Volume"""'], {}), "('Migrate Volume')\n", (7222, 7240), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((8694, 8712), 'django.utils.translation.ugettext_lazy', '_', (['"""Update Status"""'], {}), "('Update Status')\n", (8695, 8712), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((8786, 8829), 'django.urls.reverse_lazy', 'reverse_lazy', (['"""horizon:admin:volumes:index"""'], {}), "('horizon:admin:volumes:index')\n", (8798, 8829), False, 'from django.urls import reverse_lazy\n'), ((8847, 8872), 'django.utils.translation.ugettext_lazy', '_', (['"""Update Volume Status"""'], {}), "('Update Volume Status')\n", (8848, 8872), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((3624, 3665), 'collections.OrderedDict', 'OrderedDict', (['[(t.id, t) for t in tenants]'], {}), '([(t.id, t) for t in tenants])\n', (3635, 3665), False, 
'from collections import OrderedDict\n'), ((4527, 4568), 'openstack_dashboard.dashboards.admin.volumes.tables.VolumesTable', 'volumes_tables.VolumesTable', (['self.request'], {}), '(self.request)\n', (4554, 4568), True, 'from openstack_dashboard.dashboards.admin.volumes import tables as volumes_tables\n'), ((4892, 4930), 'django.urls.reverse', 'reverse', (['"""horizon:admin:volumes:index"""'], {}), "('horizon:admin:volumes:index')\n", (4899, 4930), False, 'from django.urls import reverse\n'), ((6135, 6170), 'django.urls.reverse', 'reverse', (['self.submit_url'], {'args': 'args'}), '(self.submit_url, args=args)\n', (6142, 6170), False, 'from django.urls import reverse\n'), ((7435, 7470), 'django.urls.reverse', 'reverse', (['self.submit_url'], {'args': 'args'}), '(self.submit_url, args=args)\n', (7442, 7470), False, 'from django.urls import reverse\n'), ((9122, 9157), 'django.urls.reverse', 'reverse', (['self.submit_url'], {'args': 'args'}), '(self.submit_url, args=args)\n', (9129, 9157), False, 'from django.urls import reverse\n'), ((1641, 1649), 'django.utils.translation.ugettext_lazy', '_', (['"""yes"""'], {}), "('yes')\n", (1642, 1649), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1659, 1666), 'django.utils.translation.ugettext_lazy', '_', (['"""no"""'], {}), "('no')\n", (1660, 1666), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1715, 1723), 'django.utils.translation.ugettext_lazy', '_', (['"""yes"""'], {}), "('yes')\n", (1716, 1723), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1731, 1738), 'django.utils.translation.ugettext_lazy', '_', (['"""no"""'], {}), "('no')\n", (1732, 1738), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((3396, 3430), 'openstack_dashboard.api.keystone.tenant_list', 'keystone.tenant_list', (['self.request'], {}), '(self.request)\n', (3416, 3430), False, 'from openstack_dashboard.api import keystone\n'), ((6332, 6374), 'openstack_dashboard.api.cinder.volume_get', 'cinder.volume_get', (['self.request', 'volume_id'], {}), '(self.request, volume_id)\n', (6349, 6374), False, 'from openstack_dashboard.api import cinder\n'), ((7632, 7674), 'openstack_dashboard.api.cinder.volume_get', 'cinder.volume_get', (['self.request', 'volume_id'], {}), '(self.request, volume_id)\n', (7649, 7674), False, 'from openstack_dashboard.api import cinder\n'), ((7983, 8013), 'openstack_dashboard.api.cinder.pool_list', 'cinder.pool_list', (['self.request'], {}), '(self.request)\n', (7999, 8013), False, 'from openstack_dashboard.api import cinder\n'), ((9319, 9361), 'openstack_dashboard.api.cinder.volume_get', 'cinder.volume_get', (['self.request', 'volume_id'], {}), '(self.request, volume_id)\n', (9336, 9361), False, 'from openstack_dashboard.api import cinder\n'), ((2399, 2433), 'openstack_dashboard.api.keystone.tenant_list', 'keystone.tenant_list', (['self.request'], {}), '(self.request)\n', (2419, 2433), False, 'from openstack_dashboard.api import keystone\n'), ((3500, 3551), 'django.utils.translation.ugettext_lazy', '_', (['"""Unable to retrieve volume project information."""'], {}), "('Unable to retrieve volume project information.')\n", (3501, 3551), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((3564, 3600), 'horizon.exceptions.handle', 'exceptions.handle', (['self.request', 'msg'], {}), '(self.request, msg)\n', (3581, 3600), False, 'from horizon import exceptions\n'), ((6475, 6514), 'django.utils.translation.ugettext_lazy', '_', (['"""Unable to retrieve volume 
details."""'], {}), "('Unable to retrieve volume details.')\n", (6476, 6514), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((7775, 7814), 'django.utils.translation.ugettext_lazy', '_', (['"""Unable to retrieve volume details."""'], {}), "('Unable to retrieve volume details.')\n", (7776, 7814), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((8114, 8156), 'django.utils.translation.ugettext_lazy', '_', (['"""Unable to retrieve pools information."""'], {}), "('Unable to retrieve pools information.')\n", (8115, 8156), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((9462, 9501), 'django.utils.translation.ugettext_lazy', '_', (['"""Unable to retrieve volume details."""'], {}), "('Unable to retrieve volume details.')\n", (9463, 9501), True, 'from django.utils.translation import ugettext_lazy as _\n')] |
import re
from typing import Any, Text, Dict, List
from rasa_sdk import Action, Tracker
from rasa_sdk.executor import CollectingDispatcher
from rasa_sdk.events import SlotSet
import lark_module
class ActionHelloWorld(Action):
state_map = {}
def name(self) -> Text:
return "action_hello_world"
def run(self, dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
state = tracker.current_state()
print("current_state: {}".format(state))
sender_id = state["sender_id"]
if sender_id not in self.state_map:
self.state_map[sender_id] = 0
self.state_map[sender_id] += 1
dispatcher.utter_message(
text="Hello World!",
json_message={"data": "hogeohge"},
# template="<div></div>",
buttons=[{"title": "OK", "payload": "99!"}])
print("state: {}".format(self.state_map[sender_id]))
return []
class ActionCustomButton(Action):
def name(self) -> Text:
return "action_custom_button"
def run(self, dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
dispatcher.utter_message(
text="Which ?",
buttons=[{"title": "OK", "payload": "1"},
{"title": "NG", "payload": "2"},
{"title": "Unknown", "payload": "9"}])
return []
class ActionJsonMessage(Action):
def name(self) -> Text:
return "action_json_message"
def run(self, dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
dispatcher.utter_message(
text="Which ?",
json_message={"data": {
"key1": "value1",
"key2": "value2",
}}
)
return []
class ActionConversation(Action):
def name(self) -> Text:
return "action_conversation"
def run(self, dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
state = tracker.current_state()
print("current_state: {}".format(state))
input_text = state['latest_message'].get('text')
latest_bot = None
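        # walk the event history backwards to find the custom payload of the most recent bot utterance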
for event in reversed(state['events']):
if event['event'] == 'bot':
data = event.get('data', {}).get('custom', {}).get('data', [])
latest_bot = data[0] if len(data) > 0 else None
break
print("latest_bot: {}".format(latest_bot))
if not latest_bot:
print("use utter_conversation_1")
dispatcher.utter_message(template="utter_conversation_1", json_message={"data": {"key1": "value1",
"key2": "value2"}})
else:
if latest_bot == 'conversation_1':
print("use utter_conversation_2")
dispatcher.utter_message(template="utter_conversation_2", json_message={"data": ["conversation_2"]})
elif latest_bot == 'conversation_2':
result = re.match("\\d+", input_text)
if result:
print("use utter_conversation_3")
dispatcher.utter_message(template="utter_conversation_3", json_message={"data": ["conversation_3"]})
else:
print("use utter_conversation_2")
dispatcher.utter_message(template="utter_conversation_2", json_message={"data": ["conversation_2"]})
elif latest_bot == 'conversation_3':
result = re.match("\\d+", input_text)
if not result:
print("use utter_conversation_3")
dispatcher.utter_message(template="utter_conversation_3", json_message={"data": ["conversation_3"]})
else:
dispatcher.utter_message(text="Bye", json_message={"data": ["conversation_3"]})
else:
dispatcher.utter_message(text="Bye", json_message={"data": ["conversation_3"]})
return []
class ActionConversation2(Action):
action_state = {}
def name(self) -> Text:
return "action_conversation2"
def run(self, dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
state = tracker.current_state()
sender_id = state.get("sender_id")
current_action = self.action_state.get(sender_id)
input_text = state['latest_message'].get('text')
print("state: {}, current_action: {}".format(state, current_action))
if current_action:
result = lark_module.execute(input_text)
if result:
dispatcher.utter_message(text=result, json_message={"data": ["step2"]},
elements=[{"data": ["step2"]}])
else:
dispatcher.utter_message(text="Bye", json_message={"data": ["step3"]})
else:
dispatcher.utter_message(text="Where are you from ?", json_message={"data": ["step3"]})
self.action_state[sender_id] = "get_start"
return []
| [
"lark_module.execute",
"re.match"
]
| [((4873, 4904), 'lark_module.execute', 'lark_module.execute', (['input_text'], {}), '(input_text)\n', (4892, 4904), False, 'import lark_module\n'), ((3285, 3313), 're.match', 're.match', (['"""\\\\d+"""', 'input_text'], {}), "('\\\\d+', input_text)\n", (3293, 3313), False, 'import re\n'), ((3787, 3815), 're.match', 're.match', (['"""\\\\d+"""', 'input_text'], {}), "('\\\\d+', input_text)\n", (3795, 3815), False, 'import re\n')] |
# Copyright 2015 Ufora Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import pyfora
import ufora.config.Setup as Setup
import ufora.FORA.python.PurePython.DictTestCases as DictTestCases
import ufora.FORA.python.PurePython.ListTestCases as ListTestCases
import ufora.FORA.python.PurePython.TupleTestCases as TupleTestCases
import ufora.FORA.python.PurePython.ExecutorTestCommon as ExecutorTestCommon
import ufora.test.ClusterSimulation as ClusterSimulation
class ExecutorSimulationTest(
unittest.TestCase,
ExecutorTestCommon.ExecutorTestCommon,
DictTestCases.DictTestCases,
ListTestCases.ListTestCases,
TupleTestCases.TupleTestCases):
@classmethod
def setUpClass(cls):
cls.config = Setup.config()
cls.executor = None
cls.simulation = ClusterSimulation.Simulator.createGlobalSimulator()
cls.simulation.startService()
cls.simulation.getDesirePublisher().desireNumberOfWorkers(1)
@classmethod
def tearDownClass(cls):
cls.simulation.stopService()
@classmethod
def create_executor(cls, allowCached=True):
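        # reuse a cached executor connected to the local simulator unless the caller asks for a fresh connection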
if not allowCached:
return pyfora.connect('http://localhost:30000')
if cls.executor is None:
cls.executor = pyfora.connect('http://localhost:30000')
cls.executor.stayOpenOnExit = True
return cls.executor
if __name__ == '__main__':
import ufora.config.Mainline as Mainline
Mainline.UnitTestMainline()
| [
"ufora.config.Setup.config",
"ufora.test.ClusterSimulation.Simulator.createGlobalSimulator",
"pyfora.connect",
"ufora.config.Mainline.UnitTestMainline"
]
| [((2001, 2028), 'ufora.config.Mainline.UnitTestMainline', 'Mainline.UnitTestMainline', ([], {}), '()\n', (2026, 2028), True, 'import ufora.config.Mainline as Mainline\n'), ((1282, 1296), 'ufora.config.Setup.config', 'Setup.config', ([], {}), '()\n', (1294, 1296), True, 'import ufora.config.Setup as Setup\n'), ((1350, 1401), 'ufora.test.ClusterSimulation.Simulator.createGlobalSimulator', 'ClusterSimulation.Simulator.createGlobalSimulator', ([], {}), '()\n', (1399, 1401), True, 'import ufora.test.ClusterSimulation as ClusterSimulation\n'), ((1705, 1745), 'pyfora.connect', 'pyfora.connect', (['"""http://localhost:30000"""'], {}), "('http://localhost:30000')\n", (1719, 1745), False, 'import pyfora\n'), ((1807, 1847), 'pyfora.connect', 'pyfora.connect', (['"""http://localhost:30000"""'], {}), "('http://localhost:30000')\n", (1821, 1847), False, 'import pyfora\n')] |
import os
import gc
import random
import numpy as np
import torch
def seed_everything(seed):
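    # seed the hash seed, Python, NumPy and PyTorch (CPU and CUDA) RNGs so runs are reproducible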
os.environ['PYTHONHASHSEED'] = str(seed)
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False  # benchmarking must stay off for deterministic results
def memory_cleanup():
"""
Cleans up GPU memory
https://github.com/huggingface/transformers/issues/1742
"""
for obj in gc.get_objects():
if torch.is_tensor(obj):
del obj
gc.collect()
torch.cuda.empty_cache()
| [
"torch.manual_seed",
"random.seed",
"torch.is_tensor",
"numpy.random.seed",
"gc.collect",
"torch.cuda.manual_seed",
"gc.get_objects",
"torch.cuda.empty_cache"
]
| [((143, 160), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (154, 160), False, 'import random\n'), ((165, 185), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (179, 185), True, 'import numpy as np\n'), ((190, 213), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (207, 213), False, 'import torch\n'), ((218, 246), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['seed'], {}), '(seed)\n', (240, 246), False, 'import torch\n'), ((479, 495), 'gc.get_objects', 'gc.get_objects', ([], {}), '()\n', (493, 495), False, 'import gc\n'), ((555, 567), 'gc.collect', 'gc.collect', ([], {}), '()\n', (565, 567), False, 'import gc\n'), ((572, 596), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (594, 596), False, 'import torch\n'), ((508, 528), 'torch.is_tensor', 'torch.is_tensor', (['obj'], {}), '(obj)\n', (523, 528), False, 'import torch\n')] |
from conans import ConanFile, CMake, tools
import os
import shutil
required_conan_version = ">=1.43.0"
class FreeImageConan(ConanFile):
name = "freeimage"
description = "Open Source library project for developers who would like to support popular graphics image formats"\
"like PNG, BMP, JPEG, TIFF and others as needed by today's multimedia applications."
homepage = "https://freeimage.sourceforge.io"
url = "https://github.com/conan-io/conan-center-index"
license = "FreeImage", "GPL-3.0-or-later", "GPL-2.0-or-later"
topics = ("freeimage", "image", "decoding", "graphics")
generators = "cmake", "cmake_find_package"
settings = "os", "arch", "compiler", "build_type"
options = {
"shared": [True, False],
"fPIC": [True, False],
"with_jpeg": [False, "libjpeg", "libjpeg-turbo"],
"with_png": [True, False],
"with_tiff": [True, False],
"with_jpeg2000": [True, False],
"with_openexr": [True, False],
"with_eigen": [True, False],
"with_webp": [True, False],
"with_raw": [True, False],
"with_jxr": [True, False],
}
default_options = {
"shared": False,
"fPIC": True,
"with_jpeg": "libjpeg",
"with_png": True,
"with_tiff": True,
"with_jpeg2000": True,
"with_openexr": True,
"with_eigen": True,
"with_webp": True,
"with_raw": True,
"with_jxr": True,
}
_cmake = None
@property
def _source_subfolder(self):
return "source_subfolder"
@property
def _build_subfolder(self):
return "build_subfolder"
def export_sources(self):
self.copy("CMakeLists.txt")
for patch in self.conan_data.get("patches", {}).get(self.version, []):
self.copy(patch["patch_file"])
def config_options(self):
if self.settings.os == "Windows":
del self.options.fPIC
def configure(self):
tools.check_min_cppstd(self, "11")
if self.options.shared:
del self.options.fPIC
self.output.warn("G3 plugin and JPEGTransform are disabled.")
if self.options.with_jpeg is not None:
if self.options.with_tiff:
self.options["libtiff"].jpeg = self.options.with_jpeg
def requirements(self):
self.requires("zlib/1.2.11")
if self.options.with_jpeg == "libjpeg":
self.requires("libjpeg/9d")
elif self.options.with_jpeg == "libjpeg-turbo":
self.requires("libjpeg-turbo/2.1.2")
if self.options.with_jpeg2000:
self.requires("openjpeg/2.4.0")
if self.options.with_png:
self.requires("libpng/1.6.37")
if self.options.with_webp:
self.requires("libwebp/1.2.2")
if self.options.with_openexr:
self.requires("openexr/2.5.7")
if self.options.with_raw:
self.requires("libraw/0.20.2")
if self.options.with_jxr:
self.requires("jxrlib/cci.20170615")
if self.options.with_tiff:
self.requires("libtiff/4.3.0")
def source(self):
tools.get(**self.conan_data["sources"][self.version],
destination=self._source_subfolder, strip_root=True)
def _configure_cmake(self):
if self._cmake:
return self._cmake
self._cmake = CMake(self)
self._cmake.definitions["WITH_JPEG"] = self.options.with_jpeg != False
self._cmake.definitions["WITH_OPENJPEG"] = self.options.with_jpeg2000
self._cmake.definitions["WITH_PNG"] = self.options.with_png
self._cmake.definitions["WITH_WEBP"] = self.options.with_webp
self._cmake.definitions["WITH_OPENEXR"] = self.options.with_openexr
self._cmake.definitions["WITH_RAW"] = self.options.with_raw
self._cmake.definitions["WITH_JXR"] = self.options.with_jxr
self._cmake.definitions["WITH_TIFF"] = self.options.with_tiff
self._cmake.configure(build_dir=self._build_subfolder)
return self._cmake
def build(self):
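        # remove the vendored third-party sources so the Conan-provided dependencies are used instead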
tools.rmdir(os.path.join(self._source_subfolder, "Source", "LibPNG"))
tools.rmdir(os.path.join(self._source_subfolder, "Source", "LibTIFF4"))
tools.rmdir(os.path.join(self._source_subfolder, "Source", "LibOpenJPEG"))
tools.rmdir(os.path.join(self._source_subfolder, "Source", "LibJXR"))
tools.rmdir(os.path.join(self._source_subfolder, "Source", "LibWebP"))
tools.rmdir(os.path.join(self._source_subfolder, "Source", "LibRawLite"))
tools.rmdir(os.path.join(self._source_subfolder, "Source", "OpenEXR"))
for patch in self.conan_data.get("patches", {}).get(self.version, {}):
tools.patch(**patch)
cmake = self._configure_cmake()
cmake.build()
def package(self):
cmake = self._configure_cmake()
cmake.install()
self.copy("license-fi.txt", dst="licenses", src=self._source_subfolder)
self.copy("license-gplv3.txt", dst="licenses", src=self._source_subfolder)
self.copy("license-gplv2.txt", dst="licenses", src=self._source_subfolder)
def package_info(self):
def imageformats_deps():
components = []
components.append("zlib::zlib")
if self.options.with_jpeg:
components.append("{0}::{0}".format(self.options.with_jpeg))
if self.options.with_jpeg2000:
components.append("openjpeg::openjpeg")
if self.options.with_png:
components.append("libpng::libpng")
if self.options.with_webp:
components.append("libwebp::libwebp")
if self.options.with_openexr:
components.append("openexr::openexr")
if self.options.with_raw:
components.append("libraw::libraw")
if self.options.with_jxr:
components.append("jxrlib::jxrlib")
if self.options.with_tiff:
components.append("libtiff::libtiff")
return components
self.cpp_info.names["pkg_config"] = "freeimage"
self.cpp_info.names["cmake_find_package"] = "FreeImage"
self.cpp_info.names["cmake_find_package_multi"] = "FreeImage"
self.cpp_info.components["FreeImage"].libs = ["freeimage"]
self.cpp_info.components["FreeImage"].requires = imageformats_deps()
self.cpp_info.components["FreeImagePlus"].libs = ["freeimageplus"]
self.cpp_info.components["FreeImagePlus"].requires = ["FreeImage"]
if not self.options.shared:
self.cpp_info.components["FreeImage"].defines.append("FREEIMAGE_LIB")
| [
"conans.CMake",
"os.path.join",
"conans.tools.patch",
"conans.tools.check_min_cppstd",
"conans.tools.get"
]
| [((2000, 2034), 'conans.tools.check_min_cppstd', 'tools.check_min_cppstd', (['self', '"""11"""'], {}), "(self, '11')\n", (2022, 2034), False, 'from conans import ConanFile, CMake, tools\n'), ((3174, 3285), 'conans.tools.get', 'tools.get', ([], {'destination': 'self._source_subfolder', 'strip_root': '(True)'}), "(**self.conan_data['sources'][self.version], destination=self.\n _source_subfolder, strip_root=True)\n", (3183, 3285), False, 'from conans import ConanFile, CMake, tools\n'), ((3409, 3420), 'conans.CMake', 'CMake', (['self'], {}), '(self)\n', (3414, 3420), False, 'from conans import ConanFile, CMake, tools\n'), ((4130, 4186), 'os.path.join', 'os.path.join', (['self._source_subfolder', '"""Source"""', '"""LibPNG"""'], {}), "(self._source_subfolder, 'Source', 'LibPNG')\n", (4142, 4186), False, 'import os\n'), ((4208, 4266), 'os.path.join', 'os.path.join', (['self._source_subfolder', '"""Source"""', '"""LibTIFF4"""'], {}), "(self._source_subfolder, 'Source', 'LibTIFF4')\n", (4220, 4266), False, 'import os\n'), ((4288, 4349), 'os.path.join', 'os.path.join', (['self._source_subfolder', '"""Source"""', '"""LibOpenJPEG"""'], {}), "(self._source_subfolder, 'Source', 'LibOpenJPEG')\n", (4300, 4349), False, 'import os\n'), ((4371, 4427), 'os.path.join', 'os.path.join', (['self._source_subfolder', '"""Source"""', '"""LibJXR"""'], {}), "(self._source_subfolder, 'Source', 'LibJXR')\n", (4383, 4427), False, 'import os\n'), ((4449, 4506), 'os.path.join', 'os.path.join', (['self._source_subfolder', '"""Source"""', '"""LibWebP"""'], {}), "(self._source_subfolder, 'Source', 'LibWebP')\n", (4461, 4506), False, 'import os\n'), ((4528, 4588), 'os.path.join', 'os.path.join', (['self._source_subfolder', '"""Source"""', '"""LibRawLite"""'], {}), "(self._source_subfolder, 'Source', 'LibRawLite')\n", (4540, 4588), False, 'import os\n'), ((4610, 4667), 'os.path.join', 'os.path.join', (['self._source_subfolder', '"""Source"""', '"""OpenEXR"""'], {}), "(self._source_subfolder, 'Source', 'OpenEXR')\n", (4622, 4667), False, 'import os\n'), ((4761, 4781), 'conans.tools.patch', 'tools.patch', ([], {}), '(**patch)\n', (4772, 4781), False, 'from conans import ConanFile, CMake, tools\n')] |
#!/usr/bin/env python
"""
test_history.py
"""
# Copyright (c) 2011 <NAME>, Real Programmers. All rights reserved.
import unittest
from OR_Client_Library.openrefine_client.google.refine.history import *
class HistoryTest(unittest.TestCase):
def test_init(self):
response = {
u"code": "ok",
u"historyEntry": {
u"id": 1303851435223,
u"description": "Split 4 cells",
u"time": "2011-04-26T16:45:08Z"
}
}
he = response['historyEntry']
entry = HistoryEntry(he['id'], he['time'], he['description'])
self.assertEqual(entry.id, 1303851435223)
self.assertEqual(entry.description, 'Split 4 cells')
self.assertEqual(entry.time, '2011-04-26T16:45:08Z')
if __name__ == '__main__':
unittest.main()
| [
"unittest.main"
]
| [((821, 836), 'unittest.main', 'unittest.main', ([], {}), '()\n', (834, 836), False, 'import unittest\n')] |
"""
Provide tests for command line interface's get batch command.
"""
import json
import pytest
from click.testing import CliRunner
from cli.constants import (
DEV_BRANCH_NODE_IP_ADDRESS_FOR_TESTING,
FAILED_EXIT_FROM_COMMAND_CODE,
PASSED_EXIT_FROM_COMMAND_CODE,
)
from cli.entrypoint import cli
from cli.utils import dict_to_pretty_json
BATCH_IDENTIFIER_PRESENTED_ON_THE_TEST_NODE = '<KEY>' \
'<KEY>'
def test_get_batch():
"""
Case: get a batch by identifier.
Expect: batch is returned.
"""
runner = CliRunner()
result = runner.invoke(cli, [
'batch',
'get',
'--id',
BATCH_IDENTIFIER_PRESENTED_ON_THE_TEST_NODE,
'--node-url',
DEV_BRANCH_NODE_IP_ADDRESS_FOR_TESTING,
])
assert PASSED_EXIT_FROM_COMMAND_CODE == result.exit_code
assert isinstance(json.loads(result.output), dict)
def test_get_batch_with_invalid_id():
"""
Case: get a batch by its invalid identifier.
Expect: the following identifier is invalid error message.
"""
invalid_batch_id = 'abcefg'
runner = CliRunner()
result = runner.invoke(cli, [
'batch',
'get',
'--id',
invalid_batch_id,
'--node-url',
DEV_BRANCH_NODE_IP_ADDRESS_FOR_TESTING,
])
expected_error_message = {
'errors': {
'id': [
f'The following identifier `{invalid_batch_id}` is invalid.',
],
},
}
assert FAILED_EXIT_FROM_COMMAND_CODE == result.exit_code
assert dict_to_pretty_json(expected_error_message) in result.output
def test_get_batch_without_node_url(mocker):
"""
Case: get a batch by its identifier without passing node URL.
Expect: batch is returned from a node on localhost.
"""
batch_id = '6f200995e766da7218ec2a3d0aeabbe1151128063cdf4e954cd08390a879b28e' \
'085a06f8708d2e6bb34f6501e8ddc981f0353627c1d4f90c80a656a8090c8751'
expected_result = {
"data": {
"header": {
"signer_public_key": "<KEY>",
"transaction_ids": [
"5a84ff8747e16d15a988a8b13134d24981a6b516bb41042e6ea95c47f6c9429c"
"1c6fdf787ca2ea7fb8725b2bc2d0cd6aa3836aadfe85354deb714e048d41b4d7",
],
},
"header_signature": "57692f2bcc9be7fe2b59c052d5938eb92bd7be8a36487c1c7efc2c5758bf108e"
"232892987e898071e5ea13b4cbe283e96ac45d8f63cd9065522df7b85b050977",
"transactions": [
{
"header": {
"batcher_public_key": "<KEY>",
"family_name": "sawtooth_settings",
"family_version": "1.0",
"inputs": [
"<KEY>",
"000000a87cb5eafdcca6a8cde0fb0dec1400c5ab274474a6aa82c12840f169a04216b7",
],
"outputs": [
"<KEY>",
],
"signer_public_key": "<KEY>",
},
"header_signature": "5a84ff8747e16d15a988a8b13134d24981a6b516bb41042e6ea95c47f6c9429c"
"1c6fdf787ca2ea7fb8725b2bc2d0cd6aa3836aadfe85354deb714e048d41b4d7",
"payload": "CAESgAEKJnNhd3Rvb3RoLnNldHRpbmdzLnZvdGUuYyaXplZF9rZXlzEkIwM2Q0MjVkMmQxN2I2NGUzZWY4Zm"
"VlMDI4MDg5YTU2N2ZiYjA1YmQ1NTZmOThjMGI2ZmIJjNMGVhNjJiOGYaEjB4ZDU0NzJhOTY1NWJkYTNmNg==",
},
],
},
}
mock_get_batch_by_id = mocker.patch('cli.batch.service.loop.run_until_complete')
mock_get_batch_by_id.return_value = expected_result
runner = CliRunner()
result = runner.invoke(cli, [
'batch',
'get',
'--id',
batch_id,
])
assert PASSED_EXIT_FROM_COMMAND_CODE == result.exit_code
assert expected_result.get('data') == json.loads(result.output).get('result')
def test_get_batch_with_invalid_node_url():
"""
Case: get a batch by its identifier by passing an invalid node URL.
Expect: the following node URL is invalid error message.
"""
invalid_node_url = 'my-node-url.com'
runner = CliRunner()
result = runner.invoke(cli, [
'batch',
'get',
'--id',
BATCH_IDENTIFIER_PRESENTED_ON_THE_TEST_NODE,
'--node-url',
invalid_node_url,
])
expected_error_message = {
'errors': f'Please check if your node running at http://{invalid_node_url}:8080.',
}
assert FAILED_EXIT_FROM_COMMAND_CODE == result.exit_code
assert dict_to_pretty_json(expected_error_message) in result.output
@pytest.mark.parametrize('node_url_with_protocol', ['http://masternode.com', 'https://masternode.com'])
def test_get_batch_node_url_with_protocol(node_url_with_protocol):
"""
Case: get a batch by its identifier by passing node URL with an explicit protocol.
Expect: the following node URL contains a protocol error message.
"""
runner = CliRunner()
result = runner.invoke(cli, [
'batch',
'get',
'--id',
BATCH_IDENTIFIER_PRESENTED_ON_THE_TEST_NODE,
'--node-url',
node_url_with_protocol,
])
expected_error = {
'errors': {
'node_url': [
f'Pass the following node URL `{node_url_with_protocol}` without protocol (http, https, etc.).',
],
},
}
assert FAILED_EXIT_FROM_COMMAND_CODE == result.exit_code
assert dict_to_pretty_json(expected_error) in result.output
| [
"pytest.mark.parametrize",
"json.loads",
"click.testing.CliRunner",
"cli.utils.dict_to_pretty_json"
]
| [((4824, 4930), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""node_url_with_protocol"""', "['http://masternode.com', 'https://masternode.com']"], {}), "('node_url_with_protocol', ['http://masternode.com',\n 'https://masternode.com'])\n", (4847, 4930), False, 'import pytest\n'), ((583, 594), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (592, 594), False, 'from click.testing import CliRunner\n'), ((1138, 1149), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (1147, 1149), False, 'from click.testing import CliRunner\n'), ((3843, 3854), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (3852, 3854), False, 'from click.testing import CliRunner\n'), ((4356, 4367), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (4365, 4367), False, 'from click.testing import CliRunner\n'), ((5180, 5191), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (5189, 5191), False, 'from click.testing import CliRunner\n'), ((891, 916), 'json.loads', 'json.loads', (['result.output'], {}), '(result.output)\n', (901, 916), False, 'import json\n'), ((1590, 1633), 'cli.utils.dict_to_pretty_json', 'dict_to_pretty_json', (['expected_error_message'], {}), '(expected_error_message)\n', (1609, 1633), False, 'from cli.utils import dict_to_pretty_json\n'), ((4760, 4803), 'cli.utils.dict_to_pretty_json', 'dict_to_pretty_json', (['expected_error_message'], {}), '(expected_error_message)\n', (4779, 4803), False, 'from cli.utils import dict_to_pretty_json\n'), ((5676, 5711), 'cli.utils.dict_to_pretty_json', 'dict_to_pretty_json', (['expected_error'], {}), '(expected_error)\n', (5695, 5711), False, 'from cli.utils import dict_to_pretty_json\n'), ((4066, 4091), 'json.loads', 'json.loads', (['result.output'], {}), '(result.output)\n', (4076, 4091), False, 'import json\n')] |
import sys
from gssl.datasets import load_dataset
from gssl.inductive.datasets import load_ppi
from gssl.utils import seed
def main():
seed()
# Read dataset name
dataset_name = sys.argv[1]
# Load dataset
if dataset_name == "PPI":
load_ppi()
else:
load_dataset(name=dataset_name)
if __name__ == "__main__":
main()
| [
"gssl.inductive.datasets.load_ppi",
"gssl.utils.seed",
"gssl.datasets.load_dataset"
]
| [((142, 148), 'gssl.utils.seed', 'seed', ([], {}), '()\n', (146, 148), False, 'from gssl.utils import seed\n'), ((263, 273), 'gssl.inductive.datasets.load_ppi', 'load_ppi', ([], {}), '()\n', (271, 273), False, 'from gssl.inductive.datasets import load_ppi\n'), ((292, 323), 'gssl.datasets.load_dataset', 'load_dataset', ([], {'name': 'dataset_name'}), '(name=dataset_name)\n', (304, 323), False, 'from gssl.datasets import load_dataset\n')] |
from flask_wtf import FlaskForm
from wtforms import StringField, BooleanField, PasswordField
from wtforms.validators import InputRequired, Email, EqualTo, Length
class LoginForm(FlaskForm):
nickname = StringField('nickname', validators=[InputRequired()])
password = PasswordField('password', validators=[InputRequired()])
remember_me = BooleanField('remember_me', default=False)
class RegisterForm(FlaskForm):
nickname = StringField('nickname', validators=[InputRequired()])
email = StringField('email', validators=[InputRequired(), Email()])
password = PasswordField('password', validators=[InputRequired(),
EqualTo('confirm', message='Password must match')])
confirm = PasswordField('<PASSWORD>')
class NewPostForm(FlaskForm):
    body = StringField('body', validators=[InputRequired(), Length(max=140)])
| [
"wtforms.validators.Email",
"wtforms.BooleanField",
"wtforms.PasswordField",
"wtforms.validators.EqualTo",
"wtforms.validators.Length",
"wtforms.validators.InputRequired"
]
| [((343, 385), 'wtforms.BooleanField', 'BooleanField', (['"""remember_me"""'], {'default': '(False)'}), "('remember_me', default=False)\n", (355, 385), False, 'from wtforms import StringField, BooleanField, PasswordField\n'), ((744, 771), 'wtforms.PasswordField', 'PasswordField', (['"""<PASSWORD>"""'], {}), "('<PASSWORD>')\n", (757, 771), False, 'from wtforms import StringField, BooleanField, PasswordField\n'), ((236, 251), 'wtforms.validators.InputRequired', 'InputRequired', ([], {}), '()\n', (249, 251), False, 'from wtforms.validators import InputRequired, Email, EqualTo, Length\n'), ((307, 322), 'wtforms.validators.InputRequired', 'InputRequired', ([], {}), '()\n', (320, 322), False, 'from wtforms.validators import InputRequired, Email, EqualTo, Length\n'), ((464, 479), 'wtforms.validators.InputRequired', 'InputRequired', ([], {}), '()\n', (477, 479), False, 'from wtforms.validators import InputRequired, Email, EqualTo, Length\n'), ((527, 542), 'wtforms.validators.InputRequired', 'InputRequired', ([], {}), '()\n', (540, 542), False, 'from wtforms.validators import InputRequired, Email, EqualTo, Length\n'), ((544, 551), 'wtforms.validators.Email', 'Email', ([], {}), '()\n', (549, 551), False, 'from wtforms.validators import InputRequired, Email, EqualTo, Length\n'), ((607, 622), 'wtforms.validators.InputRequired', 'InputRequired', ([], {}), '()\n', (620, 622), False, 'from wtforms.validators import InputRequired, Email, EqualTo, Length\n'), ((678, 727), 'wtforms.validators.EqualTo', 'EqualTo', (['"""confirm"""'], {'message': '"""Password must match"""'}), "('confirm', message='Password must match')\n", (685, 727), False, 'from wtforms.validators import InputRequired, Email, EqualTo, Length\n'), ((841, 856), 'wtforms.validators.InputRequired', 'InputRequired', ([], {}), '()\n', (854, 856), False, 'from wtforms.validators import InputRequired, Email, EqualTo, Length\n'), ((858, 873), 'wtforms.validators.Length', 'Length', ([], {'max': '(140)'}), '(max=140)\n', (864, 873), False, 'from wtforms.validators import InputRequired, Email, EqualTo, Length\n')] |
from problem import Problem
from typing import Any, Tuple
from random import randint
import ast
import json
def gen_num():
return str(randint(1, 9))
def gen_op():
return "+-*/"[randint(0, 3)]
def gen_expr(depth):
if randint(0, depth) == 0:
l = gen_expr(depth + 1)
r = gen_expr(depth + 1)
op = gen_op()
return f"({l}{op}{r})"
return f"({gen_num()})"
class ASTMath(Problem):
@property
def name(self) -> str:
return "AST Math"
@property
def desciption(self) -> str:
return """
Input: An AST of Python's arithmetic expression (only +,-,*,/)
Output: Result number
Examples:
Input: {"body": {"left": {"value": 1, "kind": null, "lineno": 1, "col_offset": 0, "end_lineno": 1, "end_col_offset": 1}, "op": "<_ast.Add object at 0x7f0387ccde20>", "right": {"value": 2, "kind": null, "lineno": 1, "col_offset": 2, "end_lineno": 1, "end_col_offset": 3}, "lineno": 1, "col_offset": 0, "end_lineno": 1, "end_col_offset": 3}}
Output: 3
Input: {"body": {"left": {"left": {"value": 8, "kind": null, "lineno": 1, "col_offset": 1, "end_lineno": 1, "end_col_offset": 2}, "op": "<_ast.Mult object at 0x7f20eb76aee0>", "right": {"value": 7, "kind": null, "lineno": 1, "col_offset": 3, "end_lineno": 1, "end_col_offset": 4}, "lineno": 1, "col_offset": 1, "end_lineno": 1, "end_col_offset": 4}, "op": "<_ast.Sub object at 0x7f20eb76ae80>", "right": {"left": {"value": 6, "kind": null, "lineno": 1, "col_offset": 7, "end_lineno": 1, "end_col_offset": 8}, "op": "<_ast.Mult object at 0x7f20eb76aee0>", "right": {"value": 3, "kind": null, "lineno": 1, "col_offset": 9, "end_lineno": 1, "end_col_offset": 10}, "lineno": 1, "col_offset": 7, "end_lineno": 1, "end_col_offset": 10}, "lineno": 1, "col_offset": 0, "end_lineno": 1, "end_col_offset": 11}}
Output: 38
"""
@property
def rounds(self) -> int:
return 10
def dumps(self, x):
return json.dumps(
x, default=lambda x: x.__dict__ if len(x.__dict__) else str(x)
)
def generate_testcase(self) -> Tuple[bool, Any]:
l = gen_expr(1)
r = gen_expr(1)
op = gen_op()
expr = f"{l}{op}{r}"
try:
result = eval(expr)
except ZeroDivisionError:
return self.generate_testcase()
return ast.parse(expr, mode="eval"), result
| [
"ast.parse",
"random.randint"
]
| [((140, 153), 'random.randint', 'randint', (['(1)', '(9)'], {}), '(1, 9)\n', (147, 153), False, 'from random import randint\n'), ((189, 202), 'random.randint', 'randint', (['(0)', '(3)'], {}), '(0, 3)\n', (196, 202), False, 'from random import randint\n'), ((234, 251), 'random.randint', 'randint', (['(0)', 'depth'], {}), '(0, depth)\n', (241, 251), False, 'from random import randint\n'), ((2316, 2344), 'ast.parse', 'ast.parse', (['expr'], {'mode': '"""eval"""'}), "(expr, mode='eval')\n", (2325, 2344), False, 'import ast\n')] |
import tensorflow as tf
import numpy as np
from graphsage.models import FCPartition
from graphsage.partition_train import construct_placeholders
from graphsage.utils import load_graph_data, load_embedded_data, load_embedded_idmap
flags = tf.app.flags
FLAGS = flags.FLAGS
# flags.DEFINE_integer('dim_1', 128, 'Size of output dim (final is 2x this, if using concat)')
# DIR = 'trained_models'
# MODEL = 'partition'
# with tf.Session() as sess:
# new_saver = tf.train.import_meta_graph(DIR+'/'+MODEL+'.ckpt.meta')
# new_saver.restore(sess, tf.train.latest_checkpoint(DIR + '/./'))
# new_saver.run()
# print(new_saver)
def predict(train_data, id_map):
num_classes = 3
placeholders = construct_placeholders(num_classes)
placeholders['features'] = train_data
# feed_dict = dict()
# train_data = train_data.astype('float32')
# feed_dict.update({placeholders['features']: train_data})
dim = []
# print("f:{}".format(len(train_data[0])))
dim.append(len(train_data[0]))
dim.append(FLAGS.dim_1)
dim.append(num_classes)
model = FCPartition(placeholders, dim)
sess = tf.Session()
model.load(sess)
results = model.predict()
results_np = results.eval(session=sess)
# print(results.eval(session=sess))
# print(results_np.shape)
id_map = id_map.astype('int')
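    # prepend the node ids as the first column and sort the rows by id before saving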
results_np = np.expand_dims(results_np, axis=1)
results_np = np.insert(results_np, 0, id_map, axis=1)
results_np = results_np[results_np[:,0].argsort()]
print(results_np)
np.save(FLAGS.outDir+'/predict_predict.npy', results_np)
def main():
print("load data ...")
train_data = load_embedded_data(FLAGS.train_prefix)
id_map = load_embedded_idmap(FLAGS.train_prefix)
predict(train_data, id_map)
if __name__ == '__main__':
    main()
| [
"numpy.insert",
"graphsage.partition_train.construct_placeholders",
"graphsage.utils.load_embedded_idmap",
"graphsage.utils.load_embedded_data",
"tensorflow.Session",
"graphsage.models.FCPartition",
"numpy.expand_dims",
"numpy.save"
]
| [((744, 779), 'graphsage.partition_train.construct_placeholders', 'construct_placeholders', (['num_classes'], {}), '(num_classes)\n', (766, 779), False, 'from graphsage.partition_train import construct_placeholders\n'), ((1132, 1162), 'graphsage.models.FCPartition', 'FCPartition', (['placeholders', 'dim'], {}), '(placeholders, dim)\n', (1143, 1162), False, 'from graphsage.models import FCPartition\n'), ((1175, 1187), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (1185, 1187), True, 'import tensorflow as tf\n'), ((1411, 1445), 'numpy.expand_dims', 'np.expand_dims', (['results_np'], {'axis': '(1)'}), '(results_np, axis=1)\n', (1425, 1445), True, 'import numpy as np\n'), ((1464, 1504), 'numpy.insert', 'np.insert', (['results_np', '(0)', 'id_map'], {'axis': '(1)'}), '(results_np, 0, id_map, axis=1)\n', (1473, 1504), True, 'import numpy as np\n'), ((1589, 1647), 'numpy.save', 'np.save', (["(FLAGS.outDir + '/predict_predict.npy')", 'results_np'], {}), "(FLAGS.outDir + '/predict_predict.npy', results_np)\n", (1596, 1647), True, 'import numpy as np\n'), ((1709, 1747), 'graphsage.utils.load_embedded_data', 'load_embedded_data', (['FLAGS.train_prefix'], {}), '(FLAGS.train_prefix)\n', (1727, 1747), False, 'from graphsage.utils import load_graph_data, load_embedded_data, load_embedded_idmap\n'), ((1762, 1801), 'graphsage.utils.load_embedded_idmap', 'load_embedded_idmap', (['FLAGS.train_prefix'], {}), '(FLAGS.train_prefix)\n', (1781, 1801), False, 'from graphsage.utils import load_graph_data, load_embedded_data, load_embedded_idmap\n')] |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
Copyright (c) 2015, <NAME> <<EMAIL>>, <NAME> <<EMAIL>>
This parser reads annotated sentences (output from get_relations.py) in a tab-separated format to generate a unified XML format (Tikk et al., 2010. A comprehensive benchmark of kernel methods to extract protein-protein interactions from literature. PLoS Comput. Biol).
"""
# module to make use of regular expressions
import re
# set the default encoding to utf8 and ignore all decoding/encoding steps.
# (ToDo: check whether the encoding command is needed - debug)
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
# optparse - Parser for command-line options
from optparse import OptionParser
# import this function to add quotation marks around the input text and escape any extra quotation marks inside the sentence text
#from xml.sax.saxutils import escape # (ToDo: not needed - debug)
from xml.sax.saxutils import quoteattr
### MAIN PART OF THE SCRIPT ###
if __name__=="__main__":
# configure parsing of command-line arguments
parser= OptionParser()
parser.add_option("-i", "--input", dest="i", help='name of the input file',default="training_dataset_sorted.csv")
parser.add_option("-o", "--output", dest="o", help='name of the output file',default="DS1.xml")
(options,args)=parser.parse_args()
# save parameters in an extra variable
input_file= options.i
output_file = options.o
# open input file with annotated sentences
infile = open(input_file,"r")
# open output file
outfile = open(output_file,"w")
#example for the input format:
#18227838-359 The mood stabilizers <compound-id="28486,3028194">lithium</compound-id> and <compound-id="3121">valproate</compound-id> activate the <protein-id="P29323">ERK</protein-id> pathway in prefrontal cortex and hippocampus and potentiate <protein-id="P29323">ERK</protein-id> pathway-mediated neurite growth, neuronal survival and hippocampal neurogenesis. lithium__ERK__no_interaction valproate__ERK__interaction
#example for the output format
"""
<?xml version="1.0" encoding="UTF-8">
<corpus source="DS1">
<document id="DS1.d0" origId="18227838">
<sentence id="DS1.d0.s0" origId="18227838-359" text="The mood stabilizers lithium and valproate activate the ERK pathway in prefrontal cortex and hippocampus and potentiate ERK pathway-mediated neurite growth, neuronal survival and hippocampal neurogenesis."/>
<entity id="DS1.d0.s0.e0" origId="28486,3028194" charOffset="x1-y1" type="compound" text="lithium"/>
<entity id="DS1.d0.s0.e1" origId="3121" charOffset="x2-y2" type="compound" text="valproate"/>
<entity id="DS1.d0.s0.e2" origId="P29323" charOffset="x3-y3" type="protein" text="ERK"/>
<interaction id="DS1.d0.s0.i0" e1="DS1.do.s0.e0" e2="DS1.do.s0.e2" type="no_interaction" directed="False" />
<interaction id="DS1.d0.s0.i1" e1="DS1.do.s0.e1" e2="DS1.do.s0.e2" type="interaction" directed="False" />
</sentence>
[...]
</document>
[...]
</corpus>
"""
# add XML header and define corpus source
outfile.write("<?xml version=\"1.0\" encoding=\"UTF-8\"?>"+"\n")
outfile.write("<corpus source=\"DS1\">"+"\n")
# variable to store and compare the last read PubMed ID to notice whether there are multiple sentences with the same PubMed ID or not
# the document ID refers to the PubMed ID (origID)
pre_pmid=""
# doc_num counts the number of created documents
doc_num =0
# read lines in CSV file
for line in infile :
# tab-separated format
temp = line.strip().split("\t")
# get PubMed ID, sentences ID, and the sentence itself
# (ToDo: use a split command instead of this regular expression - debug)
curr_pmid = re.match('(\d{8})',temp[0]).group(0)
pmid_sent_num = temp[0]
sentence_text = temp[1]
# find all annotated proteins and compounds by matching their tags
pro_positions= [(a.start(), a.end()) for a in list(re.finditer('<protein-id="(.*?)">(.*?)</protein-id>',sentence_text))]
cmp_positions = [(a.start(), a.end()) for a in list(re.finditer('<compound-id="(.*?)">(.*?)</compound-id>',sentence_text))]
# join the two lists
positions = pro_positions + cmp_positions
positions.sort()
#Initialize the list with the number of identified tags
entity_list =[]
entity_list=[0]*len(positions)
# iterate over all identified positions of the identified tags
for i in range(len(positions)):
            # initialize the second dimension of the list with a length of four (entity_type,entity_id,entity_text,entity_charoffset)
entity_list[i]=[0]*4
# store these four elements with grouping in the regular expression
obj = re.match('<(protein|compound)-id="(.*?)">(.*?)</(protein-id|compound-id)>',sentence_text[positions[i][0]:positions[i][1]])
entity_list[i][0]=obj.group(1) #entity_type
entity_list[i][1]=obj.group(2) #entity_id
entity_list[i][2]=obj.group(3) #entity_text
entity_list[i][2]=entity_list[i][2].replace("[","(").replace("]",")")
            # the entity_charoffset will be assigned later, after the pure sentence text (without any tags) has been generated
# the sentence without any tags will be generated by deleting all tags via text concatenation
# initialize (ToDo: initialization like this not needed - debug)
pur_sent_text = sentence_text
# enumerate over the list of positions (index, value)
for i,e in reversed(list(enumerate(positions))):
pur_sent_text = pur_sent_text[0:positions[i][0]]+entity_list[i][2]+pur_sent_text[positions[i][1]:]
# get the character offset of all identified synonyms
# decode the sentences to UTF8 to prevent the usage of more than one character for special letters, symbols, etc.
# make use of a list of repeated synonyms and synonym positions
repeated_syn_pos =[]
rep_syn =[]
for i in range(len(entity_list)) :
            # check whether this is the first occurrence of the current synonym
if not entity_list[i][2] in rep_syn :
                # get the list of positions of all occurrences of the current synonym
u_pur_sent_text = pur_sent_text.decode("utf8")
charoffset_value = [(a.start(), a.end()) for a in list(re.finditer(re.escape(entity_list[i][2]),u_pur_sent_text))]
                # check whether it occurs only once so that the charoffset can be assigned directly
if len(charoffset_value) == 1 :
entity_list[i][3] = str(charoffset_value[0][0])+"-"+str(charoffset_value[0][1])
else:
                    # if it occurs more than once, the charoffset has to be assigned according to the first pair of positions
entity_list[i][3] = str(charoffset_value[0][0])+"-"+str(charoffset_value[0][1])
# append this synonym to the rep_syn list to store all repeated synonyms in this sentence
rep_syn.append(entity_list[i][2])
                    # delete the first pair of positions from the list
charoffset_value = charoffset_value[1:]
                    # add the remaining position pairs for the current synonym to another list
for j in range(len(charoffset_value)):
repeated_syn_pos.append([entity_list[i][2],charoffset_value[j][0],charoffset_value[j][1]])
else:
# this case refers to at least the second occurrence of the synonym
# for each repeated synonym, assign the first position pair from the repeated_syn_pos list
for k in range(len(repeated_syn_pos)):
if repeated_syn_pos[k][0] == entity_list[i][2]:
break
entity_list[i][3] = str(repeated_syn_pos[k][1])+"-"+str(repeated_syn_pos[k][2])
# get pairs and their interaction status (separated by a double underscore)
listof_int_noint = temp[2:]
interaction_list=[0]*len(listof_int_noint)
for i in range(len(listof_int_noint)):
interaction_list[i]=listof_int_noint[i].split('__')
# interaction/no_interaction corresponds to True/False
TF_int_list=[0]*len(interaction_list)
for intid in range(len(interaction_list)) :
if interaction_list[intid][2]=="interaction" :
TF_int_list[intid]="True"
else :
TF_int_list[intid]="False"
# debug:
# print TF_int_list
# build XML structure
        # check whether the PubMed ID changed in comparison to the last parsed sentence
if curr_pmid == pre_pmid :
# if this is the case, only the sentence ID has to be increased
sent_num +=1
# add sentence ID using the current document number
# (doc_num has to be decreased by one, because this index is automatically increased after each sentence)
            # all opening and closing square brackets ([,]) are replaced with round brackets because they cause problems in the tokenization step of the (preprocessing) pipeline
pur_sent_text = pur_sent_text.replace("[","(").replace("]",")")
outfile.write(" <sentence id=\"DS1.d"+str(doc_num-1)+".s"+str(sent_num)+"\" origId=\""+str(pmid_sent_num)+"\" text="+quoteattr(pur_sent_text)+">"+"\n")
            # build entity tags according to the list of identified tags from the CSV file (entity_list)
for i in range(0,len(entity_list)) :
outfile.write(" <entity id=\"DS1.d"+str(doc_num-1)+".s"+str(sent_num)+".e"+str(i)+"\" origId=\""+entity_list[i][1]+"\" charOffset=\""+entity_list[i][3]+"\" type=\""+entity_list[i][0]+"\" text=\""+entity_list[i][2]+"\"/>"+"\n")
# insert types of interaction for each pair of entities
# get the index of the synonym interactions in entity_list
origId = "DS1.d"+str(doc_num-1)+".s"+str(sent_num)
for int_id in range(len(interaction_list)) :
for ent_id in range(len(entity_list)):
if interaction_list[int_id][0] in entity_list[ent_id]:
break
first_entity=ent_id
for k in range(len(entity_list)):
if interaction_list[int_id][1] in entity_list[k]:
break
second_entity=k
outfile.write(" <pair e1=\""+origId+".e"+str(first_entity)+"\" e2=\""+origId+".e"+str(second_entity)+"\" id=\""+origId+".i"+str(int_id)+"\" interaction=\""+TF_int_list[int_id]+"\" />"+"\n")
# close sentence tag
outfile.write(" </sentence>\n")
        # if the current PubMed ID changed in comparison to the last parsed sentence
else :
if not doc_num == 0 :
outfile.write(" </document>\n")
sent_num =0
# a new document tag has to be opened and the sentences can be added
outfile.write(" <document id=\"DS1.d"+str(doc_num)+"\" origId=\""+str(curr_pmid)+"\">"+"\n")
            # replace square brackets ([,]) with round brackets
pur_sent_text = pur_sent_text.replace("[","(").replace("]",")")
outfile.write(" <sentence id=\"DS1.d"+str(doc_num)+".s"+str(sent_num)+"\" origId=\""+str(pmid_sent_num)+"\" text="+quoteattr(pur_sent_text)+">"+"\n")
            # build entity tags according to entity_list data
for i in range(0,len(entity_list)) :
outfile.write(" <entity id=\"DS1.d"+str(doc_num)+".s"+str(sent_num)+".e"+str(i)+"\" origId=\""+entity_list[i][1]+"\" charOffset=\""+entity_list[i][3]+"\" type=\""+entity_list[i][0]+"\" text=\""+entity_list[i][2]+"\"/>"+"\n")
            # build interaction (pair) tags
origId = "DS1.d"+str(doc_num)+".s"+str(sent_num)
for int_id in range(len(interaction_list)) :
for ent_id in range(len(entity_list)):
if interaction_list[int_id][0] in entity_list[ent_id]:
break
first_entity=ent_id
for k in range(len(entity_list)):
if interaction_list[int_id][1] in entity_list[k]:
break
second_entity=k
outfile.write(" <pair e1=\""+origId+".e"+str(first_entity)+"\" e2=\""+origId+".e"+str(second_entity)+"\" id=\""+origId+".i"+str(int_id)+"\" interaction=\""+TF_int_list[int_id]+"\" />"+"\n")
# close sentence tag
outfile.write(" </sentence>\n")
# set new PubMed ID as the last parsed document ID and increase document index
pre_pmid = curr_pmid
doc_num+=1
# close document tag
outfile.write("</document>\n")
# close corpus tag
outfile.write("</corpus>\n")
# close files
infile.close()
outfile.close()
| [
"re.escape",
"sys.setdefaultencoding",
"xml.sax.saxutils.quoteattr",
"re.match",
"optparse.OptionParser",
"re.finditer"
]
| [((605, 636), 'sys.setdefaultencoding', 'sys.setdefaultencoding', (['"""utf-8"""'], {}), "('utf-8')\n", (627, 636), False, 'import sys\n'), ((1063, 1077), 'optparse.OptionParser', 'OptionParser', ([], {}), '()\n', (1075, 1077), False, 'from optparse import OptionParser\n'), ((4887, 5014), 're.match', 're.match', (['"""<(protein|compound)-id="(.*?)">(.*?)</(protein-id|compound-id)>"""', 'sentence_text[positions[i][0]:positions[i][1]]'], {}), '(\'<(protein|compound)-id="(.*?)">(.*?)</(protein-id|compound-id)>\',\n sentence_text[positions[i][0]:positions[i][1]])\n', (4895, 5014), False, 'import re\n'), ((3842, 3871), 're.match', 're.match', (['"""(\\\\d{8})"""', 'temp[0]'], {}), "('(\\\\d{8})', temp[0])\n", (3850, 3871), False, 'import re\n'), ((4078, 4146), 're.finditer', 're.finditer', (['"""<protein-id="(.*?)">(.*?)</protein-id>"""', 'sentence_text'], {}), '(\'<protein-id="(.*?)">(.*?)</protein-id>\', sentence_text)\n', (4089, 4146), False, 'import re\n'), ((4209, 4279), 're.finditer', 're.finditer', (['"""<compound-id="(.*?)">(.*?)</compound-id>"""', 'sentence_text'], {}), '(\'<compound-id="(.*?)">(.*?)</compound-id>\', sentence_text)\n', (4220, 4279), False, 'import re\n'), ((9612, 9636), 'xml.sax.saxutils.quoteattr', 'quoteattr', (['pur_sent_text'], {}), '(pur_sent_text)\n', (9621, 9636), False, 'from xml.sax.saxutils import quoteattr\n'), ((11699, 11723), 'xml.sax.saxutils.quoteattr', 'quoteattr', (['pur_sent_text'], {}), '(pur_sent_text)\n', (11708, 11723), False, 'from xml.sax.saxutils import quoteattr\n'), ((6533, 6561), 're.escape', 're.escape', (['entity_list[i][2]'], {}), '(entity_list[i][2])\n', (6542, 6561), False, 'import re\n')] |
# Kontsioti, Maskell, Dutta & Pirmohamed, A reference set of clinically relevant
# adverse drug-drug interactions (2021)
# Code to extract single-drug side effect data from the BNF website
from bs4 import BeautifulSoup
import urllib
import os, csv
import numpy as np
import pandas as pd
import re
from tqdm import tqdm
URL_BEGINNING = 'https://bnf.nice.org.uk/drug/'
print('beginning scrape for individual drugs...')
# Fetch the HTML containing the full list of APIs.
r = urllib.request.urlopen(URL_BEGINNING).read()
soup1 = BeautifulSoup(r, 'lxml')
# Extract the full URL list.
URL_list = []
for s in soup1.find_all('div', {'class': 'span11'}):
for ai in s(href=True):
temp = URL_BEGINNING + ai['href']
URL_list.append(temp)
print(URL_list)
# Create an empty dataframe for storing the extracted data for APIs.
scraped_API_count = 0
scraped_API = pd.DataFrame(np.nan, index = range(0,160000), columns = ['API', 'AE', 'Frequency'], dtype = str)
row_count = 0
# Empty list to store API mappings to their drug class (if applicable).
API_to_drugclass = []
# Scrape individual drug (API) side effects.
HIGHEST_API_ID = len(URL_list)
for id in tqdm(range(0, HIGHEST_API_ID)):
# Try to fetch the HTML for each API.
try:
l = urllib.request.urlopen(URL_list[id]).read()
# If the page returns a 404 error, skip this id.
except urllib.error.HTTPError as e:
if e.getcode() == 404:
continue
raise
    # Add one to the count of successfully scraped products.
scraped_API_count += 1
soup2 = BeautifulSoup(l, 'lxml')
API = soup2.find('h1', id= '').span.getText()
# Extract the relevant information to a dataframe.
# In case the API contains a side effect section.
if soup2.find('section', {'id':'sideEffects'}):
ae_list = soup2.find_all('span', {'class': 'sideEffect'})
for a in ae_list:
adv_event = a.getText()
scraped_API.at[row_count, 'API'] = API
scraped_API.at[row_count,'AE'] = adv_event
freq = a.parent.parent.parent.h4.getText()
scraped_API.at[row_count, 'Frequency'] = freq
row_count += 1
# Check if the drug belongs to a specific drug class. If yes, extract
# the drug class name and the link to the corresponding webpage.
if soup2.find('section', {'id':'sideEffects'}).find('a', href = re.compile(r'.*/drug-class/.*')):
temp = []
temp.append(API)
drug_class = soup2.find('a', href = re.compile(r'.*/drug-class/.*')).span.getText()
temp.append(drug_class)
li = soup2.find('section', {'id':'sideEffects'}).find('a', href = re.compile(r'.*/drug-class/.*'))['href']
drug_class_link = 'https://bnf.nice.org.uk' + str(li)
temp.append(drug_class_link)
API_to_drugclass.append(temp)
# In case the API does not contain a side effect section.
else:
adv_event = 'NO AEs MENTIONED'
scraped_API.at[row_count, 'API'] = API
scraped_API.at[row_count,'AE'] = adv_event
scraped_API.at[row_count,'Frequency'] = ''
row_count += 1
# Remove empty rows from the dataframe that contains the extracted data.
scraped_API_dropna = scraped_API[~scraped_API.isin(['n']).any(axis=1)]
# Remove spaces at the beginning and at the end of the text fields.
scraped_API_dropna['API'] = scraped_API_dropna['API'].str.strip()
scraped_API_dropna['AE'] = scraped_API_dropna['AE'].str.strip()
scraped_API_dropna['Frequency'] = scraped_API_dropna['Frequency'].str.strip()
print('BNF individual side effects successfully scraped.')
print('beginning scrape for drug classes...')
# Create a dataframe with drug names, drug classes and related URLs (where applicable).
API_class_df = pd.DataFrame(API_to_drugclass, columns = ['API','Drug_Class','Link'])
# Create a list with all the links for the drug class webpages.
class_links = API_class_df['Link'].unique().tolist()
# Scrape drug class side effects.
HIGHEST_DRUG_CLASS_ID = len(class_links)
scraped_class_count = 0
# Create an empty dataframe for storing the extracted data for drug classes.
scraped_class = pd.DataFrame(np.nan, index = range(0,160000), columns = ['Drug_Class', 'AE', 'Frequency'], dtype = str)
row_count_2 = 0
for id in tqdm(range(0, HIGHEST_DRUG_CLASS_ID)):
# Try to fetch the HTML for each drug class.
try:
l = urllib.request.urlopen(class_links[id]).read()
# If the page returns a 404 error, skip this id.
except urllib.error.HTTPError as e:
if e.getcode() == 404:
continue
raise
    # Add one to the count of successfully scraped drug classes.
scraped_class_count += 1
soup3 = BeautifulSoup(l, 'lxml')
# Extract the drug class name.
class_name = soup3.find('h1', id= '').span.getText()
# Extract the relevant information to a dataframe.
class_ae_list = soup3.find_all('span', {'class': 'sideEffect'})
for a in class_ae_list:
adv_event = a.getText()
scraped_class.at[row_count_2, 'Drug_Class'] = class_name
scraped_class.at[row_count_2,'AE'] = adv_event
freq = a.parent.parent.parent.h4.getText()
scraped_class.at[row_count_2, 'Frequency'] = freq
row_count_2 += 1
# Remove empty rows from the dataframe that contains the extracted data.
scraped_class_dropna = scraped_class[~scraped_class.isin(['n']).any(axis=1)]
# Remove spaces at the beginning and at the end of the text fields.
scraped_class_dropna['Drug_Class'] = scraped_class_dropna['Drug_Class'].str.strip()
scraped_class_dropna['AE'] = scraped_class_dropna['AE'].str.strip()
scraped_class_dropna['Frequency'] = scraped_class_dropna['Frequency'].str.strip()
print('BNF drug class side effects successfully scraped.')
print('combine extracted data...')
## Combine both tables by adding drug class side effects to the individual
## ingredients of each drug class.
# Create a dictionary that contains all drug classes as keys and side effects
# with associated frequencies as values.
AEs_by_class_dict = scraped_class_dropna.groupby('Drug_Class')[['AE', 'Frequency']].apply(lambda g: list(map(tuple, g.values.tolist()))).to_dict()
# Remove URL column
API_class_df.drop(columns = 'Link', inplace = True)
# Create a dataframe with drug class as the index of APIs (if available)
# and add their drug class side effects and associated frequencies.
API_class_df['Drug_Class'] = API_class_df['Drug_Class'].str.strip()
API_class_df.set_index('Drug_Class', inplace = True)
API_class_df['AE_freq_tuple'] = API_class_df.index.to_series().map(AEs_by_class_dict)
API_class_df.reset_index(inplace=True)
# Create a new dataframe to store drug class side effect data for each API.
AEs_from_class_df = API_class_df.explode('AE_freq_tuple').reset_index(drop=True)
AEs_from_class_df[['AE', 'Frequency']] = pd.DataFrame(AEs_from_class_df['AE_freq_tuple'].tolist(), index = AEs_from_class_df.index)
AEs_from_class_df['from_drug_class'] = 'Yes'
AEs_from_class_df.drop(columns = ['AE_freq_tuple','Drug_Class'], inplace = True)
# Fill NAs in Frequency column if no side effects are mentioned.
scraped_API_dropna.loc[scraped_API_dropna.AE == 'NO AEs MENTIONED', 'Frequency'] = 'N/A'
# Fill NAs in drug class indicator if no side effects are mentioned. Otherwise, put 'No'.
scraped_API_dropna['from_drug_class'] = np.where(scraped_API_dropna['AE'] == 'NO AEs MENTIONED', 'N/A', 'No')
# Concatenate the two dataframes to get a final one.
final_df = pd.concat([scraped_API_dropna, AEs_from_class_df])
# Remove any rows that do not contain side effects.
final_df = final_df[final_df.AE != 'NO AEs MENTIONED']
# Convert dataframe to lowercase.
final_df = final_df.apply(lambda x: x.astype(str).str.lower())
# Sort alphabetically.
final_df = final_df.sort_values(by=['API', 'from_drug_class'])
# Remove any duplicates.
final_df.drop_duplicates(subset = ['API', 'AE', 'Frequency'], keep = 'first', inplace = True)
# Rename columns.
final_df.columns = ['Drug_name', 'AE', 'Frequency', 'from_drug_class']
FILE_NAME = 'data_extraction/output/bnf_single_data.csv'
print('saving to file...')
# Save the dataset to a csv file.
final_df.to_csv(FILE_NAME, index=False, encoding = "utf-8")
| [
"re.compile",
"numpy.where",
"bs4.BeautifulSoup",
"pandas.DataFrame",
"pandas.concat",
"urllib.request.urlopen"
]
| [((548, 572), 'bs4.BeautifulSoup', 'BeautifulSoup', (['r', '"""lxml"""'], {}), "(r, 'lxml')\n", (561, 572), False, 'from bs4 import BeautifulSoup\n'), ((3904, 3973), 'pandas.DataFrame', 'pd.DataFrame', (['API_to_drugclass'], {'columns': "['API', 'Drug_Class', 'Link']"}), "(API_to_drugclass, columns=['API', 'Drug_Class', 'Link'])\n", (3916, 3973), True, 'import pandas as pd\n'), ((7563, 7632), 'numpy.where', 'np.where', (["(scraped_API_dropna['AE'] == 'NO AEs MENTIONED')", '"""N/A"""', '"""No"""'], {}), "(scraped_API_dropna['AE'] == 'NO AEs MENTIONED', 'N/A', 'No')\n", (7571, 7632), True, 'import numpy as np\n'), ((7701, 7751), 'pandas.concat', 'pd.concat', (['[scraped_API_dropna, AEs_from_class_df]'], {}), '([scraped_API_dropna, AEs_from_class_df])\n', (7710, 7751), True, 'import pandas as pd\n'), ((1628, 1652), 'bs4.BeautifulSoup', 'BeautifulSoup', (['l', '"""lxml"""'], {}), "(l, 'lxml')\n", (1641, 1652), False, 'from bs4 import BeautifulSoup\n'), ((4870, 4894), 'bs4.BeautifulSoup', 'BeautifulSoup', (['l', '"""lxml"""'], {}), "(l, 'lxml')\n", (4883, 4894), False, 'from bs4 import BeautifulSoup\n'), ((494, 531), 'urllib.request.urlopen', 'urllib.request.urlopen', (['URL_BEGINNING'], {}), '(URL_BEGINNING)\n', (516, 531), False, 'import urllib\n'), ((1309, 1345), 'urllib.request.urlopen', 'urllib.request.urlopen', (['URL_list[id]'], {}), '(URL_list[id])\n', (1331, 1345), False, 'import urllib\n'), ((2476, 2506), 're.compile', 're.compile', (['""".*/drug-class/.*"""'], {}), "('.*/drug-class/.*')\n", (2486, 2506), False, 'import re\n'), ((4542, 4581), 'urllib.request.urlopen', 'urllib.request.urlopen', (['class_links[id]'], {}), '(class_links[id])\n', (4564, 4581), False, 'import urllib\n'), ((2776, 2806), 're.compile', 're.compile', (['""".*/drug-class/.*"""'], {}), "('.*/drug-class/.*')\n", (2786, 2806), False, 'import re\n'), ((2612, 2642), 're.compile', 're.compile', (['""".*/drug-class/.*"""'], {}), "('.*/drug-class/.*')\n", (2622, 2642), False, 'import re\n')] |
from django.utils import timezone
from django.utils.text import slugify
def generate_billed_document_path(instance, filename):
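    # build a year/month folder plus a slugified name and timestamp for the uploaded CSV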
cur_time = timezone.now()
return f"{cur_time.strftime('%Y/%m')}/{slugify(instance.name)}-{cur_time.strftime('%d.%m.%Y %H:%M')}.csv"
| [
"django.utils.timezone.now",
"django.utils.text.slugify"
]
| [((144, 158), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (156, 158), False, 'from django.utils import timezone\n'), ((203, 225), 'django.utils.text.slugify', 'slugify', (['instance.name'], {}), '(instance.name)\n', (210, 225), False, 'from django.utils.text import slugify\n')] |
from categorical_embedder.embedders.core.aux.custom_layers import get_custom_layer_class
from categorical_embedder.embedders.core.aux.loss_factory import get_loss_function
def prepare_custom_objects(custom_object_info):
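    # gather the custom layer classes (and the custom loss, unless the model embeds its own loss) needed to deserialize a saved model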
custom_objects = {}
custom_objects.update(_prepare_custom_layers(custom_object_info["layer_info"]))
if not custom_object_info["has_implicit_loss"]:
custom_objects.update(_prepare_custom_loss(custom_object_info["loss_info"]))
return custom_objects
def _prepare_custom_layers(layer_info):
custom_layers = {}
for layer_name in layer_info:
custom_layers[layer_name] = get_custom_layer_class(layer_name)
return custom_layers
def _prepare_custom_loss(loss_info):
return {"loss": get_loss_function(loss_info)}
| [
"categorical_embedder.embedders.core.aux.custom_layers.get_custom_layer_class",
"categorical_embedder.embedders.core.aux.loss_factory.get_loss_function"
]
| [((628, 662), 'categorical_embedder.embedders.core.aux.custom_layers.get_custom_layer_class', 'get_custom_layer_class', (['layer_name'], {}), '(layer_name)\n', (650, 662), False, 'from categorical_embedder.embedders.core.aux.custom_layers import get_custom_layer_class\n'), ((747, 775), 'categorical_embedder.embedders.core.aux.loss_factory.get_loss_function', 'get_loss_function', (['loss_info'], {}), '(loss_info)\n', (764, 775), False, 'from categorical_embedder.embedders.core.aux.loss_factory import get_loss_function\n')] |
"""Utilities."""
from functools import wraps
import re
from typing import Callable, List, Optional, TypeVar, Union
from .data import (
all_classes, all_slots,
)
def pascal_to_snake(s: str, sep: str = "_") -> str:
"""Convert Pascal case to snake case.
Assumes that
a) all words are either all-lowercase or all-uppercase
b) all 1-letter words are lowercase
c) there are no adjacent 1-letter words
d) there are no adjacent uppercase words
Examples:
PhenotypicFeature -> phenotypic_feature
RNAProduct -> RNA_product
FeedACamel -> feed_a_camel
Optionally specify `sep` (default "_").
"""
# add an underscore before each capital letter
underscored = re.sub(
r"(?<!^)(?=[A-Z])",
sep,
s,
)
# collapse any adjacent one-letter words
collapsed = re.sub(
r"(?<![a-zA-Z])[A-Z](?:_[A-Z](?=$|_))+",
lambda match: match.group(0).replace("_", ""),
underscored,
)
# lower-case any words containing only one uppercase letter
lowercased = re.sub(
r"(?<![A-Z])[A-Z](?![A-Z])",
lambda match: match.group(0).lower(),
collapsed,
)
return lowercased
def snake_to_pascal(s: str, sep: str = "_") -> str:
"""Convert snake case to Pascal case.
This is the inverse of pascal_to_snake() when its assumptions
are true.
Optionally specify `sep` (default "_").
"""
return re.sub(
fr"(?:^|{sep})([a-zA-Z])",
lambda match: match.group(1).upper(),
s
)
def guess_casing(s: str) -> str:
"""Guess snake case or Pascal case."""
if "_" in s:
return "snake"
if any(c.isupper() for c in s):
return "pascal"
return "snake"
def normalize(s: str) -> str:
"""Normalize string input."""
if s.startswith("biolink:"):
s = s[8:]
if "_" in s:
# it's snake case
return s.replace("_", " ")
if " " in s:
return s
return pascal_to_snake(s, " ")
T = TypeVar("T")
def listify(func: Callable) -> Callable:
"""Expand function to take list of arguments."""
@wraps(func)
def wrapper(arg: Union[T, List[T]], **kwargs) -> Union[T, List[T]]:
"""Apply function to each element in list."""
if isinstance(arg, list):
return [
func(el, **kwargs)
for el in arg
]
else:
return func(arg, **kwargs)
return wrapper
@listify
def format(s: str, case: Optional[str] = None, **kwargs) -> str:
"""Format space-case string as biolink CURIE."""
if isinstance(case, str) and case.lower() == "pascal":
return "biolink:" + snake_to_pascal(s, " ")
elif isinstance(case, str) and case.lower() == "snake":
return "biolink:" + s.replace(" ", "_")
else:
return "biolink:" + s
def with_formatting():
"""Add format conversions to method."""
def decorator(func: Callable) -> Callable:
"""Generate decorator."""
@wraps(func)
def wrapper(self, s: str, *args, formatted=False, **kwargs):
"""Wrap in format conversions."""
case = guess_casing(s)
normalized = normalize(s)
output: Union[str, List[str]] = func(self, normalized, *args, **kwargs)
if formatted:
if normalized in all_classes:
output = format(output, case="pascal")
elif normalized in all_slots:
output = format(output, case="snake")
else:
output = format(output, case=case)
return output
return wrapper
return decorator
| [
"re.sub",
"functools.wraps",
"typing.TypeVar"
]
| [((2016, 2028), 'typing.TypeVar', 'TypeVar', (['"""T"""'], {}), "('T')\n", (2023, 2028), False, 'from typing import Callable, List, Optional, TypeVar, Union\n'), ((714, 747), 're.sub', 're.sub', (['"""(?<!^)(?=[A-Z])"""', 'sep', 's'], {}), "('(?<!^)(?=[A-Z])', sep, s)\n", (720, 747), False, 'import re\n'), ((2130, 2141), 'functools.wraps', 'wraps', (['func'], {}), '(func)\n', (2135, 2141), False, 'from functools import wraps\n'), ((3029, 3040), 'functools.wraps', 'wraps', (['func'], {}), '(func)\n', (3034, 3040), False, 'from functools import wraps\n')] |
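# Hedged usage sketch (added, not in the original module): exercises the casing
# helpers defined above; the expected values mirror the docstring examples and
# assume pascal_to_snake, snake_to_pascal and format are in scope.
assert pascal_to_snake("PhenotypicFeature") == "phenotypic_feature"
assert pascal_to_snake("RNAProduct") == "RNA_product"
assert pascal_to_snake("FeedACamel") == "feed_a_camel"
assert snake_to_pascal("RNA_product") == "RNAProduct"
# format() adds the CURIE prefix and re-cases the space-separated form
assert format("phenotypic feature", case="pascal") == "biolink:PhenotypicFeature"
assert format("phenotypic feature", case="snake") == "biolink:phenotypic_feature"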
from collections import namedtuple
Accelerometer = namedtuple('Accelerometer', ["timestamp", "x", "y", "z"])
Magnetometer = namedtuple('Magnetometer', ['timestamp', 'x', 'y', 'z'])
Gyroscope = namedtuple('Gyroscope', ['timestamp', 'x', 'y', 'z'])
Euler = namedtuple('Euler', ['timestamp', 'x', 'y', 'z'])
Quaternion = namedtuple('Quaternion', ['timestamp', 'w', 'x', 'y', 'z'])
Heading = namedtuple('Heading', ['timestamp', 'h'])
Status = namedtuple('Status', ['magnetometer_enabled',
'gyroscope_enabled',
'accelerometer_enabled',
'gyroscope_resolution',
'accelerometer_resolution',
'low_output_rate',
'heading_streaming',
'euler_streaming',
'magnetometer_streaming',
'quaternions_streaming',
'gyroscope_streaming',
'accelerometer_streaming'])
| [
"collections.namedtuple"
]
| [((52, 109), 'collections.namedtuple', 'namedtuple', (['"""Accelerometer"""', "['timestamp', 'x', 'y', 'z']"], {}), "('Accelerometer', ['timestamp', 'x', 'y', 'z'])\n", (62, 109), False, 'from collections import namedtuple\n'), ((125, 181), 'collections.namedtuple', 'namedtuple', (['"""Magnetometer"""', "['timestamp', 'x', 'y', 'z']"], {}), "('Magnetometer', ['timestamp', 'x', 'y', 'z'])\n", (135, 181), False, 'from collections import namedtuple\n'), ((194, 247), 'collections.namedtuple', 'namedtuple', (['"""Gyroscope"""', "['timestamp', 'x', 'y', 'z']"], {}), "('Gyroscope', ['timestamp', 'x', 'y', 'z'])\n", (204, 247), False, 'from collections import namedtuple\n'), ((256, 305), 'collections.namedtuple', 'namedtuple', (['"""Euler"""', "['timestamp', 'x', 'y', 'z']"], {}), "('Euler', ['timestamp', 'x', 'y', 'z'])\n", (266, 305), False, 'from collections import namedtuple\n'), ((320, 379), 'collections.namedtuple', 'namedtuple', (['"""Quaternion"""', "['timestamp', 'w', 'x', 'y', 'z']"], {}), "('Quaternion', ['timestamp', 'w', 'x', 'y', 'z'])\n", (330, 379), False, 'from collections import namedtuple\n'), ((391, 432), 'collections.namedtuple', 'namedtuple', (['"""Heading"""', "['timestamp', 'h']"], {}), "('Heading', ['timestamp', 'h'])\n", (401, 432), False, 'from collections import namedtuple\n'), ((443, 763), 'collections.namedtuple', 'namedtuple', (['"""Status"""', "['magnetometer_enabled', 'gyroscope_enabled', 'accelerometer_enabled',\n 'gyroscope_resolution', 'accelerometer_resolution', 'low_output_rate',\n 'heading_streaming', 'euler_streaming', 'magnetometer_streaming',\n 'quaternions_streaming', 'gyroscope_streaming', 'accelerometer_streaming']"], {}), "('Status', ['magnetometer_enabled', 'gyroscope_enabled',\n 'accelerometer_enabled', 'gyroscope_resolution',\n 'accelerometer_resolution', 'low_output_rate', 'heading_streaming',\n 'euler_streaming', 'magnetometer_streaming', 'quaternions_streaming',\n 'gyroscope_streaming', 'accelerometer_streaming'])\n", (453, 763), False, 'from collections import namedtuple\n')] |
try:
from collections.abc import MutableMapping
except ImportError:
from collections import MutableMapping
import zipfile
class Zip(MutableMapping):
"""Mutable Mapping interface to a Zip file
Keys must be strings, values must be bytes
Parameters
----------
filename: string
mode: string, ('r', 'w', 'a'), defaults to 'a'
Examples
--------
>>> z = Zip('myfile.zip') # doctest: +SKIP
>>> z['x'] = b'123' # doctest: +SKIP
>>> z['x'] # doctest: +SKIP
b'123'
>>> z.flush() # flush and write metadata to disk # doctest: +SKIP
"""
def __init__(self, filename, mode="a"):
self.filename = filename
self.mode = mode
self._file = None
@property
def file(self):
if self.mode == "closed":
raise OSError("File closed")
if not self._file or not self._file.fp:
self._file = zipfile.ZipFile(self.filename, mode=self.mode)
return self._file
def __getitem__(self, key):
return self.file.read(key)
def __setitem__(self, key, value):
self.file.writestr(key, value)
def keys(self):
return (zi.filename for zi in self.file.filelist)
def values(self):
return map(self.file.read, self.keys())
def items(self):
return ((zi.filename, self.file.read(zi.filename)) for zi in self.file.filelist)
def __iter__(self):
return self.keys()
def __delitem__(self, key):
raise NotImplementedError("Not supported by stdlib zipfile")
def __len__(self):
return len(self.file.filelist)
def flush(self):
self.file.fp.flush()
self.file.close()
def close(self):
self.flush()
self.mode = "closed"
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
| [
"zipfile.ZipFile"
]
| [((910, 956), 'zipfile.ZipFile', 'zipfile.ZipFile', (['self.filename'], {'mode': 'self.mode'}), '(self.filename, mode=self.mode)\n', (925, 956), False, 'import zipfile\n')] |
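# Hedged usage sketch (added, not in the original module): mirrors the doctest
# in the Zip docstring, using the context-manager protocol defined above.
with Zip("myfile.zip") as z:          # opens lazily in append mode
    z["greeting.txt"] = b"hello"
    print(len(z), list(z.keys()))     # -> 1 ['greeting.txt'] on a fresh archive
    print(z["greeting.txt"])          # -> b'hello'
# leaving the with-block flushes and closes the underlying zipfile.ZipFile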
# Copyright 2014 A10 Networks
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
from neutron.plugins.common import constants
from oslo_log import log as logging
import six
from neutron_lbaas.db.loadbalancer import models
from neutron_lbaas.services.loadbalancer import constants as lb_const
from neutron_lbaas.services.loadbalancer import data_models
LOG = logging.getLogger(__name__)
@six.add_metaclass(abc.ABCMeta)
class BaseManagerMixin(object):
def __init__(self, driver):
self.driver = driver
@abc.abstractproperty
def db_delete_method(self):
pass
@abc.abstractmethod
def create(self, context, obj):
pass
@abc.abstractmethod
def update(self, context, obj_old, obj):
pass
@abc.abstractmethod
def delete(self, context, obj):
pass
def successful_completion(self, context, obj, delete=False,
lb_create=False):
"""
Sets the provisioning_status of the load balancer and obj to
ACTIVE. Should be called last in the implementor's BaseManagerMixin
methods for successful runs.
:param context: neutron context
:param obj: instance of a
neutron_lbaas.services.loadbalancer.data_model
:param delete: set True if being called from a delete method. Will
most likely result in the obj being deleted from the db.
:param lb_create: set True if this is being called after a successful
load balancer create.
"""
LOG.debug("Starting successful_completion method after a successful "
"driver action.")
obj_sa_cls = data_models.DATA_MODEL_TO_SA_MODEL_MAP[obj.__class__]
if delete:
# Check if driver is responsible for vip allocation. If the driver
# is responsible, then it is also responsible for cleaning it up.
# At this point, the VIP should already be cleaned up, so we are
# just doing neutron lbaas db cleanup.
if (obj == obj.root_loadbalancer and
self.driver.load_balancer.allocates_vip):
# NOTE(blogan): this is quite dumb to do but it is necessary
# so that a false negative pep8 error does not get thrown. An
# "unexpected-keyword-argument" pep8 error occurs bc
# self.db_delete_method is a @property method that returns a
# method.
kwargs = {'delete_vip_port': False}
self.db_delete_method(context, obj.id, **kwargs)
else:
self.db_delete_method(context, obj.id)
if obj == obj.root_loadbalancer and delete:
# Load balancer was deleted and no longer exists
return
lb_op_status = None
lb_p_status = constants.ACTIVE
if obj == obj.root_loadbalancer:
# only set the status to online if this an operation on the
# load balancer
lb_op_status = lb_const.ONLINE
# Update the load balancer's vip address and vip port id if the driver
# was responsible for allocating the vip.
if (self.driver.load_balancer.allocates_vip and lb_create and
isinstance(obj, data_models.LoadBalancer)):
self.driver.plugin.db.update_loadbalancer(
context, obj.id, {'vip_address': obj.vip_address,
'vip_port_id': obj.vip_port_id})
self.driver.plugin.db.update_status(
context, models.LoadBalancer, obj.root_loadbalancer.id,
provisioning_status=lb_p_status,
operating_status=lb_op_status)
if obj == obj.root_loadbalancer or delete:
# Do not want to update the status of the load balancer again
# Or the obj was deleted from the db so no need to update the
# statuses
return
obj_op_status = lb_const.ONLINE
if isinstance(obj, data_models.HealthMonitor):
# Health Monitor does not have an operating status
obj_op_status = None
LOG.debug("Updating object of type {0} with id of {1} to "
"provisioning_status = {2}, operating_status = {3}".format(
obj.__class__, obj.id, constants.ACTIVE, obj_op_status))
self.driver.plugin.db.update_status(
context, obj_sa_cls, obj.id,
provisioning_status=constants.ACTIVE,
operating_status=obj_op_status)
def failed_completion(self, context, obj):
"""
Sets the provisioning status of the obj to ERROR. If obj is a
loadbalancer it will be set to ERROR, otherwise set to ACTIVE. Should
be called whenever something goes wrong (raised exception) in an
implementor's BaseManagerMixin methods.
:param context: neutron context
:param obj: instance of a
neutron_lbaas.services.loadbalancer.data_model
"""
LOG.debug("Starting failed_completion method after a failed driver "
"action.")
if isinstance(obj, data_models.LoadBalancer):
LOG.debug("Updating load balancer {0} to provisioning_status = "
"{1}, operating_status = {2}.".format(
obj.root_loadbalancer.id, constants.ERROR,
lb_const.OFFLINE))
self.driver.plugin.db.update_status(
context, models.LoadBalancer, obj.root_loadbalancer.id,
provisioning_status=constants.ERROR,
operating_status=lb_const.OFFLINE)
return
obj_sa_cls = data_models.DATA_MODEL_TO_SA_MODEL_MAP[obj.__class__]
LOG.debug("Updating object of type {0} with id of {1} to "
"provisioning_status = {2}, operating_status = {3}".format(
obj.__class__, obj.id, constants.ERROR,
lb_const.OFFLINE))
self.driver.plugin.db.update_status(
context, obj_sa_cls, obj.id,
provisioning_status=constants.ERROR,
operating_status=lb_const.OFFLINE)
LOG.debug("Updating load balancer {0} to "
"provisioning_status = {1}".format(obj.root_loadbalancer.id,
constants.ACTIVE))
self.driver.plugin.db.update_status(
context, models.LoadBalancer, obj.root_loadbalancer.id,
provisioning_status=constants.ACTIVE)
def update_vip(self, context, loadbalancer_id, vip_address,
vip_port_id=None):
lb_update = {'vip_address': vip_address}
if vip_port_id:
lb_update['vip_port_id'] = vip_port_id
self.driver.plugin.db.update_loadbalancer(context, loadbalancer_id,
lb_update)
@six.add_metaclass(abc.ABCMeta)
class BaseRefreshMixin(object):
@abc.abstractmethod
def refresh(self, context, obj):
pass
@six.add_metaclass(abc.ABCMeta)
class BaseStatsMixin(object):
@abc.abstractmethod
def stats(self, context, obj):
pass
| [
"six.add_metaclass",
"oslo_log.log.getLogger"
]
| [((896, 923), 'oslo_log.log.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (913, 923), True, 'from oslo_log import log as logging\n'), ((927, 957), 'six.add_metaclass', 'six.add_metaclass', (['abc.ABCMeta'], {}), '(abc.ABCMeta)\n', (944, 957), False, 'import six\n'), ((7466, 7496), 'six.add_metaclass', 'six.add_metaclass', (['abc.ABCMeta'], {}), '(abc.ABCMeta)\n', (7483, 7496), False, 'import six\n'), ((7607, 7637), 'six.add_metaclass', 'six.add_metaclass', (['abc.ABCMeta'], {}), '(abc.ABCMeta)\n', (7624, 7637), False, 'import six\n')] |
import Net
import configparser
import torch
from PIL import Image
config = configparser.ConfigParser()
config.read('./config.ini')
MODEL = config.get("Network", "Model")
transformations = Net.transformations
net = Net.Net()
net.eval()
net.load_state_dict(torch.load(MODEL))
image = Image.open("./html/rwby.jpg")
image = transformations(image).float()
image = torch.autograd.Variable(image[None, ...])
torch.onnx.export(
net,
image,
MODEL.split('pth')[0] + 'onnx',
export_params=True,
output_names=['toy-car']
)
print("finish")
| [
"PIL.Image.open",
"configparser.ConfigParser",
"torch.load",
"Net.Net",
"torch.autograd.Variable"
]
| [((76, 103), 'configparser.ConfigParser', 'configparser.ConfigParser', ([], {}), '()\n', (101, 103), False, 'import configparser\n'), ((216, 225), 'Net.Net', 'Net.Net', ([], {}), '()\n', (223, 225), False, 'import Net\n'), ((286, 315), 'PIL.Image.open', 'Image.open', (['"""./html/rwby.jpg"""'], {}), "('./html/rwby.jpg')\n", (296, 315), False, 'from PIL import Image\n'), ((363, 404), 'torch.autograd.Variable', 'torch.autograd.Variable', (['image[None, ...]'], {}), '(image[None, ...])\n', (386, 404), False, 'import torch\n'), ((258, 275), 'torch.load', 'torch.load', (['MODEL'], {}), '(MODEL)\n', (268, 275), False, 'import torch\n')] |
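# Hedged follow-up (added, not part of the original script): sanity-check the
# exported graph. Assumes the `onnx` package is installed and that this snippet
# runs right after the export above, so MODEL still holds the .pth path.
import onnx
onnx_path = MODEL.split('pth')[0] + 'onnx'
onnx_model = onnx.load(onnx_path)
onnx.checker.check_model(onnx_model)   # raises if the exported graph is malformed
print(onnx.helper.printable_graph(onnx_model.graph))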
import torch
from tuframework.network_architecture.generic_UNet import Generic_UNet
from tuframework.network_architecture.initialization import InitWeights_He
from tuframework.training.network_training.tuframework_variants.data_augmentation.tuframeworkTrainerV2_insaneDA import \
tuframeworkTrainerV2_insaneDA
from tuframework.utilities.nd_softmax import softmax_helper
from torch import nn
class tuframeworkTrainerV2_MMS(tuframeworkTrainerV2_insaneDA):
def setup_DA_params(self):
super().setup_DA_params()
self.data_aug_params["p_rot"] = 0.7
self.data_aug_params["p_eldef"] = 0.1
self.data_aug_params["p_scale"] = 0.3
self.data_aug_params["independent_scale_factor_for_each_axis"] = True
self.data_aug_params["p_independent_scale_per_axis"] = 0.3
self.data_aug_params["do_additive_brightness"] = True
self.data_aug_params["additive_brightness_mu"] = 0
self.data_aug_params["additive_brightness_sigma"] = 0.2
self.data_aug_params["additive_brightness_p_per_sample"] = 0.3
self.data_aug_params["additive_brightness_p_per_channel"] = 1
self.data_aug_params["elastic_deform_alpha"] = (0., 300.)
self.data_aug_params["elastic_deform_sigma"] = (9., 15.)
self.data_aug_params['gamma_range'] = (0.5, 1.6)
def initialize_network(self):
if self.threeD:
conv_op = nn.Conv3d
dropout_op = nn.Dropout3d
norm_op = nn.BatchNorm3d
else:
conv_op = nn.Conv2d
dropout_op = nn.Dropout2d
norm_op = nn.BatchNorm2d
norm_op_kwargs = {'eps': 1e-5, 'affine': True}
dropout_op_kwargs = {'p': 0, 'inplace': True}
net_nonlin = nn.LeakyReLU
net_nonlin_kwargs = {'negative_slope': 1e-2, 'inplace': True}
self.network = Generic_UNet(self.num_input_channels, self.base_num_features, self.num_classes,
len(self.net_num_pool_op_kernel_sizes),
self.conv_per_stage, 2, conv_op, norm_op, norm_op_kwargs, dropout_op,
dropout_op_kwargs,
net_nonlin, net_nonlin_kwargs, True, False, lambda x: x, InitWeights_He(1e-2),
self.net_num_pool_op_kernel_sizes, self.net_conv_kernel_sizes, False, True, True)
if torch.cuda.is_available():
self.network.cuda()
self.network.inference_apply_nonlin = softmax_helper
"""def run_training(self):
from batchviewer import view_batch
a = next(self.tr_gen)
view_batch(a['data'])
import IPython;IPython.embed()"""
| [
"torch.cuda.is_available",
"tuframework.network_architecture.initialization.InitWeights_He"
]
| [((2410, 2435), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2433, 2435), False, 'import torch\n'), ((2259, 2279), 'tuframework.network_architecture.initialization.InitWeights_He', 'InitWeights_He', (['(0.01)'], {}), '(0.01)\n', (2273, 2279), False, 'from tuframework.network_architecture.initialization import InitWeights_He\n')] |
from build.chart_data_functions import get_confirmed_cases_by_county
from build.chart_data_functions import get_county_by_day
from build.constants import CONFIRMED_CASES_BY_COUNTIES_PATH
from build.constants import COUNTY_MAPPING
from build.constants import COUNTY_POPULATION
from build.constants import DATE_SETTINGS
from build.constants import TEST_RESULTS_PATH
from build.constants import TODAY_DMYHM
from build.constants import YESTERDAY_YMD
from build.utils import analyze_memory
from build.utils import analyze_time
from build.utils import logger
from build.utils import read_json_from_file
from build.utils import save_as_json
import pandas as pd
@analyze_time
@analyze_memory
def main():
# Log status
logger.info("Loading local data files")
test_results = read_json_from_file(TEST_RESULTS_PATH)
# Log status
logger.info("Calculating main statistics")
# Create date ranges for charts
case_dates = pd.date_range(start=DATE_SETTINGS["firstCaseDate"], end=YESTERDAY_YMD)
# Get data for each chart
logger.info("Calculating data for charts")
county_by_day = get_county_by_day(
test_results, case_dates, COUNTY_MAPPING, COUNTY_POPULATION
)
confirmed_cases_by_county = get_confirmed_cases_by_county(
test_results, COUNTY_MAPPING
)
del county_by_day["mapPlayback"]
del county_by_day["mapPlayback10k"]
# Create dictionary for final JSON
logger.info("Compiling final JSON")
final_json = {
"updatedOn": TODAY_DMYHM,
"dataConfirmedCasesByCounties": confirmed_cases_by_county,
"countyByDay": county_by_day,
}
# Dump JSON output
save_as_json(CONFIRMED_CASES_BY_COUNTIES_PATH, final_json)
# Log finish time
logger.info("Finished update process")
if __name__ == "__main__":
main()
| [
"build.utils.save_as_json",
"build.utils.logger.info",
"pandas.date_range",
"build.chart_data_functions.get_confirmed_cases_by_county",
"build.chart_data_functions.get_county_by_day",
"build.utils.read_json_from_file"
]
| [((719, 758), 'build.utils.logger.info', 'logger.info', (['"""Loading local data files"""'], {}), "('Loading local data files')\n", (730, 758), False, 'from build.utils import logger\n'), ((778, 816), 'build.utils.read_json_from_file', 'read_json_from_file', (['TEST_RESULTS_PATH'], {}), '(TEST_RESULTS_PATH)\n', (797, 816), False, 'from build.utils import read_json_from_file\n'), ((839, 881), 'build.utils.logger.info', 'logger.info', (['"""Calculating main statistics"""'], {}), "('Calculating main statistics')\n", (850, 881), False, 'from build.utils import logger\n'), ((936, 1006), 'pandas.date_range', 'pd.date_range', ([], {'start': "DATE_SETTINGS['firstCaseDate']", 'end': 'YESTERDAY_YMD'}), "(start=DATE_SETTINGS['firstCaseDate'], end=YESTERDAY_YMD)\n", (949, 1006), True, 'import pandas as pd\n'), ((1042, 1084), 'build.utils.logger.info', 'logger.info', (['"""Calculating data for charts"""'], {}), "('Calculating data for charts')\n", (1053, 1084), False, 'from build.utils import logger\n'), ((1105, 1183), 'build.chart_data_functions.get_county_by_day', 'get_county_by_day', (['test_results', 'case_dates', 'COUNTY_MAPPING', 'COUNTY_POPULATION'], {}), '(test_results, case_dates, COUNTY_MAPPING, COUNTY_POPULATION)\n', (1122, 1183), False, 'from build.chart_data_functions import get_county_by_day\n'), ((1230, 1289), 'build.chart_data_functions.get_confirmed_cases_by_county', 'get_confirmed_cases_by_county', (['test_results', 'COUNTY_MAPPING'], {}), '(test_results, COUNTY_MAPPING)\n', (1259, 1289), False, 'from build.chart_data_functions import get_confirmed_cases_by_county\n'), ((1426, 1461), 'build.utils.logger.info', 'logger.info', (['"""Compiling final JSON"""'], {}), "('Compiling final JSON')\n", (1437, 1461), False, 'from build.utils import logger\n'), ((1654, 1712), 'build.utils.save_as_json', 'save_as_json', (['CONFIRMED_CASES_BY_COUNTIES_PATH', 'final_json'], {}), '(CONFIRMED_CASES_BY_COUNTIES_PATH, final_json)\n', (1666, 1712), False, 'from build.utils import save_as_json\n'), ((1740, 1778), 'build.utils.logger.info', 'logger.info', (['"""Finished update process"""'], {}), "('Finished update process')\n", (1751, 1778), False, 'from build.utils import logger\n')] |
# encoding=utf-8
import rospy
import tf
if __name__ == '__main__':
rospy.init_node('py_tf_broadcaster')
br = tf.TransformBroadcaster()
x = 0.0
y = 0.0
z = 0.0
roll = 0
pitch = 0
yaw = 1.57
rate = rospy.Rate(1)
while not rospy.is_shutdown():
yaw = yaw + 0.1
roll = roll + 0.1
br.sendTransform((x, y, z),
tf.transformations.quaternion_from_euler(roll, pitch, yaw),
rospy.Time.now(),
"base_link",
"front_caster") # 发布base_link到link1的平移和翻转
rate.sleep()
| [
"tf.TransformBroadcaster",
"rospy.is_shutdown",
"rospy.init_node",
"rospy.Time.now",
"tf.transformations.quaternion_from_euler",
"rospy.Rate"
]
| [((73, 109), 'rospy.init_node', 'rospy.init_node', (['"""py_tf_broadcaster"""'], {}), "('py_tf_broadcaster')\n", (88, 109), False, 'import rospy\n'), ((119, 144), 'tf.TransformBroadcaster', 'tf.TransformBroadcaster', ([], {}), '()\n', (142, 144), False, 'import tf\n'), ((235, 248), 'rospy.Rate', 'rospy.Rate', (['(1)'], {}), '(1)\n', (245, 248), False, 'import rospy\n'), ((263, 282), 'rospy.is_shutdown', 'rospy.is_shutdown', ([], {}), '()\n', (280, 282), False, 'import rospy\n'), ((395, 453), 'tf.transformations.quaternion_from_euler', 'tf.transformations.quaternion_from_euler', (['roll', 'pitch', 'yaw'], {}), '(roll, pitch, yaw)\n', (435, 453), False, 'import tf\n'), ((480, 496), 'rospy.Time.now', 'rospy.Time.now', ([], {}), '()\n', (494, 496), False, 'import rospy\n')] |
import torch
import logging
# Transformer version 4.9.1 - Newer versions may not work.
from transformers import AutoTokenizer
from trained_gpt_model import get_inference2
def t5_supp_inference(review_text):
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') # CPU may not work, got to check.
# device = torch.device('cpu')
print('Using device:' + str(device))
PRETRAINED_MODEL = 't5-base'
SEQ_LENGTH = 600
tokenizer = AutoTokenizer.from_pretrained(PRETRAINED_MODEL)
tokenizer.add_special_tokens(
{'additional_special_tokens': ['<answer>', '<context>']}
)
model = torch.load("../trained_models/t5_model_hotpot_supporting_facts_last.pth")
model.eval()
encoded_text = tokenizer(
review_text,
padding=True,
max_length=SEQ_LENGTH,
truncation=True,
return_tensors="pt"
).to(device)
input_ids = encoded_text['input_ids']
with torch.no_grad():
output = model.generate(input_ids)
decoded_string = tokenizer.decode(output[0], skip_special_tokens=True)
logging.debug("Decoded string" + decoded_string)
print(decoded_string)
# device.empty_cache()
del model
del tokenizer
return decoded_string
def t5_full_inference(review_text):
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') # CPU may not work, got to check.
# device = torch.device('cpu')
print('Using device:' + str(device))
PRETRAINED_MODEL = 't5-base'
SEQ_LENGTH = 600
tokenizer = AutoTokenizer.from_pretrained(PRETRAINED_MODEL)
tokenizer.add_special_tokens(
{'additional_special_tokens': ['<answer>', '<context>']}
)
model = torch.load("../trained_models/t5_model_hotpot_full_context_last.pth")
model.eval()
encoded_text = tokenizer(
review_text,
padding=True,
max_length=SEQ_LENGTH,
truncation=True,
return_tensors="pt"
).to(device)
input_ids = encoded_text['input_ids']
with torch.no_grad():
output = model.generate(input_ids)
decoded_string = tokenizer.decode(output[0], skip_special_tokens=True)
logging.debug("Decoded string" + decoded_string)
print(decoded_string)
# device.empty_cache()
del model
del tokenizer
return decoded_string
def bart_supp_inference(review_text):
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') # CPU may not work, got to check.
# device = torch.device('cpu')
print('Using device:' + str(device))
PRETRAINED_MODEL = 'facebook/bart-base'
SEQ_LENGTH = 600
tokenizer = AutoTokenizer.from_pretrained(PRETRAINED_MODEL)
tokenizer.add_special_tokens(
{'additional_special_tokens': ['<answer>', '<context>']}
)
model = torch.load("../trained_models/bart_model_hotpot_supporting_facts_last.pth")
model.eval()
encoded_text = tokenizer(
review_text,
padding=True,
max_length=SEQ_LENGTH,
truncation=True,
return_tensors="pt"
).to(device)
input_ids = encoded_text['input_ids']
with torch.no_grad():
output = model.generate(input_ids)
decoded_string = tokenizer.decode(output[0], skip_special_tokens=True)
logging.debug("Decoded string" + decoded_string)
print(decoded_string)
# device.empty_cache()
del model
del tokenizer
return decoded_string
def bart_full_inference(review_text):
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') # CPU may not work, got to check.
# device = torch.device('cpu')
print('Using device:' + str(device))
PRETRAINED_MODEL = 'facebook/bart-base'
SEQ_LENGTH = 600
tokenizer = AutoTokenizer.from_pretrained(PRETRAINED_MODEL)
tokenizer.add_special_tokens(
{'additional_special_tokens': ['<answer>', '<context>']}
)
model = torch.load("../trained_models/bart_model_hotpot_full_context_last.pth")
model.eval()
encoded_text = tokenizer(
review_text,
padding=True,
max_length=SEQ_LENGTH,
truncation=True,
return_tensors="pt"
).to(device)
input_ids = encoded_text['input_ids']
with torch.no_grad():
output = model.generate(input_ids)
decoded_string = tokenizer.decode(output[0], skip_special_tokens=True)
logging.debug("Decoded string" + decoded_string)
print(decoded_string)
# device.empty_cache()
del model
del tokenizer
return decoded_string
# if __name__ == "__main__":
# review_text = "<answer> a fusional language <context> Typologically, Estonian represents a transitional form from an agglutinating language to a fusional language. The canonical word order is SVO (subject–verb–object)."
# t5_supp_inference(review_text, md2, device)
def get_inference(answer, context, model_name):
valuation_text = "<answer> " + answer + " <context> " + context
if model_name == 't5_supp':
return t5_supp_inference(valuation_text)
elif model_name == 't5_full':
return t5_full_inference(valuation_text)
elif model_name == 'bart_supp':
return bart_supp_inference(valuation_text)
elif model_name == 'bart_full':
return bart_full_inference(valuation_text)
elif model_name == 'gpt2':
return get_inference2(answer, context)
| [
"logging.debug",
"torch.load",
"trained_gpt_model.get_inference2",
"torch.cuda.is_available",
"transformers.AutoTokenizer.from_pretrained",
"torch.no_grad"
]
| [((464, 511), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (['PRETRAINED_MODEL'], {}), '(PRETRAINED_MODEL)\n', (493, 511), False, 'from transformers import AutoTokenizer\n'), ((631, 704), 'torch.load', 'torch.load', (['"""../trained_models/t5_model_hotpot_supporting_facts_last.pth"""'], {}), "('../trained_models/t5_model_hotpot_supporting_facts_last.pth')\n", (641, 704), False, 'import torch\n'), ((1087, 1135), 'logging.debug', 'logging.debug', (["('Decoded string' + decoded_string)"], {}), "('Decoded string' + decoded_string)\n", (1100, 1135), False, 'import logging\n'), ((1538, 1585), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (['PRETRAINED_MODEL'], {}), '(PRETRAINED_MODEL)\n', (1567, 1585), False, 'from transformers import AutoTokenizer\n'), ((1705, 1774), 'torch.load', 'torch.load', (['"""../trained_models/t5_model_hotpot_full_context_last.pth"""'], {}), "('../trained_models/t5_model_hotpot_full_context_last.pth')\n", (1715, 1774), False, 'import torch\n'), ((2157, 2205), 'logging.debug', 'logging.debug', (["('Decoded string' + decoded_string)"], {}), "('Decoded string' + decoded_string)\n", (2170, 2205), False, 'import logging\n'), ((2621, 2668), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (['PRETRAINED_MODEL'], {}), '(PRETRAINED_MODEL)\n', (2650, 2668), False, 'from transformers import AutoTokenizer\n'), ((2788, 2863), 'torch.load', 'torch.load', (['"""../trained_models/bart_model_hotpot_supporting_facts_last.pth"""'], {}), "('../trained_models/bart_model_hotpot_supporting_facts_last.pth')\n", (2798, 2863), False, 'import torch\n'), ((3246, 3294), 'logging.debug', 'logging.debug', (["('Decoded string' + decoded_string)"], {}), "('Decoded string' + decoded_string)\n", (3259, 3294), False, 'import logging\n'), ((3710, 3757), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (['PRETRAINED_MODEL'], {}), '(PRETRAINED_MODEL)\n', (3739, 3757), False, 'from transformers import AutoTokenizer\n'), ((3877, 3948), 'torch.load', 'torch.load', (['"""../trained_models/bart_model_hotpot_full_context_last.pth"""'], {}), "('../trained_models/bart_model_hotpot_full_context_last.pth')\n", (3887, 3948), False, 'import torch\n'), ((4331, 4379), 'logging.debug', 'logging.debug', (["('Decoded string' + decoded_string)"], {}), "('Decoded string' + decoded_string)\n", (4344, 4379), False, 'import logging\n'), ((948, 963), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (961, 963), False, 'import torch\n'), ((2018, 2033), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2031, 2033), False, 'import torch\n'), ((3107, 3122), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3120, 3122), False, 'import torch\n'), ((4192, 4207), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4205, 4207), False, 'import torch\n'), ((246, 271), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (269, 271), False, 'import torch\n'), ((1320, 1345), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1343, 1345), False, 'import torch\n'), ((2392, 2417), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2415, 2417), False, 'import torch\n'), ((3481, 3506), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3504, 3506), False, 'import torch\n'), ((5300, 5331), 'trained_gpt_model.get_inference2', 'get_inference2', (['answer', 'context'], {}), '(answer, context)\n', (5314, 5331), False, 'from trained_gpt_model import get_inference2\n')]
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
import os
import sys
import logging
def str2bool(v):
return v.lower() in ('yes', 'true', 't', '1', 'y')
def add_cmdline_args(parser):
# Runtime environment
agent = parser.add_argument_group('DrQA Arguments')
agent.add_argument('--no_cuda', type='bool', default=False)
agent.add_argument('--gpu', type=int, default=-1)
agent.add_argument('--random_seed', type=int, default=1013)
# Basics
agent.add_argument('--embedding_file', type=str, default=None,
help='File of space separated embeddings: w e1 ... ed')
agent.add_argument('--pretrained_model', type=str, default=None,
help='Load dict/features/weights/opts from this file')
agent.add_argument('--log_file', type=str, default=None)
# Model details
agent.add_argument('--fix_embeddings', type='bool', default=True)
agent.add_argument('--tune_partial', type=int, default=0,
help='Train the K most frequent word embeddings')
agent.add_argument('--embedding_dim', type=int, default=300,
help=('Default embedding size if '
'embedding_file is not given'))
agent.add_argument('--hidden_size', type=int, default=128,
help='Hidden size of RNN units')
agent.add_argument('--doc_layers', type=int, default=3,
help='Number of RNN layers for passage')
agent.add_argument('--question_layers', type=int, default=3,
help='Number of RNN layers for question')
agent.add_argument('--rnn_type', type=str, default='lstm',
help='RNN type: lstm (default), gru, or rnn')
# Optimization details
agent.add_argument('--valid_metric', type=str,
choices=['accuracy', 'f1'], default='f1',
help='Metric for choosing best valid model')
agent.add_argument('--max_len', type=int, default=15,
help='The max span allowed during decoding')
agent.add_argument('--rnn_padding', type='bool', default=False)
agent.add_argument('--display_iter', type=int, default=10,
help='Print train error after every \
                           <display_iter> epochs (default 10)')
agent.add_argument('--dropout_emb', type=float, default=0.4,
help='Dropout rate for word embeddings')
agent.add_argument('--dropout_rnn', type=float, default=0.4,
help='Dropout rate for RNN states')
agent.add_argument('--dropout_rnn_output', type='bool', default=True,
help='Whether to dropout the RNN output')
agent.add_argument('--optimizer', type=str, default='adamax',
help='Optimizer: sgd or adamax (default)')
agent.add_argument('--learning_rate', '-lr', type=float, default=0.1,
help='Learning rate for SGD (default 0.1)')
agent.add_argument('--grad_clipping', type=float, default=10,
help='Gradient clipping (default 10.0)')
agent.add_argument('--weight_decay', type=float, default=0,
help='Weight decay (default 0)')
agent.add_argument('--momentum', type=float, default=0,
help='Momentum (default 0)')
# Model-specific
agent.add_argument('--concat_rnn_layers', type='bool', default=True)
agent.add_argument('--question_merge', type=str, default='self_attn',
help='The way of computing question representation')
agent.add_argument('--use_qemb', type='bool', default=True,
help='Whether to use weighted question embeddings')
agent.add_argument('--use_in_question', type='bool', default=True,
help='Whether to use in_question features')
agent.add_argument('--use_tf', type='bool', default=True,
help='Whether to use tf features')
agent.add_argument('--use_time', type=int, default=0,
help='Time features marking how recent word was said')
def set_defaults(opt):
# Embeddings options
if opt.get('embedding_file'):
if not os.path.isfile(opt['embedding_file']):
            raise IOError('No such file: %s' % opt['embedding_file'])
with open(opt['embedding_file']) as f:
dim = len(f.readline().strip().split(' ')) - 1
opt['embedding_dim'] = dim
elif not opt.get('embedding_dim'):
raise RuntimeError(('Either embedding_file or embedding_dim '
'needs to be specified.'))
# Make sure tune_partial and fix_embeddings are consistent
if opt['tune_partial'] > 0 and opt['fix_embeddings']:
print('Setting fix_embeddings to False as tune_partial > 0.')
opt['fix_embeddings'] = False
# Make sure fix_embeddings and embedding_file are consistent
if opt['fix_embeddings']:
if not opt.get('embedding_file') and not opt.get('pretrained_model'):
print('Setting fix_embeddings to False as embeddings are random.')
opt['fix_embeddings'] = False
def override_args(opt, override_opt):
# Major model args are reset to the values in override_opt.
# Non-architecture args (like dropout) are kept.
args = set(['embedding_file', 'embedding_dim', 'hidden_size', 'doc_layers',
'question_layers', 'rnn_type', 'optimizer', 'concat_rnn_layers',
'question_merge', 'use_qemb', 'use_in_question', 'use_tf',
'vocab_size', 'num_features', 'use_time'])
for k, v in override_opt.items():
if k in args:
opt[k] = v
| [
"os.path.isfile"
]
| [((4508, 4545), 'os.path.isfile', 'os.path.isfile', (["opt['embedding_file']"], {}), "(opt['embedding_file'])\n", (4522, 4545), False, 'import os\n')] |
"""
Pythonista3 app CodeMirror
"""
import pythonista.wkwebview as wkwebview
import ui
import pathlib
uri = pathlib.Path('./main_index.html')
class View(ui.View):
def __init__(self):
self.wv = wkwebview.WKWebView(flex='WH')
self.wv.load_url(str(uri))
self.add_subview(self.wv)
def will_close(self):
self.wv.clear_cache()
_view = View()
_view.present(style='fullscreen', orientations=['portrait'])
| [
"pythonista.wkwebview.WKWebView",
"pathlib.Path"
]
| [((109, 142), 'pathlib.Path', 'pathlib.Path', (['"""./main_index.html"""'], {}), "('./main_index.html')\n", (121, 142), False, 'import pathlib\n'), ((202, 232), 'pythonista.wkwebview.WKWebView', 'wkwebview.WKWebView', ([], {'flex': '"""WH"""'}), "(flex='WH')\n", (221, 232), True, 'import pythonista.wkwebview as wkwebview\n')] |
from django.test import TestCase
from search.read_similarities import build_manual_similarity_map
from common.testhelpers.random_test_values import a_string, a_float
class TestReadingManualTaskSimilarities(TestCase):
def test_convert_matrix_to_map_from_topic_to_array_of_services(self):
data = [
['topic1', 'topic2'],
['service1', 'service2'],
]
expected_result = {
'topic1': ['service1'],
'topic2': ['service2'],
}
result = build_manual_similarity_map(data)
self.assertEqual(result, expected_result)
def test_can_handle_multiple_services_for_a_topic(self):
data = [
['topic1', ],
['service1'],
['service2'],
['service3'],
]
expected_result = {
'topic1': ['service1', 'service2', 'service3'],
}
result = build_manual_similarity_map(data)
self.assertEqual(result, expected_result)
def test_can_handle_different_numbers_of_services_for_different_topics(self):
data = [
['topic1', 'topic2'],
['service1', 'service2'],
['service3'],
]
expected_result = {
'topic1': ['service1', 'service3'],
'topic2': ['service2'],
}
result = build_manual_similarity_map(data)
self.assertEqual(result, expected_result)
def test_can_handle_empty_entries(self):
data = [
['topic1', 'topic2'],
['service1', 'service2'],
['', 'service3'],
[None, 'service4'],
]
expected_result = {
'topic1': ['service1'],
'topic2': ['service2', 'service3', 'service4'],
}
result = build_manual_similarity_map(data)
self.assertEqual(result, expected_result)
| [
"search.read_similarities.build_manual_similarity_map"
]
| [((519, 552), 'search.read_similarities.build_manual_similarity_map', 'build_manual_similarity_map', (['data'], {}), '(data)\n', (546, 552), False, 'from search.read_similarities import build_manual_similarity_map\n'), ((911, 944), 'search.read_similarities.build_manual_similarity_map', 'build_manual_similarity_map', (['data'], {}), '(data)\n', (938, 944), False, 'from search.read_similarities import build_manual_similarity_map\n'), ((1342, 1375), 'search.read_similarities.build_manual_similarity_map', 'build_manual_similarity_map', (['data'], {}), '(data)\n', (1369, 1375), False, 'from search.read_similarities import build_manual_similarity_map\n'), ((1784, 1817), 'search.read_similarities.build_manual_similarity_map', 'build_manual_similarity_map', (['data'], {}), '(data)\n', (1811, 1817), False, 'from search.read_similarities import build_manual_similarity_map\n')] |
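# Minimal sketch (added for illustration only - this is NOT the actual
# search.read_similarities implementation) of the column-oriented format the
# four tests above describe: row 0 holds topic names, each later row holds at
# most one service per column, and ''/None cells are skipped.
def build_manual_similarity_map_sketch(data):
    topics = data[0]
    similarity_map = {topic: [] for topic in topics}
    for row in data[1:]:
        for column, service in enumerate(row):
            if service:  # drop '' and None placeholders
                similarity_map[topics[column]].append(service)
    return similarity_map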
"""
Fortuna
Python project to visualize uncertainty in probabilistic exploration models.
Created on 09/06/2018
@authors: <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>
"""
# Import libraries
import numpy as np
import glob
from matplotlib import pyplot as plt
import pandas as pd
import xarray as xr
import pyproj as proj
from scipy.stats import norm
class Fortuna(object):
"""
Class to load the fortuna dataset and call different methods for visualization in a web frontend.
Args:
There are no required arguments at the moment. Input files could be defined.
"""
def __init__(self, **kwargs):
"""
        Method that is called when an object of the class Fortuna is instantiated; it imports the data and directly creates some important variables.
"""
# hardcode geometry
self.size_raster = (250,162)
self.X_corner = 390885
self.Y_corner = 7156947
self.dx, self.dy, self.dz = 25, 25, 100
self.top_model = 950
self.bottom_model = 1050
self.base_cube = None
self.top_cube = None
self.base_n = None
self.top_n = None
self.vol = None
# Create empty xarray dataset
self.ds = xr.Dataset()
self.xx = None
self.yy = None
self.zz = None
self.model = None
self.base_mean = None
self.base_std = None
self.top_mean = None
self.top_std = None
## Initial methods to load
self.import_data()
self.calc_xarray()
self.calc_stat()
### Methods for initiating the object
def folder2cube(self, files):
"""
        Method to read all realization files matching a glob pattern into a single 3-D cube.
"""
base_set = glob.glob(files)
cube = np.zeros(self.size_raster + (len(base_set),))
for i, model in enumerate(base_set):
cube[:, :, i] = np.loadtxt(model, skiprows=1).reshape(self.size_raster)
return cube, len(base_set)
def import_data(self):
"""
Method to load different data objects from files.
"""
self.base_cube, self.base_n = self.folder2cube('data/Hackaton/BaseSet/MapSimu__*.data')
self.top_cube, self.top_n = self.folder2cube('data/Hackaton/TopSet/MapSimu__*.data')
self.vol = pd.read_csv('data/Hackaton/VolumeDistribution/Volumes', delim_whitespace=True)
def calc_xarray (self):
self.xx = np.linspace(self.X_corner, self.X_corner + self.size_raster[0] * self.dx, self.size_raster[0])
self.yy = np.linspace(self.Y_corner, self.Y_corner + self.size_raster[1] * self.dy, self.size_raster[1])
self.zz = np.linspace(self.top_model, self.bottom_model, self.dz)
self.model = np.linspace(0, self.top_model, self.base_n)
self.ds.coords['X'] = self.xx
self.ds.coords['Y'] = self.yy
self.ds.coords['Z'] = self.zz
self.ds.coords['MODEL'] = self.model
self.ds['BASE'] = (('X', 'Y', 'MODEL'), self.base_cube)
self.ds['TOP'] = (('X', 'Y', 'MODEL'), self.top_cube)
def calc_stat (self):
self.base_mean = self.ds['BASE'].mean(dim='MODEL')
self.base_std = self.ds['BASE'].std(dim='MODEL')
self.top_mean = self.ds['TOP'].mean(dim='MODEL')
self.top_std = self.ds['TOP'].std(dim='MODEL')
## Data Management methods
def load_pickle(self, path):
return np.load(path)
    ## Methods to compute different uncertainty cubes --> cubes to be displayed in the frontend
def calc_lithology(self, iterations = 2):
"""
Sample from both distributions and fill each z-stack accordingly
"""
# create empty array
block = np.zeros((iterations, self.size_raster[0], self.size_raster[1], self.zz.size), dtype='int8')
for i in range(iterations):
for j in range(self.size_raster[0]): # size_raster[0]
for k in range(self.size_raster[1]):
# sample from top and base distributions for specific x,y position
top = np.random.normal(self.top_mean[j, k], self.top_std[j, k])
base = np.random.normal(self.base_mean[j, k], self.base_std[j, k])
# iterate over vertical z-stack
for l in range(self.zz.size):
if self.zz[l] <= top:
block[i, j, k, l] = 1
elif self.zz[l] > base:
block[i, j, k, l] = 3
                        elif (self.zz[l] > top) and (self.zz[l] <= base):
block[i, j, k, l] = 2
return block
def calc_lithology_vect(self, iterations=2):
"""
Resample from z value statistics and fill each z-stack in a lithology block accordingly.
This is the new method with vectorized operations to speed up calculations.
"""
# create empty array
block = np.zeros((iterations, self.xx.size, self.yy.size, self.zz.size), dtype='int8')
for i in range(iterations):
# create meshgrids grid for coordinate-wise iterations
mesh_x, mesh_y, mesh_z = np.meshgrid(np.arange(self.xx.size),
np.arange(self.yy.size),
np.arange(self.zz.size))
# sample from top and base distributions for specific x,y position
top = np.zeros([self.xx.size, self.yy.size])
base = np.zeros([self.xx.size, self.yy.size])
top[mesh_x, mesh_y] = np.random.normal(self.top_mean.values[mesh_x, mesh_y],
self.top_std.values[mesh_x, mesh_y])
            base[mesh_x, mesh_y] = np.random.normal(self.base_mean.values[mesh_x, mesh_y],
                                                    self.base_std.values[mesh_x, mesh_y])
# compare each cell to resampled reference values
# TODO generalize for any number of lithologies
block[i, mesh_x, mesh_y, mesh_z] = np.where(self.zz < top[mesh_x, mesh_y], 1,
np.where(self.zz < base[mesh_x, mesh_y], 2, 3))
return block
### Modifyed from GemPy!
def calc_probability_lithology(self, cube):
"""Blocks must be just the lith blocks!"""
lith_blocks = cube.reshape([cube.shape[0], (self.xx.size * self.yy.size * self.zz.size)])
lith_id = np.unique(lith_blocks)
# lith_count = np.zeros_like(lith_blocks[0:len(lith_id)])
lith_count = np.zeros((len(np.unique(lith_blocks)), lith_blocks.shape[1]))
for i, l_id in enumerate(lith_id):
lith_count[i] = np.sum(lith_blocks == l_id, axis=0)
lith_prob = lith_count / len(lith_blocks)
return lith_prob
### Modyfied from GemPy!
def calc_information_entropy(self, lith_prob):
"""Calculates information entropy for the given probability array."""
cube = np.zeros_like(lith_prob[0])
for l in lith_prob:
pm = np.ma.masked_equal(l, 0) # mask where layer prob is 0
cube -= (pm * np.ma.log2(pm)).filled(0)
return cube.reshape([self.xx.size, self.yy.size, self.zz.size])
# Try numpy.flatten and numpy.ravel
## Simple plotting methods
def plot_entropy(self, cube, slice=10):
        plt.imshow(cube[slice, :, :].T, origin='upper', cmap='viridis')
        plt.show()
| [
"matplotlib.pyplot.imshow",
"numpy.random.normal",
"numpy.ma.masked_equal",
"numpy.unique",
"pandas.read_csv",
"numpy.arange",
"numpy.where",
"numpy.ma.log2",
"xarray.Dataset",
"numpy.sum",
"numpy.linspace",
"numpy.zeros",
"numpy.loadtxt",
"numpy.load",
"numpy.zeros_like",
"glob.glob",
"matplotlib.pyplot.show"
]
| [((1237, 1249), 'xarray.Dataset', 'xr.Dataset', ([], {}), '()\n', (1247, 1249), True, 'import xarray as xr\n'), ((1733, 1749), 'glob.glob', 'glob.glob', (['files'], {}), '(files)\n', (1742, 1749), False, 'import glob\n'), ((2295, 2373), 'pandas.read_csv', 'pd.read_csv', (['"""data/Hackaton/VolumeDistribution/Volumes"""'], {'delim_whitespace': '(True)'}), "('data/Hackaton/VolumeDistribution/Volumes', delim_whitespace=True)\n", (2306, 2373), True, 'import pandas as pd\n'), ((2422, 2520), 'numpy.linspace', 'np.linspace', (['self.X_corner', '(self.X_corner + self.size_raster[0] * self.dx)', 'self.size_raster[0]'], {}), '(self.X_corner, self.X_corner + self.size_raster[0] * self.dx,\n    self.size_raster[0])\n', (2433, 2520), True, 'import numpy as np\n'), ((2535, 2633), 'numpy.linspace', 'np.linspace', (['self.Y_corner', '(self.Y_corner + self.size_raster[1] * self.dy)', 'self.size_raster[1]'], {}), '(self.Y_corner, self.Y_corner + self.size_raster[1] * self.dy,\n    self.size_raster[1])\n', (2546, 2633), True, 'import numpy as np\n'), ((2648, 2703), 'numpy.linspace', 'np.linspace', (['self.top_model', 'self.bottom_model', 'self.dz'], {}), '(self.top_model, self.bottom_model, self.dz)\n', (2659, 2703), True, 'import numpy as np\n'), ((2726, 2769), 'numpy.linspace', 'np.linspace', (['(0)', 'self.top_model', 'self.base_n'], {}), '(0, self.top_model, self.base_n)\n', (2737, 2769), True, 'import numpy as np\n'), ((3397, 3410), 'numpy.load', 'np.load', (['path'], {}), '(path)\n', (3404, 3410), True, 'import numpy as np\n'), ((3699, 3796), 'numpy.zeros', 'np.zeros', (['(iterations, self.size_raster[0], self.size_raster[1], self.zz.size)'], {'dtype': '"""int8"""'}), "((iterations, self.size_raster[0], self.size_raster[1], self.zz.\n    size), dtype='int8')\n", (3707, 3796), True, 'import numpy as np\n'), ((4947, 5025), 'numpy.zeros', 'np.zeros', (['(iterations, self.xx.size, self.yy.size, self.zz.size)'], {'dtype': '"""int8"""'}), "((iterations, self.xx.size, self.yy.size, self.zz.size), dtype='int8')\n", (4955, 5025), True, 'import numpy as np\n'), ((6490, 6512), 'numpy.unique', 'np.unique', (['lith_blocks'], {}), '(lith_blocks)\n', (6499, 6512), True, 'import numpy as np\n'), ((7019, 7046), 'numpy.zeros_like', 'np.zeros_like', (['lith_prob[0]'], {}), '(lith_prob[0])\n', (7032, 7046), True, 'import numpy as np\n'), ((7400, 7467), 'matplotlib.pyplot.imshow', 'plt.imshow', (['cube[slice, :, :].T'], {'origin': '"""upperleft"""', 'cmap': '"""viridis"""'}), "(cube[slice, :, :].T, origin='upperleft', cmap='viridis')\n", (7410, 7467), True, 'from matplotlib import pyplot as plt\n'), ((7476, 7486), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7484, 7486), True, 'from matplotlib import pyplot as plt\n'), ((5451, 5489), 'numpy.zeros', 'np.zeros', (['[self.xx.size, self.yy.size]'], {}), '([self.xx.size, self.yy.size])\n', (5459, 5489), True, 'import numpy as np\n'), ((5509, 5547), 'numpy.zeros', 'np.zeros', (['[self.xx.size, self.yy.size]'], {}), '([self.xx.size, self.yy.size])\n', (5517, 5547), True, 'import numpy as np\n'), ((5583, 5679), 'numpy.random.normal', 'np.random.normal', (['self.top_mean.values[mesh_x, mesh_y]', 'self.top_std.values[mesh_x, mesh_y]'], {}), '(self.top_mean.values[mesh_x, mesh_y], self.top_std.values[\n    mesh_x, mesh_y])\n', (5599, 5679), True, 'import numpy as np\n'), ((5761, 5857), 'numpy.random.normal', 'np.random.normal', (['self.top_mean.values[mesh_x, mesh_y]', 'self.top_std.values[mesh_x, mesh_y]'], {}), '(self.top_mean.values[mesh_x, mesh_y], self.top_std.values[\n    mesh_x, mesh_y])\n', (5777, 5857), True, 'import numpy as np\n'), ((6733, 6768), 'numpy.sum', 'np.sum', (['(lith_blocks == l_id)'], {'axis': '(0)'}), '(lith_blocks == l_id, axis=0)\n', (6739, 6768), True, 'import numpy as np\n'), ((7092, 7116), 'numpy.ma.masked_equal', 'np.ma.masked_equal', (['l', '(0)'], {}), '(l, 0)\n', (7110, 7116), True, 'import numpy as np\n'), ((5180, 5203), 'numpy.arange', 'np.arange', (['self.xx.size'], {}), '(self.xx.size)\n', (5189, 5203), True, 'import numpy as np\n'), ((5254, 5277), 'numpy.arange', 'np.arange', (['self.yy.size'], {}), '(self.yy.size)\n', (5263, 5277), True, 'import numpy as np\n'), ((5328, 5351), 'numpy.arange', 'np.arange', (['self.zz.size'], {}), '(self.zz.size)\n', (5337, 5351), True, 'import numpy as np\n'), ((6174, 6220), 'numpy.where', 'np.where', (['(self.zz < base[mesh_x, mesh_y])', '(2)', '(3)'], {}), '(self.zz < base[mesh_x, mesh_y], 2, 3)\n', (6182, 6220), True, 'import numpy as np\n'), ((1884, 1913), 'numpy.loadtxt', 'np.loadtxt', (['model'], {'skiprows': '(1)'}), '(model, skiprows=1)\n', (1894, 1913), True, 'import numpy as np\n'), ((4063, 4120), 'numpy.random.normal', 'np.random.normal', (['self.top_mean[j, k]', 'self.top_std[j, k]'], {}), '(self.top_mean[j, k], self.top_std[j, k])\n', (4079, 4120), True, 'import numpy as np\n'), ((4148, 4207), 'numpy.random.normal', 'np.random.normal', (['self.base_mean[j, k]', 'self.base_std[j, k]'], {}), '(self.base_mean[j, k], self.base_std[j, k])\n', (4164, 4207), True, 'import numpy as np\n'), ((6614, 6636), 'numpy.unique', 'np.unique', (['lith_blocks'], {}), '(lith_blocks)\n', (6623, 6636), True, 'import numpy as np\n'), ((7173, 7187), 'numpy.ma.log2', 'np.ma.log2', (['pm'], {}), '(pm)\n', (7183, 7187), True, 'import numpy as np\n')]
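# Small self-contained illustration (added, not part of the Fortuna class) of
# the per-cell Shannon entropy that calc_information_entropy computes:
# H = -sum_i p_i * log2(p_i) over the lithology probabilities of each cell.
# The toy probabilities below are made up for demonstration.
import numpy as np
lith_prob = np.array([[1.0, 0.5, 0.25],    # lithology 1 probability per cell
                      [0.0, 0.5, 0.75]])  # lithology 2 probability per cell
entropy = np.zeros_like(lith_prob[0])
for p in lith_prob:
    pm = np.ma.masked_equal(p, 0)              # ignore zero-probability entries
    entropy -= (pm * np.ma.log2(pm)).filled(0)
print(entropy)  # -> approximately [0. 1. 0.811]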
from PIL import Image
# open an image file (.bmp,.jpg,.png,.gif) you have in the working folder
# //imageFile = "03802.png"
import os
arr=os.listdir()
for imageFile in arr:
if "png" in imageFile:
im1 = Image.open(imageFile)
# adjust width and height to your needs
width = 416
height = 416
# use one of these filter options to resize the image
im2 = im1.resize((width, height), Image.NEAREST) # use nearest neighbour
# im3 = im1.resize((width, height), Image.BILINEAR) # linear interpolation in a 2x2 environment
# im4 = im1.resize((width, height), Image.BICUBIC) # cubic spline interpolation in a 4x4 environment
# im5 = im1.resize((width, height), Image.ANTIALIAS) # best down-sizing filter
ext = ".png"
# print(imageFile.split(".")[0])
num=imageFile.split(".")[0]
print(num)
print(type(num))
im2.save(imageFile)
# im2.save(imageFile+ ext)
# im3.save("BILINEAR" + ext)
# im4.save("BICUBIC" + ext)
# im5.save("ANTIALIAS" + ext)
| [
"os.listdir",
"PIL.Image.open"
]
| [((140, 152), 'os.listdir', 'os.listdir', ([], {}), '()\n', (150, 152), False, 'import os\n'), ((217, 238), 'PIL.Image.open', 'Image.open', (['imageFile'], {}), '(imageFile)\n', (227, 238), False, 'from PIL import Image\n')] |
from telegram.ext import CommandHandler, run_async
from bot.gDrive import GoogleDriveHelper
from bot.fs_utils import get_readable_file_size
from bot import LOGGER, dispatcher, updater, bot
from bot.config import BOT_TOKEN, OWNER_ID, GDRIVE_FOLDER_ID
from bot.decorators import is_authorised, is_owner
from telegram.error import TimedOut, BadRequest
from bot.clone_status import CloneStatus
from bot.msg_utils import deleteMessage, sendMessage
import time
REPO_LINK = "https://t.me/KOT_BOTS"
# Soon to be used for direct updates from within the bot.
@run_async
def start(update, context):
sendMessage("Hello! Please send me a Google Drive Shareable Link to Clone to your Drive!" \
"\nSend /help for checking all available commands.",
context.bot, update, 'Markdown')
# ;-;
@run_async
def helper(update, context):
sendMessage("Here are the available commands of the bot\n\n" \
"*Usage:* `/clone <link> [DESTINATION_ID]`\n*Example:* \n1. `/clone https://drive.google.com/drive/u/1/folders/0AO-ISIXXXXXXXXXXXX`\n2. `/clone 0AO-ISIXXXXXXXXXXXX`" \
"\n*DESTIONATION_ID* is optional. It can be either link or ID to where you wish to store a particular clone." \
"\n\nYou can also *ignore folders* from clone process by doing the following:\n" \
"`/clone <FOLDER_ID> [DESTINATION] [id1,id2,id3]`\n In this example: id1, id2 and id3 would get ignored from cloning\nDo not use <> or [] in actual message." \
"*Make sure to not put any space between commas (,).*\n" \
f"Source of this bot: [GitHub]({REPO_LINK})", context.bot, update, 'Markdown')
# TODO Cancel Clones with /cancel command.
@run_async
@is_authorised
def cloneNode(update, context):
args = update.message.text.split(" ")
if len(args) > 1:
link = args[1]
try:
ignoreList = args[-1].split(',')
except IndexError:
ignoreList = []
DESTINATION_ID = GDRIVE_FOLDER_ID
try:
DESTINATION_ID = args[2]
print(DESTINATION_ID)
except IndexError:
pass
# Usage: /clone <FolderToClone> <Destination> <IDtoIgnoreFromClone>,<IDtoIgnoreFromClone>
msg = sendMessage(f"<b>Cloning:</b> <code>{link}</code>", context.bot, update)
status_class = CloneStatus()
gd = GoogleDriveHelper(GFolder_ID=DESTINATION_ID)
sendCloneStatus(update, context, status_class, msg, link)
result = gd.clone(link, status_class, ignoreList=ignoreList)
deleteMessage(context.bot, msg)
status_class.set_status(True)
sendMessage(result, context.bot, update)
else:
sendMessage("Please Provide a Google Drive Shared Link to Clone.", bot, update)
@run_async
def sendCloneStatus(update, context, status, msg, link):
old_text = ''
while not status.done():
sleeper(3)
try:
text=f'🔗 *Cloning:* [{status.MainFolderName}]({status.MainFolderLink})\n━━━━━━━━━━━━━━\n🗃️ *Current File:* `{status.get_name()}`\n⬆️ *Transferred*: `{status.get_size()}`\n📁 *Destination:* [{status.DestinationFolderName}]({status.DestinationFolderLink})'
if status.checkFileStatus():
text += f"\n🕒 *Checking Existing Files:* `{str(status.checkFileStatus())}`"
if not text == old_text:
msg.edit_text(text=text, parse_mode="Markdown", timeout=200)
old_text = text
except Exception as e:
LOGGER.error(e)
if str(e) == "Message to edit not found":
break
sleeper(2)
continue
return
def sleeper(value, enabled=True):
time.sleep(int(value))
return
@run_async
@is_owner
def sendLogs(update, context):
with open('log.txt', 'rb') as f:
bot.send_document(document=f, filename=f.name,
reply_to_message_id=update.message.message_id,
chat_id=update.message.chat_id)
def main():
LOGGER.info("Bot Started!")
clone_handler = CommandHandler('clone', cloneNode)
start_handler = CommandHandler('start', start)
help_handler = CommandHandler('help', helper)
log_handler = CommandHandler('logs', sendLogs)
dispatcher.add_handler(log_handler)
dispatcher.add_handler(start_handler)
dispatcher.add_handler(clone_handler)
dispatcher.add_handler(help_handler)
updater.start_polling()
main()
| [
"bot.LOGGER.error",
"bot.LOGGER.info",
"bot.bot.send_document",
"bot.gDrive.GoogleDriveHelper",
"bot.msg_utils.sendMessage",
"bot.dispatcher.add_handler",
"bot.updater.start_polling",
"bot.clone_status.CloneStatus",
"bot.msg_utils.deleteMessage",
"telegram.ext.CommandHandler"
]
| [((594, 779), 'bot.msg_utils.sendMessage', 'sendMessage', (['"""Hello! Please send me a Google Drive Shareable Link to Clone to your Drive!\nSend /help for checking all available commands."""', 'context.bot', 'update', '"""Markdown"""'], {}), '(\n """Hello! Please send me a Google Drive Shareable Link to Clone to your Drive!\nSend /help for checking all available commands."""\n , context.bot, update, \'Markdown\')\n', (605, 779), False, 'from bot.msg_utils import deleteMessage, sendMessage\n'), ((839, 1534), 'bot.msg_utils.sendMessage', 'sendMessage', (['f"""Here are the available commands of the bot\n\n*Usage:* `/clone <link> [DESTINATION_ID]`\n*Example:* \n1. `/clone https://drive.google.com/drive/u/1/folders/0AO-ISIXXXXXXXXXXXX`\n2. `/clone 0AO-ISIXXXXXXXXXXXX`\n*DESTIONATION_ID* is optional. It can be either link or ID to where you wish to store a particular clone.\n\nYou can also *ignore folders* from clone process by doing the following:\n`/clone <FOLDER_ID> [DESTINATION] [id1,id2,id3]`\n In this example: id1, id2 and id3 would get ignored from cloning\nDo not use <> or [] in actual message.*Make sure to not put any space between commas (,).*\nSource of this bot: [GitHub]({REPO_LINK})"""', 'context.bot', 'update', '"""Markdown"""'], {}), '(\n f"""Here are the available commands of the bot\n\n*Usage:* `/clone <link> [DESTINATION_ID]`\n*Example:* \n1. `/clone https://drive.google.com/drive/u/1/folders/0AO-ISIXXXXXXXXXXXX`\n2. `/clone 0AO-ISIXXXXXXXXXXXX`\n*DESTIONATION_ID* is optional. It can be either link or ID to where you wish to store a particular clone.\n\nYou can also *ignore folders* from clone process by doing the following:\n`/clone <FOLDER_ID> [DESTINATION] [id1,id2,id3]`\n In this example: id1, id2 and id3 would get ignored from cloning\nDo not use <> or [] in actual message.*Make sure to not put any space between commas (,).*\nSource of this bot: [GitHub]({REPO_LINK})"""\n , context.bot, update, \'Markdown\')\n', (850, 1534), False, 'from bot.msg_utils import deleteMessage, sendMessage\n'), ((4019, 4046), 'bot.LOGGER.info', 'LOGGER.info', (['"""Bot Started!"""'], {}), "('Bot Started!')\n", (4030, 4046), False, 'from bot import LOGGER, dispatcher, updater, bot\n'), ((4067, 4101), 'telegram.ext.CommandHandler', 'CommandHandler', (['"""clone"""', 'cloneNode'], {}), "('clone', cloneNode)\n", (4081, 4101), False, 'from telegram.ext import CommandHandler, run_async\n'), ((4122, 4152), 'telegram.ext.CommandHandler', 'CommandHandler', (['"""start"""', 'start'], {}), "('start', start)\n", (4136, 4152), False, 'from telegram.ext import CommandHandler, run_async\n'), ((4172, 4202), 'telegram.ext.CommandHandler', 'CommandHandler', (['"""help"""', 'helper'], {}), "('help', helper)\n", (4186, 4202), False, 'from telegram.ext import CommandHandler, run_async\n'), ((4221, 4253), 'telegram.ext.CommandHandler', 'CommandHandler', (['"""logs"""', 'sendLogs'], {}), "('logs', sendLogs)\n", (4235, 4253), False, 'from telegram.ext import CommandHandler, run_async\n'), ((4258, 4293), 'bot.dispatcher.add_handler', 'dispatcher.add_handler', (['log_handler'], {}), '(log_handler)\n', (4280, 4293), False, 'from bot import LOGGER, dispatcher, updater, bot\n'), ((4298, 4335), 'bot.dispatcher.add_handler', 'dispatcher.add_handler', (['start_handler'], {}), '(start_handler)\n', (4320, 4335), False, 'from bot import LOGGER, dispatcher, updater, bot\n'), ((4340, 4377), 'bot.dispatcher.add_handler', 'dispatcher.add_handler', (['clone_handler'], {}), '(clone_handler)\n', (4362, 4377), False, 'from bot import 
LOGGER, dispatcher, updater, bot\n'), ((4382, 4418), 'bot.dispatcher.add_handler', 'dispatcher.add_handler', (['help_handler'], {}), '(help_handler)\n', (4404, 4418), False, 'from bot import LOGGER, dispatcher, updater, bot\n'), ((4423, 4446), 'bot.updater.start_polling', 'updater.start_polling', ([], {}), '()\n', (4444, 4446), False, 'from bot import LOGGER, dispatcher, updater, bot\n'), ((2245, 2317), 'bot.msg_utils.sendMessage', 'sendMessage', (['f"""<b>Cloning:</b> <code>{link}</code>"""', 'context.bot', 'update'], {}), "(f'<b>Cloning:</b> <code>{link}</code>', context.bot, update)\n", (2256, 2317), False, 'from bot.msg_utils import deleteMessage, sendMessage\n'), ((2341, 2354), 'bot.clone_status.CloneStatus', 'CloneStatus', ([], {}), '()\n', (2352, 2354), False, 'from bot.clone_status import CloneStatus\n'), ((2368, 2412), 'bot.gDrive.GoogleDriveHelper', 'GoogleDriveHelper', ([], {'GFolder_ID': 'DESTINATION_ID'}), '(GFolder_ID=DESTINATION_ID)\n', (2385, 2412), False, 'from bot.gDrive import GoogleDriveHelper\n'), ((2556, 2587), 'bot.msg_utils.deleteMessage', 'deleteMessage', (['context.bot', 'msg'], {}), '(context.bot, msg)\n', (2569, 2587), False, 'from bot.msg_utils import deleteMessage, sendMessage\n'), ((2634, 2674), 'bot.msg_utils.sendMessage', 'sendMessage', (['result', 'context.bot', 'update'], {}), '(result, context.bot, update)\n', (2645, 2674), False, 'from bot.msg_utils import deleteMessage, sendMessage\n'), ((2693, 2772), 'bot.msg_utils.sendMessage', 'sendMessage', (['"""Please Provide a Google Drive Shared Link to Clone."""', 'bot', 'update'], {}), "('Please Provide a Google Drive Shared Link to Clone.', bot, update)\n", (2704, 2772), False, 'from bot.msg_utils import deleteMessage, sendMessage\n'), ((3828, 3958), 'bot.bot.send_document', 'bot.send_document', ([], {'document': 'f', 'filename': 'f.name', 'reply_to_message_id': 'update.message.message_id', 'chat_id': 'update.message.chat_id'}), '(document=f, filename=f.name, reply_to_message_id=update.\n message.message_id, chat_id=update.message.chat_id)\n', (3845, 3958), False, 'from bot import LOGGER, dispatcher, updater, bot\n'), ((3510, 3525), 'bot.LOGGER.error', 'LOGGER.error', (['e'], {}), '(e)\n', (3522, 3525), False, 'from bot import LOGGER, dispatcher, updater, bot\n')] |
import calendar
from datetime import datetime, timedelta
import json
import logging
import re
import rfc822
from django.conf import settings
from django.db.utils import IntegrityError
import cronjobs
from multidb.pinning import pin_this_thread
from statsd import statsd
from twython import Twython
from kitsune.customercare.models import Tweet, TwitterAccount, Reply
from kitsune.sumo.redis_utils import redis_client, RedisError
from kitsune.sumo.utils import chunked
LINK_REGEX = re.compile('https?\:', re.IGNORECASE)
RT_REGEX = re.compile('^rt\W', re.IGNORECASE)
ALLOWED_USERS = [
{'id': 2142731, 'username': 'Firefox'},
{'id': 150793437, 'username': 'FirefoxBrasil'},
{'id': 107272435, 'username': 'firefox_es'},
]
log = logging.getLogger('k.twitter')
def get_word_blacklist_regex():
"""
Make a regex that looks kind of like r'\b(foo|bar|baz)\b'.
This is a function so that it isn't calculated at import time,
and so can be tested more easily.
This doesn't use raw strings (r'') because the "mismatched" parens
were confusing my syntax highlighter, which was confusing me.
"""
return re.compile(
'\\b(' +
'|'.join(map(re.escape, settings.CC_WORD_BLACKLIST)) +
')\\b')
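# Hedged illustration of the pattern built above (the blacklist value here is an
# assumption, not the real setting):
#   settings.CC_WORD_BLACKLIST = ['foo', 'bar']
#   get_word_blacklist_regex().pattern  ->  '\\b(foo|bar)\\b'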
@cronjobs.register
def collect_tweets():
# Don't (ab)use the twitter API from dev and stage.
if settings.STAGE:
return
"""Collect new tweets about Firefox."""
with statsd.timer('customercare.tweets.time_elapsed'):
t = Twython(settings.TWITTER_CONSUMER_KEY,
settings.TWITTER_CONSUMER_SECRET,
settings.TWITTER_ACCESS_TOKEN,
settings.TWITTER_ACCESS_TOKEN_SECRET)
search_options = {
'q': ('firefox OR #fxinput OR @firefoxbrasil OR #firefoxos '
'OR @firefox_es'),
'count': settings.CC_TWEETS_PERPAGE, # Items per page.
'result_type': 'recent', # Retrieve tweets by date.
}
# If we already have some tweets, collect nothing older than what we
# have.
try:
latest_tweet = Tweet.latest()
except Tweet.DoesNotExist:
log.debug('No existing tweets. Retrieving %d tweets from search.' %
settings.CC_TWEETS_PERPAGE)
else:
search_options['since_id'] = latest_tweet.tweet_id
log.info('Retrieving tweets with id >= %s' % latest_tweet.tweet_id)
# Retrieve Tweets
results = t.search(**search_options)
if len(results['statuses']) == 0:
# Twitter returned 0 results.
return
# Drop tweets into DB
for item in results['statuses']:
# Apply filters to tweet before saving
# Allow links in #fxinput tweets
statsd.incr('customercare.tweet.collected')
item = _filter_tweet(item,
allow_links='#fxinput' in item['text'])
if not item:
continue
created_date = datetime.utcfromtimestamp(calendar.timegm(
rfc822.parsedate(item['created_at'])))
item_lang = item['metadata'].get('iso_language_code', 'en')
tweet = Tweet(tweet_id=item['id'], raw_json=json.dumps(item),
locale=item_lang, created=created_date)
try:
tweet.save()
statsd.incr('customercare.tweet.saved')
except IntegrityError:
pass
@cronjobs.register
def purge_tweets():
"""Periodically purge old tweets for each locale.
This does a lot of DELETEs on master, so it shouldn't run too frequently.
Probably once every hour or more.
"""
# Pin to master
pin_this_thread()
# Build list of tweets to delete, by id.
for locale in settings.SUMO_LANGUAGES:
locale = settings.LOCALES[locale].iso639_1
# Some locales don't have an iso639_1 code, too bad for them.
if not locale:
continue
oldest = _get_oldest_tweet(locale, settings.CC_MAX_TWEETS)
if oldest:
log.debug('Truncating tweet list: Removing tweets older than %s, '
'for [%s].' % (oldest.created, locale))
Tweet.objects.filter(locale=locale,
created__lte=oldest.created).delete()
def _get_oldest_tweet(locale, n=0):
"""Returns the nth oldest tweet per locale, defaults to newest."""
try:
return Tweet.objects.filter(locale=locale).order_by(
'-created')[n]
except IndexError:
return None
def _filter_tweet(item, allow_links=False):
"""
Apply some filters to an incoming tweet.
May modify tweet. If None is returned, tweet will be discarded.
Used to exclude replies and such from incoming tweets.
"""
text = item['text'].lower()
# No replies, except to ALLOWED_USERS
allowed_user_ids = [u['id'] for u in ALLOWED_USERS]
to_user_id = item.get('to_user_id')
if to_user_id and to_user_id not in allowed_user_ids:
statsd.incr('customercare.tweet.rejected.reply_or_mention')
return None
# No mentions, except of ALLOWED_USERS
for user in item['entities']['user_mentions']:
if user['id'] not in allowed_user_ids:
statsd.incr('customercare.tweet.rejected.reply_or_mention')
return None
# No retweets
if RT_REGEX.search(text) or text.find('(via ') > -1:
statsd.incr('customercare.tweet.rejected.retweet')
return None
# No links
if not allow_links and LINK_REGEX.search(text):
statsd.incr('customercare.tweet.rejected.link')
return None
screen_name = item['user']['screen_name']
# Django's caching system will save us here.
IGNORED_USERS = set(
TwitterAccount.objects
.filter(ignored=True)
.values_list('username', flat=True)
)
# Exclude filtered users
if screen_name in IGNORED_USERS:
statsd.incr('customercare.tweet.rejected.user')
return None
    # Exclude users with firefox in the handle
if 'firefox' in screen_name.lower():
statsd.incr('customercare.tweet.rejected.firefox_in_handle')
return None
# Exclude problem words
match = get_word_blacklist_regex().search(text)
if match:
bad_word = match.group(1)
statsd.incr('customercare.tweet.rejected.blacklist_word.' + bad_word)
return None
return item
@cronjobs.register
def get_customercare_stats():
"""
Generate customer care stats from the Replies table.
This gets cached in Redis as a sorted list of contributors, stored as JSON.
Example Top Contributor data:
[
{
'twitter_username': 'username1',
'avatar': 'http://twitter.com/path/to/the/avatar.png',
'avatar_https': 'https://twitter.com/path/to/the/avatar.png',
'all': 5211,
'1m': 230,
'1w': 33,
'1d': 3,
},
{ ... },
{ ... },
]
"""
if settings.STAGE:
return
contributor_stats = {}
now = datetime.now()
one_month_ago = now - timedelta(days=30)
one_week_ago = now - timedelta(days=7)
yesterday = now - timedelta(days=1)
for chunk in chunked(Reply.objects.all(), 2500, Reply.objects.count()):
for reply in chunk:
user = reply.twitter_username
if user not in contributor_stats:
raw = json.loads(reply.raw_json)
if 'from_user' in raw: # For tweets collected using v1 API
user_data = raw
else:
user_data = raw['user']
contributor_stats[user] = {
'twitter_username': user,
'avatar': user_data['profile_image_url'],
'avatar_https': user_data['profile_image_url_https'],
'all': 0, '1m': 0, '1w': 0, '1d': 0,
}
contributor = contributor_stats[reply.twitter_username]
contributor['all'] += 1
if reply.created > one_month_ago:
contributor['1m'] += 1
if reply.created > one_week_ago:
contributor['1w'] += 1
if reply.created > yesterday:
contributor['1d'] += 1
sort_key = settings.CC_TOP_CONTRIB_SORT
limit = settings.CC_TOP_CONTRIB_LIMIT
# Sort by whatever is in settings, break ties with 'all'
contributor_stats = sorted(contributor_stats.values(),
key=lambda c: (c[sort_key], c['all']),
reverse=True)[:limit]
try:
redis = redis_client(name='default')
key = settings.CC_TOP_CONTRIB_CACHE_KEY
redis.set(key, json.dumps(contributor_stats))
except RedisError as e:
statsd.incr('redis.error')
log.error('Redis error: %s' % e)
return contributor_stats
| [
"logging.getLogger",
"kitsune.customercare.models.Reply.objects.all",
"json.loads",
"kitsune.customercare.models.Tweet.objects.filter",
"kitsune.customercare.models.TwitterAccount.objects.filter",
"twython.Twython",
"re.compile",
"statsd.statsd.incr",
"json.dumps",
"multidb.pinning.pin_this_thread",
"kitsune.customercare.models.Tweet.latest",
"kitsune.sumo.redis_utils.redis_client",
"kitsune.customercare.models.Reply.objects.count",
"datetime.datetime.now",
"statsd.statsd.timer",
"rfc822.parsedate",
"datetime.timedelta"
]
| [((486, 524), 're.compile', 're.compile', (['"""https?\\\\:"""', 're.IGNORECASE'], {}), "('https?\\\\:', re.IGNORECASE)\n", (496, 524), False, 'import re\n'), ((535, 570), 're.compile', 're.compile', (['"""^rt\\\\W"""', 're.IGNORECASE'], {}), "('^rt\\\\W', re.IGNORECASE)\n", (545, 570), False, 'import re\n'), ((743, 773), 'logging.getLogger', 'logging.getLogger', (['"""k.twitter"""'], {}), "('k.twitter')\n", (760, 773), False, 'import logging\n'), ((3762, 3779), 'multidb.pinning.pin_this_thread', 'pin_this_thread', ([], {}), '()\n', (3777, 3779), False, 'from multidb.pinning import pin_this_thread\n'), ((7176, 7190), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (7188, 7190), False, 'from datetime import datetime, timedelta\n'), ((1441, 1489), 'statsd.statsd.timer', 'statsd.timer', (['"""customercare.tweets.time_elapsed"""'], {}), "('customercare.tweets.time_elapsed')\n", (1453, 1489), False, 'from statsd import statsd\n'), ((1503, 1648), 'twython.Twython', 'Twython', (['settings.TWITTER_CONSUMER_KEY', 'settings.TWITTER_CONSUMER_SECRET', 'settings.TWITTER_ACCESS_TOKEN', 'settings.TWITTER_ACCESS_TOKEN_SECRET'], {}), '(settings.TWITTER_CONSUMER_KEY, settings.TWITTER_CONSUMER_SECRET,\n settings.TWITTER_ACCESS_TOKEN, settings.TWITTER_ACCESS_TOKEN_SECRET)\n', (1510, 1648), False, 'from twython import Twython\n'), ((5100, 5159), 'statsd.statsd.incr', 'statsd.incr', (['"""customercare.tweet.rejected.reply_or_mention"""'], {}), "('customercare.tweet.rejected.reply_or_mention')\n", (5111, 5159), False, 'from statsd import statsd\n'), ((5502, 5552), 'statsd.statsd.incr', 'statsd.incr', (['"""customercare.tweet.rejected.retweet"""'], {}), "('customercare.tweet.rejected.retweet')\n", (5513, 5552), False, 'from statsd import statsd\n'), ((5649, 5696), 'statsd.statsd.incr', 'statsd.incr', (['"""customercare.tweet.rejected.link"""'], {}), "('customercare.tweet.rejected.link')\n", (5660, 5696), False, 'from statsd import statsd\n'), ((6025, 6072), 'statsd.statsd.incr', 'statsd.incr', (['"""customercare.tweet.rejected.user"""'], {}), "('customercare.tweet.rejected.user')\n", (6036, 6072), False, 'from statsd import statsd\n'), ((6189, 6249), 'statsd.statsd.incr', 'statsd.incr', (['"""customercare.tweet.rejected.firefox_in_handle"""'], {}), "('customercare.tweet.rejected.firefox_in_handle')\n", (6200, 6249), False, 'from statsd import statsd\n'), ((6407, 6476), 'statsd.statsd.incr', 'statsd.incr', (["('customercare.tweet.rejected.blacklist_word.' + bad_word)"], {}), "('customercare.tweet.rejected.blacklist_word.' 
+ bad_word)\n", (6418, 6476), False, 'from statsd import statsd\n'), ((7217, 7235), 'datetime.timedelta', 'timedelta', ([], {'days': '(30)'}), '(days=30)\n', (7226, 7235), False, 'from datetime import datetime, timedelta\n'), ((7261, 7278), 'datetime.timedelta', 'timedelta', ([], {'days': '(7)'}), '(days=7)\n', (7270, 7278), False, 'from datetime import datetime, timedelta\n'), ((7301, 7318), 'datetime.timedelta', 'timedelta', ([], {'days': '(1)'}), '(days=1)\n', (7310, 7318), False, 'from datetime import datetime, timedelta\n'), ((7345, 7364), 'kitsune.customercare.models.Reply.objects.all', 'Reply.objects.all', ([], {}), '()\n', (7362, 7364), False, 'from kitsune.customercare.models import Tweet, TwitterAccount, Reply\n'), ((7372, 7393), 'kitsune.customercare.models.Reply.objects.count', 'Reply.objects.count', ([], {}), '()\n', (7391, 7393), False, 'from kitsune.customercare.models import Tweet, TwitterAccount, Reply\n'), ((8776, 8804), 'kitsune.sumo.redis_utils.redis_client', 'redis_client', ([], {'name': '"""default"""'}), "(name='default')\n", (8788, 8804), False, 'from kitsune.sumo.redis_utils import redis_client, RedisError\n'), ((2120, 2134), 'kitsune.customercare.models.Tweet.latest', 'Tweet.latest', ([], {}), '()\n', (2132, 2134), False, 'from kitsune.customercare.models import Tweet, TwitterAccount, Reply\n'), ((2813, 2856), 'statsd.statsd.incr', 'statsd.incr', (['"""customercare.tweet.collected"""'], {}), "('customercare.tweet.collected')\n", (2824, 2856), False, 'from statsd import statsd\n'), ((5334, 5393), 'statsd.statsd.incr', 'statsd.incr', (['"""customercare.tweet.rejected.reply_or_mention"""'], {}), "('customercare.tweet.rejected.reply_or_mention')\n", (5345, 5393), False, 'from statsd import statsd\n'), ((8876, 8905), 'json.dumps', 'json.dumps', (['contributor_stats'], {}), '(contributor_stats)\n', (8886, 8905), False, 'import json\n'), ((8943, 8969), 'statsd.statsd.incr', 'statsd.incr', (['"""redis.error"""'], {}), "('redis.error')\n", (8954, 8969), False, 'from statsd import statsd\n'), ((3421, 3460), 'statsd.statsd.incr', 'statsd.incr', (['"""customercare.tweet.saved"""'], {}), "('customercare.tweet.saved')\n", (3432, 3460), False, 'from statsd import statsd\n'), ((5847, 5890), 'kitsune.customercare.models.TwitterAccount.objects.filter', 'TwitterAccount.objects.filter', ([], {'ignored': '(True)'}), '(ignored=True)\n', (5876, 5890), False, 'from kitsune.customercare.models import Tweet, TwitterAccount, Reply\n'), ((7534, 7560), 'json.loads', 'json.loads', (['reply.raw_json'], {}), '(reply.raw_json)\n', (7544, 7560), False, 'import json\n'), ((3106, 3142), 'rfc822.parsedate', 'rfc822.parsedate', (["item['created_at']"], {}), "(item['created_at'])\n", (3122, 3142), False, 'import rfc822\n'), ((3275, 3291), 'json.dumps', 'json.dumps', (['item'], {}), '(item)\n', (3285, 3291), False, 'import json\n'), ((4273, 4337), 'kitsune.customercare.models.Tweet.objects.filter', 'Tweet.objects.filter', ([], {'locale': 'locale', 'created__lte': 'oldest.created'}), '(locale=locale, created__lte=oldest.created)\n', (4293, 4337), False, 'from kitsune.customercare.models import Tweet, TwitterAccount, Reply\n'), ((4513, 4548), 'kitsune.customercare.models.Tweet.objects.filter', 'Tweet.objects.filter', ([], {'locale': 'locale'}), '(locale=locale)\n', (4533, 4548), False, 'from kitsune.customercare.models import Tweet, TwitterAccount, Reply\n')] |
import argparse
import sys
import cv2
import os
import os.path as osp
import numpy as np
if sys.version_info[0] == 2:
import xml.etree.cElementTree as ET
else:
import xml.etree.ElementTree as ET
parser = argparse.ArgumentParser(
description='Single Shot MultiBox Detector Training With Pytorch')
train_set = parser.add_mutually_exclusive_group()
parser.add_argument('--root', help='Dataset root directory path')
args = parser.parse_args()
CLASSES = ( # always index 0
'helmet', 'vest', 'no_helmet')
annopath = osp.join('%s', 'Annotations', '%s.{}'.format("xml"))
imgpath = osp.join('%s', 'JPEGImages', '%s.{}'.format("jpg"))
def vocChecker(image_id, width, height, keep_difficult = False):
target = ET.parse(annopath % image_id).getroot()
res = []
for obj in target.iter('object'):
difficult = int(obj.find('difficult').text) == 1
if not keep_difficult and difficult:
continue
name = obj.find('name').text.lower().strip()
bbox = obj.find('bndbox')
pts = ['xmin', 'ymin', 'xmax', 'ymax']
bndbox = []
for i, pt in enumerate(pts):
cur_pt = int(bbox.find(pt).text) - 1
# scale height or width
cur_pt = float(cur_pt) / width if i % 2 == 0 else float(cur_pt) / height
bndbox.append(cur_pt)
print(name)
label_idx = dict(zip(CLASSES, range(len(CLASSES))))[name]
bndbox.append(label_idx)
res += [bndbox] # [xmin, ymin, xmax, ymax, label_ind]
# img_id = target.find('filename').text[:-4]
print(res)
try :
print(np.array(res)[:,4])
print(np.array(res)[:,:4])
except IndexError:
print("\nINDEX ERROR HERE !\n")
exit(0)
return res # [[xmin, ymin, xmax, ymax, label_ind], ... ]
if __name__ == '__main__' :
i = 0
for name in sorted(os.listdir(osp.join(args.root,'Annotations'))):
# as we have only one annotations file per image
i += 1
img = cv2.imread(imgpath % (args.root,name.split('.')[0]))
height, width, channels = img.shape
        res = vocChecker((args.root, name.split('.')[0]), width, height)
print("path : {}".format(annopath % (args.root,name.split('.')[0])))
        res = vocChecker((args.root, name.split('.')[0]), width, height)
print("Total of annotations : {}".format(i)) | [
"os.path.join",
"numpy.array",
"xml.etree.ElementTree.parse",
"argparse.ArgumentParser"
]
| [((240, 335), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Single Shot MultiBox Detector Training With Pytorch"""'}), "(description=\n 'Single Shot MultiBox Detector Training With Pytorch')\n", (263, 335), False, 'import argparse\n'), ((765, 794), 'xml.etree.ElementTree.parse', 'ET.parse', (['(annopath % image_id)'], {}), '(annopath % image_id)\n', (773, 794), True, 'import xml.etree.ElementTree as ET\n'), ((1937, 1971), 'os.path.join', 'osp.join', (['args.root', '"""Annotations"""'], {}), "(args.root, 'Annotations')\n", (1945, 1971), True, 'import os.path as osp\n'), ((1666, 1679), 'numpy.array', 'np.array', (['res'], {}), '(res)\n', (1674, 1679), True, 'import numpy as np\n'), ((1700, 1713), 'numpy.array', 'np.array', (['res'], {}), '(res)\n', (1708, 1713), True, 'import numpy as np\n')] |
"""Parsing responses from the difficulty command."""
from mcipc.rcon.functions import boolmap
__all__ = ['parse']
SET = 'The difficulty has been set to (\\w+)'
UNCHANGED = 'The difficulty did not change; it is already set to (\\w+)'
def parse(text: str) -> bool:
"""Parses a boolean value from the text
returned by the difficulty command.
"""
return boolmap(text, true=SET, false=UNCHANGED)
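# Hedged usage sketch (not part of the original module; the response strings are
# assumptions modelled on the SET/UNCHANGED patterns above):
#   parse('The difficulty has been set to hard')                       -> True
#   parse('The difficulty did not change; it is already set to hard')  -> False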
| [
"mcipc.rcon.functions.boolmap"
]
| [((374, 414), 'mcipc.rcon.functions.boolmap', 'boolmap', (['text'], {'true': 'SET', 'false': 'UNCHANGED'}), '(text, true=SET, false=UNCHANGED)\n', (381, 414), False, 'from mcipc.rcon.functions import boolmap\n')] |
# Generated by Django 2.2.6 on 2019-10-25 12:31
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("scripts", "0012_auto_20190128_1820")]
operations = [
migrations.AlterField(
model_name="scriptdb",
name="db_typeclass_path",
field=models.CharField(
db_index=True,
help_text="this defines what 'type' of entity this is. This variable holds a Python path to a module with a valid Evennia Typeclass.",
max_length=255,
null=True,
verbose_name="typeclass",
),
)
]
| [
"django.db.models.CharField"
]
| [((334, 563), 'django.db.models.CharField', 'models.CharField', ([], {'db_index': '(True)', 'help_text': '"""this defines what \'type\' of entity this is. This variable holds a Python path to a module with a valid Evennia Typeclass."""', 'max_length': '(255)', 'null': '(True)', 'verbose_name': '"""typeclass"""'}), '(db_index=True, help_text=\n "this defines what \'type\' of entity this is. This variable holds a Python path to a module with a valid Evennia Typeclass."\n , max_length=255, null=True, verbose_name=\'typeclass\')\n', (350, 563), False, 'from django.db import migrations, models\n')] |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 - 2020 -- <NAME>
# All rights reserved.
#
# License: BSD License
#
"""\
Test against issue <https://github.com/pyqrcode/pyqrcodeNG/pull/13/>.
The initial test was created by Mathieu <https://github.com/albatros69>,
see the above mentioned pull request.
Adapted for Segno to check if it suffers from the same problem.
"""
from __future__ import absolute_import, unicode_literals
import segno
def test_autodetect():
data = 'Émetteur'
qr = segno.make(data)
assert qr.mode == 'byte'
def test_encoding():
encoding = 'iso-8859-15'
data = 'Émetteur'
qr = segno.make(data.encode(encoding))
assert qr.mode == 'byte'
qr2 = segno.make(data, encoding=encoding)
assert qr2 == qr
if __name__ == '__main__':
import pytest
pytest.main([__file__])
| [
"pytest.main",
"segno.make"
]
| [((496, 512), 'segno.make', 'segno.make', (['data'], {}), '(data)\n', (506, 512), False, 'import segno\n'), ((698, 733), 'segno.make', 'segno.make', (['data'], {'encoding': 'encoding'}), '(data, encoding=encoding)\n', (708, 733), False, 'import segno\n'), ((806, 829), 'pytest.main', 'pytest.main', (['[__file__]'], {}), '([__file__])\n', (817, 829), False, 'import pytest\n')] |
from src import app, db
from .models import User, Role, RoleUsers
from .security_admin import UserAdmin, RoleAdmin
from flask_security import Security, SQLAlchemyUserDatastore, \
login_required, roles_accepted
from flask_security.utils import encrypt_password
def config_security_admin(admin):
admin.add_view(UserAdmin(db.session))
admin.add_view(RoleAdmin(db.session))
def configure_security():
# Create the Roles "admin" and "end-user" -- unless they already exist
user_datastore.find_or_create_role(name='admin', description='Administrator')
user_datastore.find_or_create_role(name='end-user', description='End user')
user_datastore.find_or_create_role(name='blogger', description='Blogger')
# Create two Users for testing purposes -- unless they already exists.
# In each case, use Flask-Security utility function to encrypt the password.
pw = encrypt_password('password')
# pw = 'password'
if not user_datastore.get_user('<EMAIL>'):
user_datastore.create_user(email='<EMAIL>', password=pw)
if not user_datastore.get_user('<EMAIL>'):
user_datastore.create_user(email='<EMAIL>', password=pw)
    # Give one User the "end-user" role, while the other has the "admin" role.
    # (This will have no effect if the
    # Users already have these Roles.) Again, commit any database changes.
user_datastore.add_role_to_user('<EMAIL>', 'end-user')
user_datastore.add_role_to_user('<EMAIL>', 'blogger')
user_datastore.add_role_to_user('<EMAIL>', 'admin')
user_datastore.add_role_to_user('<EMAIL>', 'blogger')
db.session.commit()
# Setup Flask-Security
user_datastore = SQLAlchemyUserDatastore(db, User, Role)
security = Security(app, user_datastore)
# Create any database tables that don't exist yet.
db.create_all()
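# Hedged sketch of protecting a view with the roles configured above; the route
# and view name are assumptions, not part of this module:
# @app.route('/admin-only')
# @roles_accepted('admin')
# def admin_only():
#     return 'hello, admin'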
| [
"src.db.session.commit",
"src.db.create_all",
"flask_security.SQLAlchemyUserDatastore",
"flask_security.Security",
"flask_security.utils.encrypt_password"
]
| [((1667, 1706), 'flask_security.SQLAlchemyUserDatastore', 'SQLAlchemyUserDatastore', (['db', 'User', 'Role'], {}), '(db, User, Role)\n', (1690, 1706), False, 'from flask_security import Security, SQLAlchemyUserDatastore, login_required, roles_accepted\n'), ((1718, 1747), 'flask_security.Security', 'Security', (['app', 'user_datastore'], {}), '(app, user_datastore)\n', (1726, 1747), False, 'from flask_security import Security, SQLAlchemyUserDatastore, login_required, roles_accepted\n'), ((1800, 1815), 'src.db.create_all', 'db.create_all', ([], {}), '()\n', (1813, 1815), False, 'from src import app, db\n'), ((893, 921), 'flask_security.utils.encrypt_password', 'encrypt_password', (['"""password"""'], {}), "('password')\n", (909, 921), False, 'from flask_security.utils import encrypt_password\n'), ((1606, 1625), 'src.db.session.commit', 'db.session.commit', ([], {}), '()\n', (1623, 1625), False, 'from src import app, db\n')] |
import time
from jina.executors.crafters import BaseCrafter
from .helper import foo
class DummyHubExecutorSlow(BaseCrafter):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
time.sleep(15)
foo()
| [
"time.sleep"
]
| [((220, 234), 'time.sleep', 'time.sleep', (['(15)'], {}), '(15)\n', (230, 234), False, 'import time\n')] |
import numpy as np
import pandas as pd
from scipy.stats import spearmanr
from sklearn.metrics import f1_score, precision_score, recall_score
from IPython.display import display, clear_output
from sklearn.metrics import confusion_matrix
import scipy.stats as st
def continuous_to_categorical_with_quantiles(data: np.ndarray, quantiles:list ) -> np.ndarray:
""" Converts continuous data into binar classes using quantiles
Args:
data: shape [n_time, n_lat, n_lon]
quantiles:
list containing quantiles
Returns:
        tmp: shape [n_quantiles, n_time, n_lat, n_lon]
binary data
"""
shape = data.shape
tmp = np.zeros((len(quantiles), shape[0], shape[1], shape[2]))
for i, quantile in enumerate(quantiles):
threshold = np.quantile(data, quantile)
binary = np.where(data > threshold, 1, 0).reshape((shape[0], shape[1], shape[2],-1))
tmp[i] = binary.squeeze()
return tmp
def global_thresholds_from_quantiles(data: np.ndarray, quantiles:list) -> list:
thresholds = [np.quantile(data, quantile) for quantile in quantiles]
return thresholds
def local_thresholds_from_percentiles(data: np.ndarray, percentile: float, data_min=0) -> np.ndarray:
n_lat = data.shape[1]
n_lon = data.shape[2]
threshold_map = np.zeros((n_lat, n_lon))
for lat in range(n_lat):
for lon in range(n_lon):
tmp = data[:, lat, lon]
threshold = st.scoreatpercentile(tmp[tmp>data_min], percentile)
if not np.isnan(threshold):
threshold_map[lat, lon] = threshold
return threshold_map
def get_threshold_mask(data: np.ndarray, percentile: float, data_min=0) -> np.ndarray:
n_lat = data.shape[1]
n_lon = data.shape[2]
mask = np.zeros((n_lat, n_lon))
for lat in range(n_lat):
for lon in range(n_lon):
tmp = data[:, lat, lon]
threshold = st.scoreatpercentile(tmp[tmp>data_min], percentile)
if np.isnan(threshold):
mask[lat, lon] = 1
return mask
def continuous_to_categorical_with_thresholds(data: np.ndarray, thresholds: list) -> np.ndarray:
""" Converts continuous data into binar classes using thresholds
Args:
data: shape [n_time, n_lat, n_lon]
quantiles:
list containing thresholds
Returns:
        tmp: shape [n_quantiles, n_time, n_lat, n_lon]
binary data
"""
shape = data.shape
tmp = np.zeros((len(thresholds), shape[0], shape[1], shape[2]))
for i, threshold in enumerate(thresholds):
binary = np.where(data > threshold, 1, 0).reshape((shape[0], shape[1], shape[2],-1))
tmp[i] = binary.squeeze()
return tmp
def categorical_evaluation(prediction: np.ndarray, target: np.ndarray, metric_name: str, mask=None) -> list:
"""
Evaluates a regression prediction with the F1 score
on quantile-based categories
Args:
prediction: shape [n_classes, X]
target: shape [n_classes, X]
X can be any other number of dimensions > 0
Returns:
scores (list):
List with an element per class
"""
n_classes = prediction.shape[0]
prediction = prediction.reshape(n_classes, -1)
target = target.reshape(n_classes, -1)
scores = []
for c in range(n_classes):
forecast_skill = ForecastSkill(prediction[c], target[c])
forecast_skill.compute_categories(mask=mask)
scores.append(getattr(forecast_skill, f'get_{metric_name}')())
return scores
def geographic_categorical_evaluation(prediction: np.ndarray, target: np.ndarray, metric_name: str) -> np.ndarray:
"""
Evaluates a regression prediction with the F1 score
on quantile-based categories
Args:
prediction: shape [n_classes, n_time, n_lat, n_lon]
target: shape [n_classes, n_time, n_lat, n_lon]
Returns:
scores: shape [n_classes, n_lat, n_lon]
"""
n_classes = prediction.shape[0]
n_lat = prediction.shape[2]
n_lon = prediction.shape[3]
scores = np.zeros((n_classes, n_lat, n_lon))
for c in range(n_classes):
for lat in range(n_lat):
for lon in range(n_lon):
grid_cell_prediction = prediction[c, :, lat, lon]
grid_cell_target = target[c, :, lat, lon]
if sum(grid_cell_prediction) == 0 and sum(grid_cell_target) == 0:
scores[c, lat, lon] = -999
else:
forecast_skill = ForecastSkill(prediction[c, :, lat, lon], target[c, :, lat, lon])
forecast_skill.compute_categories()
scores[c, lat, lon] = getattr(forecast_skill, f'get_{metric_name}')()
                print(f'Progress {int((lat * n_lon + lon)/(n_lat*n_lon)*100):2d}%')
clear_output(wait=True)
return scores
class ForecastSkill:
""" A collection of categorical forecast skill metrics """
def __init__(self, prediction, target):
self.prediction = prediction
self.target = target
self.true_positive = 0
self.false_positive = 0
self.false_negative = 0
self.true_negative = 0
def compute_categories(self, mask=None):
self.target = self.target.flatten().astype('int')
self.prediction = self.prediction.flatten().astype('int')
if mask is not None:
mask = mask.flatten()
indices_to_remove = np.where(mask==1)
self.target = np.delete(self.target, indices_to_remove)
self.prediction = np.delete(self.prediction, indices_to_remove)
categories = confusion_matrix(self.target, self.prediction)
self.true_negative, self.false_positive, self.false_negative, self.true_positive = categories.ravel()
def print_category_sums(self):
total = self.target.size
print(f'tp: {self.true_positive/total*100:2.3f}')
print(f'fp: {self.false_positive/total*100:2.3f}')
print(f'fn: {self.false_negative/total*100:2.3f}')
print(f'tn: {self.true_negative/total*100:2.3f}')
def get_category_sums(self):
return self.true_positive, self.false_positive, self.false_negative, self.true_negative
def get_heidke_skill_score(self) -> float:
tp = self.true_positive
fp = self.false_positive
fn = self.false_negative
tn = self.true_negative
nominator = 2*(tp*tn - fp*fn)
denominator = ((tp + fn)*(fn + tn) + (tp + fp)*(fp + tn))
if denominator > 0:
return nominator/denominator
else:
            raise ValueError('division by zero')
def get_critical_success_index(self) -> float:
hits = self.true_positive
false_alarms = self.false_positive
misses = self.false_negative
nominator = hits
denominator = hits + misses + false_alarms
if denominator > 0:
return nominator/denominator
else:
            raise ValueError('division by zero')
def get_false_alarm_ratio(self) -> float:
hits = self.true_positive
false_alarms = self.false_positive
nominator = false_alarms
denominator = hits + false_alarms
if denominator > 0:
return nominator/denominator
else:
            raise ValueError('division by zero')
def get_probability_of_detection(self) -> float:
hits = self.true_positive
misses = self.false_negative
nominator = hits
denominator = hits + misses
if denominator > 0:
return nominator/denominator
else:
            raise ValueError('division by zero')
def get_f1(self) -> float:
return f1_score(self.target, self.prediction, average='binary')
def get_recall(self) -> float:
return recall_score(self.target, self.prediction, average='binary')
def get_precision(self) -> float:
return precision_score(self.target, self.prediction, average='binary')
def rmse(output, target):
return np.sqrt(((output-target)**2).mean(axis=0))
def me(output, target):
return (output-target).mean(axis=0)
def corr(output, target):
result = np.zeros((output.shape[1], output.shape[2]))
for i in range(output.shape[1]):
for j in range(output.shape[2]):
result[i,j] = spearmanr(output[:,i,j], target[:,i,j])[0]
return result
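# Hedged end-to-end sketch (illustrative only; the array shapes and quantiles are
# assumptions):
#   import numpy as np
#   obs  = np.random.rand(100, 8, 10)     # [n_time, n_lat, n_lon]
#   pred = np.random.rand(100, 8, 10)
#   thresholds = global_thresholds_from_quantiles(obs, [0.5, 0.9])
#   obs_cat  = continuous_to_categorical_with_thresholds(obs, thresholds)
#   pred_cat = continuous_to_categorical_with_thresholds(pred, thresholds)
#   f1_per_class = categorical_evaluation(pred_cat, obs_cat, 'f1')  # one score per threshold class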
| [
"sklearn.metrics.f1_score",
"scipy.stats.scoreatpercentile",
"numpy.where",
"numpy.delete",
"sklearn.metrics.precision_score",
"sklearn.metrics.recall_score",
"IPython.display.clear_output",
"numpy.zeros",
"numpy.quantile",
"numpy.isnan",
"scipy.stats.spearmanr",
"sklearn.metrics.confusion_matrix"
]
| [((1345, 1369), 'numpy.zeros', 'np.zeros', (['(n_lat, n_lon)'], {}), '((n_lat, n_lon))\n', (1353, 1369), True, 'import numpy as np\n'), ((1814, 1838), 'numpy.zeros', 'np.zeros', (['(n_lat, n_lon)'], {}), '((n_lat, n_lon))\n', (1822, 1838), True, 'import numpy as np\n'), ((4224, 4259), 'numpy.zeros', 'np.zeros', (['(n_classes, n_lat, n_lon)'], {}), '((n_classes, n_lat, n_lon))\n', (4232, 4259), True, 'import numpy as np\n'), ((8614, 8658), 'numpy.zeros', 'np.zeros', (['(output.shape[1], output.shape[2])'], {}), '((output.shape[1], output.shape[2]))\n', (8622, 8658), True, 'import numpy as np\n'), ((821, 848), 'numpy.quantile', 'np.quantile', (['data', 'quantile'], {}), '(data, quantile)\n', (832, 848), True, 'import numpy as np\n'), ((1091, 1118), 'numpy.quantile', 'np.quantile', (['data', 'quantile'], {}), '(data, quantile)\n', (1102, 1118), True, 'import numpy as np\n'), ((5835, 5881), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['self.target', 'self.prediction'], {}), '(self.target, self.prediction)\n', (5851, 5881), False, 'from sklearn.metrics import confusion_matrix\n'), ((8119, 8175), 'sklearn.metrics.f1_score', 'f1_score', (['self.target', 'self.prediction'], {'average': '"""binary"""'}), "(self.target, self.prediction, average='binary')\n", (8127, 8175), False, 'from sklearn.metrics import f1_score, precision_score, recall_score\n'), ((8236, 8296), 'sklearn.metrics.recall_score', 'recall_score', (['self.target', 'self.prediction'], {'average': '"""binary"""'}), "(self.target, self.prediction, average='binary')\n", (8248, 8296), False, 'from sklearn.metrics import f1_score, precision_score, recall_score\n'), ((8360, 8423), 'sklearn.metrics.precision_score', 'precision_score', (['self.target', 'self.prediction'], {'average': '"""binary"""'}), "(self.target, self.prediction, average='binary')\n", (8375, 8423), False, 'from sklearn.metrics import f1_score, precision_score, recall_score\n'), ((1492, 1545), 'scipy.stats.scoreatpercentile', 'st.scoreatpercentile', (['tmp[tmp > data_min]', 'percentile'], {}), '(tmp[tmp > data_min], percentile)\n', (1512, 1545), True, 'import scipy.stats as st\n'), ((1961, 2014), 'scipy.stats.scoreatpercentile', 'st.scoreatpercentile', (['tmp[tmp > data_min]', 'percentile'], {}), '(tmp[tmp > data_min], percentile)\n', (1981, 2014), True, 'import scipy.stats as st\n'), ((2028, 2047), 'numpy.isnan', 'np.isnan', (['threshold'], {}), '(threshold)\n', (2036, 2047), True, 'import numpy as np\n'), ((5651, 5670), 'numpy.where', 'np.where', (['(mask == 1)'], {}), '(mask == 1)\n', (5659, 5670), True, 'import numpy as np\n'), ((5695, 5736), 'numpy.delete', 'np.delete', (['self.target', 'indices_to_remove'], {}), '(self.target, indices_to_remove)\n', (5704, 5736), True, 'import numpy as np\n'), ((5767, 5812), 'numpy.delete', 'np.delete', (['self.prediction', 'indices_to_remove'], {}), '(self.prediction, indices_to_remove)\n', (5776, 5812), True, 'import numpy as np\n'), ((866, 898), 'numpy.where', 'np.where', (['(data > threshold)', '(1)', '(0)'], {}), '(data > threshold, 1, 0)\n', (874, 898), True, 'import numpy as np\n'), ((1563, 1582), 'numpy.isnan', 'np.isnan', (['threshold'], {}), '(threshold)\n', (1571, 1582), True, 'import numpy as np\n'), ((2668, 2700), 'numpy.where', 'np.where', (['(data > threshold)', '(1)', '(0)'], {}), '(data > threshold, 1, 0)\n', (2676, 2700), True, 'import numpy as np\n'), ((4997, 5020), 'IPython.display.clear_output', 'clear_output', ([], {'wait': '(True)'}), '(wait=True)\n', (5009, 5020), False, 'from IPython.display 
import display, clear_output\n'), ((8765, 8808), 'scipy.stats.spearmanr', 'spearmanr', (['output[:, i, j]', 'target[:, i, j]'], {}), '(output[:, i, j], target[:, i, j])\n', (8774, 8808), False, 'from scipy.stats import spearmanr\n')] |
from datetime import datetime,timezone
import sys
import boto3
import json
def pipeline_event(event, context):
state = get_final_state(event)
if state is None:
return
event_time = datetime.strptime(event['time'], '%Y-%m-%dT%H:%M:%SZ').replace(tzinfo=timezone.utc)
metric_data = []
if event['detail-type'] == "CodePipeline Pipeline Execution State Change":
# Write green/red time based on last execution state
prior_execution = get_prior_execution(event['detail']['pipeline'], event['detail']['execution-id'])
if prior_execution is not None:
last_execution_state = prior_execution['status']
seconds_since_last_execution = (event_time - prior_execution['lastUpdateTime']).total_seconds()
if last_execution_state == "Succeeded":
append_metric(metric_data, "GreenTime", event, seconds=seconds_since_last_execution)
elif last_execution_state == "Failed":
append_metric(metric_data, "RedTime", event, seconds=seconds_since_last_execution)
if state == "SUCCEEDED":
append_metric(metric_data, "SuccessCount", event, count=1)
current_execution = get_execution(event['detail']['pipeline'], event['detail']['execution-id'])
if current_execution is not None:
duration = (event_time - current_execution['startTime']).total_seconds()
append_metric(metric_data, "LeadTime", event, seconds=duration)
elif state == "FAILED":
append_metric(metric_data, "FailureCount", event, count=1)
elif event['detail-type'] == "CodePipeline Stage Execution State Change":
if state == "SUCCEEDED":
append_metric(metric_data, "SuccessCount", event, count=1)
#append_metric(metric_data, "LeadTime", event, seconds=duration)
elif state == "FAILED":
append_metric(metric_data, "FailureCount", event, count=1)
elif event['detail-type'] == "CodePipeline Action Execution State Change":
if state == "SUCCEEDED":
append_metric(metric_data, "SuccessCount", event, count=1)
elif state == "FAILED":
append_metric(metric_data, "FailureCount", event, count=1)
if len(metric_data) > 0:
client = boto3.client('cloudwatch')
client.put_metric_data(
Namespace='Pipeline',
MetricData=metric_data
)
# Return the state from the event iff it's one of SUCCEEDED or FAILED
def get_final_state(event):
if 'detail' in event and 'state' in event['detail']:
if any(event['detail']['state'] in s for s in ['SUCCEEDED', 'FAILED']):
return event['detail']['state']
return None
# Return the execution summary for a given execution id
def get_execution(pipeline_name, execution_id):
client = boto3.client('codepipeline')
response = client.list_pipeline_executions(pipelineName=pipeline_name)
for e in response['pipelineExecutionSummaries']:
if e['pipelineExecutionId'] == execution_id:
return e
return None
# Return the execution summary for the most prior final execution before a given execution id
def get_prior_execution(pipeline_name, execution_id):
client = boto3.client('codepipeline')
response = client.list_pipeline_executions(pipelineName=pipeline_name)
found_current = False
for e in response['pipelineExecutionSummaries']:
if found_current and any(e['status'] in s for s in ['Succeeded', 'Failed']):
return e
elif e['pipelineExecutionId'] == execution_id:
found_current = True
return None
def append_metric(metric_list, metric_name, event, seconds=0, count=0):
data = {
'MetricName': metric_name,
'Dimensions': [],
'Timestamp': datetime.strptime(event['time'], '%Y-%m-%dT%H:%M:%SZ'),
}
resource_parts = []
if 'pipeline' in event['detail']:
data['Dimensions'].append({
'Name': 'PipelineName',
'Value': event['detail']['pipeline']
})
resource_parts.append(event['detail']['pipeline'])
if 'stage' in event['detail']:
data['Dimensions'].append({
'Name': 'StageName',
'Value': event['detail']['stage']
})
resource_parts.append(event['detail']['stage'])
if 'action' in event['detail']:
data['Dimensions'].append({
'Name': 'ActionName',
'Value': event['detail']['action']
})
resource_parts.append(event['detail']['action'])
if seconds > 0:
data['Value'] = seconds
data['Unit'] = 'Seconds'
elif count > 0:
data['Value'] = count
data['Unit'] = 'Count'
else:
# no metric to add
return
print("resource=%s metric=%s value=%s" % ('.'.join(resource_parts), metric_name, data['Value']))
metric_list.append(data)
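# Hedged example of a single CloudWatch metric datum built by append_metric above
# (names and values are illustrative assumptions):
#   {'MetricName': 'LeadTime',
#    'Dimensions': [{'Name': 'PipelineName', 'Value': 'my-pipeline'}],
#    'Timestamp': datetime(2019, 1, 1, 12, 0, 0),
#    'Value': 420.0,
#    'Unit': 'Seconds'}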
def generate_dashboard(client):
paginator = client.get_paginator('list_metrics')
response_iterator = paginator.paginate(
Namespace='Pipeline'
)
pipeline_names = set()
for response in response_iterator:
for metric in response['Metrics']:
for dim in metric['Dimensions']:
if dim['Name'] == 'PipelineName':
pipeline_names.add(dim['Value'])
widgets = []
dashboard = {
"widgets": widgets
}
y = 0
for pipeline_name in sorted(pipeline_names):
widgets.append({
"type": "metric",
"x": 0,
"y": y,
"width": 18,
"height": 3,
"properties": {
"view": "singleValue",
"metrics": [
[ "Pipeline", "SuccessCount", "PipelineName", pipeline_name, { "stat": "Sum", "period": 2592000 } ],
[ ".", "FailureCount", ".", ".", { "stat": "Sum", "period": 2592000 } ],
[ ".", "LeadTime", ".", ".", { "period": 2592000, "color": "#9467bd" } ],
[ ".", "RedTime", ".", ".", { "stat": "Sum", "period": 2592000, "yAxis": "left", "color": "#d62728" } ],
[ ".", "GreenTime", ".", ".", { "period": 2592000, "stat": "Sum", "color": "#2ca02c" } ]
],
"region": "eu-central-1",
"title": pipeline_name,
"period": 300
}
})
y += 3
widgets.append({
"type": "text",
"x": 18,
"y": 0,
"width": 6,
"height": 6,
"properties": {
"markdown": "\nAll metrics are calculated over the past 30 days\n\n* **SuccessCount** - count of all successful pipeline executions\n* **FailureCount** - count of all failed pipeline executions\n* **LeadTime** - average pipeline time for successful executions\n* **RedTime** - sum of all time spent with a red pipeline\n* **GreenTime** - sum of all time spent with a green pipeline\n"
}
})
return dashboard
def dashboard_event(event, context):
client = boto3.client('cloudwatch')
dashboard = generate_dashboard(client)
client.put_dashboard(
DashboardName='Pipeline',
DashboardBody=json.dumps(dashboard)
)
if __name__ == '__main__':
dashboard_event(None, None)
| [
"datetime.datetime.strptime",
"json.dumps",
"boto3.client"
]
| [((2854, 2882), 'boto3.client', 'boto3.client', (['"""codepipeline"""'], {}), "('codepipeline')\n", (2866, 2882), False, 'import boto3\n'), ((3265, 3293), 'boto3.client', 'boto3.client', (['"""codepipeline"""'], {}), "('codepipeline')\n", (3277, 3293), False, 'import boto3\n'), ((7079, 7105), 'boto3.client', 'boto3.client', (['"""cloudwatch"""'], {}), "('cloudwatch')\n", (7091, 7105), False, 'import boto3\n'), ((2300, 2326), 'boto3.client', 'boto3.client', (['"""cloudwatch"""'], {}), "('cloudwatch')\n", (2312, 2326), False, 'import boto3\n'), ((3832, 3886), 'datetime.datetime.strptime', 'datetime.strptime', (["event['time']", '"""%Y-%m-%dT%H:%M:%SZ"""'], {}), "(event['time'], '%Y-%m-%dT%H:%M:%SZ')\n", (3849, 3886), False, 'from datetime import datetime, timezone\n'), ((204, 258), 'datetime.datetime.strptime', 'datetime.strptime', (["event['time']", '"""%Y-%m-%dT%H:%M:%SZ"""'], {}), "(event['time'], '%Y-%m-%dT%H:%M:%SZ')\n", (221, 258), False, 'from datetime import datetime, timezone\n'), ((7231, 7252), 'json.dumps', 'json.dumps', (['dashboard'], {}), '(dashboard)\n', (7241, 7252), False, 'import json\n')] |
import requests
import crowdstrike_detection as crowdstrike
import logging
import click
import urllib.parse
import configparser as ConfigParser
import os
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(name)-15s [%(levelname)-8s]: %(message)s',
datefmt='%m/%d/%Y %I:%M:%S %p')
logger = logging.getLogger(__name__)
Config = ConfigParser.ConfigParser()
Config.read(os.path.join(os.path.abspath(os.path.dirname(__file__)), 'Crowdstrike_creds'))
# Create your own slackbot
hubot_webhook_url = Config.get('Settings', 'Slackbot_Url')
# Send slack alert via hubot for each high or critical detection in crowdstrike
def send_hubot_alert_crowdstrike(detection):
logger.info("Send hubot alert for detection %s" % detection.detection_id)
# Emoji for slack based on action taken
green_alerts = ['Kill process', 'Kill subprocess', 'Quarantine file', 'Kill parent', 'Process blocked',
'Operation blocked']
red_alerts = ['Policy disabled']
amber_alerts = []
actions = []
for behavior in detection.behavior:
actions.extend(behavior['action_taken'])
if actions:
actions = list(set(actions))
alerts = []
if actions:
if list(set(actions).intersection(red_alerts)):
alerts.append(':red-alert: Allowed')
if list(set(actions).intersection(green_alerts)):
alerts.append(':green-alert: Blocked')
else:
alerts.append(':red-alert: Allowed')
if ':green-alert: Blocked' in alerts and ':red-alert: Allowed' in alerts:
alerts = [':amber-alert: Suspicious']
message_to_send = ":crowd-strike: *%s* Alert: <%s|%s> ---> %s\n" % (
detection.severity, detection.link, detection.detection_id.split(':')[2], str(alerts).strip('[').strip(']').replace("'", ""))
message_to_send = "%sDevice: %s\n" % (message_to_send, detection.device)
for behavior in detection.behavior:
message_to_send = "%sBad Behavior: %s\n" % (message_to_send, behavior['bad_behavior'].replace('&', '%26amp;').replace('<', '%26lt;').replace('>', '%26gt;'))
message_to_send = "%sHash: %s\n" % (message_to_send, behavior['hash'])
message_to_send = "%sParent Cmd: %s\n" % (message_to_send, behavior['parent_commandline'])
message_to_send = "%sTactic-Technique: %s\n" % (message_to_send, behavior['tactic + technique'])
if behavior['action_taken']:
message_to_send = "%sAction Taken: %s" % (
message_to_send, str(behavior['action_taken']).strip('[').strip(']').replace("'", ""))
else:
message_to_send = "%sAction Taken: %s" % (message_to_send, 'None')
if len(detection.behavior) > 1:
message_to_send = "%s\n" % message_to_send
# Whom to send the alert
send_to = 'yourchannel or a user'
data = {'message': message_to_send, 'users': send_to}
data = urllib.parse.urlencode(data)
headers = {"Content-Type": "application/x-www-form-urlencoded"}
resp = requests.post(hubot_webhook_url, headers=headers, data=data)
if resp.ok:
logger.info("Sent alert to user/channel %s" % send_to)
else:
logger.critical("Unable to connect to hubot.")
logger.info("Hubot Error %d:%s" % (resp.status_code, resp.text))
@click.command()
@click.option("-d", "--duration", default=600, show_default=True, nargs=1, type=int, required=False, help="Crowdstrike detections that were last seen since 'duration' seconds")
def main(duration):
crowdstrike_detections = crowdstrike.fetch_detections(duration)
if crowdstrike_detections:
logger.info("Sending alerts")
for detection in crowdstrike_detections:
send_hubot_alert_crowdstrike(detection)
if __name__ == '__main__':
main()
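# Hedged CLI usage sketch (the script filename is an assumption):
#   python crowdstrike_slack_alerts.py --duration 3600
# fetches detections last seen within the past hour and sends one Slack alert per detection.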
| [
"logging.basicConfig",
"logging.getLogger",
"requests.post",
"click.option",
"ConfigParser.ConfigParser",
"os.path.dirname",
"click.command",
"crowdstrike_detection.fetch_detections"
]
| [((139, 285), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO', 'format': '"""%(asctime)s %(name)-15s [%(levelname)-8s]: %(message)s"""', 'datefmt': '"""%m/%d/%Y %I:%M:%S %p"""'}), "(level=logging.INFO, format=\n '%(asctime)s %(name)-15s [%(levelname)-8s]: %(message)s', datefmt=\n '%m/%d/%Y %I:%M:%S %p')\n", (158, 285), False, 'import logging\n'), ((309, 336), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (326, 336), False, 'import logging\n'), ((347, 374), 'ConfigParser.ConfigParser', 'ConfigParser.ConfigParser', ([], {}), '()\n', (372, 374), False, 'import ConfigParser\n'), ((3274, 3289), 'click.command', 'click.command', ([], {}), '()\n', (3287, 3289), False, 'import click\n'), ((3291, 3475), 'click.option', 'click.option', (['"""-d"""', '"""--duration"""'], {'default': '(600)', 'show_default': '(True)', 'nargs': '(1)', 'type': 'int', 'required': '(False)', 'help': '"""Crowdstrike detections that were last seen since \'duration\' seconds"""'}), '(\'-d\', \'--duration\', default=600, show_default=True, nargs=1,\n type=int, required=False, help=\n "Crowdstrike detections that were last seen since \'duration\' seconds")\n', (3303, 3475), False, 'import click\n'), ((2993, 3053), 'requests.post', 'requests.post', (['hubot_webhook_url'], {'headers': 'headers', 'data': 'data'}), '(hubot_webhook_url, headers=headers, data=data)\n', (3006, 3053), False, 'import requests\n'), ((3516, 3554), 'crowdstrike_detection.fetch_detections', 'crowdstrike.fetch_detections', (['duration'], {}), '(duration)\n', (3544, 3554), True, 'import crowdstrike_detection as crowdstrike\n'), ((416, 441), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (431, 441), False, 'import os\n')] |
import requests
import json
HEADERS = {"Authorization": "OAuth <KEY>", "Accept": "*/*"}
URL = "https://cloud-api.yandex.net:443/v1/disk/"
def get_folder_info(folder_name_1, folder_name_2, url=None, headers=None):
"""Получение информации о статусе папок на диске
Args:
folder_name_1: имя корневой папки.
folder_name_2: имя вложенной папки.
url: адрес для запроса.
headers: заголовки запроса, содержащие токен авторизации.
Returns:
Информация о папках: путь до папок, если созданы успешно. В противном случае описание ошибки.
"""
info = requests.get(url= URL + "resources?path=" + folder_name_1 + "/" + folder_name_2 + "&fields=path", headers=HEADERS)
dict_response = json.loads(info.content)
if info.status_code == 404:
return dict_response["description"]
else:
return dict_response["path"]
def get_file_info(folder_name_1, folder_name_2, file_name, url=None, headers=None):
"""Получение информации о файле
Args:
folder_name_1: имя корневой папки.
folder_name_2: имя вложенной папки.
file_name: имя файла.
url: адрес для запроса.
headers: заголовки запроса, содержащие токен авторизации.
Returns:
Путь до файла.
"""
file_info_json = requests.get(url= URL + "resources?path=" + folder_name_1 + "/" + folder_name_2 + "/" + file_name +
".jpg&fields=path", headers = HEADERS)
file_info_dict = json.loads(file_info_json.content)
if file_info_json.status_code == 404:
return file_info_dict["description"]
else:
return file_info_dict["path"]
def create_folder(folder_name_1, folder_name_2, url=None, headers=None):
"""Создание папок на диске.
Args:
folder_name_1: имя корневой папки.
folder_name_2: имя вложенной папки.
url: адрес для запроса.
headers: заголовки запроса, содержащие токен авторизации.
Returns:
Информация о папках через вызов другой функции.
"""
response_code = [202, 204]
new_folder = requests.put(url= URL + "resources?path=" + folder_name_1, headers=HEADERS)
if new_folder.status_code == 409:
new_folder = requests.delete(url= URL + "resources?path=" + folder_name_1 + "&permanently=true", headers=HEADERS)
if new_folder.status_code in response_code:
requests.put(url= URL + "resources?path=" + folder_name_1, headers=HEADERS)
requests.put(url= URL + "resources?path=" + folder_name_1 + "/" + folder_name_2, headers=HEADERS)
return get_folder_info(folder_name_1, folder_name_2)
def create_file(folder_name_1, folder_name_2, file_name, url=None, headers=None):
"""Загрузка файла на диск.
Args:
folder_name_1: имя корневой папки.
folder_name_2: имя вложенной папки.
file_name: имя файла.
url: адрес для запроса.
headers: заголовки запроса, содержащие токен авторизации.
Returns:
Информацию о созданном файле через вызов другой функции.
"""
    assert len(file_name) > 0, "File name was not provided"
new_file = requests.get(url= URL + "resources/upload?path=" + folder_name_1 + "/" + folder_name_2 + "/" + file_name +
".jpg&overwrite=true", headers=HEADERS)
get_link = new_file.content
link = json.loads(get_link)
requests.put(url=link["href"])
return get_file_info(folder_name_1, folder_name_2, file_name)
def move_to_bucket(folder_name, url=None, headers=None):
"""Перемещение папки с содержимым в корзину.
Args:
folder_name: имя корневой папки.
url: адрес для запроса.
headers: заголовки запроса, содержащие токен авторизации.
Returns:
Ссылку для проверки статуса.
"""
order_response = requests.delete(url= URL + "resources?path=" + folder_name, headers=HEADERS)
return json.loads(order_response.content)["href"]
def get_status(link, headers=None):
"""Получение статуса операции по ссылке.
Args:
link: ссылка, для которой проверяется статус.
headers: заголовки запроса, содержащие токен авторизации.
Returns:
Статус операции.
"""
status_response = requests.get(url=link, headers=HEADERS)
return json.loads(status_response.content)["status"]
def clean_bucket():
"""Очистка корзины.
Returns:
Ссылку для проверки статуса.
"""
remove_folder = requests.delete(url= URL + "trash/resources", headers=HEADERS)
return json.loads(remove_folder.content)["href"]
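# Illustrative usage sketch (not part of the original module); it assumes the URL and HEADERS
# constants are defined above with the Yandex Disk API base address and a valid OAuth token:
#   create_folder("photos", "2021")        # create photos/2021 on the disk
#   create_file("photos", "2021", "cat")   # request an upload link for photos/2021/cat.jpg and PUT to it
#   link = move_to_bucket("photos")        # move the whole tree to the trash
#   get_status(link)                       # poll the status of the asynchronous delete
#   clean_bucket()                         # empty the trash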
| [
"requests.put",
"json.loads",
"requests.get",
"requests.delete"
]
| [((602, 719), 'requests.get', 'requests.get', ([], {'url': "(URL + 'resources?path=' + folder_name_1 + '/' + folder_name_2 + '&fields=path'\n )", 'headers': 'HEADERS'}), "(url=URL + 'resources?path=' + folder_name_1 + '/' +\n folder_name_2 + '&fields=path', headers=HEADERS)\n", (614, 719), False, 'import requests\n'), ((737, 761), 'json.loads', 'json.loads', (['info.content'], {}), '(info.content)\n', (747, 761), False, 'import json\n'), ((1300, 1439), 'requests.get', 'requests.get', ([], {'url': "(URL + 'resources?path=' + folder_name_1 + '/' + folder_name_2 + '/' +\n file_name + '.jpg&fields=path')", 'headers': 'HEADERS'}), "(url=URL + 'resources?path=' + folder_name_1 + '/' +\n folder_name_2 + '/' + file_name + '.jpg&fields=path', headers=HEADERS)\n", (1312, 1439), False, 'import requests\n'), ((1494, 1528), 'json.loads', 'json.loads', (['file_info_json.content'], {}), '(file_info_json.content)\n', (1504, 1528), False, 'import json\n'), ((2094, 2168), 'requests.put', 'requests.put', ([], {'url': "(URL + 'resources?path=' + folder_name_1)", 'headers': 'HEADERS'}), "(url=URL + 'resources?path=' + folder_name_1, headers=HEADERS)\n", (2106, 2168), False, 'import requests\n'), ((2474, 2574), 'requests.put', 'requests.put', ([], {'url': "(URL + 'resources?path=' + folder_name_1 + '/' + folder_name_2)", 'headers': 'HEADERS'}), "(url=URL + 'resources?path=' + folder_name_1 + '/' +\n folder_name_2, headers=HEADERS)\n", (2486, 2574), False, 'import requests\n'), ((3126, 3275), 'requests.get', 'requests.get', ([], {'url': "(URL + 'resources/upload?path=' + folder_name_1 + '/' + folder_name_2 + '/' +\n file_name + '.jpg&overwrite=true')", 'headers': 'HEADERS'}), "(url=URL + 'resources/upload?path=' + folder_name_1 + '/' +\n folder_name_2 + '/' + file_name + '.jpg&overwrite=true', headers=HEADERS)\n", (3138, 3275), False, 'import requests\n'), ((3344, 3364), 'json.loads', 'json.loads', (['get_link'], {}), '(get_link)\n', (3354, 3364), False, 'import json\n'), ((3369, 3399), 'requests.put', 'requests.put', ([], {'url': "link['href']"}), "(url=link['href'])\n", (3381, 3399), False, 'import requests\n'), ((3805, 3880), 'requests.delete', 'requests.delete', ([], {'url': "(URL + 'resources?path=' + folder_name)", 'headers': 'HEADERS'}), "(url=URL + 'resources?path=' + folder_name, headers=HEADERS)\n", (3820, 3880), False, 'import requests\n'), ((4220, 4259), 'requests.get', 'requests.get', ([], {'url': 'link', 'headers': 'HEADERS'}), '(url=link, headers=HEADERS)\n', (4232, 4259), False, 'import requests\n'), ((4443, 4504), 'requests.delete', 'requests.delete', ([], {'url': "(URL + 'trash/resources')", 'headers': 'HEADERS'}), "(url=URL + 'trash/resources', headers=HEADERS)\n", (4458, 4504), False, 'import requests\n'), ((2229, 2332), 'requests.delete', 'requests.delete', ([], {'url': "(URL + 'resources?path=' + folder_name_1 + '&permanently=true')", 'headers': 'HEADERS'}), "(url=URL + 'resources?path=' + folder_name_1 +\n '&permanently=true', headers=HEADERS)\n", (2244, 2332), False, 'import requests\n'), ((3893, 3927), 'json.loads', 'json.loads', (['order_response.content'], {}), '(order_response.content)\n', (3903, 3927), False, 'import json\n'), ((4271, 4306), 'json.loads', 'json.loads', (['status_response.content'], {}), '(status_response.content)\n', (4281, 4306), False, 'import json\n'), ((4517, 4550), 'json.loads', 'json.loads', (['remove_folder.content'], {}), '(remove_folder.content)\n', (4527, 4550), False, 'import json\n'), ((2394, 2468), 'requests.put', 'requests.put', ([], {'url': "(URL + 
'resources?path=' + folder_name_1)", 'headers': 'HEADERS'}), "(url=URL + 'resources?path=' + folder_name_1, headers=HEADERS)\n", (2406, 2468), False, 'import requests\n')] |
import os
from collections import defaultdict
from flask import render_template
from flask_login import login_required
from sqlalchemy import and_
from app import db
from app.decorators import operator_required
from app.models import Student, MonthNameList, Course, PaymentStatus, Payment, Teacher, Schedule
from app.users.operator import operator
@operator.route('/')
@login_required
@operator_required
def index():
title = os.environ.get('APP_NAME')
    # get all students' schedule data, excluding those whose tuition payment status is None, PENDING, REJECTED, or WARNING_3
students_courses_data = db.session.query(Schedule, Payment).join(Payment).filter(
        # use isnot() so the NULL check becomes a SQL expression rather than a Python identity test
        and_(Payment.status_of_payment.isnot(None),
Payment.status_of_payment != PaymentStatus.PENDING.name,
Payment.status_of_payment != PaymentStatus.REJECTED.name,
Payment.status_of_payment != PaymentStatus.WARNING_3.name))
    # get the number of Teachers and Students
total_students = Student.query.count()
total_teachers = Teacher.query.count()
month_name_list = []
for data in MonthNameList:
month_name_list.append(str(data))
# make a query object for "Tahsin" and "Arabic Language" course
tahsin = students_courses_data.join(Course).filter(Course.name == "Tahsin")
arabic = students_courses_data.join(Course).filter(Course.name == "Bahasa Arab")
    # count, for each month name, the payments created in that month for each course
tahsin_course_data = []
arabic_course_data = []
for data in tahsin:
for month_name in month_name_list:
tahsin_course_data.append({str(month_name): data.Payment.created_at.strftime('%B').count(month_name)})
for data in arabic:
for month_name in month_name_list:
arabic_course_data.append({str(month_name): data.Payment.created_at.strftime('%B').count(month_name)})
# merge and sum the total value from the dictionary on the same month from the _courses_data result above
total_tahsin_students_per_month = defaultdict(int)
total_arabic_students_per_month = defaultdict(int)
for d in tahsin_course_data:
for key, value in d.items():
total_tahsin_students_per_month[key] += value
for d in arabic_course_data:
for key, value in d.items():
total_arabic_students_per_month[key] += value
# store all of the month values on a list for each course
tahsin_values = []
arabic_values = []
for key, value in total_tahsin_students_per_month.items():
tahsin_values.append(value)
for key, value in total_arabic_students_per_month.items():
arabic_values.append(value)
# make a dictionary to represent course name with the matching total student that do the payment for each month
data_courses_each_month = [
{
'Tahsin': tahsin_values,
},
{
'Bahasa Arab': arabic_values
}
]
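    # e.g. [{'Tahsin': [1, 0, 2, ...]}, {'Bahasa Arab': [0, 3, 1, ...]}] -- one count per month (illustrative values)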
return render_template('main/operator/operator-dashboard.html', title=title, total_teachers=total_teachers,
total_students=total_students, month_name_list=month_name_list,
data_courses_each_month=data_courses_each_month)
| [
"flask.render_template",
"app.models.Teacher.query.count",
"app.users.operator.operator.route",
"app.models.Student.query.count",
"os.environ.get",
"collections.defaultdict",
"app.db.session.query",
"sqlalchemy.and_"
]
| [((353, 372), 'app.users.operator.operator.route', 'operator.route', (['"""/"""'], {}), "('/')\n", (367, 372), False, 'from app.users.operator import operator\n'), ((433, 459), 'os.environ.get', 'os.environ.get', (['"""APP_NAME"""'], {}), "('APP_NAME')\n", (447, 459), False, 'import os\n'), ((1000, 1021), 'app.models.Student.query.count', 'Student.query.count', ([], {}), '()\n', (1019, 1021), False, 'from app.models import Student, MonthNameList, Course, PaymentStatus, Payment, Teacher, Schedule\n'), ((1043, 1064), 'app.models.Teacher.query.count', 'Teacher.query.count', ([], {}), '()\n', (1062, 1064), False, 'from app.models import Student, MonthNameList, Course, PaymentStatus, Payment, Teacher, Schedule\n'), ((2019, 2035), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (2030, 2035), False, 'from collections import defaultdict\n'), ((2074, 2090), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (2085, 2090), False, 'from collections import defaultdict\n'), ((2940, 3166), 'flask.render_template', 'render_template', (['"""main/operator/operator-dashboard.html"""'], {'title': 'title', 'total_teachers': 'total_teachers', 'total_students': 'total_students', 'month_name_list': 'month_name_list', 'data_courses_each_month': 'data_courses_each_month'}), "('main/operator/operator-dashboard.html', title=title,\n total_teachers=total_teachers, total_students=total_students,\n month_name_list=month_name_list, data_courses_each_month=\n data_courses_each_month)\n", (2955, 3166), False, 'from flask import render_template\n'), ((674, 900), 'sqlalchemy.and_', 'and_', (['(Payment.status_of_payment is not None)', '(Payment.status_of_payment != PaymentStatus.PENDING.name)', '(Payment.status_of_payment != PaymentStatus.REJECTED.name)', '(Payment.status_of_payment != PaymentStatus.WARNING_3.name)'], {}), '(Payment.status_of_payment is not None, Payment.status_of_payment !=\n PaymentStatus.PENDING.name, Payment.status_of_payment != PaymentStatus.\n REJECTED.name, Payment.status_of_payment != PaymentStatus.WARNING_3.name)\n', (678, 900), False, 'from sqlalchemy import and_\n'), ((608, 643), 'app.db.session.query', 'db.session.query', (['Schedule', 'Payment'], {}), '(Schedule, Payment)\n', (624, 643), False, 'from app import db\n')] |
import datetime
from homeschool.courses.tests.factories import (
CourseFactory,
CourseTaskFactory,
GradedWorkFactory,
)
from homeschool.schools.tests.factories import GradeLevelFactory
from homeschool.students.forms import CourseworkForm, EnrollmentForm, GradeForm
from homeschool.students.models import Coursework, Grade
from homeschool.students.tests.factories import (
CourseworkFactory,
EnrollmentFactory,
GradeFactory,
StudentFactory,
)
from homeschool.test import TestCase
class TestCourseworkForm(TestCase):
def test_is_valid(self):
"""The coursework validates."""
user = self.make_user()
student = StudentFactory(school=user.school)
grade_level = GradeLevelFactory(school_year__school=user.school)
EnrollmentFactory(student=student, grade_level=grade_level)
course = CourseFactory(grade_levels=[grade_level])
course_task = CourseTaskFactory(course=course)
data = {
"student": str(student.id),
"course_task": str(course_task.id),
"completed_date": str(grade_level.school_year.start_date),
}
form = CourseworkForm(data=data)
is_valid = form.is_valid()
assert is_valid
def test_student_can_create_coursework(self):
"""The student is enrolled in a course that contains the task."""
user = self.make_user()
student = StudentFactory(school=user.school)
grade_level = GradeLevelFactory(school_year__school=user.school)
course = CourseFactory(grade_levels=[grade_level])
course_task = CourseTaskFactory(course=course)
data = {
"student": str(student.id),
"course_task": str(course_task.id),
"completed_date": str(grade_level.school_year.start_date),
}
form = CourseworkForm(data=data)
is_valid = form.is_valid()
assert not is_valid
assert form.non_field_errors() == [
"The student is not enrolled in this course."
]
def test_save_new_coursework(self):
"""A new coursework is created for a student and task."""
user = self.make_user()
student = StudentFactory(school=user.school)
grade_level = GradeLevelFactory(school_year__school=user.school)
EnrollmentFactory(student=student, grade_level=grade_level)
course = CourseFactory(grade_levels=[grade_level])
course_task = CourseTaskFactory(course=course)
data = {
"student": str(student.id),
"course_task": str(course_task.id),
"completed_date": str(grade_level.school_year.start_date),
}
form = CourseworkForm(data=data)
form.is_valid()
form.save()
assert (
Coursework.objects.filter(student=student, course_task=course_task).count()
== 1
)
def test_save_existing_coursework(self):
"""A new coursework is created for a student and task."""
user = self.make_user()
student = StudentFactory(school=user.school)
grade_level = GradeLevelFactory(school_year__school=user.school)
EnrollmentFactory(student=student, grade_level=grade_level)
course = CourseFactory(grade_levels=[grade_level])
course_task = CourseTaskFactory(course=course)
CourseworkFactory(student=student, course_task=course_task)
data = {
"student": str(student.id),
"course_task": str(course_task.id),
"completed_date": str(grade_level.school_year.start_date),
}
form = CourseworkForm(data=data)
form.is_valid()
form.save()
assert (
Coursework.objects.filter(student=student, course_task=course_task).count()
== 1
)
def test_save_deletes_coursework(self):
"""A blank completed date deletes an existing coursework."""
user = self.make_user()
student = StudentFactory(school=user.school)
grade_level = GradeLevelFactory(school_year__school=user.school)
EnrollmentFactory(student=student, grade_level=grade_level)
course = CourseFactory(grade_levels=[grade_level])
course_task = CourseTaskFactory(course=course)
CourseworkFactory(student=student, course_task=course_task)
data = {
"student": str(student.id),
"course_task": str(course_task.id),
}
form = CourseworkForm(data=data)
form.is_valid()
form.save()
assert (
Coursework.objects.filter(student=student, course_task=course_task).count()
== 0
)
def test_completed_date_outside_school_year(self):
"""The completed data must be in the school year."""
user = self.make_user()
student = StudentFactory(school=user.school)
grade_level = GradeLevelFactory(school_year__school=user.school)
EnrollmentFactory(student=student, grade_level=grade_level)
course = CourseFactory(grade_levels=[grade_level])
course_task = CourseTaskFactory(course=course)
data = {
"student": str(student.id),
"course_task": str(course_task.id),
"completed_date": str(
grade_level.school_year.start_date - datetime.timedelta(days=1)
),
}
form = CourseworkForm(data=data)
is_valid = form.is_valid()
assert not is_valid
assert form.non_field_errors() == [
"The completed date must be in the school year."
]
def test_invalid_course_task(self):
"""An invalid course task is an error."""
user = self.make_user()
student = StudentFactory(school=user.school)
grade_level = GradeLevelFactory(school_year__school=user.school)
EnrollmentFactory(student=student, grade_level=grade_level)
course = CourseFactory(grade_levels=[grade_level])
CourseTaskFactory(course=course)
data = {
"student": str(student.id),
"course_task": "0",
"completed_date": str(grade_level.school_year.start_date),
}
form = CourseworkForm(data=data)
is_valid = form.is_valid()
assert not is_valid
def test_invalid_completed_date(self):
"""An invalid completed date is an error."""
user = self.make_user()
student = StudentFactory(school=user.school)
grade_level = GradeLevelFactory(school_year__school=user.school)
EnrollmentFactory(student=student, grade_level=grade_level)
course = CourseFactory(grade_levels=[grade_level])
course_task = CourseTaskFactory(course=course)
data = {
"student": str(student.id),
"course_task": str(course_task.id),
"completed_date": "boom",
}
form = CourseworkForm(data=data)
is_valid = form.is_valid()
assert not is_valid
class TestEnrollmentForm(TestCase):
def test_students_only_enroll_in_one_grade_level_per_year(self):
"""A student can only be enrolled in a single grade level in a school year."""
user = self.make_user()
enrollment = EnrollmentFactory(
student__school=user.school, grade_level__school_year__school=user.school
)
another_grade_level = GradeLevelFactory(
school_year=enrollment.grade_level.school_year
)
data = {
"student": str(enrollment.student.id),
"grade_level": str(another_grade_level.id),
}
form = EnrollmentForm(user=user, data=data)
is_valid = form.is_valid()
assert not is_valid
assert (
"A student may not be enrolled in multiple grade levels in a school year. "
f"{enrollment.student} is enrolled in {enrollment.grade_level}."
in form.non_field_errors()
)
def test_no_grade_level(self):
"""A missing grade level raises a validation error."""
user = self.make_user()
school = user.school
enrollment = EnrollmentFactory(
student__school=school, grade_level__school_year__school=school
)
data = {"student": str(enrollment.student.id), "grade_level": "0"}
form = EnrollmentForm(user=user, data=data)
is_valid = form.is_valid()
assert not is_valid
assert "You need to select a grade level." in form.non_field_errors()
class TestGradeForm(TestCase):
def test_is_valid(self):
"""The new grade validates."""
user = self.make_user()
student = StudentFactory(school=user.school)
grade_level = GradeLevelFactory(school_year__school=user.school)
EnrollmentFactory(student=student, grade_level=grade_level)
course = CourseFactory(grade_levels=[grade_level])
graded_work = GradedWorkFactory(course_task__course=course)
data = {
"student": str(student.id),
"graded_work": str(graded_work.id),
"score": "100",
}
form = GradeForm(data=data)
is_valid = form.is_valid()
assert is_valid
def test_invalid_graded_work(self):
"""An invalid graded work is an error."""
user = self.make_user()
student = StudentFactory(school=user.school)
grade_level = GradeLevelFactory(school_year__school=user.school)
EnrollmentFactory(student=student, grade_level=grade_level)
course = CourseFactory(grade_levels=[grade_level])
GradedWorkFactory(course_task__course=course)
data = {"student": str(student.id), "graded_work": "0", "score": "100"}
form = GradeForm(data=data)
is_valid = form.is_valid()
assert not is_valid
def test_save(self):
"""The form creates a new grade."""
user = self.make_user()
student = StudentFactory(school=user.school)
grade_level = GradeLevelFactory(school_year__school=user.school)
EnrollmentFactory(student=student, grade_level=grade_level)
course = CourseFactory(grade_levels=[grade_level])
graded_work = GradedWorkFactory(course_task__course=course)
data = {
"student": str(student.id),
"graded_work": str(graded_work.id),
"score": "100",
}
form = GradeForm(data=data)
form.is_valid()
form.save()
assert (
Grade.objects.filter(
student=student, graded_work=graded_work, score=100
).count()
== 1
)
def test_save_update(self):
"""The form updates a grade."""
user = self.make_user()
student = StudentFactory(school=user.school)
grade_level = GradeLevelFactory(school_year__school=user.school)
EnrollmentFactory(student=student, grade_level=grade_level)
course = CourseFactory(grade_levels=[grade_level])
graded_work = GradedWorkFactory(course_task__course=course)
GradeFactory(student=student, graded_work=graded_work)
data = {
"student": str(student.id),
"graded_work": str(graded_work.id),
"score": "100",
}
form = GradeForm(data=data)
form.is_valid()
form.save()
assert (
Grade.objects.filter(student=student, graded_work=graded_work).count() == 1
)
| [
"homeschool.courses.tests.factories.CourseFactory",
"homeschool.students.tests.factories.CourseworkFactory",
"homeschool.students.forms.EnrollmentForm",
"homeschool.students.tests.factories.EnrollmentFactory",
"homeschool.courses.tests.factories.GradedWorkFactory",
"homeschool.students.forms.GradeForm",
"homeschool.students.tests.factories.GradeFactory",
"datetime.timedelta",
"homeschool.courses.tests.factories.CourseTaskFactory",
"homeschool.schools.tests.factories.GradeLevelFactory",
"homeschool.students.tests.factories.StudentFactory",
"homeschool.students.models.Coursework.objects.filter",
"homeschool.students.forms.CourseworkForm",
"homeschool.students.models.Grade.objects.filter"
]
| [((665, 699), 'homeschool.students.tests.factories.StudentFactory', 'StudentFactory', ([], {'school': 'user.school'}), '(school=user.school)\n', (679, 699), False, 'from homeschool.students.tests.factories import CourseworkFactory, EnrollmentFactory, GradeFactory, StudentFactory\n'), ((722, 772), 'homeschool.schools.tests.factories.GradeLevelFactory', 'GradeLevelFactory', ([], {'school_year__school': 'user.school'}), '(school_year__school=user.school)\n', (739, 772), False, 'from homeschool.schools.tests.factories import GradeLevelFactory\n'), ((781, 840), 'homeschool.students.tests.factories.EnrollmentFactory', 'EnrollmentFactory', ([], {'student': 'student', 'grade_level': 'grade_level'}), '(student=student, grade_level=grade_level)\n', (798, 840), False, 'from homeschool.students.tests.factories import CourseworkFactory, EnrollmentFactory, GradeFactory, StudentFactory\n'), ((858, 899), 'homeschool.courses.tests.factories.CourseFactory', 'CourseFactory', ([], {'grade_levels': '[grade_level]'}), '(grade_levels=[grade_level])\n', (871, 899), False, 'from homeschool.courses.tests.factories import CourseFactory, CourseTaskFactory, GradedWorkFactory\n'), ((922, 954), 'homeschool.courses.tests.factories.CourseTaskFactory', 'CourseTaskFactory', ([], {'course': 'course'}), '(course=course)\n', (939, 954), False, 'from homeschool.courses.tests.factories import CourseFactory, CourseTaskFactory, GradedWorkFactory\n'), ((1156, 1181), 'homeschool.students.forms.CourseworkForm', 'CourseworkForm', ([], {'data': 'data'}), '(data=data)\n', (1170, 1181), False, 'from homeschool.students.forms import CourseworkForm, EnrollmentForm, GradeForm\n'), ((1418, 1452), 'homeschool.students.tests.factories.StudentFactory', 'StudentFactory', ([], {'school': 'user.school'}), '(school=user.school)\n', (1432, 1452), False, 'from homeschool.students.tests.factories import CourseworkFactory, EnrollmentFactory, GradeFactory, StudentFactory\n'), ((1475, 1525), 'homeschool.schools.tests.factories.GradeLevelFactory', 'GradeLevelFactory', ([], {'school_year__school': 'user.school'}), '(school_year__school=user.school)\n', (1492, 1525), False, 'from homeschool.schools.tests.factories import GradeLevelFactory\n'), ((1543, 1584), 'homeschool.courses.tests.factories.CourseFactory', 'CourseFactory', ([], {'grade_levels': '[grade_level]'}), '(grade_levels=[grade_level])\n', (1556, 1584), False, 'from homeschool.courses.tests.factories import CourseFactory, CourseTaskFactory, GradedWorkFactory\n'), ((1607, 1639), 'homeschool.courses.tests.factories.CourseTaskFactory', 'CourseTaskFactory', ([], {'course': 'course'}), '(course=course)\n', (1624, 1639), False, 'from homeschool.courses.tests.factories import CourseFactory, CourseTaskFactory, GradedWorkFactory\n'), ((1841, 1866), 'homeschool.students.forms.CourseworkForm', 'CourseworkForm', ([], {'data': 'data'}), '(data=data)\n', (1855, 1866), False, 'from homeschool.students.forms import CourseworkForm, EnrollmentForm, GradeForm\n'), ((2201, 2235), 'homeschool.students.tests.factories.StudentFactory', 'StudentFactory', ([], {'school': 'user.school'}), '(school=user.school)\n', (2215, 2235), False, 'from homeschool.students.tests.factories import CourseworkFactory, EnrollmentFactory, GradeFactory, StudentFactory\n'), ((2258, 2308), 'homeschool.schools.tests.factories.GradeLevelFactory', 'GradeLevelFactory', ([], {'school_year__school': 'user.school'}), '(school_year__school=user.school)\n', (2275, 2308), False, 'from homeschool.schools.tests.factories import GradeLevelFactory\n'), 
((2317, 2376), 'homeschool.students.tests.factories.EnrollmentFactory', 'EnrollmentFactory', ([], {'student': 'student', 'grade_level': 'grade_level'}), '(student=student, grade_level=grade_level)\n', (2334, 2376), False, 'from homeschool.students.tests.factories import CourseworkFactory, EnrollmentFactory, GradeFactory, StudentFactory\n'), ((2394, 2435), 'homeschool.courses.tests.factories.CourseFactory', 'CourseFactory', ([], {'grade_levels': '[grade_level]'}), '(grade_levels=[grade_level])\n', (2407, 2435), False, 'from homeschool.courses.tests.factories import CourseFactory, CourseTaskFactory, GradedWorkFactory\n'), ((2458, 2490), 'homeschool.courses.tests.factories.CourseTaskFactory', 'CourseTaskFactory', ([], {'course': 'course'}), '(course=course)\n', (2475, 2490), False, 'from homeschool.courses.tests.factories import CourseFactory, CourseTaskFactory, GradedWorkFactory\n'), ((2692, 2717), 'homeschool.students.forms.CourseworkForm', 'CourseworkForm', ([], {'data': 'data'}), '(data=data)\n', (2706, 2717), False, 'from homeschool.students.forms import CourseworkForm, EnrollmentForm, GradeForm\n'), ((3058, 3092), 'homeschool.students.tests.factories.StudentFactory', 'StudentFactory', ([], {'school': 'user.school'}), '(school=user.school)\n', (3072, 3092), False, 'from homeschool.students.tests.factories import CourseworkFactory, EnrollmentFactory, GradeFactory, StudentFactory\n'), ((3115, 3165), 'homeschool.schools.tests.factories.GradeLevelFactory', 'GradeLevelFactory', ([], {'school_year__school': 'user.school'}), '(school_year__school=user.school)\n', (3132, 3165), False, 'from homeschool.schools.tests.factories import GradeLevelFactory\n'), ((3174, 3233), 'homeschool.students.tests.factories.EnrollmentFactory', 'EnrollmentFactory', ([], {'student': 'student', 'grade_level': 'grade_level'}), '(student=student, grade_level=grade_level)\n', (3191, 3233), False, 'from homeschool.students.tests.factories import CourseworkFactory, EnrollmentFactory, GradeFactory, StudentFactory\n'), ((3251, 3292), 'homeschool.courses.tests.factories.CourseFactory', 'CourseFactory', ([], {'grade_levels': '[grade_level]'}), '(grade_levels=[grade_level])\n', (3264, 3292), False, 'from homeschool.courses.tests.factories import CourseFactory, CourseTaskFactory, GradedWorkFactory\n'), ((3315, 3347), 'homeschool.courses.tests.factories.CourseTaskFactory', 'CourseTaskFactory', ([], {'course': 'course'}), '(course=course)\n', (3332, 3347), False, 'from homeschool.courses.tests.factories import CourseFactory, CourseTaskFactory, GradedWorkFactory\n'), ((3356, 3415), 'homeschool.students.tests.factories.CourseworkFactory', 'CourseworkFactory', ([], {'student': 'student', 'course_task': 'course_task'}), '(student=student, course_task=course_task)\n', (3373, 3415), False, 'from homeschool.students.tests.factories import CourseworkFactory, EnrollmentFactory, GradeFactory, StudentFactory\n'), ((3617, 3642), 'homeschool.students.forms.CourseworkForm', 'CourseworkForm', ([], {'data': 'data'}), '(data=data)\n', (3631, 3642), False, 'from homeschool.students.forms import CourseworkForm, EnrollmentForm, GradeForm\n'), ((3985, 4019), 'homeschool.students.tests.factories.StudentFactory', 'StudentFactory', ([], {'school': 'user.school'}), '(school=user.school)\n', (3999, 4019), False, 'from homeschool.students.tests.factories import CourseworkFactory, EnrollmentFactory, GradeFactory, StudentFactory\n'), ((4042, 4092), 'homeschool.schools.tests.factories.GradeLevelFactory', 'GradeLevelFactory', ([], {'school_year__school': 
'user.school'}), '(school_year__school=user.school)\n', (4059, 4092), False, 'from homeschool.schools.tests.factories import GradeLevelFactory\n'), ((4101, 4160), 'homeschool.students.tests.factories.EnrollmentFactory', 'EnrollmentFactory', ([], {'student': 'student', 'grade_level': 'grade_level'}), '(student=student, grade_level=grade_level)\n', (4118, 4160), False, 'from homeschool.students.tests.factories import CourseworkFactory, EnrollmentFactory, GradeFactory, StudentFactory\n'), ((4178, 4219), 'homeschool.courses.tests.factories.CourseFactory', 'CourseFactory', ([], {'grade_levels': '[grade_level]'}), '(grade_levels=[grade_level])\n', (4191, 4219), False, 'from homeschool.courses.tests.factories import CourseFactory, CourseTaskFactory, GradedWorkFactory\n'), ((4242, 4274), 'homeschool.courses.tests.factories.CourseTaskFactory', 'CourseTaskFactory', ([], {'course': 'course'}), '(course=course)\n', (4259, 4274), False, 'from homeschool.courses.tests.factories import CourseFactory, CourseTaskFactory, GradedWorkFactory\n'), ((4283, 4342), 'homeschool.students.tests.factories.CourseworkFactory', 'CourseworkFactory', ([], {'student': 'student', 'course_task': 'course_task'}), '(student=student, course_task=course_task)\n', (4300, 4342), False, 'from homeschool.students.tests.factories import CourseworkFactory, EnrollmentFactory, GradeFactory, StudentFactory\n'), ((4473, 4498), 'homeschool.students.forms.CourseworkForm', 'CourseworkForm', ([], {'data': 'data'}), '(data=data)\n', (4487, 4498), False, 'from homeschool.students.forms import CourseworkForm, EnrollmentForm, GradeForm\n'), ((4844, 4878), 'homeschool.students.tests.factories.StudentFactory', 'StudentFactory', ([], {'school': 'user.school'}), '(school=user.school)\n', (4858, 4878), False, 'from homeschool.students.tests.factories import CourseworkFactory, EnrollmentFactory, GradeFactory, StudentFactory\n'), ((4901, 4951), 'homeschool.schools.tests.factories.GradeLevelFactory', 'GradeLevelFactory', ([], {'school_year__school': 'user.school'}), '(school_year__school=user.school)\n', (4918, 4951), False, 'from homeschool.schools.tests.factories import GradeLevelFactory\n'), ((4960, 5019), 'homeschool.students.tests.factories.EnrollmentFactory', 'EnrollmentFactory', ([], {'student': 'student', 'grade_level': 'grade_level'}), '(student=student, grade_level=grade_level)\n', (4977, 5019), False, 'from homeschool.students.tests.factories import CourseworkFactory, EnrollmentFactory, GradeFactory, StudentFactory\n'), ((5037, 5078), 'homeschool.courses.tests.factories.CourseFactory', 'CourseFactory', ([], {'grade_levels': '[grade_level]'}), '(grade_levels=[grade_level])\n', (5050, 5078), False, 'from homeschool.courses.tests.factories import CourseFactory, CourseTaskFactory, GradedWorkFactory\n'), ((5101, 5133), 'homeschool.courses.tests.factories.CourseTaskFactory', 'CourseTaskFactory', ([], {'course': 'course'}), '(course=course)\n', (5118, 5133), False, 'from homeschool.courses.tests.factories import CourseFactory, CourseTaskFactory, GradedWorkFactory\n'), ((5394, 5419), 'homeschool.students.forms.CourseworkForm', 'CourseworkForm', ([], {'data': 'data'}), '(data=data)\n', (5408, 5419), False, 'from homeschool.students.forms import CourseworkForm, EnrollmentForm, GradeForm\n'), ((5741, 5775), 'homeschool.students.tests.factories.StudentFactory', 'StudentFactory', ([], {'school': 'user.school'}), '(school=user.school)\n', (5755, 5775), False, 'from homeschool.students.tests.factories import CourseworkFactory, EnrollmentFactory, 
GradeFactory, StudentFactory\n'), ((5798, 5848), 'homeschool.schools.tests.factories.GradeLevelFactory', 'GradeLevelFactory', ([], {'school_year__school': 'user.school'}), '(school_year__school=user.school)\n', (5815, 5848), False, 'from homeschool.schools.tests.factories import GradeLevelFactory\n'), ((5857, 5916), 'homeschool.students.tests.factories.EnrollmentFactory', 'EnrollmentFactory', ([], {'student': 'student', 'grade_level': 'grade_level'}), '(student=student, grade_level=grade_level)\n', (5874, 5916), False, 'from homeschool.students.tests.factories import CourseworkFactory, EnrollmentFactory, GradeFactory, StudentFactory\n'), ((5934, 5975), 'homeschool.courses.tests.factories.CourseFactory', 'CourseFactory', ([], {'grade_levels': '[grade_level]'}), '(grade_levels=[grade_level])\n', (5947, 5975), False, 'from homeschool.courses.tests.factories import CourseFactory, CourseTaskFactory, GradedWorkFactory\n'), ((5984, 6016), 'homeschool.courses.tests.factories.CourseTaskFactory', 'CourseTaskFactory', ([], {'course': 'course'}), '(course=course)\n', (6001, 6016), False, 'from homeschool.courses.tests.factories import CourseFactory, CourseTaskFactory, GradedWorkFactory\n'), ((6202, 6227), 'homeschool.students.forms.CourseworkForm', 'CourseworkForm', ([], {'data': 'data'}), '(data=data)\n', (6216, 6227), False, 'from homeschool.students.forms import CourseworkForm, EnrollmentForm, GradeForm\n'), ((6440, 6474), 'homeschool.students.tests.factories.StudentFactory', 'StudentFactory', ([], {'school': 'user.school'}), '(school=user.school)\n', (6454, 6474), False, 'from homeschool.students.tests.factories import CourseworkFactory, EnrollmentFactory, GradeFactory, StudentFactory\n'), ((6497, 6547), 'homeschool.schools.tests.factories.GradeLevelFactory', 'GradeLevelFactory', ([], {'school_year__school': 'user.school'}), '(school_year__school=user.school)\n', (6514, 6547), False, 'from homeschool.schools.tests.factories import GradeLevelFactory\n'), ((6556, 6615), 'homeschool.students.tests.factories.EnrollmentFactory', 'EnrollmentFactory', ([], {'student': 'student', 'grade_level': 'grade_level'}), '(student=student, grade_level=grade_level)\n', (6573, 6615), False, 'from homeschool.students.tests.factories import CourseworkFactory, EnrollmentFactory, GradeFactory, StudentFactory\n'), ((6633, 6674), 'homeschool.courses.tests.factories.CourseFactory', 'CourseFactory', ([], {'grade_levels': '[grade_level]'}), '(grade_levels=[grade_level])\n', (6646, 6674), False, 'from homeschool.courses.tests.factories import CourseFactory, CourseTaskFactory, GradedWorkFactory\n'), ((6697, 6729), 'homeschool.courses.tests.factories.CourseTaskFactory', 'CourseTaskFactory', ([], {'course': 'course'}), '(course=course)\n', (6714, 6729), False, 'from homeschool.courses.tests.factories import CourseFactory, CourseTaskFactory, GradedWorkFactory\n'), ((6898, 6923), 'homeschool.students.forms.CourseworkForm', 'CourseworkForm', ([], {'data': 'data'}), '(data=data)\n', (6912, 6923), False, 'from homeschool.students.forms import CourseworkForm, EnrollmentForm, GradeForm\n'), ((7236, 7332), 'homeschool.students.tests.factories.EnrollmentFactory', 'EnrollmentFactory', ([], {'student__school': 'user.school', 'grade_level__school_year__school': 'user.school'}), '(student__school=user.school,\n grade_level__school_year__school=user.school)\n', (7253, 7332), False, 'from homeschool.students.tests.factories import CourseworkFactory, EnrollmentFactory, GradeFactory, StudentFactory\n'), ((7381, 7446), 
'homeschool.schools.tests.factories.GradeLevelFactory', 'GradeLevelFactory', ([], {'school_year': 'enrollment.grade_level.school_year'}), '(school_year=enrollment.grade_level.school_year)\n', (7398, 7446), False, 'from homeschool.schools.tests.factories import GradeLevelFactory\n'), ((7618, 7654), 'homeschool.students.forms.EnrollmentForm', 'EnrollmentForm', ([], {'user': 'user', 'data': 'data'}), '(user=user, data=data)\n', (7632, 7654), False, 'from homeschool.students.forms import CourseworkForm, EnrollmentForm, GradeForm\n'), ((8132, 8219), 'homeschool.students.tests.factories.EnrollmentFactory', 'EnrollmentFactory', ([], {'student__school': 'school', 'grade_level__school_year__school': 'school'}), '(student__school=school, grade_level__school_year__school=\n school)\n', (8149, 8219), False, 'from homeschool.students.tests.factories import CourseworkFactory, EnrollmentFactory, GradeFactory, StudentFactory\n'), ((8327, 8363), 'homeschool.students.forms.EnrollmentForm', 'EnrollmentForm', ([], {'user': 'user', 'data': 'data'}), '(user=user, data=data)\n', (8341, 8363), False, 'from homeschool.students.forms import CourseworkForm, EnrollmentForm, GradeForm\n'), ((8658, 8692), 'homeschool.students.tests.factories.StudentFactory', 'StudentFactory', ([], {'school': 'user.school'}), '(school=user.school)\n', (8672, 8692), False, 'from homeschool.students.tests.factories import CourseworkFactory, EnrollmentFactory, GradeFactory, StudentFactory\n'), ((8715, 8765), 'homeschool.schools.tests.factories.GradeLevelFactory', 'GradeLevelFactory', ([], {'school_year__school': 'user.school'}), '(school_year__school=user.school)\n', (8732, 8765), False, 'from homeschool.schools.tests.factories import GradeLevelFactory\n'), ((8774, 8833), 'homeschool.students.tests.factories.EnrollmentFactory', 'EnrollmentFactory', ([], {'student': 'student', 'grade_level': 'grade_level'}), '(student=student, grade_level=grade_level)\n', (8791, 8833), False, 'from homeschool.students.tests.factories import CourseworkFactory, EnrollmentFactory, GradeFactory, StudentFactory\n'), ((8851, 8892), 'homeschool.courses.tests.factories.CourseFactory', 'CourseFactory', ([], {'grade_levels': '[grade_level]'}), '(grade_levels=[grade_level])\n', (8864, 8892), False, 'from homeschool.courses.tests.factories import CourseFactory, CourseTaskFactory, GradedWorkFactory\n'), ((8915, 8960), 'homeschool.courses.tests.factories.GradedWorkFactory', 'GradedWorkFactory', ([], {'course_task__course': 'course'}), '(course_task__course=course)\n', (8932, 8960), False, 'from homeschool.courses.tests.factories import CourseFactory, CourseTaskFactory, GradedWorkFactory\n'), ((9119, 9139), 'homeschool.students.forms.GradeForm', 'GradeForm', ([], {'data': 'data'}), '(data=data)\n', (9128, 9139), False, 'from homeschool.students.forms import CourseworkForm, EnrollmentForm, GradeForm\n'), ((9342, 9376), 'homeschool.students.tests.factories.StudentFactory', 'StudentFactory', ([], {'school': 'user.school'}), '(school=user.school)\n', (9356, 9376), False, 'from homeschool.students.tests.factories import CourseworkFactory, EnrollmentFactory, GradeFactory, StudentFactory\n'), ((9399, 9449), 'homeschool.schools.tests.factories.GradeLevelFactory', 'GradeLevelFactory', ([], {'school_year__school': 'user.school'}), '(school_year__school=user.school)\n', (9416, 9449), False, 'from homeschool.schools.tests.factories import GradeLevelFactory\n'), ((9458, 9517), 'homeschool.students.tests.factories.EnrollmentFactory', 'EnrollmentFactory', ([], {'student': 'student', 
'grade_level': 'grade_level'}), '(student=student, grade_level=grade_level)\n', (9475, 9517), False, 'from homeschool.students.tests.factories import CourseworkFactory, EnrollmentFactory, GradeFactory, StudentFactory\n'), ((9535, 9576), 'homeschool.courses.tests.factories.CourseFactory', 'CourseFactory', ([], {'grade_levels': '[grade_level]'}), '(grade_levels=[grade_level])\n', (9548, 9576), False, 'from homeschool.courses.tests.factories import CourseFactory, CourseTaskFactory, GradedWorkFactory\n'), ((9585, 9630), 'homeschool.courses.tests.factories.GradedWorkFactory', 'GradedWorkFactory', ([], {'course_task__course': 'course'}), '(course_task__course=course)\n', (9602, 9630), False, 'from homeschool.courses.tests.factories import CourseFactory, CourseTaskFactory, GradedWorkFactory\n'), ((9726, 9746), 'homeschool.students.forms.GradeForm', 'GradeForm', ([], {'data': 'data'}), '(data=data)\n', (9735, 9746), False, 'from homeschool.students.forms import CourseworkForm, EnrollmentForm, GradeForm\n'), ((9932, 9966), 'homeschool.students.tests.factories.StudentFactory', 'StudentFactory', ([], {'school': 'user.school'}), '(school=user.school)\n', (9946, 9966), False, 'from homeschool.students.tests.factories import CourseworkFactory, EnrollmentFactory, GradeFactory, StudentFactory\n'), ((9989, 10039), 'homeschool.schools.tests.factories.GradeLevelFactory', 'GradeLevelFactory', ([], {'school_year__school': 'user.school'}), '(school_year__school=user.school)\n', (10006, 10039), False, 'from homeschool.schools.tests.factories import GradeLevelFactory\n'), ((10048, 10107), 'homeschool.students.tests.factories.EnrollmentFactory', 'EnrollmentFactory', ([], {'student': 'student', 'grade_level': 'grade_level'}), '(student=student, grade_level=grade_level)\n', (10065, 10107), False, 'from homeschool.students.tests.factories import CourseworkFactory, EnrollmentFactory, GradeFactory, StudentFactory\n'), ((10125, 10166), 'homeschool.courses.tests.factories.CourseFactory', 'CourseFactory', ([], {'grade_levels': '[grade_level]'}), '(grade_levels=[grade_level])\n', (10138, 10166), False, 'from homeschool.courses.tests.factories import CourseFactory, CourseTaskFactory, GradedWorkFactory\n'), ((10189, 10234), 'homeschool.courses.tests.factories.GradedWorkFactory', 'GradedWorkFactory', ([], {'course_task__course': 'course'}), '(course_task__course=course)\n', (10206, 10234), False, 'from homeschool.courses.tests.factories import CourseFactory, CourseTaskFactory, GradedWorkFactory\n'), ((10393, 10413), 'homeschool.students.forms.GradeForm', 'GradeForm', ([], {'data': 'data'}), '(data=data)\n', (10402, 10413), False, 'from homeschool.students.forms import CourseworkForm, EnrollmentForm, GradeForm\n'), ((10751, 10785), 'homeschool.students.tests.factories.StudentFactory', 'StudentFactory', ([], {'school': 'user.school'}), '(school=user.school)\n', (10765, 10785), False, 'from homeschool.students.tests.factories import CourseworkFactory, EnrollmentFactory, GradeFactory, StudentFactory\n'), ((10808, 10858), 'homeschool.schools.tests.factories.GradeLevelFactory', 'GradeLevelFactory', ([], {'school_year__school': 'user.school'}), '(school_year__school=user.school)\n', (10825, 10858), False, 'from homeschool.schools.tests.factories import GradeLevelFactory\n'), ((10867, 10926), 'homeschool.students.tests.factories.EnrollmentFactory', 'EnrollmentFactory', ([], {'student': 'student', 'grade_level': 'grade_level'}), '(student=student, grade_level=grade_level)\n', (10884, 10926), False, 'from 
homeschool.students.tests.factories import CourseworkFactory, EnrollmentFactory, GradeFactory, StudentFactory\n'), ((10944, 10985), 'homeschool.courses.tests.factories.CourseFactory', 'CourseFactory', ([], {'grade_levels': '[grade_level]'}), '(grade_levels=[grade_level])\n', (10957, 10985), False, 'from homeschool.courses.tests.factories import CourseFactory, CourseTaskFactory, GradedWorkFactory\n'), ((11008, 11053), 'homeschool.courses.tests.factories.GradedWorkFactory', 'GradedWorkFactory', ([], {'course_task__course': 'course'}), '(course_task__course=course)\n', (11025, 11053), False, 'from homeschool.courses.tests.factories import CourseFactory, CourseTaskFactory, GradedWorkFactory\n'), ((11062, 11116), 'homeschool.students.tests.factories.GradeFactory', 'GradeFactory', ([], {'student': 'student', 'graded_work': 'graded_work'}), '(student=student, graded_work=graded_work)\n', (11074, 11116), False, 'from homeschool.students.tests.factories import CourseworkFactory, EnrollmentFactory, GradeFactory, StudentFactory\n'), ((11275, 11295), 'homeschool.students.forms.GradeForm', 'GradeForm', ([], {'data': 'data'}), '(data=data)\n', (11284, 11295), False, 'from homeschool.students.forms import CourseworkForm, EnrollmentForm, GradeForm\n'), ((2793, 2860), 'homeschool.students.models.Coursework.objects.filter', 'Coursework.objects.filter', ([], {'student': 'student', 'course_task': 'course_task'}), '(student=student, course_task=course_task)\n', (2818, 2860), False, 'from homeschool.students.models import Coursework, Grade\n'), ((3718, 3785), 'homeschool.students.models.Coursework.objects.filter', 'Coursework.objects.filter', ([], {'student': 'student', 'course_task': 'course_task'}), '(student=student, course_task=course_task)\n', (3743, 3785), False, 'from homeschool.students.models import Coursework, Grade\n'), ((4574, 4641), 'homeschool.students.models.Coursework.objects.filter', 'Coursework.objects.filter', ([], {'student': 'student', 'course_task': 'course_task'}), '(student=student, course_task=course_task)\n', (4599, 4641), False, 'from homeschool.students.models import Coursework, Grade\n'), ((5327, 5353), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(1)'}), '(days=1)\n', (5345, 5353), False, 'import datetime\n'), ((10489, 10562), 'homeschool.students.models.Grade.objects.filter', 'Grade.objects.filter', ([], {'student': 'student', 'graded_work': 'graded_work', 'score': '(100)'}), '(student=student, graded_work=graded_work, score=100)\n', (10509, 10562), False, 'from homeschool.students.models import Coursework, Grade\n'), ((11371, 11433), 'homeschool.students.models.Grade.objects.filter', 'Grade.objects.filter', ([], {'student': 'student', 'graded_work': 'graded_work'}), '(student=student, graded_work=graded_work)\n', (11391, 11433), False, 'from homeschool.students.models import Coursework, Grade\n')] |
""" @Author Jchakra"""
""" This code is to download project information using GitHub API (Following Amrit's Hero paper criteria of how to find good projects) """
from multiprocessing import Process,Lock
import time
import json
import requests
## Downloading all the projects
def func1():
repo_result = []
  # GitHub personal access tokens (values redacted)
  Token_list = ['**', '**', '**', '**', '**']
i = 0
api_url = 'https://api.github.com/'
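  # Walk the public /repositories listing with the `since` id cursor, rotating over the access
  # tokens; when a response carries a 'message' field (rate limit / error) on the last token,
  # sleep 10 minutes before retrying.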
while i < 10000: # This number will be increased to collect all the projects
repo_url = api_url + 'repositories?since=' + str(i)
exception_count = 0
while exception_count < 2:
try:
for k in range(0,len(Token_list)):
headers = {'Content-Type': 'application/json','Authorization': 'Bearer {0}'.format(Token_list[k])}
#print(Token_list[k])
repo_response = requests.get(repo_url, headers=headers).json()
#print(repo_response)
try:
if ( len(repo_response['message']) > 0):
if( k == len(Token_list) - 1):
time.sleep(600)
exception_count = exception_count + 1
else:
continue
except:
break
if ( exception_count == 0):
break
else:
continue
except:
exception_count = 0
project_list = []
try:
for j in range(0,len(repo_response)):
project_id = repo_response[j]['id']
project_name = repo_response[j]['name']
project_full_name = repo_response[j]['full_name']
project_html_url = repo_response[j]['html_url']
project_owner_name = repo_response[j]['owner']['login']
project_obj = {"id" : project_id, "name": project_name, "full_name" : project_full_name, "html_url" : project_html_url, "owner" : project_owner_name , "issues" :
"", "commits" : "", "PR" : ""}
project_list.append(project_obj)
except:
print ("exception occurred")
try:
last_id = repo_response[99]["id"]
i = last_id
repo_result = repo_result + project_list
except:
print(" exception inside function 1 ")
break
  ## Removing projects having 10 or fewer issues
p = 0
while p < len(repo_result):
repo_owner = repo_result[p]['owner']
repo_name = repo_result[p]['name']
issue_url = api_url + 'repos/' + repo_owner + '/' + repo_name + '/' + 'issues'
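    # Note: without a per_page parameter this endpoint returns at most 30 open issues
    # (pull requests included), so len(issue_response) is only a lower bound.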
exception_count = 0
while exception_count < 2:
try:
for k in range(0,len(Token_list)):
headers = {'Content-Type': 'application/json','Authorization': 'Bearer {0}'.format(Token_list[k])}
#print(Token_list[k])
issue_response = requests.get(issue_url, headers=headers).json()
try:
if ( len(issue_response['message']) > 0):
if( k == len(Token_list) - 1):
time.sleep(600)
exception_count = exception_count + 1
else:
continue
except:
break
if ( exception_count == 0):
break
else:
continue
except:
exception_count = 0
if(len(issue_response) > 10):
repo_result[p]["issues"] = len(issue_response)
p = p + 1
else:
repo_result.pop(p)
## Selecting the projects with Pull Request > 0
m = 0
while m < len(repo_result):
repo_owner = repo_result[m]['owner']
repo_name = repo_result[m]['name']
PR_url = api_url + 'repos/' + repo_owner + '/' + repo_name + '/' + 'pulls?state=all'
exception_count = 0
while exception_count < 2:
try:
for k in range(0,len(Token_list)):
headers = {'Content-Type': 'application/json','Authorization': 'Bearer {0}'.format(Token_list[k])}
#print(Token_list[k])
PR_response = requests.get(PR_url, headers=headers).json()
try:
if ( len(PR_response['message']) > 0):
if( k == len(Token_list) - 1):
time.sleep(600)
exception_count = exception_count + 1
else:
continue
except:
break
if ( exception_count == 0):
break
else:
continue
except:
exception_count = 0
if(len(PR_response) > 0):
repo_result[m]["PR"] = len(PR_response)
m = m + 1
else:
repo_result.pop(m)
## Selecting Projects with commits > 20
n = 0
while n < len(repo_result):
repo_owner = repo_result[n]['owner']
repo_name = repo_result[n]['name']
commit_url = api_url + 'repos/' + repo_owner + '/' + repo_name + '/' + 'commits'
exception_count = 0
while exception_count < 2:
try:
for k in range(0,len(Token_list)):
headers = {'Content-Type': 'application/json','Authorization': 'Bearer {0}'.format(Token_list[k])}
#print(Token_list[k])
commit_response = requests.get(commit_url, headers=headers).json()
try:
if ( len(commit_response['message']) > 0):
if( k == len(Token_list) - 1):
time.sleep(600)
exception_count = exception_count + 1
else:
continue
except:
break
if ( exception_count == 0):
break
else:
continue
except:
exception_count = 0
if(len(commit_response) > 20):
repo_result[n]["commits"] = len(commit_response)
n = n + 1
else:
repo_result.pop(n)
with open("repo_file1.json", "w") as repo_file:
json.dump(repo_result, repo_file)
print("function 1 finished", len(repo_result))
def func2():
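  # Same pipeline as func1, but for repository ids 10000-20000; results are written to repo_file2.json.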
repo_result = []
  # GitHub personal access tokens (values redacted)
  Token_list = ['**', '**', '**', '**', '**']
i = 10000
api_url = 'https://api.github.com/'
while i < 20000: # This number will be increased to collect all the projects
repo_url = api_url + 'repositories?since=' + str(i)
exception_count = 0
while exception_count < 2:
try:
for k in range(0,len(Token_list)):
headers = {'Content-Type': 'application/json','Authorization': 'Bearer {0}'.format(Token_list[k])}
#print(Token_list[k])
repo_response = requests.get(repo_url, headers=headers).json()
#print(repo_response)
try:
if ( len(repo_response['message']) > 0):
if( k == len(Token_list) - 1):
time.sleep(600)
exception_count = exception_count + 1
else:
continue
except:
break
if ( exception_count == 0):
break
else:
continue
except:
exception_count = 0
project_list = []
try:
for j in range(0,len(repo_response)):
project_id = repo_response[j]['id']
project_name = repo_response[j]['name']
project_full_name = repo_response[j]['full_name']
project_html_url = repo_response[j]['html_url']
project_owner_name = repo_response[j]['owner']['login']
project_obj = {"id" : project_id, "name": project_name, "full_name" : project_full_name, "html_url" : project_html_url, "owner" : project_owner_name , "issues" :
"", "commits" : "", "PR" : ""}
project_list.append(project_obj)
except:
print ("exception occurred")
try:
last_id = repo_response[99]["id"]
i = last_id
repo_result = repo_result + project_list
except:
print(" exception inside function 2 ")
break
  ## Removing projects having 10 or fewer issues
p = 0
while p < len(repo_result):
repo_owner = repo_result[p]['owner']
repo_name = repo_result[p]['name']
issue_url = api_url + 'repos/' + repo_owner + '/' + repo_name + '/' + 'issues'
exception_count = 0
while exception_count < 2:
try:
for k in range(0,len(Token_list)):
headers = {'Content-Type': 'application/json','Authorization': 'Bearer {0}'.format(Token_list[k])}
#print(Token_list[k])
issue_response = requests.get(issue_url, headers=headers).json()
try:
if ( len(issue_response['message']) > 0):
if( k == len(Token_list) - 1):
time.sleep(600)
exception_count = exception_count + 1
else:
continue
except:
break
if ( exception_count == 0):
break
else:
continue
except:
exception_count = 0
if(len(issue_response) > 10):
repo_result[p]["issues"] = len(issue_response)
p = p + 1
else:
repo_result.pop(p)
## Selecting the projects with Pull Request > 0
m = 0
while m < len(repo_result):
repo_owner = repo_result[m]['owner']
repo_name = repo_result[m]['name']
PR_url = api_url + 'repos/' + repo_owner + '/' + repo_name + '/' + 'pulls?state=all'
exception_count = 0
while exception_count < 2:
try:
for k in range(0,len(Token_list)):
headers = {'Content-Type': 'application/json','Authorization': 'Bearer {0}'.format(Token_list[k])}
#print(Token_list[k])
PR_response = requests.get(PR_url, headers=headers).json()
try:
if ( len(PR_response['message']) > 0):
if( k == len(Token_list) - 1):
time.sleep(600)
exception_count = exception_count + 1
else:
continue
except:
break
if ( exception_count == 0):
break
else:
continue
except:
exception_count = 0
if(len(PR_response) > 0):
repo_result[m]["PR"] = len(PR_response)
m = m + 1
else:
repo_result.pop(m)
## Selecting Projects with commits > 20
n = 0
while n < len(repo_result):
repo_owner = repo_result[n]['owner']
repo_name = repo_result[n]['name']
commit_url = api_url + 'repos/' + repo_owner + '/' + repo_name + '/' + 'commits'
exception_count = 0
while exception_count < 2:
try:
for k in range(0,len(Token_list)):
headers = {'Content-Type': 'application/json','Authorization': 'Bearer {0}'.format(Token_list[k])}
#print(Token_list[k])
commit_response = requests.get(commit_url, headers=headers).json()
try:
if ( len(commit_response['message']) > 0):
if( k == len(Token_list) - 1):
time.sleep(600)
exception_count = exception_count + 1
else:
continue
except:
break
if ( exception_count == 0):
break
else:
continue
except:
exception_count = 0
if(len(commit_response) > 20):
repo_result[n]["commits"] = len(commit_response)
n = n + 1
else:
repo_result.pop(n)
with open("repo_file2.json", "w") as repo_file:
json.dump(repo_result, repo_file)
print("function 2 finished", len(repo_result))
def func3():
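  # Same pipeline as func1, but for repository ids 20000-30000; results are written to repo_file3.json.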
repo_result = []
  # GitHub personal access tokens (values redacted)
  Token_list = ['**', '**', '**', '**', '**']
i = 20000
api_url = 'https://api.github.com/'
while i < 30000: # This number will be increased to collect all the projects
repo_url = api_url + 'repositories?since=' + str(i)
exception_count = 0
while exception_count < 2:
try:
for k in range(0,len(Token_list)):
headers = {'Content-Type': 'application/json','Authorization': 'Bearer {0}'.format(Token_list[k])}
#print(Token_list[k])
repo_response = requests.get(repo_url, headers=headers).json()
#print(repo_response)
try:
if ( len(repo_response['message']) > 0):
if( k == len(Token_list) - 1):
time.sleep(600)
exception_count = exception_count + 1
else:
continue
except:
break
if ( exception_count == 0):
break
else:
continue
except:
exception_count = 0
project_list = []
try:
for j in range(0,len(repo_response)):
project_id = repo_response[j]['id']
project_name = repo_response[j]['name']
project_full_name = repo_response[j]['full_name']
project_html_url = repo_response[j]['html_url']
project_owner_name = repo_response[j]['owner']['login']
project_obj = {"id" : project_id, "name": project_name, "full_name" : project_full_name, "html_url" : project_html_url, "owner" : project_owner_name , "issues" :
"", "commits" : "", "PR" : ""}
project_list.append(project_obj)
except:
print ("exception occurred")
try:
last_id = repo_response[99]["id"]
i = last_id
repo_result = repo_result + project_list
except:
print(" exception inside function 3 ")
break
  ## Removing projects having 10 or fewer issues
p = 0
while p < len(repo_result):
repo_owner = repo_result[p]['owner']
repo_name = repo_result[p]['name']
issue_url = api_url + 'repos/' + repo_owner + '/' + repo_name + '/' + 'issues'
exception_count = 0
while exception_count < 2:
try:
for k in range(0,len(Token_list)):
headers = {'Content-Type': 'application/json','Authorization': 'Bearer {0}'.format(Token_list[k])}
#print(Token_list[k])
issue_response = requests.get(issue_url, headers=headers).json()
try:
if ( len(issue_response['message']) > 0):
if( k == len(Token_list) - 1):
time.sleep(600)
exception_count = exception_count + 1
else:
continue
except:
break
if ( exception_count == 0):
break
else:
continue
except:
exception_count = 0
if(len(issue_response) > 10):
repo_result[p]["issues"] = len(issue_response)
p = p + 1
else:
repo_result.pop(p)
## Selecting the projects with Pull Request > 0
m = 0
while m < len(repo_result):
repo_owner = repo_result[m]['owner']
repo_name = repo_result[m]['name']
PR_url = api_url + 'repos/' + repo_owner + '/' + repo_name + '/' + 'pulls?state=all'
exception_count = 0
while exception_count < 2:
try:
for k in range(0,len(Token_list)):
headers = {'Content-Type': 'application/json','Authorization': 'Bearer {0}'.format(Token_list[k])}
#print(Token_list[k])
PR_response = requests.get(PR_url, headers=headers).json()
try:
if ( len(PR_response['message']) > 0):
if( k == len(Token_list) - 1):
time.sleep(600)
exception_count = exception_count + 1
else:
continue
except:
break
if ( exception_count == 0):
break
else:
continue
except:
exception_count = 0
if(len(PR_response) > 0):
repo_result[m]["PR"] = len(PR_response)
m = m + 1
else:
repo_result.pop(m)
## Selecting Projects with commits > 20
n = 0
while n < len(repo_result):
repo_owner = repo_result[n]['owner']
repo_name = repo_result[n]['name']
commit_url = api_url + 'repos/' + repo_owner + '/' + repo_name + '/' + 'commits'
exception_count = 0
while exception_count < 2:
try:
for k in range(0,len(Token_list)):
headers = {'Content-Type': 'application/json','Authorization': 'Bearer {0}'.format(Token_list[k])}
#print(Token_list[k])
commit_response = requests.get(commit_url, headers=headers).json()
try:
if ( len(commit_response['message']) > 0):
if( k == len(Token_list) - 1):
time.sleep(600)
exception_count = exception_count + 1
else:
continue
except:
break
if ( exception_count == 0):
break
else:
continue
except:
exception_count = 0
if(len(commit_response) > 20):
repo_result[n]["commits"] = len(commit_response)
n = n + 1
else:
repo_result.pop(n)
with open("repo_file3.json", "w") as repo_file:
json.dump(repo_result, repo_file)
print("function 3 finished", len(repo_result))
def func4():
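  # Same pipeline as func1, but for repository ids 30000-40000; results are written to repo_file4.json.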
repo_result = []
  # GitHub personal access tokens (values redacted)
  Token_list = ['**', '**', '**', '**', '**']
i = 30000
api_url = 'https://api.github.com/'
while i < 40000: # This number will be increased to collect all the projects
repo_url = api_url + 'repositories?since=' + str(i)
exception_count = 0
while exception_count < 2:
try:
for k in range(0,len(Token_list)):
headers = {'Content-Type': 'application/json','Authorization': 'Bearer {0}'.format(Token_list[k])}
#print(Token_list[k])
repo_response = requests.get(repo_url, headers=headers).json()
#print(repo_response)
try:
if ( len(repo_response['message']) > 0):
if( k == len(Token_list) - 1):
time.sleep(600)
exception_count = exception_count + 1
else:
continue
except:
break
if ( exception_count == 0):
break
else:
continue
except:
exception_count = 0
project_list = []
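        # Keep only the fields we need from each repository record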
try:
for j in range(0,len(repo_response)):
project_id = repo_response[j]['id']
project_name = repo_response[j]['name']
project_full_name = repo_response[j]['full_name']
project_html_url = repo_response[j]['html_url']
project_owner_name = repo_response[j]['owner']['login']
project_obj = {"id" : project_id, "name": project_name, "full_name" : project_full_name, "html_url" : project_html_url, "owner" : project_owner_name , "issues" :
"", "commits" : "", "PR" : ""}
project_list.append(project_obj)
except:
print ("exception occurred")
try:
last_id = repo_response[99]["id"]
i = last_id
repo_result = repo_result + project_list
except:
print(" exception inside function 4 ")
break
    ## Removing projects having 10 or fewer issues
p = 0
while p < len(repo_result):
repo_owner = repo_result[p]['owner']
repo_name = repo_result[p]['name']
issue_url = api_url + 'repos/' + repo_owner + '/' + repo_name + '/' + 'issues'
exception_count = 0
while exception_count < 2:
try:
for k in range(0,len(Token_list)):
headers = {'Content-Type': 'application/json','Authorization': 'Bearer {0}'.format(Token_list[k])}
#print(Token_list[k])
issue_response = requests.get(issue_url, headers=headers).json()
try:
if ( len(issue_response['message']) > 0):
if( k == len(Token_list) - 1):
time.sleep(600)
exception_count = exception_count + 1
else:
continue
except:
break
if ( exception_count == 0):
break
else:
continue
except:
exception_count = 0
if(len(issue_response) > 10):
repo_result[p]["issues"] = len(issue_response)
p = p + 1
else:
repo_result.pop(p)
## Selecting the projects with Pull Request > 0
m = 0
while m < len(repo_result):
repo_owner = repo_result[m]['owner']
repo_name = repo_result[m]['name']
PR_url = api_url + 'repos/' + repo_owner + '/' + repo_name + '/' + 'pulls?state=all'
exception_count = 0
while exception_count < 2:
try:
for k in range(0,len(Token_list)):
headers = {'Content-Type': 'application/json','Authorization': 'Bearer {0}'.format(Token_list[k])}
#print(Token_list[k])
PR_response = requests.get(PR_url, headers=headers).json()
try:
if ( len(PR_response['message']) > 0):
if( k == len(Token_list) - 1):
time.sleep(600)
exception_count = exception_count + 1
else:
continue
except:
break
if ( exception_count == 0):
break
else:
continue
except:
exception_count = 0
if(len(PR_response) > 0):
repo_result[m]["PR"] = len(PR_response)
m = m + 1
else:
repo_result.pop(m)
## Selecting Projects with commits > 20
n = 0
while n < len(repo_result):
repo_owner = repo_result[n]['owner']
repo_name = repo_result[n]['name']
commit_url = api_url + 'repos/' + repo_owner + '/' + repo_name + '/' + 'commits'
exception_count = 0
while exception_count < 2:
try:
for k in range(0,len(Token_list)):
headers = {'Content-Type': 'application/json','Authorization': 'Bearer {0}'.format(Token_list[k])}
#print(Token_list[k])
commit_response = requests.get(commit_url, headers=headers).json()
try:
if ( len(commit_response['message']) > 0):
if( k == len(Token_list) - 1):
time.sleep(600)
exception_count = exception_count + 1
else:
continue
except:
break
if ( exception_count == 0):
break
else:
continue
except:
exception_count = 0
if(len(commit_response) > 20):
repo_result[n]["commits"] = len(commit_response)
n = n + 1
else:
repo_result.pop(n)
with open("repo_file4.json", "w") as repo_file:
json.dump(repo_result, repo_file)
print("function 4 finished", len(repo_result))
if __name__ == '__main__':
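    # Launch the four collector functions in parallel, one process per function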
lock = Lock()
p1 = Process(target=func1)
p2 = Process(target=func2)
p3 = Process(target=func3)
p4 = Process(target=func4)
p1.start()
p2.start()
p3.start()
p4.start()
p1.join()
p2.join()
p3.join()
p4.join()
| [
"multiprocessing.Process",
"requests.get",
"time.sleep",
"multiprocessing.Lock",
"json.dump"
]
| [((22908, 22914), 'multiprocessing.Lock', 'Lock', ([], {}), '()\n', (22912, 22914), False, 'from multiprocessing import Process, Lock\n'), ((22926, 22947), 'multiprocessing.Process', 'Process', ([], {'target': 'func1'}), '(target=func1)\n', (22933, 22947), False, 'from multiprocessing import Process, Lock\n'), ((22955, 22976), 'multiprocessing.Process', 'Process', ([], {'target': 'func2'}), '(target=func2)\n', (22962, 22976), False, 'from multiprocessing import Process, Lock\n'), ((22984, 23005), 'multiprocessing.Process', 'Process', ([], {'target': 'func3'}), '(target=func3)\n', (22991, 23005), False, 'from multiprocessing import Process, Lock\n'), ((23013, 23034), 'multiprocessing.Process', 'Process', ([], {'target': 'func4'}), '(target=func4)\n', (23020, 23034), False, 'from multiprocessing import Process, Lock\n'), ((5838, 5871), 'json.dump', 'json.dump', (['repo_result', 'repo_file'], {}), '(repo_result, repo_file)\n', (5847, 5871), False, 'import json\n'), ((11485, 11518), 'json.dump', 'json.dump', (['repo_result', 'repo_file'], {}), '(repo_result, repo_file)\n', (11494, 11518), False, 'import json\n'), ((17131, 17164), 'json.dump', 'json.dump', (['repo_result', 'repo_file'], {}), '(repo_result, repo_file)\n', (17140, 17164), False, 'import json\n'), ((22777, 22810), 'json.dump', 'json.dump', (['repo_result', 'repo_file'], {}), '(repo_result, repo_file)\n', (22786, 22810), False, 'import json\n'), ((859, 898), 'requests.get', 'requests.get', (['repo_url'], {'headers': 'headers'}), '(repo_url, headers=headers)\n', (871, 898), False, 'import requests\n'), ((2852, 2892), 'requests.get', 'requests.get', (['issue_url'], {'headers': 'headers'}), '(issue_url, headers=headers)\n', (2864, 2892), False, 'import requests\n'), ((4008, 4045), 'requests.get', 'requests.get', (['PR_url'], {'headers': 'headers'}), '(PR_url, headers=headers)\n', (4020, 4045), False, 'import requests\n'), ((5154, 5195), 'requests.get', 'requests.get', (['commit_url'], {'headers': 'headers'}), '(commit_url, headers=headers)\n', (5166, 5195), False, 'import requests\n'), ((6506, 6545), 'requests.get', 'requests.get', (['repo_url'], {'headers': 'headers'}), '(repo_url, headers=headers)\n', (6518, 6545), False, 'import requests\n'), ((8499, 8539), 'requests.get', 'requests.get', (['issue_url'], {'headers': 'headers'}), '(issue_url, headers=headers)\n', (8511, 8539), False, 'import requests\n'), ((9655, 9692), 'requests.get', 'requests.get', (['PR_url'], {'headers': 'headers'}), '(PR_url, headers=headers)\n', (9667, 9692), False, 'import requests\n'), ((10801, 10842), 'requests.get', 'requests.get', (['commit_url'], {'headers': 'headers'}), '(commit_url, headers=headers)\n', (10813, 10842), False, 'import requests\n'), ((12152, 12191), 'requests.get', 'requests.get', (['repo_url'], {'headers': 'headers'}), '(repo_url, headers=headers)\n', (12164, 12191), False, 'import requests\n'), ((14145, 14185), 'requests.get', 'requests.get', (['issue_url'], {'headers': 'headers'}), '(issue_url, headers=headers)\n', (14157, 14185), False, 'import requests\n'), ((15301, 15338), 'requests.get', 'requests.get', (['PR_url'], {'headers': 'headers'}), '(PR_url, headers=headers)\n', (15313, 15338), False, 'import requests\n'), ((16447, 16488), 'requests.get', 'requests.get', (['commit_url'], {'headers': 'headers'}), '(commit_url, headers=headers)\n', (16459, 16488), False, 'import requests\n'), ((17798, 17837), 'requests.get', 'requests.get', (['repo_url'], {'headers': 'headers'}), '(repo_url, headers=headers)\n', (17810, 17837), False, 
'import requests\n'), ((19791, 19831), 'requests.get', 'requests.get', (['issue_url'], {'headers': 'headers'}), '(issue_url, headers=headers)\n', (19803, 19831), False, 'import requests\n'), ((20947, 20984), 'requests.get', 'requests.get', (['PR_url'], {'headers': 'headers'}), '(PR_url, headers=headers)\n', (20959, 20984), False, 'import requests\n'), ((22093, 22134), 'requests.get', 'requests.get', (['commit_url'], {'headers': 'headers'}), '(commit_url, headers=headers)\n', (22105, 22134), False, 'import requests\n'), ((1118, 1133), 'time.sleep', 'time.sleep', (['(600)'], {}), '(600)\n', (1128, 1133), False, 'import time\n'), ((3038, 3053), 'time.sleep', 'time.sleep', (['(600)'], {}), '(600)\n', (3048, 3053), False, 'import time\n'), ((4188, 4203), 'time.sleep', 'time.sleep', (['(600)'], {}), '(600)\n', (4198, 4203), False, 'import time\n'), ((5352, 5367), 'time.sleep', 'time.sleep', (['(600)'], {}), '(600)\n', (5362, 5367), False, 'import time\n'), ((6765, 6780), 'time.sleep', 'time.sleep', (['(600)'], {}), '(600)\n', (6775, 6780), False, 'import time\n'), ((8685, 8700), 'time.sleep', 'time.sleep', (['(600)'], {}), '(600)\n', (8695, 8700), False, 'import time\n'), ((9835, 9850), 'time.sleep', 'time.sleep', (['(600)'], {}), '(600)\n', (9845, 9850), False, 'import time\n'), ((10999, 11014), 'time.sleep', 'time.sleep', (['(600)'], {}), '(600)\n', (11009, 11014), False, 'import time\n'), ((12411, 12426), 'time.sleep', 'time.sleep', (['(600)'], {}), '(600)\n', (12421, 12426), False, 'import time\n'), ((14331, 14346), 'time.sleep', 'time.sleep', (['(600)'], {}), '(600)\n', (14341, 14346), False, 'import time\n'), ((15481, 15496), 'time.sleep', 'time.sleep', (['(600)'], {}), '(600)\n', (15491, 15496), False, 'import time\n'), ((16645, 16660), 'time.sleep', 'time.sleep', (['(600)'], {}), '(600)\n', (16655, 16660), False, 'import time\n'), ((18057, 18072), 'time.sleep', 'time.sleep', (['(600)'], {}), '(600)\n', (18067, 18072), False, 'import time\n'), ((19977, 19992), 'time.sleep', 'time.sleep', (['(600)'], {}), '(600)\n', (19987, 19992), False, 'import time\n'), ((21127, 21142), 'time.sleep', 'time.sleep', (['(600)'], {}), '(600)\n', (21137, 21142), False, 'import time\n'), ((22291, 22306), 'time.sleep', 'time.sleep', (['(600)'], {}), '(600)\n', (22301, 22306), False, 'import time\n')] |
import functools
def create_maybe_get_wire(conn):
c = conn.cursor()
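    # Memoize the lookups below: the same tile and wire names are queried many times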
@functools.lru_cache(maxsize=None)
def get_tile_type_pkey(tile):
c.execute('SELECT pkey, tile_type_pkey FROM phy_tile WHERE name = ?',
(tile, ))
return c.fetchone()
@functools.lru_cache(maxsize=None)
def maybe_get_wire(tile, wire):
phy_tile_pkey, tile_type_pkey = get_tile_type_pkey(tile)
c.execute(
'SELECT pkey FROM wire_in_tile WHERE phy_tile_type_pkey = ? and name = ?',
(tile_type_pkey, wire))
result = c.fetchone()
if result is None:
return None
wire_in_tile_pkey = result[0]
c.execute(
'SELECT pkey FROM wire WHERE phy_tile_pkey = ? AND wire_in_tile_pkey = ?',
(phy_tile_pkey, wire_in_tile_pkey))
return c.fetchone()[0]
return maybe_get_wire
def maybe_add_pip(top, maybe_get_wire, feature):
if feature.value != 1:
return
parts = feature.feature.split('.')
assert len(parts) == 3
sink_wire = maybe_get_wire(parts[0], parts[2])
if sink_wire is None:
return
src_wire = maybe_get_wire(parts[0], parts[1])
if src_wire is None:
return
top.active_pips.add((sink_wire, src_wire))
def get_node_pkey(conn, wire_pkey):
c = conn.cursor()
c.execute("SELECT node_pkey FROM wire WHERE pkey = ?", (wire_pkey, ))
return c.fetchone()[0]
def get_wires_in_node(conn, node_pkey):
c = conn.cursor()
c.execute("SELECT pkey FROM wire WHERE node_pkey = ?", (node_pkey, ))
for row in c.fetchall():
yield row[0]
def get_wire(conn, phy_tile_pkey, wire_in_tile_pkey):
c = conn.cursor()
c.execute(
"SELECT pkey FROM wire WHERE wire_in_tile_pkey = ? AND phy_tile_pkey = ?;",
(
wire_in_tile_pkey,
phy_tile_pkey,
))
return c.fetchone()[0]
def get_tile_type(conn, tile_name):
c = conn.cursor()
c.execute(
"""
SELECT name FROM tile_type WHERE pkey = (
SELECT tile_type_pkey FROM phy_tile WHERE name = ?);""", (tile_name, ))
return c.fetchone()[0]
def get_wire_pkey(conn, tile_name, wire):
c = conn.cursor()
c.execute(
"""
WITH selected_tile(phy_tile_pkey, tile_type_pkey) AS (
SELECT
pkey,
tile_type_pkey
FROM
phy_tile
WHERE
name = ?
)
SELECT
wire.pkey
FROM
wire
WHERE
wire.phy_tile_pkey = (
SELECT
selected_tile.phy_tile_pkey
FROM
selected_tile
)
AND wire.wire_in_tile_pkey = (
SELECT
wire_in_tile.pkey
FROM
wire_in_tile
WHERE
wire_in_tile.name = ?
AND wire_in_tile.phy_tile_type_pkey = (
SELECT
tile_type_pkey
FROM
selected_tile
)
);
""", (tile_name, wire))
results = c.fetchone()
assert results is not None, (tile_name, wire)
return results[0]
| [
"functools.lru_cache"
]
| [((80, 113), 'functools.lru_cache', 'functools.lru_cache', ([], {'maxsize': 'None'}), '(maxsize=None)\n', (99, 113), False, 'import functools\n'), ((288, 321), 'functools.lru_cache', 'functools.lru_cache', ([], {'maxsize': 'None'}), '(maxsize=None)\n', (307, 321), False, 'import functools\n')] |
"""A module that provides functionality for accessing the Payments API."""
import enum
import http
import logging
import requests
from fastapi import Depends, Header, HTTPException
from fastapi.security.http import HTTPAuthorizationCredentials
import auth.authentication
import config
import schemas.payment
logger = logging.getLogger(__name__)
CORP_TYPE = 'PPR'
class FilingCode(enum.Enum):
"""An enumeration of the filing codes available to PPR."""
SEARCH = 'SERCH'
YEARLY_REGISTRATION = 'FSREG'
INFINITE_REGISTRATION = 'INFRG'
class PaymentService:
"""A service used for interacting with the Payments API."""
auth_header: HTTPAuthorizationCredentials
account_id: str
def __init__(self, auth_header: HTTPAuthorizationCredentials = Depends(auth.authentication.bearer_scheme),
account_id: str = Header(None)):
"""Initialize the repository with the Authorization and Account-Id headers provided in the request."""
self.auth_header = auth_header
self.account_id = account_id
def create_payment(self, filing_code: FilingCode):
"""Submit a payment request and provide the details to the caller."""
request = {
'businessInfo': {'corpType': CORP_TYPE},
'filingInfo': {'filingTypes': [{'filingTypeCode': filing_code.value}]}
}
pay_response = requests.post(
'{}/payment-requests'.format(config.PAY_API_URL), json=request,
headers={
'Authorization': '{} {}'.format(self.auth_header.scheme, self.auth_header.credentials),
'Account-Id': self.account_id
}
)
try:
auth.authentication.check_auth_response(pay_response)
except HTTPException as auth_ex:
logger.error('Create Payment call failed auth with status {}. Response body: {}'.format(
pay_response.status_code, pay_response.text))
raise auth_ex
if not pay_response: # status_code is unsuccessful
logger.error('Create Payment call failed unexpectedly with status {}. Response body: {}'.format(
pay_response.status_code, pay_response.text))
raise HTTPException(status_code=http.HTTPStatus.INTERNAL_SERVER_ERROR)
body = pay_response.json()
return schemas.payment.Payment(id=body['id'], status=body['statusCode'], method=body['paymentMethod'])
| [
"logging.getLogger",
"fastapi.HTTPException",
"fastapi.Header",
"fastapi.Depends"
]
| [((322, 349), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (339, 349), False, 'import logging\n'), ((778, 820), 'fastapi.Depends', 'Depends', (['auth.authentication.bearer_scheme'], {}), '(auth.authentication.bearer_scheme)\n', (785, 820), False, 'from fastapi import Depends, Header, HTTPException\n'), ((857, 869), 'fastapi.Header', 'Header', (['None'], {}), '(None)\n', (863, 869), False, 'from fastapi import Depends, Header, HTTPException\n'), ((2231, 2295), 'fastapi.HTTPException', 'HTTPException', ([], {'status_code': 'http.HTTPStatus.INTERNAL_SERVER_ERROR'}), '(status_code=http.HTTPStatus.INTERNAL_SERVER_ERROR)\n', (2244, 2295), False, 'from fastapi import Depends, Header, HTTPException\n')] |
from datetime import datetime
import inspect
import logging

# Module-level fallback logger, used when the decorated object does not carry its own `logger`
logger = logging.getLogger(__name__)

def log_time(msg=None):
def decorator(f):
nonlocal msg
if msg is None:
msg = '{} time spent: '.format(f.__name__)
def inner(*args, **kwargs):
# check if the object has a logger
global logger
if args and hasattr(args[0], 'logger'):
logger = args[0].logger
start = datetime.now()
result = f(*args, **kwargs)
logger.info(
msg + ' {} seconds'.format((datetime.now() - start).total_seconds())
)
return result
return inner
return decorator
def log_params(f):
arg_spec = inspect.getargspec(f).args
has_self = arg_spec and arg_spec[0] == 'self'
def decorator(*args, **kwargs):
logger.info(
'calling {} with args: {}, and kwargs: {}'.format(
f.__name__, args if not has_self else args[1:], kwargs
)
)
return f(*args, **kwargs)
return decorator
| [
"datetime.datetime.now",
"inspect.getargspec"
]
| [((700, 721), 'inspect.getargspec', 'inspect.getargspec', (['f'], {}), '(f)\n', (718, 721), False, 'import inspect\n'), ((415, 429), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (427, 429), False, 'from datetime import datetime\n'), ((539, 553), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (551, 553), False, 'from datetime import datetime\n')] |
#!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Copyright (c) 2017 The Bitcoin developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""
Test that the blockmaxsize and excessiveblocksize parameters are also
settable via the bitcoin.conf file.
"""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal, assert_raises_rpc_error
from test_framework.cdefs import (ONE_MEGABYTE)
import os
class BSVBlockSizeParams(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.maxminedblocksize = 4 * ONE_MEGABYTE
self.maxblocksize = 16 * ONE_MEGABYTE
def setup_chain(self):
super().setup_chain()
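        # Write the size limits into node0's bitcoin.conf before the node starts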
with open(os.path.join(self.options.tmpdir + "/node0", "bitcoin.conf"), 'a', encoding='utf8') as f:
f.write("blockmaxsize=" + str(self.maxminedblocksize) + "\n")
f.write("excessiveblocksize=" + str(self.maxblocksize) + "\n")
def add_options(self, parser):
super().add_options(parser)
def run_test(self):
gires = self.nodes[0].getinfo()
assert_equal(gires["maxblocksize"], self.maxblocksize)
assert_equal(gires["maxminedblocksize"], self.maxminedblocksize)
if __name__ == '__main__':
BSVBlockSizeParams().main()
| [
"os.path.join",
"test_framework.util.assert_equal"
]
| [((1229, 1283), 'test_framework.util.assert_equal', 'assert_equal', (["gires['maxblocksize']", 'self.maxblocksize'], {}), "(gires['maxblocksize'], self.maxblocksize)\n", (1241, 1283), False, 'from test_framework.util import assert_equal, assert_raises_rpc_error\n'), ((1292, 1356), 'test_framework.util.assert_equal', 'assert_equal', (["gires['maxminedblocksize']", 'self.maxminedblocksize'], {}), "(gires['maxminedblocksize'], self.maxminedblocksize)\n", (1304, 1356), False, 'from test_framework.util import assert_equal, assert_raises_rpc_error\n'), ((845, 905), 'os.path.join', 'os.path.join', (["(self.options.tmpdir + '/node0')", '"""bitcoin.conf"""'], {}), "(self.options.tmpdir + '/node0', 'bitcoin.conf')\n", (857, 905), False, 'import os\n')] |
import pdb
import warnings
from jax import custom_vjp
@custom_vjp
def debug_identity(x):
"""
acts as identity, but inserts a pdb trace on the backwards pass
"""
warnings.warn('Using a module intended for debugging')
return x
def _debug_fwd(x):
warnings.warn('Using a module intended for debugging')
return x, x
# noinspection PyUnusedLocal
def _debug_bwd(x, g):
pdb.set_trace()
return g
debug_identity.defvjp(_debug_fwd, _debug_bwd)
| [
"warnings.warn",
"pdb.set_trace"
]
| [((180, 234), 'warnings.warn', 'warnings.warn', (['"""Using a module intended for debugging"""'], {}), "('Using a module intended for debugging')\n", (193, 234), False, 'import warnings\n'), ((273, 327), 'warnings.warn', 'warnings.warn', (['"""Using a module intended for debugging"""'], {}), "('Using a module intended for debugging')\n", (286, 327), False, 'import warnings\n'), ((401, 416), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (414, 416), False, 'import pdb\n')] |
#!/usr/bin/env python
# Some helpful links
# https://docs.python.org/3/library/tkinter.html
# https://www.python-course.eu/tkinter_entry_widgets.php
import tkinter as tk
class Application(tk.Frame):
def __init__(self, root=None):
super().__init__(root)
self.root = root
self.root.title("Mileage")
self.root.geometry("250x125")
self.pack()
        self.miles = tk.Entry(self)
        self.gallons = tk.Entry(self)
self.mpg = tk.Label(self)
self.init_widgets()
def init_widgets(self):
self.miles.grid(row=0)
tk.Label(self, text="Miles").grid(row=0, column=1)
self.gallons.grid(row=1)
tk.Label(self, text="Gallons").grid(row=1, column=1)
self.mpg.grid(row=2)
tk.Label(self, text="MPG").grid(row=2, column=1)
tk.Button(self, text="Calculate", command = self.calculate).grid(row=3, column=1)
tk.Button(self, text="Quit", command=self.root.destroy).grid(row=4, column=1)
def calculate(self):
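        # MPG = miles driven / gallons used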
self.mpg['text'] = float(self.miles.get()) / float(self.gallons.get())
app = Application(root=tk.Tk())
app.mainloop()
| [
"tkinter.Button",
"tkinter.Tk",
"tkinter.Entry",
"tkinter.Label"
]
| [((448, 462), 'tkinter.Entry', 'tk.Entry', (['self'], {}), '(self)\n', (456, 462), True, 'import tkinter as tk\n'), ((488, 502), 'tkinter.Entry', 'tk.Entry', (['self'], {}), '(self)\n', (496, 502), True, 'import tkinter as tk\n'), ((524, 538), 'tkinter.Label', 'tk.Label', (['self'], {}), '(self)\n', (532, 538), True, 'import tkinter as tk\n'), ((1220, 1227), 'tkinter.Tk', 'tk.Tk', ([], {}), '()\n', (1225, 1227), True, 'import tkinter as tk\n'), ((657, 685), 'tkinter.Label', 'tk.Label', (['self'], {'text': '"""Miles"""'}), "(self, text='Miles')\n", (665, 685), True, 'import tkinter as tk\n'), ((753, 783), 'tkinter.Label', 'tk.Label', (['self'], {'text': '"""Gallons"""'}), "(self, text='Gallons')\n", (761, 783), True, 'import tkinter as tk\n'), ((847, 873), 'tkinter.Label', 'tk.Label', (['self'], {'text': '"""MPG"""'}), "(self, text='MPG')\n", (855, 873), True, 'import tkinter as tk\n'), ((915, 972), 'tkinter.Button', 'tk.Button', (['self'], {'text': '"""Calculate"""', 'command': 'self.calculate'}), "(self, text='Calculate', command=self.calculate)\n", (924, 972), True, 'import tkinter as tk\n'), ((1006, 1061), 'tkinter.Button', 'tk.Button', (['self'], {'text': '"""Quit"""', 'command': 'self.root.destroy'}), "(self, text='Quit', command=self.root.destroy)\n", (1015, 1061), True, 'import tkinter as tk\n')] |
# -*- coding: utf-8 -*-
__author__ = 'isee15'
import LunarSolarConverter
converter = LunarSolarConverter.LunarSolarConverter()
def LunarToSolar(year, month, day, isleap = False):
lunar = LunarSolarConverter.Lunar(year, month, day, isleap)
solar = converter.LunarToSolar(lunar)
return (solar.solarYear, solar.solarMonth, solar.solarDay)
def SolarToLunar(year, month, day):
solar = LunarSolarConverter.Solar(year, month, day)
lunar = converter.SolarToLunar(solar)
return (lunar.lunarYear, lunar.lunarMonth, lunar.lunarDay)
def LunarMonthDays(year, month, isleap = False):
converter = LunarSolarConverter.LunarSolarConverter
days = converter.lunar_month_days[year - converter.lunar_month_days[0]]
leap = LunarSolarConverter.GetBitInt(days, 4, 13)
offset = 0
loopend = leap
if not isleap:
if month <= leap or leap == 0:
loopend = month - 1
else:
loopend = month
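    # Bit (12 - loopend) of the packed month-days entry: 1 -> 30-day month, 0 -> 29-day month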
days = LunarSolarConverter.GetBitInt(days, 1, 12 - loopend) == 1 and 30 or 29
return days
| [
"LunarSolarConverter.GetBitInt",
"LunarSolarConverter.Solar",
"LunarSolarConverter.Lunar",
"LunarSolarConverter.LunarSolarConverter"
]
| [((87, 128), 'LunarSolarConverter.LunarSolarConverter', 'LunarSolarConverter.LunarSolarConverter', ([], {}), '()\n', (126, 128), False, 'import LunarSolarConverter\n'), ((194, 245), 'LunarSolarConverter.Lunar', 'LunarSolarConverter.Lunar', (['year', 'month', 'day', 'isleap'], {}), '(year, month, day, isleap)\n', (219, 245), False, 'import LunarSolarConverter\n'), ((401, 444), 'LunarSolarConverter.Solar', 'LunarSolarConverter.Solar', (['year', 'month', 'day'], {}), '(year, month, day)\n', (426, 444), False, 'import LunarSolarConverter\n'), ((746, 788), 'LunarSolarConverter.GetBitInt', 'LunarSolarConverter.GetBitInt', (['days', '(4)', '(13)'], {}), '(days, 4, 13)\n', (775, 788), False, 'import LunarSolarConverter\n'), ((968, 1020), 'LunarSolarConverter.GetBitInt', 'LunarSolarConverter.GetBitInt', (['days', '(1)', '(12 - loopend)'], {}), '(days, 1, 12 - loopend)\n', (997, 1020), False, 'import LunarSolarConverter\n')] |
# Copyright (c) 2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from torch.utils.data import DataLoader
from torchvision.datasets import CIFAR10
from torchvision.transforms import Compose, ToTensor, Normalize
from nvflare.apis.dxo import from_shareable, DataKind, DXO
from nvflare.apis.executor import Executor
from nvflare.apis.fl_constant import ReturnCode
from nvflare.apis.fl_context import FLContext
from nvflare.apis.shareable import Shareable, make_reply
from nvflare.apis.signal import Signal
from nvflare.app_common.app_constant import AppConstants
from simple_network import SimpleNetwork
class Cifar10Validator(Executor):
def __init__(self, validate_task_name=AppConstants.TASK_VALIDATION):
super(Cifar10Validator, self).__init__()
self._validate_task_name = validate_task_name
# Setup the model
self.model = SimpleNetwork()
self.device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
self.model.to(self.device)
# Preparing the dataset for testing.
transforms = Compose([
ToTensor(),
Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
])
self.test_data = CIFAR10(root='~/data', train=False, transform=transforms)
self.test_loader = DataLoader(self.test_data, batch_size=4, shuffle=False)
def execute(self, task_name: str, shareable: Shareable, fl_ctx: FLContext, abort_signal: Signal) -> Shareable:
if task_name == self._validate_task_name:
model_owner = "?"
try:
try:
dxo = from_shareable(shareable)
except:
self.log_error(fl_ctx, "Error in extracting dxo from shareable.")
return make_reply(ReturnCode.BAD_TASK_DATA)
# Ensure data_kind is weights.
if not dxo.data_kind == DataKind.WEIGHTS:
self.log_exception(fl_ctx, f"DXO is of type {dxo.data_kind} but expected type WEIGHTS.")
return make_reply(ReturnCode.BAD_TASK_DATA)
# Extract weights and ensure they are tensor.
model_owner = shareable.get_header(AppConstants.MODEL_OWNER, "?")
weights = {k: torch.as_tensor(v, device=self.device) for k, v in dxo.data.items()}
# Get validation accuracy
val_accuracy = self.do_validation(weights, abort_signal)
if abort_signal.triggered:
return make_reply(ReturnCode.TASK_ABORTED)
self.log_info(fl_ctx, f"Accuracy when validating {model_owner}'s model on"
f" {fl_ctx.get_identity_name()}"f's data: {val_accuracy}')
dxo = DXO(data_kind=DataKind.METRICS, data={'val_acc': val_accuracy})
return dxo.to_shareable()
except:
self.log_exception(fl_ctx, f"Exception in validating model from {model_owner}")
return make_reply(ReturnCode.EXECUTION_EXCEPTION)
else:
return make_reply(ReturnCode.TASK_UNKNOWN)
def do_validation(self, weights, abort_signal):
self.model.load_state_dict(weights)
self.model.eval()
correct = 0
total = 0
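        # Run inference over the test set and count correct predictions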
with torch.no_grad():
for i, (images, labels) in enumerate(self.test_loader):
if abort_signal.triggered:
return 0
images, labels = images.to(self.device), labels.to(self.device)
output = self.model(images)
_, pred_label = torch.max(output, 1)
correct += (pred_label == labels).sum().item()
total += images.size()[0]
metric = correct/float(total)
return metric
| [
"simple_network.SimpleNetwork",
"nvflare.apis.shareable.make_reply",
"torch.as_tensor",
"torch.max",
"nvflare.apis.dxo.DXO",
"nvflare.apis.dxo.from_shareable",
"torchvision.datasets.CIFAR10",
"torch.cuda.is_available",
"torchvision.transforms.Normalize",
"torch.utils.data.DataLoader",
"torch.no_grad",
"torchvision.transforms.ToTensor",
"torch.device"
]
| [((1404, 1419), 'simple_network.SimpleNetwork', 'SimpleNetwork', ([], {}), '()\n', (1417, 1419), False, 'from simple_network import SimpleNetwork\n'), ((1746, 1803), 'torchvision.datasets.CIFAR10', 'CIFAR10', ([], {'root': '"""~/data"""', 'train': '(False)', 'transform': 'transforms'}), "(root='~/data', train=False, transform=transforms)\n", (1753, 1803), False, 'from torchvision.datasets import CIFAR10\n'), ((1831, 1886), 'torch.utils.data.DataLoader', 'DataLoader', (['self.test_data'], {'batch_size': '(4)', 'shuffle': '(False)'}), '(self.test_data, batch_size=4, shuffle=False)\n', (1841, 1886), False, 'from torch.utils.data import DataLoader\n'), ((1466, 1491), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1489, 1491), False, 'import torch\n'), ((1442, 1462), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (1454, 1462), False, 'import torch\n'), ((1497, 1516), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (1509, 1516), False, 'import torch\n'), ((3625, 3660), 'nvflare.apis.shareable.make_reply', 'make_reply', (['ReturnCode.TASK_UNKNOWN'], {}), '(ReturnCode.TASK_UNKNOWN)\n', (3635, 3660), False, 'from nvflare.apis.shareable import Shareable, make_reply\n'), ((3837, 3852), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3850, 3852), False, 'import torch\n'), ((1641, 1651), 'torchvision.transforms.ToTensor', 'ToTensor', ([], {}), '()\n', (1649, 1651), False, 'from torchvision.transforms import Compose, ToTensor, Normalize\n'), ((1665, 1708), 'torchvision.transforms.Normalize', 'Normalize', (['(0.5, 0.5, 0.5)', '(0.5, 0.5, 0.5)'], {}), '((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n', (1674, 1708), False, 'from torchvision.transforms import Compose, ToTensor, Normalize\n'), ((3304, 3367), 'nvflare.apis.dxo.DXO', 'DXO', ([], {'data_kind': 'DataKind.METRICS', 'data': "{'val_acc': val_accuracy}"}), "(data_kind=DataKind.METRICS, data={'val_acc': val_accuracy})\n", (3307, 3367), False, 'from nvflare.apis.dxo import from_shareable, DataKind, DXO\n'), ((4152, 4172), 'torch.max', 'torch.max', (['output', '(1)'], {}), '(output, 1)\n', (4161, 4172), False, 'import torch\n'), ((2147, 2172), 'nvflare.apis.dxo.from_shareable', 'from_shareable', (['shareable'], {}), '(shareable)\n', (2161, 2172), False, 'from nvflare.apis.dxo import from_shareable, DataKind, DXO\n'), ((2589, 2625), 'nvflare.apis.shareable.make_reply', 'make_reply', (['ReturnCode.BAD_TASK_DATA'], {}), '(ReturnCode.BAD_TASK_DATA)\n', (2599, 2625), False, 'from nvflare.apis.shareable import Shareable, make_reply\n'), ((2801, 2839), 'torch.as_tensor', 'torch.as_tensor', (['v'], {'device': 'self.device'}), '(v, device=self.device)\n', (2816, 2839), False, 'import torch\n'), ((3056, 3091), 'nvflare.apis.shareable.make_reply', 'make_reply', (['ReturnCode.TASK_ABORTED'], {}), '(ReturnCode.TASK_ABORTED)\n', (3066, 3091), False, 'from nvflare.apis.shareable import Shareable, make_reply\n'), ((3549, 3591), 'nvflare.apis.shareable.make_reply', 'make_reply', (['ReturnCode.EXECUTION_EXCEPTION'], {}), '(ReturnCode.EXECUTION_EXCEPTION)\n', (3559, 3591), False, 'from nvflare.apis.shareable import Shareable, make_reply\n'), ((2310, 2346), 'nvflare.apis.shareable.make_reply', 'make_reply', (['ReturnCode.BAD_TASK_DATA'], {}), '(ReturnCode.BAD_TASK_DATA)\n', (2320, 2346), False, 'from nvflare.apis.shareable import Shareable, make_reply\n')] |
# MIT License
# Copyright (c) 2020-2021 <NAME> (https://www.chrisfarris.com)
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import boto3
from botocore.exceptions import ClientError
import json
import os
import logging
logger = logging.getLogger()
logger.setLevel(getattr(logging, os.getenv('LOG_LEVEL', default='INFO')))
logging.getLogger('botocore').setLevel(logging.WARNING)
logging.getLogger('boto3').setLevel(logging.WARNING)
logging.getLogger('urllib3').setLevel(logging.WARNING)
TAG_KEY=os.getenv('TAG_KEY', default='WireShark')
def handler(event, context):
logger.debug("Received event: " + json.dumps(event, sort_keys=True))
ec2_client = boto3.client('ec2')
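    # Record which ENIs already have a mirror session and the highest session number in use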
mirror_sessions = ec2_client.describe_traffic_mirror_sessions()['TrafficMirrorSessions']
enabled_enis = []
max_session_id = 0
for s in mirror_sessions:
enabled_enis.append(s['NetworkInterfaceId'])
if s['SessionNumber'] > max_session_id:
max_session_id = s['SessionNumber']
response = ec2_client.describe_instances(
Filters=[
{'Name': 'instance-state-name', 'Values': ['running']},
],
MaxResults=1000 # I should never need to paginate.
)
for r in response['Reservations']:
for i in r['Instances']:
if not i['InstanceType'].startswith("t3"):
logger.debug(f"Instance {i['InstanceId']} is not a t3 and does not support Traffic Mirroring")
continue
for tag in i['Tags']:
if tag['Key'] == TAG_KEY:
# See if a mirror session is setup
for eni in i['NetworkInterfaces']:
if eni['NetworkInterfaceId'] not in enabled_enis:
logger.info(f"ENI {eni['NetworkInterfaceId']} on Instance {i['InstanceId']} needs Mirroring Enabled")
max_session_id += 1
enable_traffic_mirroring(ec2_client, eni['NetworkInterfaceId'], i['InstanceId'], max_session_id)
else:
logger.debug(f"ENI {eni['NetworkInterfaceId']} on Instance {i['InstanceId']} is already Enabled")
def enable_traffic_mirroring(ec2_client, eni, instance_id, session_id):
response = ec2_client.create_traffic_mirror_session(
NetworkInterfaceId=eni,
TrafficMirrorTargetId=os.environ['TARGET_ID'],
TrafficMirrorFilterId=os.environ['FILTER_ID'],
SessionNumber=session_id,
Description=f"Enabled by Lambda for {instance_id}"
)
## END OF FUNCTION ##
if __name__ == '__main__':
# Logging idea stolen from: https://docs.python.org/3/howto/logging.html#configuring-logging
# create console handler and set level to debug
ch = logging.StreamHandler()
logger.setLevel(logging.DEBUG)
# create formatter
# formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
formatter = logging.Formatter('%(levelname)s - %(message)s')
# add formatter to ch
ch.setFormatter(formatter)
# add ch to logger
logger.addHandler(ch)
try:
handler(None, None)
except KeyboardInterrupt:
exit(1)
| [
"logging.getLogger",
"logging.StreamHandler",
"boto3.client",
"os.getenv",
"logging.Formatter",
"json.dumps"
]
| [((1237, 1256), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (1254, 1256), False, 'import logging\n'), ((1504, 1545), 'os.getenv', 'os.getenv', (['"""TAG_KEY"""'], {'default': '"""WireShark"""'}), "('TAG_KEY', default='WireShark')\n", (1513, 1545), False, 'import os\n'), ((1667, 1686), 'boto3.client', 'boto3.client', (['"""ec2"""'], {}), "('ec2')\n", (1679, 1686), False, 'import boto3\n'), ((3783, 3806), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (3804, 3806), False, 'import logging\n'), ((3975, 4023), 'logging.Formatter', 'logging.Formatter', (['"""%(levelname)s - %(message)s"""'], {}), "('%(levelname)s - %(message)s')\n", (3992, 4023), False, 'import logging\n'), ((1290, 1328), 'os.getenv', 'os.getenv', (['"""LOG_LEVEL"""'], {'default': '"""INFO"""'}), "('LOG_LEVEL', default='INFO')\n", (1299, 1328), False, 'import os\n'), ((1331, 1360), 'logging.getLogger', 'logging.getLogger', (['"""botocore"""'], {}), "('botocore')\n", (1348, 1360), False, 'import logging\n'), ((1387, 1413), 'logging.getLogger', 'logging.getLogger', (['"""boto3"""'], {}), "('boto3')\n", (1404, 1413), False, 'import logging\n'), ((1440, 1468), 'logging.getLogger', 'logging.getLogger', (['"""urllib3"""'], {}), "('urllib3')\n", (1457, 1468), False, 'import logging\n'), ((1614, 1647), 'json.dumps', 'json.dumps', (['event'], {'sort_keys': '(True)'}), '(event, sort_keys=True)\n', (1624, 1647), False, 'import json\n')] |
from django.views.generic import View
from django.utils.decorators import method_decorator
from django.contrib.auth.decorators import login_required
from django.views.decorators.cache import never_cache
from django.contrib import messages
from django.http import HttpResponseRedirect
from django.urls import reverse
from django.shortcuts import (
get_object_or_404,
render,
)
from gbe.models import (
Conference,
UserMessage,
)
from gbe_logging import log_func
from gbe.functions import (
validate_profile,
)
from gbe.email.functions import notify_reviewers_on_bid_change
from gbetext import (
no_login_msg,
fee_instructions,
full_login_msg,
payment_needed_msg,
payment_details_error,
)
from gbe_utils.text import no_profile_msg
from gbe.ticketing_idd_interface import (
get_payment_details,
get_ticket_form,
fee_paid,
)
class MakeBidView(View):
form = None
has_draft = True
instructions = ''
payment_form = None
coordinated = False
def groundwork(self, request, args, kwargs):
self.owner = validate_profile(request, require=False)
if not self.owner or not self.owner.complete:
user_message = UserMessage.objects.get_or_create(
view=self.__class__.__name__,
code="PROFILE_INCOMPLETE",
defaults={
'summary': "Profile Incomplete",
'description': no_profile_msg})
messages.warning(request, user_message[0].description)
return '%s?next=%s' % (
reverse('profile_update', urlconf='gbe.urls'),
reverse('%s_create' % self.bid_type.lower(),
urlconf='gbe.urls'))
self.bid_object = None
if "bid_id" in kwargs:
bid_id = kwargs.get("bid_id")
self.bid_object = get_object_or_404(self.bid_class, pk=bid_id)
self.conference = self.bid_object.b_conference
else:
self.conference = Conference.objects.filter(
accepting_bids=True).first()
def make_post_forms(self, request, the_form):
if self.bid_object:
self.form = the_form(
request.POST,
instance=self.bid_object,
initial=self.get_initial(),
prefix=self.prefix)
else:
self.form = the_form(
request.POST,
initial=self.get_initial(),
prefix=self.prefix)
self.set_up_form()
def set_up_post(self, request):
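        # Choose the submit or draft form (and the matching success message) based on which action was posted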
the_form = None
if 'submit' in list(request.POST.keys()) or not self.has_draft:
the_form = self.submit_form
user_message = UserMessage.objects.get_or_create(
view=self.__class__.__name__,
code="SUBMIT_SUCCESS",
defaults={
'summary': "%s Submit Success" % self.bid_type,
'description': self.submit_msg})
else:
the_form = self.draft_form
user_message = UserMessage.objects.get_or_create(
view=self.__class__.__name__,
code="DRAFT_SUCCESS",
defaults={
'summary': "%s Save Draft Success" % self.bid_type,
'description': self.draft_msg})
self.make_post_forms(request, the_form)
return user_message
def make_context(self, request):
paid = fee_paid(
self.bid_type,
self.owner.user_object.username,
self.conference)
instructions = UserMessage.objects.get_or_create(
view=self.__class__.__name__,
code="BID_INSTRUCTIONS",
defaults={
'summary': "%s Bid Instructions" % self.bid_type,
'description': self.instructions})
context = {
'conference': self.conference,
'forms': [self.form],
'page_title': self.page_title,
'view_title': self.view_title,
'draft_fields': self.draft_fields,
'submit_fields': self.submit_fields,
'fee_paid': paid,
'view_header_text': instructions[0].description,
}
if not paid and not self.coordinated:
user_message = UserMessage.objects.get_or_create(
view=self.__class__.__name__,
code="FEE_MESSAGE",
defaults={
'summary': "%s Pre-submit Message" % self.bid_type,
'description': fee_instructions})
messages.info(
request,
user_message[0].description)
if self.payment_form:
context['forms'] += [self.payment_form]
else:
context['forms'] += [get_ticket_form(self.bid_class.__name__,
self.conference)]
return context
def get_create_form(self, request):
if self.bid_object:
self.form = self.submit_form(
prefix=self.prefix,
instance=self.bid_object,
initial=self.get_initial())
else:
self.form = self.submit_form(
prefix=self.prefix,
initial=self.get_initial())
self.set_up_form()
return render(
request,
'gbe/bid.tmpl',
self.make_context(request)
)
def check_validity(self, request):
return self.form.is_valid()
def set_up_form(self):
pass
def get_invalid_response(self, request):
self.set_up_form()
context = self.make_context(request)
return render(
request,
'gbe/bid.tmpl',
context)
def submit_bid(self, request):
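        # Mark the bid as submitted and notify the reviewer group for this bid type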
self.bid_object.submitted = True
self.bid_object.save()
notify_reviewers_on_bid_change(
self.owner,
self.bid_object,
self.bid_type,
"Submission",
self.conference,
'%s Reviewers' % self.bid_type,
reverse('%s_review' % self.bid_type.lower(),
urlconf='gbe.urls'))
@never_cache
@log_func
def get(self, request, *args, **kwargs):
if not request.user.is_authenticated:
follow_on = '?next=%s' % reverse(
'%s_create' % self.bid_type.lower(),
urlconf='gbe.urls')
user_message = UserMessage.objects.get_or_create(
view=self.__class__.__name__,
code="USER_NOT_LOGGED_IN",
defaults={
'summary': "Need Login - %s Bid",
'description': no_login_msg})
full_msg = full_login_msg % (
user_message[0].description,
reverse('login', urlconf='gbe.urls') + follow_on)
messages.warning(request, full_msg)
return HttpResponseRedirect(
reverse('register', urlconf='gbe.urls') + follow_on)
redirect = self.groundwork(request, args, kwargs)
if redirect:
return HttpResponseRedirect(redirect)
return self.get_create_form(request)
@never_cache
@log_func
@method_decorator(login_required)
def post(self, request, *args, **kwargs):
cart_items = []
paypal_button = None
total = None
redirect = None
redirect = self.groundwork(request, args, kwargs)
if redirect:
return HttpResponseRedirect(redirect)
user_message = self.set_up_post(request)
# check bid validity
if not self.check_validity(request):
return self.get_invalid_response(request)
if not self.coordinated and not fee_paid(
self.bid_type,
self.owner.user_object.username,
self.conference) and "draft" not in list(request.POST.keys()):
self.payment_form = get_ticket_form(self.bid_class.__name__,
self.conference,
request.POST)
if not self.payment_form.is_valid():
error_message = UserMessage.objects.get_or_create(
view=self.__class__.__name__,
code="PAYMENT_CHOICE_INVALID",
defaults={
'summary': "User Made Invalid Ticket Choice",
'description': payment_details_error})
messages.error(request, error_message[0].description)
return self.get_invalid_response(request)
# save bid
if not self.bid_object:
self.bid_object = self.form.save(commit=False)
self.set_valid_form(request)
# if this isn't a draft, move forward through process, setting up
# payment review if payment is needed
if "submit" in list(request.POST.keys()):
if self.payment_form:
cart_items, paypal_button, total = get_payment_details(
request,
self.payment_form,
self.bid_type,
self.bid_object.pk,
self.owner.user_object.pk)
dynamic_message = UserMessage.objects.get_or_create(
view=self.__class__.__name__,
code="NOT_PAID_INSTRUCTIONS",
defaults={
'summary': "%s Not Paid" % self.bid_type,
'description': payment_needed_msg})
page_title = '%s Payment' % self.bid_type
return render(
request,
'gbe/confirm_pay.tmpl',
{'dynamic_message': dynamic_message[0].description,
'page_title': page_title,
'cart_items': cart_items,
'total': total,
'paypal_button': paypal_button})
else:
redirect = self.submit_bid(request)
messages.success(request, user_message[0].description)
return HttpResponseRedirect(
redirect or reverse('home', urlconf='gbe.urls'))
def dispatch(self, *args, **kwargs):
return super(MakeBidView, self).dispatch(*args, **kwargs)
| [
"django.shortcuts.render",
"django.http.HttpResponseRedirect",
"gbe.ticketing_idd_interface.get_payment_details",
"django.urls.reverse",
"django.contrib.messages.error",
"django.contrib.messages.warning",
"gbe.ticketing_idd_interface.fee_paid",
"django.shortcuts.get_object_or_404",
"django.utils.decorators.method_decorator",
"django.contrib.messages.info",
"django.contrib.messages.success",
"gbe.models.UserMessage.objects.get_or_create",
"gbe.ticketing_idd_interface.get_ticket_form",
"gbe.functions.validate_profile",
"gbe.models.Conference.objects.filter"
]
| [((7313, 7345), 'django.utils.decorators.method_decorator', 'method_decorator', (['login_required'], {}), '(login_required)\n', (7329, 7345), False, 'from django.utils.decorators import method_decorator\n'), ((1078, 1118), 'gbe.functions.validate_profile', 'validate_profile', (['request'], {'require': '(False)'}), '(request, require=False)\n', (1094, 1118), False, 'from gbe.functions import validate_profile\n'), ((3484, 3557), 'gbe.ticketing_idd_interface.fee_paid', 'fee_paid', (['self.bid_type', 'self.owner.user_object.username', 'self.conference'], {}), '(self.bid_type, self.owner.user_object.username, self.conference)\n', (3492, 3557), False, 'from gbe.ticketing_idd_interface import get_payment_details, get_ticket_form, fee_paid\n'), ((3618, 3811), 'gbe.models.UserMessage.objects.get_or_create', 'UserMessage.objects.get_or_create', ([], {'view': 'self.__class__.__name__', 'code': '"""BID_INSTRUCTIONS"""', 'defaults': "{'summary': '%s Bid Instructions' % self.bid_type, 'description': self.\n instructions}"}), "(view=self.__class__.__name__, code=\n 'BID_INSTRUCTIONS', defaults={'summary': '%s Bid Instructions' % self.\n bid_type, 'description': self.instructions})\n", (3651, 3811), False, 'from gbe.models import Conference, UserMessage\n'), ((5745, 5785), 'django.shortcuts.render', 'render', (['request', '"""gbe/bid.tmpl"""', 'context'], {}), "(request, 'gbe/bid.tmpl', context)\n", (5751, 5785), False, 'from django.shortcuts import get_object_or_404, render\n'), ((10178, 10232), 'django.contrib.messages.success', 'messages.success', (['request', 'user_message[0].description'], {}), '(request, user_message[0].description)\n', (10194, 10232), False, 'from django.contrib import messages\n'), ((1200, 1374), 'gbe.models.UserMessage.objects.get_or_create', 'UserMessage.objects.get_or_create', ([], {'view': 'self.__class__.__name__', 'code': '"""PROFILE_INCOMPLETE"""', 'defaults': "{'summary': 'Profile Incomplete', 'description': no_profile_msg}"}), "(view=self.__class__.__name__, code=\n 'PROFILE_INCOMPLETE', defaults={'summary': 'Profile Incomplete',\n 'description': no_profile_msg})\n", (1233, 1374), False, 'from gbe.models import Conference, UserMessage\n'), ((1468, 1522), 'django.contrib.messages.warning', 'messages.warning', (['request', 'user_message[0].description'], {}), '(request, user_message[0].description)\n', (1484, 1522), False, 'from django.contrib import messages\n'), ((1863, 1907), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['self.bid_class'], {'pk': 'bid_id'}), '(self.bid_class, pk=bid_id)\n', (1880, 1907), False, 'from django.shortcuts import get_object_or_404, render\n'), ((2737, 2924), 'gbe.models.UserMessage.objects.get_or_create', 'UserMessage.objects.get_or_create', ([], {'view': 'self.__class__.__name__', 'code': '"""SUBMIT_SUCCESS"""', 'defaults': "{'summary': '%s Submit Success' % self.bid_type, 'description': self.submit_msg\n }"}), "(view=self.__class__.__name__, code=\n 'SUBMIT_SUCCESS', defaults={'summary': '%s Submit Success' % self.\n bid_type, 'description': self.submit_msg})\n", (2770, 2924), False, 'from gbe.models import Conference, UserMessage\n'), ((3085, 3274), 'gbe.models.UserMessage.objects.get_or_create', 'UserMessage.objects.get_or_create', ([], {'view': 'self.__class__.__name__', 'code': '"""DRAFT_SUCCESS"""', 'defaults': "{'summary': '%s Save Draft Success' % self.bid_type, 'description': self.\n draft_msg}"}), "(view=self.__class__.__name__, code=\n 'DRAFT_SUCCESS', defaults={'summary': '%s Save Draft Success' % self.\n 
bid_type, 'description': self.draft_msg})\n", (3118, 3274), False, 'from gbe.models import Conference, UserMessage\n'), ((4329, 4518), 'gbe.models.UserMessage.objects.get_or_create', 'UserMessage.objects.get_or_create', ([], {'view': 'self.__class__.__name__', 'code': '"""FEE_MESSAGE"""', 'defaults': "{'summary': '%s Pre-submit Message' % self.bid_type, 'description':\n fee_instructions}"}), "(view=self.__class__.__name__, code=\n 'FEE_MESSAGE', defaults={'summary': '%s Pre-submit Message' % self.\n bid_type, 'description': fee_instructions})\n", (4362, 4518), False, 'from gbe.models import Conference, UserMessage\n'), ((4612, 4663), 'django.contrib.messages.info', 'messages.info', (['request', 'user_message[0].description'], {}), '(request, user_message[0].description)\n', (4625, 4663), False, 'from django.contrib import messages\n'), ((6533, 6706), 'gbe.models.UserMessage.objects.get_or_create', 'UserMessage.objects.get_or_create', ([], {'view': 'self.__class__.__name__', 'code': '"""USER_NOT_LOGGED_IN"""', 'defaults': "{'summary': 'Need Login - %s Bid', 'description': no_login_msg}"}), "(view=self.__class__.__name__, code=\n 'USER_NOT_LOGGED_IN', defaults={'summary': 'Need Login - %s Bid',\n 'description': no_login_msg})\n", (6566, 6706), False, 'from gbe.models import Conference, UserMessage\n'), ((6953, 6988), 'django.contrib.messages.warning', 'messages.warning', (['request', 'full_msg'], {}), '(request, full_msg)\n', (6969, 6988), False, 'from django.contrib import messages\n'), ((7199, 7229), 'django.http.HttpResponseRedirect', 'HttpResponseRedirect', (['redirect'], {}), '(redirect)\n', (7219, 7229), False, 'from django.http import HttpResponseRedirect\n'), ((7588, 7618), 'django.http.HttpResponseRedirect', 'HttpResponseRedirect', (['redirect'], {}), '(redirect)\n', (7608, 7618), False, 'from django.http import HttpResponseRedirect\n'), ((8040, 8111), 'gbe.ticketing_idd_interface.get_ticket_form', 'get_ticket_form', (['self.bid_class.__name__', 'self.conference', 'request.POST'], {}), '(self.bid_class.__name__, self.conference, request.POST)\n', (8055, 8111), False, 'from gbe.ticketing_idd_interface import get_payment_details, get_ticket_form, fee_paid\n'), ((7839, 7912), 'gbe.ticketing_idd_interface.fee_paid', 'fee_paid', (['self.bid_type', 'self.owner.user_object.username', 'self.conference'], {}), '(self.bid_type, self.owner.user_object.username, self.conference)\n', (7847, 7912), False, 'from gbe.ticketing_idd_interface import get_payment_details, get_ticket_form, fee_paid\n'), ((8289, 8487), 'gbe.models.UserMessage.objects.get_or_create', 'UserMessage.objects.get_or_create', ([], {'view': 'self.__class__.__name__', 'code': '"""PAYMENT_CHOICE_INVALID"""', 'defaults': "{'summary': 'User Made Invalid Ticket Choice', 'description':\n payment_details_error}"}), "(view=self.__class__.__name__, code=\n 'PAYMENT_CHOICE_INVALID', defaults={'summary':\n 'User Made Invalid Ticket Choice', 'description': payment_details_error})\n", (8322, 8487), False, 'from gbe.models import Conference, UserMessage\n'), ((8625, 8678), 'django.contrib.messages.error', 'messages.error', (['request', 'error_message[0].description'], {}), '(request, error_message[0].description)\n', (8639, 8678), False, 'from django.contrib import messages\n'), ((9142, 9256), 'gbe.ticketing_idd_interface.get_payment_details', 'get_payment_details', (['request', 'self.payment_form', 'self.bid_type', 'self.bid_object.pk', 'self.owner.user_object.pk'], {}), '(request, self.payment_form, self.bid_type, self.\n bid_object.pk, 
self.owner.user_object.pk)\n', (9161, 9256), False, 'from gbe.ticketing_idd_interface import get_payment_details, get_ticket_form, fee_paid\n'), ((9388, 9579), 'gbe.models.UserMessage.objects.get_or_create', 'UserMessage.objects.get_or_create', ([], {'view': 'self.__class__.__name__', 'code': '"""NOT_PAID_INSTRUCTIONS"""', 'defaults': "{'summary': '%s Not Paid' % self.bid_type, 'description': payment_needed_msg}"}), "(view=self.__class__.__name__, code=\n 'NOT_PAID_INSTRUCTIONS', defaults={'summary': '%s Not Paid' % self.\n bid_type, 'description': payment_needed_msg})\n", (9421, 9579), False, 'from gbe.models import Conference, UserMessage\n'), ((9761, 9962), 'django.shortcuts.render', 'render', (['request', '"""gbe/confirm_pay.tmpl"""', "{'dynamic_message': dynamic_message[0].description, 'page_title':\n page_title, 'cart_items': cart_items, 'total': total, 'paypal_button':\n paypal_button}"], {}), "(request, 'gbe/confirm_pay.tmpl', {'dynamic_message': dynamic_message\n [0].description, 'page_title': page_title, 'cart_items': cart_items,\n 'total': total, 'paypal_button': paypal_button})\n", (9767, 9962), False, 'from django.shortcuts import get_object_or_404, render\n'), ((10294, 10329), 'django.urls.reverse', 'reverse', (['"""home"""'], {'urlconf': '"""gbe.urls"""'}), "('home', urlconf='gbe.urls')\n", (10301, 10329), False, 'from django.urls import reverse\n'), ((1575, 1620), 'django.urls.reverse', 'reverse', (['"""profile_update"""'], {'urlconf': '"""gbe.urls"""'}), "('profile_update', urlconf='gbe.urls')\n", (1582, 1620), False, 'from django.urls import reverse\n'), ((2011, 2057), 'gbe.models.Conference.objects.filter', 'Conference.objects.filter', ([], {'accepting_bids': '(True)'}), '(accepting_bids=True)\n', (2036, 2057), False, 'from gbe.models import Conference, UserMessage\n'), ((4842, 4899), 'gbe.ticketing_idd_interface.get_ticket_form', 'get_ticket_form', (['self.bid_class.__name__', 'self.conference'], {}), '(self.bid_class.__name__, self.conference)\n', (4857, 4899), False, 'from gbe.ticketing_idd_interface import get_payment_details, get_ticket_form, fee_paid\n'), ((7047, 7086), 'django.urls.reverse', 'reverse', (['"""register"""'], {'urlconf': '"""gbe.urls"""'}), "('register', urlconf='gbe.urls')\n", (7054, 7086), False, 'from django.urls import reverse\n'), ((6891, 6927), 'django.urls.reverse', 'reverse', (['"""login"""'], {'urlconf': '"""gbe.urls"""'}), "('login', urlconf='gbe.urls')\n", (6898, 6927), False, 'from django.urls import reverse\n')] |
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function, division
import os,unittest,numpy as np
class KnowValues(unittest.TestCase):
def test_aos_libnao(self):
""" Computing of the atomic orbitals """
from pyscf.nao import system_vars_c
from pyscf.tools.cubegen import Cube
sv = system_vars_c().init_siesta_xml(label='water', cd=os.path.dirname(os.path.abspath(__file__)))
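    # Evaluate every atomic orbital on a 20x20x20 grid and check the result shape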
cc = Cube(sv, nx=20, ny=20, nz=20)
aos = sv.comp_aos_den(cc.get_coords())
self.assertEqual(aos.shape[0], cc.nx*cc.ny*cc.nz)
self.assertEqual(aos.shape[1], sv.norbs)
if __name__ == "__main__": unittest.main()
| [
"unittest.main",
"pyscf.tools.cubegen.Cube",
"os.path.abspath",
"pyscf.nao.system_vars_c"
]
| [((1201, 1216), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1214, 1216), False, 'import os, unittest, numpy as np\n'), ((1001, 1030), 'pyscf.tools.cubegen.Cube', 'Cube', (['sv'], {'nx': '(20)', 'ny': '(20)', 'nz': '(20)'}), '(sv, nx=20, ny=20, nz=20)\n', (1005, 1030), False, 'from pyscf.tools.cubegen import Cube\n'), ((898, 913), 'pyscf.nao.system_vars_c', 'system_vars_c', ([], {}), '()\n', (911, 913), False, 'from pyscf.nao import system_vars_c\n'), ((964, 989), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (979, 989), False, 'import os, unittest, numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 17 06:44:47 2018
@author: <NAME>
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeRegressor
# importing the dataset
dataset = pd.read_csv('Position_Salaries.csv')
# take all the columns but leave the last one(-1)
# always make sure our independent variable is a matrix not a vector and
# dependent variable can be a vector
X = dataset.iloc[:, 1:-1].values
Y = dataset.iloc[:, 2].values
# splitting the dataset into a training set and a test set
# x_train, x_test, y_train, y_test = train_test_split(X, Y, test_size=0.2, random_state=0)
# feature scaling
"""sc_X = StandardScaler()
x_train = sc_X.fit_transform(x_train)
x_test = sc_X.transform(x_test)
sc_Y = StandardScaler()
x_train = sc_X.fit_transform(x_train)"""
# fitting the Decision Tree regression Model to the dataset
regressor = DecisionTreeRegressor(random_state=0)
regressor.fit(X, Y)
# predicting a new result; predict() expects a 2-D array, so the single position level is wrapped as [[6.5]]
y_pred = regressor.predict([[6.5]])
# Visualizing the Decision tree regression result (for higher resolution and smoother curve)
X_grid = np.arange(min(X), max(X), 0.01)
X_grid = X_grid.reshape(len(X_grid), 1)
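# the fine 0.01 step makes the tree's piecewise-constant predictions show up as steps rather than straight segments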
plt.scatter(X, Y, color='red')
plt.plot(X_grid, regressor.predict(X_grid), color='blue')
plt.title("Truth or Bluff (Regression Model)")
plt.xlabel("Position Level")
plt.ylabel("Salary")
plt.show()
| [
"sklearn.tree.DecisionTreeRegressor",
"pandas.read_csv",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.title",
"matplotlib.pyplot.show"
]
| [((239, 275), 'pandas.read_csv', 'pd.read_csv', (['"""Position_Salaries.csv"""'], {}), "('Position_Salaries.csv')\n", (250, 275), True, 'import pandas as pd\n'), ((921, 958), 'sklearn.tree.DecisionTreeRegressor', 'DecisionTreeRegressor', ([], {'random_state': '(0)'}), '(random_state=0)\n', (942, 958), False, 'from sklearn.tree import DecisionTreeRegressor\n'), ((1213, 1243), 'matplotlib.pyplot.scatter', 'plt.scatter', (['X', 'Y'], {'color': '"""red"""'}), "(X, Y, color='red')\n", (1224, 1243), True, 'import matplotlib.pyplot as plt\n'), ((1302, 1348), 'matplotlib.pyplot.title', 'plt.title', (['"""Truth or Bluff (Regression Model)"""'], {}), "('Truth or Bluff (Regression Model)')\n", (1311, 1348), True, 'import matplotlib.pyplot as plt\n'), ((1349, 1377), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Position Level"""'], {}), "('Position Level')\n", (1359, 1377), True, 'import matplotlib.pyplot as plt\n'), ((1378, 1398), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Salary"""'], {}), "('Salary')\n", (1388, 1398), True, 'import matplotlib.pyplot as plt\n'), ((1399, 1409), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1407, 1409), True, 'import matplotlib.pyplot as plt\n')] |
from itertools import product
import numpy as np
import pandas as pd
import pytest
from sklearn.metrics import matthews_corrcoef as sk_matthews_corrcoef
from evalml.objectives import (
F1,
MAPE,
MSE,
AccuracyBinary,
AccuracyMulticlass,
BalancedAccuracyBinary,
BalancedAccuracyMulticlass,
BinaryClassificationObjective,
CostBenefitMatrix,
ExpVariance,
F1Macro,
F1Micro,
F1Weighted,
LogLossBinary,
MCCBinary,
MCCMulticlass,
MeanSquaredLogError,
Precision,
PrecisionMacro,
PrecisionMicro,
PrecisionWeighted,
Recall,
RecallMacro,
RecallMicro,
RecallWeighted,
RootMeanSquaredError,
RootMeanSquaredLogError
)
from evalml.objectives.utils import (
_all_objectives_dict,
get_non_core_objectives
)
EPS = 1e-5
all_automl_objectives = _all_objectives_dict()
all_automl_objectives = {name: class_() for name, class_ in all_automl_objectives.items() if class_ not in get_non_core_objectives()}
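# instantiate each core objective once; non-core objectives are excluded from these generic input-validation checks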
def test_input_contains_nan():
y_predicted = np.array([np.nan, 0, 0])
y_true = np.array([1, 2, 1])
for objective in all_automl_objectives.values():
with pytest.raises(ValueError, match="y_predicted contains NaN or infinity"):
objective.score(y_true, y_predicted)
y_true = np.array([np.nan, 0, 0])
y_predicted = np.array([1, 2, 0])
for objective in all_automl_objectives.values():
with pytest.raises(ValueError, match="y_true contains NaN or infinity"):
objective.score(y_true, y_predicted)
y_true = np.array([1, 0])
y_predicted_proba = np.array([[1, np.nan], [0.1, 0]])
for objective in all_automl_objectives.values():
if objective.score_needs_proba:
with pytest.raises(ValueError, match="y_predicted contains NaN or infinity"):
objective.score(y_true, y_predicted_proba)
def test_input_contains_inf():
y_predicted = np.array([np.inf, 0, 0])
y_true = np.array([1, 0, 0])
for objective in all_automl_objectives.values():
with pytest.raises(ValueError, match="y_predicted contains NaN or infinity"):
objective.score(y_true, y_predicted)
y_true = np.array([np.inf, 0, 0])
y_predicted = np.array([1, 0, 0])
for objective in all_automl_objectives.values():
with pytest.raises(ValueError, match="y_true contains NaN or infinity"):
objective.score(y_true, y_predicted)
y_true = np.array([1, 0])
y_predicted_proba = np.array([[1, np.inf], [0.1, 0]])
for objective in all_automl_objectives.values():
if objective.score_needs_proba:
with pytest.raises(ValueError, match="y_predicted contains NaN or infinity"):
objective.score(y_true, y_predicted_proba)
def test_different_input_lengths():
y_predicted = np.array([0, 0])
y_true = np.array([1])
for objective in all_automl_objectives.values():
with pytest.raises(ValueError, match="Inputs have mismatched dimensions"):
objective.score(y_true, y_predicted)
y_true = np.array([0, 0])
y_predicted = np.array([1, 2, 0])
for objective in all_automl_objectives.values():
with pytest.raises(ValueError, match="Inputs have mismatched dimensions"):
objective.score(y_true, y_predicted)
def test_zero_input_lengths():
y_predicted = np.array([])
y_true = np.array([])
for objective in all_automl_objectives.values():
with pytest.raises(ValueError, match="Length of inputs is 0"):
objective.score(y_true, y_predicted)
def test_probabilities_not_in_0_1_range():
y_predicted = np.array([0.3, 1.001, 0.3])
y_true = np.array([1, 0, 1])
for objective in all_automl_objectives.values():
if objective.score_needs_proba:
with pytest.raises(ValueError, match="y_predicted contains probability estimates"):
objective.score(y_true, y_predicted)
y_predicted = np.array([0.3, -0.001, 0.3])
y_true = np.array([1, 0, 1])
for objective in all_automl_objectives.values():
if objective.score_needs_proba:
with pytest.raises(ValueError, match="y_predicted contains probability estimates"):
objective.score(y_true, y_predicted)
y_true = np.array([1, 0])
y_predicted_proba = np.array([[1, 3], [0.1, 0]])
for objective in all_automl_objectives.values():
if objective.score_needs_proba:
with pytest.raises(ValueError, match="y_predicted contains probability estimates"):
objective.score(y_true, y_predicted_proba)
def test_negative_with_log():
y_predicted = np.array([-1, 10, 30])
y_true = np.array([-1, 0, 1])
for objective in [MeanSquaredLogError(), RootMeanSquaredLogError()]:
with pytest.raises(ValueError, match="Mean Squared Logarithmic Error cannot be used when targets contain negative values."):
objective.score(y_true, y_predicted)
def test_binary_more_than_two_unique_values():
y_predicted = np.array([0, 1, 2])
y_true = np.array([1, 0, 1])
for objective in all_automl_objectives.values():
if isinstance(objective, BinaryClassificationObjective) and not objective.score_needs_proba:
with pytest.raises(ValueError, match="y_predicted contains more than two unique values"):
objective.score(y_true, y_predicted)
y_true = np.array([0, 1, 2])
y_predicted = np.array([1, 0, 1])
for objective in all_automl_objectives.values():
if isinstance(objective, BinaryClassificationObjective) and not objective.score_needs_proba:
with pytest.raises(ValueError, match="y_true contains more than two unique values"):
objective.score(y_true, y_predicted)
def test_accuracy_binary():
obj = AccuracyBinary()
assert obj.score(np.array([0, 0, 1, 1]),
np.array([1, 1, 0, 0])) == pytest.approx(0.0, EPS)
assert obj.score(np.array([0, 0, 1, 1]),
np.array([0, 1, 0, 1])) == pytest.approx(0.5, EPS)
assert obj.score(np.array([0, 0, 1, 1]),
np.array([0, 0, 1, 1])) == pytest.approx(1.0, EPS)
def test_accuracy_multi():
obj = AccuracyMulticlass()
assert obj.score(np.array([0, 0, 1, 1]),
np.array([1, 1, 0, 0])) == pytest.approx(0.0, EPS)
assert obj.score(np.array([0, 0, 1, 1]),
np.array([0, 1, 0, 1])) == pytest.approx(0.5, EPS)
assert obj.score(np.array([0, 0, 1, 1]),
np.array([0, 0, 1, 1])) == pytest.approx(1.0, EPS)
assert obj.score(np.array([0, 0, 1, 1, 2, 2]),
np.array([0, 0, 0, 0, 0, 0])) == pytest.approx(1 / 3.0, EPS)
assert obj.score(np.array([0, 0, 0, 0, 0, 0]),
np.array([0, 0, 1, 1, 2, 2])) == pytest.approx(1 / 3.0, EPS)
def test_balanced_accuracy_binary():
obj = BalancedAccuracyBinary()
assert obj.score(np.array([0, 1, 0, 0, 1, 0]),
np.array([0, 1, 0, 0, 0, 1])) == pytest.approx(0.625, EPS)
assert obj.score(np.array([0, 1, 0, 0, 1, 0]),
np.array([0, 1, 0, 0, 1, 0])) == pytest.approx(1.0, EPS)
assert obj.score(np.array([0, 1, 0, 0, 1, 0]),
np.array([1, 0, 1, 1, 0, 1])) == pytest.approx(0.0, EPS)
def test_balanced_accuracy_multi():
obj = BalancedAccuracyMulticlass()
assert obj.score(np.array([0, 1, 2, 0, 1, 2, 3]),
np.array([0, 0, 2, 0, 0, 2, 3])) == pytest.approx(0.75, EPS)
assert obj.score(np.array([0, 1, 2, 0, 1, 2, 3]),
np.array([0, 1, 2, 0, 1, 2, 3])) == pytest.approx(1.0, EPS)
assert obj.score(np.array([0, 1, 2, 0, 1, 2, 3]),
np.array([1, 0, 3, 1, 2, 1, 0])) == pytest.approx(0.0, EPS)
def test_f1_binary():
obj = F1()
assert obj.score(np.array([0, 1, 0, 0, 1, 0]),
np.array([0, 1, 0, 0, 0, 1])) == pytest.approx(0.5, EPS)
assert obj.score(np.array([0, 1, 0, 0, 1, 1]),
np.array([0, 1, 0, 0, 1, 1])) == pytest.approx(1.0, EPS)
assert obj.score(np.array([0, 0, 0, 0, 1, 0]),
np.array([0, 1, 0, 0, 0, 1])) == pytest.approx(0.0, EPS)
assert obj.score(np.array([0, 0]),
np.array([0, 0])) == pytest.approx(0.0, EPS)
def test_f1_micro_multi():
obj = F1Micro()
assert obj.score(np.array([0, 0, 0, 0, 0, 0, 0, 0, 0]),
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])) == pytest.approx(1 / 3.0, EPS)
assert obj.score(np.array([0, 0, 0, 1, 1, 1, 2, 2, 2]),
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])) == pytest.approx(1.0, EPS)
assert obj.score(np.array([2, 2, 2, 0, 0, 0, 1, 1, 1]),
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])) == pytest.approx(0.0, EPS)
assert obj.score(np.array([1, 2]),
np.array([0, 0])) == pytest.approx(0.0, EPS)
def test_f1_macro_multi():
obj = F1Macro()
assert obj.score(np.array([0, 0, 0, 0, 0, 0, 0, 0, 0]),
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])) \
== pytest.approx(2 * (1 / 3.0) * (1 / 9.0) / (1 / 3.0 + 1 / 9.0), EPS)
assert obj.score(np.array([0, 0, 0, 1, 1, 1, 2, 2, 2]),
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])) == pytest.approx(1.0, EPS)
assert obj.score(np.array([2, 2, 2, 0, 0, 0, 1, 1, 1]),
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])) == pytest.approx(0.0, EPS)
assert obj.score(np.array([1, 2]),
np.array([0, 0])) == pytest.approx(0.0, EPS)
def test_f1_weighted_multi():
obj = F1Weighted()
assert obj.score(np.array([0, 0, 0, 1, 1, 1, 2, 2, 2]),
np.array([0, 0, 0, 0, 0, 0, 0, 0, 0])) \
== pytest.approx(2 * (1 / 3.0) * (1 / 9.0) / (1 / 3.0 + 1 / 9.0), EPS)
assert obj.score(np.array([0, 0, 0, 1, 1, 1, 2, 2, 2]),
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])) == pytest.approx(1.0, EPS)
assert obj.score(np.array([0, 0, 0, 1, 1, 1, 2, 2, 2]),
np.array([2, 2, 2, 0, 0, 0, 1, 1, 1])) == pytest.approx(0.0, EPS)
assert obj.score(np.array([0, 0]),
np.array([1, 2])) == pytest.approx(0.0, EPS)
def test_precision_binary():
obj = Precision()
assert obj.score(np.array([1, 1, 1, 1, 1, 1]),
np.array([0, 0, 0, 1, 1, 1])) == pytest.approx(1.0, EPS)
assert obj.score(np.array([0, 0, 0, 1, 1, 1]),
np.array([1, 1, 1, 1, 1, 1])) == pytest.approx(0.5, EPS)
assert obj.score(np.array([0, 0, 0, 0, 0, 0]),
np.array([1, 1, 1, 1, 1, 1])) == pytest.approx(0.0, EPS)
assert obj.score(np.array([0, 0, 0, 0, 0, 0]),
np.array([0, 0, 0, 0, 0, 0])) == pytest.approx(0.0, EPS)
def test_precision_micro_multi():
obj = PrecisionMicro()
assert obj.score(np.array([0, 0, 0, 1, 1, 1, 2, 2, 2]),
np.array([0, 0, 0, 0, 0, 0, 0, 0, 0])) == pytest.approx(1 / 3.0, EPS)
assert obj.score(np.array([0, 0, 0, 1, 1, 1, 2, 2, 2]),
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])) == pytest.approx(1.0, EPS)
assert obj.score(np.array([0, 0, 0, 1, 1, 1, 2, 2, 2]),
np.array([2, 2, 2, 0, 0, 0, 1, 1, 1])) == pytest.approx(0.0, EPS)
assert obj.score(np.array([0, 0]),
np.array([1, 2])) == pytest.approx(0.0, EPS)
def test_precision_macro_multi():
obj = PrecisionMacro()
assert obj.score(np.array([0, 0, 0, 1, 1, 1, 2, 2, 2]),
np.array([0, 0, 0, 0, 0, 0, 0, 0, 0])) == pytest.approx(1 / 9.0, EPS)
assert obj.score(np.array([0, 0, 0, 1, 1, 1, 2, 2, 2]),
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])) == pytest.approx(1.0, EPS)
assert obj.score(np.array([0, 0, 0, 1, 1, 1, 2, 2, 2]),
np.array([2, 2, 2, 0, 0, 0, 1, 1, 1])) == pytest.approx(0.0, EPS)
assert obj.score(np.array([0, 0]),
np.array([1, 2])) == pytest.approx(0.0, EPS)
def test_precision_weighted_multi():
obj = PrecisionWeighted()
assert obj.score(np.array([0, 0, 0, 1, 1, 1, 2, 2, 2]),
np.array([0, 0, 0, 0, 0, 0, 0, 0, 0])) == pytest.approx(1 / 9.0, EPS)
assert obj.score(np.array([0, 0, 0, 1, 1, 1, 2, 2, 2]),
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])) == pytest.approx(1.0, EPS)
assert obj.score(np.array([0, 0, 0, 1, 1, 1, 2, 2, 2]),
np.array([2, 2, 2, 0, 0, 0, 1, 1, 1])) == pytest.approx(0.0, EPS)
assert obj.score(np.array([0, 0]),
np.array([1, 2])) == pytest.approx(0.0, EPS)
def test_recall_binary():
obj = Recall()
assert obj.score(np.array([0, 0, 0, 1, 1, 1]),
np.array([1, 1, 1, 1, 1, 1])) == pytest.approx(1.0, EPS)
assert obj.score(np.array([0, 0, 0, 1, 1, 1]),
np.array([0, 0, 0, 0, 0, 0])) == pytest.approx(0.0, EPS)
assert obj.score(np.array([1, 1, 1, 1, 1, 1]),
np.array([0, 0, 0, 1, 1, 1])) == pytest.approx(0.5, EPS)
def test_recall_micro_multi():
obj = RecallMicro()
assert obj.score(np.array([0, 0, 0, 1, 1, 1, 2, 2, 2]),
np.array([0, 0, 0, 0, 0, 0, 0, 0, 0])) == pytest.approx(1 / 3.0, EPS)
assert obj.score(np.array([0, 0, 0, 1, 1, 1, 2, 2, 2]),
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])) == pytest.approx(1.0, EPS)
assert obj.score(np.array([0, 0, 0, 1, 1, 1, 2, 2, 2]),
np.array([2, 2, 2, 0, 0, 0, 1, 1, 1])) == pytest.approx(0.0, EPS)
assert obj.score(np.array([0, 0]),
np.array([1, 2])) == pytest.approx(0.0, EPS)
def test_recall_macro_multi():
obj = RecallMacro()
assert obj.score(np.array([0, 0, 0, 1, 1, 1, 2, 2, 2]),
np.array([0, 0, 0, 0, 0, 0, 0, 0, 0])) == pytest.approx(1 / 3.0, EPS)
assert obj.score(np.array([0, 0, 0, 1, 1, 1, 2, 2, 2]),
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])) == pytest.approx(1.0, EPS)
assert obj.score(np.array([0, 0, 0, 1, 1, 1, 2, 2, 2]),
np.array([2, 2, 2, 0, 0, 0, 1, 1, 1])) == pytest.approx(0.0, EPS)
assert obj.score(np.array([0, 0]),
np.array([1, 2])) == pytest.approx(0.0, EPS)
def test_recall_weighted_multi():
obj = RecallWeighted()
assert obj.score(np.array([0, 0, 0, 1, 1, 1, 2, 2, 2]),
np.array([0, 0, 0, 0, 0, 0, 0, 0, 0])) == pytest.approx(1 / 3.0, EPS)
assert obj.score(np.array([0, 0, 0, 1, 1, 1, 2, 2, 2]),
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])) == pytest.approx(1.0, EPS)
assert obj.score(np.array([0, 0, 0, 1, 1, 1, 2, 2, 2]),
np.array([2, 2, 2, 0, 0, 0, 1, 1, 1])) == pytest.approx(0.0, EPS)
assert obj.score(np.array([0, 0]),
np.array([1, 2])) == pytest.approx(0.0, EPS)
def test_log_linear_model():
obj = MeanSquaredLogError()
root_obj = RootMeanSquaredLogError()
s1_predicted = np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])
s1_actual = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0])
s2_predicted = np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])
s2_actual = np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])
s3_predicted = np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])
s3_actual = np.array([2, 2, 2, 0, 0, 0, 1, 1, 1])
assert obj.score(s1_predicted, s1_actual) == pytest.approx(0.562467324910)
assert obj.score(s2_predicted, s2_actual) == pytest.approx(0)
assert obj.score(s3_predicted, s3_actual) == pytest.approx(0.617267976207983)
assert root_obj.score(s1_predicted, s1_actual) == pytest.approx(np.sqrt(0.562467324910))
assert root_obj.score(s2_predicted, s2_actual) == pytest.approx(0)
assert root_obj.score(s3_predicted, s3_actual) == pytest.approx(np.sqrt(0.617267976207983))
def test_mse_linear_model():
obj = MSE()
root_obj = RootMeanSquaredError()
s1_predicted = np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])
s1_actual = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0])
s2_predicted = np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])
s2_actual = np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])
s3_predicted = np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])
s3_actual = np.array([2, 2, 2, 0, 0, 0, 1, 1, 1])
assert obj.score(s1_predicted, s1_actual) == pytest.approx(5. / 3.)
assert obj.score(s2_predicted, s2_actual) == pytest.approx(0)
assert obj.score(s3_predicted, s3_actual) == pytest.approx(2.)
assert root_obj.score(s1_predicted, s1_actual) == pytest.approx(np.sqrt(5. / 3.))
assert root_obj.score(s2_predicted, s2_actual) == pytest.approx(0)
assert root_obj.score(s3_predicted, s3_actual) == pytest.approx(np.sqrt(2.))
def test_mcc_catches_warnings():
y_true = [1, 0, 1, 1]
y_predicted = [0, 0, 0, 0]
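    # all-identical predictions give MCC a zero denominator, so sklearn warns about an invalid value;
    # the evalml wrappers checked afterwards should stay silent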
with pytest.warns(RuntimeWarning) as record:
sk_matthews_corrcoef(y_true, y_predicted)
assert "invalid value" in str(record[-1].message)
with pytest.warns(None) as record:
MCCBinary().objective_function(y_true, y_predicted)
MCCMulticlass().objective_function(y_true, y_predicted)
assert len(record) == 0
def test_mape_time_series_model():
obj = MAPE()
s1_actual = np.array([0, 0, 1, 1, 1, 1, 2, 0, 2])
s1_predicted = np.array([0, 1, 0, 1, 1, 2, 1, 2, 0])
s2_actual = np.array([-1, -2, 1, 3])
s2_predicted = np.array([1, 2, -1, -3])
s3_actual = np.array([1, 2, 4, 2, 1, 2])
s3_predicted = np.array([0, 2, 2, 1, 3, 2])
with pytest.raises(ValueError, match="Mean Absolute Percentage Error cannot be used when targets contain the value 0."):
obj.score(s1_actual, s1_predicted)
assert obj.score(s2_actual, s2_predicted) == pytest.approx(8 / 4 * 100)
assert obj.score(s3_actual, s3_predicted) == pytest.approx(4 / 6 * 100)
assert obj.score(pd.Series(s3_actual, index=range(-12, -6)), s3_predicted) == pytest.approx(4 / 6 * 100)
assert obj.score(pd.Series(s2_actual, index=range(10, 14)),
pd.Series(s2_predicted, index=range(20, 24))) == pytest.approx(8 / 4 * 100)
@pytest.mark.parametrize("objective_class", _all_objectives_dict().values())
def test_calculate_percent_difference(objective_class):
score = 5
reference_score = 10
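    # the sign flips for objectives where lower is better, so an improvement always comes out as a positive percent change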
change = ((-1) ** (not objective_class.greater_is_better) * (score - reference_score)) / reference_score
answer = 100 * change
assert objective_class.calculate_percent_difference(score, reference_score) == answer
assert objective_class.perfect_score is not None
@pytest.mark.parametrize("objective_class,nan_value", product(_all_objectives_dict().values(), [None, np.nan]))
def test_calculate_percent_difference_with_nan(objective_class, nan_value):
assert pd.isna(objective_class.calculate_percent_difference(nan_value, 2))
assert pd.isna(objective_class.calculate_percent_difference(-1, nan_value))
assert pd.isna(objective_class.calculate_percent_difference(nan_value, nan_value))
assert pd.isna(objective_class.calculate_percent_difference(2, 0))
def test_calculate_percent_difference_negative_and_equal_numbers():
assert CostBenefitMatrix.calculate_percent_difference(score=5, baseline_score=5) == 0
assert CostBenefitMatrix.calculate_percent_difference(score=-5, baseline_score=-10) == 50
assert CostBenefitMatrix.calculate_percent_difference(score=-10, baseline_score=-5) == -100
assert CostBenefitMatrix.calculate_percent_difference(score=-5, baseline_score=10) == -150
assert CostBenefitMatrix.calculate_percent_difference(score=10, baseline_score=-5) == 300
# These values are not possible for LogLossBinary but we need them for 100% coverage
# We might add an objective where lower is better that can take negative values in the future
assert LogLossBinary.calculate_percent_difference(score=-5, baseline_score=-10) == -50
assert LogLossBinary.calculate_percent_difference(score=-10, baseline_score=-5) == 100
assert LogLossBinary.calculate_percent_difference(score=-5, baseline_score=10) == 150
assert LogLossBinary.calculate_percent_difference(score=10, baseline_score=-5) == -300
def test_calculate_percent_difference_small():
expected_value = 100 * -1 * np.abs(1e-9 / (1e-9))
assert np.isclose(ExpVariance.calculate_percent_difference(score=0, baseline_score=1e-9), expected_value, atol=1e-8)
assert pd.isna(ExpVariance.calculate_percent_difference(score=0, baseline_score=1e-10))
assert pd.isna(ExpVariance.calculate_percent_difference(score=1e-9, baseline_score=0))
assert pd.isna(ExpVariance.calculate_percent_difference(score=0, baseline_score=0))
| [
"evalml.objectives.PrecisionWeighted",
"numpy.sqrt",
"evalml.objectives.F1Micro",
"evalml.objectives.PrecisionMicro",
"evalml.objectives.F1Macro",
"evalml.objectives.MCCBinary",
"evalml.objectives.RecallMacro",
"sklearn.metrics.matthews_corrcoef",
"numpy.array",
"evalml.objectives.RootMeanSquaredError",
"evalml.objectives.BalancedAccuracyMulticlass",
"evalml.objectives.AccuracyMulticlass",
"evalml.objectives.utils.get_non_core_objectives",
"evalml.objectives.AccuracyBinary",
"evalml.objectives.MAPE",
"evalml.objectives.RecallMicro",
"evalml.objectives.F1Weighted",
"evalml.objectives.utils._all_objectives_dict",
"evalml.objectives.CostBenefitMatrix.calculate_percent_difference",
"evalml.objectives.MSE",
"evalml.objectives.LogLossBinary.calculate_percent_difference",
"numpy.abs",
"evalml.objectives.MeanSquaredLogError",
"evalml.objectives.PrecisionMacro",
"evalml.objectives.RootMeanSquaredLogError",
"pytest.raises",
"evalml.objectives.RecallWeighted",
"evalml.objectives.ExpVariance.calculate_percent_difference",
"evalml.objectives.Recall",
"pytest.approx",
"evalml.objectives.MCCMulticlass",
"evalml.objectives.F1",
"evalml.objectives.Precision",
"evalml.objectives.BalancedAccuracyBinary",
"pytest.warns"
]
| [((843, 865), 'evalml.objectives.utils._all_objectives_dict', '_all_objectives_dict', ([], {}), '()\n', (863, 865), False, 'from evalml.objectives.utils import _all_objectives_dict, get_non_core_objectives\n'), ((1051, 1075), 'numpy.array', 'np.array', (['[np.nan, 0, 0]'], {}), '([np.nan, 0, 0])\n', (1059, 1075), True, 'import numpy as np\n'), ((1089, 1108), 'numpy.array', 'np.array', (['[1, 2, 1]'], {}), '([1, 2, 1])\n', (1097, 1108), True, 'import numpy as np\n'), ((1311, 1335), 'numpy.array', 'np.array', (['[np.nan, 0, 0]'], {}), '([np.nan, 0, 0])\n', (1319, 1335), True, 'import numpy as np\n'), ((1354, 1373), 'numpy.array', 'np.array', (['[1, 2, 0]'], {}), '([1, 2, 0])\n', (1362, 1373), True, 'import numpy as np\n'), ((1571, 1587), 'numpy.array', 'np.array', (['[1, 0]'], {}), '([1, 0])\n', (1579, 1587), True, 'import numpy as np\n'), ((1612, 1645), 'numpy.array', 'np.array', (['[[1, np.nan], [0.1, 0]]'], {}), '([[1, np.nan], [0.1, 0]])\n', (1620, 1645), True, 'import numpy as np\n'), ((1939, 1963), 'numpy.array', 'np.array', (['[np.inf, 0, 0]'], {}), '([np.inf, 0, 0])\n', (1947, 1963), True, 'import numpy as np\n'), ((1977, 1996), 'numpy.array', 'np.array', (['[1, 0, 0]'], {}), '([1, 0, 0])\n', (1985, 1996), True, 'import numpy as np\n'), ((2199, 2223), 'numpy.array', 'np.array', (['[np.inf, 0, 0]'], {}), '([np.inf, 0, 0])\n', (2207, 2223), True, 'import numpy as np\n'), ((2242, 2261), 'numpy.array', 'np.array', (['[1, 0, 0]'], {}), '([1, 0, 0])\n', (2250, 2261), True, 'import numpy as np\n'), ((2459, 2475), 'numpy.array', 'np.array', (['[1, 0]'], {}), '([1, 0])\n', (2467, 2475), True, 'import numpy as np\n'), ((2500, 2533), 'numpy.array', 'np.array', (['[[1, np.inf], [0.1, 0]]'], {}), '([[1, np.inf], [0.1, 0]])\n', (2508, 2533), True, 'import numpy as np\n'), ((2832, 2848), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (2840, 2848), True, 'import numpy as np\n'), ((2862, 2875), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (2870, 2875), True, 'import numpy as np\n'), ((3075, 3091), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (3083, 3091), True, 'import numpy as np\n'), ((3110, 3129), 'numpy.array', 'np.array', (['[1, 2, 0]'], {}), '([1, 2, 0])\n', (3118, 3129), True, 'import numpy as np\n'), ((3366, 3378), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (3374, 3378), True, 'import numpy as np\n'), ((3392, 3404), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (3400, 3404), True, 'import numpy as np\n'), ((3641, 3668), 'numpy.array', 'np.array', (['[0.3, 1.001, 0.3]'], {}), '([0.3, 1.001, 0.3])\n', (3649, 3668), True, 'import numpy as np\n'), ((3682, 3701), 'numpy.array', 'np.array', (['[1, 0, 1]'], {}), '([1, 0, 1])\n', (3690, 3701), True, 'import numpy as np\n'), ((3963, 3991), 'numpy.array', 'np.array', (['[0.3, -0.001, 0.3]'], {}), '([0.3, -0.001, 0.3])\n', (3971, 3991), True, 'import numpy as np\n'), ((4005, 4024), 'numpy.array', 'np.array', (['[1, 0, 1]'], {}), '([1, 0, 1])\n', (4013, 4024), True, 'import numpy as np\n'), ((4281, 4297), 'numpy.array', 'np.array', (['[1, 0]'], {}), '([1, 0])\n', (4289, 4297), True, 'import numpy as np\n'), ((4322, 4350), 'numpy.array', 'np.array', (['[[1, 3], [0.1, 0]]'], {}), '([[1, 3], [0.1, 0]])\n', (4330, 4350), True, 'import numpy as np\n'), ((4649, 4671), 'numpy.array', 'np.array', (['[-1, 10, 30]'], {}), '([-1, 10, 30])\n', (4657, 4671), True, 'import numpy as np\n'), ((4685, 4705), 'numpy.array', 'np.array', (['[-1, 0, 1]'], {}), '([-1, 0, 1])\n', (4693, 4705), True, 'import numpy as np\n'), 
((5028, 5047), 'numpy.array', 'np.array', (['[0, 1, 2]'], {}), '([0, 1, 2])\n', (5036, 5047), True, 'import numpy as np\n'), ((5061, 5080), 'numpy.array', 'np.array', (['[1, 0, 1]'], {}), '([1, 0, 1])\n', (5069, 5080), True, 'import numpy as np\n'), ((5404, 5423), 'numpy.array', 'np.array', (['[0, 1, 2]'], {}), '([0, 1, 2])\n', (5412, 5423), True, 'import numpy as np\n'), ((5442, 5461), 'numpy.array', 'np.array', (['[1, 0, 1]'], {}), '([1, 0, 1])\n', (5450, 5461), True, 'import numpy as np\n'), ((5806, 5822), 'evalml.objectives.AccuracyBinary', 'AccuracyBinary', ([], {}), '()\n', (5820, 5822), False, 'from evalml.objectives import F1, MAPE, MSE, AccuracyBinary, AccuracyMulticlass, BalancedAccuracyBinary, BalancedAccuracyMulticlass, BinaryClassificationObjective, CostBenefitMatrix, ExpVariance, F1Macro, F1Micro, F1Weighted, LogLossBinary, MCCBinary, MCCMulticlass, MeanSquaredLogError, Precision, PrecisionMacro, PrecisionMicro, PrecisionWeighted, Recall, RecallMacro, RecallMicro, RecallWeighted, RootMeanSquaredError, RootMeanSquaredLogError\n'), ((6213, 6233), 'evalml.objectives.AccuracyMulticlass', 'AccuracyMulticlass', ([], {}), '()\n', (6231, 6233), False, 'from evalml.objectives import F1, MAPE, MSE, AccuracyBinary, AccuracyMulticlass, BalancedAccuracyBinary, BalancedAccuracyMulticlass, BinaryClassificationObjective, CostBenefitMatrix, ExpVariance, F1Macro, F1Micro, F1Weighted, LogLossBinary, MCCBinary, MCCMulticlass, MeanSquaredLogError, Precision, PrecisionMacro, PrecisionMicro, PrecisionWeighted, Recall, RecallMacro, RecallMicro, RecallWeighted, RootMeanSquaredError, RootMeanSquaredLogError\n'), ((6900, 6924), 'evalml.objectives.BalancedAccuracyBinary', 'BalancedAccuracyBinary', ([], {}), '()\n', (6922, 6924), False, 'from evalml.objectives import F1, MAPE, MSE, AccuracyBinary, AccuracyMulticlass, BalancedAccuracyBinary, BalancedAccuracyMulticlass, BinaryClassificationObjective, CostBenefitMatrix, ExpVariance, F1Macro, F1Micro, F1Weighted, LogLossBinary, MCCBinary, MCCMulticlass, MeanSquaredLogError, Precision, PrecisionMacro, PrecisionMicro, PrecisionWeighted, Recall, RecallMacro, RecallMicro, RecallWeighted, RootMeanSquaredError, RootMeanSquaredLogError\n'), ((7364, 7392), 'evalml.objectives.BalancedAccuracyMulticlass', 'BalancedAccuracyMulticlass', ([], {}), '()\n', (7390, 7392), False, 'from evalml.objectives import F1, MAPE, MSE, AccuracyBinary, AccuracyMulticlass, BalancedAccuracyBinary, BalancedAccuracyMulticlass, BinaryClassificationObjective, CostBenefitMatrix, ExpVariance, F1Macro, F1Micro, F1Weighted, LogLossBinary, MCCBinary, MCCMulticlass, MeanSquaredLogError, Precision, PrecisionMacro, PrecisionMicro, PrecisionWeighted, Recall, RecallMacro, RecallMicro, RecallWeighted, RootMeanSquaredError, RootMeanSquaredLogError\n'), ((7835, 7839), 'evalml.objectives.F1', 'F1', ([], {}), '()\n', (7837, 7839), False, 'from evalml.objectives import F1, MAPE, MSE, AccuracyBinary, AccuracyMulticlass, BalancedAccuracyBinary, BalancedAccuracyMulticlass, BinaryClassificationObjective, CostBenefitMatrix, ExpVariance, F1Macro, F1Micro, F1Weighted, LogLossBinary, MCCBinary, MCCMulticlass, MeanSquaredLogError, Precision, PrecisionMacro, PrecisionMicro, PrecisionWeighted, Recall, RecallMacro, RecallMicro, RecallWeighted, RootMeanSquaredError, RootMeanSquaredLogError\n'), ((8374, 8383), 'evalml.objectives.F1Micro', 'F1Micro', ([], {}), '()\n', (8381, 8383), False, 'from evalml.objectives import F1, MAPE, MSE, AccuracyBinary, AccuracyMulticlass, BalancedAccuracyBinary, BalancedAccuracyMulticlass, 
BinaryClassificationObjective, CostBenefitMatrix, ExpVariance, F1Macro, F1Micro, F1Weighted, LogLossBinary, MCCBinary, MCCMulticlass, MeanSquaredLogError, Precision, PrecisionMacro, PrecisionMicro, PrecisionWeighted, Recall, RecallMacro, RecallMicro, RecallWeighted, RootMeanSquaredError, RootMeanSquaredLogError\n'), ((8976, 8985), 'evalml.objectives.F1Macro', 'F1Macro', ([], {}), '()\n', (8983, 8985), False, 'from evalml.objectives import F1, MAPE, MSE, AccuracyBinary, AccuracyMulticlass, BalancedAccuracyBinary, BalancedAccuracyMulticlass, BinaryClassificationObjective, CostBenefitMatrix, ExpVariance, F1Macro, F1Micro, F1Weighted, LogLossBinary, MCCBinary, MCCMulticlass, MeanSquaredLogError, Precision, PrecisionMacro, PrecisionMicro, PrecisionWeighted, Recall, RecallMacro, RecallMicro, RecallWeighted, RootMeanSquaredError, RootMeanSquaredLogError\n'), ((9631, 9643), 'evalml.objectives.F1Weighted', 'F1Weighted', ([], {}), '()\n', (9641, 9643), False, 'from evalml.objectives import F1, MAPE, MSE, AccuracyBinary, AccuracyMulticlass, BalancedAccuracyBinary, BalancedAccuracyMulticlass, BinaryClassificationObjective, CostBenefitMatrix, ExpVariance, F1Macro, F1Micro, F1Weighted, LogLossBinary, MCCBinary, MCCMulticlass, MeanSquaredLogError, Precision, PrecisionMacro, PrecisionMicro, PrecisionWeighted, Recall, RecallMacro, RecallMicro, RecallWeighted, RootMeanSquaredError, RootMeanSquaredLogError\n'), ((10288, 10299), 'evalml.objectives.Precision', 'Precision', ([], {}), '()\n', (10297, 10299), False, 'from evalml.objectives import F1, MAPE, MSE, AccuracyBinary, AccuracyMulticlass, BalancedAccuracyBinary, BalancedAccuracyMulticlass, BinaryClassificationObjective, CostBenefitMatrix, ExpVariance, F1Macro, F1Micro, F1Weighted, LogLossBinary, MCCBinary, MCCMulticlass, MeanSquaredLogError, Precision, PrecisionMacro, PrecisionMicro, PrecisionWeighted, Recall, RecallMacro, RecallMicro, RecallWeighted, RootMeanSquaredError, RootMeanSquaredLogError\n'), ((10865, 10881), 'evalml.objectives.PrecisionMicro', 'PrecisionMicro', ([], {}), '()\n', (10879, 10881), False, 'from evalml.objectives import F1, MAPE, MSE, AccuracyBinary, AccuracyMulticlass, BalancedAccuracyBinary, BalancedAccuracyMulticlass, BinaryClassificationObjective, CostBenefitMatrix, ExpVariance, F1Macro, F1Micro, F1Weighted, LogLossBinary, MCCBinary, MCCMulticlass, MeanSquaredLogError, Precision, PrecisionMacro, PrecisionMicro, PrecisionWeighted, Recall, RecallMacro, RecallMicro, RecallWeighted, RootMeanSquaredError, RootMeanSquaredLogError\n'), ((11481, 11497), 'evalml.objectives.PrecisionMacro', 'PrecisionMacro', ([], {}), '()\n', (11495, 11497), False, 'from evalml.objectives import F1, MAPE, MSE, AccuracyBinary, AccuracyMulticlass, BalancedAccuracyBinary, BalancedAccuracyMulticlass, BinaryClassificationObjective, CostBenefitMatrix, ExpVariance, F1Macro, F1Micro, F1Weighted, LogLossBinary, MCCBinary, MCCMulticlass, MeanSquaredLogError, Precision, PrecisionMacro, PrecisionMicro, PrecisionWeighted, Recall, RecallMacro, RecallMicro, RecallWeighted, RootMeanSquaredError, RootMeanSquaredLogError\n'), ((12100, 12119), 'evalml.objectives.PrecisionWeighted', 'PrecisionWeighted', ([], {}), '()\n', (12117, 12119), False, 'from evalml.objectives import F1, MAPE, MSE, AccuracyBinary, AccuracyMulticlass, BalancedAccuracyBinary, BalancedAccuracyMulticlass, BinaryClassificationObjective, CostBenefitMatrix, ExpVariance, F1Macro, F1Micro, F1Weighted, LogLossBinary, MCCBinary, MCCMulticlass, MeanSquaredLogError, Precision, PrecisionMacro, PrecisionMicro, 
PrecisionWeighted, Recall, RecallMacro, RecallMicro, RecallWeighted, RootMeanSquaredError, RootMeanSquaredLogError\n'), ((12711, 12719), 'evalml.objectives.Recall', 'Recall', ([], {}), '()\n', (12717, 12719), False, 'from evalml.objectives import F1, MAPE, MSE, AccuracyBinary, AccuracyMulticlass, BalancedAccuracyBinary, BalancedAccuracyMulticlass, BinaryClassificationObjective, CostBenefitMatrix, ExpVariance, F1Macro, F1Micro, F1Weighted, LogLossBinary, MCCBinary, MCCMulticlass, MeanSquaredLogError, Precision, PrecisionMacro, PrecisionMicro, PrecisionWeighted, Recall, RecallMacro, RecallMicro, RecallWeighted, RootMeanSquaredError, RootMeanSquaredLogError\n'), ((13152, 13165), 'evalml.objectives.RecallMicro', 'RecallMicro', ([], {}), '()\n', (13163, 13165), False, 'from evalml.objectives import F1, MAPE, MSE, AccuracyBinary, AccuracyMulticlass, BalancedAccuracyBinary, BalancedAccuracyMulticlass, BinaryClassificationObjective, CostBenefitMatrix, ExpVariance, F1Macro, F1Micro, F1Weighted, LogLossBinary, MCCBinary, MCCMulticlass, MeanSquaredLogError, Precision, PrecisionMacro, PrecisionMicro, PrecisionWeighted, Recall, RecallMacro, RecallMicro, RecallWeighted, RootMeanSquaredError, RootMeanSquaredLogError\n'), ((13762, 13775), 'evalml.objectives.RecallMacro', 'RecallMacro', ([], {}), '()\n', (13773, 13775), False, 'from evalml.objectives import F1, MAPE, MSE, AccuracyBinary, AccuracyMulticlass, BalancedAccuracyBinary, BalancedAccuracyMulticlass, BinaryClassificationObjective, CostBenefitMatrix, ExpVariance, F1Macro, F1Micro, F1Weighted, LogLossBinary, MCCBinary, MCCMulticlass, MeanSquaredLogError, Precision, PrecisionMacro, PrecisionMicro, PrecisionWeighted, Recall, RecallMacro, RecallMicro, RecallWeighted, RootMeanSquaredError, RootMeanSquaredLogError\n'), ((14375, 14391), 'evalml.objectives.RecallWeighted', 'RecallWeighted', ([], {}), '()\n', (14389, 14391), False, 'from evalml.objectives import F1, MAPE, MSE, AccuracyBinary, AccuracyMulticlass, BalancedAccuracyBinary, BalancedAccuracyMulticlass, BinaryClassificationObjective, CostBenefitMatrix, ExpVariance, F1Macro, F1Micro, F1Weighted, LogLossBinary, MCCBinary, MCCMulticlass, MeanSquaredLogError, Precision, PrecisionMacro, PrecisionMicro, PrecisionWeighted, Recall, RecallMacro, RecallMicro, RecallWeighted, RootMeanSquaredError, RootMeanSquaredLogError\n'), ((14986, 15007), 'evalml.objectives.MeanSquaredLogError', 'MeanSquaredLogError', ([], {}), '()\n', (15005, 15007), False, 'from evalml.objectives import F1, MAPE, MSE, AccuracyBinary, AccuracyMulticlass, BalancedAccuracyBinary, BalancedAccuracyMulticlass, BinaryClassificationObjective, CostBenefitMatrix, ExpVariance, F1Macro, F1Micro, F1Weighted, LogLossBinary, MCCBinary, MCCMulticlass, MeanSquaredLogError, Precision, PrecisionMacro, PrecisionMicro, PrecisionWeighted, Recall, RecallMacro, RecallMicro, RecallWeighted, RootMeanSquaredError, RootMeanSquaredLogError\n'), ((15023, 15048), 'evalml.objectives.RootMeanSquaredLogError', 'RootMeanSquaredLogError', ([], {}), '()\n', (15046, 15048), False, 'from evalml.objectives import F1, MAPE, MSE, AccuracyBinary, AccuracyMulticlass, BalancedAccuracyBinary, BalancedAccuracyMulticlass, BinaryClassificationObjective, CostBenefitMatrix, ExpVariance, F1Macro, F1Micro, F1Weighted, LogLossBinary, MCCBinary, MCCMulticlass, MeanSquaredLogError, Precision, PrecisionMacro, PrecisionMicro, PrecisionWeighted, Recall, RecallMacro, RecallMicro, RecallWeighted, RootMeanSquaredError, RootMeanSquaredLogError\n'), ((15069, 15106), 'numpy.array', 'np.array', (['[0, 
0, 0, 1, 1, 1, 2, 2, 2]'], {}), '([0, 0, 0, 1, 1, 1, 2, 2, 2])\n', (15077, 15106), True, 'import numpy as np\n'), ((15123, 15160), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 0, 0, 0, 0, 0]'], {}), '([0, 0, 0, 0, 0, 0, 0, 0, 0])\n', (15131, 15160), True, 'import numpy as np\n'), ((15181, 15218), 'numpy.array', 'np.array', (['[0, 0, 0, 1, 1, 1, 2, 2, 2]'], {}), '([0, 0, 0, 1, 1, 1, 2, 2, 2])\n', (15189, 15218), True, 'import numpy as np\n'), ((15235, 15272), 'numpy.array', 'np.array', (['[0, 0, 0, 1, 1, 1, 2, 2, 2]'], {}), '([0, 0, 0, 1, 1, 1, 2, 2, 2])\n', (15243, 15272), True, 'import numpy as np\n'), ((15293, 15330), 'numpy.array', 'np.array', (['[0, 0, 0, 1, 1, 1, 2, 2, 2]'], {}), '([0, 0, 0, 1, 1, 1, 2, 2, 2])\n', (15301, 15330), True, 'import numpy as np\n'), ((15347, 15384), 'numpy.array', 'np.array', (['[2, 2, 2, 0, 0, 0, 1, 1, 1]'], {}), '([2, 2, 2, 0, 0, 0, 1, 1, 1])\n', (15355, 15384), True, 'import numpy as np\n'), ((15915, 15920), 'evalml.objectives.MSE', 'MSE', ([], {}), '()\n', (15918, 15920), False, 'from evalml.objectives import F1, MAPE, MSE, AccuracyBinary, AccuracyMulticlass, BalancedAccuracyBinary, BalancedAccuracyMulticlass, BinaryClassificationObjective, CostBenefitMatrix, ExpVariance, F1Macro, F1Micro, F1Weighted, LogLossBinary, MCCBinary, MCCMulticlass, MeanSquaredLogError, Precision, PrecisionMacro, PrecisionMicro, PrecisionWeighted, Recall, RecallMacro, RecallMicro, RecallWeighted, RootMeanSquaredError, RootMeanSquaredLogError\n'), ((15936, 15958), 'evalml.objectives.RootMeanSquaredError', 'RootMeanSquaredError', ([], {}), '()\n', (15956, 15958), False, 'from evalml.objectives import F1, MAPE, MSE, AccuracyBinary, AccuracyMulticlass, BalancedAccuracyBinary, BalancedAccuracyMulticlass, BinaryClassificationObjective, CostBenefitMatrix, ExpVariance, F1Macro, F1Micro, F1Weighted, LogLossBinary, MCCBinary, MCCMulticlass, MeanSquaredLogError, Precision, PrecisionMacro, PrecisionMicro, PrecisionWeighted, Recall, RecallMacro, RecallMicro, RecallWeighted, RootMeanSquaredError, RootMeanSquaredLogError\n'), ((15979, 16016), 'numpy.array', 'np.array', (['[0, 0, 0, 1, 1, 1, 2, 2, 2]'], {}), '([0, 0, 0, 1, 1, 1, 2, 2, 2])\n', (15987, 16016), True, 'import numpy as np\n'), ((16033, 16070), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 0, 0, 0, 0, 0]'], {}), '([0, 0, 0, 0, 0, 0, 0, 0, 0])\n', (16041, 16070), True, 'import numpy as np\n'), ((16091, 16128), 'numpy.array', 'np.array', (['[0, 0, 0, 1, 1, 1, 2, 2, 2]'], {}), '([0, 0, 0, 1, 1, 1, 2, 2, 2])\n', (16099, 16128), True, 'import numpy as np\n'), ((16145, 16182), 'numpy.array', 'np.array', (['[0, 0, 0, 1, 1, 1, 2, 2, 2]'], {}), '([0, 0, 0, 1, 1, 1, 2, 2, 2])\n', (16153, 16182), True, 'import numpy as np\n'), ((16203, 16240), 'numpy.array', 'np.array', (['[0, 0, 0, 1, 1, 1, 2, 2, 2]'], {}), '([0, 0, 0, 1, 1, 1, 2, 2, 2])\n', (16211, 16240), True, 'import numpy as np\n'), ((16257, 16294), 'numpy.array', 'np.array', (['[2, 2, 2, 0, 0, 0, 1, 1, 1]'], {}), '([2, 2, 2, 0, 0, 0, 1, 1, 1])\n', (16265, 16294), True, 'import numpy as np\n'), ((17231, 17237), 'evalml.objectives.MAPE', 'MAPE', ([], {}), '()\n', (17235, 17237), False, 'from evalml.objectives import F1, MAPE, MSE, AccuracyBinary, AccuracyMulticlass, BalancedAccuracyBinary, BalancedAccuracyMulticlass, BinaryClassificationObjective, CostBenefitMatrix, ExpVariance, F1Macro, F1Micro, F1Weighted, LogLossBinary, MCCBinary, MCCMulticlass, MeanSquaredLogError, Precision, PrecisionMacro, PrecisionMicro, PrecisionWeighted, Recall, RecallMacro, RecallMicro, RecallWeighted, 
RootMeanSquaredError, RootMeanSquaredLogError\n'), ((17255, 17292), 'numpy.array', 'np.array', (['[0, 0, 1, 1, 1, 1, 2, 0, 2]'], {}), '([0, 0, 1, 1, 1, 1, 2, 0, 2])\n', (17263, 17292), True, 'import numpy as np\n'), ((17312, 17349), 'numpy.array', 'np.array', (['[0, 1, 0, 1, 1, 2, 1, 2, 0]'], {}), '([0, 1, 0, 1, 1, 2, 1, 2, 0])\n', (17320, 17349), True, 'import numpy as np\n'), ((17367, 17391), 'numpy.array', 'np.array', (['[-1, -2, 1, 3]'], {}), '([-1, -2, 1, 3])\n', (17375, 17391), True, 'import numpy as np\n'), ((17411, 17435), 'numpy.array', 'np.array', (['[1, 2, -1, -3]'], {}), '([1, 2, -1, -3])\n', (17419, 17435), True, 'import numpy as np\n'), ((17453, 17481), 'numpy.array', 'np.array', (['[1, 2, 4, 2, 1, 2]'], {}), '([1, 2, 4, 2, 1, 2])\n', (17461, 17481), True, 'import numpy as np\n'), ((17501, 17529), 'numpy.array', 'np.array', (['[0, 2, 2, 1, 3, 2]'], {}), '([0, 2, 2, 1, 3, 2])\n', (17509, 17529), True, 'import numpy as np\n'), ((4728, 4749), 'evalml.objectives.MeanSquaredLogError', 'MeanSquaredLogError', ([], {}), '()\n', (4747, 4749), False, 'from evalml.objectives import F1, MAPE, MSE, AccuracyBinary, AccuracyMulticlass, BalancedAccuracyBinary, BalancedAccuracyMulticlass, BinaryClassificationObjective, CostBenefitMatrix, ExpVariance, F1Macro, F1Micro, F1Weighted, LogLossBinary, MCCBinary, MCCMulticlass, MeanSquaredLogError, Precision, PrecisionMacro, PrecisionMicro, PrecisionWeighted, Recall, RecallMacro, RecallMicro, RecallWeighted, RootMeanSquaredError, RootMeanSquaredLogError\n'), ((4751, 4776), 'evalml.objectives.RootMeanSquaredLogError', 'RootMeanSquaredLogError', ([], {}), '()\n', (4774, 4776), False, 'from evalml.objectives import F1, MAPE, MSE, AccuracyBinary, AccuracyMulticlass, BalancedAccuracyBinary, BalancedAccuracyMulticlass, BinaryClassificationObjective, CostBenefitMatrix, ExpVariance, F1Macro, F1Micro, F1Weighted, LogLossBinary, MCCBinary, MCCMulticlass, MeanSquaredLogError, Precision, PrecisionMacro, PrecisionMicro, PrecisionWeighted, Recall, RecallMacro, RecallMicro, RecallWeighted, RootMeanSquaredError, RootMeanSquaredLogError\n'), ((5916, 5939), 'pytest.approx', 'pytest.approx', (['(0.0)', 'EPS'], {}), '(0.0, EPS)\n', (5929, 5939), False, 'import pytest\n'), ((6033, 6056), 'pytest.approx', 'pytest.approx', (['(0.5)', 'EPS'], {}), '(0.5, EPS)\n', (6046, 6056), False, 'import pytest\n'), ((6150, 6173), 'pytest.approx', 'pytest.approx', (['(1.0)', 'EPS'], {}), '(1.0, EPS)\n', (6163, 6173), False, 'import pytest\n'), ((6327, 6350), 'pytest.approx', 'pytest.approx', (['(0.0)', 'EPS'], {}), '(0.0, EPS)\n', (6340, 6350), False, 'import pytest\n'), ((6444, 6467), 'pytest.approx', 'pytest.approx', (['(0.5)', 'EPS'], {}), '(0.5, EPS)\n', (6457, 6467), False, 'import pytest\n'), ((6561, 6584), 'pytest.approx', 'pytest.approx', (['(1.0)', 'EPS'], {}), '(1.0, EPS)\n', (6574, 6584), False, 'import pytest\n'), ((6690, 6717), 'pytest.approx', 'pytest.approx', (['(1 / 3.0)', 'EPS'], {}), '(1 / 3.0, EPS)\n', (6703, 6717), False, 'import pytest\n'), ((6823, 6850), 'pytest.approx', 'pytest.approx', (['(1 / 3.0)', 'EPS'], {}), '(1 / 3.0, EPS)\n', (6836, 6850), False, 'import pytest\n'), ((7030, 7055), 'pytest.approx', 'pytest.approx', (['(0.625)', 'EPS'], {}), '(0.625, EPS)\n', (7043, 7055), False, 'import pytest\n'), ((7162, 7185), 'pytest.approx', 'pytest.approx', (['(1.0)', 'EPS'], {}), '(1.0, EPS)\n', (7175, 7185), False, 'import pytest\n'), ((7292, 7315), 'pytest.approx', 'pytest.approx', (['(0.0)', 'EPS'], {}), '(0.0, EPS)\n', (7305, 7315), False, 'import pytest\n'), 
((7504, 7528), 'pytest.approx', 'pytest.approx', (['(0.75)', 'EPS'], {}), '(0.75, EPS)\n', (7517, 7528), False, 'import pytest\n'), ((7641, 7664), 'pytest.approx', 'pytest.approx', (['(1.0)', 'EPS'], {}), '(1.0, EPS)\n', (7654, 7664), False, 'import pytest\n'), ((7777, 7800), 'pytest.approx', 'pytest.approx', (['(0.0)', 'EPS'], {}), '(0.0, EPS)\n', (7790, 7800), False, 'import pytest\n'), ((7945, 7968), 'pytest.approx', 'pytest.approx', (['(0.5)', 'EPS'], {}), '(0.5, EPS)\n', (7958, 7968), False, 'import pytest\n'), ((8075, 8098), 'pytest.approx', 'pytest.approx', (['(1.0)', 'EPS'], {}), '(1.0, EPS)\n', (8088, 8098), False, 'import pytest\n'), ((8205, 8228), 'pytest.approx', 'pytest.approx', (['(0.0)', 'EPS'], {}), '(0.0, EPS)\n', (8218, 8228), False, 'import pytest\n'), ((8311, 8334), 'pytest.approx', 'pytest.approx', (['(0.0)', 'EPS'], {}), '(0.0, EPS)\n', (8324, 8334), False, 'import pytest\n'), ((8507, 8534), 'pytest.approx', 'pytest.approx', (['(1 / 3.0)', 'EPS'], {}), '(1 / 3.0, EPS)\n', (8520, 8534), False, 'import pytest\n'), ((8659, 8682), 'pytest.approx', 'pytest.approx', (['(1.0)', 'EPS'], {}), '(1.0, EPS)\n', (8672, 8682), False, 'import pytest\n'), ((8807, 8830), 'pytest.approx', 'pytest.approx', (['(0.0)', 'EPS'], {}), '(0.0, EPS)\n', (8820, 8830), False, 'import pytest\n'), ((8913, 8936), 'pytest.approx', 'pytest.approx', (['(0.0)', 'EPS'], {}), '(0.0, EPS)\n', (8926, 8936), False, 'import pytest\n'), ((9119, 9186), 'pytest.approx', 'pytest.approx', (['(2 * (1 / 3.0) * (1 / 9.0) / (1 / 3.0 + 1 / 9.0))', 'EPS'], {}), '(2 * (1 / 3.0) * (1 / 9.0) / (1 / 3.0 + 1 / 9.0), EPS)\n', (9132, 9186), False, 'import pytest\n'), ((9311, 9334), 'pytest.approx', 'pytest.approx', (['(1.0)', 'EPS'], {}), '(1.0, EPS)\n', (9324, 9334), False, 'import pytest\n'), ((9459, 9482), 'pytest.approx', 'pytest.approx', (['(0.0)', 'EPS'], {}), '(0.0, EPS)\n', (9472, 9482), False, 'import pytest\n'), ((9565, 9588), 'pytest.approx', 'pytest.approx', (['(0.0)', 'EPS'], {}), '(0.0, EPS)\n', (9578, 9588), False, 'import pytest\n'), ((9777, 9844), 'pytest.approx', 'pytest.approx', (['(2 * (1 / 3.0) * (1 / 9.0) / (1 / 3.0 + 1 / 9.0))', 'EPS'], {}), '(2 * (1 / 3.0) * (1 / 9.0) / (1 / 3.0 + 1 / 9.0), EPS)\n', (9790, 9844), False, 'import pytest\n'), ((9969, 9992), 'pytest.approx', 'pytest.approx', (['(1.0)', 'EPS'], {}), '(1.0, EPS)\n', (9982, 9992), False, 'import pytest\n'), ((10117, 10140), 'pytest.approx', 'pytest.approx', (['(0.0)', 'EPS'], {}), '(0.0, EPS)\n', (10130, 10140), False, 'import pytest\n'), ((10223, 10246), 'pytest.approx', 'pytest.approx', (['(0.0)', 'EPS'], {}), '(0.0, EPS)\n', (10236, 10246), False, 'import pytest\n'), ((10405, 10428), 'pytest.approx', 'pytest.approx', (['(1.0)', 'EPS'], {}), '(1.0, EPS)\n', (10418, 10428), False, 'import pytest\n'), ((10535, 10558), 'pytest.approx', 'pytest.approx', (['(0.5)', 'EPS'], {}), '(0.5, EPS)\n', (10548, 10558), False, 'import pytest\n'), ((10665, 10688), 'pytest.approx', 'pytest.approx', (['(0.0)', 'EPS'], {}), '(0.0, EPS)\n', (10678, 10688), False, 'import pytest\n'), ((10795, 10818), 'pytest.approx', 'pytest.approx', (['(0.0)', 'EPS'], {}), '(0.0, EPS)\n', (10808, 10818), False, 'import pytest\n'), ((11005, 11032), 'pytest.approx', 'pytest.approx', (['(1 / 3.0)', 'EPS'], {}), '(1 / 3.0, EPS)\n', (11018, 11032), False, 'import pytest\n'), ((11157, 11180), 'pytest.approx', 'pytest.approx', (['(1.0)', 'EPS'], {}), '(1.0, EPS)\n', (11170, 11180), False, 'import pytest\n'), ((11305, 11328), 'pytest.approx', 'pytest.approx', (['(0.0)', 'EPS'], {}), 
'(0.0, EPS)\n', (11318, 11328), False, 'import pytest\n'), ((11411, 11434), 'pytest.approx', 'pytest.approx', (['(0.0)', 'EPS'], {}), '(0.0, EPS)\n', (11424, 11434), False, 'import pytest\n'), ((11621, 11648), 'pytest.approx', 'pytest.approx', (['(1 / 9.0)', 'EPS'], {}), '(1 / 9.0, EPS)\n', (11634, 11648), False, 'import pytest\n'), ((11773, 11796), 'pytest.approx', 'pytest.approx', (['(1.0)', 'EPS'], {}), '(1.0, EPS)\n', (11786, 11796), False, 'import pytest\n'), ((11921, 11944), 'pytest.approx', 'pytest.approx', (['(0.0)', 'EPS'], {}), '(0.0, EPS)\n', (11934, 11944), False, 'import pytest\n'), ((12027, 12050), 'pytest.approx', 'pytest.approx', (['(0.0)', 'EPS'], {}), '(0.0, EPS)\n', (12040, 12050), False, 'import pytest\n'), ((12243, 12270), 'pytest.approx', 'pytest.approx', (['(1 / 9.0)', 'EPS'], {}), '(1 / 9.0, EPS)\n', (12256, 12270), False, 'import pytest\n'), ((12395, 12418), 'pytest.approx', 'pytest.approx', (['(1.0)', 'EPS'], {}), '(1.0, EPS)\n', (12408, 12418), False, 'import pytest\n'), ((12543, 12566), 'pytest.approx', 'pytest.approx', (['(0.0)', 'EPS'], {}), '(0.0, EPS)\n', (12556, 12566), False, 'import pytest\n'), ((12649, 12672), 'pytest.approx', 'pytest.approx', (['(0.0)', 'EPS'], {}), '(0.0, EPS)\n', (12662, 12672), False, 'import pytest\n'), ((12825, 12848), 'pytest.approx', 'pytest.approx', (['(1.0)', 'EPS'], {}), '(1.0, EPS)\n', (12838, 12848), False, 'import pytest\n'), ((12955, 12978), 'pytest.approx', 'pytest.approx', (['(0.0)', 'EPS'], {}), '(0.0, EPS)\n', (12968, 12978), False, 'import pytest\n'), ((13085, 13108), 'pytest.approx', 'pytest.approx', (['(0.5)', 'EPS'], {}), '(0.5, EPS)\n', (13098, 13108), False, 'import pytest\n'), ((13289, 13316), 'pytest.approx', 'pytest.approx', (['(1 / 3.0)', 'EPS'], {}), '(1 / 3.0, EPS)\n', (13302, 13316), False, 'import pytest\n'), ((13441, 13464), 'pytest.approx', 'pytest.approx', (['(1.0)', 'EPS'], {}), '(1.0, EPS)\n', (13454, 13464), False, 'import pytest\n'), ((13589, 13612), 'pytest.approx', 'pytest.approx', (['(0.0)', 'EPS'], {}), '(0.0, EPS)\n', (13602, 13612), False, 'import pytest\n'), ((13695, 13718), 'pytest.approx', 'pytest.approx', (['(0.0)', 'EPS'], {}), '(0.0, EPS)\n', (13708, 13718), False, 'import pytest\n'), ((13899, 13926), 'pytest.approx', 'pytest.approx', (['(1 / 3.0)', 'EPS'], {}), '(1 / 3.0, EPS)\n', (13912, 13926), False, 'import pytest\n'), ((14051, 14074), 'pytest.approx', 'pytest.approx', (['(1.0)', 'EPS'], {}), '(1.0, EPS)\n', (14064, 14074), False, 'import pytest\n'), ((14199, 14222), 'pytest.approx', 'pytest.approx', (['(0.0)', 'EPS'], {}), '(0.0, EPS)\n', (14212, 14222), False, 'import pytest\n'), ((14305, 14328), 'pytest.approx', 'pytest.approx', (['(0.0)', 'EPS'], {}), '(0.0, EPS)\n', (14318, 14328), False, 'import pytest\n'), ((14515, 14542), 'pytest.approx', 'pytest.approx', (['(1 / 3.0)', 'EPS'], {}), '(1 / 3.0, EPS)\n', (14528, 14542), False, 'import pytest\n'), ((14667, 14690), 'pytest.approx', 'pytest.approx', (['(1.0)', 'EPS'], {}), '(1.0, EPS)\n', (14680, 14690), False, 'import pytest\n'), ((14815, 14838), 'pytest.approx', 'pytest.approx', (['(0.0)', 'EPS'], {}), '(0.0, EPS)\n', (14828, 14838), False, 'import pytest\n'), ((14921, 14944), 'pytest.approx', 'pytest.approx', (['(0.0)', 'EPS'], {}), '(0.0, EPS)\n', (14934, 14944), False, 'import pytest\n'), ((15435, 15463), 'pytest.approx', 'pytest.approx', (['(0.56246732491)'], {}), '(0.56246732491)\n', (15448, 15463), False, 'import pytest\n'), ((15514, 15530), 'pytest.approx', 'pytest.approx', (['(0)'], {}), '(0)\n', (15527, 15530), False, 
'import pytest\n'), ((15580, 15612), 'pytest.approx', 'pytest.approx', (['(0.617267976207983)'], {}), '(0.617267976207983)\n', (15593, 15612), False, 'import pytest\n'), ((15761, 15777), 'pytest.approx', 'pytest.approx', (['(0)'], {}), '(0)\n', (15774, 15777), False, 'import pytest\n'), ((16345, 16369), 'pytest.approx', 'pytest.approx', (['(5.0 / 3.0)'], {}), '(5.0 / 3.0)\n', (16358, 16369), False, 'import pytest\n'), ((16417, 16433), 'pytest.approx', 'pytest.approx', (['(0)'], {}), '(0)\n', (16430, 16433), False, 'import pytest\n'), ((16483, 16501), 'pytest.approx', 'pytest.approx', (['(2.0)'], {}), '(2.0)\n', (16496, 16501), False, 'import pytest\n'), ((16642, 16658), 'pytest.approx', 'pytest.approx', (['(0)'], {}), '(0)\n', (16655, 16658), False, 'import pytest\n'), ((16841, 16869), 'pytest.warns', 'pytest.warns', (['RuntimeWarning'], {}), '(RuntimeWarning)\n', (16853, 16869), False, 'import pytest\n'), ((16889, 16930), 'sklearn.metrics.matthews_corrcoef', 'sk_matthews_corrcoef', (['y_true', 'y_predicted'], {}), '(y_true, y_predicted)\n', (16909, 16930), True, 'from sklearn.metrics import matthews_corrcoef as sk_matthews_corrcoef\n'), ((16998, 17016), 'pytest.warns', 'pytest.warns', (['None'], {}), '(None)\n', (17010, 17016), False, 'import pytest\n'), ((17540, 17664), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""Mean Absolute Percentage Error cannot be used when targets contain the value 0."""'}), "(ValueError, match=\n 'Mean Absolute Percentage Error cannot be used when targets contain the value 0.'\n )\n", (17553, 17664), False, 'import pytest\n'), ((17748, 17774), 'pytest.approx', 'pytest.approx', (['(8 / 4 * 100)'], {}), '(8 / 4 * 100)\n', (17761, 17774), False, 'import pytest\n'), ((17824, 17850), 'pytest.approx', 'pytest.approx', (['(4 / 6 * 100)'], {}), '(4 / 6 * 100)\n', (17837, 17850), False, 'import pytest\n'), ((17933, 17959), 'pytest.approx', 'pytest.approx', (['(4 / 6 * 100)'], {}), '(4 / 6 * 100)\n', (17946, 17959), False, 'import pytest\n'), ((18094, 18120), 'pytest.approx', 'pytest.approx', (['(8 / 4 * 100)'], {}), '(8 / 4 * 100)\n', (18107, 18120), False, 'import pytest\n'), ((19166, 19239), 'evalml.objectives.CostBenefitMatrix.calculate_percent_difference', 'CostBenefitMatrix.calculate_percent_difference', ([], {'score': '(5)', 'baseline_score': '(5)'}), '(score=5, baseline_score=5)\n', (19212, 19239), False, 'from evalml.objectives import F1, MAPE, MSE, AccuracyBinary, AccuracyMulticlass, BalancedAccuracyBinary, BalancedAccuracyMulticlass, BinaryClassificationObjective, CostBenefitMatrix, ExpVariance, F1Macro, F1Micro, F1Weighted, LogLossBinary, MCCBinary, MCCMulticlass, MeanSquaredLogError, Precision, PrecisionMacro, PrecisionMicro, PrecisionWeighted, Recall, RecallMacro, RecallMicro, RecallWeighted, RootMeanSquaredError, RootMeanSquaredLogError\n'), ((19257, 19333), 'evalml.objectives.CostBenefitMatrix.calculate_percent_difference', 'CostBenefitMatrix.calculate_percent_difference', ([], {'score': '(-5)', 'baseline_score': '(-10)'}), '(score=-5, baseline_score=-10)\n', (19303, 19333), False, 'from evalml.objectives import F1, MAPE, MSE, AccuracyBinary, AccuracyMulticlass, BalancedAccuracyBinary, BalancedAccuracyMulticlass, BinaryClassificationObjective, CostBenefitMatrix, ExpVariance, F1Macro, F1Micro, F1Weighted, LogLossBinary, MCCBinary, MCCMulticlass, MeanSquaredLogError, Precision, PrecisionMacro, PrecisionMicro, PrecisionWeighted, Recall, RecallMacro, RecallMicro, RecallWeighted, RootMeanSquaredError, RootMeanSquaredLogError\n'), ((19351, 
19427), 'evalml.objectives.CostBenefitMatrix.calculate_percent_difference', 'CostBenefitMatrix.calculate_percent_difference', ([], {'score': '(-10)', 'baseline_score': '(-5)'}), '(score=-10, baseline_score=-5)\n', (19397, 19427), False, 'from evalml.objectives import F1, MAPE, MSE, AccuracyBinary, AccuracyMulticlass, BalancedAccuracyBinary, BalancedAccuracyMulticlass, BinaryClassificationObjective, CostBenefitMatrix, ExpVariance, F1Macro, F1Micro, F1Weighted, LogLossBinary, MCCBinary, MCCMulticlass, MeanSquaredLogError, Precision, PrecisionMacro, PrecisionMicro, PrecisionWeighted, Recall, RecallMacro, RecallMicro, RecallWeighted, RootMeanSquaredError, RootMeanSquaredLogError\n'), ((19447, 19522), 'evalml.objectives.CostBenefitMatrix.calculate_percent_difference', 'CostBenefitMatrix.calculate_percent_difference', ([], {'score': '(-5)', 'baseline_score': '(10)'}), '(score=-5, baseline_score=10)\n', (19493, 19522), False, 'from evalml.objectives import F1, MAPE, MSE, AccuracyBinary, AccuracyMulticlass, BalancedAccuracyBinary, BalancedAccuracyMulticlass, BinaryClassificationObjective, CostBenefitMatrix, ExpVariance, F1Macro, F1Micro, F1Weighted, LogLossBinary, MCCBinary, MCCMulticlass, MeanSquaredLogError, Precision, PrecisionMacro, PrecisionMicro, PrecisionWeighted, Recall, RecallMacro, RecallMicro, RecallWeighted, RootMeanSquaredError, RootMeanSquaredLogError\n'), ((19542, 19617), 'evalml.objectives.CostBenefitMatrix.calculate_percent_difference', 'CostBenefitMatrix.calculate_percent_difference', ([], {'score': '(10)', 'baseline_score': '(-5)'}), '(score=10, baseline_score=-5)\n', (19588, 19617), False, 'from evalml.objectives import F1, MAPE, MSE, AccuracyBinary, AccuracyMulticlass, BalancedAccuracyBinary, BalancedAccuracyMulticlass, BinaryClassificationObjective, CostBenefitMatrix, ExpVariance, F1Macro, F1Micro, F1Weighted, LogLossBinary, MCCBinary, MCCMulticlass, MeanSquaredLogError, Precision, PrecisionMacro, PrecisionMicro, PrecisionWeighted, Recall, RecallMacro, RecallMicro, RecallWeighted, RootMeanSquaredError, RootMeanSquaredLogError\n'), ((19824, 19896), 'evalml.objectives.LogLossBinary.calculate_percent_difference', 'LogLossBinary.calculate_percent_difference', ([], {'score': '(-5)', 'baseline_score': '(-10)'}), '(score=-5, baseline_score=-10)\n', (19866, 19896), False, 'from evalml.objectives import F1, MAPE, MSE, AccuracyBinary, AccuracyMulticlass, BalancedAccuracyBinary, BalancedAccuracyMulticlass, BinaryClassificationObjective, CostBenefitMatrix, ExpVariance, F1Macro, F1Micro, F1Weighted, LogLossBinary, MCCBinary, MCCMulticlass, MeanSquaredLogError, Precision, PrecisionMacro, PrecisionMicro, PrecisionWeighted, Recall, RecallMacro, RecallMicro, RecallWeighted, RootMeanSquaredError, RootMeanSquaredLogError\n'), ((19915, 19987), 'evalml.objectives.LogLossBinary.calculate_percent_difference', 'LogLossBinary.calculate_percent_difference', ([], {'score': '(-10)', 'baseline_score': '(-5)'}), '(score=-10, baseline_score=-5)\n', (19957, 19987), False, 'from evalml.objectives import F1, MAPE, MSE, AccuracyBinary, AccuracyMulticlass, BalancedAccuracyBinary, BalancedAccuracyMulticlass, BinaryClassificationObjective, CostBenefitMatrix, ExpVariance, F1Macro, F1Micro, F1Weighted, LogLossBinary, MCCBinary, MCCMulticlass, MeanSquaredLogError, Precision, PrecisionMacro, PrecisionMicro, PrecisionWeighted, Recall, RecallMacro, RecallMicro, RecallWeighted, RootMeanSquaredError, RootMeanSquaredLogError\n'), ((20006, 20077), 'evalml.objectives.LogLossBinary.calculate_percent_difference', 
'LogLossBinary.calculate_percent_difference', ([], {'score': '(-5)', 'baseline_score': '(10)'}), '(score=-5, baseline_score=10)\n', (20048, 20077), False, 'from evalml.objectives import F1, MAPE, MSE, AccuracyBinary, AccuracyMulticlass, BalancedAccuracyBinary, BalancedAccuracyMulticlass, BinaryClassificationObjective, CostBenefitMatrix, ExpVariance, F1Macro, F1Micro, F1Weighted, LogLossBinary, MCCBinary, MCCMulticlass, MeanSquaredLogError, Precision, PrecisionMacro, PrecisionMicro, PrecisionWeighted, Recall, RecallMacro, RecallMicro, RecallWeighted, RootMeanSquaredError, RootMeanSquaredLogError\n'), ((20096, 20167), 'evalml.objectives.LogLossBinary.calculate_percent_difference', 'LogLossBinary.calculate_percent_difference', ([], {'score': '(10)', 'baseline_score': '(-5)'}), '(score=10, baseline_score=-5)\n', (20138, 20167), False, 'from evalml.objectives import F1, MAPE, MSE, AccuracyBinary, AccuracyMulticlass, BalancedAccuracyBinary, BalancedAccuracyMulticlass, BinaryClassificationObjective, CostBenefitMatrix, ExpVariance, F1Macro, F1Micro, F1Weighted, LogLossBinary, MCCBinary, MCCMulticlass, MeanSquaredLogError, Precision, PrecisionMacro, PrecisionMicro, PrecisionWeighted, Recall, RecallMacro, RecallMicro, RecallWeighted, RootMeanSquaredError, RootMeanSquaredLogError\n'), ((20257, 20278), 'numpy.abs', 'np.abs', (['(1e-09 / 1e-09)'], {}), '(1e-09 / 1e-09)\n', (20263, 20278), True, 'import numpy as np\n'), ((20301, 20372), 'evalml.objectives.ExpVariance.calculate_percent_difference', 'ExpVariance.calculate_percent_difference', ([], {'score': '(0)', 'baseline_score': '(1e-09)'}), '(score=0, baseline_score=1e-09)\n', (20341, 20372), False, 'from evalml.objectives import F1, MAPE, MSE, AccuracyBinary, AccuracyMulticlass, BalancedAccuracyBinary, BalancedAccuracyMulticlass, BinaryClassificationObjective, CostBenefitMatrix, ExpVariance, F1Macro, F1Micro, F1Weighted, LogLossBinary, MCCBinary, MCCMulticlass, MeanSquaredLogError, Precision, PrecisionMacro, PrecisionMicro, PrecisionWeighted, Recall, RecallMacro, RecallMicro, RecallWeighted, RootMeanSquaredError, RootMeanSquaredLogError\n'), ((20419, 20490), 'evalml.objectives.ExpVariance.calculate_percent_difference', 'ExpVariance.calculate_percent_difference', ([], {'score': '(0)', 'baseline_score': '(1e-10)'}), '(score=0, baseline_score=1e-10)\n', (20459, 20490), False, 'from evalml.objectives import F1, MAPE, MSE, AccuracyBinary, AccuracyMulticlass, BalancedAccuracyBinary, BalancedAccuracyMulticlass, BinaryClassificationObjective, CostBenefitMatrix, ExpVariance, F1Macro, F1Micro, F1Weighted, LogLossBinary, MCCBinary, MCCMulticlass, MeanSquaredLogError, Precision, PrecisionMacro, PrecisionMicro, PrecisionWeighted, Recall, RecallMacro, RecallMicro, RecallWeighted, RootMeanSquaredError, RootMeanSquaredLogError\n'), ((20511, 20582), 'evalml.objectives.ExpVariance.calculate_percent_difference', 'ExpVariance.calculate_percent_difference', ([], {'score': '(1e-09)', 'baseline_score': '(0)'}), '(score=1e-09, baseline_score=0)\n', (20551, 20582), False, 'from evalml.objectives import F1, MAPE, MSE, AccuracyBinary, AccuracyMulticlass, BalancedAccuracyBinary, BalancedAccuracyMulticlass, BinaryClassificationObjective, CostBenefitMatrix, ExpVariance, F1Macro, F1Micro, F1Weighted, LogLossBinary, MCCBinary, MCCMulticlass, MeanSquaredLogError, Precision, PrecisionMacro, PrecisionMicro, PrecisionWeighted, Recall, RecallMacro, RecallMicro, RecallWeighted, RootMeanSquaredError, RootMeanSquaredLogError\n'), ((20602, 20669), 
'evalml.objectives.ExpVariance.calculate_percent_difference', 'ExpVariance.calculate_percent_difference', ([], {'score': '(0)', 'baseline_score': '(0)'}), '(score=0, baseline_score=0)\n', (20642, 20669), False, 'from evalml.objectives import F1, MAPE, MSE, AccuracyBinary, AccuracyMulticlass, BalancedAccuracyBinary, BalancedAccuracyMulticlass, BinaryClassificationObjective, CostBenefitMatrix, ExpVariance, F1Macro, F1Micro, F1Weighted, LogLossBinary, MCCBinary, MCCMulticlass, MeanSquaredLogError, Precision, PrecisionMacro, PrecisionMicro, PrecisionWeighted, Recall, RecallMacro, RecallMicro, RecallWeighted, RootMeanSquaredError, RootMeanSquaredLogError\n'), ((973, 998), 'evalml.objectives.utils.get_non_core_objectives', 'get_non_core_objectives', ([], {}), '()\n', (996, 998), False, 'from evalml.objectives.utils import _all_objectives_dict, get_non_core_objectives\n'), ((1175, 1246), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""y_predicted contains NaN or infinity"""'}), "(ValueError, match='y_predicted contains NaN or infinity')\n", (1188, 1246), False, 'import pytest\n'), ((1440, 1506), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""y_true contains NaN or infinity"""'}), "(ValueError, match='y_true contains NaN or infinity')\n", (1453, 1506), False, 'import pytest\n'), ((2063, 2134), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""y_predicted contains NaN or infinity"""'}), "(ValueError, match='y_predicted contains NaN or infinity')\n", (2076, 2134), False, 'import pytest\n'), ((2328, 2394), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""y_true contains NaN or infinity"""'}), "(ValueError, match='y_true contains NaN or infinity')\n", (2341, 2394), False, 'import pytest\n'), ((2942, 3010), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""Inputs have mismatched dimensions"""'}), "(ValueError, match='Inputs have mismatched dimensions')\n", (2955, 3010), False, 'import pytest\n'), ((3196, 3264), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""Inputs have mismatched dimensions"""'}), "(ValueError, match='Inputs have mismatched dimensions')\n", (3209, 3264), False, 'import pytest\n'), ((3471, 3527), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""Length of inputs is 0"""'}), "(ValueError, match='Length of inputs is 0')\n", (3484, 3527), False, 'import pytest\n'), ((4792, 4920), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""Mean Squared Logarithmic Error cannot be used when targets contain negative values."""'}), "(ValueError, match=\n 'Mean Squared Logarithmic Error cannot be used when targets contain negative values.'\n )\n", (4805, 4920), False, 'import pytest\n'), ((5844, 5866), 'numpy.array', 'np.array', (['[0, 0, 1, 1]'], {}), '([0, 0, 1, 1])\n', (5852, 5866), True, 'import numpy as np\n'), ((5889, 5911), 'numpy.array', 'np.array', (['[1, 1, 0, 0]'], {}), '([1, 1, 0, 0])\n', (5897, 5911), True, 'import numpy as np\n'), ((5961, 5983), 'numpy.array', 'np.array', (['[0, 0, 1, 1]'], {}), '([0, 0, 1, 1])\n', (5969, 5983), True, 'import numpy as np\n'), ((6006, 6028), 'numpy.array', 'np.array', (['[0, 1, 0, 1]'], {}), '([0, 1, 0, 1])\n', (6014, 6028), True, 'import numpy as np\n'), ((6078, 6100), 'numpy.array', 'np.array', (['[0, 0, 1, 1]'], {}), '([0, 0, 1, 1])\n', (6086, 6100), True, 'import numpy as np\n'), ((6123, 6145), 'numpy.array', 'np.array', (['[0, 0, 1, 1]'], {}), '([0, 0, 1, 1])\n', (6131, 6145), True, 'import numpy as np\n'), ((6255, 6277), 
'numpy.array', 'np.array', (['[0, 0, 1, 1]'], {}), '([0, 0, 1, 1])\n', (6263, 6277), True, 'import numpy as np\n'), ((6300, 6322), 'numpy.array', 'np.array', (['[1, 1, 0, 0]'], {}), '([1, 1, 0, 0])\n', (6308, 6322), True, 'import numpy as np\n'), ((6372, 6394), 'numpy.array', 'np.array', (['[0, 0, 1, 1]'], {}), '([0, 0, 1, 1])\n', (6380, 6394), True, 'import numpy as np\n'), ((6417, 6439), 'numpy.array', 'np.array', (['[0, 1, 0, 1]'], {}), '([0, 1, 0, 1])\n', (6425, 6439), True, 'import numpy as np\n'), ((6489, 6511), 'numpy.array', 'np.array', (['[0, 0, 1, 1]'], {}), '([0, 0, 1, 1])\n', (6497, 6511), True, 'import numpy as np\n'), ((6534, 6556), 'numpy.array', 'np.array', (['[0, 0, 1, 1]'], {}), '([0, 0, 1, 1])\n', (6542, 6556), True, 'import numpy as np\n'), ((6606, 6634), 'numpy.array', 'np.array', (['[0, 0, 1, 1, 2, 2]'], {}), '([0, 0, 1, 1, 2, 2])\n', (6614, 6634), True, 'import numpy as np\n'), ((6657, 6685), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 0, 0]'], {}), '([0, 0, 0, 0, 0, 0])\n', (6665, 6685), True, 'import numpy as np\n'), ((6739, 6767), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 0, 0]'], {}), '([0, 0, 0, 0, 0, 0])\n', (6747, 6767), True, 'import numpy as np\n'), ((6790, 6818), 'numpy.array', 'np.array', (['[0, 0, 1, 1, 2, 2]'], {}), '([0, 0, 1, 1, 2, 2])\n', (6798, 6818), True, 'import numpy as np\n'), ((6946, 6974), 'numpy.array', 'np.array', (['[0, 1, 0, 0, 1, 0]'], {}), '([0, 1, 0, 0, 1, 0])\n', (6954, 6974), True, 'import numpy as np\n'), ((6997, 7025), 'numpy.array', 'np.array', (['[0, 1, 0, 0, 0, 1]'], {}), '([0, 1, 0, 0, 0, 1])\n', (7005, 7025), True, 'import numpy as np\n'), ((7078, 7106), 'numpy.array', 'np.array', (['[0, 1, 0, 0, 1, 0]'], {}), '([0, 1, 0, 0, 1, 0])\n', (7086, 7106), True, 'import numpy as np\n'), ((7129, 7157), 'numpy.array', 'np.array', (['[0, 1, 0, 0, 1, 0]'], {}), '([0, 1, 0, 0, 1, 0])\n', (7137, 7157), True, 'import numpy as np\n'), ((7208, 7236), 'numpy.array', 'np.array', (['[0, 1, 0, 0, 1, 0]'], {}), '([0, 1, 0, 0, 1, 0])\n', (7216, 7236), True, 'import numpy as np\n'), ((7259, 7287), 'numpy.array', 'np.array', (['[1, 0, 1, 1, 0, 1]'], {}), '([1, 0, 1, 1, 0, 1])\n', (7267, 7287), True, 'import numpy as np\n'), ((7414, 7445), 'numpy.array', 'np.array', (['[0, 1, 2, 0, 1, 2, 3]'], {}), '([0, 1, 2, 0, 1, 2, 3])\n', (7422, 7445), True, 'import numpy as np\n'), ((7468, 7499), 'numpy.array', 'np.array', (['[0, 0, 2, 0, 0, 2, 3]'], {}), '([0, 0, 2, 0, 0, 2, 3])\n', (7476, 7499), True, 'import numpy as np\n'), ((7551, 7582), 'numpy.array', 'np.array', (['[0, 1, 2, 0, 1, 2, 3]'], {}), '([0, 1, 2, 0, 1, 2, 3])\n', (7559, 7582), True, 'import numpy as np\n'), ((7605, 7636), 'numpy.array', 'np.array', (['[0, 1, 2, 0, 1, 2, 3]'], {}), '([0, 1, 2, 0, 1, 2, 3])\n', (7613, 7636), True, 'import numpy as np\n'), ((7687, 7718), 'numpy.array', 'np.array', (['[0, 1, 2, 0, 1, 2, 3]'], {}), '([0, 1, 2, 0, 1, 2, 3])\n', (7695, 7718), True, 'import numpy as np\n'), ((7741, 7772), 'numpy.array', 'np.array', (['[1, 0, 3, 1, 2, 1, 0]'], {}), '([1, 0, 3, 1, 2, 1, 0])\n', (7749, 7772), True, 'import numpy as np\n'), ((7861, 7889), 'numpy.array', 'np.array', (['[0, 1, 0, 0, 1, 0]'], {}), '([0, 1, 0, 0, 1, 0])\n', (7869, 7889), True, 'import numpy as np\n'), ((7912, 7940), 'numpy.array', 'np.array', (['[0, 1, 0, 0, 0, 1]'], {}), '([0, 1, 0, 0, 0, 1])\n', (7920, 7940), True, 'import numpy as np\n'), ((7991, 8019), 'numpy.array', 'np.array', (['[0, 1, 0, 0, 1, 1]'], {}), '([0, 1, 0, 0, 1, 1])\n', (7999, 8019), True, 'import numpy as np\n'), ((8042, 8070), 'numpy.array', 
'np.array', (['[0, 1, 0, 0, 1, 1]'], {}), '([0, 1, 0, 0, 1, 1])\n', (8050, 8070), True, 'import numpy as np\n'), ((8121, 8149), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 1, 0]'], {}), '([0, 0, 0, 0, 1, 0])\n', (8129, 8149), True, 'import numpy as np\n'), ((8172, 8200), 'numpy.array', 'np.array', (['[0, 1, 0, 0, 0, 1]'], {}), '([0, 1, 0, 0, 0, 1])\n', (8180, 8200), True, 'import numpy as np\n'), ((8251, 8267), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (8259, 8267), True, 'import numpy as np\n'), ((8290, 8306), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (8298, 8306), True, 'import numpy as np\n'), ((8405, 8442), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 0, 0, 0, 0, 0]'], {}), '([0, 0, 0, 0, 0, 0, 0, 0, 0])\n', (8413, 8442), True, 'import numpy as np\n'), ((8465, 8502), 'numpy.array', 'np.array', (['[0, 0, 0, 1, 1, 1, 2, 2, 2]'], {}), '([0, 0, 0, 1, 1, 1, 2, 2, 2])\n', (8473, 8502), True, 'import numpy as np\n'), ((8557, 8594), 'numpy.array', 'np.array', (['[0, 0, 0, 1, 1, 1, 2, 2, 2]'], {}), '([0, 0, 0, 1, 1, 1, 2, 2, 2])\n', (8565, 8594), True, 'import numpy as np\n'), ((8617, 8654), 'numpy.array', 'np.array', (['[0, 0, 0, 1, 1, 1, 2, 2, 2]'], {}), '([0, 0, 0, 1, 1, 1, 2, 2, 2])\n', (8625, 8654), True, 'import numpy as np\n'), ((8705, 8742), 'numpy.array', 'np.array', (['[2, 2, 2, 0, 0, 0, 1, 1, 1]'], {}), '([2, 2, 2, 0, 0, 0, 1, 1, 1])\n', (8713, 8742), True, 'import numpy as np\n'), ((8765, 8802), 'numpy.array', 'np.array', (['[0, 0, 0, 1, 1, 1, 2, 2, 2]'], {}), '([0, 0, 0, 1, 1, 1, 2, 2, 2])\n', (8773, 8802), True, 'import numpy as np\n'), ((8853, 8869), 'numpy.array', 'np.array', (['[1, 2]'], {}), '([1, 2])\n', (8861, 8869), True, 'import numpy as np\n'), ((8892, 8908), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (8900, 8908), True, 'import numpy as np\n'), ((9007, 9044), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 0, 0, 0, 0, 0]'], {}), '([0, 0, 0, 0, 0, 0, 0, 0, 0])\n', (9015, 9044), True, 'import numpy as np\n'), ((9067, 9104), 'numpy.array', 'np.array', (['[0, 0, 0, 1, 1, 1, 2, 2, 2]'], {}), '([0, 0, 0, 1, 1, 1, 2, 2, 2])\n', (9075, 9104), True, 'import numpy as np\n'), ((9209, 9246), 'numpy.array', 'np.array', (['[0, 0, 0, 1, 1, 1, 2, 2, 2]'], {}), '([0, 0, 0, 1, 1, 1, 2, 2, 2])\n', (9217, 9246), True, 'import numpy as np\n'), ((9269, 9306), 'numpy.array', 'np.array', (['[0, 0, 0, 1, 1, 1, 2, 2, 2]'], {}), '([0, 0, 0, 1, 1, 1, 2, 2, 2])\n', (9277, 9306), True, 'import numpy as np\n'), ((9357, 9394), 'numpy.array', 'np.array', (['[2, 2, 2, 0, 0, 0, 1, 1, 1]'], {}), '([2, 2, 2, 0, 0, 0, 1, 1, 1])\n', (9365, 9394), True, 'import numpy as np\n'), ((9417, 9454), 'numpy.array', 'np.array', (['[0, 0, 0, 1, 1, 1, 2, 2, 2]'], {}), '([0, 0, 0, 1, 1, 1, 2, 2, 2])\n', (9425, 9454), True, 'import numpy as np\n'), ((9505, 9521), 'numpy.array', 'np.array', (['[1, 2]'], {}), '([1, 2])\n', (9513, 9521), True, 'import numpy as np\n'), ((9544, 9560), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (9552, 9560), True, 'import numpy as np\n'), ((9665, 9702), 'numpy.array', 'np.array', (['[0, 0, 0, 1, 1, 1, 2, 2, 2]'], {}), '([0, 0, 0, 1, 1, 1, 2, 2, 2])\n', (9673, 9702), True, 'import numpy as np\n'), ((9725, 9762), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 0, 0, 0, 0, 0]'], {}), '([0, 0, 0, 0, 0, 0, 0, 0, 0])\n', (9733, 9762), True, 'import numpy as np\n'), ((9867, 9904), 'numpy.array', 'np.array', (['[0, 0, 0, 1, 1, 1, 2, 2, 2]'], {}), '([0, 0, 0, 1, 1, 1, 2, 2, 2])\n', (9875, 9904), True, 'import numpy as np\n'), ((9927, 9964), 
'numpy.array', 'np.array', (['[0, 0, 0, 1, 1, 1, 2, 2, 2]'], {}), '([0, 0, 0, 1, 1, 1, 2, 2, 2])\n', (9935, 9964), True, 'import numpy as np\n'), ((10015, 10052), 'numpy.array', 'np.array', (['[0, 0, 0, 1, 1, 1, 2, 2, 2]'], {}), '([0, 0, 0, 1, 1, 1, 2, 2, 2])\n', (10023, 10052), True, 'import numpy as np\n'), ((10075, 10112), 'numpy.array', 'np.array', (['[2, 2, 2, 0, 0, 0, 1, 1, 1]'], {}), '([2, 2, 2, 0, 0, 0, 1, 1, 1])\n', (10083, 10112), True, 'import numpy as np\n'), ((10163, 10179), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (10171, 10179), True, 'import numpy as np\n'), ((10202, 10218), 'numpy.array', 'np.array', (['[1, 2]'], {}), '([1, 2])\n', (10210, 10218), True, 'import numpy as np\n'), ((10321, 10349), 'numpy.array', 'np.array', (['[1, 1, 1, 1, 1, 1]'], {}), '([1, 1, 1, 1, 1, 1])\n', (10329, 10349), True, 'import numpy as np\n'), ((10372, 10400), 'numpy.array', 'np.array', (['[0, 0, 0, 1, 1, 1]'], {}), '([0, 0, 0, 1, 1, 1])\n', (10380, 10400), True, 'import numpy as np\n'), ((10451, 10479), 'numpy.array', 'np.array', (['[0, 0, 0, 1, 1, 1]'], {}), '([0, 0, 0, 1, 1, 1])\n', (10459, 10479), True, 'import numpy as np\n'), ((10502, 10530), 'numpy.array', 'np.array', (['[1, 1, 1, 1, 1, 1]'], {}), '([1, 1, 1, 1, 1, 1])\n', (10510, 10530), True, 'import numpy as np\n'), ((10581, 10609), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 0, 0]'], {}), '([0, 0, 0, 0, 0, 0])\n', (10589, 10609), True, 'import numpy as np\n'), ((10632, 10660), 'numpy.array', 'np.array', (['[1, 1, 1, 1, 1, 1]'], {}), '([1, 1, 1, 1, 1, 1])\n', (10640, 10660), True, 'import numpy as np\n'), ((10711, 10739), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 0, 0]'], {}), '([0, 0, 0, 0, 0, 0])\n', (10719, 10739), True, 'import numpy as np\n'), ((10762, 10790), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 0, 0]'], {}), '([0, 0, 0, 0, 0, 0])\n', (10770, 10790), True, 'import numpy as np\n'), ((10903, 10940), 'numpy.array', 'np.array', (['[0, 0, 0, 1, 1, 1, 2, 2, 2]'], {}), '([0, 0, 0, 1, 1, 1, 2, 2, 2])\n', (10911, 10940), True, 'import numpy as np\n'), ((10963, 11000), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 0, 0, 0, 0, 0]'], {}), '([0, 0, 0, 0, 0, 0, 0, 0, 0])\n', (10971, 11000), True, 'import numpy as np\n'), ((11055, 11092), 'numpy.array', 'np.array', (['[0, 0, 0, 1, 1, 1, 2, 2, 2]'], {}), '([0, 0, 0, 1, 1, 1, 2, 2, 2])\n', (11063, 11092), True, 'import numpy as np\n'), ((11115, 11152), 'numpy.array', 'np.array', (['[0, 0, 0, 1, 1, 1, 2, 2, 2]'], {}), '([0, 0, 0, 1, 1, 1, 2, 2, 2])\n', (11123, 11152), True, 'import numpy as np\n'), ((11203, 11240), 'numpy.array', 'np.array', (['[0, 0, 0, 1, 1, 1, 2, 2, 2]'], {}), '([0, 0, 0, 1, 1, 1, 2, 2, 2])\n', (11211, 11240), True, 'import numpy as np\n'), ((11263, 11300), 'numpy.array', 'np.array', (['[2, 2, 2, 0, 0, 0, 1, 1, 1]'], {}), '([2, 2, 2, 0, 0, 0, 1, 1, 1])\n', (11271, 11300), True, 'import numpy as np\n'), ((11351, 11367), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (11359, 11367), True, 'import numpy as np\n'), ((11390, 11406), 'numpy.array', 'np.array', (['[1, 2]'], {}), '([1, 2])\n', (11398, 11406), True, 'import numpy as np\n'), ((11519, 11556), 'numpy.array', 'np.array', (['[0, 0, 0, 1, 1, 1, 2, 2, 2]'], {}), '([0, 0, 0, 1, 1, 1, 2, 2, 2])\n', (11527, 11556), True, 'import numpy as np\n'), ((11579, 11616), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 0, 0, 0, 0, 0]'], {}), '([0, 0, 0, 0, 0, 0, 0, 0, 0])\n', (11587, 11616), True, 'import numpy as np\n'), ((11671, 11708), 'numpy.array', 'np.array', (['[0, 0, 0, 1, 1, 1, 2, 2, 2]'], {}), '([0, 
0, 0, 1, 1, 1, 2, 2, 2])\n', (11679, 11708), True, 'import numpy as np\n'), ((11731, 11768), 'numpy.array', 'np.array', (['[0, 0, 0, 1, 1, 1, 2, 2, 2]'], {}), '([0, 0, 0, 1, 1, 1, 2, 2, 2])\n', (11739, 11768), True, 'import numpy as np\n'), ((11819, 11856), 'numpy.array', 'np.array', (['[0, 0, 0, 1, 1, 1, 2, 2, 2]'], {}), '([0, 0, 0, 1, 1, 1, 2, 2, 2])\n', (11827, 11856), True, 'import numpy as np\n'), ((11879, 11916), 'numpy.array', 'np.array', (['[2, 2, 2, 0, 0, 0, 1, 1, 1]'], {}), '([2, 2, 2, 0, 0, 0, 1, 1, 1])\n', (11887, 11916), True, 'import numpy as np\n'), ((11967, 11983), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (11975, 11983), True, 'import numpy as np\n'), ((12006, 12022), 'numpy.array', 'np.array', (['[1, 2]'], {}), '([1, 2])\n', (12014, 12022), True, 'import numpy as np\n'), ((12141, 12178), 'numpy.array', 'np.array', (['[0, 0, 0, 1, 1, 1, 2, 2, 2]'], {}), '([0, 0, 0, 1, 1, 1, 2, 2, 2])\n', (12149, 12178), True, 'import numpy as np\n'), ((12201, 12238), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 0, 0, 0, 0, 0]'], {}), '([0, 0, 0, 0, 0, 0, 0, 0, 0])\n', (12209, 12238), True, 'import numpy as np\n'), ((12293, 12330), 'numpy.array', 'np.array', (['[0, 0, 0, 1, 1, 1, 2, 2, 2]'], {}), '([0, 0, 0, 1, 1, 1, 2, 2, 2])\n', (12301, 12330), True, 'import numpy as np\n'), ((12353, 12390), 'numpy.array', 'np.array', (['[0, 0, 0, 1, 1, 1, 2, 2, 2]'], {}), '([0, 0, 0, 1, 1, 1, 2, 2, 2])\n', (12361, 12390), True, 'import numpy as np\n'), ((12441, 12478), 'numpy.array', 'np.array', (['[0, 0, 0, 1, 1, 1, 2, 2, 2]'], {}), '([0, 0, 0, 1, 1, 1, 2, 2, 2])\n', (12449, 12478), True, 'import numpy as np\n'), ((12501, 12538), 'numpy.array', 'np.array', (['[2, 2, 2, 0, 0, 0, 1, 1, 1]'], {}), '([2, 2, 2, 0, 0, 0, 1, 1, 1])\n', (12509, 12538), True, 'import numpy as np\n'), ((12589, 12605), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (12597, 12605), True, 'import numpy as np\n'), ((12628, 12644), 'numpy.array', 'np.array', (['[1, 2]'], {}), '([1, 2])\n', (12636, 12644), True, 'import numpy as np\n'), ((12741, 12769), 'numpy.array', 'np.array', (['[0, 0, 0, 1, 1, 1]'], {}), '([0, 0, 0, 1, 1, 1])\n', (12749, 12769), True, 'import numpy as np\n'), ((12792, 12820), 'numpy.array', 'np.array', (['[1, 1, 1, 1, 1, 1]'], {}), '([1, 1, 1, 1, 1, 1])\n', (12800, 12820), True, 'import numpy as np\n'), ((12871, 12899), 'numpy.array', 'np.array', (['[0, 0, 0, 1, 1, 1]'], {}), '([0, 0, 0, 1, 1, 1])\n', (12879, 12899), True, 'import numpy as np\n'), ((12922, 12950), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 0, 0]'], {}), '([0, 0, 0, 0, 0, 0])\n', (12930, 12950), True, 'import numpy as np\n'), ((13001, 13029), 'numpy.array', 'np.array', (['[1, 1, 1, 1, 1, 1]'], {}), '([1, 1, 1, 1, 1, 1])\n', (13009, 13029), True, 'import numpy as np\n'), ((13052, 13080), 'numpy.array', 'np.array', (['[0, 0, 0, 1, 1, 1]'], {}), '([0, 0, 0, 1, 1, 1])\n', (13060, 13080), True, 'import numpy as np\n'), ((13187, 13224), 'numpy.array', 'np.array', (['[0, 0, 0, 1, 1, 1, 2, 2, 2]'], {}), '([0, 0, 0, 1, 1, 1, 2, 2, 2])\n', (13195, 13224), True, 'import numpy as np\n'), ((13247, 13284), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 0, 0, 0, 0, 0]'], {}), '([0, 0, 0, 0, 0, 0, 0, 0, 0])\n', (13255, 13284), True, 'import numpy as np\n'), ((13339, 13376), 'numpy.array', 'np.array', (['[0, 0, 0, 1, 1, 1, 2, 2, 2]'], {}), '([0, 0, 0, 1, 1, 1, 2, 2, 2])\n', (13347, 13376), True, 'import numpy as np\n'), ((13399, 13436), 'numpy.array', 'np.array', (['[0, 0, 0, 1, 1, 1, 2, 2, 2]'], {}), '([0, 0, 0, 1, 1, 1, 2, 2, 2])\n', 
(13407, 13436), True, 'import numpy as np\n'), ((13487, 13524), 'numpy.array', 'np.array', (['[0, 0, 0, 1, 1, 1, 2, 2, 2]'], {}), '([0, 0, 0, 1, 1, 1, 2, 2, 2])\n', (13495, 13524), True, 'import numpy as np\n'), ((13547, 13584), 'numpy.array', 'np.array', (['[2, 2, 2, 0, 0, 0, 1, 1, 1]'], {}), '([2, 2, 2, 0, 0, 0, 1, 1, 1])\n', (13555, 13584), True, 'import numpy as np\n'), ((13635, 13651), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (13643, 13651), True, 'import numpy as np\n'), ((13674, 13690), 'numpy.array', 'np.array', (['[1, 2]'], {}), '([1, 2])\n', (13682, 13690), True, 'import numpy as np\n'), ((13797, 13834), 'numpy.array', 'np.array', (['[0, 0, 0, 1, 1, 1, 2, 2, 2]'], {}), '([0, 0, 0, 1, 1, 1, 2, 2, 2])\n', (13805, 13834), True, 'import numpy as np\n'), ((13857, 13894), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 0, 0, 0, 0, 0]'], {}), '([0, 0, 0, 0, 0, 0, 0, 0, 0])\n', (13865, 13894), True, 'import numpy as np\n'), ((13949, 13986), 'numpy.array', 'np.array', (['[0, 0, 0, 1, 1, 1, 2, 2, 2]'], {}), '([0, 0, 0, 1, 1, 1, 2, 2, 2])\n', (13957, 13986), True, 'import numpy as np\n'), ((14009, 14046), 'numpy.array', 'np.array', (['[0, 0, 0, 1, 1, 1, 2, 2, 2]'], {}), '([0, 0, 0, 1, 1, 1, 2, 2, 2])\n', (14017, 14046), True, 'import numpy as np\n'), ((14097, 14134), 'numpy.array', 'np.array', (['[0, 0, 0, 1, 1, 1, 2, 2, 2]'], {}), '([0, 0, 0, 1, 1, 1, 2, 2, 2])\n', (14105, 14134), True, 'import numpy as np\n'), ((14157, 14194), 'numpy.array', 'np.array', (['[2, 2, 2, 0, 0, 0, 1, 1, 1]'], {}), '([2, 2, 2, 0, 0, 0, 1, 1, 1])\n', (14165, 14194), True, 'import numpy as np\n'), ((14245, 14261), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (14253, 14261), True, 'import numpy as np\n'), ((14284, 14300), 'numpy.array', 'np.array', (['[1, 2]'], {}), '([1, 2])\n', (14292, 14300), True, 'import numpy as np\n'), ((14413, 14450), 'numpy.array', 'np.array', (['[0, 0, 0, 1, 1, 1, 2, 2, 2]'], {}), '([0, 0, 0, 1, 1, 1, 2, 2, 2])\n', (14421, 14450), True, 'import numpy as np\n'), ((14473, 14510), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 0, 0, 0, 0, 0]'], {}), '([0, 0, 0, 0, 0, 0, 0, 0, 0])\n', (14481, 14510), True, 'import numpy as np\n'), ((14565, 14602), 'numpy.array', 'np.array', (['[0, 0, 0, 1, 1, 1, 2, 2, 2]'], {}), '([0, 0, 0, 1, 1, 1, 2, 2, 2])\n', (14573, 14602), True, 'import numpy as np\n'), ((14625, 14662), 'numpy.array', 'np.array', (['[0, 0, 0, 1, 1, 1, 2, 2, 2]'], {}), '([0, 0, 0, 1, 1, 1, 2, 2, 2])\n', (14633, 14662), True, 'import numpy as np\n'), ((14713, 14750), 'numpy.array', 'np.array', (['[0, 0, 0, 1, 1, 1, 2, 2, 2]'], {}), '([0, 0, 0, 1, 1, 1, 2, 2, 2])\n', (14721, 14750), True, 'import numpy as np\n'), ((14773, 14810), 'numpy.array', 'np.array', (['[2, 2, 2, 0, 0, 0, 1, 1, 1]'], {}), '([2, 2, 2, 0, 0, 0, 1, 1, 1])\n', (14781, 14810), True, 'import numpy as np\n'), ((14861, 14877), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (14869, 14877), True, 'import numpy as np\n'), ((14900, 14916), 'numpy.array', 'np.array', (['[1, 2]'], {}), '([1, 2])\n', (14908, 14916), True, 'import numpy as np\n'), ((15682, 15704), 'numpy.sqrt', 'np.sqrt', (['(0.56246732491)'], {}), '(0.56246732491)\n', (15689, 15704), True, 'import numpy as np\n'), ((15846, 15872), 'numpy.sqrt', 'np.sqrt', (['(0.617267976207983)'], {}), '(0.617267976207983)\n', (15853, 15872), True, 'import numpy as np\n'), ((16570, 16588), 'numpy.sqrt', 'np.sqrt', (['(5.0 / 3.0)'], {}), '(5.0 / 3.0)\n', (16577, 16588), True, 'import numpy as np\n'), ((16727, 16739), 'numpy.sqrt', 'np.sqrt', 
(['(2.0)'], {}), '(2.0)\n', (16734, 16739), True, 'import numpy as np\n'), ((18167, 18189), 'evalml.objectives.utils._all_objectives_dict', '_all_objectives_dict', ([], {}), '()\n', (18187, 18189), False, 'from evalml.objectives.utils import _all_objectives_dict, get_non_core_objectives\n'), ((1756, 1827), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""y_predicted contains NaN or infinity"""'}), "(ValueError, match='y_predicted contains NaN or infinity')\n", (1769, 1827), False, 'import pytest\n'), ((2644, 2715), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""y_predicted contains NaN or infinity"""'}), "(ValueError, match='y_predicted contains NaN or infinity')\n", (2657, 2715), False, 'import pytest\n'), ((3812, 3889), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""y_predicted contains probability estimates"""'}), "(ValueError, match='y_predicted contains probability estimates')\n", (3825, 3889), False, 'import pytest\n'), ((4135, 4212), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""y_predicted contains probability estimates"""'}), "(ValueError, match='y_predicted contains probability estimates')\n", (4148, 4212), False, 'import pytest\n'), ((4461, 4538), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""y_predicted contains probability estimates"""'}), "(ValueError, match='y_predicted contains probability estimates')\n", (4474, 4538), False, 'import pytest\n'), ((5252, 5340), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""y_predicted contains more than two unique values"""'}), "(ValueError, match=\n 'y_predicted contains more than two unique values')\n", (5265, 5340), False, 'import pytest\n'), ((5633, 5711), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""y_true contains more than two unique values"""'}), "(ValueError, match='y_true contains more than two unique values')\n", (5646, 5711), False, 'import pytest\n'), ((17036, 17047), 'evalml.objectives.MCCBinary', 'MCCBinary', ([], {}), '()\n', (17045, 17047), False, 'from evalml.objectives import F1, MAPE, MSE, AccuracyBinary, AccuracyMulticlass, BalancedAccuracyBinary, BalancedAccuracyMulticlass, BinaryClassificationObjective, CostBenefitMatrix, ExpVariance, F1Macro, F1Micro, F1Weighted, LogLossBinary, MCCBinary, MCCMulticlass, MeanSquaredLogError, Precision, PrecisionMacro, PrecisionMicro, PrecisionWeighted, Recall, RecallMacro, RecallMicro, RecallWeighted, RootMeanSquaredError, RootMeanSquaredLogError\n'), ((17096, 17111), 'evalml.objectives.MCCMulticlass', 'MCCMulticlass', ([], {}), '()\n', (17109, 17111), False, 'from evalml.objectives import F1, MAPE, MSE, AccuracyBinary, AccuracyMulticlass, BalancedAccuracyBinary, BalancedAccuracyMulticlass, BinaryClassificationObjective, CostBenefitMatrix, ExpVariance, F1Macro, F1Micro, F1Weighted, LogLossBinary, MCCBinary, MCCMulticlass, MeanSquaredLogError, Precision, PrecisionMacro, PrecisionMicro, PrecisionWeighted, Recall, RecallMacro, RecallMicro, RecallWeighted, RootMeanSquaredError, RootMeanSquaredLogError\n'), ((18639, 18661), 'evalml.objectives.utils._all_objectives_dict', '_all_objectives_dict', ([], {}), '()\n', (18659, 18661), False, 'from evalml.objectives.utils import _all_objectives_dict, get_non_core_objectives\n')] |
# -*- encoding: utf-8 -*-
"""
Unit test for roger_promote.py
"""
import tests.helper
import unittest
import os
import os.path
import pytest
import requests
from mockito import mock, Mock, when
from cli.roger_promote import RogerPromote
from cli.appconfig import AppConfig
from cli.settings import Settings
from cli.framework import Framework
from cli.frameworkUtils import FrameworkUtils
from cli.marathon import Marathon
from cli.chronos import Chronos
class TestRogerPromote(unittest.TestCase):
def setUp(self):
self.marathon = mock(Marathon)
self.settings = mock(Settings)
self.app_config = mock(AppConfig)
self.framework = self.marathon
self.framework_utils = mock(FrameworkUtils)
self.config_file = "test.yml"
self.roger_env = {}
os.environ['ROGER_CONFIG_DIR'] = '/vagrant/config'
@property
def config_dir(self):
return os.environ['ROGER_CONFIG_DIR']
def test_config_dir(self):
rp = RogerPromote()
assert rp.config_dir == '/vagrant/config'
def test_roger_env(self):
fake_config = tests.helper.fake_config()
settings = mock(Settings)
when(self.app_config).getRogerEnv(
self.config_dir
).thenReturn(fake_config)
rp = RogerPromote(app_config=self.app_config)
assert rp.roger_env == fake_config
def test_set_framework(self):
app_data = {'test_app': {'name': 'test_app'}}
when(self.app_config).getAppData(
self.config_dir, self.config_file, 'test_app'
).thenReturn(app_data)
rp = RogerPromote(app_config=self.app_config)
rp._set_framework(self.config_file, 'test_app')
assert rp._framework.getName() == 'Marathon'
def test_image_name(self):
os.environ['ROGER_USER'] = "first.last"
os.environ['ROGER_USER_PASS_DEV'] = "password"
os.environ['ROGER_USER_PASS_STAGE'] = "password"
os.environ['ROGER_USER_PASS_PROD'] = "password"
framework = mock(Marathon)
when(framework).getName().thenReturn("Marathon")
when(framework).get_app_id(
"test_path/test_app.json",
"Marathon"
).thenReturn("app_id")
when(framework).get_image_name(
'first.last',
"password",
"dev",
"app_id",
self.config_dir,
self.config_file
).thenReturn("test_image")
rp = RogerPromote(framework=framework)
assert rp._image_name(
'dev',
self.config_file,
"test_path/test_app.json") == 'test_image'
def test_config_resolver(self):
framework = mock(Framework)
settings = mock(Settings)
app_config = mock(AppConfig)
config_dir = '/vagrant/config'
fake_team_config = tests.helper.fake_team_config()
when(settings).getConfigDir().thenReturn(config_dir)
when(app_config).getConfig(
config_dir, 'roger.json'
).thenReturn(fake_team_config)
rp = RogerPromote(settings=settings, app_config=app_config)
val = rp._config_resolver('template_path', 'test_app', 'roger.json')
assert val == 'framework_template_path'
def test_roger_push_script(self):
path = RogerPromote()._roger_push_script()
assert 'roger-mesos-tools/cli/roger_push.py' in path
| [
"cli.roger_promote.RogerPromote",
"mockito.mock",
"mockito.when"
]
| [((551, 565), 'mockito.mock', 'mock', (['Marathon'], {}), '(Marathon)\n', (555, 565), False, 'from mockito import mock, Mock, when\n'), ((590, 604), 'mockito.mock', 'mock', (['Settings'], {}), '(Settings)\n', (594, 604), False, 'from mockito import mock, Mock, when\n'), ((631, 646), 'mockito.mock', 'mock', (['AppConfig'], {}), '(AppConfig)\n', (635, 646), False, 'from mockito import mock, Mock, when\n'), ((717, 737), 'mockito.mock', 'mock', (['FrameworkUtils'], {}), '(FrameworkUtils)\n', (721, 737), False, 'from mockito import mock, Mock, when\n'), ((996, 1010), 'cli.roger_promote.RogerPromote', 'RogerPromote', ([], {}), '()\n', (1008, 1010), False, 'from cli.roger_promote import RogerPromote\n'), ((1161, 1175), 'mockito.mock', 'mock', (['Settings'], {}), '(Settings)\n', (1165, 1175), False, 'from mockito import mock, Mock, when\n'), ((1296, 1336), 'cli.roger_promote.RogerPromote', 'RogerPromote', ([], {'app_config': 'self.app_config'}), '(app_config=self.app_config)\n', (1308, 1336), False, 'from cli.roger_promote import RogerPromote\n'), ((1615, 1655), 'cli.roger_promote.RogerPromote', 'RogerPromote', ([], {'app_config': 'self.app_config'}), '(app_config=self.app_config)\n', (1627, 1655), False, 'from cli.roger_promote import RogerPromote\n'), ((2036, 2050), 'mockito.mock', 'mock', (['Marathon'], {}), '(Marathon)\n', (2040, 2050), False, 'from mockito import mock, Mock, when\n'), ((2476, 2509), 'cli.roger_promote.RogerPromote', 'RogerPromote', ([], {'framework': 'framework'}), '(framework=framework)\n', (2488, 2509), False, 'from cli.roger_promote import RogerPromote\n'), ((2702, 2717), 'mockito.mock', 'mock', (['Framework'], {}), '(Framework)\n', (2706, 2717), False, 'from mockito import mock, Mock, when\n'), ((2737, 2751), 'mockito.mock', 'mock', (['Settings'], {}), '(Settings)\n', (2741, 2751), False, 'from mockito import mock, Mock, when\n'), ((2773, 2788), 'mockito.mock', 'mock', (['AppConfig'], {}), '(AppConfig)\n', (2777, 2788), False, 'from mockito import mock, Mock, when\n'), ((3075, 3129), 'cli.roger_promote.RogerPromote', 'RogerPromote', ([], {'settings': 'settings', 'app_config': 'app_config'}), '(settings=settings, app_config=app_config)\n', (3087, 3129), False, 'from cli.roger_promote import RogerPromote\n'), ((3309, 3323), 'cli.roger_promote.RogerPromote', 'RogerPromote', ([], {}), '()\n', (3321, 3323), False, 'from cli.roger_promote import RogerPromote\n'), ((1185, 1206), 'mockito.when', 'when', (['self.app_config'], {}), '(self.app_config)\n', (1189, 1206), False, 'from mockito import mock, Mock, when\n'), ((1478, 1499), 'mockito.when', 'when', (['self.app_config'], {}), '(self.app_config)\n', (1482, 1499), False, 'from mockito import mock, Mock, when\n'), ((2060, 2075), 'mockito.when', 'when', (['framework'], {}), '(framework)\n', (2064, 2075), False, 'from mockito import mock, Mock, when\n'), ((2117, 2132), 'mockito.when', 'when', (['framework'], {}), '(framework)\n', (2121, 2132), False, 'from mockito import mock, Mock, when\n'), ((2246, 2261), 'mockito.when', 'when', (['framework'], {}), '(framework)\n', (2250, 2261), False, 'from mockito import mock, Mock, when\n'), ((2896, 2910), 'mockito.when', 'when', (['settings'], {}), '(settings)\n', (2900, 2910), False, 'from mockito import mock, Mock, when\n'), ((2957, 2973), 'mockito.when', 'when', (['app_config'], {}), '(app_config)\n', (2961, 2973), False, 'from mockito import mock, Mock, when\n')] |
from planning_framework import path
import cv2 as cv
import numpy as np
import argparse
import matplotlib.pyplot as plt
parser = argparse.ArgumentParser(description="Path Planning Visualisation")
parser.add_argument(
"-n",
"--n_heuristic",
default=2,
help="Heuristic for A* Algorithm (default = 2). 0 for Dijkstra's Algorithm",
)
args = parser.parse_args()
N_H = int(args.n_heuristic)
drawing = False # true if mouse is pressed
mode = "obs" # if True, draw rectangle. Press 'm' to toggle to curve
ix, iy = -1, -1
sx, sy = 0, 0
dx, dy = 50, 50
# mouse callback function
def draw(event, x, y, flags, param):
global mode, sx, sy, dx, dy, drawing
if event == cv.EVENT_LBUTTONDOWN:
drawing = True
elif event == cv.EVENT_MOUSEMOVE:
if drawing == True:
if mode == "obs":
cv.rectangle(img, (x - 5, y - 5), (x + 5, y + 5), (255, 255, 255), -1)
elif event == cv.EVENT_LBUTTONUP:
drawing = False
if mode == "obs":
cv.rectangle(img, (x - 5, y - 5), (x + 5, y + 5), (255, 255, 255), -1)
elif mode == "src":
cv.circle(img, (x, y), 5, (255, 0, 0), -1)
sx, sy = x, y
elif mode == "dst":
cv.circle(img, (x, y), 5, (0, 255, 0), -1)
dx, dy = x, y
img = np.zeros((512, 512, 3), np.uint8)
inv_im = np.ones(img.shape) * 255
cv.namedWindow("Draw the Occupancy Map")
cv.setMouseCallback("Draw the Occupancy Map", draw)
while 1:
cv.imshow("Draw the Occupancy Map", inv_im - img)
if cv.waitKey(20) & 0xFF == 27:
break
cv.destroyAllWindows()
mode = "src"
img_ = img
cv.namedWindow("Set the Starting Point")
cv.setMouseCallback("Set the Starting Point", draw)
while 1:
cv.imshow("Set the Starting Point", inv_im - img)
if cv.waitKey(20) & 0xFF == 27:
break
# cv.waitKey(20)
cv.destroyAllWindows()
mode = "dst"
end = "Set the End Point"
cv.namedWindow(end)
cv.setMouseCallback(end, draw)
while cv.getWindowProperty(end, 0) >= 0:
cv.imshow(end, inv_im - img)
if cv.waitKey(20) & 0xFF == 27:
break
cv.destroyAllWindows()
img = cv.resize(img_, (50, 50), interpolation=cv.INTER_AREA)
inv_img = np.ones(img.shape)
np.savetxt("map.txt", np.array(img[:, :, 0]))
plt.imshow(inv_img - img)
start = np.array([sx, sy]) * 50 // 512
end = np.array([dx, dy]) * 50 // 512
path(start, end, N_H)
| [
"cv2.setMouseCallback",
"matplotlib.pyplot.imshow",
"cv2.rectangle",
"numpy.ones",
"argparse.ArgumentParser",
"planning_framework.path",
"cv2.imshow",
"numpy.array",
"numpy.zeros",
"cv2.circle",
"cv2.destroyAllWindows",
"cv2.getWindowProperty",
"cv2.resize",
"cv2.waitKey",
"cv2.namedWindow"
]
| [((130, 196), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Path Planning Visualisation"""'}), "(description='Path Planning Visualisation')\n", (153, 196), False, 'import argparse\n'), ((1313, 1346), 'numpy.zeros', 'np.zeros', (['(512, 512, 3)', 'np.uint8'], {}), '((512, 512, 3), np.uint8)\n', (1321, 1346), True, 'import numpy as np\n'), ((1382, 1422), 'cv2.namedWindow', 'cv.namedWindow', (['"""Draw the Occupancy Map"""'], {}), "('Draw the Occupancy Map')\n", (1396, 1422), True, 'import cv2 as cv\n'), ((1423, 1474), 'cv2.setMouseCallback', 'cv.setMouseCallback', (['"""Draw the Occupancy Map"""', 'draw'], {}), "('Draw the Occupancy Map', draw)\n", (1442, 1474), True, 'import cv2 as cv\n'), ((1588, 1610), 'cv2.destroyAllWindows', 'cv.destroyAllWindows', ([], {}), '()\n', (1608, 1610), True, 'import cv2 as cv\n'), ((1636, 1676), 'cv2.namedWindow', 'cv.namedWindow', (['"""Set the Starting Point"""'], {}), "('Set the Starting Point')\n", (1650, 1676), True, 'import cv2 as cv\n'), ((1677, 1728), 'cv2.setMouseCallback', 'cv.setMouseCallback', (['"""Set the Starting Point"""', 'draw'], {}), "('Set the Starting Point', draw)\n", (1696, 1728), True, 'import cv2 as cv\n'), ((1863, 1885), 'cv2.destroyAllWindows', 'cv.destroyAllWindows', ([], {}), '()\n', (1883, 1885), True, 'import cv2 as cv\n'), ((1926, 1945), 'cv2.namedWindow', 'cv.namedWindow', (['end'], {}), '(end)\n', (1940, 1945), True, 'import cv2 as cv\n'), ((1946, 1976), 'cv2.setMouseCallback', 'cv.setMouseCallback', (['end', 'draw'], {}), '(end, draw)\n', (1965, 1976), True, 'import cv2 as cv\n'), ((2101, 2123), 'cv2.destroyAllWindows', 'cv.destroyAllWindows', ([], {}), '()\n', (2121, 2123), True, 'import cv2 as cv\n'), ((2131, 2185), 'cv2.resize', 'cv.resize', (['img_', '(50, 50)'], {'interpolation': 'cv.INTER_AREA'}), '(img_, (50, 50), interpolation=cv.INTER_AREA)\n', (2140, 2185), True, 'import cv2 as cv\n'), ((2196, 2214), 'numpy.ones', 'np.ones', (['img.shape'], {}), '(img.shape)\n', (2203, 2214), True, 'import numpy as np\n'), ((2261, 2286), 'matplotlib.pyplot.imshow', 'plt.imshow', (['(inv_img - img)'], {}), '(inv_img - img)\n', (2271, 2286), True, 'import matplotlib.pyplot as plt\n'), ((2365, 2386), 'planning_framework.path', 'path', (['start', 'end', 'N_H'], {}), '(start, end, N_H)\n', (2369, 2386), False, 'from planning_framework import path\n'), ((1356, 1374), 'numpy.ones', 'np.ones', (['img.shape'], {}), '(img.shape)\n', (1363, 1374), True, 'import numpy as np\n'), ((1488, 1537), 'cv2.imshow', 'cv.imshow', (['"""Draw the Occupancy Map"""', '(inv_im - img)'], {}), "('Draw the Occupancy Map', inv_im - img)\n", (1497, 1537), True, 'import cv2 as cv\n'), ((1742, 1791), 'cv2.imshow', 'cv.imshow', (['"""Set the Starting Point"""', '(inv_im - img)'], {}), "('Set the Starting Point', inv_im - img)\n", (1751, 1791), True, 'import cv2 as cv\n'), ((1983, 2011), 'cv2.getWindowProperty', 'cv.getWindowProperty', (['end', '(0)'], {}), '(end, 0)\n', (2003, 2011), True, 'import cv2 as cv\n'), ((2022, 2050), 'cv2.imshow', 'cv.imshow', (['end', '(inv_im - img)'], {}), '(end, inv_im - img)\n', (2031, 2050), True, 'import cv2 as cv\n'), ((2237, 2259), 'numpy.array', 'np.array', (['img[:, :, 0]'], {}), '(img[:, :, 0])\n', (2245, 2259), True, 'import numpy as np\n'), ((2296, 2314), 'numpy.array', 'np.array', (['[sx, sy]'], {}), '([sx, sy])\n', (2304, 2314), True, 'import numpy as np\n'), ((2333, 2351), 'numpy.array', 'np.array', (['[dx, dy]'], {}), '([dx, dy])\n', (2341, 2351), True, 'import numpy as np\n'), ((1545, 
1559), 'cv2.waitKey', 'cv.waitKey', (['(20)'], {}), '(20)\n', (1555, 1559), True, 'import cv2 as cv\n'), ((1799, 1813), 'cv2.waitKey', 'cv.waitKey', (['(20)'], {}), '(20)\n', (1809, 1813), True, 'import cv2 as cv\n'), ((2058, 2072), 'cv2.waitKey', 'cv.waitKey', (['(20)'], {}), '(20)\n', (2068, 2072), True, 'import cv2 as cv\n'), ((844, 914), 'cv2.rectangle', 'cv.rectangle', (['img', '(x - 5, y - 5)', '(x + 5, y + 5)', '(255, 255, 255)', '(-1)'], {}), '(img, (x - 5, y - 5), (x + 5, y + 5), (255, 255, 255), -1)\n', (856, 914), True, 'import cv2 as cv\n'), ((1016, 1086), 'cv2.rectangle', 'cv.rectangle', (['img', '(x - 5, y - 5)', '(x + 5, y + 5)', '(255, 255, 255)', '(-1)'], {}), '(img, (x - 5, y - 5), (x + 5, y + 5), (255, 255, 255), -1)\n', (1028, 1086), True, 'import cv2 as cv\n'), ((1127, 1169), 'cv2.circle', 'cv.circle', (['img', '(x, y)', '(5)', '(255, 0, 0)', '(-1)'], {}), '(img, (x, y), 5, (255, 0, 0), -1)\n', (1136, 1169), True, 'import cv2 as cv\n'), ((1236, 1278), 'cv2.circle', 'cv.circle', (['img', '(x, y)', '(5)', '(0, 255, 0)', '(-1)'], {}), '(img, (x, y), 5, (0, 255, 0), -1)\n', (1245, 1278), True, 'import cv2 as cv\n')] |
import re
from pkg_resources import parse_requirements
import pathlib
from setuptools import find_packages, setup
README_FILE = 'README.md'
REQUIREMENTS_FILE = 'requirements.txt'
VERSION_FILE = 'mtg/_version.py'
VERSION_REGEXP = r'^__version__ = \'(\d+\.\d+\.\d+)\''
r = re.search(VERSION_REGEXP, open(VERSION_FILE).read(), re.M)
if r is None:
raise RuntimeError(f'Unable to find version string in {VERSION_FILE}.')
version = r.group(1)
long_description = open(README_FILE, encoding='utf-8').read()
install_requires = [str(r) for r in parse_requirements(open(REQUIREMENTS_FILE, 'rt'))]
setup(
name='mtg',
version=version,
description='mtg is a collection of data science and ml projects for Magic:the Gathering',
long_description=long_description,
long_description_content_type='text/markdown',
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/RyanSaxe/mtg',
packages=find_packages(),
install_requires=install_requires,
)
| [
"setuptools.find_packages"
]
| [((929, 944), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (942, 944), False, 'from setuptools import find_packages, setup\n')] |
"""
Purpose: simulate rolling a die
Version: 1.0
"""
import random
def roll_dice():
roll = random.randint(1, 6)
return roll
def main():
total_times = 100000
result_list = [0] * 6
for i in range(total_times):
roll = roll_dice()
result_list[roll-1] += 1
for i, x in enumerate(result_list):
        print('Face {}: count {}, frequency {}'.format(i+1, x, x/total_times))
print(result_list)
if __name__ == '__main__':
main()
| [
"random.randint"
]
| [((77, 97), 'random.randint', 'random.randint', (['(1)', '(6)'], {}), '(1, 6)\n', (91, 97), False, 'import random\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2017/11/30 3:02 PM
# @Author : Matrix
# @Github : https://github.com/blackmatrix7/
# @Blog : http://www.cnblogs.com/blackmatrix/
# @File : messages.py
# @Software: PyCharm
import json
from ..foundation import *
from json import JSONDecodeError
__author__ = 'blackmatrix'
__all__ = ['async_send_msg', 'get_msg_send_result', 'get_msg_send_progress']
@dingtalk_resp
def async_send_msg(access_token, msgtype, agent_id, msgcontent, userid_list=None, dept_id_list=None, to_all_user=False):
try:
msgcontent = json.dumps(msgcontent)
    except (TypeError, ValueError):
        # If msgcontent cannot be serialized to JSON, pass it to DingTalk as-is and let DingTalk handle it.
        # Note: json.dumps raises TypeError/ValueError for unserializable input, not JSONDecodeError.
        pass
if not isinstance(userid_list, str):
userid_list = ','.join(userid_list)
args = locals().copy()
payload = {}
    # Assemble the request parameters
for k, v in args.items():
if k in ('msgtype', 'agent_id', 'msgcontent', 'userid_list', 'dept_id_list'):
if v is not None:
payload.update({k: v})
resp = call_dingtalk_webapi(access_token, 'dingtalk.corp.message.corpconversation.asyncsend', **payload)
return resp
@dingtalk_resp
def get_msg_send_result(access_token, agent_id, task_id):
url = get_request_url(access_token, 'dingtalk.corp.message.corpconversation.getsendresult')
payload = {'task_id': task_id, 'agent_id': agent_id}
return requests.get(url, params=payload)
@dingtalk_resp
def get_msg_send_progress(access_token, agent_id, task_id):
url = get_request_url(access_token, 'dingtalk.corp.message.corpconversation.getsendprogress')
payload = {'task_id': task_id, 'agent_id': agent_id}
return requests.get(url, params=payload)
if __name__ == '__main__':
pass
| [
"json.dumps"
]
| [((573, 595), 'json.dumps', 'json.dumps', (['msgcontent'], {}), '(msgcontent)\n', (583, 595), False, 'import json\n')] |
# -*- coding: utf-8 -*-
""" p2p-streams (c) 2014 enen92 fightnight
This file contains the livestream addon engine. It is mostly based on divingmule's work on the livestreams addon.
Functions:
xml_lists_menu() -> main menu for the xml list category
addlista() -> add a new list. It asks for a local or remote source and processes the given input
remove_list(name) -> Remove a list
get_groups(url) -> First regex function to parse a given list. Sopcast type list
get_channels(name,url) -> Second regex function to parse a given list. Used for general livestreams xml type lists
getData(url,fanart) -> Get the item data such as iconimage, fanart, etc
getChannelItems(name,url,fanart) -> Function to grab the channel items
getItems(items,fanart) -> Function to grab the items from the xml
removeNonAscii(s) -> Function to remove non-ascii characters from the list
getSoup(url) -> uses beautifulsoup to parse a remote xml
addon_log(string) -> Simple log/print function
getRegexParsed(regexs, url) -> parse the regex expression
list_type(url) -> Checks if the list is xml or m3u
parse_m3u(url) -> Parses an m3u type list
"""
import urllib,urllib2,re,xbmcplugin,xbmcgui,xbmc,xbmcaddon,HTMLParser,time,datetime,os,xbmcvfs,sys
from BeautifulSoup import BeautifulStoneSoup, BeautifulSoup, BeautifulSOAP
from peertopeerutils.pluginxbmc import *
from peertopeerutils.webutils import *
from peertopeerutils.directoryhandle import *
from peertopeerutils.iofile import *
"""
Main Menu
"""
def xml_lists_menu():
if settings.getSetting('sopcast-oficial') == "true":
addDir(translate(40116),"http://sopcast.org/chlist.xml",101,addonpath + art + 'xml_list_sopcast.png',2,True)
try:
if os.path.exists(os.path.join(pastaperfil,"Lists")):
dirs, files = xbmcvfs.listdir(os.path.join(pastaperfil,"Lists"))
for file in files:
f = open(os.path.join(pastaperfil,"Lists",file), "r")
string = f.read()
if xbmcvfs.exists(os.path.join(pastaperfil,"Lists-fanart",file.replace('.txt','.jpg'))):addDir("[B][COLOR orange]" + file.replace(".txt","") + "[/B][/COLOR]",string,101,addonpath + art + 'xml_lists.png',2,True,fan_art=os.path.join(pastaperfil,"Lists-fanart",file.replace('.txt','.jpg')))
else: addDir("[B][COLOR orange]" + file.replace(".txt","") + "[/B][/COLOR]",string,101,addonpath + art + 'xml_lists.png',2,True)
except: pass
addDir(translate(40121),MainURL,107,addonpath + art + 'plus-menu.png',2,False)
#xbmc.executebuiltin("Container.SetViewMode(51)")
"""
Add a new list function
"""
def addlista():
opcao= xbmcgui.Dialog().yesno(translate(40000), translate(40123),"","",translate(40124),translate(40125))
if opcao:
dialog = xbmcgui.Dialog()
lista_xml = dialog.browse(int(1), translate(40186), 'myprograms','.xml|.m3u')
keybdois = xbmc.Keyboard("", translate(40130))
keybdois.doModal()
if (keybdois.isConfirmed()):
searchname = keybdois.getText()
if searchname=='': sys.exit(0)
encode=urllib.quote(searchname)
if xbmcvfs.exists(os.path.join(pastaperfil,"Lists")): pass
else: xbmcvfs.mkdir(os.path.join(pastaperfil,"Lists"))
txt_name = searchname + ".txt"
save(os.path.join(pastaperfil,"Lists",txt_name),lista_xml)
mensagemok(translate(40000),translate(40129))
xbmc.executebuiltin("XBMC.Container.Refresh")
else:
keyb = xbmc.Keyboard("", translate(40127))
keyb.doModal()
if (keyb.isConfirmed()):
search = keyb.getText()
if search=='': sys.exit(0)
if "dropbox" in search and not "?dl=1" in search: search = search + '?dl=1'
if "xml" not in search.split(".")[-1] and "m3u" not in search.split(".")[-1]: mensagemok(translate(40000),translate(40128)); sys.exit(0)
else:
try:
code = get_page_source(search)
except:
mensagemok(translate(40000),translate(40128))
sys.exit(0)
keybdois = xbmc.Keyboard("", translate(40130))
keybdois.doModal()
if (keybdois.isConfirmed()):
searchname = keybdois.getText()
if searchname=='': sys.exit(0)
encode=urllib.quote(searchname)
if os.path.exists(os.path.join(pastaperfil,"Lists")): pass
else: xbmcvfs.mkdir(os.path.join(pastaperfil,"Lists"))
txt_name = searchname + ".txt"
save(os.path.join(pastaperfil,"Lists",txt_name),search)
mensagemok(translate(40000),translate(40129))
xbmc.executebuiltin("XBMC.Container.Refresh")
"""
Remove a List
"""
def remove_list(name):
xbmcvfs.delete(name)
xbmc.executebuiltin("Notification(%s,%s,%i,%s)" % (translate(40000), translate(40150), 1,addonpath+"/icon.png"))
xbmc.executebuiltin("Container.Refresh")
"""
Parsing functions
"""
def list_type(url):
ltype = url.split('.')[-1]
if 'xml' in ltype: get_groups(url)
elif 'm3u' in ltype: parse_m3u(url)
else: pass
def parse_m3u(url):
if "http" in url: content = get_page_source(url)
else: content = readfile(url)
match = re.compile('#EXTINF:.+?,(.*?)\n(.*?)(?:\r|\n)').findall(content)
for channel_name,stream_url in match:
if 'plugin://' in stream_url:
stream_url = 'XBMC.RunPlugin('+stream_url+')'
addDir(channel_name,stream_url,106,'',1,False)
elif 'sop://' in stream_url:
addDir(channel_name,stream_url,2,'',1,False)
elif ('acestream://' in stream_url) or ('.acelive' in stream_url) or ('.torrent' in stream_url):
addDir(channel_name,stream_url,1,'',1,False)
else: addLink(channel_name,stream_url,'')
def get_groups(url):
from xml.etree import ElementTree
try:
print("Sopcast xml-type list detected")
if "http" in url:
source = get_page_source(url)
save(os.path.join(pastaperfil,"working.xml"),source)
workingxml = os.path.join(pastaperfil,"working.xml")
else:
workingxml = url
groups = ElementTree.parse(workingxml).findall('.//group')
unname_group_index = 1
LANGUAGE = "en"
for group in groups:
if group.attrib[LANGUAGE] == "":
group.attrib[LANGUAGE] = str(unname_group_index)
unname_group_index = unname_group_index + 1
if re.sub('c','e',LANGUAGE) == LANGUAGE:
OTHER_LANG = re.sub('e','c',LANGUAGE)
else:
OTHER_LANG = re.sub('c','e',LANGUAGE)
if LANGUAGE == "cn":
try:
if len(group.attrib[OTHER_LANG]) > 0:
group.attrib[LANGUAGE] = group.attrib[OTHER_LANG]
unname_group_index = unname_group_index - 1
except:
pass
if (group.find('.//channel')==None): continue
group_name=group.attrib[LANGUAGE]
try:
addDir_livestreams_common(group_name,url,102,addonpath + art + 'xml_list_sopcast.png',True)
except: pass
#xbmc.executebuiltin("Container.SetViewMode(51)")
except:
print("Other type of xml list")
getData(url,"")
def get_channels(name,url):
from xml.etree import ElementTree
if url.startswith('http://'):
source = get_page_source(url)
else:
source = readfile(url)
save(os.path.join(pastaperfil,"working.xml"),source)
chlist_tree = ElementTree.parse(os.path.join(pastaperfil,"working.xml"))
LANGUAGE = "en"
groups = ElementTree.parse(os.path.join(pastaperfil,"working.xml")).findall('.//group')
for group in groups:
if group.attrib[LANGUAGE].encode('utf-8') == name:
channels = group.findall('.//channel')
for channel in channels:
try:
try:
title = channel.find('.//name').attrib['en'].encode('utf-8')
except: title = ''
if not title:
try: title = channel.find('.//name').attrib['cn'].encode('utf-8')
except: title = ''
if not title:
try: title = channel.find('.//name').text
except: title = ''
tipo = channel.find('.//stream_type').text
sop_address = channel.find('.//item').text
if not tipo: tipo = "N/A"
if not title: title = "N/A"
thumbnail = ""
try:
thumbnail = channel.find('.//thumbnail').text
except: pass
if sop_address:
if thumbnail == "": thumbnail = addonpath + art + 'sopcast_link.png'
try: addDir_livestreams_common('[B][COLOR orange]' + title + ' [/B][/COLOR](' + tipo +')',sop_address,2,thumbnail,False)
except:pass
else: pass
except: pass
else: pass
def getData(url,fanart):
soup = getSoup(url)
if len(soup('channels')) > 0:
channels = soup('channel')
for channel in channels:
name = channel('name')[0].string
thumbnail = channel('thumbnail')[0].string
if thumbnail == None:
thumbnail = ''
try:
if not channel('fanart'):
if addon.getSetting('use_thumb') == "true":
fanArt = thumbnail
else:
fanArt = fanart
else:
fanArt = channel('fanart')[0].string
if fanArt == None:
raise
except:
fanArt = fanart
try:
desc = channel('info')[0].string
if desc == None:
raise
except:
desc = ''
try:
genre = channel('genre')[0].string
if genre == None:
raise
except:
genre = ''
try:
date = channel('date')[0].string
if date == None:
raise
except:
date = ''
try:
credits = channel('credits')[0].string
if credits == None:
raise
except:
credits = ''
try:
addDir_livestreams(name.encode('utf-8', 'ignore'),url.encode('utf-8'),103,thumbnail,fanArt,desc,genre,date,credits,True)
except:
addon_log('There was a problem adding directory from getData(): '+name.encode('utf-8', 'ignore'))
else:
addon_log('No Channels: getItems')
getItems(soup('item'),fanart)
def getChannelItems(name,url,fanart):
soup = getSoup(url)
channel_list = soup.find('channel', attrs={'name' : name.decode('utf-8')})
items = channel_list('item')
try:
fanArt = channel_list('fanart')[0].string
if fanArt == None:
raise
except:
fanArt = fanart
for channel in channel_list('subchannel'):
name = channel('name')[0].string
try:
thumbnail = channel('thumbnail')[0].string
if thumbnail == None:
raise
except:
thumbnail = ''
try:
if not channel('fanart'):
if addon.getSetting('use_thumb') == "true":
fanArt = thumbnail
else:
fanArt = channel('fanart')[0].string
if fanArt == None:
raise
except:
pass
try:
desc = channel('info')[0].string
if desc == None:
raise
except:
desc = ''
try:
genre = channel('genre')[0].string
if genre == None:
raise
except:
genre = ''
try:
date = channel('date')[0].string
if date == None:
raise
except:
date = ''
try:
credits = channel('credits')[0].string
if credits == None:
raise
except:
credits = ''
try:
addDir_livestreams(name.encode('utf-8', 'ignore'),url.encode('utf-8'),3,thumbnail,fanArt,desc,genre,credits,date)
except:
addon_log('There was a problem adding directory - '+name.encode('utf-8', 'ignore'))
getItems(items,fanArt)
def getItems(items,fanart):
total = len(items)
addon_log('Total Items: %s' %total)
for item in items:
try:
name = item('title')[0].string
if name is None:
name = 'unknown?'
except:
addon_log('Name Error')
name = ''
try:
if item('epg'):
if item.epg_url:
addon_log('Get EPG Regex')
epg_url = item.epg_url.string
epg_regex = item.epg_regex.string
epg_name = get_epg(epg_url, epg_regex)
if epg_name:
name += ' - ' + epg_name
elif item('epg')[0].string > 1:
name += getepg(item('epg')[0].string)
else:
pass
except:
addon_log('EPG Error')
try:
url = []
for i in item('link'):
if not i.string == None:
url.append(i.string)
if len(url) < 1:
raise
except:
addon_log('Error <link> element, Passing:'+name.encode('utf-8', 'ignore'))
continue
try:
thumbnail = item('thumbnail')[0].string
if thumbnail == None:
raise
except:
thumbnail = ''
try:
if not item('fanart'):
if addon.getSetting('use_thumb') == "true":
fanArt = thumbnail
else:
fanArt = fanart
else:
fanArt = item('fanart')[0].string
if fanArt == None:
raise
except:
fanArt = fanart
try:
desc = item('info')[0].string
if desc == None:
raise
except:
desc = ''
try:
genre = item('genre')[0].string
if genre == None:
raise
except:
genre = ''
try:
date = item('date')[0].string
if date == None:
raise
except:
date = ''
regexs = None
if item('regex'):
try:
regexs = {}
for i in item('regex'):
regexs[i('name')[0].string] = {}
regexs[i('name')[0].string]['expre'] = i('expres')[0].string
regexs[i('name')[0].string]['page'] = i('page')[0].string
try:
regexs[i('name')[0].string]['refer'] = i('referer')[0].string
except:
addon_log("Regex: -- No Referer --")
try:
regexs[i('name')[0].string]['agent'] = i('agent')[0].string
except:
addon_log("Regex: -- No User Agent --")
regexs = urllib.quote(repr(regexs))
except:
regexs = None
addon_log('regex Error: '+name.encode('utf-8', 'ignore'))
try:
if "RunPlugin" in url[0]:
try:
addDir_livestreams(name.encode('utf-8', 'ignore'),url[0],106,thumbnail,fanArt,desc,genre,"credits",date)
except:
match = re.compile("&name=(.+?)\)").findall(url[0].replace(";",""))
if match:
try:
addDir_livestreams(name.encode('utf-8', 'ignore'),removeNonAscii(url[0]),106,thumbnail,fanArt,desc,genre,credits,date)
except:
try:
addDir_livestreams(removeNonAscii(name.encode('utf-8', 'ignore')),removeNonAscii(url[0].replace(";","")),106,thumbnail,fanArt,desc,genre,credits,date)
except:
addon_log('There was a problem adding item - '+name.encode('utf-8', 'ignore'))
else:
addon_log('There was a problem adding item - '+name.encode('utf-8', 'ignore'))
else:
if ('acestream://' in url[0]) or ('.acelive' in url[0]) or ('.torrent' in url[0]):
if 'plugin://' not in url[0]:
addDir_livestreams(name.encode('utf-8', 'ignore'),url[0],1,thumbnail,fanArt,desc,genre,"credits",date)
else:
addLink_livestreams(url[0].replace(';',''),name.encode('utf-8', 'ignore'),thumbnail,fanArt,desc,genre,date,True,None,regexs,total)
elif 'sop://' in url[0]:
if 'plugin://' not in url[0]:
addDir_livestreams(name.encode('utf-8', 'ignore'),url[0],2,thumbnail,fanArt,desc,genre,"credits",date)
else:
addLink_livestreams(url[0].replace(';',''),name.encode('utf-8', 'ignore'),thumbnail,fanArt,desc,genre,date,True,None,regexs,total)
else: addLink_livestreams(url[0].replace(';',''),name.encode('utf-8', 'ignore'),thumbnail,fanArt,desc,genre,date,True,None,regexs,total)
except:
addon_log('There was a problem adding item - '+name.encode('utf-8', 'ignore'))
def removeNonAscii(s): return "".join(filter(lambda x: ord(x)<128, s))
def getSoup(url):
if url.startswith('http://'):
data = makeRequest(url)
else:
if xbmcvfs.exists(url):
if url.startswith("smb://") or url.startswith("nfs://"):
copy = xbmcvfs.copy(url, os.path.join(profile, 'temp', 'sorce_temp.txt'))
if copy:
data = open(os.path.join(profile, 'temp', 'sorce_temp.txt'), "r").read()
xbmcvfs.delete(os.path.join(profile, 'temp', 'sorce_temp.txt'))
else:
addon_log("failed to copy from smb:")
else:
data = open(url, 'r').read()
else:
addon_log("Soup Data not found!")
return
return BeautifulSOAP(data, convertEntities=BeautifulStoneSoup.XML_ENTITIES)
def addon_log(string):
print(string)
def getRegexParsed(regexs, url):
regexs = eval(urllib.unquote(regexs))
cachedPages = {}
doRegexs = re.compile('\$doregex\[([^\]]*)\]').findall(url)
for k in doRegexs:
if k in regexs:
m = regexs[k]
if m['page'] in cachedPages:
link = cachedPages[m['page']]
else:
req = urllib2.Request(m['page'])
req.add_header('User-Agent', 'Mozilla/5.0 (Windows NT 6.1; rv:14.0) Gecko/20100101 Firefox/14.0.1')
if 'refer' in m:
req.add_header('Referer', m['refer'])
if 'agent' in m:
req.add_header('User-agent', m['agent'])
response = urllib2.urlopen(req)
link = response.read()
response.close()
cachedPages[m['page']] = link
reg = re.compile(m['expre']).search(link)
url = url.replace("$doregex[" + k + "]", reg.group(1).strip())
item = xbmcgui.ListItem(path=url)
xbmcplugin.setResolvedUrl(int(sys.argv[1]), True, item)
| [
"urllib2.urlopen",
"xml.etree.ElementTree.parse",
"re.compile",
"urllib.unquote",
"xbmcvfs.exists",
"os.path.join",
"urllib.quote",
"BeautifulSoup.BeautifulSOAP",
"xbmcvfs.delete",
"xbmcgui.ListItem",
"urllib2.Request",
"xbmcgui.Dialog",
"sys.exit",
"re.sub",
"xbmc.executebuiltin"
]
| [((4891, 4911), 'xbmcvfs.delete', 'xbmcvfs.delete', (['name'], {}), '(name)\n', (4905, 4911), False, 'import urllib, urllib2, re, xbmcplugin, xbmcgui, xbmc, xbmcaddon, HTMLParser, time, datetime, os, xbmcvfs, sys\n'), ((5033, 5073), 'xbmc.executebuiltin', 'xbmc.executebuiltin', (['"""Container.Refresh"""'], {}), "('Container.Refresh')\n", (5052, 5073), False, 'import urllib, urllib2, re, xbmcplugin, xbmcgui, xbmc, xbmcaddon, HTMLParser, time, datetime, os, xbmcvfs, sys\n'), ((19032, 19100), 'BeautifulSoup.BeautifulSOAP', 'BeautifulSOAP', (['data'], {'convertEntities': 'BeautifulStoneSoup.XML_ENTITIES'}), '(data, convertEntities=BeautifulStoneSoup.XML_ENTITIES)\n', (19045, 19100), False, 'from BeautifulSoup import BeautifulStoneSoup, BeautifulSoup, BeautifulSOAP\n'), ((20138, 20164), 'xbmcgui.ListItem', 'xbmcgui.ListItem', ([], {'path': 'url'}), '(path=url)\n', (20154, 20164), False, 'import urllib, urllib2, re, xbmcplugin, xbmcgui, xbmc, xbmcaddon, HTMLParser, time, datetime, os, xbmcvfs, sys\n'), ((2813, 2829), 'xbmcgui.Dialog', 'xbmcgui.Dialog', ([], {}), '()\n', (2827, 2829), False, 'import urllib, urllib2, re, xbmcplugin, xbmcgui, xbmc, xbmcaddon, HTMLParser, time, datetime, os, xbmcvfs, sys\n'), ((7719, 7759), 'os.path.join', 'os.path.join', (['pastaperfil', '"""working.xml"""'], {}), "(pastaperfil, 'working.xml')\n", (7731, 7759), False, 'import urllib, urllib2, re, xbmcplugin, xbmcgui, xbmc, xbmcaddon, HTMLParser, time, datetime, os, xbmcvfs, sys\n'), ((7803, 7843), 'os.path.join', 'os.path.join', (['pastaperfil', '"""working.xml"""'], {}), "(pastaperfil, 'working.xml')\n", (7815, 7843), False, 'import urllib, urllib2, re, xbmcplugin, xbmcgui, xbmc, xbmcaddon, HTMLParser, time, datetime, os, xbmcvfs, sys\n'), ((18417, 18436), 'xbmcvfs.exists', 'xbmcvfs.exists', (['url'], {}), '(url)\n', (18431, 18436), False, 'import urllib, urllib2, re, xbmcplugin, xbmcgui, xbmc, xbmcaddon, HTMLParser, time, datetime, os, xbmcvfs, sys\n'), ((19195, 19217), 'urllib.unquote', 'urllib.unquote', (['regexs'], {}), '(regexs)\n', (19209, 19217), False, 'import urllib, urllib2, re, xbmcplugin, xbmcgui, xbmc, xbmcaddon, HTMLParser, time, datetime, os, xbmcvfs, sys\n'), ((1769, 1803), 'os.path.join', 'os.path.join', (['pastaperfil', '"""Lists"""'], {}), "(pastaperfil, 'Lists')\n", (1781, 1803), False, 'import urllib, urllib2, re, xbmcplugin, xbmcgui, xbmc, xbmcaddon, HTMLParser, time, datetime, os, xbmcvfs, sys\n'), ((2683, 2699), 'xbmcgui.Dialog', 'xbmcgui.Dialog', ([], {}), '()\n', (2697, 2699), False, 'import urllib, urllib2, re, xbmcplugin, xbmcgui, xbmc, xbmcaddon, HTMLParser, time, datetime, os, xbmcvfs, sys\n'), ((3141, 3165), 'urllib.quote', 'urllib.quote', (['searchname'], {}), '(searchname)\n', (3153, 3165), False, 'import urllib, urllib2, re, xbmcplugin, xbmcgui, xbmc, xbmcaddon, HTMLParser, time, datetime, os, xbmcvfs, sys\n'), ((3488, 3533), 'xbmc.executebuiltin', 'xbmc.executebuiltin', (['"""XBMC.Container.Refresh"""'], {}), "('XBMC.Container.Refresh')\n", (3507, 3533), False, 'import urllib, urllib2, re, xbmcplugin, xbmcgui, xbmc, xbmcaddon, HTMLParser, time, datetime, os, xbmcvfs, sys\n'), ((5368, 5415), 're.compile', 're.compile', (["'#EXTINF:.+?,(.*?)\\n(.*?)(?:\\r|\\n)'"], {}), "('#EXTINF:.+?,(.*?)\\n(.*?)(?:\\r|\\n)')\n", (5378, 5415), False, 'import urllib, urllib2, re, xbmcplugin, xbmcgui, xbmc, xbmcaddon, HTMLParser, time, datetime, os, xbmcvfs, sys\n'), ((6212, 6252), 'os.path.join', 'os.path.join', (['pastaperfil', '"""working.xml"""'], {}), "(pastaperfil, 'working.xml')\n", (6224, 
6252), False, 'import urllib, urllib2, re, xbmcplugin, xbmcgui, xbmc, xbmcaddon, HTMLParser, time, datetime, os, xbmcvfs, sys\n'), ((19255, 19294), 're.compile', 're.compile', (['"""\\\\$doregex\\\\[([^\\\\]]*)\\\\]"""'], {}), "('\\\\$doregex\\\\[([^\\\\]]*)\\\\]')\n", (19265, 19294), False, 'import urllib, urllib2, re, xbmcplugin, xbmcgui, xbmc, xbmcaddon, HTMLParser, time, datetime, os, xbmcvfs, sys\n'), ((1847, 1881), 'os.path.join', 'os.path.join', (['pastaperfil', '"""Lists"""'], {}), "(pastaperfil, 'Lists')\n", (1859, 1881), False, 'import urllib, urllib2, re, xbmcplugin, xbmcgui, xbmc, xbmcaddon, HTMLParser, time, datetime, os, xbmcvfs, sys\n'), ((3110, 3121), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (3118, 3121), False, 'import urllib, urllib2, re, xbmcplugin, xbmcgui, xbmc, xbmcaddon, HTMLParser, time, datetime, os, xbmcvfs, sys\n'), ((3196, 3230), 'os.path.join', 'os.path.join', (['pastaperfil', '"""Lists"""'], {}), "(pastaperfil, 'Lists')\n", (3208, 3230), False, 'import urllib, urllib2, re, xbmcplugin, xbmcgui, xbmc, xbmcaddon, HTMLParser, time, datetime, os, xbmcvfs, sys\n'), ((3364, 3408), 'os.path.join', 'os.path.join', (['pastaperfil', '"""Lists"""', 'txt_name'], {}), "(pastaperfil, 'Lists', txt_name)\n", (3376, 3408), False, 'import urllib, urllib2, re, xbmcplugin, xbmcgui, xbmc, xbmcaddon, HTMLParser, time, datetime, os, xbmcvfs, sys\n'), ((3714, 3725), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (3722, 3725), False, 'import urllib, urllib2, re, xbmcplugin, xbmcgui, xbmc, xbmcaddon, HTMLParser, time, datetime, os, xbmcvfs, sys\n'), ((3951, 3962), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (3959, 3962), False, 'import urllib, urllib2, re, xbmcplugin, xbmcgui, xbmc, xbmcaddon, HTMLParser, time, datetime, os, xbmcvfs, sys\n'), ((4424, 4448), 'urllib.quote', 'urllib.quote', (['searchname'], {}), '(searchname)\n', (4436, 4448), False, 'import urllib, urllib2, re, xbmcplugin, xbmcgui, xbmc, xbmcaddon, HTMLParser, time, datetime, os, xbmcvfs, sys\n'), ((4792, 4837), 'xbmc.executebuiltin', 'xbmc.executebuiltin', (['"""XBMC.Container.Refresh"""'], {}), "('XBMC.Container.Refresh')\n", (4811, 4837), False, 'import urllib, urllib2, re, xbmcplugin, xbmcgui, xbmc, xbmcaddon, HTMLParser, time, datetime, os, xbmcvfs, sys\n'), ((6139, 6179), 'os.path.join', 'os.path.join', (['pastaperfil', '"""working.xml"""'], {}), "(pastaperfil, 'working.xml')\n", (6151, 6179), False, 'import urllib, urllib2, re, xbmcplugin, xbmcgui, xbmc, xbmcaddon, HTMLParser, time, datetime, os, xbmcvfs, sys\n'), ((6312, 6341), 'xml.etree.ElementTree.parse', 'ElementTree.parse', (['workingxml'], {}), '(workingxml)\n', (6329, 6341), False, 'from xml.etree import ElementTree\n'), ((7895, 7935), 'os.path.join', 'os.path.join', (['pastaperfil', '"""working.xml"""'], {}), "(pastaperfil, 'working.xml')\n", (7907, 7935), False, 'import urllib, urllib2, re, xbmcplugin, xbmcgui, xbmc, xbmcaddon, HTMLParser, time, datetime, os, xbmcvfs, sys\n'), ((19504, 19530), 'urllib2.Request', 'urllib2.Request', (["m['page']"], {}), "(m['page'])\n", (19519, 19530), False, 'import urllib, urllib2, re, xbmcplugin, xbmcgui, xbmc, xbmcaddon, HTMLParser, time, datetime, os, xbmcvfs, sys\n'), ((19859, 19879), 'urllib2.urlopen', 'urllib2.urlopen', (['req'], {}), '(req)\n', (19874, 19879), False, 'import urllib, urllib2, re, xbmcplugin, xbmcgui, xbmc, xbmcaddon, HTMLParser, time, datetime, os, xbmcvfs, sys\n'), ((1938, 1978), 'os.path.join', 'os.path.join', (['pastaperfil', '"""Lists"""', 'file'], {}), "(pastaperfil, 'Lists', 
file)\n", (1950, 1978), False, 'import urllib, urllib2, re, xbmcplugin, xbmcgui, xbmc, xbmcaddon, HTMLParser, time, datetime, os, xbmcvfs, sys\n'), ((3269, 3303), 'os.path.join', 'os.path.join', (['pastaperfil', '"""Lists"""'], {}), "(pastaperfil, 'Lists')\n", (3281, 3303), False, 'import urllib, urllib2, re, xbmcplugin, xbmcgui, xbmc, xbmcaddon, HTMLParser, time, datetime, os, xbmcvfs, sys\n'), ((4389, 4400), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (4397, 4400), False, 'import urllib, urllib2, re, xbmcplugin, xbmcgui, xbmc, xbmcaddon, HTMLParser, time, datetime, os, xbmcvfs, sys\n'), ((4483, 4517), 'os.path.join', 'os.path.join', (['pastaperfil', '"""Lists"""'], {}), "(pastaperfil, 'Lists')\n", (4495, 4517), False, 'import urllib, urllib2, re, xbmcplugin, xbmcgui, xbmc, xbmcaddon, HTMLParser, time, datetime, os, xbmcvfs, sys\n'), ((4663, 4707), 'os.path.join', 'os.path.join', (['pastaperfil', '"""Lists"""', 'txt_name'], {}), "(pastaperfil, 'Lists', txt_name)\n", (4675, 4707), False, 'import urllib, urllib2, re, xbmcplugin, xbmcgui, xbmc, xbmcaddon, HTMLParser, time, datetime, os, xbmcvfs, sys\n'), ((6635, 6661), 're.sub', 're.sub', (['"""c"""', '"""e"""', 'LANGUAGE'], {}), "('c', 'e', LANGUAGE)\n", (6641, 6661), False, 'import urllib, urllib2, re, xbmcplugin, xbmcgui, xbmc, xbmcaddon, HTMLParser, time, datetime, os, xbmcvfs, sys\n'), ((6706, 6732), 're.sub', 're.sub', (['"""e"""', '"""c"""', 'LANGUAGE'], {}), "('e', 'c', LANGUAGE)\n", (6712, 6732), False, 'import urllib, urllib2, re, xbmcplugin, xbmcgui, xbmc, xbmcaddon, HTMLParser, time, datetime, os, xbmcvfs, sys\n'), ((6786, 6812), 're.sub', 're.sub', (['"""c"""', '"""e"""', 'LANGUAGE'], {}), "('c', 'e', LANGUAGE)\n", (6792, 6812), False, 'import urllib, urllib2, re, xbmcplugin, xbmcgui, xbmc, xbmcaddon, HTMLParser, time, datetime, os, xbmcvfs, sys\n'), ((18548, 18595), 'os.path.join', 'os.path.join', (['profile', '"""temp"""', '"""sorce_temp.txt"""'], {}), "(profile, 'temp', 'sorce_temp.txt')\n", (18560, 18595), False, 'import urllib, urllib2, re, xbmcplugin, xbmcgui, xbmc, xbmcaddon, HTMLParser, time, datetime, os, xbmcvfs, sys\n'), ((20016, 20038), 're.compile', 're.compile', (["m['expre']"], {}), "(m['expre'])\n", (20026, 20038), False, 'import urllib, urllib2, re, xbmcplugin, xbmcgui, xbmc, xbmcaddon, HTMLParser, time, datetime, os, xbmcvfs, sys\n'), ((4163, 4174), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (4171, 4174), False, 'import urllib, urllib2, re, xbmcplugin, xbmcgui, xbmc, xbmcaddon, HTMLParser, time, datetime, os, xbmcvfs, sys\n'), ((4560, 4594), 'os.path.join', 'os.path.join', (['pastaperfil', '"""Lists"""'], {}), "(pastaperfil, 'Lists')\n", (4572, 4594), False, 'import urllib, urllib2, re, xbmcplugin, xbmcgui, xbmc, xbmcaddon, HTMLParser, time, datetime, os, xbmcvfs, sys\n'), ((18750, 18797), 'os.path.join', 'os.path.join', (['profile', '"""temp"""', '"""sorce_temp.txt"""'], {}), "(profile, 'temp', 'sorce_temp.txt')\n", (18762, 18797), False, 'import urllib, urllib2, re, xbmcplugin, xbmcgui, xbmc, xbmcaddon, HTMLParser, time, datetime, os, xbmcvfs, sys\n'), ((16314, 16342), 're.compile', 're.compile', (['"""&name=(.+?)\\\\)"""'], {}), "('&name=(.+?)\\\\)')\n", (16324, 16342), False, 'import urllib, urllib2, re, xbmcplugin, xbmcgui, xbmc, xbmcaddon, HTMLParser, time, datetime, os, xbmcvfs, sys\n'), ((18654, 18701), 'os.path.join', 'os.path.join', (['profile', '"""temp"""', '"""sorce_temp.txt"""'], {}), "(profile, 'temp', 'sorce_temp.txt')\n", (18666, 18701), False, 'import urllib, urllib2, re, xbmcplugin, 
xbmcgui, xbmc, xbmcaddon, HTMLParser, time, datetime, os, xbmcvfs, sys\n')] |
#!/usr/bin/env python
# ================================================================
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# ================================================================
import sys
import json
import TE
TE.Net.setAppTokenFromEnvName("TX_ACCESS_TOKEN")
postParams = {
"descriptor_id": "4036655176350945", # ID of the descriptor to be updated
"reactions": "INGESTED,IN_REVIEW",
}
showURLs = False
dryRun = False
validationErrorMessage, serverSideError, responseBody = TE.Net.updateThreatDescriptor(
postParams, showURLs, dryRun
)
if validationErrorMessage != None:
sys.stderr.write(validationErrorMessage + "\n")
sys.exit(1)
if serverSideError != None:
sys.stderr.write(str(serverSideError) + "\n")
sys.stderr.write(json.dumps(responseBody) + "\n")
sys.exit(1)
print(json.dumps(responseBody))
| [
"json.dumps",
"TE.Net.updateThreatDescriptor",
"sys.stderr.write",
"sys.exit",
"TE.Net.setAppTokenFromEnvName"
]
| [((263, 311), 'TE.Net.setAppTokenFromEnvName', 'TE.Net.setAppTokenFromEnvName', (['"""TX_ACCESS_TOKEN"""'], {}), "('TX_ACCESS_TOKEN')\n", (292, 311), False, 'import TE\n'), ((537, 596), 'TE.Net.updateThreatDescriptor', 'TE.Net.updateThreatDescriptor', (['postParams', 'showURLs', 'dryRun'], {}), '(postParams, showURLs, dryRun)\n', (566, 596), False, 'import TE\n'), ((643, 690), 'sys.stderr.write', 'sys.stderr.write', (["(validationErrorMessage + '\\n')"], {}), "(validationErrorMessage + '\\n')\n", (659, 690), False, 'import sys\n'), ((695, 706), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (703, 706), False, 'import sys\n'), ((844, 855), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (852, 855), False, 'import sys\n'), ((863, 887), 'json.dumps', 'json.dumps', (['responseBody'], {}), '(responseBody)\n', (873, 887), False, 'import json\n'), ((807, 831), 'json.dumps', 'json.dumps', (['responseBody'], {}), '(responseBody)\n', (817, 831), False, 'import json\n')] |
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for backend.api.shelf_api."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import mock
from protorpc import message_types
from google.appengine.api import search
import endpoints
from loaner.web_app.backend.api import root_api # pylint: disable=unused-import
from loaner.web_app.backend.api import shelf_api
from loaner.web_app.backend.api.messages import shared_messages
from loaner.web_app.backend.api.messages import shelf_messages
from loaner.web_app.backend.models import device_model
from loaner.web_app.backend.models import shelf_model # pylint: disable=unused-import
from loaner.web_app.backend.testing import loanertest
class ShelfApiTest(parameterized.TestCase, loanertest.EndpointsTestCase):
"""Test for the Shelf API."""
def setUp(self):
super(ShelfApiTest, self).setUp()
self.patcher_directory = mock.patch(
'__main__.device_model.directory.DirectoryApiClient')
self.mock_directoryclass = self.patcher_directory.start()
self.addCleanup(self.patcher_directory.stop)
self.service = shelf_api.ShelfApi()
self.login_admin_endpoints_user()
self.patcher_xsrf = mock.patch(
'__main__.shelf_api.root_api.Service.check_xsrf_token')
self.shelf = shelf_model.Shelf.enroll(
user_email=loanertest.USER_EMAIL, location='NYC', capacity=10,
friendly_name='GnG', latitude=40.6892534, longitude=-74.0466891,
altitude=1.0)
shelf1 = shelf_model.Shelf.enroll(
user_email=loanertest.USER_EMAIL, location='MTV', capacity=20)
shelf2 = shelf_model.Shelf.enroll(
user_email=loanertest.USER_EMAIL, location='SAO', capacity=10)
self.disabled_shelf = shelf_model.Shelf.enroll(
user_email=loanertest.USER_EMAIL, location='SVL', capacity=10,
friendly_name='Bay')
self.disabled_shelf.disable(loanertest.USER_EMAIL)
self.shelf_locations = [
self.shelf.location, shelf1.location, shelf2.location,
self.disabled_shelf.location]
self.device1_key = device_model.Device(
serial_number='12345',
enrolled=True,
device_model='HP Chromebook 13 G1',
current_ou='/',
chrome_device_id='unique_id_1',
damaged=False,
).put()
self.device2_key = device_model.Device(
serial_number='54321',
enrolled=True,
device_model='HP Chromebook 13 G1',
current_ou='/',
chrome_device_id='unique_id_2',
damaged=False,
).put()
self.device3_key = device_model.Device(
serial_number='67890',
enrolled=True,
shelf=self.shelf.key,
device_model='HP Chromebook 13 G1',
current_ou='/',
chrome_device_id='unique_id_3',
damaged=False,
).put()
self.device4_key = device_model.Device(
serial_number='ABC123',
enrolled=True,
shelf=self.shelf.key,
device_model='HP Chromebook 13 G1',
current_ou='/',
chrome_device_id='unique_id_4',
damaged=False,
).put()
self.device_identifiers = [
self.device1_key.get().serial_number,
self.device2_key.get().serial_number,
self.device3_key.get().serial_number]
def tearDown(self):
super(ShelfApiTest, self).tearDown()
self.service = None
@mock.patch('__main__.root_api.Service.check_xsrf_token')
@mock.patch('__main__.shelf_model.Shelf.enroll')
def test_enroll(self, mock_enroll, mock_xsrf_token):
"""Test Enroll with mock methods."""
request = shelf_messages.EnrollShelfRequest(
location='nyc', capacity=100, friendly_name='test', latitude=12.5,
longitude=12.5, altitude=2.0, responsible_for_audit='precise',
audit_interval_override=33, audit_notification_enabled=True)
response = self.service.enroll(request)
self.assertEqual(mock_xsrf_token.call_count, 1)
self.assertIsInstance(response, message_types.VoidMessage)
def test_enroll_bad_request(self):
request = shelf_messages.EnrollShelfRequest(capacity=10)
with self.assertRaisesRegexp(
shelf_api.endpoints.BadRequestException,
'Entity has uninitialized properties'):
self.service.enroll(request)
request = shelf_messages.EnrollShelfRequest(
location='nyc', capacity=10, latitude=12.5)
with self.assertRaisesRegexp(
shelf_api.endpoints.BadRequestException,
shelf_model._LAT_LONG_MSG):
self.service.enroll(request)
@mock.patch('__main__.root_api.Service.check_xsrf_token')
def test_get_by_location(self, mock_xsrf_token):
request = shelf_messages.ShelfRequest(location='NYC')
response = self.service.get(request)
self.assertEqual(mock_xsrf_token.call_count, 1)
self.assertEqual(self.shelf.location, response.location)
self.assertEqual(self.shelf.friendly_name, response.friendly_name)
def test_disable_by_location(self):
request = shelf_messages.ShelfRequest(location='NYC')
self.assertTrue(self.shelf.enabled)
response = self.service.disable(request)
self.assertFalse(self.shelf.enabled)
self.assertIsInstance(response, message_types.VoidMessage)
@mock.patch('__main__.root_api.Service.check_xsrf_token')
def test_update_using_location(self, mock_xsrf_token):
request = shelf_messages.UpdateShelfRequest(
shelf_request=shelf_messages.ShelfRequest(location='NYC'),
location='NYC-9th')
response = self.service.update(request)
self.assertEqual(mock_xsrf_token.call_count, 1)
self.assertEqual(self.shelf.location, 'NYC-9th')
shelf = shelf_model.Shelf.get(friendly_name='GnG')
self.assertEqual(shelf.location, 'NYC-9th')
self.assertIsInstance(response, message_types.VoidMessage)
@parameterized.parameters(
(shelf_messages.Shelf(capacity=10), 2,),
(shelf_messages.Shelf(enabled=False), 1,),
(shelf_messages.Shelf(
query=shared_messages.SearchRequest(
query_string='enabled:True capacity:10')), 2,),
(shelf_messages.Shelf(
query=shared_messages.SearchRequest(
query_string='enabled:False')), 1,))
@mock.patch('__main__.root_api.Service.check_xsrf_token')
def test_list_shelves(self, request, response_length, mock_xsrf_token):
response = self.service.list_shelves(request)
self.assertEqual(mock_xsrf_token.call_count, 1)
self.assertEqual(response_length, len(response.shelves))
def test_list_shelves_invalid_page_size(self):
with self.assertRaises(endpoints.BadRequestException):
request = shelf_messages.Shelf(page_size=0)
self.service.list_shelves(request)
def test_list_shelves_with_search_constraints(self):
expressions = shared_messages.SearchExpression(expression='location')
expected_response = shelf_messages.ListShelfResponse(
shelves=[shelf_messages.Shelf(
location=self.shelf.location,
shelf_request=shelf_messages.ShelfRequest(
location=self.shelf.location,
urlsafe_key=self.shelf.key.urlsafe()))],
total_results=1, total_pages=1)
request = shelf_messages.Shelf(
query=shared_messages.SearchRequest(
query_string='location:NYC',
expressions=[expressions],
returned_fields=['location']))
response = self.service.list_shelves(request)
self.assertEqual(response, expected_response)
def test_list_shelves_with_offset(self):
previouse_shelf_locations = []
request = shelf_messages.Shelf(enabled=True, page_size=1, page_number=1)
response = self.service.list_shelves(request)
self.assertEqual(len(response.shelves), 1)
previouse_shelf_locations.append(response.shelves[0].location)
# Get next page results and make sure it's not the same as last.
request = shelf_messages.Shelf(enabled=True, page_size=1, page_number=2)
response = self.service.list_shelves(request)
self.assertEqual(len(response.shelves), 1)
self.assertNotIn(response.shelves[0], previouse_shelf_locations)
previouse_shelf_locations.append(response.shelves[0].location)
# Get next page results and make sure it's not the same as last 2.
request = shelf_messages.Shelf(enabled=True, page_size=1, page_number=3)
response = self.service.list_shelves(request)
self.assertEqual(len(response.shelves), 1)
self.assertNotIn(response.shelves[0], previouse_shelf_locations)
previouse_shelf_locations.append(response.shelves[0].location)
@mock.patch('__main__.root_api.Service.check_xsrf_token')
@mock.patch('__main__.shelf_api.logging.info')
def test_audit_using_shelf_location(self, mock_logging, mock_xsrf_token):
request = shelf_messages.ShelfAuditRequest(
shelf_request=shelf_messages.ShelfRequest(location='NYC'),
device_identifiers=self.device_identifiers)
response = self.service.audit(request)
self.assertEqual(mock_xsrf_token.call_count, 1)
mock_logging.assert_called()
for identifier in self.device_identifiers:
datastore_device = device_model.Device.get(serial_number=identifier)
self.assertEqual(datastore_device.shelf.get().location, 'NYC')
self.assertFalse(self.shelf.audit_requested)
self.assertEqual(self.shelf.last_audit_by, loanertest.SUPER_ADMIN_EMAIL)
self.assertIsInstance(response, message_types.VoidMessage)
def test_audit_invalid_device(self):
request = shelf_messages.ShelfAuditRequest(
shelf_request=shelf_messages.ShelfRequest(location='NYC'),
device_identifiers=['Invalid'])
with self.assertRaisesRegexp(
endpoints.NotFoundException,
shelf_api._DEVICE_DOES_NOT_EXIST_MSG % 'Invalid'):
self.service.audit(request)
@mock.patch.object(device_model.Device, 'search')
@mock.patch.object(shelf_api, 'get_shelf', autospec=True)
def test_audit_remove_devices(
self, mock_get_shelf, mock_model_device_search):
shelf = self.device2_key.get()
shelf.shelf = self.shelf.key
shelf.put()
mock_model_device_search.return_value = (
search.SearchResults(
results=[
search.ScoredDocument(
doc_id=self.device2_key.urlsafe()),
search.ScoredDocument(
doc_id=self.device3_key.urlsafe()),
search.ScoredDocument(
doc_id=self.device4_key.urlsafe())],
number_found=3))
mock_get_shelf.return_value = self.shelf
request = shelf_messages.ShelfAuditRequest(
shelf_request=shelf_messages.ShelfRequest(location=self.shelf.location),
device_identifiers=[self.device3_key.get().serial_number])
self.service.audit(request)
self.assertEqual(self.device3_key.get().shelf, self.shelf.key)
self.assertIsNone(self.device2_key.get().shelf)
self.assertIsNone(self.device4_key.get().shelf)
def test_get_shelf_urlsafe_key(self):
"""Test getting a shelf using the urlsafe key."""
request = shelf_messages.ShelfRequest(urlsafe_key=self.shelf.key.urlsafe())
shelf = shelf_api.get_shelf(request)
self.assertEqual(shelf, self.shelf)
def test_get_shelf_using_location(self):
"""Test getting a shelf using the location."""
request = shelf_messages.ShelfRequest(location=self.shelf.location)
shelf = shelf_api.get_shelf(request)
self.assertEqual(shelf, self.shelf)
def test_get_shelf_using_location_error(self):
"""Test getting a shelf with an invalid location."""
request = shelf_messages.ShelfRequest(location='Not_Valid')
with self.assertRaisesRegexp(
endpoints.NotFoundException,
shelf_api._SHELF_DOES_NOT_EXIST_MSG % request.location):
shelf_api.get_shelf(request)
if __name__ == '__main__':
loanertest.main()
| [
"loaner.web_app.backend.models.shelf_model.Shelf.enroll",
"mock.patch",
"loaner.web_app.backend.models.device_model.Device.get",
"loaner.web_app.backend.api.messages.shared_messages.SearchExpression",
"loaner.web_app.backend.testing.loanertest.main",
"loaner.web_app.backend.api.messages.shelf_messages.ShelfRequest",
"loaner.web_app.backend.models.device_model.Device",
"loaner.web_app.backend.api.messages.shelf_messages.Shelf",
"mock.patch.object",
"loaner.web_app.backend.api.shelf_api.ShelfApi",
"loaner.web_app.backend.api.shelf_api.get_shelf",
"loaner.web_app.backend.api.messages.shelf_messages.EnrollShelfRequest",
"loaner.web_app.backend.api.messages.shared_messages.SearchRequest",
"loaner.web_app.backend.models.shelf_model.Shelf.get"
]
| [((3959, 4015), 'mock.patch', 'mock.patch', (['"""__main__.root_api.Service.check_xsrf_token"""'], {}), "('__main__.root_api.Service.check_xsrf_token')\n", (3969, 4015), False, 'import mock\n'), ((4019, 4066), 'mock.patch', 'mock.patch', (['"""__main__.shelf_model.Shelf.enroll"""'], {}), "('__main__.shelf_model.Shelf.enroll')\n", (4029, 4066), False, 'import mock\n'), ((5110, 5166), 'mock.patch', 'mock.patch', (['"""__main__.root_api.Service.check_xsrf_token"""'], {}), "('__main__.root_api.Service.check_xsrf_token')\n", (5120, 5166), False, 'import mock\n'), ((5791, 5847), 'mock.patch', 'mock.patch', (['"""__main__.root_api.Service.check_xsrf_token"""'], {}), "('__main__.root_api.Service.check_xsrf_token')\n", (5801, 5847), False, 'import mock\n'), ((6758, 6814), 'mock.patch', 'mock.patch', (['"""__main__.root_api.Service.check_xsrf_token"""'], {}), "('__main__.root_api.Service.check_xsrf_token')\n", (6768, 6814), False, 'import mock\n'), ((9109, 9165), 'mock.patch', 'mock.patch', (['"""__main__.root_api.Service.check_xsrf_token"""'], {}), "('__main__.root_api.Service.check_xsrf_token')\n", (9119, 9165), False, 'import mock\n'), ((9169, 9214), 'mock.patch', 'mock.patch', (['"""__main__.shelf_api.logging.info"""'], {}), "('__main__.shelf_api.logging.info')\n", (9179, 9214), False, 'import mock\n'), ((10329, 10377), 'mock.patch.object', 'mock.patch.object', (['device_model.Device', '"""search"""'], {}), "(device_model.Device, 'search')\n", (10346, 10377), False, 'import mock\n'), ((10381, 10437), 'mock.patch.object', 'mock.patch.object', (['shelf_api', '"""get_shelf"""'], {'autospec': '(True)'}), "(shelf_api, 'get_shelf', autospec=True)\n", (10398, 10437), False, 'import mock\n'), ((12344, 12361), 'loaner.web_app.backend.testing.loanertest.main', 'loanertest.main', ([], {}), '()\n', (12359, 12361), False, 'from loaner.web_app.backend.testing import loanertest\n'), ((1542, 1606), 'mock.patch', 'mock.patch', (['"""__main__.device_model.directory.DirectoryApiClient"""'], {}), "('__main__.device_model.directory.DirectoryApiClient')\n", (1552, 1606), False, 'import mock\n'), ((1746, 1766), 'loaner.web_app.backend.api.shelf_api.ShelfApi', 'shelf_api.ShelfApi', ([], {}), '()\n', (1764, 1766), False, 'from loaner.web_app.backend.api import shelf_api\n'), ((1829, 1895), 'mock.patch', 'mock.patch', (['"""__main__.shelf_api.root_api.Service.check_xsrf_token"""'], {}), "('__main__.shelf_api.root_api.Service.check_xsrf_token')\n", (1839, 1895), False, 'import mock\n'), ((1922, 2097), 'loaner.web_app.backend.models.shelf_model.Shelf.enroll', 'shelf_model.Shelf.enroll', ([], {'user_email': 'loanertest.USER_EMAIL', 'location': '"""NYC"""', 'capacity': '(10)', 'friendly_name': '"""GnG"""', 'latitude': '(40.6892534)', 'longitude': '(-74.0466891)', 'altitude': '(1.0)'}), "(user_email=loanertest.USER_EMAIL, location='NYC',\n capacity=10, friendly_name='GnG', latitude=40.6892534, longitude=-\n 74.0466891, altitude=1.0)\n", (1946, 2097), False, 'from loaner.web_app.backend.models import shelf_model\n'), ((2127, 2218), 'loaner.web_app.backend.models.shelf_model.Shelf.enroll', 'shelf_model.Shelf.enroll', ([], {'user_email': 'loanertest.USER_EMAIL', 'location': '"""MTV"""', 'capacity': '(20)'}), "(user_email=loanertest.USER_EMAIL, location='MTV',\n capacity=20)\n", (2151, 2218), False, 'from loaner.web_app.backend.models import shelf_model\n'), ((2237, 2328), 'loaner.web_app.backend.models.shelf_model.Shelf.enroll', 'shelf_model.Shelf.enroll', ([], {'user_email': 'loanertest.USER_EMAIL', 'location': '"""SAO"""', 
'capacity': '(10)'}), "(user_email=loanertest.USER_EMAIL, location='SAO',\n capacity=10)\n", (2261, 2328), False, 'from loaner.web_app.backend.models import shelf_model\n'), ((2360, 2472), 'loaner.web_app.backend.models.shelf_model.Shelf.enroll', 'shelf_model.Shelf.enroll', ([], {'user_email': 'loanertest.USER_EMAIL', 'location': '"""SVL"""', 'capacity': '(10)', 'friendly_name': '"""Bay"""'}), "(user_email=loanertest.USER_EMAIL, location='SVL',\n capacity=10, friendly_name='Bay')\n", (2384, 2472), False, 'from loaner.web_app.backend.models import shelf_model\n'), ((4177, 4413), 'loaner.web_app.backend.api.messages.shelf_messages.EnrollShelfRequest', 'shelf_messages.EnrollShelfRequest', ([], {'location': '"""nyc"""', 'capacity': '(100)', 'friendly_name': '"""test"""', 'latitude': '(12.5)', 'longitude': '(12.5)', 'altitude': '(2.0)', 'responsible_for_audit': '"""precise"""', 'audit_interval_override': '(33)', 'audit_notification_enabled': '(True)'}), "(location='nyc', capacity=100,\n friendly_name='test', latitude=12.5, longitude=12.5, altitude=2.0,\n responsible_for_audit='precise', audit_interval_override=33,\n audit_notification_enabled=True)\n", (4210, 4413), False, 'from loaner.web_app.backend.api.messages import shelf_messages\n'), ((4638, 4684), 'loaner.web_app.backend.api.messages.shelf_messages.EnrollShelfRequest', 'shelf_messages.EnrollShelfRequest', ([], {'capacity': '(10)'}), '(capacity=10)\n', (4671, 4684), False, 'from loaner.web_app.backend.api.messages import shelf_messages\n'), ((4865, 4942), 'loaner.web_app.backend.api.messages.shelf_messages.EnrollShelfRequest', 'shelf_messages.EnrollShelfRequest', ([], {'location': '"""nyc"""', 'capacity': '(10)', 'latitude': '(12.5)'}), "(location='nyc', capacity=10, latitude=12.5)\n", (4898, 4942), False, 'from loaner.web_app.backend.api.messages import shelf_messages\n'), ((5232, 5275), 'loaner.web_app.backend.api.messages.shelf_messages.ShelfRequest', 'shelf_messages.ShelfRequest', ([], {'location': '"""NYC"""'}), "(location='NYC')\n", (5259, 5275), False, 'from loaner.web_app.backend.api.messages import shelf_messages\n'), ((5554, 5597), 'loaner.web_app.backend.api.messages.shelf_messages.ShelfRequest', 'shelf_messages.ShelfRequest', ([], {'location': '"""NYC"""'}), "(location='NYC')\n", (5581, 5597), False, 'from loaner.web_app.backend.api.messages import shelf_messages\n'), ((6210, 6252), 'loaner.web_app.backend.models.shelf_model.Shelf.get', 'shelf_model.Shelf.get', ([], {'friendly_name': '"""GnG"""'}), "(friendly_name='GnG')\n", (6231, 6252), False, 'from loaner.web_app.backend.models import shelf_model\n'), ((7326, 7381), 'loaner.web_app.backend.api.messages.shared_messages.SearchExpression', 'shared_messages.SearchExpression', ([], {'expression': '"""location"""'}), "(expression='location')\n", (7358, 7381), False, 'from loaner.web_app.backend.api.messages import shared_messages\n'), ((8116, 8178), 'loaner.web_app.backend.api.messages.shelf_messages.Shelf', 'shelf_messages.Shelf', ([], {'enabled': '(True)', 'page_size': '(1)', 'page_number': '(1)'}), '(enabled=True, page_size=1, page_number=1)\n', (8136, 8178), False, 'from loaner.web_app.backend.api.messages import shelf_messages\n'), ((8427, 8489), 'loaner.web_app.backend.api.messages.shelf_messages.Shelf', 'shelf_messages.Shelf', ([], {'enabled': '(True)', 'page_size': '(1)', 'page_number': '(2)'}), '(enabled=True, page_size=1, page_number=2)\n', (8447, 8489), False, 'from loaner.web_app.backend.api.messages import shelf_messages\n'), ((8809, 8871), 
'loaner.web_app.backend.api.messages.shelf_messages.Shelf', 'shelf_messages.Shelf', ([], {'enabled': '(True)', 'page_size': '(1)', 'page_number': '(3)'}), '(enabled=True, page_size=1, page_number=3)\n', (8829, 8871), False, 'from loaner.web_app.backend.api.messages import shelf_messages\n'), ((11654, 11682), 'loaner.web_app.backend.api.shelf_api.get_shelf', 'shelf_api.get_shelf', (['request'], {}), '(request)\n', (11673, 11682), False, 'from loaner.web_app.backend.api import shelf_api\n'), ((11832, 11889), 'loaner.web_app.backend.api.messages.shelf_messages.ShelfRequest', 'shelf_messages.ShelfRequest', ([], {'location': 'self.shelf.location'}), '(location=self.shelf.location)\n', (11859, 11889), False, 'from loaner.web_app.backend.api.messages import shelf_messages\n'), ((11902, 11930), 'loaner.web_app.backend.api.shelf_api.get_shelf', 'shelf_api.get_shelf', (['request'], {}), '(request)\n', (11921, 11930), False, 'from loaner.web_app.backend.api import shelf_api\n'), ((12092, 12141), 'loaner.web_app.backend.api.messages.shelf_messages.ShelfRequest', 'shelf_messages.ShelfRequest', ([], {'location': '"""Not_Valid"""'}), "(location='Not_Valid')\n", (12119, 12141), False, 'from loaner.web_app.backend.api.messages import shelf_messages\n'), ((6401, 6434), 'loaner.web_app.backend.api.messages.shelf_messages.Shelf', 'shelf_messages.Shelf', ([], {'capacity': '(10)'}), '(capacity=10)\n', (6421, 6434), False, 'from loaner.web_app.backend.api.messages import shelf_messages\n'), ((6448, 6483), 'loaner.web_app.backend.api.messages.shelf_messages.Shelf', 'shelf_messages.Shelf', ([], {'enabled': '(False)'}), '(enabled=False)\n', (6468, 6483), False, 'from loaner.web_app.backend.api.messages import shelf_messages\n'), ((7177, 7210), 'loaner.web_app.backend.api.messages.shelf_messages.Shelf', 'shelf_messages.Shelf', ([], {'page_size': '(0)'}), '(page_size=0)\n', (7197, 7210), False, 'from loaner.web_app.backend.api.messages import shelf_messages\n'), ((9658, 9707), 'loaner.web_app.backend.models.device_model.Device.get', 'device_model.Device.get', ([], {'serial_number': 'identifier'}), '(serial_number=identifier)\n', (9681, 9707), False, 'from loaner.web_app.backend.models import device_model\n'), ((12284, 12312), 'loaner.web_app.backend.api.shelf_api.get_shelf', 'shelf_api.get_shelf', (['request'], {}), '(request)\n', (12303, 12312), False, 'from loaner.web_app.backend.api import shelf_api\n'), ((2695, 2860), 'loaner.web_app.backend.models.device_model.Device', 'device_model.Device', ([], {'serial_number': '"""12345"""', 'enrolled': '(True)', 'device_model': '"""HP Chromebook 13 G1"""', 'current_ou': '"""/"""', 'chrome_device_id': '"""unique_id_1"""', 'damaged': '(False)'}), "(serial_number='12345', enrolled=True, device_model=\n 'HP Chromebook 13 G1', current_ou='/', chrome_device_id='unique_id_1',\n damaged=False)\n", (2714, 2860), False, 'from loaner.web_app.backend.models import device_model\n'), ((2936, 3101), 'loaner.web_app.backend.models.device_model.Device', 'device_model.Device', ([], {'serial_number': '"""54321"""', 'enrolled': '(True)', 'device_model': '"""HP Chromebook 13 G1"""', 'current_ou': '"""/"""', 'chrome_device_id': '"""unique_id_2"""', 'damaged': '(False)'}), "(serial_number='54321', enrolled=True, device_model=\n 'HP Chromebook 13 G1', current_ou='/', chrome_device_id='unique_id_2',\n damaged=False)\n", (2955, 3101), False, 'from loaner.web_app.backend.models import device_model\n'), ((3177, 3364), 'loaner.web_app.backend.models.device_model.Device', 'device_model.Device', ([], 
{'serial_number': '"""67890"""', 'enrolled': '(True)', 'shelf': 'self.shelf.key', 'device_model': '"""HP Chromebook 13 G1"""', 'current_ou': '"""/"""', 'chrome_device_id': '"""unique_id_3"""', 'damaged': '(False)'}), "(serial_number='67890', enrolled=True, shelf=self.shelf.\n key, device_model='HP Chromebook 13 G1', current_ou='/',\n chrome_device_id='unique_id_3', damaged=False)\n", (3196, 3364), False, 'from loaner.web_app.backend.models import device_model\n'), ((3448, 3636), 'loaner.web_app.backend.models.device_model.Device', 'device_model.Device', ([], {'serial_number': '"""ABC123"""', 'enrolled': '(True)', 'shelf': 'self.shelf.key', 'device_model': '"""HP Chromebook 13 G1"""', 'current_ou': '"""/"""', 'chrome_device_id': '"""unique_id_4"""', 'damaged': '(False)'}), "(serial_number='ABC123', enrolled=True, shelf=self.shelf\n .key, device_model='HP Chromebook 13 G1', current_ou='/',\n chrome_device_id='unique_id_4', damaged=False)\n", (3467, 3636), False, 'from loaner.web_app.backend.models import device_model\n'), ((5976, 6019), 'loaner.web_app.backend.api.messages.shelf_messages.ShelfRequest', 'shelf_messages.ShelfRequest', ([], {'location': '"""NYC"""'}), "(location='NYC')\n", (6003, 6019), False, 'from loaner.web_app.backend.api.messages import shelf_messages\n'), ((7769, 7889), 'loaner.web_app.backend.api.messages.shared_messages.SearchRequest', 'shared_messages.SearchRequest', ([], {'query_string': '"""location:NYC"""', 'expressions': '[expressions]', 'returned_fields': "['location']"}), "(query_string='location:NYC', expressions=[\n expressions], returned_fields=['location'])\n", (7798, 7889), False, 'from loaner.web_app.backend.api.messages import shared_messages\n'), ((9361, 9404), 'loaner.web_app.backend.api.messages.shelf_messages.ShelfRequest', 'shelf_messages.ShelfRequest', ([], {'location': '"""NYC"""'}), "(location='NYC')\n", (9388, 9404), False, 'from loaner.web_app.backend.api.messages import shelf_messages\n'), ((10076, 10119), 'loaner.web_app.backend.api.messages.shelf_messages.ShelfRequest', 'shelf_messages.ShelfRequest', ([], {'location': '"""NYC"""'}), "(location='NYC')\n", (10103, 10119), False, 'from loaner.web_app.backend.api.messages import shelf_messages\n'), ((11138, 11195), 'loaner.web_app.backend.api.messages.shelf_messages.ShelfRequest', 'shelf_messages.ShelfRequest', ([], {'location': 'self.shelf.location'}), '(location=self.shelf.location)\n', (11165, 11195), False, 'from loaner.web_app.backend.api.messages import shelf_messages\n'), ((6535, 6605), 'loaner.web_app.backend.api.messages.shared_messages.SearchRequest', 'shared_messages.SearchRequest', ([], {'query_string': '"""enabled:True capacity:10"""'}), "(query_string='enabled:True capacity:10')\n", (6564, 6605), False, 'from loaner.web_app.backend.api.messages import shared_messages\n'), ((6673, 6732), 'loaner.web_app.backend.api.messages.shared_messages.SearchRequest', 'shared_messages.SearchRequest', ([], {'query_string': '"""enabled:False"""'}), "(query_string='enabled:False')\n", (6702, 6732), False, 'from loaner.web_app.backend.api.messages import shared_messages\n')] |
from flask import render_template, jsonify, Flask, redirect, url_for, request
from app import app
import random
import os
# import tensorflow as tf
# import numpy as np
# import sys
# import spacy
# nlp = spacy.load('en')
# sys.path.insert(0, "/content/bert_experimental")
# from bert_experimental.finetuning.text_preprocessing import build_preprocessor
# from bert_experimental.finetuning.graph_ops import load_graph
# restored_graph = load_graph("models/frozen_graph.pb")
# graph_ops = restored_graph.get_operations()
# input_op, output_op = graph_ops[0].name, graph_ops[-1].name
# x = restored_graph.get_tensor_by_name(input_op + ':0')
# y = restored_graph.get_tensor_by_name(output_op + ':0')
# preprocessor = build_preprocessor("./uncased_L-12_H-768_A-12/vocab.txt", 256)
# py_func = tf.numpy_function(preprocessor, [x], [tf.int32, tf.int32, tf.int32], name='preprocessor')
# py_func = tf.numpy_function(preprocessor, [x], [tf.int32, tf.int32, tf.int32])
# sess = tf.Session(graph=restored_graph)
# delimiter = " ||| "
@app.route('/')
def index1():
return render_template('index.html', title='Home')
@app.route('/predict', methods = ['GET', 'POST'])
def upload_file():
if request.method == 'POST':
exp_st = request.form.get('exp')
job_st = request.form.get('job')
# y_out = sess.run(y, feed_dict={
# x: pd.DataFrame([delimiter.join((exp_st, job_st ))], columns=['name'])
# })
# doc1 = nlp(exp_st)
# doc2 = nlp(job_st )
# y_out2 = doc1.similarity(doc2)
return render_template('index.html', title='Success', predictions=80, predictions_sp =75, exp=exp_st, job= job_st)
@app.route('/index')
def index():
return render_template('index.html', title='Home')
@app.route('/map')
def map():
return render_template('map.html', title='Map')
@app.route('/map/refresh', methods=['POST'])
def map_refresh():
points = [(random.uniform(48.8434100, 48.8634100),
random.uniform(2.3388000, 2.3588000))
for _ in range(random.randint(2, 9))]
return jsonify({'points': points})
@app.route('/contact')
def contact():
    return render_template('contact.html', title='Contact')
| [
"flask.render_template",
"random.uniform",
"flask.request.form.get",
"app.app.route",
"random.randint",
"flask.jsonify"
]
| [((1029, 1043), 'app.app.route', 'app.route', (['"""/"""'], {}), "('/')\n", (1038, 1043), False, 'from app import app\n'), ((1117, 1163), 'app.app.route', 'app.route', (['"""/predict"""'], {'methods': "['GET', 'POST']"}), "('/predict', methods=['GET', 'POST'])\n", (1126, 1163), False, 'from app import app\n'), ((1642, 1661), 'app.app.route', 'app.route', (['"""/index"""'], {}), "('/index')\n", (1651, 1661), False, 'from app import app\n'), ((1732, 1749), 'app.app.route', 'app.route', (['"""/map"""'], {}), "('/map')\n", (1741, 1749), False, 'from app import app\n'), ((1816, 1859), 'app.app.route', 'app.route', (['"""/map/refresh"""'], {'methods': "['POST']"}), "('/map/refresh', methods=['POST'])\n", (1825, 1859), False, 'from app import app\n'), ((2081, 2102), 'app.app.route', 'app.route', (['"""/contact"""'], {}), "('/contact')\n", (2090, 2102), False, 'from app import app\n'), ((1069, 1112), 'flask.render_template', 'render_template', (['"""index.html"""'], {'title': '"""Home"""'}), "('index.html', title='Home')\n", (1084, 1112), False, 'from flask import render_template, jsonify, Flask, redirect, url_for, request\n'), ((1531, 1640), 'flask.render_template', 'render_template', (['"""index.html"""'], {'title': '"""Success"""', 'predictions': '(80)', 'predictions_sp': '(75)', 'exp': 'exp_st', 'job': 'job_st'}), "('index.html', title='Success', predictions=80,\n predictions_sp=75, exp=exp_st, job=job_st)\n", (1546, 1640), False, 'from flask import render_template, jsonify, Flask, redirect, url_for, request\n'), ((1686, 1729), 'flask.render_template', 'render_template', (['"""index.html"""'], {'title': '"""Home"""'}), "('index.html', title='Home')\n", (1701, 1729), False, 'from flask import render_template, jsonify, Flask, redirect, url_for, request\n'), ((1772, 1812), 'flask.render_template', 'render_template', (['"""map.html"""'], {'title': '"""Map"""'}), "('map.html', title='Map')\n", (1787, 1812), False, 'from flask import render_template, jsonify, Flask, redirect, url_for, request\n'), ((2050, 2077), 'flask.jsonify', 'jsonify', (["{'points': points}"], {}), "({'points': points})\n", (2057, 2077), False, 'from flask import render_template, jsonify, Flask, redirect, url_for, request\n'), ((2129, 2177), 'flask.render_template', 'render_template', (['"""contact.html"""'], {'title': '"""Contact"""'}), "('contact.html', title='Contact')\n", (2144, 2177), False, 'from flask import render_template, jsonify, Flask, redirect, url_for, request\n'), ((1232, 1255), 'flask.request.form.get', 'request.form.get', (['"""exp"""'], {}), "('exp')\n", (1248, 1255), False, 'from flask import render_template, jsonify, Flask, redirect, url_for, request\n'), ((1271, 1294), 'flask.request.form.get', 'request.form.get', (['"""job"""'], {}), "('job')\n", (1287, 1294), False, 'from flask import render_template, jsonify, Flask, redirect, url_for, request\n'), ((1894, 1928), 'random.uniform', 'random.uniform', (['(48.84341)', '(48.86341)'], {}), '(48.84341, 48.86341)\n', (1908, 1928), False, 'import random\n'), ((1949, 1979), 'random.uniform', 'random.uniform', (['(2.3388)', '(2.3588)'], {}), '(2.3388, 2.3588)\n', (1963, 1979), False, 'import random\n'), ((2016, 2036), 'random.randint', 'random.randint', (['(2)', '(9)'], {}), '(2, 9)\n', (2030, 2036), False, 'import random\n')] |
# -*- coding: utf-8 -*-
"""
Complementary Filter
====================
Attitude quaternion obtained with gyroscope and accelerometer-magnetometer
measurements, via complementary filter.
First, the current orientation is estimated at time :math:`t`, from a previous
orientation at time :math:`t-1`, and a given angular velocity,
:math:`\\omega`, in rad/s.
This orientation is computed by numerically integrating the angular velocity
and adding it to the previous orientation, which is known as an **attitude
propagation**.
.. math::
\\begin{array}{rcl}
\\mathbf{q}_\\omega &=& \\Big(\\mathbf{I}_4 + \\frac{\\Delta t}{2}\\boldsymbol\\Omega_t\\Big)\\mathbf{q}_{t-1} \\\\
&=&
\\begin{bmatrix}
1 & -\\frac{\\Delta t}{2}\\omega_x & -\\frac{\\Delta t}{2}\\omega_y & -\\frac{\\Delta t}{2}\\omega_z \\\\
\\frac{\\Delta t}{2}\\omega_x & 1 & \\frac{\\Delta t}{2}\\omega_z & -\\frac{\\Delta t}{2}\\omega_y \\\\
\\frac{\\Delta t}{2}\\omega_y & -\\frac{\\Delta t}{2}\\omega_z & 1 & \\frac{\\Delta t}{2}\\omega_x \\\\
\\frac{\\Delta t}{2}\\omega_z & \\frac{\\Delta t}{2}\\omega_y & -\\frac{\\Delta t}{2}\\omega_x & 1
\\end{bmatrix}
\\begin{bmatrix}q_w \\\\ q_x \\\\ q_y \\\\ q_z \\end{bmatrix} \\\\
&=&
\\begin{bmatrix}
q_w - \\frac{\\Delta t}{2} \\omega_x q_x - \\frac{\\Delta t}{2} \\omega_y q_y - \\frac{\\Delta t}{2} \\omega_z q_z\\\\
q_x + \\frac{\\Delta t}{2} \\omega_x q_w - \\frac{\\Delta t}{2} \\omega_y q_z + \\frac{\\Delta t}{2} \\omega_z q_y\\\\
q_y + \\frac{\\Delta t}{2} \\omega_x q_z + \\frac{\\Delta t}{2} \\omega_y q_w - \\frac{\\Delta t}{2} \\omega_z q_x\\\\
q_z - \\frac{\\Delta t}{2} \\omega_x q_y + \\frac{\\Delta t}{2} \\omega_y q_x + \\frac{\\Delta t}{2} \\omega_z q_w
\\end{bmatrix}
\\end{array}
Secondly, the *tilt* is computed from the accelerometer measurements as:
.. math::
\\begin{array}{rcl}
\\theta &=& \\mathrm{arctan2}(a_y, a_z) \\\\
\\phi &=& \\mathrm{arctan2}\\big(-a_x, \\sqrt{a_y^2+a_z^2}\\big)
\\end{array}
Only the pitch, :math:`\\phi`, and roll, :math:`\\theta`, angles are computed,
leaving the yaw angle, :math:`\\psi` equal to zero.
If a magnetometer sample is available, the yaw angle can be computed. First
compensate the measurement using the *tilt*:
.. math::
\\begin{array}{rcl}
\\mathbf{b} &=&
\\begin{bmatrix}
\\cos\\theta & \\sin\\theta\\sin\\phi & \\sin\\theta\\cos\\phi \\\\
0 & \\cos\\phi & -\\sin\\phi \\\\
-\\sin\\theta & \\cos\\theta\\sin\\phi & \\cos\\theta\\cos\\phi
\\end{bmatrix}
\\begin{bmatrix}m_x \\\\ m_y \\\\ m_z\\end{bmatrix} \\\\
\\begin{bmatrix}b_x \\\\ b_y \\\\ b_z\\end{bmatrix} &=&
\\begin{bmatrix}
m_x\\cos\\theta + m_y\\sin\\theta\\sin\\phi + m_z\\sin\\theta\\cos\\phi \\\\
m_y\\cos\\phi - m_z\\sin\\phi \\\\
-m_x\\sin\\theta + m_y\\cos\\theta\\sin\\phi + m_z\\cos\\theta\\cos\\phi
\\end{bmatrix}
\\end{array}
Then, the yaw angle, :math:`\\psi`, is obtained as:
.. math::
\\begin{array}{rcl}
\\psi &=& \\mathrm{arctan2}(-b_y, b_x) \\\\
&=& \\mathrm{arctan2}\\big(m_z\\sin\\phi - m_y\\cos\\phi, \\; m_x\\cos\\theta + \\sin\\theta(m_y\\sin\\phi + m_z\\cos\\phi)\\big)
\\end{array}
We transform the roll-pitch-yaw angles to a quaternion representation:
.. math::
\\mathbf{q}_{am} =
\\begin{pmatrix}q_w\\\\q_x\\\\q_y\\\\q_z\\end{pmatrix} =
\\begin{pmatrix}
\\cos\\Big(\\frac{\\phi}{2}\\Big)\\cos\\Big(\\frac{\\theta}{2}\\Big)\\cos\\Big(\\frac{\\psi}{2}\\Big) + \\sin\\Big(\\frac{\\phi}{2}\\Big)\\sin\\Big(\\frac{\\theta}{2}\\Big)\\sin\\Big(\\frac{\\psi}{2}\\Big) \\\\
\\sin\\Big(\\frac{\\phi}{2}\\Big)\\cos\\Big(\\frac{\\theta}{2}\\Big)\\cos\\Big(\\frac{\\psi}{2}\\Big) - \\cos\\Big(\\frac{\\phi}{2}\\Big)\\sin\\Big(\\frac{\\theta}{2}\\Big)\\sin\\Big(\\frac{\\psi}{2}\\Big) \\\\
\\cos\\Big(\\frac{\\phi}{2}\\Big)\\sin\\Big(\\frac{\\theta}{2}\\Big)\\cos\\Big(\\frac{\\psi}{2}\\Big) + \\sin\\Big(\\frac{\\phi}{2}\\Big)\\cos\\Big(\\frac{\\theta}{2}\\Big)\\sin\\Big(\\frac{\\psi}{2}\\Big) \\\\
\\cos\\Big(\\frac{\\phi}{2}\\Big)\\cos\\Big(\\frac{\\theta}{2}\\Big)\\sin\\Big(\\frac{\\psi}{2}\\Big) - \\sin\\Big(\\frac{\\phi}{2}\\Big)\\sin\\Big(\\frac{\\theta}{2}\\Big)\\cos\\Big(\\frac{\\psi}{2}\\Big)
\\end{pmatrix}
Finally, after each orientation is estimated independently, they are fused with
the complementary filter.
.. math::
\\mathbf{q} = (1 - \\alpha) \\mathbf{q}_\\omega + \\alpha\\mathbf{q}_{am}
where :math:`\\mathbf{q}_\\omega` is the attitude estimated from the gyroscope,
:math:`\\mathbf{q}_{am}` is the attitude estimated from the accelerometer and
the magnetometer, and :math:`\\alpha` is the gain of the filter.
The filter gain must be a floating value within the range :math:`[0.0, 1.0]`.
It can be seen that when :math:`\\alpha=1`, the attitude is estimated entirely
with the accelerometer and the magnetometer. When :math:`\\alpha=0`, it is
estimated solely with the gyroscope. The values within the range decide how
much of each estimation is "blended" into the quaternion.
This is actually a simple implementation of `LERP
<https://en.wikipedia.org/wiki/Linear_interpolation>`_ commonly used to
linearly interpolate quaternions with small differences between them.
"""
import numpy as np
from ..common.orientation import ecompass
class Complementary:
"""
Complementary filter for attitude estimation as quaternion.
Parameters
----------
gyr : numpy.ndarray, default: None
N-by-3 array with measurements of angular velocity, in rad/s.
acc : numpy.ndarray, default: None
N-by-3 array with measurements of acceleration, in m/s^2.
mag : numpy.ndarray, default: None
N-by-3 array with measurements of magnetic field, in mT.
frequency : float, default: 100.0
Sampling frequency in Herz.
Dt : float, default: 0.01
Sampling step in seconds. Inverse of sampling frequency. Not required
if ``frequency`` value is given.
gain : float, default: 0.1
Filter gain.
q0 : numpy.ndarray, default: None
Initial orientation, as a versor (normalized quaternion).
Raises
------
ValueError
When dimension of input arrays ``acc``, ``gyr``, or ``mag`` are not equal.
"""
def __init__(self,
gyr: np.ndarray = None,
acc: np.ndarray = None,
mag: np.ndarray = None,
frequency: float = 100.0,
gain = 0.9,
**kwargs):
self.gyr: np.ndarray = gyr
self.acc: np.ndarray = acc
self.mag: np.ndarray = mag
self.frequency: float = frequency
self.gain: float = gain
if not(0.0 <= self.gain <= 1.0):
raise ValueError(f"Filter gain must be in the range [0, 1]. Got {self.gain}")
self.Dt: float = kwargs.get('Dt', 1.0/self.frequency)
self.q0: np.ndarray = kwargs.get('q0')
# Process of given data
if self.gyr is not None and self.acc is not None:
self.Q = self._compute_all()
def _compute_all(self) -> np.ndarray:
"""
Estimate the quaternions given all data
Attributes ``gyr``, ``acc`` and, optionally, ``mag`` must contain data.
Returns
-------
Q : numpy.ndarray
M-by-4 Array with all estimated quaternions, where M is the number
of samples.
"""
if self.acc.shape != self.gyr.shape:
raise ValueError("acc and gyr are not the same size")
num_samples = len(self.acc)
Q = np.zeros((num_samples, 4))
if self.mag is None:
self.mag = [None]*num_samples
else:
if self.mag.shape != self.gyr.shape:
raise ValueError("mag and gyr are not the same size")
Q[0] = self.am_estimation(self.acc[0], self.mag[0]) if self.q0 is None else self.q0.copy()
for t in range(1, num_samples):
Q[t] = self.update(Q[t-1], self.gyr[t], self.acc[t], self.mag[t])
return Q
def attitude_propagation(self, q: np.ndarray, omega: np.ndarray, dt: float) -> np.ndarray:
"""
Attitude propagation of the orientation.
Estimate the current orientation at time :math:`t`, from a given
orientation at time :math:`t-1` and a given angular velocity,
:math:`\\omega`, in rad/s.
It is computed by numerically integrating the angular velocity and
adding it to the previous orientation.
Parameters
----------
q : numpy.ndarray
A-priori quaternion.
omega : numpy.ndarray
Tri-axial angular velocity, in rad/s.
dt : float
Time step, in seconds, between consecutive Quaternions.
Returns
-------
q_omega : numpy.ndarray
Estimated orientation, as quaternion.
"""
w = -0.5*dt*omega
A = np.array([
[1.0, -w[0], -w[1], -w[2]],
[w[0], 1.0, w[2], -w[1]],
[w[1], -w[2], 1.0, w[0]],
[w[2], w[1], -w[0], 1.0]])
q_omega = A @ q
return q_omega / np.linalg.norm(q_omega)
def am_estimation(self, acc: np.ndarray, mag: np.ndarray = None) -> np.ndarray:
"""
Attitude estimation from an Accelerometer-Magnetometer architecture.
Parameters
----------
acc : numpy.ndarray
Tri-axial sample of the accelerometer.
mag : numpy.ndarray, default: None
Tri-axial sample of the magnetometer.
Returns
-------
q_am : numpy.ndarray
Estimated attitude.
"""
return ecompass(acc, mag, frame='NED', representation='quaternion')
def update(self, q: np.ndarray, gyr: np.ndarray, acc: np.ndarray, mag: np.ndarray = None, dt: float = None) -> np.ndarray:
"""
Attitude Estimation from given measurements and previous orientation.
The new orientation is first estimated with the angular velocity, then
another orientation is computed using the accelerometers and
magnetometers. The magnetometer is optional.
Each orientation is estimated independently and fused with a
complementary filter.
.. math::
\\mathbf{q} = (1 - \\alpha) \\mathbf{q}_\\omega + \\alpha\\mathbf{q}_{am}
Parameters
----------
q : numpy.ndarray
A-priori quaternion.
gyr : numpy.ndarray
Sample of tri-axial Gyroscope in rad/s.
acc : numpy.ndarray
Sample of tri-axial Accelerometer in m/s^2.
mag : numpy.ndarray, default: None
Sample of tri-axial Magnetometer in uT.
dt : float, default: None
Time step, in seconds, between consecutive Quaternions.
Returns
-------
q : numpy.ndarray
Estimated quaternion.
"""
dt = self.Dt if dt is None else dt
if gyr is None or not np.linalg.norm(gyr) > 0:
return q
q_omega = self.attitude_propagation(q, gyr, dt)
q_am = self.am_estimation(acc, mag)
# Complementary Estimation
if np.linalg.norm(q_omega + q_am) < np.sqrt(2):
q = (1.0 - self.gain)*q_omega - self.gain*q_am
else:
q = (1.0 - self.gain)*q_omega + self.gain*q_am
return q/np.linalg.norm(q)
| [
"numpy.array",
"numpy.zeros",
"numpy.sqrt",
"numpy.linalg.norm"
]
| [((7766, 7792), 'numpy.zeros', 'np.zeros', (['(num_samples, 4)'], {}), '((num_samples, 4))\n', (7774, 7792), True, 'import numpy as np\n'), ((9157, 9278), 'numpy.array', 'np.array', (['[[1.0, -w[0], -w[1], -w[2]], [w[0], 1.0, w[2], -w[1]], [w[1], -w[2], 1.0, w\n [0]], [w[2], w[1], -w[0], 1.0]]'], {}), '([[1.0, -w[0], -w[1], -w[2]], [w[0], 1.0, w[2], -w[1]], [w[1], -w[2\n ], 1.0, w[0]], [w[2], w[1], -w[0], 1.0]])\n', (9165, 9278), True, 'import numpy as np\n'), ((9388, 9411), 'numpy.linalg.norm', 'np.linalg.norm', (['q_omega'], {}), '(q_omega)\n', (9402, 9411), True, 'import numpy as np\n'), ((11493, 11523), 'numpy.linalg.norm', 'np.linalg.norm', (['(q_omega + q_am)'], {}), '(q_omega + q_am)\n', (11507, 11523), True, 'import numpy as np\n'), ((11526, 11536), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (11533, 11536), True, 'import numpy as np\n'), ((11691, 11708), 'numpy.linalg.norm', 'np.linalg.norm', (['q'], {}), '(q)\n', (11705, 11708), True, 'import numpy as np\n'), ((11296, 11315), 'numpy.linalg.norm', 'np.linalg.norm', (['gyr'], {}), '(gyr)\n', (11310, 11315), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
# Copyright (c) 2016 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for decapod_common.models.execution."""
import pytest
from decapod_common.models import execution
def test_create(new_execution, new_pcmodel, pymongo_connection):
db_model = pymongo_connection.db.execution.find_one(
{"_id": new_execution._id}
)
assert db_model
assert new_execution.model_id == db_model["model_id"]
assert new_execution.version == db_model["version"]
assert new_execution.time_created == db_model["time_created"]
assert new_execution.time_deleted == db_model["time_deleted"]
assert new_execution.initiator_id == db_model["initiator_id"]
assert new_execution.playbook_configuration_model_id == \
db_model["pc_model_id"]
assert new_execution.playbook_configuration_version == \
db_model["pc_version"]
assert new_execution.state.name == db_model["state"]
assert new_execution.state == execution.ExecutionState.created
assert new_execution.playbook_configuration_model_id == \
new_pcmodel.model_id
assert new_execution.playbook_configuration_version == \
new_pcmodel.version
@pytest.mark.parametrize("state", execution.ExecutionState)
def test_change_state_ok(state, new_execution):
new_execution.state = state
new_execution.save()
assert new_execution.state == state
@pytest.mark.parametrize("state", (
"", "changed", "started", 0, None, -1.0, [], {}, object(), set()
))
def test_change_state_fail(state, new_execution):
with pytest.raises(ValueError):
new_execution.state = state
@pytest.mark.parametrize("state", execution.ExecutionState)
def test_api_response(state, new_pcmodel, new_execution):
new_execution.state = state
new_execution.save()
assert new_execution.make_api_structure() == {
"id": new_execution.model_id,
"initiator_id": new_execution.initiator_id,
"time_deleted": new_execution.time_deleted,
"time_updated": new_execution.time_created,
"model": execution.ExecutionModel.MODEL_NAME,
"version": 2,
"data": {
"playbook_configuration": {
"id": new_pcmodel.model_id,
"version": new_pcmodel.version,
"playbook_name": new_pcmodel.playbook_id
},
"state": state.name
}
}
def test_getting_logfile(new_execution, execution_log_storage):
new_execution.logfile
execution_log_storage.get.assert_called_once_with(new_execution.model_id)
def test_create_logfile(new_execution, execution_log_storage):
new_execution.new_logfile.write("1")
execution_log_storage.delete.assert_called_once_with(
new_execution.model_id
)
execution_log_storage.new_file.assert_called_once_with(
new_execution.model_id,
filename="{0}.log".format(new_execution.model_id),
content_type="text/plain"
)
execution_log_storage.new_file().write.assert_called_once_with("1")
| [
"pytest.mark.parametrize",
"pytest.raises"
]
| [((1708, 1766), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""state"""', 'execution.ExecutionState'], {}), "('state', execution.ExecutionState)\n", (1731, 1766), False, 'import pytest\n'), ((2148, 2206), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""state"""', 'execution.ExecutionState'], {}), "('state', execution.ExecutionState)\n", (2171, 2206), False, 'import pytest\n'), ((2082, 2107), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2095, 2107), False, 'import pytest\n')] |
from django.contrib.auth.models import User
from django.core.validators import MaxValueValidator, MinValueValidator
from django.db import models
from django.utils.translation import ugettext_lazy as _
class Task(models.Model):
CLOSE = 'cl'
CANCEL = 'ca'
LATER = 'la'
UNDEFINED = 'un'
CHOICES = (
(UNDEFINED, _("Неизвестно")),
(CLOSE, _("Завершить")),
(CANCEL, _("Отменить")),
(LATER, _("Отложить")),
)
title = models.CharField(_("Заголовок"), max_length=50)
description = models.TextField(_("Описание"))
executor = models.ForeignKey(User, verbose_name=_("Исполнитель"), on_delete=models.CASCADE)
status = models.CharField(_("Статус"), choices=CHOICES, default=UNDEFINED, max_length=2)
deadline = models.DateTimeField(_("Дедлайн"))
priority = models.IntegerField(_("Приоритет"), default=1, validators=[MinValueValidator(1), MaxValueValidator(3)])
changed = models.DateTimeField(_("Дата последнего изменения"), auto_now=True)
created = models.DateTimeField(_("Дата создания"), auto_now_add=True)
@property
def text_status(self):
choices = dict(self.CHOICES)
return choices[self.status]
@property
def text_deadline(self):
return self.deadline.strftime("%d.%m.%Y %H:%M")
class Comment(models.Model):
task = models.ForeignKey(Task, related_name="comments", on_delete=models.CASCADE)
creator = models.ForeignKey(User, on_delete=models.SET_NULL, null=True)
text = models.TextField(_('Комментарий'))
created = models.DateTimeField(_("Дата создания"), auto_now_add=True)
| [
"django.core.validators.MinValueValidator",
"django.utils.translation.ugettext_lazy",
"django.core.validators.MaxValueValidator",
"django.db.models.ForeignKey"
]
| [((1342, 1416), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Task'], {'related_name': '"""comments"""', 'on_delete': 'models.CASCADE'}), "(Task, related_name='comments', on_delete=models.CASCADE)\n", (1359, 1416), False, 'from django.db import models\n'), ((1431, 1492), 'django.db.models.ForeignKey', 'models.ForeignKey', (['User'], {'on_delete': 'models.SET_NULL', 'null': '(True)'}), '(User, on_delete=models.SET_NULL, null=True)\n', (1448, 1492), False, 'from django.db import models\n'), ((490, 504), 'django.utils.translation.ugettext_lazy', '_', (['"""Заголовок"""'], {}), "('Заголовок')\n", (491, 504), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((556, 569), 'django.utils.translation.ugettext_lazy', '_', (['"""Описание"""'], {}), "('Описание')\n", (557, 569), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((697, 708), 'django.utils.translation.ugettext_lazy', '_', (['"""Статус"""'], {}), "('Статус')\n", (698, 708), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((796, 808), 'django.utils.translation.ugettext_lazy', '_', (['"""Дедлайн"""'], {}), "('Дедлайн')\n", (797, 808), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((845, 859), 'django.utils.translation.ugettext_lazy', '_', (['"""Приоритет"""'], {}), "('Приоритет')\n", (846, 859), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((964, 994), 'django.utils.translation.ugettext_lazy', '_', (['"""Дата последнего изменения"""'], {}), "('Дата последнего изменения')\n", (965, 994), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1046, 1064), 'django.utils.translation.ugettext_lazy', '_', (['"""Дата создания"""'], {}), "('Дата создания')\n", (1047, 1064), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1521, 1537), 'django.utils.translation.ugettext_lazy', '_', (['"""Комментарий"""'], {}), "('Комментарий')\n", (1522, 1537), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1574, 1592), 'django.utils.translation.ugettext_lazy', '_', (['"""Дата создания"""'], {}), "('Дата создания')\n", (1575, 1592), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((338, 353), 'django.utils.translation.ugettext_lazy', '_', (['"""Неизвестно"""'], {}), "('Неизвестно')\n", (339, 353), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((372, 386), 'django.utils.translation.ugettext_lazy', '_', (['"""Завершить"""'], {}), "('Завершить')\n", (373, 386), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((406, 419), 'django.utils.translation.ugettext_lazy', '_', (['"""Отменить"""'], {}), "('Отменить')\n", (407, 419), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((438, 451), 'django.utils.translation.ugettext_lazy', '_', (['"""Отложить"""'], {}), "('Отложить')\n", (439, 451), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((623, 639), 'django.utils.translation.ugettext_lazy', '_', (['"""Исполнитель"""'], {}), "('Исполнитель')\n", (624, 639), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((893, 913), 'django.core.validators.MinValueValidator', 'MinValueValidator', (['(1)'], {}), '(1)\n', (910, 913), False, 'from django.core.validators import MaxValueValidator, MinValueValidator\n'), ((915, 935), 'django.core.validators.MaxValueValidator', 'MaxValueValidator', (['(3)'], {}), '(3)\n', (932, 935), False, 'from django.core.validators import MaxValueValidator, 
MinValueValidator\n')] |
import numpy as np
import hexy as hx
def test_get_hex_line():
expected = [
[-3, 3, 0],
[-2, 2, 0],
[-1, 2, -1],
[0, 2, -2],
[1, 1, -2],
]
start = np.array([-3, 3, 0])
end = np.array([1, 1, -2])
print(hx.get_hex_line(start, end))
print(expected);
assert(np.array_equal(
hx.get_hex_line(start, end),
expected));
if __name__ == "__main__":
test_get_hex_line()
| [
"numpy.array",
"hexy.get_hex_line"
]
| [((227, 247), 'numpy.array', 'np.array', (['[-3, 3, 0]'], {}), '([-3, 3, 0])\n', (235, 247), True, 'import numpy as np\n'), ((258, 278), 'numpy.array', 'np.array', (['[1, 1, -2]'], {}), '([1, 1, -2])\n', (266, 278), True, 'import numpy as np\n'), ((289, 316), 'hexy.get_hex_line', 'hx.get_hex_line', (['start', 'end'], {}), '(start, end)\n', (304, 316), True, 'import hexy as hx\n'), ((374, 401), 'hexy.get_hex_line', 'hx.get_hex_line', (['start', 'end'], {}), '(start, end)\n', (389, 401), True, 'import hexy as hx\n')] |
"""
A :py:mod:`filter <tiddlyweb.filters>` type to limit a group of entities
using a syntax similar to SQL Limit::
limit=<index>,<count>
limit=<count>
"""
import itertools
def limit_parse(count='0'):
"""
Parse the argument of a ``limit`` :py:mod:`filter <tiddlyweb.filters>`
for a count and index argument, return a function which does the limiting.
Exceptions while parsing are passed up the stack.
"""
index = '0'
if ',' in count:
index, count = count.split(',', 1)
index = int(index)
count = int(count)
def limiter(entities, indexable=False, environ=None):
return limit(entities, index=index, count=count)
return limiter
def limit(entities, count=0, index=0):
"""
Make a slice of a list of entities based on a count and index.
"""
return itertools.islice(entities, index, index + count)
| [
"itertools.islice"
]
| [((835, 883), 'itertools.islice', 'itertools.islice', (['entities', 'index', '(index + count)'], {}), '(entities, index, index + count)\n', (851, 883), False, 'import itertools\n')] |