code (string, lengths 20 to 1.05M) | apis (sequence) | extract_api (string, lengths 75 to 5.24M)
---|---|---|
import urllib.request
import urllib.parse
import re
# build a URL-encoded POST request for the site's search form and read the response
url = 'http://pythonprogramming.net'
values = {'s':'basics','submit':'search'}
data = urllib.parse.urlencode(values)
print(data)
data = data.encode('utf-8')
req = urllib.request.Request(url,data)
resp = urllib.request.urlopen(req)
respData = resp.read()
paras = re.findall(r'<p>(.*?)</p>',str(respData))
for p in paras:
    print(p)
    print(re.split(r'<a*', p))
    print("\n\n") | [
"re.split"
] | [((432, 450), 're.split', 're.split', (['"""<a*"""', 'p'], {}), "('<a*', p)\n", (440, 450), False, 'import re\n')] |
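The `extract_api` column appears to pair each API call with its character span in the source, the call text, and the import that provides it. As a rough illustration only (this is not the dataset's own extraction code), call spans like `(432, 450)` can be recovered with the standard `ast` module:

```python
import ast

# A tiny stand-in source; any Python file works the same way.
SOURCE = "import re\nparas = re.findall('<p>(.*?)</p>', 'x')\n"

tree = ast.parse(SOURCE)
for node in ast.walk(tree):
    if isinstance(node, ast.Call):
        # Python 3.8+: returns the exact text of the call expression.
        print(node.lineno, node.col_offset, ast.get_source_segment(SOURCE, node))
```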
import streamlit as st
from back import CustomEncoder,request_categories,request_data
from sklearn.base import BaseEstimator
import sys
st.title('TAS Predictor')
st.header('Predict the best possible times of your favorite speedgame.')
st.write('''Your speedgame don't have TASes ? Do you want to know how fast this goofy extension category can be ?
This machine learning-driven app will give you answers.''')
game_form = st.form("game_form")
game = game_form.text_input('Type a game :')
game_submitted = game_form.form_submit_button("Get best possible times !")
if game_submitted:
try:
categories = request_categories(game)
except IndexError:
st.error("Your game has not been found. Try to look for typos.")
sys.exit("Your game has not been found. Try to look for typos.")
for category in categories:
try:
results = request_data(game,category)
WR_link = results['WR_link']
WR_time = results['WR_time']
predicted_time = results['predicted_time']
st.subheader(category + ':')
'''
[Link to world record]('''+WR_link+''') (time : '''+WR_time+''')
**The best possible time is :** '''+predicted_time
except IndexError:
st.error(category + " category doesn't have any runs so we can't calculate the best time.")
| [
"streamlit.form",
"streamlit.write",
"streamlit.error",
"back.request_data",
"streamlit.subheader",
"sys.exit",
"back.request_categories",
"streamlit.header",
"streamlit.title"
] | [((142, 167), 'streamlit.title', 'st.title', (['"""TAS Predictor"""'], {}), "('TAS Predictor')\n", (150, 167), True, 'import streamlit as st\n'), ((169, 241), 'streamlit.header', 'st.header', (['"""Predict the best possible times of your favorite speedgame."""'], {}), "('Predict the best possible times of your favorite speedgame.')\n", (178, 241), True, 'import streamlit as st\n'), ((243, 428), 'streamlit.write', 'st.write', (['"""Your speedgame don\'t have TASes ? Do you want to know how fast this goofy extension category can be ?\n \nThis machine learning-driven app will give you answers."""'], {}), '(\n """Your speedgame don\'t have TASes ? Do you want to know how fast this goofy extension category can be ?\n \nThis machine learning-driven app will give you answers."""\n )\n', (251, 428), True, 'import streamlit as st\n'), ((437, 457), 'streamlit.form', 'st.form', (['"""game_form"""'], {}), "('game_form')\n", (444, 457), True, 'import streamlit as st\n'), ((632, 656), 'back.request_categories', 'request_categories', (['game'], {}), '(game)\n', (650, 656), False, 'from back import CustomEncoder, request_categories, request_data\n'), ((690, 754), 'streamlit.error', 'st.error', (['"""Your game has not been found. Try to look for typos."""'], {}), "('Your game has not been found. Try to look for typos.')\n", (698, 754), True, 'import streamlit as st\n'), ((764, 828), 'sys.exit', 'sys.exit', (['"""Your game has not been found. Try to look for typos."""'], {}), "('Your game has not been found. Try to look for typos.')\n", (772, 828), False, 'import sys\n'), ((899, 927), 'back.request_data', 'request_data', (['game', 'category'], {}), '(game, category)\n', (911, 927), False, 'from back import CustomEncoder, request_categories, request_data\n'), ((1080, 1108), 'streamlit.subheader', 'st.subheader', (["(category + ':')"], {}), "(category + ':')\n", (1092, 1108), True, 'import streamlit as st\n'), ((1320, 1415), 'streamlit.error', 'st.error', (['(category +\n " category doesn\'t have any runs so we can\'t calculate the best time.")'], {}), '(category +\n " category doesn\'t have any runs so we can\'t calculate the best time.")\n', (1328, 1415), True, 'import streamlit as st\n')] |
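One design note on the row above: `sys.exit` inside a Streamlit script raises `SystemExit` on every rerun, whereas Streamlit's own `st.stop()` simply ends the current script run. A minimal sketch of the same look-up-and-bail pattern, with a hypothetical stand-in for `back.request_categories` and a plain button instead of the form:

```python
import streamlit as st

def request_categories(game):
    # Hypothetical stand-in for back.request_categories; always fails for the demo.
    raise IndexError

game = st.text_input("Type a game:")
if st.button("Get best possible times!"):
    try:
        categories = request_categories(game)
    except IndexError:
        st.error("Your game has not been found. Try to look for typos.")
        st.stop()  # ends this script run only, unlike sys.exit
```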
#!/usr/bin/python
import os
import sys
import json
if sys.version_info[0] < 3:
    import thread as _thread  # Python 2 name of the module
else:
    import _thread
import threading
import time
from datetime import datetime
import queue
from mksdk import MkSFile
class Database():
def __init__(self):
self.ClassName = "Database"
self.ThreadRunning = False
self.CurrentFolderPath = ""
self.QueueLock = threading.Lock()
self.Orders = queue.Queue()
# Events
# Create the folder structure for storing the CSV files
if not os.path.exists("csv_db"):
os.makedirs("csv_db")
self.GenrateFolder()
self.RunServer()
def GenrateFolder(self):
now = datetime.now()
if not os.path.exists(os.path.join("csv_db", str(now.year))):
os.makedirs(os.path.join("csv_db", str(now.year)))
if not os.path.exists(os.path.join("csv_db", str(now.year), str(now.month))):
os.makedirs(os.path.join("csv_db", str(now.year), str(now.month)))
if not os.path.exists(os.path.join("csv_db", str(now.year), str(now.month), str(now.day))):
os.makedirs(os.path.join("csv_db", str(now.year), str(now.month), str(now.day)))
self.CurrentFolderPath = os.path.join("csv_db", str(now.year), str(now.month), str(now.day))
def Worker(self):
try:
file = MkSFile.File()
self.ThreadRunning = True
while self.ThreadRunning:
item = self.Orders.get(block=True,timeout=None)
file.Append(item["file"], item["data"])
# Check if date was changed
now = datetime.now()
if self.CurrentFolderPath != os.path.join("csv_db", str(now.year), str(now.month), str(now.day)):
self.GenrateFolder()
except Exception as e:
print ("({classname})# [ERROR] Stoping CSV DB worker ... {0}".format(str(e), classname=self.ClassName))
self.ServerRunning = False
def RunServer(self):
if self.ThreadRunning is False:
_thread.start_new_thread(self.Worker, ())
def WriteDB(self, key, values):
self.QueueLock.acquire()
# dt_object = datetime.fromtimestamp(timestamp)
data_csv = str(time.time()) + ","
for item in values:
data_csv += str(item) + ","
data_csv = data_csv[:-1] + "\n"
self.Orders.put( {
'file': os.path.join(self.CurrentFolderPath, key + ".csv"),
'data': data_csv
})
self.QueueLock.release()
def ReadDB(self, date_path):
path = os.path.join("csv_db", date_path) + ".csv"
file = MkSFile.File()
return file.Load(path)
def SplitDataByHourSegment(self, date, data, graph_type):
rows = data.split("\n")
start_dt = datetime(int(date["year"]), int(date["month"]), int(date["day"]), 0, 0)
start_ts = time.mktime(start_dt.timetuple())
next_ts = start_ts + (60*60)
sensors_count = len(rows[0].split(",")) - 1
sensor_prev_data = []
sensor_change = []
avg_count = 0
for idx in range(sensors_count):
sensor_prev_data.append(0)
sensor_change.append(0)
hours_list = []
for item in rows[:-1]:
cols = item.split(",")
if len(cols) > 1:
if next_ts < float(cols[0]):
for idx in range(sensors_count):
if graph_type[idx] == "avg":
if avg_count > 1:
sensor_change[idx] /= float(avg_count)
else:
sensor_change[idx] = 0
elif graph_type[idx] == "change":
pass
hours_list.append(sensor_change)
next_ts += (60*60)
sensor_change = [0]*sensors_count
avg_count = 0
avg_count += 1
for idx in range(sensors_count):
if graph_type[idx] == "avg":
sensor_change[idx] += float(cols[idx+1])
elif graph_type[idx] == "change":
if float(cols[idx+1]) != sensor_prev_data[idx]:
sensor_change[idx] += 1
sensor_prev_data[idx] = float(cols[idx+1])
else:
return None, 0
return hours_list, sensors_count
| [
"os.path.exists",
"os.makedirs",
"threading.Lock",
"os.path.join",
"mksdk.MkSFile.File",
"datetime.datetime.now",
"queue.Queue",
"time.time",
"_thread.start_new_thread"
] | [((386, 402), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (400, 402), False, 'import threading\n'), ((427, 440), 'queue.Queue', 'queue.Queue', ([], {}), '()\n', (438, 440), False, 'import queue\n'), ((638, 652), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (650, 652), False, 'from datetime import datetime\n'), ((2338, 2352), 'mksdk.MkSFile.File', 'MkSFile.File', ([], {}), '()\n', (2350, 2352), False, 'from mksdk import MkSFile\n'), ((506, 530), 'os.path.exists', 'os.path.exists', (['"""csv_db"""'], {}), "('csv_db')\n", (520, 530), False, 'import os\n'), ((535, 556), 'os.makedirs', 'os.makedirs', (['"""csv_db"""'], {}), "('csv_db')\n", (546, 556), False, 'import os\n'), ((1243, 1257), 'mksdk.MkSFile.File', 'MkSFile.File', ([], {}), '()\n', (1255, 1257), False, 'from mksdk import MkSFile\n'), ((1830, 1871), '_thread.start_new_thread', '_thread.start_new_thread', (['self.Worker', '()'], {}), '(self.Worker, ())\n', (1854, 1871), False, 'import _thread\n'), ((2286, 2319), 'os.path.join', 'os.path.join', (['"""csv_db"""', 'date_path'], {}), "('csv_db', date_path)\n", (2298, 2319), False, 'import os\n'), ((1459, 1473), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1471, 1473), False, 'from datetime import datetime\n'), ((2001, 2012), 'time.time', 'time.time', ([], {}), '()\n', (2010, 2012), False, 'import time\n'), ((2142, 2192), 'os.path.join', 'os.path.join', (['self.CurrentFolderPath', "(key + '.csv')"], {}), "(self.CurrentFolderPath, key + '.csv')\n", (2154, 2192), False, 'import os\n')] |
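A usage sketch for the `Database` class above, assuming the `mksdk` package is available and the class is importable; the date path and sensor values are invented for illustration:

```python
db = Database()                            # creates csv_db/<year>/<month>/<day>/ and starts the worker thread
db.WriteDB("temperature", [21.5, 48])      # queues "<timestamp>,21.5,48\n" for temperature.csv
raw = db.ReadDB("2024/1/15/temperature")   # loads csv_db/2024/1/15/temperature.csv via MkSFile
hours, sensors = db.SplitDataByHourSegment(
    {"year": "2024", "month": "1", "day": "15"}, raw, ["avg", "change"])
```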
"""Test for SYMPAIS"""
from absl.testing import absltest
import sympy
from sympais import algorithm
from sympais import distributions
from sympais.tasks import Task
class SYMPAISTest(absltest.TestCase):
def test_unsat_returns_zero_prob(self):
profile = {"x": distributions.Normal()}
x = sympy.Symbol("x")
cs = (x * x >= 1, x * x <= -1)
task = Task(profile, cs, domains={"x": (-10., 10.)})
algorithm.sympais(task, 0)
if __name__ == "__main__":
absltest.main()
| [
"sympy.Symbol",
"absl.testing.absltest.main",
"sympais.tasks.Task",
"sympais.distributions.Normal",
"sympais.algorithm.sympais"
] | [((474, 489), 'absl.testing.absltest.main', 'absltest.main', ([], {}), '()\n', (487, 489), False, 'from absl.testing import absltest\n'), ((302, 319), 'sympy.Symbol', 'sympy.Symbol', (['"""x"""'], {}), "('x')\n", (314, 319), False, 'import sympy\n'), ((366, 413), 'sympais.tasks.Task', 'Task', (['profile', 'cs'], {'domains': "{'x': (-10.0, 10.0)}"}), "(profile, cs, domains={'x': (-10.0, 10.0)})\n", (370, 413), False, 'from sympais.tasks import Task\n'), ((416, 442), 'sympais.algorithm.sympais', 'algorithm.sympais', (['task', '(0)'], {}), '(task, 0)\n', (433, 442), False, 'from sympais import algorithm\n'), ((270, 292), 'sympais.distributions.Normal', 'distributions.Normal', ([], {}), '()\n', (290, 292), False, 'from sympais import distributions\n')] |
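The test above expects zero probability because its two constraints contradict each other; a quick sympy-only check (independent of sympais) makes that explicit:

```python
import sympy

x = sympy.Symbol("x", real=True)
# x*x >= 1 and x*x <= -1 cannot both hold for any real x.
print(sympy.solve([x * x >= 1, x * x <= -1], x))  # -> False (empty solution set)
```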
"""
Example code that implements a simple Neural Net predictor
for z_mode, and Gaussian centered at z_mode with base_width
read in from file and pdf width set to base_width*(1+zmode).
"""
import numpy as np
# from numpy import inf
import sklearn.neural_network as sknn
from sklearn.preprocessing import StandardScaler
from ceci.config import StageParameter as Param
from rail.estimation.estimator import Estimator, Informer
import string
import qp
def make_color_data(data_dict, bands):
"""
make a dataset consisting of the i-band mag and the five colors
Returns
--------
input_data: `ndarray` array of imag and 5 colors
"""
input_data = data_dict['mag_i_lsst']
# make colors and append to input data
for i in range(len(bands)-1):
# replace the non-detect 99s with 28.0 just arbitrarily for now
band1 = data_dict[f'mag_{bands[i]}_lsst']
# band1err = data_dict[f'mag_err_{bands[i]}_lsst']
band2 = data_dict[f'mag_{bands[i+1]}_lsst']
# band2err = data_dict[f'mag_err_{bands[i+1]}_lsst']
# for j,xx in enumerate(band1):
# if np.isclose(xx,99.,atol=.01):
# band1[j] = band1err[j]
# band1err[j] = 1.0
# for j,xx in enumerate(band2):
# if np.isclose(xx,99.,atol=0.01):
# band2[j] = band2err[j]
# band2err[j] = 1.0
input_data = np.vstack((input_data, band1-band2))
return input_data.T
def regularize_data(data):
"""Utility function to prepare data for sklearn"""
scaler = StandardScaler()
scaler.fit(data)
regularized_data = scaler.transform(data)
return regularized_data
class Train_SimpleNN(Informer):
"""
Subclass to train a simple point estimate Neural Net photoz
rather than actually predict PDF, for now just predict point zb
and then put an error of width*(1+zb). We'll do a "real" NN
photo-z later.
"""
name = 'Train_SimpleNN'
config_options = Informer.config_options.copy()
config_options.update(zmin=Param(float, 0.0, msg="The minimum redshift of the z grid"),
zmax=Param(float, 3.0, msg="The maximum redshift of the z grid"),
nzbins=Param(int, 301, msg="The number of gridpoints in the z grid"),
width=Param(float, 0.05, msg="The ad hoc base width of the PDFs"),
bands=Param(str, 'ugrizy', msg="bands to use in estimation"),
max_iter=Param(int, 500,
msg="max number of iterations while "
"training the neural net. Too low a value will cause an "
"error to be printed (though the code will still work, just"
"not optimally)"))
def __init__(self, args, comm=None):
""" Constructor:
Do Informer specific initialization """
Informer.__init__(self, args, comm=comm)
if not all(c in string.ascii_letters for c in self.config.bands):
raise ValueError("'bands' option should be letters only (no spaces or commas etc)")
def run(self):
"""Train the NN model
"""
if self.config.hdf5_groupname:
training_data = self.get_data('input')[self.config.hdf5_groupname]
else: #pragma: no cover
training_data = self.get_data('input')
speczs = training_data['redshift']
print("stacking some data...")
color_data = make_color_data(training_data, self.config.bands)
input_data = regularize_data(color_data)
simplenn = sknn.MLPRegressor(hidden_layer_sizes=(12, 12),
activation='tanh', solver='lbfgs',
max_iter=self.config.max_iter)
simplenn.fit(input_data, speczs)
self.model = simplenn
self.add_data('model', self.model)
class SimpleNN(Estimator):
"""
Subclass to implement a simple point estimate Neural Net photoz
rather than actually predict PDF, for now just predict point zb
and then put an error of width*(1+zb). We'll do a "real" NN
photo-z later.
"""
name = 'SimpleNN'
config_options = Estimator.config_options.copy()
config_options.update(width=Param(float, 0.05, msg="The ad hoc base width of the PDFs"),
bands=Param(str, 'ugrizy', msg="bands to use in estimation"))
def __init__(self, args, comm=None):
""" Constructor:
Do Estimator specific initialization """
Estimator.__init__(self, args, comm=comm)
if not all(c in string.ascii_letters for c in self.config.bands):
raise ValueError("'bands' option should be letters only (no spaces or commas etc)")
def run(self):
if self.config.hdf5_groupname:
test_data = self.get_data('input')[self.config.hdf5_groupname]
else: #pragma: no cover
test_data = self.get_data('input')
color_data = make_color_data(test_data, self.config.bands)
input_data = regularize_data(color_data)
zmode = np.round(self.model.predict(input_data), 3)
widths = self.config.width * (1.0+zmode)
qp_dstn = qp.Ensemble(qp.stats.norm, data=dict(loc=np.expand_dims(zmode, -1), #pylint: disable=no-member
scale=np.expand_dims(widths, -1)))
qp_dstn.set_ancil(dict(zmode=zmode))
self.add_data('output', qp_dstn)
| [
"sklearn.neural_network.MLPRegressor",
"ceci.config.StageParameter",
"sklearn.preprocessing.StandardScaler",
"rail.estimation.estimator.Estimator.config_options.copy",
"numpy.vstack",
"numpy.expand_dims",
"rail.estimation.estimator.Informer.__init__",
"rail.estimation.estimator.Estimator.__init__",
"rail.estimation.estimator.Informer.config_options.copy"
] | [((1564, 1580), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (1578, 1580), False, 'from sklearn.preprocessing import StandardScaler\n'), ((1992, 2022), 'rail.estimation.estimator.Informer.config_options.copy', 'Informer.config_options.copy', ([], {}), '()\n', (2020, 2022), False, 'from rail.estimation.estimator import Estimator, Informer\n'), ((4307, 4338), 'rail.estimation.estimator.Estimator.config_options.copy', 'Estimator.config_options.copy', ([], {}), '()\n', (4336, 4338), False, 'from rail.estimation.estimator import Estimator, Informer\n'), ((1406, 1444), 'numpy.vstack', 'np.vstack', (['(input_data, band1 - band2)'], {}), '((input_data, band1 - band2))\n', (1415, 1444), True, 'import numpy as np\n'), ((3001, 3041), 'rail.estimation.estimator.Informer.__init__', 'Informer.__init__', (['self', 'args'], {'comm': 'comm'}), '(self, args, comm=comm)\n', (3018, 3041), False, 'from rail.estimation.estimator import Estimator, Informer\n'), ((3698, 3815), 'sklearn.neural_network.MLPRegressor', 'sknn.MLPRegressor', ([], {'hidden_layer_sizes': '(12, 12)', 'activation': '"""tanh"""', 'solver': '"""lbfgs"""', 'max_iter': 'self.config.max_iter'}), "(hidden_layer_sizes=(12, 12), activation='tanh', solver=\n 'lbfgs', max_iter=self.config.max_iter)\n", (3715, 3815), True, 'import sklearn.neural_network as sknn\n'), ((4644, 4685), 'rail.estimation.estimator.Estimator.__init__', 'Estimator.__init__', (['self', 'args'], {'comm': 'comm'}), '(self, args, comm=comm)\n', (4662, 4685), False, 'from rail.estimation.estimator import Estimator, Informer\n'), ((2054, 2113), 'ceci.config.StageParameter', 'Param', (['float', '(0.0)'], {'msg': '"""The minimum redshift of the z grid"""'}), "(float, 0.0, msg='The minimum redshift of the z grid')\n", (2059, 2113), True, 'from ceci.config import StageParameter as Param\n'), ((2146, 2205), 'ceci.config.StageParameter', 'Param', (['float', '(3.0)'], {'msg': '"""The maximum redshift of the z grid"""'}), "(float, 3.0, msg='The maximum redshift of the z grid')\n", (2151, 2205), True, 'from ceci.config import StageParameter as Param\n'), ((2240, 2301), 'ceci.config.StageParameter', 'Param', (['int', '(301)'], {'msg': '"""The number of gridpoints in the z grid"""'}), "(int, 301, msg='The number of gridpoints in the z grid')\n", (2245, 2301), True, 'from ceci.config import StageParameter as Param\n'), ((2335, 2394), 'ceci.config.StageParameter', 'Param', (['float', '(0.05)'], {'msg': '"""The ad hoc base width of the PDFs"""'}), "(float, 0.05, msg='The ad hoc base width of the PDFs')\n", (2340, 2394), True, 'from ceci.config import StageParameter as Param\n'), ((2428, 2482), 'ceci.config.StageParameter', 'Param', (['str', '"""ugrizy"""'], {'msg': '"""bands to use in estimation"""'}), "(str, 'ugrizy', msg='bands to use in estimation')\n", (2433, 2482), True, 'from ceci.config import StageParameter as Param\n'), ((2519, 2711), 'ceci.config.StageParameter', 'Param', (['int', '(500)'], {'msg': '"""max number of iterations while training the neural net. Too low a value will cause an error to be printed (though the code will still work, justnot optimally)"""'}), "(int, 500, msg=\n 'max number of iterations while training the neural net. 
Too low a value will cause an error to be printed (though the code will still work, justnot optimally)'\n )\n", (2524, 2711), True, 'from ceci.config import StageParameter as Param\n'), ((4371, 4430), 'ceci.config.StageParameter', 'Param', (['float', '(0.05)'], {'msg': '"""The ad hoc base width of the PDFs"""'}), "(float, 0.05, msg='The ad hoc base width of the PDFs')\n", (4376, 4430), True, 'from ceci.config import StageParameter as Param\n'), ((4464, 4518), 'ceci.config.StageParameter', 'Param', (['str', '"""ugrizy"""'], {'msg': '"""bands to use in estimation"""'}), "(str, 'ugrizy', msg='bands to use in estimation')\n", (4469, 4518), True, 'from ceci.config import StageParameter as Param\n'), ((5355, 5380), 'numpy.expand_dims', 'np.expand_dims', (['zmode', '(-1)'], {}), '(zmode, -1)\n', (5369, 5380), True, 'import numpy as np\n'), ((5470, 5496), 'numpy.expand_dims', 'np.expand_dims', (['widths', '(-1)'], {}), '(widths, -1)\n', (5484, 5496), True, 'import numpy as np\n')] |
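A small sanity check of `make_color_data` above, assuming the function is in scope and fed the `mag_<band>_lsst` columns it expects; the magnitudes here are invented:

```python
import numpy as np

# Two objects with identical magnitudes in every band, so all five colors are zero.
data = {f"mag_{band}_lsst": np.array([24.0, 25.0]) for band in "ugrizy"}
colors = make_color_data(data, "ugrizy")
assert colors.shape == (2, 6)  # the i-band magnitude plus five adjacent-band colors per object
```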
""" result of analyzers work - data from ast that needed to asserter """
import ast
import _ast
from copy import deepcopy
from typing import Any, Text, Dict, Union, List, Tuple
from pprint import pprint
from collections import defaultdict, OrderedDict
from laziest import ast_meta as meta
import jedi
from jedi.api.completion import get_signature_param_names
pytest_needed = False
jedi_param_type_line = 'param {param_name}: '
class StrategyAny:
pass
class Analyzer(ast.NodeVisitor):
""" class to parse files in dict structure to provide to generator data,
that needed for tests generation """
def __init__(self, source: Text, debug: bool):
"""
source - code massive
:param debug:
:param source:
"""
self.debug = debug
self.tree = {"import": [],
"from": [],
"def": {},
"raises": [],
"classes": [],
"async": {}}
# list of source lines
self.source = source.split("\n")
self.func_data = {}
self.variables = []
self.variables_names = []
def visit_Import(self, node: ast.Import):
for alias in node.names:
self.tree["import"].append(alias.name)
self.generic_visit(node)
def visit_ImportFrom(self, node: _ast.ImportFrom):
for alias in node.names:
self.tree["from"].append(alias.name)
self.generic_visit(node)
@staticmethod
def get_operand_value(value: Any):
""" arg can be single like int, str and etc or it can be slice like dict, list, tuple and etc.
sample with slice {'arg': {'args': 'arg1'}, 'slice': 3}
"""
if isinstance(value, dict):
if 'slice' in value:
value = f'{value["arg"]["args"]}[\'{value["slice"]}\']' if isinstance(
value['slice'], str) else f'{value["arg"]["args"]}[{value["slice"]}]'
elif 'args' in value:
value = value['args']
return value
def process_if_construction(self, statement: _ast.If, func_data: Dict,
variables_names: Dict, variables: List,
previous_statements: List = None):
if previous_statements is None:
previous_statements = []
# we get variables from statement
# we work with args from statements
args = {}
_conditions_op = None
variables_names = deepcopy(variables_names)
variables = deepcopy(variables)
if_variables, if_variables_names = self.extract_variables_in_scope(statement, variables, variables_names)
values = self.get_value(statement.test, if_variables_names, if_variables)
if len(statement.body) > 1:
for body_node in statement.body[:-1]:
self.process_body_node(body_node, variables_names, variables, in_if=True)
result = self.get_value(statement.body[-1], if_variables_names, if_variables)
if 'print' in result:
result = result['print']['text'].strip()
return_pack = {'args': args, 'result': result}
if '\'' in result:
# TODO: this is a hack, need to normalize it
return_pack['log'] = True
if 'comparators' not in values:
# or/and between conditions
_conditions_op = self.get_value(values['op'])
_values = []
for value in values['values']:
if 'left' not in value:
_type = self.func_data['args'][value['args']]['type']
value = {'left': value, 'comparators': meta.empty_types[_type], 'ops': '!='}
_values.append(value)
values = _values
if not isinstance(values, list):
values = [values]
if _conditions_op == 'or':
for value in values:
previous_statements.append([value])
func_data['return'].append(return_pack)
else:
previous_statements.append(values)
func_data['return'].append(return_pack)
for orelse in statement.orelse:
if isinstance(orelse, _ast.If):
func_data = self.process_if_construction(
orelse, func_data, variables_names, variables, previous_statements)
elif isinstance(orelse, ast.Return):
func_data['return'].append(self.get_value(orelse))
func_data['ifs'] = previous_statements
return func_data
def separate_assing_nodes(self, node: _ast.Assign, variables_names, variables):
if isinstance(node.targets[0], _ast.Tuple):
tuple_key = meta.values_for_ast_type[type(node.targets[0])]
for num, target_node in enumerate(getattr(node.targets[0], tuple_key)):
if isinstance(node.value, _ast.Tuple):
inner_value = getattr(node.value, tuple_key)[num]
else:
node_value = self.get_value(node.value, variables_names, variables)
if isinstance(node_value, dict):
inner_value = _ast.Subscript(value=node.value, slice=_ast.Index(value=_ast.Num(n=num)))
else:
inner_value = node_value
var = _ast.Assign(targets=[target_node], value=inner_value)
yield var
elif isinstance(node.value, _ast.List):
for target in node.targets:
for elem in node.value.elts:
# for items
var = _ast.Assign(targets=[target], value=elem)
yield var
elif isinstance(node.value, _ast.Call):
value = self.get_value(node.value, variables_names, variables)
for target in node.targets:
# for items in call result
var = _ast.Assign(targets=[target], value=value)
yield var
def extract_variables_in_scope(self,
node: Union[ast.FunctionDef, _ast.If, _ast.For],
variables: List = None,
variables_names: Dict = None,
main_run: bool = False):
"""
method to extract the variables and variable names used in the scope
:param node:
:param variables:
:param variables_names:
:param main_run:
:return:
"""
# local variables, assign statements in function body
# variables 'id' -> Name node, 'value' -> Node
if not variables:
variables = []
variables_names = {}
for node in node.body:
if getattr(node, 'body', None) and not isinstance(node, _ast.For):
_variables, _ = self.extract_variables_in_scope(node)
variables += _variables
if isinstance(node, _ast.Assign):
if getattr(node.targets[0], 'id', None) and node.targets[0].id not in self.func_data['args']:
# for single assignments var2 = 1
variables.append(node)
elif isinstance(node.targets[0], _ast.Tuple):
# in one Assign node can be several targets and values, like var1, var2 = 1, 2 (multi assign)
for var in self.separate_assing_nodes(node, variables_names, variables):
if var.targets[0].id not in self.func_data['args']:
variables.append(var)
elif isinstance(node, _ast.If):
_variables, _ = self.extract_variables_in_scope(node, variables, variables_names)
variables += _variables
variables_names = {}
if variables:
# define code variables in dict
for index, var in enumerate(variables):
var_names = {name_node.id: index for name_node in var.targets}
variables_names.update(var_names)
if main_run:
self.variables = variables
self.variables_names = deepcopy(variables_names)
self.func_data['variables'] = self.variables
return variables, variables_names
def check_arg_in_assing_node(self, node: _ast.Assign) -> Tuple[bool, bool]:
# need to find whether the arg is used in some variable (on the right side) or modified under the same name - to find steps
if getattr(node, 'target', None):
if node.target.id in self.func_data['args'] or node.target.id in self.func_data[
'steps_dependencies']:
return True, False
elif getattr(node, 'targets', None):
if getattr(node.targets[0], 'id', None):
if node.targets[0].id in self.func_data['args']:
# operations like arg_1 *= 10
# if at the left function arg name
return True, False
elif node.targets[0].id in self.func_data[
'steps_dependencies']:
return True, False
if getattr(node, 'value', None):
if (getattr(node.value, 'id', None) and node.value.id in self.func_data['args']) or (
getattr(node.value, 'id', None) and node.value.id in self.func_data['steps_dependencies']):
return True, True
elif isinstance(node.value, _ast.Tuple):
for inner_node in node.value.elts:
if getattr(inner_node, 'id', None) and inner_node.id in self.func_data['args'] or \
getattr(inner_node, 'id', None) and inner_node.id in self.func_data['steps_dependencies']:
return True, True
elif isinstance(node.value, _ast.Call):
func = node.value.func
if not isinstance(func, _ast.Name):
if getattr(func.value, 'id', None) and func.value.id \
in self.func_data['args'] or func.value.id in self.func_data['steps_dependencies']:
return True, True
return False, False
def process_for_node(self, node, variables_names, variables):
for var in self.separate_assing_nodes(_ast.Assign(targets=[node.target], value=node.iter),
variables_names, variables):
variables.append(var)
_index = len(variables) - 1
variables_names[var.targets[0].id] = _index
for inner_node in node.body:
self.process_body_node(inner_node, variables_names, variables)
return variables
def process_body_node(self, node: Any, variables_names: Dict, variables: List, in_if: bool = False):
step, arg_in_value = self.check_arg_in_assing_node(node)
if isinstance(node, ast.Return):
if getattr(node.value, 'id', None) and node.value.id in self.func_data['steps_dependencies']:
return_ = {'result': {'var': node.value.id}}
else:
return_ = {'result': self.get_value(node.value, variables_names, variables)}
self.func_data['return'].append(return_)
elif isinstance(node, _ast.If):
self.func_data = self.process_if_construction(
node, self.func_data, variables_names, variables)
elif getattr(node, 'targets', None) and isinstance(node.targets[0], _ast.Tuple):
for inner_node in self.separate_assing_nodes(node, variables_names, variables):
step, arg_in_value = self.check_arg_in_assing_node(inner_node)
if step:
# if at the left function arg name
self.add_step_for_arg(inner_node, variables_names, variables, arg_in_value)
elif step:
# operations like arg_1 *= 10
self.add_step_for_arg(node, variables_names, variables, arg_in_value)
elif isinstance(node, _ast.Pass):
return
elif isinstance(node, _ast.For):
self.process_for_node(node, variables_names, variables)
elif isinstance(node, _ast.Expr):
print(node.value.__dict__)
return
else:
print(node)
print(node.__dict__)
if in_if:
return
raise
def function_data_base(self, node: Union[ast.FunctionDef, ast.AsyncFunctionDef], async_f: bool) -> Dict:
"""
define the base dict for collecting function_data
:param node:
:param async_f:
:return:
"""
self.func_data = {'args': self.get_function_args(node),
'kargs_def': node.args.kw_defaults,
'kargs': node.args.kwarg,
'return': [],
'async_f': async_f,
'keys': defaultdict(dict),
'variables': [],
'steps': {},
'steps_dependencies': {},
'for': {}}
return self.func_data
def visit_FunctionDef(self, node: ast.FunctionDef, async_f: bool = False, class_: Dict = None):
""" main methods to """
try:
func_data = self.function_data_base(node, async_f)
variables, variables_names = self.extract_variables_in_scope(node, main_run=True)
non_variables_nodes_bodies = [node for node in node.body if node not in variables]
for body_item in non_variables_nodes_bodies:
self.process_body_node(body_item, variables_names, variables)
for result in func_data['return']:
result = result['result']
if isinstance(result, dict) and 'func' in result:
arg = result['args']
if isinstance(arg, dict) and 'args' in arg:
arg = arg['args']
if not isinstance(arg, dict) and arg in self.func_data['args']:
# means the inner function uses an argument of the enclosing function
self.identify_type_by_attr(arg, result['func'], variables, variables_names)
func_data = self.form_strategies(func_data)
if not func_data['return']:
# if function does not return anything
func_data['return'] = [{'args': (), 'result': None}]
for arg, value in func_data['args'].items():
if value.get('type') is None:
# set default type
func_data['args'][arg]['type'] = int
if not class_:
self.tree['def'][node.name] = func_data
except Exception as e:
print(self.debug)
if self.debug:
func_data = {'error': e.__class__.__name__, 'comment': e}
else:
raise e
return func_data
def visit_If(self, node: ast.If):
raise Exception(node.__dict__)
def visit_Raise(self, node: ast.Name) -> None:
self.tree['raises'].append(node.exc.__dict__)
def add_step_for_arg(self, node, variables_names: Dict, variables: List, in_value: bool = False):
arg_name = None
if getattr(node, 'target', None):
var_name = node.target.id
elif getattr(node, 'targets', None):
var_name = node.targets[0].id
if in_value:
if getattr(node.value, 'id', None):
if node.value.id in self.func_data['args']:
arg_name = node.value.id
elif isinstance(node.value, _ast.Tuple):
for inner_node in node.value.elts:
if inner_node.id in self.func_data['args']:
arg_name = inner_node.id
elif isinstance(node.value, _ast.Call):
func = node.value.func
if getattr(func.value, 'id', None):
if func.value.id in self.func_data['args']:
arg_name = func.value.id
if not arg_name:
if var_name in self.func_data['steps_dependencies']:
arg_name = self.func_data['steps_dependencies'][var_name]
if var_name not in self.func_data['args']:
# TODO: need to add work around when as var used different function arg
self.func_data['steps_dependencies'][var_name] = arg_name
else:
arg_name = var_name
if not arg_name:
raise
if arg_name not in self.func_data['steps']:
self.func_data['steps'][arg_name] = []
step = self.get_value(node, variables_names, variables)
if isinstance(step, dict) and 'func' in step:
self.set_type_by_attrib(arg_name, step['func']['attr'])
self.func_data['steps'][arg_name].append({'var_name': var_name, 'step': step})
def set_slices_to_func_args(self, arg: Text, _slice: Union[Text, int]):
self.func_data['args'][arg]['type'] = dict if isinstance(_slice, str) else list
self.func_data['keys'][_slice][arg] = {'type': None}
def set_type_to_func_args(self, arg: Union[Text, Dict], _type: Any):
optional = 'Optional['
print(arg)
print(type(arg))
if isinstance(_type, str) and optional in _type:
_type = _type.split(optional)[1].split(']')[0]
if isinstance(arg, dict) and arg.get('arg'):
arg_name = arg.get('arg')
if isinstance(arg_name, dict) and 'args' in arg_name:
arg_name = arg_name['args']
if 'slice' in arg:
self.func_data['keys'][arg['slice']][arg_name]['type'] = _type
elif isinstance(arg, dict) and arg.get('args'):
arg_name = arg['args']
if 'slice' in arg:
self.func_data['keys'][arg['slice']][arg_name]['type'] = _type
elif arg in self.func_data['args']:
self.func_data['args'][arg]['type'] = _type
return _type
def process_ast_name(self, node: _ast.Name, variables_names: Dict, variables: List):
"""
find value of 'Name' node
:param node:
:param variables_names:
:param variables:
:return:
"""
alias = node.id
if alias in variables_names:
# check in variables
variable = variables[variables_names[alias]]
return self.get_value(variable, variables_names, variables)
elif alias in self.func_data.get('args', {}):
# check in function arguments
return {'args': node.id}
elif alias in self.tree['import']:
# check in imports
return {'value': node.id, 't': 'import'}
elif alias in globals()['__builtins__']:
# built_in name
return {'builtin': alias}
else:
print(node.__dict__)
print(node.id)
print(node)
# TODO: this is hack, like almost of the code )) but it's need to be refactored
return {'exec': True}
@staticmethod
def extract_args_in_bin_op(item: Union[Dict, Any], args: List):
if isinstance(item, dict) and 'arg' in item:
if 'args' in item['arg']:
# mean this is a function arg, need to set type
if 'slice' in item:
args.append({'arg': item["arg"]["args"], 'slice': item["slice"]})
else:
args.append(item['arg']['args'])
elif isinstance(item, dict) and 'args' in item:
if item['args']:
args.append(item['args'])
else:
if not isinstance(item, dict):
args.append(item)
elif item:
# if for some reason come dict with args
raise
return args
def bin_op_value_extract(self, node, variables_names, variables):
bin_op_left = self.get_value(node.left, variables_names, variables)
bin_op_right = self.get_value(node.right, variables_names, variables)
args = []
sides = [bin_op_left, bin_op_right]
_simple = [int, float]
if type(bin_op_left) in _simple and type(bin_op_right) in _simple:
# count result of bin op
return eval(f'{bin_op_left}{meta.operators[node.op.__class__]}{bin_op_right}')
math_type = True
if (isinstance(node.left, _ast.Str) or isinstance(node.right, _ast.Str)) \
and isinstance(node.op, _ast.Add):
# concatenation
math_type = False
if (isinstance(bin_op_left, dict) and 'BinOp' not in bin_op_left) \
and (isinstance(bin_op_right, dict) and 'BinOp' not in bin_op_right) or (
not (isinstance(bin_op_left, dict) or not (isinstance(bin_op_right, dict)))):
for item in [bin_op_right, bin_op_left]:
args = self.extract_args_in_bin_op(item, args)
if args:
for arg in args:
if math_type:
_type = None
# TODO: need to refactor all this logic about assigne types by operations
if (isinstance(arg, dict) and 'slice' not in arg) or not isinstance(arg, dict):
if isinstance(bin_op_left, dict) and 'slice' in bin_op_left:
_type = self.func_data['keys'][bin_op_left['slice']][
bin_op_left['arg']['args']]['type']
elif isinstance(bin_op_right, dict) and 'slice' in bin_op_right:
_type = self.func_data['keys'][bin_op_right['slice']][
bin_op_right['arg']['args']]['type']
if _type:
self.set_type_to_func_args(arg, _type)
if not _type:
if (isinstance(node.op, _ast.Mult) or isinstance(node.op, _ast.Add)) and \
isinstance(bin_op_left, str) or isinstance(bin_op_right, str):
# if at least one operand is string - we can multiply only with int
self.set_type_to_func_args(arg, int)
else:
# mean both of them - function args
if isinstance(arg, dict):
self.set_type_to_func_args(arg, float)
elif isinstance(arg, list):
print(f'Strange things {arg}')
elif not self.func_data.get('args', {}).get(arg, {}).get('type'):
self.set_type_to_func_args(arg, float)
else:
self.set_type_to_func_args(arg, str)
for side in sides:
# TODO: need refactor all this logic with setting type by binop
opposite_side = [x for x in sides if x != side]
if isinstance(side, dict) and ('args' in side or 'arg' in side) \
and opposite_side and 'func' not in side:
if side.get('args', None):
_side_args = side.get('args', None)
elif side.get('arg'):
# with slice
_side_args = side
opposite_side = opposite_side[0]
if isinstance(_side_args, list):
# TODO: maybe for arg in args?
_side_args = _side_args[0]
if isinstance(_side_args, dict) and 'slice' in _side_args:
if 'args' not in _side_args['arg']:
# mean we have simple dict
continue
_arg_name = _side_args['arg']['args']
elif isinstance(_side_args, dict) and 'func' in _side_args:
# need check by func
continue
else:
_arg_name = _side_args
if 'slice' not in _side_args:
_type_check = bool(self.func_data['args'][_arg_name]['type'])
else:
_type_check = bool(self.func_data['keys'][_side_args['slice']][_arg_name]['type'])
if isinstance(side, dict) and _side_args and not _type_check:
if isinstance(opposite_side, dict):
if 'args' in opposite_side:
self.set_type_to_func_args(_side_args,
self.func_data['args'][opposite_side['args']]['type'])
elif 'BinOp' in opposite_side:
if 'args' in opposite_side['left']:
self.set_type_to_func_args(
_side_args, self.func_data['args'][opposite_side['left']['args']]['type'])
_type = True
return {'BinOp': True, 'left': bin_op_left, 'op': node.op, 'right': bin_op_right}
def func_value_extract(self, node, variables_names, variables):
if 'id' in node.func.__dict__:
if node.func.id == 'print':
return ", ".join([self.get_value(x)['text'] for x in node.args])
if node.keywords:
args = [str("{}={},".format(
x.arg, self.get_value(x.value, variables_names, variables))) for x in node.keywords]
return {'func': node.func.id, 'args': "".join(args)}
else:
args = [self.get_value(x, variables_names, variables)
for x in node.args]
if 'args' in args[0]:
return {'func': node.func.id, 'args': args}
else:
return eval("{}({})".format(node.func.id, ", ".join(args)))
else:
if node.args:
arg = self.get_value(node.args[0], variables_names, variables)
else:
arg = {}
func = self.get_value(node.func, variables_names, variables)
result = {'func': func, 'args': arg}
return result
def get_value(self, node: Any, variables_names: Dict = None, variables: List = None) -> Any:
"""
extract values from different types of node
:param node:
:param variables_names:
:param variables:
:return:
"""
node_type = node.__class__
if not variables:
variables = self.variables or []
if not variables_names:
variables_names = self.variables_names or {}
if node_type in meta.simple:
return node.__dict__[meta.values_for_ast_type[node_type]]
elif node_type in meta.iterated:
result = meta.iterated[node_type]([self.get_value(x, variables_names, variables)
for x in node.__dict__[meta.values_for_ast_type[node_type]]])
return result
elif isinstance(node, _ast.Name):
return self.process_ast_name(node, variables_names, variables)
elif isinstance(node, _ast.Assign):
if isinstance(node.targets[0], _ast.Tuple):
return self.separate_assing_nodes(node, variables_names, variables)
return self.get_value(node.value, variables_names, variables)
elif isinstance(node, _ast.Dict):
return {self.get_value(key, variables_names, variables): self.get_value(
node.values[num], variables_names, variables)
for num, key in enumerate(node.keys)}
elif isinstance(node, _ast.Raise):
return {'error': node.exc.func.id, 'comment': self.get_value(node.exc.args[0])}
elif isinstance(node, ast.BinOp):
return self.bin_op_value_extract(node, variables_names, variables)
elif isinstance(node, _ast.Subscript):
arg = self.get_value(node.value, variables_names, variables)
_slice = self.get_value(node.slice, variables_names, variables)
if 'args' in arg and 'func' not in arg:
self.set_slices_to_func_args(arg['args'], _slice)
return {'arg': arg, 'slice': _slice}
elif isinstance(node, _ast.Index):
return self.get_value(node.value, variables_names, variables)
elif isinstance(node, dict):
return node
elif 'func' in node.__dict__:
return self.func_value_extract(node, variables_names, variables)
elif isinstance(node, _ast.Compare):
result = {'left': self.get_value(node.left, variables_names, variables),
'ops': self.get_value(node.ops[0], variables_names, variables),
'comparators': self.get_value(node.comparators[0], variables_names, variables)}
if 'args' in result['left']:
# TODO: need to change this, because one arg can be several diff types for diff result
print(result['left']['args'])
print(node.__dict__)
self.set_type_to_func_args(result['left']['args'],
type(result['comparators']))
return result
elif type(node) in meta.operators:
return meta.operators[type(node)]
elif isinstance(node, _ast.Expr):
return self.get_value(node.value)
elif isinstance(node, _ast.Call):
return {node.func.id: [self.get_value(arg) for arg in node.args][0]}
elif isinstance(node, _ast.JoinedStr):
# TODO: need to make normal process
result = {'text': self.source[node.lineno - 1][node.col_offset:-1]}
return result
elif isinstance(node, _ast.FormattedValue):
return self.get_value(node.value)
elif isinstance(node, _ast.UnaryOp):
_op_map = {
_ast.USub: '-',
_ast.UAdd: '+',
_ast.Invert: '~',
_ast.Not: '!'
}
return eval(f'{_op_map[node.op.__class__]}{self.get_value(node.operand)}')
elif isinstance(node, _ast.Attribute):
if getattr(node.value, 'id', None) and getattr(node.value, 'id', None) in self.func_data.get('args', {}):
# TODO: need to add with slice
self.set_type_by_attrib(node.value.id, node.attr)
value = self.get_value(node.value, variables_names, variables)
attr = self.get_attr_call_line(node)
return {'l_value': value, 'attr': attr}
elif isinstance(node, _ast.Return):
return {'result': self.get_value(node.value, variables_names, variables)}
elif isinstance(node, _ast.AugAssign):
# arg_1 *= 10 operations
arg = self.get_value(node.target, variables_names, variables)
# TODO: need to modify type set, can be str also
if not isinstance(arg['args'], dict):
if not self.func_data['args'][arg['args']].get('type') or (
isinstance(self.func_data['args'][arg['args']]['type'], dict)
and self.func_data['args'][arg['args']]['type'] is None):
self.set_type_to_func_args(arg['args'], int)
if 'args' in arg:
return {'arg': arg,
'op': f'{meta.operators[node.op.__class__]}',
'l_value': self.get_value(node.value, variables_names, variables)}
else:
raise
elif isinstance(node, _ast.NameConstant):
# True - False
return node.value
elif isinstance(node, _ast.In):
# True - False
return 'in'
elif isinstance(node, _ast.NotIn):
return 'not in'
elif isinstance(node, _ast.BoolOp):
return {'op': node.op, 'values': [self.get_value(
value, variables_names, variables) for value in node.values]}
else:
print("new type",
node,
node.__dict__)
raise
@staticmethod
def reverse_condition(statement: Dict) -> Dict:
not_statement = deepcopy(statement)
# change to opposite in pair != to == > to <= and etc
not_statement['ops'] = meta.ops_pairs[not_statement['ops']]
# mean that this is reversed from previous strategy,
# we don't need to reverse it in next strategies
not_statement['previous'] = True
return not_statement
def reverse_condition_group(self, statement_group: Union[Dict, List]) -> Dict:
if isinstance(statement_group, list):
not_statement_group = []
for statement in statement_group:
not_statement_group.append(self.reverse_condition(statement))
return not_statement_group
else:
return self.reverse_condition(statement_group)
def get_reversed_previous_statement(self, previous_statement: List) -> List:
""" iterate other conditions in strategy and reverse
them if they are was not not reversed previous """
not_previous_statement = []
for statement in previous_statement:
if 'previous' not in statement:
not_previous_statement.append(self.reverse_condition_group(statement))
else:
not_previous_statement.append(statement)
return not_previous_statement
def form_strategies(self, func_data: Dict) -> Dict:
# TODO: need to add strategies for depend_on args - when arg match to expected value and when not
s = []
if not func_data.get('ifs'):
s.append(StrategyAny())
else:
for num, condition in enumerate(func_data['ifs']):
# for every next if after if with 0 number we add rule not previous rule
if num != 0:
previous_statement = func_data['ifs'][num - 1]
condition += self.get_reversed_previous_statement(previous_statement)
s.append(condition)
# now add last strategy, that exclude all previous strategies
s.append(self.get_reversed_previous_statement(s[-1]))
new_rules = []
funcs_checked = []
for strategy_block in s:
if not isinstance(strategy_block, StrategyAny):
# TODO: need to refactor and move out
for strategy in strategy_block:
for side in ['comparators', 'left']:
if isinstance(strategy[side], dict) and 'func' in strategy[
side] and strategy[side]['func'] not in funcs_checked:
if 'args' in strategy[side] and isinstance(strategy[side]['args'], str):
arg = strategy[side]['func']['l_value'].get('args', None)
funcs_checked.append(strategy[side]['func'])
if arg:
rule = {'comparators': strategy[side]['args'],
'left': {'args': arg},
'ops': 'in'}
opposite_rule = {'comparators': strategy[side]['args'],
'left': {'args': arg},
'ops': 'not in'}
new_rules.append(rule)
new_rules.append(opposite_rule)
_new_s = []
for rule in new_rules:
for strategy_block in s:
new_block = [x for x in strategy_block]
new_block.append(rule)
_new_s.append(new_block)
if _new_s:
s = _new_s
func_data['s'] = s
if len(func_data['return']) < len(func_data['s']):
if _new_s:
result = deepcopy(func_data['return'][-1])
else:
result = None
for _ in range(len(func_data['s']) - len(func_data['return'])):
func_data['return'].append({'result': result})
return func_data
def get_attr_call_line(self, node: _ast.Attribute) -> Text:
line = self.source[node.lineno-1][node.col_offset:]
_call = ''
# line after attr
split_by_attr = line.split(node.attr)[1]
if split_by_attr.startswith('.'):
_call = split_by_attr.split(',')[0]
attr_call = node.attr + _call
return attr_call
def identify_type_by_attr(self, inner_function_arg: Union[Dict, Any],
func: Dict, variables: List, variables_names: Dict) -> None:
# arg - l_value for attrib in function
# TODO: add check for args in variables, split method
arg = func['l_value']['args']
arg_type = None
if arg in self.func_data['args']:
func_arg_type = self.func_data['args'][arg].get('type', None)
if func_arg_type:
if isinstance(func_arg_type, dict):
if not func_arg_type:
arg_type = func_arg_type
else:
arg_type = func_arg_type
attrib = func["attr"]
if not arg_type:
arg_type = self.set_type_by_attrib(arg, attrib=attrib)
init_arg_line = f'{arg} = {arg_type.__name__}(); '
# get method completion
line = f'{init_arg_line}{arg}.' + attrib
line = line[:-1]
script = jedi.Script(line)
completions = script.complete(1, len(line))
# get params names
first_param = [x for x in get_signature_param_names([completions[0]])][0]
line = line.split('.')[0] + '.' + completions[0].name + '(' + str(first_param.get_public_name())
line = line[:-1]
script = jedi.Script(line)
# get params details - types
completions = script.complete(1, len(line))
split_line = jedi_param_type_line.format(param_name=first_param.get_public_name()[:-1])
split_description = completions[0].description.split(split_line)
complete_type = split_description[1]
self.set_type_to_func_args(inner_function_arg, complete_type)
self.set_dependency_to_arg(inner_function_arg, arg)
def set_dependency_to_arg(self, arg: Text, dependency_arg: Text):
"""
:param arg:
:param dependency_arg:
:return:
"""
# TODO: dependency can be different types - must be a part of (for strings), must include, must be a index of
# and etc.
self.func_data['args'][arg]['depend_on'] = dependency_arg
def set_type_by_attrib(self, arg_name: Union[Text, Dict], attrib: Text, _slice: Union[Text, int] = None):
for _type in meta.stnd_types:
if getattr(_type, attrib, None):
return self.set_type_to_func_args(arg_name, _type)
def get_function_args(self, body_item: _ast.Name):
args = OrderedDict()
for arg in body_item.args.args:
if arg.annotation:
if 'value' in arg.annotation.__dict__:
type_arg = arg.annotation.value.id
else:
type_arg = arg.annotation.id
else:
type_arg = self.extract_types_from_docstring(body_item)
args[arg.arg] = {'type': type_arg}
return args
def visit_ClassDef(self, node: ast.ClassDef) -> None:
class_dict = {'name': node.name,
'def': defaultdict(dict),
'async': defaultdict(dict),
'args': []}
for body_item in node.body:
if isinstance(body_item, ast.Assign):
var = [x.id for x in body_item.targets][0] if len(
body_item.targets) == 1 else [x.id for x in body_item.targets]
if isinstance(body_item.value, _ast.List):
value = [self.get_value(x) for x in body_item.value.elts]
else:
value = self.get_value(body_item.value)
class_dict['args'].append((var, value))
if not isinstance(body_item, ast.FunctionDef):
continue
args = self.get_function_args(body_item)
defaults = []
for item in body_item.args.defaults:
if isinstance(item, _ast.Str):
defaults.append(item.s)
elif isinstance(item, _ast.Num):
defaults.append(item.n)
else:
defaults.append(item.value)
if len(args) > len(defaults):
[defaults.insert(0, 'no_default')
for _ in range(len(args) - len(defaults))]
for num, arg in enumerate(args):
args[arg]['default'] = defaults[num]
funct_info = self.visit_FunctionDef(body_item, class_=True)
if funct_info.get('args'):
funct_info['doc'] = self.extract_types_from_docstring(body_item)
for decorator in body_item.decorator_list:
if isinstance(decorator, ast.Name) and decorator.id == 'staticmethod':
class_dict['def']['static'][body_item.name] = funct_info
break
elif isinstance(decorator, ast.Name) and decorator.id == 'classmethod':
class_dict['def']['class'][body_item.name] = funct_info
break
else:
class_dict['def']['self'][body_item.name] = funct_info
self.tree['classes'].append(class_dict)
def visit_AsyncFunctionDef(self, node):
self.visit_FunctionDef(node, async_f=True)
@staticmethod
def extract_types_from_docstring(body_item: _ast.Name) -> dict:
""" try to get types form node
:param body_item:
"""
# TODO: need change this to jedi
doc = ast.get_docstring(body_item)
doc_types = None
if not doc or 'type' not in doc:
doc_types = None
else:
for arg in body_item.args.args:
print('type', arg.arg, doc.split(arg.arg))
return doc_types
def report(self) -> None:
pprint(self.tree)
| [
"collections.OrderedDict",
"_ast.Assign",
"jedi.Script",
"collections.defaultdict",
"jedi.api.completion.get_signature_param_names",
"copy.deepcopy",
"_ast.Num",
"ast.get_docstring",
"pprint.pprint"
] | [((2528, 2553), 'copy.deepcopy', 'deepcopy', (['variables_names'], {}), '(variables_names)\n', (2536, 2553), False, 'from copy import deepcopy\n'), ((2574, 2593), 'copy.deepcopy', 'deepcopy', (['variables'], {}), '(variables)\n', (2582, 2593), False, 'from copy import deepcopy\n'), ((32453, 32472), 'copy.deepcopy', 'deepcopy', (['statement'], {}), '(statement)\n', (32461, 32472), False, 'from copy import deepcopy\n'), ((37873, 37890), 'jedi.Script', 'jedi.Script', (['line'], {}), '(line)\n', (37884, 37890), False, 'import jedi\n'), ((38199, 38216), 'jedi.Script', 'jedi.Script', (['line'], {}), '(line)\n', (38210, 38216), False, 'import jedi\n'), ((39355, 39368), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (39366, 39368), False, 'from collections import defaultdict, OrderedDict\n'), ((42321, 42349), 'ast.get_docstring', 'ast.get_docstring', (['body_item'], {}), '(body_item)\n', (42338, 42349), False, 'import ast\n'), ((42627, 42644), 'pprint.pprint', 'pprint', (['self.tree'], {}), '(self.tree)\n', (42633, 42644), False, 'from pprint import pprint\n'), ((8168, 8193), 'copy.deepcopy', 'deepcopy', (['variables_names'], {}), '(variables_names)\n', (8176, 8193), False, 'from copy import deepcopy\n'), ((10321, 10372), '_ast.Assign', '_ast.Assign', ([], {'targets': '[node.target]', 'value': 'node.iter'}), '(targets=[node.target], value=node.iter)\n', (10332, 10372), False, 'import _ast\n'), ((12964, 12981), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (12975, 12981), False, 'from collections import defaultdict, OrderedDict\n'), ((39907, 39924), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (39918, 39924), False, 'from collections import defaultdict, OrderedDict\n'), ((39957, 39974), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (39968, 39974), False, 'from collections import defaultdict, OrderedDict\n'), ((5365, 5418), '_ast.Assign', '_ast.Assign', ([], {'targets': '[target_node]', 'value': 'inner_value'}), '(targets=[target_node], value=inner_value)\n', (5376, 5418), False, 'import _ast\n'), ((36258, 36291), 'copy.deepcopy', 'deepcopy', (["func_data['return'][-1]"], {}), "(func_data['return'][-1])\n", (36266, 36291), False, 'from copy import deepcopy\n'), ((38004, 38047), 'jedi.api.completion.get_signature_param_names', 'get_signature_param_names', (['[completions[0]]'], {}), '([completions[0]])\n', (38029, 38047), False, 'from jedi.api.completion import get_signature_param_names\n'), ((5636, 5677), '_ast.Assign', '_ast.Assign', ([], {'targets': '[target]', 'value': 'elem'}), '(targets=[target], value=elem)\n', (5647, 5677), False, 'import _ast\n'), ((5936, 5978), '_ast.Assign', '_ast.Assign', ([], {'targets': '[target]', 'value': 'value'}), '(targets=[target], value=value)\n', (5947, 5978), False, 'import _ast\n'), ((5250, 5265), '_ast.Num', '_ast.Num', ([], {'n': 'num'}), '(n=num)\n', (5258, 5265), False, 'import _ast\n')] |
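The `Analyzer` above is a large `ast.NodeVisitor`; for readers unfamiliar with that pattern, here is a minimal standard-library-only visitor showing the `visit_*`/`generic_visit` mechanics it builds on:

```python
import ast

class ImportCollector(ast.NodeVisitor):
    """Collects the names of plain `import` statements, mirroring Analyzer.visit_Import."""

    def __init__(self):
        self.imports = []

    def visit_Import(self, node):
        for alias in node.names:
            self.imports.append(alias.name)
        self.generic_visit(node)

collector = ImportCollector()
collector.visit(ast.parse("import os\nimport sys\n"))
print(collector.imports)  # ['os', 'sys']
```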
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2011 <NAME> <<EMAIL>>
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import unittest2
from mom import _compat
from mom import builtins
from mom import codec
from mom.codec import _alt_base
from mom.codec import base58
from mom.codec import integer
from mom.security import random
from mom.tests import constants
__author__ = "<EMAIL> (<NAME>)"
b = builtins.b
ZERO_BYTE = _compat.ZERO_BYTE
RANDOM_BYTES = random.generate_random_bytes(384)
ZERO_BYTES_4 = ZERO_BYTE * 4
#raw_data = hex_decode(b("005cc87f4a3fdfe3a2346b6953267ca867282630d3f9b78e64"))
RAW_DATA = b("\x00\\\xc8\x7fJ?\xdf\xe3\xa24kiS&|\xa8g(&0\xd3\xf9\xb7\x8ed")
ENCODED = b("19TbMSWwHvnxAKy12iNm3KdbGfzfaMFViT")
ENCODED_WITH_WHITESPACE = b("""
19TbMSWwHvnxAKy12iN
m3KdbGfzfaMFViT
""")
PADDING_RAW = b("""\
\x00\x00\xa4\x97\xf2\x10\xfc\x9c]\x02\xfc}\xc7\xbd!\x1c\xb0\xc7M\xa0\xae\x16\
""")
class Test_base58_codec(unittest2.TestCase):
def test_ensure_charset_length(self):
self.assertEqual(len(base58.ASCII58_BYTES), 58)
self.assertEqual(len(base58.ALT58_BYTES), 58)
def test_codec_identity(self):
self.assertEqual(base58.b58decode(base58.b58encode(RANDOM_BYTES)), RANDOM_BYTES)
self.assertEqual(_alt_base.b58decode_naive(base58.b58encode(RANDOM_BYTES)), RANDOM_BYTES)
self.assertEqual(codec.base58_decode(codec.base58_encode(RANDOM_BYTES)),
RANDOM_BYTES)
def test_encodes_zero_prefixed_padding(self):
self.assertEqual(base58.b58decode(base58.b58encode(PADDING_RAW)), PADDING_RAW)
self.assertEqual(_alt_base.b58decode_naive(base58.b58encode(PADDING_RAW)), PADDING_RAW)
self.assertEqual(codec.base58_decode(codec.base58_encode(PADDING_RAW)), PADDING_RAW)
def test_zero_bytes(self):
self.assertEqual(base58.b58encode(ZERO_BYTES_4), b("1111"))
self.assertEqual(base58.b58decode(b("1111")), ZERO_BYTES_4)
self.assertEqual(base58.b58encode(ZERO_BYTE), b("1"))
self.assertEqual(base58.b58decode(b("1")), ZERO_BYTE)
self.assertEqual(_alt_base.b58encode_naive(ZERO_BYTES_4), b("1111"))
self.assertEqual(_alt_base.b58decode_naive(b("1111")), ZERO_BYTES_4)
self.assertEqual(_alt_base.b58encode_naive(ZERO_BYTE), b("1"))
self.assertEqual(_alt_base.b58decode_naive(b("1")), ZERO_BYTE)
self.assertEqual(codec.base58_encode(ZERO_BYTES_4), b("1111"))
self.assertEqual(codec.base58_decode(b("1111")), ZERO_BYTES_4)
self.assertEqual(codec.base58_encode(ZERO_BYTE), b("1"))
self.assertEqual(codec.base58_decode(b("1")), ZERO_BYTE)
# The bitcoin implementation is a broken one. Do not use.
# def test_bitcoin_implementation(self):
# hello_world = b("\x48\x65\x6c\x6c\x6f\x20\x77\x6f\x72\x6c\x64")
# encoded_hello_world = base58.b58encode(hello_world)
#
# self.assertEqual(bitcointools_base58.b58encode_bitcoin(raw_data), encoded)
# self.assertEqual(bitcointools_base58.b58decode_bitcoin(encoded), raw_data)
# self.assertEqual(encoded_hello_world, bitcointools_base58.b58encode_bitcoin(hello_world))
# self.assertEqual(bitcointools_base58.b58decode_bitcoin(encoded_hello_world), hello_world)
#
# def test_bitcoin_zero_encode(self):
# self.assertEqual(bitcointools_base58.b58encode_bitcoin(zero_bytes_4), b("1111"))
# self.assertEqual(bitcointools_base58.b58encode_bitcoin(ZERO_BYTE), b("1"))
#
# def test_bitcoin_zero_decode(self):
# self.assertEqual(bitcointools_base58.b58decode_bitcoin(b("1111")), zero_bytes_4)
# self.assertEqual(bitcointools_base58.b58decode_bitcoin(b("1")), ZERO_BYTE)
def test_encoding_and_decoding(self):
hello_world = b("\x48\x65\x6c\x6c\x6f\x20\x77\x6f\x72\x6c\x64")
encoded_hello_world = base58.b58encode(hello_world)
self.assertEqual(encoded_hello_world, _alt_base.b58encode_naive(hello_world))
self.assertEqual(base58.b58decode(encoded_hello_world), hello_world)
self.assertEqual(integer.bytes_to_uint(base58.b58decode(b("16Ho7Hs"))), 3471844090)
self.assertEqual(base58.b58encode(integer.uint_to_bytes(3471844090, 5)), b("16Ho7Hs"))
self.assertEqual(base58.b58encode(RAW_DATA), ENCODED)
self.assertEqual(base58.b58decode(ENCODED), RAW_DATA)
self.assertEqual(base58.b58decode(ENCODED_WITH_WHITESPACE), RAW_DATA)
self.assertEqual(_alt_base.b58decode_naive(ENCODED), RAW_DATA)
self.assertEqual(_alt_base.b58decode_naive(ENCODED_WITH_WHITESPACE), RAW_DATA)
self.assertEqual(codec.base58_encode(RAW_DATA), ENCODED)
self.assertEqual(codec.base58_decode(ENCODED), RAW_DATA)
self.assertEqual(codec.base58_decode(ENCODED_WITH_WHITESPACE), RAW_DATA)
def test_TypeError_when_bad_type(self):
self.assertRaises(TypeError, base58.b58encode, constants.UNICODE_STRING)
self.assertRaises(TypeError, _alt_base.b58encode_naive, constants.UNICODE_STRING)
self.assertRaises(TypeError, base58.b58decode, constants.UNICODE_STRING)
self.assertRaises(TypeError, _alt_base.b58decode_naive, constants.UNICODE_STRING)
| [
"mom.codec.base58.b58encode",
"mom.security.random.generate_random_bytes",
"mom.codec.base58_encode",
"mom.codec._alt_base.b58encode_naive",
"mom.codec.base58_decode",
"mom.codec._alt_base.b58decode_naive",
"mom.codec.base58.b58decode",
"mom.codec.integer.uint_to_bytes"
] | [((1061, 1094), 'mom.security.random.generate_random_bytes', 'random.generate_random_bytes', (['(384)'], {}), '(384)\n', (1089, 1094), False, 'from mom.security import random\n'), ((4355, 4384), 'mom.codec.base58.b58encode', 'base58.b58encode', (['hello_world'], {}), '(hello_world)\n', (4371, 4384), False, 'from mom.codec import base58\n'), ((2386, 2416), 'mom.codec.base58.b58encode', 'base58.b58encode', (['ZERO_BYTES_4'], {}), '(ZERO_BYTES_4)\n', (2402, 2416), False, 'from mom.codec import base58\n'), ((2514, 2541), 'mom.codec.base58.b58encode', 'base58.b58encode', (['ZERO_BYTE'], {}), '(ZERO_BYTE)\n', (2530, 2541), False, 'from mom.codec import base58\n'), ((2631, 2670), 'mom.codec._alt_base.b58encode_naive', '_alt_base.b58encode_naive', (['ZERO_BYTES_4'], {}), '(ZERO_BYTES_4)\n', (2656, 2670), False, 'from mom.codec import _alt_base\n'), ((2777, 2813), 'mom.codec._alt_base.b58encode_naive', '_alt_base.b58encode_naive', (['ZERO_BYTE'], {}), '(ZERO_BYTE)\n', (2802, 2813), False, 'from mom.codec import _alt_base\n'), ((2912, 2945), 'mom.codec.base58_encode', 'codec.base58_encode', (['ZERO_BYTES_4'], {}), '(ZERO_BYTES_4)\n', (2931, 2945), False, 'from mom import codec\n'), ((3046, 3076), 'mom.codec.base58_encode', 'codec.base58_encode', (['ZERO_BYTE'], {}), '(ZERO_BYTE)\n', (3065, 3076), False, 'from mom import codec\n'), ((4428, 4466), 'mom.codec._alt_base.b58encode_naive', '_alt_base.b58encode_naive', (['hello_world'], {}), '(hello_world)\n', (4453, 4466), False, 'from mom.codec import _alt_base\n'), ((4489, 4526), 'mom.codec.base58.b58decode', 'base58.b58decode', (['encoded_hello_world'], {}), '(encoded_hello_world)\n', (4505, 4526), False, 'from mom.codec import base58\n'), ((4743, 4769), 'mom.codec.base58.b58encode', 'base58.b58encode', (['RAW_DATA'], {}), '(RAW_DATA)\n', (4759, 4769), False, 'from mom.codec import base58\n'), ((4801, 4826), 'mom.codec.base58.b58decode', 'base58.b58decode', (['ENCODED'], {}), '(ENCODED)\n', (4817, 4826), False, 'from mom.codec import base58\n'), ((4859, 4900), 'mom.codec.base58.b58decode', 'base58.b58decode', (['ENCODED_WITH_WHITESPACE'], {}), '(ENCODED_WITH_WHITESPACE)\n', (4875, 4900), False, 'from mom.codec import base58\n'), ((4933, 4967), 'mom.codec._alt_base.b58decode_naive', '_alt_base.b58decode_naive', (['ENCODED'], {}), '(ENCODED)\n', (4958, 4967), False, 'from mom.codec import _alt_base\n'), ((5000, 5050), 'mom.codec._alt_base.b58decode_naive', '_alt_base.b58decode_naive', (['ENCODED_WITH_WHITESPACE'], {}), '(ENCODED_WITH_WHITESPACE)\n', (5025, 5050), False, 'from mom.codec import _alt_base\n'), ((5084, 5113), 'mom.codec.base58_encode', 'codec.base58_encode', (['RAW_DATA'], {}), '(RAW_DATA)\n', (5103, 5113), False, 'from mom import codec\n'), ((5145, 5173), 'mom.codec.base58_decode', 'codec.base58_decode', (['ENCODED'], {}), '(ENCODED)\n', (5164, 5173), False, 'from mom import codec\n'), ((5206, 5250), 'mom.codec.base58_decode', 'codec.base58_decode', (['ENCODED_WITH_WHITESPACE'], {}), '(ENCODED_WITH_WHITESPACE)\n', (5225, 5250), False, 'from mom import codec\n'), ((1769, 1799), 'mom.codec.base58.b58encode', 'base58.b58encode', (['RANDOM_BYTES'], {}), '(RANDOM_BYTES)\n', (1785, 1799), False, 'from mom.codec import base58\n'), ((1863, 1893), 'mom.codec.base58.b58encode', 'base58.b58encode', (['RANDOM_BYTES'], {}), '(RANDOM_BYTES)\n', (1879, 1893), False, 'from mom.codec import base58\n'), ((1951, 1984), 'mom.codec.base58_encode', 'codec.base58_encode', (['RANDOM_BYTES'], {}), '(RANDOM_BYTES)\n', (1970, 1984), False, 'from mom import 
codec\n'), ((2109, 2138), 'mom.codec.base58.b58encode', 'base58.b58encode', (['PADDING_RAW'], {}), '(PADDING_RAW)\n', (2125, 2138), False, 'from mom.codec import base58\n'), ((2201, 2230), 'mom.codec.base58.b58encode', 'base58.b58encode', (['PADDING_RAW'], {}), '(PADDING_RAW)\n', (2217, 2230), False, 'from mom.codec import base58\n'), ((2287, 2319), 'mom.codec.base58_encode', 'codec.base58_encode', (['PADDING_RAW'], {}), '(PADDING_RAW)\n', (2306, 2319), False, 'from mom import codec\n'), ((4668, 4704), 'mom.codec.integer.uint_to_bytes', 'integer.uint_to_bytes', (['(3471844090)', '(5)'], {}), '(3471844090, 5)\n', (4689, 4704), False, 'from mom.codec import integer\n')] |
import threading
from kottos.modbus.client import Client
from kottos.modbus.registers import MNS_REGISTER_TABLE
from kottos.modbus.server import data_store, serve
def test_read():
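    # spin up the Modbus test server in a daemon thread, then check that every
    # register read through the client matches the server's backing data_store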
for k, v in data_store.items():
print("{}: {}".format(k, v))
# Create our server thread
t = threading.Thread(target=serve)
# Daemon threads automatically get cleaned up
t.setDaemon(True)
t.start()
c = Client("localhost", 54321, MNS_REGISTER_TABLE)
states = c.read(0)
for r in MNS_REGISTER_TABLE:
print(
"{} ({}): {}, {}".format(
r.label, r.register, states[r.label], data_store[r.register - 1]
)
)
assert data_store[r.register - 1] == states[r.label]
| [
"kottos.modbus.server.data_store.items",
"threading.Thread",
"kottos.modbus.client.Client"
] | [((199, 217), 'kottos.modbus.server.data_store.items', 'data_store.items', ([], {}), '()\n', (215, 217), False, 'from kottos.modbus.server import data_store, serve\n'), ((296, 326), 'threading.Thread', 'threading.Thread', ([], {'target': 'serve'}), '(target=serve)\n', (312, 326), False, 'import threading\n'), ((422, 468), 'kottos.modbus.client.Client', 'Client', (['"""localhost"""', '(54321)', 'MNS_REGISTER_TABLE'], {}), "('localhost', 54321, MNS_REGISTER_TABLE)\n", (428, 468), False, 'from kottos.modbus.client import Client\n')] |
#! /usr/bin/env python3.6
from urllib.request import urlopen
from urllib.error import HTTPError
from bs4 import BeautifulSoup
try:
html = urlopen("http://www.pythonscraping.com/pages/page3.html")
except HTTPError as e:
    print(e)
    raise SystemExit(1)  # html was never assigned, so stop here instead of failing below with a NameError
bs_obj = BeautifulSoup(html.read(), "html.parser")
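# passing a callable to findAll keeps only the tags for which it returns True,
# i.e. tags that carry exactly two attributes here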
find_obj = bs_obj.findAll(lambda tag: len(tag.attrs) == 2)
print(type(find_obj))
print("")
for element in find_obj:
print(f"type(element) = {type(element)}")
print(f"element = {element}")
| [
"urllib.request.urlopen"
] | [((144, 201), 'urllib.request.urlopen', 'urlopen', (['"""http://www.pythonscraping.com/pages/page3.html"""'], {}), "('http://www.pythonscraping.com/pages/page3.html')\n", (151, 201), False, 'from urllib.request import urlopen\n')] |
from app import logger
from models import *
from goals import *
class CreateClassGoal(HomeGoal):
def __init__(self, context, name=None):
super().__init__(context)
self.klass = Class(name)
self.context.current = self.klass
self.todos = [GetClassPropertiesGoal(context, self.klass)]
self.setattr("name", name)
@property
def message(self):
return f"Created class {self.klass.name}!" if self.is_complete else self.todos[-1].message
def complete(self):
self.context.add_class(self.klass)
self.context.transition("complete")
return super().complete()
def setattr(self, attr, value):
if (attr == "name"):
if value is None:
self.todos.append(GetInputGoal(self.context, self, "name", "What do you want to call the class?"))
elif value in self.context.classes:
self.todos.append(GetInputGoal(self.context, self, attr, f"The name, {value}, has already been used. Try another name or say cancel."))
else:
self.klass.name = value
return
setattr(self, attr, value)
class GetClassPropertiesGoal(BaseGoal):
def __init__(self, context, klass):
super().__init__(context)
self.klass = klass
self.done = False
@property
def is_complete(self):
return self.done and super().is_complete
@property
def message(self):
if self.is_complete:
return f"{self.__class__.__name__} completed!"
if len(self.todos) == 0:
return "Any other properties?" if len(self.klass.properties) > 0 else "What properties does it have?"
else:
return self.todos[-1].message
def advance(self):
if self.todos:
super().advance()
return
logger.debug(f"Advancing {self.__class__.__name__}...")
if self.context.current_message == "no":
self.done = True
else:
self.todos.append(GetPropertyGoal(self.context, self.context.current_message, self.klass))
class GetPropertyGoal(BaseGoal):
def __init__(self, context, name, klass):
super().__init__(context)
self.klass = klass
self.setattr("name", name)
self.setattr("value", None)
def complete(self):
property = Property(self.klass, self.name, self.type) if self.type != "list" else ListProperty(self.klass, self.name)
self.klass.add_property(property)
return super().complete()
def setattr(self, attr, value):
if (attr == "name") and value in self.klass.properties:
self.todos.append(GetInputGoal(self.context, self, attr, f"The name, {value}, has already been used. Try another name."))
elif (attr == "value") and value is None:
self.todos.append(GetInputGoal(self.context, self, "type", "What is the property type?"))
setattr(self, attr, value)
class AddPropertyGoal(BaseGoal):
def __init__(self, context, klass=None, name=None, type=None):
super().__init__(context)
self.setattr("type", type)
self.setattr("name", name)
self.setattr("klass", klass)
@property
def message(self):
return f"Property {self.name} added to class {self.klass.name}! Anything else?" if self.is_complete else self.todos[-1].message
def complete(self):
self.klass.add_property(Property(self.klass, self.name, self.type))
return super().complete()
def setattr(self, attr, value):
if (attr == "klass"):
if value is None:
self.todos.append(GetInputGoal(self.context, self, attr, "Which class do you want to add the property to?"))
elif value not in self.context.classes:
self.todos.append(GetInputGoal(self.context, self, attr, f"Class, {value}, hasn't been created yet. Try another class or say cancel."))
else:
self.klass = self.context.classes[value]
if hasattr(self, "name") and self.name in self.klass.properties:
self.todos.append(GetInputGoal(self.context, self, "name", f"The name, {self.name}, has already been used. Try another name or say cancel."))
return
elif (attr == "name"):
if value is None:
self.todos.append(GetInputGoal(self.context, self, attr, "What do you want to call the property?"))
elif hasattr(self, "klass") and value in self.klass.properties:
self.todos.append(GetInputGoal(self.context, self, attr, f"The name, {value}, has already been used. Try another name or say cancel."))
return
elif (attr == "type") and value is None:
self.todos.append(GetInputGoal(self.context, self, "type", "What is the property type?"))
setattr(self, attr, value)
class SetPropertyActionGoal(BaseGoal):
def __init__(self, context, name=None, value=None):
super().__init__(context)
self.setattr("value", value)
self.setattr("name", name)
def complete(self):
assert hasattr(self, "actions")
self.actions.append(SetPropertyAction(self.name, self.value))
return super().complete()
def setattr(self, attr, value):
if attr == "name" and value is None:
self.todos.append(GetInputGoal(self.context, self, attr, f"What property do you want to set?"))
elif attr == "value" and value is None:
self.todos.append(GetInputGoal(self.context, self, attr, f"What value do you want to set the property to?"))
setattr(self, attr, value)
| [
"app.logger.debug"
] | [((1853, 1908), 'app.logger.debug', 'logger.debug', (['f"""Advancing {self.__class__.__name__}..."""'], {}), "(f'Advancing {self.__class__.__name__}...')\n", (1865, 1908), False, 'from app import logger\n')] |
import os
from chess_pieces import Pawn, Rook, Bishop, Knight, Queen, Emperor
# diplay board
# board = [" ", "_", " ", "_", " ", "_", " ", "_", " ", "_", " ", "_", " ", "_", " ", "_", " ",
# "|", " ", "|", " ", "|", " ", "|", " ", "|", " ", "|", " ", "|", " ", "|", " ", "|",
# "|", "_", "|", "_", "|", "_", "|", "_", "|", "_", "|", "_", "|", "_", "|", "_", "|",
# " ", "_", " ", "_", " ", "_", " ", "_", " ", "_", " ", "_", " ", "_", " ", "_", " ",
# "|", " ", "|", " ", "|", " ", "|", " ", "|", " ", "|", " ", "|", " ", "|", " ", "|",
# "|", "_", "|", "_", "|", "_", "|", "_", "|", "_", "|", "_", "|", "_", "|", "_", "|",
# " ", "_", " ", "_", " ", "_", " ", "_", " ", "_", " ", "_", " ", "_", " ", "_", " ",
# "|", " ", "|", " ", "|", " ", "|", " ", "|", " ", "|", " ", "|", " ", "|", " ", "|",
# "|", "_", "|", "_", "|", "_", "|", "_", "|", "_", "|", "_", "|", "_", "|", "_", "|",
# " ", "_", " ", "_", " ", "_", " ", "_", " ", "_", " ", "_", " ", "_", " ", "_", " ",
# "|", " ", "|", " ", "|", " ", "|", " ", "|", " ", "|", " ", "|", " ", "|", " ", "|",
# "|", "_", "|", "_", "|", "_", "|", "_", "|", "_", "|", "_", "|", "_", "|", "_", "|",
# " ", "_", " ", "_", " ", "_", " ", "_", " ", "_", " ", "_", " ", "_", " ", "_", " ",
# "|", " ", "|", " ", "|", " ", "|", " ", "|", " ", "|", " ", "|", " ", "|", " ", "|",
# "|", "_", "|", "_", "|", "_", "|", "_", "|", "_", "|", "_", "|", "_", "|", "_", "|",
# " ", "_", " ", "_", " ", "_", " ", "_", " ", "_", " ", "_", " ", "_", " ", "_", " ",
# "|", " ", "|", " ", "|", " ", "|", " ", "|", " ", "|", " ", "|", " ", "|", " ", "|",
# "|", "_", "|", "_", "|", "_", "|", "_", "|", "_", "|", "_", "|", "_", "|", "_", "|",
# " ", "_", " ", "_", " ", "_", " ", "_", " ", "_", " ", "_", " ", "_", " ", "_", " ",
# "|", " ", "|", " ", "|", " ", "|", " ", "|", " ", "|", " ", "|", " ", "|", " ", "|",
# "|", "_", "|", "_", "|", "_", "|", "_", "|", "_", "|", "_", "|", "_", "|", "_", "|",
# " ", "_", " ", "_", " ", "_", " ", "_", " ", "_", " ", "_", " ", "_", " ", "_", " ",
# "|", " ", "|", " ", "|", " ", "|", " ", "|", " ", "|", " ", "|", " ", "|", " ", "|",
# "|", "_", "|", "_", "|", "_", "|", "_", "|", "_", "|", "_", "|", "_", "|", "_", "|",
#
class PositionHandler:
def __init__(self, all_pieces:list):
self.all_pieces = all_pieces
def get_piece(self, position):
for piece in self.all_pieces:
if piece.position == position:
return piece
return False
class Player:
def __init__(self, team, pieces):
self.team = team
self.pieces = pieces
def update_piece(self, pieces):
self.pieces = pieces
def give_pieces_position(self):
return f"{self.team} team has :\n"+"\n".join([f"{pos.name} {pos.position}" for pos in self.pieces])
class PlayerHandler:
def __init__(self, p1, p2):
self.player1 = p1
self.player1_king = p1.pieces[-1]
self.player2 = p2
self.player2_king = p2.pieces[-1]
self.current = p1
def change_player(self):
if self.current == self.player1:
self.current = self.player2
else:
self.current = self.player1
def remove_piece(self, piece):
team = piece.team
if self.player1.team == team:
all_pieces = self.player1.pieces
all_pieces.pop(all_pieces.index(piece))
self.player1.update_piece(all_pieces)
else:
all_pieces = self.player2.pieces
all_pieces.pop(all_pieces.index(piece))
self.player2.update_piece(all_pieces)
def play_piece(self, piece, position, board, pos_handler):
check, n_board, piece = piece.play_move(position, board, pos_handler)
if check:
return True, piece, n_board
else:
return False, piece, n_board
def checkmate(self, board, pos_handler):
if self.current.team == self.player1.team:
opp_pieces = self.player2.pieces
player_king = self.current.pieces[-1]
if player_king.symbol == "E":
check = player_king.checkmate(opp_pieces, board, pos_handler)
return check
else:
return False
elif self.current.team == self.player2.team:
opp_pieces = self.player1.pieces
player_king = self.current.pieces[-1]
if player_king.symbol == "E":
check = player_king.checkmate(opp_pieces, board, pos_handler)
return check
else:
return False
def game_end(self):
if self.player1_king not in self.player1.pieces:
return True, self.player1
elif self.player2_king not in self.player2.pieces:
return True, self.player2
else:
return False, None
def DisplayBoard(board):
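    # prints the column indices, then the 8x8 board: odd printed rows show the cell
    # contents taken from `board`, even printed rows draw the grid's "_" separators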
row = 0
print(" ", end="")
for c in range(8):
print(" "+str(c), end="")
print()
print(" ", end="")
for _ in range(8):
print(" _", end="")
print()
for i in range(1, 17):
if i%2 == 1:
cur_elements = board[row]
print(str(row)+" ", end="")
print("|", end="")
cur_e_index = 0
for i in range(8):
print(cur_elements[cur_e_index]+"|", end="")
cur_e_index += 1
print()
row += 1
else:
print(" |", end="")
for _ in range(8):
print("_|", end="")
print()
def Initiator():
white_pieces = []
black_pieces = []
for i in range(0, 8):
white_pieces.append(Pawn((0, i)))
black_pieces.append(Pawn((1, i)))
white_pieces.append(Rook((0, 0)))
black_pieces.append(Rook((1, 0)))
white_pieces.append(Rook((0, 7)))
black_pieces.append(Rook((1, 7)))
white_pieces.append(Knight((0, 1)))
black_pieces.append(Knight((1, 1)))
white_pieces.append(Knight((0, 6)))
black_pieces.append(Knight((1, 6)))
white_pieces.append(Bishop((0, 2)))
black_pieces.append(Bishop((1, 2)))
white_pieces.append(Bishop((0, 5)))
black_pieces.append(Bishop((1, 5)))
white_pieces.append(Queen((0, 3)))
black_pieces.append(Queen((1, 3)))
white_pieces.append(Emperor((0, 4)))
black_pieces.append(Emperor((1, 4)))
return [white_pieces, black_pieces, []]
def PositionChecks(pos):
if type(pos) == tuple:
if len(pos) == 2:
if type(pos[0]) == int and type(pos[1]) == int:
if (pos[0] >= 0 and pos[0] <= 7) and (pos[1] >= 0 and pos[1] <= 7):
return True
return False
def clear_screen():
if os.name == "posix":
os.system("clear")
else:
os.system("cls")
| [
"chess_pieces.Knight",
"chess_pieces.Rook",
"chess_pieces.Queen",
"chess_pieces.Bishop",
"chess_pieces.Pawn",
"chess_pieces.Emperor",
"os.system"
] | [((5152, 5164), 'chess_pieces.Rook', 'Rook', (['(0, 0)'], {}), '((0, 0))\n', (5156, 5164), False, 'from chess_pieces import Pawn, Rook, Bishop, Knight, Queen, Emperor\n'), ((5187, 5199), 'chess_pieces.Rook', 'Rook', (['(1, 0)'], {}), '((1, 0))\n', (5191, 5199), False, 'from chess_pieces import Pawn, Rook, Bishop, Knight, Queen, Emperor\n'), ((5222, 5234), 'chess_pieces.Rook', 'Rook', (['(0, 7)'], {}), '((0, 7))\n', (5226, 5234), False, 'from chess_pieces import Pawn, Rook, Bishop, Knight, Queen, Emperor\n'), ((5257, 5269), 'chess_pieces.Rook', 'Rook', (['(1, 7)'], {}), '((1, 7))\n', (5261, 5269), False, 'from chess_pieces import Pawn, Rook, Bishop, Knight, Queen, Emperor\n'), ((5292, 5306), 'chess_pieces.Knight', 'Knight', (['(0, 1)'], {}), '((0, 1))\n', (5298, 5306), False, 'from chess_pieces import Pawn, Rook, Bishop, Knight, Queen, Emperor\n'), ((5329, 5343), 'chess_pieces.Knight', 'Knight', (['(1, 1)'], {}), '((1, 1))\n', (5335, 5343), False, 'from chess_pieces import Pawn, Rook, Bishop, Knight, Queen, Emperor\n'), ((5366, 5380), 'chess_pieces.Knight', 'Knight', (['(0, 6)'], {}), '((0, 6))\n', (5372, 5380), False, 'from chess_pieces import Pawn, Rook, Bishop, Knight, Queen, Emperor\n'), ((5403, 5417), 'chess_pieces.Knight', 'Knight', (['(1, 6)'], {}), '((1, 6))\n', (5409, 5417), False, 'from chess_pieces import Pawn, Rook, Bishop, Knight, Queen, Emperor\n'), ((5440, 5454), 'chess_pieces.Bishop', 'Bishop', (['(0, 2)'], {}), '((0, 2))\n', (5446, 5454), False, 'from chess_pieces import Pawn, Rook, Bishop, Knight, Queen, Emperor\n'), ((5477, 5491), 'chess_pieces.Bishop', 'Bishop', (['(1, 2)'], {}), '((1, 2))\n', (5483, 5491), False, 'from chess_pieces import Pawn, Rook, Bishop, Knight, Queen, Emperor\n'), ((5514, 5528), 'chess_pieces.Bishop', 'Bishop', (['(0, 5)'], {}), '((0, 5))\n', (5520, 5528), False, 'from chess_pieces import Pawn, Rook, Bishop, Knight, Queen, Emperor\n'), ((5551, 5565), 'chess_pieces.Bishop', 'Bishop', (['(1, 5)'], {}), '((1, 5))\n', (5557, 5565), False, 'from chess_pieces import Pawn, Rook, Bishop, Knight, Queen, Emperor\n'), ((5588, 5601), 'chess_pieces.Queen', 'Queen', (['(0, 3)'], {}), '((0, 3))\n', (5593, 5601), False, 'from chess_pieces import Pawn, Rook, Bishop, Knight, Queen, Emperor\n'), ((5624, 5637), 'chess_pieces.Queen', 'Queen', (['(1, 3)'], {}), '((1, 3))\n', (5629, 5637), False, 'from chess_pieces import Pawn, Rook, Bishop, Knight, Queen, Emperor\n'), ((5660, 5675), 'chess_pieces.Emperor', 'Emperor', (['(0, 4)'], {}), '((0, 4))\n', (5667, 5675), False, 'from chess_pieces import Pawn, Rook, Bishop, Knight, Queen, Emperor\n'), ((5698, 5713), 'chess_pieces.Emperor', 'Emperor', (['(1, 4)'], {}), '((1, 4))\n', (5705, 5713), False, 'from chess_pieces import Pawn, Rook, Bishop, Knight, Queen, Emperor\n'), ((6027, 6045), 'os.system', 'os.system', (['"""clear"""'], {}), "('clear')\n", (6036, 6045), False, 'import os\n'), ((6055, 6071), 'os.system', 'os.system', (['"""cls"""'], {}), "('cls')\n", (6064, 6071), False, 'import os\n'), ((5081, 5093), 'chess_pieces.Pawn', 'Pawn', (['(0, i)'], {}), '((0, i))\n', (5085, 5093), False, 'from chess_pieces import Pawn, Rook, Bishop, Knight, Queen, Emperor\n'), ((5117, 5129), 'chess_pieces.Pawn', 'Pawn', (['(1, i)'], {}), '((1, i))\n', (5121, 5129), False, 'from chess_pieces import Pawn, Rook, Bishop, Knight, Queen, Emperor\n')] |
from cc3d import CompuCellSetup
from CellGDerivedPropertiesSteppables import CellGDerivedPropertiesSteppable
CompuCellSetup.register_steppable(steppable=CellGDerivedPropertiesSteppable(frequency=1))
CompuCellSetup.run()
| [
"CellGDerivedPropertiesSteppables.CellGDerivedPropertiesSteppable",
"cc3d.CompuCellSetup.run"
] | [((213, 233), 'cc3d.CompuCellSetup.run', 'CompuCellSetup.run', ([], {}), '()\n', (231, 233), False, 'from cc3d import CompuCellSetup\n'), ((165, 209), 'CellGDerivedPropertiesSteppables.CellGDerivedPropertiesSteppable', 'CellGDerivedPropertiesSteppable', ([], {'frequency': '(1)'}), '(frequency=1)\n', (196, 209), False, 'from CellGDerivedPropertiesSteppables import CellGDerivedPropertiesSteppable\n')] |
import torch
from torch.nn import Module
from torch import nn
from torchsummary import summary
from dataset import XrayDataset
class DoubleConv(Module):
def __init__(self, in_channels, out_channels):
super(DoubleConv, self).__init__()
self.conv1 = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=3, padding=1)
self.bn1 = nn.BatchNorm2d(num_features=out_channels)
self.relu1 = nn.ReLU()
self.conv2 = nn.Conv2d(in_channels=out_channels, out_channels=out_channels, kernel_size=3, padding=1)
self.bn2 = nn.BatchNorm2d(num_features=out_channels)
self.relu2 = nn.ReLU()
self.pool = nn.MaxPool2d(kernel_size=2)
def forward(self, input_tensor):
output_tensor = self.conv1(input_tensor)
output_tensor = self.bn1(output_tensor)
output_tensor = self.relu1(output_tensor)
output_tensor = self.conv2(output_tensor)
output_tensor = self.bn2(output_tensor)
output_tensor = self.relu2(output_tensor)
return output_tensor
class Down(Module):
def __init__(self, in_channels, out_channels):
super(Down, self).__init__()
self.double_conv = DoubleConv(in_channels=in_channels, out_channels=out_channels)
self.pool = nn.MaxPool2d(kernel_size=2)
def forward(self, input_tensor):
output_tensor = self.double_conv(input_tensor)
return self.pool(output_tensor), output_tensor
class Up(Module):
def __init__(self, in_channels, out_channels):
super(Up, self).__init__()
self.up_conv = nn.ConvTranspose2d(in_channels=in_channels, out_channels=int(in_channels / 2), kernel_size=2,
stride=2)
self.relu = nn.ReLU()
self.double_conv = DoubleConv(in_channels=in_channels, out_channels=out_channels)
def forward(self, concat_tensor, input_tensor):
output_tensor = self.relu(self.up_conv(input_tensor))
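        # skip connection: the transposed convolution halved the channel count,
        # so concatenating with the encoder feature map restores it to in_channels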
output_tensor = torch.cat([concat_tensor, output_tensor], dim=1)
output_tensor = self.double_conv(output_tensor)
return output_tensor
class BottleNeck(Module):
def __init__(self, in_channels, out_channels):
super(BottleNeck, self).__init__()
self.conv1 = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=3, padding=1)
self.bn1 = nn.BatchNorm2d(num_features=out_channels)
self.relu1 = nn.ReLU()
self.conv2 = nn.Conv2d(in_channels=out_channels, out_channels=out_channels, kernel_size=3, padding=1)
self.bn2 = nn.BatchNorm2d(num_features=out_channels)
self.relu2 = nn.ReLU()
def forward(self, input_tensor):
output_tensor = self.relu1(self.bn1(self.conv1(input_tensor)))
output_tensor = self.relu2(self.bn2(self.conv2(output_tensor)))
return output_tensor
class Unet(Module):
def __init__(self):
super(Unet, self).__init__()
self.down1 = Down(in_channels=1, out_channels=32)
self.down2 = Down(in_channels=32, out_channels=64)
self.down3 = Down(in_channels=64, out_channels=128)
self.down4 = Down(in_channels=128, out_channels=256)
self.bottle_neck = BottleNeck(in_channels=256, out_channels=512)
self.up4 = Up(in_channels=512, out_channels=256)
self.up3 = Up(in_channels=256, out_channels=128)
self.up2 = Up(in_channels=128, out_channels=64)
self.up1 = Up(in_channels=64, out_channels=32)
self.conv = nn.Conv2d(in_channels=32, out_channels=1, kernel_size=3, padding=1)
self.classifier = nn.Sigmoid()
def forward(self, input_tensor):
output1 = self.down1(input_tensor)
output2 = self.down2(output1[0])
output3 = self.down3(output2[0])
output4 = self.down4(output3[0])
output_tensor = self.bottle_neck(output4[0])
output_tensor = self.up4(output4[-1], output_tensor)
output_tensor = self.up3(output3[-1], output_tensor)
output_tensor = self.up2(output2[-1], output_tensor)
output_tensor = self.up1(output1[-1], output_tensor)
output_tensor = self.classifier(self.conv(output_tensor))
return output_tensor
if __name__ == '__main__':
model = Unet()
summary(model, (1, 512, 512))
pass
| [
"torch.nn.BatchNorm2d",
"torch.nn.ReLU",
"torch.nn.Sigmoid",
"torch.nn.Conv2d",
"torch.nn.MaxPool2d",
"torchsummary.summary",
"torch.cat"
] | [((4273, 4302), 'torchsummary.summary', 'summary', (['model', '(1, 512, 512)'], {}), '(model, (1, 512, 512))\n', (4280, 4302), False, 'from torchsummary import summary\n'), ((272, 363), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': 'in_channels', 'out_channels': 'out_channels', 'kernel_size': '(3)', 'padding': '(1)'}), '(in_channels=in_channels, out_channels=out_channels, kernel_size=3,\n padding=1)\n', (281, 363), False, 'from torch import nn\n'), ((379, 420), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', ([], {'num_features': 'out_channels'}), '(num_features=out_channels)\n', (393, 420), False, 'from torch import nn\n'), ((442, 451), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (449, 451), False, 'from torch import nn\n'), ((474, 567), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': 'out_channels', 'out_channels': 'out_channels', 'kernel_size': '(3)', 'padding': '(1)'}), '(in_channels=out_channels, out_channels=out_channels, kernel_size=\n 3, padding=1)\n', (483, 567), False, 'from torch import nn\n'), ((582, 623), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', ([], {'num_features': 'out_channels'}), '(num_features=out_channels)\n', (596, 623), False, 'from torch import nn\n'), ((645, 654), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (652, 654), False, 'from torch import nn\n'), ((676, 703), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', ([], {'kernel_size': '(2)'}), '(kernel_size=2)\n', (688, 703), False, 'from torch import nn\n'), ((1288, 1315), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', ([], {'kernel_size': '(2)'}), '(kernel_size=2)\n', (1300, 1315), False, 'from torch import nn\n'), ((1760, 1769), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1767, 1769), False, 'from torch import nn\n'), ((2000, 2048), 'torch.cat', 'torch.cat', (['[concat_tensor, output_tensor]'], {'dim': '(1)'}), '([concat_tensor, output_tensor], dim=1)\n', (2009, 2048), False, 'import torch\n'), ((2278, 2369), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': 'in_channels', 'out_channels': 'out_channels', 'kernel_size': '(3)', 'padding': '(1)'}), '(in_channels=in_channels, out_channels=out_channels, kernel_size=3,\n padding=1)\n', (2287, 2369), False, 'from torch import nn\n'), ((2385, 2426), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', ([], {'num_features': 'out_channels'}), '(num_features=out_channels)\n', (2399, 2426), False, 'from torch import nn\n'), ((2448, 2457), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (2455, 2457), False, 'from torch import nn\n'), ((2480, 2573), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': 'out_channels', 'out_channels': 'out_channels', 'kernel_size': '(3)', 'padding': '(1)'}), '(in_channels=out_channels, out_channels=out_channels, kernel_size=\n 3, padding=1)\n', (2489, 2573), False, 'from torch import nn\n'), ((2588, 2629), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', ([], {'num_features': 'out_channels'}), '(num_features=out_channels)\n', (2602, 2629), False, 'from torch import nn\n'), ((2651, 2660), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (2658, 2660), False, 'from torch import nn\n'), ((3513, 3580), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': '(32)', 'out_channels': '(1)', 'kernel_size': '(3)', 'padding': '(1)'}), '(in_channels=32, out_channels=1, kernel_size=3, padding=1)\n', (3522, 3580), False, 'from torch import nn\n'), ((3607, 3619), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (3617, 3619), False, 'from torch import nn\n')] |
import os
import winreg
import subprocess
import datetime
import webbrowser
def main():
# 1. Get the location of XAMPP (by registry first)
try:
xamppReg = winreg.OpenKey(
winreg.HKEY_LOCAL_MACHINE,
'SOFTWARE\\WOW6432NODE\\xampp'
)
# Enum Value returns: value_name, value_data, value_data integer that identifies the data type
xamppDirKey = winreg.EnumValue(xamppReg, 0)
winreg.CloseKey(xamppReg)
xamppDir = xamppDirKey[1]
isValidKey = xamppDirKey[0] == 'Install_Dir'
if (not isValidKey):
raise ValueError('Got incorrect key name. Key name is: ' + xamppDirKey[0])
if (not os.path.isdir(xamppDir)):
raise ValueError('Registry path is not a directory!')
except (WindowsError, ValueError):
print ('Failed to find XAMPP installation directory.')
xamppDir = input('XAMPP directory: ')
if (xamppDir == ''):
raise SyntaxError('Directory path can not be empty!')
else:
if (not os.path.isdir(xamppDir)):
raise NotADirectoryError('Input is not a directory!')
# 2. Ask the user for input
YEAR = str(datetime.datetime.now().year)
projectName = input('Project name: ')
if (projectName == ''):
raise SyntaxError('Project name can not be empty!')
if (os.path.isdir(xamppDir + '\\htdocs\\' + YEAR + '\\' + projectName)):
raise IsADirectoryError(projectName + ' directory already exists!')
TRUE_TYPES = ['y', 'yes']
FALSE_TYPES = ['n', 'no']
isLaravel = input('Laravel project (y/n): ')
if (isLaravel == ''):
raise SyntaxError('Laravel check can not be empty!')
if (not isLaravel in (TRUE_TYPES + FALSE_TYPES)):
raise SyntaxError('Use only accepted entry types!')
if (isLaravel in TRUE_TYPES): isLaravel = True
if (isLaravel in FALSE_TYPES): isLaravel = False
# 3. Create the directories
try:
# 3.1 Create the year directory if not present
if (not os.path.isdir(xamppDir + '\\htdocs\\' + YEAR)):
os.makedirs(xamppDir + '\\htdocs\\' + YEAR)
# 3.2 Create the project directory
os.makedirs(xamppDir + '\\htdocs\\' + YEAR + '\\' + projectName)
except OSError:
print ('Failed to create the required directories!')
# 4. Stop the httpd (Apache) service
subprocess.Popen(xamppDir + '\\apache_stop.bat', cwd=xamppDir)
# 5. Add the config for vhosts
apacheConfigVhosts = xamppDir + '\\apache\\conf\\extra\\httpd-vhosts.conf'
if (isLaravel):
public = '\\public'
else:
public = ''
VHOSTSLINE1 = '\n\n<VirtualHost *:80>\n'
VHOSTSLINE2 = '\tDocumentRoot "'+ xamppDir + '\\htdocs\\' + YEAR + '\\' + projectName + public +'"\n'
VHOSTSLINE3 = '\tServerName '+ projectName +'.localhost\n'
VHOSTSLINE4 = '\tErrorLog "logs/'+ projectName +'-error.log"\n'
VHOSTSLINE5 = '\tCustomLog "logs/'+ projectName +'-access.log" common\n'
VHOSTSLINE6 = '</VirtualHost>'
open(apacheConfigVhosts, 'a').writelines([VHOSTSLINE1, VHOSTSLINE2, VHOSTSLINE3, VHOSTSLINE4, VHOSTSLINE5, VHOSTSLINE6])
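    # Illustrative result (directory, year and project name are made up): for a
    # non-Laravel project "demo" with xamppDir "C:\xampp" in 2018, the appended block is:
    #   <VirtualHost *:80>
    #       DocumentRoot "C:\xampp\htdocs\2018\demo"
    #       ServerName demo.localhost
    #       ErrorLog "logs/demo-error.log"
    #       CustomLog "logs/demo-access.log" common
    #   </VirtualHost>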
# 6. Start the httpd service
subprocess.Popen(xamppDir + '\\apache_start.bat', cwd=xamppDir)
# 7. Open the URL
webbrowser.open('http://' + projectName + '.localhost')
# 8. Cleanup
print ('\nProject created!')
    print ('The ' + projectName + ' is available at: http://' + projectName + '.localhost!')
if __name__ == '__main__':
main() | [
"winreg.CloseKey",
"winreg.OpenKey",
"os.makedirs",
"subprocess.Popen",
"webbrowser.open",
"winreg.EnumValue",
"datetime.datetime.now",
"os.path.isdir"
] | [((1376, 1442), 'os.path.isdir', 'os.path.isdir', (["(xamppDir + '\\\\htdocs\\\\' + YEAR + '\\\\' + projectName)"], {}), "(xamppDir + '\\\\htdocs\\\\' + YEAR + '\\\\' + projectName)\n", (1389, 1442), False, 'import os\n'), ((2400, 2462), 'subprocess.Popen', 'subprocess.Popen', (["(xamppDir + '\\\\apache_stop.bat')"], {'cwd': 'xamppDir'}), "(xamppDir + '\\\\apache_stop.bat', cwd=xamppDir)\n", (2416, 2462), False, 'import subprocess\n'), ((3220, 3283), 'subprocess.Popen', 'subprocess.Popen', (["(xamppDir + '\\\\apache_start.bat')"], {'cwd': 'xamppDir'}), "(xamppDir + '\\\\apache_start.bat', cwd=xamppDir)\n", (3236, 3283), False, 'import subprocess\n'), ((3311, 3366), 'webbrowser.open', 'webbrowser.open', (["('http://' + projectName + '.localhost')"], {}), "('http://' + projectName + '.localhost')\n", (3326, 3366), False, 'import webbrowser\n'), ((172, 245), 'winreg.OpenKey', 'winreg.OpenKey', (['winreg.HKEY_LOCAL_MACHINE', '"""SOFTWARE\\\\WOW6432NODE\\\\xampp"""'], {}), "(winreg.HKEY_LOCAL_MACHINE, 'SOFTWARE\\\\WOW6432NODE\\\\xampp')\n", (186, 245), False, 'import winreg\n'), ((406, 435), 'winreg.EnumValue', 'winreg.EnumValue', (['xamppReg', '(0)'], {}), '(xamppReg, 0)\n', (422, 435), False, 'import winreg\n'), ((444, 469), 'winreg.CloseKey', 'winreg.CloseKey', (['xamppReg'], {}), '(xamppReg)\n', (459, 469), False, 'import winreg\n'), ((2207, 2271), 'os.makedirs', 'os.makedirs', (["(xamppDir + '\\\\htdocs\\\\' + YEAR + '\\\\' + projectName)"], {}), "(xamppDir + '\\\\htdocs\\\\' + YEAR + '\\\\' + projectName)\n", (2218, 2271), False, 'import os\n'), ((692, 715), 'os.path.isdir', 'os.path.isdir', (['xamppDir'], {}), '(xamppDir)\n', (705, 715), False, 'import os\n'), ((1207, 1230), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1228, 1230), False, 'import datetime\n'), ((2051, 2096), 'os.path.isdir', 'os.path.isdir', (["(xamppDir + '\\\\htdocs\\\\' + YEAR)"], {}), "(xamppDir + '\\\\htdocs\\\\' + YEAR)\n", (2064, 2096), False, 'import os\n'), ((2111, 2154), 'os.makedirs', 'os.makedirs', (["(xamppDir + '\\\\htdocs\\\\' + YEAR)"], {}), "(xamppDir + '\\\\htdocs\\\\' + YEAR)\n", (2122, 2154), False, 'import os\n'), ((1063, 1086), 'os.path.isdir', 'os.path.isdir', (['xamppDir'], {}), '(xamppDir)\n', (1076, 1086), False, 'import os\n')] |
"""
Structs
"""
from typing import overload
from xml.etree import ElementTree
from bitstream import WriteStream, Serializable, c_uint8, c_uint16, c_uint32, c_int32, c_int64, c_float, c_bit, \
c_double, c_uint, c_bool
from .enums import PACKET_IDS, PACKET_NAMES, LEGO_DATA_TYPES, LDF_VALUE_TYPES
class Vector3(Serializable):
"""
Vector3
"""
def __init__(self, x, y, z):
self.x = float(x)
self.y = float(y)
self.z = float(z)
@classmethod
def deserialize(cls, stream):
x = stream.read(c_float)
y = stream.read(c_float)
z = stream.read(c_float)
return cls(x,y,z)
@classmethod
def from_array(cls, arr):
"""
Creates a Vector3 from an array
"""
return cls(arr[0], arr[1], arr[2])
@classmethod
def from_ldf(cls, ldf_val):
"""
    Creates a Vector3 from an LDF value
"""
arr = ldf_val.split('\x1f')
return cls(arr[0], arr[1], arr[2])
def serialize(self, stream):
stream.write(c_float(self.x))
stream.write(c_float(self.y))
stream.write(c_float(self.z))
class Vector4(Serializable):
"""
Vector4
"""
def __init__(self, x, y, z, w=0):
self.x = float(x)
self.y = float(y)
self.z = float(z)
self.w = float(w)
@classmethod
def deserialize(cls, stream):
x = stream.read(c_float)
y = stream.read(c_float)
z = stream.read(c_float)
w = stream.read(c_float)
return cls(x, y, z, w)
@classmethod
def from_array(cls, arr):
"""
Creates a Vector4 from an array
"""
return cls(arr[0], arr[1], arr[2], arr[3])
@classmethod
def from_vec3(cls, vec):
"""
Creates a Vector4 from a Vector3
"""
return cls(vec.x, vec.y, vec.z, 0)
def serialize(self, stream):
stream.write(c_float(self.x))
stream.write(c_float(self.y))
stream.write(c_float(self.z))
stream.write(c_float(self.w))
class LVLVector4(Vector4):
@classmethod
def deserialize(cls, stream):
w = stream.read(c_float)
x = stream.read(c_float)
y = stream.read(c_float)
z = stream.read(c_float)
return cls(x, y, z, w)
def serialize(self, stream):
stream.write(c_float(self.w))
stream.write(c_float(self.x))
stream.write(c_float(self.y))
stream.write(c_float(self.z))
class CString(Serializable):
"""
C string serializable
"""
def __init__(self, data='', allocated_length=None, length_type=None):
self.data = data
self.allocated_length = allocated_length
self.length_type = length_type
def serialize(self, stream):
stream.write(self.data if isinstance(self.data, bytes) else bytes(self.data, 'latin1'),
allocated_length=self.allocated_length, length_type=self.length_type)
def deserialize(self, stream):
return stream.read(bytes, allocated_length=self.allocated_length, length_type=self.length_type).decode('latin1')
class LUHeader(Serializable):
"""
LEGO Universe header serializable
"""
@overload
def __init__(self, packet_name: str):
pass
@overload
def __init__(self, remote_conn_id: int, packet_id: int):
pass
def __init__(self, *args):
if isinstance(args[0], str):
self.packet_name = args[0]
else:
self.raw_ids = (args[0], args[1])
@property
def remote_conn_id(self):
"""
Returns the remote connection ID
"""
if getattr(self, 'packet_name', None):
return PACKET_IDS[self.packet_name][0]
return self.raw_ids[0]
@property
def packet_id(self):
"""
Returns the packet ID
"""
if getattr(self, 'packet_name', None):
return PACKET_IDS[self.packet_name][1]
return self.raw_ids[1]
def serialize(self, stream):
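    # wire layout: 0x53 marker byte, uint16 remote connection id,
    # uint32 packet id, then one unknown/padding byte (8 bytes in total)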
stream.write(c_uint8(0x53))
stream.write(c_uint16(self.remote_conn_id))
stream.write(c_uint32(self.packet_id))
stream.write(c_uint8(0x00))
@classmethod
def deserialize(cls, stream):
# RakNet ID is NOT read because pyraknet already removes that for us (however we do need to write it ourselves)
remote_conn_id = stream.read(c_uint16)
packet_id = stream.read(c_uint32)
stream.read(c_uint8) # unknown
packet_name = PACKET_NAMES.get(remote_conn_id, {}).get(packet_id)
if packet_name:
return cls(packet_name)
return cls(remote_conn_id, packet_id)
class LegoDataKey(Serializable):
"""
LDF key serializable
"""
def __init__(self, key, data, data_type, data_num=None):
self.key = key
self.data = data
self.data_type = data_type
self.data_num = data_num
def serialize(self, stream):
stream.write(c_uint8(len(self.key) * 2))
for char in self.key:
stream.write(char.encode('latin1'))
stream.write(b'\0')
if not self.data_num:
if isinstance(self.data, ElementTree.Element):
stream.write(c_uint8(13))
txt = b'<?xml version="1.0">' + ElementTree.tostring(self.data)
stream.write(c_uint32(len(txt)))
stream.write(txt)
else:
stream.write(c_uint8(LEGO_DATA_TYPES[self.data_type]))
if self.data_type == str:
stream.write(self.data, length_type=c_uint)
else:
stream.write(self.data_type(self.data))
else:
stream.write(c_uint8(self.data_num))
stream.write(self.data)
@classmethod
def deserialize(cls, stream):
pass
class LegoData(Serializable):
"""
LDF serializable
"""
def __init__(self):
self.keys = []
def write(self, key, data, data_type=None, data_num=None):
    ldf_key = LegoDataKey(key, data, data_type, data_num)
self.keys.append(ldf_key)
def serialize(self, stream):
super().serialize(stream)
stream.write(c_uint32(len(self.keys)))
for key in self.keys:
key.serialize(stream)
@classmethod
def deserialize(cls, stream):
return cls()
class Packet(Serializable):
"""
Packet class
"""
allow_without_session = False
@overload
def __init__(self):
pass
def __init__(self, header: LUHeader = None, data: bytes = None, **kwargs):
packet_name = getattr(self.__class__, 'packet_name', None)
if packet_name:
self.header = LUHeader(packet_name)
for prop, val in kwargs.items():
setattr(self, prop, val)
elif header and data != None:
self.header = header
self.data = data
else:
raise KeyError('Packets must either be instantiated from a base class with a packet_name, Packet.deserialize(), or with a header and data argument')
def __bytes__(self):
stream = WriteStream()
self.serialize(stream)
return bytes(stream)
def serialize(self, stream):
"""
Serialize the packet
"""
self.header.serialize(stream)
if getattr(self, 'data', None) is not None:
stream.write(self.data)
@classmethod
def deserialize(cls, stream, packet_types):
"""
Deserialize the packet
"""
header = LUHeader.deserialize(stream)
packet = packet_types.get(getattr(header, 'packet_name', None))
if packet:
return packet.deserialize(stream)
return cls(header=header, data=stream.read_remaining())
class ServerGameMessage(Packet):
"""
Game message packet
"""
packet_name = 'server_game_message'
def __init__(self, objid, message_id, extra_data=None):
super().__init__(**{k: v for k, v in locals().items() if k != 'self'})
def serialize(self, stream):
"""
Serializes the game message
"""
super().serialize(stream)
stream.write(c_int64(self.objid))
stream.write(c_uint16(self.message_id))
if self.extra_data:
if isinstance(self.extra_data, bytes):
stream.write(self.extra_data)
else:
stream.write(bytes(self.extra_data))
class ClientGameMessage(Packet):
"""
Client game message packet
"""
packet_name = 'client_game_message'
def __init__(self, objid, message_id, extra_data=None):
super().__init__(**{k: v for k, v in locals().items() if k != 'self'})
@classmethod
def deserialize(cls, stream):
"""
Deserializes the game message
"""
objid = stream.read(c_int64)
msg_id = stream.read(c_uint16)
data = stream.read_remaining()
return cls(objid, msg_id, data)
def parse_ldf(ldf):
"""
Parses the LDF string and returns a dict
"""
d = {}
for line in ldf.splitlines():
arr = line.partition('=')
key = arr[0]
val = arr[2]
arr = val.partition(':')
if not arr[0]:
d[key] = None
else:
val_type = int(arr[0])
val = arr[2]
val = int(val) == 1 if val_type == 7 else LDF_VALUE_TYPES[val_type](arr[2])
d[key] = val
return d
| [
"bitstream.c_uint16",
"bitstream.WriteStream",
"bitstream.c_uint8",
"xml.etree.ElementTree.tostring",
"bitstream.c_float",
"bitstream.c_uint32",
"bitstream.c_int64"
] | [((7256, 7269), 'bitstream.WriteStream', 'WriteStream', ([], {}), '()\n', (7267, 7269), False, 'from bitstream import WriteStream, Serializable, c_uint8, c_uint16, c_uint32, c_int32, c_int64, c_float, c_bit, c_double, c_uint, c_bool\n'), ((1102, 1117), 'bitstream.c_float', 'c_float', (['self.x'], {}), '(self.x)\n', (1109, 1117), False, 'from bitstream import WriteStream, Serializable, c_uint8, c_uint16, c_uint32, c_int32, c_int64, c_float, c_bit, c_double, c_uint, c_bool\n'), ((1140, 1155), 'bitstream.c_float', 'c_float', (['self.y'], {}), '(self.y)\n', (1147, 1155), False, 'from bitstream import WriteStream, Serializable, c_uint8, c_uint16, c_uint32, c_int32, c_int64, c_float, c_bit, c_double, c_uint, c_bool\n'), ((1178, 1193), 'bitstream.c_float', 'c_float', (['self.z'], {}), '(self.z)\n', (1185, 1193), False, 'from bitstream import WriteStream, Serializable, c_uint8, c_uint16, c_uint32, c_int32, c_int64, c_float, c_bit, c_double, c_uint, c_bool\n'), ((2004, 2019), 'bitstream.c_float', 'c_float', (['self.x'], {}), '(self.x)\n', (2011, 2019), False, 'from bitstream import WriteStream, Serializable, c_uint8, c_uint16, c_uint32, c_int32, c_int64, c_float, c_bit, c_double, c_uint, c_bool\n'), ((2042, 2057), 'bitstream.c_float', 'c_float', (['self.y'], {}), '(self.y)\n', (2049, 2057), False, 'from bitstream import WriteStream, Serializable, c_uint8, c_uint16, c_uint32, c_int32, c_int64, c_float, c_bit, c_double, c_uint, c_bool\n'), ((2080, 2095), 'bitstream.c_float', 'c_float', (['self.z'], {}), '(self.z)\n', (2087, 2095), False, 'from bitstream import WriteStream, Serializable, c_uint8, c_uint16, c_uint32, c_int32, c_int64, c_float, c_bit, c_double, c_uint, c_bool\n'), ((2118, 2133), 'bitstream.c_float', 'c_float', (['self.w'], {}), '(self.w)\n', (2125, 2133), False, 'from bitstream import WriteStream, Serializable, c_uint8, c_uint16, c_uint32, c_int32, c_int64, c_float, c_bit, c_double, c_uint, c_bool\n'), ((2445, 2460), 'bitstream.c_float', 'c_float', (['self.w'], {}), '(self.w)\n', (2452, 2460), False, 'from bitstream import WriteStream, Serializable, c_uint8, c_uint16, c_uint32, c_int32, c_int64, c_float, c_bit, c_double, c_uint, c_bool\n'), ((2483, 2498), 'bitstream.c_float', 'c_float', (['self.x'], {}), '(self.x)\n', (2490, 2498), False, 'from bitstream import WriteStream, Serializable, c_uint8, c_uint16, c_uint32, c_int32, c_int64, c_float, c_bit, c_double, c_uint, c_bool\n'), ((2521, 2536), 'bitstream.c_float', 'c_float', (['self.y'], {}), '(self.y)\n', (2528, 2536), False, 'from bitstream import WriteStream, Serializable, c_uint8, c_uint16, c_uint32, c_int32, c_int64, c_float, c_bit, c_double, c_uint, c_bool\n'), ((2559, 2574), 'bitstream.c_float', 'c_float', (['self.z'], {}), '(self.z)\n', (2566, 2574), False, 'from bitstream import WriteStream, Serializable, c_uint8, c_uint16, c_uint32, c_int32, c_int64, c_float, c_bit, c_double, c_uint, c_bool\n'), ((4142, 4153), 'bitstream.c_uint8', 'c_uint8', (['(83)'], {}), '(83)\n', (4149, 4153), False, 'from bitstream import WriteStream, Serializable, c_uint8, c_uint16, c_uint32, c_int32, c_int64, c_float, c_bit, c_double, c_uint, c_bool\n'), ((4178, 4207), 'bitstream.c_uint16', 'c_uint16', (['self.remote_conn_id'], {}), '(self.remote_conn_id)\n', (4186, 4207), False, 'from bitstream import WriteStream, Serializable, c_uint8, c_uint16, c_uint32, c_int32, c_int64, c_float, c_bit, c_double, c_uint, c_bool\n'), ((4230, 4254), 'bitstream.c_uint32', 'c_uint32', (['self.packet_id'], {}), '(self.packet_id)\n', (4238, 4254), False, 'from 
bitstream import WriteStream, Serializable, c_uint8, c_uint16, c_uint32, c_int32, c_int64, c_float, c_bit, c_double, c_uint, c_bool\n'), ((4277, 4287), 'bitstream.c_uint8', 'c_uint8', (['(0)'], {}), '(0)\n', (4284, 4287), False, 'from bitstream import WriteStream, Serializable, c_uint8, c_uint16, c_uint32, c_int32, c_int64, c_float, c_bit, c_double, c_uint, c_bool\n'), ((8318, 8337), 'bitstream.c_int64', 'c_int64', (['self.objid'], {}), '(self.objid)\n', (8325, 8337), False, 'from bitstream import WriteStream, Serializable, c_uint8, c_uint16, c_uint32, c_int32, c_int64, c_float, c_bit, c_double, c_uint, c_bool\n'), ((8360, 8385), 'bitstream.c_uint16', 'c_uint16', (['self.message_id'], {}), '(self.message_id)\n', (8368, 8385), False, 'from bitstream import WriteStream, Serializable, c_uint8, c_uint16, c_uint32, c_int32, c_int64, c_float, c_bit, c_double, c_uint, c_bool\n'), ((5837, 5859), 'bitstream.c_uint8', 'c_uint8', (['self.data_num'], {}), '(self.data_num)\n', (5844, 5859), False, 'from bitstream import WriteStream, Serializable, c_uint8, c_uint16, c_uint32, c_int32, c_int64, c_float, c_bit, c_double, c_uint, c_bool\n'), ((5342, 5353), 'bitstream.c_uint8', 'c_uint8', (['(13)'], {}), '(13)\n', (5349, 5353), False, 'from bitstream import WriteStream, Serializable, c_uint8, c_uint16, c_uint32, c_int32, c_int64, c_float, c_bit, c_double, c_uint, c_bool\n'), ((5404, 5435), 'xml.etree.ElementTree.tostring', 'ElementTree.tostring', (['self.data'], {}), '(self.data)\n', (5424, 5435), False, 'from xml.etree import ElementTree\n'), ((5567, 5607), 'bitstream.c_uint8', 'c_uint8', (['LEGO_DATA_TYPES[self.data_type]'], {}), '(LEGO_DATA_TYPES[self.data_type])\n', (5574, 5607), False, 'from bitstream import WriteStream, Serializable, c_uint8, c_uint16, c_uint32, c_int32, c_int64, c_float, c_bit, c_double, c_uint, c_bool\n')] |
from models import Character, Born, PB2, PPath, Path, Looper, KPath
from seed_data import seed_data
from random import randrange
import jinja2
import os
born_opt = 0
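# module-level state: panda_born() draws a random option (1-3) into born_opt,
# and the later panda_* helpers return the text matching that same option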
jinja_env = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),
extensions=['jinja2.ext.autoescape'],
autoescape=True)
def panda_born():
panda_key = Character.query(Character.species == "Panda").get()
panda_born = Born.query(Born.owner == panda_key.key).get()
global born_opt
born_opt = randrange(1,4)
if born_opt == 1:
return panda_born.born_1
elif born_opt == 2:
return panda_born.born_2
elif born_opt == 3:
return panda_born.born_3
def panda_post_born():
global born_opt
panda_key = Character.query(Character.species == "Panda").get()
panda_post_born = PB2.query(PB2.owner == panda_key.key).get()
if born_opt == 1:
return panda_post_born.pb_1
elif born_opt == 2:
return panda_post_born.pb_2
elif born_opt == 3:
return panda_post_born.pb_3
def panda_pathA():
global born_opt
panda_key = Character.query(Character.species == "Panda").get()
panda_path = Path.query(Path.owner == panda_key.key).get()
if born_opt == 1:
return panda_path.p_1a
elif born_opt == 2:
return panda_path.p_2a
elif born_opt == 3:
return panda_path.p_3a
def panda_pathB():
global born_opt
panda_key = Character.query(Character.species == "Panda").get()
panda_path = Path.query(Path.owner == panda_key.key).get()
if born_opt == 1:
return panda_path.p_1b
elif born_opt == 2:
return panda_path.p_2b
elif born_opt == 3:
return panda_path.p_3b
def panda_post_pathA():
panda_key = Character.query(Character.species == "Panda").get()
panda_post_path = PPath.query(PPath.owner == panda_key.key).get()
return panda_post_path.PP_1
def panda_post_pathB():
panda_key = Character.query(Character.species == "Panda").get()
panda_post_path = PPath.query(PPath.owner == panda_key.key).get()
return panda_post_path.PP_2
def number():
global born_opt
return born_opt
| [
"models.PPath.query",
"models.Born.query",
"random.randrange",
"models.Character.query",
"models.PB2.query",
"models.Path.query",
"os.path.dirname"
] | [((509, 524), 'random.randrange', 'randrange', (['(1)', '(4)'], {}), '(1, 4)\n', (518, 524), False, 'from random import randrange\n'), ((234, 259), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (249, 259), False, 'import os\n'), ((359, 404), 'models.Character.query', 'Character.query', (["(Character.species == 'Panda')"], {}), "(Character.species == 'Panda')\n", (374, 404), False, 'from models import Character, Born, PB2, PPath, Path, Looper, KPath\n'), ((428, 467), 'models.Born.query', 'Born.query', (['(Born.owner == panda_key.key)'], {}), '(Born.owner == panda_key.key)\n', (438, 467), False, 'from models import Character, Born, PB2, PPath, Path, Looper, KPath\n'), ((754, 799), 'models.Character.query', 'Character.query', (["(Character.species == 'Panda')"], {}), "(Character.species == 'Panda')\n", (769, 799), False, 'from models import Character, Born, PB2, PPath, Path, Looper, KPath\n'), ((828, 865), 'models.PB2.query', 'PB2.query', (['(PB2.owner == panda_key.key)'], {}), '(PB2.owner == panda_key.key)\n', (837, 865), False, 'from models import Character, Born, PB2, PPath, Path, Looper, KPath\n'), ((1106, 1151), 'models.Character.query', 'Character.query', (["(Character.species == 'Panda')"], {}), "(Character.species == 'Panda')\n", (1121, 1151), False, 'from models import Character, Born, PB2, PPath, Path, Looper, KPath\n'), ((1175, 1214), 'models.Path.query', 'Path.query', (['(Path.owner == panda_key.key)'], {}), '(Path.owner == panda_key.key)\n', (1185, 1214), False, 'from models import Character, Born, PB2, PPath, Path, Looper, KPath\n'), ((1440, 1485), 'models.Character.query', 'Character.query', (["(Character.species == 'Panda')"], {}), "(Character.species == 'Panda')\n", (1455, 1485), False, 'from models import Character, Born, PB2, PPath, Path, Looper, KPath\n'), ((1509, 1548), 'models.Path.query', 'Path.query', (['(Path.owner == panda_key.key)'], {}), '(Path.owner == panda_key.key)\n', (1519, 1548), False, 'from models import Character, Born, PB2, PPath, Path, Looper, KPath\n'), ((1760, 1805), 'models.Character.query', 'Character.query', (["(Character.species == 'Panda')"], {}), "(Character.species == 'Panda')\n", (1775, 1805), False, 'from models import Character, Born, PB2, PPath, Path, Looper, KPath\n'), ((1834, 1875), 'models.PPath.query', 'PPath.query', (['(PPath.owner == panda_key.key)'], {}), '(PPath.owner == panda_key.key)\n', (1845, 1875), False, 'from models import Character, Born, PB2, PPath, Path, Looper, KPath\n'), ((1955, 2000), 'models.Character.query', 'Character.query', (["(Character.species == 'Panda')"], {}), "(Character.species == 'Panda')\n", (1970, 2000), False, 'from models import Character, Born, PB2, PPath, Path, Looper, KPath\n'), ((2029, 2070), 'models.PPath.query', 'PPath.query', (['(PPath.owner == panda_key.key)'], {}), '(PPath.owner == panda_key.key)\n', (2040, 2070), False, 'from models import Character, Born, PB2, PPath, Path, Looper, KPath\n')] |
import torch
def get_gpu_name():
return torch.cuda.get_device_name(0)
def get_all_gpu_names():
d = []
for i in range(torch.cuda.device_count()):
d.append(torch.cuda.get_device_name(i))
return d
if __name__ == '__main__':
print(get_all_gpu_names()) | [
"torch.cuda.get_device_name",
"torch.cuda.device_count"
] | [((42, 71), 'torch.cuda.get_device_name', 'torch.cuda.get_device_name', (['(0)'], {}), '(0)\n', (68, 71), False, 'import torch\n'), ((122, 147), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (145, 147), False, 'import torch\n'), ((161, 190), 'torch.cuda.get_device_name', 'torch.cuda.get_device_name', (['i'], {}), '(i)\n', (187, 190), False, 'import torch\n')] |
# Evaluate the Logistic Regression model
# Accuracy is generally not a very reliable metric because it can be biased by the most common target class.
# There are two other useful metrics:
# precision and
# recall.
# Check the slides for this lesson to get the relevant expressions.
# Precision is the proportion of positive predictions which are correct. For all flights which are predicted to be delayed, what proportion is actually delayed?
# Recall is the proportion of positive outcomes which are correctly predicted. For all delayed flights, what proportion is correctly predicted by the model?
# The precision and recall are generally formulated in terms of the positive target class. But it's also possible to calculate weighted versions of these metrics which look at both target classes.
# The components of the confusion matrix are available as TN, TP, FN and FP, as well as the object prediction.
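# Worked example (the counts are made up): with TP = 40, FP = 10 and FN = 20,
# precision = 40 / (40 + 10) = 0.80 and recall = 40 / (40 + 20) = 0.67 (rounded).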
# Instructions
# 100 XP
# Find the precision and recall.
# Create a multi-class evaluator and evaluate weighted precision.
# Create a binary evaluator and evaluate AUC.
from pyspark.ml.evaluation import MulticlassClassificationEvaluator, BinaryClassificationEvaluator
# Calculate precision and recall
precision = TP / (TP + FP)
recall = TP / (TP + FN)
print('precision = {:.2f}\nrecall = {:.2f}'.format(precision, recall))
# Find weighted precision
multi_evaluator = MulticlassClassificationEvaluator()
weighted_precision = multi_evaluator.evaluate(prediction, {multi_evaluator.metricName: "weightedPrecision"})
# Find AUC
binary_evaluator = BinaryClassificationEvaluator()
auc = binary_evaluator.evaluate(prediction, {binary_evaluator.metricName: "areaUnderROC"}) | [
"pyspark.ml.evaluation.BinaryClassificationEvaluator",
"pyspark.ml.evaluation.MulticlassClassificationEvaluator"
] | [((1413, 1448), 'pyspark.ml.evaluation.MulticlassClassificationEvaluator', 'MulticlassClassificationEvaluator', ([], {}), '()\n', (1446, 1448), False, 'from pyspark.ml.evaluation import MulticlassClassificationEvaluator, BinaryClassificationEvaluator\n'), ((1589, 1620), 'pyspark.ml.evaluation.BinaryClassificationEvaluator', 'BinaryClassificationEvaluator', ([], {}), '()\n', (1618, 1620), False, 'from pyspark.ml.evaluation import MulticlassClassificationEvaluator, BinaryClassificationEvaluator\n')] |
##
# \namespace cross3d.abstract.abstractsceneobjectgroup
#
#	\remarks	The AbstractSceneObjectGroup class provides an interface for working on sets of SceneObjects as a single group
#
# \author eric
# \author <NAME>
# \date 09/08/10
#
from cross3d import abstractmethod
from abstractcontainer import AbstractContainer
import cross3d
class AbstractGroup(AbstractContainer):
"""
	A group is an object that makes it possible to manipulate several objects at once.
	Where possible, a group has a native equivalent. See the various implementations for details.
"""
@abstractmethod
def _nativeObjects(self):
return []
@abstractmethod
def isHidden(self):
return False
@abstractmethod
def isFrozen(self):
return False
@abstractmethod
def toggleHidden(self):
return False
@abstractmethod
def toggleFrozen(self):
return False
@abstractmethod
def setHidden(self, hidden, options=None, affectObjects=False):
return False
@abstractmethod
def setFrozen(self, frozen, affectObjects=False):
return False
@abstractmethod
def name(self):
return ''
# register the symbol
cross3d.registerSymbol('Group', AbstractGroup, ifNotFound=True)
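# Minimal usage sketch (assumes the active 3d package registers a concrete Group implementation;
# 'group' is a hypothetical instance obtained from the scene):
#   group.setHidden(True, affectObjects=True)  # hide the group and the objects it contains
#   group.toggleFrozen()                       # flip the group's frozen state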
| [
"cross3d.registerSymbol"
] | [((1167, 1230), 'cross3d.registerSymbol', 'cross3d.registerSymbol', (['"""Group"""', 'AbstractGroup'], {'ifNotFound': '(True)'}), "('Group', AbstractGroup, ifNotFound=True)\n", (1189, 1230), False, 'import cross3d\n')] |
"""
Author: <NAME>
Date: September 14, 2018
File: block.py
Purpose: Reading in an image, calculating average intensity per block based on number of rows and columns given,
building an image from those blocks based on block size given, and converting that into a binary image based
on a calculated threshold.
---_ ......._-_--.
(|\ / / /| \ \
/ / .' -=-' `.
/ / .' )
_/ / .' _.) /
/ o o _.-' / .'
\ _.-' / .'*|
\______.-'// .'.' \*|
\| \ | // .'.' _ |*|
` \|// .'.'_ _ _|*|
. .// .'.' | _ _ \*|
\`-|\_/ / \ _ _ \*\
`/'\__/ \ _ _ \*\
/^| \ _ _ \*
' ` \ _ _ \ Hssssss...it's nice to be using Python again...
\_
"""
import sys
import math as mt
import cv2 as cv
import numpy as np
def downsizeAvg(image, m, n):
"""
Takes in original greyscale image and desired dimensions. Calculates scale factor then iterates
    through each block of the downsized image, converting the original image to its average intensity per block.
    Returns the downsized image.
"""
imgM = image.shape[0]
imgN = image.shape[1]
sM = imgM/m
sN = imgN/n
for i in range(0,m):
# Boundaries of the block as specified in the homework instructions
xBound1 = mt.floor(i * sM)
xBound2 = mt.floor((i+1) * (sM))
for j in range(0,n):
# Boundaries of the block as specified in the homework instructions
yBound1 = mt.floor(j * sN)
yBound2 = mt.floor((j+1)*(sN))
# Find average per block and change all pixels in boundaries to average
imageBound = image[xBound1:xBound2, yBound1:yBound2]
avg = np.average(imageBound)
image[xBound1:xBound2, yBound1:yBound2] = avg
    return cv.resize(image, (n*b, m*b))  # note: relies on the global block size b defined under __main__
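# Worked example of the boundary arithmetic above (hypothetical sizes, not from the assignment):
# for an image with imgM = 480 rows downsized to m = 6 block-rows, sM = 480/6 = 80, so block i = 2
# spans rows floor(2*80) = 160 up to (but not including) floor(3*80) = 240.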
def downsizeBin(image):
"""
Takes in the downsized image (greyscale pixelated), finds its size (since it's changed)
Changes pixels over threshold (median) to white and under to black
Returns binarized image
"""
M = image.shape[0]
N = image.shape[1]
med = np.median(image)
image[image >= med] = 255
image[image < med] = 0
return image
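# e.g. if the block averages are [12, 40, 200, 255], the median threshold is 120, so the first two
# blocks are mapped to 0 (black) and the last two to 255 (white).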
def outputStats(m, n, b, image):
"""
Output stats on the downsized images
"""
print("Downsized images are ({}, {})".format(m//b, n//b))
print("Block images are ({}, {})".format(m, n))
print("Average intensity at ({}, {}) is {:.2f}".format(m//4//b, n//4//b, image[m//4, n//4]))
print("Average intensity at ({}, {}) is {:.2f}".format(m//4//b, 3*n//4//b, image[m//4, (3*n)//4]))
print("Average intensity at ({}, {}) is {:.2f}".format(3*m//4//b, n//4//b, image[3*m//4, n//4]))
print("Average intensity at ({}, {}) is {:.2f}".format(3*m//4//b, 3*n//4//b, image[3*m//4, 3*n//4]))
print("Binary threshold: {:.2f}".format(np.median(image)))
def rename(imgName, char):
"""
Helper function for renaming output files
"""
title = imgName.split(".jpg")[0]
newTitle = "{}_{}.jpg".format(title, char)
return newTitle
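# Example run (hypothetical file name): python block.py kangaroo.jpg 54 96 8
# reads kangaroo.jpg, writes the pixelated greyscale image kangaroo_g.jpg and the
# binarized image kangaroo_b.jpg, and prints the intensity/threshold statistics.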
if __name__ == "__main__":
"""
Handle command line arguments
"""
if len(sys.argv) != 5:
print("Correct usage: {} inputImage rows cols blockSize".format(sys.argv[0]))
sys.exit()
else:
imgName = sys.argv[1]
m = sys.argv[2]
n = sys.argv[3]
b = sys.argv[4]
try:
m = int(m)
n = int(n)
b = int(b)
except ValueError:
print("Rows, cols, and block size must be ints!")
sys.exit()
# Read image as greyscale of type float64
try:
img = cv.imread(imgName, 0).astype(np.float64)
except AttributeError:
print("Image not found or not valid format!")
sys.exit()
# Create first downsized image (pixelated greyscale), output stats, and write out new image
avgImg = downsizeAvg(img, m, n)
outputStats(m*b, n*b, b, avgImg)
cv.imwrite(rename(imgName, "g"), avgImg)
print("Wrote image {}".format(rename(imgName, "g")))
# Create second downsized image (binary) and write out new image
binImg = downsizeBin(avgImg)
cv.imwrite(rename(imgName, "b"), binImg)
print("Wrote image {}".format(rename(imgName, "b")))
| [
"numpy.median",
"numpy.average",
"math.floor",
"sys.exit",
"cv2.resize",
"cv2.imread"
] | [((2194, 2226), 'cv2.resize', 'cv.resize', (['image', '(n * b, m * b)'], {}), '(image, (n * b, m * b))\n', (2203, 2226), True, 'import cv2 as cv\n'), ((2512, 2528), 'numpy.median', 'np.median', (['image'], {}), '(image)\n', (2521, 2528), True, 'import numpy as np\n'), ((1663, 1679), 'math.floor', 'mt.floor', (['(i * sM)'], {}), '(i * sM)\n', (1671, 1679), True, 'import math as mt\n'), ((1698, 1720), 'math.floor', 'mt.floor', (['((i + 1) * sM)'], {}), '((i + 1) * sM)\n', (1706, 1720), True, 'import math as mt\n'), ((3674, 3684), 'sys.exit', 'sys.exit', ([], {}), '()\n', (3682, 3684), False, 'import sys\n'), ((1857, 1873), 'math.floor', 'mt.floor', (['(j * sN)'], {}), '(j * sN)\n', (1865, 1873), True, 'import math as mt\n'), ((1896, 1918), 'math.floor', 'mt.floor', (['((j + 1) * sN)'], {}), '((j + 1) * sN)\n', (1904, 1918), True, 'import math as mt\n'), ((2097, 2119), 'numpy.average', 'np.average', (['imageBound'], {}), '(imageBound)\n', (2107, 2119), True, 'import numpy as np\n'), ((3262, 3278), 'numpy.median', 'np.median', (['image'], {}), '(image)\n', (3271, 3278), True, 'import numpy as np\n'), ((3957, 3967), 'sys.exit', 'sys.exit', ([], {}), '()\n', (3965, 3967), False, 'import sys\n'), ((4168, 4178), 'sys.exit', 'sys.exit', ([], {}), '()\n', (4176, 4178), False, 'import sys\n'), ((4038, 4059), 'cv2.imread', 'cv.imread', (['imgName', '(0)'], {}), '(imgName, 0)\n', (4047, 4059), True, 'import cv2 as cv\n')] |
from argparse import ArgumentParser
from os import remove
from src import PALETTE, DEFAULT_NUM_NEIGHBORS, DEFAULT_VIEW_DIST
from src import Universe, Canvas, Boid
if __name__ == "__main__":
# setup args
parser = ArgumentParser()
# basic
parser.add_argument("-n",
dest="n",
type=int,
default=60,
help="the number of boids in the simulation")
parser.add_argument("--fps",
type=float,
default=30.0,
help="the (maximum) framerate")
parser.add_argument("--res",
type=str,
default="1920x1080",
help="the resolution")
parser.add_argument("--highlight",
action="store_true",
help="highlight a single boid")
parser.add_argument("--preview-only",
dest="preview_only",
action="store_true",
                        help="don't save the video, just show the preview")
# weights
parser.add_argument("-c", "--cohesion",
dest="cohes",
type=float,
default=1.0,
help="the weight of the cohesion rule")
parser.add_argument("-a", "--alignment",
dest="align",
type=float,
default=1.0,
help="the weight of the alignment rule")
parser.add_argument("-s", "--seperation",
dest="sep",
type=float,
default=1.0,
                        help="the weight of the separation rule")
# behaviour near edges
parser.add_argument("-e", "--edge-behaviour",
dest="edge_behaviour",
type=str,
choices={"avoid", "wrap"},
default="avoid",
help="the behaviour of the boids near edges, either avoid them or just wrap around to the other side")
# what method to use to decide which boids are close ('nearby')
group = parser.add_mutually_exclusive_group()
group.add_argument("--dist",
nargs="?",
type=float,
const=DEFAULT_VIEW_DIST,
help=f"all boids which are at most DIST units away from the current boid can be seen (defaults to {DEFAULT_VIEW_DIST})")
group.add_argument("--count",
dest="num_neighbors",
nargs="?",
type=int,
const=DEFAULT_NUM_NEIGHBORS,
help=f"the COUNT closest boids are seen by the current boid (defaults to {DEFAULT_NUM_NEIGHBORS})")
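    # Example invocations (illustrative only; the script file name is an assumption):
    #   python boids.py -n 100 --res 1280x720 --dist 80        # distance-based neighbourhood
    #   python boids.py --count 7 -e wrap --preview-only       # 7 nearest neighbours, wrap at the edges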
args = parser.parse_args()
# run simulation
with Canvas(args.res.split("x"), args.fps) as canvas:
u = Universe(canvas,
edge_behaviour=args.edge_behaviour,
nearby_method="dist" if args.num_neighbors is None else "count",
view_dist=args.dist or DEFAULT_VIEW_DIST,
num_neighbors=args.num_neighbors or DEFAULT_NUM_NEIGHBORS,
sep=args.sep,
align=args.align,
cohes=args.cohes)
if args.highlight:
u.add_boid(color=PALETTE["highlight"], pos=(0, 0))
args.n -= 1
u.populate(args.n)
u.loop()
# delete file if wanted
if args.preview_only or input("Save video? (Y/n) ").lower() == "n":
remove(canvas.filename)
| [
"src.Universe",
"argparse.ArgumentParser",
"os.remove"
] | [((222, 238), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (236, 238), False, 'from argparse import ArgumentParser\n'), ((3041, 3321), 'src.Universe', 'Universe', (['canvas'], {'edge_behaviour': 'args.edge_behaviour', 'nearby_method': "('dist' if args.num_neighbors is None else 'count')", 'view_dist': '(args.dist or DEFAULT_VIEW_DIST)', 'num_neighbors': '(args.num_neighbors or DEFAULT_NUM_NEIGHBORS)', 'sep': 'args.sep', 'align': 'args.align', 'cohes': 'args.cohes'}), "(canvas, edge_behaviour=args.edge_behaviour, nearby_method='dist' if\n args.num_neighbors is None else 'count', view_dist=args.dist or\n DEFAULT_VIEW_DIST, num_neighbors=args.num_neighbors or\n DEFAULT_NUM_NEIGHBORS, sep=args.sep, align=args.align, cohes=args.cohes)\n", (3049, 3321), False, 'from src import Universe, Canvas, Boid\n'), ((3734, 3757), 'os.remove', 'remove', (['canvas.filename'], {}), '(canvas.filename)\n', (3740, 3757), False, 'from os import remove\n')] |
from __future__ import print_function
from IPython.core.magic import (Magics, magics_class, line_magic, cell_magic)
from IPython.utils.ipstruct import Struct
import os
import argparse
import shlex
import re
from cStringIO import StringIO
import sys
class Capturing(list):
def __enter__(self):
self._stdout = sys.stdout
sys.stdout = self._stringio = StringIO()
return self
def __exit__(self, *args):
self.extend(self._stringio.getvalue().splitlines())
del self._stringio # free up some memory
sys.stdout = self._stdout
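# Usage sketch for Capturing: anything printed inside the block is collected into the list
# instead of being written to the console, e.g.
#   with Capturing() as output:
#       print('hello')
#   # output == ['hello']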
@magics_class
class AMLHelpers(Magics):
@staticmethod
def print_and_update_env(k, v):
os.environ[k] = v
print(' os.environ["{}"]="{}"'.format(k, v))
return 'export {}={}'.format(k, v)
@cell_magic
def save_file(self, parameter_s='', cell=None):
opts, arg_str = self.parse_options(parameter_s, 'f:', list_all=True, posix=False)
if cell is not None:
arg_str += '\n' + cell
return self._save_file(arg_str, opts, self.shell.user_ns)
@line_magic
def list_subs(self, line):
from azure.cli.core._profile import Profile
try:
from azure.cli.core.util import CLIError
except ImportError:
from azure.cli.core._util import CLIError
self._redirect_logging('az.azure.cli.core._profile')
profile = Profile()
try:
profile.get_subscription()
except CLIError:
profile.find_subscriptions_on_login(True, None, None, None, None)
subs = profile.load_cached_subscriptions()
if not subs:
print('No subscriptions available.')
print('Please run `az login` from the console then try again')
return
print("Available subscriptions:\n {}".format('\n '.join(
[sub['name'] for sub in subs])))
@line_magic
def select_sub(self, line):
from azure.cli.core._profile import Profile
try:
from azure.cli.core.util import CLIError
except ImportError:
from azure.cli.core._util import CLIError
self._redirect_logging('az.azure.cli.core._profile')
p = argparse.ArgumentParser()
p.add_argument('subscription')
parsed_args = p.parse_args(shlex.split(line))
profile = Profile()
subs = profile.load_cached_subscriptions()
if not subs:
profile.find_subscriptions_on_login(True, None, None, None, None)
try:
profile.set_active_subscription(parsed_args.subscription)
print('Active subscription set to {}'.format(profile.get_subscription()['name']))
except CLIError as exc:
print(exc)
print('Active subscription remains {}'.format(profile.get_subscription()['name']))
@line_magic
def check_deployment(self, line):
from azure.cli.core._profile import Profile
from azure.cli.core.commands import client_factory
from azure.mgmt.resource.resources import ResourceManagementClient
from azure.cli.command_modules.ml._az_util import az_get_app_insights_account
self._redirect_logging('az.azure.cli.core._profile')
p = argparse.ArgumentParser()
p.add_argument('-d', '--deployment', help='Long running deployment to check', required=True)
parsed_args = p.parse_args(line.split())
deployment_name = parsed_args.deployment
# validate that user has selected a subscription
profile = Profile()
subs = profile.load_cached_subscriptions()
if not subs:
print('Please run %%select_sub before attempting to query.')
return
if 'deployment' not in deployment_name:
            print("Not a valid AML deployment name.")
            return
resource_group = deployment_name.split('deployment')[0]
client = client_factory.get_mgmt_service_client(ResourceManagementClient).deployments
result = client.get(resource_group, deployment_name)
if result.properties.provisioning_state != 'Succeeded':
print('Deployment status: {}'.format(result.properties.provisioning_state))
return
completed_deployment = result
if 'appinsights' in completed_deployment.name:
(app_insights_account_name,
app_insights_account_key) = az_get_app_insights_account(completed_deployment)
if app_insights_account_name and app_insights_account_key:
print("Environment updated with AppInsight information.")
print("This notebook will keep this environment available, though kernel restarts will clear it.")
print("To reset the environment, use the following commands:")
print(' import os')
result_str = '\n'.join([
self.print_and_update_env('AML_APP_INSIGHTS_NAME', app_insights_account_name),
self.print_and_update_env('AML_APP_INSIGHTS_KEY', app_insights_account_key)])
try:
with open(os.path.join(os.path.expanduser('~'), '.amlenvrc'), 'a') as env_file:
env_file.write(result_str)
print('{} has also been updated.'.format(env_file.name))
except IOError:
pass
else:
acs_master = completed_deployment.properties.outputs['masterFQDN']['value']
acs_agent = completed_deployment.properties.outputs['agentpublicFQDN'][
'value']
if acs_master and acs_agent:
print('ACS deployment succeeded.')
print("Environment updated with ACS information.")
print("This notebook will keep this environment available, though kernel restarts will clear it.")
print("To reset the environment, use the following commands:")
print(' import os')
result_str = '\n'.join([
self.print_and_update_env('AML_ACS_MASTER', acs_master),
self.print_and_update_env('AML_ACS_AGENT', acs_agent)])
try:
with open(os.path.join(os.path.expanduser('~'), '.amlenvrc'), 'a') as env_file:
env_file.write(result_str)
print('{} has also been updated.'.format(env_file.name))
except IOError:
pass
@line_magic
def aml_env_setup(self, line):
from azure.cli.core._profile import Profile
self._redirect_logging('az.azure.cli.core._profile')
p = argparse.ArgumentParser()
p.add_argument('-n', '--name', help='base name for your environment', required=True)
parsed_args = p.parse_args(line.split())
# validate that user has selected a subscription
profile = Profile()
subs = profile.load_cached_subscriptions()
if not subs:
print('Please run %%select_sub before attempting to set up environment.')
return
from azure.cli.command_modules.ml._util import create_ssh_key_if_not_exists
from azure.cli.command_modules.ml._util import JupyterContext
from azure.cli.command_modules.ml._az_util import az_create_resource_group
from azure.cli.command_modules.ml._az_util import az_create_app_insights_account
from azure.cli.command_modules.ml._az_util import az_create_storage_and_acr
from azure.cli.command_modules.ml._az_util import az_create_acs
from azure.cli.command_modules.ml._az_util import query_deployment_status
from azure.cli.command_modules.ml._az_util import az_get_app_insights_account
from azure.cli.command_modules.ml._az_util import AzureCliError
import time
print('Setting up your Azure ML environment with a storage account, App Insights account, ACR registry and ACS cluster.')
c = JupyterContext()
try:
ssh_public_key = create_ssh_key_if_not_exists()
except:
return
resource_group = az_create_resource_group(c, parsed_args.name)
app_insights_deployment_id = az_create_app_insights_account(parsed_args.name, resource_group)
(acr_login_server, c.acr_username, acr_password, storage_account_name, storage_account_key) = \
az_create_storage_and_acr(parsed_args.name, resource_group)
with Capturing() as output:
az_create_acs(parsed_args.name, resource_group, acr_login_server, c.acr_username,
acr_password, ssh_public_key)
acs_regex = r"az ml env setup -s (?P<deployment_id>[^']+)"
for line in output:
s = re.search(acs_regex, line)
if s:
print('To check the status of the deployment, run line magic %check_deployment -d {}'.format(s.group('deployment_id')))
else:
print(line)
completed_deployment = None
while not completed_deployment:
try:
print('Querying App Insights deployment...')
completed_deployment = query_deployment_status(resource_group,
app_insights_deployment_id)
time.sleep(5)
except AzureCliError as exc:
print(exc.message)
break
print("Environment configured, pending ACS deployment completion.")
print("This notebook will keep this environment available, though kernel restarts will clear it.")
print("To reset the environment, use the following commands:")
print(' import os')
result_str = ''
if completed_deployment:
app_insights_account_name, app_insights_account_key = az_get_app_insights_account(
completed_deployment)
result_str = '\n'.join([
self.print_and_update_env('AML_APP_INSIGHTS_NAME', app_insights_account_name),
self.print_and_update_env('AML_APP_INSIGHTS_KEY', app_insights_account_key)])
result_str += '\n'.join([
self.print_and_update_env('AML_STORAGE_ACCT_NAME', storage_account_name),
            self.print_and_update_env('AML_STORAGE_ACCT_KEY', storage_account_key),
self.print_and_update_env('AML_ACR_HOME', acr_login_server),
self.print_and_update_env('AML_ACR_USER', c.acr_username),
self.print_and_update_env('AML_ACR_PW', acr_password)])
try:
with open(os.path.expanduser('~/.amlenvrc'), 'w+') as env_file:
env_file.write(result_str)
print('You can also find these settings saved in {}'.format(env_file.name))
except IOError:
pass
@cell_magic
def publish_realtime_local(self, parameter_s='', cell=None):
import tempfile
import azure.cli.command_modules.ml.service.realtime as r
import azure.cli.command_modules.ml._util as u
# reload util to get new environment vars
self.easy_reload(u)
p = argparse.ArgumentParser()
p.add_argument('-s', '--schema', help='local path to schema file', required=True)
p.add_argument('-m', '--model', help='local path to model', required=True)
p.add_argument('-n', '--name', help='name of the webservice', required=True)
p.add_argument('-d', '--dependency', dest='dependencies', help='arbitrary dependencies', action='append', default=[])
p.add_argument('-o', '--overwrite', help='flag to overwrite existing service',
action='store_true')
args = p.parse_args(parameter_s.split())
context = u.JupyterContext()
context.local_mode = True
context.set_input_response('Delete existing service and create new service (y/N)? ',
'y' if args.overwrite else 'n')
_, fp = tempfile.mkstemp()
with open(fp, 'w') as score_file:
score_file.write(cell)
try:
resp_code = r.realtime_service_create(score_file.name,
dependencies=args.dependencies, requirements='',
schema_file=args.schema, service_name=args.name,
verb=False, custom_ice_url='', target_runtime='spark-py',
logging_level='debug', model=args.model, context=context)
if resp_code == 1:
print('Use -o flag to magic to overwrite the existing service.')
finally:
# cleanup
os.remove(fp)
@line_magic
def list_realtime_local(self, line):
from azure.cli.command_modules.ml.service.realtime import realtime_service_list
from azure.cli.command_modules.ml._util import JupyterContext
c = JupyterContext()
c.local_mode = True
realtime_service_list(context=c)
@line_magic
def view_realtime_local(self, line):
import azure.cli.command_modules.ml.service.realtime as r
import azure.cli.command_modules.ml._util as u
p = argparse.ArgumentParser()
p.add_argument('-n', '--name', help='name of the webservice', required=True)
name = p.parse_args(line.split()).name
context = u.JupyterContext()
context.local_mode = True
r.realtime_service_view(service_name=name, context=context)
@line_magic
def run_realtime_local(self, line):
import azure.cli.command_modules.ml.service.realtime as r
p = argparse.ArgumentParser()
p.add_argument('-n', '--name', help='name of the webservice', required=True)
p.add_argument('-d', '--data', help='data to send', default='')
parsed_args = p.parse_args(shlex.split(line))
name = parsed_args.name
input_data = parsed_args.data
r.realtime_service_run_local(service_name=name, input_data=input_data, verbose=False)
@line_magic
def delete_realtime_local(self, line):
import azure.cli.command_modules.ml.service.realtime as r
p = argparse.ArgumentParser()
p.add_argument('-n', '--name', help='name of the webservice', required=True)
name = p.parse_args(line.split()).name
r.realtime_service_delete_local(service_name=name, verbose=False)
@staticmethod
def _redirect_logging(module_name):
import logging
from azure.cli.core.azlogging import CustomStreamHandler
profile_logger = logging.getLogger(module_name)
if not profile_logger.handlers:
profile_logger.addHandler(CustomStreamHandler(logging.DEBUG, {
True: '%(message)s',
False: '%(levelname)s: %(message)s',
}))
@staticmethod
def _save_file(code, opts, namespace):
# read arguments
opts.merge(Struct(f=None))
file_name = opts.f
if not file_name:
return "Usage: %%save_file -f file_name"
file_name = file_name[0]
with open(file_name, 'w') as fileName:
fileName.write(code)
print("Saved cell to {}".format(file_name))
return
@staticmethod
def easy_reload(module):
try:
# python 3.4+ import
from importlib import reload
except ImportError:
try:
# 3 < 3.4
from imp import reload
except ImportError:
pass
# builtin for p2
reload(module)
from IPython import get_ipython
get_ipython().register_magics(AMLHelpers)
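# Once this module is loaded in a notebook, the magics defined above can be invoked, e.g.:
#   %list_subs
#   %select_sub <subscription-name>
#   %check_deployment -d <resource-group>deployment<id>
#   %%publish_realtime_local -s schema.json -m model.pkl -n myservice   (cell magic; the cell holds the scoring code)
# (illustrative placeholders only; actual values depend on the Azure subscription and deployment)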
| [
"logging.getLogger",
"azure.cli.command_modules.ml._az_util.query_deployment_status",
"shlex.split",
"azure.cli.command_modules.ml.service.realtime.realtime_service_delete_local",
"time.sleep",
"azure.cli.command_modules.ml.service.realtime.realtime_service_view",
"re.search",
"os.remove",
"cStringIO.StringIO",
"azure.cli.command_modules.ml._util.create_ssh_key_if_not_exists",
"argparse.ArgumentParser",
"azure.cli.command_modules.ml.service.realtime.realtime_service_create",
"azure.cli.command_modules.ml._az_util.az_create_resource_group",
"azure.cli.command_modules.ml._az_util.az_get_app_insights_account",
"azure.cli.command_modules.ml._az_util.az_create_acs",
"azure.cli.core.commands.client_factory.get_mgmt_service_client",
"IPython.utils.ipstruct.Struct",
"os.path.expanduser",
"IPython.get_ipython",
"azure.cli.core.azlogging.CustomStreamHandler",
"azure.cli.command_modules.ml._az_util.az_create_storage_and_acr",
"azure.cli.command_modules.ml.service.realtime.realtime_service_run_local",
"imp.reload",
"azure.cli.command_modules.ml._az_util.az_create_app_insights_account",
"azure.cli.command_modules.ml.service.realtime.realtime_service_list",
"tempfile.mkstemp",
"azure.cli.command_modules.ml._util.JupyterContext",
"azure.cli.core._profile.Profile"
] | [((372, 382), 'cStringIO.StringIO', 'StringIO', ([], {}), '()\n', (380, 382), False, 'from cStringIO import StringIO\n'), ((1417, 1426), 'azure.cli.core._profile.Profile', 'Profile', ([], {}), '()\n', (1424, 1426), False, 'from azure.cli.core._profile import Profile\n'), ((2231, 2256), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2254, 2256), False, 'import argparse\n'), ((2368, 2377), 'azure.cli.core._profile.Profile', 'Profile', ([], {}), '()\n', (2375, 2377), False, 'from azure.cli.core._profile import Profile\n'), ((3256, 3281), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (3279, 3281), False, 'import argparse\n'), ((3557, 3566), 'azure.cli.core._profile.Profile', 'Profile', ([], {}), '()\n', (3564, 3566), False, 'from azure.cli.core._profile import Profile\n'), ((6674, 6699), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (6697, 6699), False, 'import argparse\n'), ((6918, 6927), 'azure.cli.core._profile.Profile', 'Profile', ([], {}), '()\n', (6925, 6927), False, 'from azure.cli.core._profile import Profile\n'), ((7989, 8005), 'azure.cli.command_modules.ml._util.JupyterContext', 'JupyterContext', ([], {}), '()\n', (8003, 8005), False, 'from azure.cli.command_modules.ml._util import JupyterContext\n'), ((8139, 8184), 'azure.cli.command_modules.ml._az_util.az_create_resource_group', 'az_create_resource_group', (['c', 'parsed_args.name'], {}), '(c, parsed_args.name)\n', (8163, 8184), False, 'from azure.cli.command_modules.ml._az_util import az_create_resource_group\n'), ((8222, 8286), 'azure.cli.command_modules.ml._az_util.az_create_app_insights_account', 'az_create_app_insights_account', (['parsed_args.name', 'resource_group'], {}), '(parsed_args.name, resource_group)\n', (8252, 8286), False, 'from azure.cli.command_modules.ml._az_util import az_create_app_insights_account\n'), ((8403, 8462), 'azure.cli.command_modules.ml._az_util.az_create_storage_and_acr', 'az_create_storage_and_acr', (['parsed_args.name', 'resource_group'], {}), '(parsed_args.name, resource_group)\n', (8428, 8462), False, 'from azure.cli.command_modules.ml._az_util import az_create_storage_and_acr\n'), ((11143, 11168), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (11166, 11168), False, 'import argparse\n'), ((11751, 11769), 'azure.cli.command_modules.ml._util.JupyterContext', 'u.JupyterContext', ([], {}), '()\n', (11767, 11769), True, 'import azure.cli.command_modules.ml._util as u\n'), ((11971, 11989), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {}), '()\n', (11987, 11989), False, 'import tempfile\n'), ((12918, 12934), 'azure.cli.command_modules.ml._util.JupyterContext', 'JupyterContext', ([], {}), '()\n', (12932, 12934), False, 'from azure.cli.command_modules.ml._util import JupyterContext\n'), ((12971, 13003), 'azure.cli.command_modules.ml.service.realtime.realtime_service_list', 'realtime_service_list', ([], {'context': 'c'}), '(context=c)\n', (12992, 13003), False, 'from azure.cli.command_modules.ml.service.realtime import realtime_service_list\n'), ((13196, 13221), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (13219, 13221), False, 'import argparse\n'), ((13372, 13390), 'azure.cli.command_modules.ml._util.JupyterContext', 'u.JupyterContext', ([], {}), '()\n', (13388, 13390), True, 'import azure.cli.command_modules.ml._util as u\n'), ((13433, 13492), 'azure.cli.command_modules.ml.service.realtime.realtime_service_view', 'r.realtime_service_view', ([], {'service_name': 'name', 
'context': 'context'}), '(service_name=name, context=context)\n', (13456, 13492), True, 'import azure.cli.command_modules.ml.service.realtime as r\n'), ((13629, 13654), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (13652, 13654), False, 'import argparse\n'), ((13944, 14033), 'azure.cli.command_modules.ml.service.realtime.realtime_service_run_local', 'r.realtime_service_run_local', ([], {'service_name': 'name', 'input_data': 'input_data', 'verbose': '(False)'}), '(service_name=name, input_data=input_data,\n verbose=False)\n', (13972, 14033), True, 'import azure.cli.command_modules.ml.service.realtime as r\n'), ((14169, 14194), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (14192, 14194), False, 'import argparse\n'), ((14335, 14400), 'azure.cli.command_modules.ml.service.realtime.realtime_service_delete_local', 'r.realtime_service_delete_local', ([], {'service_name': 'name', 'verbose': '(False)'}), '(service_name=name, verbose=False)\n', (14366, 14400), True, 'import azure.cli.command_modules.ml.service.realtime as r\n'), ((14573, 14603), 'logging.getLogger', 'logging.getLogger', (['module_name'], {}), '(module_name)\n', (14590, 14603), False, 'import logging\n'), ((15577, 15591), 'imp.reload', 'reload', (['module'], {}), '(module)\n', (15583, 15591), False, 'from imp import reload\n'), ((15626, 15639), 'IPython.get_ipython', 'get_ipython', ([], {}), '()\n', (15637, 15639), False, 'from IPython import get_ipython\n'), ((2331, 2348), 'shlex.split', 'shlex.split', (['line'], {}), '(line)\n', (2342, 2348), False, 'import shlex\n'), ((3916, 3980), 'azure.cli.core.commands.client_factory.get_mgmt_service_client', 'client_factory.get_mgmt_service_client', (['ResourceManagementClient'], {}), '(ResourceManagementClient)\n', (3954, 3980), False, 'from azure.cli.core.commands import client_factory\n'), ((4401, 4450), 'azure.cli.command_modules.ml._az_util.az_get_app_insights_account', 'az_get_app_insights_account', (['completed_deployment'], {}), '(completed_deployment)\n', (4428, 4450), False, 'from azure.cli.command_modules.ml._az_util import az_get_app_insights_account\n'), ((8048, 8078), 'azure.cli.command_modules.ml._util.create_ssh_key_if_not_exists', 'create_ssh_key_if_not_exists', ([], {}), '()\n', (8076, 8078), False, 'from azure.cli.command_modules.ml._util import create_ssh_key_if_not_exists\n'), ((8512, 8628), 'azure.cli.command_modules.ml._az_util.az_create_acs', 'az_create_acs', (['parsed_args.name', 'resource_group', 'acr_login_server', 'c.acr_username', 'acr_password', 'ssh_public_key'], {}), '(parsed_args.name, resource_group, acr_login_server, c.\n acr_username, acr_password, ssh_public_key)\n', (8525, 8628), False, 'from azure.cli.command_modules.ml._az_util import az_create_acs\n'), ((8762, 8788), 're.search', 're.search', (['acs_regex', 'line'], {}), '(acs_regex, line)\n', (8771, 8788), False, 'import re\n'), ((9849, 9898), 'azure.cli.command_modules.ml._az_util.az_get_app_insights_account', 'az_get_app_insights_account', (['completed_deployment'], {}), '(completed_deployment)\n', (9876, 9898), False, 'from azure.cli.command_modules.ml._az_util import az_get_app_insights_account\n'), ((12104, 12374), 'azure.cli.command_modules.ml.service.realtime.realtime_service_create', 'r.realtime_service_create', (['score_file.name'], {'dependencies': 'args.dependencies', 'requirements': '""""""', 'schema_file': 'args.schema', 'service_name': 'args.name', 'verb': '(False)', 'custom_ice_url': '""""""', 'target_runtime': '"""spark-py"""', 
'logging_level': '"""debug"""', 'model': 'args.model', 'context': 'context'}), "(score_file.name, dependencies=args.dependencies,\n requirements='', schema_file=args.schema, service_name=args.name, verb=\n False, custom_ice_url='', target_runtime='spark-py', logging_level=\n 'debug', model=args.model, context=context)\n", (12129, 12374), True, 'import azure.cli.command_modules.ml.service.realtime as r\n'), ((12676, 12689), 'os.remove', 'os.remove', (['fp'], {}), '(fp)\n', (12685, 12689), False, 'import os\n'), ((13847, 13864), 'shlex.split', 'shlex.split', (['line'], {}), '(line)\n', (13858, 13864), False, 'import shlex\n'), ((14931, 14945), 'IPython.utils.ipstruct.Struct', 'Struct', ([], {'f': 'None'}), '(f=None)\n', (14937, 14945), False, 'from IPython.utils.ipstruct import Struct\n'), ((9183, 9250), 'azure.cli.command_modules.ml._az_util.query_deployment_status', 'query_deployment_status', (['resource_group', 'app_insights_deployment_id'], {}), '(resource_group, app_insights_deployment_id)\n', (9206, 9250), False, 'from azure.cli.command_modules.ml._az_util import query_deployment_status\n'), ((9330, 9343), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (9340, 9343), False, 'import time\n'), ((14682, 14784), 'azure.cli.core.azlogging.CustomStreamHandler', 'CustomStreamHandler', (['logging.DEBUG', "{(True): '%(message)s', (False): '%(levelname)s: %(message)s'}"], {}), "(logging.DEBUG, {(True): '%(message)s', (False):\n '%(levelname)s: %(message)s'})\n", (14701, 14784), False, 'from azure.cli.core.azlogging import CustomStreamHandler\n'), ((10594, 10627), 'os.path.expanduser', 'os.path.expanduser', (['"""~/.amlenvrc"""'], {}), "('~/.amlenvrc')\n", (10612, 10627), False, 'import os\n'), ((5137, 5160), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (5155, 5160), False, 'import os\n'), ((6250, 6273), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (6268, 6273), False, 'import os\n')] |
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import unittest
from absl.testing import absltest
from absl.testing import parameterized
from jax import api
from jax import config
from jax import dtypes
from jax.experimental import sparse_ops
from jax.lib import cusparse
from jax.lib import xla_bridge
from jax import jit
from jax import test_util as jtu
from jax import xla
import jax.numpy as jnp
from jax import jvp
import numpy as np
from scipy import sparse
config.parse_flags_with_absl()
FLAGS = config.FLAGS
MATMUL_TOL = {
np.float32: 1E-5,
np.float64: 1E-10,
np.complex64: 1e-5,
np.complex128: 1E-10,
}
def rand_sparse(rng, nnz=0.5, post=lambda x: x):
def _rand_sparse(shape, dtype, nnz=nnz):
rand = jtu.rand_default(rng)
size = np.prod(shape)
if 0 <= nnz < 1:
nnz = nnz * size
nnz = min(size, int(nnz))
M = rand(shape, dtype)
indices = rng.choice(size, size - nnz, replace=False)
M.flat[indices] = 0
return post(M)
return _rand_sparse
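# e.g. rand_sparse(np.random.RandomState(0))((5, 5), np.float32) returns a dense 5x5 array in which
# roughly half of the entries have been zeroed out (the default nnz=0.5 is treated as a fraction).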
class cuSparseTest(jtu.JaxTestCase):
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}".format(jtu.format_shape_dtype_string(shape, dtype)),
"shape": shape, "dtype": dtype}
for shape in [(5, 8), (8, 5), (5, 5), (8, 8)]
for dtype in jtu.dtypes.floating + jtu.dtypes.complex))
def test_csr_todense(self, shape, dtype):
rng = rand_sparse(self.rng(), post=sparse.csr_matrix)
M = rng(shape, dtype)
args = (M.data, M.indices, M.indptr)
todense = lambda *args: sparse_ops.csr_todense(*args, shape=M.shape)
self.assertArraysEqual(M.toarray(), todense(*args))
self.assertArraysEqual(M.toarray(), jit(todense)(*args))
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}".format(jtu.format_shape_dtype_string(shape, dtype)),
"shape": shape, "dtype": dtype}
for shape in [(5, 8), (8, 5), (5, 5), (8, 8)]
for dtype in jtu.dtypes.floating + jtu.dtypes.complex))
def test_csr_fromdense(self, shape, dtype):
rng = rand_sparse(self.rng())
M = rng(shape, dtype)
M_csr = sparse.csr_matrix(M)
nnz = M_csr.nnz
index_dtype = jnp.int32
fromdense = lambda M: sparse_ops.csr_fromdense(M, nnz=nnz, index_dtype=jnp.int32)
data, indices, indptr = fromdense(M)
self.assertArraysEqual(data, M_csr.data.astype(dtype))
self.assertArraysEqual(indices, M_csr.indices.astype(index_dtype))
self.assertArraysEqual(indptr, M_csr.indptr.astype(index_dtype))
data, indices, indptr = jit(fromdense)(M)
self.assertArraysEqual(data, M_csr.data.astype(dtype))
self.assertArraysEqual(indices, M_csr.indices.astype(index_dtype))
self.assertArraysEqual(indptr, M_csr.indptr.astype(index_dtype))
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_T={}".format(jtu.format_shape_dtype_string(shape, dtype), transpose),
"shape": shape, "dtype": dtype, "transpose": transpose}
for shape in [(5, 8), (8, 5), (5, 5), (8, 8)]
for dtype in jtu.dtypes.floating + jtu.dtypes.complex
for transpose in [True, False]))
def test_csr_matvec(self, shape, dtype, transpose):
op = lambda M: M.T if transpose else M
v_rng = jtu.rand_default(self.rng())
rng = rand_sparse(self.rng(), post=sparse.csr_matrix)
M = rng(shape, dtype)
v = v_rng(op(M).shape[1], dtype)
args = (M.data, M.indices, M.indptr, v)
matvec = lambda *args: sparse_ops.csr_matvec(*args, shape=M.shape, transpose=transpose)
self.assertAllClose(op(M) @ v, matvec(*args), rtol=MATMUL_TOL)
self.assertAllClose(op(M) @ v, jit(matvec)(*args), rtol=MATMUL_TOL)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_T={}".format(jtu.format_shape_dtype_string(shape, dtype), transpose),
"shape": shape, "dtype": dtype, "transpose": transpose}
for shape in [(5, 8), (8, 5), (5, 5), (8, 8)]
for dtype in jtu.dtypes.floating + jtu.dtypes.complex
for transpose in [True, False]))
def test_csr_matmat(self, shape, dtype, transpose):
op = lambda M: M.T if transpose else M
B_rng = jtu.rand_default(self.rng())
rng = rand_sparse(self.rng(), post=sparse.csr_matrix)
M = rng(shape, dtype)
B = B_rng((op(M).shape[1], 4), dtype)
args = (M.data, M.indices, M.indptr, B)
matmat = lambda *args: sparse_ops.csr_matmat(*args, shape=shape, transpose=transpose)
self.assertAllClose(op(M) @ B, matmat(*args), rtol=MATMUL_TOL)
self.assertAllClose(op(M) @ B, jit(matmat)(*args), rtol=MATMUL_TOL)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}".format(jtu.format_shape_dtype_string(shape, dtype)),
"shape": shape, "dtype": dtype}
for shape in [(5, 8), (8, 5), (5, 5), (8, 8)]
for dtype in jtu.dtypes.floating + jtu.dtypes.complex))
def test_coo_todense(self, shape, dtype):
rng = rand_sparse(self.rng(), post=sparse.coo_matrix)
M = rng(shape, dtype)
args = (M.data, M.row, M.col)
todense = lambda *args: sparse_ops.coo_todense(*args, shape=M.shape)
self.assertArraysEqual(M.toarray(), todense(*args))
self.assertArraysEqual(M.toarray(), jit(todense)(*args))
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}".format(jtu.format_shape_dtype_string(shape, dtype)),
"shape": shape, "dtype": dtype}
for shape in [(5, 8), (8, 5), (5, 5), (8, 8)]
for dtype in jtu.dtypes.floating + jtu.dtypes.complex))
def test_coo_fromdense(self, shape, dtype):
rng = rand_sparse(self.rng())
M = rng(shape, dtype)
M_coo = sparse.coo_matrix(M)
nnz = M_coo.nnz
index_dtype = jnp.int32
fromdense = lambda M: sparse_ops.coo_fromdense(M, nnz=nnz, index_dtype=jnp.int32)
data, row, col = fromdense(M)
self.assertArraysEqual(data, M_coo.data.astype(dtype))
self.assertArraysEqual(row, M_coo.row.astype(index_dtype))
self.assertArraysEqual(col, M_coo.col.astype(index_dtype))
data, indices, indptr = jit(fromdense)(M)
self.assertArraysEqual(data, M_coo.data.astype(dtype))
self.assertArraysEqual(row, M_coo.row.astype(index_dtype))
self.assertArraysEqual(col, M_coo.col.astype(index_dtype))
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_T={}".format(jtu.format_shape_dtype_string(shape, dtype), transpose),
"shape": shape, "dtype": dtype, "transpose": transpose}
for shape in [(5, 8), (8, 5), (5, 5), (8, 8)]
for dtype in jtu.dtypes.floating + jtu.dtypes.complex
for transpose in [True, False]))
def test_coo_matvec(self, shape, dtype, transpose):
op = lambda M: M.T if transpose else M
v_rng = jtu.rand_default(self.rng())
rng = rand_sparse(self.rng(), post=sparse.coo_matrix)
M = rng(shape, dtype)
v = v_rng(op(M).shape[1], dtype)
args = (M.data, M.row, M.col, v)
matvec = lambda *args: sparse_ops.coo_matvec(*args, shape=M.shape, transpose=transpose)
self.assertAllClose(op(M) @ v, matvec(*args), rtol=MATMUL_TOL)
self.assertAllClose(op(M) @ v, jit(matvec)(*args), rtol=MATMUL_TOL)
@unittest.skipIf(jtu.device_under_test() != "gpu", "test requires GPU")
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_T={}".format(jtu.format_shape_dtype_string(shape, dtype), transpose),
"shape": shape, "dtype": dtype, "transpose": transpose}
for shape in [(5, 8), (8, 5), (5, 5), (8, 8)]
for dtype in jtu.dtypes.floating + jtu.dtypes.complex
for transpose in [True, False]))
def test_coo_matmat(self, shape, dtype, transpose):
op = lambda M: M.T if transpose else M
B_rng = jtu.rand_default(self.rng())
rng = rand_sparse(self.rng(), post=sparse.coo_matrix)
M = rng(shape, dtype)
B = B_rng((op(M).shape[1], 4), dtype)
args = (M.data, M.row, M.col, B)
matmat = lambda *args: sparse_ops.coo_matmat(*args, shape=shape, transpose=transpose)
self.assertAllClose(op(M) @ B, matmat(*args), rtol=MATMUL_TOL)
self.assertAllClose(op(M) @ B, jit(matmat)(*args), rtol=MATMUL_TOL)
y, dy = jvp(lambda x: sparse_ops.coo_matmat(M.data, M.row, M.col, x, shape=shape, transpose=transpose).sum(), (B, ), (jnp.ones_like(B), ))
self.assertAllClose((op(M) @ B).sum(), y, rtol=MATMUL_TOL)
y, dy = jvp(lambda x: sparse_ops.coo_matmat(x, M.row, M.col, B, shape=shape, transpose=transpose).sum(), (M.data, ), (jnp.ones_like(M.data), ))
self.assertAllClose((op(M) @ B).sum(), y, rtol=MATMUL_TOL)
@unittest.skipIf(jtu.device_under_test() != "gpu", "test requires GPU")
def test_gpu_translation_rule(self):
version = xla_bridge.get_backend().platform_version
cuda_version = None if version == "<unknown>" else int(version.split()[-1])
if cuda_version is None or cuda_version < 11000:
self.assertFalse(cusparse and cusparse.is_supported)
self.assertNotIn(sparse_ops.csr_todense_p, xla.backend_specific_translations["gpu"])
else:
self.assertTrue(cusparse and cusparse.is_supported)
self.assertIn(sparse_ops.csr_todense_p, xla.backend_specific_translations["gpu"])
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_{}".format(
jtu.format_shape_dtype_string(shape, dtype), mat_type),
"shape": shape, "dtype": dtype, "mat_type": mat_type}
for shape in [(5, 8), (8, 5), (5, 5), (8, 8)]
for dtype in jtu.dtypes.floating + jtu.dtypes.complex
for mat_type in ['csr', 'coo']))
def test_extra_nnz(self, shape, dtype, mat_type):
rng = rand_sparse(self.rng())
M = rng(shape, dtype)
nnz = (M != 0).sum() + 5
fromdense = getattr(sparse_ops, f"{mat_type}_fromdense")
todense = getattr(sparse_ops, f"{mat_type}_todense")
args = fromdense(M, nnz=nnz, index_dtype=jnp.int32)
M_out = todense(*args, shape=M.shape)
self.assertArraysEqual(M, M_out)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}".format(jtu.format_shape_dtype_string(shape, dtype)),
"shape": shape, "dtype": dtype}
for shape in [(5, 8), (8, 5), (5, 5), (8, 8)]
for dtype in jtu.dtypes.floating + jtu.dtypes.complex))
def test_coo_todense_ad(self, shape, dtype):
rng = rand_sparse(self.rng(), post=jnp.array)
M = rng(shape, dtype)
data, row, col = sparse_ops.coo_fromdense(M, nnz=(M != 0).sum())
f = lambda data: sparse_ops.coo_todense(data, row, col, shape=M.shape)
# Forward-mode
primals, tangents = api.jvp(f, [data], [jnp.ones_like(data)])
self.assertArraysEqual(primals, f(data))
self.assertArraysEqual(tangents, jnp.zeros_like(M).at[row, col].set(1))
# Reverse-mode
primals, vjp_fun = api.vjp(f, data)
data_out, = vjp_fun(primals)
self.assertArraysEqual(primals, f(data))
self.assertArraysEqual(data_out, data)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}".format(jtu.format_shape_dtype_string(shape, dtype)),
"shape": shape, "dtype": dtype}
for shape in [(5, 8), (8, 5), (5, 5), (8, 8)]
for dtype in jtu.dtypes.floating + jtu.dtypes.complex))
def test_coo_fromdense_ad(self, shape, dtype):
rng = rand_sparse(self.rng(), post=jnp.array)
M = rng(shape, dtype)
nnz = (M != 0).sum()
f = lambda M: sparse_ops.coo_fromdense(M, nnz=nnz)
# Forward-mode
primals, tangents = api.jvp(f, [M], [jnp.ones_like(M)])
self.assertArraysEqual(primals[0], f(M)[0])
self.assertArraysEqual(primals[1], f(M)[1])
self.assertArraysEqual(primals[2], f(M)[2])
self.assertArraysEqual(tangents[0], jnp.ones(nnz, dtype=dtype))
self.assertEqual(tangents[1].dtype, dtypes.float0)
self.assertEqual(tangents[2].dtype, dtypes.float0)
# Reverse-mode
primals, vjp_fun = api.vjp(f, M)
M_out, = vjp_fun(primals)
self.assertArraysEqual(primals[0], f(M)[0])
self.assertArraysEqual(primals[1], f(M)[1])
self.assertArraysEqual(primals[2], f(M)[2])
self.assertArraysEqual(M_out, M)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_{}".format(
jtu.format_shape_dtype_string(shape, dtype),
jtu.format_shape_dtype_string(bshape, dtype)),
"shape": shape, "dtype": dtype, "bshape": bshape}
for shape in [(5, 8), (8, 5), (5, 5), (8, 8)]
for bshape in [shape[-1:] + s for s in [()]] # TODO: matmul autodiff
for dtype in jtu.dtypes.floating + jtu.dtypes.complex)) # TODO: other types
def test_coo_matvec_ad(self, shape, dtype, bshape):
tol = {np.float32: 1E-6, np.float64: 1E-13, np.complex64: 1E-6, np.complex128: 1E-13}
rng = rand_sparse(self.rng(), post=jnp.array)
rng_b = jtu.rand_default(self.rng())
M = rng(shape, dtype)
data, row, col = sparse_ops.coo_fromdense(M, nnz=(M != 0).sum())
x = rng_b(bshape, dtype)
xdot = rng_b(bshape, dtype)
# Forward-mode with respect to the vector
f_dense = lambda x: M @ x
f_sparse = lambda x: sparse_ops.coo_matvec(data, row, col, x, shape=M.shape)
v_sparse, t_sparse = api.jvp(f_sparse, [x], [xdot])
v_dense, t_dense = api.jvp(f_dense, [x], [xdot])
self.assertAllClose(v_sparse, v_dense, atol=tol, rtol=tol)
self.assertAllClose(t_sparse, t_dense, atol=tol, rtol=tol)
# Reverse-mode with respect to the vector
primals_dense, vjp_dense = api.vjp(f_dense, x)
primals_sparse, vjp_sparse = api.vjp(f_sparse, x)
out_dense, = vjp_dense(primals_dense)
out_sparse, = vjp_sparse(primals_sparse)
self.assertAllClose(primals_dense[0], primals_sparse[0], atol=tol, rtol=tol)
self.assertAllClose(out_dense, out_sparse, atol=tol, rtol=tol)
# Forward-mode with respect to nonzero elements of the matrix
f_sparse = lambda data: sparse_ops.coo_matvec(data, row, col, x, shape=M.shape)
f_dense = lambda data: sparse_ops.coo_todense(data, row, col, shape=M.shape) @ x
data = rng((len(data),), data.dtype)
data_dot = rng((len(data),), data.dtype)
v_sparse, t_sparse = api.jvp(f_sparse, [data], [data_dot])
v_dense, t_dense = api.jvp(f_dense, [data], [data_dot])
self.assertAllClose(v_sparse, v_dense, atol=tol, rtol=tol)
self.assertAllClose(t_sparse, t_dense, atol=tol, rtol=tol)
# Reverse-mode with respect to nonzero elements of the matrix
primals_dense, vjp_dense = api.vjp(f_dense, data)
primals_sparse, vjp_sparse = api.vjp(f_sparse, data)
out_dense, = vjp_dense(primals_dense)
out_sparse, = vjp_sparse(primals_sparse)
self.assertAllClose(primals_dense[0], primals_sparse[0], atol=tol, rtol=tol)
self.assertAllClose(out_dense, out_sparse, atol=tol, rtol=tol)
class SparseObjectTest(jtu.JaxTestCase):
@parameterized.named_parameters(
{"testcase_name": "_{}".format(Obj.__name__), "Obj": Obj}
for Obj in [sparse_ops.CSR, sparse_ops.CSC, sparse_ops.COO])
def test_attrs(self, Obj, shape=(5, 8), dtype=np.float16):
rng = rand_sparse(self.rng(), post=Obj.fromdense)
M = rng(shape, dtype)
assert isinstance(M, Obj)
assert M.shape == shape
assert M.dtype == dtype
assert M.nnz == (M.todense() != 0).sum()
assert M.data.dtype == dtype
if isinstance(M, sparse_ops.CSR):
assert len(M.data) == len(M.indices)
assert len(M.indptr) == M.shape[0] + 1
elif isinstance(M, sparse_ops.CSC):
assert len(M.data) == len(M.indices)
assert len(M.indptr) == M.shape[1] + 1
elif isinstance(M, sparse_ops.COO):
assert len(M.data) == len(M.row) == len(M.col)
else:
raise ValueError("Obj={Obj} not expected.")
@parameterized.named_parameters(itertools.chain.from_iterable(
jtu.cases_from_list(
{"testcase_name": "_{}_Obj={}".format(
jtu.format_shape_dtype_string(shape, dtype), Obj.__name__),
"shape": shape, "dtype": dtype, "Obj": Obj}
for shape in [(5, 8), (8, 5), (5, 5), (8, 8)]
for dtype in jtu.dtypes.floating + jtu.dtypes.complex)
for Obj in [sparse_ops.CSR, sparse_ops.CSC, sparse_ops.COO]))
def test_dense_round_trip(self, shape, dtype, Obj):
rng = rand_sparse(self.rng())
M = rng(shape, dtype)
Msparse = Obj.fromdense(M)
self.assertArraysEqual(M, Msparse.todense())
@parameterized.named_parameters(itertools.chain.from_iterable(
jtu.cases_from_list(
{"testcase_name": "_{}_Obj={}".format(
jtu.format_shape_dtype_string(shape, dtype), Obj.__name__),
"shape": shape, "dtype": dtype, "Obj": Obj}
for shape in [(5, 8), (8, 5), (5, 5), (8, 8)]
for dtype in jtu.dtypes.floating + jtu.dtypes.complex)
for Obj in [sparse_ops.CSR, sparse_ops.CSC, sparse_ops.COO]))
def test_transpose(self, shape, dtype, Obj):
rng = rand_sparse(self.rng())
M = rng(shape, dtype)
Msparse = Obj.fromdense(M)
self.assertArraysEqual(M.T, Msparse.T.todense())
@unittest.skipIf(jtu.device_under_test() == "tpu", "TPU has insufficient precision")
@parameterized.named_parameters(itertools.chain.from_iterable(
jtu.cases_from_list(
{"testcase_name": "_{}_Obj={}_bshape={}".format(
jtu.format_shape_dtype_string(shape, dtype), Obj.__name__, bshape),
"shape": shape, "dtype": dtype, "Obj": Obj, "bshape": bshape}
for shape in [(5, 8), (8, 5), (5, 5), (8, 8)]
for bshape in [shape[-1:] + s for s in [(), (3,), (4,)]]
for dtype in jtu.dtypes.floating + jtu.dtypes.complex)
for Obj in [sparse_ops.CSR, sparse_ops.CSC, sparse_ops.COO]))
def test_matmul(self, shape, dtype, Obj, bshape):
rng = rand_sparse(self.rng(), post=jnp.array)
rng_b = jtu.rand_default(self.rng())
M = rng(shape, dtype)
Msp = Obj.fromdense(M)
x = rng_b(bshape, dtype)
x = jnp.asarray(x)
self.assertAllClose(M @ x, Msp @ x, rtol=MATMUL_TOL)
if __name__ == "__main__":
absltest.main(testLoader=jtu.JaxTestLoader())
| [
"numpy.prod",
"jax.api.jvp",
"jax.test_util.rand_default",
"jax.jit",
"jax.experimental.sparse_ops.coo_todense",
"jax.numpy.asarray",
"jax.test_util.device_under_test",
"jax.experimental.sparse_ops.coo_fromdense",
"jax.experimental.sparse_ops.coo_matmat",
"jax.lib.xla_bridge.get_backend",
"jax.numpy.ones_like",
"scipy.sparse.coo_matrix",
"scipy.sparse.csr_matrix",
"jax.experimental.sparse_ops.csr_todense",
"jax.config.parse_flags_with_absl",
"jax.experimental.sparse_ops.coo_matvec",
"jax.api.vjp",
"jax.experimental.sparse_ops.csr_matmat",
"jax.experimental.sparse_ops.csr_fromdense",
"jax.numpy.ones",
"jax.numpy.zeros_like",
"jax.experimental.sparse_ops.csr_matvec",
"jax.test_util.JaxTestLoader",
"jax.test_util.format_shape_dtype_string"
] | [((1011, 1041), 'jax.config.parse_flags_with_absl', 'config.parse_flags_with_absl', ([], {}), '()\n', (1039, 1041), False, 'from jax import config\n'), ((1273, 1294), 'jax.test_util.rand_default', 'jtu.rand_default', (['rng'], {}), '(rng)\n', (1289, 1294), True, 'from jax import test_util as jtu\n'), ((1306, 1320), 'numpy.prod', 'np.prod', (['shape'], {}), '(shape)\n', (1313, 1320), True, 'import numpy as np\n'), ((2646, 2666), 'scipy.sparse.csr_matrix', 'sparse.csr_matrix', (['M'], {}), '(M)\n', (2663, 2666), False, 'from scipy import sparse\n'), ((6160, 6180), 'scipy.sparse.coo_matrix', 'sparse.coo_matrix', (['M'], {}), '(M)\n', (6177, 6180), False, 'from scipy import sparse\n'), ((11248, 11264), 'jax.api.vjp', 'api.vjp', (['f', 'data'], {}), '(f, data)\n', (11255, 11264), False, 'from jax import api\n'), ((12328, 12341), 'jax.api.vjp', 'api.vjp', (['f', 'M'], {}), '(f, M)\n', (12335, 12341), False, 'from jax import api\n'), ((13603, 13633), 'jax.api.jvp', 'api.jvp', (['f_sparse', '[x]', '[xdot]'], {}), '(f_sparse, [x], [xdot])\n', (13610, 13633), False, 'from jax import api\n'), ((13657, 13686), 'jax.api.jvp', 'api.jvp', (['f_dense', '[x]', '[xdot]'], {}), '(f_dense, [x], [xdot])\n', (13664, 13686), False, 'from jax import api\n'), ((13891, 13910), 'jax.api.vjp', 'api.vjp', (['f_dense', 'x'], {}), '(f_dense, x)\n', (13898, 13910), False, 'from jax import api\n'), ((13944, 13964), 'jax.api.vjp', 'api.vjp', (['f_sparse', 'x'], {}), '(f_sparse, x)\n', (13951, 13964), False, 'from jax import api\n'), ((14547, 14584), 'jax.api.jvp', 'api.jvp', (['f_sparse', '[data]', '[data_dot]'], {}), '(f_sparse, [data], [data_dot])\n', (14554, 14584), False, 'from jax import api\n'), ((14608, 14644), 'jax.api.jvp', 'api.jvp', (['f_dense', '[data]', '[data_dot]'], {}), '(f_dense, [data], [data_dot])\n', (14615, 14644), False, 'from jax import api\n'), ((14870, 14892), 'jax.api.vjp', 'api.vjp', (['f_dense', 'data'], {}), '(f_dense, data)\n', (14877, 14892), False, 'from jax import api\n'), ((14926, 14949), 'jax.api.vjp', 'api.vjp', (['f_sparse', 'data'], {}), '(f_sparse, data)\n', (14933, 14949), False, 'from jax import api\n'), ((18210, 18224), 'jax.numpy.asarray', 'jnp.asarray', (['x'], {}), '(x)\n', (18221, 18224), True, 'import jax.numpy as jnp\n'), ((2073, 2117), 'jax.experimental.sparse_ops.csr_todense', 'sparse_ops.csr_todense', (['*args'], {'shape': 'M.shape'}), '(*args, shape=M.shape)\n', (2095, 2117), False, 'from jax.experimental import sparse_ops\n'), ((2742, 2801), 'jax.experimental.sparse_ops.csr_fromdense', 'sparse_ops.csr_fromdense', (['M'], {'nnz': 'nnz', 'index_dtype': 'jnp.int32'}), '(M, nnz=nnz, index_dtype=jnp.int32)\n', (2766, 2801), False, 'from jax.experimental import sparse_ops\n'), ((3072, 3086), 'jax.jit', 'jit', (['fromdense'], {}), '(fromdense)\n', (3075, 3086), False, 'from jax import jit\n'), ((3990, 4054), 'jax.experimental.sparse_ops.csr_matvec', 'sparse_ops.csr_matvec', (['*args'], {'shape': 'M.shape', 'transpose': 'transpose'}), '(*args, shape=M.shape, transpose=transpose)\n', (4011, 4054), False, 'from jax.experimental import sparse_ops\n'), ((4901, 4963), 'jax.experimental.sparse_ops.csr_matmat', 'sparse_ops.csr_matmat', (['*args'], {'shape': 'shape', 'transpose': 'transpose'}), '(*args, shape=shape, transpose=transpose)\n', (4922, 4963), False, 'from jax.experimental import sparse_ops\n'), ((5587, 5631), 'jax.experimental.sparse_ops.coo_todense', 'sparse_ops.coo_todense', (['*args'], {'shape': 'M.shape'}), '(*args, shape=M.shape)\n', (5609, 5631), False, 'from 
jax.experimental import sparse_ops\n'), ((6256, 6315), 'jax.experimental.sparse_ops.coo_fromdense', 'sparse_ops.coo_fromdense', (['M'], {'nnz': 'nnz', 'index_dtype': 'jnp.int32'}), '(M, nnz=nnz, index_dtype=jnp.int32)\n', (6280, 6315), False, 'from jax.experimental import sparse_ops\n'), ((6565, 6579), 'jax.jit', 'jit', (['fromdense'], {}), '(fromdense)\n', (6568, 6579), False, 'from jax import jit\n'), ((7462, 7526), 'jax.experimental.sparse_ops.coo_matvec', 'sparse_ops.coo_matvec', (['*args'], {'shape': 'M.shape', 'transpose': 'transpose'}), '(*args, shape=M.shape, transpose=transpose)\n', (7483, 7526), False, 'from jax.experimental import sparse_ops\n'), ((8440, 8502), 'jax.experimental.sparse_ops.coo_matmat', 'sparse_ops.coo_matmat', (['*args'], {'shape': 'shape', 'transpose': 'transpose'}), '(*args, shape=shape, transpose=transpose)\n', (8461, 8502), False, 'from jax.experimental import sparse_ops\n'), ((7687, 7710), 'jax.test_util.device_under_test', 'jtu.device_under_test', ([], {}), '()\n', (7708, 7710), True, 'from jax import test_util as jtu\n'), ((9190, 9214), 'jax.lib.xla_bridge.get_backend', 'xla_bridge.get_backend', ([], {}), '()\n', (9212, 9214), False, 'from jax.lib import xla_bridge\n'), ((9082, 9105), 'jax.test_util.device_under_test', 'jtu.device_under_test', ([], {}), '()\n', (9103, 9105), True, 'from jax import test_util as jtu\n'), ((10944, 10997), 'jax.experimental.sparse_ops.coo_todense', 'sparse_ops.coo_todense', (['data', 'row', 'col'], {'shape': 'M.shape'}), '(data, row, col, shape=M.shape)\n', (10966, 10997), False, 'from jax.experimental import sparse_ops\n'), ((11846, 11882), 'jax.experimental.sparse_ops.coo_fromdense', 'sparse_ops.coo_fromdense', (['M'], {'nnz': 'nnz'}), '(M, nnz=nnz)\n', (11870, 11882), False, 'from jax.experimental import sparse_ops\n'), ((12147, 12173), 'jax.numpy.ones', 'jnp.ones', (['nnz'], {'dtype': 'dtype'}), '(nnz, dtype=dtype)\n', (12155, 12173), True, 'import jax.numpy as jnp\n'), ((13522, 13577), 'jax.experimental.sparse_ops.coo_matvec', 'sparse_ops.coo_matvec', (['data', 'row', 'col', 'x'], {'shape': 'M.shape'}), '(data, row, col, x, shape=M.shape)\n', (13543, 13577), False, 'from jax.experimental import sparse_ops\n'), ((14295, 14350), 'jax.experimental.sparse_ops.coo_matvec', 'sparse_ops.coo_matvec', (['data', 'row', 'col', 'x'], {'shape': 'M.shape'}), '(data, row, col, x, shape=M.shape)\n', (14316, 14350), False, 'from jax.experimental import sparse_ops\n'), ((17377, 17400), 'jax.test_util.device_under_test', 'jtu.device_under_test', ([], {}), '()\n', (17398, 17400), True, 'from jax import test_util as jtu\n'), ((18339, 18358), 'jax.test_util.JaxTestLoader', 'jtu.JaxTestLoader', ([], {}), '()\n', (18356, 18358), True, 'from jax import test_util as jtu\n'), ((2215, 2227), 'jax.jit', 'jit', (['todense'], {}), '(todense)\n', (2218, 2227), False, 'from jax import jit\n'), ((4158, 4169), 'jax.jit', 'jit', (['matvec'], {}), '(matvec)\n', (4161, 4169), False, 'from jax import jit\n'), ((5067, 5078), 'jax.jit', 'jit', (['matmat'], {}), '(matmat)\n', (5070, 5078), False, 'from jax import jit\n'), ((5729, 5741), 'jax.jit', 'jit', (['todense'], {}), '(todense)\n', (5732, 5741), False, 'from jax import jit\n'), ((7630, 7641), 'jax.jit', 'jit', (['matvec'], {}), '(matvec)\n', (7633, 7641), False, 'from jax import jit\n'), ((8606, 8617), 'jax.jit', 'jit', (['matmat'], {}), '(matmat)\n', (8609, 8617), False, 'from jax import jit\n'), ((8766, 8782), 'jax.numpy.ones_like', 'jnp.ones_like', (['B'], {}), '(B)\n', (8779, 8782), True, 'import 
jax.numpy as jnp\n'), ((8973, 8994), 'jax.numpy.ones_like', 'jnp.ones_like', (['M.data'], {}), '(M.data)\n', (8986, 8994), True, 'import jax.numpy as jnp\n'), ((11062, 11081), 'jax.numpy.ones_like', 'jnp.ones_like', (['data'], {}), '(data)\n', (11075, 11081), True, 'import jax.numpy as jnp\n'), ((11944, 11960), 'jax.numpy.ones_like', 'jnp.ones_like', (['M'], {}), '(M)\n', (11957, 11960), True, 'import jax.numpy as jnp\n'), ((14378, 14431), 'jax.experimental.sparse_ops.coo_todense', 'sparse_ops.coo_todense', (['data', 'row', 'col'], {'shape': 'M.shape'}), '(data, row, col, shape=M.shape)\n', (14400, 14431), False, 'from jax.experimental import sparse_ops\n'), ((1676, 1719), 'jax.test_util.format_shape_dtype_string', 'jtu.format_shape_dtype_string', (['shape', 'dtype'], {}), '(shape, dtype)\n', (1705, 1719), True, 'from jax import test_util as jtu\n'), ((2329, 2372), 'jax.test_util.format_shape_dtype_string', 'jtu.format_shape_dtype_string', (['shape', 'dtype'], {}), '(shape, dtype)\n', (2358, 2372), True, 'from jax import test_util as jtu\n'), ((3387, 3430), 'jax.test_util.format_shape_dtype_string', 'jtu.format_shape_dtype_string', (['shape', 'dtype'], {}), '(shape, dtype)\n', (3416, 3430), True, 'from jax import test_util as jtu\n'), ((4293, 4336), 'jax.test_util.format_shape_dtype_string', 'jtu.format_shape_dtype_string', (['shape', 'dtype'], {}), '(shape, dtype)\n', (4322, 4336), True, 'from jax import test_util as jtu\n'), ((5197, 5240), 'jax.test_util.format_shape_dtype_string', 'jtu.format_shape_dtype_string', (['shape', 'dtype'], {}), '(shape, dtype)\n', (5226, 5240), True, 'from jax import test_util as jtu\n'), ((5843, 5886), 'jax.test_util.format_shape_dtype_string', 'jtu.format_shape_dtype_string', (['shape', 'dtype'], {}), '(shape, dtype)\n', (5872, 5886), True, 'from jax import test_util as jtu\n'), ((6866, 6909), 'jax.test_util.format_shape_dtype_string', 'jtu.format_shape_dtype_string', (['shape', 'dtype'], {}), '(shape, dtype)\n', (6895, 6909), True, 'from jax import test_util as jtu\n'), ((8670, 8755), 'jax.experimental.sparse_ops.coo_matmat', 'sparse_ops.coo_matmat', (['M.data', 'M.row', 'M.col', 'x'], {'shape': 'shape', 'transpose': 'transpose'}), '(M.data, M.row, M.col, x, shape=shape, transpose=transpose\n )\n', (8691, 8755), False, 'from jax.experimental import sparse_ops\n'), ((8877, 8952), 'jax.experimental.sparse_ops.coo_matmat', 'sparse_ops.coo_matmat', (['x', 'M.row', 'M.col', 'B'], {'shape': 'shape', 'transpose': 'transpose'}), '(x, M.row, M.col, B, shape=shape, transpose=transpose)\n', (8898, 8952), False, 'from jax.experimental import sparse_ops\n'), ((7839, 7882), 'jax.test_util.format_shape_dtype_string', 'jtu.format_shape_dtype_string', (['shape', 'dtype'], {}), '(shape, dtype)\n', (7868, 7882), True, 'from jax import test_util as jtu\n'), ((9777, 9820), 'jax.test_util.format_shape_dtype_string', 'jtu.format_shape_dtype_string', (['shape', 'dtype'], {}), '(shape, dtype)\n', (9806, 9820), True, 'from jax import test_util as jtu\n'), ((10532, 10575), 'jax.test_util.format_shape_dtype_string', 'jtu.format_shape_dtype_string', (['shape', 'dtype'], {}), '(shape, dtype)\n', (10561, 10575), True, 'from jax import test_util as jtu\n'), ((11479, 11522), 'jax.test_util.format_shape_dtype_string', 'jtu.format_shape_dtype_string', (['shape', 'dtype'], {}), '(shape, dtype)\n', (11508, 11522), True, 'from jax import test_util as jtu\n'), ((12658, 12701), 'jax.test_util.format_shape_dtype_string', 'jtu.format_shape_dtype_string', (['shape', 'dtype'], {}), '(shape, 
dtype)\n', (12687, 12701), True, 'from jax import test_util as jtu\n'), ((12711, 12755), 'jax.test_util.format_shape_dtype_string', 'jtu.format_shape_dtype_string', (['bshape', 'dtype'], {}), '(bshape, dtype)\n', (12740, 12755), True, 'from jax import test_util as jtu\n'), ((11166, 11183), 'jax.numpy.zeros_like', 'jnp.zeros_like', (['M'], {}), '(M)\n', (11180, 11183), True, 'import jax.numpy as jnp\n'), ((16248, 16291), 'jax.test_util.format_shape_dtype_string', 'jtu.format_shape_dtype_string', (['shape', 'dtype'], {}), '(shape, dtype)\n', (16277, 16291), True, 'from jax import test_util as jtu\n'), ((16876, 16919), 'jax.test_util.format_shape_dtype_string', 'jtu.format_shape_dtype_string', (['shape', 'dtype'], {}), '(shape, dtype)\n', (16905, 16919), True, 'from jax import test_util as jtu\n'), ((17598, 17641), 'jax.test_util.format_shape_dtype_string', 'jtu.format_shape_dtype_string', (['shape', 'dtype'], {}), '(shape, dtype)\n', (17627, 17641), True, 'from jax import test_util as jtu\n')] |
# =============================================================================== #
# #
# This file has been generated automatically!! Do not change this manually! #
# #
# =============================================================================== #
from __future__ import annotations
from pydantic import Field
from ..base_object import BaseObject
class StorageStatisticsFast(BaseObject):
"""
Contains approximate storage usage statistics, excluding files of unknown file type
:param files_size: Approximate total size of files, in bytes
:type files_size: :class:`int`
:param file_count: Approximate number of files
:type file_count: :class:`int`
:param database_size: Size of the database
:type database_size: :class:`int`
:param language_pack_database_size: Size of the language pack database
:type language_pack_database_size: :class:`int`
:param log_size: Size of the TDLib internal log
:type log_size: :class:`int`
"""
ID: str = Field("storageStatisticsFast", alias="@type")
files_size: int
file_count: int
database_size: int
language_pack_database_size: int
log_size: int
@staticmethod
def read(q: dict) -> StorageStatisticsFast:
return StorageStatisticsFast.construct(**q)
| [
"pydantic.Field"
] | [((1196, 1241), 'pydantic.Field', 'Field', (['"""storageStatisticsFast"""'], {'alias': '"""@type"""'}), "('storageStatisticsFast', alias='@type')\n", (1201, 1241), False, 'from pydantic import Field\n')] |
from django.contrib.auth.decorators import login_required
from django.shortcuts import redirect
from django.urls import reverse_lazy
from django.utils.decorators import method_decorator
from django.views.generic import (
DetailView,
ListView,
)
from django.views.generic.edit import (
CreateView,
DeleteView,
UpdateView,
)
from recipes.forms import IngredientForm, RecipeForm, RecipePartFormset
from recipes.models import Ingredient, Recipe
class UserBound:
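    # Mixin that restricts querysets to the logged-in user's objects and stamps that user onto saved forms.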
context_object_name = 'instance'
def get_queryset(self):
return self.model.objects.for_user(self.request.user)
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['model_name'] = self.model._meta.verbose_name.title()
context['model_name_plural'] = (
self.model._meta.verbose_name_plural.title())
context['path_prefix'] = self.model.path_prefix
context['path_prefix_plural'] = self.model.path_prefix_plural
return context
def get_form(self, form_class=None):
form = super().get_form(form_class=form_class)
form.instance.user = self.request.user
return form
def get_success_url(self):
return reverse_lazy(self.model.path_prefix_plural)
class IngredientBound(UserBound):
model = Ingredient
form_class = IngredientForm
class RecipeBound(UserBound):
model = Recipe
form_class = RecipeForm
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
instance = getattr(self, 'object', None)
form_kwargs = {
'user': self.request.user,
}
if self.request.POST:
context['recipe_part_form'] = RecipePartFormset(
self.request.POST, instance=instance, form_kwargs=form_kwargs)
else:
context['recipe_part_form'] = RecipePartFormset(
instance=instance, form_kwargs=form_kwargs)
return context
def form_valid(self, form):
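        # The recipe form is already valid here; only save it once the inline recipe-part formset validates too.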
context = self.get_context_data()
recipe_part_form = context['recipe_part_form']
if recipe_part_form.is_valid():
self.object = form.save()
recipe_part_form.instance = self.object
recipe_part_form.save()
return super().form_valid(form=form)
else:
return super().form_invalid(form=form)
@method_decorator(login_required, name='dispatch')
class IngredientList(IngredientBound, ListView):
pass
@method_decorator(login_required, name='dispatch')
class IngredientDetail(IngredientBound, DetailView):
pass
@method_decorator(login_required, name='dispatch')
class IngredientCreate(IngredientBound, CreateView):
pass
@method_decorator(login_required, name='dispatch')
class IngredientUpdate(IngredientBound, UpdateView):
pass
@method_decorator(login_required, name='dispatch')
class IngredientDelete(IngredientBound, DeleteView):
pass
@method_decorator(login_required, name='dispatch')
class RecipeList(RecipeBound, ListView):
pass
@method_decorator(login_required, name='dispatch')
class RecipeDetail(RecipeBound, DetailView):
pass
@method_decorator(login_required, name='dispatch')
class RecipeCreate(RecipeBound, CreateView):
pass
@method_decorator(login_required, name='dispatch')
class RecipeUpdate(RecipeBound, UpdateView):
pass
@method_decorator(login_required, name='dispatch')
class RecipeDelete(RecipeBound, DeleteView):
pass
@login_required
def clone_recipe(request, pk):
recipe = Recipe.objects.for_user(request.user).get(pk=pk).clone()
return redirect('recipe-update', pk=recipe.pk)
| [
"recipes.forms.RecipePartFormset",
"django.utils.decorators.method_decorator",
"django.shortcuts.redirect",
"django.urls.reverse_lazy",
"recipes.models.Recipe.objects.for_user"
] | [((2411, 2460), 'django.utils.decorators.method_decorator', 'method_decorator', (['login_required'], {'name': '"""dispatch"""'}), "(login_required, name='dispatch')\n", (2427, 2460), False, 'from django.utils.decorators import method_decorator\n'), ((2522, 2571), 'django.utils.decorators.method_decorator', 'method_decorator', (['login_required'], {'name': '"""dispatch"""'}), "(login_required, name='dispatch')\n", (2538, 2571), False, 'from django.utils.decorators import method_decorator\n'), ((2637, 2686), 'django.utils.decorators.method_decorator', 'method_decorator', (['login_required'], {'name': '"""dispatch"""'}), "(login_required, name='dispatch')\n", (2653, 2686), False, 'from django.utils.decorators import method_decorator\n'), ((2752, 2801), 'django.utils.decorators.method_decorator', 'method_decorator', (['login_required'], {'name': '"""dispatch"""'}), "(login_required, name='dispatch')\n", (2768, 2801), False, 'from django.utils.decorators import method_decorator\n'), ((2867, 2916), 'django.utils.decorators.method_decorator', 'method_decorator', (['login_required'], {'name': '"""dispatch"""'}), "(login_required, name='dispatch')\n", (2883, 2916), False, 'from django.utils.decorators import method_decorator\n'), ((2982, 3031), 'django.utils.decorators.method_decorator', 'method_decorator', (['login_required'], {'name': '"""dispatch"""'}), "(login_required, name='dispatch')\n", (2998, 3031), False, 'from django.utils.decorators import method_decorator\n'), ((3085, 3134), 'django.utils.decorators.method_decorator', 'method_decorator', (['login_required'], {'name': '"""dispatch"""'}), "(login_required, name='dispatch')\n", (3101, 3134), False, 'from django.utils.decorators import method_decorator\n'), ((3192, 3241), 'django.utils.decorators.method_decorator', 'method_decorator', (['login_required'], {'name': '"""dispatch"""'}), "(login_required, name='dispatch')\n", (3208, 3241), False, 'from django.utils.decorators import method_decorator\n'), ((3299, 3348), 'django.utils.decorators.method_decorator', 'method_decorator', (['login_required'], {'name': '"""dispatch"""'}), "(login_required, name='dispatch')\n", (3315, 3348), False, 'from django.utils.decorators import method_decorator\n'), ((3406, 3455), 'django.utils.decorators.method_decorator', 'method_decorator', (['login_required'], {'name': '"""dispatch"""'}), "(login_required, name='dispatch')\n", (3422, 3455), False, 'from django.utils.decorators import method_decorator\n'), ((3640, 3679), 'django.shortcuts.redirect', 'redirect', (['"""recipe-update"""'], {'pk': 'recipe.pk'}), "('recipe-update', pk=recipe.pk)\n", (3648, 3679), False, 'from django.shortcuts import redirect\n'), ((1236, 1279), 'django.urls.reverse_lazy', 'reverse_lazy', (['self.model.path_prefix_plural'], {}), '(self.model.path_prefix_plural)\n', (1248, 1279), False, 'from django.urls import reverse_lazy\n'), ((1741, 1826), 'recipes.forms.RecipePartFormset', 'RecipePartFormset', (['self.request.POST'], {'instance': 'instance', 'form_kwargs': 'form_kwargs'}), '(self.request.POST, instance=instance, form_kwargs=form_kwargs\n )\n', (1758, 1826), False, 'from recipes.forms import IngredientForm, RecipeForm, RecipePartFormset\n'), ((1895, 1956), 'recipes.forms.RecipePartFormset', 'RecipePartFormset', ([], {'instance': 'instance', 'form_kwargs': 'form_kwargs'}), '(instance=instance, form_kwargs=form_kwargs)\n', (1912, 1956), False, 'from recipes.forms import IngredientForm, RecipeForm, RecipePartFormset\n'), ((3572, 3609), 'recipes.models.Recipe.objects.for_user', 
'Recipe.objects.for_user', (['request.user'], {}), '(request.user)\n', (3595, 3609), False, 'from recipes.models import Ingredient, Recipe\n')] |
'''
Takes a HI-C paired map file and a bin size, and calculates the probability of observing links between two bins
separated by n bins. Prints as distance [tab] probability
'''
from optparse import OptionParser
import sys
import re
from numpy import power
from numpy import random
def parse_options():
parser = OptionParser()
parser.add_option("-a", "--a", dest="a", default=2,
help="a of P ~ aX^a-1", metavar="A")
(options, args) = parser.parse_args()
return options
options = parse_options()
a = int(options.a)
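# Draw 1000 samples from the power distribution (density a*x^(a-1) on [0, 1]) and print their reciprocals.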
for i in range(0,1000):
value = float(random.power(a,1))
inverse = 1 / value
print(inverse)
| [
"numpy.random.power",
"optparse.OptionParser"
] | [((309, 323), 'optparse.OptionParser', 'OptionParser', ([], {}), '()\n', (321, 323), False, 'from optparse import OptionParser\n'), ((565, 583), 'numpy.random.power', 'random.power', (['a', '(1)'], {}), '(a, 1)\n', (577, 583), False, 'from numpy import random\n')] |
#!/usr/bin/env python
from karma import db
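# Association table backing the many-to-many "karma" relationship between users and posts.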
karma_posts = db.Table('karma_posts', db.Model.metadata,
db.Column('left_id',
db.Integer, db.ForeignKey('user.id'), primary_key=True),
db.Column('right_id',
db.Integer, db.ForeignKey('post.id'), primary_key=True))
class Post(db.Model):
__tablename__ = 'post'
id = db.Column(db.Integer, primary_key=True)
title = db.Column(db.String(32), index=True)
content = db.Column(db.String(1000), index=False)
user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
karma = db.relationship('User',
secondary=karma_posts,
backref='liked_posts')
| [
"karma.db.relationship",
"karma.db.ForeignKey",
"karma.db.String",
"karma.db.Column"
] | [((430, 469), 'karma.db.Column', 'db.Column', (['db.Integer'], {'primary_key': '(True)'}), '(db.Integer, primary_key=True)\n', (439, 469), False, 'from karma import db\n'), ((647, 716), 'karma.db.relationship', 'db.relationship', (['"""User"""'], {'secondary': 'karma_posts', 'backref': '"""liked_posts"""'}), "('User', secondary=karma_posts, backref='liked_posts')\n", (662, 716), False, 'from karma import db\n'), ((192, 216), 'karma.db.ForeignKey', 'db.ForeignKey', (['"""user.id"""'], {}), "('user.id')\n", (205, 216), False, 'from karma import db\n'), ((327, 351), 'karma.db.ForeignKey', 'db.ForeignKey', (['"""post.id"""'], {}), "('post.id')\n", (340, 351), False, 'from karma import db\n'), ((492, 505), 'karma.db.String', 'db.String', (['(32)'], {}), '(32)\n', (501, 505), False, 'from karma import db\n'), ((543, 558), 'karma.db.String', 'db.String', (['(1000)'], {}), '(1000)\n', (552, 558), False, 'from karma import db\n'), ((609, 633), 'karma.db.ForeignKey', 'db.ForeignKey', (['"""user.id"""'], {}), "('user.id')\n", (622, 633), False, 'from karma import db\n')] |
import ctypes
import json
from pathlib import Path
from functools import lru_cache
from typing import Union
def check_config(conf: dict):
# TODO
pass
@lru_cache()
def get_conf(path: str):
try:
path = Path(path).expanduser() / "info.json"
with path.open() as info:
conf = json.load(info)
check_config(conf)
return conf
except IOError:
raise IOError(
"Didn't find out correct configurations for Restrain at {}".
format(path.absolute()))
class JuliaPreLoad:
def __init__(self, init, lib):
self.init = init
self.lib = lib
def get_jl_lib(conf) -> JuliaPreLoad:
jl = conf["julia"]
lib_path = jl["lib"]
sys_image = jl["image"]
binary = jl["bin"]
lib = ctypes.PyDLL(lib_path, ctypes.RTLD_GLOBAL)
lib.jl_eval_string.argtypes = [ctypes.c_char_p]
lib.jl_eval_string.restype = ctypes.c_void_p
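    # Pick whichever init symbol this libjulia build exports (threaded builds use the __threading suffix).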
try:
init = lib.jl_init_with_image
except AttributeError:
init = lib.jl_init_with_image__threading
return JuliaPreLoad(
lambda: init(binary.encode(), sys_image.encode()), lib)
def get_julia(julia: Union[JuliaPreLoad, str] = '~/.restrain'):
if isinstance(julia, str):
conf = get_conf(julia)
return get_julia(get_jl_lib(conf))
assert isinstance(julia, JuliaPreLoad)
julia.init()
julia.lib.jl_eval_string(b'try using PyCall catch e; println(e) end')
julia.lib.jl_eval_string(b'println(100)')
return julia
| [
"json.load",
"functools.lru_cache",
"ctypes.PyDLL",
"pathlib.Path"
] | [((163, 174), 'functools.lru_cache', 'lru_cache', ([], {}), '()\n', (172, 174), False, 'from functools import lru_cache\n'), ((795, 837), 'ctypes.PyDLL', 'ctypes.PyDLL', (['lib_path', 'ctypes.RTLD_GLOBAL'], {}), '(lib_path, ctypes.RTLD_GLOBAL)\n', (807, 837), False, 'import ctypes\n'), ((315, 330), 'json.load', 'json.load', (['info'], {}), '(info)\n', (324, 330), False, 'import json\n'), ((224, 234), 'pathlib.Path', 'Path', (['path'], {}), '(path)\n', (228, 234), False, 'from pathlib import Path\n')] |
import collections
from typing import Callable, Dict, List, Tuple
from xlab.data import store
from xlab.data.proto import data_entry_pb2
from xlab.data.store import key
from xlab.net.proto import time_util
from xlab.util.status import errors
class InMemoryDataStore(store.DataStore):
def __init__(self):
# In memory data store has empty initial state
self._data: Dict[
key.DataKey,
List[data_entry_pb2.DataEntry]] = collections.defaultdict(list)
# Load and unload are in-memory specific helpers to manage the data.
def load(self, data_key: key.DataKey,
data_entries: List[data_entry_pb2.DataEntry]):
self._data[data_key] = data_entries
def unload(self, data_key: key.DataKey):
del self._data[data_key]
def add(self, data_entry: data_entry_pb2.DataEntry):
data_key = key.make_key(data_entry)
data_entries = self._data[data_key]
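        # Refuse to add a second entry with the same timestamp under the same data key.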
if time_util.to_time(data_entry.timestamp) in {
time_util.to_time(e.timestamp) for e in data_entries
}:
raise errors.AlreadyExistsError(
f'The data entry to add already exists: {data_entry}')
self._data[data_key].append(data_entry)
self._data[data_key].sort(key=lambda e: e.timestamp.ToSeconds())
def read(self,
lookup_key: store.DataStore.LookupKey) -> data_entry_pb2.DataEntry:
data_entries = self._data[key.from_lookup_key(lookup_key)]
try:
return next(
e for e in data_entries
if time_util.to_time(e.timestamp) == lookup_key.timestamp)
except StopIteration:
raise errors.NotFoundError(
f'Cannot find data matching lookup key: {lookup_key}')
def lookup(
self, lookup_key: store.DataStore.LookupKey
) -> data_entry_pb2.DataEntries:
results = []
for data_key, data_entries in self._data.items():
if not key.key_matches(data_key, lookup_key):
continue
results.extend(
(e for e in data_entries if lookup_key.timestamp is None or
time_util.to_time(e.timestamp) == lookup_key.timestamp))
# Return a value, as extend makes copies.
data_entries = data_entry_pb2.DataEntries()
data_entries.entries.extend(results)
return data_entries
def each(self, fn: Callable[[data_entry_pb2.DataEntry], None]):
for key, data_entries in self._data.items():
for data_entry in data_entries:
fn(data_entry)
| [
"xlab.data.store.key.key_matches",
"xlab.net.proto.time_util.to_time",
"xlab.util.status.errors.AlreadyExistsError",
"xlab.data.proto.data_entry_pb2.DataEntries",
"collections.defaultdict",
"xlab.util.status.errors.NotFoundError",
"xlab.data.store.key.from_lookup_key",
"xlab.data.store.key.make_key"
] | [((464, 493), 'collections.defaultdict', 'collections.defaultdict', (['list'], {}), '(list)\n', (487, 493), False, 'import collections\n'), ((870, 894), 'xlab.data.store.key.make_key', 'key.make_key', (['data_entry'], {}), '(data_entry)\n', (882, 894), False, 'from xlab.data.store import key\n'), ((2297, 2325), 'xlab.data.proto.data_entry_pb2.DataEntries', 'data_entry_pb2.DataEntries', ([], {}), '()\n', (2323, 2325), False, 'from xlab.data.proto import data_entry_pb2\n'), ((950, 989), 'xlab.net.proto.time_util.to_time', 'time_util.to_time', (['data_entry.timestamp'], {}), '(data_entry.timestamp)\n', (967, 989), False, 'from xlab.net.proto import time_util\n'), ((1093, 1178), 'xlab.util.status.errors.AlreadyExistsError', 'errors.AlreadyExistsError', (['f"""The data entry to add already exists: {data_entry}"""'], {}), "(f'The data entry to add already exists: {data_entry}'\n )\n", (1118, 1178), False, 'from xlab.util.status import errors\n'), ((1447, 1478), 'xlab.data.store.key.from_lookup_key', 'key.from_lookup_key', (['lookup_key'], {}), '(lookup_key)\n', (1466, 1478), False, 'from xlab.data.store import key\n'), ((1011, 1041), 'xlab.net.proto.time_util.to_time', 'time_util.to_time', (['e.timestamp'], {}), '(e.timestamp)\n', (1028, 1041), False, 'from xlab.net.proto import time_util\n'), ((1681, 1756), 'xlab.util.status.errors.NotFoundError', 'errors.NotFoundError', (['f"""Cannot find data matching lookup key: {lookup_key}"""'], {}), "(f'Cannot find data matching lookup key: {lookup_key}')\n", (1701, 1756), False, 'from xlab.util.status import errors\n'), ((1982, 2019), 'xlab.data.store.key.key_matches', 'key.key_matches', (['data_key', 'lookup_key'], {}), '(data_key, lookup_key)\n', (1997, 2019), False, 'from xlab.data.store import key\n'), ((1577, 1607), 'xlab.net.proto.time_util.to_time', 'time_util.to_time', (['e.timestamp'], {}), '(e.timestamp)\n', (1594, 1607), False, 'from xlab.net.proto import time_util\n'), ((2167, 2197), 'xlab.net.proto.time_util.to_time', 'time_util.to_time', (['e.timestamp'], {}), '(e.timestamp)\n', (2184, 2197), False, 'from xlab.net.proto import time_util\n')] |
from sqlite3 import Row
from typing import List, Optional
from fastapi import Query
from pydantic import BaseModel, Field
class UpdateAdminSettings(BaseModel):
# users
admin_users: str = Query(None)
allowed_users: str = Query(None)
admin_ext: str = Query(None)
disabled_ext: str = Query(None)
funding_source: str = Query(None)
# ops
force_https: bool = Query(None)
service_fee: float = Query(None, ge=0)
hide_api: bool = Query(None)
# Change theme
site_title: str = Query("LNbits")
site_tagline: str = Query("free and open-source lightning wallet")
site_description: str = Query(None)
default_wallet_name: str = Query("LNbits wallet")
denomination: str = Query("sats")
theme: str = Query(None)
ad_space: str = Query(None)
class Admin(BaseModel):
# users
user: str
admin_users: Optional[str]
allowed_users: Optional[str]
admin_ext: Optional[str]
disabled_ext: Optional[str]
funding_source: Optional[str]
# ops
data_folder: Optional[str]
database_url: Optional[str]
force_https: bool = Field(default=True)
service_fee: float = Field(default=0)
hide_api: bool = Field(default=False)
# Change theme
site_title: Optional[str]
site_tagline: Optional[str]
site_description: Optional[str]
default_wallet_name: Optional[str]
denomination: str = Field(default="sats")
theme: Optional[str]
ad_space: Optional[str]
@classmethod
def from_row(cls, row: Row) -> "Admin":
data = dict(row)
return cls(**data)
class Funding(BaseModel):
id: str
backend_wallet: str
endpoint: str = Query(None)
port: str = Query(None)
read_key: str = Query(None)
invoice_key: str = Query(None)
admin_key: str = Query(None)
cert: str = Query(None)
balance: int = Query(None)
selected: int
@classmethod
def from_row(cls, row: Row) -> "Funding":
data = dict(row)
return cls(**data)
| [
"pydantic.Field",
"fastapi.Query"
] | [((198, 209), 'fastapi.Query', 'Query', (['None'], {}), '(None)\n', (203, 209), False, 'from fastapi import Query\n'), ((235, 246), 'fastapi.Query', 'Query', (['None'], {}), '(None)\n', (240, 246), False, 'from fastapi import Query\n'), ((268, 279), 'fastapi.Query', 'Query', (['None'], {}), '(None)\n', (273, 279), False, 'from fastapi import Query\n'), ((304, 315), 'fastapi.Query', 'Query', (['None'], {}), '(None)\n', (309, 315), False, 'from fastapi import Query\n'), ((342, 353), 'fastapi.Query', 'Query', (['None'], {}), '(None)\n', (347, 353), False, 'from fastapi import Query\n'), ((388, 399), 'fastapi.Query', 'Query', (['None'], {}), '(None)\n', (393, 399), False, 'from fastapi import Query\n'), ((425, 442), 'fastapi.Query', 'Query', (['None'], {'ge': '(0)'}), '(None, ge=0)\n', (430, 442), False, 'from fastapi import Query\n'), ((464, 475), 'fastapi.Query', 'Query', (['None'], {}), '(None)\n', (469, 475), False, 'from fastapi import Query\n'), ((517, 532), 'fastapi.Query', 'Query', (['"""LNbits"""'], {}), "('LNbits')\n", (522, 532), False, 'from fastapi import Query\n'), ((557, 603), 'fastapi.Query', 'Query', (['"""free and open-source lightning wallet"""'], {}), "('free and open-source lightning wallet')\n", (562, 603), False, 'from fastapi import Query\n'), ((632, 643), 'fastapi.Query', 'Query', (['None'], {}), '(None)\n', (637, 643), False, 'from fastapi import Query\n'), ((675, 697), 'fastapi.Query', 'Query', (['"""LNbits wallet"""'], {}), "('LNbits wallet')\n", (680, 697), False, 'from fastapi import Query\n'), ((722, 735), 'fastapi.Query', 'Query', (['"""sats"""'], {}), "('sats')\n", (727, 735), False, 'from fastapi import Query\n'), ((753, 764), 'fastapi.Query', 'Query', (['None'], {}), '(None)\n', (758, 764), False, 'from fastapi import Query\n'), ((785, 796), 'fastapi.Query', 'Query', (['None'], {}), '(None)\n', (790, 796), False, 'from fastapi import Query\n'), ((1105, 1124), 'pydantic.Field', 'Field', ([], {'default': '(True)'}), '(default=True)\n', (1110, 1124), False, 'from pydantic import BaseModel, Field\n'), ((1150, 1166), 'pydantic.Field', 'Field', ([], {'default': '(0)'}), '(default=0)\n', (1155, 1166), False, 'from pydantic import BaseModel, Field\n'), ((1188, 1208), 'pydantic.Field', 'Field', ([], {'default': '(False)'}), '(default=False)\n', (1193, 1208), False, 'from pydantic import BaseModel, Field\n'), ((1389, 1410), 'pydantic.Field', 'Field', ([], {'default': '"""sats"""'}), "(default='sats')\n", (1394, 1410), False, 'from pydantic import BaseModel, Field\n'), ((1662, 1673), 'fastapi.Query', 'Query', (['None'], {}), '(None)\n', (1667, 1673), False, 'from fastapi import Query\n'), ((1690, 1701), 'fastapi.Query', 'Query', (['None'], {}), '(None)\n', (1695, 1701), False, 'from fastapi import Query\n'), ((1722, 1733), 'fastapi.Query', 'Query', (['None'], {}), '(None)\n', (1727, 1733), False, 'from fastapi import Query\n'), ((1757, 1768), 'fastapi.Query', 'Query', (['None'], {}), '(None)\n', (1762, 1768), False, 'from fastapi import Query\n'), ((1790, 1801), 'fastapi.Query', 'Query', (['None'], {}), '(None)\n', (1795, 1801), False, 'from fastapi import Query\n'), ((1818, 1829), 'fastapi.Query', 'Query', (['None'], {}), '(None)\n', (1823, 1829), False, 'from fastapi import Query\n'), ((1849, 1860), 'fastapi.Query', 'Query', (['None'], {}), '(None)\n', (1854, 1860), False, 'from fastapi import Query\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
from argparse import ArgumentParser, FileType
import numpy as np
from pyfr.readers import get_reader_by_name, get_reader_by_extn, BaseReader
from pyfr.util import all_subclasses
def process_convert(args):
# Get a suitable mesh reader instance
if args.type:
reader = get_reader_by_name(args.type, args.inmesh)
else:
extn = os.path.splitext(args.inmesh.name)[1]
reader = get_reader_by_extn(extn, args.inmesh)
# Get the mesh in the PyFR format
mesh = reader.to_pyfrm()
# Save to disk
np.savez(args.outmesh, **mesh)
def main():
ap = ArgumentParser(prog='pyfr-mesh', description='Generates and '
'manipulates PyFR mesh files')
sp = ap.add_subparsers(help='sub-command help')
# Mesh format conversion
ap_convert = sp.add_parser('convert', help='convert --help')
ap_convert.add_argument('inmesh', type=FileType('r'),
help='Input mesh file')
ap_convert.add_argument('outmesh', type=FileType('wb'),
help='Output PyFR mesh file')
types = [cls.name for cls in all_subclasses(BaseReader)]
ap_convert.add_argument('-t', dest='type', choices=types, required=False,
help='Input file type; this is usually inferred '
'from the extension of inmesh')
ap_convert.set_defaults(process=process_convert)
args = ap.parse_args()
args.process(args)
if __name__ == '__main__':
main()
| [
"numpy.savez",
"argparse.FileType",
"argparse.ArgumentParser",
"os.path.splitext",
"pyfr.util.all_subclasses",
"pyfr.readers.get_reader_by_extn",
"pyfr.readers.get_reader_by_name"
] | [((597, 627), 'numpy.savez', 'np.savez', (['args.outmesh'], {}), '(args.outmesh, **mesh)\n', (605, 627), True, 'import numpy as np\n'), ((651, 745), 'argparse.ArgumentParser', 'ArgumentParser', ([], {'prog': '"""pyfr-mesh"""', 'description': '"""Generates and manipulates PyFR mesh files"""'}), "(prog='pyfr-mesh', description=\n 'Generates and manipulates PyFR mesh files')\n", (665, 745), False, 'from argparse import ArgumentParser, FileType\n'), ((344, 386), 'pyfr.readers.get_reader_by_name', 'get_reader_by_name', (['args.type', 'args.inmesh'], {}), '(args.type, args.inmesh)\n', (362, 386), False, 'from pyfr.readers import get_reader_by_name, get_reader_by_extn, BaseReader\n'), ((467, 504), 'pyfr.readers.get_reader_by_extn', 'get_reader_by_extn', (['extn', 'args.inmesh'], {}), '(extn, args.inmesh)\n', (485, 504), False, 'from pyfr.readers import get_reader_by_name, get_reader_by_extn, BaseReader\n'), ((412, 446), 'os.path.splitext', 'os.path.splitext', (['args.inmesh.name'], {}), '(args.inmesh.name)\n', (428, 446), False, 'import os\n'), ((959, 972), 'argparse.FileType', 'FileType', (['"""r"""'], {}), "('r')\n", (967, 972), False, 'from argparse import ArgumentParser, FileType\n'), ((1070, 1084), 'argparse.FileType', 'FileType', (['"""wb"""'], {}), "('wb')\n", (1078, 1084), False, 'from argparse import ArgumentParser, FileType\n'), ((1177, 1203), 'pyfr.util.all_subclasses', 'all_subclasses', (['BaseReader'], {}), '(BaseReader)\n', (1191, 1203), False, 'from pyfr.util import all_subclasses\n')] |
# coding= utf-8
from __future__ import unicode_literals
import pytest
import forth
class TestTheParser():
def test_empty_string(self):
""" Parser refuses to parse past the end of the string. """
p = forth.Parser('')
with pytest.raises(StopIteration):
p.parse_whitespace()
with pytest.raises(StopIteration):
p.parse_word()
with pytest.raises(StopIteration):
p.parse_rest_of_line()
with pytest.raises(StopIteration):
p.next_word()
assert p.is_finished == True
def test_all_whitespace(self):
""" Parser consumes all whitespace in one gulp. """
whitespace_string = " \t\t \t \t"
p = forth.Parser(whitespace_string)
assert p.parse_word() is None
assert p.parse_whitespace() == whitespace_string
with pytest.raises(StopIteration):
p.next_word()
assert p.is_finished == True
# Also, next_word will happily consume and ignore the whitespace itself.
p = forth.Parser(whitespace_string)
with pytest.raises(StopIteration):
p.next_word()
assert p.is_finished == True
def test_single_word(self):
""" A single word is returned immediately. """
p = forth.Parser("JUST-ONE-WORD")
assert p.next_word() == "JUST-ONE-WORD"
# no further words exist
with pytest.raises(StopIteration):
p.next_word()
assert p.is_finished == True
def test_leading_whitespace(self):
""" Leading whitespace is ignored. """
p = forth.Parser(" \t HELLO-WORLD")
assert p.next_word() == 'HELLO-WORLD'
# no further words exist
with pytest.raises(StopIteration):
p.next_word()
assert p.is_finished == True
def test_more_words(self):
""" Multiple words are returned one at a time. """
p = forth.Parser("AND ON THE PEDESTAL,")
assert p.next_word() == 'AND'
assert p.next_word() == 'ON'
assert p.next_word() == 'THE'
assert p.next_word() == 'PEDESTAL,'
with pytest.raises(StopIteration):
p.next_word()
assert p.is_finished == True
def test_more_whitespace(self):
""" All whitespace is eaten together and has no effect on words. """
p = forth.Parser(" \tTHESE\t\tWORDS APPEAR \t ")
assert p.next_word() == 'THESE'
assert p.next_word() == 'WORDS'
assert p.next_word() == 'APPEAR'
with pytest.raises(StopIteration):
p.next_word()
assert p.is_finished == True
def test_newlines(self):
""" Newlines get consumed like other whitespace """
p = forth.Parser("MY NAME IS OZYMANDIAS,\nKING OF KINGS!")
assert p.next_word() == 'MY'
assert p.next_word() == 'NAME'
assert p.next_word() == 'IS'
assert p.next_word() == 'OZYMANDIAS,'
assert p.next_word() == 'KING'
assert p.next_word() == 'OF'
assert p.next_word() == 'KINGS!'
with pytest.raises(StopIteration):
p.next_word()
assert p.is_finished == True
| [
"forth.Parser",
"pytest.raises"
] | [((222, 238), 'forth.Parser', 'forth.Parser', (['""""""'], {}), "('')\n", (234, 238), False, 'import forth\n'), ((726, 757), 'forth.Parser', 'forth.Parser', (['whitespace_string'], {}), '(whitespace_string)\n', (738, 757), False, 'import forth\n'), ((1055, 1086), 'forth.Parser', 'forth.Parser', (['whitespace_string'], {}), '(whitespace_string)\n', (1067, 1086), False, 'import forth\n'), ((1294, 1323), 'forth.Parser', 'forth.Parser', (['"""JUST-ONE-WORD"""'], {}), "('JUST-ONE-WORD')\n", (1306, 1323), False, 'import forth\n'), ((1612, 1644), 'forth.Parser', 'forth.Parser', (['""" \t HELLO-WORLD"""'], {}), "(' \\t HELLO-WORLD')\n", (1624, 1644), False, 'import forth\n'), ((1935, 1971), 'forth.Parser', 'forth.Parser', (['"""AND ON THE PEDESTAL,"""'], {}), "('AND ON THE PEDESTAL,')\n", (1947, 1971), False, 'import forth\n'), ((2363, 2420), 'forth.Parser', 'forth.Parser', (['""" \tTHESE\t\tWORDS APPEAR \t """'], {}), "(' \\tTHESE\\t\\tWORDS APPEAR \\t ')\n", (2375, 2420), False, 'import forth\n'), ((2752, 2809), 'forth.Parser', 'forth.Parser', (['"""MY NAME IS OZYMANDIAS,\nKING OF KINGS!"""'], {}), '("""MY NAME IS OZYMANDIAS,\nKING OF KINGS!""")\n', (2764, 2809), False, 'import forth\n'), ((253, 281), 'pytest.raises', 'pytest.raises', (['StopIteration'], {}), '(StopIteration)\n', (266, 281), False, 'import pytest\n'), ((330, 358), 'pytest.raises', 'pytest.raises', (['StopIteration'], {}), '(StopIteration)\n', (343, 358), False, 'import pytest\n'), ((401, 429), 'pytest.raises', 'pytest.raises', (['StopIteration'], {}), '(StopIteration)\n', (414, 429), False, 'import pytest\n'), ((480, 508), 'pytest.raises', 'pytest.raises', (['StopIteration'], {}), '(StopIteration)\n', (493, 508), False, 'import pytest\n'), ((868, 896), 'pytest.raises', 'pytest.raises', (['StopIteration'], {}), '(StopIteration)\n', (881, 896), False, 'import pytest\n'), ((1101, 1129), 'pytest.raises', 'pytest.raises', (['StopIteration'], {}), '(StopIteration)\n', (1114, 1129), False, 'import pytest\n'), ((1420, 1448), 'pytest.raises', 'pytest.raises', (['StopIteration'], {}), '(StopIteration)\n', (1433, 1448), False, 'import pytest\n'), ((1739, 1767), 'pytest.raises', 'pytest.raises', (['StopIteration'], {}), '(StopIteration)\n', (1752, 1767), False, 'import pytest\n'), ((2144, 2172), 'pytest.raises', 'pytest.raises', (['StopIteration'], {}), '(StopIteration)\n', (2157, 2172), False, 'import pytest\n'), ((2557, 2585), 'pytest.raises', 'pytest.raises', (['StopIteration'], {}), '(StopIteration)\n', (2570, 2585), False, 'import pytest\n'), ((3098, 3126), 'pytest.raises', 'pytest.raises', (['StopIteration'], {}), '(StopIteration)\n', (3111, 3126), False, 'import pytest\n')] |
"""
@author: <NAME>
In this module we define a class that represents all of the spin
simulations.
"""
import numpy as np
from Cayley.classes.abstractsimulation import *
__author__ = "\n".join(['<NAME> (<EMAIL>)'])
__all__ = ['SpinSimulation']
class SpinSimulation(AbstractSimulation):
def __init__(self, network,trials,timesteps):
AbstractSimulation.__init__(self,network)
def startUp(self):
"""Sets the inital state of all nodes to 1 or spin up."""
self.data[0:self.trials,0] = np.ones(len(self.network))
def startDown(self):
"""Sets the inital state of all nodes to -1 or spin down."""
self.data[0:self.trials,0] = np.full(len(self.network),-1)
def startRandom(self):
"""
Sets the initial states of the particles to -1 or 1.
"""
self.data[0:self.trials,0] = np.random.choice([-1,1], size = len(self.network))
def countUp(self,timestep,trial):
return np.count_nonzero(self.data[trial,timestep] == 1)
def countDown(self,timestep,trial):
return np.count_nonzero(self.data[trial,timestep] == -1)
| [
"numpy.count_nonzero"
] | [((969, 1018), 'numpy.count_nonzero', 'np.count_nonzero', (['(self.data[trial, timestep] == 1)'], {}), '(self.data[trial, timestep] == 1)\n', (985, 1018), True, 'import numpy as np\n'), ((1074, 1124), 'numpy.count_nonzero', 'np.count_nonzero', (['(self.data[trial, timestep] == -1)'], {}), '(self.data[trial, timestep] == -1)\n', (1090, 1124), True, 'import numpy as np\n')] |
import argparse
import logging
import sys
from functools import partial
from cook import plugins
from cook.mesos import download_file
from cook.querying import parse_entity_refs, query_unique_and_run, parse_entity_ref
from cook.util import guard_no_cluster
def cat_for_instance(instance, sandbox_dir, path):
"""Outputs the contents of the Mesos sandbox path for the given instance."""
retrieve_fn = plugins.get_fn('download-job-instance-file', download_file)
download = retrieve_fn(instance, sandbox_dir, path)
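    # Stream the sandbox file in 4 KiB chunks; a broken pipe (e.g. when piping into head) is logged and swallowed.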
try:
for data in download(chunk_size=4096):
if data:
sys.stdout.buffer.write(data)
except BrokenPipeError as bpe:
sys.stderr.close()
logging.exception(bpe)
def cat(clusters, args, _):
"""Outputs the contents of the corresponding Mesos sandbox path by job or instance uuid."""
guard_no_cluster(clusters)
entity_refs, clusters_of_interest = parse_entity_refs(clusters, args.get('target-entity'))
paths = args.get('path')
# argparse should prevent these, but we'll be defensive anyway
assert len(entity_refs) == 1, 'Only a single UUID or URL is supported.'
assert len(paths) == 1, 'Only a single path is supported.'
command_fn = partial(cat_for_instance, path=paths[0])
query_unique_and_run(clusters_of_interest, entity_refs[0], command_fn)
def valid_entity_ref(s):
"""Allows argparse to flag user-provided entity ref strings as valid or not"""
try:
parse_entity_ref(s, lambda x: x)
return s
except Exception as e:
raise argparse.ArgumentTypeError(str(e))
def valid_path(s):
"""Allows argparse to flag user-provided paths as valid or not"""
if len(s) > 0:
return s
else:
raise argparse.ArgumentTypeError('path cannot be empty')
def register(add_parser, _):
"""Adds this sub-command's parser and returns the action function"""
parser = add_parser('cat', help='output files by job or instance uuid')
parser.add_argument('target-entity', nargs=1,
help='Accepts either a job or an instance UUID or URL. The latest instance is selected for a '
'job with multiple instances.',
type=valid_entity_ref)
parser.add_argument('path', nargs=1,
help='Relative to the sandbox directory on the Mesos agent where the instance runs.',
type=valid_path)
return cat
| [
"cook.util.guard_no_cluster",
"cook.plugins.get_fn",
"argparse.ArgumentTypeError",
"logging.exception",
"functools.partial",
"sys.stdout.buffer.write",
"cook.querying.parse_entity_ref",
"sys.stderr.close",
"cook.querying.query_unique_and_run"
] | [((410, 469), 'cook.plugins.get_fn', 'plugins.get_fn', (['"""download-job-instance-file"""', 'download_file'], {}), "('download-job-instance-file', download_file)\n", (424, 469), False, 'from cook import plugins\n'), ((872, 898), 'cook.util.guard_no_cluster', 'guard_no_cluster', (['clusters'], {}), '(clusters)\n', (888, 898), False, 'from cook.util import guard_no_cluster\n'), ((1248, 1288), 'functools.partial', 'partial', (['cat_for_instance'], {'path': 'paths[0]'}), '(cat_for_instance, path=paths[0])\n', (1255, 1288), False, 'from functools import partial\n'), ((1293, 1363), 'cook.querying.query_unique_and_run', 'query_unique_and_run', (['clusters_of_interest', 'entity_refs[0]', 'command_fn'], {}), '(clusters_of_interest, entity_refs[0], command_fn)\n', (1313, 1363), False, 'from cook.querying import parse_entity_refs, query_unique_and_run, parse_entity_ref\n'), ((1491, 1523), 'cook.querying.parse_entity_ref', 'parse_entity_ref', (['s', '(lambda x: x)'], {}), '(s, lambda x: x)\n', (1507, 1523), False, 'from cook.querying import parse_entity_refs, query_unique_and_run, parse_entity_ref\n'), ((1768, 1818), 'argparse.ArgumentTypeError', 'argparse.ArgumentTypeError', (['"""path cannot be empty"""'], {}), "('path cannot be empty')\n", (1794, 1818), False, 'import argparse\n'), ((692, 710), 'sys.stderr.close', 'sys.stderr.close', ([], {}), '()\n', (708, 710), False, 'import sys\n'), ((719, 741), 'logging.exception', 'logging.exception', (['bpe'], {}), '(bpe)\n', (736, 741), False, 'import logging\n'), ((619, 648), 'sys.stdout.buffer.write', 'sys.stdout.buffer.write', (['data'], {}), '(data)\n', (642, 648), False, 'import sys\n')] |
# Generated by Django 2.0.9 on 2018-11-28 16:18
from django.conf import settings
import django.contrib.auth.models
import django.contrib.auth.validators
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import osc_bge.users.models
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0009_alter_user_last_name_max_length'),
('agent', '0001_initial'),
('bge', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username')),
('first_name', models.CharField(blank=True, max_length=30, verbose_name='first name')),
('last_name', models.CharField(blank=True, max_length=150, verbose_name='last name')),
('email', models.EmailField(blank=True, max_length=254, verbose_name='email address')),
('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
('address', models.CharField(max_length=255, null=True)),
('updated_at', models.DateTimeField(auto_now=True, null=True)),
('type', models.CharField(blank=True, choices=[('bge_admin', 'BGE_Admin'), ('bge_branch_admin', 'BGE_Branch_Admin'), ('agency_admin', 'Agency_Admin'), ('counselor', 'Counselor')], max_length=140, null=True)),
('image', models.ImageField(blank=True, null=True, upload_to=osc_bge.users.models.user_directory_path)),
],
options={
'verbose_name': 'user',
'verbose_name_plural': 'users',
'abstract': False,
},
managers=[
('objects', django.contrib.auth.models.UserManager()),
],
),
migrations.CreateModel(
name='AgencyAdminUser',
fields=[
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, primary_key=True, related_name='agency_admin', serialize=False, to=settings.AUTH_USER_MODEL)),
('agency', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='agent.Agency')),
],
),
migrations.CreateModel(
name='BgeAdminUser',
fields=[
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, primary_key=True, serialize=False, to=settings.AUTH_USER_MODEL)),
('partition', models.CharField(choices=[('entrance', 'ENTRANCE'), ('accounting', 'ACCOUNTING'), ('admin', 'ADMIN')], max_length=255, null=True)),
('group', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='bge.BgeBranch')),
],
),
migrations.CreateModel(
name='Counselor',
fields=[
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, primary_key=True, related_name='counselor', serialize=False, to=settings.AUTH_USER_MODEL)),
('agency', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='agent.Agency')),
],
),
migrations.AddField(
model_name='user',
name='groups',
field=models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups'),
),
migrations.AddField(
model_name='user',
name='user_permissions',
field=models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions'),
),
]
| [
"django.db.models.EmailField",
"django.db.models.OneToOneField",
"django.db.models.ForeignKey",
"django.db.models.ManyToManyField",
"django.db.models.BooleanField",
"django.db.models.ImageField",
"django.db.models.AutoField",
"django.db.models.DateTimeField",
"django.db.models.CharField"
] | [((4537, 4788), 'django.db.models.ManyToManyField', 'models.ManyToManyField', ([], {'blank': '(True)', 'help_text': '"""The groups this user belongs to. A user will get all permissions granted to each of their groups."""', 'related_name': '"""user_set"""', 'related_query_name': '"""user"""', 'to': '"""auth.Group"""', 'verbose_name': '"""groups"""'}), "(blank=True, help_text=\n 'The groups this user belongs to. A user will get all permissions granted to each of their groups.'\n , related_name='user_set', related_query_name='user', to='auth.Group',\n verbose_name='groups')\n", (4559, 4788), False, 'from django.db import migrations, models\n'), ((4902, 5106), 'django.db.models.ManyToManyField', 'models.ManyToManyField', ([], {'blank': '(True)', 'help_text': '"""Specific permissions for this user."""', 'related_name': '"""user_set"""', 'related_query_name': '"""user"""', 'to': '"""auth.Permission"""', 'verbose_name': '"""user permissions"""'}), "(blank=True, help_text=\n 'Specific permissions for this user.', related_name='user_set',\n related_query_name='user', to='auth.Permission', verbose_name=\n 'user permissions')\n", (4924, 5106), False, 'from django.db import migrations, models\n'), ((621, 714), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (637, 714), False, 'from django.db import migrations, models\n'), ((742, 799), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(128)', 'verbose_name': '"""password"""'}), "(max_length=128, verbose_name='password')\n", (758, 799), False, 'from django.db import migrations, models\n'), ((833, 903), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'blank': '(True)', 'null': '(True)', 'verbose_name': '"""last login"""'}), "(blank=True, null=True, verbose_name='last login')\n", (853, 903), False, 'from django.db import migrations, models\n'), ((939, 1110), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)', 'help_text': '"""Designates that this user has all permissions without explicitly assigning them."""', 'verbose_name': '"""superuser status"""'}), "(default=False, help_text=\n 'Designates that this user has all permissions without explicitly assigning them.'\n , verbose_name='superuser status')\n", (958, 1110), False, 'from django.db import migrations, models\n'), ((1464, 1534), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(30)', 'verbose_name': '"""first name"""'}), "(blank=True, max_length=30, verbose_name='first name')\n", (1480, 1534), False, 'from django.db import migrations, models\n'), ((1567, 1637), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(150)', 'verbose_name': '"""last name"""'}), "(blank=True, max_length=150, verbose_name='last name')\n", (1583, 1637), False, 'from django.db import migrations, models\n'), ((1666, 1741), 'django.db.models.EmailField', 'models.EmailField', ([], {'blank': '(True)', 'max_length': '(254)', 'verbose_name': '"""email address"""'}), "(blank=True, max_length=254, verbose_name='email address')\n", (1683, 1741), False, 'from django.db import migrations, models\n'), ((1773, 1916), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)', 'help_text': '"""Designates whether the user can log into this admin site."""', 'verbose_name': 
'"""staff status"""'}), "(default=False, help_text=\n 'Designates whether the user can log into this admin site.',\n verbose_name='staff status')\n", (1792, 1916), False, 'from django.db import migrations, models\n'), ((1940, 2121), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(True)', 'help_text': '"""Designates whether this user should be treated as active. Unselect this instead of deleting accounts."""', 'verbose_name': '"""active"""'}), "(default=True, help_text=\n 'Designates whether this user should be treated as active. Unselect this instead of deleting accounts.'\n , verbose_name='active')\n", (1959, 2121), False, 'from django.db import migrations, models\n'), ((2146, 2234), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'default': 'django.utils.timezone.now', 'verbose_name': '"""date joined"""'}), "(default=django.utils.timezone.now, verbose_name=\n 'date joined')\n", (2166, 2234), False, 'from django.db import migrations, models\n'), ((2260, 2303), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)', 'null': '(True)'}), '(max_length=255, null=True)\n', (2276, 2303), False, 'from django.db import migrations, models\n'), ((2337, 2383), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)', 'null': '(True)'}), '(auto_now=True, null=True)\n', (2357, 2383), False, 'from django.db import migrations, models\n'), ((2411, 2617), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'choices': "[('bge_admin', 'BGE_Admin'), ('bge_branch_admin', 'BGE_Branch_Admin'), (\n 'agency_admin', 'Agency_Admin'), ('counselor', 'Counselor')]", 'max_length': '(140)', 'null': '(True)'}), "(blank=True, choices=[('bge_admin', 'BGE_Admin'), (\n 'bge_branch_admin', 'BGE_Branch_Admin'), ('agency_admin',\n 'Agency_Admin'), ('counselor', 'Counselor')], max_length=140, null=True)\n", (2427, 2617), False, 'from django.db import migrations, models\n'), ((2637, 2734), 'django.db.models.ImageField', 'models.ImageField', ([], {'blank': '(True)', 'null': '(True)', 'upload_to': 'osc_bge.users.models.user_directory_path'}), '(blank=True, null=True, upload_to=osc_bge.users.models.\n user_directory_path)\n', (2654, 2734), False, 'from django.db import migrations, models\n'), ((3141, 3308), 'django.db.models.OneToOneField', 'models.OneToOneField', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'primary_key': '(True)', 'related_name': '"""agency_admin"""', 'serialize': '(False)', 'to': 'settings.AUTH_USER_MODEL'}), "(on_delete=django.db.models.deletion.CASCADE,\n primary_key=True, related_name='agency_admin', serialize=False, to=\n settings.AUTH_USER_MODEL)\n", (3161, 3308), False, 'from django.db import migrations, models\n'), ((3329, 3426), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'null': '(True)', 'on_delete': 'django.db.models.deletion.SET_NULL', 'to': '"""agent.Agency"""'}), "(null=True, on_delete=django.db.models.deletion.SET_NULL,\n to='agent.Agency')\n", (3346, 3426), False, 'from django.db import migrations, models\n'), ((3562, 3695), 'django.db.models.OneToOneField', 'models.OneToOneField', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'primary_key': '(True)', 'serialize': '(False)', 'to': 'settings.AUTH_USER_MODEL'}), '(on_delete=django.db.models.deletion.CASCADE,\n primary_key=True, serialize=False, to=settings.AUTH_USER_MODEL)\n', (3582, 3695), False, 'from django.db import migrations, models\n'), ((3724, 3857), 'django.db.models.CharField', 
'models.CharField', ([], {'choices': "[('entrance', 'ENTRANCE'), ('accounting', 'ACCOUNTING'), ('admin', 'ADMIN')]", 'max_length': '(255)', 'null': '(True)'}), "(choices=[('entrance', 'ENTRANCE'), ('accounting',\n 'ACCOUNTING'), ('admin', 'ADMIN')], max_length=255, null=True)\n", (3740, 3857), False, 'from django.db import migrations, models\n'), ((3882, 3980), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'null': '(True)', 'on_delete': 'django.db.models.deletion.SET_NULL', 'to': '"""bge.BgeBranch"""'}), "(null=True, on_delete=django.db.models.deletion.SET_NULL,\n to='bge.BgeBranch')\n", (3899, 3980), False, 'from django.db import migrations, models\n'), ((4113, 4277), 'django.db.models.OneToOneField', 'models.OneToOneField', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'primary_key': '(True)', 'related_name': '"""counselor"""', 'serialize': '(False)', 'to': 'settings.AUTH_USER_MODEL'}), "(on_delete=django.db.models.deletion.CASCADE,\n primary_key=True, related_name='counselor', serialize=False, to=\n settings.AUTH_USER_MODEL)\n", (4133, 4277), False, 'from django.db import migrations, models\n'), ((4298, 4408), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'blank': '(True)', 'null': '(True)', 'on_delete': 'django.db.models.deletion.SET_NULL', 'to': '"""agent.Agency"""'}), "(blank=True, null=True, on_delete=django.db.models.\n deletion.SET_NULL, to='agent.Agency')\n", (4315, 4408), False, 'from django.db import migrations, models\n')] |
#!/usr/bin/env python
# some simple tests for the slurmlib module
import os
import os.path as osp
import time
import subprocess as sp
import datetime
# can't yet use pytest -- sniff
# import pytest
import unittest
from unittest import skipIf
import six
import fleet.slurmlib as slurmlib
from django.conf import settings
from django.test import TestCase, skipIfDBFeature
# NOTE: Here, select which SlurmScheduler to test.
# we select the DummySlurmScheduler by default, so that the automatic tests
# can run without slurm
# SlurmScheduler = slurmlib.SlurmScheduler
# SlurmScheduler = slurmlib.DummySlurmScheduler
ACC_STATE = slurmlib.BaseSlurmScheduler.ACC_STATE
ACC_JOB_ID = slurmlib.BaseSlurmScheduler.ACC_JOB_ID
ACC_PRIONUM = slurmlib.BaseSlurmScheduler.ACC_PRIONUM
PRIO_LOW = slurmlib.BaseSlurmScheduler.MIN_PRIO
PRIO_MEDIUM = PRIO_LOW + 1
PRIO_HIGH = PRIO_MEDIUM + 1
# this is the directory where the test jobs to submit to slurm will reside
# NOTE: the job number 02 must return a non-zero exit code for testing
FAIL_JOB_NUMBER = 2
NUM_JOBS = 5
TEST_DIR = osp.join(settings.KIVE_HOME, "fleet/slurm_test_files/slurmrundir")
def _submit_job_n(n, prio, afteroklst=None, afteranylst=None, sched_cls=None):
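    # Submit this job's sleepNN.sh script from its own working directory with the given priority and dependency lists.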
wdir = osp.join(TEST_DIR, "job%02d" % n)
jobname = "sleep%02d.sh" % n
return sched_cls.submit_job(
wdir,
jobname,
[],
prio,
1,
osp.join(wdir, "out.txt"),
osp.join(wdir, "err.txt"),
after_okay=afteroklst,
after_any=afteranylst
)
def submit_all(prio, sched_cls=None):
""" Submit all jobs with a certain priority."""
return [_submit_job_n(i, prio, sched_cls=sched_cls)
for i in range(1, NUM_JOBS + 1)]
def get_accounting_info(jhandles=None, sched_cls=None):
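    # Query the scheduler's accounting info and sanity-check every field of the returned records.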
curstates = sched_cls.get_accounting_info(job_handle_iter=jhandles)
if jhandles is not None and len(jhandles) > 0:
# check we have entries for all requested jhandles
jidset = set([jh.job_id for jh in jhandles])
gotset = set(curstates.keys())
assert gotset == jidset, "Did not get results from all submitted jobs"
cls = sched_cls
for jid, dct in curstates.items():
# makes sure all required fields are defined
assert cls.ACC_SET == set(dct.keys()), "inconsistent key set"
assert jid == dct[cls.ACC_JOB_ID]
prio = dct[cls.ACC_PRIONUM]
if prio is not None:
assert prio in cls.PRIO_SET, "invalid priority value"
for k in [cls.ACC_START_TIME, cls.ACC_END_TIME, cls.ACC_SUBMIT_TIME]:
tval = dct[k]
if tval is not None:
assert isinstance(tval, datetime.datetime), "wrong type of time field"
state = dct[cls.ACC_STATE]
assert state in cls.ALL_STATES, "illegal state"
return curstates
@skipIfDBFeature('is_mocked') # Doesn't use the database, but this test is slow.
class SlurmDummyTests(TestCase):
def setUp(self):
self.addTypeEqualityFunc(str, self.assertMultiLineEqual)
self.sched_cls = self.get_slurm_scheduler_class()
is_alive = self.sched_cls.slurm_is_alive(skip_extras=True)
if not is_alive:
raise RuntimeError("slurm is not alive")
def get_slurm_scheduler_class(self):
return slurmlib.DummySlurmScheduler
def tearDown(self):
self.sched_cls.shutdown()
def test_callit01(self, lverb=False):
""" Should return 0 """
n = 1
if lverb:
print("---test_callit01", n)
wdir = osp.join(TEST_DIR, "job%02d" % n)
jobname = "sleep%02d.sh" % n
# arglst, stderr, stdout = [], None, None
arglst = []
with open("/tmp/out.txt", "w") as stdout, open("/tmp/err.txt", "w") as stderr:
retval = slurmlib.callit(wdir, jobname, arglst, stdout, stderr)
if retval != 0:
print("the error is '%s'" % os.strerror(retval))
assert retval == 0, "expected retval 0"
if lverb:
print("---END test_callit01", n)
def test_callit02(self, lverb=False):
""" Should return 2 """
n = FAIL_JOB_NUMBER
if lverb:
print("---test_callit01", n)
wdir = osp.join(TEST_DIR, "job%02d" % n)
jobname = "sleep%02d.sh" % n
arglst = []
with open("/tmp/out.txt", "w") as stdout, open("/tmp/err.txt", "w") as stderr:
retval = slurmlib.callit(wdir, jobname, arglst, stdout, stderr)
assert retval == 2, "expected retval 2"
if lverb:
print("---END test_callit01", n)
def test_is_alive(self):
"""test_is_alive() should return True"""
is_alive = self.sched_cls.slurm_is_alive()
assert is_alive, "Calling is_alive fails"
def test_slurm_ident(self):
"""slurm_ident must return a string"""
idstr = self.sched_cls.slurm_ident()
assert isinstance(idstr, str), "slurm ident must be a string"
def test_submit_job01(self, lverb=False):
""" Submitting this job should succeed."""
if lverb:
print("--test_submit_job01")
jhandle = _submit_job_n(1, PRIO_MEDIUM, sched_cls=self.sched_cls)
if lverb:
print("submitted job", jhandle)
def test_submit_job02(self, lverb=False):
"""Submission should fail (nonexistent job script)
"""
if lverb:
print("--test_submit_job02")
prio = PRIO_MEDIUM
n, m = 1, 2
wdir = osp.join(TEST_DIR, "job%02d" % n)
jobname = "sleep%02d.sh" % m
with self.assertRaises(sp.CalledProcessError):
self.sched_cls.submit_job(
wdir,
jobname,
[],
prio,
1,
osp.join(wdir, "out.txt"),
osp.join(wdir, "err.txt"),
None
)
def test_submit_job03(self, lverb=False):
"""Submission should fail (priority a string instead of int)"""
if lverb:
print("--test_submit_job03")
prio = 'illegal priostring'
n = 1
wdir = osp.join(TEST_DIR, "job%02d" % n)
jobname = "sleep%02d.sh" % n
with self.assertRaises(RuntimeError):
self.sched_cls.submit_job(
wdir,
jobname,
[],
prio,
1,
osp.join(wdir, "out.txt"),
osp.join(wdir, "err.txt"),
None
)
def test_submit_job04(self, lverb=False):
"""Submit a job that should run, but returns a nonzero exit code.
I.e. submission should succeed, but the job should have a non-zero exit code.
"""
if lverb:
print("---test_submit_job04")
jhandle = _submit_job_n(FAIL_JOB_NUMBER, PRIO_MEDIUM, sched_cls=self.sched_cls)
if lverb:
print("successfully launched job %s, now waiting for its failure..." % jhandle)
time.sleep(2)
num_tries, i = 20, 0
curstate = jhandle.get_state()
if lverb:
print("gotstate", curstate)
while (i < num_tries) and (curstate != self.sched_cls.FAILED):
if lverb:
print(i, "curstate...", curstate)
time.sleep(5)
curstate = jhandle.get_state()
i += 1
assert curstate == self.sched_cls.FAILED, "failed to get a 'FAILED' state. got {}".format(curstate)
if lverb:
print("---test_submit_job04: Success, got an expected FAILED status")
def test_submit_job07(self, lverb=False):
"""Submission should fail (illegal cpu_number)"""
if lverb:
print("--test_submit_job07")
num_cpu = 0
prio = PRIO_MEDIUM
n = 1
wdir = osp.join(TEST_DIR, "job%02d" % n)
jobname = "sleep%02d.sh" % n
with self.assertRaises(sp.CalledProcessError):
self.sched_cls.submit_job(
wdir,
jobname,
[],
prio,
num_cpu,
osp.join(wdir, "out.txt"),
osp.join(wdir, "err.txt"),
None
)
if lverb:
print("--test_submit_job07 SUCCESS")
def test_dep_jobs01_okay(self, lverb=False):
"""Submit one job dependent on the other with an after_okay dependency.
Both jobs should succeed."""
if lverb:
print("--test_dep_jobs01_okay")
jobid_01 = _submit_job_n(1, PRIO_MEDIUM, sched_cls=self.sched_cls)
if lverb:
print("first job", jobid_01)
jobid_02 = _submit_job_n(3, PRIO_MEDIUM, [jobid_01], sched_cls=self.sched_cls)
if lverb:
print("dependent job", jobid_02)
my_handles = [jobid_01, jobid_02]
jobidlst = [j.job_id for j in my_handles]
time.sleep(2)
num_tries, i = 40, 0
curstate = get_accounting_info(my_handles, sched_cls=self.sched_cls)
while i < num_tries and curstate[jobid_02.job_id][ACC_STATE] != self.sched_cls.COMPLETED:
if lverb:
print("step %02d:" % i, [curstate[jid][ACC_STATE] for jid in jobidlst])
time.sleep(5)
curstate = get_accounting_info(my_handles, sched_cls=self.sched_cls)
i += 1
if i == num_tries:
raise RuntimeError("test inconclusive: didn't wait long enough")
assert curstate[jobid_01.job_id][ACC_STATE] == self.sched_cls.COMPLETED, "job01: failed to run successfully"
assert curstate[jobid_02.job_id][ACC_STATE] == self.sched_cls.COMPLETED, "job02: failed to run successfully"
if lverb:
print("--test_dep_jobs01_okay SUCCESS")
def test_dep_jobs01_any(self, lverb=False):
"""Submit one job dependent on the other with an after_any dependency.
Both jobs should succeed."""
if lverb:
print("--test_dep_jobs01_any")
jobid_01 = _submit_job_n(1, PRIO_MEDIUM, sched_cls=self.sched_cls)
if lverb:
print("first job", jobid_01)
jobid_02 = _submit_job_n(3, PRIO_MEDIUM, None, [jobid_01], sched_cls=self.sched_cls)
if lverb:
print("dependent job", jobid_02)
my_handles = [jobid_01, jobid_02]
jobidlst = [j.job_id for j in my_handles]
time.sleep(2)
num_tries, i = 40, 0
curstate = get_accounting_info(my_handles, sched_cls=self.sched_cls)
while i < num_tries and curstate[jobid_02.job_id][ACC_STATE] != self.sched_cls.COMPLETED:
if lverb:
print("step %02d:" % i, [curstate[jid][ACC_STATE] for jid in jobidlst])
time.sleep(5)
curstate = get_accounting_info(my_handles, sched_cls=self.sched_cls)
i += 1
if i == num_tries:
raise RuntimeError("test inconclusive: didn't wait long enough")
if lverb:
print("FINAL STATE", [curstate[jid][ACC_STATE] for jid in jobidlst])
assert curstate[jobid_01.job_id][ACC_STATE] == self.sched_cls.COMPLETED, "job01: failed to run successfully"
assert curstate[jobid_02.job_id][ACC_STATE] == self.sched_cls.COMPLETED, "job02: failed to run successfully"
if lverb:
print("--test_dep_jobs01_any SUCCESS")
def test_dep_jobs02_ok(self, lverb=False):
"""Submit job 01, and job 02 dependent on 01 with an after_ok dependency.
Job 01 will fail. Job 02 must be cancelled.
"""
if lverb:
print("--test_dep_jobs02_ok")
jobid_01 = _submit_job_n(FAIL_JOB_NUMBER, PRIO_MEDIUM, sched_cls=self.sched_cls)
if lverb:
print("first job that will fail:", jobid_01)
jobid_02 = _submit_job_n(3, PRIO_MEDIUM, [jobid_01], sched_cls=self.sched_cls)
if lverb:
print("dependent job:", jobid_02)
joblst = [jobid_01, jobid_02]
not_failed, num_tries, i = True, 40, 0
curstate = None
while (i < num_tries) and not_failed:
time.sleep(2)
curstate = get_accounting_info(joblst, sched_cls=self.sched_cls)
if lverb:
print("step %02d:" % i, curstate[jobid_01.job_id][ACC_STATE], curstate[jobid_02.job_id][ACC_STATE])
not_failed = curstate[jobid_01.job_id][ACC_STATE] != self.sched_cls.FAILED
i += 1
if i == num_tries:
raise RuntimeError("test inconclusive: didn't wait long enough")
if lverb:
print("job01 state:", curstate[jobid_01.job_id])
print("job02 state:", curstate[jobid_02.job_id])
assert curstate[jobid_01.job_id][ACC_STATE] == self.sched_cls.FAILED, "unexpected state 01"
assert curstate[jobid_02.job_id][ACC_STATE] == self.sched_cls.CANCELLED, "unexpected state 02"
if lverb:
print("--test_dep_jobs02_ok SUCCESS")
def test_dep_jobs02_any(self, lverb=False):
"""Submit job 01, and job 02 dependent on 01 with an after_any dependency.
Job 01 will fail. Job 02 must run anyway.
"""
if lverb:
print("--test_dep_jobs02_any")
jobid_01 = _submit_job_n(FAIL_JOB_NUMBER, PRIO_MEDIUM, sched_cls=self.sched_cls)
if lverb:
print("first job that will fail:", jobid_01)
jobid_02 = _submit_job_n(3, PRIO_MEDIUM, None, [jobid_01], sched_cls=self.sched_cls)
if lverb:
print("dependent job:", jobid_02)
joblst = [jobid_01, jobid_02]
jidlst = [jh.job_id for jh in joblst]
if lverb:
print("waiting for job 01 to fail")
not_failed, num_tries, i = True, 40, 0
curstate = None
while (i < num_tries) and not_failed:
time.sleep(2)
curstate = get_accounting_info(joblst, sched_cls=self.sched_cls)
if lverb:
print("step %02d:" % i, [curstate[jid][ACC_STATE] for jid in jidlst])
not_failed = curstate[jobid_01.job_id][ACC_STATE] != self.sched_cls.FAILED
i += 1
if i == num_tries:
raise RuntimeError("test inconclusive: didn't wait long enough")
# wait for jobid_02 to start running
if lverb:
print("OK, waiting for job 02 to run")
is_running, num_tries, i = curstate[jobid_02.job_id][ACC_STATE] == self.sched_cls.COMPLETED, 40, 0
while (i < num_tries) and not is_running:
time.sleep(2)
curstate = get_accounting_info(joblst, sched_cls=self.sched_cls)
if lverb:
print("step %02d:" % i, [curstate[jid][ACC_STATE] for jid in jidlst])
is_running = curstate[jobid_02.job_id][ACC_STATE] == self.sched_cls.COMPLETED
if i == num_tries:
raise RuntimeError("failed: job 02 did not complete")
if lverb:
print("job01 state:", curstate[jobid_01.job_id])
print("job02 state:", curstate[jobid_02.job_id])
state_02 = curstate[jobid_02.job_id][ACC_STATE]
ok_state_02 = state_02 == self.sched_cls.RUNNING or state_02 == self.sched_cls.COMPLETED
assert curstate[jobid_01.job_id][ACC_STATE] == self.sched_cls.FAILED, "unexpected state 01"
assert ok_state_02, "unexpected state 02"
if lverb:
print("--test_dep_jobs02_any SUCCESS")
def test_dep_jobs01_multi(self, lverb=False):
"""Submit job 01 that will fail.
        Submit job 02 that will succeed.
Submit job 03, after_any on 01, and after_ok on 02.
Job 03 must be run.
"""
if lverb:
print("--test_dep_jobs01_multi")
jobid_01 = _submit_job_n(FAIL_JOB_NUMBER, PRIO_MEDIUM, sched_cls=self.sched_cls)
if lverb:
print("first job that will fail:", jobid_01)
jobid_02 = _submit_job_n(3, PRIO_MEDIUM, sched_cls=self.sched_cls)
if lverb:
print("second job that will succeed", jobid_02)
jobid_03 = _submit_job_n(1, PRIO_MEDIUM, [jobid_02], [jobid_01], sched_cls=self.sched_cls)
if lverb:
print("third job that should run", jobid_02)
joblst = [jobid_01, jobid_02, jobid_03]
jobidlst = [j.job_id for j in joblst]
still_running, num_tries, i = True, 40, 0
curstate = None
while (i < num_tries) and still_running:
time.sleep(2)
curstate = get_accounting_info(joblst, sched_cls=self.sched_cls)
if lverb:
print("step %02d:" % i, [curstate[jid][ACC_STATE] for jid in jobidlst])
still_running = (curstate[jobid_01.job_id][ACC_STATE] != self.sched_cls.FAILED) or\
(curstate[jobid_02.job_id][ACC_STATE] != self.sched_cls.COMPLETED)
i += 1
if i == num_tries:
raise RuntimeError("test inconclusive: didn't wait long enough")
state_01 = curstate[jobid_01.job_id][ACC_STATE]
state_02 = curstate[jobid_02.job_id][ACC_STATE]
state_03 = curstate[jobid_03.job_id][ACC_STATE]
if lverb:
print("state after loop:", [curstate[jid][ACC_STATE] for jid in jobidlst])
assert state_01 == self.sched_cls.FAILED, "unexpected state 01: " + state_01
assert state_02 == self.sched_cls.COMPLETED, "unexpected state 02: " + state_02
assert state_03 in self.sched_cls.RUNNING_STATES, "unexpected state 03: " + state_03
if lverb:
print("--test_dep_jobs01_multi SUCCESS")
def test_cancel_jobs01(self, lverb=False):
"""Submit a job, then cancel it"""
if lverb:
print("--test_cancel_jobs01")
jobid_01 = _submit_job_n(1, PRIO_MEDIUM, sched_cls=self.sched_cls)
if lverb:
print("submitted job", jobid_01)
print("wait for running status...")
num_tries, i, curstate = 40, 0, jobid_01.get_state()
while i < num_tries and curstate not in self.sched_cls.RUNNING_STATES:
if lverb:
print("step %02d:" % i, curstate)
time.sleep(2)
i += 1
curstate = jobid_01.get_state()
assert curstate in self.sched_cls.RUNNING_STATES, "Job is not running, cannot test cancelling it"
if lverb:
print("job is running, now cancelling job 01...")
self.sched_cls.job_cancel(jobid_01)
if lverb:
print("wait for cancelled status....")
i, curstate = 0, jobid_01.get_state()
while i < num_tries and curstate in self.sched_cls.RUNNING_STATES:
if lverb:
print("step %02d:" % i, curstate)
time.sleep(5)
i += 1
curstate = jobid_01.get_state()
assert curstate == self.sched_cls.CANCELLED, "job is not cancelled: got {}".format(curstate)
if lverb:
print("--test_cancel_jobs01 SUCCESS")
def test_cancel_jobs02(self, lverb=False):
"""Submit a job, then a second one dependent on the first.
When we cancel the first, slurm should cancel the second one as well.
"""
if lverb:
print("---test_cancel_jobs02")
jobid_01 = _submit_job_n(1, PRIO_MEDIUM, sched_cls=self.sched_cls)
if lverb:
print("started 01:", jobid_01)
time.sleep(2)
jobid_02 = _submit_job_n(3, PRIO_MEDIUM, [jobid_01], sched_cls=self.sched_cls)
if lverb:
print("started 02 (dependent on 01):", jobid_02)
joblst = [jobid_01, jobid_02]
jobidlst = [j.job_id for j in joblst]
are_ready, i, num_tries = False, 0, 40
while i < num_tries and not are_ready:
time.sleep(2)
i += 1
curstate = get_accounting_info(joblst, sched_cls=self.sched_cls)
if lverb:
print("step %02d:" % i, [curstate[jid][ACC_STATE] for jid in jobidlst])
are_ready = curstate[jobid_01.job_id][ACC_STATE] in self.sched_cls.RUNNING_STATES
assert are_ready, "failed to submit the two jobs..."
if lverb:
print("OK, two jobs submitted, now cancelling job 01")
self.sched_cls.job_cancel(jobid_01)
check_tuple = (self.sched_cls.CANCELLED, self.sched_cls.CANCELLED)
curstate = get_accounting_info(joblst, sched_cls=self.sched_cls)
are_cancelled, i = False, 0
while i < num_tries and not are_cancelled:
if lverb:
print("step %02d:" % i, [curstate[jid][ACC_STATE] for jid in jobidlst])
time.sleep(2)
i += 1
curstate = get_accounting_info(joblst, sched_cls=self.sched_cls)
are_cancelled = tuple([curstate[jid][ACC_STATE] for jid in jobidlst]) == check_tuple
if lverb:
print("final states", [curstate[jid][ACC_STATE] for jid in jobidlst])
assert curstate[jobid_01.job_id][ACC_STATE] == self.sched_cls.CANCELLED,\
"unexpected state 01: got {}".format(curstate[jobid_01.job_id][ACC_STATE])
assert curstate[jobid_02.job_id][ACC_STATE] == self.sched_cls.CANCELLED,\
"unexpected state 02: got {}".format(curstate[jobid_02.job_id][ACC_STATE])
if lverb:
print("---test_cancel_jobs02 SUCCESS")
def test_get_state_01(self, lverb=False):
"""Submit a job, then follow its state using squeue.
If slurm accounting is not properly installed, we will never get
a COMPLETED result.
"""
if lverb:
print("--test_get_state_01")
jhandle = _submit_job_n(1, PRIO_MEDIUM, sched_cls=self.sched_cls)
if lverb:
print("submitted job", jhandle)
i, num_tries, has_finished = 0, 20, False
curstate = None
while (i < num_tries) and not has_finished:
curstate = jhandle.get_state()
if lverb:
print("step %02d:" % i, curstate)
has_finished = (curstate in self.sched_cls.STOPPED_SET)
time.sleep(5)
i += 1
assert curstate == self.sched_cls.COMPLETED, "unexpected final state: got {}".format(curstate)
if lverb:
print("--test_get_state_01 SUCCESS")
def test_set_priority_01(self, lverb=False):
"""Start some jobs with a given priority, then change it.
See if this was successful.
"""
if lverb:
print("--test_set_priority_01")
# first, submit a number of high prio jobs in order to fill the queue
# we can only change the priority of a job if it is not yet running
low_prio = self.sched_cls.MIN_PRIO
high_prio = low_prio + 1
for i in range(4):
submit_all(high_prio, sched_cls=self.sched_cls)
if lverb:
print("submitting low_prio jobs...")
jobhandles = submit_all(low_prio, sched_cls=self.sched_cls)
jobidlst = [jh.job_id for jh in jobhandles]
if lverb:
print("job_ids", jobidlst)
time.sleep(2)
cs = get_accounting_info(jobhandles, sched_cls=self.sched_cls)
priolst = [(cs[jid][ACC_STATE], cs[jid][ACC_PRIONUM]) for jid in jobidlst]
if lverb:
print("state+priority after submission", priolst)
any_done = any([state == self.sched_cls.COMPLETED for state, prio in priolst])
if any_done:
raise RuntimeError("Test failed as jobs completed before we could change prio")
# now change the priority..
self.sched_cls.set_job_priority(jobhandles, high_prio)
test_passed = False
while cs[jobidlst[0]][ACC_STATE] != self.sched_cls.PENDING and not test_passed:
if lverb:
print("waiting")
time.sleep(2)
cs = get_accounting_info(jobhandles, sched_cls=self.sched_cls)
priolst = [(cs[jid][ACC_STATE], cs[jid][ACC_PRIONUM]) for jid in jobidlst]
test_passed = all([prio == high_prio for state, prio in priolst])
if lverb:
print(" after wait")
assert test_passed, "setting high prio failed"
if lverb:
print("Test passed")
if lverb:
cs = get_accounting_info(jobhandles, sched_cls=self.sched_cls)
priolst = [(cs[jid][ACC_STATE], cs[jid][ACC_PRIONUM]) for jid in jobidlst]
print("final states:", priolst)
def test_set_priority_02(self):
"""Set an illegal job priority type (str instead of int).
This should raise an exception."""
low_prio = PRIO_LOW
jobhandles = submit_all(low_prio, sched_cls=self.sched_cls)
with self.assertRaises(RuntimeError):
self.sched_cls.set_job_priority(jobhandles, 'HI_PRIO')
def test_set_priority_03(self):
"""Set an job priority that is higher than MAX_PRIO.
This should simply set the actual priority to MAX_PRIO"""
low_prio = PRIO_LOW
jobhandles = submit_all(low_prio, sched_cls=self.sched_cls)
jidlst = [jh.job_id for jh in jobhandles]
time.sleep(1)
high_prio = PRIO_HIGH
self.sched_cls.set_job_priority(jobhandles, high_prio+10)
cs = get_accounting_info(jobhandles, sched_cls=self.sched_cls)
priolst = [cs[jid][ACC_PRIONUM] for jid in jidlst]
assert all([p == high_prio for p in priolst]), "Failed to set high priority"
def test_acc_info_01(self, lverb=False):
""" Get_accounting_info must return information about all job handles
requested.
Where accounting info is not available, it must return the UNKNOWN state.
NOTE: in particular with slurm, accounting information is not available in the following
situation:
        job A is PENDING in the queue,
        job B is dependent on A (with after_ok or after_any).
        ==> accounting will report no information about job B.
"""
if lverb:
print("--test_acc_info_01:")
low_prio = PRIO_LOW
if lverb:
print("submitting low_prio jobs...")
jobhandles = submit_all(low_prio, sched_cls=self.sched_cls)
job01 = jobhandles[0]
job02 = _submit_job_n(1, PRIO_MEDIUM, [job01], sched_cls=self.sched_cls)
jobhandles.append(job02)
jidlst = [jh.job_id for jh in jobhandles]
time.sleep(1)
i, numtests, is_finished = 0, 40, False
while i < numtests and not is_finished:
cs = get_accounting_info(jobhandles, sched_cls=self.sched_cls)
if lverb:
print(i, [(cs[jid][ACC_STATE], cs[jid][ACC_PRIONUM]) for jid in jidlst])
time.sleep(2)
is_finished = cs[job02.job_id][ACC_STATE] == self.sched_cls.COMPLETED
i += 1
# --
assert i < numtests,\
"job02 failed to complete in 40 iterations. got state: {}".format(cs[job02.job_id][ACC_STATE])
if lverb:
print("--test_acc_info_01(): SUCCESS")
def test_multi_check_output_echo(self):
expected_output = u'Lorem ipsum\n'
output = slurmlib.multi_check_output(['echo', 'Lorem', 'ipsum'])
assert isinstance(output, six.string_types), "string expected, but got {}".format(type(output))
self.assertEqual(expected_output, output)
def test_multi_check_output_echoxxx(self):
expected_py2_error = (
'[Errno 2] No such file or directory: echoxxx Lorem ipsum')
expected_py3_error = (
"[Errno 2] No such file or directory: 'echoxxx': echoxxx Lorem ipsum: 'echoxxx'")
with self.assertRaises(OSError) as context:
slurmlib.multi_check_output(['echoxxx', 'Lorem', 'ipsum'])
got_err = str(context.exception)
assert got_err == expected_py2_error or got_err == expected_py3_error, "unexpected error"
# self.assertEqual(expected_error, )
def show_squeue_jobs01(self, lverb=False):
"""Submit all jobs with a low priority, then an additional one with high
priority.
List all jobs on the queue until the run queue is empty.
NOTE: this test will not terminate if some other process is adding jobs to the
queue.
NOTE: this routine does not assert anything or check for correctness.
        It can be used by the user to see how priorities can/should work.
Exactly how priorities are handled by slurm is a configuration issue,
and priorities could also be ignored.
"""
if lverb:
print("--test_squeue_jobs01")
low_prio = PRIO_LOW
hi_prio = PRIO_HIGH
if lverb:
print("submitting low_prio jobs...")
jh_lst = submit_all(low_prio, sched_cls=self.sched_cls)
time.sleep(1)
jobid_01 = _submit_job_n(1, hi_prio)
if lverb:
print("submitted a high prio job", jobid_01)
jh_lst.append(jobid_01)
is_done, i, num_tries = False, 0, 40
job_state_dct = None
while (i < num_tries) and not is_done:
if lverb:
print("step %d/%d" % (i, num_tries))
job_state_dct = get_accounting_info(jh_lst, sched_cls=self.sched_cls)
for j_state in sorted(job_state_dct.values(), key=lambda a: a[ACC_JOB_ID]):
# has_finished = (j_state["ST"] == 'CD' or j_state["ST"] == 'UKN')
if lverb:
print("%02d: %5s %s" % (i, j_state[ACC_JOB_ID], j_state[ACC_STATE]))
print()
is_done = job_state_dct[jobid_01.job_id][ACC_STATE] == self.sched_cls.COMPLETED
time.sleep(5)
i += 1
if lverb:
print("exited loop...FINAL STATE:")
for j_state in sorted(job_state_dct.values(), key=lambda a: a[ACC_JOB_ID]):
print("FINAL: %5s %s" % (j_state[ACC_JOB_ID], j_state[ACC_STATE]))
print()
assert i < num_tries, "failed to wait for completed jobs!"
if lverb:
print("--test_squeue_jobs01 SUCCESS")
@skipIf(not settings.RUN_SLURM_TESTS, "Slurm tests disabled.")
class SlurmTests(SlurmDummyTests):
def get_slurm_scheduler_class(self):
return slurmlib.SlurmScheduler
if __name__ == "__main__":
unittest.main()
| [
"fleet.slurmlib.callit",
"unittest.skipIf",
"os.path.join",
"fleet.slurmlib.multi_check_output",
"time.sleep",
"django.test.skipIfDBFeature",
"unittest.main",
"os.strerror"
] | [((1071, 1137), 'os.path.join', 'osp.join', (['settings.KIVE_HOME', '"""fleet/slurm_test_files/slurmrundir"""'], {}), "(settings.KIVE_HOME, 'fleet/slurm_test_files/slurmrundir')\n", (1079, 1137), True, 'import os.path as osp\n'), ((2834, 2862), 'django.test.skipIfDBFeature', 'skipIfDBFeature', (['"""is_mocked"""'], {}), "('is_mocked')\n", (2849, 2862), False, 'from django.test import TestCase, skipIfDBFeature\n'), ((30015, 30076), 'unittest.skipIf', 'skipIf', (['(not settings.RUN_SLURM_TESTS)', '"""Slurm tests disabled."""'], {}), "(not settings.RUN_SLURM_TESTS, 'Slurm tests disabled.')\n", (30021, 30076), False, 'from unittest import skipIf\n'), ((1230, 1263), 'os.path.join', 'osp.join', (['TEST_DIR', "('job%02d' % n)"], {}), "(TEST_DIR, 'job%02d' % n)\n", (1238, 1263), True, 'import os.path as osp\n'), ((30225, 30240), 'unittest.main', 'unittest.main', ([], {}), '()\n', (30238, 30240), False, 'import unittest\n'), ((1406, 1431), 'os.path.join', 'osp.join', (['wdir', '"""out.txt"""'], {}), "(wdir, 'out.txt')\n", (1414, 1431), True, 'import os.path as osp\n'), ((1441, 1466), 'os.path.join', 'osp.join', (['wdir', '"""err.txt"""'], {}), "(wdir, 'err.txt')\n", (1449, 1466), True, 'import os.path as osp\n'), ((3545, 3578), 'os.path.join', 'osp.join', (['TEST_DIR', "('job%02d' % n)"], {}), "(TEST_DIR, 'job%02d' % n)\n", (3553, 3578), True, 'import os.path as osp\n'), ((4238, 4271), 'os.path.join', 'osp.join', (['TEST_DIR', "('job%02d' % n)"], {}), "(TEST_DIR, 'job%02d' % n)\n", (4246, 4271), True, 'import os.path as osp\n'), ((5514, 5547), 'os.path.join', 'osp.join', (['TEST_DIR', "('job%02d' % n)"], {}), "(TEST_DIR, 'job%02d' % n)\n", (5522, 5547), True, 'import os.path as osp\n'), ((6151, 6184), 'os.path.join', 'osp.join', (['TEST_DIR', "('job%02d' % n)"], {}), "(TEST_DIR, 'job%02d' % n)\n", (6159, 6184), True, 'import os.path as osp\n'), ((7021, 7034), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (7031, 7034), False, 'import time\n'), ((7840, 7873), 'os.path.join', 'osp.join', (['TEST_DIR', "('job%02d' % n)"], {}), "(TEST_DIR, 'job%02d' % n)\n", (7848, 7873), True, 'import os.path as osp\n'), ((8920, 8933), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (8930, 8933), False, 'import time\n'), ((10398, 10411), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (10408, 10411), False, 'import time\n'), ((19324, 19337), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (19334, 19337), False, 'import time\n'), ((23004, 23017), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (23014, 23017), False, 'import time\n'), ((25042, 25055), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (25052, 25055), False, 'import time\n'), ((26309, 26322), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (26319, 26322), False, 'import time\n'), ((27057, 27112), 'fleet.slurmlib.multi_check_output', 'slurmlib.multi_check_output', (["['echo', 'Lorem', 'ipsum']"], {}), "(['echo', 'Lorem', 'ipsum'])\n", (27084, 27112), True, 'import fleet.slurmlib as slurmlib\n'), ((28709, 28722), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (28719, 28722), False, 'import time\n'), ((3794, 3848), 'fleet.slurmlib.callit', 'slurmlib.callit', (['wdir', 'jobname', 'arglst', 'stdout', 'stderr'], {}), '(wdir, jobname, arglst, stdout, stderr)\n', (3809, 3848), True, 'import fleet.slurmlib as slurmlib\n'), ((4437, 4491), 'fleet.slurmlib.callit', 'slurmlib.callit', (['wdir', 'jobname', 'arglst', 'stdout', 'stderr'], {}), '(wdir, jobname, arglst, stdout, stderr)\n', (4452, 4491), True, 'import fleet.slurmlib as 
slurmlib\n'), ((7316, 7329), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (7326, 7329), False, 'import time\n'), ((9260, 9273), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (9270, 9273), False, 'import time\n'), ((10738, 10751), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (10748, 10751), False, 'import time\n'), ((12094, 12107), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (12104, 12107), False, 'import time\n'), ((13799, 13812), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (13809, 13812), False, 'import time\n'), ((14491, 14504), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (14501, 14504), False, 'import time\n'), ((16401, 16414), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (16411, 16414), False, 'import time\n'), ((18087, 18100), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (18097, 18100), False, 'import time\n'), ((18668, 18681), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (18678, 18681), False, 'import time\n'), ((19694, 19707), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (19704, 19707), False, 'import time\n'), ((20555, 20568), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (20565, 20568), False, 'import time\n'), ((22006, 22019), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (22016, 22019), False, 'import time\n'), ((23734, 23747), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (23744, 23747), False, 'import time\n'), ((26617, 26630), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (26627, 26630), False, 'import time\n'), ((27607, 27665), 'fleet.slurmlib.multi_check_output', 'slurmlib.multi_check_output', (["['echoxxx', 'Lorem', 'ipsum']"], {}), "(['echoxxx', 'Lorem', 'ipsum'])\n", (27634, 27665), True, 'import fleet.slurmlib as slurmlib\n'), ((5803, 5828), 'os.path.join', 'osp.join', (['wdir', '"""out.txt"""'], {}), "(wdir, 'out.txt')\n", (5811, 5828), True, 'import os.path as osp\n'), ((5846, 5871), 'os.path.join', 'osp.join', (['wdir', '"""err.txt"""'], {}), "(wdir, 'err.txt')\n", (5854, 5871), True, 'import os.path as osp\n'), ((6431, 6456), 'os.path.join', 'osp.join', (['wdir', '"""out.txt"""'], {}), "(wdir, 'out.txt')\n", (6439, 6456), True, 'import os.path as osp\n'), ((6474, 6499), 'os.path.join', 'osp.join', (['wdir', '"""err.txt"""'], {}), "(wdir, 'err.txt')\n", (6482, 6499), True, 'import os.path as osp\n'), ((8135, 8160), 'os.path.join', 'osp.join', (['wdir', '"""out.txt"""'], {}), "(wdir, 'out.txt')\n", (8143, 8160), True, 'import os.path as osp\n'), ((8178, 8203), 'os.path.join', 'osp.join', (['wdir', '"""err.txt"""'], {}), "(wdir, 'err.txt')\n", (8186, 8203), True, 'import os.path as osp\n'), ((29581, 29594), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (29591, 29594), False, 'import time\n'), ((3921, 3940), 'os.strerror', 'os.strerror', (['retval'], {}), '(retval)\n', (3932, 3940), False, 'import os\n')] |
####
# CODE TAKEN FROM https://github.com/mgrankin/over9000
####
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from torch.optim.optimizer import Optimizer
import math

# The step() method below bumps a module-level counter via ``global iter_idx``;
# it is defined here so the first optimizer step does not raise a NameError.
iter_idx = 0
class AdamW(Optimizer):
"""Implements AdamW algorithm.
It has been proposed in `Adam: A Method for Stochastic Optimization`_.
Arguments:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float, optional): learning rate (default: 1e-3)
betas (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its square (default: (0.9, 0.999))
eps (float, optional): term added to the denominator to improve
numerical stability (default: 1e-8)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
        use_variance (boolean, optional): kept for interface compatibility;
            not referenced by step() in this implementation (default: True)
        warmup (int, optional): number of steps over which the learning rate is
            linearly warmed up from 1e-6 to ``lr`` (default: 4000)
Adam: A Method for Stochastic Optimization:
https://arxiv.org/abs/1412.6980
On the Convergence of Adam and Beyond:
https://openreview.net/forum?id=ryQu7f-RZ
"""
def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8,
weight_decay=0, use_variance=True, warmup = 4000):
defaults = dict(lr=lr, betas=betas, eps=eps,
weight_decay=weight_decay, use_variance=True, warmup = warmup)
print('======== Warmup: {} ========='.format(warmup))
super(AdamW, self).__init__(params, defaults)
def __setstate__(self, state):
super(AdamW, self).__setstate__(state)
def step(self, closure=None):
global iter_idx
iter_idx += 1
grad_list = list()
mom_list = list()
mom_2rd_list = list()
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data.float()
if grad.is_sparse:
raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
p_data_fp32 = p.data.float()
state = self.state[p]
if len(state) == 0:
state['step'] = 0
state['exp_avg'] = torch.zeros_like(p_data_fp32)
state['exp_avg_sq'] = torch.zeros_like(p_data_fp32)
else:
state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32)
state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32)
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
beta1, beta2 = group['betas']
state['step'] += 1
exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)
exp_avg.mul_(beta1).add_(1 - beta1, grad)
denom = exp_avg_sq.sqrt().add_(group['eps'])
bias_correction1 = 1 - beta1 ** state['step']
bias_correction2 = 1 - beta2 ** state['step']
if group['warmup'] > state['step']:
scheduled_lr = 1e-6 + state['step'] * (group['lr'] - 1e-6) / group['warmup']
else:
scheduled_lr = group['lr']
step_size = scheduled_lr * math.sqrt(bias_correction2) / bias_correction1
if group['weight_decay'] != 0:
p_data_fp32.add_(-group['weight_decay'] * scheduled_lr, p_data_fp32)
p_data_fp32.addcdiv_(-step_size, exp_avg, denom)
p.data.copy_(p_data_fp32)
return loss | [
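# ---------------------------------------------------------------------------
# Illustrative usage sketch (added for clarity; not part of the original
# over9000 code). The model and data below are placeholders -- only the
# optimizer arguments come from AdamW.__init__ above. Assumes a PyTorch version
# that still accepts the legacy add_/addcmul_/addcdiv_ call signatures used in
# step().
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    import torch.nn as nn

    model = nn.Linear(10, 2)
    optimizer = AdamW(model.parameters(), lr=1e-3, betas=(0.9, 0.999),
                      weight_decay=1e-2, warmup=100)
    inputs, targets = torch.randn(8, 10), torch.randn(8, 2)
    loss = nn.functional.mse_loss(model(inputs), targets)
    loss.backward()
    optimizer.step()       # also advances the module-level iter_idx counter
    optimizer.zero_grad()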
"torch.zeros_like",
"math.sqrt"
] | [((3043, 3072), 'torch.zeros_like', 'torch.zeros_like', (['p_data_fp32'], {}), '(p_data_fp32)\n', (3059, 3072), False, 'import torch\n'), ((3115, 3144), 'torch.zeros_like', 'torch.zeros_like', (['p_data_fp32'], {}), '(p_data_fp32)\n', (3131, 3144), False, 'import torch\n'), ((4065, 4092), 'math.sqrt', 'math.sqrt', (['bias_correction2'], {}), '(bias_correction2)\n', (4074, 4092), False, 'import math\n')] |
# Copyright (c) 2018 NVIDIA Corporation
import boto3
import botocore
import time
import threading
import json
import hashlib
from milano.backends.utils import SSHClient
class EC2Instance:
def __init__(self, resource, username, private_key_path):
self._resource = resource
self._private_key_path = private_key_path
self._username = username
self._ssh_client = None
self._terminated = False
def is_running(self):
return self.state() == 'running'
def is_terminated(self):
s = self.state()
return s != 'pending' and s != 'running'
def state(self):
self._reload()
s = self._resource.state['Name']
if s == 'terminated':
self._terminated = True
return s
def public_ip(self):
self._reload()
return self._resource.public_ip_address
def instance_id(self):
return self._resource.instance_id
def _reload(self):
if not self._terminated:
self._resource.reload()
def __try_connect(self):
if self._resource.state['Name'] != 'running':
raise Exception("instance not running")
if self._ssh_client is None:
client = SSHClient(self._private_key_path)
client.connect(self.public_ip(), self._username)
self._ssh_client = client
def exec_command(self, command):
self.__try_connect()
return self._ssh_client.exec_command(command)
def exec_command_blocking(self, command, retries=3):
for i in range(retries):
try:
self.__try_connect()
return self._ssh_client.exec_command_blocking(command)
except Exception as e:
if i < retries - 1:
try:
if self._ssh_client is not None:
self._ssh_client.close()
except:
pass
self._ssh_client = None
else:
raise e
def keep_alive(self):
# As long as this file remains less than 5 minutes old, the instance
# won't terminate.
try:
self.exec_command_blocking("touch /home/ubuntu/.milano_keep_alive")
except:
pass
def is_driver_working(self):
try:
ec, _, _ = self.exec_command_blocking("nvidia-smi")
return ec == 0
except:
return False
def datasets_present(self, datasets):
try:
for i in range(len(datasets)):
ec, _, _ = self.exec_command_blocking("ls /home/ubuntu/data/" + str(i))
if ec != 0:
return False
except:
return False
return True
def terminate(self):
return self._resource.terminate()
def startup_script(datasets):
dataset_mounts = "\n"
for i in range(len(datasets)):
if datasets[i]['type'] == 's3':
dataset_mounts += "aws s3 sync {src} {dst}\n".format(
src="s3://{bucket}/{prefix}".format(
bucket=datasets[i]['bucket'],
prefix=datasets[i].get('prefix', "")),
dst="/home/ubuntu/data/" + str(i),
)
else:
raise Exception("unrecognized dataset source type '{}'".format(
datasets[i]['type']))
# TODO All of the software installation should be baked into an AMI instead,
# this is pretty slow.
return """#!/bin/bash
touch /home/ubuntu/.milano_keep_alive
chmod 777 /home/ubuntu/.milano_keep_alive
eval "while true; do find /home/ubuntu/.milano_keep_alive -mmin +5 -exec shutdown -h now {} + && sleep 10; done" &>/dev/null &disown;
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
curl -s -L https://nvidia.github.io/nvidia-docker/gpgkey | sudo apt-key add -
curl -s -L https://nvidia.github.io/nvidia-docker/ubuntu16.04/amd64/nvidia-docker.list | sudo tee /etc/apt/sources.list.d/nvidia-docker.list
groupadd docker
usermod -aG docker ubuntu
apt-get update
apt-get install -y awscli
""" + dataset_mounts + """
apt-get install -y docker-ce
apt-get install -y nvidia-docker2
apt-get install -y nvidia-384
modprobe nvidia
systemctl restart docker
"""
class EC2InstanceManager:
def __init__(self, count, key_name, private_key_path, region_name,
spot_instances, datasets, iam_role, user_params):
self._desired_count = count
self._key_name = key_name
self._private_key_path = private_key_path
self._region_name = region_name
self._spot_instances = spot_instances
self._datasets = datasets
self._iam_role = iam_role
self._user_params = user_params
self._instances = {}
self._active_instance_ids = []
self._thread = None
self._lock = threading.Lock()
self._stop_event = threading.Event()
self._thread = threading.Thread(target=self._management_thread_main)
self._thread.start()
def _ami_for_region(self):
# ubuntu 16.04 HVM SSD
ami = {
"us-east-1": "ami-5c150e23",
"us-west-1": "ami-4d6a852e",
"ap-northeast-1": "ami-e5b3ca08",
"sa-east-1": "ami-01316f8dfe32c01e2",
"ap-southeast-1": "ami-01fde464a811ead8a",
"ca-central-1": "ami-4975f82d",
"ap-south-1": "ami-0dcc9657fd6ff85bc",
"eu-central-1": "ami-9fbfb174",
"eu-west-1": "ami-0a8458313ef39d6f6",
"cn-north-1": "ami-0510c868",
"cn-northwest-1": "ami-f96c7b9b",
"us-gov-west-1": "ami-3a4dd15b",
"ap-northeast-2": "ami-09960a24a97b8087b",
"ap-southeast-2": "ami-fc26869e",
"us-west-2": "ami-529fb82a",
"us-east-2": "ami-0eb3ba416aed8a6a4",
"eu-west-2": "ami-52d12435",
"ap-northeast-3": "ami-0d5d86281edca346f",
"eu-west-3": "ami-0a06fa501d424d43f"
}
return ami.get(self._region_name, "")
def _launch(self, launch_count):
s = boto3.Session(region_name=self._region_name)
iam_client = s.client('iam')
iam = s.resource("iam")
ec2 = s.resource("ec2")
# unique role per dataset config
if self._iam_role is None:
self._iam_role, _ = get_or_create_role(
"milano-" + sha1short(json.dumps(self._datasets)),
self._datasets, iam, iam_client)
profile_name, _ = get_or_create_instance_profile(
self._iam_role + "-ip", self._iam_role, iam)
sg_id = get_or_create_ssh_security_group("milano-worker-ssh", ec2)
create_params = {
'InstanceType': "p3.2xlarge",
'ImageId': self._ami_for_region(),
'KeyName': self._key_name,
'MinCount': launch_count,
'MaxCount': launch_count,
'SecurityGroupIds': [sg_id],
'BlockDeviceMappings': [{
"DeviceName": "/dev/xvda",
"Ebs": {
"DeleteOnTermination": True,
# TODO expose this as a top level config option?
"VolumeSize": 64
}
}],
'TagSpecifications': [{
'ResourceType': 'instance',
'Tags': [{
'Key': 'Name',
'Value': 'milano-worker',
}]
}],
"IamInstanceProfile": {
"Name": profile_name,
},
# If ~/.milano_keep_alive isn't touched every 5 minutes, the instance
# will auto terminate.
'InstanceInitiatedShutdownBehavior': "terminate",
'UserData': startup_script(self._datasets),
}
if self._spot_instances:
create_params['InstanceMarketOptions'] = {
'MarketType': 'spot',
'SpotOptions': {
'SpotInstanceType': 'one-time',
'InstanceInterruptionBehavior': 'terminate'
}
}
create_params.update(self._user_params)
instance_resources = ec2.create_instances(**create_params)
with self._lock:
for instance_resource in instance_resources:
self._instances[instance_resource.instance_id] = EC2Instance(
instance_resource, "ubuntu", self._private_key_path)
def active_instance_ids(self):
with self._lock:
return self._active_instance_ids.copy()
def get_instance(self, instance_id):
with self._lock:
return self._instances[instance_id]
def terminate(self):
self._stop_event.set()
self._thread.join()
for _, instance in self._instances.items():
instance.terminate()
def _management_thread_main(self):
while not self._stop_event.is_set():
next_active_ids = []
alive_count = 0
for instance_id, instance in self._instances.items():
if not instance.is_terminated():
alive_count += 1
if instance.is_running():
instance.keep_alive()
if instance.is_driver_working() and instance.datasets_present(
self._datasets):
next_active_ids.append(instance_id)
if alive_count < self._desired_count:
needed_count = self._desired_count - alive_count
print("launching {count} EC2 instances and mounting datasets. this may take a few minutes...".
format(count=needed_count))
try:
self._launch(needed_count)
except Exception as e:
print(e)
pass
with self._lock:
self._active_instance_ids = next_active_ids
time.sleep(10)
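# ---------------------------------------------------------------------------
# Illustrative construction of the manager above (kept as a comment because it
# would launch real EC2 instances and incur cost). Argument names match
# EC2InstanceManager.__init__; the concrete values are placeholders.
#
#   manager = EC2InstanceManager(
#       count=2,
#       key_name="my-keypair",                        # existing EC2 key pair name
#       private_key_path="~/.ssh/my-keypair.pem",
#       region_name="us-west-2",
#       spot_instances=True,
#       datasets=[{"type": "s3", "bucket": "my-bucket", "prefix": "data"}],
#       iam_role=None,                                 # auto-created when None
#       user_params={},
#   )
#   ...
#   manager.terminate()                                # stops the thread and instances
# ---------------------------------------------------------------------------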
def get_or_create_ssh_security_group(name, ec2):
try:
groups = ec2.security_groups.filter(GroupNames=[name])
for group in groups:
return group.group_id
except botocore.exceptions.ClientError as e:
if e.response['Error']['Code'] != 'InvalidGroup.NotFound':
raise e
# No existing security group, create one.
sg = ec2.create_security_group(Description=name, GroupName=name)
sg.authorize_ingress(
IpProtocol='tcp', CidrIp='0.0.0.0/0', FromPort=22, ToPort=22)
return sg.group_id
def get_or_create_role(name, datasets, iam, client):
try:
role = iam.Role(name)
return role.role_name, role.role_id
except Exception as e:
pass
role = iam.create_role(RoleName=name, AssumeRolePolicyDocument=json.dumps({
"Statement": [{
"Effect": "Allow",
"Principal": {
"Service": ["ec2.amazonaws.com"]
},
"Action": ["sts:AssumeRole"]
}]
}))
for i in range(len(datasets)):
    bucket = datasets[i]['bucket']
prefix = datasets[i].get('prefix', "")
resp = client.put_role_policy(
RoleName=name,
PolicyName=name + "-policy-" + str(i),
PolicyDocument=json.dumps({
"Statement":[
{
"Action": ["s3:ListBucket"],
"Effect": "Allow",
"Resource": ["arn:aws:s3:::{}".format(bucket)],
"Condition":{"StringLike":{"s3:prefix":["{}/*".format(prefix)]}}
},
{
"Effect": "Allow",
"Action": ["s3:Get*"],
"Resource": ["arn:aws:s3:::{}/{}*".format(bucket, prefix)]
}
]
}
)
)
return role.role_name, role.role_id
def get_or_create_instance_profile(name, role, iam):
try:
instance_profile = iam.InstanceProfile(name)
return name, instance_profile.instance_profile_id
except Exception as e:
pass
instance_profile = iam.create_instance_profile(InstanceProfileName=name)
instance_profile.add_role(RoleName=role)
# create_instances will fail if we try to use this instance profile too soon.
time.sleep(10)
return name, instance_profile.instance_profile_id
def sha1short(str):
return hashlib.sha1(str.encode()).hexdigest()[:6] | [
"threading.Lock",
"boto3.Session",
"json.dumps",
"time.sleep",
"threading.Event",
"milano.backends.utils.SSHClient",
"threading.Thread"
] | [((11083, 11097), 'time.sleep', 'time.sleep', (['(10)'], {}), '(10)\n', (11093, 11097), False, 'import time\n'), ((4510, 4526), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (4524, 4526), False, 'import threading\n'), ((4550, 4567), 'threading.Event', 'threading.Event', ([], {}), '()\n', (4565, 4567), False, 'import threading\n'), ((4587, 4640), 'threading.Thread', 'threading.Thread', ([], {'target': 'self._management_thread_main'}), '(target=self._management_thread_main)\n', (4603, 4640), False, 'import threading\n'), ((5605, 5649), 'boto3.Session', 'boto3.Session', ([], {'region_name': 'self._region_name'}), '(region_name=self._region_name)\n', (5618, 5649), False, 'import boto3\n'), ((1118, 1151), 'milano.backends.utils.SSHClient', 'SSHClient', (['self._private_key_path'], {}), '(self._private_key_path)\n', (1127, 1151), False, 'from milano.backends.utils import SSHClient\n'), ((8981, 8995), 'time.sleep', 'time.sleep', (['(10)'], {}), '(10)\n', (8991, 8995), False, 'import time\n'), ((9740, 9872), 'json.dumps', 'json.dumps', (["{'Statement': [{'Effect': 'Allow', 'Principal': {'Service': [\n 'ec2.amazonaws.com']}, 'Action': ['sts:AssumeRole']}]}"], {}), "({'Statement': [{'Effect': 'Allow', 'Principal': {'Service': [\n 'ec2.amazonaws.com']}, 'Action': ['sts:AssumeRole']}]})\n", (9750, 9872), False, 'import json\n'), ((5885, 5911), 'json.dumps', 'json.dumps', (['self._datasets'], {}), '(self._datasets)\n', (5895, 5911), False, 'import json\n')] |
import os
# The minimal Pump response.
skeleton = {"status": 200, "headers": {}, "body": ""}
# Return a skeleton response with the given body.
def with_body(body):
return dict(skeleton, body=body)
# Return a redirect response to the given URL.
def redirect(url):
return {"status": 302, "headers": {"Location": url}, "body": ""}
# Return the given response updated with the given status.
def with_status(response, status):
return dict(response, status=status)
# Return the given response updated with the given header.
def with_header(response, key, value):
return dict(response,
headers=dict(response.get("headers", {}), **{key: value}))
# Return the given response updated with the given content-type.
def with_content_type(response, content_type):
return with_header(response, 'content_type', content_type)
# Returns a response containing the contents of the file at the given path.
# Options:
#
# - root: the root path for the given file path
# - index_files: whether to look for index.* files in directories (true by
# default)
def file_response(path, options={}):
file = _get_file(path, options)
if file:
return with_body(open(file, 'r'))
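# Illustrative usage of the helpers above (paths and values are placeholders):
#
#   ok = with_content_type(with_body("hello"), "text/plain")
#   missing = with_status(with_body("not found"), 404)
#   page = file_response("index.html", {"root": "/var/www/site"})
#   # file_response returns None when no such file exists under the root.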
def _get_file(path, options={}):
  root = options.get("root")
  if root:
    if _is_path_safe(root, path):
      file = os.path.join(root, path)
    else:
      # An unsafe path (one escaping the root) is treated as "not found".
      return None
  else:
    file = path
if os.path.isdir(file):
if options.get("index_files", True):
return _find_index(file)
elif os.path.exists(file):
return file
def _is_path_safe(root, path):
return os.path.realpath(os.path.join(root, path)).startswith(
os.path.realpath(root))
def _find_index(dir):
indexes = [f for f in os.listdir(dir) if f.lower().startswith('index.')]
if indexes:
return indexes[0]
| [
"os.path.exists",
"os.listdir",
"os.path.join",
"os.path.realpath",
"os.path.isdir"
] | [((1370, 1389), 'os.path.isdir', 'os.path.isdir', (['file'], {}), '(file)\n', (1383, 1389), False, 'import os\n'), ((1470, 1490), 'os.path.exists', 'os.path.exists', (['file'], {}), '(file)\n', (1484, 1490), False, 'import os\n'), ((1608, 1630), 'os.path.realpath', 'os.path.realpath', (['root'], {}), '(root)\n', (1624, 1630), False, 'import os\n'), ((1315, 1339), 'os.path.join', 'os.path.join', (['root', 'path'], {}), '(root, path)\n', (1327, 1339), False, 'import os\n'), ((1679, 1694), 'os.listdir', 'os.listdir', (['dir'], {}), '(dir)\n', (1689, 1694), False, 'import os\n'), ((1566, 1590), 'os.path.join', 'os.path.join', (['root', 'path'], {}), '(root, path)\n', (1578, 1590), False, 'import os\n')] |
import math
import numpy as np
import pandas as pd
from PIL import Image
from sklearn.model_selection import train_test_split
from torch.utils.data import Dataset
from torchvision.transforms import Compose, RandomHorizontalFlip, RandomCrop, ToTensor, Normalize
from utils.augmentation import RandAugment
cifar10_mean = (0.4914, 0.4822, 0.4465)
cifar10_std = (0.2471, 0.2435, 0.2616)
def get_lemon_datasets(args):
# divide data into train, test
df_label = pd.read_csv(args.train_label_file_path)
img_paths = df_label['file_name'].values
labels = df_label['label'].values
train_i, test_i, train_l, test_l = train_test_split(
img_paths, labels,
test_size=102,
shuffle=True,
random_state=args.seed,
stratify=labels)
# divide train into label, unlabel
train_labeled_idxs, train_unlabeled_idxs = x_u_split(
train_l,
args.num_labeled,
args.num_classes,
args.expand_labels,
args.batch_size,
args.eval_step)
# dataset
train_labeled_dataset = LemonDataset(
train_labeled_idxs,
train_i,
train_l,
transform=get_transforms_labeled(args.resize))
train_unlabeled_dataset = LemonDataset(
train_unlabeled_idxs,
train_i,
train_l,
transform=TransformMPL(
args,
args.randaug,
args.resize,
mean=cifar10_mean,
std=cifar10_std))
test_dataset = LemonDataset(
range(len(test_i)),
test_i,
test_l,
transform=get_transforms_val())
return train_labeled_dataset, train_unlabeled_dataset, test_dataset
def x_u_split(
labels,
num_labeled: int,
num_classes: int,
expand_labels: bool,
batch_size: int,
eval_step: int):
label_per_class = num_labeled // num_classes
labels = np.array(labels)
labeled_idx = []
# unlabeled data: all training data
unlabeled_idx = np.array(range(len(labels)))
for i in range(num_classes):
idx = np.where(labels == i)[0]
idx = np.random.choice(idx, label_per_class, False)
labeled_idx.extend(idx)
labeled_idx = np.array(labeled_idx)
assert len(labeled_idx) == num_labeled
if expand_labels or num_labeled < batch_size:
num_expand_x = math.ceil(batch_size * eval_step / num_labeled)
labeled_idx = np.hstack([labeled_idx for _ in range(num_expand_x)])
np.random.shuffle(labeled_idx)
return labeled_idx, unlabeled_idx
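# Worked example of the expansion above (illustrative numbers): with
# num_labeled=40, batch_size=16 and eval_step=1024, num_expand_x =
# ceil(16 * 1024 / 40) = 410, so the 40 labeled indices are tiled 410 times
# (16,400 entries) and then shuffled; the labeled loader can therefore cover a
# full evaluation cycle without running out of samples.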
class LemonDataset(Dataset):
def __init__(
self,
indexs,
img_paths,
targets,
transform=None,
target_transform=None) -> None:
self.indexs = indexs
if indexs is not None:
self.img_paths = img_paths[indexs]
self.targets = targets[indexs]
self.transform = transform
self.target_transform = target_transform
def __len__(self) -> int:
return len(self.indexs)
def __getitem__(self, index: int):
img = Image.open(self.img_paths[index]).convert('RGB')
target = self.targets[index]
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
return img, target
class TransformMPL(object):
def __init__(
self,
args,
randaug: tuple,
resize: int,
mean: tuple,
std: tuple
) -> None:
n, m = randaug
self.ori = Compose([
RandomHorizontalFlip(),
RandomCrop(
size=resize,
padding=int(resize * 0.125),
padding_mode='reflect')
])
self.aug = Compose([
RandomHorizontalFlip(),
RandomCrop(
size=resize,
padding=int(resize * 0.125),
padding_mode='reflect'),
RandAugment(args, n=n, m=m)
])
self.normalize = Compose([
ToTensor(),
Normalize(mean=mean, std=std)
])
def __call__(self, x):
ori = self.ori(x)
aug = self.aug(x)
return self.normalize(ori), self.normalize(aug)
def get_transforms_labeled(resize: int) -> Compose:
return Compose([
RandomHorizontalFlip(),
RandomCrop(
size=resize,
padding=int(resize * 0.125),
padding_mode='reflect'
),
ToTensor(),
Normalize(mean=cifar10_mean, std=cifar10_std)
])
def get_transforms_val() -> Compose:
return Compose([
ToTensor(),
Normalize(mean=cifar10_mean, std=cifar10_std)
])
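# ---------------------------------------------------------------------------
# Illustrative wiring of the pieces above (kept as a comment; the args values
# and CSV path are placeholders, and RandAugment may read further attributes
# from args that are not listed here).
#
#   from argparse import Namespace
#   args = Namespace(
#       train_label_file_path="train_images.csv",
#       seed=42, num_labeled=100, num_classes=4, expand_labels=True,
#       batch_size=16, eval_step=1024, resize=256, randaug=(2, 10),
#   )
#   labeled_ds, unlabeled_ds, test_ds = get_lemon_datasets(args)
# ---------------------------------------------------------------------------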
| [
"PIL.Image.open",
"math.ceil",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"numpy.random.choice",
"numpy.where",
"torchvision.transforms.RandomHorizontalFlip",
"numpy.array",
"utils.augmentation.RandAugment",
"torchvision.transforms.Normalize",
"torchvision.transforms.ToTensor",
"numpy.random.shuffle"
] | [((468, 507), 'pandas.read_csv', 'pd.read_csv', (['args.train_label_file_path'], {}), '(args.train_label_file_path)\n', (479, 507), True, 'import pandas as pd\n'), ((631, 740), 'sklearn.model_selection.train_test_split', 'train_test_split', (['img_paths', 'labels'], {'test_size': '(102)', 'shuffle': '(True)', 'random_state': 'args.seed', 'stratify': 'labels'}), '(img_paths, labels, test_size=102, shuffle=True,\n random_state=args.seed, stratify=labels)\n', (647, 740), False, 'from sklearn.model_selection import train_test_split\n'), ((1898, 1914), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (1906, 1914), True, 'import numpy as np\n'), ((2208, 2229), 'numpy.array', 'np.array', (['labeled_idx'], {}), '(labeled_idx)\n', (2216, 2229), True, 'import numpy as np\n'), ((2477, 2507), 'numpy.random.shuffle', 'np.random.shuffle', (['labeled_idx'], {}), '(labeled_idx)\n', (2494, 2507), True, 'import numpy as np\n'), ((2112, 2157), 'numpy.random.choice', 'np.random.choice', (['idx', 'label_per_class', '(False)'], {}), '(idx, label_per_class, False)\n', (2128, 2157), True, 'import numpy as np\n'), ((2348, 2395), 'math.ceil', 'math.ceil', (['(batch_size * eval_step / num_labeled)'], {}), '(batch_size * eval_step / num_labeled)\n', (2357, 2395), False, 'import math\n'), ((2073, 2094), 'numpy.where', 'np.where', (['(labels == i)'], {}), '(labels == i)\n', (2081, 2094), True, 'import numpy as np\n'), ((4418, 4440), 'torchvision.transforms.RandomHorizontalFlip', 'RandomHorizontalFlip', ([], {}), '()\n', (4438, 4440), False, 'from torchvision.transforms import Compose, RandomHorizontalFlip, RandomCrop, ToTensor, Normalize\n'), ((4582, 4592), 'torchvision.transforms.ToTensor', 'ToTensor', ([], {}), '()\n', (4590, 4592), False, 'from torchvision.transforms import Compose, RandomHorizontalFlip, RandomCrop, ToTensor, Normalize\n'), ((4602, 4647), 'torchvision.transforms.Normalize', 'Normalize', ([], {'mean': 'cifar10_mean', 'std': 'cifar10_std'}), '(mean=cifar10_mean, std=cifar10_std)\n', (4611, 4647), False, 'from torchvision.transforms import Compose, RandomHorizontalFlip, RandomCrop, ToTensor, Normalize\n'), ((4723, 4733), 'torchvision.transforms.ToTensor', 'ToTensor', ([], {}), '()\n', (4731, 4733), False, 'from torchvision.transforms import Compose, RandomHorizontalFlip, RandomCrop, ToTensor, Normalize\n'), ((4743, 4788), 'torchvision.transforms.Normalize', 'Normalize', ([], {'mean': 'cifar10_mean', 'std': 'cifar10_std'}), '(mean=cifar10_mean, std=cifar10_std)\n', (4752, 4788), False, 'from torchvision.transforms import Compose, RandomHorizontalFlip, RandomCrop, ToTensor, Normalize\n'), ((3103, 3136), 'PIL.Image.open', 'Image.open', (['self.img_paths[index]'], {}), '(self.img_paths[index])\n', (3113, 3136), False, 'from PIL import Image\n'), ((3657, 3679), 'torchvision.transforms.RandomHorizontalFlip', 'RandomHorizontalFlip', ([], {}), '()\n', (3677, 3679), False, 'from torchvision.transforms import Compose, RandomHorizontalFlip, RandomCrop, ToTensor, Normalize\n'), ((3872, 3894), 'torchvision.transforms.RandomHorizontalFlip', 'RandomHorizontalFlip', ([], {}), '()\n', (3892, 3894), False, 'from torchvision.transforms import Compose, RandomHorizontalFlip, RandomCrop, ToTensor, Normalize\n'), ((4047, 4074), 'utils.augmentation.RandAugment', 'RandAugment', (['args'], {'n': 'n', 'm': 'm'}), '(args, n=n, m=m)\n', (4058, 4074), False, 'from utils.augmentation import RandAugment\n'), ((4134, 4144), 'torchvision.transforms.ToTensor', 'ToTensor', ([], {}), '()\n', (4142, 4144), False, 'from 
torchvision.transforms import Compose, RandomHorizontalFlip, RandomCrop, ToTensor, Normalize\n'), ((4158, 4187), 'torchvision.transforms.Normalize', 'Normalize', ([], {'mean': 'mean', 'std': 'std'}), '(mean=mean, std=std)\n', (4167, 4187), False, 'from torchvision.transforms import Compose, RandomHorizontalFlip, RandomCrop, ToTensor, Normalize\n')] |
import dbus
import dbus.mainloop.glib  # explicit import so dbus.mainloop.glib.DBusGMainLoop resolves
from .constants import *
from .adapter import BluetoothAdapter
class BluetoothBase(object):
def __init__(self):
dbus.mainloop.glib.DBusGMainLoop(set_as_default=True)
self.bus = dbus.SystemBus()
self.adapter = self._findAdapter()
def _getManagedObjects(self):
manager = dbus.Interface(self.bus.get_object(SERVICE_NAME, "/"),
OBJECT_IFACE)
return manager.GetManagedObjects()
def _findInterface(self, interface):
paths = []
objects = self._getManagedObjects()
        for path, ifaces in objects.items():  # items() keeps this Python 3 compatible
device = ifaces.get(interface)
if device is None:
continue
paths.append(path)
return paths
def _getDevice(self, path):
"""Get a device from a dbus path"""
return self.bus.get_object(SERVICE_NAME, path)
def _findAdapter(self):
adapters = self._findInterface(ADAPTER_IFACE)
if adapters:
device = self._getDevice(adapters[0])
return BluetoothAdapter(device)
else:
return None
def getAllAdapters(self):
adapters = self._findInterface(ADAPTER_IFACE)
out = []
for adapter in adapters:
device = self._getDevice(adapter)
out.append(BluetoothAdapter(device))
return out
def changeAdapter(self, adapter):
self.adapter = adapter
def getInterface(self, interface, path):
return dbus.Interface(self._getDevice(path), interface)
def getProperties(self, path):
return self.getInterface(PROPERTIES_IFACE, path)
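# Illustrative usage (requires a running BlueZ service on the system D-Bus;
# kept as a comment, output is indicative only):
#
#   bt = BluetoothBase()              # picks the first adapter found, or None
#   for adapter in bt.getAllAdapters():
#       print(adapter)                # each entry wraps an org.bluez adapter object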
| [
"dbus.SystemBus",
"dbus.mainloop.glib.DBusGMainLoop"
] | [((139, 192), 'dbus.mainloop.glib.DBusGMainLoop', 'dbus.mainloop.glib.DBusGMainLoop', ([], {'set_as_default': '(True)'}), '(set_as_default=True)\n', (171, 192), False, 'import dbus\n'), ((212, 228), 'dbus.SystemBus', 'dbus.SystemBus', ([], {}), '()\n', (226, 228), False, 'import dbus\n')] |
import numpy as np
from lumicks import pylake
import pytest
def test_scans(h5_file):
f = pylake.File.from_h5py(h5_file)
if f.format_version == 2:
scan = f.scans["Scan1"]
assert repr(scan) == "Scan(pixels=(5, 4))"
reference_timestamps = [[2.006250e+10, 2.109375e+10, 2.206250e+10, 2.309375e+10],
[2.025000e+10, 2.128125e+10, 2.225000e+10, 2.328125e+10],
[2.043750e+10, 2.146875e+10, 2.243750e+10, 2.346875e+10],
[2.062500e+10, 2.165625e+10, 2.262500e+10, 2.365625e+10],
[2.084375e+10, 2.187500e+10, 2.284375e+10, 2.387500e+10]]
assert np.allclose(scan.timestamps, np.transpose(reference_timestamps))
assert scan.num_frames == 1
assert scan.has_fluorescence
assert not scan.has_force
assert scan.pixels_per_line == 5
assert scan.lines_per_frame == 4
assert len(scan.infowave) == 64
assert scan.rgb_image.shape == (4, 5, 3)
assert scan.red_image.shape == (4, 5)
assert scan.blue_image.shape == (4, 5)
assert scan.green_image.shape == (4, 5)
with pytest.raises(NotImplementedError):
scan["1s":"2s"]
| [
"numpy.transpose",
"lumicks.pylake.File.from_h5py",
"pytest.raises"
] | [((95, 125), 'lumicks.pylake.File.from_h5py', 'pylake.File.from_h5py', (['h5_file'], {}), '(h5_file)\n', (116, 125), False, 'from lumicks import pylake\n'), ((736, 770), 'numpy.transpose', 'np.transpose', (['reference_timestamps'], {}), '(reference_timestamps)\n', (748, 770), True, 'import numpy as np\n'), ((1205, 1239), 'pytest.raises', 'pytest.raises', (['NotImplementedError'], {}), '(NotImplementedError)\n', (1218, 1239), False, 'import pytest\n')] |
"""
This code implements the Pierre Dellacherie algorithm; for details, see
https://blog.csdn.net/qq_41882147/article/details/80005763
"""
import os
import random
import pygame
from game import *
from gameconst import *
class PierreDellacherie():
"""
    Compute the Pierre Dellacherie heuristic score for a specific piece placement
"""
def __init__(self, field_width, field_height, A):
self.field_map = None
self.field_width = field_width
self.field_height = field_height
self.landing_height = 0
self.eroded_piece_cells_metric = 0
self.board_row_transitions = 0
self.board_col_transitions = 0
self.board_buried_holes = 0
self.board_wells = 0
self.a1, self.a2, self.a3, self.a4, self.a5, self.a6 = A
def copyMap(self, field_map):
"""
        Copy the current playing field so different piece placements can be tried on the copy
"""
self.field_map = [[0] * self.field_width for _ in range(self.field_height)]
for y in range(self.field_height):
for x in range(self.field_width):
if type(field_map[y][x]) is Brick:
self.field_map[y][x] = 1
elif field_map[y][x] == 1:
self.field_map[y][x] = 2
def checkLine(self, line):
for brick in self.field_map[line]:
if brick == 0:
return False
return True
def eliminateLines(self):
lines = 0
for y0 in list(range(self.field_height))[::-1]:
while self.checkLine(y0):
lines += 1
for y in list(range(y0 + 1))[::-1]:
for x in range(self.field_width):
if y == y0:
self.field_map[y][x] = 0
elif self.field_map[y][x] == 1:
self.field_map[y + 1][x] = 1
self.field_map[y][x] = 0
elif self.field_map[y][x] == 2:
self.field_map[y + 1][x] = 2
self.field_map[y][x] = 0
return lines
def getLandingHeight(self, position, layout):
"""
        Compute the distance from the placed piece's center of gravity to the bottom of the playing field
"""
self.landing_height = 0
for (x, y) in layout:
self.landing_height += self.field_height - (position[1] + y)
self.landing_height /= 4
def getErodedPieceCellsMetric(self, lines):
"""
        Compute the number of rows cleared by this placement multiplied by the number of cells of the placed piece that were eliminated
"""
self.eroded_piece_cells_metric = 0
bricks = 0
for y in range(self.field_height):
for x in range(self.field_width):
bricks = bricks + 1 if self.field_map[y][x] == 2 else bricks
self.eroded_piece_cells_metric = lines * (4 - bricks)
def getBoardRowTransitions(self):
"""
        For each row of the field, scanning left to right, going from an empty cell to a filled cell
        is one "transition", and filled to empty is another. Compute the total row transitions after placement
"""
self.board_row_transitions = 0
for y in range(self.field_height):
for x in range(self.field_width - 1):
if x == 0 and self.field_map[y][x] == 0:
self.board_row_transitions += 1
if self.field_map[y][x] == 0 and self.field_map[y][x + 1] != 0:
self.board_row_transitions += 1
if self.field_map[y][x] != 0 and self.field_map[y][x + 1] == 0:
self.board_row_transitions += 1
if self.field_map[y][self.field_width - 1] == 0:
self.board_row_transitions += 1
def getBoardColTransitions(self):
"""
        Compute the total number of column transitions after the piece is placed
"""
self.board_col_transitions = 0
for x in range(self.field_width):
for y in range(self.field_height - 1):
if self.field_map[y][x] == 0 and self.field_map[y + 1][x] != 0:
self.board_col_transitions += 1
if self.field_map[y][x] != 0 and self.field_map[y + 1][x] == 0:
self.board_col_transitions += 1
if self.field_map[self.field_height - 1][x] == 0:
self.board_col_transitions += 1
def getBoardBuriedHoles(self):
"""
        Compute the total number of buried "holes" (empty cells directly below a filled cell) over all columns
"""
self.board_buried_holes = 0
for x in range(self.field_width):
for y in range(self.field_height - 1):
if self.field_map[y][x] != 0 and self.field_map[y + 1][x] == 0:
self.board_buried_holes += 1
def getBoardWells(self):
"""
        Compute the cumulative sum of "well" depths over all columns
"""
self.board_wells = 0
for x in range(self.field_width):
is_hole = False
hole_deep = 0
for y in range(self.field_height):
if not is_hole and self.field_map[y][x] == 0 and ((x == 0 and self.field_map[y][x + 1] != 0) or \
(x == self.field_width - 1 and self.field_map[y][x - 1] != 0) or \
(0 < x < self.field_width - 1 and self.field_map[y][x - 1] != 0 and self.field_map[y][x + 1] != 0)):
is_hole = True
hole_deep += 1
self.board_wells += hole_deep
elif is_hole and self.field_map[y][x] == 0:
hole_deep += 1
self.board_wells += hole_deep
else:
is_hole = False
hole_deep = 0
def initialize(self, position, layout, field_map):
self.copyMap(field_map)
self.getLandingHeight(position, layout)
self.getErodedPieceCellsMetric(self.eliminateLines())
self.getBoardRowTransitions()
self.getBoardColTransitions()
self.getBoardBuriedHoles()
self.getBoardWells()
def evaluate(self, position, layout, field_map):
self.initialize(position, layout, field_map)
score = self.a1 * self.landing_height \
+ self.a2 * self.eroded_piece_cells_metric \
+ self.a3 * self.board_row_transitions \
+ self.a4 * self.board_col_transitions \
+ self.a5 * self.board_buried_holes \
+ self.a6 * self.board_wells
return score
class AI():
"""
    Find the placement with the highest heuristic score and move the piece there
"""
def __init__(self, field_width, field_height, A):
self.evaluation = PierreDellacherie(field_width, field_height, A)
self.field_width = field_width
self.field_height = field_height
def getAllPossibleLocation(self, block, layout, field_map):
"""
        Find all feasible horizontal positions for the piece in a given orientation
"""
all_possible_position = []
for x in range(self.field_width):
if block.isLegal(layout, (x, -4), field_map) is not State.Middle:
all_possible_position.append(x)
return all_possible_position
def findBottomPosition(self, block, x, layout, field_map):
"""
        Find the row where the falling piece finally lands on top of the existing stack
"""
y = -4
while block.isLegal(layout, (x, y), field_map) is not State.Bottom:
y += 1
return y - 1
def dropBlock(self, x0, y0, layout, field_map):
"""
        Simulate placing the piece at the target bottom position
"""
for (x, y) in layout:
if 0 <= y0 + y < self.field_height:
field_map[y0 + y][x0 + x] = 1
if y0 + y < 0:
return False
return True
def resetMap(self, field_map):
"""
        Restore the field to its state before the simulated placement by erasing the temporary cells
"""
count = 0
for y in range(self.field_height):
for x in range(self.field_width):
if field_map[y][x] == 1:
field_map[y][x] = 0
count += 1
if count == 4:
return
def getNewMap(self, block, position, direction, field_map):
"""
        Move the piece to the target position through the interfaces provided by the game
"""
while block.direction is not direction:
block.rotate(field_map)
while block.position[0] > position[0]:
block.left(field_map)
while block.position[0] < position[0]:
block.right(field_map)
while not block.is_stop:
block.down(field_map)
def ai(self, block, field_map):
best_position = (float('-inf'), (-1, -1), 0)
for direction in range(len(block.layouts)):
for x in self.getAllPossibleLocation(block, block.layouts[direction], field_map):
y = self.findBottomPosition(block, x, block.layouts[direction], field_map)
if self.dropBlock(x, y, block.layouts[direction], field_map):
score = self.evaluation.evaluate((x, y), block.layouts[direction], field_map)
if score > best_position[0]:
best_position = (score, (x, y), direction)
self.resetMap(field_map)
if best_position[0] > float('-inf'):
self.getNewMap(block, best_position[1], best_position[2], field_map)
return True
else:
return False
class AIGame(Game):
def __init__(self):
super(AIGame, self).__init__(10, 20)
def checkEvents(self):
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
exit(0)
def start(self, A):
self.initialize()
self.initializePygame()
self.ai = AI(self.field_width, self.field_height, A)
while not self.block_factory.is_failed and self.ai.ai(self.block_factory.cur_block, self.field_map):
self.checkEvents()
self.update()
self.draw()
return self.lines_num
def startWithoutGUI(self, A):
self.initialize()
self.ai = AI(self.field_width, self.field_height, A)
while not self.block_factory.is_failed and self.ai.ai(self.block_factory.cur_block, self.field_map):
self.update()
print("\r" + "Lines: " + str(self.lines_num), end="", flush=True)
return self.lines_num
if __name__ == '__main__':
A = [-4.500158825082766, 3.4181268101392694, -3.2178882868487753, -9.348695305445199, -7.899265427351652, -3.3855972247263626]
game = AIGame()
lines_num = game.start(A)
#lines_num = game.startWithoutGUI(A)
| [
"pygame.quit",
"pygame.event.get"
] | [((9187, 9205), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (9203, 9205), False, 'import pygame\n'), ((9265, 9278), 'pygame.quit', 'pygame.quit', ([], {}), '()\n', (9276, 9278), False, 'import pygame\n')] |
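An illustrative sketch (not in the original file) of how evaluate() scores a placement: a plain weighted sum of the six board features. The weights are the ones hard-coded in __main__ above; the feature values are made up purely for this example.
A = [-4.500158825082766, 3.4181268101392694, -3.2178882868487753,
     -9.348695305445199, -7.899265427351652, -3.3855972247263626]
# Hypothetical features for one candidate placement:
# landing height, eroded piece cells metric, row transitions,
# column transitions, buried holes, wells.
features = [3.5, 2, 14, 11, 1, 4]
score = sum(w * f for w, f in zip(A, features))
print("Pierre Dellacherie score:", score)   # the AI keeps the placement with the highest score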
from torch.utils.data import Dataset
from copy import deepcopy
class Subset(Dataset):
"""Subset of a dataset at specified indices.
Modified from: https://pytorch.org/docs/stable/data.html?highlight=subset#torch.utils.data.Subset
Arguments:
dataset (Dataset): the dataset
indices (sequence): indices in the whole set selected for subset
transform (callable, optional): transformation to apply to the dataset. If None,
the dataset transformation is unchanged. Default: None.
"""
def __init__(self, dataset, indices, transform=None):
self.dataset = deepcopy(dataset)
self.indices = indices
if transform is not None:
self.dataset.transform = transform
def __getitem__(self, idx):
return self.dataset[self.indices[idx]]
def __len__(self):
return len(self.indices)
| [
"copy.deepcopy"
] | [((615, 632), 'copy.deepcopy', 'deepcopy', (['dataset'], {}), '(dataset)\n', (623, 632), False, 'from copy import deepcopy\n')] |
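A short usage sketch (added here, not in the original): wrapping a toy TensorDataset with the Subset class above. It assumes PyTorch is installed; no transform is passed, so the copied dataset keeps its original one.
import torch
from torch.utils.data import TensorDataset
# Toy dataset of ten (feature, label) pairs.
full = TensorDataset(torch.arange(10).float().unsqueeze(1), torch.arange(10))
# Keep only the even-indexed samples.
subset = Subset(full, indices=[0, 2, 4, 6, 8])
print(len(subset))   # 5
print(subset[1])     # second kept sample, i.e. original index 2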
import pandas as pd
import numpy as np
import git
import os
import sys
import bokeh.io
import bokeh.application
import bokeh.application.handlers
import bokeh.models
import bokeh.plotting as bkp
from bokeh.models import Span
import holoviews as hv
from pathlib import Path
# from bokeh.io import export_png
#-- Setup paths
# Get parent directory using git
repo = git.Repo("./", search_parent_directories=True)
homedir = repo.working_dir
# Change working directory to parent directory
os.chdir(homedir)
# Add 'Dan' directory to the search path for imports
sys.path.append('Dan')
# Import our custom cube managing functions
import cube_formatter as cf
#-- Setup bokeh
bokeh.io.output_notebook()
hv.extension('bokeh')
#-- Control parameters
# Top N counties to plot with the most deaths
# Set to -1 to plot all
plotN = 20
shift = 20
# Data Manipulation flags (should match those used in creating submission file)
isAllocCounties = True         # Flag to distribute state deaths amongst counties
isComputeDaily = False # Flag to translate cummulative data to daily counts
#- Plot-type control flags
isStateWide = False # Flag to plot state-wise data (will use nyt_states file for true_df)
# The raw cube won't be affected so make sure it is also state-wise data
# AND cumulative since there is only cumulative nyt_us_states data
isCumul = True # Flag to denote that the plot should be cumulative, not daily deaths
# ** Only affects county-level data since state-wide is implicitly cumulative
# This sets which county-wide nyt file is used and sets the plot y-axis label
# Key days (should match those used in creating the cube)
global_dayzero = pd.to_datetime('2020 Jan 21')
# Day until which model was trained (train_til in epid model)
# Leave as None to not display a boundary
boundary = '2020 May 10'
# Day to use for allocating to counties
# Leave as None to use most recent date
# OR use '2020-04-23' format to allocate based on proportions from that day
alloc_day = '2020-05-10'
# Flag to choose whether to save .svg of figures
is_saveSVG = False
# Filename (including path) for saving .svg files when is_saveSVG=True
# county, state, and fips will be appended to the name to differentiate plots
svg_flm = 'Dan/MidtermFigs/CountyWideDaily2/'
#-- Files to utilize
# Filename for cube of model data
# should be (row=sample, col=day, pane=state) with state FIPS as beef in row1
mat_model = 'Alex\\PracticeOutputs\\fresh.mat'#'Dan\\train_til_today.csv'
# Reference file to treat as "true" death counts
csv_true = 'data\\us\\covid\\nyt_us_counties_daily.csv' # daily county counts (also used for allocating deaths when req.)
csv_ST_true = 'data\\us\\covid\\nyt_us_states.csv' # this is cumulative ONLY; no _daily version exists
csv_CT_cumul_true = 'data\\us\\covid\\nyt_us_counties.csv' # county cumulative counts
# reference file for clustering df
# This assignment as done below assumes that the right file just has _clusters.csv appended.
# You can enter the actual path manually if you'd like
cluster_ref_fln=os.path.splitext(mat_model)[0] + '_clusters.csv'
#-- Read and format true data to have correct columns
# Read correct file for requested setup
if isStateWide:
# Plotting state-wide so use nyt state file (implicitly cumulative)
true_df = pd.read_csv(csv_ST_true)
else:
if isCumul:
# plotting cumulative county-wide so pull this file
true_df = pd.read_csv(csv_CT_cumul_true)
else:
# plotting daily county-wide so pull this file
true_df = pd.read_csv(csv_true)
# The nyt_us_counties.csv file is SUPER FLAWED so we need to fix this:
    #   - has some empty values in the fips column causing problems with .astype(int)
# - Straight up doesn't have fips entry for NYC so need to hardcode its fips
if (not isStateWide) and isCumul:
# Reading in problematic file.
# Replace empty value on NYC with 36061
true_df.loc[true_df.county=='New York City', 'fips'] = 36061
# Remove rows with nans from the df (these are the counties we don't care about)
true_df = true_df[true_df['fips'].notna()]
# Reformat some columns
true_df['fips'] = true_df['fips'].astype(int)
true_df['id'] = true_df['date'] + '-' + true_df['fips'].astype(str)
#-- Read and format model data to county-based
# read raw cube from epid. code
model_cube = cf.read_cube(mat_model)
# format to county-based in same way as format_sub
if isComputeDaily:
model_cube = cf.calc_daily(model_cube)
if isAllocCounties:
model_cube = cf.alloc_fromCluster(model_cube, cluster_ref_fln, alloc_day=alloc_day)
#-- Calculate quantiles for all modeled counties
# Quantiles to consider
perc_list = [10, 20, 30, 40, 50, 60, 70, 80, 90]
# Calculate along each column ignoring the first row of beef
model_quants = np.percentile(model_cube[1:,:,:],perc_list,0)
# model_quants now has 9 rows, one for each of the quantiles requested
# The cols and panes are the same format as model_cube
#-- Order model counties by peak deaths/day predicted AND extract counties for plotting from the cube
# Get maximum deaths/day ever hit by each county
# Use 4th row of model_quants to use the 50th percentile (ie. the central prediction)
peak_daily_deaths = np.max(model_quants[4,:,:],0)
# Get indices of sorted (descending) vector
# NOTE: argsort only works in ascdending order so use [::-1] to reverse
peak_inds = np.argsort(peak_daily_deaths)[::-1]
# Take the largest plotN counties (since these are the only ones requested by the user)
peak_inds = peak_inds[shift:plotN+shift]
# Extract the resulting counties
# results will be implicitly sorted due to use of argsort
model_quants = model_quants[:,:,peak_inds] # Get quantiles
model_fips = model_cube[0,0,peak_inds] # Get fips ID's
#-- Extract the same counties from the true data and add column with datetime date
# Pull desired counties from true_df
true_df = true_df[true_df.fips.isin(model_fips)]
# Add column of dates in datetime format
true_df['dateDT'] = pd.to_datetime(true_df['date'].values)
if isAllocCounties:
#-- Read in cluster-to-fips translation (used for showing which counties were clustered)
# Load cluster data
fips_to_clst = pd.read_csv(cluster_ref_fln)
# Extract useful columns
fips_to_clst = fips_to_clst[['fips', 'cluster']]
# Cast fips and cluster values to int
fips_to_clst['fips'] = fips_to_clst['fips'].astype('int')
fips_to_clst['cluster'] = fips_to_clst['cluster'].astype('int')
# Cast to pandas series
fips_to_clst = pd.Series(fips_to_clst.set_index('fips')['cluster'])
else:
# Define empty list so that "in" check later doesn't cause errors
fips_to_clst = []
#-- Create directory for output .svg files if necessary
if is_saveSVG:
# Append sample filename just to get proper path
tmp_flm = '%sstate_county_fips.svg'%svg_flm
# Create directory if necessary
Path(tmp_flm).parent.mkdir(parents=True, exist_ok=True)
for ind, cnty in enumerate(model_fips):
# Pull just the relevant county
cnty_true_df = true_df[true_df['fips'] == cnty]
cnty_model = model_quants[:,:,ind]
# Ensure true_df is chronolically sorted
cnty_true_df.sort_values(by=['dateDT'],inplace=True)
# Create column with days since global_dayzero (to have same reference point for both datasets)
cnty_true_df['rel_date'] = (cnty_true_df['dateDT'] - global_dayzero)/np.timedelta64(1,'D')
# Create time axes
t_true = cnty_true_df['rel_date'].values
t_model = np.arange(cnty_model.shape[1])
# Format title for state vs. county plots
if isStateWide:
# Don't add county item since it's not pertinent
ptit = 'SEIIRD+Q Model: %s (%d)'%(cnty_true_df['state'].iloc[0], cnty)
else:
# Include county in title
ptit = 'SEIIRD+Q Model: %s, %s (%d)'%(cnty_true_df['county'].iloc[0],cnty_true_df['state'].iloc[0], cnty)
if cnty in fips_to_clst:
# Add cluster ID when the county was clustered
ptit += ' [Cluster %d]'%fips_to_clst[cnty]
# Format y-axis label for cumulative vs. daily plots
if isCumul or isStateWide:
# NOTE: statewide is implicitly cumulative
# Set y-axis label to show cumulative counts
ylab = '# deaths total'
else:
# Set y-axis label to show deaths/day
ylab = '# deaths/day'
# Create figure for the plot
p = bkp.figure( plot_width=600,
plot_height=400,
title = ptit,
x_axis_label = 't (days since %s)'%global_dayzero.date(),
y_axis_label = ylab)
# CONSIDER FLIPPING THE ORDER OF QUANTILES TO SEE IF IT FIXES THE PLOTTING
# Plot uncertainty regions
for i in range(4):
p.varea(x=t_model, y1=cnty_model[i,:], y2=cnty_model[-i-1,:], color='black', fill_alpha=perc_list[i]/100)
# Plot 50th percentile line
p.line(t_model, cnty_model[4,:], color = 'black', line_width = 1)
# Plot true deaths
p.circle(t_true, cnty_true_df['deaths'], color ='black')
# Apply training boundary if desired
if boundary is not None:
bd_day = (pd.to_datetime(boundary)-global_dayzero)/np.timedelta64(1, 'D')
vline = Span(location=bd_day, dimension='height', line_color='black', line_width=2)
p.renderers.extend([vline])
# Show plot
bokeh.io.show(p)
# fn = "Alex/conv/" + ptit.replace('SEIIRD+Q Model:','')
# export_png(p,filename=fn)
# Save output figures if desired
if is_saveSVG:
p.output_backend = "svg"
# Format filename for state vs. county plots
if isStateWide:
suffix = ('%s_%d.svg'%(cnty_true_df['state'].iloc[0],cnty)).replace(' ','')
else:
suffix = ('%s_%s_%d.svg'%(cnty_true_df['state'].iloc[0],cnty_true_df['county'].iloc[0],cnty)).replace(' ','')
bokeh.io.export_svgs(p, filename= svg_flm + suffix)
| [
"holoviews.extension",
"pandas.read_csv",
"numpy.arange",
"pathlib.Path",
"os.path.splitext",
"numpy.max",
"os.chdir",
"cube_formatter.alloc_fromCluster",
"cube_formatter.read_cube",
"cube_formatter.calc_daily",
"numpy.argsort",
"git.Repo",
"numpy.timedelta64",
"numpy.percentile",
"bokeh.models.Span",
"sys.path.append",
"pandas.to_datetime"
] | [((367, 413), 'git.Repo', 'git.Repo', (['"""./"""'], {'search_parent_directories': '(True)'}), "('./', search_parent_directories=True)\n", (375, 413), False, 'import git\n'), ((488, 505), 'os.chdir', 'os.chdir', (['homedir'], {}), '(homedir)\n', (496, 505), False, 'import os\n'), ((559, 581), 'sys.path.append', 'sys.path.append', (['"""Dan"""'], {}), "('Dan')\n", (574, 581), False, 'import sys\n'), ((699, 720), 'holoviews.extension', 'hv.extension', (['"""bokeh"""'], {}), "('bokeh')\n", (711, 720), True, 'import holoviews as hv\n'), ((1803, 1832), 'pandas.to_datetime', 'pd.to_datetime', (['"""2020 Jan 21"""'], {}), "('2020 Jan 21')\n", (1817, 1832), True, 'import pandas as pd\n'), ((4505, 4528), 'cube_formatter.read_cube', 'cf.read_cube', (['mat_model'], {}), '(mat_model)\n', (4517, 4528), True, 'import cube_formatter as cf\n'), ((4950, 4999), 'numpy.percentile', 'np.percentile', (['model_cube[1:, :, :]', 'perc_list', '(0)'], {}), '(model_cube[1:, :, :], perc_list, 0)\n', (4963, 4999), True, 'import numpy as np\n'), ((5395, 5427), 'numpy.max', 'np.max', (['model_quants[4, :, :]', '(0)'], {}), '(model_quants[4, :, :], 0)\n', (5401, 5427), True, 'import numpy as np\n'), ((6178, 6216), 'pandas.to_datetime', 'pd.to_datetime', (["true_df['date'].values"], {}), "(true_df['date'].values)\n", (6192, 6216), True, 'import pandas as pd\n'), ((3467, 3491), 'pandas.read_csv', 'pd.read_csv', (['csv_ST_true'], {}), '(csv_ST_true)\n', (3478, 3491), True, 'import pandas as pd\n'), ((4616, 4641), 'cube_formatter.calc_daily', 'cf.calc_daily', (['model_cube'], {}), '(model_cube)\n', (4629, 4641), True, 'import cube_formatter as cf\n'), ((4679, 4749), 'cube_formatter.alloc_fromCluster', 'cf.alloc_fromCluster', (['model_cube', 'cluster_ref_fln'], {'alloc_day': 'alloc_day'}), '(model_cube, cluster_ref_fln, alloc_day=alloc_day)\n', (4699, 4749), True, 'import cube_formatter as cf\n'), ((5558, 5587), 'numpy.argsort', 'np.argsort', (['peak_daily_deaths'], {}), '(peak_daily_deaths)\n', (5568, 5587), True, 'import numpy as np\n'), ((6374, 6402), 'pandas.read_csv', 'pd.read_csv', (['cluster_ref_fln'], {}), '(cluster_ref_fln)\n', (6385, 6402), True, 'import pandas as pd\n'), ((7683, 7713), 'numpy.arange', 'np.arange', (['cnty_model.shape[1]'], {}), '(cnty_model.shape[1])\n', (7692, 7713), True, 'import numpy as np\n'), ((3219, 3246), 'os.path.splitext', 'os.path.splitext', (['mat_model'], {}), '(mat_model)\n', (3235, 3246), False, 'import os\n'), ((3592, 3622), 'pandas.read_csv', 'pd.read_csv', (['csv_CT_cumul_true'], {}), '(csv_CT_cumul_true)\n', (3603, 3622), True, 'import pandas as pd\n'), ((3706, 3727), 'pandas.read_csv', 'pd.read_csv', (['csv_true'], {}), '(csv_true)\n', (3717, 3727), True, 'import pandas as pd\n'), ((7578, 7600), 'numpy.timedelta64', 'np.timedelta64', (['(1)', '"""D"""'], {}), "(1, 'D')\n", (7592, 7600), True, 'import numpy as np\n'), ((9389, 9464), 'bokeh.models.Span', 'Span', ([], {'location': 'bd_day', 'dimension': '"""height"""', 'line_color': '"""black"""', 'line_width': '(2)'}), "(location=bd_day, dimension='height', line_color='black', line_width=2)\n", (9393, 9464), False, 'from bokeh.models import Span\n'), ((9350, 9372), 'numpy.timedelta64', 'np.timedelta64', (['(1)', '"""D"""'], {}), "(1, 'D')\n", (9364, 9372), True, 'import numpy as np\n'), ((7068, 7081), 'pathlib.Path', 'Path', (['tmp_flm'], {}), '(tmp_flm)\n', (7072, 7081), False, 'from pathlib import Path\n'), ((9309, 9333), 'pandas.to_datetime', 'pd.to_datetime', (['boundary'], {}), '(boundary)\n', (9323, 9333), True, 'import 
pandas as pd\n')] |
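A toy sketch (added, not in the original script) of the quantile-and-ranking step used above, run on a small random cube instead of the real model output; the gamma-distributed numbers are purely illustrative.
import numpy as np
# Stand-in cube: 100 samples x 30 days x 5 counties (no FIPS header row here).
rng = np.random.default_rng(0)
toy_cube = rng.gamma(2.0, 3.0, size=(100, 30, 5))
perc_list = [10, 20, 30, 40, 50, 60, 70, 80, 90]
quants = np.percentile(toy_cube, perc_list, 0)    # shape (9, 30, 5)
peak = np.max(quants[4, :, :], 0)                 # peak of the median curve per county
order = np.argsort(peak)[::-1]                    # argsort is ascending, so reverse it
print("counties ranked by peak median deaths/day:", order)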
# Copyright 2008-2018 pydicom authors. See LICENSE file for details.
# -*- coding: utf-8 -*-
"""Unit tests for unicode."""
import sys
import pytest
from pydicom import dcmread
class TestUnicodeFilenames:
def test_read(self):
"""Unicode: Can read a file with unicode characters in name..."""
uni_name = 'test°'
# verify first that we could encode file name in this environment
try:
_ = uni_name.encode(sys.getfilesystemencoding())
except UnicodeEncodeError:
print("SKIP: Environment doesn't support unicode filenames")
return
try:
dcmread(uni_name)
except UnicodeEncodeError:
self.fail("UnicodeEncodeError generated for unicode name")
# ignore file doesn't exist error
except IOError:
pass
| [
"sys.getfilesystemencoding",
"pydicom.dcmread"
] | [((637, 654), 'pydicom.dcmread', 'dcmread', (['uni_name'], {}), '(uni_name)\n', (644, 654), False, 'from pydicom import dcmread\n'), ((455, 482), 'sys.getfilesystemencoding', 'sys.getfilesystemencoding', ([], {}), '()\n', (480, 482), False, 'import sys\n')] |
from typing import List
import numpy as np
from scipy import stats
def calculate_correlation(x: List[float], y: List[float]):
assert len(x) == len(y)
return np.corrcoef(x, y)[0][1], stats.ttest_ind(x, y)[1]
| [
"scipy.stats.ttest_ind",
"numpy.corrcoef"
] | [((193, 214), 'scipy.stats.ttest_ind', 'stats.ttest_ind', (['x', 'y'], {}), '(x, y)\n', (208, 214), False, 'from scipy import stats\n'), ((168, 185), 'numpy.corrcoef', 'np.corrcoef', (['x', 'y'], {}), '(x, y)\n', (179, 185), True, 'import numpy as np\n')] |
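A quick usage sketch (added for illustration) with two small made-up samples.
x = [1.0, 2.0, 3.0, 4.0, 5.0]
y = [1.1, 1.9, 3.2, 4.1, 4.8]
r, p_value = calculate_correlation(x, y)
print("Pearson r = %.3f, t-test p = %.3f" % (r, p_value))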
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from enum import Enum, EnumMeta
from six import with_metaclass
class _CaseInsensitiveEnumMeta(EnumMeta):
def __getitem__(self, name):
return super().__getitem__(name.upper())
def __getattr__(cls, name):
"""Return the enum member matching `name`
We use __getattr__ instead of descriptors or inserting into the enum
class' __dict__ in order to support `name` and `value` being both
properties for enum members (which live in the class' __dict__) and
enum members themselves.
"""
try:
return cls._member_map_[name.upper()]
except KeyError:
raise AttributeError(name)
class ActionType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Enum. Indicates the action type. "Internal" refers to actions that are for internal only APIs.
"""
INTERNAL = "Internal"
class AuthenticationType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Authentication Type
"""
KEY_BASED = "KeyBased"
class CheckNameAvailabilityReason(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The reason why the given name is not available.
"""
INVALID = "Invalid"
ALREADY_EXISTS = "AlreadyExists"
class CreatedByType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The type of identity that created the resource.
"""
USER = "User"
APPLICATION = "Application"
MANAGED_IDENTITY = "ManagedIdentity"
KEY = "Key"
class GroupIdProvisioningState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The provisioning state of private link group ID.
"""
SUCCEEDED = "Succeeded"
FAILED = "Failed"
CANCELED = "Canceled"
class ManagedServiceIdentityType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Type of managed service identity (where both SystemAssigned and UserAssigned types are
allowed).
"""
NONE = "None"
SYSTEM_ASSIGNED = "SystemAssigned"
USER_ASSIGNED = "UserAssigned"
SYSTEM_ASSIGNED_USER_ASSIGNED = "SystemAssigned,UserAssigned"
class Origin(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The intended executor of the operation; as in Resource Based Access Control (RBAC) and audit
logs UX. Default value is "user,system"
"""
USER = "user"
SYSTEM = "system"
USER_SYSTEM = "user,system"
class PrivateEndpointConnectionProvisioningState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The current provisioning state.
"""
SUCCEEDED = "Succeeded"
CREATING = "Creating"
DELETING = "Deleting"
FAILED = "Failed"
class PrivateEndpointServiceConnectionStatus(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The private endpoint connection status.
"""
PENDING = "Pending"
APPROVED = "Approved"
REJECTED = "Rejected"
class ProvisioningState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Provisioning state.
"""
SUCCEEDED = "Succeeded"
DELETED = "Deleted"
FAILED = "Failed"
CANCELED = "Canceled"
ACCEPTED = "Accepted"
CREATING = "Creating"
class PublicNetworkAccess(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Whether or not public network access is allowed for the container registry.
"""
ENABLED = "Enabled"
DISABLED = "Disabled"
| [
"six.with_metaclass"
] | [((1157, 1208), 'six.with_metaclass', 'with_metaclass', (['_CaseInsensitiveEnumMeta', 'str', 'Enum'], {}), '(_CaseInsensitiveEnumMeta, str, Enum)\n', (1171, 1208), False, 'from six import with_metaclass\n'), ((1374, 1425), 'six.with_metaclass', 'with_metaclass', (['_CaseInsensitiveEnumMeta', 'str', 'Enum'], {}), '(_CaseInsensitiveEnumMeta, str, Enum)\n', (1388, 1425), False, 'from six import with_metaclass\n'), ((1526, 1577), 'six.with_metaclass', 'with_metaclass', (['_CaseInsensitiveEnumMeta', 'str', 'Enum'], {}), '(_CaseInsensitiveEnumMeta, str, Enum)\n', (1540, 1577), False, 'from six import with_metaclass\n'), ((1726, 1777), 'six.with_metaclass', 'with_metaclass', (['_CaseInsensitiveEnumMeta', 'str', 'Enum'], {}), '(_CaseInsensitiveEnumMeta, str, Enum)\n', (1740, 1777), False, 'from six import with_metaclass\n'), ((1983, 2034), 'six.with_metaclass', 'with_metaclass', (['_CaseInsensitiveEnumMeta', 'str', 'Enum'], {}), '(_CaseInsensitiveEnumMeta, str, Enum)\n', (1997, 2034), False, 'from six import with_metaclass\n'), ((2212, 2263), 'six.with_metaclass', 'with_metaclass', (['_CaseInsensitiveEnumMeta', 'str', 'Enum'], {}), '(_CaseInsensitiveEnumMeta, str, Enum)\n', (2226, 2263), False, 'from six import with_metaclass\n'), ((2555, 2606), 'six.with_metaclass', 'with_metaclass', (['_CaseInsensitiveEnumMeta', 'str', 'Enum'], {}), '(_CaseInsensitiveEnumMeta, str, Enum)\n', (2569, 2606), False, 'from six import with_metaclass\n'), ((2884, 2935), 'six.with_metaclass', 'with_metaclass', (['_CaseInsensitiveEnumMeta', 'str', 'Enum'], {}), '(_CaseInsensitiveEnumMeta, str, Enum)\n', (2898, 2935), False, 'from six import with_metaclass\n'), ((3134, 3185), 'six.with_metaclass', 'with_metaclass', (['_CaseInsensitiveEnumMeta', 'str', 'Enum'], {}), '(_CaseInsensitiveEnumMeta, str, Enum)\n', (3148, 3185), False, 'from six import with_metaclass\n'), ((3345, 3396), 'six.with_metaclass', 'with_metaclass', (['_CaseInsensitiveEnumMeta', 'str', 'Enum'], {}), '(_CaseInsensitiveEnumMeta, str, Enum)\n', (3359, 3396), False, 'from six import with_metaclass\n'), ((3614, 3665), 'six.with_metaclass', 'with_metaclass', (['_CaseInsensitiveEnumMeta', 'str', 'Enum'], {}), '(_CaseInsensitiveEnumMeta, str, Enum)\n', (3628, 3665), False, 'from six import with_metaclass\n')] |
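A short sketch (not part of the generated file) showing what the case-insensitive metaclass provides: member lookup by name works regardless of case, and members compare equal to their string values.
state = ProvisioningState["succeeded"]      # upper-cased by _CaseInsensitiveEnumMeta.__getitem__
print(state)                                # ProvisioningState.SUCCEEDED
print(state == "Succeeded")                 # True: members are str subclasses
print(ProvisioningState.canceled)           # attribute access is case-insensitive too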
#!/usr/bin/env python3
from gi.repository import Gtk
def font_chooser(fontchooserwidget, font):
print("Font selected: %s" % font)
window = Gtk.Window()
window.connect("destroy", Gtk.main_quit)
fontchooserwidget = Gtk.FontChooserWidget()
fontchooserwidget.connect("font-activated", font_chooser)
window.add(fontchooserwidget)
window.show_all()
Gtk.main()
| [
"gi.repository.Gtk.Window",
"gi.repository.Gtk.FontChooserWidget",
"gi.repository.Gtk.main"
] | [((146, 158), 'gi.repository.Gtk.Window', 'Gtk.Window', ([], {}), '()\n', (156, 158), False, 'from gi.repository import Gtk\n'), ((221, 244), 'gi.repository.Gtk.FontChooserWidget', 'Gtk.FontChooserWidget', ([], {}), '()\n', (242, 244), False, 'from gi.repository import Gtk\n'), ((353, 363), 'gi.repository.Gtk.main', 'Gtk.main', ([], {}), '()\n', (361, 363), False, 'from gi.repository import Gtk\n')] |
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.collections as mc
import copy
action2vect = {0: np.array([0, -1]),
1: np.array([0, +1]),
2: np.array([-1, 0]),
3: np.array([+1, 0])
}
a2m = {0:'up', 1:'down', 2:'left', 3:'right'}
def random_initialize(Maze):
floor_labels = np.arange(len(Maze.floors))
start_floor_label = np.random.choice(floor_labels)
goal_floor_label = np.random.choice(floor_labels)
#Maze.set_start(Maze.floors[start_floor_label].tolist())
Maze.set_goal(Maze.floors[goal_floor_label].tolist())
return Maze
def get_fig_ax(size=(8, 5)):
fig = plt.figure(figsize=size)
ax = fig.add_subplot(111)
ax.set_xlabel('x')
ax.set_ylabel('y')
return fig, ax
class MazeEnv():
def __init__(self, lx, ly, threshold=0.9, figsize=5):
self.lx = lx
self.ly = ly
self.create_maze_by_normal_distribution(threshold=threshold)
self = random_initialize(self)
self.action_space = [0,1,2,3]
self.status = 'Initialized'
self.figsize = figsize
def reset(self, coordinate=[None, None]):
"""
put the state at the start.
"""
if coordinate[0]!=None:
self.state = np.array(coordinate)
else:
#
floor_labels = np.arange(len(self.floors))
start_floor_label = np.random.choice(floor_labels)
self.state = self.floors[start_floor_label]
#
#self.state = np.array(self.start)
self.status = 'Reset'
self.t = 0
return self.get_state()
def is_solved(self):
"""
if the state is at the goal, returns True.
"""
return self.goal==self.state.tolist()
def get_state(self):
"""
returns (x, y) coordinate of the state
"""
return copy.deepcopy(self.state)#, copy.deepcopy(self.state[1])
def step0(self, state, action):
add_vector_np = action2vect[action]
if (state+add_vector_np).tolist() in self.floors.tolist():
next_state = state+add_vector_np
self.status = 'Moved'
else:
next_state = state
self.status = 'Move failed'
self.t += 1
return next_state
def step1(self, state, action, state_p):
if state_p.tolist()==self.goal:
reward = 1
elif False:
reward = 0.1
else:
reward = 0
return reward
def step(self, action):
state = self.get_state()
next_state = self.step0(state, action)
reward = self.step1(state, action, next_state)
# self.state update
self.state = next_state
return self.get_state(), reward, self.is_solved(), {}
def create_maze_by_normal_distribution(self, threshold):
"""
        Create a random maze.
        A higher threshold creates an easier maze;
        around threshold=1 is recommended.
"""
x = np.random.randn(self.lx*self.ly).reshape(self.lx, self.ly)
y = (x < threshold)*(x > -threshold)
self.tile = y
self.load_tile()
def load_tile(self):
self.floors = np.array(list(np.where(self.tile==True))).T # (#white tiles, 2), 2 means (x,y) coordinate
        self.holes = np.array(list(np.where(self.tile==False))).T # (#black tiles, 2)
def flip(self, coordinate=[None, None]):
self.tile[coordinate[0], coordinate[1]] = not self.tile[coordinate[0], coordinate[1]]
self.load_tile()
def render_tile(self, ax, cmap='gray'):
ax.imshow(self.tile.T, interpolation="none", cmap=cmap)
return ax
def render_arrows(self, ax, values_table):
lx, ly, _ = values_table.shape
vmaxs = np.max(values_table, axis=2).reshape(lx, ly, 1)
vt = np.transpose(values_table*self.tile.reshape(lx, ly, 1)/vmaxs, (1,0,2))
width = 0.5
X, Y= np.meshgrid(np.arange(0, lx, 1), np.arange(0, ly, 1))
ones = .5*np.ones(lx*ly).reshape(lx, ly)
zeros= np.zeros(lx*ly).reshape(lx, ly)
# up
ax.quiver(X, Y, zeros, ones, vt[:,:,0], alpha=0.8,
cmap='Reds', scale_units='xy', scale=1)
# down
ax.quiver(X, Y, zeros, -ones, vt[:,:,1], alpha=0.8,
cmap='Reds', scale_units='xy', scale=1)
# left
ax.quiver(X, Y, -ones, zeros, vt[:,:,2], alpha=0.8,
cmap='Reds', scale_units='xy', scale=1)
# right
ax.quiver(X, Y, ones, zeros, vt[:,:,3], alpha=0.8,
cmap='Reds', scale_units='xy', scale=1)
return ax
def render(self, fig=None, ax=None, lines=None, values_table=None):
canvas = False
        if ax is not None:
            canvas = True
            ax.clear()
else:
fig = plt.figure(figsize=(self.figsize, self.figsize))
ax = fig.add_subplot(111)
ax.set_xlabel('x')
ax.set_ylabel('y')
####
ax = self.render_tile(ax)
if values_table is not None:
ax = self.render_arrows(ax, values_table)
####
try:
ax.scatter(self.start[0], self.start[1], marker='x', s=100, color='blue',
alpha=0.8, label='start')
except AttributeError:
pass
try:
ax.scatter(self.goal[0], self.goal[1], marker='d', s=100, color='red',
alpha=0.8, label='goal')
except AttributeError:
pass
try:
ax.scatter(self.state[0], self.state[1], marker='o', s=100, color='black',
alpha=0.8, label='agent')
except AttributeError:
pass
if lines is not None:
lc = mc.LineCollection(lines, linewidths=2, color='black', alpha=0.5)
ax.add_collection(lc)
else:
pass
ax.legend(bbox_to_anchor=(1.05, 1), loc='upper left',
scatterpoints=1)
if canvas:
#pass
fig.canvas.draw()
else:
plt.show()
def set_start(self, coordinate=[None, None]):
if coordinate in self.floors.tolist():
self.start = coordinate
else:
print('Set the start on a white tile.')
def set_goal(self, coordinate=[None, None]):
if coordinate in self.floors.tolist():
self.goal = coordinate
else:
print('Set the goal on a white tile.')
def play(self, Agent, show=True):
lines = []
while not self.is_solved():
state0 = self.get_state()
action = Agent.play()
self.step(action)
state1 = self.get_state()
lines.append([state0, state1])
if show:
self.render(lines=lines)
def play_interactive(self, Agent):
fig = plt.figure(figsize=(8, 5))
ax = fig.add_subplot(111)
ax.set_xlabel('x')
ax.set_ylabel('y')
self.render(fig=fig, ax=ax)
lines = []
while not self.is_solved():
state0 = self.get_state()
action = Agent.play()
self.step(action)
state1 = self.get_state()
lines.append([state0, state1])
self.render(fig=fig, ax=ax, lines=lines)
#fig.canvas.draw()
self.render(fig=fig, ax=ax, lines=lines)
plt.show()
print("solved!")
class CliffEnv(MazeEnv):
def __init__(self, lx, ly, threshold=0.9, figsize=5):
self.lx = lx
self.ly = ly
self.create_cliff()
self.start = [0, ly-1]
self.goal = [lx-1, ly-1]
self.action_space = [0,1,2,3]
self.status = 'Initialized'
self.figsize = figsize
def reset(self, coordinate=[None, None]):
"""
put the state at the start.
"""
if coordinate[0]!=None:
self.state = np.array(coordinate)
else:
self.state = np.array(self.start)
self.status = 'Reset'
self.t = 0
return self.get_state()
def create_cliff(self):
"""
creating a cliff
"""
x = np.ones(self.lx*self.ly).reshape(self.lx, self.ly)
x[:, self.ly-1] -= 1
x[0, self.ly-1] += 1
x[self.lx-1, self.ly-1] += 1
self.tile = x
self.load_tile()
def render_tile(self, ax, cmap='Reds_r'):
ax.imshow(self.tile.T, interpolation="none", cmap=cmap)
return ax
def step0(self, state, action):
add_vector_np = action2vect[action]
if (state+add_vector_np).tolist() in self.floors.tolist():
next_state = state+add_vector_np
self.status = 'Moved'
elif (state+add_vector_np).tolist() in self.holes.tolist():
next_state = self.start
self.status = 'Dropped'
else:
next_state = state
self.status = 'Move failed'
self.t += 1
return next_state
def step1(self, state, action, state_p):
if state_p.tolist()==self.goal:
reward = 1
elif self.status=='Dropped':
reward = -100
else:
reward = 0
        return reward
| [
"numpy.ones",
"numpy.random.choice",
"numpy.where",
"matplotlib.collections.LineCollection",
"numpy.max",
"numpy.array",
"matplotlib.pyplot.figure",
"numpy.zeros",
"copy.deepcopy",
"numpy.random.randn",
"numpy.arange",
"matplotlib.pyplot.show"
] | [((119, 136), 'numpy.array', 'np.array', (['[0, -1]'], {}), '([0, -1])\n', (127, 136), True, 'import numpy as np\n'), ((156, 173), 'numpy.array', 'np.array', (['[0, +1]'], {}), '([0, +1])\n', (164, 173), True, 'import numpy as np\n'), ((193, 210), 'numpy.array', 'np.array', (['[-1, 0]'], {}), '([-1, 0])\n', (201, 210), True, 'import numpy as np\n'), ((230, 247), 'numpy.array', 'np.array', (['[+1, 0]'], {}), '([+1, 0])\n', (238, 247), True, 'import numpy as np\n'), ((413, 443), 'numpy.random.choice', 'np.random.choice', (['floor_labels'], {}), '(floor_labels)\n', (429, 443), True, 'import numpy as np\n'), ((467, 497), 'numpy.random.choice', 'np.random.choice', (['floor_labels'], {}), '(floor_labels)\n', (483, 497), True, 'import numpy as np\n'), ((673, 697), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'size'}), '(figsize=size)\n', (683, 697), True, 'import matplotlib.pyplot as plt\n'), ((1949, 1974), 'copy.deepcopy', 'copy.deepcopy', (['self.state'], {}), '(self.state)\n', (1962, 1974), False, 'import copy\n'), ((7133, 7159), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 5)'}), '(figsize=(8, 5))\n', (7143, 7159), True, 'import matplotlib.pyplot as plt\n'), ((7663, 7673), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7671, 7673), True, 'import matplotlib.pyplot as plt\n'), ((1313, 1333), 'numpy.array', 'np.array', (['coordinate'], {}), '(coordinate)\n', (1321, 1333), True, 'import numpy as np\n'), ((1449, 1479), 'numpy.random.choice', 'np.random.choice', (['floor_labels'], {}), '(floor_labels)\n', (1465, 1479), True, 'import numpy as np\n'), ((4072, 4091), 'numpy.arange', 'np.arange', (['(0)', 'lx', '(1)'], {}), '(0, lx, 1)\n', (4081, 4091), True, 'import numpy as np\n'), ((4093, 4112), 'numpy.arange', 'np.arange', (['(0)', 'ly', '(1)'], {}), '(0, ly, 1)\n', (4102, 4112), True, 'import numpy as np\n'), ((5000, 5048), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(self.figsize, self.figsize)'}), '(figsize=(self.figsize, self.figsize))\n', (5010, 5048), True, 'import matplotlib.pyplot as plt\n'), ((5941, 6005), 'matplotlib.collections.LineCollection', 'mc.LineCollection', (['lines'], {'linewidths': '(2)', 'color': '"""black"""', 'alpha': '(0.5)'}), "(lines, linewidths=2, color='black', alpha=0.5)\n", (5958, 6005), True, 'import matplotlib.collections as mc\n'), ((6274, 6284), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6282, 6284), True, 'import matplotlib.pyplot as plt\n'), ((8204, 8224), 'numpy.array', 'np.array', (['coordinate'], {}), '(coordinate)\n', (8212, 8224), True, 'import numpy as np\n'), ((8264, 8284), 'numpy.array', 'np.array', (['self.start'], {}), '(self.start)\n', (8272, 8284), True, 'import numpy as np\n'), ((3109, 3143), 'numpy.random.randn', 'np.random.randn', (['(self.lx * self.ly)'], {}), '(self.lx * self.ly)\n', (3124, 3143), True, 'import numpy as np\n'), ((3894, 3922), 'numpy.max', 'np.max', (['values_table'], {'axis': '(2)'}), '(values_table, axis=2)\n', (3900, 3922), True, 'import numpy as np\n'), ((4178, 4195), 'numpy.zeros', 'np.zeros', (['(lx * ly)'], {}), '(lx * ly)\n', (4186, 4195), True, 'import numpy as np\n'), ((8460, 8486), 'numpy.ones', 'np.ones', (['(self.lx * self.ly)'], {}), '(self.lx * self.ly)\n', (8467, 8486), True, 'import numpy as np\n'), ((3330, 3357), 'numpy.where', 'np.where', (['(self.tile == True)'], {}), '(self.tile == True)\n', (3338, 3357), True, 'import numpy as np\n'), ((3441, 3468), 'numpy.where', 'np.where', (['(self.tile == True)'], {}), '(self.tile == True)\n', 
(3449, 3468), True, 'import numpy as np\n'), ((4132, 4148), 'numpy.ones', 'np.ones', (['(lx * ly)'], {}), '(lx * ly)\n', (4139, 4148), True, 'import numpy as np\n')] |
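A minimal random-walk sketch (added for illustration, not in the original) assuming the MazeEnv class above is in scope; render() is avoided so no matplotlib window is required.
import random
env = MazeEnv(lx=8, ly=6, threshold=0.9)     # random maze with a random goal
state = env.reset()                          # agent starts on a random floor tile
for step in range(200):
    action = random.choice(env.action_space)  # 0=up, 1=down, 2=left, 3=right
    state, reward, done, _ = env.step(action)
    if done:
        print("Reached the goal in %d steps" % (step + 1))
        break
else:
    print("Goal not reached within 200 random steps")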
#!/usr/bin/python
# -*- coding: utf-8 -*-
import requests
import json, os
from time import sleep
import sys
import re
import config
import urllib
token_api = config.token
url = 'https://api.telegram.org/bot'+token_api+'/'
class JsonSerializable:
def to_json(self):
raise NotImplementedError
def getUpdates(offset=None, limit=None, timeout=None):
Params = {
'offset': offset,
'limit': limit,
'timeout': timeout
}
return json.loads(requests.get(url + 'getUpdates', params=Params).content.decode('utf8'))
def download(url=None,name=None):
urllib.urlretrieve(url,name)
def send_msg(chat_id, text, parse_mode=None, disable_web=None, reply_to_message_id=None, reply_markup=None):
param = {
'chat_id': chat_id,
'text': text,
    'parse_mode': parse_mode or 'HTML'
}
if disable_web:
param['disable_web_page_preview'] = disable_web
if reply_to_message_id:
param['reply_to_message_id'] = reply_to_message_id
if reply_markup:
param['reply_markup'] = reply_markup
return requests.get(url + 'sendMessage', params=param)
def edit_msg(chat_id,message_id,text,parse_mode):
param = {
'chat_id':chat_id,
'message_id':message_id,
'text':text,
'parse_mode':parse_mode,
}
return requests.post(url + 'editMessageText', params=param)
def send_photo(chat_id, photo, caption=None, reply_markup=None):
param = {
'chat_id':chat_id,
}
if caption:
param['caption'] = caption
if reply_markup:
param['reply_markup'] = reply_markup
file = {'photo':photo}
return requests.post(url + 'sendPhoto', params=param, files=file) # POST photo file no file_id
def send_photo_file_id(chat_id, photo, caption=None, reply_markup=None):
param = {
'chat_id':chat_id,
}
if caption:
param['caption'] = caption
if reply_markup:
param['reply_markup'] = reply_markup
if photo:
param['photo'] = photo
return requests.post(url + 'sendPhoto', params=param) ## POST photo file_id no file
def send_action(chat_id, action):
param = {
'chat_id':chat_id,
'action':action
}
return requests.post(url + 'sendchataction', params=param)
def answerCallbackQuery(callback_query_id,text,show_alert=None):
param = {
'callback_query_id':callback_query_id,
'text':text,
'show_alert':show_alert
}
return requests.post(url + 'answerCallbackQuery', params=param)
def getUserProfilePhotos(user_id):
param = {
'user_id':user_id
}
return json.loads(requests.post(url + 'getUserProfilePhotos', params=param).content.decode('utf8'))
def answerInlineQuery(inline_query_id,results,cache_time):
param = {
'inline_query_id':inline_query_id,
'results':results,
}
if cache_time:
param['cache_time'] = cache_time
return requests.post(url + 'answerInlineQuery', params=param)
| [
"urllib.urlretrieve",
"requests.post",
"requests.get"
] | [((590, 619), 'urllib.urlretrieve', 'urllib.urlretrieve', (['url', 'name'], {}), '(url, name)\n', (608, 619), False, 'import urllib\n'), ((1067, 1114), 'requests.get', 'requests.get', (["(url + 'sendMessage')"], {'params': 'param'}), "(url + 'sendMessage', params=param)\n", (1079, 1114), False, 'import requests\n'), ((1295, 1347), 'requests.post', 'requests.post', (["(url + 'editMessageText')"], {'params': 'param'}), "(url + 'editMessageText', params=param)\n", (1308, 1347), False, 'import requests\n'), ((1612, 1670), 'requests.post', 'requests.post', (["(url + 'sendPhoto')"], {'params': 'param', 'files': 'file'}), "(url + 'sendPhoto', params=param, files=file)\n", (1625, 1670), False, 'import requests\n'), ((1990, 2036), 'requests.post', 'requests.post', (["(url + 'sendPhoto')"], {'params': 'param'}), "(url + 'sendPhoto', params=param)\n", (2003, 2036), False, 'import requests\n'), ((2176, 2227), 'requests.post', 'requests.post', (["(url + 'sendchataction')"], {'params': 'param'}), "(url + 'sendchataction', params=param)\n", (2189, 2227), False, 'import requests\n'), ((2412, 2468), 'requests.post', 'requests.post', (["(url + 'answerCallbackQuery')"], {'params': 'param'}), "(url + 'answerCallbackQuery', params=param)\n", (2425, 2468), False, 'import requests\n'), ((2864, 2918), 'requests.post', 'requests.post', (["(url + 'answerInlineQuery')"], {'params': 'param'}), "(url + 'answerInlineQuery', params=param)\n", (2877, 2918), False, 'import requests\n'), ((479, 526), 'requests.get', 'requests.get', (["(url + 'getUpdates')"], {'params': 'Params'}), "(url + 'getUpdates', params=Params)\n", (491, 526), False, 'import requests\n'), ((2569, 2626), 'requests.post', 'requests.post', (["(url + 'getUserProfilePhotos')"], {'params': 'param'}), "(url + 'getUserProfilePhotos', params=param)\n", (2582, 2626), False, 'import requests\n')] |
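A hedged long-polling sketch (not part of the original bot). It uses only the helpers defined above plus standard Telegram update fields ('update_id', 'message', 'chat', 'text'); error handling is omitted.
def poll_forever():
    offset = None
    while True:
        updates = getUpdates(offset=offset, timeout=30)
        for update in updates.get('result', []):
            offset = update['update_id'] + 1          # acknowledge this update
            message = update.get('message')
            if not message or 'text' not in message:
                continue
            chat_id = message['chat']['id']
            send_action(chat_id, 'typing')
            send_msg(chat_id, 'You said: ' + message['text'])
        sleep(1)                                      # be gentle with the API
# poll_forever()   # uncomment to run against a real bot token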
import json
import os
from datetime import datetime
from typing import Tuple
import numpy as np
from keras.datasets import mnist
from numpy import ndarray
from scipy.misc import imresize
def get_dataset_info_from_run(run_filepath: str) -> Tuple[dict, dict]:
with open(run_filepath + 'run_config.json', 'r') as f:
        run_config = json.load(f)
    return run_config['resolution'], run_config['channels']
def load_emoji_dataset(dataset_folder: str, resolution: int, shuffle: bool = True) -> Tuple[ndarray, ndarray, list, list]:
dataset: ndarray = np.load(dataset_folder + 'emojis_' + str(resolution) + '.npy')
classes: ndarray = np.load(dataset_folder + 'emojis_classes.npy')
with open(dataset_folder + 'categories_names.json', 'r') as f:
categories = json.load(f)
with open(dataset_folder + 'companies_names.json', 'r') as f:
companies = json.load(f)
alphas = dataset[:, :, :, -1:]
dataset = dataset[:, :, :, :-1] * alphas + np.ones(dataset.shape)[:, :, :, :-1] * (1 - alphas)
if shuffle:
perm = np.random.permutation(dataset.shape[0])
dataset = dataset[perm]
classes = classes[perm]
return dataset, classes, companies, categories
def load_mnist(resolution: int, shuffle: bool = True) -> ndarray:
(x_train, _), _ = mnist.load_data()
new_images = []
for i in range(x_train.shape[0]):
new_images.append(imresize(x_train[i], (resolution, resolution)))
dataset = np.expand_dims(np.array(new_images) / 255.0, -1)
if shuffle:
perm = np.random.permutation(dataset.shape[0])
dataset = dataset[perm]
return dataset
def generate_run_dir(path: str, model_type):
root_path = path + model_type + '/'
if not os.path.exists(root_path):
os.makedirs(root_path)
current_datetime = datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
run_dir = root_path + current_datetime + '/'
outputs_dir = run_dir + 'outputs/'
model_dir = run_dir + 'models/'
generated_datasets_dir = run_dir + 'generated_datasets/'
os.mkdir(run_dir)
os.mkdir(outputs_dir)
os.mkdir(model_dir)
os.mkdir(generated_datasets_dir)
return run_dir, outputs_dir, model_dir, generated_datasets_dir
| [
"os.path.exists",
"numpy.ones",
"keras.datasets.mnist.load_data",
"os.makedirs",
"numpy.array",
"datetime.datetime.now",
"os.mkdir",
"scipy.misc.imresize",
"json.load",
"numpy.load",
"numpy.random.permutation"
] | [((635, 681), 'numpy.load', 'np.load', (["(dataset_folder + 'emojis_classes.npy')"], {}), "(dataset_folder + 'emojis_classes.npy')\n", (642, 681), True, 'import numpy as np\n'), ((1297, 1314), 'keras.datasets.mnist.load_data', 'mnist.load_data', ([], {}), '()\n', (1312, 1314), False, 'from keras.datasets import mnist\n'), ((2051, 2068), 'os.mkdir', 'os.mkdir', (['run_dir'], {}), '(run_dir)\n', (2059, 2068), False, 'import os\n'), ((2073, 2094), 'os.mkdir', 'os.mkdir', (['outputs_dir'], {}), '(outputs_dir)\n', (2081, 2094), False, 'import os\n'), ((2099, 2118), 'os.mkdir', 'os.mkdir', (['model_dir'], {}), '(model_dir)\n', (2107, 2118), False, 'import os\n'), ((2123, 2155), 'os.mkdir', 'os.mkdir', (['generated_datasets_dir'], {}), '(generated_datasets_dir)\n', (2131, 2155), False, 'import os\n'), ((335, 347), 'json.load', 'json.load', (['f'], {}), '(f)\n', (344, 347), False, 'import json\n'), ((771, 783), 'json.load', 'json.load', (['f'], {}), '(f)\n', (780, 783), False, 'import json\n'), ((871, 883), 'json.load', 'json.load', (['f'], {}), '(f)\n', (880, 883), False, 'import json\n'), ((1051, 1090), 'numpy.random.permutation', 'np.random.permutation', (['dataset.shape[0]'], {}), '(dataset.shape[0])\n', (1072, 1090), True, 'import numpy as np\n'), ((1544, 1583), 'numpy.random.permutation', 'np.random.permutation', (['dataset.shape[0]'], {}), '(dataset.shape[0])\n', (1565, 1583), True, 'import numpy as np\n'), ((1733, 1758), 'os.path.exists', 'os.path.exists', (['root_path'], {}), '(root_path)\n', (1747, 1758), False, 'import os\n'), ((1768, 1790), 'os.makedirs', 'os.makedirs', (['root_path'], {}), '(root_path)\n', (1779, 1790), False, 'import os\n'), ((1400, 1446), 'scipy.misc.imresize', 'imresize', (['x_train[i]', '(resolution, resolution)'], {}), '(x_train[i], (resolution, resolution))\n', (1408, 1446), False, 'from scipy.misc import imresize\n'), ((1478, 1498), 'numpy.array', 'np.array', (['new_images'], {}), '(new_images)\n', (1486, 1498), True, 'import numpy as np\n'), ((1815, 1829), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1827, 1829), False, 'from datetime import datetime\n'), ((967, 989), 'numpy.ones', 'np.ones', (['dataset.shape'], {}), '(dataset.shape)\n', (974, 989), True, 'import numpy as np\n')] |
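A small usage sketch (added, not in the original file). The 'runs/' path and 'gan' model type are made-up arguments; load_mnist also needs Keras plus an old scipy release that still ships scipy.misc.imresize.
dataset = load_mnist(resolution=16, shuffle=True)
print(dataset.shape)     # (60000, 16, 16, 1), values scaled to [0, 1]
run_dir, outputs_dir, model_dir, generated_dir = generate_run_dir('runs/', 'gan')
print(run_dir)           # e.g. runs/gan/<timestamp>/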
import sys
import matplotlib
matplotlib.use("TkAgg")
import matplotlib.pyplot as plt
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg
from matplotlib.figure import Figure
# Tkinter is for python 2; tkinter is for python 3
if sys.version_info[0] < 3:
import Tkinter as tk
import tkMessageBox, tkFileDialog
else:
import tkinter as tk
from tkinter import messagebox as tkMessageBox
from tkinter import filedialog as tkFileDialog
class MainApp(tk.Frame):
def __init__(self, parent):
tk.Frame.__init__(self, parent)
self.parent = parent
self.parent.title('App')
# call the widgets
self.okButton()
self.quitButton()
self.readDataButton()
self.clearDataButton()
self.velScale()
self.canvas()
# print messages on the screen
def printMessage(self):
if (self.data):
print("Data is loaded and accessible from here (printMessage()).")
else:
print('No data loaded...')
### OK button
def okButton(self):
self.okButton = tk.Button(self, text='Test', command=self.printMessage)
self.okButton.grid(column=1, row=1, sticky="nesw")
### Quit button
def quitButton(self):
self.quitButton = tk.Button(self, text='Quit', command=self.confirmQuit)
self.quitButton.grid(column=1, row=2, sticky="nesw")
# confirm quitting
def confirmQuit(self):
answer = tkMessageBox.askyesno(title="App", message="Do you really want to quit?")
if (answer):
self.quit()
# Read data button
def readDataButton(self):
self.data = None
self.readDataButton = tk.Button(self, text='Import data', command=self.readData)
self.readDataButton.grid(column=1, row=3, sticky="nesw")
# reading data
def readData(self):
import os
fullPath = dataList = tkFileDialog.askopenfilename(initialdir='path/to/initialdir')
dataDir = os.path.split(fullPath)[0]+'/'
self.data = readData(fullPath)
# Clear data from current session
def clearDataButton(self):
self.clearData = tk.Button(self, text='Clear data', command=self.confirmClearData)
self.clearData.grid(column=1, row=4, sticky="nesw")
# confirm clearing data
def confirmClearData(self):
answer = tkMessageBox.askyesno(title="App", message="Are you sure you want to clear the loaded data?")
if (answer):
self.data = None
tkMessageBox.showwarning(title="App", message="Data has been deleted.")
# Velocity scale
def velScale(self):
self.velVar = tk.StringVar()
velLabel = tk.Label(self, text="Scale value:", textvariable=self.velVar)
velLabel.grid(row=4, column=0, columnspan=2, sticky=tk.W+tk.E)
velScale = tk.Scale(self, from_=-500, to=+500, orient=tk.HORIZONTAL, resolution=20,
sliderlength=20, showvalue=0,
length=200, width=20,
command=self.onVelScale)
velScale.grid(column=1, row=5, sticky="nesw")
# update velLabel
def onVelScale(self, val):
self.velVar.set("Scale value: {:+0.0f}".format(float(val)))
# Canvas
def canvas(self):
self.f = Figure(figsize=(4,2))
self.a = self.f.add_subplot(111)
self.a.plot([1,2,3,4,5,6,7,8],[5,6,1,3,8,9,3,5])
self.canvas = FigureCanvasTkAgg(self.f, master=self)
self.canvas.get_tk_widget().grid(column=2, row=1, rowspan=5, sticky="nesw")
self.toolbar = NavigationToolbar2TkAgg(self.canvas, self.parent)
if __name__ == "__main__":
root = tk.Tk()
root.geometry("800x600+10+10")
root.resizable(0, 0)
MainApp(root).pack(side=tk.TOP)
root.mainloop()
| [
"tkinter.messagebox.showwarning",
"tkinter.Frame.__init__",
"tkinter.messagebox.askyesno",
"matplotlib.use",
"matplotlib.figure.Figure",
"tkinter.Button",
"os.path.split",
"tkinter.Scale",
"tkinter.StringVar",
"matplotlib.backends.backend_tkagg.NavigationToolbar2TkAgg",
"tkinter.Tk",
"tkinter.Label",
"tkinter.filedialog.askopenfilename",
"matplotlib.backends.backend_tkagg.FigureCanvasTkAgg"
] | [((61, 84), 'matplotlib.use', 'matplotlib.use', (['"""TkAgg"""'], {}), "('TkAgg')\n", (75, 84), False, 'import matplotlib\n'), ((3692, 3699), 'tkinter.Tk', 'tk.Tk', ([], {}), '()\n', (3697, 3699), True, 'import tkinter as tk\n'), ((554, 585), 'tkinter.Frame.__init__', 'tk.Frame.__init__', (['self', 'parent'], {}), '(self, parent)\n', (571, 585), True, 'import tkinter as tk\n'), ((1119, 1174), 'tkinter.Button', 'tk.Button', (['self'], {'text': '"""Test"""', 'command': 'self.printMessage'}), "(self, text='Test', command=self.printMessage)\n", (1128, 1174), True, 'import tkinter as tk\n'), ((1307, 1361), 'tkinter.Button', 'tk.Button', (['self'], {'text': '"""Quit"""', 'command': 'self.confirmQuit'}), "(self, text='Quit', command=self.confirmQuit)\n", (1316, 1361), True, 'import tkinter as tk\n'), ((1490, 1563), 'tkinter.messagebox.askyesno', 'tkMessageBox.askyesno', ([], {'title': '"""App"""', 'message': '"""Do you really want to quit?"""'}), "(title='App', message='Do you really want to quit?')\n", (1511, 1563), True, 'from tkinter import messagebox as tkMessageBox\n'), ((1718, 1776), 'tkinter.Button', 'tk.Button', (['self'], {'text': '"""Import data"""', 'command': 'self.readData'}), "(self, text='Import data', command=self.readData)\n", (1727, 1776), True, 'import tkinter as tk\n'), ((1933, 1994), 'tkinter.filedialog.askopenfilename', 'tkFileDialog.askopenfilename', ([], {'initialdir': '"""path/to/initialdir"""'}), "(initialdir='path/to/initialdir')\n", (1961, 1994), True, 'from tkinter import filedialog as tkFileDialog\n'), ((2178, 2243), 'tkinter.Button', 'tk.Button', (['self'], {'text': '"""Clear data"""', 'command': 'self.confirmClearData'}), "(self, text='Clear data', command=self.confirmClearData)\n", (2187, 2243), True, 'import tkinter as tk\n'), ((2381, 2479), 'tkinter.messagebox.askyesno', 'tkMessageBox.askyesno', ([], {'title': '"""App"""', 'message': '"""Are you sure you want to clear the loaded data?"""'}), "(title='App', message=\n 'Are you sure you want to clear the loaded data?')\n", (2402, 2479), True, 'from tkinter import messagebox as tkMessageBox\n'), ((2677, 2691), 'tkinter.StringVar', 'tk.StringVar', ([], {}), '()\n', (2689, 2691), True, 'import tkinter as tk\n'), ((2711, 2772), 'tkinter.Label', 'tk.Label', (['self'], {'text': '"""Scale value:"""', 'textvariable': 'self.velVar'}), "(self, text='Scale value:', textvariable=self.velVar)\n", (2719, 2772), True, 'import tkinter as tk\n'), ((2863, 3021), 'tkinter.Scale', 'tk.Scale', (['self'], {'from_': '(-500)', 'to': '(+500)', 'orient': 'tk.HORIZONTAL', 'resolution': '(20)', 'sliderlength': '(20)', 'showvalue': '(0)', 'length': '(200)', 'width': '(20)', 'command': 'self.onVelScale'}), '(self, from_=-500, to=+500, orient=tk.HORIZONTAL, resolution=20,\n sliderlength=20, showvalue=0, length=200, width=20, command=self.onVelScale\n )\n', (2871, 3021), True, 'import tkinter as tk\n'), ((3313, 3335), 'matplotlib.figure.Figure', 'Figure', ([], {'figsize': '(4, 2)'}), '(figsize=(4, 2))\n', (3319, 3335), False, 'from matplotlib.figure import Figure\n'), ((3456, 3494), 'matplotlib.backends.backend_tkagg.FigureCanvasTkAgg', 'FigureCanvasTkAgg', (['self.f'], {'master': 'self'}), '(self.f, master=self)\n', (3473, 3494), False, 'from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg\n'), ((3603, 3652), 'matplotlib.backends.backend_tkagg.NavigationToolbar2TkAgg', 'NavigationToolbar2TkAgg', (['self.canvas', 'self.parent'], {}), '(self.canvas, self.parent)\n', (3626, 3652), False, 'from 
matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg\n'), ((2537, 2608), 'tkinter.messagebox.showwarning', 'tkMessageBox.showwarning', ([], {'title': '"""App"""', 'message': '"""Data has been deleted."""'}), "(title='App', message='Data has been deleted.')\n", (2561, 2608), True, 'from tkinter import messagebox as tkMessageBox\n'), ((2013, 2036), 'os.path.split', 'os.path.split', (['fullPath'], {}), '(fullPath)\n', (2026, 2036), False, 'import os\n')] |
'''
Created on 2018/4/19
Prevent too many time-consuming operations from being executed in the same loop
:author: hubo
'''
from vlcp.event.event import withIndices, Event
@withIndices("limiter", "index")
class RateLimitingEvent(Event):
pass
class RateLimiter(object):
"""
    Limit operations executed in the current loop, ensuring sockets are
    still processed during time-consuming operations
"""
def __init__(self, limit, container):
"""
        :param limit: number of "resources" permitted in a single loop. "resources"
                      can be any countable quantity, such as operations executed
                      or bytes sent
:param container: a `RoutineContainer`
"""
self._container = container
self._limit = limit
if self._limit <= 0:
raise ValueError("Limit must be greater than 0")
self._counter = 0
self._task = None
self._bottom_line = limit
async def _limiter_task(self):
current_index = 0
while True:
await self._container.do_events()
current_index += 1
if current_index * self._limit >= self._counter:
# Last event covers all (NOTICE: self._counter - 1 is the last limited)
break
else:
# This will release from current_index * limit to (current_index + 1) * limit - 1
self._container.scheduler.emergesend(RateLimitingEvent(self, current_index))
self._bottom_line += self._limit
# Reset counter
self._counter = 0
self._task = None
self._bottom_line = self._limit
async def limit(self, use = 1):
"""
        Acquire "resources", waiting until enough "resources" are available. At most
        `limit` "resources" are permitted in each scheduler loop.
        :param use: number of "resources" to be used.
        :return: True if the caller was rate limited (i.e. had to wait), False otherwise
"""
c = self._counter
self._counter = c + use
if self._task is None:
self._task = self._container.subroutine(self._limiter_task(), False)
if c >= self._bottom_line:
# Limited
await RateLimitingEvent.createMatcher(self, c // self._limit)
return True
else:
return False
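# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# Assuming an existing vlcp `RoutineContainer` named `container`, a routine that walks a
# long list could call limit() before each expensive step so that socket processing is
# not starved within a single scheduler loop:
#
#     limiter = RateLimiter(limit=100, container=container)
#
#     async def process_all(items):
#         for item in items:
#             await limiter.limit()   # blocks once 100 "resources" are used in this loop
#             handle(item)            # hypothetical expensive per-item work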
| [
"vlcp.event.event.withIndices"
] | [((167, 198), 'vlcp.event.event.withIndices', 'withIndices', (['"""limiter"""', '"""index"""'], {}), "('limiter', 'index')\n", (178, 198), False, 'from vlcp.event.event import withIndices, Event\n')] |
import os
import random
import string
import base64
from typing import Tuple
def random_string(length: int) -> str:
characters = string.ascii_letters + string.digits
return ''.join(random.choice(characters) for i in range(length))
def get_path(relative_path: str) -> str:
path = os.path.join(os.getcwd(), relative_path)
return os.path.abspath(path)
def auth_decode(encoded: str) -> Tuple[str, str]:
split = encoded.strip().split(' ')
if len(split) == 1:
try:
username, password = base64.b64decode(split[0]).decode().split(':', 1)
except:
raise Exception('DecodeError')
elif len(split) == 2:
if split[0].strip().lower() == 'basic':
try:
username, password = base64.b64decode(split[1]).decode().split(':', 1)
except:
raise Exception('DecodeError')
else:
raise Exception('DecodeError')
    else:
        raise Exception('DecodeError')
    return username, password
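# --- Hedged usage example (added for illustration; the credentials below are made up) ---
# auth_decode() accepts either a bare base64 payload or a full "Basic <payload>" header value.
if __name__ == '__main__':
    payload = base64.b64encode(b'alice:s3cret').decode()
    assert auth_decode(payload) == ('alice', 's3cret')
    assert auth_decode('Basic ' + payload) == ('alice', 's3cret')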
| [
"os.path.abspath",
"random.choice",
"base64.b64decode",
"os.getcwd"
] | [((348, 369), 'os.path.abspath', 'os.path.abspath', (['path'], {}), '(path)\n', (363, 369), False, 'import os\n'), ((309, 320), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (318, 320), False, 'import os\n'), ((192, 217), 'random.choice', 'random.choice', (['characters'], {}), '(characters)\n', (205, 217), False, 'import random\n'), ((531, 557), 'base64.b64decode', 'base64.b64decode', (['split[0]'], {}), '(split[0])\n', (547, 557), False, 'import base64\n'), ((769, 795), 'base64.b64decode', 'base64.b64decode', (['split[1]'], {}), '(split[1])\n', (785, 795), False, 'import base64\n')] |
# -*- coding: utf-8 -*-
from datetime import datetime
from pytz import timezone
from chat.models import Channel, Post
from score.models import Score
def is_posted_today(string, date, channel=None, excluded_players=[]):
if channel is None:
# if no channel is specified we assume it's the general channel
channel = Channel.objects.get(id=1)
# TODO: check if this is really UTC we want here
paris_tz = timezone('UTC')
post_day = paris_tz.localize(datetime(date.year,
date.month,
date.day))
matching_posts_so_far = Post.objects.filter(channel=channel,
type='m',
date__gte=post_day,
date__lte=date,
content=string)
for player in excluded_players:
matching_posts_so_far = matching_posts_so_far.exclude(author=player)
if not matching_posts_so_far.exists():
return None
    matching_posts_so_far = matching_posts_so_far.order_by('date')
return matching_posts_so_far[0]
def is_preums(post):
if is_posted_today('preums', post.date) == post:
score = Score(user=post.author, game='p', value=500000)
score.save()
def is_deuz(post):
preums = is_posted_today('preums', post.date)
if preums is not None:
deuz = is_posted_today('deuz',
post.date,
excluded_players=[preums.author])
if deuz == post:
if preums.author != post.author:
score = Score(user=post.author, game='d', value=300000)
score.save()
def is_troiz(post):
preums = is_posted_today('preums', post.date)
if preums is not None:
deuz = is_posted_today('deuz',
post.date,
excluded_players=[preums.author])
if deuz is not None:
troiz = is_posted_today('troiz',
post.date,
excluded_players=[preums.author,
deuz.author])
if troiz == post:
if post.author not in [preums.author, deuz.author]:
score = Score(user=post.author, game='t', value=100000)
score.save()
def is_dernz(post):
general_channel = Channel.objects.get(id=1)
paris_tz = timezone('UTC')
post_day = paris_tz.localize(datetime(post.date.year,
post.date.month,
post.date.day))
latests_dernz = Post.objects.filter(channel=general_channel,
type='m',
date__gte=post_day,
date__lte=post.date,
content='dernz')
user_latests_dernz = latests_dernz.filter(author=post.author)
# if there is exactly one it's the first one
if user_latests_dernz.count() == 1:
latests_dernz = latests_dernz.exclude(author=post.author)
if latests_dernz.exists():
latest_dernz = latests_dernz.order_by('-date')[0]
score = Score(user=latest_dernz.author, game='n', value=-300000)
score.save()
score = Score(user=post.author, game='n', value=300000)
score.save()
| [
"datetime.datetime",
"pytz.timezone",
"score.models.Score",
"chat.models.Post.objects.filter",
"chat.models.Channel.objects.get"
] | [((432, 447), 'pytz.timezone', 'timezone', (['"""UTC"""'], {}), "('UTC')\n", (440, 447), False, 'from pytz import timezone\n'), ((637, 739), 'chat.models.Post.objects.filter', 'Post.objects.filter', ([], {'channel': 'channel', 'type': '"""m"""', 'date__gte': 'post_day', 'date__lte': 'date', 'content': 'string'}), "(channel=channel, type='m', date__gte=post_day,\n date__lte=date, content=string)\n", (656, 739), False, 'from chat.models import Channel, Post\n'), ((2516, 2541), 'chat.models.Channel.objects.get', 'Channel.objects.get', ([], {'id': '(1)'}), '(id=1)\n', (2535, 2541), False, 'from chat.models import Channel, Post\n'), ((2558, 2573), 'pytz.timezone', 'timezone', (['"""UTC"""'], {}), "('UTC')\n", (2566, 2573), False, 'from pytz import timezone\n'), ((2770, 2886), 'chat.models.Post.objects.filter', 'Post.objects.filter', ([], {'channel': 'general_channel', 'type': '"""m"""', 'date__gte': 'post_day', 'date__lte': 'post.date', 'content': '"""dernz"""'}), "(channel=general_channel, type='m', date__gte=post_day,\n date__lte=post.date, content='dernz')\n", (2789, 2886), False, 'from chat.models import Channel, Post\n'), ((337, 362), 'chat.models.Channel.objects.get', 'Channel.objects.get', ([], {'id': '(1)'}), '(id=1)\n', (356, 362), False, 'from chat.models import Channel, Post\n'), ((481, 522), 'datetime.datetime', 'datetime', (['date.year', 'date.month', 'date.day'], {}), '(date.year, date.month, date.day)\n', (489, 522), False, 'from datetime import datetime\n'), ((1278, 1325), 'score.models.Score', 'Score', ([], {'user': 'post.author', 'game': '"""p"""', 'value': '(500000)'}), "(user=post.author, game='p', value=500000)\n", (1283, 1325), False, 'from score.models import Score\n'), ((2607, 2663), 'datetime.datetime', 'datetime', (['post.date.year', 'post.date.month', 'post.date.day'], {}), '(post.date.year, post.date.month, post.date.day)\n', (2615, 2663), False, 'from datetime import datetime\n'), ((3484, 3531), 'score.models.Score', 'Score', ([], {'user': 'post.author', 'game': '"""n"""', 'value': '(300000)'}), "(user=post.author, game='n', value=300000)\n", (3489, 3531), False, 'from score.models import Score\n'), ((3385, 3441), 'score.models.Score', 'Score', ([], {'user': 'latest_dernz.author', 'game': '"""n"""', 'value': '(-300000)'}), "(user=latest_dernz.author, game='n', value=-300000)\n", (3390, 3441), False, 'from score.models import Score\n'), ((1685, 1732), 'score.models.Score', 'Score', ([], {'user': 'post.author', 'game': '"""d"""', 'value': '(300000)'}), "(user=post.author, game='d', value=300000)\n", (1690, 1732), False, 'from score.models import Score\n'), ((2391, 2438), 'score.models.Score', 'Score', ([], {'user': 'post.author', 'game': '"""t"""', 'value': '(100000)'}), "(user=post.author, game='t', value=100000)\n", (2396, 2438), False, 'from score.models import Score\n')] |
import logging
import typing
import pydantic
import boto3
from . import base
class CostCategoryRuleDefinition(pydantic.BaseModel):
"""
For type == 'Dimensions':
Key: typing.Literal["LINKED_ACCOUNT", "INSTANCE_TYPE", "REGION", "SERVICE"]="LINKED_ACCOUNT"
"""
Key: str="LINKED_ACCOUNT"
Values: typing.List[str]
MatchOptions: typing.List[typing.Literal["EQUALS", "ABSENT", "STARTS_WITH", "ENDS_WITH", "CONTAINS", "CASE_SENSITIVE", "CASE_INSENSITIVE"]]=["EQUALS"]
class CostCategoryRule(pydantic.BaseModel):
Value: str
Rule: typing.Dict[typing.Literal["Dimensions", "Tags", "CostCategories"], CostCategoryRuleDefinition]
Type: typing.Literal["REGULAR", "INHERITED_VALUE"]=None
InheritedValue: dict=None
class CostCategoryRules(pydantic.BaseModel):
__root__: typing.List[CostCategoryRule]=[]
def append(self, value) -> None:
self.__root__.append(value)
super().__init__(__root__=self.__root__)
def __getitem__(self, item: int) -> int:
return self.__root__[item]
def __setitem__(self, item: int, value) -> None:
self.__root__[item] = value
super().__init__(__root__=self.__root__)
def names(self) -> list:
return list(r.Value for r in self.__root__)
def get(self, name) -> CostCategoryRule:
return next(filter(lambda r: r.Value == name, self.__root__), None)
def find(self, value) -> CostCategoryRule:
for rule in self.__root__:
rk = list(rule.Rule.keys())[0]
if value in rule.Rule[rk].Values:
return rule
return None
class CostCategory(pydantic.BaseModel):
class Config:
extra = 'allow'
validate_assignment = True
Name: str
CostCategoryArn: str=None
EffectiveStart: str=None
RuleVersion: str='CostCategoryExpression.v1'
Rules: CostCategoryRules=CostCategoryRules()
_cli: boto3.Session.client=boto3.client('ce')
def __init__(self, _cli: boto3.Session.client=None, role_arn: str=None, **data) -> None:
if role_arn:
_cli = base.AWSClient.client('ce', role_arn=role_arn)
super().__init__(**data)
if _cli:
self._cli = _cli
def dict(self, **attr: dict) -> dict:
attr['exclude_none'] = True
attr['exclude'] = {"_cli"}
return super().dict(**attr)
def get_arn(self):
refs = self._cli.list_cost_category_definitions()['CostCategoryReferences']
cat = next(filter(lambda r: r['Name'] == self.Name, refs), None)
return cat['CostCategoryArn'] if cat else None
def sync(self):
# If no CostCategoryArn, first check if we don't have one with the same Name
if not self.CostCategoryArn:
self.CostCategoryArn = self.get_arn()
if self.CostCategoryArn:
attr = self.dict(include={'CostCategoryArn', 'RuleVersion', 'Rules'})
return self._cli.update_cost_category_definition(**attr)
attr = self.dict(include={'Name', 'RuleVersion', 'Rules'})
return self._cli.create_cost_category_definition(**attr)
def describe(self):
definition = self._cli.describe_cost_category_definition(CostCategoryArn=self.CostCategoryArn)['CostCategory']
super().__init__(_cli=self._cli, **definition)
class CostCategories(pydantic.BaseModel):
class Config:
extra = 'allow'
__root__: typing.List[CostCategory]=[]
_cli: boto3.Session.client=boto3.client('ce')
def __init__(self, _cli: boto3.Session.client=None, role_arn: str=None, **data) -> None:
if role_arn:
_cli = base.AWSClient.client('ce', role_arn=role_arn)
# Ensure cli is passed on to CostCategory
if '__root__' in data and _cli:
for c in data['__root__']:
c['_cli'] = _cli
super().__init__(**data)
if _cli:
self._cli = _cli
def list(self) -> None:
definitions = self._cli.list_cost_category_definitions()['CostCategoryReferences']
self.__init__(_cli=self._cli, __root__=definitions)
def get(self, name) -> CostCategory:
return next(filter(lambda r: r.Name == name, self.__root__), None)
def diff_CostCategoryRules(cc1: CostCategoryRules, cc2: CostCategoryRules):
diffs = dict(
added = list(set(cc1.names()) - set(cc2.names())),
removed = list(set(cc2.names()) - set(cc1.names())),
changed = {}
)
for rule in list(set(cc1.names()) & set(cc2.names())):
r1k = list(cc1.get(rule).Rule.keys())[0]
r2k = list(cc2.get(rule).Rule.keys())[0]
r1 = cc1.get(rule).Rule[r1k]
r2 = cc2.get(rule).Rule[r2k]
if r1k != r2k or set(r1.Values) != set(r2.Values):
diffs['changed'][rule] = dict(
added = list(set(r1.Values) - set(r2.Values)),
removed = list(set(r2.Values) - set(r1.Values))
)
if r1k != r2k:
diffs['changed'][rule]['type'] = f"{r2k} -> {r1k}"
return diffs
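# --- Hedged usage sketch (added for illustration; the account ids are made up) ---
# diff_CostCategoryRules() compares two in-memory rule sets, so no Cost Explorer call
# is needed, assuming pydantic (v1) coerces the nested dicts into the models above:
#
#     current = CostCategoryRules(__root__=[
#         {'Value': 'team-a', 'Rule': {'Dimensions': {'Values': ['111111111111']}}}])
#     desired = CostCategoryRules(__root__=[
#         {'Value': 'team-a', 'Rule': {'Dimensions': {'Values': ['111111111111', '222222222222']}}},
#         {'Value': 'team-b', 'Rule': {'Dimensions': {'Values': ['333333333333']}}}])
#     diff_CostCategoryRules(desired, current)
#     # -> 'team-b' reported as added, 'team-a' as changed (one new account id)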
| [
"boto3.client"
] | [((1926, 1944), 'boto3.client', 'boto3.client', (['"""ce"""'], {}), "('ce')\n", (1938, 1944), False, 'import boto3\n'), ((3459, 3477), 'boto3.client', 'boto3.client', (['"""ce"""'], {}), "('ce')\n", (3471, 3477), False, 'import boto3\n')] |
from flask import redirect, g, flash, request, Response
from flask_appbuilder.security.views import UserDBModelView,AuthDBView
from superset.security import SupersetSecurityManager
from flask_appbuilder.security.views import expose
from flask_appbuilder.security.manager import BaseSecurityManager
from flask_login import login_user, logout_user
import jwt
class CustomAuthDBView(AuthDBView):
login_template = 'appbuilder/general/security/login_db.html'
@expose('/iframe/', methods=['GET', 'POST'])
def iframe(self):
from superset import app
jwt_secret = app.config['IFRAME_JWT_SECRET']
token = request.args.get('token')
if not token:
return Response(response='{"msg":"Invalid token"}', status=403, mimetype="application/json")
try:
jwt_payload = jwt.decode(token, jwt_secret, algorithms=['HS256'])
except jwt.exceptions.ExpiredSignatureError as err:
return Response(response='{"msg":"Expired token"}', status=403, mimetype="application/json")
username = jwt_payload.get("username")
redirect_url = jwt_payload.get('redirect_url')
if not redirect_url:
return Response(response='{"msg":"Invalid token"}', status=403, mimetype="application/json")
if username is not None:
user = self.appbuilder.sm.find_user(username=username)
if not user:
return Response(response='{"msg":"Invalid token"}', status=403, mimetype="application/json")
login_user(user, remember=False)
return redirect(redirect_url)
elif g.user is not None and g.user.is_authenticated:
return redirect(redirect_url)
else:
#flash('Unable to auto login', 'warning')
return Response(response='{"msg":"Invalid token"}', status=403, mimetype="application/json")
class CustomSecurityManager(SupersetSecurityManager):
authdbview = CustomAuthDBView
def __init__(self, appbuilder):
super(CustomSecurityManager, self).__init__(appbuilder) | [
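# --- Hedged usage sketch (added for illustration; not part of the original class) ---
# The /iframe/ view above expects an HS256 token signed with IFRAME_JWT_SECRET that
# carries `username` and `redirect_url`; an embedding application might mint it as:
#
#     token = jwt.encode(
#         {'username': 'admin',                      # example Superset username
#          'redirect_url': '/superset/dashboard/1/', # example landing page
#          'exp': datetime.datetime.utcnow() + datetime.timedelta(minutes=5)},
#         app.config['IFRAME_JWT_SECRET'], algorithm='HS256')
#
# and then load Superset at the view's URL (e.g. /login/iframe/?token=<token>).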
"jwt.decode",
"flask.request.args.get",
"flask_login.login_user",
"flask_appbuilder.security.views.expose",
"flask.redirect",
"flask.Response"
] | [((465, 508), 'flask_appbuilder.security.views.expose', 'expose', (['"""/iframe/"""'], {'methods': "['GET', 'POST']"}), "('/iframe/', methods=['GET', 'POST'])\n", (471, 508), False, 'from flask_appbuilder.security.views import expose\n'), ((634, 659), 'flask.request.args.get', 'request.args.get', (['"""token"""'], {}), "('token')\n", (650, 659), False, 'from flask import redirect, g, flash, request, Response\n'), ((700, 790), 'flask.Response', 'Response', ([], {'response': '"""{"msg":"Invalid token"}"""', 'status': '(403)', 'mimetype': '"""application/json"""'}), '(response=\'{"msg":"Invalid token"}\', status=403, mimetype=\n \'application/json\')\n', (708, 790), False, 'from flask import redirect, g, flash, request, Response\n'), ((825, 876), 'jwt.decode', 'jwt.decode', (['token', 'jwt_secret'], {'algorithms': "['HS256']"}), "(token, jwt_secret, algorithms=['HS256'])\n", (835, 876), False, 'import jwt\n'), ((1193, 1283), 'flask.Response', 'Response', ([], {'response': '"""{"msg":"Invalid token"}"""', 'status': '(403)', 'mimetype': '"""application/json"""'}), '(response=\'{"msg":"Invalid token"}\', status=403, mimetype=\n \'application/json\')\n', (1201, 1283), False, 'from flask import redirect, g, flash, request, Response\n'), ((1527, 1559), 'flask_login.login_user', 'login_user', (['user'], {'remember': '(False)'}), '(user, remember=False)\n', (1537, 1559), False, 'from flask_login import login_user, logout_user\n'), ((1579, 1601), 'flask.redirect', 'redirect', (['redirect_url'], {}), '(redirect_url)\n', (1587, 1601), False, 'from flask import redirect, g, flash, request, Response\n'), ((956, 1046), 'flask.Response', 'Response', ([], {'response': '"""{"msg":"Expired token"}"""', 'status': '(403)', 'mimetype': '"""application/json"""'}), '(response=\'{"msg":"Expired token"}\', status=403, mimetype=\n \'application/json\')\n', (964, 1046), False, 'from flask import redirect, g, flash, request, Response\n'), ((1428, 1518), 'flask.Response', 'Response', ([], {'response': '"""{"msg":"Invalid token"}"""', 'status': '(403)', 'mimetype': '"""application/json"""'}), '(response=\'{"msg":"Invalid token"}\', status=403, mimetype=\n \'application/json\')\n', (1436, 1518), False, 'from flask import redirect, g, flash, request, Response\n'), ((1682, 1704), 'flask.redirect', 'redirect', (['redirect_url'], {}), '(redirect_url)\n', (1690, 1704), False, 'from flask import redirect, g, flash, request, Response\n'), ((1792, 1882), 'flask.Response', 'Response', ([], {'response': '"""{"msg":"Invalid token"}"""', 'status': '(403)', 'mimetype': '"""application/json"""'}), '(response=\'{"msg":"Invalid token"}\', status=403, mimetype=\n \'application/json\')\n', (1800, 1882), False, 'from flask import redirect, g, flash, request, Response\n')] |
import numpy as np
from math import ceil
def splicing_list(imgs, raw_size):
'''
    Stitch the sliding-window ("slide") outputs back together; raw_size ensures the result is cropped back to the original size.
'''
h, w = imgs[0].shape[:2]
row = ceil(raw_size[0] / h)
col = ceil(raw_size[1] / w)
# print(raw_size[1], w, raw_size[1]/w)
# print('row, col:', row, col)
if len(imgs[0].shape) == 2:
result = np.zeros((h * row, w * col), dtype=np.uint8)
else:
result = np.zeros((h * row, w * col, imgs[0].shape[-1]), dtype=np.uint8)
k = 0
for i_r in range(row):
for i_c in range(col):
# print('h, w:', h, w)
if len(imgs[k].shape) == 2:
result[(i_r * h):((i_r + 1) * h), (i_c * w):((i_c + 1) * w)] = imgs[k]
else:
result[(i_r * h):((i_r + 1) * h), (i_c * w):((i_c + 1) * w), :] = imgs[k]
k += 1
# print('r, c, k:', i_r, i_c, k)
if len(result.shape) == 2:
return result[0:raw_size[0], 0:raw_size[1]]
else:
return result[0:raw_size[0], 0:raw_size[1], :] | [
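# --- Hedged usage example (added for illustration) ---
# Four 2x2 tiles produced by a sliding-window split are stitched back together and
# cropped to the original 3x3 size.
if __name__ == '__main__':
    tiles = [np.full((2, 2), v, dtype=np.uint8) for v in (1, 2, 3, 4)]
    restored = splicing_list(tiles, (3, 3))
    print(restored.shape)  # -> (3, 3)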
"numpy.zeros",
"math.ceil"
] | [((181, 202), 'math.ceil', 'ceil', (['(raw_size[0] / h)'], {}), '(raw_size[0] / h)\n', (185, 202), False, 'from math import ceil\n'), ((214, 235), 'math.ceil', 'ceil', (['(raw_size[1] / w)'], {}), '(raw_size[1] / w)\n', (218, 235), False, 'from math import ceil\n'), ((367, 411), 'numpy.zeros', 'np.zeros', (['(h * row, w * col)'], {'dtype': 'np.uint8'}), '((h * row, w * col), dtype=np.uint8)\n', (375, 411), True, 'import numpy as np\n'), ((441, 504), 'numpy.zeros', 'np.zeros', (['(h * row, w * col, imgs[0].shape[-1])'], {'dtype': 'np.uint8'}), '((h * row, w * col, imgs[0].shape[-1]), dtype=np.uint8)\n', (449, 504), True, 'import numpy as np\n')] |
import numpy as np
from .grid import csgrid_GMAO
def calc_cs_face_area(lon_b, lat_b, r_sphere = 6.375e6):
"""Calculate area of cubed-sphere grid cells on one face
Inputs must be in degrees. Edge arrays must be
shaped [N+1 x N+1]
"""
# Convert inputs to radians
lon_b_rad = lon_b * np.pi / 180.0
lat_b_rad = lat_b * np.pi / 180.0
r_sq = r_sphere * r_sphere
n_cs = lon_b.shape[1] - 1
# Allocate output array
cs_area = np.zeros((n_cs,n_cs))
# Ordering
valid_combo = np.array([[1,2,4],[2,3,1],[3,2,4],[4,1,3]]) - 1
for i_lon in range(n_cs):
for i_lat in range(n_cs):
lon_corner = np.zeros(4)
lat_corner = np.zeros(4)
xyz_corner = np.zeros((4,3))
for i_vert in range(4):
x_lon = i_lon + (i_vert > 1)
x_lat = i_lat + (i_vert == 0 or i_vert == 3)
lon_corner[i_vert] = lon_b_rad[x_lon,x_lat]
lat_corner[i_vert] = lat_b_rad[x_lon,x_lat]
for i_vert in range(4):
xyz_corner[i_vert,:] = ll2xyz(lon_corner[i_vert],lat_corner[i_vert])
tot_ang = 0.0
for i_corner in range(4):
curr_combo = valid_combo[i_corner,:]
xyz_mini = np.zeros((3,3))
for i_mini in range(3):
xyz_mini[i_mini,:] = xyz_corner[curr_combo[i_mini],:]
curr_ang = sphere_angle(xyz_mini[0,:],xyz_mini[1,:],xyz_mini[2,:])
tot_ang += curr_ang
cs_area[i_lon,i_lat] = r_sq * (tot_ang - (2.0*np.pi))
return cs_area
def ll2xyz(lon_pt,lat_pt):
"""Converts a lon/lat pair (in radians) to cartesian co-ordinates
Vector should point to the surface of the unit sphere"""
xPt = np.cos(lat_pt) * np.cos(lon_pt)
yPt = np.cos(lat_pt) * np.sin(lon_pt)
zPt = np.sin(lat_pt)
return [xPt,yPt,zPt]
def sphere_angle(e1,e2,e3):
# e1: Mid-point
# e2 and e3 to either side
pVec = np.ones(3)
qVec = np.ones(3)
pVec[0] = e1[1]*e2[2] - e1[2]*e2[1]
pVec[1] = e1[2]*e2[0] - e1[0]*e2[2]
pVec[2] = e1[0]*e2[1] - e1[1]*e2[0]
qVec[0] = e1[1]*e3[2] - e1[2]*e3[1]
qVec[1] = e1[2]*e3[0] - e1[0]*e3[2]
qVec[2] = e1[0]*e3[1] - e1[1]*e3[0]
ddd = np.sum(pVec*pVec) * np.sum(qVec*qVec)
if ddd <= 0.0:
angle = 0.0;
else:
ddd = np.sum(pVec*qVec)/np.sqrt(ddd);
if (np.abs(ddd)>1.0):
angle = np.pi/2.0;
else:
angle = np.arccos(ddd);
return angle
def calc_cs_area(cs_grid=None,cs_res=None):
"""Return area in m2 for each cell in a cubed-sphere grid
Uses GMAO indexing convention (6xNxN)
"""
# Calculate area on a cubed sphere
if cs_res is None:
cs_res = cs_grid['lon_b'].shape[-1] - 1
elif cs_grid is None:
cs_grid = csgrid_GMAO(cs_res)
elif cs_grid is not None and cs_res is not None:
        assert cs_res == cs_grid['lon_b'].shape[-1] - 1, 'Routine calc_cs_area received inconsistent inputs'
cs_area = np.zeros((6,cs_res,cs_res))
cs_area[0,:,:] = calc_cs_face_area(cs_grid['lon_b'][0,:,:],cs_grid['lat_b'][0,:,:])
for i_face in range(1,6):
cs_area[i_face,:,:] = cs_area[0,:,:].copy()
return cs_area
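# --- Hedged sanity check (added for illustration; run via `python -m` so the relative
# import of csgrid_GMAO above resolves) ---
if __name__ == '__main__':
    # The six faces of a cubed-sphere grid tile the whole sphere, so the summed
    # cell areas should be close to 4*pi*r^2.
    area = calc_cs_area(cs_res=24)
    print(area.sum() / (4.0 * np.pi * 6.375e6 ** 2))  # expected to be ~1.0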
| [
"numpy.abs",
"numpy.sqrt",
"numpy.ones",
"numpy.arccos",
"numpy.array",
"numpy.zeros",
"numpy.sum",
"numpy.cos",
"numpy.sin"
] | [((476, 498), 'numpy.zeros', 'np.zeros', (['(n_cs, n_cs)'], {}), '((n_cs, n_cs))\n', (484, 498), True, 'import numpy as np\n'), ((1888, 1902), 'numpy.sin', 'np.sin', (['lat_pt'], {}), '(lat_pt)\n', (1894, 1902), True, 'import numpy as np\n'), ((2019, 2029), 'numpy.ones', 'np.ones', (['(3)'], {}), '(3)\n', (2026, 2029), True, 'import numpy as np\n'), ((2041, 2051), 'numpy.ones', 'np.ones', (['(3)'], {}), '(3)\n', (2048, 2051), True, 'import numpy as np\n'), ((3070, 3099), 'numpy.zeros', 'np.zeros', (['(6, cs_res, cs_res)'], {}), '((6, cs_res, cs_res))\n', (3078, 3099), True, 'import numpy as np\n'), ((536, 590), 'numpy.array', 'np.array', (['[[1, 2, 4], [2, 3, 1], [3, 2, 4], [4, 1, 3]]'], {}), '([[1, 2, 4], [2, 3, 1], [3, 2, 4], [4, 1, 3]])\n', (544, 590), True, 'import numpy as np\n'), ((1804, 1818), 'numpy.cos', 'np.cos', (['lat_pt'], {}), '(lat_pt)\n', (1810, 1818), True, 'import numpy as np\n'), ((1821, 1835), 'numpy.cos', 'np.cos', (['lon_pt'], {}), '(lon_pt)\n', (1827, 1835), True, 'import numpy as np\n'), ((1846, 1860), 'numpy.cos', 'np.cos', (['lat_pt'], {}), '(lat_pt)\n', (1852, 1860), True, 'import numpy as np\n'), ((1863, 1877), 'numpy.sin', 'np.sin', (['lon_pt'], {}), '(lon_pt)\n', (1869, 1877), True, 'import numpy as np\n'), ((2303, 2322), 'numpy.sum', 'np.sum', (['(pVec * pVec)'], {}), '(pVec * pVec)\n', (2309, 2322), True, 'import numpy as np\n'), ((2323, 2342), 'numpy.sum', 'np.sum', (['(qVec * qVec)'], {}), '(qVec * qVec)\n', (2329, 2342), True, 'import numpy as np\n'), ((678, 689), 'numpy.zeros', 'np.zeros', (['(4)'], {}), '(4)\n', (686, 689), True, 'import numpy as np\n'), ((715, 726), 'numpy.zeros', 'np.zeros', (['(4)'], {}), '(4)\n', (723, 726), True, 'import numpy as np\n'), ((752, 768), 'numpy.zeros', 'np.zeros', (['(4, 3)'], {}), '((4, 3))\n', (760, 768), True, 'import numpy as np\n'), ((2405, 2424), 'numpy.sum', 'np.sum', (['(pVec * qVec)'], {}), '(pVec * qVec)\n', (2411, 2424), True, 'import numpy as np\n'), ((2423, 2435), 'numpy.sqrt', 'np.sqrt', (['ddd'], {}), '(ddd)\n', (2430, 2435), True, 'import numpy as np\n'), ((2449, 2460), 'numpy.abs', 'np.abs', (['ddd'], {}), '(ddd)\n', (2455, 2460), True, 'import numpy as np\n'), ((2532, 2546), 'numpy.arccos', 'np.arccos', (['ddd'], {}), '(ddd)\n', (2541, 2546), True, 'import numpy as np\n'), ((1295, 1311), 'numpy.zeros', 'np.zeros', (['(3, 3)'], {}), '((3, 3))\n', (1303, 1311), True, 'import numpy as np\n')] |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
Train and eval functions used in main.py
"""
import math
import os
import sys
from typing import Iterable
import pdb
import torch
import util.misc as utils
from datasets.coco_eval import CocoEvaluator
from datasets.panoptic_eval import PanopticEvaluator
from util.misc import NestedTensor
from util.miou_metric import measure_miou_metric
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import numpy as np
def train_one_epoch(model: torch.nn.Module, criterion: torch.nn.Module,
data_loader: Iterable, optimizer: torch.optim.Optimizer,
device: torch.device, epoch: int, dataset: str, max_norm: float = 0, ):
model.train()
criterion.train()
metric_logger = utils.MetricLogger(delimiter=" ")
metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))
metric_logger.add_meter('class_error', utils.SmoothedValue(window_size=1, fmt='{value:.2f}'))
header = 'Epoch: [{}]'.format(epoch)
print_freq = 10
for samples, targets in metric_logger.log_every(data_loader, print_freq, header):
samples = samples.to(device)
targets = [{k: v.to(device) for k, v in t.items()} for t in targets]
outputs = model(samples)
loss_dict = criterion(outputs, targets)
weight_dict = criterion.weight_dict
losses = sum(loss_dict[k] * weight_dict[k] for k in loss_dict.keys() if k in weight_dict)
# reduce losses over all GPUs for logging purposes
loss_dict_reduced = utils.reduce_dict(loss_dict)
loss_dict_reduced_unscaled = {f'{k}_unscaled': v
for k, v in loss_dict_reduced.items()}
loss_dict_reduced_scaled = {k: v * weight_dict[k]
for k, v in loss_dict_reduced.items() if k in weight_dict}
losses_reduced_scaled = sum(loss_dict_reduced_scaled.values())
loss_value = losses_reduced_scaled.item()
if not math.isfinite(loss_value):
print("Loss is {}, stopping training".format(loss_value))
print(loss_dict_reduced)
sys.exit(1)
optimizer.zero_grad()
losses.backward()
if max_norm > 0:
torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm)
optimizer.step()
metric_logger.update(loss=loss_value, **loss_dict_reduced_scaled, **loss_dict_reduced_unscaled)
metric_logger.update(class_error=loss_dict_reduced['class_error'])
metric_logger.update(lr=optimizer.param_groups[0]["lr"])
# gather the stats from all processes
metric_logger.synchronize_between_processes()
print("Averaged stats:", metric_logger)
return {k: meter.global_avg for k, meter in metric_logger.meters.items()}
@torch.no_grad()
def evaluate(model, criterion, postprocessors, data_loader, base_ds, device, output_dir, dataset):
model.eval()
criterion.eval()
metric_logger = utils.MetricLogger(delimiter=" ")
metric_logger.add_meter('class_error', utils.SmoothedValue(window_size=1, fmt='{value:.2f}'))
header = 'Test:'
iou_types = tuple(k for k in ('segm', 'bbox') if k in postprocessors.keys())
coco_evaluator = None # CocoEvaluator(base_ds, iou_types)
# coco_evaluator.coco_eval[iou_types[0]].params.iouThrs = [0, 0.1, 0.5, 0.75]
panoptic_evaluator = None
if 'panoptic' in postprocessors.keys():
panoptic_evaluator = PanopticEvaluator(
data_loader.dataset.ann_file,
data_loader.dataset.ann_folder,
output_dir=os.path.join(output_dir, "panoptic_eval"),
)
miou_stats = []
for samples, targets in metric_logger.log_every(data_loader, 10, header):
samples = samples.to(device)
processed_targets = []
for t in targets:
item = {}
for k, v in t.items():
item[k] = v if isinstance(v, str) else v.to(device)
processed_targets.append(item)
targets = processed_targets
outputs = model(samples)
loss_dict = criterion(outputs, targets)
weight_dict = criterion.weight_dict
# reduce losses over all GPUs for logging purposes
loss_dict_reduced = utils.reduce_dict(loss_dict)
loss_dict_reduced_scaled = {k: v * weight_dict[k]
for k, v in loss_dict_reduced.items() if k in weight_dict}
loss_dict_reduced_unscaled = {f'{k}_unscaled': v
for k, v in loss_dict_reduced.items()}
metric_logger.update(loss=sum(loss_dict_reduced_scaled.values()),
**loss_dict_reduced_scaled,
**loss_dict_reduced_unscaled)
metric_logger.update(class_error=loss_dict_reduced['class_error'])
orig_target_sizes = torch.stack([t["orig_size"] for t in targets], dim=0)
results = postprocessors['bbox'](outputs, orig_target_sizes)
'''
# Visualization
threshold = 0.85
plt.figure(figsize=(10, 10))
for idx in range(9):
plt.subplot(3, 3, idx+1)
scores = results[idx]['scores']
mask_select = scores > threshold
plt.imshow(targets[idx]['raw_images'][0].permute(1, 2, 0).cpu())
pred_bboxes = results[idx]['boxes'][mask_select]
tgt_bboxes = targets[idx]['raw_boxes']
ax = plt.gca()
for j in range(pred_bboxes.shape[0]):
plt_boxes(ax, pred_bboxes[j].cpu(), 'b')
for j in range(tgt_bboxes.shape[0]):
plt_boxes(ax, tgt_bboxes[j].cpu(), 'r')
plt.axis('off')
plt.show()
plt.close()
'''
if 'segm' in postprocessors.keys():
target_sizes = torch.stack([t["size"] for t in targets], dim=0)
results = postprocessors['segm'](results, outputs, orig_target_sizes, target_sizes)
# pdb.set_trace()
def reorder_int_labels(x):
_, y = torch.unique(x, return_inverse=True)
y -= y.min()
return y
#
#
threshold = 0.
for idx in range(len(results)):
scores = results[idx]['scores']
masks = results[idx]['masks']
# mask_select = scores > threshold
# masks = masks[mask_select]
masks = masks.cuda() * scores[:, None, None, None]
masks = torch.cat([torch.zeros_like(masks[0:1]), masks], dim=0)
pred_segments = masks.argmax(0)
gt_segments = targets[idx]['segment_map']
pred_segments = reorder_int_labels(pred_segments)
# pdb.set_trace()
# print('Number of unique segments', len(pred_segments.unique()))
# plt.subplot(1, 3, 1)
#
# plt.imshow(targets[idx]['raw_images'][0].permute(1, 2, 0).cpu())
# plt.title('Image')
# plt.axis('off')
# plt.subplot(1, 3, 2)
# plt.imshow(gt_segments.cpu())
# plt.title('GT segments')
# plt.axis('off')
# plt.subplot(1, 3, 3)
# plt.imshow(pred_segments[0].cpu())
# plt.title('Predicted segments')
# plt.axis('off')
# plt.show()
# plt.close()
miou, vis = measure_miou_metric(pred_segment=pred_segments.int(), gt_segment=gt_segments.int().unsqueeze(-1))
miou_stats.append(miou)
pred, gt, iou = vis
# plt.subplot(1, 2, 1)
# plt.imshow(pred_segments.reshape(512, 512).cpu())
# plt.subplot(1, 2, 2)
# plt.imshow(gt_segments.cpu())
# plt.show()
# plt.close()
# save_out = {
# 'image': targets[idx]['raw_images'][0],
# 'pred_segments': pred[0],
# 'gt_segments': gt[0],
# }
#
# file_name = targets[idx]['file_name'].split('/data2/honglinc/tdw_playroom_small/images/')[-1].replace('/', '-')
# save_path = os.path.join('./output/TDW_Cylinder_DETR_RAFT', file_name+'.pt')
# print('Save to', save_path)
# torch.save(save_out, save_path)
print('mIoU: ', np.mean(miou_stats), len(miou_stats), len(pred[0]))
res = {target['image_id'].item(): output for target, output in zip(targets, results)}
if coco_evaluator is not None:
coco_evaluator.update(res)
if panoptic_evaluator is not None:
res_pano = postprocessors["panoptic"](outputs, target_sizes, orig_target_sizes)
for i, target in enumerate(targets):
image_id = target["image_id"].item()
file_name = f"{image_id:012d}.png"
res_pano[i]["image_id"] = image_id
res_pano[i]["file_name"] = file_name
panoptic_evaluator.update(res_pano)
print('Final mIoU: ', np.mean(miou_stats))
# pdb.set_trace()
# gather the stats from all processes
metric_logger.synchronize_between_processes()
print("Averaged stats:", metric_logger)
if coco_evaluator is not None:
coco_evaluator.synchronize_between_processes()
if panoptic_evaluator is not None:
panoptic_evaluator.synchronize_between_processes()
# accumulate predictions from all images
if coco_evaluator is not None:
coco_evaluator.accumulate()
coco_evaluator.summarize()
panoptic_res = None
if panoptic_evaluator is not None:
panoptic_res = panoptic_evaluator.summarize()
stats = {k: meter.global_avg for k, meter in metric_logger.meters.items()}
if coco_evaluator is not None:
if 'bbox' in postprocessors.keys():
stats['coco_eval_bbox'] = coco_evaluator.coco_eval['bbox'].stats.tolist()
if 'segm' in postprocessors.keys():
stats['coco_eval_masks'] = coco_evaluator.coco_eval['segm'].stats.tolist()
if panoptic_res is not None:
stats['PQ_all'] = panoptic_res["All"]
stats['PQ_th'] = panoptic_res["Things"]
stats['PQ_st'] = panoptic_res["Stuff"]
return stats, coco_evaluator
def plt_boxes(ax, box, color='r'):
x0, y0, x1, y1 = box.unbind(-1)
width = x1 - x0
height = y1 - y0
rect = patches.Rectangle((x0, y0), width, height, linewidth=1, edgecolor=color, facecolor='none')
ax.add_patch(rect)
| [
"numpy.mean",
"torch.unique",
"matplotlib.patches.Rectangle",
"math.isfinite",
"util.misc.SmoothedValue",
"torch.stack",
"os.path.join",
"util.misc.MetricLogger",
"util.misc.reduce_dict",
"sys.exit",
"torch.no_grad",
"torch.zeros_like"
] | [((2854, 2869), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2867, 2869), False, 'import torch\n'), ((804, 838), 'util.misc.MetricLogger', 'utils.MetricLogger', ([], {'delimiter': '""" """'}), "(delimiter=' ')\n", (822, 838), True, 'import util.misc as utils\n'), ((3028, 3062), 'util.misc.MetricLogger', 'utils.MetricLogger', ([], {'delimiter': '""" """'}), "(delimiter=' ')\n", (3046, 3062), True, 'import util.misc as utils\n'), ((10417, 10511), 'matplotlib.patches.Rectangle', 'patches.Rectangle', (['(x0, y0)', 'width', 'height'], {'linewidth': '(1)', 'edgecolor': 'color', 'facecolor': '"""none"""'}), "((x0, y0), width, height, linewidth=1, edgecolor=color,\n facecolor='none')\n", (10434, 10511), True, 'import matplotlib.patches as patches\n'), ((873, 926), 'util.misc.SmoothedValue', 'utils.SmoothedValue', ([], {'window_size': '(1)', 'fmt': '"""{value:.6f}"""'}), "(window_size=1, fmt='{value:.6f}')\n", (892, 926), True, 'import util.misc as utils\n'), ((971, 1024), 'util.misc.SmoothedValue', 'utils.SmoothedValue', ([], {'window_size': '(1)', 'fmt': '"""{value:.2f}"""'}), "(window_size=1, fmt='{value:.2f}')\n", (990, 1024), True, 'import util.misc as utils\n'), ((1600, 1628), 'util.misc.reduce_dict', 'utils.reduce_dict', (['loss_dict'], {}), '(loss_dict)\n', (1617, 1628), True, 'import util.misc as utils\n'), ((3106, 3159), 'util.misc.SmoothedValue', 'utils.SmoothedValue', ([], {'window_size': '(1)', 'fmt': '"""{value:.2f}"""'}), "(window_size=1, fmt='{value:.2f}')\n", (3125, 3159), True, 'import util.misc as utils\n'), ((4305, 4333), 'util.misc.reduce_dict', 'utils.reduce_dict', (['loss_dict'], {}), '(loss_dict)\n', (4322, 4333), True, 'import util.misc as utils\n'), ((4915, 4968), 'torch.stack', 'torch.stack', (["[t['orig_size'] for t in targets]"], {'dim': '(0)'}), "([t['orig_size'] for t in targets], dim=0)\n", (4926, 4968), False, 'import torch\n'), ((9073, 9092), 'numpy.mean', 'np.mean', (['miou_stats'], {}), '(miou_stats)\n', (9080, 9092), True, 'import numpy as np\n'), ((2054, 2079), 'math.isfinite', 'math.isfinite', (['loss_value'], {}), '(loss_value)\n', (2067, 2079), False, 'import math\n'), ((2200, 2211), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (2208, 2211), False, 'import sys\n'), ((5887, 5935), 'torch.stack', 'torch.stack', (["[t['size'] for t in targets]"], {'dim': '(0)'}), "([t['size'] for t in targets], dim=0)\n", (5898, 5935), False, 'import torch\n'), ((6123, 6159), 'torch.unique', 'torch.unique', (['x'], {'return_inverse': '(True)'}), '(x, return_inverse=True)\n', (6135, 6159), False, 'import torch\n'), ((8379, 8398), 'numpy.mean', 'np.mean', (['miou_stats'], {}), '(miou_stats)\n', (8386, 8398), True, 'import numpy as np\n'), ((3640, 3681), 'os.path.join', 'os.path.join', (['output_dir', '"""panoptic_eval"""'], {}), "(output_dir, 'panoptic_eval')\n", (3652, 3681), False, 'import os\n'), ((6560, 6588), 'torch.zeros_like', 'torch.zeros_like', (['masks[0:1]'], {}), '(masks[0:1])\n', (6576, 6588), False, 'import torch\n')] |
"""Compute the Statewide and Climate District Averages!"""
from __future__ import print_function
import sys
import datetime
import warnings
import numpy as np
import geopandas as gpd
from pyiem import iemre
from pyiem.grid.zs import CachingZonalStats
from pyiem.datatypes import temperature, distance
from pyiem.util import get_dbconn, ncopen
warnings.filterwarnings('ignore', category=FutureWarning)
COOP = get_dbconn("coop")
ccursor = COOP.cursor()
def zero(val):
"""Make masked values a zero"""
if val is np.ma.masked:
return 0
return val
def update_database(stid, valid, row):
"""Update the database with these newly computed values!"""
table = "alldata_%s" % (stid[:2], )
def do_update(_row):
"""Do the database work, please."""
ccursor.execute("""
UPDATE """ + table + """
SET high = %s, low = %s, precip = %s, snow = %s, snowd = %s
WHERE station = %s and day = %s
""", (_row['high'], _row['low'],
round(zero(_row['precip']), 2),
round(zero(_row['snow']), 1),
round(zero(_row['snowd']), 1),
stid, valid))
return ccursor.rowcount == 1
if not do_update(row):
ccursor.execute("""
INSERT into """ + table + """ (station, day,
estimated, year, month, sday) VALUES
(%s, %s, 't', %s, %s, %s)
""", (stid, valid, valid.year, valid.month, valid.strftime("%m%d")))
do_update(row)
def do_day(valid):
""" Process a day please """
idx = iemre.daily_offset(valid)
nc = ncopen(iemre.get_daily_ncname(valid.year), 'r', timeout=300)
high = temperature(nc.variables['high_tmpk_12z'][idx, :, :],
'K').value('F')
low = temperature(nc.variables['low_tmpk_12z'][idx, :, :],
'K').value('F')
precip = distance(nc.variables['p01d_12z'][idx, :, :], 'MM').value("IN")
snow = distance(nc.variables['snow_12z'][idx, :, :], 'MM').value("IN")
snowd = distance(nc.variables['snowd_12z'][idx, :, :], 'MM').value("IN")
nc.close()
# build out the state mappers
pgconn = get_dbconn('postgis')
states = gpd.GeoDataFrame.from_postgis("""
SELECT the_geom, state_abbr from states
where state_abbr not in ('AK', 'HI', 'DC')
""", pgconn, index_col='state_abbr', geom_col='the_geom')
czs = CachingZonalStats(iemre.AFFINE)
sthigh = czs.gen_stats(np.flipud(high), states['the_geom'])
stlow = czs.gen_stats(np.flipud(low), states['the_geom'])
stprecip = czs.gen_stats(np.flipud(precip), states['the_geom'])
stsnow = czs.gen_stats(np.flipud(snow), states['the_geom'])
stsnowd = czs.gen_stats(np.flipud(snowd), states['the_geom'])
statedata = {}
for i, state in enumerate(states.index.values):
statedata[state] = dict(
high=sthigh[i],
low=stlow[i],
precip=stprecip[i],
snow=stsnow[i],
snowd=stsnowd[i]
)
update_database(state+"0000", valid, statedata[state])
# build out climate division mappers
climdiv = gpd.GeoDataFrame.from_postgis("""
SELECT geom, iemid from climdiv
where st_abbrv not in ('AK', 'HI', 'DC')
""", pgconn, index_col='iemid', geom_col='geom')
czs = CachingZonalStats(iemre.AFFINE)
sthigh = czs.gen_stats(np.flipud(high), climdiv['geom'])
stlow = czs.gen_stats(np.flipud(low), climdiv['geom'])
stprecip = czs.gen_stats(np.flipud(precip), climdiv['geom'])
stsnow = czs.gen_stats(np.flipud(snow), climdiv['geom'])
stsnowd = czs.gen_stats(np.flipud(snowd), climdiv['geom'])
for i, iemid in enumerate(climdiv.index.values):
row = dict(
high=sthigh[i],
low=stlow[i],
precip=stprecip[i],
snow=stsnow[i],
snowd=stsnowd[i]
)
# we must have temperature data
if row['high'] is np.ma.masked or row['low'] is np.ma.masked:
print(
("compute_0000 %s has missing temperature data, using state"
) % (iemid, )
)
row = statedata[iemid[:2]]
update_database(iemid, valid, row)
def main(argv):
"""Go Main Go"""
if len(argv) == 4:
do_day(datetime.date(int(argv[1]), int(argv[2]),
int(argv[3])))
elif len(argv) == 3:
sts = datetime.date(int(argv[1]), int(argv[2]), 1)
ets = sts + datetime.timedelta(days=35)
ets = ets.replace(day=1)
now = sts
while now < ets:
do_day(now)
now += datetime.timedelta(days=1)
else:
do_day(datetime.date.today())
if __name__ == '__main__':
main(sys.argv)
ccursor.close()
COOP.commit()
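# --- Hedged usage note (added for illustration; the script filename is assumed) ---
# Based on main() above, the script is driven from the command line:
#   python compute_0000.py 2020 1 15   # process a single day
#   python compute_0000.py 2020 1      # process every day in January 2020
#   python compute_0000.py             # process today's date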
| [
"pyiem.iemre.get_daily_ncname",
"pyiem.datatypes.distance",
"pyiem.grid.zs.CachingZonalStats",
"numpy.flipud",
"datetime.date.today",
"pyiem.util.get_dbconn",
"pyiem.iemre.daily_offset",
"pyiem.datatypes.temperature",
"geopandas.GeoDataFrame.from_postgis",
"datetime.timedelta",
"warnings.filterwarnings"
] | [((345, 402), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'category': 'FutureWarning'}), "('ignore', category=FutureWarning)\n", (368, 402), False, 'import warnings\n'), ((410, 428), 'pyiem.util.get_dbconn', 'get_dbconn', (['"""coop"""'], {}), "('coop')\n", (420, 428), False, 'from pyiem.util import get_dbconn, ncopen\n'), ((1567, 1592), 'pyiem.iemre.daily_offset', 'iemre.daily_offset', (['valid'], {}), '(valid)\n', (1585, 1592), False, 'from pyiem import iemre\n'), ((2160, 2181), 'pyiem.util.get_dbconn', 'get_dbconn', (['"""postgis"""'], {}), "('postgis')\n", (2170, 2181), False, 'from pyiem.util import get_dbconn, ncopen\n'), ((2195, 2391), 'geopandas.GeoDataFrame.from_postgis', 'gpd.GeoDataFrame.from_postgis', (['"""\n SELECT the_geom, state_abbr from states\n where state_abbr not in (\'AK\', \'HI\', \'DC\')\n """', 'pgconn'], {'index_col': '"""state_abbr"""', 'geom_col': '"""the_geom"""'}), '(\n """\n SELECT the_geom, state_abbr from states\n where state_abbr not in (\'AK\', \'HI\', \'DC\')\n """\n , pgconn, index_col=\'state_abbr\', geom_col=\'the_geom\')\n', (2224, 2391), True, 'import geopandas as gpd\n'), ((2392, 2423), 'pyiem.grid.zs.CachingZonalStats', 'CachingZonalStats', (['iemre.AFFINE'], {}), '(iemre.AFFINE)\n', (2409, 2423), False, 'from pyiem.grid.zs import CachingZonalStats\n'), ((3125, 3302), 'geopandas.GeoDataFrame.from_postgis', 'gpd.GeoDataFrame.from_postgis', (['"""\n SELECT geom, iemid from climdiv\n where st_abbrv not in (\'AK\', \'HI\', \'DC\')\n """', 'pgconn'], {'index_col': '"""iemid"""', 'geom_col': '"""geom"""'}), '(\n """\n SELECT geom, iemid from climdiv\n where st_abbrv not in (\'AK\', \'HI\', \'DC\')\n """\n , pgconn, index_col=\'iemid\', geom_col=\'geom\')\n', (3154, 3302), True, 'import geopandas as gpd\n'), ((3303, 3334), 'pyiem.grid.zs.CachingZonalStats', 'CachingZonalStats', (['iemre.AFFINE'], {}), '(iemre.AFFINE)\n', (3320, 3334), False, 'from pyiem.grid.zs import CachingZonalStats\n'), ((1609, 1643), 'pyiem.iemre.get_daily_ncname', 'iemre.get_daily_ncname', (['valid.year'], {}), '(valid.year)\n', (1631, 1643), False, 'from pyiem import iemre\n'), ((2451, 2466), 'numpy.flipud', 'np.flipud', (['high'], {}), '(high)\n', (2460, 2466), True, 'import numpy as np\n'), ((2514, 2528), 'numpy.flipud', 'np.flipud', (['low'], {}), '(low)\n', (2523, 2528), True, 'import numpy as np\n'), ((2579, 2596), 'numpy.flipud', 'np.flipud', (['precip'], {}), '(precip)\n', (2588, 2596), True, 'import numpy as np\n'), ((2645, 2660), 'numpy.flipud', 'np.flipud', (['snow'], {}), '(snow)\n', (2654, 2660), True, 'import numpy as np\n'), ((2710, 2726), 'numpy.flipud', 'np.flipud', (['snowd'], {}), '(snowd)\n', (2719, 2726), True, 'import numpy as np\n'), ((3362, 3377), 'numpy.flipud', 'np.flipud', (['high'], {}), '(high)\n', (3371, 3377), True, 'import numpy as np\n'), ((3422, 3436), 'numpy.flipud', 'np.flipud', (['low'], {}), '(low)\n', (3431, 3436), True, 'import numpy as np\n'), ((3484, 3501), 'numpy.flipud', 'np.flipud', (['precip'], {}), '(precip)\n', (3493, 3501), True, 'import numpy as np\n'), ((3547, 3562), 'numpy.flipud', 'np.flipud', (['snow'], {}), '(snow)\n', (3556, 3562), True, 'import numpy as np\n'), ((3609, 3625), 'numpy.flipud', 'np.flipud', (['snowd'], {}), '(snowd)\n', (3618, 3625), True, 'import numpy as np\n'), ((1674, 1732), 'pyiem.datatypes.temperature', 'temperature', (["nc.variables['high_tmpk_12z'][idx, :, :]", '"""K"""'], {}), "(nc.variables['high_tmpk_12z'][idx, :, :], 'K')\n", (1685, 1732), False, 'from 
pyiem.datatypes import temperature, distance\n'), ((1777, 1834), 'pyiem.datatypes.temperature', 'temperature', (["nc.variables['low_tmpk_12z'][idx, :, :]", '"""K"""'], {}), "(nc.variables['low_tmpk_12z'][idx, :, :], 'K')\n", (1788, 1834), False, 'from pyiem.datatypes import temperature, distance\n'), ((1881, 1932), 'pyiem.datatypes.distance', 'distance', (["nc.variables['p01d_12z'][idx, :, :]", '"""MM"""'], {}), "(nc.variables['p01d_12z'][idx, :, :], 'MM')\n", (1889, 1932), False, 'from pyiem.datatypes import temperature, distance\n'), ((1956, 2007), 'pyiem.datatypes.distance', 'distance', (["nc.variables['snow_12z'][idx, :, :]", '"""MM"""'], {}), "(nc.variables['snow_12z'][idx, :, :], 'MM')\n", (1964, 2007), False, 'from pyiem.datatypes import temperature, distance\n'), ((2032, 2084), 'pyiem.datatypes.distance', 'distance', (["nc.variables['snowd_12z'][idx, :, :]", '"""MM"""'], {}), "(nc.variables['snowd_12z'][idx, :, :], 'MM')\n", (2040, 2084), False, 'from pyiem.datatypes import temperature, distance\n'), ((4471, 4498), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(35)'}), '(days=35)\n', (4489, 4498), False, 'import datetime\n'), ((4618, 4644), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(1)'}), '(days=1)\n', (4636, 4644), False, 'import datetime\n'), ((4670, 4691), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (4689, 4691), False, 'import datetime\n')] |
import sys
from string import ascii_lowercase
o = dict(zip(ascii_lowercase, range(26)))
s, t = sys.stdin.read().split()
def main():
g1 = [set() for _ in range(26)]
g2 = [set() for _ in range(26)]
for i in range(len(s)):
a = o[s[i]]; b = o[t[i]]
g1[a].add(b); g2[b].add(a)
for i in range(26):
if len(g1[i]) >= 2 or len(g2[i]) >= 2:
print('No')
return
print('Yes')
if __name__ == '__main__':
main()
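# --- Illustrative example (added; mirrors the check done in main()) ---
# Input "azzel apple": the mapping a<->a, z<->p, e<->l, l<->e is one-to-one in both
# directions, so the program prints "Yes".
# Input "abc aab": both 'a' and 'b' would map to 'a', so it prints "No".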
| [
"sys.stdin.read"
] | [((102, 118), 'sys.stdin.read', 'sys.stdin.read', ([], {}), '()\n', (116, 118), False, 'import sys\n')] |
import random
from transformer import Batchify
def test_small_batch():
random.seed(0)
b = Batchify([
([1,2,3], [7,8,9]),
([2,3,4], [8,9,10]),
([3,4,5], [9,10,11]),
([4,5,6], [10,11,12])
], 10)
bb = list(b)
assert len(bb) == 4
assert bb[0].src.tolist() == [[2,3,4,5,3]]
assert bb[0].trg.tolist() == [[2,9,10,11]]
assert bb[1].src.tolist() == [[2,1,2,3,3]]
assert bb[1].trg.tolist() == [[2,7,8,9]]
assert bb[2].src.tolist() == [[2,2,3,4,3]]
assert bb[2].trg.tolist() == [[2,8,9,10]]
assert bb[3].src.tolist() == [[2,4,5,6,3]]
assert bb[3].trg.tolist() == [[2,10,11,12]]
def test_large_batch():
random.seed(0)
b = Batchify([
([1,2], [3, 4]),
([3,4], [5, 6]),
([5,6], [7, 8]),
([7,8], [9, 10]),
([9,10], [11, 12]),
([11,12], [13, 14])
], 16)
bb = list(b)
assert len(bb) == 3
assert bb[0].src.tolist() == [[2,1,2,3], [2,3,4,3]]
assert bb[0].trg.tolist() == [[2,3,4], [2,5,6]]
assert bb[1].src.tolist() == [[2,9,10,3], [2,11,12,3]]
assert bb[1].trg.tolist() == [[2,11,12], [2,13,14]]
assert bb[2].src.tolist() == [[2,5,6,3], [2,7,8,3]]
assert bb[2].trg.tolist() == [[2,7,8], [2,9,10]]
| [
"random.seed",
"transformer.Batchify"
] | [((77, 91), 'random.seed', 'random.seed', (['(0)'], {}), '(0)\n', (88, 91), False, 'import random\n'), ((101, 222), 'transformer.Batchify', 'Batchify', (['[([1, 2, 3], [7, 8, 9]), ([2, 3, 4], [8, 9, 10]), ([3, 4, 5], [9, 10, 11]),\n ([4, 5, 6], [10, 11, 12])]', '(10)'], {}), '([([1, 2, 3], [7, 8, 9]), ([2, 3, 4], [8, 9, 10]), ([3, 4, 5], [9, \n 10, 11]), ([4, 5, 6], [10, 11, 12])], 10)\n', (109, 222), False, 'from transformer import Batchify\n'), ((706, 720), 'random.seed', 'random.seed', (['(0)'], {}), '(0)\n', (717, 720), False, 'import random\n'), ((730, 864), 'transformer.Batchify', 'Batchify', (['[([1, 2], [3, 4]), ([3, 4], [5, 6]), ([5, 6], [7, 8]), ([7, 8], [9, 10]), (\n [9, 10], [11, 12]), ([11, 12], [13, 14])]', '(16)'], {}), '([([1, 2], [3, 4]), ([3, 4], [5, 6]), ([5, 6], [7, 8]), ([7, 8], [9,\n 10]), ([9, 10], [11, 12]), ([11, 12], [13, 14])], 16)\n', (738, 864), False, 'from transformer import Batchify\n')] |
from keras import Input, Model
from keras.applications import MobileNet
from keras.layers import SeparableConv2D, BatchNormalization, Dense, Dropout, GlobalAveragePooling2D, Activation, \
Flatten
from keras.optimizers import Adam
from keras.regularizers import l2
from nets_custom import net_factory
from utils.helpers import l1_loss
class Mobnet_Custom(net_factory.NetworkFactory):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.classes = kwargs['classes']
self.w = kwargs['weights']
self.dropout_global = kwargs['dropout_global']
self.dropout = kwargs['dropout']
self.eta = kwargs['eta']
self.dim = kwargs['dim']
self.regulizer = kwargs['reg']
def create_model(self):
inputs = Input(shape=self.dim, name='input')
model_mobilenet = MobileNet(input_shape=self.dim, alpha=1, depth_multiplier=1, dropout=self.dropout_global,
include_top=False, weights=self.w, input_tensor=None)
x = model_mobilenet(inputs)
x = SeparableConv2D(filters=128, kernel_size=(7, 7), activation='relu', padding='same')(x)
x = Flatten()(x)
x = BatchNormalization()(x)
x = Dense(1024, activation='relu', kernel_regularizer=l2(self.regulizer))(x)
x = Dropout(self.dropout)(x)
z = Dense(self.classes, activation='tanh')(x)
model = Model(inputs=inputs, outputs=z)
adam = Adam(lr=self.eta)
model.compile(optimizer=adam, loss=l1_loss, metrics=['mse', 'mae'] )
print(model.summary())
return model
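# --- Hedged usage sketch (added for illustration; only the kwargs read in __init__ above
# are shown, and the parent NetworkFactory may expect additional ones) ---
#
#     net = Mobnet_Custom(classes=10, weights='imagenet', dropout_global=1e-3,
#                         dropout=0.5, eta=1e-4, dim=(224, 224, 3), reg=1e-4)
#     model = net.create_model()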
| [
"keras.optimizers.Adam",
"keras.layers.Flatten",
"keras.Model",
"keras.applications.MobileNet",
"keras.Input",
"keras.layers.SeparableConv2D",
"keras.regularizers.l2",
"keras.layers.Dense",
"keras.layers.BatchNormalization",
"keras.layers.Dropout"
] | [((783, 818), 'keras.Input', 'Input', ([], {'shape': 'self.dim', 'name': '"""input"""'}), "(shape=self.dim, name='input')\n", (788, 818), False, 'from keras import Input, Model\n'), ((845, 993), 'keras.applications.MobileNet', 'MobileNet', ([], {'input_shape': 'self.dim', 'alpha': '(1)', 'depth_multiplier': '(1)', 'dropout': 'self.dropout_global', 'include_top': '(False)', 'weights': 'self.w', 'input_tensor': 'None'}), '(input_shape=self.dim, alpha=1, depth_multiplier=1, dropout=self.\n dropout_global, include_top=False, weights=self.w, input_tensor=None)\n', (854, 993), False, 'from keras.applications import MobileNet\n'), ((1414, 1445), 'keras.Model', 'Model', ([], {'inputs': 'inputs', 'outputs': 'z'}), '(inputs=inputs, outputs=z)\n', (1419, 1445), False, 'from keras import Input, Model\n'), ((1462, 1479), 'keras.optimizers.Adam', 'Adam', ([], {'lr': 'self.eta'}), '(lr=self.eta)\n', (1466, 1479), False, 'from keras.optimizers import Adam\n'), ((1074, 1162), 'keras.layers.SeparableConv2D', 'SeparableConv2D', ([], {'filters': '(128)', 'kernel_size': '(7, 7)', 'activation': '"""relu"""', 'padding': '"""same"""'}), "(filters=128, kernel_size=(7, 7), activation='relu', padding\n ='same')\n", (1089, 1162), False, 'from keras.layers import SeparableConv2D, BatchNormalization, Dense, Dropout, GlobalAveragePooling2D, Activation, Flatten\n'), ((1173, 1182), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (1180, 1182), False, 'from keras.layers import SeparableConv2D, BatchNormalization, Dense, Dropout, GlobalAveragePooling2D, Activation, Flatten\n'), ((1198, 1218), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (1216, 1218), False, 'from keras.layers import SeparableConv2D, BatchNormalization, Dense, Dropout, GlobalAveragePooling2D, Activation, Flatten\n'), ((1319, 1340), 'keras.layers.Dropout', 'Dropout', (['self.dropout'], {}), '(self.dropout)\n', (1326, 1340), False, 'from keras.layers import SeparableConv2D, BatchNormalization, Dense, Dropout, GlobalAveragePooling2D, Activation, Flatten\n'), ((1356, 1394), 'keras.layers.Dense', 'Dense', (['self.classes'], {'activation': '"""tanh"""'}), "(self.classes, activation='tanh')\n", (1361, 1394), False, 'from keras.layers import SeparableConv2D, BatchNormalization, Dense, Dropout, GlobalAveragePooling2D, Activation, Flatten\n'), ((1284, 1302), 'keras.regularizers.l2', 'l2', (['self.regulizer'], {}), '(self.regulizer)\n', (1286, 1302), False, 'from keras.regularizers import l2\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2015 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import web
import uuid
import logging
from PIL import Image
import pytesseract
import goslate
import json
import base64
LOGGER = logging.getLogger('rest')
urls = ('/enrich', 'Enrich')
supported_filetypes = ['jpg']
supported_languages = ['en', 'de', 'it']
supported_languages_tesseract = {'en': 'eng',
'de': 'deu',
'it': 'ita',
'fr': 'fra',
'es': 'spa',
'sv': 'swe'}
class Enrich:
"""
This class enriches an image by extracting text and then translating said text into a specified language.
"""
def __init__(self):
pass
def GET(self):
"""
This method is the first method that is called. It provides a form that makes uploading an image
with POST possible.
"""
LOGGER.info('GET in Enrich called.')
return """<html><head></head><body>
<form method="POST" enctype="multipart/form-data" action="">
<input type="file" name="image" /><input type="submit" />
</form></body></html>"""
def POST(self):
"""
image: the image that is supposed to be processed.
text: the text that is supposed to be translated.
source: the source language.
target: the target language.
filetype: filetype of the image.
This method gets called on a POST request to /enrich. If the data contains text, then the image will not
be used for operations. If the data contains an image it will be stored in a folder called uploads, using
a uniquely identifying name. The first step is extracting text from the image. If there is no specified
source language the application will try to extract the text, then detect the language in said text and then
try to extract text for a second time. This is supposed to minimize errors made by the ocr engine.
The extracted text is then translated to the specified target language. In a last step all the gathered
information will be returned in form of a json.
In case there is no image passed to the function, a bad request error (400) is raised.
        In case an unsupported filetype is passed to the function, a bad request error (400) is raised.
        In case no filetype is passed to the function, a bad request error (400) is raised.
        In case an unsupported target language is passed to the function, a bad request error (400) is raised.
In case the passed image could not be stored, an internal server error (500) is raised.
In case there is no target language passed to the function, a bad request error (400) is raised.
:return: json with detected language, extracted text and translation of the extracted text.
"""
LOGGER.info('POST in Enrich called.')
text_from_image = None
source_lang = None
detected_lang = 'unk'
corrected_text = None
corrected = False
data = web.input(image={})
if 'text' in data:
corrected_text = base64.b64decode(data.text)
corrected = True
if 'source' in data:
source_lang = str(data.source).lower()
if source_lang not in supported_languages:
source_lang = None
if 'image' in data and not corrected:
image = data.image
if len(image.value) == 0:
LOGGER.debug('400 Bad Request: No image to process. No value.')
raise web.badrequest(message='No image to process. No value.')
if len(image.filename) == 0:
LOGGER.debug('400 Bad Request: No image to process. No filename.')
raise web.badrequest(message='No image to process. No filename.')
if 'filetype' in data:
filetype = str(data.filetype).lower()
if filetype not in supported_filetypes:
LOGGER.debug('400 Bad Request: No support for this filetype.')
raise web.badrequest(message='No support for this filetype.')
else:
LOGGER.debug('400 Bad Request: No file-type specified.')
raise web.badrequest(message='No file-type specified.')
image_dir = 'uploads'
image_id = str(uuid.uuid4())
image_name = image_id + '.' + filetype
try:
                out = open(image_dir + '/' + image_name, 'wb')  # binary mode so the raw image bytes are written intact
out.write(image.file.read())
out.close()
except:
LOGGER.warning('500 Internal Server: Could not store image.')
raise web.internalerror(message='Server: Could not store image.')
finally:
final_image_path = image_dir + '/' + image_name
LOGGER.debug('Image saved in ' + image_dir + '/' + image_name)
if source_lang is None:
text_from_image = self.get_text(final_image_path, None)
else:
text_from_image = self.get_text(final_image_path, supported_languages_tesseract[source_lang])
detected_lang = self.get_language(text_from_image)
if source_lang is None and detected_lang in supported_languages_tesseract:
LOGGER.info('Image is read again with tesseract.')
text_from_image = self.get_text(final_image_path, supported_languages_tesseract[detected_lang])
if detected_lang not in supported_languages:
detected_lang = self.get_language_name(detected_lang)
if corrected:
detected_lang = self.get_language(corrected_text)
if detected_lang not in supported_languages:
detected_lang = self.get_language_name(detected_lang)
if 'target' in data:
target_lang = str(data.target).lower()
if target_lang not in supported_languages:
LOGGER.debug('400 Bad Request: This target language is not supported.')
raise web.badrequest(message='This target language is not supported.')
if source_lang is None:
if not corrected:
translation = self.get_translation(text_from_image, target_lang)
else:
translation = self.get_translation(corrected_text, target_lang)
else:
if not corrected:
translation = self.get_translation(text_from_image, target_lang, source_lang)
else:
translation = self.get_translation(corrected_text, target_lang, source_lang)
else:
LOGGER.debug('400 Bad Request: No language to translate into specified.')
raise web.badrequest(message='No language to translate into specified.')
if not corrected:
return self.get_json(text_from_image, translation, detected_lang)
else:
return self.get_json(corrected_text, translation, detected_lang)
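    # Illustrative client call for the POST endpoint above (not part of the service itself).
    # The form fields and response keys come from POST() and get_json(); the host, port and
    # file name are assumptions made for this sketch.
    #
    #     import requests
    #     with open('scan.jpg', 'rb') as f:
    #         resp = requests.post('http://localhost:8080/enrich',
    #                              data={'filetype': 'jpg', 'source': 'de', 'target': 'en'},
    #                              files={'image': f})
    #     print(resp.json())  # -> {'detected': ..., 'text': ..., 'translation': ...}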
@staticmethod
def get_text(image_path, source_lang=None):
"""
Static method that uses pytesseract to extract text from an image. The image is passed by specifying
a file path. A source language can be specified. Note that pytesseract performs much better if a source
language is passed.
In case there is no image path passed to the function, an internal server error (500) is raised.
In case the specified image cannot be opened, an internal server error (500) is raised.
In case there is an error while extracting text with pytesseract, an internal server error (500) is raised.
        In case no text could be extracted from the image, a not found error (404) is raised.
:param image_path: path to the stored image.
:param source_lang: the source language.
:return: extracted text from the specified image.
"""
LOGGER.info('get_text in Enrich called with parameters: image_path=' + image_path
+ ' and source_lang=' + str(source_lang))
if image_path is None or len(image_path) == 0:
LOGGER.warning('500 Internal Server: Path to image is not valid.')
raise web.internalerror(message='Path to image is not valid.')
try:
image = Image.open(image_path)
except:
LOGGER.warning('500 Internal Server: Could not open image.')
raise web.internalerror(message='Could not open image.')
try:
if source_lang is None:
text = pytesseract.image_to_string(image)
else:
text = pytesseract.image_to_string(image, source_lang)
except:
LOGGER.warning('500 Internal Server: Could not extract text from image.')
raise web.internalerror(message='Could not extract text from image.')
if len(text) <= 0:
            LOGGER.debug('404 Not Found: No text detected in image.')
raise web.notfound(message='No text detected in image.')
LOGGER.debug('\n---------- Text ----------\n' + text + '\n---------- /Text ----------')
return text
@staticmethod
def get_translation(text, target_lang, source_lang=None):
"""
Static method that translates text using goslate. A source language can be specified. If that is not the case
goslate will detect the source language automatically.
In case there is no text passed to the function, an internal server error (500) is raised.
In case there is no target language passed to the function, an internal server error (500) is raised.
In case there is an error while translating text with goslate, an internal server error (500) is raised.
:param text: the original text.
        :param target_lang: the target language.
        :param source_lang: the source language (default None).
:return: translation of the text in the specified target language.
"""
LOGGER.info('get_translation in Enrich called with text and parameters: target_lang=' + target_lang
+ ' and source_lang=' + str(source_lang))
gs = goslate.Goslate()
if text is None or len(text) == 0:
LOGGER.warning('500 Internal Server: No text specified.')
raise web.internalerror(message='No text specified.')
if target_lang is None or len(target_lang) == 0:
LOGGER.warning('500 Internal Server: Target language is not valid.')
raise web.internalerror(message='Target language is not valid.')
try:
if source_lang is None:
translation = gs.translate(text, target_lang)
else:
translation = gs.translate(text, target_lang, source_lang)
except:
            LOGGER.warning('500 Internal Server: Could not translate text.')
            raise web.internalerror(message='Could not translate text.')
LOGGER.debug('\n---------- Translation ----------\n' + translation + '\n---------- /Translation ----------')
return translation
@staticmethod
def get_language(text):
"""
Static method that detects the source language of the specified text. The detected language is returned as an
abbreviation.
In case there is no text passed to the function, an internal server error (500) is raised.
:param text: the original text used for language detection.
:return: abbreviation of the detected language.
"""
LOGGER.info('get_language in Enrich called with text.')
gs = goslate.Goslate()
if text is None or len(text) == 0:
LOGGER.warning('500 Internal Server: No text specified.')
raise web.internalerror(message='No text specified.')
detected_lang = gs.detect(text)
if detected_lang is None:
detected_lang = 'unk'
LOGGER.debug('Detected language: ' + detected_lang)
return detected_lang
@staticmethod
def get_language_name(language):
"""
Static method that transforms the abbreviation for a language returned by goslate into a plain text
representation.
In case there is no language passed to the function, an internal server error (500) is raised.
:param language: language abbreviation.
:return: language plain text representation.
"""
gs = goslate.Goslate()
LOGGER.info('get_language_name in Enrich called with language.')
if language is None or len(language) == 0:
LOGGER.warning('500 Internal Server: No language specified.')
raise web.internalerror(message='No language specified.')
gs_languages = gs.get_languages()
detected_lang = 'Unknown'
if language in gs_languages:
detected_lang = 'detected:' + gs_languages[language]
LOGGER.debug('Detected language: ' + detected_lang)
return detected_lang
@staticmethod
def get_json(text, translation, detected_lang):
"""
        Combines all gathered information and returns it as a JSON string.
In case there is no text passed to the function, an internal server error (500) is raised.
In case there is no translation passed to the function, an internal server error (500) is raised.
In case there is no detected language passed to the function, an internal server error (500) is raised.
:param text: the original text.
:param translation: translation of the original text.
:param detected_lang: the language that has been detected.
:return: json representation of the data.
"""
LOGGER.info('get_json in Enrich called with text, translation and parameters: detected_lang=' + detected_lang)
if text is None or len(text) == 0:
LOGGER.warning('500 Internal Server: No text specified.')
raise web.internalerror(message='No text specified.')
if translation is None or len(translation) == 0:
LOGGER.warning('500 Internal Server: No translation specified.')
raise web.internalerror(message='No translation specified.')
if detected_lang is None or len(detected_lang) == 0:
LOGGER.warning('500 Internal Server: Detected language is not valid.')
raise web.internalerror(message='Detected language is not valid.')
data = {
'detected': detected_lang,
'text': text,
'translation': translation
}
return json.dumps(data)
def main():
"""
Logger is configured so that the output is similar to the logging statements provided by web.py.
A folder for uploads is created. This is where uploaded images will be stored.
"""
logging.basicConfig(format='%(levelname)s - %(module)s - [%(asctime)s] "%(message)s"',
datefmt='%d/%h/%Y %H:%M:%S',
level=logging.DEBUG)
LOGGER.info('Main called for CTE REST Server.')
if not (os.path.isdir('uploads') and os.path.exists('uploads')):
os.makedirs('uploads')
app = web.application(urls, globals())
app.run()
LOGGER.info('REST Server for CTE shut down.')
if __name__ == '__main__':
main() | [
"logging.getLogger",
"logging.basicConfig",
"os.path.exists",
"PIL.Image.open",
"web.badrequest",
"os.makedirs",
"json.dumps",
"web.internalerror",
"base64.b64decode",
"uuid.uuid4",
"web.input",
"os.path.isdir",
"web.notfound",
"pytesseract.image_to_string",
"goslate.Goslate"
] | [((757, 782), 'logging.getLogger', 'logging.getLogger', (['"""rest"""'], {}), "('rest')\n", (774, 782), False, 'import logging\n'), ((15644, 15790), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(levelname)s - %(module)s - [%(asctime)s] "%(message)s\\""""', 'datefmt': '"""%d/%h/%Y %H:%M:%S"""', 'level': 'logging.DEBUG'}), '(format=\n \'%(levelname)s - %(module)s - [%(asctime)s] "%(message)s"\', datefmt=\n \'%d/%h/%Y %H:%M:%S\', level=logging.DEBUG)\n', (15663, 15790), False, 'import logging\n'), ((3730, 3749), 'web.input', 'web.input', ([], {'image': '{}'}), '(image={})\n', (3739, 3749), False, 'import web\n'), ((10984, 11001), 'goslate.Goslate', 'goslate.Goslate', ([], {}), '()\n', (10999, 11001), False, 'import goslate\n'), ((12431, 12448), 'goslate.Goslate', 'goslate.Goslate', ([], {}), '()\n', (12446, 12448), False, 'import goslate\n'), ((13261, 13278), 'goslate.Goslate', 'goslate.Goslate', ([], {}), '()\n', (13276, 13278), False, 'import goslate\n'), ((15408, 15424), 'json.dumps', 'json.dumps', (['data'], {}), '(data)\n', (15418, 15424), False, 'import json\n'), ((15960, 15982), 'os.makedirs', 'os.makedirs', (['"""uploads"""'], {}), "('uploads')\n", (15971, 15982), False, 'import os\n'), ((3807, 3834), 'base64.b64decode', 'base64.b64decode', (['data.text'], {}), '(data.text)\n', (3823, 3834), False, 'import base64\n'), ((7521, 7587), 'web.badrequest', 'web.badrequest', ([], {'message': '"""No language to translate into specified."""'}), "(message='No language to translate into specified.')\n", (7535, 7587), False, 'import web\n'), ((9023, 9079), 'web.internalerror', 'web.internalerror', ([], {'message': '"""Path to image is not valid."""'}), "(message='Path to image is not valid.')\n", (9040, 9079), False, 'import web\n'), ((9114, 9136), 'PIL.Image.open', 'Image.open', (['image_path'], {}), '(image_path)\n', (9124, 9136), False, 'from PIL import Image\n'), ((9794, 9844), 'web.notfound', 'web.notfound', ([], {'message': '"""No text detected in image."""'}), "(message='No text detected in image.')\n", (9806, 9844), False, 'import web\n'), ((11134, 11181), 'web.internalerror', 'web.internalerror', ([], {'message': '"""No text specified."""'}), "(message='No text specified.')\n", (11151, 11181), False, 'import web\n'), ((11339, 11397), 'web.internalerror', 'web.internalerror', ([], {'message': '"""Target language is not valid."""'}), "(message='Target language is not valid.')\n", (11356, 11397), False, 'import web\n'), ((12581, 12628), 'web.internalerror', 'web.internalerror', ([], {'message': '"""No text specified."""'}), "(message='No text specified.')\n", (12598, 12628), False, 'import web\n'), ((13497, 13548), 'web.internalerror', 'web.internalerror', ([], {'message': '"""No language specified."""'}), "(message='No language specified.')\n", (13514, 13548), False, 'import web\n'), ((14780, 14827), 'web.internalerror', 'web.internalerror', ([], {'message': '"""No text specified."""'}), "(message='No text specified.')\n", (14797, 14827), False, 'import web\n'), ((14981, 15035), 'web.internalerror', 'web.internalerror', ([], {'message': '"""No translation specified."""'}), "(message='No translation specified.')\n", (14998, 15035), False, 'import web\n'), ((15199, 15259), 'web.internalerror', 'web.internalerror', ([], {'message': '"""Detected language is not valid."""'}), "(message='Detected language is not valid.')\n", (15216, 15259), False, 'import web\n'), ((15895, 15919), 'os.path.isdir', 'os.path.isdir', (['"""uploads"""'], {}), "('uploads')\n", (15908, 
15919), False, 'import os\n'), ((15924, 15949), 'os.path.exists', 'os.path.exists', (['"""uploads"""'], {}), "('uploads')\n", (15938, 15949), False, 'import os\n'), ((4247, 4303), 'web.badrequest', 'web.badrequest', ([], {'message': '"""No image to process. No value."""'}), "(message='No image to process. No value.')\n", (4261, 4303), False, 'import web\n'), ((4451, 4510), 'web.badrequest', 'web.badrequest', ([], {'message': '"""No image to process. No filename."""'}), "(message='No image to process. No filename.')\n", (4465, 4510), False, 'import web\n'), ((4936, 4985), 'web.badrequest', 'web.badrequest', ([], {'message': '"""No file-type specified."""'}), "(message='No file-type specified.')\n", (4950, 4985), False, 'import web\n'), ((5048, 5060), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (5058, 5060), False, 'import uuid\n'), ((6806, 6870), 'web.badrequest', 'web.badrequest', ([], {'message': '"""This target language is not supported."""'}), "(message='This target language is not supported.')\n", (6820, 6870), False, 'import web\n'), ((9244, 9294), 'web.internalerror', 'web.internalerror', ([], {'message': '"""Could not open image."""'}), "(message='Could not open image.')\n", (9261, 9294), False, 'import web\n'), ((9368, 9402), 'pytesseract.image_to_string', 'pytesseract.image_to_string', (['image'], {}), '(image)\n', (9395, 9402), False, 'import pytesseract\n'), ((9444, 9491), 'pytesseract.image_to_string', 'pytesseract.image_to_string', (['image', 'source_lang'], {}), '(image, source_lang)\n', (9471, 9491), False, 'import pytesseract\n'), ((9612, 9675), 'web.internalerror', 'web.internalerror', ([], {'message': '"""Could not extract text from image."""'}), "(message='Could not extract text from image.')\n", (9629, 9675), False, 'import web\n'), ((11715, 11770), 'web.internalerror', 'web.internalerror', ([], {'message': '"""Could not translate image."""'}), "(message='Could not translate image.')\n", (11732, 11770), False, 'import web\n'), ((4767, 4822), 'web.badrequest', 'web.badrequest', ([], {'message': '"""No support for this filetype."""'}), "(message='No support for this filetype.')\n", (4781, 4822), False, 'import web\n'), ((5386, 5445), 'web.internalerror', 'web.internalerror', ([], {'message': '"""Server: Could not store image."""'}), "(message='Server: Could not store image.')\n", (5403, 5445), False, 'import web\n')] |
#!/usr/bin/env python
# ---------------------------------------------------------------
# Filename: pullTraces.py
# ---------------------------------------------------------------
# Purpose: Open seed files from cwbQuery and pull traces stats
# from the data stream. If trace has sample rate = 0Hz
# remove that trace from the data stream
# ---------------------------------------------------------------
# Methods:
# analyzeRemove() - look for 0Hz traces and remove
# ---------------------------------------------------------------
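# ---------------------------------------------------------------
# Example usage (illustrative sketch; the seed directory path is
# an assumption, not part of this module):
#     pt = PullTraces()
#     pt.analyzeRemove('/path/to/seed/files')
#     cleaned = pt.stream   # streams with 0.0Hz traces removed
# ---------------------------------------------------------------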
import os, sys
import numpy as np
from obspy.core.stream import read
# Remove masked traces from stream
def removeMask(st):
for i in range(len(st)):
tracelen = len(st[i]) # num traces in stream
if tracelen == 1: # single trace stream (check for single mask)
tr = st[i][0] # tmp trace
if isinstance(tr.data, np.ma.masked_array):
tr.data = tr.data.filled(fill_value=0)
elif isinstance(tr.data, np.ndarray):
tr.data = tr.data
			st[i][0] = tr # store new unmasked traces
elif tracelen > 1: # mult trace stream (check for mult masks)
j = 0
#while j < list(range(st[i].count())):
while j < st[i].count():
if j == st[i].count():
break # index = (num traces)
tr = st[i][j] # tmp trace
if isinstance(tr.data, np.ma.masked_array):
tr.data = tr.data.filled(fill_value=0)
elif isinstance(tr.data, np.ndarray):
tr.data = tr.data
				st[i][j] = tr # store new unmasked traces
j = j + 1
return st
class PullTraces(object):
def analyzeRemove(self, seedpath):
# ---------------------------------------
# Read MSEED files from query and analyze
# ---------------------------------------
print("------pullTraces() Start------\n")
os.chdir(seedpath)
filelist = sorted(os.listdir(seedpath), key=os.path.getctime)
self.filelist = filelist
filelen = len(filelist)
stream = [0 for x in range(filelen)] # streams = [][] where the second entry denotes the trace index
i = filelen - 1
while i >= 0:
try:
stream[i] = read(filelist[i]) # read MSEED files from query
except Exception as e:
print("Exception pullTraces() (read(MSEED)): " + str(e))
sys.exit(0)
i = i - 1
# Check for masked arrays and 0 fill to create np.ndarray types
stream = removeMask(stream)
# Remove traces with sample rate = 0.0Hz => NFFT = 0
try:
print("Removing traces with 0.0Hz sampling rate from stream...")
streamlen = len(stream) # number of streams (ie stream files)
self.streamlen = streamlen
RM = False
i = 0 # reset indexing
print("streamlen = %s\n" % str(streamlen))
for i in range(streamlen):
tracelen = stream[i].count() # number of traces in stream
if tracelen == 1:
tr = stream[i][0] # tmp trace
if tr.stats['sampling_rate'] == 0.0:
stream[i].remove(tr)
elif tracelen > 1:
j = 0 # stream will change sizes when trace is removed
#while j < list(range(stream[i].count())):
while j < stream[i].count():
if j == stream[i].count():
break # index = num traces
tr = stream[i][j] # tmp trace
if tr.stats['sampling_rate'] == 0.0:
if not RM:
print("Removing empty traces:")
print(stream[i])
print()
RM = True
stream[i].remove(tr) # rm empty trace
j = 0 # reset index for new size
else:
j = j + 1 # mv to next element
if RM:
print("Final stream with removed traces:")
print(stream[i])
print()
RM = False
self.stream = stream # new stream with removed traces
print("-------pullTraces() Complete-------\n\n")
except KeyboardInterrupt:
print("KeyboardInterrupt pullTraces(): terminating analyzeRemove() method")
sys.exit(0)
print("Method pullTraces() is terminated!")
except Exception as e:
print("Exception pullTraces(): " + str(e))
sys.exit(0)
print("Method pullTraces() is terminated!")
| [
"os.chdir",
"os.listdir",
"sys.exit",
"obspy.core.stream.read"
] | [((1740, 1758), 'os.chdir', 'os.chdir', (['seedpath'], {}), '(seedpath)\n', (1748, 1758), False, 'import os, sys\n'), ((1779, 1799), 'os.listdir', 'os.listdir', (['seedpath'], {}), '(seedpath)\n', (1789, 1799), False, 'import os, sys\n'), ((2037, 2054), 'obspy.core.stream.read', 'read', (['filelist[i]'], {}), '(filelist[i])\n', (2041, 2054), False, 'from obspy.core.stream import read\n'), ((3743, 3754), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (3751, 3754), False, 'import os, sys\n'), ((3876, 3887), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (3884, 3887), False, 'import os, sys\n'), ((2176, 2187), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (2184, 2187), False, 'import os, sys\n')] |
# -*- coding: utf-8 -*-
# Copyright (C) 2019, QuantStack
# SPDX-License-Identifier: BSD-3-Clause
from __future__ import absolute_import, division, print_function, unicode_literals
from .FastSubdirData import FastSubdirData
from conda.models.channel import Channel, prioritize_channels
from conda.core.index import calculate_channel_urls, check_whitelist #, get_index
from conda.models.records import PackageRecord
from conda.models.enums import PackageType
from conda.common.url import join_url
from conda.base.context import context
import threading
import json
import os
def load_channel(subdir_data, result_container):
if not context.quiet:
print("Getting ", subdir_data.channel.name, subdir_data.channel.platform)
return result_container.append(subdir_data.load())
def get_index(channel_urls=(), prepend=True, platform=None,
use_local=False, use_cache=False, unknown=None, prefix=None,
repodata_fn="repodata.json"):
real_urls = calculate_channel_urls(channel_urls, prepend, platform, use_local)
check_whitelist(real_urls)
result = get_env_index(real_urls, repodata_fn)
return result
def get_env_index(channel_urls, repodata_fn="repodata.json"):
threads = []
result = []
sddata = [FastSubdirData(Channel(x), idx, repodata_fn) for idx, x in enumerate(channel_urls)]
for sd in sddata:
t = threading.Thread(target=load_channel, args=(sd, result))
threads.append(t)
t.start()
for t in threads:
t.join()
result = sorted(result, key=lambda x: x.channel_idx)
return result
def to_package_record_from_subjson(subdir, pkg, jsn_string):
channel = subdir.channel
channel_url = subdir.url_w_credentials
info = json.loads(jsn_string)
info['fn'] = pkg
info['channel'] = channel
info['url'] = join_url(channel_url, pkg)
package_record = PackageRecord(**info)
return package_record
def _make_virtual_package(name, version=None):
return PackageRecord(
package_type=PackageType.VIRTUAL_SYSTEM,
name=name,
version=version or '0',
build='0',
channel='@',
subdir=context.subdir,
md5="12345678901234567890123456789012",
build_number=0,
fn=name,
)
def _supplement_index_with_system(index):
cuda_version = context.cuda_version
if cuda_version is not None:
rec = _make_virtual_package('__cuda', cuda_version)
index.append(rec)
dist_name, dist_version = context.os_distribution_name_version
if dist_name == 'OSX':
dist_version = os.environ.get('CONDA_OVERRIDE_OSX', dist_version)
if len(dist_version) > 0:
rec = _make_virtual_package('__osx', dist_version)
index.append(rec)
libc_family, libc_version = context.libc_family_version
if libc_family and libc_version:
libc_version = os.getenv("CONDA_OVERRIDE_{}".format(libc_family.upper()), libc_version)
rec = _make_virtual_package('__' + libc_family, libc_version)
index.append(rec)
| [
"json.loads",
"os.environ.get",
"conda.models.channel.Channel",
"conda.core.index.check_whitelist",
"conda.core.index.calculate_channel_urls",
"threading.Thread",
"conda.common.url.join_url",
"conda.models.records.PackageRecord"
] | [((985, 1051), 'conda.core.index.calculate_channel_urls', 'calculate_channel_urls', (['channel_urls', 'prepend', 'platform', 'use_local'], {}), '(channel_urls, prepend, platform, use_local)\n', (1007, 1051), False, 'from conda.core.index import calculate_channel_urls, check_whitelist\n'), ((1056, 1082), 'conda.core.index.check_whitelist', 'check_whitelist', (['real_urls'], {}), '(real_urls)\n', (1071, 1082), False, 'from conda.core.index import calculate_channel_urls, check_whitelist\n'), ((1742, 1764), 'json.loads', 'json.loads', (['jsn_string'], {}), '(jsn_string)\n', (1752, 1764), False, 'import json\n'), ((1834, 1860), 'conda.common.url.join_url', 'join_url', (['channel_url', 'pkg'], {}), '(channel_url, pkg)\n', (1842, 1860), False, 'from conda.common.url import join_url\n'), ((1882, 1903), 'conda.models.records.PackageRecord', 'PackageRecord', ([], {}), '(**info)\n', (1895, 1903), False, 'from conda.models.records import PackageRecord\n'), ((1989, 2200), 'conda.models.records.PackageRecord', 'PackageRecord', ([], {'package_type': 'PackageType.VIRTUAL_SYSTEM', 'name': 'name', 'version': "(version or '0')", 'build': '"""0"""', 'channel': '"""@"""', 'subdir': 'context.subdir', 'md5': '"""12345678901234567890123456789012"""', 'build_number': '(0)', 'fn': 'name'}), "(package_type=PackageType.VIRTUAL_SYSTEM, name=name, version=\n version or '0', build='0', channel='@', subdir=context.subdir, md5=\n '12345678901234567890123456789012', build_number=0, fn=name)\n", (2002, 2200), False, 'from conda.models.records import PackageRecord\n'), ((1380, 1436), 'threading.Thread', 'threading.Thread', ([], {'target': 'load_channel', 'args': '(sd, result)'}), '(target=load_channel, args=(sd, result))\n', (1396, 1436), False, 'import threading\n'), ((2626, 2676), 'os.environ.get', 'os.environ.get', (['"""CONDA_OVERRIDE_OSX"""', 'dist_version'], {}), "('CONDA_OVERRIDE_OSX', dist_version)\n", (2640, 2676), False, 'import os\n'), ((1277, 1287), 'conda.models.channel.Channel', 'Channel', (['x'], {}), '(x)\n', (1284, 1287), False, 'from conda.models.channel import Channel, prioritize_channels\n')] |
import msal
from settings import settings
from office365.graph_client import GraphClient
def acquire_token_msal():
"""
Acquire token via MSAL
"""
authority_url = 'https://login.microsoftonline.com/{0}'.format(settings['tenant'])
app = msal.ConfidentialClientApplication(
authority=authority_url,
client_id=settings['client_credentials']['client_id'],
client_credential=settings['client_credentials']['client_secret']
)
result = app.acquire_token_for_client(scopes=["https://graph.microsoft.com/.default"])
return result
client = GraphClient(acquire_token_msal)
teams = client.teams.get_all().execute_query()
for team in teams:
print(team.id)
| [
"msal.ConfidentialClientApplication",
"office365.graph_client.GraphClient"
] | [((591, 622), 'office365.graph_client.GraphClient', 'GraphClient', (['acquire_token_msal'], {}), '(acquire_token_msal)\n', (602, 622), False, 'from office365.graph_client import GraphClient\n'), ((259, 450), 'msal.ConfidentialClientApplication', 'msal.ConfidentialClientApplication', ([], {'authority': 'authority_url', 'client_id': "settings['client_credentials']['client_id']", 'client_credential': "settings['client_credentials']['client_secret']"}), "(authority=authority_url, client_id=\n settings['client_credentials']['client_id'], client_credential=settings\n ['client_credentials']['client_secret'])\n", (293, 450), False, 'import msal\n')] |
from setuptools import setup, find_packages
setup(name='pywemo',
version='0.4.33',
description='Access WeMo switches using their SOAP API',
url='http://github.com/pavoni/pywemo',
author='<NAME>',
author_email='<EMAIL>',
license='MIT',
install_requires=['netifaces>=0.10.0', 'requests>=2.0', 'six>=1.10.0'],
packages=find_packages(),
zip_safe=True)
| [
"setuptools.find_packages"
] | [((365, 380), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (378, 380), False, 'from setuptools import setup, find_packages\n')] |
from mparts.configspace import ConfigSpace
import hosts
# If set to True, do as few experiments as quickly as possible to test
# the setup. This is useful to do before the full benchmark suite
# because it will almost certainly uncover misconfigurations that
# could halt a lengthy full benchmark part way through.
sanityRun = True
# For an explanation of configuration spaces and a description of why
# we use '*' and '+' all over this file, see the module documentation
# for mparts.configspace. In short, * combines configuration options,
# while + specifies alternate configurations. Likewise, passing a
# list to mk creates a set of alternate configurations.
mk = ConfigSpace.mk
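# A minimal illustration of the operators described above (purely for
# reference, not used anywhere in this file; the option names are made up):
#   mk(fs = "tmpfs") * mk(cores = [1, 4])   -> two configs, both with fs set
#   mk(mode = "a") + mk(mode = "b")         -> two alternate configs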
##################################################################
# Shared configuration
#
shared = ConfigSpace.unit()
# The primary host that will run the benchmark applications.
shared *= mk(primaryHost = hosts.primaryHost)
# benchRoot specifies the directory on the primary host where MOSBENCH
# was checked out or unpacked.
shared *= mk(benchRoot = "~/mosbench")
# textRoot specifies the directory on the primary host where the text
# to use for the Psearchy indexing benchmark can be found. To
# reproduce the results in the paper, this should be a pristine check
# out of Linux 2.6.35-rc5.
shared *= mk(textRoot = "~/scale-linux")
# kernelRoot specifies the directory on the primary host where the
# kernel source to use for the gmake benchmark can be found. To
# reproduce the results in the paper, this should be a check out of
# Linux 2.6.35-rc5. This can be the same directory used for textRoot
# above.
shared *= mk(kernelRoot = "~/scale-linux")
# fs specifies which type of file system to use. This can be any file
# system type known to mkmounts except hugetlbfs.
shared *= mk(fs = "tmpfs-separate")
# trials is the number of times to run each benchmark. The best
# result will be taken.
if sanityRun:
shared *= mk(trials = 1)
else:
shared *= mk(trials = 3)
# hotplug specifies whether or not to use CPU hotplug to physically
# disable cores not in use by the benchmark. All cores should be
# re-enabled when the benchmark exits, even after an error. Several
# of the benchmarks do not otherwise restrict which cores they use,
# and thus will give bogus results without this.
shared *= mk(hotplug = True)
# cores specifies the number of cores to use. This must be
# non-constant and must be the last variable in the shared
# configuration for the graphing tools to work (which also means it
# generally shouldn't be overridden per benchmark).
if sanityRun:
shared *= mk(cores = [48], nonConst = True)
else:
shared *= mk(cores = [1] + range(0, 49, 4)[1:], nonConst = True)
##################################################################
# Exim
#
# eximBuild - The build name of Exim to run. Corresponds to a
# subdirectory of the exim/ directory that contains an Exim
# installation.
#
# eximPort - The port Exim should listen on.
#
# clients - The number of client load generators to run.
import exim
exim = mk(benchmark = exim.runner, nonConst = True)
exim *= mk(eximBuild = "exim-mod")
exim *= mk(eximPort = 2526)
exim *= mk(clients = 96)
##################################################################
# memcached
#
# getMemcacheClients - A function that takes a destination host and a
# list of ports and returns a list of memcached.MemcachedHost objects
# to use as client load generators.
import memcached
memcached = mk(benchmark = memcached.runner, nonConst = True)
memcached *= mk(getMemcacheClients = hosts.getMemcacheClients)
##################################################################
# Apache
#
# threadsPerCore - The number of Apache threads to run per core.
#
# fileSize - The size of the file to serve, in bytes.
#
# getApacheClients - A function that, given the configuration, returns
# a list of Host objects that should be used for client load
# generators. The same host may be returned multiple times.
#
# getApacheRate - A function that, given the configuration, returns
# the number of connections that each load generator client should
# attempt per second.
#
# getApacheFDLim - A function that, given the configuration, returns
# the FD limit for each load generator client. This, in turn, limits
# the number of open connections each client can maintain at once.
import apache
apache = mk(benchmark = apache.runner, nonConst = True)
apache *= mk(threadsPerCore = 24)
apache *= mk(fileSize = 300)
apache *= mk(getApacheClients = hosts.getApacheClients)
apache *= mk(getApacheRate = lambda cfg: 100 + 400*cfg.cores)
apache *= mk(getApacheFDLim = lambda cfg: max(41 * cfg.cores / 20, 10))
##################################################################
# Postgres
#
# rows - The number of rows in the database.
#
# partitions - The number of tables to split the database across.
#
# batchSize - The number of queries each client should send to
# Postgres at a time. This causes the load generator to act like a
# connection pooler with query aggregation.
#
# randomWritePct - The percentage of queries that should be updates.
#
# sleep - The method Postgres uses to sleep when a lock is taken. Can
# be "sysv" for SysV semaphores or "posix" for POSIX semaphores (that
# is, futexes on Linux).
#
# semasPerSet - For sysv sleep, the number of semaphores per SysV
# semaphore set. In the kernel, each semaphore set is protected by
# one lock. Ignored for posix sleep.
#
# lwScale - Whether or not to use scalable lightweight locks
# (read/write mutexes) in Postgres.
#
# lockScale - Whether or not to use scalable database locks in
# Postgres. Enabling scalable database locks requires scalable
# lightweight locks.
#
# lockPartitions - The number of partitions for the database lock
# manager. Each partition is protected by an underlying lightweight
# lock. This must be a power of 2. The Postgres default is 1<<4.
#
# malloc - The malloc implementation to use in Postgres. Must be
# tcmalloc or glibc. For tcmalloc, you'll need to install the
# tcmalloc library.
#
# bufferCache - The size of the Postgres buffer cache, in megabytes.
import postgres
postgres = mk(benchmark = postgres.runner, nonConst = True)
postgres *= mk(postgresClient = hosts.postgresClient)
postgres *= mk(rows = 10000000)
postgres *= mk(partitions = 0)
postgres *= mk(batchSize = 256)
if sanityRun:
postgres *= mk(randomWritePct = [5])
else:
postgres *= mk(randomWritePct = [0, 5])
pgopt = (mk(sleep = "sysv") * mk(semasPerSet = 16) *
mk(lwScale = True) * mk(lockScale = True) *
mk(lockPartitions = 1<<10))
pgstock = (mk(sleep = "sysv") * mk(semasPerSet = 16) *
mk(lwScale = False) * mk(lockScale = False) *
mk(lockPartitions = 1<<4))
postgres *= pgopt + pgstock
postgres *= mk(malloc = "tcmalloc")
postgres *= mk(bufferCache = 2048)
##################################################################
# gmake
#
import gmake
gmake = mk(benchmark = gmake.runner, nonConst = True)
##################################################################
# psearchy
#
# mode - The mode to run mkdb in. Must be "thread" or "process".
#
# seq - The sequence to assign cores in. Must be "seq" for sequential
# assignment or "rr" for round-robin assignment.
#
# mem - How much memory to allocate to the hash table on each core, in
# megabytes.
#
# dblim - The maximum number of entries to store per Berkeley DB file.
# None for no limit.
import psearchy
psearchy = mk(benchmark = psearchy.runner, nonConst = True)
if sanityRun:
psearchy *= (mk(mode = ["thread"]) * mk(order = ["seq"]) +
mk(mode = ["process"]) * mk(order = ["rr"]))
else:
psearchy *= (mk(mode = ["thread"]) * mk(order = ["seq"]) +
mk(mode = ["process"]) * mk(order = ["seq", "rr"]))
psearchy *= mk(mem = 48)
psearchy *= mk(dblim = 200000)
##################################################################
# Metis
#
# streamflow - Whether or not to use the Streamflow parallel
# allocator.
#
# model - The memory allocation model to use. Either "default" to use
# 4K pages or "hugetlb" to 2M pages. "hugetlb" requires the
# Streamflow allocator.
#
# order - The sequence to assign cores in. "seq" or "rr".
import metis
metis = mk(benchmark = metis.runner, nonConst = True)
metis *= mk(streamflow = True)
metis *= mk(model = ["hugetlb", "default"])
metis *= mk(order = ["rr"])
##################################################################
# Complete configuration
#
# XXX Hmm. Constant analysis is space-global right now, so combining
# spaces for different benchmarks may give odd results.
# We compute the product of the benchmark configurations with the
# shared configuration instead of the other way around so that we will
# perform all configurations of a given benchmark before moving on to
# the next, even if the shared configuration space contains more than
# one configuration. Furthermore, instead of computing the regular
# product, we compute a "merge" product, where assignments from the
# left will override assignments to the same variables from the right.
configSpace = ((exim + memcached + apache + postgres + gmake + psearchy + metis)
.merge(shared))
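# Rough illustration of the merge product described above (hypothetical values,
# not part of this configuration): if a benchmark space sets trials = 1 and the
# shared space sets trials = 3, benchSpace.merge(shared) keeps trials = 1,
# because assignments from the left override those from the right.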
#configSpace = exim.merge(shared)
#configSpace = memcached.merge(shared)
#configSpace = apache.merge(shared)
#configSpace = postgres.merge(shared)
#configSpace = gmake.merge(shared)
#configSpace = psearchy.merge(shared)
#configSpace = metis.merge(shared)
##################################################################
# Run
#
if __name__ == "__main__":
from mparts.manager import generateManagers
from mparts.rpc import print_remote_exception
import sys
sys.excepthook = print_remote_exception
for (m, cfg) in generateManagers("sanity" if sanityRun else "results", configSpace):
cfg.benchmark.run(m, cfg)
| [
"mparts.manager.generateManagers",
"mparts.configspace.ConfigSpace.unit"
] | [((794, 812), 'mparts.configspace.ConfigSpace.unit', 'ConfigSpace.unit', ([], {}), '()\n', (810, 812), False, 'from mparts.configspace import ConfigSpace\n'), ((9773, 9840), 'mparts.manager.generateManagers', 'generateManagers', (["('sanity' if sanityRun else 'results')", 'configSpace'], {}), "('sanity' if sanityRun else 'results', configSpace)\n", (9789, 9840), False, 'from mparts.manager import generateManagers\n')] |
from __future__ import division
from builtins import str
from past.utils import old_div
import requests
import os
import logging
import datetime
logger = logging.getLogger('main_logger')
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "isb_cgc.settings")
import django
django.setup()
from cohorts.metadata_helpers import get_sql_connection
from projects.models import Public_Data_Tables, Program
from google_helpers.bigquery.gcs_path_support import BigQueryGcsPathSupport, BigQuerySupport
from isb_cgc import settings
db = None
cursor = None
INDEXD_URI = settings.INDEXD_URI + "?ids="
LIMIT = settings.INDEXD_REQ_LIMIT
EXECUTION_PROJECT = "isb-cgc"
STORAGE_DATASET = "gcs_path_import_staging"
try:
db = get_sql_connection()
cursor = db.cursor()
program_paths = {}
query_base = """
SELECT file_gdc_id, case_barcode, sample_barcode, case_gdc_id, sample_gdc_id
FROM {metadata_data_table}
WHERE file_gdc_id IS NOT NULL AND NOT(file_gdc_id = '')
LIMIT {limit} OFFSET {offset};
"""
count_query_base_cloudsql = """
SELECT COUNT(*)
FROM (
SELECT file_gdc_id
FROM {data_table}
GROUP BY file_gdc_id
) uuids;
"""
count_query_base = """
#standardSQL
SELECT COUNT(*)
FROM (
SELECT file_gdc_id
FROM `{data_table}`
GROUP BY file_gdc_id
);
"""
missing_uuid_check = """
#standardSQL
SELECT curr.file_gdc_id AS curr_file_gdc_id, inc.file_gdc_id AS inc_file_gdc_id
FROM `{curr_data_table}` curr
LEFT JOIN `{inc_data_table}` inc
ON inc.file_gdc_id = curr.file_gdc_id
WHERE inc.file_gdc_id IS NULL OR inc.file_gdc_id = '';
"""
missing_path_check = """
#standardSQL
SELECT curr.file_gdc_id AS curr_file_gdc_id, inc.file_gdc_id AS inc_file_gdc_id, inc.file_gcs_path
FROM `{curr_data_table}` curr
LEFT JOIN `{inc_data_table}` inc
ON inc.file_gdc_id = curr.file_gdc_id
WHERE inc.file_gcs_path = '' OR inc.file_gcs_path IS NULL;
"""
program_tables = Public_Data_Tables.objects.filter(program__in=Program.get_public_programs())
for table in program_tables:
if table.program.name.lower() == 'ccle' and table.build.lower() == 'hg19':
data_table_bq = table.data_table.lower()
prog_build_table = "{}_metadata_data_{}_new_paths_{}".format(
table.program.name.lower(),
table.build.lower(),
datetime.datetime.now().strftime("%Y%m%d_%H%M")
)
gcs_support = BigQueryGcsPathSupport(
EXECUTION_PROJECT,
STORAGE_DATASET,
prog_build_table
)
result = gcs_support.add_temp_path_table()
if 'status' in result and result['status'] == 'TABLE_MADE':
logger.info("Table {} successfully made.".format(prog_build_table))
offset=0
files_found = True
logger.info("Fetching IndexD paths for {} Build {}...".format(table.program.name, table.build))
cursor.execute(count_query_base_cloudsql.format(data_table=table.data_table))
uuid_count = cursor.fetchall()[0][0]
expected_iter = (old_div(uuid_count,100) + (1 if uuid_count % 100 > 0 else 0))
logger.info("Expected calls: {}".format(str(expected_iter)))
iterations = 0
while files_found and iterations < expected_iter:
cursor.execute(query_base.format(limit=LIMIT,offset=offset,metadata_data_table=table.data_table))
files = cursor.fetchall()
files_found = len(files) > 0
if files_found:
indexd_req_string = ",".join([x[0] for x in files])
files_this_fetch = {
x[0]: {
'case_barcode': x[1],
'sample_barcode': x[2],
'case_gdc_id': x[3],
'sample_gdc_id':x[4],
'gcs_path': ''
} for x in files
}
try:
indexd_resp = requests.get(url=INDEXD_URI + indexd_req_string)
except Exception as e:
logger.warning("[WARNING] Exception during fetch - retrying...")
try:
indexd_resp = requests.get(url=INDEXD_URI + indexd_req_string)
except Exception as e:
logger.error("[ERROR] Unable to fetch this batch! Skipping...")
continue
reported = False
if 'records' in indexd_resp.json():
for record in indexd_resp.json()['records']:
for url in record['urls']:
if 'gs://' in url:
if record['did'] in files_this_fetch:
files_this_fetch[record['did']]['gcs_path'] = url
else:
logger.info("Unexpected record in response: {}".format(record['did']))
if not reported:
logger.info("Original request block: {}".format(indexd_req_string))
reported = True
offset += len(files)
# Insert rows
result = gcs_support.add_rows(files_this_fetch)
if 'insertErrors' in result:
logger.error("[ERROR] While inserting {} rows at offset {} into {}, saw insertion errors!".format(
str(len(files)), str(offset), prog_build_table
))
iterations += 1
# Compare tables via BQ
compare_count = BigQuerySupport.execute_query_and_fetch_results(count_query_base.format(
data_table="{}.{}.{}".format(EXECUTION_PROJECT, table.bq_dataset, data_table_bq))
)
main_table_count = int(compare_count[0]['f'][0]['v'])
compare_count = BigQuerySupport.execute_query_and_fetch_results(count_query_base.format(
data_table="{}.{}.{}".format(EXECUTION_PROJECT, STORAGE_DATASET, prog_build_table))
)
new_table_count = int(compare_count[0]['f'][0]['v'])
if main_table_count != new_table_count:
logger.warning("[WARNING] Possible missing paths: count mismatch for {} and {}".format(data_table_bq,prog_build_table))
logger.warning("[WARNING] Current table GDC file UUID count: {}".format(str(main_table_count)))
logger.warning("[WARNING] New table GDC file UUID count: {}".format(str(new_table_count)))
else:
logger.info("[STATUS] {} Build {} path fetch complete - counts match ({} vs. {})".format(
table.program.name,
table.build,
str(main_table_count),
str(new_table_count)
))
missing_uuid_table = "{}_metadata_data_{}_uuid_check_{}".format(
table.program.name.lower(),
table.build.lower(),
datetime.datetime.now().strftime("%Y%m%d_%H%M")
)
logger.info("Creating missing UUID table {}".format(missing_uuid_table))
check_for_uuids = BigQuerySupport.execute_query_to_table(
missing_uuid_check.format(
curr_data_table="{}.{}.{}".format(EXECUTION_PROJECT, table.bq_dataset, data_table_bq),
inc_data_table="{}.{}.{}".format(EXECUTION_PROJECT, STORAGE_DATASET, prog_build_table)
),
EXECUTION_PROJECT,
STORAGE_DATASET,
missing_uuid_table
)
missing_path_table = "{}_metadata_data_{}_path_check_{}".format(
table.program.name.lower(),
table.build.lower(),
datetime.datetime.now().strftime("%Y%m%d_%H%M")
)
logger.info("Creating missing path table {}".format(missing_path_table))
check_for_paths = BigQuerySupport.execute_query_to_table(
missing_path_check.format(
curr_data_table="{}.{}.{}".format(EXECUTION_PROJECT, table.bq_dataset, data_table_bq),
inc_data_table="{}.{}.{}".format(EXECUTION_PROJECT, STORAGE_DATASET, prog_build_table)
),
EXECUTION_PROJECT,
STORAGE_DATASET,
missing_path_table
)
except Exception as e:
logger.error("[ERROR] While updating GCS paths: ")
logger.exception(e)
finally:
    if cursor: cursor.close()
    if db and db.open: db.close()
| [
"logging.getLogger",
"os.environ.setdefault",
"django.setup",
"projects.models.Program.get_public_programs",
"builtins.str",
"past.utils.old_div",
"requests.get",
"datetime.datetime.now",
"cohorts.metadata_helpers.get_sql_connection",
"google_helpers.bigquery.gcs_path_support.BigQueryGcsPathSupport"
] | [((155, 187), 'logging.getLogger', 'logging.getLogger', (['"""main_logger"""'], {}), "('main_logger')\n", (172, 187), False, 'import logging\n'), ((189, 256), 'os.environ.setdefault', 'os.environ.setdefault', (['"""DJANGO_SETTINGS_MODULE"""', '"""isb_cgc.settings"""'], {}), "('DJANGO_SETTINGS_MODULE', 'isb_cgc.settings')\n", (210, 256), False, 'import os\n'), ((272, 286), 'django.setup', 'django.setup', ([], {}), '()\n', (284, 286), False, 'import django\n'), ((716, 736), 'cohorts.metadata_helpers.get_sql_connection', 'get_sql_connection', ([], {}), '()\n', (734, 736), False, 'from cohorts.metadata_helpers import get_sql_connection\n'), ((2186, 2215), 'projects.models.Program.get_public_programs', 'Program.get_public_programs', ([], {}), '()\n', (2213, 2215), False, 'from projects.models import Public_Data_Tables, Program\n'), ((2648, 2724), 'google_helpers.bigquery.gcs_path_support.BigQueryGcsPathSupport', 'BigQueryGcsPathSupport', (['EXECUTION_PROJECT', 'STORAGE_DATASET', 'prog_build_table'], {}), '(EXECUTION_PROJECT, STORAGE_DATASET, prog_build_table)\n', (2670, 2724), False, 'from google_helpers.bigquery.gcs_path_support import BigQueryGcsPathSupport, BigQuerySupport\n'), ((3332, 3356), 'past.utils.old_div', 'old_div', (['uuid_count', '(100)'], {}), '(uuid_count, 100)\n', (3339, 3356), False, 'from past.utils import old_div\n'), ((3447, 3465), 'builtins.str', 'str', (['expected_iter'], {}), '(expected_iter)\n', (3450, 3465), False, 'from builtins import str\n'), ((2559, 2582), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2580, 2582), False, 'import datetime\n'), ((4310, 4358), 'requests.get', 'requests.get', ([], {'url': '(INDEXD_URI + indexd_req_string)'}), '(url=INDEXD_URI + indexd_req_string)\n', (4322, 4358), False, 'import requests\n'), ((6933, 6954), 'builtins.str', 'str', (['main_table_count'], {}), '(main_table_count)\n', (6936, 6954), False, 'from builtins import str\n'), ((7041, 7061), 'builtins.str', 'str', (['new_table_count'], {}), '(new_table_count)\n', (7044, 7061), False, 'from builtins import str\n'), ((7281, 7302), 'builtins.str', 'str', (['main_table_count'], {}), '(main_table_count)\n', (7284, 7302), False, 'from builtins import str\n'), ((7324, 7344), 'builtins.str', 'str', (['new_table_count'], {}), '(new_table_count)\n', (7327, 7344), False, 'from builtins import str\n'), ((7539, 7562), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (7560, 7562), False, 'import datetime\n'), ((8325, 8348), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (8346, 8348), False, 'import datetime\n'), ((4562, 4610), 'requests.get', 'requests.get', ([], {'url': '(INDEXD_URI + indexd_req_string)'}), '(url=INDEXD_URI + indexd_req_string)\n', (4574, 4610), False, 'import requests\n'), ((5965, 5976), 'builtins.str', 'str', (['offset'], {}), '(offset)\n', (5968, 5976), False, 'from builtins import str\n')] |
# /setup.py file
# !/usr/bin/env python3
# -*- coding: utf-8 -*-
# dependencies
import logging
import re
import threading
import asyncio
from time import sleep
from aiogram import Bot, Dispatcher, executor, types
from aiogram.contrib.middlewares.logging import LoggingMiddleware
from aiogram.contrib.fsm_storage.memory import MemoryStorage
from aiogram.dispatcher import FSMContext
from aiogram.utils.executor import start_webhook
from services import quiz_service, user_service, session_service, subject_service
from config import Config
from localization.localization import Localization, Data
from utils import calc_results, ReferralStates, UserNameStates, TeacherStatStates, SynopsesStates, SessionStates, \
SubjectsStates
logging.basicConfig(level=logging.INFO)
# bot initialization
bot = Bot(token=Config.TOKEN, parse_mode="HTML")
dp = Dispatcher(bot, storage=MemoryStorage())
dp.middleware.setup(LoggingMiddleware())
user_s = user_service.UserService()
quiz_s = quiz_service.QuizService()
session_s = session_service.SessionService()
subject_s = subject_service.SubjectService()
local = Localization()
time_between_questions = 0.75
# MARK: Send students like button
async def send_students_like_button(message, state):
telegram_id = message.from_user.id
language = user_s.get_language(telegram_id)
students = user_s.get_teacher_students(telegram_id)
poll_keyboard = types.ReplyKeyboardMarkup(resize_keyboard=True)
poll_keyboard.add(local.data[Data.CANCEL_BUTTON][language])
language = user_s.get_language(telegram_id=telegram_id)
for index in range(len(students)):
student_id = students[index]
name = user_s.get_name(student_id)
poll_keyboard.add(str(index) + ") " + name)
await message.answer(local.data[state][language], reply_markup=poll_keyboard)
# MARK: Send student stats
async def send_student_stats(message, telegram_id, stats, language, index=None):
name = user_s.get_name(telegram_id)
args = [name, stats["correct_all_time"], stats["correct_all_time"] + stats["incorrect_all_time"],
stats["Last_7_days_correct_question_number:"], stats["Last_7_days_question_number:"],
stats["Profile_creation_date"]]
if index is not None:
base_stats = f"№ {index}\n" + local.data[Data.STUDENT_STATS_MESSAGE][language].format(*args)
else:
base_stats = local.data[Data.STUDENT_STATS_MESSAGE][language].format(*args)
other_stats = [local.data[Data.SUBJECT_INFO_FORMAT_MESSAGE][language]]
for subject in local.subjects:
subject_name = Config.DATA_SUBJECT_NAME[subject]
if subject_name in stats:
other_stats.append(f"{local.data[subject][language]} / {stats[subject_name]} / "
f"{stats[subject_name + '_correct']}")
await message.answer(base_stats + "\n".join(other_stats))
# MARK: Send quiz
async def send_quiz(quiz, telegram_id, is_poll, quizzes_number):
if quiz_s.is_in_quiz_set(quiz.quiz_id):
if user_s.is_send_text(telegram_id):
msg = await bot.send_message(chat_id=telegram_id, text=quiz_s.get_quiz_set_text(quiz.quiz_id))
quiz_number = len(user_s.quiz_results[telegram_id]) + 1
if quiz.is_image:
            # keep the file handle open while the photo is being sent
            with open(f"data/images/{Config.DATA_SUBJECT_NAME[quiz.topic]}/{quiz.question}.png", 'rb') as photo:
                await bot.send_photo(chat_id=telegram_id, photo=photo)
options = local.options[:int(quiz.options[0])]
correct_option_ids = quiz.correct_option_ids
if is_poll:
allows_multiple_answers = len(options) > 5
msg = await bot.send_poll(chat_id=telegram_id, question=f"[{quiz_number}:{quizzes_number}]",
is_anonymous=False, options=options,
allows_multiple_answers=allows_multiple_answers)
else:
msg = await bot.send_poll(chat_id=telegram_id, question=f"[{quiz_number}:{quizzes_number}]",
is_anonymous=False, options=options, type="quiz",
correct_option_id=correct_option_ids[0])
else:
options, correct_option_ids = quiz_s.shuffle_options(options=quiz.options,
correct_option_ids=quiz.correct_option_ids)
is_options_correct = True
for option in options:
if len(option) > 100:
is_options_correct = False
if len(quiz.question) <= 300 and is_options_correct:
if is_poll:
allows_multiple_answers = len(options) > 5
msg = await bot.send_poll(chat_id=telegram_id,
question=f"[{quiz_number}:{quizzes_number}]\n" + quiz.question,
is_anonymous=False, options=options,
allows_multiple_answers=allows_multiple_answers)
else:
msg = await bot.send_poll(chat_id=telegram_id,
question=f"[{quiz_number}:{quizzes_number}]\n" + quiz.question,
is_anonymous=False, options=options, type="quiz",
correct_option_id=correct_option_ids[0])
else:
text = f"[{quiz_number}:{quizzes_number}]\n" + quiz.question + "\n"
options2 = local.options[:len(options)]
for index in range(len(options)):
option = options[index]
text += options2[index] + " " + option
if text[-1] != "\n":
text += "\n"
await bot.send_message(chat_id=telegram_id, text=text)
if is_poll:
allows_multiple_answers = len(options) > 5
msg = await bot.send_poll(chat_id=telegram_id, question=" ",
is_anonymous=False, options=options2,
allows_multiple_answers=allows_multiple_answers)
else:
msg = await bot.send_poll(chat_id=telegram_id, question=" ",
is_anonymous=False, options=options2, type="quiz",
correct_option_id=correct_option_ids[0])
    await asyncio.sleep(time_between_questions)  # non-blocking pause so the event loop keeps serving other updates
quiz_s.set_correct_option_ids(quiz_id=msg.poll.id, option_ids=correct_option_ids)
quiz_s.connect_ids(new_id=msg.poll.id, old_id=quiz.quiz_id)
def get_poll_keyboard(buttons):
poll_keyboard = types.ReplyKeyboardMarkup(resize_keyboard=True)
for index in range(0, len(buttons), 2):
if index != len(buttons) - 1:
poll_keyboard.row(buttons[index], buttons[index + 1])
else:
poll_keyboard.add(buttons[index])
return poll_keyboard
# MARK: Send message and buttons
async def send_message_and_buttons(message, buttons, state, args=None):
if args is None:
args = []
if await go_to_start(message):
return
if type(message) is types.Message:
telegram_id = message.from_user.id
else:
telegram_id = message.user.id
language = user_s.get_language(telegram_id=telegram_id)
poll_keyboard = types.ReplyKeyboardMarkup(resize_keyboard=True)
for index in range(0, len(buttons), 2):
button = types.KeyboardButton(local.data[buttons[index]][language])
if index != len(buttons) - 1:
button2 = types.KeyboardButton(local.data[buttons[index + 1]][language])
poll_keyboard.row(button, button2)
else:
poll_keyboard.add(button)
if type(message) is types.Message:
await message.answer(local.data[state][language].format(*args), reply_markup=poll_keyboard)
else:
await bot.send_message(chat_id=telegram_id, text=local.data[state][language].format(*args),
reply_markup=poll_keyboard)
# MARK: Send message if user not registered
async def go_to_start(message):
if type(message) is types.Message:
telegram_id = message.from_user.id
else:
telegram_id = message.user.id
if user_s.user_exists(telegram_id=telegram_id):
return False
poll_keyboard = types.ReplyKeyboardMarkup(resize_keyboard=True)
for language in local.languages:
if type(message) is types.Message:
await message.answer(local.data[Data.NOT_REGISTERED_MESSAGE][language], reply_markup=poll_keyboard)
else:
await bot.send_message(chat_id=telegram_id, text=local.data[Data.NOT_REGISTERED_MESSAGE][language],
reply_markup=poll_keyboard)
return True
# MARK: Start state
@dp.message_handler(commands=["start"])
async def start(message: types.Message):
telegram_id = message.from_user.id
is_new_user = await user_s.post_user(telegram_id=telegram_id)
language = user_s.get_language(telegram_id=telegram_id)
poll_keyboard = types.ReplyKeyboardMarkup(resize_keyboard=True)
poll_keyboard.add(types.KeyboardButton(text=local.data[Data.MAIN_MENU_BUTTON][language]))
if is_new_user:
for language in local.languages:
if local.languages[-1] != language:
await message.answer(text=local.data[Data.NEW_USERS_WELCOME_MESSAGE][language])
else:
await message.answer(text=local.data[Data.NEW_USERS_WELCOME_MESSAGE][language],
reply_markup=poll_keyboard)
else:
await message.answer(text=local.data[Data.WELCOME_MESSAGE][language], reply_markup=poll_keyboard)
# MARK: Set language cmd state
@dp.message_handler(lambda message: local.check_text([Data.LANGUAGE_BUTTON], message.text)[0] or
message.text == "/language")
async def set_language_cmd(message: types.Message):
if await go_to_start(message):
return
poll_keyboard = types.ReplyKeyboardMarkup(resize_keyboard=True)
for language in local.languages:
poll_keyboard.add(types.KeyboardButton(text=local.data[language][language]))
if local.languages[-1] != language:
await message.answer(text=local.data[Data.SET_LANGUAGE_MESSAGE][language])
else:
await message.answer(text=local.data[Data.SET_LANGUAGE_MESSAGE][language], reply_markup=poll_keyboard)
# MARK: Set language result state
@dp.message_handler(lambda message: local.check_text(local.languages, message.text)[0])
async def set_language_result(message: types.Message):
if await go_to_start(message):
return
telegram_id = message.from_user.id
language = local.check_text(local.languages, message.text)[1]
await user_s.set_language(telegram_id=telegram_id, selected_language=language)
profile_info = user_s.get_user_profile(telegram_id=telegram_id, user_language=language)
await send_message_and_buttons(message=message, buttons=local.profile, state=Data.PROFILE_MESSAGE,
args=[profile_info])
# MARK: Set user state cmd
@dp.message_handler(lambda message: local.check_text([Data.USER_STATE_BUTTON], text=message.text)[0] or
message.text == "/user_state")
async def set_user_state_cmd(message: types.Message):
await send_message_and_buttons(message=message, buttons=local.user_state_buttons, state=Data.SET_USER_STATE_MESSAGE)
# MARK: Set user state result
@dp.message_handler(lambda message: local.check_text(local.user_state_buttons, message.text)[0])
async def set_user_state_result(message: types.Message):
if await go_to_start(message):
return
telegram_id = message.from_user.id
user_state = local.check_text(local.user_state_buttons, message.text)[1]
await user_s.set_user_state(telegram_id=telegram_id, user_state=user_state)
language = user_s.get_language(telegram_id)
profile_info = user_s.get_user_profile(telegram_id=telegram_id, user_language=language)
await send_message_and_buttons(message=message, buttons=local.profile, state=Data.PROFILE_MESSAGE,
args=[profile_info])
# MARK: Cancel set profile
@dp.message_handler(lambda message: local.check_text([Data.CANCEL_BUTTON], message.text)[0],
state=SubjectsStates.all_states + UserNameStates.all_states)
async def cancel_set_profile(message: types.Message, state: FSMContext):
telegram_id = message.from_user.id
await state.finish()
language = user_s.get_language(telegram_id)
profile_info = user_s.get_user_profile(telegram_id=telegram_id, user_language=language)
await send_message_and_buttons(message=message, buttons=local.profile, state=Data.PROFILE_MESSAGE,
args=[profile_info])
# MARK: Set user name result
@dp.message_handler(state=UserNameStates.USER_NAME_STATE_0)
async def set_user_name_result(message: types.Message, state: FSMContext):
telegram_id = message.from_user.id
await state.finish()
await user_s.set_user_name(telegram_id=message.from_user.id, name=message.text)
language = user_s.get_language(telegram_id)
profile_info = user_s.get_user_profile(telegram_id=telegram_id, user_language=language)
await send_message_and_buttons(message=message, buttons=local.profile, state=Data.PROFILE_MESSAGE,
args=[profile_info])
# MARK: Set user name
@dp.message_handler(lambda message: local.check_text([Data.USER_NAME_BUTTON], message.text)[0])
async def set_user_name(message: types.Message):
if await go_to_start(message):
return
telegram_id = message.from_user.id
state = dp.current_state(user=telegram_id)
await state.set_state(UserNameStates.USER_NAME_STATE_0)
await send_message_and_buttons(message=message, buttons=[Data.CANCEL_BUTTON], state=Data.INPUT_NAME_MESSAGE)
# MARK: Start ent
@dp.message_handler(lambda message: local.check_text([Data.ENT_BUTTON], message.text)[0])
async def start_ent(message: types.Message):
telegram_id = message.from_user.id
subject_1, subject_2 = user_s.get_student_subjects(telegram_id)
user_s.user_start_new_quiz(telegram_id)
user_s.set_quiz_type(telegram_id, True)
quiz_sets = quiz_s.get_quiz_sets(topic=Data.READING_LITERACY_RUS, quiz_number=15)
user_s.set_quiz_sets(telegram_id=telegram_id, quiz_sets=quiz_sets)
quiz_ids_reading = quiz_s.quiz_set_to_quiz_ids(quiz_sets, 20)
quiz_ids_history = quiz_s.get_specified_number_of_quizzes_by_topic(topic_name=Data.KAZ_HISTORY_RUS, number=15)
quiz_ids_math = quiz_s.get_specified_number_of_quizzes_by_topic(topic_name=Data.MATH_LITERACY_RUS, number=15)
quiz_ids_subject_1 = quiz_s.get_specified_number_of_quizzes_by_topic(topic_name=subject_1, number=20)
quiz_ids_subject_2 = quiz_s.get_specified_number_of_quizzes_by_topic(topic_name=subject_2, number=20)
print(quiz_ids_math)
print(len(quiz_ids_math))
quiz_ids = quiz_ids_history + quiz_ids_math + quiz_ids_reading + quiz_ids_subject_1 + \
quiz_ids_subject_2
quizzes_number = len(quiz_ids)
user_s.set_quiz_size(telegram_id, quizzes_number)
user_s.set_quiz_ids_for_user(quiz_ids=quiz_ids, telegram_id=telegram_id)
session_s.create_session(telegram_id=telegram_id, quiz_ids=quiz_ids, topic_name=Data.ENT)
await send_message_and_buttons(message=message, buttons=[Data.CANCEL_BUTTON],
state=Data.START_SESSION_MESSAGE, args=[message.text, quizzes_number])
await send_quiz(telegram_id=telegram_id, quiz=quiz_s.get_quiz_from_id(
quiz_id=user_s.get_quiz_ids_for_user(telegram_id=message.from_user.id)), is_poll=True,
quizzes_number=quizzes_number)
# MARK: Student main menu state
@dp.message_handler(lambda message: local.check_text([Data.MAIN_MENU_BUTTON], message.text)[0] and
user_s.is_student(telegram_id=message.from_user.id))
async def student_main_menu(message: types.Message):
await send_message_and_buttons(message=message, buttons=local.student_main_menu_buttons,
state=Data.IN_MAIN_MENU_MESSAGE)
# MARK: Student synopses return to main menu
@dp.message_handler(lambda message: local.check_text([Data.MAIN_MENU_BUTTON], message.text)[0],
state=SynopsesStates.SYNOPSES_STATE_0)
async def student_synopses_return_to_main_menu(message: types.Message, state: FSMContext):
await state.finish()
await send_message_and_buttons(message=message, buttons=local.student_main_menu_buttons,
state=Data.IN_MAIN_MENU_MESSAGE)
# MARK: Student synopses subtopic text
@dp.message_handler(lambda message: subject_s.is_subtopic_name(message.text) and user_s.is_student(telegram_id=
message.from_user.id))
async def student_synopses_subtopic_text(message: types.Message):
telegram_id = message.from_user.id
text = subject_s.get_subtopic_text(message.text)
print(text)
await bot.send_message(chat_id=telegram_id, text=text)
await send_message_and_buttons(message=message, buttons=[Data.MAIN_MENU_BUTTON], state=Data.MAIN_MENU_MESSAGE)
# MARK: Student synopses choose subject
@dp.message_handler(lambda message: local.check_text(local.synopses_subjects, message.text)[0],
state=SynopsesStates.SYNOPSES_STATE_0)
async def student_synopses_choose_subject(message: types.Message, state: FSMContext):
telegram_id = message.from_user.id
language = user_s.get_language(telegram_id)
await state.finish()
subtopics = subject_s.get_subject_topics(topic_name=local.check_text(local.synopses_subjects, message.text)[1])
poll_keyboard = get_poll_keyboard(subtopics)
poll_keyboard.add(local.data[Data.MAIN_MENU_BUTTON][language])
await message.answer(local.data[Data.CHOOSE_SUBTOPIC_MESSAGE][language], reply_markup=poll_keyboard)
# MARK: Student synopses state
@dp.message_handler(lambda message: local.check_text([Data.SYNOPSES_BUTTON], message.text)[0] and
user_s.is_student(telegram_id=message.from_user.id))
async def student_synopses(message: types.Message, state: FSMContext):
await state.set_state(SynopsesStates.SYNOPSES_STATE_0)
await send_message_and_buttons(message=message, buttons=local.synopses_subjects, state=Data.CHOOSE_TOPIC_MESSAGE)
# MARK: Choose quiz topic state
@dp.message_handler(lambda message: local.check_text([Data.START_QUIZ_BUTTON], message.text)[0] and
user_s.is_student(telegram_id=message.from_user.id))
async def choose_quiz_topic(message: types.Message, state: FSMContext):
await state.set_state(SessionStates.SESSION_STATE_0)
await send_message_and_buttons(message=message, buttons=local.subjects, state=Data.CHOOSE_TOPIC_MESSAGE)
# MARK: Start session
@dp.message_handler(lambda message: local.check_text(local.subjects, message.text)[0] and
user_s.is_student(telegram_id=message.from_user.id),
state=SessionStates.SESSION_STATE_0)
async def start_new_session(message: types.Message, state: FSMContext):
quizzes_number = 20
await state.finish()
telegram_id = message.from_user.id
topic_name = local.check_text(local.subjects, message.text)[1]
user_s.set_quiz_size(telegram_id, quizzes_number)
user_s.user_start_new_quiz(message.from_user.id)
user_s.set_quiz_type(telegram_id, False)
quiz_ids = quiz_s.get_specified_number_of_quizzes_by_topic(topic_name=topic_name, number=quizzes_number)
user_s.set_quiz_ids_for_user(quiz_ids=quiz_ids, telegram_id=telegram_id)
session_s.create_session(telegram_id=telegram_id, quiz_ids=quiz_ids, topic_name=topic_name)
await send_message_and_buttons(message=message, buttons=[Data.CANCEL_BUTTON],
state=Data.START_SESSION_MESSAGE, args=[message.text, quizzes_number])
await send_quiz(telegram_id=telegram_id, quiz=quiz_s.get_quiz_from_id(
quiz_id=user_s.get_quiz_ids_for_user(telegram_id=message.from_user.id)), is_poll=False,
quizzes_number=quizzes_number)
# MARK: Cancel session
@dp.message_handler(lambda message: local.check_text([Data.CANCEL_BUTTON], message.text)[0] and
user_s.is_student(telegram_id=message.from_user.id))
async def cancel_session(message: types.Message):
telegram_id = message.from_user.id
results = user_s.get_quiz_results(telegram_id)
await session_s.post_session(telegram_id=telegram_id, results=results)
await send_message_and_buttons(message, buttons=[Data.MAIN_MENU_BUTTON], state=Data.RESULTS_MESSAGE,
args=calc_results(results=results))
# MARK: Cancel input referral
@dp.message_handler(lambda message: local.check_text([Data.CANCEL_BUTTON], text=message.text)[0],
state=ReferralStates.REFERRAL_STATE_0)
async def cancel_input_referral(message: types.Message, state: FSMContext):
telegram_id = message.from_user.id
await state.finish()
await send_message_and_buttons(message=message, buttons=local.student_main_menu_buttons,
state=Data.IN_MAIN_MENU_MESSAGE)
# MARK: Input referral
@dp.message_handler(state=ReferralStates.REFERRAL_STATE_0)
async def input_referral(message: types.Message, state: FSMContext):
telegram_id = message.from_user.id
result = await user_s.set_student_to_teacher(telegram_id=telegram_id, referral=message.text)
if result:
await state.finish()
await send_message_and_buttons(message=message, buttons=local.student_main_menu_buttons,
state=Data.TEACHER_ADD_SUCCESS_MESSAGE)
else:
await send_message_and_buttons(message, buttons=[Data.CANCEL_BUTTON], state=Data.TEACHER_ADD_UN_SUCCESS_MESSAGE)
# MARK: Add student to teacher
@dp.message_handler(lambda message: local.check_text([Data.ADD_TEACHER_BUTTON], message.text)[0] and
user_s.is_student(telegram_id=message.from_user.id))
async def add_student_to_teacher_root(message: types.Message):
telegram_id = message.from_user.id
state = dp.current_state(user=telegram_id)
await state.set_state(ReferralStates.REFERRAL_STATE_0)
await send_message_and_buttons(message=message, buttons=[Data.CANCEL_BUTTON], state=Data.INPUT_REFERRAL_MESSAGE)
# MARK: Payment
@dp.message_handler(lambda message: local.check_text([Data.PAYMENT_BUTTON], message.text)[0] and
user_s.is_student(telegram_id=message.from_user.id))
async def student_payment(message: types.Message):
await send_message_and_buttons(message=message, buttons=[Data.MAIN_MENU_BUTTON], state=Data.STUDENT_PAYMENT_MESSAGE)
# MARK: Student stats
@dp.message_handler(lambda message: local.check_text([Data.STUDENT_STATS_BUTTON], message.text)[0] and
user_s.is_student(telegram_id=message.from_user.id))
async def student_stats(message: types.Message):
telegram_id = message.from_user.id
language = user_s.get_language(telegram_id)
stats = await user_s.get_student_stats(telegram_id)
await send_student_stats(message=message, telegram_id=telegram_id, stats=stats, language=language)
# MARK: Answer handler
@dp.poll_answer_handler()
async def handle_poll_answer(quiz_answer: types.PollAnswer):
telegram_id = quiz_answer.user.id
if not session_s.have_active_session(telegram_id):
return
quiz_id = quiz_answer.poll_id
    is_quiz_end = user_s.user_make_answer_for_quiz(
        telegram_id=telegram_id,
        is_option_correct=quiz_s.is_option_correct(quiz_id=quiz_id,
                                                  option=quiz_answer.option_ids[0]),
        number=user_s.get_quiz_size(telegram_id))
if is_quiz_end:
results = user_s.get_quiz_results(telegram_id)
await session_s.post_session(telegram_id=telegram_id, results=results)
await send_message_and_buttons(quiz_answer, buttons=[Data.MAIN_MENU_BUTTON], state=Data.RESULTS_MESSAGE,
args=calc_results(results=results))
else:
await send_quiz(telegram_id=telegram_id,
quiz=quiz_s.get_quiz_from_id(quiz_id=user_s.get_quiz_ids_for_user(telegram_id=telegram_id)),
is_poll=user_s.get_quiz_type(telegram_id), quizzes_number=user_s.get_quiz_size(telegram_id))
# MARK: Teacher Menu
@dp.message_handler(lambda message: local.check_text([Data.MAIN_MENU_BUTTON], message.text)[0] and
user_s.is_teacher(telegram_id=message.from_user.id))
async def teacher_main_menu(message: types.Message):
await send_message_and_buttons(message, buttons=local.teacher_main_menu_buttons, state=Data.IN_MAIN_MENU_MESSAGE)
# MARK: Teacher payment
@dp.message_handler(lambda message: local.check_text([Data.PAYMENT_BUTTON], message.text)[0] and
user_s.is_teacher(telegram_id=message.from_user.id))
async def teacher_payment(message: types.Message):
await send_message_and_buttons(message, buttons=[Data.MAIN_MENU_BUTTON], state=Data.TEACHER_PAYMENT_MESSAGE)
# MARK: Teacher stats
@dp.message_handler(lambda message: local.check_text([Data.TEACHER_STATS_BUTTON], message.text)[0] and
user_s.is_teacher(telegram_id=message.from_user.id))
async def teacher_stats(message: types.Message):
telegram_id = message.from_user.id
students = user_s.get_teacher_students(telegram_id)
language = user_s.get_language(telegram_id)
for index in range(len(students)):
student_id = students[index]
stats = await user_s.get_student_stats(student_id)
await send_student_stats(message=message, telegram_id=student_id, stats=stats, language=language, index=index)
await send_message_and_buttons(message, buttons=[Data.MAIN_MENU_BUTTON, Data.DELETE_STUDENT_FROM_TEACHER_BUTTON],
state=Data.MAIN_MENU_MESSAGE)
# MARK: Delete student from teacher
@dp.message_handler(lambda message: local.check_text([Data.DELETE_STUDENT_FROM_TEACHER_BUTTON], message.text)[0] and
user_s.is_teacher(telegram_id=message.from_user.id))
async def delete_student_from_teacher(message: types.Message, state: FSMContext):
await send_students_like_button(message, state=Data.DELETE_STUDENT_FROM_TEACHER_MESSAGE)
await state.set_state(TeacherStatStates.TEACHER_STAT_STATE_0)
# MARK: Delete student from teacher cancel
@dp.message_handler(lambda message: local.check_text([Data.CANCEL_BUTTON], text=message.text)[0],
state=TeacherStatStates.TEACHER_STAT_STATE_0)
async def delete_student_from_teacher_cancel(message: types.Message, state: FSMContext):
await state.finish()
await send_message_and_buttons(message, buttons=local.teacher_main_menu_buttons,
state=Data.MAIN_MENU_MESSAGE)
# MARK: Delete student from teacher result
@dp.message_handler(state=TeacherStatStates.TEACHER_STAT_STATE_0)
async def delete_student_from_teacher_result(message: types.Message, state: FSMContext):
telegram_id = message.from_user.id
language = user_s.get_language(telegram_id)
nums = re.findall(r"\d+", message.text)
student_pos = -1
if len(nums) > 0:
student_pos = nums[0]
if student_pos != -1:
is_correct = await user_s.delete_student_from_teacher(teacher_id=telegram_id, student_pos=student_pos)
if is_correct:
await state.finish()
await send_message_and_buttons(message, buttons=local.teacher_main_menu_buttons,
state=Data.TEACHER_DELETE_STUDENT_SUCCESS_MESSAGE)
return
await send_students_like_button(message, state=Data.TEACHER_DELETE_STUDENT_UN_SUCCESS_MESSAGE)
# MARK: About us
@dp.message_handler(lambda message: local.check_text([Data.ABOUT_US_BUTTON], text=message.text)[0])
async def about_us(message: types.Message):
await send_message_and_buttons(message, buttons=[Data.MAIN_MENU_BUTTON], state=Data.ABOUT_US_MESSAGE)
# MARK: Teacher referrals
@dp.message_handler(lambda message: local.check_text([Data.TEACHER_REFERRAL_BUTTON], message.text)[0] and
user_s.is_teacher(telegram_id=message.from_user.id))
async def teacher_referrals(message: types.Message):
telegram_id = message.from_user.id
referral = user_s.get_teacher_referral(telegram_id)
await send_message_and_buttons(message, buttons=[Data.MAIN_MENU_BUTTON], state=Data.TEACHER_REFERRAL_MESSAGE)
await message.answer(referral)
# MARK: Subject set state
@dp.message_handler(state=SubjectsStates.all_states)
async def subject_set_state(message: types.Message, state: FSMContext):
telegram_id = message.from_user.id
language = user_s.get_language(telegram_id)
current_state = await state.get_state()
if current_state == SubjectsStates.SUBJECTS_STATE_0.state:
await state.set_state(SubjectsStates.SUBJECTS_STATE_1)
subject = local.check_text(local.subjects, message.text)[1]
await user_s.set_student_subjects(telegram_id, 1, subject)
await send_message_and_buttons(message, buttons=local.subjects + [Data.CANCEL_BUTTON],
state=Data.CHOOSE_TOPIC_MESSAGE)
else:
await state.finish()
subject = local.check_text(local.subjects, message.text)[1]
await user_s.set_student_subjects(telegram_id, 2, subject)
profile_info = user_s.get_user_profile(telegram_id=telegram_id, user_language=language)
await send_message_and_buttons(message, buttons=local.profile, state=Data.PROFILE_MESSAGE,
args=[profile_info])
# MARK: Subject profile
@dp.message_handler(lambda message: local.check_text([Data.SUBJECT_BUTTON], message.text)[0])
async def subject_profile(message: types.Message, state: FSMContext):
telegram_id = message.from_user.id
await state.set_state(SubjectsStates.SUBJECTS_STATE_0)
await send_message_and_buttons(message, buttons=local.subjects + [Data.CANCEL_BUTTON],
state=Data.CHOOSE_TOPIC_MESSAGE)
# MARK: Profile
@dp.message_handler(lambda message: local.check_text([Data.PROFILE_BUTTON], text=message.text)[0] or
message.text == "/profile")
async def profile(message: types.Message):
telegram_id = message.from_user.id
language = user_s.get_language(telegram_id)
profile_info = user_s.get_user_profile(telegram_id=telegram_id, user_language=language)
await send_message_and_buttons(message, buttons=local.profile, state=Data.PROFILE_MESSAGE,
args=[profile_info])
# MARK: Default response
@dp.message_handler()
async def default_response(message: types.Message):
if await go_to_start(message):
return
telegram_id = message.from_user.id
language = user_s.get_language(telegram_id=telegram_id)
poll_keyboard = types.ReplyKeyboardMarkup(resize_keyboard=True)
poll_keyboard.add(local.data[Data.MAIN_MENU_BUTTON][language])
await message.answer(local.data[Data.MAIN_MENU_MESSAGE][language], reply_markup=poll_keyboard)
def load_db():
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
loop.run_until_complete(user_s.get_users())
loop.run_until_complete(quiz_s.load_quizzes())
loop.run_until_complete(subject_s.load_subjects())
async def on_startup(dp):
await bot.set_webhook(Config.WEBHOOK_URL, drop_pending_updates=True)
logging.warning(
'Starting connection. ')
def main():
x = threading.Thread(target=load_db)
x.start()
start_webhook(
dispatcher=dp,
webhook_path=Config.WEBHOOK_PATH,
skip_updates=True,
on_startup=on_startup,
host=Config.WEBAPP_HOST,
port=Config.WEBAPP_PORT,
)
if __name__ == "__main__":
load_db()
executor.start_polling(dp, skip_updates=True)
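# Note added for clarity: two start-up paths coexist in this file. main() would
# serve updates through a webhook (start_webhook + on_startup), while the
# __main__ block loads the database and falls back to long polling via
# executor.start_polling. As written, only the polling path runs, because
# main() is never called.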
| [
"time.sleep",
"aiogram.Bot",
"aiogram.contrib.fsm_storage.memory.MemoryStorage",
"asyncio.new_event_loop",
"aiogram.types.ReplyKeyboardMarkup",
"aiogram.executor.start_polling",
"aiogram.contrib.middlewares.logging.LoggingMiddleware",
"services.user_service.UserService",
"aiogram.utils.executor.start_webhook",
"logging.warning",
"services.session_service.SessionService",
"services.subject_service.SubjectService",
"re.findall",
"logging.basicConfig",
"utils.calc_results",
"aiogram.types.KeyboardButton",
"services.quiz_service.QuizService",
"localization.localization.Localization",
"threading.Thread",
"asyncio.set_event_loop"
] | [((733, 772), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (752, 772), False, 'import logging\n'), ((800, 842), 'aiogram.Bot', 'Bot', ([], {'token': 'Config.TOKEN', 'parse_mode': '"""HTML"""'}), "(token=Config.TOKEN, parse_mode='HTML')\n", (803, 842), False, 'from aiogram import Bot, Dispatcher, executor, types\n'), ((939, 965), 'services.user_service.UserService', 'user_service.UserService', ([], {}), '()\n', (963, 965), False, 'from services import quiz_service, user_service, session_service, subject_service\n'), ((975, 1001), 'services.quiz_service.QuizService', 'quiz_service.QuizService', ([], {}), '()\n', (999, 1001), False, 'from services import quiz_service, user_service, session_service, subject_service\n'), ((1014, 1046), 'services.session_service.SessionService', 'session_service.SessionService', ([], {}), '()\n', (1044, 1046), False, 'from services import quiz_service, user_service, session_service, subject_service\n'), ((1059, 1091), 'services.subject_service.SubjectService', 'subject_service.SubjectService', ([], {}), '()\n', (1089, 1091), False, 'from services import quiz_service, user_service, session_service, subject_service\n'), ((1100, 1114), 'localization.localization.Localization', 'Localization', ([], {}), '()\n', (1112, 1114), False, 'from localization.localization import Localization, Data\n'), ((909, 928), 'aiogram.contrib.middlewares.logging.LoggingMiddleware', 'LoggingMiddleware', ([], {}), '()\n', (926, 928), False, 'from aiogram.contrib.middlewares.logging import LoggingMiddleware\n'), ((1398, 1445), 'aiogram.types.ReplyKeyboardMarkup', 'types.ReplyKeyboardMarkup', ([], {'resize_keyboard': '(True)'}), '(resize_keyboard=True)\n', (1423, 1445), False, 'from aiogram import Bot, Dispatcher, executor, types\n'), ((6374, 6403), 'time.sleep', 'sleep', (['time_between_questions'], {}), '(time_between_questions)\n', (6379, 6403), False, 'from time import sleep\n'), ((6608, 6655), 'aiogram.types.ReplyKeyboardMarkup', 'types.ReplyKeyboardMarkup', ([], {'resize_keyboard': '(True)'}), '(resize_keyboard=True)\n', (6633, 6655), False, 'from aiogram import Bot, Dispatcher, executor, types\n'), ((7296, 7343), 'aiogram.types.ReplyKeyboardMarkup', 'types.ReplyKeyboardMarkup', ([], {'resize_keyboard': '(True)'}), '(resize_keyboard=True)\n', (7321, 7343), False, 'from aiogram import Bot, Dispatcher, executor, types\n'), ((8296, 8343), 'aiogram.types.ReplyKeyboardMarkup', 'types.ReplyKeyboardMarkup', ([], {'resize_keyboard': '(True)'}), '(resize_keyboard=True)\n', (8321, 8343), False, 'from aiogram import Bot, Dispatcher, executor, types\n'), ((9030, 9077), 'aiogram.types.ReplyKeyboardMarkup', 'types.ReplyKeyboardMarkup', ([], {'resize_keyboard': '(True)'}), '(resize_keyboard=True)\n', (9055, 9077), False, 'from aiogram import Bot, Dispatcher, executor, types\n'), ((9990, 10037), 'aiogram.types.ReplyKeyboardMarkup', 'types.ReplyKeyboardMarkup', ([], {'resize_keyboard': '(True)'}), '(resize_keyboard=True)\n', (10015, 10037), False, 'from aiogram import Bot, Dispatcher, executor, types\n'), ((27684, 27716), 're.findall', 're.findall', (['"""\\\\d+"""', 'message.text'], {}), "('\\\\d+', message.text)\n", (27694, 27716), False, 'import re\n'), ((31498, 31545), 'aiogram.types.ReplyKeyboardMarkup', 'types.ReplyKeyboardMarkup', ([], {'resize_keyboard': '(True)'}), '(resize_keyboard=True)\n', (31523, 31545), False, 'from aiogram import Bot, Dispatcher, executor, types\n'), ((31740, 31764), 'asyncio.new_event_loop', 
'asyncio.new_event_loop', ([], {}), '()\n', (31762, 31764), False, 'import asyncio\n'), ((31769, 31797), 'asyncio.set_event_loop', 'asyncio.set_event_loop', (['loop'], {}), '(loop)\n', (31791, 31797), False, 'import asyncio\n'), ((32057, 32097), 'logging.warning', 'logging.warning', (['"""Starting connection. """'], {}), "('Starting connection. ')\n", (32072, 32097), False, 'import logging\n'), ((32129, 32161), 'threading.Thread', 'threading.Thread', ([], {'target': 'load_db'}), '(target=load_db)\n', (32145, 32161), False, 'import threading\n'), ((32180, 32344), 'aiogram.utils.executor.start_webhook', 'start_webhook', ([], {'dispatcher': 'dp', 'webhook_path': 'Config.WEBHOOK_PATH', 'skip_updates': '(True)', 'on_startup': 'on_startup', 'host': 'Config.WEBAPP_HOST', 'port': 'Config.WEBAPP_PORT'}), '(dispatcher=dp, webhook_path=Config.WEBHOOK_PATH, skip_updates\n =True, on_startup=on_startup, host=Config.WEBAPP_HOST, port=Config.\n WEBAPP_PORT)\n', (32193, 32344), False, 'from aiogram.utils.executor import start_webhook\n'), ((32437, 32482), 'aiogram.executor.start_polling', 'executor.start_polling', (['dp'], {'skip_updates': '(True)'}), '(dp, skip_updates=True)\n', (32459, 32482), False, 'from aiogram import Bot, Dispatcher, executor, types\n'), ((872, 887), 'aiogram.contrib.fsm_storage.memory.MemoryStorage', 'MemoryStorage', ([], {}), '()\n', (885, 887), False, 'from aiogram.contrib.fsm_storage.memory import MemoryStorage\n'), ((7405, 7463), 'aiogram.types.KeyboardButton', 'types.KeyboardButton', (['local.data[buttons[index]][language]'], {}), '(local.data[buttons[index]][language])\n', (7425, 7463), False, 'from aiogram import Bot, Dispatcher, executor, types\n'), ((9100, 9170), 'aiogram.types.KeyboardButton', 'types.KeyboardButton', ([], {'text': 'local.data[Data.MAIN_MENU_BUTTON][language]'}), '(text=local.data[Data.MAIN_MENU_BUTTON][language])\n', (9120, 9170), False, 'from aiogram import Bot, Dispatcher, executor, types\n'), ((7524, 7586), 'aiogram.types.KeyboardButton', 'types.KeyboardButton', (['local.data[buttons[index + 1]][language]'], {}), '(local.data[buttons[index + 1]][language])\n', (7544, 7586), False, 'from aiogram import Bot, Dispatcher, executor, types\n'), ((10101, 10158), 'aiogram.types.KeyboardButton', 'types.KeyboardButton', ([], {'text': 'local.data[language][language]'}), '(text=local.data[language][language])\n', (10121, 10158), False, 'from aiogram import Bot, Dispatcher, executor, types\n'), ((20903, 20932), 'utils.calc_results', 'calc_results', ([], {'results': 'results'}), '(results=results)\n', (20915, 20932), False, 'from utils import calc_results, ReferralStates, UserNameStates, TeacherStatStates, SynopsesStates, SessionStates, SubjectsStates\n'), ((24496, 24525), 'utils.calc_results', 'calc_results', ([], {'results': 'results'}), '(results=results)\n', (24508, 24525), False, 'from utils import calc_results, ReferralStates, UserNameStates, TeacherStatStates, SynopsesStates, SessionStates, SubjectsStates\n')] |
import torch
from ..utils import common_functions as c_f
from .base import BaseWrapperHook
from .features import FeaturesChainHook, FeaturesHook, LogitsHook
from .utils import ParallelHook
class DomainSpecificFeaturesHook(FeaturesHook):
def add_if_new(
self, outputs, full_key, output_vals, inputs, model_name, in_keys, domain
):
[domain] = c_f.extract(inputs, [f"{domain}_domain"])
c_f.add_if_new(
outputs,
full_key,
output_vals,
inputs,
model_name,
in_keys,
other_args=[domain],
)
class DomainSpecificLogitsHook(LogitsHook, DomainSpecificFeaturesHook):
pass
class AdaBNHook(BaseWrapperHook):
def __init__(self, domains=None, **kwargs):
super().__init__(**kwargs)
domains = c_f.default(domains, ["src", "target"])
hooks = []
for d in domains:
f_hook = DomainSpecificFeaturesHook(domains=[d], detach=True)
l_hook = DomainSpecificLogitsHook(domains=[d], detach=True)
hooks.append(FeaturesChainHook(f_hook, l_hook))
self.hook = ParallelHook(*hooks)
def call(self, losses, inputs):
with torch.no_grad():
losses, outputs = self.hook(losses, inputs)
return losses, outputs
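# --- Hedged usage sketch (added; not part of the original module) ---
# AdaBNHook pushes each domain's batch through the models with gradients
# disabled so that BatchNorm layers refresh their running statistics per
# domain (AdaBN). The input keys below are assumptions about the surrounding
# hook framework's conventions, not a confirmed API:
#
# hook = AdaBNHook(domains=["src", "target"])
# losses, outputs = hook({}, {
#     "G": feature_extractor, "C": classifier,
#     "src_imgs": src_batch, "target_imgs": target_batch,
#     "src_domain": src_domain_labels, "target_domain": target_domain_labels,
# })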
| [
"torch.no_grad"
] | [((1212, 1227), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1225, 1227), False, 'import torch\n')] |
import os
from functools import partial
import torch
import torch.nn as nn
import numpy as np
from PIL import Image
def configure(opt):
opt.format = 'png'
opt.n_df = 64
if opt.dataset_name == 'Cityscapes':
if opt.use_boundary_map:
opt.input_ch = 36
else:
opt.input_ch = 35
if opt.image_height == 512:
opt.half = True
elif opt.image_height == 1024:
opt.half = False
opt.image_size = (512, 1024) if opt.half else (1024, 2048)
opt.n_gf = 64 if opt.half else 32
opt.output_ch = 3
else:
opt.input_ch = 1
opt.flip = False
opt.VGG_loss = False
if opt.image_height == 512:
opt.half = True
elif opt.image_height == 1024:
opt.half = False
opt.image_size = (512, 512) if opt.half else (1024, 1024)
opt.n_gf = 64 if opt.half else 32
opt.output_ch = 1
dataset_name = opt.dataset_name
model_name = model_namer(height=opt.image_height)
make_dir(dataset_name, model_name, type='checkpoints')
if opt.is_train:
opt.image_dir = os.path.join('./checkpoints', dataset_name, 'Image/Training', model_name)
elif not opt.is_train:
opt.image_dir = os.path.join('./checkpoints', dataset_name, 'Image/Test', model_name)
opt.model_dir = os.path.join('./checkpoints', dataset_name, 'Model', model_name)
log_path = os.path.join('./checkpoints/', dataset_name, 'Model', model_name, 'opt.txt')
if os.path.isfile(log_path) and opt.is_train:
permission = input(
"{} log already exists. Do you really want to overwrite this log? Y/N. : ".format(model_name + '/opt'))
if permission == 'Y':
pass
else:
raise NotImplementedError("Please check {}".format(log_path))
if opt.debug:
opt.display_freq = 1
opt.epoch_decay = 2
opt.n_epochs = 4
opt.report_freq = 1
opt.save_freq = 1
args = vars(opt)
with open(log_path, 'wt') as log:
log.write('-' * 50 + 'Options' + '-' * 50 + '\n')
print('-' * 50 + 'Options' + '-' * 50)
for k, v in sorted(args.items()):
log.write('{}: {}\n'.format(str(k), str(v)))
print("{}: {}".format(str(k), str(v)))
log.write('-' * 50 + 'End' + '-' * 50)
print('-' * 50 + 'End' + '-' * 50)
log.close()
def model_namer(**elements):
name = ''
for k, v in sorted(elements.items()):
name += str(k) + '_' + str(v)
return name
def make_dir(dataset_name=None, model_name=None, type='checkpoints'):
if type == 'checkpoints':
assert model_name, "model_name keyword should be specified for type='checkpoints'"
if not os.path.isdir(os.path.join('./checkpoints', dataset_name)):
os.makedirs(os.path.join('./checkpoints', dataset_name, 'Image', 'Training', model_name))
os.makedirs(os.path.join('./checkpoints', dataset_name, 'Image', 'Test', model_name))
os.makedirs(os.path.join('./checkpoints', dataset_name, 'Model', model_name))
elif os.path.isdir('./checkpoints'):
print("checkpoints directory already exists.")
else:
"""
for other type of directory
"""
pass
def get_grid(input, is_real=True):
if is_real:
grid = torch.FloatTensor(input.shape).fill_(1.0)
elif not is_real:
grid = torch.FloatTensor(input.shape).fill_(0.0)
return grid
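# --- Hedged usage sketch (added for illustration) ---
# get_grid builds the all-real / all-fake target tensor for the GAN loss, e.g.:
#
# real_target = get_grid(d_out, is_real=True).to(d_out.device)
# loss_D_real = torch.nn.functional.mse_loss(d_out, real_target)  # LSGAN-style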
def get_norm_layer(type):
if type == 'BatchNorm2d':
layer = partial(nn.BatchNorm2d, affine=True)
elif type == 'InstanceNorm2d':
layer = partial(nn.InstanceNorm2d, affine=False)
return layer
def get_pad_layer(type):
if type == 'reflection':
layer = nn.ReflectionPad2d
elif type == 'replication':
layer = nn.ReplicationPad2d
elif type == 'zero':
layer = nn.ZeroPad2d
else:
raise NotImplementedError(
"Padding type {} is not valid. Please choose among ['reflection', 'replication', 'zero']".format(type))
return layer
class Manager(object):
def __init__(self, opt):
self.opt = opt
@staticmethod
def report_loss(package):
print("Epoch: {} [{:.{prec}}%] Current_step: {} D_loss: {:.{prec}} G_loss: {:.{prec}}"
.format(package['Epoch'],
package['current_step'] / package['total_step'] * 100,
package['current_step'],
package['D_loss'],
package['G_loss'],
prec=4))
@staticmethod
def adjust_dynamic_range(data, drange_in, drange_out):
if drange_in != drange_out:
scale = (np.float32(drange_out[1]) - np.float32(drange_out[0])) / (
np.float32(drange_in[1]) - np.float32(drange_in[0]))
bias = (np.float32(drange_out[0]) - np.float32(drange_in[0]) * scale)
data = data * scale + bias
return data
def tensor2image(self, image_tensor):
np_image = image_tensor.squeeze().cpu().float().numpy()
if len(np_image.shape) == 3:
np_image = np.transpose(np_image, (1, 2, 0)) # HWC
else:
pass
np_image = self.adjust_dynamic_range(np_image, drange_in=[-1., 1.], drange_out=[0, 255])
np_image = np.clip(np_image, 0, 255).astype(np.uint8)
return np_image
def save_image(self, image_tensor, path):
np_image = self.tensor2image(image_tensor)
pil_image = Image.fromarray(np_image)
pil_image.save(path, self.opt.image_mode)
def save(self, package, image=False, model=False):
if image:
path_real = os.path.join(self.opt.image_dir, str(package['current_step']) + '_' + 'real.png')
path_fake = os.path.join(self.opt.image_dir, str(package['current_step']) + '_' + 'fake.png')
self.save_image(package['target_tensor'], path_real)
self.save_image(package['generated_tensor'], path_fake)
elif model:
path_D = os.path.join(self.opt.model_dir, str(package['current_step']) + '_' + 'D.pt')
path_G = os.path.join(self.opt.model_dir, str(package['current_step']) + '_' + 'G.pt')
torch.save(package['D_state_dict'], path_D)
torch.save(package['G_state_dict'], path_G)
def __call__(self, package):
if package['current_step'] % self.opt.iter_display == 0:
self.save(package, image=True)
if package['current_step'] % self.opt.iter_report == 0:
self.report_loss(package)
def update_lr(init_lr, old_lr, n_epoch_decay, *optims):
delta_lr = init_lr / n_epoch_decay
new_lr = old_lr - delta_lr
for optim in optims:
for param_group in optim.param_groups:
param_group['lr'] = new_lr
print("Learning rate has been updated from {} to {}.".format(old_lr, new_lr))
return new_lr
def weights_init(module):
classname = module.__class__.__name__
if classname.find('Conv') != -1:
module.weight.detach().normal_(0.0, 0.02)
elif classname.find('BatchNorm2d') != -1:
        module.weight.detach().normal_(1.0, 0.02)  # in-place init, matching normal_ used for Conv above; .normal() is not a Tensor method
module.bias.detach().fill_(0.0)
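# --- Hedged usage sketch (added; model names are hypothetical) ---
# weights_init is meant to be mapped over a freshly built network with
# nn.Module.apply, while get_norm_layer / get_pad_layer return constructors
# that the generator and discriminator builders call later:
#
# netG = Generator(opt)                      # hypothetical model class
# netG.apply(weights_init)                   # init Conv / BatchNorm2d weights
# norm_layer = get_norm_layer('InstanceNorm2d')
# pad_layer = get_pad_layer('reflection')
# block = nn.Sequential(pad_layer(1), nn.Conv2d(64, 64, 3), norm_layer(64))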
| [
"numpy.clip",
"PIL.Image.fromarray",
"numpy.float32",
"os.path.join",
"os.path.isfile",
"os.path.isdir",
"functools.partial",
"torch.save",
"numpy.transpose",
"torch.FloatTensor"
] | [((1365, 1429), 'os.path.join', 'os.path.join', (['"""./checkpoints"""', 'dataset_name', '"""Model"""', 'model_name'], {}), "('./checkpoints', dataset_name, 'Model', model_name)\n", (1377, 1429), False, 'import os\n'), ((1445, 1521), 'os.path.join', 'os.path.join', (['"""./checkpoints/"""', 'dataset_name', '"""Model"""', 'model_name', '"""opt.txt"""'], {}), "('./checkpoints/', dataset_name, 'Model', model_name, 'opt.txt')\n", (1457, 1521), False, 'import os\n'), ((1148, 1221), 'os.path.join', 'os.path.join', (['"""./checkpoints"""', 'dataset_name', '"""Image/Training"""', 'model_name'], {}), "('./checkpoints', dataset_name, 'Image/Training', model_name)\n", (1160, 1221), False, 'import os\n'), ((1530, 1554), 'os.path.isfile', 'os.path.isfile', (['log_path'], {}), '(log_path)\n', (1544, 1554), False, 'import os\n'), ((3601, 3637), 'functools.partial', 'partial', (['nn.BatchNorm2d'], {'affine': '(True)'}), '(nn.BatchNorm2d, affine=True)\n', (3608, 3637), False, 'from functools import partial\n'), ((5592, 5617), 'PIL.Image.fromarray', 'Image.fromarray', (['np_image'], {}), '(np_image)\n', (5607, 5617), False, 'from PIL import Image\n'), ((1274, 1343), 'os.path.join', 'os.path.join', (['"""./checkpoints"""', 'dataset_name', '"""Image/Test"""', 'model_name'], {}), "('./checkpoints', dataset_name, 'Image/Test', model_name)\n", (1286, 1343), False, 'import os\n'), ((3145, 3175), 'os.path.isdir', 'os.path.isdir', (['"""./checkpoints"""'], {}), "('./checkpoints')\n", (3158, 3175), False, 'import os\n'), ((3690, 3730), 'functools.partial', 'partial', (['nn.InstanceNorm2d'], {'affine': '(False)'}), '(nn.InstanceNorm2d, affine=False)\n', (3697, 3730), False, 'from functools import partial\n'), ((5218, 5251), 'numpy.transpose', 'np.transpose', (['np_image', '(1, 2, 0)'], {}), '(np_image, (1, 2, 0))\n', (5230, 5251), True, 'import numpy as np\n'), ((2795, 2838), 'os.path.join', 'os.path.join', (['"""./checkpoints"""', 'dataset_name'], {}), "('./checkpoints', dataset_name)\n", (2807, 2838), False, 'import os\n'), ((2865, 2941), 'os.path.join', 'os.path.join', (['"""./checkpoints"""', 'dataset_name', '"""Image"""', '"""Training"""', 'model_name'], {}), "('./checkpoints', dataset_name, 'Image', 'Training', model_name)\n", (2877, 2941), False, 'import os\n'), ((2967, 3039), 'os.path.join', 'os.path.join', (['"""./checkpoints"""', 'dataset_name', '"""Image"""', '"""Test"""', 'model_name'], {}), "('./checkpoints', dataset_name, 'Image', 'Test', model_name)\n", (2979, 3039), False, 'import os\n'), ((3065, 3129), 'os.path.join', 'os.path.join', (['"""./checkpoints"""', 'dataset_name', '"""Model"""', 'model_name'], {}), "('./checkpoints', dataset_name, 'Model', model_name)\n", (3077, 3129), False, 'import os\n'), ((3388, 3418), 'torch.FloatTensor', 'torch.FloatTensor', (['input.shape'], {}), '(input.shape)\n', (3405, 3418), False, 'import torch\n'), ((4930, 4955), 'numpy.float32', 'np.float32', (['drange_out[0]'], {}), '(drange_out[0])\n', (4940, 4955), True, 'import numpy as np\n'), ((5407, 5432), 'numpy.clip', 'np.clip', (['np_image', '(0)', '(255)'], {}), '(np_image, 0, 255)\n', (5414, 5432), True, 'import numpy as np\n'), ((6318, 6361), 'torch.save', 'torch.save', (["package['D_state_dict']", 'path_D'], {}), "(package['D_state_dict'], path_D)\n", (6328, 6361), False, 'import torch\n'), ((6374, 6417), 'torch.save', 'torch.save', (["package['G_state_dict']", 'path_G'], {}), "(package['G_state_dict'], path_G)\n", (6384, 6417), False, 'import torch\n'), ((3468, 3498), 'torch.FloatTensor', 
'torch.FloatTensor', (['input.shape'], {}), '(input.shape)\n', (3485, 3498), False, 'import torch\n'), ((4778, 4803), 'numpy.float32', 'np.float32', (['drange_out[1]'], {}), '(drange_out[1])\n', (4788, 4803), True, 'import numpy as np\n'), ((4806, 4831), 'numpy.float32', 'np.float32', (['drange_out[0]'], {}), '(drange_out[0])\n', (4816, 4831), True, 'import numpy as np\n'), ((4857, 4881), 'numpy.float32', 'np.float32', (['drange_in[1]'], {}), '(drange_in[1])\n', (4867, 4881), True, 'import numpy as np\n'), ((4884, 4908), 'numpy.float32', 'np.float32', (['drange_in[0]'], {}), '(drange_in[0])\n', (4894, 4908), True, 'import numpy as np\n'), ((4958, 4982), 'numpy.float32', 'np.float32', (['drange_in[0]'], {}), '(drange_in[0])\n', (4968, 4982), True, 'import numpy as np\n')] |
import pytest
import torch
from nnrl.nn.actor import (
Alpha,
DeterministicPolicy,
MLPContinuousPolicy,
MLPDeterministicPolicy,
)
from nnrl.nn.critic import ActionValueCritic, MLPVValue
from nnrl.nn.model import EnsembleSpec, build_ensemble, build_single
from ray.rllib import SampleBatch
from raylab.utils.debug import fake_batch
@pytest.fixture(scope="module")
def reward_fn():
def func(obs, act, new_obs):
return new_obs[..., 0] - obs[..., 0] - act.norm(dim=-1)
return func
@pytest.fixture(scope="module")
def termination_fn():
def func(obs, *_):
return torch.randn_like(obs[..., 0]) > 0
return func
@pytest.fixture
def batch(obs_space, action_space):
samples = fake_batch(obs_space, action_space, batch_size=256)
return {k: torch.from_numpy(v) for k, v in samples.items()}
@pytest.fixture
def obs(batch):
return batch[SampleBatch.CUR_OBS]
@pytest.fixture
def rew(batch):
return batch[SampleBatch.REWARDS]
@pytest.fixture
def done(batch):
return batch[SampleBatch.DONES]
@pytest.fixture
def new_obs(batch):
return batch[SampleBatch.NEXT_OBS]
@pytest.fixture
def model_spec():
spec = EnsembleSpec()
spec.network.units = (32,)
spec.network.input_dependent_scale = True
spec.residual = True
return spec
@pytest.fixture
def model(obs_space, action_space, model_spec):
return build_single(obs_space, action_space, model_spec)
@pytest.fixture(params=(1, 2, 4), ids=(f"Models({n})" for n in (1, 2, 4)))
def models(request, obs_space, action_space, model_spec):
spec = model_spec
spec.ensemble_size = request.param
spec.parallelize = True
return build_ensemble(obs_space, action_space, spec)
@pytest.fixture(params=(1, 2), ids=(f"Critics({n})" for n in (1, 2)))
def action_critics(request, obs_space, action_space):
config = {
"encoder": {"units": [32]},
"double_q": request.param == 2,
"parallelize": False,
}
spec = ActionValueCritic.spec_cls.from_dict(config)
act_critic = ActionValueCritic(obs_space, action_space, spec)
return act_critic.q_values, act_critic.target_q_values
@pytest.fixture
def state_critics(obs_space):
spec = MLPVValue.spec_cls()
spec.units = (32,)
spec.activation = "ReLU"
spec.layer_norm = False
main, target = MLPVValue(obs_space, spec), MLPVValue(obs_space, spec)
return main, target
@pytest.fixture
def deterministic_policies(obs_space, action_space):
spec = MLPDeterministicPolicy.spec_cls(
units=(32,), activation="ReLU", norm_beta=1.2
)
policy = MLPDeterministicPolicy(obs_space, action_space, spec)
target_policy = DeterministicPolicy.add_gaussian_noise(policy, noise_stddev=0.3)
return policy, target_policy
@pytest.fixture(params=(True, False), ids=(f"PiScaleDep({b})" for b in (True, False)))
def policy_input_scale(request):
return request.param
@pytest.fixture
def stochastic_policy(obs_space, action_space, policy_input_scale):
config = {"encoder": {"units": (32,)}}
mlp_spec = MLPContinuousPolicy.spec_cls.from_dict(config)
return MLPContinuousPolicy(
obs_space, action_space, mlp_spec, input_dependent_scale=policy_input_scale
)
@pytest.fixture
def alpha_module():
return Alpha(1.0)
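# --- Hedged usage sketch (added; not part of the original conftest) ---
# pytest injects these fixtures into tests by parameter name, e.g.:
#
# def test_reward_fn_shape(reward_fn, batch, obs, new_obs):
#     act = batch[SampleBatch.ACTIONS]
#     rew = reward_fn(obs, act, new_obs)
#     assert rew.shape == obs.shape[:-1]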
| [
"nnrl.nn.critic.ActionValueCritic",
"nnrl.nn.actor.MLPContinuousPolicy.spec_cls.from_dict",
"nnrl.nn.actor.DeterministicPolicy.add_gaussian_noise",
"nnrl.nn.critic.MLPVValue",
"nnrl.nn.model.EnsembleSpec",
"nnrl.nn.model.build_ensemble",
"nnrl.nn.critic.MLPVValue.spec_cls",
"nnrl.nn.critic.ActionValueCritic.spec_cls.from_dict",
"nnrl.nn.actor.Alpha",
"torch.from_numpy",
"torch.randn_like",
"nnrl.nn.actor.MLPDeterministicPolicy",
"pytest.fixture",
"nnrl.nn.actor.MLPDeterministicPolicy.spec_cls",
"nnrl.nn.actor.MLPContinuousPolicy",
"raylab.utils.debug.fake_batch",
"nnrl.nn.model.build_single"
] | [((351, 381), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (365, 381), False, 'import pytest\n'), ((516, 546), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (530, 546), False, 'import pytest\n'), ((1444, 1517), 'pytest.fixture', 'pytest.fixture', ([], {'params': '(1, 2, 4)', 'ids': "(f'Models({n})' for n in (1, 2, 4))"}), "(params=(1, 2, 4), ids=(f'Models({n})' for n in (1, 2, 4)))\n", (1458, 1517), False, 'import pytest\n'), ((1726, 1794), 'pytest.fixture', 'pytest.fixture', ([], {'params': '(1, 2)', 'ids': "(f'Critics({n})' for n in (1, 2))"}), "(params=(1, 2), ids=(f'Critics({n})' for n in (1, 2)))\n", (1740, 1794), False, 'import pytest\n'), ((2780, 2869), 'pytest.fixture', 'pytest.fixture', ([], {'params': '(True, False)', 'ids': "(f'PiScaleDep({b})' for b in (True, False))"}), "(params=(True, False), ids=(f'PiScaleDep({b})' for b in (True,\n False)))\n", (2794, 2869), False, 'import pytest\n'), ((726, 777), 'raylab.utils.debug.fake_batch', 'fake_batch', (['obs_space', 'action_space'], {'batch_size': '(256)'}), '(obs_space, action_space, batch_size=256)\n', (736, 777), False, 'from raylab.utils.debug import fake_batch\n'), ((1181, 1195), 'nnrl.nn.model.EnsembleSpec', 'EnsembleSpec', ([], {}), '()\n', (1193, 1195), False, 'from nnrl.nn.model import EnsembleSpec, build_ensemble, build_single\n'), ((1391, 1440), 'nnrl.nn.model.build_single', 'build_single', (['obs_space', 'action_space', 'model_spec'], {}), '(obs_space, action_space, model_spec)\n', (1403, 1440), False, 'from nnrl.nn.model import EnsembleSpec, build_ensemble, build_single\n'), ((1677, 1722), 'nnrl.nn.model.build_ensemble', 'build_ensemble', (['obs_space', 'action_space', 'spec'], {}), '(obs_space, action_space, spec)\n', (1691, 1722), False, 'from nnrl.nn.model import EnsembleSpec, build_ensemble, build_single\n'), ((1987, 2031), 'nnrl.nn.critic.ActionValueCritic.spec_cls.from_dict', 'ActionValueCritic.spec_cls.from_dict', (['config'], {}), '(config)\n', (2023, 2031), False, 'from nnrl.nn.critic import ActionValueCritic, MLPVValue\n'), ((2050, 2098), 'nnrl.nn.critic.ActionValueCritic', 'ActionValueCritic', (['obs_space', 'action_space', 'spec'], {}), '(obs_space, action_space, spec)\n', (2067, 2098), False, 'from nnrl.nn.critic import ActionValueCritic, MLPVValue\n'), ((2217, 2237), 'nnrl.nn.critic.MLPVValue.spec_cls', 'MLPVValue.spec_cls', ([], {}), '()\n', (2235, 2237), False, 'from nnrl.nn.critic import ActionValueCritic, MLPVValue\n'), ((2499, 2577), 'nnrl.nn.actor.MLPDeterministicPolicy.spec_cls', 'MLPDeterministicPolicy.spec_cls', ([], {'units': '(32,)', 'activation': '"""ReLU"""', 'norm_beta': '(1.2)'}), "(units=(32,), activation='ReLU', norm_beta=1.2)\n", (2530, 2577), False, 'from nnrl.nn.actor import Alpha, DeterministicPolicy, MLPContinuousPolicy, MLPDeterministicPolicy\n'), ((2605, 2658), 'nnrl.nn.actor.MLPDeterministicPolicy', 'MLPDeterministicPolicy', (['obs_space', 'action_space', 'spec'], {}), '(obs_space, action_space, spec)\n', (2627, 2658), False, 'from nnrl.nn.actor import Alpha, DeterministicPolicy, MLPContinuousPolicy, MLPDeterministicPolicy\n'), ((2679, 2743), 'nnrl.nn.actor.DeterministicPolicy.add_gaussian_noise', 'DeterministicPolicy.add_gaussian_noise', (['policy'], {'noise_stddev': '(0.3)'}), '(policy, noise_stddev=0.3)\n', (2717, 2743), False, 'from nnrl.nn.actor import Alpha, DeterministicPolicy, MLPContinuousPolicy, MLPDeterministicPolicy\n'), ((3068, 3114), 
'nnrl.nn.actor.MLPContinuousPolicy.spec_cls.from_dict', 'MLPContinuousPolicy.spec_cls.from_dict', (['config'], {}), '(config)\n', (3106, 3114), False, 'from nnrl.nn.actor import Alpha, DeterministicPolicy, MLPContinuousPolicy, MLPDeterministicPolicy\n'), ((3126, 3226), 'nnrl.nn.actor.MLPContinuousPolicy', 'MLPContinuousPolicy', (['obs_space', 'action_space', 'mlp_spec'], {'input_dependent_scale': 'policy_input_scale'}), '(obs_space, action_space, mlp_spec,\n input_dependent_scale=policy_input_scale)\n', (3145, 3226), False, 'from nnrl.nn.actor import Alpha, DeterministicPolicy, MLPContinuousPolicy, MLPDeterministicPolicy\n'), ((3286, 3296), 'nnrl.nn.actor.Alpha', 'Alpha', (['(1.0)'], {}), '(1.0)\n', (3291, 3296), False, 'from nnrl.nn.actor import Alpha, DeterministicPolicy, MLPContinuousPolicy, MLPDeterministicPolicy\n'), ((793, 812), 'torch.from_numpy', 'torch.from_numpy', (['v'], {}), '(v)\n', (809, 812), False, 'import torch\n'), ((2338, 2364), 'nnrl.nn.critic.MLPVValue', 'MLPVValue', (['obs_space', 'spec'], {}), '(obs_space, spec)\n', (2347, 2364), False, 'from nnrl.nn.critic import ActionValueCritic, MLPVValue\n'), ((2366, 2392), 'nnrl.nn.critic.MLPVValue', 'MLPVValue', (['obs_space', 'spec'], {}), '(obs_space, spec)\n', (2375, 2392), False, 'from nnrl.nn.critic import ActionValueCritic, MLPVValue\n'), ((607, 636), 'torch.randn_like', 'torch.randn_like', (['obs[..., 0]'], {}), '(obs[..., 0])\n', (623, 636), False, 'import torch\n')] |
from typing import List, Any
import numpy as np
from monai.data import NibabelReader
import matplotlib.pyplot as plt
from numpy import ndarray
from pathlib import Path
import seaborn as sns
from monai.transforms import apply_transform
from utils.transforms import get_modality_img_transforms
plt.rcParams["figure.figsize"] = (8.0, 12.0)
PATH = Path("/home/jq/Desktop/rUnet/data/BraTS")
def plot_slices(
processed_preds: List[ndarray],
processed_targets: List[ndarray],
clip_min: float,
clip_max: float,
) -> Any:
fig: plt.Figure
axes: plt.Axes
fig, axes = plt.subplots(nrows=len(processed_preds), ncols=2)
maes, maes_255, masked_maes, masked_255_maes = [], [], [], []
max_value = 0
min_value = 255
for i, (pred, targ) in enumerate(zip(processed_preds, processed_targets)):
mask = targ == 0
pred_255 = np.clip(pred, -clip_min, clip_max)
targ_255 = np.clip(targ, -clip_min, clip_max)
min_pred = min(-clip_min, np.min(pred))
min_targ = min(-clip_min, np.min(targ))
pred_255 = np.floor(255 * ((pred_255 - min_pred) / (clip_max - min_pred)))
targ_255 = np.floor(255 * ((targ_255 - min_targ) / (clip_max - min_targ)))
max_value = max(max(max_value, np.max(pred_255)), np.max(targ_255))
min_value = min(min(min_value, np.min(pred_255)), np.min(targ_255))
pred_255[mask] = 0
targ_255[mask] = 0
mae_255 = np.mean(np.abs(pred_255.ravel() - targ_255.ravel()))
mae = np.mean(np.abs(pred.ravel() - targ.ravel()))
masked_mae = np.mean(np.abs(pred[~mask].ravel() - targ[~mask].ravel()))
masked_255_mae = np.mean(np.abs(pred_255[~mask].ravel() - targ_255[~mask].ravel()))
maes.append(mae)
maes_255.append(mae_255)
masked_maes.append(masked_mae)
masked_255_maes.append(masked_255_mae)
mae_str = "{:1.2f}".format(float(np.round(mae, 2)))
mae_255_str = "{:1.2f}".format(float(np.round(mae_255, 2)))
mask_str = "{:1.2f}".format(float(np.round(masked_mae, 2)))
mask_255_str = "{:1.2f}".format(float(np.round(masked_255_mae, 2)))
axes[i][0].imshow(pred_255, cmap="Greys")
axes[i][0].set_title(
f"Predicted (MAE={mae_str}, MAE_255={mae_255_str}\n masked: (MAE={mask_str}, MAE_255={mask_255_str})"
f"clip_max: {clip_max}, clip_min: {clip_min}",
{"fontsize": 6},
)
axes[i][1].imshow(targ_255, cmap="Greys")
axes[i][1].set_title("Target", {"fontsize": 8})
mae_clean = "{:1.2f}".format(float(np.round(np.mean(maes), 2)))
mae_255_clean = "{:1.2f}".format(float(np.round(np.mean(maes_255), 2)))
mask_clean = "{:1.2f}".format(float(np.round(np.mean(masked_maes), 2)))
mask_255_clean = "{:1.2f}".format(float(np.round(np.mean(masked_255_maes), 2)))
fig.set_size_inches(w=8, h=12)
fig.suptitle(
f"All brains rescaled to [0, 255].\nAverage: (MAE_255={mae_255_clean}, MAE={mae_clean})\n Masked: (MAE_255={mask_255_clean}, MAE={mask_clean})"
)
fig.savefig(f"./plot/clip_max: {clip_max} clip_min:{clip_min}.png")
print(f"max: {max_value}, min: {min_value}")
return np.mean(masked_255_maes)
if __name__ == "__main__":
brains = sorted(list(PATH.glob("**/*_t1.nii.gz")))
# targ_slices = []
# pred_slices = []
# for brain in brains:
# data = np.load(brain)
# target = data["target"]
# predict = data["predict"]
# mask = target != target[0][0][0]
# print(f"target mask mean: {np.mean(target[mask])}")
# cur_targ_slices = [target[64, ...], target[:, 64, :], target[..., 64]]
# cur_pred_slices = [predict[64, ...], predict[:, 64, :], predict[..., 64]]
# targs = np.concatenate(cur_targ_slices, axis=1)
# preds = np.concatenate(cur_pred_slices, axis=1)
# targ_slices.append(targs)
# pred_slices.append(preds)
nib_reader = NibabelReader()
preprocess = get_modality_img_transforms()
fig, axes = plt.subplots(nrows=5, ncols=1)
for idx, brain in enumerate(brains[15:20]):
nib_object = nib_reader.read(str(brain))
img, compatible_meta = nib_reader.get_data(nib_object)
img = apply_transform(preprocess, img)
mask = img != 0.0
img = img[mask]
sns.distplot(img, ax=axes[idx])
fig.savefig("dist.png")
# clips = [5, 4.5, 4, 3.5, 3, 2, 1.8]
# mae_dict = {}
# for clip_min in clips:
# for clip_max in clips:
# print(f"clip_min: {clip_min}, clip_max: {clip_max}")
# mae = plot_slices(pred_slices, targ_slices, clip_min, clip_max)
# mae_dict[f"clip_min:{clip_min}, clip_max:{clip_max}"] = mae
# print(mae_dict)
# for key, value in mae_dict.items():
# print(f"{key}: {value}")
| [
"utils.transforms.get_modality_img_transforms",
"numpy.mean",
"numpy.clip",
"monai.data.NibabelReader",
"pathlib.Path",
"seaborn.distplot",
"monai.transforms.apply_transform",
"numpy.floor",
"numpy.max",
"numpy.min",
"matplotlib.pyplot.subplots",
"numpy.round"
] | [((347, 388), 'pathlib.Path', 'Path', (['"""/home/jq/Desktop/rUnet/data/BraTS"""'], {}), "('/home/jq/Desktop/rUnet/data/BraTS')\n", (351, 388), False, 'from pathlib import Path\n'), ((3189, 3213), 'numpy.mean', 'np.mean', (['masked_255_maes'], {}), '(masked_255_maes)\n', (3196, 3213), True, 'import numpy as np\n'), ((3926, 3941), 'monai.data.NibabelReader', 'NibabelReader', ([], {}), '()\n', (3939, 3941), False, 'from monai.data import NibabelReader\n'), ((3959, 3988), 'utils.transforms.get_modality_img_transforms', 'get_modality_img_transforms', ([], {}), '()\n', (3986, 3988), False, 'from utils.transforms import get_modality_img_transforms\n'), ((4006, 4036), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(5)', 'ncols': '(1)'}), '(nrows=5, ncols=1)\n', (4018, 4036), True, 'import matplotlib.pyplot as plt\n'), ((866, 900), 'numpy.clip', 'np.clip', (['pred', '(-clip_min)', 'clip_max'], {}), '(pred, -clip_min, clip_max)\n', (873, 900), True, 'import numpy as np\n'), ((920, 954), 'numpy.clip', 'np.clip', (['targ', '(-clip_min)', 'clip_max'], {}), '(targ, -clip_min, clip_max)\n', (927, 954), True, 'import numpy as np\n'), ((1070, 1133), 'numpy.floor', 'np.floor', (['(255 * ((pred_255 - min_pred) / (clip_max - min_pred)))'], {}), '(255 * ((pred_255 - min_pred) / (clip_max - min_pred)))\n', (1078, 1133), True, 'import numpy as np\n'), ((1153, 1216), 'numpy.floor', 'np.floor', (['(255 * ((targ_255 - min_targ) / (clip_max - min_targ)))'], {}), '(255 * ((targ_255 - min_targ) / (clip_max - min_targ)))\n', (1161, 1216), True, 'import numpy as np\n'), ((4211, 4243), 'monai.transforms.apply_transform', 'apply_transform', (['preprocess', 'img'], {}), '(preprocess, img)\n', (4226, 4243), False, 'from monai.transforms import apply_transform\n'), ((4302, 4333), 'seaborn.distplot', 'sns.distplot', (['img'], {'ax': 'axes[idx]'}), '(img, ax=axes[idx])\n', (4314, 4333), True, 'import seaborn as sns\n'), ((989, 1001), 'numpy.min', 'np.min', (['pred'], {}), '(pred)\n', (995, 1001), True, 'import numpy as np\n'), ((1037, 1049), 'numpy.min', 'np.min', (['targ'], {}), '(targ)\n', (1043, 1049), True, 'import numpy as np\n'), ((1275, 1291), 'numpy.max', 'np.max', (['targ_255'], {}), '(targ_255)\n', (1281, 1291), True, 'import numpy as np\n'), ((1351, 1367), 'numpy.min', 'np.min', (['targ_255'], {}), '(targ_255)\n', (1357, 1367), True, 'import numpy as np\n'), ((1256, 1272), 'numpy.max', 'np.max', (['pred_255'], {}), '(pred_255)\n', (1262, 1272), True, 'import numpy as np\n'), ((1332, 1348), 'numpy.min', 'np.min', (['pred_255'], {}), '(pred_255)\n', (1338, 1348), True, 'import numpy as np\n'), ((1912, 1928), 'numpy.round', 'np.round', (['mae', '(2)'], {}), '(mae, 2)\n', (1920, 1928), True, 'import numpy as np\n'), ((1976, 1996), 'numpy.round', 'np.round', (['mae_255', '(2)'], {}), '(mae_255, 2)\n', (1984, 1996), True, 'import numpy as np\n'), ((2041, 2064), 'numpy.round', 'np.round', (['masked_mae', '(2)'], {}), '(masked_mae, 2)\n', (2049, 2064), True, 'import numpy as np\n'), ((2113, 2140), 'numpy.round', 'np.round', (['masked_255_mae', '(2)'], {}), '(masked_255_mae, 2)\n', (2121, 2140), True, 'import numpy as np\n'), ((2590, 2603), 'numpy.mean', 'np.mean', (['maes'], {}), '(maes)\n', (2597, 2603), True, 'import numpy as np\n'), ((2662, 2679), 'numpy.mean', 'np.mean', (['maes_255'], {}), '(maes_255)\n', (2669, 2679), True, 'import numpy as np\n'), ((2735, 2755), 'numpy.mean', 'np.mean', (['masked_maes'], {}), '(masked_maes)\n', (2742, 2755), True, 'import numpy as np\n'), ((2815, 2839), 
'numpy.mean', 'np.mean', (['masked_255_maes'], {}), '(masked_255_maes)\n', (2822, 2839), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
from contextlib import contextmanager
import sqlite3
from random import randrange
from functools import wraps
class AnkiModel(object):
class Error(Exception):
pass
FIELD_SEP = u'\x1f'
# Sub-Class Customizations
DATA_PATH = None
FIELDS = tuple()
FINDER = '1=1'
TYPE_ID = None
KEY = None
@classmethod
def setup(cls, **settings):
path = settings.get('path', None)
if path is not None:
cls.DATA_PATH = path
@classmethod
@contextmanager
def execute(cls):
if AnkiModel.DATA_PATH is None:
raise AnkiModel.Error('Database not setup')
conn = sqlite3.connect(AnkiModel.DATA_PATH)
cursor = conn.cursor()
yield cursor
conn.commit()
conn.close()
@classmethod
def select(cls, command):
with cls.execute() as cursor:
out = cursor.execute(command).fetchall()
return out
@classmethod
def update(cls, command):
with cls.execute() as cursor:
cursor.execute(command)
out = cursor.rowcount
return out
@classmethod
def load_all(cls):
cls._cards = []
cls._cards_map = {}
cls._card_pk_map = {}
finder = cls.FINDER
if cls.TYPE_ID is not None:
finder += ' AND mid in (%s)' % ','.join(cls.TYPE_ID)
for row in cls.select('SELECT id, flds, tags from notes WHERE %s;' % finder):
kwargs = {}
kwargs['tags'] = row[2]
kwargs['id'] = row[0]
kwargs['suspended'] = False
# Properly parse the tags field
kwargs['tags'] = kwargs['tags'].split()
# Now we need to parse the custom FIELDS
fields = row[1].split(AnkiModel.FIELD_SEP)
if len(fields) < len(cls.FIELDS):
raise AnkiModel.Error("Not enough fields found: FIELDS(len %i), Actual(len %i)" % (
len(cls.FIELDS),
len(fields)
))
# Now actually parse the fields
for field in cls.FIELDS:
kwargs[field] = unicode(fields.pop(0))
obj = cls(kwargs)
cls._cards.append(obj)
cls._cards_map[obj.id] = obj
# Now we get extra information about these cards
ids = map(lambda card: str(card.id), cls._cards)
for row in cls.select('SELECT nid, queue from cards WHERE nid in (%s)' % ','.join(ids)):
card_id = row[0]
suspended = row[1] == -1
if suspended:
card = cls.get_id(card_id)
card.suspended = True
def mark_suspended(self, boolean):
'''
Marks all of the Cards for this Note as Suspended (or not) depending on the boolean
'''
# Don't change any cards which are already at the right status
if self.suspended == boolean:
return
        # queue = -1 marks a card as suspended in Anki's schema; otherwise the
        # queue is restored from the card's original type (the docstring's intent).
        new_queue = '-1' if boolean else 'cards.type'
        AnkiModel.update('UPDATE cards SET queue=%s WHERE nid=%s;' % (
            new_queue,
            self.id,
        ))
        self.suspended = boolean
def __init__(self, data):
for field, value in data.iteritems():
setattr(self, field, value)
if self.KEY:
key = getattr(self, self.KEY)
self.store(key, self)
loaded = False
ensuring = False
@classmethod
def reload(cls, lazy=True):
cls.loaded = False
# Make sure you can force the reload to happen right away
if not lazy:
cls.ensure_load()
@classmethod
def ensure_load(cls):
# We need to make sure we can use some of these methods
# even though technically the cards aren't "loaded" yet
if cls.ensuring:
return
if not cls.loaded:
cls.ensuring = True
cls.load_all()
cls.ensuring = False
cls.loaded = True
def needs_loaded(func):
'''
Helper decorator, ensures the class is properly loaded before
the method is called (classmethods only).
'''
@wraps(func)
def wrapper(cls, *args, **kwargs):
cls.ensure_load()
return func(cls, *args, **kwargs)
return wrapper
@classmethod
def store(cls, key, val):
cls._card_pk_map[key] = val
@classmethod
@needs_loaded
def find(cls, key):
return cls._card_pk_map[unicode(key)]
@classmethod
@needs_loaded
def random(cls):
return cls._cards[randrange(0, len(cls._cards))]
@classmethod
@needs_loaded
def get_id(cls, id):
return cls._cards_map[id]
@classmethod
def all(cls):
return list(cls.iter_all())
@classmethod
@needs_loaded
def iter_all(cls):
for item in cls._cards:
yield item
| [
"sqlite3.connect",
"functools.wraps"
] | [((707, 743), 'sqlite3.connect', 'sqlite3.connect', (['AnkiModel.DATA_PATH'], {}), '(AnkiModel.DATA_PATH)\n', (722, 743), False, 'import sqlite3\n'), ((4119, 4130), 'functools.wraps', 'wraps', (['func'], {}), '(func)\n', (4124, 4130), False, 'from functools import wraps\n')] |
#######
# Objective: Make a DataFrame using the Abalone dataset (../data/abalone.csv).
# Take two independent random samples of different sizes from the 'rings' field.
# HINT: np.random.choice(df['rings'],10,replace=False) takes 10 random values
# Use box plots to show that the samples do derive from the same population.
######
# Perform imports here:
import plotly.offline as pyo
import plotly.graph_objs as go
import numpy as np
import pandas as pd
# create a DataFrame from the .csv file:
df = pd.read_csv('../data/abalone.csv')
# take two random samples of different sizes:
a = np.random.choice(df['rings'],30,replace=False)
b = np.random.choice(df['rings'],100,replace=False)
# create a data variable with two Box plots:
data = [
go.Box(
y=a,
name='A'
),
go.Box(
y=b,
name='B'
)
]
# add a layout
layout = go.Layout(
title = 'Comparison of two samples taken from the same population'
)
# create a fig from data & layout, and plot the fig
fig = go.Figure(data=data, layout=layout)
pyo.plot(fig, filename='solution5.html')
| [
"plotly.graph_objs.Box",
"pandas.read_csv",
"numpy.random.choice",
"plotly.offline.plot",
"plotly.graph_objs.Layout",
"plotly.graph_objs.Figure"
] | [((501, 535), 'pandas.read_csv', 'pd.read_csv', (['"""../data/abalone.csv"""'], {}), "('../data/abalone.csv')\n", (512, 535), True, 'import pandas as pd\n'), ((587, 635), 'numpy.random.choice', 'np.random.choice', (["df['rings']", '(30)'], {'replace': '(False)'}), "(df['rings'], 30, replace=False)\n", (603, 635), True, 'import numpy as np\n'), ((638, 687), 'numpy.random.choice', 'np.random.choice', (["df['rings']", '(100)'], {'replace': '(False)'}), "(df['rings'], 100, replace=False)\n", (654, 687), True, 'import numpy as np\n'), ((865, 940), 'plotly.graph_objs.Layout', 'go.Layout', ([], {'title': '"""Comparison of two samples taken from the same population"""'}), "(title='Comparison of two samples taken from the same population')\n", (874, 940), True, 'import plotly.graph_objs as go\n'), ((1008, 1043), 'plotly.graph_objs.Figure', 'go.Figure', ([], {'data': 'data', 'layout': 'layout'}), '(data=data, layout=layout)\n', (1017, 1043), True, 'import plotly.graph_objs as go\n'), ((1044, 1084), 'plotly.offline.plot', 'pyo.plot', (['fig'], {'filename': '"""solution5.html"""'}), "(fig, filename='solution5.html')\n", (1052, 1084), True, 'import plotly.offline as pyo\n'), ((745, 766), 'plotly.graph_objs.Box', 'go.Box', ([], {'y': 'a', 'name': '"""A"""'}), "(y=a, name='A')\n", (751, 766), True, 'import plotly.graph_objs as go\n'), ((794, 815), 'plotly.graph_objs.Box', 'go.Box', ([], {'y': 'b', 'name': '"""B"""'}), "(y=b, name='B')\n", (800, 815), True, 'import plotly.graph_objs as go\n')] |
import cv2
import numpy as np
import os
from skimage.morphology import remove_small_objects
def read_folder_volume(path):
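    # Stack every non-hidden image in the folder (in sorted order) into a single 3D volume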
tiffs = [os.path.join(path, f) for f in os.listdir(path) if f[0] != '.']
fnames = sorted(tiffs)
vol = []
for i, fname in enumerate(fnames):
        # cv2.imread expects an IMREAD_* flag; cv2.COLOR_BGR2GRAY is a cvtColor code, not a read flag
        img = cv2.imread(fname, cv2.IMREAD_GRAYSCALE)
vol.append(img)
vol = np.array(vol)
return vol
def binarize(array, threshold_value):
return (array > threshold_value)
def process_volume(path):
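    # Read the slice folder, binarize it, and drop connected components smaller than 256 voxels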
vol = read_folder_volume(path)
threshold = binarize(vol, 0.7)
filtered = remove_small_objects(threshold, min_size=256, connectivity=3)
return filtered | [
"skimage.morphology.remove_small_objects",
"os.listdir",
"os.path.join",
"numpy.array",
"cv2.imread"
] | [((373, 386), 'numpy.array', 'np.array', (['vol'], {}), '(vol)\n', (381, 386), True, 'import numpy as np\n'), ((593, 654), 'skimage.morphology.remove_small_objects', 'remove_small_objects', (['threshold'], {'min_size': '(256)', 'connectivity': '(3)'}), '(threshold, min_size=256, connectivity=3)\n', (613, 654), False, 'from skimage.morphology import remove_small_objects\n'), ((137, 158), 'os.path.join', 'os.path.join', (['path', 'f'], {}), '(path, f)\n', (149, 158), False, 'import os\n'), ((300, 337), 'cv2.imread', 'cv2.imread', (['fname', 'cv2.COLOR_BGR2GRAY'], {}), '(fname, cv2.COLOR_BGR2GRAY)\n', (310, 337), False, 'import cv2\n'), ((168, 184), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (178, 184), False, 'import os\n')] |
"""
useful neural net modules
"""
import operator
from operator import mul
from functools import reduce
import tensorflow as tf
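# Added to masked-out logits so that they end up with ~zero probability after softmax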
VERY_SMALL_NUMBER = -1e10
def softmax_with_mask(shape, x, mask, name=None):
if name is None:
name = softmax_with_mask.__name__
x_masked = x + VERY_SMALL_NUMBER * (1.0 - mask)
x_flat = tf.reshape(x_masked, [reduce(mul, shape[:-1], 1), shape[-1]])
p_flat = tf.nn.softmax(x_flat)
p = tf.reshape(p_flat, shape, name=name)
return p
def softmax_with_base(shape, base_untiled, x, mask=None, name='sig'):
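    # Prepends a tiled "base" logit to x along the last axis, softmaxes, and returns
    # sig = 1 - P(base) together with the probabilities over the entries of x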
if mask is not None:
x += VERY_SMALL_NUMBER * (1.0 - mask)
base_shape = shape[:-1] + [1]
for _ in shape:
base_untiled = tf.expand_dims(base_untiled, -1)
base = tf.tile(base_untiled, base_shape)
c_shape = shape[:-1] + [shape[-1] + 1]
c = tf.concat(len(shape)-1, [base, x])
c_flat = tf.reshape(c, [reduce(mul, shape[:-1], 1), c_shape[-1]])
p_flat = tf.nn.softmax(c_flat)
p_cat = tf.reshape(p_flat, c_shape)
s_aug = tf.slice(p_cat, [0 for _ in shape], [i for i in shape[:-1]] + [1])
s = tf.squeeze(s_aug, [len(shape)-1])
sig = tf.sub(1.0, s, name="sig")
p = tf.slice(p_cat, [0 for _ in shape[:-1]] + [1], shape)
return sig, p
def man_sim(shape, u, v, name='man_sim'):
"""
Manhattan similarity
https://pdfs.semanticscholar.org/6812/fb9ef1c2dad497684a9020d8292041a639ff.pdf
:param shape:
:param u:
:param v:
:param name:
:return:
"""
dist = tf.reduce_sum(tf.abs(u - v), len(shape)-1)
sim = tf.sub(0.0, dist, name=name)
return sim
def linear(input_shape, output_dim, input_, name="linear"):
a = input_shape[-1]
b = output_dim
input_flat = tf.reshape(input_, [reduce(operator.mul, input_shape[:-1], 1), a])
with tf.variable_scope(name):
mat = tf.get_variable("mat", shape=[a, b])
bias = tf.get_variable("bias", shape=[b])
out_flat = tf.matmul(input_flat, mat) + bias
out = tf.reshape(out_flat, input_shape[:-1] + [b])
return out
| [
"tensorflow.tile",
"tensorflow.slice",
"tensorflow.variable_scope",
"tensorflow.get_variable",
"functools.reduce",
"tensorflow.sub",
"tensorflow.matmul",
"tensorflow.nn.softmax",
"tensorflow.reshape",
"tensorflow.expand_dims",
"tensorflow.abs"
] | [((411, 432), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['x_flat'], {}), '(x_flat)\n', (424, 432), True, 'import tensorflow as tf\n'), ((441, 477), 'tensorflow.reshape', 'tf.reshape', (['p_flat', 'shape'], {'name': 'name'}), '(p_flat, shape, name=name)\n', (451, 477), True, 'import tensorflow as tf\n'), ((755, 788), 'tensorflow.tile', 'tf.tile', (['base_untiled', 'base_shape'], {}), '(base_untiled, base_shape)\n', (762, 788), True, 'import tensorflow as tf\n'), ((959, 980), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['c_flat'], {}), '(c_flat)\n', (972, 980), True, 'import tensorflow as tf\n'), ((993, 1020), 'tensorflow.reshape', 'tf.reshape', (['p_flat', 'c_shape'], {}), '(p_flat, c_shape)\n', (1003, 1020), True, 'import tensorflow as tf\n'), ((1033, 1101), 'tensorflow.slice', 'tf.slice', (['p_cat', '[(0) for _ in shape]', '([i for i in shape[:-1]] + [1])'], {}), '(p_cat, [(0) for _ in shape], [i for i in shape[:-1]] + [1])\n', (1041, 1101), True, 'import tensorflow as tf\n'), ((1152, 1178), 'tensorflow.sub', 'tf.sub', (['(1.0)', 's'], {'name': '"""sig"""'}), "(1.0, s, name='sig')\n", (1158, 1178), True, 'import tensorflow as tf\n'), ((1187, 1242), 'tensorflow.slice', 'tf.slice', (['p_cat', '([(0) for _ in shape[:-1]] + [1])', 'shape'], {}), '(p_cat, [(0) for _ in shape[:-1]] + [1], shape)\n', (1195, 1242), True, 'import tensorflow as tf\n'), ((1567, 1595), 'tensorflow.sub', 'tf.sub', (['(0.0)', 'dist'], {'name': 'name'}), '(0.0, dist, name=name)\n', (1573, 1595), True, 'import tensorflow as tf\n'), ((711, 743), 'tensorflow.expand_dims', 'tf.expand_dims', (['base_untiled', '(-1)'], {}), '(base_untiled, -1)\n', (725, 743), True, 'import tensorflow as tf\n'), ((1528, 1541), 'tensorflow.abs', 'tf.abs', (['(u - v)'], {}), '(u - v)\n', (1534, 1541), True, 'import tensorflow as tf\n'), ((1809, 1832), 'tensorflow.variable_scope', 'tf.variable_scope', (['name'], {}), '(name)\n', (1826, 1832), True, 'import tensorflow as tf\n'), ((1848, 1884), 'tensorflow.get_variable', 'tf.get_variable', (['"""mat"""'], {'shape': '[a, b]'}), "('mat', shape=[a, b])\n", (1863, 1884), True, 'import tensorflow as tf\n'), ((1900, 1934), 'tensorflow.get_variable', 'tf.get_variable', (['"""bias"""'], {'shape': '[b]'}), "('bias', shape=[b])\n", (1915, 1934), True, 'import tensorflow as tf\n'), ((2002, 2046), 'tensorflow.reshape', 'tf.reshape', (['out_flat', '(input_shape[:-1] + [b])'], {}), '(out_flat, input_shape[:-1] + [b])\n', (2012, 2046), True, 'import tensorflow as tf\n'), ((358, 384), 'functools.reduce', 'reduce', (['mul', 'shape[:-1]', '(1)'], {}), '(mul, shape[:-1], 1)\n', (364, 384), False, 'from functools import reduce\n'), ((904, 930), 'functools.reduce', 'reduce', (['mul', 'shape[:-1]', '(1)'], {}), '(mul, shape[:-1], 1)\n', (910, 930), False, 'from functools import reduce\n'), ((1753, 1794), 'functools.reduce', 'reduce', (['operator.mul', 'input_shape[:-1]', '(1)'], {}), '(operator.mul, input_shape[:-1], 1)\n', (1759, 1794), False, 'from functools import reduce\n'), ((1954, 1980), 'tensorflow.matmul', 'tf.matmul', (['input_flat', 'mat'], {}), '(input_flat, mat)\n', (1963, 1980), True, 'import tensorflow as tf\n')] |
import os
import torch
import torchvision
from tensorboardX import SummaryWriter
from tqdm import tqdm
from dataset.dataset import AdaMattingDataset
from dataset.pre_process import composite_dataset, gen_train_valid_names
from net.adamatting import AdaMatting
from loss import task_uncertainty_loss
from utility import get_args, get_logger, poly_lr_scheduler, save_checkpoint, AverageMeter, \
compute_mse, compute_sad
def train(model, optimizer, device, args, logger, multi_gpu):
torch.manual_seed(7)
writer = SummaryWriter()
logger.info("Initializing data loaders")
train_dataset = AdaMattingDataset(args.raw_data_path, 'train')
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True,
num_workers=16, pin_memory=True)
valid_dataset = AdaMattingDataset(args.raw_data_path, 'valid')
valid_loader = torch.utils.data.DataLoader(valid_dataset, batch_size=args.batch_size, shuffle=False,
num_workers=16, pin_memory=True)
if args.resume:
logger.info("Start training from saved ckpt")
ckpt = torch.load(args.ckpt_path)
model = ckpt["model"].module
model = model.to(device)
optimizer = ckpt["optimizer"]
start_epoch = ckpt["epoch"] + 1
max_iter = ckpt["max_iter"]
cur_iter = ckpt["cur_iter"]
init_lr = ckpt["init_lr"]
best_loss = ckpt["best_loss"]
else:
logger.info("Start training from scratch")
start_epoch = 0
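        # 43100 = number of composited training images (presumably 431 foregrounds x 100 backgrounds)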
max_iter = 43100 * (1 - args.valid_portion) / args.batch_size * args.epochs
cur_iter = 0
init_lr = args.lr
best_loss = float('inf')
for epoch in range(start_epoch, args.epochs):
# Training
torch.set_grad_enabled(True)
model.train()
for index, (img, gt) in enumerate(train_loader):
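            # Update the learning rate following the polynomial decay schedule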
cur_lr = poly_lr_scheduler(optimizer=optimizer, init_lr=init_lr, iter=cur_iter, max_iter=max_iter)
img = img.type(torch.FloatTensor).to(device) # [bs, 4, 320, 320]
gt_alpha = (gt[:, 0, :, :].unsqueeze(1)).type(torch.FloatTensor).to(device) # [bs, 1, 320, 320]
gt_trimap = gt[:, 1, :, :].type(torch.LongTensor).to(device) # [bs, 320, 320]
optimizer.zero_grad()
trimap_adaption, t_argmax, alpha_estimation = model(img)
L_overall, L_t, L_a = task_uncertainty_loss(pred_trimap=trimap_adaption, pred_trimap_argmax=t_argmax,
pred_alpha=alpha_estimation, gt_trimap=gt_trimap,
gt_alpha=gt_alpha, log_sigma_t_sqr=model.log_sigma_t_sqr, log_sigma_a_sqr=model.log_sigma_a_sqr)
# if multi_gpu:
# L_overall, L_t, L_a = L_overall.mean(), L_t.mean(), L_a.mean()
optimizer.zero_grad()
L_overall.backward()
optimizer.step()
if cur_iter % 10 == 0:
logger.info("Epoch: {:03d} | Iter: {:05d}/{} | Loss: {:.4e} | L_t: {:.4e} | L_a: {:.4e}"
.format(epoch, index, len(train_loader), L_overall.item(), L_t.item(), L_a.item()))
writer.add_scalar("loss/L_overall", L_overall.item(), cur_iter)
writer.add_scalar("loss/L_t", L_t.item(), cur_iter)
writer.add_scalar("loss/L_a", L_a.item(), cur_iter)
sigma_t = torch.exp(model.log_sigma_t_sqr / 2)
sigma_a = torch.exp(model.log_sigma_a_sqr / 2)
writer.add_scalar("sigma/sigma_t", sigma_t, cur_iter)
writer.add_scalar("sigma/sigma_a", sigma_a, cur_iter)
writer.add_scalar("lr", cur_lr, cur_iter)
cur_iter += 1
# Validation
logger.info("Validating after the {}th epoch".format(epoch))
avg_loss = AverageMeter()
avg_l_t = AverageMeter()
avg_l_a = AverageMeter()
torch.cuda.empty_cache()
torch.set_grad_enabled(False)
model.eval()
with tqdm(total=len(valid_loader)) as pbar:
for index, (img, gt) in enumerate(valid_loader):
img = img.type(torch.FloatTensor).to(device) # [bs, 4, 320, 320]
gt_alpha = (gt[:, 0, :, :].unsqueeze(1)).type(torch.FloatTensor).to(device) # [bs, 1, 320, 320]
gt_trimap = gt[:, 1, :, :].type(torch.LongTensor).to(device) # [bs, 320, 320]
trimap_adaption, t_argmax, alpha_estimation = model(img)
L_overall_valid, L_t_valid, L_a_valid = task_uncertainty_loss(pred_trimap=trimap_adaption, pred_trimap_argmax=t_argmax,
pred_alpha=alpha_estimation, gt_trimap=gt_trimap,
gt_alpha=gt_alpha, log_sigma_t_sqr=model.log_sigma_t_sqr, log_sigma_a_sqr=model.log_sigma_a_sqr)
# if multi_gpu:
# L_overall, L_t, L_a = L_overall.mean(), L_t.mean(), L_a.mean()
avg_loss.update(L_overall_valid.item())
avg_l_t.update(L_t_valid.item())
avg_l_a.update(L_a_valid.item())
if index == 0:
trimap_adaption_res = torchvision.utils.make_grid(t_argmax.type(torch.FloatTensor) / 2, normalize=True, scale_each=True)
writer.add_image('valid_image/trimap_adaptation', trimap_adaption_res, cur_iter)
alpha_estimation_res = torchvision.utils.make_grid(alpha_estimation, normalize=True, scale_each=True)
writer.add_image('valid_image/alpha_estimation', alpha_estimation_res, cur_iter)
pbar.update()
logger.info("Average loss overall: {:.4e}".format(avg_loss.avg))
logger.info("Average loss of trimap adaptation: {:.4e}".format(avg_l_t.avg))
logger.info("Average loss of alpha estimation: {:.4e}".format(avg_l_a.avg))
writer.add_scalar("valid_loss/L_overall", avg_loss.avg, cur_iter)
writer.add_scalar("valid_loss/L_t", avg_l_t.avg, cur_iter)
writer.add_scalar("valid_loss/L_a", avg_l_a.avg, cur_iter)
is_best = avg_loss.avg < best_loss
best_loss = min(avg_loss.avg, best_loss)
if is_best or (args.save_ckpt and epoch % 10 == 0):
if not os.path.exists("ckpts"):
os.makedirs("ckpts")
logger.info("Checkpoint saved")
if (is_best):
logger.info("Best checkpoint saved")
save_checkpoint(epoch, model, optimizer, cur_iter, max_iter, init_lr, avg_loss.avg, is_best, args.ckpt_path)
writer.export_scalars_to_json("./all_scalars.json")
writer.close()
def test():
pass
def main():
args = get_args()
logger = get_logger(args.write_log)
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
device_ids_str = args.gpu.split(',')
device_ids = []
for i in range(len(device_ids_str)):
device_ids.append(i)
multi_gpu = False
if args.mode != "prep":
logger.info("Loading network")
model = AdaMatting(in_channel=4)
optimizer = torch.optim.Adam(model.parameters(), lr=args.lr, weight_decay=0)
if args.cuda:
device = torch.device("cuda:{}".format(device_ids[0]))
if len(device_ids) > 1 and args.mode=="train":
logger.info("Loading with multiple GPUs")
model = torch.nn.DataParallel(model, device_ids=device_ids)
multi_gpu = True
model = model.cuda(device=device_ids[0])
else:
device = torch.device("cpu")
if args.mode == "train":
logger.info("Program runs in train mode")
train(model=model, optimizer=optimizer, device=device, args=args, logger=logger, multi_gpu=multi_gpu)
elif args.mode == "test":
logger.info("Program runs in test mode")
test()
elif args.mode == "prep":
logger.info("Program runs in prep mode")
# composite_dataset(args.raw_data_path, logger)
gen_train_valid_names(args.valid_portion, logger)
if __name__ == "__main__":
main()
| [
"utility.save_checkpoint",
"torch.exp",
"loss.task_uncertainty_loss",
"utility.poly_lr_scheduler",
"torchvision.utils.make_grid",
"dataset.dataset.AdaMattingDataset",
"os.path.exists",
"tensorboardX.SummaryWriter",
"net.adamatting.AdaMatting",
"dataset.pre_process.gen_train_valid_names",
"utility.get_args",
"utility.AverageMeter",
"torch.cuda.empty_cache",
"torch.device",
"torch.manual_seed",
"os.makedirs",
"torch.load",
"torch.nn.DataParallel",
"torch.utils.data.DataLoader",
"torch.set_grad_enabled",
"utility.get_logger"
] | [((507, 527), 'torch.manual_seed', 'torch.manual_seed', (['(7)'], {}), '(7)\n', (524, 527), False, 'import torch\n'), ((541, 556), 'tensorboardX.SummaryWriter', 'SummaryWriter', ([], {}), '()\n', (554, 556), False, 'from tensorboardX import SummaryWriter\n'), ((623, 669), 'dataset.dataset.AdaMattingDataset', 'AdaMattingDataset', (['args.raw_data_path', '"""train"""'], {}), "(args.raw_data_path, 'train')\n", (640, 669), False, 'from dataset.dataset import AdaMattingDataset\n'), ((689, 810), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['train_dataset'], {'batch_size': 'args.batch_size', 'shuffle': '(True)', 'num_workers': '(16)', 'pin_memory': '(True)'}), '(train_dataset, batch_size=args.batch_size,\n shuffle=True, num_workers=16, pin_memory=True)\n', (716, 810), False, 'import torch\n'), ((875, 921), 'dataset.dataset.AdaMattingDataset', 'AdaMattingDataset', (['args.raw_data_path', '"""valid"""'], {}), "(args.raw_data_path, 'valid')\n", (892, 921), False, 'from dataset.dataset import AdaMattingDataset\n'), ((941, 1063), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['valid_dataset'], {'batch_size': 'args.batch_size', 'shuffle': '(False)', 'num_workers': '(16)', 'pin_memory': '(True)'}), '(valid_dataset, batch_size=args.batch_size,\n shuffle=False, num_workers=16, pin_memory=True)\n', (968, 1063), False, 'import torch\n'), ((6898, 6908), 'utility.get_args', 'get_args', ([], {}), '()\n', (6906, 6908), False, 'from utility import get_args, get_logger, poly_lr_scheduler, save_checkpoint, AverageMeter, compute_mse, compute_sad\n'), ((6922, 6948), 'utility.get_logger', 'get_logger', (['args.write_log'], {}), '(args.write_log)\n', (6932, 6948), False, 'from utility import get_args, get_logger, poly_lr_scheduler, save_checkpoint, AverageMeter, compute_mse, compute_sad\n'), ((1198, 1224), 'torch.load', 'torch.load', (['args.ckpt_path'], {}), '(args.ckpt_path)\n', (1208, 1224), False, 'import torch\n'), ((1849, 1877), 'torch.set_grad_enabled', 'torch.set_grad_enabled', (['(True)'], {}), '(True)\n', (1871, 1877), False, 'import torch\n'), ((3978, 3992), 'utility.AverageMeter', 'AverageMeter', ([], {}), '()\n', (3990, 3992), False, 'from utility import get_args, get_logger, poly_lr_scheduler, save_checkpoint, AverageMeter, compute_mse, compute_sad\n'), ((4011, 4025), 'utility.AverageMeter', 'AverageMeter', ([], {}), '()\n', (4023, 4025), False, 'from utility import get_args, get_logger, poly_lr_scheduler, save_checkpoint, AverageMeter, compute_mse, compute_sad\n'), ((4044, 4058), 'utility.AverageMeter', 'AverageMeter', ([], {}), '()\n', (4056, 4058), False, 'from utility import get_args, get_logger, poly_lr_scheduler, save_checkpoint, AverageMeter, compute_mse, compute_sad\n'), ((4067, 4091), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (4089, 4091), False, 'import torch\n'), ((4100, 4129), 'torch.set_grad_enabled', 'torch.set_grad_enabled', (['(False)'], {}), '(False)\n', (4122, 4129), False, 'import torch\n'), ((7241, 7265), 'net.adamatting.AdaMatting', 'AdaMatting', ([], {'in_channel': '(4)'}), '(in_channel=4)\n', (7251, 7265), False, 'from net.adamatting import AdaMatting\n'), ((1978, 2071), 'utility.poly_lr_scheduler', 'poly_lr_scheduler', ([], {'optimizer': 'optimizer', 'init_lr': 'init_lr', 'iter': 'cur_iter', 'max_iter': 'max_iter'}), '(optimizer=optimizer, init_lr=init_lr, iter=cur_iter,\n max_iter=max_iter)\n', (1995, 2071), False, 'from utility import get_args, get_logger, poly_lr_scheduler, save_checkpoint, AverageMeter, 
compute_mse, compute_sad\n'), ((2482, 2723), 'loss.task_uncertainty_loss', 'task_uncertainty_loss', ([], {'pred_trimap': 'trimap_adaption', 'pred_trimap_argmax': 't_argmax', 'pred_alpha': 'alpha_estimation', 'gt_trimap': 'gt_trimap', 'gt_alpha': 'gt_alpha', 'log_sigma_t_sqr': 'model.log_sigma_t_sqr', 'log_sigma_a_sqr': 'model.log_sigma_a_sqr'}), '(pred_trimap=trimap_adaption, pred_trimap_argmax=\n t_argmax, pred_alpha=alpha_estimation, gt_trimap=gt_trimap, gt_alpha=\n gt_alpha, log_sigma_t_sqr=model.log_sigma_t_sqr, log_sigma_a_sqr=model.\n log_sigma_a_sqr)\n', (2503, 2723), False, 'from loss import task_uncertainty_loss\n'), ((6665, 6777), 'utility.save_checkpoint', 'save_checkpoint', (['epoch', 'model', 'optimizer', 'cur_iter', 'max_iter', 'init_lr', 'avg_loss.avg', 'is_best', 'args.ckpt_path'], {}), '(epoch, model, optimizer, cur_iter, max_iter, init_lr,\n avg_loss.avg, is_best, args.ckpt_path)\n', (6680, 6777), False, 'from utility import get_args, get_logger, poly_lr_scheduler, save_checkpoint, AverageMeter, compute_mse, compute_sad\n'), ((7754, 7773), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (7766, 7773), False, 'import torch\n'), ((3523, 3559), 'torch.exp', 'torch.exp', (['(model.log_sigma_t_sqr / 2)'], {}), '(model.log_sigma_t_sqr / 2)\n', (3532, 3559), False, 'import torch\n'), ((3586, 3622), 'torch.exp', 'torch.exp', (['(model.log_sigma_a_sqr / 2)'], {}), '(model.log_sigma_a_sqr / 2)\n', (3595, 3622), False, 'import torch\n'), ((4681, 4922), 'loss.task_uncertainty_loss', 'task_uncertainty_loss', ([], {'pred_trimap': 'trimap_adaption', 'pred_trimap_argmax': 't_argmax', 'pred_alpha': 'alpha_estimation', 'gt_trimap': 'gt_trimap', 'gt_alpha': 'gt_alpha', 'log_sigma_t_sqr': 'model.log_sigma_t_sqr', 'log_sigma_a_sqr': 'model.log_sigma_a_sqr'}), '(pred_trimap=trimap_adaption, pred_trimap_argmax=\n t_argmax, pred_alpha=alpha_estimation, gt_trimap=gt_trimap, gt_alpha=\n gt_alpha, log_sigma_t_sqr=model.log_sigma_t_sqr, log_sigma_a_sqr=model.\n log_sigma_a_sqr)\n', (4702, 4922), False, 'from loss import task_uncertainty_loss\n'), ((6468, 6491), 'os.path.exists', 'os.path.exists', (['"""ckpts"""'], {}), "('ckpts')\n", (6482, 6491), False, 'import os\n'), ((6509, 6529), 'os.makedirs', 'os.makedirs', (['"""ckpts"""'], {}), "('ckpts')\n", (6520, 6529), False, 'import os\n'), ((7581, 7632), 'torch.nn.DataParallel', 'torch.nn.DataParallel', (['model'], {'device_ids': 'device_ids'}), '(model, device_ids=device_ids)\n', (7602, 7632), False, 'import torch\n'), ((8201, 8250), 'dataset.pre_process.gen_train_valid_names', 'gen_train_valid_names', (['args.valid_portion', 'logger'], {}), '(args.valid_portion, logger)\n', (8222, 8250), False, 'from dataset.pre_process import composite_dataset, gen_train_valid_names\n'), ((5618, 5696), 'torchvision.utils.make_grid', 'torchvision.utils.make_grid', (['alpha_estimation'], {'normalize': '(True)', 'scale_each': '(True)'}), '(alpha_estimation, normalize=True, scale_each=True)\n', (5645, 5696), False, 'import torchvision\n')] |
import unittest
from opentmi_client.api import Event
class TestEvent(unittest.TestCase):
def test_construct(self):
event = Event()
self.assertIsInstance(event, Event)
self.assertEqual(event.data, {})
def test_str(self):
event = Event()
event.eid = "iid"
self.assertEqual(str(event), "iid")
def test_properties(self):
event = Event()
event.msgid = 'ALLOCATED'
self.assertEqual(event.msgid, 'ALLOCATED')
event.traceid = "asd"
self.assertEqual(event.traceid, "asd")
event.msg = "abc"
self.assertEqual(event.msg, "abc")
event.tag = "taag"
self.assertEqual(event.tag, "taag")
event.duration = 123.0
self.assertEqual(event.duration, 123.0)
event.eid = "aid"
self.assertEqual(event.eid, "aid")
spare = {"a": 123}
event.spare = spare
self.assertEqual(event.spare, spare)
self.assertDictEqual(event.data, {
"msgid": "ALLOCATED",
"traceid": "asd",
"msg": "abc",
"tag": "taag",
"duration": 123.0,
"id": "aid",
"spare": spare,
})
def test_priority(self):
event = Event()
event.priority.facility = "testcase"
self.assertEqual(event.priority.facility, "testcase")
event.priority.level = "warning"
self.assertEqual(event.priority.level, "warning")
self.assertDictEqual(event.data, {
"priority": {
"facility": "testcase",
"level": "warning"
}
})
def test_ref(self):
event = Event()
event.ref.resource = "123"
self.assertEqual(event.ref.resource, "123")
event.ref.testcase = "tcs"
self.assertEqual(event.ref.testcase, "tcs")
event.ref.result = "res"
self.assertEqual(event.ref.result, "res")
self.assertDictEqual(event.data, {
"ref": {
"resource": "123",
"testcase": "tcs",
"result": "res"
}
})
| [
"opentmi_client.api.Event"
] | [((137, 144), 'opentmi_client.api.Event', 'Event', ([], {}), '()\n', (142, 144), False, 'from opentmi_client.api import Event\n'), ((271, 278), 'opentmi_client.api.Event', 'Event', ([], {}), '()\n', (276, 278), False, 'from opentmi_client.api import Event\n'), ((397, 404), 'opentmi_client.api.Event', 'Event', ([], {}), '()\n', (402, 404), False, 'from opentmi_client.api import Event\n'), ((1256, 1263), 'opentmi_client.api.Event', 'Event', ([], {}), '()\n', (1261, 1263), False, 'from opentmi_client.api import Event\n'), ((1680, 1687), 'opentmi_client.api.Event', 'Event', ([], {}), '()\n', (1685, 1687), False, 'from opentmi_client.api import Event\n')] |
import unittest
from roman import RomanNumeral
class RomanNumeralTests(unittest.TestCase):
"""Tests for RomanNumeral."""
def verify(self, integer, numeral):
self.assertEqual(int(RomanNumeral(numeral)), integer)
self.assertNotEqual(int(RomanNumeral(numeral)), integer + 1)
self.assertNotEqual(int(RomanNumeral(numeral)), integer - 1)
def test_single_digit(self):
self.verify(1, "I")
self.verify(5, "V")
self.verify(10, "X")
self.verify(50, "L")
self.verify(100, "C")
self.verify(500, "D")
self.verify(1000, "M")
def test_two_digits_ascending(self):
self.verify(2, "II")
self.verify(6, "VI")
self.verify(11, "XI")
self.verify(15, "XV")
self.verify(20, "XX")
self.verify(60, "LX")
self.verify(101, "CI")
self.verify(105, "CV")
self.verify(110, "CX")
self.verify(150, "CL")
self.verify(550, "DL")
self.verify(600, "DC")
self.verify(1100, "MC")
self.verify(2000, "MM")
def test_three_digits_ascending(self):
self.verify(3, "III")
self.verify(7, "VII")
self.verify(12, "XII")
self.verify(16, "XVI")
self.verify(21, "XXI")
self.verify(25, "XXV")
self.verify(30, "XXX")
def test_four_digits_ascending(self):
self.verify(8, "VIII")
self.verify(13, "XIII")
self.verify(17, "XVII")
self.verify(22, "XXII")
self.verify(26, "XXVI")
self.verify(31, "XXXI")
self.verify(35, "XXXV")
def test_many_digits(self):
self.verify(1888, "MDCCCLXXXVIII")
def test_subtractive(self):
self.verify(4, "IV")
self.verify(9, "IX")
self.verify(14, "XIV")
self.verify(19, "XIX")
self.verify(24, "XXIV")
self.verify(29, "XXIX")
self.verify(40, "XL")
self.verify(90, "XC")
self.verify(44, "XLIV")
self.verify(94, "XCIV")
self.verify(49, "XLIX")
self.verify(99, "XCIX")
self.verify(1999, "MCMXCIX")
self.verify(1948, "MCMXLVIII")
# To test the Bonus part of this exercise, comment out the following line
# @unittest.expectedFailure
def test_from_int_and_string_representation(self):
self.assertEqual(str(RomanNumeral("I")), "I")
self.assertEqual(repr(RomanNumeral("CD")), "RomanNumeral('CD')")
self.assertEqual(str(RomanNumeral.from_int(1)), str(RomanNumeral("I")))
self.assertEqual(str(RomanNumeral.from_int(10)), "X")
self.assertEqual(str(RomanNumeral.from_int(21)), "XXI")
self.assertEqual(str(RomanNumeral.from_int(600)), "DC")
self.assertEqual(str(RomanNumeral.from_int(2000)), "MM")
self.assertEqual(str(RomanNumeral.from_int(12)), "XII")
self.assertEqual(str(RomanNumeral.from_int(25)), "XXV")
self.assertEqual(str(RomanNumeral.from_int(6)), "VI")
self.assertEqual(str(RomanNumeral.from_int(4)), "IV")
self.assertEqual(str(RomanNumeral.from_int(9)), "IX")
self.assertEqual(str(RomanNumeral.from_int(14)), "XIV")
self.assertEqual(str(RomanNumeral.from_int(1888)), "MDCCCLXXXVIII")
self.assertEqual(str(RomanNumeral.from_int(1999)), "MCMXCIX")
self.assertEqual(str(RomanNumeral.from_int(1948)), "MCMXLVIII")
# To test the Bonus part of this exercise, comment out the following line
# @unittest.expectedFailure
def test_adding(self):
sixty_five = RomanNumeral("LXV")
eighty_seven = RomanNumeral("LXXXVII")
self.assertEqual(int(sixty_five + eighty_seven), 152)
self.assertEqual(type(sixty_five + eighty_seven), RomanNumeral)
self.assertEqual(int(sixty_five + 87), 152)
self.assertEqual(type(sixty_five + 87), RomanNumeral)
self.assertEqual(str(sixty_five + 87), str("CLII"))
# To test the Bonus part of this exercise, comment out the following line
# @unittest.expectedFailure
def test_equality_and_ordering(self):
self.assertEqual(RomanNumeral("I"), 1)
self.assertNotEqual(RomanNumeral("I"), 2)
self.assertEqual(RomanNumeral("I"), "I")
self.assertEqual(RomanNumeral.from_int(10), "X")
self.assertEqual(RomanNumeral.from_int(21), "XXI")
self.assertEqual(RomanNumeral.from_int(600), "DC")
self.assertEqual(RomanNumeral.from_int(2000), "MM")
self.assertEqual(RomanNumeral.from_int(12), "XII")
self.assertEqual(RomanNumeral.from_int(25), "XXV")
self.assertEqual(RomanNumeral.from_int(6), "VI")
self.assertEqual(RomanNumeral.from_int(4), "IV")
self.assertEqual(RomanNumeral.from_int(9), "IX")
self.assertEqual(RomanNumeral.from_int(14), "XIV")
self.assertEqual(RomanNumeral.from_int(1888), "MDCCCLXXXVIII")
self.assertEqual(RomanNumeral.from_int(1999), "MCMXCIX")
self.assertEqual(RomanNumeral.from_int(1948), "MCMXLVIII")
self.assertLess(RomanNumeral("MCMXLVIII"), RomanNumeral("MCMXCIX"))
self.assertGreater(RomanNumeral("MCMXCIX"), RomanNumeral("MCMXLVIII"))
self.assertGreaterEqual(RomanNumeral("IX"), RomanNumeral("III"))
self.assertLessEqual(RomanNumeral("III"), RomanNumeral("IX"))
self.assertGreaterEqual(RomanNumeral("X"), RomanNumeral("X"))
self.assertLessEqual(RomanNumeral("IIII"), RomanNumeral("IV"))
self.assertFalse(RomanNumeral("V") < RomanNumeral("IV"))
self.assertFalse(RomanNumeral("V") > RomanNumeral("IX"))
self.assertFalse(RomanNumeral("V") <= RomanNumeral("IV"))
self.assertFalse(RomanNumeral("V") >= RomanNumeral("IX"))
with self.assertRaises(TypeError):
RomanNumeral("X") < "XX"
with self.assertRaises(TypeError):
RomanNumeral("X") <= "XX"
with self.assertRaises(TypeError):
RomanNumeral("X") > "XX"
with self.assertRaises(TypeError):
RomanNumeral("X") >= "XX"
self.assertFalse(RomanNumeral("V") < 4)
self.assertFalse(RomanNumeral("V") > 9)
self.assertFalse(RomanNumeral("V") <= 4)
self.assertFalse(RomanNumeral("V") >= 9)
with self.assertRaises(TypeError):
RomanNumeral("X") < "XX"
with self.assertRaises(TypeError):
RomanNumeral("X") <= "XX"
with self.assertRaises(TypeError):
RomanNumeral("X") > "XX"
with self.assertRaises(TypeError):
RomanNumeral("X") >= "XX"
if __name__ == "__main__":
unittest.main(verbosity=2)
| [
"unittest.main",
"roman.RomanNumeral.from_int",
"roman.RomanNumeral"
] | [((6576, 6602), 'unittest.main', 'unittest.main', ([], {'verbosity': '(2)'}), '(verbosity=2)\n', (6589, 6602), False, 'import unittest\n'), ((3544, 3563), 'roman.RomanNumeral', 'RomanNumeral', (['"""LXV"""'], {}), "('LXV')\n", (3556, 3563), False, 'from roman import RomanNumeral\n'), ((3587, 3610), 'roman.RomanNumeral', 'RomanNumeral', (['"""LXXXVII"""'], {}), "('LXXXVII')\n", (3599, 3610), False, 'from roman import RomanNumeral\n'), ((4097, 4114), 'roman.RomanNumeral', 'RomanNumeral', (['"""I"""'], {}), "('I')\n", (4109, 4114), False, 'from roman import RomanNumeral\n'), ((4147, 4164), 'roman.RomanNumeral', 'RomanNumeral', (['"""I"""'], {}), "('I')\n", (4159, 4164), False, 'from roman import RomanNumeral\n'), ((4194, 4211), 'roman.RomanNumeral', 'RomanNumeral', (['"""I"""'], {}), "('I')\n", (4206, 4211), False, 'from roman import RomanNumeral\n'), ((4243, 4268), 'roman.RomanNumeral.from_int', 'RomanNumeral.from_int', (['(10)'], {}), '(10)\n', (4264, 4268), False, 'from roman import RomanNumeral\n'), ((4300, 4325), 'roman.RomanNumeral.from_int', 'RomanNumeral.from_int', (['(21)'], {}), '(21)\n', (4321, 4325), False, 'from roman import RomanNumeral\n'), ((4359, 4385), 'roman.RomanNumeral.from_int', 'RomanNumeral.from_int', (['(600)'], {}), '(600)\n', (4380, 4385), False, 'from roman import RomanNumeral\n'), ((4418, 4445), 'roman.RomanNumeral.from_int', 'RomanNumeral.from_int', (['(2000)'], {}), '(2000)\n', (4439, 4445), False, 'from roman import RomanNumeral\n'), ((4478, 4503), 'roman.RomanNumeral.from_int', 'RomanNumeral.from_int', (['(12)'], {}), '(12)\n', (4499, 4503), False, 'from roman import RomanNumeral\n'), ((4537, 4562), 'roman.RomanNumeral.from_int', 'RomanNumeral.from_int', (['(25)'], {}), '(25)\n', (4558, 4562), False, 'from roman import RomanNumeral\n'), ((4596, 4620), 'roman.RomanNumeral.from_int', 'RomanNumeral.from_int', (['(6)'], {}), '(6)\n', (4617, 4620), False, 'from roman import RomanNumeral\n'), ((4653, 4677), 'roman.RomanNumeral.from_int', 'RomanNumeral.from_int', (['(4)'], {}), '(4)\n', (4674, 4677), False, 'from roman import RomanNumeral\n'), ((4710, 4734), 'roman.RomanNumeral.from_int', 'RomanNumeral.from_int', (['(9)'], {}), '(9)\n', (4731, 4734), False, 'from roman import RomanNumeral\n'), ((4767, 4792), 'roman.RomanNumeral.from_int', 'RomanNumeral.from_int', (['(14)'], {}), '(14)\n', (4788, 4792), False, 'from roman import RomanNumeral\n'), ((4826, 4853), 'roman.RomanNumeral.from_int', 'RomanNumeral.from_int', (['(1888)'], {}), '(1888)\n', (4847, 4853), False, 'from roman import RomanNumeral\n'), ((4897, 4924), 'roman.RomanNumeral.from_int', 'RomanNumeral.from_int', (['(1999)'], {}), '(1999)\n', (4918, 4924), False, 'from roman import RomanNumeral\n'), ((4962, 4989), 'roman.RomanNumeral.from_int', 'RomanNumeral.from_int', (['(1948)'], {}), '(1948)\n', (4983, 4989), False, 'from roman import RomanNumeral\n'), ((5028, 5053), 'roman.RomanNumeral', 'RomanNumeral', (['"""MCMXLVIII"""'], {}), "('MCMXLVIII')\n", (5040, 5053), False, 'from roman import RomanNumeral\n'), ((5055, 5078), 'roman.RomanNumeral', 'RomanNumeral', (['"""MCMXCIX"""'], {}), "('MCMXCIX')\n", (5067, 5078), False, 'from roman import RomanNumeral\n'), ((5107, 5130), 'roman.RomanNumeral', 'RomanNumeral', (['"""MCMXCIX"""'], {}), "('MCMXCIX')\n", (5119, 5130), False, 'from roman import RomanNumeral\n'), ((5132, 5157), 'roman.RomanNumeral', 'RomanNumeral', (['"""MCMXLVIII"""'], {}), "('MCMXLVIII')\n", (5144, 5157), False, 'from roman import RomanNumeral\n'), ((5191, 5209), 'roman.RomanNumeral', 
'RomanNumeral', (['"""IX"""'], {}), "('IX')\n", (5203, 5209), False, 'from roman import RomanNumeral\n'), ((5211, 5230), 'roman.RomanNumeral', 'RomanNumeral', (['"""III"""'], {}), "('III')\n", (5223, 5230), False, 'from roman import RomanNumeral\n'), ((5261, 5280), 'roman.RomanNumeral', 'RomanNumeral', (['"""III"""'], {}), "('III')\n", (5273, 5280), False, 'from roman import RomanNumeral\n'), ((5282, 5300), 'roman.RomanNumeral', 'RomanNumeral', (['"""IX"""'], {}), "('IX')\n", (5294, 5300), False, 'from roman import RomanNumeral\n'), ((5334, 5351), 'roman.RomanNumeral', 'RomanNumeral', (['"""X"""'], {}), "('X')\n", (5346, 5351), False, 'from roman import RomanNumeral\n'), ((5353, 5370), 'roman.RomanNumeral', 'RomanNumeral', (['"""X"""'], {}), "('X')\n", (5365, 5370), False, 'from roman import RomanNumeral\n'), ((5401, 5421), 'roman.RomanNumeral', 'RomanNumeral', (['"""IIII"""'], {}), "('IIII')\n", (5413, 5421), False, 'from roman import RomanNumeral\n'), ((5423, 5441), 'roman.RomanNumeral', 'RomanNumeral', (['"""IV"""'], {}), "('IV')\n", (5435, 5441), False, 'from roman import RomanNumeral\n'), ((199, 220), 'roman.RomanNumeral', 'RomanNumeral', (['numeral'], {}), '(numeral)\n', (211, 220), False, 'from roman import RomanNumeral\n'), ((264, 285), 'roman.RomanNumeral', 'RomanNumeral', (['numeral'], {}), '(numeral)\n', (276, 285), False, 'from roman import RomanNumeral\n'), ((333, 354), 'roman.RomanNumeral', 'RomanNumeral', (['numeral'], {}), '(numeral)\n', (345, 354), False, 'from roman import RomanNumeral\n'), ((2356, 2373), 'roman.RomanNumeral', 'RomanNumeral', (['"""I"""'], {}), "('I')\n", (2368, 2373), False, 'from roman import RomanNumeral\n'), ((2411, 2429), 'roman.RomanNumeral', 'RomanNumeral', (['"""CD"""'], {}), "('CD')\n", (2423, 2429), False, 'from roman import RomanNumeral\n'), ((2483, 2507), 'roman.RomanNumeral.from_int', 'RomanNumeral.from_int', (['(1)'], {}), '(1)\n', (2504, 2507), False, 'from roman import RomanNumeral\n'), ((2514, 2531), 'roman.RomanNumeral', 'RomanNumeral', (['"""I"""'], {}), "('I')\n", (2526, 2531), False, 'from roman import RomanNumeral\n'), ((2563, 2588), 'roman.RomanNumeral.from_int', 'RomanNumeral.from_int', (['(10)'], {}), '(10)\n', (2584, 2588), False, 'from roman import RomanNumeral\n'), ((2625, 2650), 'roman.RomanNumeral.from_int', 'RomanNumeral.from_int', (['(21)'], {}), '(21)\n', (2646, 2650), False, 'from roman import RomanNumeral\n'), ((2689, 2715), 'roman.RomanNumeral.from_int', 'RomanNumeral.from_int', (['(600)'], {}), '(600)\n', (2710, 2715), False, 'from roman import RomanNumeral\n'), ((2753, 2780), 'roman.RomanNumeral.from_int', 'RomanNumeral.from_int', (['(2000)'], {}), '(2000)\n', (2774, 2780), False, 'from roman import RomanNumeral\n'), ((2818, 2843), 'roman.RomanNumeral.from_int', 'RomanNumeral.from_int', (['(12)'], {}), '(12)\n', (2839, 2843), False, 'from roman import RomanNumeral\n'), ((2882, 2907), 'roman.RomanNumeral.from_int', 'RomanNumeral.from_int', (['(25)'], {}), '(25)\n', (2903, 2907), False, 'from roman import RomanNumeral\n'), ((2946, 2970), 'roman.RomanNumeral.from_int', 'RomanNumeral.from_int', (['(6)'], {}), '(6)\n', (2967, 2970), False, 'from roman import RomanNumeral\n'), ((3008, 3032), 'roman.RomanNumeral.from_int', 'RomanNumeral.from_int', (['(4)'], {}), '(4)\n', (3029, 3032), False, 'from roman import RomanNumeral\n'), ((3070, 3094), 'roman.RomanNumeral.from_int', 'RomanNumeral.from_int', (['(9)'], {}), '(9)\n', (3091, 3094), False, 'from roman import RomanNumeral\n'), ((3132, 3157), 'roman.RomanNumeral.from_int', 
'RomanNumeral.from_int', (['(14)'], {}), '(14)\n', (3153, 3157), False, 'from roman import RomanNumeral\n'), ((3196, 3223), 'roman.RomanNumeral.from_int', 'RomanNumeral.from_int', (['(1888)'], {}), '(1888)\n', (3217, 3223), False, 'from roman import RomanNumeral\n'), ((3272, 3299), 'roman.RomanNumeral.from_int', 'RomanNumeral.from_int', (['(1999)'], {}), '(1999)\n', (3293, 3299), False, 'from roman import RomanNumeral\n'), ((3342, 3369), 'roman.RomanNumeral.from_int', 'RomanNumeral.from_int', (['(1948)'], {}), '(1948)\n', (3363, 3369), False, 'from roman import RomanNumeral\n'), ((5468, 5485), 'roman.RomanNumeral', 'RomanNumeral', (['"""V"""'], {}), "('V')\n", (5480, 5485), False, 'from roman import RomanNumeral\n'), ((5488, 5506), 'roman.RomanNumeral', 'RomanNumeral', (['"""IV"""'], {}), "('IV')\n", (5500, 5506), False, 'from roman import RomanNumeral\n'), ((5533, 5550), 'roman.RomanNumeral', 'RomanNumeral', (['"""V"""'], {}), "('V')\n", (5545, 5550), False, 'from roman import RomanNumeral\n'), ((5553, 5571), 'roman.RomanNumeral', 'RomanNumeral', (['"""IX"""'], {}), "('IX')\n", (5565, 5571), False, 'from roman import RomanNumeral\n'), ((5598, 5615), 'roman.RomanNumeral', 'RomanNumeral', (['"""V"""'], {}), "('V')\n", (5610, 5615), False, 'from roman import RomanNumeral\n'), ((5619, 5637), 'roman.RomanNumeral', 'RomanNumeral', (['"""IV"""'], {}), "('IV')\n", (5631, 5637), False, 'from roman import RomanNumeral\n'), ((5664, 5681), 'roman.RomanNumeral', 'RomanNumeral', (['"""V"""'], {}), "('V')\n", (5676, 5681), False, 'from roman import RomanNumeral\n'), ((5685, 5703), 'roman.RomanNumeral', 'RomanNumeral', (['"""IX"""'], {}), "('IX')\n", (5697, 5703), False, 'from roman import RomanNumeral\n'), ((5760, 5777), 'roman.RomanNumeral', 'RomanNumeral', (['"""X"""'], {}), "('X')\n", (5772, 5777), False, 'from roman import RomanNumeral\n'), ((5840, 5857), 'roman.RomanNumeral', 'RomanNumeral', (['"""X"""'], {}), "('X')\n", (5852, 5857), False, 'from roman import RomanNumeral\n'), ((5921, 5938), 'roman.RomanNumeral', 'RomanNumeral', (['"""X"""'], {}), "('X')\n", (5933, 5938), False, 'from roman import RomanNumeral\n'), ((6001, 6018), 'roman.RomanNumeral', 'RomanNumeral', (['"""X"""'], {}), "('X')\n", (6013, 6018), False, 'from roman import RomanNumeral\n'), ((6052, 6069), 'roman.RomanNumeral', 'RomanNumeral', (['"""V"""'], {}), "('V')\n", (6064, 6069), False, 'from roman import RomanNumeral\n'), ((6100, 6117), 'roman.RomanNumeral', 'RomanNumeral', (['"""V"""'], {}), "('V')\n", (6112, 6117), False, 'from roman import RomanNumeral\n'), ((6148, 6165), 'roman.RomanNumeral', 'RomanNumeral', (['"""V"""'], {}), "('V')\n", (6160, 6165), False, 'from roman import RomanNumeral\n'), ((6197, 6214), 'roman.RomanNumeral', 'RomanNumeral', (['"""V"""'], {}), "('V')\n", (6209, 6214), False, 'from roman import RomanNumeral\n'), ((6276, 6293), 'roman.RomanNumeral', 'RomanNumeral', (['"""X"""'], {}), "('X')\n", (6288, 6293), False, 'from roman import RomanNumeral\n'), ((6356, 6373), 'roman.RomanNumeral', 'RomanNumeral', (['"""X"""'], {}), "('X')\n", (6368, 6373), False, 'from roman import RomanNumeral\n'), ((6437, 6454), 'roman.RomanNumeral', 'RomanNumeral', (['"""X"""'], {}), "('X')\n", (6449, 6454), False, 'from roman import RomanNumeral\n'), ((6517, 6534), 'roman.RomanNumeral', 'RomanNumeral', (['"""X"""'], {}), "('X')\n", (6529, 6534), False, 'from roman import RomanNumeral\n')] |
import kivy
kivy.require('2.0.0') # replace with your current kivy version !
from kivy.app import App
import kivy.graphics
from kivy.lang import Builder
from kivy.properties import NumericProperty, StringProperty, ObjectProperty
from kivy.uix.label import Label
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.image import Image
from kivy.uix.screenmanager import Screen, ScreenManager
KV = '''
<ClientListScreen>:
BoxLayout:
orientation: 'vertical'
table_box: table_box
BoxLayout:
orientation: 'horizontal'
Label:
size_hint: 0.25, None
text: 'Nazwisko'
Label:
size_hint: 0.25, None
text: 'Imię'
Label:
size_hint: 0.25, None
text: 'wzrost'
Label:
size_hint: 0.25, None
text: 'Akcje'
BoxLayout:
orientation: 'vertical'
id: table_box
PersonRow:
last_name: 'Mejer'
first_name: 'Zdzichu'
height_: 173
PersonRow:
last_name: 'Ptok'
first_name: 'Kacper'
height_: 180
PersonRow:
last_name: 'Ufnal'
first_name: 'Melania'
height_: 169
PersonRow:
last_name: 'Cyroń'
first_name: 'Marcin'
height_: 183
<ImageViewScreen>:
BoxLayout:
orientation: 'vertical'
Button:
text: 'Go back'
size_hint: 1, 0.15
on_press: root.manager.current = 'clients'
ImageCanvas:
source: 'hippo.jpg'
<PersonRow>:
id: person
BoxLayout:
orientation: 'horizontal'
Label:
size_hint: 0.25, None
text: person.last_name
Label:
size_hint: 0.25, None
text: person.first_name
Label:
size_hint: 0.25, None
text: str(person.height_)
Button:
size_hint: 0.25, None
text: 'otwórz'
on_press: app.open_image()
'''
# TODO can I reference objects in the layout directly from the app object?
class ClientListScreen(Screen):
# TODO fill that table_box from sqlite. Might have to be made into a recycle view
table_box = ObjectProperty()
class ImageViewScreen(Screen):
# an example of how to use file chooser is in Kivy repo: kivy/examples/RST_Editor/main.py
pass
class PersonRow(BoxLayout):
last_name = StringProperty()
first_name = StringProperty()
height_ = NumericProperty()
# TODO should be using https://docs.python.org/3.7/library/importlib.html#module-importlib.resources
# to load the image... well no, actually I'll be loading it from somewhere else. But some other images should be
# loaded like that.
class ImageCanvas(Image):
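    # Image widget that draws a small green dot wherever the user touches or clicks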
def on_touch_down(self, touch):
# TODO limit the processing to touches on the image
with self.canvas:
kivy.graphics.Color(0, 0.7, 0)
dot_size = 20
dot_position = (touch.pos[0] - dot_size//2, touch.pos[1] - dot_size//2)
kivy.graphics.Ellipse(pos=dot_position, size=(dot_size, dot_size))
print(touch, touch.button, 'mouse button')
# TODO make the app async
class MyApp(App):
def build(self):
Builder.load_string(KV)
self.screen_manager = ScreenManager()
self.screen_manager.add_widget(ClientListScreen(name='clients'))
self.screen_manager.add_widget(ImageViewScreen(name='image_view'))
return self.screen_manager
def open_image(self):
self.screen_manager.current = 'image_view'
if __name__ == '__main__':
MyApp().run()
| [
"kivy.require",
"kivy.properties.NumericProperty",
"kivy.graphics.Ellipse",
"kivy.uix.screenmanager.ScreenManager",
"kivy.lang.Builder.load_string",
"kivy.graphics.Color",
"kivy.properties.StringProperty",
"kivy.properties.ObjectProperty"
] | [((12, 33), 'kivy.require', 'kivy.require', (['"""2.0.0"""'], {}), "('2.0.0')\n", (24, 33), False, 'import kivy\n'), ((2419, 2435), 'kivy.properties.ObjectProperty', 'ObjectProperty', ([], {}), '()\n', (2433, 2435), False, 'from kivy.properties import NumericProperty, StringProperty, ObjectProperty\n'), ((2618, 2634), 'kivy.properties.StringProperty', 'StringProperty', ([], {}), '()\n', (2632, 2634), False, 'from kivy.properties import NumericProperty, StringProperty, ObjectProperty\n'), ((2652, 2668), 'kivy.properties.StringProperty', 'StringProperty', ([], {}), '()\n', (2666, 2668), False, 'from kivy.properties import NumericProperty, StringProperty, ObjectProperty\n'), ((2683, 2700), 'kivy.properties.NumericProperty', 'NumericProperty', ([], {}), '()\n', (2698, 2700), False, 'from kivy.properties import NumericProperty, StringProperty, ObjectProperty\n'), ((3444, 3467), 'kivy.lang.Builder.load_string', 'Builder.load_string', (['KV'], {}), '(KV)\n', (3463, 3467), False, 'from kivy.lang import Builder\n'), ((3499, 3514), 'kivy.uix.screenmanager.ScreenManager', 'ScreenManager', ([], {}), '()\n', (3512, 3514), False, 'from kivy.uix.screenmanager import Screen, ScreenManager\n'), ((3097, 3127), 'kivy.graphics.Color', 'kivy.graphics.Color', (['(0)', '(0.7)', '(0)'], {}), '(0, 0.7, 0)\n', (3116, 3127), False, 'import kivy\n'), ((3250, 3316), 'kivy.graphics.Ellipse', 'kivy.graphics.Ellipse', ([], {'pos': 'dot_position', 'size': '(dot_size, dot_size)'}), '(pos=dot_position, size=(dot_size, dot_size))\n', (3271, 3316), False, 'import kivy\n')] |
from selenium import webdriver
from time import sleep
import random
from datetime import datetime
def getLinks(name, link):
browser = webdriver.Chrome()
browser.get(link)
sleep(random.randint(5, 20))
links = []
print("start")
while 1:
lis = browser.find_elements_by_xpath('//div[4]/div[1]/div[1]/a')
links.extend(list(map(lambda x: x.get_attribute('href'), lis)))
# https://riptutorial.com/selenium-webdriver/example/28140/scrolling-using-python
# if lis: lis[random.randint(len(lis))].
        try:
            next = browser.find_element_by_class_name('n-pager__button-next').get_attribute('href')
        except:
            # No "next page" button left, so pagination is finished
            break
        if not next:
            break
while next:
try:
sleep(random.randint(1, 250))
browser.get(next)
print(datetime.now(), "- next")
except:
                print('Houston, we are getting banned again')
else:
break
links = list(map(lambda x: x.split('?')[0] + "/spec?track=tabs", links))
with open('links\\' + name + '.txt', 'w') as s:
s.write("\n".join(links))
browser.close()
return links
if __name__ == "__main__":
    print(getLinks("Washing machines",
"https://market.yandex.ru/catalog--stiralnye-mashiny/54913/list?onstock=1&local-offers-first=0"))
| [
"selenium.webdriver.Chrome",
"datetime.datetime.now",
"random.randint"
] | [((139, 157), 'selenium.webdriver.Chrome', 'webdriver.Chrome', ([], {}), '()\n', (155, 157), False, 'from selenium import webdriver\n'), ((190, 211), 'random.randint', 'random.randint', (['(5)', '(20)'], {}), '(5, 20)\n', (204, 211), False, 'import random\n'), ((702, 724), 'random.randint', 'random.randint', (['(1)', '(250)'], {}), '(1, 250)\n', (716, 724), False, 'import random\n'), ((782, 796), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (794, 796), False, 'from datetime import datetime\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019/10/23 20:52
# @Author : ganliang
# @File : tf_activation.py
# @Desc    : TensorFlow activation functions
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from src.config import logger
def activation_threhold():
"""
    Threshold activation function: the simplest activation function. If the neuron's activation value is greater than zero the neuron fires; otherwise it remains inhibited. Below we plot the threshold activation as the neuron's input changes.
:return:
"""
def threhold(X):
cond = tf.less(X, tf.zeros(shape=tf.shape(X), dtype=X.dtype))
out = tf.where(cond, tf.zeros(tf.shape(X)), tf.ones(tf.shape(X)))
return out
h = np.linspace(-1., 1., 50)
logger.info("h\n{0}".format(h))
out = threhold(h)
init = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init)
y = sess.run(out)
plt.title("threhold activation")
plt.xlabel("x")
plt.ylabel("y")
plt.plot(h, y)
plt.show()
def activation_sigmoid():
"""
    Sigmoid activation: the neuron's output is determined by g(x) = 1/(1+exp(-x)). In TensorFlow, tf.sigmoid provides the sigmoid activation. Its range is between 0 and 1:
:return:
"""
h = np.linspace(-10, 10, 50)
out = tf.sigmoid(h)
init = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init)
y = sess.run(out)
plt.title("sigmoid activation")
plt.xlabel("x")
plt.ylabel("y")
plt.plot(h, y)
plt.show()
def activation_tanh():
"""
    Mathematically it is (1-exp(-2x))/(1+exp(-2x)). Its shape is similar to the sigmoid, but it is centered at 0 and its range is from -1 to 1. TensorFlow has the built-in function tf.tanh for the hyperbolic tangent activation:
:return:
"""
h = np.linspace(-10, 10, 50)
out = tf.tanh(h)
init = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init)
y = sess.run(out)
plt.title("tanh activation")
plt.xlabel("x")
plt.ylabel("y")
plt.plot(h, y)
plt.show()
def activation_liner():
"""
    Linear activation: the neuron's output is identical to its input. The function is unbounded on both sides.
:return:
"""
h = np.linspace(-10., 10., 30)
w = tf.Variable(tf.random_normal(shape=(3, 1), stddev=2, dtype=tf.float64))
b = tf.Variable(tf.random_normal(shape=(1,), stddev=2, dtype=tf.float64))
liner_out = tf.matmul(h.reshape(10, 3), w) + b
init = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init)
y = sess.run(liner_out)
plt.title("liner activation")
plt.xlabel("x")
plt.ylabel("y")
plt.plot(np.linspace(-10., 10., len(y)), y.reshape(len(y), ))
plt.show()
def activation_relu():
"""
    ReLU activation: outputs max(0, x), so negative inputs are clamped to zero while positive inputs pass through unchanged.
:return:
"""
h = np.linspace(-10., 10., 30)
out = tf.nn.relu(h)
init = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init)
y = sess.run(out)
plt.title("relu activation")
plt.xlabel("x")
plt.ylabel("y")
plt.plot(h, y)
plt.show()
def activation_softmax():
"""
    The softmax activation is a normalized exponential function. A neuron's output depends not only on its own input but also on the sum of the inputs of all other neurons in the layer. One advantage is that the outputs stay small, so gradients do not become too large. Mathematically, yi = exp(xi) / Σj exp(xj):
:return:
"""
h = np.linspace(-10., 10., 30)
out = tf.nn.softmax(h)
init = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init)
y = sess.run(out)
plt.title("softmax activation")
plt.xlabel("x")
plt.ylabel("y")
plt.plot(h, y)
plt.show()
| [
"tensorflow.random_normal",
"tensorflow.shape",
"tensorflow.nn.relu",
"matplotlib.pyplot.ylabel",
"tensorflow.tanh",
"tensorflow.Session",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"tensorflow.global_variables_initializer",
"numpy.linspace",
"tensorflow.sigmoid",
"tensorflow.nn.softmax",
"matplotlib.pyplot.title",
"matplotlib.pyplot.show"
] | [((604, 630), 'numpy.linspace', 'np.linspace', (['(-1.0)', '(1.0)', '(50)'], {}), '(-1.0, 1.0, 50)\n', (615, 630), True, 'import numpy as np\n'), ((698, 731), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (729, 731), True, 'import tensorflow as tf\n'), ((1134, 1158), 'numpy.linspace', 'np.linspace', (['(-10)', '(10)', '(50)'], {}), '(-10, 10, 50)\n', (1145, 1158), True, 'import numpy as np\n'), ((1169, 1182), 'tensorflow.sigmoid', 'tf.sigmoid', (['h'], {}), '(h)\n', (1179, 1182), True, 'import tensorflow as tf\n'), ((1194, 1227), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (1225, 1227), True, 'import tensorflow as tf\n'), ((1630, 1654), 'numpy.linspace', 'np.linspace', (['(-10)', '(10)', '(50)'], {}), '(-10, 10, 50)\n', (1641, 1654), True, 'import numpy as np\n'), ((1665, 1675), 'tensorflow.tanh', 'tf.tanh', (['h'], {}), '(h)\n', (1672, 1675), True, 'import tensorflow as tf\n'), ((1687, 1720), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (1718, 1720), True, 'import tensorflow as tf\n'), ((2043, 2071), 'numpy.linspace', 'np.linspace', (['(-10.0)', '(10.0)', '(30)'], {}), '(-10.0, 10.0, 30)\n', (2054, 2071), True, 'import numpy as np\n'), ((2292, 2325), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (2323, 2325), True, 'import tensorflow as tf\n'), ((2701, 2729), 'numpy.linspace', 'np.linspace', (['(-10.0)', '(10.0)', '(30)'], {}), '(-10.0, 10.0, 30)\n', (2712, 2729), True, 'import numpy as np\n'), ((2738, 2751), 'tensorflow.nn.relu', 'tf.nn.relu', (['h'], {}), '(h)\n', (2748, 2751), True, 'import tensorflow as tf\n'), ((2764, 2797), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (2795, 2797), True, 'import tensorflow as tf\n'), ((3203, 3231), 'numpy.linspace', 'np.linspace', (['(-10.0)', '(10.0)', '(30)'], {}), '(-10.0, 10.0, 30)\n', (3214, 3231), True, 'import numpy as np\n'), ((3240, 3256), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['h'], {}), '(h)\n', (3253, 3256), True, 'import tensorflow as tf\n'), ((3269, 3302), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (3300, 3302), True, 'import tensorflow as tf\n'), ((742, 754), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (752, 754), True, 'import tensorflow as tf\n'), ((822, 854), 'matplotlib.pyplot.title', 'plt.title', (['"""threhold activation"""'], {}), "('threhold activation')\n", (831, 854), True, 'import matplotlib.pyplot as plt\n'), ((863, 878), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x"""'], {}), "('x')\n", (873, 878), True, 'import matplotlib.pyplot as plt\n'), ((887, 902), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y"""'], {}), "('y')\n", (897, 902), True, 'import matplotlib.pyplot as plt\n'), ((911, 925), 'matplotlib.pyplot.plot', 'plt.plot', (['h', 'y'], {}), '(h, y)\n', (919, 925), True, 'import matplotlib.pyplot as plt\n'), ((934, 944), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (942, 944), True, 'import matplotlib.pyplot as plt\n'), ((1238, 1250), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (1248, 1250), True, 'import tensorflow as tf\n'), ((1318, 1349), 'matplotlib.pyplot.title', 'plt.title', (['"""sigmoid activation"""'], {}), "('sigmoid activation')\n", (1327, 1349), True, 'import matplotlib.pyplot as plt\n'), ((1358, 1373), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x"""'], 
{}), "('x')\n", (1368, 1373), True, 'import matplotlib.pyplot as plt\n'), ((1382, 1397), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y"""'], {}), "('y')\n", (1392, 1397), True, 'import matplotlib.pyplot as plt\n'), ((1406, 1420), 'matplotlib.pyplot.plot', 'plt.plot', (['h', 'y'], {}), '(h, y)\n', (1414, 1420), True, 'import matplotlib.pyplot as plt\n'), ((1429, 1439), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1437, 1439), True, 'import matplotlib.pyplot as plt\n'), ((1731, 1743), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (1741, 1743), True, 'import tensorflow as tf\n'), ((1811, 1839), 'matplotlib.pyplot.title', 'plt.title', (['"""tanh activation"""'], {}), "('tanh activation')\n", (1820, 1839), True, 'import matplotlib.pyplot as plt\n'), ((1848, 1863), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x"""'], {}), "('x')\n", (1858, 1863), True, 'import matplotlib.pyplot as plt\n'), ((1872, 1887), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y"""'], {}), "('y')\n", (1882, 1887), True, 'import matplotlib.pyplot as plt\n'), ((1896, 1910), 'matplotlib.pyplot.plot', 'plt.plot', (['h', 'y'], {}), '(h, y)\n', (1904, 1910), True, 'import matplotlib.pyplot as plt\n'), ((1919, 1929), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1927, 1929), True, 'import matplotlib.pyplot as plt\n'), ((2091, 2149), 'tensorflow.random_normal', 'tf.random_normal', ([], {'shape': '(3, 1)', 'stddev': '(2)', 'dtype': 'tf.float64'}), '(shape=(3, 1), stddev=2, dtype=tf.float64)\n', (2107, 2149), True, 'import tensorflow as tf\n'), ((2171, 2227), 'tensorflow.random_normal', 'tf.random_normal', ([], {'shape': '(1,)', 'stddev': '(2)', 'dtype': 'tf.float64'}), '(shape=(1,), stddev=2, dtype=tf.float64)\n', (2187, 2227), True, 'import tensorflow as tf\n'), ((2336, 2348), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (2346, 2348), True, 'import tensorflow as tf\n'), ((2422, 2451), 'matplotlib.pyplot.title', 'plt.title', (['"""liner activation"""'], {}), "('liner activation')\n", (2431, 2451), True, 'import matplotlib.pyplot as plt\n'), ((2460, 2475), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x"""'], {}), "('x')\n", (2470, 2475), True, 'import matplotlib.pyplot as plt\n'), ((2484, 2499), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y"""'], {}), "('y')\n", (2494, 2499), True, 'import matplotlib.pyplot as plt\n'), ((2578, 2588), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2586, 2588), True, 'import matplotlib.pyplot as plt\n'), ((2808, 2820), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (2818, 2820), True, 'import tensorflow as tf\n'), ((2888, 2916), 'matplotlib.pyplot.title', 'plt.title', (['"""relu activation"""'], {}), "('relu activation')\n", (2897, 2916), True, 'import matplotlib.pyplot as plt\n'), ((2925, 2940), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x"""'], {}), "('x')\n", (2935, 2940), True, 'import matplotlib.pyplot as plt\n'), ((2949, 2964), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y"""'], {}), "('y')\n", (2959, 2964), True, 'import matplotlib.pyplot as plt\n'), ((2973, 2987), 'matplotlib.pyplot.plot', 'plt.plot', (['h', 'y'], {}), '(h, y)\n', (2981, 2987), True, 'import matplotlib.pyplot as plt\n'), ((2996, 3006), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3004, 3006), True, 'import matplotlib.pyplot as plt\n'), ((3313, 3325), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (3323, 3325), True, 'import tensorflow as tf\n'), ((3393, 3424), 'matplotlib.pyplot.title', 'plt.title', (['"""softmax 
activation"""'], {}), "('softmax activation')\n", (3402, 3424), True, 'import matplotlib.pyplot as plt\n'), ((3433, 3448), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x"""'], {}), "('x')\n", (3443, 3448), True, 'import matplotlib.pyplot as plt\n'), ((3457, 3472), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y"""'], {}), "('y')\n", (3467, 3472), True, 'import matplotlib.pyplot as plt\n'), ((3481, 3495), 'matplotlib.pyplot.plot', 'plt.plot', (['h', 'y'], {}), '(h, y)\n', (3489, 3495), True, 'import matplotlib.pyplot as plt\n'), ((3504, 3514), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3512, 3514), True, 'import matplotlib.pyplot as plt\n'), ((540, 551), 'tensorflow.shape', 'tf.shape', (['X'], {}), '(X)\n', (548, 551), True, 'import tensorflow as tf\n'), ((562, 573), 'tensorflow.shape', 'tf.shape', (['X'], {}), '(X)\n', (570, 573), True, 'import tensorflow as tf\n'), ((473, 484), 'tensorflow.shape', 'tf.shape', (['X'], {}), '(X)\n', (481, 484), True, 'import tensorflow as tf\n')] |
import base64
import re
import xml.etree.ElementTree
import pickle
import requests
import steam, steam.webauth, steam.guard
from verifier.settings import STEAM_GROUP_ID, ACTOR_USERNAME, ACTOR_PASSWORD, ACTOR_SHARED_SECRET
def list_group_uids():
result = []
try:
url = f"https://steamcommunity.com/gid/{STEAM_GROUP_ID}/memberslistxml/"
tree = xml.etree.ElementTree.fromstring(
requests.get(url).text
)
members = tree.find("members")
for member in members:
result.append(int(member.text))
except:
pass
return result
def get_session():
try:
sess_cookies = pickle.load(open("sessionFile", "rb"))
sess = requests.Session()
sess.cookies.update(sess_cookies)
return sess
except:
user = steam.webauth.WebAuth(ACTOR_USERNAME, ACTOR_PASSWORD)
try:
user.login()
except steam.webauth.TwoFactorCodeRequired:
user.login(twofactor_code=steam.guard.generate_twofactor_code(
base64.b64decode(ACTOR_SHARED_SECRET.encode("u8"))
))
except:
return False
pickle.dump(user.session.cookies, open("sessionFile", "wb+"))
return user.session
def approve_to_group(steamid32):
sess = get_session()
if not sess:
return False
ret = sess.get(
f"https://steamcommunity.com/gid/{STEAM_GROUP_ID}/joinRequestsManage")
session_ids = re.findall('name="sessionID" value="([a-f0-9]+)"', ret.text)
if not len(session_ids):
raise Exception("Steam 故障,暂时无法登录")
session_id = session_ids[0]
approve_ids = list(
		map(int, re.findall(r'JoinRequests_ApproveDenyUser\( \'(\d+)\', 1 \)"', ret.text)))
if steamid32 not in approve_ids:
raise Exception(
f"未查找到 Steam 加入申请。"
f"请 <a href='https://steamcommunity.com/gid/{STEAM_GROUP_ID}/'>点击此处</a> 发送进组请求"
)
ret = sess.post(
f"https://steamcommunity.com/gid/{STEAM_GROUP_ID}/joinRequestsManage",
data={
"rgAccounts[]": str(steamid32),
"bapprove": "1",
"sessionID": session_id
})
if int(ret.text) != 1:
raise Exception("Steam 内部错误 [%d]" % int(ret.text))
return True
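# Illustrative usage sketch (an assumption, not from this module): a calling view
# or management command would presumably combine these helpers roughly like this,
# where `steamid32` and `show_error_to_user` are placeholder names:
#
#   try:
#       approve_to_group(steamid32)
#   except Exception as exc:
#       show_error_to_user(str(exc))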
| [
"requests.Session",
"requests.get",
"verifier.settings.ACTOR_SHARED_SECRET.encode",
"steam.webauth.WebAuth",
"re.findall"
] | [((1476, 1536), 're.findall', 're.findall', (['"""name="sessionID" value="([a-f0-9]+)\\""""', 'ret.text'], {}), '(\'name="sessionID" value="([a-f0-9]+)"\', ret.text)\n', (1486, 1536), False, 'import re\n'), ((712, 730), 'requests.Session', 'requests.Session', ([], {}), '()\n', (728, 730), False, 'import requests\n'), ((821, 874), 'steam.webauth.WebAuth', 'steam.webauth.WebAuth', (['ACTOR_USERNAME', 'ACTOR_PASSWORD'], {}), '(ACTOR_USERNAME, ACTOR_PASSWORD)\n', (842, 874), False, 'import steam, steam.webauth, steam.guard\n'), ((1684, 1758), 're.findall', 're.findall', (['"""JoinRequests_ApproveDenyUser\\\\( \'(\\\\d+)\', 1 \\\\)\\""""', 'ret.text'], {}), '(\'JoinRequests_ApproveDenyUser\\\\( \\\'(\\\\d+)\\\', 1 \\\\)"\', ret.text)\n', (1694, 1758), False, 'import re\n'), ((415, 432), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (427, 432), False, 'import requests\n'), ((1073, 1105), 'verifier.settings.ACTOR_SHARED_SECRET.encode', 'ACTOR_SHARED_SECRET.encode', (['"""u8"""'], {}), "('u8')\n", (1099, 1105), False, 'from verifier.settings import STEAM_GROUP_ID, ACTOR_USERNAME, ACTOR_PASSWORD, ACTOR_SHARED_SECRET\n')] |
from django import forms
from datetimewidget.widgets import DateTimeWidget
from urllib import parse
class FilterExperimentsForm(forms.Form):
name = forms.CharField(
label='Search by name',
required=False,
max_length=255)
sha256 = forms.CharField(
label='SHA-256 of data file',
required=False,
max_length=64)
created_from = forms.DateTimeField(
label='Only newer experiments than',
required=False,
widget=DateTimeWidget(attrs={'id': "from"}, usel10n=True, bootstrap_version=3))
created_to = forms.DateTimeField(
label='Only older experiments than',
required=False,
widget=DateTimeWidget(attrs={'id': "to"}, usel10n=True, bootstrap_version=3))
only_own = forms.BooleanField(
label='Show only your experiments',
required=False)
def as_url_args(self):
return '?' + parse.urlencode(self.cleaned_data)
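# Illustrative usage sketch (an assumption, not from this module): a list view
# would typically validate the form first, then reuse as_url_args() to carry the
# active filters in pagination or redirect links, e.g.
#
#   form = FilterExperimentsForm(request.GET)
#   if form.is_valid():
#       filter_query = form.as_url_args()  # appended to the list-view URL
#
# Only the form class is defined here; the view wiring above is assumed.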
| [
"urllib.parse.urlencode",
"django.forms.BooleanField",
"django.forms.CharField",
"datetimewidget.widgets.DateTimeWidget"
] | [((154, 225), 'django.forms.CharField', 'forms.CharField', ([], {'label': '"""Search by name"""', 'required': '(False)', 'max_length': '(255)'}), "(label='Search by name', required=False, max_length=255)\n", (169, 225), False, 'from django import forms\n'), ((264, 340), 'django.forms.CharField', 'forms.CharField', ([], {'label': '"""SHA-256 of data file"""', 'required': '(False)', 'max_length': '(64)'}), "(label='SHA-256 of data file', required=False, max_length=64)\n", (279, 340), False, 'from django import forms\n'), ((771, 841), 'django.forms.BooleanField', 'forms.BooleanField', ([], {'label': '"""Show only your experiments"""', 'required': '(False)'}), "(label='Show only your experiments', required=False)\n", (789, 841), False, 'from django import forms\n'), ((490, 561), 'datetimewidget.widgets.DateTimeWidget', 'DateTimeWidget', ([], {'attrs': "{'id': 'from'}", 'usel10n': '(True)', 'bootstrap_version': '(3)'}), "(attrs={'id': 'from'}, usel10n=True, bootstrap_version=3)\n", (504, 561), False, 'from datetimewidget.widgets import DateTimeWidget\n'), ((685, 754), 'datetimewidget.widgets.DateTimeWidget', 'DateTimeWidget', ([], {'attrs': "{'id': 'to'}", 'usel10n': '(True)', 'bootstrap_version': '(3)'}), "(attrs={'id': 'to'}, usel10n=True, bootstrap_version=3)\n", (699, 754), False, 'from datetimewidget.widgets import DateTimeWidget\n'), ((908, 942), 'urllib.parse.urlencode', 'parse.urlencode', (['self.cleaned_data'], {}), '(self.cleaned_data)\n', (923, 942), False, 'from urllib import parse\n')] |
import graphene
from graphql import GraphQLError
from graphql_jwt.decorators import login_required, superuser_required
from app_utils.constants import STORE_CHOICES
from app_utils.database import get_model_object, SaveContextManager
from app_utils.validations.validate_store import ValidateStore
from apps.properties.models import PropDetail
from apps.stores.models import Store
from app_utils.model_types.store import StoreInputType, StoreType
class CreateEditStore(graphene.Mutation):
message = graphene.String()
store = graphene.Field(StoreType)
class Arguments:
amount = graphene.Float(required=True)
record_type = graphene.String(required=True)
property_id = graphene.String(required=False)
is_inflow = graphene.Boolean(required=True)
action_date = graphene.Date(required=True)
description = graphene.String(required=True)
	def mutate(self, info, **kwargs):
pass
class CreateStore(CreateEditStore):
"""
Mutation to create a store. Inherits from 'CreateEditStore' class
"""
@login_required
def mutate(self, info, **kwargs):
user = info.context.user
store = Store(user=user)
validator = ValidateStore(**kwargs)
new_store = validator.validate_and_save_store(store)
return CreateStore(message='Successfully saved', store=new_store)
class UpdateStore(CreateEditStore):
"""
Mutation to update a store. Inherits from 'CreateEditStore' class
"""
class Arguments(CreateEditStore.Arguments):
id = graphene.String(required=True)
@login_required
def mutate(self, info, **kwargs):
store = get_model_object(Store, 'id', kwargs.get('id'))
validator = ValidateStore(**kwargs)
updated_store = validator.validate_and_save_store(store)
		return UpdateStore(message='Successfully updated', store=updated_store)
class CreateManyStores(graphene.Mutation):
message = graphene.String()
total_saved = graphene.Int()
total_not_saved = graphene.Int()
class Arguments:
stores = graphene.List(StoreInputType)
@login_required
def mutate(self, info, **kwargs):
saved = 0
not_saved = 0
stores = kwargs['stores']
for store in stores:
store_type = [item for item in STORE_CHOICES if item[0] == store['record_type']]
if not store_type:
raise GraphQLError('Invalid store type')
has_saved = Store.objects.filter(
action_date=store['action_date'],
description=store['description']).first()
if not has_saved:
store['user_id'] = info.context.user.id
new_store = Store(**store)
new_store.save()
saved += 1
else:
not_saved += 1
return CreateManyStores(
message="stores successfully saved",
total_saved=saved,
total_not_saved=not_saved)
class MigrateStoreProperties(graphene.Mutation):
message = graphene.String()
class Arguments:
pass
@superuser_required
def mutate(self, info, **kwargs):
stores = Store.objects.filter(is_property=True)
fields_to_update = ['property_id', 'is_property']
for store in stores:
prop_detail = PropDetail.objects.filter(
title=store.description,
amount=store.amount,
created_at__date=store.updated_at.date()).first()
if prop_detail:
store.property_id = prop_detail.property_id
store.is_property = False
else:
print('Failed store======>')
print(store)
break
Store.objects.bulk_update(stores, fields_to_update, batch_size=500)
return MigrateStoreProperties(message=f"{len(stores)} stores successfully migrated")
class StoreMutation(graphene.ObjectType):
create_store = CreateStore.Field()
update_store = UpdateStore.Field()
create_many_stores = CreateManyStores.Field()
migrate_store_properties = MigrateStoreProperties.Field()
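# Illustrative usage sketch (an assumption, not from this schema module): if
# StoreMutation is mounted on the project's root schema, a client could invoke
# the create_store field roughly like this (graphene exposes the snake_case
# arguments declared above as camelCase):
#
#   mutation {
#     createStore(amount: 100.0, recordType: "expense", isInflow: false,
#                 actionDate: "2021-05-01", description: "groceries") {
#       message
#     }
#   }
#
# The recordType value is illustrative; valid values come from STORE_CHOICES.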
| [
"graphene.String",
"apps.stores.models.Store",
"graphene.Field",
"graphene.List",
"apps.stores.models.Store.objects.filter",
"graphene.Date",
"graphene.Float",
"graphql.GraphQLError",
"app_utils.validations.validate_store.ValidateStore",
"graphene.Int",
"apps.stores.models.Store.objects.bulk_update",
"graphene.Boolean"
] | [((501, 518), 'graphene.String', 'graphene.String', ([], {}), '()\n', (516, 518), False, 'import graphene\n'), ((528, 553), 'graphene.Field', 'graphene.Field', (['StoreType'], {}), '(StoreType)\n', (542, 553), False, 'import graphene\n'), ((1809, 1826), 'graphene.String', 'graphene.String', ([], {}), '()\n', (1824, 1826), False, 'import graphene\n'), ((1842, 1856), 'graphene.Int', 'graphene.Int', ([], {}), '()\n', (1854, 1856), False, 'import graphene\n'), ((1876, 1890), 'graphene.Int', 'graphene.Int', ([], {}), '()\n', (1888, 1890), False, 'import graphene\n'), ((2697, 2714), 'graphene.String', 'graphene.String', ([], {}), '()\n', (2712, 2714), False, 'import graphene\n'), ((584, 613), 'graphene.Float', 'graphene.Float', ([], {'required': '(True)'}), '(required=True)\n', (598, 613), False, 'import graphene\n'), ((630, 660), 'graphene.String', 'graphene.String', ([], {'required': '(True)'}), '(required=True)\n', (645, 660), False, 'import graphene\n'), ((677, 708), 'graphene.String', 'graphene.String', ([], {'required': '(False)'}), '(required=False)\n', (692, 708), False, 'import graphene\n'), ((723, 754), 'graphene.Boolean', 'graphene.Boolean', ([], {'required': '(True)'}), '(required=True)\n', (739, 754), False, 'import graphene\n'), ((771, 799), 'graphene.Date', 'graphene.Date', ([], {'required': '(True)'}), '(required=True)\n', (784, 799), False, 'import graphene\n'), ((816, 846), 'graphene.String', 'graphene.String', ([], {'required': '(True)'}), '(required=True)\n', (831, 846), False, 'import graphene\n'), ((1094, 1110), 'apps.stores.models.Store', 'Store', ([], {'user': 'user'}), '(user=user)\n', (1099, 1110), False, 'from apps.stores.models import Store\n'), ((1125, 1148), 'app_utils.validations.validate_store.ValidateStore', 'ValidateStore', ([], {}), '(**kwargs)\n', (1138, 1148), False, 'from app_utils.validations.validate_store import ValidateStore\n'), ((1440, 1470), 'graphene.String', 'graphene.String', ([], {'required': '(True)'}), '(required=True)\n', (1455, 1470), False, 'import graphene\n'), ((1596, 1619), 'app_utils.validations.validate_store.ValidateStore', 'ValidateStore', ([], {}), '(**kwargs)\n', (1609, 1619), False, 'from app_utils.validations.validate_store import ValidateStore\n'), ((1921, 1950), 'graphene.List', 'graphene.List', (['StoreInputType'], {}), '(StoreInputType)\n', (1934, 1950), False, 'import graphene\n'), ((2809, 2847), 'apps.stores.models.Store.objects.filter', 'Store.objects.filter', ([], {'is_property': '(True)'}), '(is_property=True)\n', (2829, 2847), False, 'from apps.stores.models import Store\n'), ((3244, 3311), 'apps.stores.models.Store.objects.bulk_update', 'Store.objects.bulk_update', (['stores', 'fields_to_update'], {'batch_size': '(500)'}), '(stores, fields_to_update, batch_size=500)\n', (3269, 3311), False, 'from apps.stores.models import Store\n'), ((2199, 2233), 'graphql.GraphQLError', 'GraphQLError', (['"""Invalid store type"""'], {}), "('Invalid store type')\n", (2211, 2233), False, 'from graphql import GraphQLError\n'), ((2436, 2450), 'apps.stores.models.Store', 'Store', ([], {}), '(**store)\n', (2441, 2450), False, 'from apps.stores.models import Store\n'), ((2249, 2342), 'apps.stores.models.Store.objects.filter', 'Store.objects.filter', ([], {'action_date': "store['action_date']", 'description': "store['description']"}), "(action_date=store['action_date'], description=store[\n 'description'])\n", (2269, 2342), False, 'from apps.stores.models import Store\n')] |
import json
import os
import requests
from requests_html import HTML
import random
from main import ResourceBot
update_id = None
Bot = ResourceBot()
dir_path = os.path.dirname(os.path.realpath(__file__))
with open(f'{dir_path}/memes.json', encoding="utf8") as file:
meme_data = json.loads(file.read())
with open(f'{dir_path}/quotes.json', encoding="utf8") as file:
quotes_data = json.loads(file.read())
def format_message(msg):
formatted_msg = ""
for i in msg:
formatted_msg += f"{i.upper()} ---> {msg[i]}\n"
return formatted_msg
def url_to_text(url):
r = requests.get(url)
if r.status_code == 200:
html_text = r.text
return html_text
# Parses the HTML text into a structured, queryable document
def pharse_and_extract(url):
    # fetch the raw HTML text for the given URL
html_text = url_to_text(url)
if html_text is None:
return ""
r_html = HTML(html=html_text)
return r_html
def extract_from_css_tricks(res_html):
resulted_tricks = []
titles=res_html.find(".article-article h2 a")
for title in titles:
resulted_tricks.append(title.attrs['href'])
return resulted_tricks
def extract_icons_url(res_html,limit=1):
icons_url = []
titles=res_html.find(".icon--holder>a>img")
for title in titles:
icons_url.append(title.attrs['data-src'])
return " \n".join(icons_url[:limit])
def make_reply(message):
reply = None
if message == '/help':
reply = "Type \n/meme to get meme \n/quote to get quote\n/icon <keyword> <no of asset>"
elif message == '/meme':
reply = random.choice(meme_data)
elif message == '/resource':
reply = "https://css-tricks.com/too-many-svgs-clogging-up-your-markup-try-use/"
elif message == '/quote':
reply = format_message(random.choice(quotes_data))
elif message:
if message.startswith('/icon'):
search_icon,*params = message.split(" ")
if len(params)>1:
flaticon_url = f"https://www.flaticon.com/search?word={params[0]}"
icons = extract_icons_url(pharse_and_extract(flaticon_url),limit=int(params[-1]))
reply = icons
return reply
while True:
print("....")
updates = Bot.get_updates(offset=update_id)
updates = updates['result']
if updates:
for item in updates:
update_id = item['update_id']
try:
message = item["message"]["text"]
except:
message = None
from_ = item['message']['from']['id']
reply = make_reply(message)
Bot.send_messages(reply, from_)
| [
"main.ResourceBot",
"random.choice",
"requests.get",
"os.path.realpath",
"requests_html.HTML"
] | [((138, 151), 'main.ResourceBot', 'ResourceBot', ([], {}), '()\n', (149, 151), False, 'from main import ResourceBot\n'), ((180, 206), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (196, 206), False, 'import os\n'), ((597, 614), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (609, 614), False, 'import requests\n'), ((909, 929), 'requests_html.HTML', 'HTML', ([], {'html': 'html_text'}), '(html=html_text)\n', (913, 929), False, 'from requests_html import HTML\n'), ((1601, 1625), 'random.choice', 'random.choice', (['meme_data'], {}), '(meme_data)\n', (1614, 1625), False, 'import random\n'), ((1808, 1834), 'random.choice', 'random.choice', (['quotes_data'], {}), '(quotes_data)\n', (1821, 1834), False, 'import random\n')] |
#!/usr/bin/env python3
import math
import os
import random
import time
import tkinter as tk
import tkinter.messagebox as msgbox
import tkinter.filedialog as tkfd
from PIL import Image, ImageTk
import libnochmal as ln
from libnochmal import Color
GAME_OVER_MSGS = [
'Das grenzt ja schon an Arbeitsverweigerung.',
'Dabei sein ist alles.',
'Da muss wohl noch etwas geübt werden.',
'Nicht ganz schlecht.',
'Na, wird doch langsam.',
'Gut, aber das geht noch besser.',
'Das war wohl nicht dein erstes Mal...',
'Klasse! Das lief ja gut.',
'Hoffentlich ohne Schummeln geschafft!',
'Super! Welch grandioses Ergebnis!',
'Du könntest auch professioneller „NOCH MAL!“-Spieler sein.',
'Wirst du „Glückspilz“ oder „The Brain“ genannt?',
'Es gibt also doch Superhelden!',
]
class ColorButton(tk.Button):
def __init__(self, master, color, x, y):
super(ColorButton, self).__init__(master, bg=color.to_rgb(), activebackground=color.to_rgb_secondary(),
font='TkFixedFont', relief="flat", overrelief="flat")
self.x = x
self.y = y
self.application = master
self.color = color
self.star = False
self.crossed = False
self['image'] = Application.CIRCLE_IMAGE
def set_by_tile(self, tile):
self.set_color(tile.color)
self.star = tile.star
if self.star:
self['image'] = Application.STAR_IMAGE
def set_color(self, color):
self.color = color
self.config(bg=color.to_rgb(), activebackground=color.to_rgb_secondary())
def click(self):
if not self.application.check_click(self.x, self.y):
return
if self.crossed:
if self.star:
self['image'] = Application.STAR_IMAGE
else:
self['image'] = Application.CIRCLE_IMAGE
else:
self['image'] = Application.CROSS_GRAY_IMAGE
self.crossed = not self.crossed
def commit(self):
self['image'] = Application.CROSS_IMAGE
def reset(self):
self.color = Color.UNINITIALIZED
self.star = False
self.crossed = False
self['image'] = Application.CIRCLE_IMAGE
self.set_color(self.color)
class SinglePlayerGameState:
def __init__(self):
self.board = None
self.joker_count = 8
self.toss_counter = 0
self.tossed = False
self.started = False
self.rolled_numbers = [-1, -1]
self.rolled_colors = [Color.UNINITIALIZED, Color.UNINITIALIZED]
self.crossed_tiles = []
self.crossed_tiles_to_commit = []
self.stars_crossed = 0
self.columns_crossed = []
self.colors_crossed = dict(zip(Color.ref_list(), [0 for _ in range(len(Color.ref_list()))]))
def start(self):
self.started = True
self.columns_crossed = [0 for _ in range(self.board.width)]
def finish(self):
self.started = False
def inc_toss_count(self):
if not self.started or self.tossed:
return
self.toss_counter += 1
self.tossed = True
def use_joker(self, n=1) -> bool:
if self.joker_count < n:
return False
else:
self.joker_count -= n
return True
class Application(tk.Frame):
STAR_IMAGE = None
CIRCLE_IMAGE = None
CROSS_IMAGE = None
CROSS_GRAY_IMAGE = None
BOARDS = []
def __init__(self, master=None):
super().__init__(master)
self.master = master
self.grid()
self.game_state = SinglePlayerGameState()
# load star and circle image
script_path = os.path.dirname(os.path.realpath(__file__))
Application.STAR_IMAGE = ImageTk.PhotoImage(Image.open(script_path + '/img/star.png'))
Application.CIRCLE_IMAGE = ImageTk.PhotoImage(Image.open(script_path + '/img/circle.png'))
Application.CROSS_IMAGE = ImageTk.PhotoImage(Image.open(script_path + '/img/cross.png'))
Application.CROSS_GRAY_IMAGE = ImageTk.PhotoImage(Image.open(script_path + '/img/cross-gray.png'))
boards_dir = script_path + '/boards'
boards_dir_files = os.listdir(boards_dir)
boards_dir_files.sort()
assert len(boards_dir_files) == 7
for file in boards_dir_files:
Application.BOARDS.append(ln.read_board_from_file(os.path.join(boards_dir, file)))
# top buttons
self.choose_board_lbl = tk.Label(self, text='Choose Board:')
self.choose_board_lbl.grid(row=0, column=0, columnspan=5, sticky='W')
# board options
self.board_chooser_buttons = []
for color, index in zip([Color.UNINITIALIZED, Color.RED, Color.BLUE, Color.GREEN, Color.ORANGE, Color.UNINITIALIZED, Color.YELLOW], range(7)):
btn = ColorButton(self, color, -1, -1)
if index == 0:
btn['bg'] = 'black'
btn['activebackground'] = '#555555'
if index == 5:
btn['bg'] = 'purple'
btn['activebackground'] = '#BB00BB'
btn.grid(row=0, column=(4+index))
btn['command'] = lambda i=index, b=btn: self.open_board(Application.BOARDS[i], b)
self.board_chooser_buttons.append(btn)
# disabled for now, maybe use command line args to load a custom board
# self.load_board_btn = tk.Button(self, text='Open Board', command=self.open_board)
# self.load_board_btn.grid(row=0, column=5, columnspan=4, sticky='W')
self.start_game_btn = tk.Button(self, text='Start Game', command=self.start_game)
self.start_game_btn.grid(row=0, column=11, columnspan=4, sticky='E')
# top Letters
for x in range(ln.DEFAULT_BOARD_WIDTH):
lbl = tk.Label(self, text=chr(65 + x))
if x == 7:
lbl['fg'] = 'red'
lbl.grid(row=1, column=x)
# board buttons
self.board_buttons = []
for x in range(ln.DEFAULT_BOARD_WIDTH):
self.board_buttons.append([])
for y in range(ln.DEFAULT_BOARD_HEIGHT):
btn = ColorButton(self, Color.UNINITIALIZED, x, y)
btn['command'] = btn.click
btn.grid(row=(y + 2), column=x)
if x == 7:
btn['highlightbackground'] = '#808080'
self.board_buttons[x].append(btn)
# bottom points per column
self.board_column_point_labels = []
for x in range(ln.DEFAULT_BOARD_WIDTH):
lbl = tk.Label(self, text=ln.POINTS_PER_COLUMN[x])
lbl.grid(row=9, column=x)
if x == 7:
lbl['fg'] = 'red'
self.board_column_point_labels.append(lbl)
# bottom buttons
self.color_dice_1 = ColorButton(self, Color.UNINITIALIZED, -1, -1)
self.color_dice_1.grid(row=10, column=4)
self.color_dice_2 = ColorButton(self, Color.UNINITIALIZED, -1, -1)
self.color_dice_2.grid(row=10, column=5)
self.number_dice_1 = tk.Label(self, text='0')
self.number_dice_1.grid(row=10, column=6)
self.number_dice_2 = tk.Label(self, text='0')
self.number_dice_2.grid(row=10, column=7)
self.commit_btn = tk.Button(self, text='Commit', command=self.commit)
self.commit_btn.grid(row=10, column=8, columnspan=3, sticky='W')
# status bar
self.sep = tk.Label(self, text='_______________________________________________________________')
self.sep.grid(row=11, column=0, columnspan=15)
self.statusbar = tk.Label(self, text='')
self.statusbar.grid(row=12, column=0, columnspan=15, sticky='W')
def open_board(self, board=None, btn=None):
if self.game_state.board and self.game_state.started:
if msgbox.askyesno("Reset Game", "Do you want start a new game? Current progress will be lost."):
self.clear_game()
else:
return
for b in self.board_chooser_buttons:
b['image'] = Application.CIRCLE_IMAGE
if board is None:
f = tkfd.askopenfilename(defaultextension='.dat')
if len(f) == 0:
return
try:
board = ln.read_board_from_file(f)
finally:
if board is None:
print("Could not load board from file '{}'".format(f))
else:
self._load_board(board)
else:
self.clear_game()
self._load_board(board)
if btn is not None:
btn['image'] = Application.CROSS_GRAY_IMAGE
def _load_board(self, board):
self.game_state.board = board
for x in range(board.width):
for y in range(board.height):
self.board_buttons[x][y].set_by_tile(board.get_tile_at(x, y))
def start_game(self):
if self.game_state.board is None or self.game_state.started:
if self.game_state.board is None:
self.update_statusbar("No board loaded")
return
self.game_state.start()
self.update_statusbar()
self.toss()
self.update_statusbar()
def clear_game(self):
self.game_state = SinglePlayerGameState()
for col in self.board_buttons:
for btn in col:
btn.reset()
for lbl in self.board_column_point_labels:
lbl['bg'] = self['bg']
self.color_dice_1.set_color(Color.UNINITIALIZED)
self.color_dice_2.set_color(Color.UNINITIALIZED)
self.number_dice_1['text'] = '0'
self.number_dice_2['text'] = '0'
self.update_statusbar()
def toss(self):
if not self.game_state.started or self.game_state.tossed:
return
for i in range(random.randint(11, 17)):
self.game_state.rolled_numbers[0] = random.randint(1, 6)
self.game_state.rolled_numbers[1] = random.randint(1, 6)
self.game_state.rolled_colors[0] = random.choice(Color.ref_list(True))
self.game_state.rolled_colors[1] = random.choice(Color.ref_list(True))
self.color_dice_1.set_color(self.game_state.rolled_colors[0])
self.color_dice_2.set_color(self.game_state.rolled_colors[1])
self.number_dice_1['text'] = str(self.game_state.rolled_numbers[0]).replace('6', '?')
self.number_dice_2['text'] = str(self.game_state.rolled_numbers[1]).replace('6', '?')
self.update()
time.sleep(0.075 + 0.125 * (i/17))
self.game_state.inc_toss_count()
def commit(self):
if not self.game_state.started or not self.game_state.tossed:
self.update_statusbar("Game not started yet")
return
jokers_used = 0
if len(self.game_state.crossed_tiles_to_commit) == 0:
if not msgbox.askyesno(title="Pass?", message="Would you like to pass this turn?"):
return
else:
if len(self.game_state.crossed_tiles_to_commit) not in self.game_state.rolled_numbers:
if 6 in self.game_state.rolled_numbers:
jokers_used += 1
else:
self.update_statusbar("{0} tiles were crossed, but the dices rolled {1[0]} and {1[1]}"
.format(len(self.game_state.crossed_tiles_to_commit), self.game_state.rolled_numbers))
return # invalid set of crosses
if self.game_state.board.get_color_at(self.game_state.crossed_tiles_to_commit[0][0], self.game_state.crossed_tiles_to_commit[0][1]) not in self.game_state.rolled_colors:
jokers_used += 1
if not self.game_state.use_joker(jokers_used):
self.update_statusbar("Not enough jokers left")
return
# check if all tiles to commit are reachable
for (x, y) in self.game_state.crossed_tiles_to_commit:
if not self._tile_is_reachable(x, y):
self.update_statusbar("Invalid selection, tile ({}, {}) is not reachable".format(chr(x + 65), y+1))
return
# check if component in itself is fully connected
if len(ln._get_connected_coords(self.game_state.crossed_tiles_to_commit)) > 1:
self.update_statusbar("Invalid selection, the placed crosses are not fully connected")
return
# all checks passed, update game state
for (x, y) in self.game_state.crossed_tiles_to_commit:
self.board_buttons[x][y].commit()
tile = self.game_state.board.get_tile_at(x, y)
self.game_state.colors_crossed[tile.color] += 1
self.game_state.columns_crossed[x] += 1
if tile.star:
self.game_state.stars_crossed += 1
# reset tiles to commit list
self.game_state.crossed_tiles.extend(self.game_state.crossed_tiles_to_commit)
self.game_state.crossed_tiles_to_commit = []
self.update_statusbar()
if self.game_state.toss_counter == 30:
score = self.calc_score()
msgbox.showinfo("Game Over", "Game over, final score:\n\n"
"Color bonus:\t{1[0]:>3}\n"
"Column bonus:\t{1[1]:>3}\n"
"Joker bonus:\t{1[2]:>3}\n"
"Star penalty:\t{1[3]:>3}\n"
"-----------------------------\n"
"Total score:\t{0}\n"
"\n"
"{2}".format(sum(score), score, self.get_game_over_msg()))
self.game_state.finish()
return
else:
self.game_state.tossed = False
self.toss()
def check_click(self, x, y):
if not self.game_state.started or not self.game_state.tossed:
self.update_statusbar("Game not started yet")
return False
# no already committed cross
if (x, y) in self.game_state.crossed_tiles:
self.update_statusbar("Cannot uncross this tile")
return False
# always be able to remove a cross to commit
if (x, y) in self.game_state.crossed_tiles_to_commit:
self.game_state.crossed_tiles_to_commit.remove((x, y))
self.update_statusbar()
return True
# Xs left to place?
if len(self.game_state.crossed_tiles_to_commit) >= min(max(self.game_state.rolled_numbers), 5):
self.update_statusbar("No more tiles can be crossed")
return False
# color was tossed?
if Color.UNINITIALIZED not in self.game_state.rolled_colors and self.game_state.board.get_color_at(x, y) not in self.game_state.rolled_colors:
self.update_statusbar("This color wasn't tossed")
return False
# only one component
if len(self.game_state.crossed_tiles_to_commit) > 0:
component = self.game_state.board.get_component(self.game_state.crossed_tiles_to_commit[0][0], self.game_state.crossed_tiles_to_commit[0][1])
if (x, y) not in component:
self.update_statusbar("You can't cross tiles from multiple components")
return False
# column 7 (H, the middle) reachable?
if not self._tile_is_reachable(x, y):
self.update_statusbar("This tile is not reachable")
return False
# all checks passed
self.game_state.crossed_tiles_to_commit.append((x, y))
self.update_statusbar()
return True
def _tile_is_reachable(self, x, y):
if x == 7:
return True
coords = set(self.game_state.crossed_tiles).union(set(self.game_state.crossed_tiles_to_commit))
coords.add((x, y))
reachable_coords = ln._get_connected_coords(coords, (x, y))[0]
for i in range(self.game_state.board.height):
if (7, i) in reachable_coords:
return True
return False
def update_statusbar(self, error_msg=None):
state = self.game_state
if error_msg:
self.statusbar['text'] = error_msg
return
if not state.started:
self.statusbar['text'] = "Game not started yet"
return
turn = "Turn {:>2}/30".format(state.toss_counter)
jokers = "Jokers left: " + str(state.joker_count)
score = "Score: " + str(sum(self.calc_score()))
self.statusbar['text'] = "{} {} {}".format(turn, jokers, score)
self.update_column_finished_indicators()
def calc_score(self):
state = self.game_state
return [sum([5 if v == 21 else 0 for v in state.colors_crossed.values()]), # color bonus
sum([ln.POINTS_PER_COLUMN[i] if v == 7 else 0 for i, v in
zip(range(state.board.width), state.columns_crossed)]), # column bonus
state.joker_count, # joker bonus
(-2) * (state.board.width - state.stars_crossed)] # star penalty
def get_game_over_msg(self, score=None):
if score is None:
score = sum(self.calc_score())
if score < 0:
return GAME_OVER_MSGS[0]
if score == 0:
return GAME_OVER_MSGS[1]
if score > 40:
return GAME_OVER_MSGS[-1]
return GAME_OVER_MSGS[math.ceil(float(score) / 4.0) + 1]
def update_column_finished_indicators(self):
for i in range(len(self.board_column_point_labels)):
if self.game_state.columns_crossed[i] == self.game_state.board.height:
self.board_column_point_labels[i]['bg'] = 'green'
def main():
random.seed()
root = tk.Tk()
root.wm_title("Noch mal! Single Player Challenge")
root.attributes('-type', 'dialog')
app = Application(master=root)
app.mainloop()
if __name__ == '__main__':
main()
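# Illustrative usage note (an assumption, not from this file): the script is run
# directly (e.g. `python3 <this file>`) and, as Application.__init__ above shows,
# expects an img/ directory with the star, circle and cross PNGs plus a boards/
# directory containing exactly 7 board files next to the script.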
| [
"os.listdir",
"PIL.Image.open",
"libnochmal.read_board_from_file",
"tkinter.messagebox.askyesno",
"libnochmal.Color.ref_list",
"os.path.join",
"tkinter.Button",
"random.seed",
"os.path.realpath",
"time.sleep",
"tkinter.Tk",
"libnochmal._get_connected_coords",
"tkinter.Label",
"random.randint",
"tkinter.filedialog.askopenfilename"
] | [((17888, 17901), 'random.seed', 'random.seed', ([], {}), '()\n', (17899, 17901), False, 'import random\n'), ((17914, 17921), 'tkinter.Tk', 'tk.Tk', ([], {}), '()\n', (17919, 17921), True, 'import tkinter as tk\n'), ((4218, 4240), 'os.listdir', 'os.listdir', (['boards_dir'], {}), '(boards_dir)\n', (4228, 4240), False, 'import os\n'), ((4503, 4539), 'tkinter.Label', 'tk.Label', (['self'], {'text': '"""Choose Board:"""'}), "(self, text='Choose Board:')\n", (4511, 4539), True, 'import tkinter as tk\n'), ((5588, 5647), 'tkinter.Button', 'tk.Button', (['self'], {'text': '"""Start Game"""', 'command': 'self.start_game'}), "(self, text='Start Game', command=self.start_game)\n", (5597, 5647), True, 'import tkinter as tk\n'), ((7080, 7104), 'tkinter.Label', 'tk.Label', (['self'], {'text': '"""0"""'}), "(self, text='0')\n", (7088, 7104), True, 'import tkinter as tk\n'), ((7184, 7208), 'tkinter.Label', 'tk.Label', (['self'], {'text': '"""0"""'}), "(self, text='0')\n", (7192, 7208), True, 'import tkinter as tk\n'), ((7286, 7337), 'tkinter.Button', 'tk.Button', (['self'], {'text': '"""Commit"""', 'command': 'self.commit'}), "(self, text='Commit', command=self.commit)\n", (7295, 7337), True, 'import tkinter as tk\n'), ((7452, 7543), 'tkinter.Label', 'tk.Label', (['self'], {'text': '"""_______________________________________________________________"""'}), "(self, text=\n '_______________________________________________________________')\n", (7460, 7543), True, 'import tkinter as tk\n'), ((7619, 7642), 'tkinter.Label', 'tk.Label', (['self'], {'text': '""""""'}), "(self, text='')\n", (7627, 7642), True, 'import tkinter as tk\n'), ((3719, 3745), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (3735, 3745), False, 'import os\n'), ((3799, 3840), 'PIL.Image.open', 'Image.open', (["(script_path + '/img/star.png')"], {}), "(script_path + '/img/star.png')\n", (3809, 3840), False, 'from PIL import Image, ImageTk\n'), ((3896, 3939), 'PIL.Image.open', 'Image.open', (["(script_path + '/img/circle.png')"], {}), "(script_path + '/img/circle.png')\n", (3906, 3939), False, 'from PIL import Image, ImageTk\n'), ((3994, 4036), 'PIL.Image.open', 'Image.open', (["(script_path + '/img/cross.png')"], {}), "(script_path + '/img/cross.png')\n", (4004, 4036), False, 'from PIL import Image, ImageTk\n'), ((4096, 4143), 'PIL.Image.open', 'Image.open', (["(script_path + '/img/cross-gray.png')"], {}), "(script_path + '/img/cross-gray.png')\n", (4106, 4143), False, 'from PIL import Image, ImageTk\n'), ((6582, 6626), 'tkinter.Label', 'tk.Label', (['self'], {'text': 'ln.POINTS_PER_COLUMN[x]'}), '(self, text=ln.POINTS_PER_COLUMN[x])\n', (6590, 6626), True, 'import tkinter as tk\n'), ((7842, 7939), 'tkinter.messagebox.askyesno', 'msgbox.askyesno', (['"""Reset Game"""', '"""Do you want start a new game? Current progress will be lost."""'], {}), "('Reset Game',\n 'Do you want start a new game? 
Current progress will be lost.')\n", (7857, 7939), True, 'import tkinter.messagebox as msgbox\n'), ((8151, 8196), 'tkinter.filedialog.askopenfilename', 'tkfd.askopenfilename', ([], {'defaultextension': '""".dat"""'}), "(defaultextension='.dat')\n", (8171, 8196), True, 'import tkinter.filedialog as tkfd\n'), ((9866, 9888), 'random.randint', 'random.randint', (['(11)', '(17)'], {}), '(11, 17)\n', (9880, 9888), False, 'import random\n'), ((9939, 9959), 'random.randint', 'random.randint', (['(1)', '(6)'], {}), '(1, 6)\n', (9953, 9959), False, 'import random\n'), ((10008, 10028), 'random.randint', 'random.randint', (['(1)', '(6)'], {}), '(1, 6)\n', (10022, 10028), False, 'import random\n'), ((10579, 10615), 'time.sleep', 'time.sleep', (['(0.075 + 0.125 * (i / 17))'], {}), '(0.075 + 0.125 * (i / 17))\n', (10589, 10615), False, 'import time\n'), ((16023, 16063), 'libnochmal._get_connected_coords', 'ln._get_connected_coords', (['coords', '(x, y)'], {}), '(coords, (x, y))\n', (16047, 16063), True, 'import libnochmal as ln\n'), ((2773, 2789), 'libnochmal.Color.ref_list', 'Color.ref_list', ([], {}), '()\n', (2787, 2789), False, 'from libnochmal import Color\n'), ((8290, 8316), 'libnochmal.read_board_from_file', 'ln.read_board_from_file', (['f'], {}), '(f)\n', (8313, 8316), True, 'import libnochmal as ln\n'), ((10090, 10110), 'libnochmal.Color.ref_list', 'Color.ref_list', (['(True)'], {}), '(True)\n', (10104, 10110), False, 'from libnochmal import Color\n'), ((10173, 10193), 'libnochmal.Color.ref_list', 'Color.ref_list', (['(True)'], {}), '(True)\n', (10187, 10193), False, 'from libnochmal import Color\n'), ((10933, 11008), 'tkinter.messagebox.askyesno', 'msgbox.askyesno', ([], {'title': '"""Pass?"""', 'message': '"""Would you like to pass this turn?"""'}), "(title='Pass?', message='Would you like to pass this turn?')\n", (10948, 11008), True, 'import tkinter.messagebox as msgbox\n'), ((12293, 12358), 'libnochmal._get_connected_coords', 'ln._get_connected_coords', (['self.game_state.crossed_tiles_to_commit'], {}), '(self.game_state.crossed_tiles_to_commit)\n', (12317, 12358), True, 'import libnochmal as ln\n'), ((4415, 4445), 'os.path.join', 'os.path.join', (['boards_dir', 'file'], {}), '(boards_dir, file)\n', (4427, 4445), False, 'import os\n'), ((2813, 2829), 'libnochmal.Color.ref_list', 'Color.ref_list', ([], {}), '()\n', (2827, 2829), False, 'from libnochmal import Color\n')] |
import logging
from django.core.management.base import BaseCommand
from django.contrib.auth.models import User
logger = logging.getLogger(__name__)
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument("user_count", type=int)
def handle(self, *args, **options):
batch_size = 1000
user_count = options["user_count"]
users = []
for i in range(1, user_count + 1):
username = f"user_{i}"
user = User(username=username)
user.set_unusable_password()
users.append(user)
if i % batch_size == 0:
logger.info("User #%s created", i)
User.objects.bulk_create(
users, batch_size=batch_size, ignore_conflicts=True)
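# Illustrative usage note (an assumption, not from this file): as a Django
# management command this is invoked as `python manage.py <command_name>
# <user_count>`, where <command_name> is this module's file name (not shown
# here). bulk_create(..., ignore_conflicts=True) lets the command be re-run
# without failing on usernames that already exist.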
| [
"logging.getLogger",
"django.contrib.auth.models.User",
"django.contrib.auth.models.User.objects.bulk_create"
] | [((122, 149), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (139, 149), False, 'import logging\n'), ((687, 764), 'django.contrib.auth.models.User.objects.bulk_create', 'User.objects.bulk_create', (['users'], {'batch_size': 'batch_size', 'ignore_conflicts': '(True)'}), '(users, batch_size=batch_size, ignore_conflicts=True)\n', (711, 764), False, 'from django.contrib.auth.models import User\n'), ((496, 519), 'django.contrib.auth.models.User', 'User', ([], {'username': 'username'}), '(username=username)\n', (500, 519), False, 'from django.contrib.auth.models import User\n')] |
import numpy as np
import torch
import os
from generic import get_match_result, to_np
def evaluate_with_ground_truth_graph(env, agent, num_games, level):
achieved_game_points = []
total_game_steps = []
game_name_list = []
game_max_score_list = []
game_id = 0
while(True):
if game_id >= num_games:
break
try:
obs, infos = env.reset()
except:
print("Error evaluating level {}, game {}!!!\n\n\n".format(level, game_id))
game_id += 1
continue
for commands_ in infos["admissible_commands"]:
for cmd_ in [cmd for cmd in commands_ if cmd != "examine cookbook" and cmd.split()[0] in ["examine", "look"]]:
commands_.remove(cmd_)
game_name_list += [game.metadata["uuid"].split("-")[-1] for game in infos["game"]]
game_max_score_list += [game.max_score for game in infos["game"]]
batch_size = len(obs)
agent.eval()
agent.init()
chosen_actions, prev_step_dones = [], []
for _ in range(batch_size):
chosen_actions.append("restart")
prev_step_dones.append(0.0)
prev_h, prev_c = None, None
observation_strings, current_triplets, action_candidate_list, _, _ = agent.get_game_info_at_certain_step(obs, infos, prev_actions=None, prev_facts=None)
observation_strings = [item + " <sep> " + a for item, a in zip(observation_strings, chosen_actions)]
agent.get_ingredient_list(current_triplets)
whether_new_tasks = [True for _ in range(batch_size)]
dones = [False for _ in range(batch_size)]
still_running_mask = []
final_scores = []
for step_no in range(agent.eval_max_nb_steps_per_episode):
assert (len(whether_new_tasks) == 1), "I assume there's only one env, no parallel"
if whether_new_tasks[0] or dones[0]:
agent.update_task_candidate_list(current_triplets)
task_candidate_list = agent.available_task_list
chosen_tasks, chosen_task_indices, _, _, _ = agent.act_greedy(
observation_strings = observation_strings,
triplets = current_triplets,
action_candidate_list = task_candidate_list,
tasks=[None], # batch = 1
previous_h=None,
previous_c=None,
model_type="meta")
chosen_tasks_before_parsing = [item[idx] for item, idx in zip(agent.available_task_list, chosen_task_indices)]
agent.update_chosen_task_type(chosen_tasks,chosen_task_indices)
chosen_actions, chosen_action_indices, _, prev_h, prev_c = agent.act_greedy(
observation_strings = observation_strings,
triplets = current_triplets,
action_candidate_list = action_candidate_list,
tasks = chosen_tasks_before_parsing,
previous_h = prev_h,
previous_c = prev_c,
model_type = "sub")
chosen_actions_before_parsing = [item[idx] for item, idx in zip(infos["admissible_commands"], chosen_action_indices)]
obs, scores, dones, infos = env.step(chosen_actions_before_parsing)
for commands_ in infos["admissible_commands"]:
for cmd_ in [cmd for cmd in commands_ if cmd != "examine cookbook" and cmd.split()[0] in ["examine", "look"]]:
commands_.remove(cmd_)
observation_strings, current_triplets, action_candidate_list, _, _ = agent.get_game_info_at_certain_step(obs, infos, prev_actions=None, prev_facts=None)
observation_strings = [item + " <sep> " + a for item, a in zip(observation_strings, chosen_actions)]
still_running = [1.0 - float(item) for item in prev_step_dones]
_, whether_new_tasks = agent.get_task_rewards(current_triplets)
prev_step_dones = dones
final_scores = scores
still_running_mask.append(still_running)
if np.sum(still_running) == 0:
break
achieved_game_points += final_scores
still_running_mask = np.array(still_running_mask)
total_game_steps += np.sum(still_running_mask, 0).tolist()
game_id += batch_size
achieved_game_points = np.array(achieved_game_points, dtype="float32")
game_max_score_list = np.array(game_max_score_list, dtype="float32")
normalized_game_points = achieved_game_points / game_max_score_list
print_strings = []
print_strings.append("EvLevel {}|Score: {:2.3f}|ScoreNorm: {:2.3f}|Steps: {:2.3f}".format(level,
np.mean(achieved_game_points),
np.mean(normalized_game_points),
np.mean(total_game_steps)))
# for i in range(len(game_name_list)):
# print_strings.append("GameID: {}|Score: {:2.3f}|ScoreNorm: {:2.3f}|Steps: {:2.3f}".format(game_name_list[i], achieved_game_points[i], normalized_game_points[i], total_game_steps[i]))
print_strings = "\n".join(print_strings)
print(print_strings)
return np.mean(achieved_game_points), np.mean(normalized_game_points), np.mean(total_game_steps), 0.0, print_strings
def evaluate(env, agent, num_games, level):
assert (agent.fully_observable_graph), "Only allow full graph!"
return evaluate_with_ground_truth_graph(env, agent, num_games, level)
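    # Note: the commented-out block below appears to be an alternative evaluation
    # loop that does not use the meta/sub task hierarchy; it is kept for reference.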
# if agent.fully_observable_graph:
# return evaluate_with_ground_truth_graph(env, agent, num_games, level)
# achieved_game_points = []
# total_game_steps = []
# game_name_list = []
# game_max_score_list = []
# game_id = 0
# while(True):
# if game_id >= num_games:
# break
# obs, infos = env.reset()
# # filter look and examine actions
# for commands_ in infos["admissible_commands"]:
# for cmd_ in [cmd for cmd in commands_ if cmd != "examine cookbook" and cmd.split()[0] in ["examine", "look"]]:
# commands_.remove(cmd_)
# game_name_list += [game.metadata["uuid"].split("-")[-1] for game in infos["game"]]
# game_max_score_list += [game.max_score for game in infos["game"]]
# batch_size = len(obs)
# agent.eval()
# agent.init()
#
# triplets, chosen_actions, prev_game_facts = [], [], []
# prev_step_dones = []
# for _ in range(batch_size):
# chosen_actions.append("restart")
# prev_game_facts.append(set())
# triplets.append([])
# prev_step_dones.append(0.0)
#
# prev_h, prev_c = None, None
#
# observation_strings, current_triplets, action_candidate_list, _, current_game_facts = agent.get_game_info_at_certain_step(obs, infos, prev_actions=chosen_actions, prev_facts=None)
# observation_strings = [item + " <sep> " + a for item, a in zip(observation_strings, chosen_actions)]
# still_running_mask = []
# final_scores = []
#
# for step_no in range(agent.eval_max_nb_steps_per_episode):
#
# # choose what to do next from candidate list
# chosen_actions, chosen_indices, _, prev_h, prev_c = agent.act_greedy(observation_strings, current_triplets, action_candidate_list, prev_h, prev_c)
# # send chosen actions to game engine
# chosen_actions_before_parsing = [item[idx] for item, idx in zip(infos["admissible_commands"], chosen_indices)]
# obs, scores, dones, infos = env.step(chosen_actions_before_parsing)
# # filter look and examine actions
# for commands_ in infos["admissible_commands"]:
# for cmd_ in [cmd for cmd in commands_ if cmd != "examine cookbook" and cmd.split()[0] in ["examine", "look"]]:
# commands_.remove(cmd_)
#
# prev_game_facts = current_game_facts
# observation_strings, current_triplets, action_candidate_list, _, current_game_facts = agent.get_game_info_at_certain_step(obs, infos, prev_actions=chosen_actions, prev_facts=prev_game_facts)
# observation_strings = [item + " <sep> " + a for item, a in zip(observation_strings, chosen_actions)]
#
# still_running = [1.0 - float(item) for item in prev_step_dones] # list of float
# prev_step_dones = dones
# final_scores = scores
# still_running_mask.append(still_running)
#
# # if all ended, break
# if np.sum(still_running) == 0:
# break
#
# achieved_game_points += final_scores
# still_running_mask = np.array(still_running_mask)
# total_game_steps += np.sum(still_running_mask, 0).tolist()
# game_id += batch_size
#
# achieved_game_points = np.array(achieved_game_points, dtype="float32")
# game_max_score_list = np.array(game_max_score_list, dtype="float32")
# normalized_game_points = achieved_game_points / game_max_score_list
# print_strings = []
# # print_strings.append("======================================================")
# print_strings.append("EvLevel {}|Score: {:2.3f}|ScoreNorm: {:2.3f}|Steps: {:2.3f}".format(level,
# np.mean(achieved_game_points),
# np.mean(normalized_game_points),
# np.mean(total_game_steps)))
# # for i in range(len(game_name_list)):
# # print_strings.append("GameID: {}|Score: {:2.3f}|ScoreNorm: {:2.3f}|Steps: {:2.3f}".format(game_name_list[i], achieved_game_points[i], normalized_game_points[i], total_game_steps[i]))
# # print_strings.append("======================================================")
# print_strings = "\n".join(print_strings)
# print(print_strings)
# return np.mean(achieved_game_points), np.mean(normalized_game_points), np.mean(total_game_steps), 0.0, print_strings
| [
"numpy.array",
"numpy.mean",
"numpy.sum"
] | [((5191, 5238), 'numpy.array', 'np.array', (['achieved_game_points'], {'dtype': '"""float32"""'}), "(achieved_game_points, dtype='float32')\n", (5199, 5238), True, 'import numpy as np\n'), ((5265, 5311), 'numpy.array', 'np.array', (['game_max_score_list'], {'dtype': '"""float32"""'}), "(game_max_score_list, dtype='float32')\n", (5273, 5311), True, 'import numpy as np\n'), ((5038, 5066), 'numpy.array', 'np.array', (['still_running_mask'], {}), '(still_running_mask)\n', (5046, 5066), True, 'import numpy as np\n'), ((6172, 6201), 'numpy.mean', 'np.mean', (['achieved_game_points'], {}), '(achieved_game_points)\n', (6179, 6201), True, 'import numpy as np\n'), ((6203, 6234), 'numpy.mean', 'np.mean', (['normalized_game_points'], {}), '(normalized_game_points)\n', (6210, 6234), True, 'import numpy as np\n'), ((6236, 6261), 'numpy.mean', 'np.mean', (['total_game_steps'], {}), '(total_game_steps)\n', (6243, 6261), True, 'import numpy as np\n'), ((5593, 5622), 'numpy.mean', 'np.mean', (['achieved_game_points'], {}), '(achieved_game_points)\n', (5600, 5622), True, 'import numpy as np\n'), ((5709, 5740), 'numpy.mean', 'np.mean', (['normalized_game_points'], {}), '(normalized_game_points)\n', (5716, 5740), True, 'import numpy as np\n'), ((5827, 5852), 'numpy.mean', 'np.mean', (['total_game_steps'], {}), '(total_game_steps)\n', (5834, 5852), True, 'import numpy as np\n'), ((4914, 4935), 'numpy.sum', 'np.sum', (['still_running'], {}), '(still_running)\n', (4920, 4935), True, 'import numpy as np\n'), ((5095, 5124), 'numpy.sum', 'np.sum', (['still_running_mask', '(0)'], {}), '(still_running_mask, 0)\n', (5101, 5124), True, 'import numpy as np\n')] |
from io import StringIO, BytesIO
from unittest import TestCase
from datetime import datetime
from memory_watch import ZoneInfoScanner, LogWriter
class ZoneInfoScannerTest(TestCase):
def test_simple(self):
zone_info = StringIO("""\
2: Node 0, zone Normal
2: pages free 1363343
2: min 9891
""")
expected_entries = [dict(node='2',
mem_node='0',
zone='Normal',
free_mem=1363343,
min_mem=9891)]
scanner = ZoneInfoScanner(zone_info)
entries = list(scanner)
self.assertEqual(expected_entries, entries)
def test_available(self):
zone_info = StringIO("""\
2: Node 0, zone Normal
2: pages free 1363343
2: min 9891
2: MemAvailable: 8000000 kB
""")
expected_entries = [dict(node='2',
mem_node='0',
zone='Normal',
free_mem=1363343,
min_mem=9891),
dict(node='2',
mem_node='avail',
free_mem=2000000)]
scanner = ZoneInfoScanner(zone_info)
entries = list(scanner)
self.assertEqual(expected_entries, entries)
def test_two_nodes(self):
zone_info = StringIO("""\
2: Node 0, zone Normal
2: pages free 1363343
2: min 9891
3: Node 0, zone Normal
3: pages free 2000000
3: min 9999
""")
expected_entries = [dict(node='2',
mem_node='0',
zone='Normal',
free_mem=1363343,
min_mem=9891),
dict(node='3',
mem_node='0',
zone='Normal',
free_mem=2000000,
min_mem=9999)]
scanner = ZoneInfoScanner(zone_info)
entries = list(scanner)
self.assertEqual(expected_entries, entries)
def test_skipping_lines(self):
zone_info = StringIO("""\
2: some garbage
2: Zode 0, zone Normal
2: Node 0, bone Normal
2: Node 0, zone Normal
2: pages free 1363343
2: min 9891
2: more garbage
3: Node 0, zone Normal
3: pages free 2000000
3: min 9999
""")
expected_entries = [dict(node='2',
mem_node='0',
zone='Normal',
free_mem=1363343,
min_mem=9891),
dict(node='3',
mem_node='0',
zone='Normal',
free_mem=2000000,
min_mem=9999)]
scanner = ZoneInfoScanner(zone_info)
entries = list(scanner)
self.assertEqual(expected_entries, entries)
def test_unexpected_lines(self):
zone_info = StringIO("""\
2: Node 0, zone Normal
2: xpages free 1363343
2: ymin 9891
""")
bad_lines = " 2: xpages free 1363343\n 2: ymin 9891\n"
expected_entries = [dict(node='2',
mem_node='0',
zone='Normal',
unexpected=bad_lines)]
scanner = ZoneInfoScanner(zone_info)
entries = list(scanner)
self.assertEqual(expected_entries, entries)
class LogWriterTest(TestCase):
def test_simple(self):
entries = [dict(node='2',
mem_node='0',
zone='Normal',
free_mem=1024*1024,
min_mem=10*1024),
dict(node='3',
mem_node='0',
zone='Normal',
free_mem=2*1024*1024,
min_mem=128*1024)]
time = datetime(2017, 10, 15, 19, 30, 1)
report = BytesIO()
expected_report = """\
time,2_0_0_min,2_0_0_free,3_0_0_min,3_0_0_free,2_0_0_unexpected,3_0_0_unexpected
2017-10-15 19:30:01,0.04,4.00,0.50,8.00,,
"""
writer = LogWriter(report)
writer.write(time, entries)
self.assertEqual(expected_report, report.getvalue().decode('UTF-8'))
def test_available(self):
entries = [dict(node='2',
mem_node='0',
zone='Normal',
free_mem=1024*1024,
min_mem=10*1024),
dict(node='2',
mem_node='avail',
free_mem=2*1024*1024)]
time = datetime(2017, 10, 15, 19, 30, 1)
report = BytesIO()
expected_report = """\
time,2_0_0_min,2_0_0_free,2_avail_1_min,2_avail_1_free,2_0_0_unexpected,2_avail_1_unexpected
2017-10-15 19:30:01,0.04,4.00,,8.00,,
"""
writer = LogWriter(report)
writer.write(time, entries)
self.assertEqual(expected_report, report.getvalue().decode('UTF-8'))
def test_multiple_times(self):
entries1 = [dict(node='2',
mem_node='0',
zone='Normal',
free_mem=262144, # 1.00GB
min_mem=2621), # 0.01GB
dict(node='3',
mem_node='0',
zone='Normal',
free_mem=262144, # 1.00GB
min_mem=2621)] # 0.01GB
entries2 = [dict(node='2',
mem_node='0',
zone='Normal',
free_mem=267387, # 1.02GB
min_mem=5242), # 0.02GB
dict(node='3',
mem_node='0',
zone='Normal',
free_mem=270008, # 1.03GB
min_mem=7864)] # 0.03GB
time1 = datetime(2017, 10, 15, 19, 30, 1)
time2 = datetime(2017, 10, 15, 19, 31, 1)
report = BytesIO()
expected_report = """\
time,2_0_0_min,2_0_0_free,3_0_0_min,3_0_0_free,2_0_0_unexpected,3_0_0_unexpected
2017-10-15 19:30:01,0.01,1.00,0.01,1.00,,
2017-10-15 19:31:01,0.02,1.02,0.03,1.03,,
"""
writer = LogWriter(report)
writer.write(time1, entries1)
writer.write(time2, entries2)
self.assertEqual(expected_report, report.getvalue().decode('UTF-8'))
def test_multiple_zones(self):
entries1 = [dict(node='2',
mem_node='0',
zone='DMA32',
free_mem=262144, # 1.00GB
min_mem=2621), # 0.01GB
dict(node='2',
mem_node='0',
zone='Normal',
free_mem=262144, # 1.00GB
min_mem=2621)] # 0.01GB
time1 = datetime(2017, 10, 15, 19, 30, 1)
report = BytesIO()
expected_report = """\
time,2_0_0_min,2_0_0_free,2_0_1_min,2_0_1_free,2_0_0_unexpected,2_0_1_unexpected
2017-10-15 19:30:01,0.01,1.00,0.01,1.00,,
"""
writer = LogWriter(report)
writer.write(time1, entries1)
self.assertEqual(expected_report, report.getvalue().decode('UTF-8'))
def test_node_missing(self):
entries1 = [dict(node='2',
mem_node='0',
zone='Normal',
free_mem=262144,
min_mem=2621),
dict(node='3',
mem_node='0',
zone='Normal',
free_mem=262144,
min_mem=2621)]
entries2 = [dict(node='2',
mem_node='0',
zone='Normal',
free_mem=262144,
min_mem=2621)]
time1 = datetime(2017, 10, 15, 19, 30, 1)
time2 = datetime(2017, 10, 15, 19, 31, 1)
report = BytesIO()
expected_report = """\
time,2_0_0_min,2_0_0_free,3_0_0_min,3_0_0_free,2_0_0_unexpected,3_0_0_unexpected
2017-10-15 19:30:01,0.01,1.00,0.01,1.00,,
2017-10-15 19:31:01,0.01,1.00,,,,
"""
writer = LogWriter(report)
writer.write(time1, entries1)
writer.write(time2, entries2)
self.assertEqual(expected_report, report.getvalue().decode('UTF-8'))
def test_unexpected(self):
entries = [dict(node='2',
mem_node='0',
zone='Normal',
free_mem=262144,
unexpected='bogus line\n'),
dict(node='3',
mem_node='0',
zone='Normal',
unexpected='bogus line 1\nbogus line 2\n')]
time = datetime(2017, 10, 15, 19, 30, 1)
report = BytesIO()
expected_report = """\
time,2_0_0_min,2_0_0_free,3_0_0_min,3_0_0_free,2_0_0_unexpected,3_0_0_unexpected
2017-10-15 19:30:01,,1.00,,,"bogus line
","bogus line 1
bogus line 2
"
"""
writer = LogWriter(report)
writer.write(time, entries)
self.assertEqual(expected_report, report.getvalue().decode('UTF-8'))
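# Illustrative note (an assumption, not from these tests): in production the two
# classes under test are presumably wired together roughly as
#
#   writer = LogWriter(open('memory_log.csv', 'wb'))
#   writer.write(datetime.now(), list(ZoneInfoScanner(zone_info_stream)))
#
# where zone_info_stream yields `/proc/zoneinfo`-style lines prefixed with a
# node id, matching the StringIO fixtures above.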
| [
"datetime.datetime",
"memory_watch.ZoneInfoScanner",
"io.BytesIO",
"io.StringIO",
"memory_watch.LogWriter"
] | [((233, 343), 'io.StringIO', 'StringIO', (['""" 2: Node 0, zone Normal\n 2: pages free 1363343\n 2: min 9891\n"""'], {}), '(\n """ 2: Node 0, zone Normal\n 2: pages free 1363343\n 2: min 9891\n"""\n )\n', (241, 343), False, 'from io import StringIO, BytesIO\n'), ((591, 617), 'memory_watch.ZoneInfoScanner', 'ZoneInfoScanner', (['zone_info'], {}), '(zone_info)\n', (606, 617), False, 'from memory_watch import ZoneInfoScanner, LogWriter\n'), ((755, 896), 'io.StringIO', 'StringIO', (['""" 2: Node 0, zone Normal\n 2: pages free 1363343\n 2: min 9891\n 2: MemAvailable: 8000000 kB\n"""'], {}), '(\n """ 2: Node 0, zone Normal\n 2: pages free 1363343\n 2: min 9891\n 2: MemAvailable: 8000000 kB\n"""\n )\n', (763, 896), False, 'from io import StringIO, BytesIO\n'), ((1290, 1316), 'memory_watch.ZoneInfoScanner', 'ZoneInfoScanner', (['zone_info'], {}), '(zone_info)\n', (1305, 1316), False, 'from memory_watch import ZoneInfoScanner, LogWriter\n'), ((1454, 1648), 'io.StringIO', 'StringIO', (['""" 2: Node 0, zone Normal\n 2: pages free 1363343\n 2: min 9891\n 3: Node 0, zone Normal\n 3: pages free 2000000\n 3: min 9999\n"""'], {}), '(\n """ 2: Node 0, zone Normal\n 2: pages free 1363343\n 2: min 9891\n 3: Node 0, zone Normal\n 3: pages free 2000000\n 3: min 9999\n"""\n )\n', (1462, 1648), False, 'from io import StringIO, BytesIO\n'), ((2133, 2159), 'memory_watch.ZoneInfoScanner', 'ZoneInfoScanner', (['zone_info'], {}), '(zone_info)\n', (2148, 2159), False, 'from memory_watch import ZoneInfoScanner, LogWriter\n'), ((2302, 2586), 'io.StringIO', 'StringIO', (['""" 2: some garbage\n 2: Zode 0, zone Normal\n 2: Node 0, bone Normal\n 2: Node 0, zone Normal\n 2: pages free 1363343\n 2: min 9891\n 2: more garbage\n 3: Node 0, zone Normal\n 3: pages free 2000000\n 3: min 9999\n"""'], {}), '(\n """ 2: some garbage\n 2: Zode 0, zone Normal\n 2: Node 0, bone Normal\n 2: Node 0, zone Normal\n 2: pages free 1363343\n 2: min 9891\n 2: more garbage\n 3: Node 0, zone Normal\n 3: pages free 2000000\n 3: min 9999\n"""\n )\n', (2310, 2586), False, 'from io import StringIO, BytesIO\n'), ((3071, 3097), 'memory_watch.ZoneInfoScanner', 'ZoneInfoScanner', (['zone_info'], {}), '(zone_info)\n', (3086, 3097), False, 'from memory_watch import ZoneInfoScanner, LogWriter\n'), ((3242, 3352), 'io.StringIO', 'StringIO', (['""" 2: Node 0, zone Normal\n 2: xpages free 1363343\n 2: ymin 9891\n"""'], {}), '(\n """ 2: Node 0, zone Normal\n 2: xpages free 1363343\n 2: ymin 9891\n"""\n )\n', (3250, 3352), False, 'from io import StringIO, BytesIO\n'), ((3639, 3665), 'memory_watch.ZoneInfoScanner', 'ZoneInfoScanner', (['zone_info'], {}), '(zone_info)\n', (3654, 3665), False, 'from memory_watch import ZoneInfoScanner, LogWriter\n'), ((4224, 4257), 'datetime.datetime', 'datetime', (['(2017)', '(10)', '(15)', '(19)', '(30)', '(1)'], {}), '(2017, 10, 15, 19, 30, 1)\n', (4232, 4257), False, 'from datetime import datetime\n'), ((4275, 4284), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (4282, 4284), False, 'from io import StringIO, BytesIO\n'), ((4460, 4477), 'memory_watch.LogWriter', 'LogWriter', (['report'], {}), '(report)\n', (4469, 4477), False, 'from memory_watch import ZoneInfoScanner, LogWriter\n'), ((4959, 4992), 'datetime.datetime', 'datetime', (['(2017)', '(10)', '(15)', '(19)', '(30)', '(1)'], {}), '(2017, 10, 15, 19, 30, 1)\n', (4967, 4992), False, 'from datetime import datetime\n'), ((5010, 5019), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (5017, 5019), False, 'from io import StringIO, BytesIO\n'), ((5203, 5220), 'memory_watch.LogWriter', 
'LogWriter', (['report'], {}), '(report)\n', (5212, 5220), False, 'from memory_watch import ZoneInfoScanner, LogWriter\n'), ((6260, 6293), 'datetime.datetime', 'datetime', (['(2017)', '(10)', '(15)', '(19)', '(30)', '(1)'], {}), '(2017, 10, 15, 19, 30, 1)\n', (6268, 6293), False, 'from datetime import datetime\n'), ((6310, 6343), 'datetime.datetime', 'datetime', (['(2017)', '(10)', '(15)', '(19)', '(31)', '(1)'], {}), '(2017, 10, 15, 19, 31, 1)\n', (6318, 6343), False, 'from datetime import datetime\n'), ((6361, 6370), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (6368, 6370), False, 'from io import StringIO, BytesIO\n'), ((6588, 6605), 'memory_watch.LogWriter', 'LogWriter', (['report'], {}), '(report)\n', (6597, 6605), False, 'from memory_watch import ZoneInfoScanner, LogWriter\n'), ((7244, 7277), 'datetime.datetime', 'datetime', (['(2017)', '(10)', '(15)', '(19)', '(30)', '(1)'], {}), '(2017, 10, 15, 19, 30, 1)\n', (7252, 7277), False, 'from datetime import datetime\n'), ((7295, 7304), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (7302, 7304), False, 'from io import StringIO, BytesIO\n'), ((7480, 7497), 'memory_watch.LogWriter', 'LogWriter', (['report'], {}), '(report)\n', (7489, 7497), False, 'from memory_watch import ZoneInfoScanner, LogWriter\n'), ((8253, 8286), 'datetime.datetime', 'datetime', (['(2017)', '(10)', '(15)', '(19)', '(30)', '(1)'], {}), '(2017, 10, 15, 19, 30, 1)\n', (8261, 8286), False, 'from datetime import datetime\n'), ((8303, 8336), 'datetime.datetime', 'datetime', (['(2017)', '(10)', '(15)', '(19)', '(31)', '(1)'], {}), '(2017, 10, 15, 19, 31, 1)\n', (8311, 8336), False, 'from datetime import datetime\n'), ((8354, 8363), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (8361, 8363), False, 'from io import StringIO, BytesIO\n'), ((8573, 8590), 'memory_watch.LogWriter', 'LogWriter', (['report'], {}), '(report)\n', (8582, 8590), False, 'from memory_watch import ZoneInfoScanner, LogWriter\n'), ((9176, 9209), 'datetime.datetime', 'datetime', (['(2017)', '(10)', '(15)', '(19)', '(30)', '(1)'], {}), '(2017, 10, 15, 19, 30, 1)\n', (9184, 9209), False, 'from datetime import datetime\n'), ((9227, 9236), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (9234, 9236), False, 'from io import StringIO, BytesIO\n'), ((9441, 9458), 'memory_watch.LogWriter', 'LogWriter', (['report'], {}), '(report)\n', (9450, 9458), False, 'from memory_watch import ZoneInfoScanner, LogWriter\n')] |
"""
The following is a discrete policy shaping example in the OpenAI Frozen Lake text environment.
(see: https://gym.openai.com/envs/FrozenLake-v0/)
For more details on policy shaping, see both the PolicyShaping class and the original paper
by Griffith et al. (see: https://papers.nips.cc/paper/2013/file/e034fb6b66aacc1d48f445ddfb08da98-Paper.pdf)
"""
import gym
import numpy as np
import pickle
import matplotlib.pyplot as plt
import Qtable
from Oracle import NPOracle as Oracle
import PolicyShaping
# is_slippery controls whether or not the agent takes the chosen action with 100% success rate
# Make environment, size: 8x8
#lake = gym.make("FrozenLake-v0", map_name='8x8', is_slippery=False)
# Make environment, size 4x4
lake = gym.make("FrozenLake-v0", is_slippery=False)
# How to make a custom map:
"""
cstm_map = [
'FFSFF',
'FHHHF',
'FHHHF',
'FHHHF',
'GFFFG'
]
lake = gym.make("FrozenLake-v0", desc=cstm_map, is_slippery=False)
"""
state_size = lake.observation_space.n
action_size = lake.action_space.n
qtable = Qtable.Qtable(state_size, action_size, True) # Initialize q-table
# Initialize oracle
oracle_qtable_initial = pickle.load(open('q_table_episode_4x4Oracle.pkl', 'rb')).qtable
oracle = Oracle(oracle_qtable_initial)
print(oracle.qtable)
episodes = 100
max_moves = 30 # Max moves per episode
# q-learning parameters
learning = .8
discount = .99
exploration_rate = .001
min_exploration = .01
exploration_decay = .001
# policy shaping parameters:
confidence = .9 # confidence that feedback is optimal
likelihood = .9 # likelihood feedback is provided
const = 0.3 # constant used in probability of action equation
feedback_tbl = np.zeros((state_size, action_size)) # Table keeping track of feedback
policy_shaping = PolicyShaping.PolicyShaping(qtable.qtable, feedback_tbl, confidence, const)
rewards = []
num_moves = []
for episode in range(episodes):
state = lake.reset()
done = False
curr_reward = 0
num_moves.append(0)
for move in range(max_moves):
        # This epsilon-greedy approach for policy shaping with Q-learning mirrors the original paper,
        # where the exploration rate is a constant that depends on the environment used.
if np.random.random() > exploration_rate:
action = policy_shaping.get_shaped_action(state)
else:
action = lake.action_space.sample()
num_moves[episode] += 1
next_state, reward, done, info = lake.step(action)
"""
if reward == 0 and done is True: # Modifying frozen lake so falling in a hole gives - 1 reward instead of 0.
reward = -1
"""
# Get feedback and update feedback table:
feedback = oracle.get_binary_feedback_ps(state, action, likelihood, confidence)
feedback_tbl[state][action] += feedback
# Q-value update formula
qtable.qtable[state][action] = (1 - learning) * qtable.qtable[state][action] + learning * \
(reward + discount * qtable.maxq(next_state))
# Update policy shaping object
policy_shaping.update_qtable(qtable.qtable)
policy_shaping.update_feedback_tbl(feedback_tbl)
state = next_state
curr_reward += reward
if done:
break
rewards.append(curr_reward)
"""
# Exponential decay of exploration rate:
exploration_rate = min_exploration + (1 - min_exploration) * np.exp(-exploration_decay * episode)
"""
# print(qtable.qtable)
rewards_per_ten_episodes = np.split(np.array(rewards), episodes // 10)  # group rewards into blocks of 10 episodes
count = 10
avg = []
for r in rewards_per_ten_episodes:
avg_reward = sum(r / 10)
print(count, ": ", str(avg_reward))
avg.append(avg_reward)
count += 10
plt.plot(avg)
plt.show() | [
"numpy.random.random",
"Qtable.Qtable",
"matplotlib.pyplot.plot",
"Oracle.NPOracle",
"numpy.array",
"numpy.zeros",
"PolicyShaping.PolicyShaping",
"gym.make",
"matplotlib.pyplot.show"
] | [((747, 791), 'gym.make', 'gym.make', (['"""FrozenLake-v0"""'], {'is_slippery': '(False)'}), "('FrozenLake-v0', is_slippery=False)\n", (755, 791), False, 'import gym\n'), ((1075, 1119), 'Qtable.Qtable', 'Qtable.Qtable', (['state_size', 'action_size', '(True)'], {}), '(state_size, action_size, True)\n', (1088, 1119), False, 'import Qtable\n'), ((1264, 1293), 'Oracle.NPOracle', 'Oracle', (['oracle_qtable_initial'], {}), '(oracle_qtable_initial)\n', (1270, 1293), True, 'from Oracle import NPOracle as Oracle\n'), ((1727, 1762), 'numpy.zeros', 'np.zeros', (['(state_size, action_size)'], {}), '((state_size, action_size))\n', (1735, 1762), True, 'import numpy as np\n'), ((1818, 1893), 'PolicyShaping.PolicyShaping', 'PolicyShaping.PolicyShaping', (['qtable.qtable', 'feedback_tbl', 'confidence', 'const'], {}), '(qtable.qtable, feedback_tbl, confidence, const)\n', (1845, 1893), False, 'import PolicyShaping\n'), ((3850, 3863), 'matplotlib.pyplot.plot', 'plt.plot', (['avg'], {}), '(avg)\n', (3858, 3863), True, 'import matplotlib.pyplot as plt\n'), ((3865, 3875), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3873, 3875), True, 'import matplotlib.pyplot as plt\n'), ((3635, 3652), 'numpy.array', 'np.array', (['rewards'], {}), '(rewards)\n', (3643, 3652), True, 'import numpy as np\n'), ((2263, 2281), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (2279, 2281), True, 'import numpy as np\n')] |
from contextlib import contextmanager
from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker
class Database(object):
def __init__(self, metadata, session_getter=None, session_setter=None):
self.engine = None
self._session_factory = None
self._session = None
if session_getter is None:
session_getter = self._default_session_getter
self._session_getter = session_getter
if session_setter is None:
session_setter = self._default_session_setter
self._session_setter = session_setter
self._metadata = metadata
def create_all(self):
self._metadata.create_all(bind=self.engine)
def drop_all(self):
self._metadata.drop_all(bind=self.engine, checkfirst=True)
def connect(self, database_uri, pool_recycle=3600, echo=False):
self.engine = create_engine(
database_uri,
pool_recycle=pool_recycle,
echo=echo,
)
self._session_factory = sessionmaker(
bind=self.engine,
autocommit=False,
autoflush=False,
)
def _default_session_getter(self):
return self._session
def _default_session_setter(self, value):
self._session = value
@property
def session(self):
session = self._session_getter()
if session is None:
session = scoped_session(self._session_factory)
self.session = session
return session
@session.setter
def session(self, value):
self._session_setter(value)
def remove_session(self):
if self.session is not None:
self.session.remove()
self.session = None
@contextmanager
def transaction_context(self):
try:
yield self.session
except Exception as e:
if self.session is not None:
self.session.rollback()
raise e
finally:
if self.session is not None:
self.session.commit()
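# A minimal usage sketch (hypothetical URI and model name; the real metadata comes from the caller):
#
#   db = Database(metadata)
#   db.connect('sqlite:///example.db')
#   db.create_all()
#   with db.transaction_context() as session:
#       session.add(SomeModel(name='example'))   # committed on exit, rolled back on error
#   db.remove_session()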
| [
"sqlalchemy.orm.sessionmaker",
"sqlalchemy.create_engine",
"sqlalchemy.orm.scoped_session"
] | [((901, 966), 'sqlalchemy.create_engine', 'create_engine', (['database_uri'], {'pool_recycle': 'pool_recycle', 'echo': 'echo'}), '(database_uri, pool_recycle=pool_recycle, echo=echo)\n', (914, 966), False, 'from sqlalchemy import create_engine\n'), ((1046, 1111), 'sqlalchemy.orm.sessionmaker', 'sessionmaker', ([], {'bind': 'self.engine', 'autocommit': '(False)', 'autoflush': '(False)'}), '(bind=self.engine, autocommit=False, autoflush=False)\n', (1058, 1111), False, 'from sqlalchemy.orm import scoped_session, sessionmaker\n'), ((1434, 1471), 'sqlalchemy.orm.scoped_session', 'scoped_session', (['self._session_factory'], {}), '(self._session_factory)\n', (1448, 1471), False, 'from sqlalchemy.orm import scoped_session, sessionmaker\n')] |
# pylint: disable = attribute-defined-outside-init, too-few-public-methods
"""Module for serializing IIIF Collection Lists"""
from django.core.serializers.base import SerializerDoesNotExist
import config.settings.local as settings
from apps.iiif.serializers.base import Serializer as JSONSerializer
class Serializer(JSONSerializer):
"""IIIF Collection"""
def get_dump_object(self, obj):
if ((self.version == 'v2') or (self.version is None)):
data = {
"@id": '{h}/iiif/{p}/manifest'.format(h=settings.HOSTNAME, p=obj.pid),
"@type": "sc:Manifest",
"label": obj.label,
}
return data
return None
class Deserializer:
"""Deserialize IIIF Annotation List
:raises SerializerDoesNotExist: Not yet implemented.
"""
def __init__(self, *args, **kwargs):
raise SerializerDoesNotExist("collection_manifest is a serialization-only serializer")
| [
"django.core.serializers.base.SerializerDoesNotExist"
] | [((883, 968), 'django.core.serializers.base.SerializerDoesNotExist', 'SerializerDoesNotExist', (['"""collection_manifest is a serialization-only serializer"""'], {}), "('collection_manifest is a serialization-only serializer'\n )\n", (905, 968), False, 'from django.core.serializers.base import SerializerDoesNotExist\n')] |
from typing import List
from pydantic import BaseModel, Field, validator
from enum import Enum
from datetime import datetime
from monty.json import MontyDecoder
from pymatgen.analysis.gb.grain import GrainBoundary
class GBTypeEnum(Enum):
"""
Grain boundary types
"""
tilt = "tilt"
twist = "twist"
class GrainBoundaryDoc(BaseModel):
"""
Grain boundary energies, work of separation...
"""
task_id: str = Field(
None,
description="The Materials Project ID of the material. This comes in the form: mp-******",
)
sigma: int = Field(
None, description="Sigma value of the boundary",
)
type: GBTypeEnum = Field(
None, description="Grain boundary type",
)
rotation_axis: List[int] = Field(
None, description="Rotation axis",
)
gb_plane: List[int] = Field(
None, description="Grain boundary plane",
)
rotation_angle: float = Field(
None, description="Rotation angle in degrees",
)
gb_energy: float = Field(
None, description="Grain boundary energy in J/m^2",
)
initial_structure: GrainBoundary = Field(
None, description="Initial grain boundary structure"
)
final_structure: GrainBoundary = Field(
None, description="Final grain boundary structure"
)
pretty_formula: str = Field(None, description="Reduced formula of the material")
w_sep: float = Field(None, description="Work of separation in J/m^2")
cif: str = Field(None, description="CIF file of the structure")
chemsys: str = Field(
None, description="Dash-delimited string of elements in the material"
)
last_updated: datetime = Field(
None,
description="Timestamp for the most recent calculation for this Material document",
)
# Make sure that the datetime field is properly formatted
@validator("last_updated", pre=True)
def last_updated_dict_ok(cls, v):
return MontyDecoder().process_decoded(v)
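# A minimal construction sketch (illustrative values only, not real Materials Project data):
#
#   doc = GrainBoundaryDoc(task_id="mp-149", sigma=5, type="tilt",
#                          rotation_axis=[1, 0, 0], gb_plane=[0, 1, 3],
#                          rotation_angle=36.87, gb_energy=0.5, w_sep=2.0)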
| [
"pydantic.Field",
"monty.json.MontyDecoder",
"pydantic.validator"
] | [((445, 557), 'pydantic.Field', 'Field', (['None'], {'description': '"""The Materials Project ID of the material. This comes in the form: mp-******"""'}), "(None, description=\n 'The Materials Project ID of the material. This comes in the form: mp-******'\n )\n", (450, 557), False, 'from pydantic import BaseModel, Field, validator\n'), ((589, 643), 'pydantic.Field', 'Field', (['None'], {'description': '"""Sigma value of the boundary"""'}), "(None, description='Sigma value of the boundary')\n", (594, 643), False, 'from pydantic import BaseModel, Field, validator\n'), ((683, 729), 'pydantic.Field', 'Field', (['None'], {'description': '"""Grain boundary type"""'}), "(None, description='Grain boundary type')\n", (688, 729), False, 'from pydantic import BaseModel, Field, validator\n'), ((777, 817), 'pydantic.Field', 'Field', (['None'], {'description': '"""Rotation axis"""'}), "(None, description='Rotation axis')\n", (782, 817), False, 'from pydantic import BaseModel, Field, validator\n'), ((860, 907), 'pydantic.Field', 'Field', (['None'], {'description': '"""Grain boundary plane"""'}), "(None, description='Grain boundary plane')\n", (865, 907), False, 'from pydantic import BaseModel, Field, validator\n'), ((952, 1004), 'pydantic.Field', 'Field', (['None'], {'description': '"""Rotation angle in degrees"""'}), "(None, description='Rotation angle in degrees')\n", (957, 1004), False, 'from pydantic import BaseModel, Field, validator\n'), ((1044, 1101), 'pydantic.Field', 'Field', (['None'], {'description': '"""Grain boundary energy in J/m^2"""'}), "(None, description='Grain boundary energy in J/m^2')\n", (1049, 1101), False, 'from pydantic import BaseModel, Field, validator\n'), ((1157, 1216), 'pydantic.Field', 'Field', (['None'], {'description': '"""Initial grain boundary structure"""'}), "(None, description='Initial grain boundary structure')\n", (1162, 1216), False, 'from pydantic import BaseModel, Field, validator\n'), ((1269, 1326), 'pydantic.Field', 'Field', (['None'], {'description': '"""Final grain boundary structure"""'}), "(None, description='Final grain boundary structure')\n", (1274, 1326), False, 'from pydantic import BaseModel, Field, validator\n'), ((1368, 1426), 'pydantic.Field', 'Field', (['None'], {'description': '"""Reduced formula of the material"""'}), "(None, description='Reduced formula of the material')\n", (1373, 1426), False, 'from pydantic import BaseModel, Field, validator\n'), ((1447, 1501), 'pydantic.Field', 'Field', (['None'], {'description': '"""Work of separation in J/m^2"""'}), "(None, description='Work of separation in J/m^2')\n", (1452, 1501), False, 'from pydantic import BaseModel, Field, validator\n'), ((1518, 1570), 'pydantic.Field', 'Field', (['None'], {'description': '"""CIF file of the structure"""'}), "(None, description='CIF file of the structure')\n", (1523, 1570), False, 'from pydantic import BaseModel, Field, validator\n'), ((1591, 1667), 'pydantic.Field', 'Field', (['None'], {'description': '"""Dash-delimited string of elements in the material"""'}), "(None, description='Dash-delimited string of elements in the material')\n", (1596, 1667), False, 'from pydantic import BaseModel, Field, validator\n'), ((1712, 1812), 'pydantic.Field', 'Field', (['None'], {'description': '"""Timestamp for the most recent calculation for this Material document"""'}), "(None, description=\n 'Timestamp for the most recent calculation for this Material document')\n", (1717, 1812), False, 'from pydantic import BaseModel, Field, validator\n'), ((1899, 1934), 
'pydantic.validator', 'validator', (['"""last_updated"""'], {'pre': '(True)'}), "('last_updated', pre=True)\n", (1908, 1934), False, 'from pydantic import BaseModel, Field, validator\n'), ((1988, 2002), 'monty.json.MontyDecoder', 'MontyDecoder', ([], {}), '()\n', (2000, 2002), False, 'from monty.json import MontyDecoder\n')] |
import numpy as np
from math import sqrt,log
import copy
import Model.config as config
import time
BASE = np.array(['A','C','G','T'])
QUANT = {'A': 0, 'C':1, 'G':2, 'T':3}
qua2str = lambda qua: ''.join(BASE[qua])
str2qua = lambda dna: np.array([QUANT[base] for base in dna],dtype = 'uint8')
class DNA_Channel_Model:
def __init__(self, Modules, arg = config.DEFAULT_PASSER):
if Modules:
self.Modules = Modules
else:
self.Modules = [
('synthesizing',Synthesizer(arg)),
# ('decaying',Decayer(arg)),
('pcring',PCRer(arg = arg)),
('sampling',Sampler(arg = arg)),
('sequencing',Sequencer(arg))
]
def __call__(self, dnas, inspectFunction = None, print_state = True):
if print_state: print('Model running... ', end = '\r')
ast = time.time()
for stage_name, module in self.Modules:
st = time.time()
if print_state: print(stage_name + '.... ', end = '')
dnas = module(dnas)
if print_state: print(' Done. Time spent: ' + str(round(time.time() - st,3)) +'s')
if inspectFunction: inspectFunction(dnas)
if print_state: print(f'Simulation done. Time spent: {round(time.time()-ast,3)}s')
return dnas
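# A minimal usage sketch (illustrative strands; assumes config.DEFAULT_PASSER supplies the defaults):
#
#   dnas = ['ACGTACGTACGT' * 10 for _ in range(5)]   # reference strands
#   model = DNA_Channel_Model(None)                   # default synthesize -> PCR -> sample -> sequence pipeline
#   out = model(dnas)                                 # per strand: {'ori': ..., 'num': ..., 're': [[count, errors, read], ...]}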
class Synthesizer:
def __init__(self, arg):
self.Yield = arg.syn_yield
self.N = arg.syn_number
self.pcrc = arg.syn_pcrc
self.pcrp = arg.syn_pcrp
self.performPCR = arg.syn_performPCR
self.probS = arg.syn_sub_prob
self.probD = arg.syn_del_prob
self.probI = arg.syn_ins_prob
self.syn = Syn_D(self.Yield, self.N)
self.err = ErrorAdder(self.probS, self.probD, self.probI)
self.pcr = PCRer(self.pcrc,self.pcrp)
def __call__(self, dnas):
dnas = self.syn(dnas)
dnas = self.err(dnas)
if self.performPCR: dnas = self.pcr(dnas)
return dnas
class Decayer:
def __init__(self, arg):
self.error_rate = arg.decay_er
self.loss_rate = arg.decay_loss_rate
self.constructTm()
self.sam = Sampler(1-self.loss_rate)
self.err = ErrorAdder(probS = 0, probD = 0, probI = 0, TM = self.TM)
def constructTm(self):
Tm = [[0 for i in range(4)] for i in range(4)]
        Tm[1][3] = Tm[2][0] = self.error_rate      # C -> T and G -> A deamination errors
        Tm[1][1] = Tm[2][2] = 1 - self.error_rate  # remaining probability of C and G staying intact
Tm[0][0] = Tm[3][3] = 1
self.TM = Tm
def __call__(self, dnas):
dnas = self.sam(dnas)
dnas = self.err(dnas)
return dnas
class Sequencer:
def __init__(self,arg):
self.copies_required = arg.seq_copies
self.pcrp = arg.seq_prcp
self.performPCR = arg.seq_performPCR
self.seq_depth = arg.seq_depth
self.TM = arg.seq_TM
def __call__(self, dnas):
if self.performPCR:
dnas = self.pcr(dnas)
dnas = self.sample(dnas)
self.E = ErrorAdder(probI = 0.00001, probD= 0.00001, TM = self.TM)
dnas = self.E(dnas)
return dnas
def pcr(self,dnas):
rNs = [dna['num'] for dna in dnas]
average_copies = sum(rNs) / len(rNs)
amplify_ratio = self.copies_required / average_copies
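        # Solve (1 + pcrp)**pcrc ~= amplify_ratio for the number of PCR cycles needed to reach the target depth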
self.pcrc = int(log(amplify_ratio) / log(self.pcrp+1))
dnas = PCRer(self.pcrc, self.pcrp)(dnas)
return dnas
def sample(self, dnas):
rNs = [dna['num'] for dna in dnas]
average_copies = sum(rNs) / len(rNs)
self.sample_ratio = self.seq_depth / average_copies
dnas = Sampler(self.sample_ratio)(dnas)
return dnas
class Syn_D:
def __init__(self, Yield = 0.99, N = 30):
self.Yield = Yield
self.N = N
def distribution(self):
return np.random.binomial(self.N, self.p)
def __call__(self,dnas):
self.L = len(dnas[0])
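        # A single synthesis attempt yields a full-length strand with probability (per-base yield)**length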
self.p = self.Yield ** self.L
out = []
for dna in dnas:
n = self.distribution()
out.append({'ori':dna, 'num':n,'re':[[n,[]]]})
return out
class Sampler:
def __init__(self, p=0.001, sam_to_number = False, arg = None):
if arg:
self.p = arg.sam_ratio
self.sam_to_number = arg.sam_to_number
else:
self.p = p
self.sam_to_number = sam_to_number
def distribution(self,N):
return np.random.binomial(N,self.p)
def run(self,re_dnas):
markers = []
for i,dna in enumerate(re_dnas):
dna[0] = self.distribution(dna[0])
if dna[0] > 0: markers.append(i)
re_dnas = [re_dnas[i] for i in markers]
return re_dnas
def __call__(self,dnas, in_place = False):
if not in_place:
out_dnas = copy.deepcopy(dnas)
else:
out_dnas = dnas
if self.sam_to_number:
rNs = [dna['num'] for dna in dnas]
average_copies = sum(rNs) / len(rNs)
self.p = self.sam_to_number / average_copies
for dna in out_dnas:
dna['re'] = self.run(dna['re'])
dna['num'] = sum([tp[0] for tp in dna['re']])
return out_dnas
# class PCRer:
# def __init__(self,N = 16, p = 0.7, arg = None):
# if arg:
# p = arg.pcrp
# N = arg.pcrc
# self.p = p
# self.N = N
# self.u0 = (1+p)**N
# self.sigma0 = np.sqrt((1-p) / (1+p) * ((1+p)**(2*N) - (1+p)**N))
# def distribution(self,ori):
# assert ori >= 0
# return max(int(np.random.normal(self.u0 * ori, self.sigma0 * sqrt(ori))),0)
# def run(self,re_dnas):
# out = []
# for dna in re_dnas:
# dna[0] = self.distribution(dna[0])
# if dna[0] > 0:
# out.append(dna)
# return out
# def __call__(self,dnas,in_place = False):
# if not in_place:
# out_dnas = copy.deepcopy(dnas)
# else:
# out_dnas = dnas
# for dna in out_dnas:
# dna['re'] = self.run(dna['re'])
# dna['num'] = sum([tp[0] for tp in dna['re']])
# return out_dnas
class PCRer:
def __init__(self,N = 16, p = 0.7, pBias = 0.05, arg = None):
if arg:
p = arg.pcrp
N = arg.pcrc
pBias = arg.pcrBias
self.p = p
self.N = N
self.pBias = pBias
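        # Mean and standard deviation of the copy number of one molecule after N cycles with efficiency p
        # (each molecule is duplicated with probability p per cycle)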
self.u0 = (1+p)**N
self.sigma0 = np.sqrt((1-p) / (1+p) * ((1+p)**(2*N) - (1+p)**N))
def distribution(self,ori):
assert ori >= 0
p = np.random.uniform(self.p - self.pBias, self.p + self.pBias)
N = self.N
u0 = (1+p)**N
sigma0 = np.sqrt((1-p) / (1+p) * ((1+p)**(2*N) - (1+p)**N))
return max(int(np.random.normal(u0 * ori, sigma0 * sqrt(ori))),0)
def run(self,re_dnas):
out = []
for dna in re_dnas:
dna[0] = self.distribution(dna[0])
if dna[0] > 0:
out.append(dna)
return out
def __call__(self,dnas,in_place = False):
if not in_place:
out_dnas = copy.deepcopy(dnas)
else:
out_dnas = dnas
for dna in out_dnas:
dna['re'] = self.run(dna['re'])
dna['num'] = sum([tp[0] for tp in dna['re']])
return out_dnas
class ErrorAdder:
def __init__(self,probS = 0.001, probD = 0.0005, probI = 0.0005, TM = None):
        if TM is not None:
self.TM = TM
self.all_equal = 0
else:
self.TM = genTm(probS)
self.all_equal = 1
self.probD = probD
self.probI = probI
def genNewError(self,dna):
Errors = []
for i,base in enumerate(['A','C','G','T']):
Pi = np.where(dna==base)[0]
subi = np.random.choice(['A','C','G','T'],size = Pi.size, p = self.TM[i])
subPi = np.where(subi != base)[0]
for pos in subPi:
Errors.append((Pi[pos],'s', subi[pos]))
delP = np.where(np.random.choice([False,True],size = len(dna), p = [1-self.probD,self.probD]))[0]
insP = np.where(np.random.choice([False,True],size = len(dna), p = [1-self.probI,self.probI]))[0]
Errors += ([(pos,'-',dna[pos]) for pos in delP] + [(pos,'+',np.random.choice(['A','T','C','G'])) for pos in insP])
return Errors
def run(self,ori_dna,re_dnas):
ori_dna = np.array(list(ori_dna))
new_types = []
for re_dna in re_dnas:
for i in range(re_dna[0]):
new_error = self.genNewError(ori_dna)
if len(new_error) > 0:
new_types.append([1, re_dna[1] + new_error])
re_dna[0] -= 1
return re_dnas + new_types
def __call__(self,dnas,in_place = False, apply = True):
if not in_place:
out_dnas = copy.deepcopy(dnas)
else:
out_dnas = dnas
for dna in out_dnas:
dna['re'] = self.run(dna['ori'], dna['re'])
if apply:
out_dnas = self.apply_batch(out_dnas)
return out_dnas
# apply errors to dnas
def apply(self, ori_dna, errors):
dna = list(ori_dna)
errors.sort(key = lambda x: x[0])
# substitutions
for error in errors:
pos, tp, base = error
if tp == 's':
dna[pos] = base
        # del / insertions: bias tracks the index shift caused by earlier deletions and insertions
        bias = 0
        for error in errors:
            pos, tp, base = error
            if tp == '-':
                try:
                    dna.pop(pos + bias)
                except IndexError:
                    # print('pop index error:', pos + bias)
                    break
                bias -= 1
            elif tp == '+':
                dna.insert(pos + bias, base)
                bias += 1
dna = ''.join(dna)
return dna
def apply_batch(self, dnas):
for dna in dnas:
ori_dna = dna['ori']
re = []
for re_dna in dna['re']:
                if re_dna[0] == 0: continue  # skip variants with no remaining copies
re.append([re_dna[0],re_dna[1],self.apply(ori_dna, re_dna[1])])
dna['re'] = re
return dnas
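# Build a 4x4 per-base substitution matrix: probability `prob` for each of the three substitutions,
# 1 - 3*prob of keeping the original base.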
def genTm(prob):
tm = []
for i in range(4):
row = [prob for i in range(4)]
row[i] = 1 - 3* prob
tm.append(row)
return tm | [
"numpy.sqrt",
"copy.deepcopy",
"numpy.random.choice",
"numpy.where",
"math.sqrt",
"math.log",
"numpy.array",
"numpy.random.uniform",
"time.time",
"numpy.random.binomial"
] | [((108, 138), 'numpy.array', 'np.array', (["['A', 'C', 'G', 'T']"], {}), "(['A', 'C', 'G', 'T'])\n", (116, 138), True, 'import numpy as np\n'), ((237, 291), 'numpy.array', 'np.array', (['[QUANT[base] for base in dna]'], {'dtype': '"""uint8"""'}), "([QUANT[base] for base in dna], dtype='uint8')\n", (245, 291), True, 'import numpy as np\n'), ((882, 893), 'time.time', 'time.time', ([], {}), '()\n', (891, 893), False, 'import time\n'), ((3879, 3913), 'numpy.random.binomial', 'np.random.binomial', (['self.N', 'self.p'], {}), '(self.N, self.p)\n', (3897, 3913), True, 'import numpy as np\n'), ((4499, 4528), 'numpy.random.binomial', 'np.random.binomial', (['N', 'self.p'], {}), '(N, self.p)\n', (4517, 4528), True, 'import numpy as np\n'), ((6609, 6673), 'numpy.sqrt', 'np.sqrt', (['((1 - p) / (1 + p) * ((1 + p) ** (2 * N) - (1 + p) ** N))'], {}), '((1 - p) / (1 + p) * ((1 + p) ** (2 * N) - (1 + p) ** N))\n', (6616, 6673), True, 'import numpy as np\n'), ((6733, 6792), 'numpy.random.uniform', 'np.random.uniform', (['(self.p - self.pBias)', '(self.p + self.pBias)'], {}), '(self.p - self.pBias, self.p + self.pBias)\n', (6750, 6792), True, 'import numpy as np\n'), ((6851, 6915), 'numpy.sqrt', 'np.sqrt', (['((1 - p) / (1 + p) * ((1 + p) ** (2 * N) - (1 + p) ** N))'], {}), '((1 - p) / (1 + p) * ((1 + p) ** (2 * N) - (1 + p) ** N))\n', (6858, 6915), True, 'import numpy as np\n'), ((959, 970), 'time.time', 'time.time', ([], {}), '()\n', (968, 970), False, 'import time\n'), ((4881, 4900), 'copy.deepcopy', 'copy.deepcopy', (['dnas'], {}), '(dnas)\n', (4894, 4900), False, 'import copy\n'), ((7273, 7292), 'copy.deepcopy', 'copy.deepcopy', (['dnas'], {}), '(dnas)\n', (7286, 7292), False, 'import copy\n'), ((7988, 8054), 'numpy.random.choice', 'np.random.choice', (["['A', 'C', 'G', 'T']"], {'size': 'Pi.size', 'p': 'self.TM[i]'}), "(['A', 'C', 'G', 'T'], size=Pi.size, p=self.TM[i])\n", (8004, 8054), True, 'import numpy as np\n'), ((9052, 9071), 'copy.deepcopy', 'copy.deepcopy', (['dnas'], {}), '(dnas)\n', (9065, 9071), False, 'import copy\n'), ((3352, 3370), 'math.log', 'log', (['amplify_ratio'], {}), '(amplify_ratio)\n', (3355, 3370), False, 'from math import sqrt, log\n'), ((3373, 3391), 'math.log', 'log', (['(self.pcrp + 1)'], {}), '(self.pcrp + 1)\n', (3376, 3391), False, 'from math import sqrt, log\n'), ((7946, 7967), 'numpy.where', 'np.where', (['(dna == base)'], {}), '(dna == base)\n', (7954, 7967), True, 'import numpy as np\n'), ((8075, 8097), 'numpy.where', 'np.where', (['(subi != base)'], {}), '(subi != base)\n', (8083, 8097), True, 'import numpy as np\n'), ((8467, 8505), 'numpy.random.choice', 'np.random.choice', (["['A', 'T', 'C', 'G']"], {}), "(['A', 'T', 'C', 'G'])\n", (8483, 8505), True, 'import numpy as np\n'), ((6961, 6970), 'math.sqrt', 'sqrt', (['ori'], {}), '(ori)\n', (6965, 6970), False, 'from math import sqrt, log\n'), ((1286, 1297), 'time.time', 'time.time', ([], {}), '()\n', (1295, 1297), False, 'import time\n'), ((1137, 1148), 'time.time', 'time.time', ([], {}), '()\n', (1146, 1148), False, 'import time\n')] |