filename | text
---|---
the-stack_0_7286 | from opytimizer.optimizers.science import SA
# One should declare a hyperparameters object based
# on the desired algorithm that will be used
params = {
'T': 100,
'beta': 0.999
}
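# In simulated annealing these typically act as the initial system temperature
# and the cooling (Boltzmann-style) constant, respectively.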
# Creates a SA optimizer
o = SA(params=params)
|
the-stack_0_7288 | #!/usr/bin/env python3
import unittest
import os
from getJenkinsVersion import get_latest_version, get_jenkins_version
USERNAME = os.environ.get('MAVEN_REPOSITORY_USERNAME', '')
PASSWORD = os.environ.get('MAVEN_REPOSITORY_PASSWORD', '')
# Test that GetJenkinsVersion returns the correct value
class TestGetJenkinsVersion(unittest.TestCase):
'''Unit tests for the getJenkinsVersion.py script'''
data_set = {
'all_versions': [
"1", "1.10", "1.11", "1.10.1", "1.10.2", "1.11.0", "1.11.2",
"1.999",
"2", "2.10", "2.11", "2.10.1", "2.10.2", "2.11.0", "2.11.2",
"2.99", "2.249", "2.249.1", "2.265", "2.265.3"
],
'url': "https://repo.jenkins-ci.org/releases/org/jenkins-ci/main/jenkins-war/maven-metadata.xml",
'versions': [
{
'name': 'latest',
'expected': '2.265'
},
{
'name': '1',
'expected': '1.658'
},
{
'name': '2',
'expected': '2.265'
},
{
'name': '2.249',
'expected': '2.249.3'
},
{
'name': '2.249.3',
'expected': '2.249.3'
}],
}
def test_latest_version(self):
'''Test that we correctly get the latest Jenkins version'''
result = get_latest_version(self.data_set["all_versions"])
self.assertEqual("2.265.3", result)
def test_result(self):
'''Test that we correctly resolve each requested Jenkins version'''
for version in self.data_set["versions"]:
result = get_jenkins_version(self.data_set["url"],
version["name"],
USERNAME,
PASSWORD)
self.assertEqual(version["expected"], result)
if __name__ == '__main__':
unittest.main()
|
the-stack_0_7290 | """Script to run stacking scripts on the DESY cluster.
Through use of argparse, a given configuration for the code can be selected.
This can be given from the command line, in the form:
python RunCluster.py -c Desired_Configuration_Name -n Number_Of_Tasks -s
Each available configuration must be listed in "config.ini", and controls
options for fitting, such as which catalogue is to be used, and which seasons
of data should be included. If -x is included, then a new job is submitted
to the cluster. Having submitted the job to the cluster it will be run in
parallel Number_of_Tasks times. The shell script SubmitOne.sh is called for
each task, which in turn calls RunLocal.py with the given configuration setting.
The Wait function will periodically query the cluster
to check on the status of the job, and will output the job status occasionally.
Once all sub-tasks are completed, the script will proceed to call
MergeFiles.run() for the given configuration, combining results.
"""
import subprocess
import time
import os
import os.path
import logging
import argparse
import numpy as np
from flarestack.shared import log_dir, fs_dir
from flarestack.cluster.submitter import Submitter
from flarestack.cluster.make_desy_cluster_script import (
make_desy_submit_file,
submit_file,
)
logger = logging.getLogger(__name__)
username = os.path.basename(os.environ["HOME"])
cmd = "qstat -u " + username
def wait_for_cluster(job_ids=None):
logger.warning(
"The wait_for_cluster function is deprecated! "
"Use the Submitter class instead."
)
Submitter.wait_for_cluster(job_ids)
# if not job_ids:
# wait_for_job()
# else:
# try:
# for i, job_id in enumerate(job_ids):
#
# logger.debug(f'waiting for job {job_id}')
# prog_str = f'{i}/{len(job_ids)}'
# wait_for_job(job_id, prog_str)
#
# except TypeError:
# logger.debug('Only waiting for one job')
# wait_for_job(job_ids)
def wait_for_job(job_id=None, progress_str=None):
"""
Runs the command cmd, which queries the status of the job on the
cluster, and reads the output. While the output is not an empty
string (indicating job completion), the cluster is re-queried
every 30 seconds. Occasionally outputs the number of remaining sub-tasks
on the cluster, and outputs the full table result every ~8 minutes. On
completion of the job, the function returns and allows the script to
continue.
"""
if not job_id:
job_id_str = "s"
else:
if progress_str:
job_id_str = f" {progress_str} {job_id}"
else:
job_id_str = " " + str(job_id)
time.sleep(10)
cmd = f"qstat -u {username}"
process = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)
tmp = process.stdout.read().decode()
n_total = n_tasks(tmp, job_id)
i = 31
j = 6
while n_total != 0:
if i > 3:
running_process = subprocess.Popen(
cmd + " -s r", stdout=subprocess.PIPE, shell=True
)
running_tmp = running_process.stdout.read().decode()
if running_tmp != "":
n_running = n_tasks(running_tmp, job_id)
else:
n_running = 0
logger.info(
f"{time.asctime(time.localtime())} - Job{job_id_str}:"
f" {n_total} entries in queue. "
f"Of these, {n_running} are running tasks, and "
f"{n_total-n_running} are tasks still waiting to be executed."
)
i = 0
j += 1
if j > 7:
logger.info(str(tmp))
j = 0
time.sleep(30)
i += 1
process = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)
tmp = process.stdout.read().decode()
n_total = n_tasks(tmp, job_id)
def submit_to_cluster(path, n_cpu=2, n_jobs=10, ram_per_core=None, **kwargs):
for file in os.listdir(log_dir):
os.remove(log_dir + file)
# Submits job to the cluster
submit_cmd = "qsub "
if n_cpu > 1:
submit_cmd += " -pe multicore {0} -R y ".format(n_cpu)
ram_per_core = (
"{0:.1f}G".format(6.0 / float(n_cpu) + 2.0)
if not ram_per_core
else ram_per_core
)
print("Ram per core:", ram_per_core)
submit_cmd += "-t 1-{0}:1 {1} {2} {3}".format(n_jobs, submit_file, path, n_cpu)
make_desy_submit_file(ram_per_core, **kwargs)
print(time.asctime(time.localtime()), submit_cmd, "\n")
process = subprocess.Popen(submit_cmd, stdout=subprocess.PIPE, shell=True)
msg = process.stdout.read().decode()
print(msg)
job_id = int(str(msg).split("job-array")[1].split(".")[0])
return job_id
def n_tasks(tmp, job_id):
"""
Returns the number of tasks given the output of qstat
:param tmp: output of qstat
:param job_id: int, optional; if given, only tasks belonging to this job will be counted
:return: int
"""
st = str(tmp)
ids = np.array([int(s.split(" ")[2]) for s in st.split("\n")[2:-1]])
if job_id:
return len(ids[ids == job_id])
else:
return len(ids)
if not os.path.isfile(submit_file):
make_desy_submit_file()
|
the-stack_0_7294 | """
File: sillystream/examples/daemon.py
Author: John Andersen
Description: A process that forks off and redirects stdout to sillystream server
To run:
python examples/daemon.py
python sillystream/__main__.py client
"""
import os
import sys
import time
import sillystream
# Send stdout to sillystream
STREAM = True
# Seconds to stay alive for
STAY_ALIVE = 20
def make_daemon():
"""
Daemonize to run in background
"""
pid = os.fork()
if pid > 0:
# exit first parent
sys.exit(0)
pid = os.fork()
if pid > 0:
# exit second parent
sys.exit(0)
if STREAM:
# Create sillystream server
output = sillystream.server()
# Start the server
output.start_thread()
else:
output = open("/dev/null", 'w')
sys.stdout = output
sys.stderr = output
def main():
"""
Daemonizes and prints numbers
"""
make_daemon()
i = 0
while i < STAY_ALIVE:
print("test {}".format(i))
i += 1
time.sleep(1)
if __name__ == '__main__':
main()
|
the-stack_0_7295 | import ctypes
import numpy as np
import os
import subprocess
import tempfile
import tvm
from tvm import relay, get_global_func, target, register_func
from tvm.relay.expr import Expr, Function, Let, GlobalVar
from tvm.relay.adt import Constructor
from tvm.relay.expr_functor import ExprFunctor, ExprVisitor
from tvm.relay.backend import compile_engine
from .little_cpp import PackedCall, CPPFunction, Invoke, Decl, CPPIf, CPPTuple, CPPMatch, CPPConstructor, CPPTupleGetItem
from .little_cpp import CPPRefCreate, CPPRefRead, CPPRefWrite
from . import to_source
from .convert import convert
TVM_PATH = os.environ['TVM_HOME']
def must_run_process(args):
proc = subprocess.run(args)
assert proc.returncode == 0
def compile_cpp(source, lib_name, flags=None, lib_path=None):
if flags is None:
flags = []
if lib_path is None:
lib_path = os.curdir
debug_source_path = os.path.join(lib_path, 'source.cc')
# Write out the file for debugging.
with open(debug_source_path, 'w') as source_file:
source_file.write(source)
# with tempfile.TemporaryDirectory() as tmpdir:
tmpdir = tempfile.mkdtemp(prefix="relay_aot_compiler")
lib_path = os.path.join(tmpdir, lib_name)
source_path = os.path.join(tmpdir, 'source.cc')
with open(source_path, 'w') as source_file:
source_file.write(source)
must_run_process(["clang-format", "-i", debug_source_path])
system = os.uname()[0]
if system == 'Darwin':
command = [
"clang",
"-std=c++14",
"-shared",
"-undefined",
"dynamic_lookup",
"-o",
lib_path,
source_path,
f"-I{TVM_PATH}/3rdparty/dmlc-core/include",
f"-I{TVM_PATH}/3rdparty/dlpack/include",
f"-I{TVM_PATH}/3rdparty/HalideIR/src",
f"-I{TVM_PATH}/include",
f"-L{TVM_PATH}/build",
"-ltvm"
] + flags
else:
command = [
"clang",
"-std=c++14",
"-shared",
"-fPIC",
"-o",
lib_path,
source_path,
f"-I{TVM_PATH}/3rdparty/dmlc-core/include",
f"-I{TVM_PATH}/3rdparty/dlpack/include",
f"-I{TVM_PATH}/3rdparty/HalideIR/src",
f"-I{TVM_PATH}/include",
f"-L{TVM_PATH}/build",
"-ltvm"
] + flags
must_run_process(command)
return lib_path
def load_lib(name):
return ctypes.CDLL(name, ctypes.RTLD_GLOBAL)
def is_primitive(e: relay.Expr):
return isinstance(e, relay.Function) and e.attrs and e.attrs.Primitive.value == 1
class AoTCompiler(ExprFunctor):
def __init__(self, mod, tgt) -> None:
super().__init__()
self.mod = mod
self.tgt = tgt
self.engine = compile_engine.get()
self.bindings = [[]]
self.gv_map = {}
def add_binding(self, var, value):
self.bindings[-1].append((var, value))
def optimize(self, expr: Function) -> Function:
opts = relay.transform.Sequential([relay.transform.FuseOps(),
relay.transform.ToANormalForm()])
self.mod['main'] = expr
self.mod = opts(self.mod)
ret = self.mod['main']
return ret
def mk_primitive_op(self, func: Expr, args, output_type) -> Expr:
cc_key = compile_engine.CCacheKey(func, self.tgt)
hash = relay.analysis.structural_hash(func)
name = f"op_{hash}"
if not get_global_func(name, allow_missing=True):
jit_func = self.engine.jit(cc_key, self.tgt)
register_func(name, jit_func)
return PackedCall(name, args, [x.checked_type for x in args], output_type)
def visit_call(self, call: Expr) -> Expr:
if is_primitive(call.op):
return self.mk_primitive_op(call.op, call.args, call.checked_type)
elif isinstance(call.op, Constructor):
return CPPConstructor(call.op.tag, [self.visit(arg) for arg in call.args])
else:
assert call.attrs is None
args = [self.visit(arg) for arg in call.args]
fn = self.visit(call.op)
return Invoke(fn, args)
def visit_let(self, let: Expr) -> Expr:
self.bindings.append([])
while isinstance(let, Let):
cpp_value = self.visit(let.value)
self.add_binding(let.var, cpp_value)
let = let.body
bindings = self.bindings.pop()
body = self.visit(let)
return Decl(bindings, body)
def visit_var(self, var):
return var
def visit_global_var(self, gv):
if gv not in self.gv_map:
self.gv_map[gv] = "to be updated"
self.gv_map[gv] = self.visit(self.mod[gv])
return gv
def visit_function(self, func):
if is_primitive(func):
body = self.mk_primitive_op(func, func.params, func.ret_type)
return CPPFunction(func.params, body, func.checked_type.ret_type)
else:
return CPPFunction(func.params, self.visit(func.body), func.checked_type.ret_type)
def visit_constant(self, const):
return const
def visit_if(self, i):
return CPPIf(self.visit(i.cond),
self.visit(i.true_branch),
self.visit(i.false_branch),
i.checked_type)
def visit_tuple(self, t):
return CPPTuple([self.visit(f) for f in t.fields], t.checked_type)
def visit_match(self, m):
return CPPMatch(self.visit(m.data),
[(c.lhs, self.visit(c.rhs)) for c in m.clauses],
m.checked_type)
def visit_op(self, op):
raise Exception(f'op outside of primitive: {op}')
def visit_tuple_getitem(self, t):
return CPPTupleGetItem(self.visit(t.tuple_value), t.index, t.checked_type)
def visit_ref_create(self, r):
return CPPRefCreate(self.visit(r.value), r.checked_type)
def visit_ref_read(self, r):
return CPPRefRead(self.visit(r.ref), r.checked_type)
def visit_ref_write(self, r):
return CPPRefWrite(self.visit(r.ref), self.visit(r.value))
_LIB_COUNTER = 1
_LIB = []
def lib_and_func_name(name):
global _LIB_COUNTER
packed_name = f'relay.aot.{name}.{_LIB_COUNTER}'
lib_name = f"librelay_aot_{_LIB_COUNTER}.so"
_LIB_COUNTER += 1
return lib_name, packed_name
import time
def _mk_wrapper(fn, ctx, constants, record_time):
def _wrapper(*args):
new_constants = [convert(a, ctx) for a in constants]
new_args = [convert(a, ctx) for a in args]
begin = time.perf_counter()
res = fn(*new_constants, *new_args)
end = time.perf_counter()
return res if not record_time else (res, end - begin)
return _wrapper
import sys
sys.setrecursionlimit(10000)
def compile(func, mod, ctx, tgt, name='default', record_time=False):
"""Compile a relay function into a native library function.
Parameters
----------
func: Expr
The function.
mod: Module
The Module.
ctx: Context
The Context.
tgt: Target
The target
name: String
The name of the target binary library.
record_time: Bool
Whether to also return the time taken by each call to the compiled function.
Returns
-------
result: Function
A function that, when passed some values,
will convert them to the right format and call the compiled function.
"""
global _LIB
if isinstance(func, GlobalVar):
func = mod[func]
assert isinstance(func, Function)
compiler = AoTCompiler(mod, tgt)
func = compiler.optimize(func)
func = compiler.visit(func)
lib_name, packed_name = lib_and_func_name(name)
constants, source_code = to_source.to_source(mod, func, compiler.gv_map, ctx, packed_name)
lib_name = f"librelay_aot_{_LIB_COUNTER}.so"
library_path = compile_cpp(source_code, lib_name, flags=["-O3"])
_LIB.append(load_lib(library_path))
fn = get_global_func(packed_name)
return _mk_wrapper(fn, ctx, constants, record_time)
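# Hypothetical usage sketch (kept as a comment because the exact relay/tvm
# calls depend on the TVM version this AoT compiler targets; all names below
# are illustrative only):
#
#     x = relay.var('x', shape=(2, 2), dtype='float32')
#     mod = tvm.IRModule.from_expr(relay.Function([x], x + x))
#     doubler = compile(mod['main'], mod, tvm.cpu(0), 'llvm')
#     doubler(np.ones((2, 2), dtype='float32'))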
|
the-stack_0_7297 | import pandas as pd
from pyecharts.components import Table
from pyecharts.options import ComponentTitleOpts
__all__ = ['statistics']
def statistics(model, jupyter=True, path='Model Summary.html', title="Model Summary", subtitle=""):
t = pd.DataFrame([[i.name, i.__class__.__name__, i.trainable, i.dtype, i.input_shape, i.output_shape, i.count_params()] for i in model.layers],
columns=['layer_custom_name', 'layer_object_name', 'trainable', 'dtype', 'input_shape', 'output_shape', 'params'])
# t['output_memory(MB)'] = (t.output_shape.map(lambda x:sum([reduce(lambda y,z:y*z, i[1:]) for i in x]) if isinstance(x, list) else reduce(lambda y,z:y*z, x[1:]))
# *t.dtype.map(lambda x:int(re.sub("\D", "", x))))/32#/1024/1024)
t.loc['total'] = ['', '', '', '', '', '', t.params.sum()]
t['input_shape'] = t.input_shape.map(lambda x:str(x).replace("),(", "),\n(") if isinstance(x, list) else x)
t = t.reset_index().rename(columns={'index':''})
for i in t.columns:
t[i] = t[i].astype(str)
table = Table()
headers = t.columns.tolist()
rows = t.values.tolist()
table.add(headers, rows).set_global_opts(title_opts=ComponentTitleOpts(title=title, subtitle=subtitle))
return table.render_notebook() if jupyter else table.render(path)
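if __name__ == '__main__':
    # Minimal usage sketch (assumes TensorFlow/Keras is installed; the model
    # below is purely illustrative).
    from tensorflow import keras
    model = keras.Sequential([
        keras.layers.Dense(8, activation='relu', input_shape=(4,)),
        keras.layers.Dense(1),
    ])
    statistics(model, jupyter=False, path='Model Summary.html')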
|
the-stack_0_7298 | # This scripts demonstrates how to use mitmproxy's filter pattern in inline scripts.
# Usage: mitmdump -s "filt.py FILTER"
import sys
from mitmproxy import filt
def start(context):
if len(sys.argv) != 2:
raise ValueError("Usage: -s 'filt.py FILTER'")
context.filter = filt.parse(sys.argv[1])
def response(context, flow):
if flow.match(context.filter):
print("Flow matches filter:")
print(flow)
|
the-stack_0_7301 | # encoding: utf-8
# cython: profile=False
# cython: embedsignature=True
"""
Implementation of DirichletDistribution.
The Dirichlet distribution makes use of path counts from a DFA. Consider
two representations of the DFA for the golden mean process.
0 1
0 0 1
1 0 -1
and
0 1
0 0 1
1 0 2
2 2 2
In the first, we have an incomplete DFA, whereas the second DFA is complete.
The first represents forbidden transitions with a special node, -1. There is no
explicit treatment of this node, and so, when counting paths, one must deal
with the reality that paths can terminate. For the second DFA, node 2 is an
explicit node that receives all forbidden transitions. Self loops keep all
subsequent transitions at node 2. In this case, paths, even forbidden paths,
do not terminate prematurely. It should not be surprising that supporting
incomplete DFAs makes the code *more* complex. For example, the expression
for the likelihood that we use during inference is valid only if the path has
a nonzero probability, and so, the implementation must first check that the
path is valid.
Further complicating matters is that this implementation will infer transition
probabilities for every node that has more than one outgoing edge. Without
a mechanism for declaring which edges should be inferred and which should be
fixed, this means that doing inference on a complete DFA will yield undesirable
results---as forbidden edges will be inferred to have some nonzero probability.
What this means for the current implementation:
If one is attempting to do inference on an HMM that does not have full
support from each state, then one should pass in an incomplete DFA of its
support, rather than a complete DFA. The expression for the likelihood
(and thus, evidence) still only holds for words with nonzero probability.
Edges to implicit, forbidden nodes will have probability 0, which is fixed
and not inferred by the algorithm.
A goal for the future is to extend it so that one can declare which edges are
fixed and which should be inferred, and how the parameters might be related
to one another.
"""
from __future__ import absolute_import
from __future__ import division
import cython
#cimport cython
import numpy as np
#cimport numpy as np
from copy import deepcopy
from .counts import path_counts
from .exceptions import InvalidInitialNode
from itertools import product
BTYPE = np.bool_
#ctypedef bint BTYPE_t
ITYPE = np.int64
#ctypedef np.int64_t ITYPE_t
__all__ = ['DirichletDistribution', 'Infer']
import dit
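# Illustrative sketch only (not used elsewhere in this module): the two golden
# mean process DFAs from the module docstring, written as transition matrices
# in the format expected by `tmatrix` below. Rows are nodes, columns are
# symbols, entries are next nodes, and -1 marks a forbidden transition in the
# incomplete representation.
_GOLDEN_MEAN_INCOMPLETE = np.array([[0, 1],
                                    [0, -1]])
_GOLDEN_MEAN_COMPLETE = np.array([[0, 1],
                                  [0, 2],
                                  [2, 2]])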
class DirichletDistribution(object):
"""
A barebones representation of a product of Dirichlet distributions.
"""
### Public
nodes = None
symbols = None
final_node = None
valid_initial_nodes = None
node_paths = None
nNodes = None
nSymbols = None
nInitial = None
nEdges = None
prng = None
### Private
tmatrix = None
edges = None
edge_alphas = None
edge_counts = None
node_alphas = None
node_counts = None
_temp = None
def __init__(self, tmatrix, data=None, node_path=False, prng=None, out_arrays=None):
#np.ndarray[ITYPE_t, ndim=2, mode="c"] tmatrix,
#np.ndarray[ITYPE_t, ndim=1, mode="c"] data,
#BTYPE_t node_path=False,
#out_arrays=None):
# In what follows, we use these variables:
# n : number of nodes
# k : number of symbols
# L : length of data
# nInitial : number of valid initial nodes
# nEdges : number of edges
if prng is None:
prng = np.random.RandomState()
self.prng = prng
if data is None:
data = np.array((), dtype=int)
# shape: (n, k)
# axes: (node, symbol)
# Each element is the next node.
self.tmatrix = tmatrix
# shape: (nEdges, 2)
# axes: (edge index, (node, symbol))
self.edges = np.dstack(np.nonzero(tmatrix != -1))[0]
# shape : (n,)
# axes: (node,)
# Not strictly necessary since the nodes are integers from zero.
self.nodes = np.arange(tmatrix.shape[0])
# shape: (k,)
# axes: (symbol,)
self.symbols = np.arange(tmatrix.shape[1])
counts, final, node_paths = path_counts(tmatrix, data,
node_path, out_arrays)
# shape: (n, n, k)
# axes: (initialNode, node, symbol)
# Use float to support average counts.
self.edge_counts = counts.astype(float)
# shape: (n, n, k)
# axes: (initialNode, node, symbol)
# Start with uniform prior.
self.edge_alphas = np.zeros(counts.shape, dtype=float) + 1
self._update_node_alphacounts()
# shape: (n,)
# axes: (initial node,)
# Each element is the final node.
self.final_node = final
# shape: (nInitial,)
# axes: (initial node,)
# Each element is a valid initial node.
self.valid_initial_nodes = np.array(np.nonzero(final != -1)[0])
# Eventually, we will need to determine which nodes have edges that
# are to be inferred. Presently, this is every node since we cannot
# have fixed edges with this algorithm. This will affect self.temp.
# shape: (nNodes, L+1)
# axes: (initialNode, time)
self.node_paths = node_paths
# The first row is for numerator terms
# The second row is for denominator terms
shape = (2, len(self.edges) + len(self.nodes))
self._temp = np.empty(shape, dtype=float)
self.nNodes = tmatrix.shape[0]
self.nSymbols = tmatrix.shape[1]
self.nInitial = len(self.valid_initial_nodes)
self.nEdges = self.edges.shape[0]
def _update_node_alphacounts(self, alpha=True, counts=True):
"""
Recalculates `node_alpha` and `node_counts`.
This must be called any time `edge_alpha` or `edge_counts` is updated.
They are used to calculate the evidence.
Practically, the node counts are the number of times each node was
visited by some symbol. Effectively:
node_count(initial_node, node)
= \sum_{symbol} edge_count(initialNode, node, symbol)
"""
# axes: (initialNode, node)
# Each element is the count/alpha value.
# Recall edge_counts and edge_alphas have:
# shape: (n, n, k)
# axes: (initialNode, node, symbol)
# For the counts, if an edge was not traversed, then its count is
# zero and will not affect the sum along axis=2. When we consider
# the alphas, we must make sure that the alphas corresponding to
# nonedges (assuming incomplete DFAs) do not effect the node alpha,
# that is, the sum along axis=2. So we exclude nonedges from the sum.
# This means the minimum node alpha for every (initial node, node) pair
# is 1, even for nodes which have no edges that need to be inferred.
# However, this is not a problem since algorithms, like the evidence,
# will not query for those alpha values (since they use self.edges).
#
# The reason it is done this way is to simplify the data structure.
# Technically, you only need priors for edges that are to be inferred.
# As of now, the implementation is that these arrays will have fixed
# size, no matter how many edges need to be inferred. An alternative
# way to do this is to make axis=1 sparse and with size equal to the
# number of edges to be inferred. We would then need to use a lookup to
# match indexes along axis=1 to the edges.
if alpha:
condition = self.tmatrix != -1
self.node_alphas = np.where(condition, self.edge_alphas, 0).sum(axis=2)
if counts:
self.node_counts = self.edge_counts.sum(axis=2)
def add_counts_from(self, data):
"""
Adds additional counts from `data`.
"""
# For each symbol, add the count and update the final node.
for symbol in data:
for initial_node in self.valid_initial_nodes:
final_node = self.final_node[initial_node]
self.final_node[initial_node] = self.tmatrix[final_node, symbol]
self.edge_counts[initial_node, final_node, symbol] += 1
self.valid_initial_nodes = np.array(np.nonzero(self.final_node != -1)[0])
self._update_node_alphacounts()
def log_evidence(self, initial_node):
"""
Returns the log evidence of the data using `initial_node` as the initial node.
Parameters
----------
initial_node : int
An initial node.
Returns
-------
log_evid : float
The base-2 log evidence of the data given the initial node. When
its value is -inf, then it is not possible to generate the given
data from the initial node. When its value is 0, then the given
data is the only possible data that could be generated from the
initial node.
"""
if self.final_node[initial_node] == -1:
# Then the data cannot be generated by this node.
#
# The form we use for the likelihood is valid only if the
# probability of the data is nonzero. The reason is that it
# requires edge counts for every edge, and we only obtain counts on
# allowed edges. We could, alternatively, work with complete DFAs,
# and then we *would* have counts for transitions following a
# forbidden transition. In this case, the transition matrix would
# have zero entries equal to -1 and one of the states would be the
# garbage state. But this doesn't work for other reasons. See the
# module docstring.
log_evid = -np.inf
else:
from scipy.special import gammaln
# shape: (2, nEdges + nNodes)
temp = self._temp
# It is no problem to iterate through nodes which only have
# one edge, since the edge and node counts/alphas will cancel out.
# Once we allow nodes with fixed probs, we will need to iterate
# only through inferred edges and nodes with inferred edges.
# Now iterate through every edge (u, x)
edges = self.edges
nEdges = self.nEdges
ealphas = self.edge_alphas
ecounts = self.edge_counts
for i in range(nEdges):
u = edges[i, 0]
x = edges[i, 1]
temp[0, i] = ealphas[initial_node, u, x] + \
ecounts[initial_node, u, x]
temp[1, i] = ealphas[initial_node, u, x]
# Similarly, iterate through every node (u, *)
nalphas = self.node_alphas
ncounts = self.node_counts
for i in range(self.nNodes):
temp[0, i + nEdges] = nalphas[initial_node, i]
temp[1, i + nEdges] = nalphas[initial_node, i] + \
ncounts[initial_node, i]
gammaln(temp, temp)
temp[1] *= -1
log_evid = temp.sum()
# Return base-2 logarithms.
return log_evid / np.log(2)
def log_evidence_array(self):
"""
Returns an array of the log evidence of each node.
"""
nNodes = self.nNodes
log_evid = np.empty(nNodes)
for i in range(nNodes):
log_evid[i] = self.log_evidence(i)
return log_evid
def sample_uhmm(self, initial_node, size=None, prng=None):
"""
Returns a uHMM sampled from the posterior.
Parameters
----------
initial_node : int
The initial node.
size : int
The number of uHMMs to return.
prng : np.RandomState
A pseudorandom number generator, compatible with NumPy RandomState.
Returns
-------
trans : NumPy array
The transition probabilities of the uHMM. If `size` is None, then
return a single transition matrix, shape (n, k). Otherwise, return
`size` transition matrices in an array of shape (`size`, n, k).
Raises
------
Exception
If `initial_node` is not a valid initial node.
Notes
-----
The final node can be obtained from self.final_node[initial_node].
"""
if prng is None:
prng = self.prng
final_node = self.final_node[initial_node]
if final_node == -1:
raise InvalidInitialNode(initial_node)
post = self.edge_alphas[initial_node] + self.edge_counts[initial_node]
condition = self.tmatrix != -1
if size is None:
shape = (1,) + self.tmatrix.shape
else:
shape = (size,) + self.tmatrix.shape
trans = np.zeros(shape, dtype=float)
for n in range(shape[0]):
for i in range(shape[1]):
cond = condition[i]
trans[n, i, cond] = prng.dirichlet(post[i, cond])
if size is None:
trans = trans[0]
return trans
def pm_uhmm(self, initial_node):
"""
Returns the posterior mean uHMM for the specified initial node.
Parameters
----------
initial_node : int
The initial node.
Returns
-------
trans : NumPy array
The transition probabilities of the uHMM.
Raises
------
InvalidInitialNode
If `initial_node` is not a valid initial node.
Notes
-----
The final node can be obtained from self.final_node[initial_node].
"""
final_node = self.final_node[initial_node]
if final_node == -1:
raise InvalidInitialNode(initial_node)
# This is a vectorized version of pm_edge_probability().
# An edge is a node and symbol: s, x
# alpha(s, x|s_i) + counts(s, x|s_i)
trans = self.edge_alphas[initial_node] + \
self.edge_counts[initial_node]
# Now, we divide each row of trans by its normalization constant:
#
# \sum_x (alpha(s, x | s_i) + counts(s, x | s_i))
#
# The node_* arrays have row/cols (initial_node, node). So we need
# to associate their columns to the rows of trans. This is achieved
# by dividing trans by a column vector. Before the [:, np.newaxis],
# we have arrays of shape (n,). Afterwards, we have shape (n,1)
trans /= (self.node_alphas[initial_node] + \
self.node_counts[initial_node])[:, np.newaxis]
# It is necessary to explicitly mark forbidden transitions as having
# zero probability since the alphas are nonzero for all transitions.
condition = self.tmatrix == -1
trans[condition] = 0
return trans
def pm_uhmm_array(self):
"""
Returns an array of the posterior mean uHMMs.
"""
uhmms = np.zeros((self.nInitial, self.nNodes, self.nSymbols))
for i, initial_node in enumerate(self.valid_initial_nodes):
uhmms[i] = self.pm_uhmm(initial_node)
return uhmms
def _ntm(self, trans):
n = trans.shape[0]
ntm = np.zeros((n,n), dtype=float)
edges = self.edges
tmatrix = self.tmatrix
for i in range(len(edges)):
u = edges[i, 0]
x = edges[i, 1]
v = tmatrix[u, x]
ntm[u, v] += trans[u, x]
return ntm
def get_updated_prior(self):
"""
Returns a new DirichletDistribution that incorporates observed counts.
"""
new = deepcopy(self)
# Transfer edge counts to edge alphas.
new.edge_alphas += new.edge_counts
new.edge_counts *= 0
new._update_node_alphacounts()
# To sample from the posterior, P( \theta | D, \sigma) we must keep the
# same valid_initial_nodes. Note that the edge counts are zero in the
# updated posterior. This suggests that the final_nodes should be
# equal to the valid_initial_nodes since there is no data (e.g. no
# edge counts). But doing this will not allow us to properly add new
# since we *must* know the final state from all data seen (even if
# the counts in the updated prior are now zero).
return new
def predictive_probability(self, x, initial_node):
"""
Returns the mean predictive probability of `x` given `initial_node`.
That is, we calculate::
\Pr(x | D, \sigma) = \int \Pr( x | D, \theta, \sigma)
\Pr( \theta | D, \sigma) d \theta
This is a calculation from the posterior predictive distribution.
Parameters
----------
x : iterable
The new data used to calculate the predictive probability.
initial_node : int
The initial node.
Returns
-------
p : float
The base-e logarithm of the mean predictive probability of `x`.
Raises
------
InvalidInitialNode
If `initial_node` is not a valid initial node.
"""
new = self.get_updated_prior()
new.add_counts_from(x)
return new.log_evidence(initial_node)
class DirichletDistributionCP(DirichletDistribution):
"""
A Dirichlet distribution for Cartesian product inference.
Importantly, the node/edge alpha and counts are not directly used to
determine the posterior without first transforming them into the
constituent parts of the Cartesian product.
"""
### Public
nodes = None
symbols = None
final_node = None
valid_initial_nodes = None
node_paths = None
nMatrices = None
nNodes = None
nSymbols = None
nInitial = None
nEdges = None
prng = None
### Private
tmatrices = None
tmatrix = None
edges = None
_temp = None
def __init__(self, tmatrices, data=None, node_path=False, prng=None, out_arrays=None):
tmatrix = self._build_tmatrix(tmatrices, data)
base = super(DirichletDistributionCP, self)
base.__init__(tmatrix, data, node_path, prng, out_arrays)
def _build_tmatrix(self, tmatrices, data):
# list of length m
# elements are arrays of shape: (n_i, k_i)
# axes: (node, symbol) for the ith tmatrix.
# Each element is the next node.
self.tmatrices = tmatrices
self.nMatrices = len(tmatrices)
self.nNodes_array = np.array([tmatrix.shape[0] for tmatrix in tmatrices])
nNodes = np.prod(self.nNodes_array)
self.nodes = np.arange(nNodes)
self.node_tuples = list(product(*[range(n) for n in self.nNodes_array]))
self.node_tuples_index = dict(zip(self.node_tuples, self.nodes))
self.nSymbols_array = np.array([tmatrix.shape[1] for tmatrix in tmatrices])
nSymbols = np.prod(self.nSymbols_array)
self.symbols = np.arange(nSymbols)
self.symbol_tuples = list(product(*[range(n) for n in self.nSymbols_array]))
self.symbol_tuples_index = dict(zip(self.symbol_tuples, self.symbols))
shape = np.array([m.shape for m in self.tmatrices]).prod(axis=0)
tmatrix = np.zeros(shape, dtype=int) - 1
# Quick hack for now...generate the data for each tmatrix.
# This requires a scan of the data for each tmatrix. Slow.
# In principle, we can generate the counts/alphas with one scan,
# and then propagate these values through summations to the counts
# and alphas for each individual tmatrix.
self.dd = []
symbols = self.symbol_tuples
for i,m in enumerate(tmatrices):
if data is not None:
data_ = np.array([symbols[sym][i] for sym in data])
else:
data_ = None
self.dd.append( DirichletDistribution(m, data_) )
for edges in product(*[dd.edges for dd in self.dd]):
v = tuple(self.tmatrices[i][u, x] for i, (u, x) in enumerate(edges))
u, x = zip(*edges)
uu = self.node_tuples_index[u]
vv = self.node_tuples_index[v]
xx = self.symbol_tuples_index[x]
tmatrix[uu, xx] = vv
return tmatrix
def log_evidence(self, initial_node):
"""
Returns the log evidence of the data using `initial_node` as the initial node.
Parameters
----------
initial_node : int
An initial node.
Returns
-------
log_evid : float
The base-2 log evidence of the data given the initial node. When
its value is -inf, then it is not possible to generate the given
data from the initial node. When its value is 0, then the given
data is the only possible data that could be generated from the
initial node.
"""
base = 2
ops = dit.math.get_ops(base)
node = self.node_tuples[initial_node]
log_evids = np.array([self.dd[i].log_evidence(node[i])
for i in range(self.nMatrices)])
log_evid = ops.mult_reduce(log_evids)
return log_evid
def sample_uhmm(self, initial_node, size=None, prng=None):
"""
Returns a uHMM sampled from the posterior.
Parameters
----------
initial_node : int
The initial node.
size : int
The number of uHMMs to return.
prng : np.RandomState
A pseudorandom number generator, compatible with NumPy RandomState.
Returns
-------
trans : NumPy array
The transition probabilities of the uHMM. If `size` is None, then
return a single transition matrix, shape (n, k). Otherwise, return
`size` transition matrices in an array of shape (`size`, n, k).
Raises
------
Exception
If `initial_node` is not a valid initial node.
Notes
-----
The final node can be obtained from self.final_node[initial_node].
"""
if prng is None:
prng = self.prng
final_node = self.final_node[initial_node]
if final_node == -1:
raise InvalidInitialNode(initial_node)
inodes = self.node_tuples[initial_node]
uhmms = [self.dd[i].sample_uhmm(inodes[i], prng=prng)
for i in range(self.nMatrices)]
trans = uhmms[0]
for uhmm in uhmms[1:]:
trans = np.kron(trans, uhmm)
return trans
def pm_uhmm(self, initial_node):
"""
Returns the posterior mean uHMM for the specified initial node.
Parameters
----------
initial_node : int
The initial node.
Returns
-------
trans : NumPy array
The transition probabilities of the uHMM.
Raises
------
Exception
If `initial_node` is not a valid initial node.
Notes
-----
The final node can be obtained from self.final_node[initial_node].
"""
final_node = self.final_node[initial_node]
if final_node == -1:
raise InvalidInitialNode(initial_node)
inodes = self.node_tuples[initial_node]
pm_uhmms = [self.dd[i].pm_uhmm(inodes[i])
for i in range(self.nMatrices)]
trans = pm_uhmms[0]
for pm_uhmm in pm_uhmms[1:]:
trans = np.kron(trans, pm_uhmm)
return trans
class Infer(object):
"""
New methods are those which require a distribution over start nodes.
"""
prng = None
posterior = None
inode_prior = None
inode_posterior = None
# The final node distribution is a deterministic function of the initial
# node posterior distribution. It is a "posterior". For the prior, the
# fnode_prior would be equal to inode_prior, and so, we do not include it
# here.
fnode_dist = None
_nodedist_class = dit.ScalarDistribution
_symboldist_class = dit.ScalarDistribution
_posterior_class = DirichletDistribution
def __init__(self, tmatrix, data=None, inode_prior=None, node_path=False, prng=None, out_arrays=None, options=None):
"""
inode_prior is the initial node prior distribution.
"""
# Allow the user to customize the classes used internally.
if options is not None:
attrs = ['nodedist_class', 'symboldist_class', 'posterior_class']
for attr in attrs:
_attr = '_' + attr
setattr(self, _attr, options.get(attr, getattr(self, _attr)))
if prng is None:
prng = np.random.RandomState()
self.prng = prng
self.posterior = self._posterior_class(
tmatrix, data, node_path, self.prng, out_arrays
)
self._inode_init(inode_prior)
self._fnode_init()
def _inode_init(self, inode_prior):
#
# Set up initial node prior distribution.
#
if inode_prior is None:
outcomes = self.posterior.nodes
n = self.posterior.nNodes
pmf = [1 / n] * n
inode_prior = self._nodedist_class(outcomes, pmf)
else:
# Assumes:
# 1) the distribution is normalized
# 2) sample space is range(n)
if inode_prior.is_log():
inode_prior.set_base('linear')
# If the initial node dist does not assign positive probability to
# any of the valid initial nodes, then the evidence (averaged over
# the prior of nodes) will be 0, and the posterior over nodes is
# not defined. So we make sure that some probability is assigned
# to at least one valid initial node.
zero = inode_prior.ops.zero
for node in self.posterior.valid_initial_nodes:
if inode_prior[node] > zero:
break
else:
msg = "`inode_prior` does not assign probability to a valid node."
raise Exception(msg)
# There is no reason to make it sparse, except to match the posterior.
inode_prior.make_sparse()
self.inode_prior = inode_prior
#
# Calculate initial node posterior distribution. For state s and data x,
#
# p(s|x) = p(x|s) p(s) / p(x)
#
# where p(x) = \sum_s p(x|s) p(s)
#
base = 2
ops = dit.math.get_ops(base)
p_xgs = self.posterior.log_evidence_array()
# Need to use dense version of the prior's pmf
p_s = dit.copypmf(inode_prior, base=base, mode='dense')
p_sgx = ops.mult(p_xgs, p_s)
p_x = ops.add_reduce(p_sgx)
ops.mult_inplace(p_sgx, ops.invert(p_x))
# No need to sort since the prior was already sorted.
nodes = self.posterior.nodes
d = self._nodedist_class(nodes, p_sgx, base=base, sort=False)
d.set_base('linear')
d.make_sparse()
self.inode_posterior = d
def _fnode_init(self):
# This averages over initial nodes. Recall, due to unifilarity, for any
# given initial node, there is exactly one final node.
#
# p(s_f | x) = \sum_{s_i} p(s_f | x, s_i) p(s_i | x)
#
# so p(s_f | x, s_i) is equal to 1.
#
ops = dit.math.LogOperations('e')
pmf = np.zeros(self.posterior.nNodes, dtype=float)
for initial_node in self.posterior.valid_initial_nodes:
p = self.inode_posterior[initial_node]
final_node = self.posterior.final_node[initial_node]
pmf[final_node] += p
nodes = self.posterior.nodes
d = self._nodedist_class(nodes, pmf, base='linear', validate=False)
d.make_sparse()
self.fnode_dist = d
def add_counts_from(self, data):
"""
Adds additional counts from `data`.
"""
self.posterior.add_counts_from(data)
self._inode_init(self.inode_prior)
self._fnode_init()
def get_updated_prior(self):
"""
Returns a new Infer that incorporates observed counts.
"""
posterior = self.posterior
try:
self.posterior = None
new = deepcopy(self)
finally:
self.posterior = posterior
new.posterior = posterior.get_updated_prior()
# The difference here is that we must use the inode_posterior as our
# new initial distribution.
new._inode_init(self.inode_posterior)
# There is no need to reinit the fnode_dist since
# new.posterior.valid_initial_nodes and new.posterior.final_node are
# the same as in `self.posterior`.
return new
def pm_next_symbol_dist(self):
# This averages over initial nodes.
#
# p(x | D) = \sum_{s_i} p( x | D, s_i) p(s_i | D)
#
# where
#
# p(x | D, s_i) = \int dtheta p(x | theta, D, s_i) p( theta | D, s_i)
#
# p(x | theta, D, s_i) = \sum_{s_f} p( x, s_f | theta, D, s_i)
# = p( x | theta, delta(s_i, D) )
#
# but note, this last equation is not really necessary for unifilar
# HMMs because the symbol x uniquely identifies the next state s_f.
# So we have:
#
# p(x | D, s_i) = \int dtheta p(x | theta, delta(s_i, D)) p(theta | D, s_i)
#
# Thus,
#
# p(x | D, s_i) = posterior mean of edge (x, delta(s_i, D))
#
# So for each valid initial node, we grab the row from the posterior
# mean uHMM corresponding to its final node. These are the mean
# probabilities of each symbol. This gives us a matrix of shape
# (number of valid initial nodes, symbols). We construct a column
# vector of the probability of each valid initial node and multiply
# it elementwise on the rows (with broadcasting) to the mean
# probabilities. Then, we sum the rows to get the final p(x | D).
shape = (self.posterior.nInitial, self.posterior.nSymbols)
probs = np.zeros(shape, dtype=float)
# We must work with valid initial nodes since we are indexing with
# the final node.
for i, initial_node in enumerate(self.posterior.valid_initial_nodes):
pm_uhmm = self.posterior.pm_uhmm(initial_node)
final_node = self.posterior.final_node[initial_node]
probs[i] = pm_uhmm[final_node]
weights = dit.copypmf(self.inode_posterior, 'linear', 'sparse')
weights = np.array([weights]).transpose()
probs *= weights
pmf = probs.sum(axis=0)
d = self._symboldist_class(self.posterior.symbols, pmf)
d.make_sparse()
return d
def log_evidence(self, initial_node=None):
"""
Returns the log evidence of the data using `initial_node` as the initial node.
p(D | s) if initial_node is not None
\sum_s p(D|s) p(s) if initial_node is None
Parameters
----------
initial_node : int, None
An initial node. If `None`, then the expected log evidence is
returned, where the expectation is over the initial node prior
distribution.
Returns
-------
log_evid : float
The base-2 log evidence of the data.
"""
if initial_node is not None:
return self.posterior.log_evidence(initial_node)
base = 2
ops = dit.math.get_ops(base)
p_s = dit.copypmf(self.inode_prior, base=base, mode='dense')
evidences = self.posterior.log_evidence_array()
log_evid = ops.add_reduce(ops.mult(evidences, p_s))
return log_evid
def sample_uhmm(self, initial_node=None, size=None, prng=None):
"""
Returns uHMM transition matrices sampled from the posterior.
Parameters
----------
initial_node : int
The initial node. If `None`, then the initial node is sampled from
the initial node posterior distribution.
size : int
The number of uHMMs to return.
prng : np.RandomState
A pseudorandom number generator, compatible with NumPy RandomState.
Returns
-------
inodes : int or NumPy array
The initial nodes. If `size` is None, then return the integer
corresponding to the sampled initial node. Otherwise, a NumPy array
of shape (`size`,) containing the sampled initial nodes.
trans : NumPy array
The transition probabilities of the uHMM. If `size` is None, then
return a single transition matrix, shape (n, k). Otherwise, return
`size` transition matrices in an array of shape (`size`, n, k).
Raises
------
Exception
If `initial_node` is not a valid initial node.
"""
if prng is None:
prng = self.prng
single = False
if size is None:
size = 1
single = True
n, k = self.posterior.nNodes, self.posterior.nSymbols
uhmms = np.zeros((size, n, k))
if initial_node is None:
inodes = self.inode_posterior.rand(size, prng=prng)
else:
inodes = [initial_node] * size
for i, inode in enumerate(inodes):
uhmms[i] = self.posterior.sample_uhmm(inode, prng=prng)
if single:
inodes = inodes[0]
uhmms = uhmms[0]
return inodes, uhmms
# Depends on CMPy
def sample_stationary_distributions(self, n=None, prng=None):
from cmpy.math import stationary_distribution
if prng is None:
prng = self.prng
if n is None:
single = True
n = 1
else:
single = False
inodes = self.inode_posterior.rand(n, prng=prng)
# n is likely to be small...let's build it up.
pis = []
posterior = self.posterior
for initial_node in inodes:
tmatrix = posterior.sample_uhmm(initial_node, prng=prng)
ntm = posterior._ntm(tmatrix)
pi = stationary_distribution(ntm, logs=False)
pis.append(pi)
if single:
sdists = pis[0]
else:
sdists = np.array(pis)
return sdists
def predictive_probability(self, x, initial_node=None):
"""
Returns the mean predictive probability of `x` given `initial_node`.
That is, we calculate:
\Pr(x | D, \sigma) = \int \Pr( x | D, \theta, \sigma)
\Pr( \theta | D, \sigma) d \theta
This is a calculation from the posterior predictive distribution. When
`initial_node` is `None`, then we calculate:
\Pr(x | D) = \sum \Pr(x | D, \sigma) \Pr( \sigma | D)
Parameters
----------
x : iterable
The new data used to calculate the predictive probability.
initial_node : int or None
The initial node. If `None`, then the predictive probability is
averaged over the initial node posterior distribution.
Returns
-------
p : float
The base-e logarithm of the mean predictive probability of `x`.
Raises
------
InvalidInitialNode
If `initial_node` is not a valid initial node.
"""
new = self.get_updated_prior()
new.add_counts_from(x)
return new.log_evidence(initial_node)
class InferCP(Infer):
_posterior_class = DirichletDistributionCP
|
the-stack_0_7303 | from collections import OrderedDict
from functools import partial
from matplotlib.figure import Figure
from PyQt5 import QtWidgets, QtCore, QtGui
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from .model import AxesSet
from .widgets import *
class AxPositioningEditor(QtWidgets.QWidget):
"""
main widget for editing axes positions
Example:
>>> from matplotlib import pyplot as plt
>>> fig = plt.figure()
>>> w, h = fig.get_size_inches()
>>> AxPositioningEditor((w, h), bounds=[])
"""
position_codes = ['S', 'N', 'W', 'E', 'SW', 'NW', 'NE', 'SE', 'C']
position_names = [
'lower center',
'top center',
'left center',
'right center',
'lower left',
'upper left',
'upper right',
'lower right',
'center']
click_axes_data = dict(w=.3, h=.3)
def __init__(self, figsize, bounds=(), anchor='C', dpi=150):
super().__init__()
self.figsize = figsize
w, h = self.figsize
self.figure = Figure(figsize=(w, h))
self.dpi = dpi
self.settings = dict(guides=False,
guides_selected=False,
relative=True)
self.guides_subsetting_fields = []
self.axes = AxesSet(self.figure, bounds, anchor)
self.build()
self.canvas.mpl_connect('button_release_event', self.draw_axes)
self.pointing_axes = False
def build(self):
"""build the widget"""
self.setMinimumWidth(600)
self.setMinimumHeight(350)
layout = QtWidgets.QHBoxLayout(self)
layout.setContentsMargins(5, 5, 5, 5)
figure_layout = QtWidgets.QVBoxLayout()
layout.addLayout(figure_layout)
self.build_figure(figure_layout)
self.build_tools(layout)
self.msg_label = QtWidgets.QLabel()
self.msg_label.setContentsMargins(5, 5, 5, 5)
figure_layout.addWidget(self.msg_label)
self.draw()
self.set_message(None)
def build_figure(self, layout):
"""build the figure area"""
figure_scroll_area = QtWidgets.QScrollArea()
figure_scroll_area.setAlignment(QtCore.Qt.AlignCenter)
# create canvas
self.canvas = FigureCanvas(self.figure)
# update the canvas size based on the figure size
self.update_canvas_size()
figure_scroll_area.setWidget(self.canvas)
layout.addWidget(figure_scroll_area)
def build_tools(self, layout):
"""build the tools area"""
tools_widget = QtWidgets.QTabWidget()
tools_widget.setFixedWidth(320)
layout.addWidget(tools_widget)
fw = QtWidgets.QWidget()
figsize_layout = QtWidgets.QFormLayout(fw)
self.figure_fields = dict()
w, h = self.figsize
self.figure_fields['w'] = f = QtWidgets.QLineEdit('{:.2f}'.format(w))
f.setValidator(QtGui.QDoubleValidator(0, 1000, 2))
figsize_layout.addRow('Width', f)
self.figure_fields['h'] = f = QtWidgets.QLineEdit('{:.2f}'.format(h))
f.setValidator(QtGui.QDoubleValidator(0, 1000, 2))
figsize_layout.addRow('Height', f)
b = QtWidgets.QPushButton('Apply')
b.clicked.connect(self.set_figsize)
figsize_layout.addRow('', b)
tools_widget.addTab(fw, 'Figure')
tools_widget.addTab(self.build_positions_tab(), 'Positions')
w = AddAxesWidget(self.figure)
w.newbounds.connect(self.set_axes)
w.axes_added.connect(lambda x: self.add_axes_at_position(**x))
w.click_axes.connect(self.click_new_axes)
tools_widget.addTab(w, 'Add axes')
tools_widget.addTab(self.build_settings_tab(), 'Settings')
def build_settings_tab(self):
sw = QtWidgets.QWidget()
settings_layout = QtWidgets.QVBoxLayout(sw)
settings_layout.addWidget(QtWidgets.QLabel('Axes anchor'))
dropdown = QtWidgets.QComboBox()
dropdown.addItems(self.position_names)
dropdown.currentIndexChanged.connect(lambda x: self.update_anchor(self.position_codes[x]))
dropdown.setCurrentIndex(self.position_codes.index(self.axes.anchor))
settings_layout.addWidget(dropdown)
settings_layout.addWidget(hline())
cb = QtWidgets.QCheckBox('show guides')
cb.setChecked(self.settings.get('guides'))
cb.stateChanged.connect(self.set_show_guides)
settings_layout.addWidget(cb)
f = QtWidgets.QFrame()
l = QtWidgets.QVBoxLayout(f)
l.setContentsMargins(10, 5, 5, 5)
cb2 = QtWidgets.QCheckBox('for selected axes only')
cb2.setChecked(self.settings['guides_selected'])
cb2.stateChanged.connect(self.set_guides_selected)
cb2.setEnabled(self.settings['guides'])
self.guides_subsetting_fields.append(cb2)
l.addWidget(cb2)
settings_layout.addWidget(f)
cb3 = QtWidgets.QCheckBox('absolute positions (dots)')
cb3.setChecked(not self.settings['relative'])
cb3.stateChanged.connect(self.set_absolute)
settings_layout.addWidget(cb3)
settings_layout.addItem(QtWidgets.QSpacerItem(
0, 0,
QtWidgets.QSizePolicy.Maximum,
QtWidgets.QSizePolicy.Expanding))
return sw
def build_positions_tab(self):
w = QtWidgets.QWidget()
layout = QtWidgets.QVBoxLayout(w)
layout.setContentsMargins(0, 0, 0, 0)
layout.setSpacing(5)
# main buttons
button_layout = QtWidgets.QHBoxLayout()
button_layout.setContentsMargins(0, 0, 0, 0)
clear_figure_button = QtWidgets.QPushButton('Clear figure')
clear_figure_button.clicked.connect(self.clear_figure)
button_layout.addWidget(clear_figure_button)
select_all_button = QtWidgets.QPushButton('Select all')
select_all_button.clicked.connect(self.select_all_axes)
button_layout.addWidget(select_all_button)
select_none_button = QtWidgets.QPushButton('Clear selection')
select_none_button.clicked.connect(self.select_none_axes)
button_layout.addWidget(select_none_button)
layout.addLayout(button_layout)
# actions
action_layout = QtWidgets.QHBoxLayout()
layout.addLayout(action_layout)
action_layout.addItem(QtWidgets.QSpacerItem(
0, 0,
QtWidgets.QSizePolicy.Expanding,
QtWidgets.QSizePolicy.Maximum))
action_layout.addWidget(QtWidgets.QLabel('Actions'))
self.actions_dropdown = QtWidgets.QComboBox()
self.actions_dropdown.addItems(sorted(self.axes_actions.keys()))
action_layout.addWidget(self.actions_dropdown)
execute_action_button = QtWidgets.QPushButton('Apply')
execute_action_button.clicked.connect(self.execute_current_action)
action_layout.addWidget(execute_action_button)
self.axtable = AxesPositionsWidget(self.axes)
self.axtable.changed.connect(self.set_ax_position)
self.axtable.selected.connect(self.select_axes)
self.axtable.invalid_value.connect(self.reset_value)
self.axtable.moved.connect(self.move_axes)
layout.addWidget(self.axtable)
return w
def update_canvas_size(self):
w, h = self.figsize
self.figure.set_size_inches(w, h)
self.figure.set_dpi(self.dpi)
screenwidth, screenheight = w * self.dpi, h * self.dpi
self.canvas.resize(int(.5 * screenwidth), int(.5 * screenheight))
def set_figsize(self):
w = self.figure_fields['w'].text()
h = self.figure_fields['h'].text()
try:
w = float(w)
h = float(h)
except ValueError:
w, h = self.figure.get_size_inches()
self.figure_fields['w'].setText('{:.2f}'.format(w))
self.figure_fields['h'].setText('{:.2f}'.format(h))
else:
self.figsize = w, h
self.figure.set_size_inches(*self.figsize)
self.update_canvas_size()
self.draw(posfields=True)
def reset_value(self, row, col, attr):
ax = self.axes.names[row]
self.axtable.blockSignals(True)
self.axtable.item(row, col).setText('{:.3f}'.format(getattr(ax, attr)))
self.axtable.blockSignals(False)
def get_bounds(self):
"""returns a list of axes bounds as [(x, y, w, h)]"""
bounds = []
for n, a in self.axes.items():
bounds.append(a.bounds)
return bounds
def as_dict(self):
return dict(bounds=self.get_bounds(), figsize=self.figsize)
# ---------
# edit axes
# ---------
def draw_axes(self, event):
"""create an axes at the click location if self.pointing_axes is enabled"""
if self.pointing_axes:
x, y = self.figure.transFigure.inverted().transform((event.x, event.y))
a = self.add_axes_at_position(x, y, **self.click_axes_data)
self.pointing_axes = False
# clear the message widget
self.set_message(None)
def set_show_guides(self, b):
self.settings['guides'] = bool(b)
for item in self.guides_subsetting_fields:
item.setEnabled(b)
self.draw(posfields=False)
def set_guides_selected(self, b):
self.settings['guides_selected'] = bool(b)
self.draw(posfields=False)
def set_absolute(self, b):
self.settings['relative'] = not bool(b)
self.draw(posfields=True)
def click_new_axes(self, data):
self.pointing_axes = True
self.set_message('Click in the figure to place a new axes at that position')
self.click_axes_data = data
def add_axes_at_position(self, x, y, w=.4, h=.4, n=None, draw=True):
"""add axes at specified location in Figure coordinates"""
self.axes.add(x, y, w, h, apply_anchor=True)
if draw:
self.draw(posfields=True)
def add_axes(self, bounds, draw=True):
self.axes.add(*bounds)
if draw:
self.draw(posfields=True)
def set_axes(self, bounds, draw=True):
"""set several axes from a list of bounds"""
for bnd in bounds:
self.axes.add(*bnd)
if draw:
self.draw(posfields=True)
def set_ax_position(self, row, attr, value):
"""
set the position of an axes from the attribute name
:param row: row index of the axes whose position is being set
:param attr: name of the position attribute
:param value: value of the position attribute
"""
axname = self.axes.names[row]
self.axes.set_property(str(axname), attr, value, relative=self.settings['relative'])
self.draw(posfields=True)
def delete_axes(self, name, redraw=True):
"""delete an axes from the editor"""
self.axes.pop(str(name))
if redraw:
self.draw(posfields=True)
def move_axes(self, rows, ind):
if ind in rows or ind-1 in rows:
return
names = self.axes.names
def keyfn(v):
if v in rows:
return 1
elif v < ind:
return 0
else:
return 2
indices = sorted(list(range(len(names))), key=keyfn)
self.axes.change_order([names[i] for i in indices])
self.draw(posfields=True)
# -----------
# update gui
# -----------
def set_message(self, msg, level='INFO'):
"""
set a message in the message window
hide the messages if msg is None
:param msg: message text
:param level: level (see logging levels) of the message
"""
if msg is None:
self.msg_label.setText('')
self.msg_label.hide()
else:
self.msg_label.show()
styles = dict(
DEBUG='background-color: rgb(100, 250, 100)',
INFO='',
WARNING='background-color: rgb(250, 230, 150)',
ERROR='background-color: rgb(255, 150, 100)',
)
self.msg_label.setStyleSheet(styles[level])
self.msg_label.setText(msg)
def add_message(self, msg):
"""add to the end of the message (keep formatting)"""
txt = self.msg_label.text()
self.msg_label.setText(txt+'\n'+msg)
def draw(self, posfields=False):
"""redraw the contents"""
self.figure.clear()
for name, a in self.axes.items():
a.format_placeholder(name)
self.figure.add_axes(a)
if self.settings['guides']:
self.axes.plot_guides(selected=self.settings['guides_selected'],
relative=self.settings['relative'])
self.canvas.draw_idle()
if posfields:
self.axtable.clear()
self.axtable.fill(self.axes, relative=self.settings['relative'])
def update_anchor(self, pos, redraw=True):
"""set the position reference anchor of the axes to a new location"""
for name, a in self.axes.items():
a.set_anchor(pos)
self.axes.anchor = pos
if redraw:
self.draw(posfields=True)
# ------------------------------------
# selecting axes and executing actions
# ------------------------------------
def execute_current_action(self):
if not self.axes.any_selected():
return
action = self.actions_dropdown.currentText()
fn = getattr(self, self.axes_actions[str(action)])
fn(self.axes.selected_names, self.axes.selected)
def select_axes(self, key, b=True):
self.axes.select(str(key), b)
self.draw()
def clear_figure(self):
self.figure.clear()
for k in list(self.axes.keys()):
self.delete_axes(k, redraw=False)
self.draw(posfields=True)
def select_all_axes(self):
self.axes.select_all()
self.draw(posfields=True)
def select_none_axes(self):
self.axes.select_none()
self.draw(posfields=True)
# --------------
# Define actions
# --------------
axes_actions = {
'delete': 'delete_axes_objects',
'align X': 'axes_equal_x',
'align Y': 'axes_equal_y',
'equal width': 'axes_equal_w',
'equal height': 'axes_equal_h',
'equal aspect': 'axes_equal_aspect',
'join': 'axes_join',
'split': 'axes_split'
}
def delete_axes_objects(self, names, axes, redraw=True):
for n in names:
self.axes.pop(n)
if redraw:
self.draw(posfields=True)
def axes_equal_x(self, names, axes, redraw=True):
x = axes.pop(0).x
for a in axes:
a.x = x
if redraw:
self.draw(posfields=True)
def axes_equal_y(self, names, axes, redraw=True):
y = axes.pop(0).y
for a in axes:
a.y = y
if redraw:
self.draw(posfields=True)
def axes_equal_w(self, names, axes, redraw=True):
w = axes.pop(0).w
for a in axes:
a.w = w
if redraw:
self.draw(posfields=True)
def axes_equal_h(self, names, axes, redraw=True):
h = axes.pop(0).h
for a in axes:
a.h = h
if redraw:
self.draw(posfields=True)
def axes_equal_aspect(self, names, axes, redraw=True):
A = axes.pop(0).aspect
for a in axes:
a.aspect = A
if redraw:
self.draw(posfields=True)
def axes_join(self, names, axes, redraw=True):
"""join axes within bounding box of all selected axes"""
# store anchor
anchor = self.axes.anchor
# update anchor to lower left during processing
self.update_anchor('SW', redraw=False)
# determine bounding box
xll = min(a.x for a in axes)
yll = min(a.y for a in axes)
xur = max(a.x + a.w for a in axes)
yur = max(a.y + a.h for a in axes)
# redefine first axes position to bounding box
axes[0].set_position((xll, yll, xur-xll, yur-yll))
# delete other axes
self.delete_axes_objects(names[1:], axes[1:], redraw=False)
# update the anchor to the original
        self.update_anchor(anchor, redraw=redraw)
def axes_split(self, names, axes, redraw=True):
"""
split axes in two parts based on a given ratio
"""
def show_error(msg):
m = QtWidgets.QMessageBox()
m.setText(msg)
m.exec()
# create dialog to input ratio, spacing and h/v split
dialog = SplitDialog()
if dialog.exec() != QtWidgets.QDialog.Accepted:
return
ratio, spacing, horizontal = dialog.get_data()
if ratio < 0 or ratio > 1:
show_error('ratio must be between 0 and 1')
return
for a in axes:
try:
new_bounds = a.split(ratio, spacing, wsplit=horizontal)
except ValueError as e:
show_error(str(e))
return
else:
# create 2nd axes and copy selected state
new_ax = self.axes.add(*new_bounds, anchor=a.get_anchor())
new_ax._selected = a._selected
if redraw:
self.draw(posfields=True)
|
the-stack_0_7304 | import os
from configparser import ConfigParser
from nipype.utils import config as nuc
from pkg_resources import resource_filename
def get_fitlins_config():
"""Construct Nipype configuration object with precedence:
- Local config (``./nipype.cfg``)
- Global config (``$HOME/.nipype/nipype.cfg`` or ``$NIPYPE_CONFIG_DIR/nipype.cfg``)
- FitLins config (``<fitlins_install_dir>/data/nipype.cfg``)
- Nipype default config (defined in ``nipype/utils/config.py``)
"""
config = nuc.NipypeConfig()
config.set_default_config()
fitlins_config_file = resource_filename('fitlins', 'data/nipype.cfg')
global_config_file = os.path.join(
os.path.expanduser(os.getenv("NIPYPE_CONFIG_DIR", default="~/.nipype")), "nipype.cfg"
)
local_config_file = "nipype.cfg"
fitlins_conf = ConfigParser()
fitlins_conf.read([fitlins_config_file, global_config_file, local_config_file])
config.update_config(fitlins_conf)
return config
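# Minimal usage sketch (an addition, not part of the original module): the
# returned object is a NipypeConfig, so values are read with .get(section, key).
# The option names below are standard Nipype settings, shown only as examples.
if __name__ == "__main__":
    cfg = get_fitlins_config()
    print(cfg.get("execution", "crashfile_format"))
    print(cfg.get("logging", "workflow_level"))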
|
the-stack_0_7306 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from .. import models
class NetworkInterfaceLoadBalancersOperations(object):
"""NetworkInterfaceLoadBalancersOperations operations.
    You should not instantiate this class directly, but create a Client instance that will create it for you and attach it as an attribute.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar api_version: Client API version. Constant value: "2020-07-01".
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2020-07-01"
self.config = config
def list(
self, resource_group_name, network_interface_name, custom_headers=None, raw=False, **operation_config):
"""List all load balancers in a network interface.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_interface_name: The name of the network interface.
:type network_interface_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of LoadBalancer
:rtype:
~azure.mgmt.network.v2020_07_01.models.LoadBalancerPaged[~azure.mgmt.network.v2020_07_01.models.LoadBalancer]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def prepare_request(next_link=None):
if not next_link:
# Construct URL
url = self.list.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
return request
def internal_paging(next_link=None):
request = prepare_request(next_link)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
header_dict = None
if raw:
header_dict = {}
deserialized = models.LoadBalancerPaged(internal_paging, self._deserialize.dependencies, header_dict)
return deserialized
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}/loadBalancers'}
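# Hedged usage sketch (not part of the generated client): the method returns a
# paged iterator, so callers typically loop over it directly. Client
# construction (credentials, subscription id, resource names) is assumed here.
#
#   client = NetworkManagementClient(credentials, subscription_id)
#   for lb in client.network_interface_load_balancers.list('my-rg', 'my-nic'):
#       print(lb.name)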
|
the-stack_0_7308 | import inspect
def get_classname(o):
""" Returns the classname of an object r a class
:param o:
:return:
"""
if inspect.isclass(o):
target = o
elif callable(o):
target = o
else:
target = o.__class__
try:
return target.__qualname__
except AttributeError: # pragma: no cover
return target.__name__
def fqn(o):
"""Returns the fully qualified class name of an object or a class
:param o: object or class
:return: class name
>>> import django_db_logging
>>> fqn('str')
Traceback (most recent call last):
...
ValueError: Invalid argument `str`
>>> class A(object):
... def method(self):
... pass
>>> str(fqn(A))
'django_db_logging.utils.A'
>>> str(fqn(A()))
'django_db_logging.utils.A'
>>> str(fqn(A.method))
'django_db_logging.utils.A.method'
>>> str(fqn(django_db_logging))
'django_db_logging'
"""
parts = []
if hasattr(o, '__module__'):
parts.append(o.__module__)
parts.append(get_classname(o))
elif inspect.ismodule(o):
return o.__name__
if not parts:
raise ValueError("Invalid argument `%s`" % o)
return ".".join(parts)
|
the-stack_0_7310 | import os
from conan.tools.build import build_jobs
from conan.tools.files.files import load_toolchain_args
from conan.tools.microsoft.subsystems import subsystem_path, deduce_subsystem
from conans.client.build import join_arguments
class Autotools(object):
def __init__(self, conanfile, namespace=None):
self._conanfile = conanfile
toolchain_file_content = load_toolchain_args(self._conanfile.generators_folder,
namespace=namespace)
self._configure_args = toolchain_file_content.get("configure_args")
self._make_args = toolchain_file_content.get("make_args")
def configure(self, build_script_folder=None):
"""
http://jingfenghanmax.blogspot.com.es/2010/09/configure-with-host-target-and-build.html
https://gcc.gnu.org/onlinedocs/gccint/Configure-Terms.html
"""
source = self._conanfile.source_folder
if build_script_folder:
source = os.path.join(self._conanfile.source_folder, build_script_folder)
configure_cmd = "{}/configure".format(source)
subsystem = deduce_subsystem(self._conanfile, scope="build")
configure_cmd = subsystem_path(subsystem, configure_cmd)
cmd = "{} {}".format(configure_cmd, self._configure_args)
self._conanfile.output.info("Calling:\n > %s" % cmd)
self._conanfile.run(cmd)
def make(self, target=None):
make_program = self._conanfile.conf.get("tools.gnu:make_program",
default="mingw32-make" if self._use_win_mingw() else "make")
str_args = self._make_args
jobs = ""
if "-j" not in str_args and "nmake" not in make_program.lower():
njobs = build_jobs(self._conanfile)
if njobs:
jobs = "-j{}".format(njobs)
command = join_arguments([make_program, target, str_args, jobs])
self._conanfile.run(command)
def install(self):
self.make(target="install")
def _use_win_mingw(self):
if hasattr(self._conanfile, 'settings_build'):
os_build = self._conanfile.settings_build.get_safe('os')
else:
os_build = self._conanfile.settings.get_safe("os")
if os_build == "Windows":
compiler = self._conanfile.settings.get_safe("compiler")
sub = self._conanfile.settings.get_safe("os.subsystem")
if sub in ("cygwin", "msys2", "msys") or compiler == "qcc":
return False
else:
if self._conanfile.win_bash:
return False
return True
return False
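# Illustrative sketch (an addition, not part of this module): within a Conan
# recipe the helper is usually driven from build(); the recipe class, generators
# and import path are assumptions and depend on the Conan version in use.
#
#   class PkgConan(ConanFile):
#       generators = "AutotoolsToolchain", "AutotoolsDeps"
#       def build(self):
#           autotools = Autotools(self)   # helper defined above
#           autotools.configure()         # runs ./configure with toolchain args
#           autotools.make()              # runs make with the configured jobs
#           autotools.install()           # runs make install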
|
the-stack_0_7311 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Class to hold a library of OpDefs and use it to create Brain operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import op_def_pb2
from tensorflow.core.framework import tensor_pb2
from tensorflow.core.framework import tensor_shape_pb2
from tensorflow.core.framework import types_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import compat
from tensorflow.python.util import tf_contextlib
def _Attr(op_def, name):
for attr in op_def.attr:
if attr.name == name:
return attr
raise TypeError("Inconsistent OpDef for '%s', missing attr '%s'" %
(op_def.name, name))
def _AttrValue(attr_protos, name):
if name in attr_protos:
return attr_protos[name]
raise TypeError("Inconsistent OpDef, missing attr '%s' from '%s'." %
(name, attr_protos))
def _SatisfiesTypeConstraint(dtype, attr_def, param_name):
if attr_def.HasField("allowed_values"):
allowed_list = attr_def.allowed_values.list.type
if dtype not in allowed_list:
raise TypeError(
"Value passed to parameter '%s' has DataType %s not in list of "
"allowed values: %s" %
(param_name, dtypes.as_dtype(dtype).name,
", ".join(dtypes.as_dtype(x).name for x in allowed_list)))
def _IsListParameter(arg):
if arg.number_attr:
return True
elif arg.type_list_attr:
return True
return False
def _NumTypeFields(arg):
num = 0
if arg.type != types_pb2.DT_INVALID: num += 1
if arg.type_attr: num += 1
if arg.type_list_attr: num += 1
return num
def _IsListValue(v):
return isinstance(v, (list, tuple))
def _Flatten(l):
"""Converts [1, 2, [3, 4], [5]] to [1, 2, 3, 4, 5]."""
# [1, 2, [3, 4], [5]] -> [[1], [2], [3, 4], [5]]
l_of_l = [x if _IsListValue(x) else [x] for x in l]
# [[1], [2], [3, 4], [5]] -> [1, 2, 3, 4, 5]
return [item for sublist in l_of_l for item in sublist]
def _Restructure(l, structure):
"""Returns the elements of list l structured according to the given structure.
A structure is represented by a list whose elements are either
`None` or a non-negative integer. `None` corresponds to a single
element in the output list, and an integer N corresponds to a nested
list of length N.
The function returns a data structure whose shape is given by
`structure`, and whose elements are taken from `l`. If `structure`
is a singleton, the function returns the single data structure
implied by the 0th element of `structure`. For example:
_Restructure(["foo", "bar", "baz", "qux"], [None, 2, None])
-> ["foo", ["bar", "baz"], "qux"]
_Restructure(["foo"], [None]) -> "foo"
_Restructure(["foo"], [1]) -> ["foo"]
_Restructure([], [0]) -> []
Args:
l: A list.
structure: A list whose elements are either `None` or a non-negative
integer.
Returns:
The elements of `l`, restructured according to `structure`. If
`structure` is a list of length 1, this function returns the
single data structure implied by `structure[0]`.
"""
result = []
current_index = 0
for element in structure:
if element is None:
result.append(l[current_index])
current_index += 1
else:
result.append(l[current_index:current_index+element])
current_index += element
if len(result) == 1:
return result[0]
else:
return tuple(result)
def _MakeFloat(v, arg_name):
if not isinstance(v, compat.real_types):
raise TypeError("Expected float for argument '%s' not %s." %
(arg_name, repr(v)))
return float(v)
def _MakeInt(v, arg_name):
if isinstance(v, six.string_types):
raise TypeError("Expected int for argument '%s' not %s." %
(arg_name, repr(v)))
try:
return int(v)
except (ValueError, TypeError):
raise TypeError("Expected int for argument '%s' not %s." %
(arg_name, repr(v)))
def _MakeStr(v, arg_name):
if not isinstance(v, compat.bytes_or_text_types):
raise TypeError("Expected string for argument '%s' not %s." %
(arg_name, repr(v)))
return compat.as_bytes(v) # Convert unicode strings to bytes.
def _MakeBool(v, arg_name):
if not isinstance(v, bool):
raise TypeError("Expected bool for argument '%s' not %s." %
(arg_name, repr(v)))
return v
def _MakeType(v, attr_def):
try:
v = dtypes.as_dtype(v).base_dtype
except TypeError:
raise TypeError("Expected DataType for argument '%s' not %s." %
(attr_def.name, repr(v)))
i = v.as_datatype_enum
_SatisfiesTypeConstraint(i, attr_def, param_name=attr_def.name)
return i
def _MakeShape(v, arg_name):
"""Convert v into a TensorShapeProto."""
# Args:
# v: A TensorShapeProto, a list of ints, or a tensor_shape.TensorShape.
# arg_name: String, for error messages.
# Returns:
# A TensorShapeProto.
if isinstance(v, tensor_shape_pb2.TensorShapeProto):
for d in v.dim:
if d.name:
logging.warning("Warning: TensorShapeProto with a named dimension: %s",
str(v))
break
return v
try:
return tensor_shape.as_shape(v).as_proto()
except TypeError as e:
raise TypeError("Error converting %s to a TensorShape: %s" % (arg_name, e))
except ValueError as e:
raise ValueError("Error converting %s to a TensorShape: %s" % (arg_name, e))
def _MakeTensor(v, arg_name):
"""Ensure v is a TensorProto."""
if isinstance(v, tensor_pb2.TensorProto):
return v
raise TypeError(
"Don't know how to convert %s to a TensorProto for argument '%s'" %
(repr(v), arg_name))
def _MakeFunc(v, arg_name):
"""Ensure v is a func."""
if isinstance(v, attr_value_pb2.NameAttrList):
return v
fn_attr = attr_value_pb2.NameAttrList()
if isinstance(v, compat.bytes_or_text_types):
fn_attr.name = v
elif hasattr(v, "add_to_graph"):
v.add_to_graph(ops.get_default_graph())
fn_attr.name = v.name
else:
raise TypeError("Don't know how to convert {} to a func for "
"argument {}".format(v, arg_name))
return fn_attr
class _OpInfo(object):
"""All per-Op state we would like to precompute/validate."""
def __init__(self, op_def):
self.op_def = op_def
# TODO(josh11b): SWIG the ValidateOpDef() function from C++ and call it
# here, instead of these checks.
for arg in list(op_def.input_arg) + list(op_def.output_arg):
num_type_fields = _NumTypeFields(arg)
if num_type_fields != 1:
raise TypeError("Arg '%s' of '%s' must have one type field not %d" %
(arg.name, op_def.name, num_type_fields))
if arg.type_attr:
attr_type = _Attr(op_def, arg.type_attr).type
if attr_type != "type":
raise TypeError("Attr '%s' of '%s' used as a type_attr "
"but has type %s" %
(arg.type_attr, op_def.name, attr_type))
if arg.type_list_attr:
attr_type = _Attr(op_def, arg.type_list_attr).type
if attr_type != "list(type)":
raise TypeError(
"Attr '%s' of '%s' used as a type_list_attr but has type %s" %
(arg.type_attr, op_def.name, attr_type))
if arg.number_attr:
attr_type = _Attr(op_def, arg.number_attr).type
if attr_type != "int":
raise TypeError(
"Attr '%s' of '%s' used as a number_attr but has type %s" %
(arg.number_attr, op_def.name, attr_type))
# pylint: disable=g-doc-return-or-yield
@tf_contextlib.contextmanager
def _MaybeColocateWith(inputs):
"""A context manager for (maybe) colocating with a list of input tensors.
Args:
inputs: A list of `Tensor` or `Operation` objects.
Returns:
A context manager.
"""
if not inputs:
yield
else:
# NOTE(mrry): The `ops.colocate_with()` function accepts only a single
# op or tensor, so we create one context manager per element in the list.
with ops.colocate_with(inputs[0]), _MaybeColocateWith(inputs[1:]):
yield
# pylint: enable=g-doc-return-or-yield
class OpDefLibrary(object):
"""Holds a collection of OpDefs, can add the corresponding Ops to a graph."""
def __init__(self):
self._ops = {}
# pylint: disable=invalid-name
def add_op(self, op_def):
"""Register an OpDef. May call apply_op with the name afterwards."""
if not isinstance(op_def, op_def_pb2.OpDef):
raise TypeError("%s is %s, not an op_def_pb2.OpDef" %
(op_def, type(op_def)))
if not op_def.name:
raise ValueError("%s missing name." % op_def)
if op_def.name in self._ops:
raise RuntimeError("Op name %s registered twice." % op_def.name)
self._ops[op_def.name] = _OpInfo(op_def)
def add_op_list(self, op_list):
"""Register the OpDefs from an OpList."""
if not isinstance(op_list, op_def_pb2.OpList):
raise TypeError("%s is %s, not an op_def_pb2.OpList" %
(op_list, type(op_list)))
for op_def in op_list.op:
self.add_op(op_def)
def apply_op(self, op_type_name, name=None, **keywords):
# pylint: disable=g-doc-args
"""Add a node invoking a registered Op to a graph.
Example usage:
# input1 and input2 can be Tensors or anything ops.convert_to_tensor()
# will convert to a Tensor.
op_def_library.apply_op("op", input1=input1, input2=input2)
# Can specify a node name.
op_def_library.apply_op("op", input1=input1, name="node_name")
# Must use keyword arguments, with the names specified in the OpDef.
op_def_library.apply_op("op", input_name=input, attr_name=attr)
All attrs must either be inferred from an input or specified.
(If inferred, the attr must not be specified.) If an attr has a default
value specified in the Op's OpDef, then you may pass None as the value
of that attr to get the default.
Args:
op_type_name: string. Must match the name field of a registered Op.
name: string. Optional name of the created op.
**keywords: input Tensor and attr arguments specified by name,
and optional parameters to pass when constructing the Operation.
Returns:
The Tensor(s) representing the output of the operation, or the Operation
itself if there are no outputs.
Raises:
RuntimeError: On some errors.
TypeError: On some errors.
ValueError: On some errors.
"""
output_structure, is_stateful, op = self._apply_op_helper(
op_type_name, name, **keywords)
if output_structure:
outputs = op.outputs
res = _Restructure(ops.convert_n_to_tensor(outputs), output_structure)
if isinstance(res, list) and not res and is_stateful:
return op
else:
return res
else:
return op
def _apply_op_helper(self, op_type_name, name=None, **keywords):
"""Implementation of apply_op that returns output_structure, op."""
op_info = self._ops.get(op_type_name, None)
if op_info is None:
raise RuntimeError("Unrecognized Op name " + op_type_name)
op_def = op_info.op_def
# Determine the graph context.
try:
# Need to flatten all the arguments into a list.
# pylint: disable=protected-access
g = ops._get_graph_from_inputs(_Flatten(keywords.values()))
# pylint: enable=protected-access
except AssertionError as e:
raise RuntimeError(
"Cannot determine graph for Op '%s' due to: %s"
% (op_type_name, e.message))
# Default name if not specified.
if name is None:
name = op_type_name
# Check for deprecation
deprecation_version = op_def.deprecation.version
if deprecation_version:
producer = g.graph_def_versions.producer
if producer >= deprecation_version:
raise NotImplementedError(
("Op %s is not available in GraphDef version %d. "
"It has been removed in version %d. %s.") %
(op_type_name, producer, deprecation_version,
op_def.deprecation.explanation))
# Fill in the list of default types for all "type" attrs. This
# will be used to choose a preferred dtype to convert to in the
# absence of input type information.
#
# TODO(b/31302892): Currently the defaults don't work in the right
# way if you have two inputs, one of whose type resolution depends
# on the other. Handling this will require restructuring this code
# significantly.
default_type_attr_map = {}
for attr_def in op_def.attr:
if attr_def.type != "type":
continue
key = attr_def.name
if attr_def.HasField("default_value"):
default_type_attr_map[key] = dtypes.as_dtype(
attr_def.default_value.type)
# Requires that op_def has passed validation (using the C++
# ValidateOpDef() from ../framework/op_def_util.h).
attrs = {}
inputs = []
input_types = []
with g.as_default(), ops.name_scope(name) as scope:
# Perform input type inference
inferred_from = {}
for input_arg in op_def.input_arg:
input_name = input_arg.name
if input_name in keywords:
values = keywords.pop(input_name)
elif input_name + "_" in keywords:
# Handle the case where the name is a keyword or built-in
# for Python so we use the name + _ instead.
input_name += "_"
values = keywords.pop(input_name)
else:
raise TypeError("No argument for input " + input_name)
# Goals:
# * Convert values to Tensors if it contains constants.
# * Verify that values is a list if that matches the input_arg's
# type.
# * If the input_arg's type is determined by attrs, either set
# those attrs and validate those attr values are legal (if
# they have not yet been set) or validate the input matches
# the type indicated by the attrs (if they have already been
# inferred via an earlier input).
# * If the input_arg has an explicit type, make sure the input
# conforms.
if _IsListParameter(input_arg):
if not _IsListValue(values):
raise TypeError(
"Expected list for '%s' argument to '%s' Op, not %s." %
(input_name, op_type_name, values))
# In cases where we expect all elements of the list to have the
# same dtype, try to cast non-Tensor elements to that type.
dtype = None
default_dtype = None
if input_arg.type != types_pb2.DT_INVALID:
dtype = input_arg.type
elif input_arg.number_attr:
if input_arg.type_attr in attrs:
dtype = attrs[input_arg.type_attr]
else:
for t in values:
if isinstance(t, ops.Tensor):
dtype = t.dtype
break
# dtype still not found, prefer using the default dtype
# from the attr.
if dtype is None and input_arg.type_attr in default_type_attr_map:
default_dtype = default_type_attr_map[input_arg.type_attr]
try:
if not input_arg.is_ref and dtype:
dtype = dtypes.as_dtype(dtype).base_dtype
values = ops.internal_convert_n_to_tensor(
values,
name=input_arg.name,
dtype=dtype if dtype else None,
preferred_dtype=default_dtype,
as_ref=input_arg.is_ref)
if input_arg.number_attr and len(
set(v.dtype.base_dtype for v in values)) > 1:
raise TypeError() # All types should match.
except (TypeError, ValueError):
# What types does the conversion function think values have?
observed_types = []
for value in values:
try:
converted_value = ops.internal_convert_to_tensor(
value, as_ref=input_arg.is_ref)
observed_types.append(converted_value.dtype.base_dtype.name)
except (TypeError, ValueError):
observed_types.append("<NOT CONVERTIBLE TO TENSOR>")
observed = ", ".join(observed_types)
prefix = (
"Tensors in list passed to '%s' of '%s' Op have types [%s]" %
(input_name, op_type_name, observed))
if input_arg.number_attr:
if input_arg.type != types_pb2.DT_INVALID:
raise TypeError("%s that do not match expected type %s." %
(prefix, dtype.name))
elif input_arg.type_attr in attrs:
raise TypeError("%s that do not match type %s inferred from "
"earlier arguments." %
(prefix, dtype.name))
else:
raise TypeError("%s that don't all match." % prefix)
else:
raise TypeError(
"%s that are invalid. Tensors: %s" % (prefix, values))
types = [x.dtype for x in values]
inputs.extend(values)
else:
# In cases where we have an expected type, try to convert non-Tensor
# arguments to that type.
dtype = None
default_dtype = None
if input_arg.type != types_pb2.DT_INVALID:
dtype = input_arg.type
elif input_arg.type_attr in attrs:
dtype = attrs[input_arg.type_attr]
elif input_arg.type_attr in default_type_attr_map:
# The dtype could not be inferred solely from the inputs,
# so we prefer the attr's default, so code that adds a new attr
# with a default is backwards compatible.
default_dtype = default_type_attr_map[input_arg.type_attr]
try:
values = ops.internal_convert_to_tensor(
values,
name=input_arg.name,
dtype=dtype,
as_ref=input_arg.is_ref,
preferred_dtype=default_dtype)
except TypeError as err:
if dtype is None:
raise err
else:
raise TypeError(
"Expected %s passed to parameter '%s' of op '%s', got %s of "
"type '%s' instead. Error: %s" %
(dtypes.as_dtype(dtype).name, input_arg.name, op_type_name,
repr(values), type(values).__name__, err))
except ValueError:
# What type does convert_to_tensor think it has?
try:
observed = ops.internal_convert_to_tensor(
values, as_ref=input_arg.is_ref).dtype.name
except ValueError as err:
raise ValueError(
"Tried to convert '%s' to a tensor and failed. Error: %s" %
(input_name, err))
prefix = ("Input '%s' of '%s' Op has type %s that does not match" %
(input_name, op_type_name, observed))
if input_arg.type != types_pb2.DT_INVALID:
raise TypeError("%s expected type of %s." %
(prefix, dtypes.as_dtype(input_arg.type).name))
else:
# Update the maps with the default, if needed.
k = input_arg.type_attr
if k in default_type_attr_map:
if k not in attrs:
attrs[k] = default_type_attr_map[k]
if k not in inferred_from:
inferred_from[k] = "Default in OpDef"
raise TypeError(
"%s type %s of argument '%s'." %
(prefix, dtypes.as_dtype(attrs[input_arg.type_attr]).name,
inferred_from[input_arg.type_attr]))
types = [values.dtype]
inputs.append(values)
base_types = [x.base_dtype for x in types]
if input_arg.number_attr:
# <number-attr> * <type> or <number-attr> * <type-attr>
if input_arg.number_attr in attrs:
if len(values) != attrs[input_arg.number_attr]:
raise ValueError(
"List argument '%s' to '%s' Op with length %d must match "
"length %d of argument '%s'." %
(input_name, op_type_name, len(values),
attrs[input_arg.number_attr],
inferred_from[input_arg.number_attr]))
else:
attrs[input_arg.number_attr] = len(values)
inferred_from[input_arg.number_attr] = input_name
num_attr = _Attr(op_def, input_arg.number_attr)
if num_attr.has_minimum and len(values) < num_attr.minimum:
raise ValueError(
"List argument '%s' to '%s' Op with length %d shorter "
"than minimum length %d." %
(input_name, op_type_name, len(values), num_attr.minimum))
# All tensors must have the same base type.
if any(bt != base_types[0] for bt in base_types):
raise TypeError(
"All tensors passed to '%s' of '%s' Op "
"must have the same type." %
(input_name, op_type_name))
if input_arg.type != types_pb2.DT_INVALID:
# <number-attr> * <type> case
if base_types and base_types[0] != input_arg.type:
assert False, "Unreachable"
elif input_arg.type_attr in attrs:
# <number-attr> * <type-attr> case, where <type-attr> already
# has an inferred value.
if base_types and base_types[0] != attrs[input_arg.type_attr]:
assert False, "Unreachable"
else:
# <number-attr> * <type-attr> case, where we are now setting
# the <type-attr> based on this input
if not base_types:
raise TypeError(
"Don't know how to infer type variable from empty input "
"list passed to input '%s' of '%s' Op." %
(input_name, op_type_name))
attrs[input_arg.type_attr] = base_types[0]
inferred_from[input_arg.type_attr] = input_name
type_attr = _Attr(op_def, input_arg.type_attr)
_SatisfiesTypeConstraint(base_types[0], type_attr,
param_name=input_name)
elif input_arg.type_attr:
# <type-attr>
attr_value = base_types[0]
if input_arg.type_attr in attrs:
if attrs[input_arg.type_attr] != attr_value:
assert False, "Unreachable"
else:
for base_type in base_types:
_SatisfiesTypeConstraint(base_type,
_Attr(op_def, input_arg.type_attr),
param_name=input_name)
attrs[input_arg.type_attr] = attr_value
inferred_from[input_arg.type_attr] = input_name
elif input_arg.type_list_attr:
# <type-list-attr>
attr_value = base_types
if input_arg.type_list_attr in attrs:
if attrs[input_arg.type_list_attr] != attr_value:
raise TypeError(
"Input '%s' of '%s' Op has type list of %s that does not "
"match type list %s of argument '%s'." %
(input_name, op_type_name,
", ".join(dtypes.as_dtype(x).name for x in attr_value),
", ".join(dtypes.as_dtype(x).name
for x in attrs[input_arg.type_list_attr]),
inferred_from[input_arg.type_list_attr]))
else:
for base_type in base_types:
_SatisfiesTypeConstraint(base_type,
_Attr(op_def, input_arg.type_list_attr),
param_name=input_name)
attrs[input_arg.type_list_attr] = attr_value
inferred_from[input_arg.type_list_attr] = input_name
else:
# single Tensor with specified type
if base_types[0] != input_arg.type:
assert False, "Unreachable"
if input_arg.is_ref:
if not all(x._is_ref_dtype for x in types): # pylint: disable=protected-access
raise TypeError(
("'%s' Op requires that input '%s' be a mutable tensor "
"(e.g.: a tf.Variable)") % (op_type_name, input_name))
input_types.extend(types)
else:
input_types.extend(base_types)
# Process remaining attrs
for attr in op_def.attr:
# Skip attrs that have already had their values inferred
if attr.name in attrs:
if attr.name in keywords:
raise TypeError(
"Should not specify value for inferred attr '%s'." % attr.name)
continue
if attr.name in keywords:
attrs[attr.name] = keywords.pop(attr.name)
elif attr.name + "_" in keywords:
# Attrs whose names match Python keywords have an extra '_'
# appended, so we must check for that as well.
attrs[attr.name] = keywords.pop(attr.name + "_")
else:
raise TypeError("No argument for attr " + attr.name)
# Convert attr values to AttrValue protos.
attr_protos = {}
for attr_def in op_def.attr:
key = attr_def.name
value = attrs[key]
attr_value = attr_value_pb2.AttrValue()
if attr_def.HasField("default_value") and value is None:
attr_value.CopyFrom(attr_def.default_value)
attr_protos[key] = attr_value
continue
if attr_def.type.startswith("list("):
if not _IsListValue(value):
raise TypeError("Expected list for attr " + key)
if attr_def.has_minimum:
if len(value) < attr_def.minimum:
raise ValueError("Attr '%s' of '%s' Op passed list of length %d "
"less than minimum %d." %
(key, op_type_name, len(value),
attr_def.minimum))
attr_value.list.SetInParent()
if attr_def.type == "string":
attr_value.s = _MakeStr(value, key)
if attr_def.HasField("allowed_values"):
if attr_value.s not in attr_def.allowed_values.list.s:
raise ValueError(
"Attr '%s' of '%s' Op passed string '%s' not in: \"%s\"." %
(key, op_type_name, compat.as_text(attr_value.s),
'", "'.join(map(compat.as_text,
attr_def.allowed_values.list.s))))
elif attr_def.type == "list(string)":
attr_value.list.s.extend([_MakeStr(x, key) for x in value])
if attr_def.HasField("allowed_values"):
for x in attr_value.list.s:
if x not in attr_def.allowed_values.list.s:
raise ValueError(
"Attr '%s' of '%s' Op passed string '%s' not in: \"%s\"." %
(key, op_type_name, compat.as_text(x),
'", "'.join(map(compat.as_text,
attr_def.allowed_values.list.s))))
elif attr_def.type == "int":
attr_value.i = _MakeInt(value, key)
if attr_def.has_minimum:
if attr_value.i < attr_def.minimum:
raise ValueError(
"Attr '%s' of '%s' Op passed %d less than minimum %d." %
(key, op_type_name, attr_value.i, attr_def.minimum))
elif attr_def.type == "list(int)":
attr_value.list.i.extend([_MakeInt(x, key) for x in value])
elif attr_def.type == "float":
attr_value.f = _MakeFloat(value, key)
elif attr_def.type == "list(float)":
attr_value.list.f.extend([_MakeFloat(x, key) for x in value])
elif attr_def.type == "bool":
attr_value.b = _MakeBool(value, key)
elif attr_def.type == "list(bool)":
attr_value.list.b.extend([_MakeBool(x, key) for x in value])
elif attr_def.type == "type":
attr_value.type = _MakeType(value, attr_def)
elif attr_def.type == "list(type)":
attr_value.list.type.extend(
[_MakeType(x, attr_def) for x in value])
elif attr_def.type == "shape":
attr_value.shape.CopyFrom(_MakeShape(value, key))
elif attr_def.type == "list(shape)":
attr_value.list.shape.extend(
[_MakeShape(x, key) for x in value])
elif attr_def.type == "tensor":
attr_value.tensor.CopyFrom(_MakeTensor(value, key))
elif attr_def.type == "list(tensor)":
attr_value.list.tensor.extend(
[_MakeTensor(x, key) for x in value])
elif attr_def.type == "func":
attr_value.func.CopyFrom(_MakeFunc(value, key))
elif attr_def.type == "list(func)":
attr_value.list.func.extend([_MakeFunc(x, key) for x in value])
else:
raise TypeError("Unrecognized Attr type " + attr_def.type)
attr_protos[key] = attr_value
del attrs # attrs is no longer authoritative, use attr_protos instead
# Determine output types (possibly using attrs)
output_structure = []
for arg in op_def.output_arg:
if arg.number_attr:
n = _AttrValue(attr_protos, arg.number_attr).i
output_structure.append(n)
elif arg.type_attr:
t = _AttrValue(attr_protos, arg.type_attr)
output_structure.append(None)
elif arg.type_list_attr:
t = _AttrValue(attr_protos, arg.type_list_attr)
output_structure.append(len(t.list.type))
else:
output_structure.append(None)
if keywords:
raise TypeError("apply_op() got unexpected keyword arguments: " +
", ".join(sorted(keywords.keys())))
# NOTE(mrry): We add an explicit colocation constraint between
# the newly created op and any of its reference-typed inputs.
must_colocate_inputs = [val for arg, val in zip(op_def.input_arg, inputs)
if arg.is_ref]
with _MaybeColocateWith(must_colocate_inputs):
# Add Op to graph
op = g.create_op(op_type_name, inputs, dtypes=None, name=scope,
input_types=input_types, attrs=attr_protos,
op_def=op_def)
return output_structure, op_def.is_stateful, op
# pylint: enable=invalid-name
|
the-stack_0_7313 | from kivy.app import App
from kivy.uix.widget import Widget
from kivy.clock import Clock
from kivy.core.window import Window
from random import randint, choice
from math import radians, pi, sin, cos
import kivent_core
import kivent_cymunk
from kivent_core.gameworld import GameWorld
from kivent_core.managers.resource_managers import texture_manager
from kivent_core.systems.renderers import RotateRenderer
from kivent_core.systems.position_systems import PositionSystem2D
from kivent_core.systems.rotate_systems import RotateSystem2D
from kivent_cymunk.interaction import CymunkTouchSystem
from kivy.properties import StringProperty, NumericProperty
from functools import partial
from os.path import dirname, join, abspath
texture_manager.load_atlas(join(dirname(dirname(abspath(__file__))), 'assets',
'background_objects.atlas'))
class TestGame(Widget):
def on_kv_post(self, *args):
self.gameworld.init_gameworld(
['cymunk_physics', 'rotate_renderer', 'rotate', 'position',
'cymunk_touch'],
callback=self.init_game)
def init_game(self):
self.setup_states()
self.set_state()
def destroy_created_entity(self, ent_id, dt):
self.gameworld.remove_entity(ent_id)
self.app.count -= 1
def draw_some_stuff(self):
size = Window.size
w, h = size[0], size[1]
delete_time = 2.5
create_asteroid = self.create_asteroid
destroy_ent = self.destroy_created_entity
for x in range(100):
pos = (randint(0, w), randint(0, h))
ent_id = create_asteroid(pos)
self.app.count += 100
def create_asteroid(self, pos):
x_vel = randint(-500, 500)
y_vel = randint(-500, 500)
angle = radians(randint(-360, 360))
        angular_velocity = radians(randint(-150, 150))
shape_dict = {'inner_radius': 0, 'outer_radius': 22,
'mass': 50, 'offset': (0, 0)}
col_shape = {'shape_type': 'circle', 'elasticity': .5,
'collision_type': 1, 'shape_info': shape_dict, 'friction': 1.0}
col_shapes = [col_shape]
physics_component = {'main_shape': 'circle',
'velocity': (x_vel, y_vel),
'position': pos, 'angle': angle,
'angular_velocity': angular_velocity,
'vel_limit': 250,
'ang_vel_limit': radians(200),
'mass': 50, 'col_shapes': col_shapes}
create_component_dict = {'cymunk_physics': physics_component,
'rotate_renderer': {'texture': 'asteroid1',
'size': (45, 45),
'render': True},
'position': pos, 'rotate': 0, }
component_order = ['position', 'rotate', 'rotate_renderer',
'cymunk_physics',]
return self.gameworld.init_entity(
create_component_dict, component_order)
def update(self, dt):
self.gameworld.update(dt)
def setup_states(self):
self.gameworld.add_state(state_name='main',
systems_added=['rotate_renderer'],
systems_removed=[], systems_paused=[],
systems_unpaused=['rotate_renderer'],
screenmanager_screen='main')
def set_state(self):
self.gameworld.state = 'main'
class DebugPanel(Widget):
fps = StringProperty(None)
def __init__(self, **kwargs):
super(DebugPanel, self).__init__(**kwargs)
Clock.schedule_once(self.update_fps)
def update_fps(self,dt):
self.fps = str(int(Clock.get_fps()))
Clock.schedule_once(self.update_fps, .05)
class YourAppNameApp(App):
count = NumericProperty(0)
if __name__ == '__main__':
YourAppNameApp().run() |
the-stack_0_7314 | def save(file, conf):
with open(file, 'w') as configfile:
conf.write(configfile)
def getOpts():
import configparser
import copy
import os
config = configparser.ConfigParser()
file = os.path.abspath(os.path.join('.', 'config.ini'))
DEFAULT_OPTIONS = {
'DEFAULT': {
'limit': 5,
'domain_name': 'https://nyaa.si',
'out_dir': os.path.abspath(os.path.join('.', 'output'))
}
}
if os.path.isfile(file):
config.read(file)
for i in DEFAULT_OPTIONS:
if i not in config:
config[i] = copy.deepcopy(DEFAULT_OPTIONS[i])
for x in DEFAULT_OPTIONS[i]:
if x not in config[i]:
config[i][x] = str(copy.deepcopy(DEFAULT_OPTIONS[i][x]))
save(file, config)
else:
for i in DEFAULT_OPTIONS:
config[i] = DEFAULT_OPTIONS[i]
save(file, config)
options = copy.deepcopy(dict(config['DEFAULT']))
return options
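# Small usage sketch (an addition, not in the original file): getOpts() returns
# the [DEFAULT] section as a dict of strings, so numeric settings still need an
# explicit cast by the caller.
if __name__ == '__main__':
    opts = getOpts()
    print(opts['domain_name'], int(opts['limit']), opts['out_dir'])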
|
the-stack_0_7315 | # Copyright 2014 Cloudbase Solutions Srl
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from nova.tests.unit.virt.hyperv import test_vmutils
from nova.virt.hyperv import constants
from nova.virt.hyperv import vmutilsv2
class VMUtilsV2TestCase(test_vmutils.VMUtilsTestCase):
"""Unit tests for the Hyper-V VMUtilsV2 class."""
_DEFINE_SYSTEM = 'DefineSystem'
_DESTROY_SYSTEM = 'DestroySystem'
_DESTROY_SNAPSHOT = 'DestroySnapshot'
_ADD_RESOURCE = 'AddResourceSettings'
_REMOVE_RESOURCE = 'RemoveResourceSettings'
_SETTING_TYPE = 'VirtualSystemType'
_VM_GEN = constants.VM_GEN_2
_VIRTUAL_SYSTEM_TYPE_REALIZED = 'Microsoft:Hyper-V:System:Realized'
def setUp(self):
super(VMUtilsV2TestCase, self).setUp()
self._vmutils = vmutilsv2.VMUtilsV2()
self._vmutils._conn = mock.MagicMock()
def test_create_vm(self):
super(VMUtilsV2TestCase, self).test_create_vm()
mock_vssd = self._vmutils._conn.Msvm_VirtualSystemSettingData.new()
self.assertEqual(self._vmutils._VIRTUAL_SYSTEM_SUBTYPE_GEN2,
mock_vssd.VirtualSystemSubType)
self.assertFalse(mock_vssd.SecureBootEnabled)
def test_modify_virt_resource(self):
mock_svc = self._vmutils._conn.Msvm_VirtualSystemManagementService()[0]
mock_svc.ModifyResourceSettings.return_value = (self._FAKE_JOB_PATH,
mock.MagicMock(),
self._FAKE_RET_VAL)
mock_res_setting_data = mock.MagicMock()
mock_res_setting_data.GetText_.return_value = self._FAKE_RES_DATA
self._vmutils._modify_virt_resource(mock_res_setting_data,
self._FAKE_VM_PATH)
mock_svc.ModifyResourceSettings.assert_called_with(
ResourceSettings=[self._FAKE_RES_DATA])
@mock.patch.object(vmutilsv2, 'wmi', create=True)
@mock.patch.object(vmutilsv2.VMUtilsV2, 'check_ret_val')
def test_take_vm_snapshot(self, mock_check_ret_val, mock_wmi):
self._lookup_vm()
mock_svc = self._get_snapshot_service()
mock_svc.CreateSnapshot.return_value = (self._FAKE_JOB_PATH,
mock.MagicMock(),
self._FAKE_RET_VAL)
self._vmutils.take_vm_snapshot(self._FAKE_VM_NAME)
mock_svc.CreateSnapshot.assert_called_with(
AffectedSystem=self._FAKE_VM_PATH,
SnapshotType=self._vmutils._SNAPSHOT_FULL)
mock_check_ret_val.assert_called_once_with(self._FAKE_RET_VAL,
self._FAKE_JOB_PATH)
@mock.patch.object(vmutilsv2.VMUtilsV2, '_add_virt_resource')
@mock.patch.object(vmutilsv2.VMUtilsV2, '_get_new_setting_data')
@mock.patch.object(vmutilsv2.VMUtilsV2, '_get_nic_data_by_name')
def test_set_nic_connection(self, mock_get_nic_data, mock_get_new_sd,
mock_add_virt_res):
self._lookup_vm()
fake_eth_port = mock_get_new_sd.return_value
self._vmutils.set_nic_connection(self._FAKE_VM_NAME, None, None)
mock_add_virt_res.assert_called_with(fake_eth_port, self._FAKE_VM_PATH)
@mock.patch('nova.virt.hyperv.vmutils.VMUtils._get_vm_disks')
def test_enable_vm_metrics_collection(self, mock_get_vm_disks):
self._lookup_vm()
mock_svc = self._vmutils._conn.Msvm_MetricService()[0]
metric_def = mock.MagicMock()
mock_disk = mock.MagicMock()
mock_disk.path_.return_value = self._FAKE_RES_PATH
mock_get_vm_disks.return_value = ([mock_disk], [mock_disk])
fake_metric_def_paths = ['fake_0', 'fake_0', None]
fake_metric_resource_paths = [self._FAKE_VM_PATH,
self._FAKE_VM_PATH,
self._FAKE_RES_PATH]
metric_def.path_.side_effect = fake_metric_def_paths
self._vmutils._conn.CIM_BaseMetricDefinition.return_value = [
metric_def]
self._vmutils.enable_vm_metrics_collection(self._FAKE_VM_NAME)
calls = [mock.call(Name=def_name)
for def_name in [self._vmutils._METRIC_AGGR_CPU_AVG,
self._vmutils._METRIC_AGGR_MEMORY_AVG]]
self._vmutils._conn.CIM_BaseMetricDefinition.assert_has_calls(calls)
calls = []
for i in range(len(fake_metric_def_paths)):
calls.append(mock.call(
Subject=fake_metric_resource_paths[i],
Definition=fake_metric_def_paths[i],
MetricCollectionEnabled=self._vmutils._METRIC_ENABLED))
mock_svc.ControlMetrics.assert_has_calls(calls, any_order=True)
def _get_snapshot_service(self):
return self._vmutils._conn.Msvm_VirtualSystemSnapshotService()[0]
def _assert_add_resources(self, mock_svc):
getattr(mock_svc, self._ADD_RESOURCE).assert_called_with(
self._FAKE_VM_PATH, [self._FAKE_RES_DATA])
def _assert_remove_resources(self, mock_svc):
getattr(mock_svc, self._REMOVE_RESOURCE).assert_called_with(
[self._FAKE_RES_PATH])
def test_list_instance_notes(self):
vs = mock.MagicMock()
attrs = {'ElementName': 'fake_name',
'Notes': ['4f54fb69-d3a2-45b7-bb9b-b6e6b3d893b3']}
vs.configure_mock(**attrs)
self._vmutils._conn.Msvm_VirtualSystemSettingData.return_value = [vs]
response = self._vmutils.list_instance_notes()
self.assertEqual([(attrs['ElementName'], attrs['Notes'])], response)
self._vmutils._conn.Msvm_VirtualSystemSettingData.assert_called_with(
['ElementName', 'Notes'],
VirtualSystemType=self._vmutils._VIRTUAL_SYSTEM_TYPE_REALIZED)
@mock.patch('nova.virt.hyperv.vmutilsv2.VMUtilsV2.check_ret_val')
@mock.patch('nova.virt.hyperv.vmutilsv2.VMUtilsV2._get_wmi_obj')
def _test_create_vm_obj(self, mock_get_wmi_obj, mock_check_ret_val,
vm_path, dynamic_memory_ratio=1.0):
mock_vs_man_svc = mock.MagicMock()
mock_vs_data = mock.MagicMock()
mock_job = mock.MagicMock()
fake_job_path = 'fake job path'
fake_ret_val = 'fake return value'
_conn = self._vmutils._conn.Msvm_VirtualSystemSettingData
mock_check_ret_val.return_value = mock_job
_conn.new.return_value = mock_vs_data
mock_vs_man_svc.DefineSystem.return_value = (fake_job_path,
vm_path,
fake_ret_val)
mock_job.associators.return_value = ['fake vm path']
response = self._vmutils._create_vm_obj(
vs_man_svc=mock_vs_man_svc,
vm_name='fake vm',
vm_gen='fake vm gen',
notes='fake notes',
dynamic_memory_ratio=dynamic_memory_ratio)
if not vm_path:
mock_job.associators.assert_called_once_with(
self._vmutils._AFFECTED_JOB_ELEMENT_CLASS)
_conn.new.assert_called_once_with()
self.assertEqual(mock_vs_data.ElementName, 'fake vm')
mock_vs_man_svc.DefineSystem.assert_called_once_with(
ResourceSettings=[], ReferenceConfiguration=None,
SystemSettings=mock_vs_data.GetText_(1))
mock_check_ret_val.assert_called_once_with(fake_ret_val, fake_job_path)
if dynamic_memory_ratio > 1:
self.assertFalse(mock_vs_data.VirtualNumaEnabled)
mock_get_wmi_obj.assert_called_with('fake vm path')
self.assertEqual(mock_vs_data.Notes, 'fake notes')
self.assertEqual(response, mock_get_wmi_obj())
def test_create_vm_obj(self):
self._test_create_vm_obj(vm_path='fake vm path')
def test_create_vm_obj_no_vm_path(self):
self._test_create_vm_obj(vm_path=None)
def test_create_vm_obj_dynamic_memory(self):
self._test_create_vm_obj(vm_path=None, dynamic_memory_ratio=1.1)
def test_list_instances(self):
vs = mock.MagicMock()
attrs = {'ElementName': 'fake_name'}
vs.configure_mock(**attrs)
self._vmutils._conn.Msvm_VirtualSystemSettingData.return_value = [vs]
response = self._vmutils.list_instances()
self.assertEqual([(attrs['ElementName'])], response)
self._vmutils._conn.Msvm_VirtualSystemSettingData.assert_called_with(
['ElementName'],
VirtualSystemType=self._vmutils._VIRTUAL_SYSTEM_TYPE_REALIZED)
def test_get_attached_disks(self):
mock_scsi_ctrl_path = mock.MagicMock()
expected_query = ("SELECT * FROM %(class_name)s "
"WHERE (ResourceSubType='%(res_sub_type)s' OR "
"ResourceSubType='%(res_sub_type_virt)s' OR "
"ResourceSubType='%(res_sub_type_dvd)s') AND "
"Parent = '%(parent)s'" %
{"class_name":
self._vmutils._RESOURCE_ALLOC_SETTING_DATA_CLASS,
"res_sub_type":
self._vmutils._PHYS_DISK_RES_SUB_TYPE,
"res_sub_type_virt":
self._vmutils._DISK_DRIVE_RES_SUB_TYPE,
"res_sub_type_dvd":
self._vmutils._DVD_DRIVE_RES_SUB_TYPE,
"parent": mock_scsi_ctrl_path.replace("'", "''")})
expected_disks = self._vmutils._conn.query.return_value
ret_disks = self._vmutils.get_attached_disks(mock_scsi_ctrl_path)
self._vmutils._conn.query.assert_called_once_with(expected_query)
self.assertEqual(expected_disks, ret_disks)
def test_get_vm_dvd_disk_paths(self):
mock_vm = self._lookup_vm()
mock_sasd1 = mock.MagicMock(
ResourceSubType=self._vmutils._DVD_DISK_RES_SUB_TYPE,
HostResource=[mock.sentinel.FAKE_DVD_PATH1])
mock_settings = mock.MagicMock()
mock_settings.associators.return_value = [mock_sasd1]
mock_vm.associators.return_value = [mock_settings]
ret_val = self._vmutils.get_vm_dvd_disk_paths(self._FAKE_VM_NAME)
self.assertEqual(mock.sentinel.FAKE_DVD_PATH1, ret_val[0])
|
the-stack_0_7320 | # Create a list of strings: mutants
mutants = ['charles xavier',
'bobby drake',
'kurt wagner',
'max eisenhardt',
'kitty pryde']
aliases= ['prof x', 'iceman', 'nightcrawler', 'magneto', 'shadowcat']
powers = ['telepathy',
'thermokinesis',
'teleportation',
'magnetokinesis',
'intangibility']
# Create a list of tuples: mutant_data
mutant_data = list(zip(mutants, aliases, powers))
# Print the list of tuples
print(mutant_data)
# Create a zip object using the three lists: mutant_zip
mutant_zip = zip(mutants, aliases, powers)
# Print the zip object
print(mutant_zip)
# Unpack the zip object and print the tuple values
for value1, value2, value3 in mutant_zip:
print(value1, value2, value3)
# Create a zip object from mutants and powers: z1
z1 = zip(mutants, powers)
# Print the tuples in z1 by unpacking with *
print(*z1)
# Re-create a zip object from mutants and powers: z1
z1 = zip(mutants, powers)
# 'Unzip' the tuples in z1 by unpacking with * and zip(): result1, result2
result1, result2 = zip(*z1)
# Check if unpacked tuples are equivalent to original tuples
print(result1 == mutants)
print(result2 == powers)
|
the-stack_0_7322 | import sys
from typing import Iterable, Optional
import numpy as np
import tensorflow as tf
def _as_tensor(x):
if isinstance(x, np.ndarray):
x = tf.convert_to_tensor(x)
return x
def _build_train_step(model, data, jit_compile: bool):
data = tf.nest.map_structure(_as_tensor, data)
@tf.function(jit_compile=jit_compile)
def train_fn():
return model.train_step(data)
return train_fn
def _build_test_step(model, data, jit_compile: bool):
data = tf.nest.map_structure(_as_tensor, data)
@tf.function(jit_compile=jit_compile)
def test_fn():
model.reset_metrics()
return model.test_step(data)
return test_fn
class EpochProgbarLogger(tf.keras.callbacks.Callback):
"""Progress bar that updates at the end of each epoch."""
def __init__(self):
super().__init__()
self.progbar = None
self.epochs = None
self.last_seen = None
def set_params(self, params):
self.epochs = params["epochs"]
def on_train_begin(self, logs=None):
class Universe:
"""Contains everything."""
def __contains__(self, x):
return True
self.progbar = tf.keras.utils.Progbar(target=self.epochs, unit_name="epoch",)
# probar uses stateful metrics to determine which metric values to average.
# Since this is only called on_epoch_end, no metrics should be averaged
# i.e. all metrics should be considered 'stateful'.
# don't set stateful_metrics in constructor because it wraps it in `set`.
self.progbar.stateful_metrics = Universe()
def on_epoch_end(self, epoch: int, logs=None):
self.last_seen = epoch + 1
self.progbar.update(epoch + 1, list(logs.items()))
def on_train_end(self, logs=None):
if self.last_seen < self.progbar.target:
if tf.version.VERSION < "2.3":
sys.stdout.write("\n")
else:
self.progbar.update(self.last_seen, finalize=True)
def fit_single(
model: tf.keras.Model,
train_data,
validation_data=None,
epochs: int = 1,
initial_epoch: int = 0,
validation_freq: int = 1,
callbacks: Iterable[tf.keras.callbacks.Callback] = (),
verbose: bool = True,
jit_compile: bool = False,
):
"""
Optimized keras.Model.fit for training on a single graph.
Args:
model: keras model to train.
train_data: (inputs, labels, sample_weight) or dataset with a
single element for training.
validation_data: (inputs, labels, sample_weight) or dataset with a
single element for validation.
epochs: int, maximum number of epochs / steps to train for.
initial_epoch: int, starting epoch.
validation_freq: int, number of training steps/epochs per validation.
callbacks: Iterable of tf.keras.callbacks.Callbacks.
verbose: flag resulting in verbose outputs.
jit_compile: flag indicating whether train/validation steps are compiled
with `jit`. Not all ops are jit compatible, though where they are this may
result in speed-ups.
Returns:
history: `tf.keras.callbacks.History` object.
"""
if isinstance(train_data, tf.data.Dataset):
train_data = tf.data.experimental.get_single_element(train_data)
if isinstance(validation_data, tf.data.Dataset):
validation_data = tf.data.experimental.get_single_element(validation_data)
do_validation = validation_data is not None
params = dict(epochs=epochs, verbose=verbose, steps=1, do_validation=do_validation,)
callbacks = list(callbacks)
if verbose:
callbacks.append(EpochProgbarLogger())
cb = tf.keras.callbacks.CallbackList(
callbacks, add_history=True, add_progbar=False, model=model, **params,
)
del callbacks
train_step = _build_train_step(model, train_data, jit_compile=jit_compile)
if validation_data is None:
validation_step = None
else:
validation_step = _build_test_step(
model, validation_data, jit_compile=jit_compile
)
model.stop_training = False
cb.on_train_begin(logs=None)
# _maybe_load_initial_epoch_from_ckpt behaviour is influenced by
# callbacks.experimental.BackupAndRestore
initial_epoch = model._maybe_load_initial_epoch_from_ckpt( # pylint: disable=protected-access
initial_epoch
)
logs = None
for epoch in range(initial_epoch, epochs):
model.reset_metrics()
cb.on_epoch_begin(epoch, logs=None)
cb.on_train_batch_begin(batch=0)
logs = train_step()
cb.on_train_batch_end(batch=0, logs=logs)
if model.stop_training:
break
# validation
if validation_step is not None and (epoch + 1) % validation_freq == 0:
val_logs = validation_step()
logs.update({f"val_{k}": v for k, v in val_logs.items()})
cb.on_epoch_end(epoch, logs)
if model.stop_training:
break
cb.on_train_end(logs)
return model.history
def fit(
model: tf.keras.Model,
train_data,
validation_data=None,
epochs: int = 1,
initial_epoch: int = 0,
validation_freq: int = 1,
callbacks: Iterable[tf.keras.callbacks.Callback] = (),
steps_per_epoch: Optional[int] = None,
verbose: bool = True,
jit_compile: bool = False,
):
"""
Call `fit_single` or `Model.fit` based on `train_data`.
Delegates to either `graph_tf.train.fit_single` or `tf.keras.Model.fit`.
Args:
model: keras model to train.
train_data: (inputs, labels, sample_weight) or dataset with a
single element for training.
validation_data: (inputs, labels, sample_weight) or dataset with a
single element for validation.
epochs: int, maximum number of steps/epochs to train for.
initial_epoch: int, starting epoch.
validation_freq: int, number of training steps/epochs per validation.
callbacks: Iterable of `tf.keras.callbacks.Callbacks`.
steps_per_epoch: Number of steps per epoch. Must be 1 if specified and
train_data is a not a `tf.data.Dataset`.
verbose: flag resulting in verbose outputs.
jit_compile: used in fit_single. Ignored if more than one example.
Returns:
history: `tf.keras.callbacks.History` object.
"""
if not isinstance(train_data, tf.data.Dataset) or len(train_data) == 1:
assert steps_per_epoch is None or steps_per_epoch == 1
return fit_single(
model=model,
train_data=train_data,
validation_data=validation_data,
epochs=epochs,
initial_epoch=initial_epoch,
validation_freq=validation_freq,
callbacks=callbacks,
verbose=verbose,
jit_compile=jit_compile,
)
return model.fit(
train_data,
validation_data=validation_data,
epochs=epochs,
initial_epoch=initial_epoch,
validation_freq=validation_freq,
callbacks=callbacks,
verbose=verbose,
steps_per_epoch=steps_per_epoch,
)
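# Minimal smoke-test sketch (an addition, not part of the original module); the
# tiny dense model and random tensors below are stand-ins for a real
# single-graph task.
if __name__ == "__main__":
    model = tf.keras.Sequential([tf.keras.layers.Dense(1)])
    model.compile(optimizer="adam", loss="mse", metrics=["mae"])
    x = tf.random.normal((32, 4))
    y = tf.random.normal((32, 1))
    history = fit_single(model, (x, y), epochs=5, verbose=True)
    print(history.history)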
|
the-stack_0_7323 | # If executes from local python Kernel
import sys
sys.path.append('./python_env/lib/python3.6/site-packages')
# Import libraries for general use
from unidecode import unidecode # Library to parse format
from bs4 import BeautifulSoup # Library to scripting in HTML
import numpy # Library for math function
import requests # Library for request to a website
# This function returns an array that contains the percentage of the population.
def get_population_percentage(population_array):
# Get the percentage for each item
population_percentage = []
for item in population_array:
population_percentage.append(item/numpy.sum(population_array)*100)
return numpy.array(population_percentage)
# This function create the output file
def create_output_file(head, data, population_percentage):
# Concatenate all data Head + Rows + New Colum (percentage)
result = numpy.concatenate((data, population_percentage.T),axis=1)
result = numpy.concatenate((head, result), axis=0)
result = result.astype('str')
# Save the result in to a csv file
return numpy.savetxt('Organización territorial de Chile.csv', result, delimiter=",",fmt="%s")
def get_data():
try:
website_text = requests.get('https://es.wikipedia.org/wiki/Chile').text
except requests.exceptions.RequestException as e:
print(e)
sys.exit(1)
# Parse all data in to HTML format
soup = BeautifulSoup(website_text,'html.parser')
    # Get the table 'Organización territorial de Chile'
territory_table = soup.find('table',{'class':'wikitable col1izq col2der col3der col4der col5der col6izq'})
# Get all data from table - tag <td>
if territory_table:
list_td = territory_table.find_all('td')
        # Containers for the table head, the data rows and the population column
head = ['Región', 'Población', 'Superficie', 'Densidad', 'Capital', 'Porcentaje']
data = []
row = []
population = []
for cell in list_td:
if(list_td.index(cell)==5): # Delete de 'Mapa administrativo' cell
continue
            if cell.find_all('a'): # Get text from a cell that contains an '<a>' tag.
a = cell.find_all('a')[0]
row.append(a.get_text())
else:
                # For numbers, normalise to US-style decimals (strip spaces, comma -> dot)
cell = unidecode(cell.get_text()).replace(" ","").replace(",",".")
# Delete <sub> tag info
if "(" in cell:
cell = cell.split("(")[0]
# Add cell to the row's table
row.append(float(cell))
# Save the population data to calculate percentage
if(len(row) == 2):
population.append(row[1])
# Add row to the table
if len(row) == 5:
data.append(row)
row = []
return numpy.array([head]), numpy.array(data), numpy.array([population])
else:
print("Table not found.")
return sys.exit(1)
if __name__ == '__main__':
head,data,population = get_data()
population_percentage = get_population_percentage(population)
create_output_file(head,data,population_percentage)
|
the-stack_0_7325 | # Copyright (c) 2017, Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can be
# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
import os
import itertools
import pandas as pd
import unittest
from coremltools._deps import HAS_SKLEARN
from coremltools.models.utils import evaluate_classifier, macos_version
import pytest
if HAS_SKLEARN:
from coremltools.converters import sklearn as skl_converter
from sklearn.tree import DecisionTreeClassifier
@unittest.skipIf(not HAS_SKLEARN, 'Missing sklearn. Skipping tests.')
class DecisionTreeClassificationBostonHousingScikitNumericTest(unittest.TestCase):
def _check_metrics(self, metrics, params = {}):
self.assertEquals(metrics['num_errors'], 0, msg = 'Failed case %s. Results %s' % (params, metrics))
def _train_convert_evaluate_assert(self, **scikit_params):
scikit_model = DecisionTreeClassifier(random_state = 1, **scikit_params)
scikit_model.fit(self.X, self.target)
# Convert the model
spec = skl_converter.convert(scikit_model, self.feature_names, self.output_name)
if macos_version() >= (10, 13):
# Get predictions
df = pd.DataFrame(self.X, columns=self.feature_names)
df['prediction'] = scikit_model.predict(self.X)
# Evaluate it
metrics = evaluate_classifier(spec, df)
self._check_metrics(metrics, scikit_params)
@unittest.skipIf(not HAS_SKLEARN, 'Missing sklearn. Skipping tests.')
class DecisionTreeBinaryClassificationBostonHousingScikitNumericTest(
DecisionTreeClassificationBostonHousingScikitNumericTest):
@classmethod
def setUpClass(self):
from sklearn.datasets import load_boston
from sklearn.tree import DecisionTreeClassifier
# Load data and train model
scikit_data = load_boston()
self.scikit_data = scikit_data
self.X = scikit_data.data.astype('f').astype('d') ## scikit-learn downcasts data
self.target = 1 * (scikit_data['target'] > scikit_data['target'].mean())
self.feature_names = scikit_data.feature_names
self.output_name = 'target'
def test_simple_binary_classifier(self):
self._train_convert_evaluate_assert()
@pytest.mark.slow
def test_binary_classifier_stress_test(self):
options = dict(
splitter = ['best'],
max_depth = [1, 10, None],
min_samples_split = [2, 10, 0.5],
min_samples_leaf = [1, 5],
min_weight_fraction_leaf = [0.0, 0.5],
max_features = [None, 1, 5],
max_leaf_nodes = [None, 20],
presort = [False, True],
)
# Make a cartesian product of all options
import itertools
product = itertools.product(*options.values())
args = [dict(zip(options.keys(), p)) for p in product]
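        # e.g. (key order may vary on older Pythons) args[0] looks like:
        #   {'splitter': 'best', 'max_depth': 1, 'min_samples_split': 2, 'min_samples_leaf': 1,
        #    'min_weight_fraction_leaf': 0.0, 'max_features': None, 'max_leaf_nodes': None, 'presort': False}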
print("Testing a total of %s cases. This could take a while" % len(args))
for it, arg in enumerate(args):
self._train_convert_evaluate_assert(**arg)
@unittest.skipIf(not HAS_SKLEARN, 'Missing sklearn. Skipping tests.')
class DecisionTreeMultiClassClassificationBostonHousingScikitNumericTest(
DecisionTreeClassificationBostonHousingScikitNumericTest):
@classmethod
def setUpClass(self):
from sklearn.datasets import load_boston
import numpy as np
# Load data and train model
scikit_data = load_boston()
num_classes = 3
self.X = scikit_data.data.astype('f').astype('d') ## scikit-learn downcasts data
t = scikit_data.target
target = np.digitize(t, np.histogram(t, bins = num_classes - 1)[1]) - 1
# Save the data and the model
self.scikit_data = scikit_data
self.target = target
self.feature_names = scikit_data.feature_names
self.output_name = 'target'
def test_simple_multiclass(self):
self._train_convert_evaluate_assert()
@pytest.mark.slow
def test_multiclass_stress_test(self):
options = dict(
splitter = ['best'],
max_depth = [1, 10, None],
min_samples_split = [2, 10, 0.5],
min_samples_leaf = [1, 5],
min_weight_fraction_leaf = [0.0, 0.5],
max_features = [None, 1, 5],
max_leaf_nodes = [None, 20],
presort = [False, True],
)
# Make a cartesian product of all options
product = itertools.product(*options.values())
args = [dict(zip(options.keys(), p)) for p in product]
print("Testing a total of %s cases. This could take a while" % len(args))
for it, arg in enumerate(args):
self._train_convert_evaluate_assert(**arg)
|
the-stack_0_7326 | from os import path
from setuptools import setup, find_packages
import sys
import versioneer
# NOTE: This file must remain Python 2 compatible for the foreseeable future,
# to ensure that we error out properly for people with outdated setuptools
# and/or pip.
min_version = (3, 6)
if sys.version_info < min_version:
error = """
jupyterhub-share-link-serverextension does not support Python {0}.{1}.
Python {2}.{3} and above is required. Check your Python version like so:
python3 --version
This may be due to an out-of-date pip. Make sure you have pip >= 9.0.1.
Upgrade pip like so:
pip install --upgrade pip
""".format(*(sys.version_info[:2] + min_version))
sys.exit(error)
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'README.md'), encoding='utf-8') as readme_file:
readme = readme_file.read()
with open(path.join(here, 'requirements.txt')) as requirements_file:
# Parse requirements.txt, ignoring any commented-out lines.
requirements = [line for line in requirements_file.read().splitlines()
if not line.startswith('#')]
setup(
name='jupyterhub-share-link-serverextension',
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
description="Python package for doing science.",
long_description=readme,
author="JupyterHub Share Link Contributors",
author_email='[email protected]',
url='https://github.com/danielballan/jupyterhub-share-link-serverextension',
python_requires='>={}'.format('.'.join(str(n) for n in min_version)),
packages=find_packages(exclude=['docs', 'tests']),
entry_points={
'console_scripts': [
# 'command = some.module:some_function',
],
},
include_package_data=True,
package_data={
'jupyterhub_share_link_serverextension': [
# When adding files here, remember to update MANIFEST.in as well,
# or else they will not be included in the distribution on PyPI!
# 'path/to/data_file',
]
},
install_requires=requirements,
license="BSD (3-clause)",
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Natural Language :: English',
'Programming Language :: Python :: 3',
],
)
|
the-stack_0_7328 | """
Modified from https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py
Edits:
ResNet:
- Changed input layer from 3 channel -> 1 channel (depth images)
- Divided inplanes, planes, and width_per_group by 4
BasicBlock:
- Commented out ValueError triggered by base_width != 64
'To make the number of parameters comparable to point-based methods,
we use ResNet18 with one-fourth filters (ResNet18/4) as the backbone.'
"""
from typing import Type, Any, Callable, Union, List, Optional
import torch
import torch.nn as nn
from torchvision.models.resnet import (
Bottleneck,
conv3x3,
conv1x1
)
class BasicBlock(nn.Module):
expansion: int = 1
def __init__(
self,
inplanes: int,
planes: int,
stride: int = 1,
downsample: Optional[nn.Module] = None,
groups: int = 1,
base_width: int = 64,
dilation: int = 1,
norm_layer: Optional[Callable[..., nn.Module]] = None
) -> None:
super().__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
"""
if groups != 1 or base_width != 64:
raise ValueError('BasicBlock only supports groups=1 and base_width=64')
"""
if dilation > 1:
raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
# Both self.conv1 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = norm_layer(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = norm_layer(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x: torch.Tensor) -> torch.Tensor:
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class ResNet_4(nn.Module):
def __init__(
self,
block: Type[Union[BasicBlock, Bottleneck]],
layers: List[int],
num_classes: int = 1000,
zero_init_residual: bool = False,
groups: int = 1,
width_per_group: int = 64//4,
replace_stride_with_dilation: Optional[List[bool]] = None,
norm_layer: Optional[Callable[..., nn.Module]] = None
) -> None:
super().__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
self._norm_layer = norm_layer
self.inplanes = 64//4
self.dilation = 1
if replace_stride_with_dilation is None:
# each element in the tuple indicates if we should replace
# the 2x2 stride with a dilated convolution instead
replace_stride_with_dilation = [False, False, False]
if len(replace_stride_with_dilation) != 3:
raise ValueError("replace_stride_with_dilation should be None "
"or a 3-element tuple, got {}".format(replace_stride_with_dilation))
self.groups = groups
self.base_width = width_per_group
self.conv1 = nn.Conv2d(1, self.inplanes, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = norm_layer(self.inplanes)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64//4, layers[0])
self.layer2 = self._make_layer(block, 128//4, layers[1], stride=2,
dilate=replace_stride_with_dilation[0])
self.layer3 = self._make_layer(block, 256//4, layers[2], stride=2,
dilate=replace_stride_with_dilation[1])
self.layer4 = self._make_layer(block, 512//4, layers[3], stride=2,
dilate=replace_stride_with_dilation[2])
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = nn.Linear(512//4 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# Zero-initialize the last BN in each residual branch,
# so that the residual branch starts with zeros, and each residual block behaves like an identity.
# This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
if zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck):
nn.init.constant_(m.bn3.weight, 0) # type: ignore[arg-type]
elif isinstance(m, BasicBlock):
nn.init.constant_(m.bn2.weight, 0) # type: ignore[arg-type]
def _make_layer(self, block: Type[Union[BasicBlock, Bottleneck]], planes: int, blocks: int,
stride: int = 1, dilate: bool = False) -> nn.Sequential:
norm_layer = self._norm_layer
downsample = None
previous_dilation = self.dilation
if dilate:
self.dilation *= stride
stride = 1
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
norm_layer(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample, self.groups,
self.base_width, previous_dilation, norm_layer))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes, groups=self.groups,
base_width=self.base_width, dilation=self.dilation,
norm_layer=norm_layer))
return nn.Sequential(*layers)
def _forward_impl(self, x: torch.Tensor) -> torch.Tensor:
# See note [TorchScript super()]
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = torch.flatten(x, 1)
x = self.fc(x)
return x
def forward(self, x: torch.Tensor) -> torch.Tensor:
return self._forward_impl(x)
def resnet18_4() -> ResNet_4:
"""
ResNet18/4: ResNet18 with 1/4 the filters
    Note: contains ~0.83M params, which is close to the 0.8M params reported in the paper
"""
return ResNet_4(block=BasicBlock, layers=[2, 2, 2, 2])
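# Minimal smoke test (shapes assumed for illustration; not part of the original module).
# The single-channel input matches the depth-image modification described above.
if __name__ == "__main__":
    model = resnet18_4()
    depth_batch = torch.randn(2, 1, 224, 224)       # B x 1 x H x W depth images
    logits = model(depth_batch)                      # -> (2, 1000) with the default num_classes
    print(logits.shape)
    print(sum(p.numel() for p in model.parameters()))  # roughly the ~0.83M noted in the docstring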
|
the-stack_0_7329 | from hpc.autoscale.ccbindings.mock import MockClusterBinding
from hpc.autoscale.job.job import Job
from hpc.autoscale.job.schedulernode import SchedulerNode
from hpc.autoscale.node.nodemanager import new_node_manager
def setup_module() -> None:
SchedulerNode.ignore_hostnames = True
def test_placement_group() -> None:
node = SchedulerNode("", {})
node.exists = False
node.placement_group = ""
assert node.placement_group is None
node.placement_group = "a"
assert node.placement_group == "a"
node.placement_group = "0"
assert node.placement_group == "0"
try:
node.placement_group = "."
except Exception:
pass
assert node.placement_group == "0"
node.set_placement_group_escaped(".")
assert node.placement_group == "_"
node.exists = True
try:
node.placement_group = "123"
except Exception:
assert node.placement_group == "_"
def test_custom_node_attrs_and_node_config() -> None:
b = MockClusterBinding()
b.add_nodearray("htc", {}, software_configuration={"myscheduler": {"A": 1}})
b.add_bucket("htc", "Standard_F2", 10, 10)
b.add_node("htc-1", "htc")
node_mgr = new_node_manager({"_mock_bindings": b})
(existing_node,) = node_mgr.get_nodes()
try:
existing_node.node_attribute_overrides["willfail"] = 123
assert False
except TypeError:
pass
result = node_mgr.allocate({"exclusive": True}, node_count=2)
assert result
(node,) = [n for n in result.nodes if not n.exists]
assert node.software_configuration.get("test_thing") is None
node.node_attribute_overrides["Configuration"] = {"test_thing": "is set"}
assert node.software_configuration.get("test_thing") == "is set"
try:
node.software_configuration["willfail"] = 123
assert not node.software_configuration.get("willfail")
except TypeError:
pass
# we won't handle dict merges here.
assert node.software_configuration.get("myscheduler") == {"A": 1}
node.node_attribute_overrides["Configuration"] = {"myscheduler": {"B": 2}}
assert node.software_configuration.get("myscheduler") == {"B": 2}
# if you want to add to the existing software_configuration, use
# the node.software_configuration
node.node_attribute_overrides["Configuration"][
"myscsheduler"
] = node.software_configuration.get("myscheduler", {})
node.node_attribute_overrides["Configuration"]["myscheduler"]["B"] = 2
node.node_attribute_overrides["Configuration"] = {"myscheduler": {"A": 1, "B": 2}}
node.software_configuration["willsucceed"] = 123
node.exists = True
try:
node.software_configuration["willfail"] = 123
assert False
except TypeError:
pass
def test_clone() -> None:
orig = SchedulerNode("lnx0", {"ncpus": 4})
orig.metadata["exists_in_both"] = True
new = orig.clone()
assert new.available["ncpus"] == 4
assert new.resources["ncpus"] == 4
new.available["ncpus"] -= 1
assert new.available["ncpus"] == 3
assert orig.available["ncpus"] == 4
job = Job("1", {"ncpus": 2})
new.decrement(job._constraints, assignment_id=job.name)
assert new.available["ncpus"] == 1
assert orig.available["ncpus"] == 4
assert new.assignments == set(["1"])
assert orig.assignments == set()
orig.metadata["exists_in_orig"] = True
new.metadata["exists_in_new"] = True
assert orig.metadata["exists_in_both"] is True
assert "exists_in_new" not in orig.metadata
assert orig.metadata["exists_in_orig"] is True
assert new.metadata["exists_in_both"] is True
assert new.metadata["exists_in_new"] is True
assert "exists_in_orig" not in new.metadata
|
the-stack_0_7330 | """
Copyright (C) 2021 University of Luxembourg
Developed by Dr. Joshua Heneage Dawes.
Module containing classes for construction of iCFTL specifications.
Specifications are constructed hierarchically, as chains of objects.
The root object is always a Specification instance. This can contain configuration information for the specification.
The first object inside the Specification must be a Forall instance. This indicates universal quantification.
There can be arbitrarily many nested Forall instances.
The final instance in the chain must be a Constraint instance. This has recursive structure (based on the grammar of iCFTL).
"""
import logging
logger = logging.getLogger("VyPR")
from VyPR.Specifications.predicates import changes, calls, future
from VyPR.Specifications.constraints import (Constraint,
ConstraintBase,
ConcreteStateExpression,
TransitionExpression,
ConcreteStateVariable,
TransitionVariable,
Conjunction,
Disjunction,
Negation,
TimeBetween,
ValueInConcreteStateEqualsConstant,
ValueInConcreteStateLessThanConstant,
ValueInConcreteStateGreaterThanConstant,
DurationOfTransitionLessThanConstant,
DurationOfTransitionGreaterThanConstant,
ConcreteStateBeforeTransition,
ConcreteStateAfterTransition,
NextTransitionFromConcreteState,
NextConcreteStateFromConcreteState,
NextTransitionFromTransition,
NextConcreteStateFromTransition,
TimeBetweenLessThanConstant)
class Specification():
"""
The top-level class for specifications.
"""
def __init__(self):
logger.info("Instantiating new specification...")
self._quantifier = None
def __repr__(self):
"""
Construct the string representation recursively.
"""
return f"{self._quantifier}"
def get_quantifier(self):
return self._quantifier
def get_variable_to_obj_map(self) -> dict:
"""
Traverse the specification in order to construct a map
from each variable to the type of object it will hold
(either a ConcreteState or a Transition instance).
Note: this function should not try to serialise any objects from the specification
because serialisation of a Constraint instance requires calling of this function,
hence the result would be an infinite loop.
"""
logger.info("Deriving map variable names -> variable object from quantifiers")
# initialise an empty map
variable_to_obj = {}
# set the current object to be the top-level specification
current_obj = self
# iterate through the structure, using the type Constraint as a place to stop
logger.info("Traversing specification structure")
while type(current_obj) is not Constraint:
logger.info(f"current_obj = {type(current_obj)}")
# traverse depending on the type of the current object
if type(current_obj) is Specification:
current_obj = current_obj._quantifier
elif type(current_obj) is Forall:
# first, add to the map
# we check the type of the predicate so we know what kind of variable to instantiate
if type(current_obj._predicate) is changes:
variable_to_obj[current_obj._variable] = ConcreteStateVariable(current_obj._variable)
elif type(current_obj._predicate) is calls:
variable_to_obj[current_obj._variable] = TransitionVariable(current_obj._variable)
elif type(current_obj._predicate) is future:
if type(current_obj._predicate._predicate) is changes:
variable_to_obj[current_obj._variable] = ConcreteStateVariable(current_obj._variable)
elif type(current_obj._predicate._predicate) is calls:
variable_to_obj[current_obj._variable] = TransitionVariable(current_obj._variable)
# in the case of a quantifier, the two possibilities are
# that the next item to consider is a quantifier or a constraint
if current_obj._quantifier:
current_obj = current_obj._quantifier
else:
# if we arrive at a constraint, the loop
                    # will stop at the next iteration
current_obj = current_obj._constraint
logger.info(f"variable_to_obj = {variable_to_obj}")
return variable_to_obj
def get_variables(self) -> list:
"""
Traverse the specification in order to construct a list of variables.
The order of the list matches the order in which the variables occur in quantifiers.
"""
logger.info("Deriving list of variables from quantifiers")
# initialise an empty list
variables = []
# set the current object to be the top-level specification
current_obj = self
# iterate through the structure, using the type Constraint as a place to stop
logger.info("Traversing specification structure")
while type(current_obj) is not Constraint:
logger.info(f"current_obj = {type(current_obj)}")
# traverse depending on the type of the current object
if type(current_obj) is Specification:
current_obj = current_obj._quantifier
elif type(current_obj) is Forall:
# first, add to the map
# we check the type of the predicate so we know what kind of variable to instantiate
if type(current_obj._predicate) is changes:
variables.append(current_obj._variable)
elif type(current_obj._predicate) is calls:
variables.append(current_obj._variable)
elif type(current_obj._predicate) is future:
if type(current_obj._predicate._predicate) is changes:
variables.append(current_obj._variable)
elif type(current_obj._predicate._predicate) is calls:
variables.append(current_obj._variable)
# in the case of a quantifier, the two possibilities are
# that the next item to consider is a quantifier or a constraint
if current_obj._quantifier:
current_obj = current_obj._quantifier
else:
# if we arrive at a constraint, the loop
                    # will stop at the next iteration
current_obj = current_obj._constraint
return variables
def get_function_names_used(self):
"""
Traverse the specification and, each time a predicate is encountered, extract the function
name used and add to the list.
"""
# initialise an empty list of function names
all_function_names = []
# initialise stack wth top-level Specification object for traversal
stack = [self]
# process the stack while it is not empty
while len(stack) > 0:
# get the top element from the stack
top = stack.pop()
# based on the type, add child elements to the stack or add a new function name
# to the list
if type(top) in [changes, calls]:
all_function_names.append(top._during_function)
elif type(top) is future:
stack.append(top.get_predicate())
elif type(top) is Specification:
stack.append(top.get_quantifier())
elif type(top) is Forall:
# add the predicate to the stack
stack.append(top.get_predicate())
# also, carry on traversing the specification
if top.get_quantifier():
stack.append(top.get_quantifier())
else:
stack.append(top.get_constraint())
elif type(top) is Constraint:
stack.append(top.instantiate())
elif type(top) is Conjunction:
stack += top.get_conjuncts()
elif type(top) is Disjunction:
stack += top.get_disjuncts()
elif type(top) is Negation:
stack.append(top.get_operand())
elif type(top) in [ValueInConcreteStateEqualsConstant, ValueInConcreteStateLessThanConstant, ValueInConcreteStateGreaterThanConstant]:
stack.append(top.get_value_expression().get_concrete_state_expression())
elif type(top) in [ConcreteStateBeforeTransition, ConcreteStateAfterTransition]:
stack.append(top.get_transition_expression())
elif type(top) in [DurationOfTransitionLessThanConstant, DurationOfTransitionGreaterThanConstant]:
stack.append(top.get_transition_duration_obj().get_transition_expression())
elif type(top) in [NextTransitionFromConcreteState, NextConcreteStateFromConcreteState]:
stack.append(top.get_predicate())
elif type(top) in [NextTransitionFromTransition, NextConcreteStateFromTransition]:
stack.append(top.get_predicate())
elif type(top) is TimeBetweenLessThanConstant:
# traverse both arguments to the timeBetween operator
stack.append(top.get_time_between_expression().get_lhs_expression())
stack.append(top.get_time_between_expression().get_rhs_expression())
all_function_names = list(set(all_function_names))
return all_function_names
def get_constraint(self):
"""
Traverse the specification until a constraint is reached.
"""
# set the current object to be the first quantifier
current_obj = self._quantifier
# iterate through the structure, using the type Constraint as a place to stop
while type(current_obj) is not Constraint:
# traverse depending on the type of the current object
if type(current_obj) is Specification:
current_obj = current_obj.get_quantifier()
elif type(current_obj) is Forall:
# in the case of a quantifier, the two possibilities are
# that the next item to consider is a quantifier or a constraint
if current_obj.get_quantifier():
current_obj = current_obj.get_quantifier()
else:
# if we arrive at a constraint, the loop
                    # will stop at the next iteration
current_obj = current_obj.get_constraint()
return current_obj
def forall(self, **quantified_variable):
"""
        **quantified_variable must be a dictionary with only one key - the variable being given.
The value associated with the variable must be a Predicate instance.
"""
# if there is more than 1 variable, raise an exception
if len(quantified_variable.keys()) > 1:
raise Exception("A single variable must be given for each level of universal quantification.")
# check the type of the value
predicate = list(quantified_variable.values())[0]
if type(predicate) not in [changes, calls, future]:
raise Exception(f"Type '{type(predicate).__name__}' not supported.")
# make sure the predicate is complete
variable = list(quantified_variable.keys())[0]
if not predicate._during_function:
raise Exception(f"Predicate used for variable {variable} not complete")
logger.info(f"Adding quantifier with arguments {quantified_variable}")
# store the quantifier
self._quantifier = Forall(self, **quantified_variable)
return self._quantifier
class Forall():
"""
The class for representing universal quantification in specifications.
"""
def __init__(self, specification_obj: Specification, **quantified_variable):
self._specification_obj = specification_obj
# we will use the fact that either a constraint or a quantifier is stored
# to determine what the next thing we will see in the structure of the specification is
self._constraint = None
self._quantifier = None
# Note: .keys() does not give a structure with an ordering,
# so normally converting to a list would be problematic
# but here we know that there must be one element
self._variable = list(quantified_variable.keys())[0]
self._predicate = list(quantified_variable.values())[0]
def __repr__(self):
if self._constraint:
# this is the last quantifier, so the next thing to turn into a string is a constraint
return f"forall {self._variable} in {self._predicate}:\n {self._constraint}"
else:
# this is not the last quantifier - there is another nested inside
return f"forall {self._variable} in {self._predicate}:\n{self._quantifier}"
def get_specification_obj(self):
return self._specification_obj
def get_quantifier(self):
return self._quantifier
def get_constraint(self):
return self._constraint
def get_predicate(self):
return self._predicate
def get_variable(self):
return self._variable
def forall(self, **quantified_variable):
"""
        **quantified_variable must be a dictionary with only one key - the variable being given.
The value associated with the variable must be a Predicate instance.
"""
# if there is more than 1 variable, raise an exception
if len(quantified_variable.keys()) > 1:
raise Exception("A single variable must be given for each level of universal quantification.")
# check the type of the value - this is not the first quantifier,
# so the type must be future
predicate = list(quantified_variable.values())[0]
if type(predicate) is not future:
raise Exception(f"Type '{type(predicate).__name__}' not supported.")
# make sure the predicate is complete
variable = list(quantified_variable.keys())[0]
if not predicate._predicate._during_function:
raise Exception(f"Predicate used for variable {variable} not complete")
logger.info(f"Initialising new instance of Forall with quantified_variable = {quantified_variable}")
# store the quantifier
self._quantifier = Forall(self._specification_obj, **quantified_variable)
return self._quantifier
def check(self, expression):
"""
Instantiate a top-level Constraint instance with the given constraint lambda.
The lambda will later be called and supplied with the necessary variables during instrumentation and monitoring.
"""
# make sure constraint is a lambda
if type(expression) is not type(lambda:0):
raise Exception("Constraint given must be a lambda expression.")
logger.info("Setting self._constraint to new Constraint instance")
self._constraint = Constraint(self._specification_obj, expression)
return self._specification_obj
"""
Syntax sugar functions.
"""
def all_are_true(*conjuncts):
"""
Encode a conjunction.
"""
return Conjunction(*conjuncts)
def one_is_true(*disjuncts):
"""
Encode a disjunction.
"""
return Disjunction(*disjuncts)
def not_true(operand):
"""
Given an operand, instantiate either a single negation,
or another structure by propagating negation through to atomic constraints.
"""
if type(operand) is Conjunction:
# rewrite negation of conjunction as disjunction of negations
return Disjunction(*map(lambda conjunct : not_true(conjunct), operand.get_conjuncts()))
elif type(operand) is Disjunction:
# rewrite negation of disjunction as conjunction of negations
return Conjunction(*map(lambda disjunct : not_true(disjunct), operand.get_disjuncts()))
elif type(operand) is Negation:
# eliminate double negation
return operand.get_operand()
else:
# assume operand is atomic constraint
return Negation(operand)
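# For example, not_true(all_are_true(a, b)) is rewritten by De Morgan's laws into
# one_is_true(not_true(a), not_true(b)), and not_true(not_true(a)) collapses back to a;
# only genuinely atomic constraints end up wrapped in a Negation.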
def timeBetween(concrete_state_expression_1, concrete_state_expression_2):
return TimeBetween(concrete_state_expression_1, concrete_state_expression_2) |
the-stack_0_7331 | # -*- coding: utf-8 -*-
from .fixtures import fixture_data, Amount, Asset, Price
import unittest
class Testcases(unittest.TestCase):
def setUp(self):
fixture_data()
def test_init(self):
# self.assertEqual(1, 1)
Price("0.315 USD/GPH")
Price(1.0, "USD/GOLD")
Price(0.315, base="USD", quote="GPH")
Price(0.315, base=Asset("USD"), quote=Asset("GPH"))
Price(
{
"base": {"amount": 1, "asset_id": "1.3.0"},
"quote": {"amount": 10, "asset_id": "1.3.8"},
}
)
Price(
{
"receives": {"amount": 1, "asset_id": "1.3.0"},
"pays": {"amount": 10, "asset_id": "1.3.8"},
},
base_asset=Asset("1.3.0"),
)
Price(quote="10 GOLD", base="1 USD")
Price("10 GOLD", "1 USD")
Price(Amount("10 GOLD"), Amount("1 USD"))
def test_multiplication(self):
p1 = Price(10.0, "USD/GOLD")
p2 = Price(5.0, "EUR/USD")
p3 = p1 * p2
p4 = p3.as_base("GOLD")
self.assertEqual(p4["quote"]["symbol"], "EUR")
self.assertEqual(p4["base"]["symbol"], "GOLD")
        # 10 USD/GOLD * 5 EUR/USD = 50 EUR/GOLD = 0.02 GOLD/EUR
self.assertEqual(float(p4), 0.02)
# Inline multiplication
p5 = p1
p5 *= p2
p4 = p5.as_base("GOLD")
self.assertEqual(p4["quote"]["symbol"], "EUR")
self.assertEqual(p4["base"]["symbol"], "GOLD")
        # 10 USD/GOLD * 5 EUR/USD = 50 EUR/GOLD = 0.02 GOLD/EUR
self.assertEqual(float(p4), 0.02)
def test_div(self):
p1 = Price(10.0, "USD/GOLD")
p2 = Price(5.0, "USD/EUR")
# 10 USD/GOLD / 5 USD/EUR = 2 EUR/GOLD
p3 = p1 / p2
p4 = p3.as_base("EUR")
self.assertEqual(p4["base"]["symbol"], "EUR")
self.assertEqual(p4["quote"]["symbol"], "GOLD")
        # 10 USD/GOLD / 5 USD/EUR = 2 EUR/GOLD
self.assertEqual(float(p4), 2)
def test_div2(self):
p1 = Price(10.0, "USD/GOLD")
p2 = Price(5.0, "USD/GOLD")
# 10 USD/GOLD / 5 USD/EUR = 2 EUR/GOLD
p3 = p1 / p2
self.assertTrue(isinstance(p3, (float, int)))
self.assertEqual(float(p3), 2.0)
|
the-stack_0_7333 | import torch
import numpy as np
from baseline.utils import lookup_sentence, get_version
from torch.autograd import Variable
import torch.autograd
import torch.nn as nn
import torch.nn.functional
import math
import copy
PYT_MAJOR_VERSION = get_version(torch)
def sequence_mask(lengths):
lens = lengths.cpu()
max_len = torch.max(lens)
# 1 x T
row = torch.arange(0, max_len.item()).type_as(lens).view(1, -1)
# B x 1
col = lens.view(-1, 1)
# Broadcast to B x T, compares increasing number to max
mask = row < col
return mask
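# Example (assumed input): sequence_mask(torch.LongTensor([2, 4])) produces the
# B x T boolean mask
#   [[1, 1, 0, 0],
#    [1, 1, 1, 1]]
# where T is the longest length in the batch.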
def classify_bt(model, batch_time):
tensor = torch.from_numpy(batch_time) if type(batch_time) == np.ndarray else batch_time
probs = model(torch.autograd.Variable(tensor, requires_grad=False).cuda()).exp().data
probs.div_(torch.sum(probs))
results = []
batchsz = probs.size(0)
for b in range(batchsz):
outcomes = [(model.labels[id_i], prob_i) for id_i, prob_i in enumerate(probs[b])]
results.append(outcomes)
return results
def predict_seq_bt(model, x, xch, lengths):
x_t = torch.from_numpy(x) if type(x) == np.ndarray else x
xch_t = torch.from_numpy(xch) if type(xch) == np.ndarray else xch
len_v = torch.from_numpy(lengths) if type(lengths) == np.ndarray else lengths
x_v = torch.autograd.Variable(x_t, requires_grad=False).cuda()
xch_v = torch.autograd.Variable(xch_t, requires_grad=False).cuda()
#len_v = torch.autograd.Variable(len_t, requires_grad=False)
results = model((x_v, xch_v, len_v))
#print(results)
#if type(x) == np.ndarray:
# # results = results.cpu().numpy()
# # Fix this to not be greedy
# results = np.argmax(results, -1)
return results
def to_scalar(var):
# returns a python float
return var.view(-1).data.tolist()[0]
def argmax(vec):
# return the argmax as a python int
_, idx = torch.max(vec, 1)
return to_scalar(idx)
def log_sum_exp(vec):
max_score = vec[0, argmax(vec)]
max_score_broadcast = max_score.view(1, -1).expand(1, vec.size()[1])
return max_score + torch.log(torch.sum(torch.exp(vec - max_score_broadcast)))
class SequenceCriterion(nn.Module):
def __init__(self, LossFn=nn.NLLLoss):
super(SequenceCriterion, self).__init__()
self.crit = LossFn(ignore_index=0, size_average=False)
def forward(self, inputs, targets):
# This is BxT, which is what we want!
total_sz = targets.nelement()
loss = self.crit(inputs.view(total_sz, -1), targets.view(total_sz))
return loss
class StackedLSTMCell(nn.Module):
def __init__(self, num_layers, input_size, rnn_size, dropout):
super(StackedLSTMCell, self).__init__()
self.dropout = nn.Dropout(dropout)
self.num_layers = num_layers
self.layers = nn.ModuleList()
for i in range(num_layers):
self.layers.append(nn.LSTMCell(input_size=input_size, hidden_size=rnn_size, bias=False))
input_size = rnn_size
def forward(self, input, hidden):
h_0, c_0 = hidden
hs, cs = [], []
for i, layer in enumerate(self.layers):
h_i, c_i = layer(input, (h_0[i], c_0[i]))
input = h_i
if i != self.num_layers - 1:
input = self.dropout(input)
hs += [h_i]
cs += [c_i]
hs = torch.stack(hs)
cs = torch.stack(cs)
return input, (hs, cs)
class StackedGRUCell(nn.Module):
def __init__(self, num_layers, input_size, rnn_size, dropout):
super(StackedGRUCell, self).__init__()
self.dropout = nn.Dropout(dropout)
self.num_layers = num_layers
self.layers = nn.ModuleList()
for i in range(num_layers):
self.layers.append(nn.GRUCell(input_size=input_size, hidden_size=rnn_size))
input_size = rnn_size
def forward(self, input, hidden):
h_0 = hidden
hs = []
for i, layer in enumerate(self.layers):
h_i = layer(input, (h_0[i]))
input = h_i
            if i != self.num_layers - 1:
input = self.dropout(input)
hs += [h_i]
hs = torch.stack(hs)
return input, hs
def pytorch_rnn_cell(insz, hsz, rnntype, nlayers, dropout):
if rnntype == 'gru':
rnn = StackedGRUCell(nlayers, insz, hsz, dropout)
else:
rnn = StackedLSTMCell(nlayers, insz, hsz, dropout)
return rnn
def pytorch_embedding(x2vec, finetune=True):
dsz = x2vec.dsz
lut = nn.Embedding(x2vec.vsz + 1, dsz, padding_idx=0)
del lut.weight
lut.weight = nn.Parameter(torch.FloatTensor(x2vec.weights),
requires_grad=finetune)
return lut
def pytorch_activation(name="relu"):
if name == "tanh":
return nn.Tanh()
if name == "hardtanh":
return nn.Hardtanh()
if name == "prelu":
return nn.PReLU()
if name == "sigmoid":
return nn.Sigmoid()
if name == "log_sigmoid":
return nn.LogSigmoid()
return nn.ReLU()
def pytorch_conv1d(in_channels, out_channels, fsz, unif=0, padding=0, initializer=None):
c = nn.Conv1d(in_channels, out_channels, fsz, padding=padding)
if unif > 0:
c.weight.data.uniform_(-unif, unif)
elif initializer == "ortho":
nn.init.orthogonal(c.weight)
elif initializer == "he" or initializer == "kaiming":
nn.init.kaiming_uniform(c.weight)
else:
nn.init.xavier_uniform_(c.weight)
return c
def pytorch_linear(in_sz, out_sz, unif=0, initializer=None):
l = nn.Linear(in_sz, out_sz)
if unif > 0:
l.weight.data.uniform_(-unif, unif)
elif initializer == "ortho":
nn.init.orthogonal(l.weight)
elif initializer == "he" or initializer == "kaiming":
nn.init.kaiming_uniform(l.weight)
else:
nn.init.xavier_uniform_(l.weight)
l.bias.data.zero_()
return l
def pytorch_clone_module(module_, N):
return nn.ModuleList([copy.deepcopy(module_) for _ in range(N)])
def _cat_dir(h):
return torch.cat([h[0:h.size(0):2], h[1:h.size(0):2]], dim=-1)
class BiRNNWrapper(nn.Module):
def __init__(self, rnn, nlayers):
super(BiRNNWrapper, self).__init__()
self.rnn = rnn
self.nlayers = nlayers
def forward(self, seq):
output, hidden = self.rnn(seq)
if isinstance(hidden, tuple):
hidden = tuple(_cat_dir(h) for h in hidden)
else:
hidden = _cat_dir(hidden)
return output, hidden
def pytorch_rnn(insz, hsz, rnntype, nlayers, dropout):
if nlayers == 1:
dropout = 0.0
if rnntype == 'gru':
rnn = torch.nn.GRU(insz, hsz, nlayers, dropout=dropout)
elif rnntype == 'blstm':
rnn = torch.nn.LSTM(insz, hsz, nlayers, dropout=dropout, bidirectional=True)
rnn = BiRNNWrapper(rnn, nlayers)
else:
rnn = torch.nn.LSTM(insz, hsz, nlayers, dropout=dropout)
return rnn
class ParallelConv(nn.Module):
def __init__(self, insz, outsz, filtsz, activation_type, pdrop):
super(ParallelConv, self).__init__()
convs = []
outsz_filts = outsz
if type(outsz) == int:
outsz_filts = len(filtsz) * [outsz]
self.outsz = sum(outsz_filts)
for i, fsz in enumerate(filtsz):
pad = fsz//2
conv = nn.Sequential(
nn.Conv1d(insz, outsz_filts[i], fsz, padding=pad),
pytorch_activation(activation_type)
)
convs.append(conv)
# Add the module so its managed correctly
self.convs = nn.ModuleList(convs)
self.conv_drop = nn.Dropout(pdrop)
def forward(self, input_bct):
mots = []
for conv in self.convs:
# In Conv1d, data BxCxT, max over time
conv_out = conv(input_bct)
mot, _ = conv_out.max(2)
mots.append(mot)
mots = torch.cat(mots, 1)
return self.conv_drop(mots)
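# Illustrative shape check (sizes assumed): filters of widths 3, 4 and 5 over a
# B x C x T input, each max-pooled over time and concatenated along dim 1.
#   pc = ParallelConv(insz=300, outsz=100, filtsz=[3, 4, 5], activation_type='relu', pdrop=0.5)
#   feats = pc(torch.randn(8, 300, 20))  # -> shape (8, 300), i.e. len(filtsz) * outsz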
class Highway(nn.Module):
def __init__(self,
input_size):
super(Highway, self).__init__()
self.proj = nn.Linear(input_size, input_size)
self.transform = nn.Linear(input_size, input_size)
self.transform.bias.data.fill_(-2.0)
def forward(self, input):
proj_result = nn.functional.relu(self.proj(input))
proj_gate = nn.functional.sigmoid(self.transform(input))
gated = (proj_gate * proj_result) + ((1 - proj_gate) * input)
return gated
class LayerNorm(nn.Module):
"""
Applies Layer Normalization over a mini-batch of inputs as described in
the paper `Layer Normalization`_ .
.. math::
y = \frac{x - \mathrm{E}[x]}{ \sqrt{\mathrm{Var}[x]} + \epsilon} * \gamma + \beta
This is provided in pytorch's master, and can be replaced in the near future.
For the time, being, this code is adapted from:
http://nlp.seas.harvard.edu/2018/04/03/attention.html
https://github.com/pytorch/pytorch/pull/2019
"""
def __init__(self, num_features, eps=1e-6):
super(LayerNorm, self).__init__()
self.a = nn.Parameter(torch.ones(num_features))
self.b = nn.Parameter(torch.zeros(num_features))
self.eps = eps
    def forward(self, x):
        mean = x.mean(-1, keepdim=True)
        std = ((x - mean).pow(2).sum(-1, keepdim=True).div(x.size(-1) - 1) + self.eps).sqrt()
        return self.a * (x - mean) / (std + self.eps) + self.b
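# Example (values assumed): LayerNorm(4)(torch.randn(2, 4)) normalises each row of the
# 2 x 4 input to approximately zero mean and unit variance before the learned scale `a`
# and shift `b` are applied.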
def pytorch_lstm(insz, hsz, rnntype, nlayers, dropout, unif=0, batch_first=False, initializer=None):
if nlayers == 1:
dropout = 0.0
ndir = 2 if rnntype.startswith('b') else 1
#print('ndir: %d, rnntype: %s, nlayers: %d, dropout: %.2f, unif: %.2f' % (ndir, rnntype, nlayers, dropout, unif))
rnn = torch.nn.LSTM(insz, hsz, nlayers, dropout=dropout, bidirectional=True if ndir > 1 else False, batch_first=batch_first)#, bias=False)
if unif > 0:
for weight in rnn.parameters():
weight.data.uniform_(-unif, unif)
elif initializer == "ortho":
nn.init.orthogonal(rnn.weight_hh_l0)
nn.init.orthogonal(rnn.weight_ih_l0)
elif initializer == "he" or initializer == "kaiming":
nn.init.kaiming_uniform(rnn.weight_hh_l0)
nn.init.kaiming_uniform(rnn.weight_ih_l0)
else:
nn.init.xavier_uniform_(rnn.weight_hh_l0)
nn.init.xavier_uniform_(rnn.weight_ih_l0)
return rnn, ndir*hsz
def pytorch_prepare_optimizer(model, **kwargs):
mom = kwargs.get('mom', 0.9)
optim = kwargs.get('optim', 'sgd')
eta = kwargs.get('eta', kwargs.get('lr', 0.01))
decay_rate = float(kwargs.get('decay_rate', 0.0))
decay_type = kwargs.get('decay_type', None)
if optim == 'adadelta':
optimizer = torch.optim.Adadelta(model.parameters(), lr=eta)
elif optim == 'adam':
optimizer = torch.optim.Adam(model.parameters(), lr=eta)
elif optim == 'rmsprop':
optimizer = torch.optim.RMSprop(model.parameters(), lr=eta)
elif optim == 'asgd':
optimizer = torch.optim.ASGD(model.parameters(), lr=eta)
else:
optimizer = torch.optim.SGD(model.parameters(), lr=eta, momentum=mom)
scheduler = None
if decay_rate > 0.0 and decay_type is not None:
if decay_type == 'invtime':
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, 1, gamma=decay_rate)
return optimizer, scheduler
def append2seq(seq, modules):
for i, module in enumerate(modules):
seq.add_module('%s-%d' % (str(module).replace('.', 'dot'), i), module)
def tensor_max(tensor):
return tensor.max()
def tensor_shape(tensor):
return tensor.size()
def tensor_reverse_2nd(tensor):
idx = torch.LongTensor([i for i in range(tensor.size(1)-1, -1, -1)])
return tensor.index_select(1, idx)
def long_0_tensor_alloc(dims, dtype=None):
lt = long_tensor_alloc(dims)
lt.zero_()
return lt
def long_tensor_alloc(dims, dtype=None):
if type(dims) == int or len(dims) == 1:
return torch.LongTensor(dims)
return torch.LongTensor(*dims)
def prepare_src(model, tokens, mxlen=100):
src_vocab = model.get_src_vocab()
length = min(len(tokens), mxlen)
x = torch.LongTensor(length).zero_()
for j in range(length):
word = tokens[j]
if word not in src_vocab:
if word != '':
print(word)
idx = 0
else:
idx = src_vocab[word]
x[j] = idx
return torch.autograd.Variable(x.view(-1, 1))
#def beam_decode_tokens(model, src_tokens, K, idx2word, mxlen=50):
# src = prepare_src(model, src_tokens, mxlen)
# paths, scores = beam_decode(model, src, K)
# path_str = []
# for j, path in enumerate(paths):
# path_str.append([idx2word[i] for i in path])
# return path_str, scores
#return beam_decode(model, src, K)
def show_examples_pytorch(model, es, rlut1, rlut2, embed2, mxlen, sample, prob_clip, max_examples, reverse):
si = np.random.randint(0, len(es))
batch_dict = es[si]
src_array = batch_dict['src']
tgt_array = batch_dict['dst']
src_len = batch_dict['src_len']
if max_examples > 0:
max_examples = min(max_examples, src_array.size(0))
src_array = src_array[0:max_examples]
tgt_array = tgt_array[0:max_examples]
src_len = src_len[0:max_examples]
# TODO: fix this, check for GPU first
src_array = src_array.cuda()
for src_len_i, src_i, tgt_i in zip(src_len, src_array, tgt_array):
print('========================================================================')
src_len_i = torch.ones(1).fill_(src_len_i).type_as(src_len)
sent = lookup_sentence(rlut1, src_i.cpu().numpy(), reverse=reverse)
print('[OP] %s' % sent)
sent = lookup_sentence(rlut2, tgt_i.cpu().numpy())
print('[Actual] %s' % sent)
src_dict = {'src': torch.autograd.Variable(src_i.view(1, -1), requires_grad=False),
'src_len': torch.autograd.Variable(src_len_i, requires_grad=False)}
dst_i = model.run(src_dict)[0][0]
dst_i = [idx.item() for idx in dst_i]
sent = lookup_sentence(rlut2, dst_i)
print('Guess: %s' % sent)
print('------------------------------------------------------------------------')
# Some of this code is borrowed from here:
# https://github.com/rguthrie3/DeepLearningForNLPInPytorch
def argmax(vec):
# return the argmax as a python int
_, idx = torch.max(vec, 1)
return idx.data[0]
# Compute log sum exp in a numerically stable way for the forward algorithm
def log_sum_exp(vec):
max_score = vec[0, argmax(vec)]
max_score_broadcast = max_score.view(1, -1).expand(1, vec.size()[1])
return max_score + torch.log(torch.sum(torch.exp(vec - max_score_broadcast)))
def vec_log_sum_exp(vec, dim):
"""Vectorized version of log-sum-exp
:param vec: Vector
:param dim: What dimension to operate on
:return:
"""
max_scores, idx = torch.max(vec, dim, keepdim=True)
max_scores_broadcast = max_scores.expand_as(vec)
return max_scores + torch.log(torch.sum(torch.exp(vec - max_scores_broadcast), dim, keepdim=True))
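# Example (values assumed):
#   vec_log_sum_exp(torch.tensor([[1., 2., 3.]]), dim=1)
# returns tensor([[3.4076]]) since log(e^1 + e^2 + e^3) ~= 3.4076, with keepdim semantics.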
class CRF(nn.Module):
def __init__(self, n_tags, idxs=None):
"""Initialize the object.
:param n_tags: int The number of tags in your output (emission size)
:param idxs: Tuple(int. int) The index of the start and stop symbol
in emissions.
Note:
if idxs is none then the CRF adds these symbols to the emission
vectors and n_tags is assumed to be the number of output tags.
if idxs is not none then the first element is assumed to be the
start index and the second idx is assumed to be the end index. In
this case n_tags is assumed to include the start and end symbols.
"""
super(CRF, self).__init__()
if idxs is None:
self.start_idx = n_tags
self.end_idx = n_tags + 1
self.n_tags = n_tags + 2
self.add_ends = True
else:
self.start_idx, self.end_idx = idxs
self.n_tags = n_tags
self.add_ends = False
self.transitions = nn.Parameter(torch.Tensor(self.n_tags, self.n_tags).zero_())
@staticmethod
def _prep_input(input_):
ends = torch.Tensor(input_.size()[0], 2).fill_(-1000.).to(input_.device)
return torch.cat([input_, ends], dim=1)
def neg_log_loss(self, unary, tags):
if self.add_ends:
unary = CRF._prep_input(unary)
viterbi_score = self.forward(unary)
gold_score = self.score_sentence(unary, tags)
return viterbi_score - gold_score
def score_sentence(self, unary, tags):
# Gives the score of a provided tag sequence
score = torch.autograd.Variable(torch.Tensor([0]).cuda())
tags = torch.cat([torch.LongTensor([self.start_idx]).cuda(), tags])
for i, unary_t in enumerate(unary):
score = score + self.transitions[tags[i + 1], tags[i]] + unary_t[tags[i + 1]]
score = score + self.transitions[self.end_idx, tags[-1]]
return score
def forward(self, unary):
"""Vectorized forward algorithm for CRF layer
:param unary: The observations
:param transitions: The transitions
:param start_idx: The index of the start position
:param end_idx: The index of the end position
:return: Alphas
"""
# Do the forward algorithm to compute the partition function
init_alphas = torch.Tensor(1, self.n_tags).fill_(-1000.).to(unary.device)
# START_TAG has all of the score.
init_alphas[0][self.start_idx] = 0.
# Wrap in a variable so that we will get automatic backprop
alphas = torch.autograd.Variable(init_alphas)
# Iterate through the sentence
for t, unary_t in enumerate(unary):
emit_scores_transpose = unary_t.view(-1, 1)
next_tag_var = alphas + emit_scores_transpose + self.transitions
scores = vec_log_sum_exp(next_tag_var, 1).transpose(0, 1)
alphas = scores
terminal_var = alphas + self.transitions[self.end_idx]
alpha = log_sum_exp(terminal_var)
return alpha
def decode(self, unary):
if self.add_ends:
unary = CRF._prep_input(unary)
backpointers = []
inits = torch.Tensor(1, self.n_tags).fill_(-10000.).cuda()
inits[0][self.start_idx] = 0
# alphas at step i holds the viterbi variables for step i-1
alphas = torch.autograd.Variable(inits)
for unary_t in unary:
backpointers_t = [] # holds the backpointers for this step
viterbi_t = [] # holds the viterbi variables for this step
for tag in range(self.n_tags):
next_tag_var = alphas + self.transitions[tag]
best_tag_id = argmax(next_tag_var)
backpointers_t.append(best_tag_id)
viterbi_t.append(next_tag_var[0][best_tag_id])
if PYT_MAJOR_VERSION < 0.4:
alphas = (torch.cat(viterbi_t) + unary_t).view(1, -1)
else:
alphas = (torch.stack(viterbi_t, 0) + unary_t).view(1, -1)
backpointers.append(backpointers_t)
# Transition to STOP_TAG
terminal_var = alphas + self.transitions[self.end_idx]
best_tag_id = argmax(terminal_var)
path_score = terminal_var[0][best_tag_id]
# Follow the back pointers to decode the best path.
best_path = [best_tag_id]
for backpointers_t in reversed(backpointers):
best_tag_id = backpointers_t[best_tag_id]
best_path.append(best_tag_id)
        # Pop off the start tag (we don't want to return that to the caller)
start = best_path.pop()
assert start == self.start_idx
best_path.reverse()
return torch.LongTensor(best_path), path_score
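# Illustrative CRF usage (tensors and sizes assumed; note that score_sentence() and
# decode() allocate CUDA tensors as written, so a GPU is required):
#
#   crf = CRF(n_tags=5).cuda()                   # start/end symbols are appended internally
#   unary = torch.randn(12, 5).cuda()            # T x n_tags emission scores
#   tags = torch.randint(0, 5, (12,)).cuda()     # gold tag sequence
#   loss = crf.neg_log_loss(unary, tags)
#   best_path, path_score = crf.decode(unary)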
|
the-stack_0_7334 |
import sys, os
sys.path.append(os.path.join(os.path.expanduser("~"), "chipfish"))
import app as chipfish
from glbase_wrapper import location, glload, genelist
draw = 'pdf'
c = chipfish.app()
c.startup(os.path.expanduser("../trk_TEs.txt"))
#annot = glload(os.path.expanduser('~/hg38/hg38_ensembl_v95_enst.glb'))
#annot = annot.renameKey('name', 'gene_symbol')
gllocs = glload('../../te_discovery/te_transcripts/transcript_table_merged.mapped.glb')
locs = ['SOX2', 'NANOG', 'SALL4', 'LIN28A', 'LIN28B', 'SALL1', 'POU5F1A',
'DPPA2', 'DPPA3', 'DPPA5A', 'PRDM14', 'JARID2', 'SALL2', 'SALL3', 'TCF3',
'ZFP42', 'C9ORF135', 'ST6GAL1', 'LRP4', 'MSTO1', 'PRODH',# From Pontis et al., 2019 CSC
'ESRRB', 'LIN28A', 'LIN28B', 'PRDM14',
'POU5F1', 'SOX2', 'NANOG', 'NCOR1', 'NCOR2', 'SALL1', 'KLF4', 'SALL1', 'NR5A1', 'NR5A2', 'NR5A3',
'KLF2', 'KLF5', 'LEFTY1', 'LEFTY2', 'FGF4', 'NODAL',
# Naive-specific genes;
'ESRRB', 'TFCP2L1', 'ZFP42', 'MT1H', 'DPPA3', 'DPPA4', 'DPPA5', 'ZNF486', 'CR1L', 'DNMT3L', 'ZNF534',
# Diffenretiation genes;
'GATA2', 'GATA3', 'GATA4', 'SOX17', 'CER1',
# 2C genes
'NR0B1', 'CDX2', 'DUXF3',
# Down in naive:
'SFRP1', 'ZIC2', 'KDR', 'OTX2', 'DUSP6', 'SPRY4', 'THY1', 'ID3', 'ZIC5',
# MA Gang's possibles:
'HNRNPK', 'DDX1', 'DDX50', 'BRCA2', 'BRCA1', 'TOP1', 'RAP3', 'TRIM25', 'HNRNPU',
# Headline genes from Ihry et al., Cell Rep.
        # Significantly down-regulate POU5F1, P<0.05
'TGFBR2',
'GGCT',
'MAML2',
'POU5F1',
'TGFBR1',
'TMEM107',
'ZNF469',
'SMARCA4',
'PROK2',
'PAQR7',
'MINDY4',
# Odd stuff:
'LIN28B-AS1',
# Wang Jichang paper, Fig 3a. These ones have HERV spliced into their message
'SCGB3A2', 'NCR1', 'KLKB1', 'IL34', 'PLP1', 'ESRG', 'RPL39L',
]
locs = genelist(loadable_list=[{'gene_symbol': k} for k in locs])
ll = locs.map(genelist=gllocs, key='gene_symbol')
print(ll)
for gene in ll:
print(gene['name'])
c.draw.setLocation(loc=gene['loc'].expand(len(gene['loc']) / 10))
scale = 1.0
if draw == 'svg':
scale = 0.3
c.draw.exportImage("%s/%s_%s.%s" % (draw, gene['name'], gene['transcript_id'], draw), scale=scale, type=draw) # Cannot draw png and svg interleaved for some reason.
|
the-stack_0_7336 | # -*- coding: utf-8 -*-
"""
Capacity scaling minimum cost flow algorithm.
"""
__author__ = """ysitu <[email protected]>"""
# Copyright (C) 2014 ysitu <[email protected]>
# All rights reserved.
# BSD license.
__all__ = ['capacity_scaling']
from itertools import chain
from math import log
import networkx as nx
from ...utils import BinaryHeap
from ...utils import generate_unique_node
from ...utils import not_implemented_for
from ...utils import arbitrary_element
def _detect_unboundedness(R):
"""Detect infinite-capacity negative cycles.
"""
s = generate_unique_node()
G = nx.DiGraph()
G.add_nodes_from(R)
# Value simulating infinity.
inf = R.graph['inf']
# True infinity.
f_inf = float('inf')
for u in R:
for v, e in R[u].items():
# Compute the minimum weight of infinite-capacity (u, v) edges.
w = f_inf
for k, e in e.items():
if e['capacity'] == inf:
w = min(w, e['weight'])
if w != f_inf:
G.add_edge(u, v, weight=w)
if nx.negative_edge_cycle(G):
raise nx.NetworkXUnbounded(
'Negative cost cycle of infinite capacity found. '
'Min cost flow may be unbounded below.')
@not_implemented_for('undirected')
def _build_residual_network(G, demand, capacity, weight):
"""Build a residual network and initialize a zero flow.
"""
if sum(G.node[u].get(demand, 0) for u in G) != 0:
raise nx.NetworkXUnfeasible("Sum of the demands should be 0.")
R = nx.MultiDiGraph()
R.add_nodes_from((u, {'excess': -G.node[u].get(demand, 0),
'potential': 0}) for u in G)
inf = float('inf')
# Detect selfloops with infinite capacities and negative weights.
for u, v, e in G.selfloop_edges(data=True):
if e.get(weight, 0) < 0 and e.get(capacity, inf) == inf:
raise nx.NetworkXUnbounded(
'Negative cost cycle of infinite capacity found. '
'Min cost flow may be unbounded below.')
# Extract edges with positive capacities. Self loops excluded.
if G.is_multigraph():
edge_list = [(u, v, k, e)
for u, v, k, e in G.edges(data=True, keys=True)
if u != v and e.get(capacity, inf) > 0]
else:
edge_list = [(u, v, 0, e) for u, v, e in G.edges(data=True)
if u != v and e.get(capacity, inf) > 0]
# Simulate infinity with the larger of the sum of absolute node imbalances
# the sum of finite edge capacities or any positive value if both sums are
# zero. This allows the infinite-capacity edges to be distinguished for
# unboundedness detection and directly participate in residual capacity
# calculation.
inf = max(sum(abs(R.node[u]['excess']) for u in R),
2 * sum(e[capacity] for u, v, k, e in edge_list
if capacity in e and e[capacity] != inf)) or 1
for u, v, k, e in edge_list:
r = min(e.get(capacity, inf), inf)
w = e.get(weight, 0)
# Add both (u, v) and (v, u) into the residual network marked with the
# original key. (key[1] == True) indicates the (u, v) is in the
# original network.
R.add_edge(u, v, key=(k, True), capacity=r, weight=w, flow=0)
R.add_edge(v, u, key=(k, False), capacity=0, weight=-w, flow=0)
# Record the value simulating infinity.
R.graph['inf'] = inf
_detect_unboundedness(R)
return R
def _build_flow_dict(G, R, capacity, weight):
"""Build a flow dictionary from a residual network.
"""
inf = float('inf')
flow_dict = {}
if G.is_multigraph():
for u in G:
flow_dict[u] = {}
for v, es in G[u].items():
flow_dict[u][v] = dict(
# Always saturate negative selfloops.
(k, (0 if (u != v or e.get(capacity, inf) <= 0 or
e.get(weight, 0) >= 0) else e[capacity]))
for k, e in es.items())
for v, es in R[u].items():
if v in flow_dict[u]:
flow_dict[u][v].update((k[0], e['flow'])
for k, e in es.items()
if e['flow'] > 0)
else:
for u in G:
flow_dict[u] = dict(
# Always saturate negative selfloops.
(v, (0 if (u != v or e.get(capacity, inf) <= 0 or
e.get(weight, 0) >= 0) else e[capacity]))
for v, e in G[u].items())
flow_dict[u].update((v, e['flow']) for v, es in R[u].items()
for e in es.values() if e['flow'] > 0)
return flow_dict
def capacity_scaling(G, demand='demand', capacity='capacity', weight='weight',
heap=BinaryHeap):
r"""Find a minimum cost flow satisfying all demands in digraph G.
This is a capacity scaling successive shortest augmenting path algorithm.
G is a digraph with edge costs and capacities and in which nodes
have demand, i.e., they want to send or receive some amount of
flow. A negative demand means that the node wants to send flow, a
    positive demand means that the node wants to receive flow. A flow on
    the digraph G satisfies all demands if the net flow into each node
is equal to the demand of that node.
Parameters
----------
G : NetworkX graph
DiGraph or MultiDiGraph on which a minimum cost flow satisfying all
demands is to be found.
demand : string
Nodes of the graph G are expected to have an attribute demand
that indicates how much flow a node wants to send (negative
demand) or receive (positive demand). Note that the sum of the
        demands should be 0 otherwise the problem is not feasible. If
this attribute is not present, a node is considered to have 0
demand. Default value: 'demand'.
capacity : string
Edges of the graph G are expected to have an attribute capacity
that indicates how much flow the edge can support. If this
attribute is not present, the edge is considered to have
infinite capacity. Default value: 'capacity'.
weight : string
Edges of the graph G are expected to have an attribute weight
that indicates the cost incurred by sending one unit of flow on
that edge. If not present, the weight is considered to be 0.
Default value: 'weight'.
heap : class
Type of heap to be used in the algorithm. It should be a subclass of
:class:`MinHeap` or implement a compatible interface.
If a stock heap implementation is to be used, :class:`BinaryHeap` is
        recommended over :class:`PairingHeap` for Python implementations without
optimized attribute accesses (e.g., CPython) despite a slower
asymptotic running time. For Python implementations with optimized
attribute accesses (e.g., PyPy), :class:`PairingHeap` provides better
performance. Default value: :class:`BinaryHeap`.
Returns
-------
flowCost : integer
Cost of a minimum cost flow satisfying all demands.
flowDict : dictionary
If G is a digraph, a dict-of-dicts keyed by nodes such that
flowDict[u][v] is the flow on edge (u, v).
If G is a MultiDiGraph, a dict-of-dicts-of-dicts keyed by nodes
so that flowDict[u][v][key] is the flow on edge (u, v, key).
Raises
------
NetworkXError
This exception is raised if the input graph is not directed,
not connected.
NetworkXUnfeasible
This exception is raised in the following situations:
* The sum of the demands is not zero. Then, there is no
flow satisfying all demands.
* There is no flow satisfying all demand.
NetworkXUnbounded
This exception is raised if the digraph G has a cycle of
negative cost and infinite capacity. Then, the cost of a flow
satisfying all demands is unbounded below.
Notes
-----
This algorithm does not work if edge weights are floating-point numbers.
See also
--------
:meth:`network_simplex`
Examples
--------
A simple example of a min cost flow problem.
>>> import networkx as nx
>>> G = nx.DiGraph()
>>> G.add_node('a', demand = -5)
>>> G.add_node('d', demand = 5)
>>> G.add_edge('a', 'b', weight = 3, capacity = 4)
>>> G.add_edge('a', 'c', weight = 6, capacity = 10)
>>> G.add_edge('b', 'd', weight = 1, capacity = 9)
>>> G.add_edge('c', 'd', weight = 2, capacity = 5)
>>> flowCost, flowDict = nx.capacity_scaling(G)
>>> flowCost
24
>>> flowDict # doctest: +SKIP
{'a': {'c': 1, 'b': 4}, 'c': {'d': 1}, 'b': {'d': 4}, 'd': {}}
It is possible to change the name of the attributes used for the
algorithm.
>>> G = nx.DiGraph()
>>> G.add_node('p', spam = -4)
>>> G.add_node('q', spam = 2)
>>> G.add_node('a', spam = -2)
>>> G.add_node('d', spam = -1)
>>> G.add_node('t', spam = 2)
>>> G.add_node('w', spam = 3)
>>> G.add_edge('p', 'q', cost = 7, vacancies = 5)
>>> G.add_edge('p', 'a', cost = 1, vacancies = 4)
>>> G.add_edge('q', 'd', cost = 2, vacancies = 3)
>>> G.add_edge('t', 'q', cost = 1, vacancies = 2)
>>> G.add_edge('a', 't', cost = 2, vacancies = 4)
>>> G.add_edge('d', 'w', cost = 3, vacancies = 4)
>>> G.add_edge('t', 'w', cost = 4, vacancies = 1)
>>> flowCost, flowDict = nx.capacity_scaling(G, demand = 'spam',
... capacity = 'vacancies',
... weight = 'cost')
>>> flowCost
37
>>> flowDict # doctest: +SKIP
{'a': {'t': 4}, 'd': {'w': 2}, 'q': {'d': 1}, 'p': {'q': 2, 'a': 2}, 't': {'q': 1, 'w': 1}, 'w': {}}
"""
R = _build_residual_network(G, demand, capacity, weight)
inf = float('inf')
# Account cost of negative selfloops.
flow_cost = sum(
0 if e.get(capacity, inf) <= 0 or e.get(weight, 0) >= 0
else e[capacity] * e[weight]
for u, v, e in G.selfloop_edges(data=True))
    # Determine the maximum edge capacity.
wmax = max(chain([-inf],
(e['capacity'] for u, v, e in R.edges(data=True))))
if wmax == -inf:
# Residual network has no edges.
return flow_cost, _build_flow_dict(G, R, capacity, weight)
R_node = R.node
R_succ = R.succ
delta = 2 ** int(log(wmax, 2))
while delta >= 1:
# Saturate Δ-residual edges with negative reduced costs to achieve
# Δ-optimality.
for u in R:
p_u = R_node[u]['potential']
for v, es in R_succ[u].items():
for k, e in es.items():
flow = e['capacity'] - e['flow']
if e['weight'] - p_u + R_node[v]['potential'] < 0:
flow = e['capacity'] - e['flow']
if flow >= delta:
e['flow'] += flow
R_succ[v][u][(k[0], not k[1])]['flow'] -= flow
R_node[u]['excess'] -= flow
R_node[v]['excess'] += flow
# Determine the Δ-active nodes.
S = set()
T = set()
S_add = S.add
S_remove = S.remove
T_add = T.add
T_remove = T.remove
for u in R:
excess = R_node[u]['excess']
if excess >= delta:
S_add(u)
elif excess <= -delta:
T_add(u)
# Repeatedly augment flow from S to T along shortest paths until
# Δ-feasibility is achieved.
while S and T:
s = arbitrary_element(S)
t = None
            # Search for a shortest path in terms of reduced costs from s to
# any t in T in the Δ-residual network.
d = {}
pred = {s: None}
h = heap()
h_insert = h.insert
h_get = h.get
h_insert(s, 0)
while h:
u, d_u = h.pop()
d[u] = d_u
if u in T:
# Path found.
t = u
break
p_u = R_node[u]['potential']
for v, es in R_succ[u].items():
if v in d:
continue
wmin = inf
# Find the minimum-weighted (u, v) Δ-residual edge.
for k, e in es.items():
if e['capacity'] - e['flow'] >= delta:
w = e['weight']
if w < wmin:
wmin = w
kmin = k
emin = e
if wmin == inf:
continue
# Update the distance label of v.
d_v = d_u + wmin - p_u + R_node[v]['potential']
if h_insert(v, d_v):
pred[v] = (u, kmin, emin)
if t is not None:
# Augment Δ units of flow from s to t.
while u != s:
v = u
u, k, e = pred[v]
e['flow'] += delta
R_succ[v][u][(k[0], not k[1])]['flow'] -= delta
# Account node excess and deficit.
R_node[s]['excess'] -= delta
R_node[t]['excess'] += delta
if R_node[s]['excess'] < delta:
S_remove(s)
if R_node[t]['excess'] > -delta:
T_remove(t)
# Update node potentials.
d_t = d[t]
for u, d_u in d.items():
R_node[u]['potential'] -= d_u - d_t
else:
# Path not found.
S_remove(s)
delta //= 2
if any(R.node[u]['excess'] != 0 for u in R):
raise nx.NetworkXUnfeasible('No flow satisfying all demands.')
# Calculate the flow cost.
for u in R:
for v, es in R_succ[u].items():
for e in es.values():
flow = e['flow']
if flow > 0:
flow_cost += flow * e['weight']
return flow_cost, _build_flow_dict(G, R, capacity, weight)
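# --- Illustrative usage sketch (not part of the original module) ---
# The docstring above only demonstrates a DiGraph; this hypothetical snippet
# shows the dict-of-dicts-of-dicts flow returned for a MultiDiGraph, where
# parallel edges are distinguished by their keys. It assumes a stock NetworkX
# installation exposing nx.capacity_scaling.
if __name__ == '__main__':
    import networkx as nx
    G = nx.MultiDiGraph()
    G.add_node('s', demand=-3)
    G.add_node('t', demand=3)
    G.add_edge('s', 't', key='cheap', weight=1, capacity=2)
    G.add_edge('s', 't', key='pricey', weight=5, capacity=5)
    flow_cost, flow_dict = nx.capacity_scaling(G)
    print(flow_cost)            # 2 * 1 + 1 * 5 = 7
    print(flow_dict['s']['t'])  # e.g. {'cheap': 2, 'pricey': 1}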
|
the-stack_0_7337 | # -*- coding: utf-8 -*-
from nose.plugins.attrib import attr
from unittest import TestCase
import os
class BookTestCase(TestCase):
@attr("skip")
def test_scaffold(self):
assert False
# create temp directory
directory = "../var/tests/book"
if not os.path.exists(directory):
os.makedirs(directory)
# unpack the pattern with some settings
os.system("cd ../var/tests/book && ../../../bin/diamond scaffold --no-interactive analysis")
# assert
assert os.stat("../var/tests/book/Makefile")
# run the makefile
os.system("cd ../var/tests/book && make")
# test for certain files to be built
assert os.stat("../var/tests/book/.build/mybook.pdf")
|
the-stack_0_7338 | import numpy as np
def levenshtein_distance(string1, string2):
m, n = len(string1), len(string2)
matrix = np.zeros((m + 1, n + 1), dtype=np.int32)
# source prefixes can be transformed into empty string by
# dropping all characters
for i in range(m + 1):
matrix[i, 0] = i
# target prefixes can be reached from empty source prefix
# by inserting every character
for j in range(n + 1):
matrix[0, j] = j
    for j in range(1, n + 1):
        for i in range(1, m + 1):
if string1[i - 1] == string2[j - 1]:
substitution_cost = 0
else:
substitution_cost = 1
matrix[i, j] = min(matrix[i - 1, j] + 1, # deletion
matrix[i, j - 1] + 1, # insertion
matrix[i - 1, j - 1] + substitution_cost) # substitution
return matrix[m, n]
|
the-stack_0_7340 | # *****************************************************************************
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************\
import torch
import random
import common.layers as layers
from common.utils import load_wav_to_torch, load_filepaths_and_text, to_gpu
class MelAudioLoader(torch.utils.data.Dataset):
"""
1) loads audio,text pairs
2) computes mel-spectrograms from audio files.
"""
def __init__(self, dataset_path, audiopaths_and_text, args):
self.audiopaths_and_text = load_filepaths_and_text(dataset_path, audiopaths_and_text)
self.max_wav_value = args.max_wav_value
self.sampling_rate = args.sampling_rate
self.stft = layers.TacotronSTFT(
args.filter_length, args.hop_length, args.win_length,
args.n_mel_channels, args.sampling_rate, args.mel_fmin,
args.mel_fmax)
self.segment_length = args.segment_length
random.seed(1234)
random.shuffle(self.audiopaths_and_text)
def get_mel_audio_pair(self, filename):
audio, sampling_rate = load_wav_to_torch(filename)
if sampling_rate != self.stft.sampling_rate:
raise ValueError("{} {} SR doesn't match target {} SR".format(
sampling_rate, self.stft.sampling_rate))
# Take segment
if audio.size(0) >= self.segment_length:
max_audio_start = audio.size(0) - self.segment_length
audio_start = random.randint(0, max_audio_start)
audio = audio[audio_start:audio_start+self.segment_length]
else:
audio = torch.nn.functional.pad(
audio, (0, self.segment_length - audio.size(0)), 'constant').data
audio = audio / self.max_wav_value
audio_norm = audio.unsqueeze(0)
audio_norm = torch.autograd.Variable(audio_norm, requires_grad=False)
melspec = self.stft.mel_spectrogram(audio_norm)
melspec = melspec.squeeze(0)
return (melspec, audio, len(audio))
def __getitem__(self, index):
return self.get_mel_audio_pair(self.audiopaths_and_text[index][0])
def __len__(self):
return len(self.audiopaths_and_text)
def batch_to_gpu(batch):
x, y, len_y = batch
x = to_gpu(x).float()
y = to_gpu(y).float()
len_y = to_gpu(torch.sum(len_y))
return ((x, y), y, len_y)
|
the-stack_0_7341 | from typing import List, Optional, Tuple
import torch
import torch.nn as nn
from kornia.utils.helpers import _torch_svd_cast
__all__ = ["zca_mean", "zca_whiten", "linear_transform", "ZCAWhitening"]
class ZCAWhitening(nn.Module):
r"""Compute the ZCA whitening matrix transform and the mean vector and applies the transform to the data.
The data tensor is flattened, and the mean :math:`\mathbf{\mu}`
and covariance matrix :math:`\mathbf{\Sigma}` are computed from
the flattened data :math:`\mathbf{X} \in \mathbb{R}^{N \times D}`, where
:math:`N` is the sample size and :math:`D` is flattened dimensionality
(e.g. for a tensor with size 5x3x2x2 :math:`N = 5` and :math:`D = 12`). The ZCA whitening
transform is given by:
.. math::
\mathbf{X}_{\text{zca}} = (\mathbf{X - \mu})(US^{-\frac{1}{2}}U^T)^T
where :math:`U` are the eigenvectors of :math:`\Sigma` and :math:`S` contain the corresponding
eigenvalues of :math:`\Sigma`. After the transform is applied, the output is reshaped to same shape.
args:
dim: Determines the dimension that represents the samples axis.
eps: a small number used for numerical stability.
        unbiased: Whether to use the unbiased estimate of the covariance matrix.
compute_inv: Compute the inverse transform matrix.
detach_transforms: Detaches gradient from the ZCA fitting.
shape:
- x: :math:`(D_0,...,D_{\text{dim}},...,D_N)` is a batch of N-D tensors.
- x_whiten: :math:`(D_0,...,D_{\text{dim}},...,D_N)` same shape as input.
.. note::
See a working example `here <https://colab.sandbox.google.com/github/kornia/tutorials/
blob/master/source/zca_whitening.ipynb>`__.
Examples:
>>> x = torch.tensor([[0,1],[1,0],[-1,0],[0,-1]], dtype = torch.float32)
>>> zca = ZCAWhitening().fit(x)
>>> x_whiten = zca(x)
>>> zca = ZCAWhitening()
>>> x_whiten = zca(x, include_fit = True) # Includes the fitting step
>>> x_whiten = zca(x) # Can run now without the fitting set
>>> # Enable backprop through ZCA fitting process
>>> zca = ZCAWhitening(detach_transforms = False)
>>> x_whiten = zca(x, include_fit = True) # Includes the fitting step
Note:
This implementation uses :py:meth:`~torch.svd` which yields NaNs in the backwards step
if the singular values are not unique. See `here <https://pytorch.org/docs/stable/torch.html#torch.svd>`_ for
more information.
References:
[1] `Stanford PCA & ZCA whitening tutorial <http://ufldl.stanford.edu/tutorial/unsupervised/PCAWhitening/>`_
"""
def __init__(
self,
dim: int = 0,
eps: float = 1e-6,
unbiased: bool = True,
detach_transforms: bool = True,
compute_inv: bool = False,
) -> None:
super().__init__()
self.dim = dim
self.eps = eps
self.unbiased = unbiased
self.detach_transforms = detach_transforms
self.compute_inv = compute_inv
self.fitted = False
def fit(self, x: torch.Tensor):
r"""Fit ZCA whitening matrices to the data.
Args:
x: Input data.
returns:
Returns a fitted ZCAWhiten object instance.
"""
T, mean, T_inv = zca_mean(x, self.dim, self.unbiased, self.eps, self.compute_inv)
self.mean_vector: torch.Tensor = mean
self.transform_matrix: torch.Tensor = T
if T_inv is None:
self.transform_inv: Optional[torch.Tensor] = torch.empty([0])
else:
self.transform_inv = T_inv
if self.detach_transforms:
self.mean_vector = self.mean_vector.detach()
self.transform_matrix = self.transform_matrix.detach()
self.transform_inv = self.transform_inv.detach()
self.fitted = True
return self
def forward(self, x: torch.Tensor, include_fit: bool = False) -> torch.Tensor:
r"""Apply the whitening transform to the data.
Args:
x: Input data.
include_fit: Indicates whether to fit the data as part of the forward pass.
Returns:
The transformed data.
"""
if include_fit:
self.fit(x)
if not self.fitted:
raise RuntimeError("Needs to be fitted first before running. Please call fit or set include_fit to True.")
x_whiten = linear_transform(x, self.transform_matrix, self.mean_vector, self.dim)
return x_whiten
def inverse_transform(self, x: torch.Tensor) -> torch.Tensor:
r"""Apply the inverse transform to the whitened data.
Args:
x: Whitened data.
Returns:
Original data.
"""
if not self.fitted:
raise RuntimeError("Needs to be fitted first before running. Please call fit or set include_fit to True.")
if not self.compute_inv:
raise RuntimeError("Did not compute inverse ZCA. Please set compute_inv to True")
mean_inv: torch.Tensor = -self.mean_vector.mm(self.transform_matrix) # type: ignore
y = linear_transform(x, self.transform_inv, mean_inv) # type: ignore
return y
def zca_mean(
inp: torch.Tensor, dim: int = 0, unbiased: bool = True, eps: float = 1e-6, return_inverse: bool = False
) -> Tuple[torch.Tensor, torch.Tensor, Optional[torch.Tensor]]:
r"""Compute the ZCA whitening matrix and mean vector.
The output can be used with :py:meth:`~kornia.color.linear_transform`.
See :class:`~kornia.color.ZCAWhitening` for details.
Args:
inp: input data tensor.
dim: Specifies the dimension that serves as the samples dimension.
unbiased: Whether to use the unbiased estimate of the covariance matrix.
eps: a small number used for numerical stability.
return_inverse: Whether to return the inverse ZCA transform.
Shapes:
- inp: :math:`(D_0,...,D_{\text{dim}},...,D_N)` is a batch of N-D tensors.
- transform_matrix: :math:`(\Pi_{d=0,d\neq \text{dim}}^N D_d, \Pi_{d=0,d\neq \text{dim}}^N D_d)`
- mean_vector: :math:`(1, \Pi_{d=0,d\neq \text{dim}}^N D_d)`
- inv_transform: same shape as the transform matrix
Returns:
A tuple containing the ZCA matrix and the mean vector. If return_inverse is set to True,
then it returns the inverse ZCA matrix, otherwise it returns None.
.. note::
See a working example `here <https://colab.sandbox.google.com/github/kornia/tutorials/
blob/master/source/zca_whitening.ipynb>`__.
Examples:
>>> x = torch.tensor([[0,1],[1,0],[-1,0],[0,-1]], dtype = torch.float32)
>>> transform_matrix, mean_vector,_ = zca_mean(x) # Returns transformation matrix and data mean
>>> x = torch.rand(3,20,2,2)
>>> transform_matrix, mean_vector, inv_transform = zca_mean(x, dim = 1, return_inverse = True)
>>> # transform_matrix.size() equals (12,12) and the mean vector.size equal (1,12)
"""
if not isinstance(inp, torch.Tensor):
raise TypeError(f"Input type is not a torch.Tensor. Got {type(inp)}")
if not isinstance(eps, float):
raise TypeError(f"eps type is not a float. Got{type(eps)}")
if not isinstance(unbiased, bool):
raise TypeError(f"unbiased type is not bool. Got{type(unbiased)}")
if not isinstance(dim, int):
raise TypeError(f"Argument 'dim' must be of type int. Got {type(dim)}")
if not isinstance(return_inverse, bool):
raise TypeError(f"Argument return_inverse must be of type bool {type(return_inverse)}")
inp_size = inp.size()
if dim >= len(inp_size) or dim < -len(inp_size):
raise IndexError(
"Dimension out of range (expected to be in range of [{},{}], but got {}".format(
-len(inp_size), len(inp_size) - 1, dim
)
)
if dim < 0:
dim = len(inp_size) + dim
feat_dims = torch.cat([torch.arange(0, dim), torch.arange(dim + 1, len(inp_size))])
new_order: List[int] = torch.cat([torch.tensor([dim]), feat_dims]).tolist()
inp_permute = inp.permute(new_order)
N = inp_size[dim]
feature_sizes = torch.tensor(inp_size[0:dim] + inp_size[dim + 1::])
num_features: int = int(torch.prod(feature_sizes).item())
mean: torch.Tensor = torch.mean(inp_permute, dim=0, keepdim=True)
mean = mean.reshape((1, num_features))
inp_center_flat: torch.Tensor = inp_permute.reshape((N, num_features)) - mean
cov = inp_center_flat.t().mm(inp_center_flat)
if unbiased:
cov = cov / float(N - 1)
else:
cov = cov / float(N)
U, S, _ = _torch_svd_cast(cov)
S = S.reshape(-1, 1)
S_inv_root: torch.Tensor = torch.rsqrt(S + eps)
T: torch.Tensor = (U).mm(S_inv_root * U.t())
T_inv: Optional[torch.Tensor] = None
if return_inverse:
T_inv = (U).mm(torch.sqrt(S + eps) * U.t())
return T, mean, T_inv
def zca_whiten(inp: torch.Tensor, dim: int = 0, unbiased: bool = True, eps: float = 1e-6) -> torch.Tensor:
r"""Apply ZCA whitening transform.
See :class:`~kornia.color.ZCAWhitening` for details.
Args:
inp: input data tensor.
dim: Specifies the dimension that serves as the samples dimension.
unbiased: Whether to use the unbiased estimate of the covariance matrix.
eps: a small number used for numerical stability.
Returns:
Whiten Input data.
.. note::
See a working example `here <https://colab.sandbox.google.com/github/kornia/tutorials/
blob/master/source/zca_whitening.ipynb>`__.
Examples:
>>> x = torch.tensor([[0,1],[1,0],[-1,0]], dtype = torch.float32)
>>> zca_whiten(x)
tensor([[ 0.0000, 1.1547],
[ 1.0000, -0.5773],
[-1.0000, -0.5773]])
"""
if not isinstance(inp, torch.Tensor):
raise TypeError(f"Input type is not a torch.Tensor. Got {type(inp)}")
if not isinstance(eps, float):
raise TypeError(f"eps type is not a float. Got{type(eps)}")
if not isinstance(unbiased, bool):
raise TypeError(f"unbiased type is not bool. Got{type(unbiased)}")
if not isinstance(dim, int):
raise TypeError(f"Argument 'dim' must be of type int. Got {type(dim)}")
transform, mean, _ = zca_mean(inp, dim, unbiased, eps, False)
inp_whiten = linear_transform(inp, transform, mean, dim)
return inp_whiten
def linear_transform(
inp: torch.Tensor, transform_matrix: torch.Tensor, mean_vector: torch.Tensor, dim: int = 0
) -> torch.Tensor:
r"""
Given a transformation matrix and a mean vector, this function will flatten
the input tensor along the given dimension and subtract the mean vector
from it. Then the dot product with the transformation matrix will be computed
and then the resulting tensor is reshaped to the original input shape.
.. math::
\mathbf{X}_{T} = (\mathbf{X - \mu})(T)
Args:
inp: Input data :math:`X`.
transform_matrix: Transform matrix :math:`T`.
mean_vector: mean vector :math:`\mu`.
dim: Batch dimension.
Shapes:
- inp: :math:`(D_0,...,D_{\text{dim}},...,D_N)` is a batch of N-D tensors.
- transform_matrix: :math:`(\Pi_{d=0,d\neq \text{dim}}^N D_d, \Pi_{d=0,d\neq \text{dim}}^N D_d)`
- mean_vector: :math:`(1, \Pi_{d=0,d\neq \text{dim}}^N D_d)`
Returns:
Transformed data.
Example:
>>> # Example where dim = 3
>>> inp = torch.ones((10,3,4,5))
>>> transform_mat = torch.ones((10*3*4,10*3*4))
>>> mean = 2*torch.ones((1,10*3*4))
>>> out = linear_transform(inp, transform_mat, mean, 3)
>>> print(out.shape, out.unique()) # Should a be (10,3,4,5) tensor of -120s
torch.Size([10, 3, 4, 5]) tensor([-120.])
>>> # Example where dim = 0
>>> inp = torch.ones((10,2))
>>> transform_mat = torch.ones((2,2))
>>> mean = torch.zeros((1,2))
>>> out = linear_transform(inp, transform_mat, mean)
>>> print(out.shape, out.unique()) # Should a be (10,2) tensor of 2s
torch.Size([10, 2]) tensor([2.])
"""
inp_size = inp.size()
if dim >= len(inp_size) or dim < -len(inp_size):
raise IndexError(
"Dimension out of range (expected to be in range of [{},{}], but got {}".format(
-len(inp_size), len(inp_size) - 1, dim
)
)
if dim < 0:
dim = len(inp_size) + dim
feat_dims = torch.cat([torch.arange(0, dim), torch.arange(dim + 1, len(inp_size))])
perm = torch.cat([torch.tensor([dim]), feat_dims])
perm_inv = torch.argsort(perm)
new_order: List[int] = perm.tolist()
inv_order: List[int] = perm_inv.tolist()
feature_sizes = torch.tensor(inp_size[0:dim] + inp_size[dim + 1::])
num_features: int = int(torch.prod(feature_sizes).item())
inp_permute = inp.permute(new_order)
inp_flat = inp_permute.reshape((-1, num_features))
inp_center = inp_flat - mean_vector
inp_transformed = inp_center.mm(transform_matrix)
inp_transformed = inp_transformed.reshape(inp_permute.size())
inp_transformed = inp_transformed.permute(inv_order)
return inp_transformed
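# --- Illustrative round-trip sketch (not shipped with kornia) ---
# Fits the whitening transform on random data, whitens it, and recovers the
# original samples via inverse_transform; the tolerance is loose because of
# the eps regularization inside zca_mean.
if __name__ == '__main__':
    x = torch.randn(200, 8)
    zca = ZCAWhitening(compute_inv=True).fit(x)
    x_white = zca(x)
    x_back = zca.inverse_transform(x_white)
    print(torch.allclose(x, x_back, atol=1e-3))  # expected: True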
|
the-stack_0_7342 | import collections
import claripy
class SimVariable(object):
__slots__ = ['ident', 'name', 'region', 'category']
def __init__(self, ident=None, name=None, region=None, category=None):
"""
:param ident: A unique identifier provided by user or the program. Usually a string.
:param str name: Name of this variable.
"""
self.ident = ident
self.name = name
self.region = region if region is not None else ""
self.category = category
@property
def phi(self):
return False
class SimConstantVariable(SimVariable):
__slots__ = ['value', '_hash']
def __init__(self, ident=None, value=None, region=None):
super(SimConstantVariable, self).__init__(ident=ident, region=region)
self.value = value
self._hash = None
def __repr__(self):
s = "<%s|const %s>" % (self.region, self.value)
return s
def __eq__(self, other):
if not isinstance(other, SimConstantVariable):
return False
if self.value is None or other.value is None:
# they may or may not represent the same constant. return not equal to be safe
return False
return self.ident == other.ident and self.value == other.value and self.region == other.region
def __hash__(self):
if self._hash is None:
self._hash = hash(('const', self.value, self.ident, self.region, self.ident))
return self._hash
class SimTemporaryVariable(SimVariable):
__slots__ = ['tmp_id', '_hash']
def __init__(self, tmp_id):
SimVariable.__init__(self)
self.tmp_id = tmp_id
self._hash = None
def __repr__(self):
s = "<tmp %d>" % (self.tmp_id)
return s
def __hash__(self):
if self._hash is None:
self._hash = hash('tmp_%d' % (self.tmp_id))
return self._hash
def __eq__(self, other):
if isinstance(other, SimTemporaryVariable):
return hash(self) == hash(other)
return False
class SimRegisterVariable(SimVariable):
__slots__ = ['reg', 'size', '_hash']
def __init__(self, reg_offset, size, ident=None, name=None, region=None, category=None):
SimVariable.__init__(self, ident=ident, name=name, region=region, category=category)
self.reg = reg_offset
self.size = size
self._hash = None
def __repr__(self):
ident_str = "[%s]" % self.ident if self.ident else ""
region_str = hex(self.region) if isinstance(self.region, int) else self.region
phi_str = ("phi(%s)|" % (",".join(v.ident for v in self.variables))) if self.phi else "" #pylint:disable=no-member
s = "<%s%s%s|Reg %s, %sB>" % (phi_str, region_str, ident_str, self.reg, self.size)
return s
def __hash__(self):
if self._hash is None:
self._hash = hash(('reg', self.region, self.reg, self.size, self.ident))
return self._hash
def __eq__(self, other):
if isinstance(other, SimRegisterVariable):
return self.ident == other.ident and \
self.name == other.name and \
self.reg == other.reg and \
self.size == other.size and \
self.region == other.region and \
self.phi == other.phi
return False
class SimRegisterVariablePhi(SimRegisterVariable):
__slots__ = ['variables', '_hash']
def __init__(self, ident=None, name=None, region=None, variables=None):
var = next(iter(variables))
reg_offset = var.reg
size = var.size
super(SimRegisterVariablePhi, self).__init__(reg_offset, size, ident=ident, name=name, region=region)
self.variables = set(variables)
self._hash = None
def __hash__(self):
if self._hash is None:
self._hash = hash((self.name, self.region, self.size, self.ident, tuple(self.variables)))
return self._hash
def __eq__(self, other):
if type(other) is not SimRegisterVariablePhi:
return False
return self.ident == other.ident and \
self.variables == other.variables and \
self.name == other.name and \
self.region == other.region and \
self.size == other.size
@property
def phi(self):
return True
class SimMemoryVariable(SimVariable):
__slots__ = ['addr', 'size', '_hash']
def __init__(self, addr, size, ident=None, name=None, region=None, category=None):
SimVariable.__init__(self, ident=ident, name=name, region=region, category=category)
self.addr = addr
if isinstance(size, claripy.ast.BV) and not size.symbolic:
# Convert it to a concrete number
size = size._model_concrete.value
self.size = size
self._hash = None
def __repr__(self):
if type(self.size) is int:
size = '%d' % self.size
else:
size = '%s' % self.size
if type(self.addr) is int:
s = "<%s|Mem %#x %s>" % (self.region, self.addr, size)
else:
s = "<%s|Mem %s %s>" % (self.region, self.addr, size)
return s
def __hash__(self):
if self._hash is not None:
return self._hash
if isinstance(self.addr, AddressWrapper):
addr_hash = hash(self.addr)
elif type(self.addr) is int:
addr_hash = self.addr
elif self.addr._model_concrete is not self.addr:
addr_hash = hash(self.addr._model_concrete)
elif self.addr._model_vsa is not self.addr:
addr_hash = hash(self.addr._model_vsa)
elif self.addr._model_z3 is not self.addr:
addr_hash = hash(self.addr._model_z3)
else:
addr_hash = hash(self.addr)
self._hash = hash((addr_hash, hash(self.size), self.ident))
return self._hash
def __eq__(self, other):
if isinstance(other, SimMemoryVariable):
return self.ident == other.ident and \
self.addr == other.addr and \
self.name == other.name and \
self.size == other.size and \
self.phi == other.phi
return False
class SimMemoryVariablePhi(SimMemoryVariable):
__slots__ = ['variables', '_hash']
def __init__(self, ident=None, name=None, region=None, variables=None):
var = next(iter(variables))
addr = var.addr
size = var.size
super(SimMemoryVariablePhi, self).__init__(addr, size, ident=ident, name=name, region=region)
self.variables = set(variables)
self._hash = None
def __hash__(self):
if self._hash is None:
self._hash = hash((self.name, self.region, self.size, self.ident, tuple(self.variables)))
return self._hash
def __eq__(self, other):
if type(other) is not SimMemoryVariablePhi:
return False
return self.ident == other.ident and \
self.variables == other.variables and \
self.addr == other.addr and \
self.name == other.name and \
self.region == other.region and \
self.size == other.size
@property
def phi(self):
return True
class SimStackVariable(SimMemoryVariable):
__slots__ = ['base', 'offset']
def __init__(self, offset, size, base='sp', base_addr=None, ident=None, name=None, region=None, category=None):
if offset > 0x1000000 and isinstance(offset, int):
# I don't think any positive stack offset will be greater than that...
# convert it to a negative number
mask = (1 << offset.bit_length()) - 1
offset = - ((0 - offset) & mask)
if base_addr is not None:
addr = offset + base_addr
else:
# TODO: this is not optimal
addr = offset
super(SimStackVariable, self).__init__(addr, size, ident=ident, name=name, region=region, category=category)
self.base = base
self.offset = offset
def __repr__(self):
if type(self.size) is int:
size = '%d' % self.size
else:
size = '%s' % self.size
prefix = "%s(stack)" % self.name if self.name is not None else "Stack"
ident = "[%s]" % self.ident if self.ident else ""
region_str = hex(self.region) if isinstance(self.region, int) else self.region
phi_str = "phi|" if self.phi else ""
if type(self.offset) is int:
if self.offset < 0:
offset = "%#x" % self.offset
elif self.offset > 0:
offset = "+%#x" % self.offset
else:
offset = ""
s = "<%s%s%s|%s %s%s, %s B>" % (phi_str, region_str, ident, prefix, self.base, offset, size)
else:
s = "<%s%s%s|%s %s%s, %s B>" % (phi_str, region_str, ident, prefix, self.base, self.addr, size)
return s
def __eq__(self, other):
if type(other) is not SimStackVariable:
return False
return self.ident == other.ident and \
self.name == other.name and \
self.base == other.base and \
self.offset == other.offset and \
self.size == other.size and \
self.phi == other.phi
def __hash__(self):
return hash((self.ident, self.name, self.base, self.offset, self.size, self.phi))
class SimStackVariablePhi(SimStackVariable):
__slots__ = ['variables', '_hash']
def __init__(self, ident=None, name=None, region=None, variables=None):
var = next(iter(variables))
offset = var.addr
size = var.size
super(SimStackVariablePhi, self).__init__(offset, size, ident=ident, name=name, region=region)
self.variables = set(variables)
self._hash = None
def __hash__(self):
if self._hash is None:
self._hash = hash((self.name, self.region, self.size, self.ident, tuple(self.variables)))
return self._hash
def __eq__(self, other):
if type(other) is not SimStackVariablePhi:
return False
return self.ident == other.ident and \
self.variables == other.variables and \
self.addr == other.addr and \
self.name == other.name and \
self.region == other.region and \
self.size == other.size
@property
def phi(self):
return True
class SimVariableSet(collections.abc.MutableSet):
"""
A collection of SimVariables.
"""
def __init__(self):
self.register_variables = set()
# For the sake of performance optimization, all elements in register_variables must be concrete integers which
# representing register offsets..
# There shouldn't be any problem apart from GetI/PutI instructions. We simply ignore them for now.
# TODO: Take care of register offsets that are not aligned to (arch.bytes)
# TODO: arch.bits/what? That number has no power here anymore.
self.register_variable_offsets = set()
# memory_variables holds SimMemoryVariable objects
self.memory_variables = set()
# For the sake of performance, we have another set that stores memory addresses of memory_variables
self.memory_variable_addresses = set()
def add(self, item):
if type(item) is SimRegisterVariable:
if not self.contains_register_variable(item):
self.add_register_variable(item)
elif type(item) is SimMemoryVariable:
if not self.contains_memory_variable(item):
self.add_memory_variable(item)
else:
# TODO:
raise Exception('WTF')
def add_register_variable(self, reg_var):
self.register_variables.add(reg_var)
self.register_variable_offsets.add(reg_var.reg)
def add_memory_variable(self, mem_var):
self.memory_variables.add(mem_var)
base_address = mem_var.addr.address # Dealing with AddressWrapper
for i in range(mem_var.size):
self.memory_variable_addresses.add(base_address + i)
def discard(self, item):
if type(item) is SimRegisterVariable:
if self.contains_register_variable(item):
self.discard_register_variable(item)
elif isinstance(item, SimMemoryVariable):
if self.contains_memory_variable(item):
self.discard_memory_variable(item)
else:
# TODO:
raise Exception('')
def discard_register_variable(self, reg_var):
self.register_variables.remove(reg_var)
self.register_variable_offsets.remove(reg_var.reg)
def discard_memory_variable(self, mem_var):
self.memory_variables.remove(mem_var)
for i in range(mem_var.size):
self.memory_variable_addresses.remove(mem_var.addr.address + i)
def __len__(self):
return len(self.register_variables) + len(self.memory_variables)
def __iter__(self):
for i in self.register_variables: yield i
for i in self.memory_variables: yield i
def add_memory_variables(self, addrs, size):
for a in addrs:
var = SimMemoryVariable(a, size)
self.add_memory_variable(var)
def copy(self):
s = SimVariableSet()
s.register_variables |= self.register_variables
s.register_variable_offsets |= self.register_variable_offsets
s.memory_variables |= self.memory_variables
s.memory_variable_addresses |= self.memory_variable_addresses
return s
def complement(self, other):
"""
Calculate the complement of `self` and `other`.
:param other: Another SimVariableSet instance.
:return: The complement result.
"""
s = SimVariableSet()
s.register_variables = self.register_variables - other.register_variables
s.register_variable_offsets = self.register_variable_offsets - other.register_variable_offsets
s.memory_variables = self.memory_variables - other.memory_variables
s.memory_variable_addresses = self.memory_variable_addresses - other.memory_variable_addresses
return s
def contains_register_variable(self, reg_var):
reg_offset = reg_var.reg
# TODO: Make sure reg_offset is aligned to machine-word length
return reg_offset in self.register_variable_offsets
def contains_memory_variable(self, mem_var):
a = mem_var.addr
if type(a) in (tuple, list): a = a[-1]
return a in self.memory_variable_addresses
def __ior__(self, other):
# other must be a SimVariableSet
self.register_variables |= other.register_variables
self.register_variable_offsets |= other.register_variable_offsets
self.memory_variables |= other.memory_variables
self.memory_variable_addresses |= other.memory_variable_addresses
def __contains__(self, item):
if type(item) is SimRegisterVariable:
return self.contains_register_variable(item)
elif type(item) is SimMemoryVariable:
# TODO: Make it better!
return self.contains_memory_variable(item)
else:
__import__('ipdb').set_trace()
raise Exception("WTF is this variable?")
from .storage.memory import AddressWrapper
|
the-stack_0_7343 | """Definitions for command-line (Click) commands for invoking Annif
operations and printing the results to console."""
import collections
import os.path
import re
import sys
import click
import click_log
from flask import current_app
from flask.cli import FlaskGroup, ScriptInfo
import annif
import annif.corpus
import annif.parallel
import annif.project
import annif.registry
from annif.project import Access
from annif.suggestion import SuggestionFilter, ListSuggestionResult
from annif.exception import ConfigurationException, NotSupportedException
logger = annif.logger
click_log.basic_config(logger)
cli = FlaskGroup(create_app=annif.create_app, add_version_option=False)
cli = click.version_option(message='%(version)s')(cli)
def get_project(project_id):
"""
Helper function to get a project by ID and bail out if it doesn't exist"""
try:
return annif.registry.get_project(project_id,
min_access=Access.private)
except ValueError:
click.echo(
"No projects found with id \'{0}\'.".format(project_id),
err=True)
sys.exit(1)
def open_documents(paths, docs_limit):
"""Helper function to open a document corpus from a list of pathnames,
each of which is either a TSV file or a directory of TXT files. The
corpus will be returned as an instance of DocumentCorpus or
LimitingDocumentCorpus."""
def open_doc_path(path):
"""open a single path and return it as a DocumentCorpus"""
if os.path.isdir(path):
return annif.corpus.DocumentDirectory(path, require_subjects=True)
return annif.corpus.DocumentFile(path)
if len(paths) == 0:
logger.warning('Reading empty file')
docs = open_doc_path(os.path.devnull)
elif len(paths) == 1:
docs = open_doc_path(paths[0])
else:
corpora = [open_doc_path(path) for path in paths]
docs = annif.corpus.CombinedCorpus(corpora)
if docs_limit is not None:
docs = annif.corpus.LimitingDocumentCorpus(docs, docs_limit)
return docs
def parse_backend_params(backend_param, project):
"""Parse a list of backend parameters given with the --backend-param
option into a nested dict structure"""
backend_params = collections.defaultdict(dict)
for beparam in backend_param:
backend, param = beparam.split('.', 1)
key, val = param.split('=', 1)
validate_backend_params(backend, beparam, project)
backend_params[backend][key] = val
return backend_params
def validate_backend_params(backend, beparam, project):
if backend != project.config['backend']:
raise ConfigurationException(
            'The backend {} in CLI option "-b {}" does not match the project'
            ' backend {}.'
.format(backend, beparam, project.config['backend']))
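# --- Illustrative sketch (hypothetical helper, not part of annif) ---
# Shows what parse_backend_params() produces for a
# "-b <backend>.<parameter>=<value>" option; a minimal stand-in object replaces
# a real AnnifProject, whose only attribute used here is config['backend'].
def _example_parse_backend_params():
    class _FakeProject:
        config = {'backend': 'tfidf'}
    params = parse_backend_params(('tfidf.limit=100',), _FakeProject())
    return dict(params)  # -> {'tfidf': {'limit': '100'}}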
BATCH_MAX_LIMIT = 15
def generate_filter_batches(subjects):
import annif.eval
filter_batches = collections.OrderedDict()
for limit in range(1, BATCH_MAX_LIMIT + 1):
for threshold in [i * 0.05 for i in range(20)]:
hit_filter = SuggestionFilter(subjects, limit, threshold)
batch = annif.eval.EvaluationBatch(subjects)
filter_batches[(limit, threshold)] = (hit_filter, batch)
return filter_batches
def set_project_config_file_path(ctx, param, value):
"""Override the default path or the path given in env by CLI option"""
with ctx.ensure_object(ScriptInfo).load_app().app_context():
if value:
current_app.config['PROJECTS_FILE'] = value
def common_options(f):
"""Decorator to add common options for all CLI commands"""
f = click.option(
'-p', '--projects', help='Set path to projects.cfg',
type=click.Path(dir_okay=False, exists=True),
callback=set_project_config_file_path, expose_value=False,
is_eager=True)(f)
return click_log.simple_verbosity_option(logger)(f)
def backend_param_option(f):
"""Decorator to add an option for CLI commands to override BE parameters"""
return click.option(
'--backend-param', '-b', multiple=True,
help='Override backend parameter of the config file. ' +
'Syntax: "-b <backend>.<parameter>=<value>".')(f)
@cli.command('list-projects')
@common_options
@click_log.simple_verbosity_option(logger, default='ERROR')
def run_list_projects():
"""
List available projects.
"""
template = "{0: <25}{1: <45}{2: <10}{3: <7}"
header = template.format(
"Project ID", "Project Name", "Language", "Trained")
click.echo(header)
click.echo("-" * len(header))
for proj in annif.registry.get_projects(
min_access=Access.private).values():
click.echo(template.format(
proj.project_id, proj.name, proj.language, str(proj.is_trained)))
@cli.command('show-project')
@click.argument('project_id')
@common_options
def run_show_project(project_id):
"""
Show information about a project.
"""
proj = get_project(project_id)
click.echo(f'Project ID: {proj.project_id}')
click.echo(f'Project Name: {proj.name}')
click.echo(f'Language: {proj.language}')
click.echo(f'Access: {proj.access.name}')
click.echo(f'Trained: {proj.is_trained}')
click.echo(f'Modification time: {proj.modification_time}')
@cli.command('clear')
@click.argument('project_id')
@common_options
def run_clear_project(project_id):
"""
Initialize the project to its original, untrained state.
"""
proj = get_project(project_id)
proj.remove_model_data()
@cli.command('loadvoc')
@click.argument('project_id')
@click.argument('subjectfile', type=click.Path(exists=True, dir_okay=False))
@common_options
def run_loadvoc(project_id, subjectfile):
"""
Load a vocabulary for a project.
"""
proj = get_project(project_id)
if annif.corpus.SubjectFileSKOS.is_rdf_file(subjectfile):
# SKOS/RDF file supported by rdflib
subjects = annif.corpus.SubjectFileSKOS(subjectfile, proj.language)
else:
# probably a TSV file
subjects = annif.corpus.SubjectFileTSV(subjectfile)
proj.vocab.load_vocabulary(subjects, proj.language)
@cli.command('train')
@click.argument('project_id')
@click.argument('paths', type=click.Path(exists=True), nargs=-1)
@click.option('--cached/--no-cached', '-c/-C', default=False,
help='Reuse preprocessed training data from previous run')
@click.option('--docs-limit', '-d', default=None,
type=click.IntRange(0, None),
help='Maximum number of documents to use')
@click.option('--jobs',
'-j',
default=0,
help='Number of parallel jobs (0 means choose automatically)')
@backend_param_option
@common_options
def run_train(project_id, paths, cached, docs_limit, jobs, backend_param):
"""
Train a project on a collection of documents.
"""
proj = get_project(project_id)
backend_params = parse_backend_params(backend_param, proj)
if cached:
if len(paths) > 0:
raise click.UsageError(
"Corpus paths cannot be given when using --cached option.")
documents = 'cached'
else:
documents = open_documents(paths, docs_limit)
proj.train(documents, backend_params, jobs)
@cli.command('learn')
@click.argument('project_id')
@click.argument('paths', type=click.Path(exists=True), nargs=-1)
@click.option('--docs-limit', '-d', default=None,
type=click.IntRange(0, None),
help='Maximum number of documents to use')
@backend_param_option
@common_options
def run_learn(project_id, paths, docs_limit, backend_param):
"""
Further train an existing project on a collection of documents.
"""
proj = get_project(project_id)
backend_params = parse_backend_params(backend_param, proj)
documents = open_documents(paths, docs_limit)
proj.learn(documents, backend_params)
@cli.command('suggest')
@click.argument('project_id')
@click.option('--limit', '-l', default=10, help='Maximum number of subjects')
@click.option('--threshold', '-t', default=0.0, help='Minimum score threshold')
@backend_param_option
@common_options
def run_suggest(project_id, limit, threshold, backend_param):
"""
Suggest subjects for a single document from standard input.
"""
project = get_project(project_id)
text = sys.stdin.read()
backend_params = parse_backend_params(backend_param, project)
hit_filter = SuggestionFilter(project.subjects, limit, threshold)
hits = hit_filter(project.suggest(text, backend_params))
for hit in hits.as_list(project.subjects):
click.echo(
"<{}>\t{}\t{}".format(
hit.uri,
'\t'.join(filter(None, (hit.label, hit.notation))),
hit.score))
@cli.command('index')
@click.argument('project_id')
@click.argument('directory', type=click.Path(exists=True, file_okay=False))
@click.option(
'--suffix',
'-s',
default='.annif',
help='File name suffix for result files')
@click.option('--force/--no-force', '-f/-F', default=False,
help='Force overwriting of existing result files')
@click.option('--limit', '-l', default=10, help='Maximum number of subjects')
@click.option('--threshold', '-t', default=0.0, help='Minimum score threshold')
@backend_param_option
@common_options
def run_index(project_id, directory, suffix, force,
limit, threshold, backend_param):
"""
Index a directory with documents, suggesting subjects for each document.
Write the results in TSV files with the given suffix.
"""
project = get_project(project_id)
backend_params = parse_backend_params(backend_param, project)
hit_filter = SuggestionFilter(project.subjects, limit, threshold)
for docfilename, dummy_subjectfn in annif.corpus.DocumentDirectory(
directory, require_subjects=False):
with open(docfilename, encoding='utf-8-sig') as docfile:
text = docfile.read()
subjectfilename = re.sub(r'\.txt$', suffix, docfilename)
if os.path.exists(subjectfilename) and not force:
click.echo(
"Not overwriting {} (use --force to override)".format(
subjectfilename))
continue
with open(subjectfilename, 'w', encoding='utf-8') as subjfile:
results = project.suggest(text, backend_params)
for hit in hit_filter(results).as_list(project.subjects):
line = "<{}>\t{}\t{}".format(
hit.uri,
'\t'.join(filter(None, (hit.label, hit.notation))),
hit.score)
click.echo(line, file=subjfile)
@cli.command('eval')
@click.argument('project_id')
@click.argument('paths', type=click.Path(exists=True), nargs=-1)
@click.option('--limit', '-l', default=10, help='Maximum number of subjects')
@click.option('--threshold', '-t', default=0.0, help='Minimum score threshold')
@click.option('--docs-limit', '-d', default=None,
type=click.IntRange(0, None),
help='Maximum number of documents to use')
@click.option(
'--results-file',
'-r',
type=click.File(
'w',
encoding='utf-8',
errors='ignore',
lazy=True),
help="""Specify file in order to write non-aggregated results per subject.
File directory must exist, existing file will be overwritten.""")
@click.option('--jobs',
'-j',
default=1,
help='Number of parallel jobs (0 means all CPUs)')
@backend_param_option
@common_options
def run_eval(
project_id,
paths,
limit,
threshold,
docs_limit,
results_file,
jobs,
backend_param):
"""
Analyze documents and evaluate the result.
Compare the results of automated indexing against a gold standard. The
path may be either a TSV file with short documents or a directory with
documents in separate files.
"""
project = get_project(project_id)
backend_params = parse_backend_params(backend_param, project)
import annif.eval
eval_batch = annif.eval.EvaluationBatch(project.subjects)
if results_file:
try:
print('', end='', file=results_file)
click.echo('Writing per subject evaluation results to {!s}'.format(
results_file.name))
except Exception as e:
raise NotSupportedException(
"cannot open results-file for writing: " + str(e))
docs = open_documents(paths, docs_limit)
jobs, pool_class = annif.parallel.get_pool(jobs)
project.initialize(parallel=True)
psmap = annif.parallel.ProjectSuggestMap(
project.registry, [project_id], backend_params, limit, threshold)
with pool_class(jobs) as pool:
for hits, uris, labels in pool.imap_unordered(
psmap.suggest, docs.documents):
eval_batch.evaluate(hits[project_id],
annif.corpus.SubjectSet((uris, labels)))
template = "{0:<30}\t{1}"
for metric, score in eval_batch.results(results_file=results_file).items():
click.echo(template.format(metric + ":", score))
@cli.command('optimize')
@click.argument('project_id')
@click.argument('paths', type=click.Path(exists=True), nargs=-1)
@click.option('--docs-limit', '-d', default=None,
type=click.IntRange(0, None),
help='Maximum number of documents to use')
@backend_param_option
@common_options
def run_optimize(project_id, paths, docs_limit, backend_param):
"""
Analyze documents, testing multiple limits and thresholds.
Evaluate the analysis results for a directory with documents against a
gold standard given in subject files. Test different limit/threshold
values and report the precision, recall and F-measure of each combination
of settings.
"""
project = get_project(project_id)
backend_params = parse_backend_params(backend_param, project)
filter_batches = generate_filter_batches(project.subjects)
ndocs = 0
docs = open_documents(paths, docs_limit)
for doc in docs.documents:
raw_hits = project.suggest(doc.text, backend_params)
hits = raw_hits.filter(project.subjects, limit=BATCH_MAX_LIMIT)
assert isinstance(hits, ListSuggestionResult), \
"Optimize should only be done with ListSuggestionResult " + \
"as it would be very slow with VectorSuggestionResult."
gold_subjects = annif.corpus.SubjectSet((doc.uris, doc.labels))
for hit_filter, batch in filter_batches.values():
batch.evaluate(hit_filter(hits), gold_subjects)
ndocs += 1
click.echo("\t".join(('Limit', 'Thresh.', 'Prec.', 'Rec.', 'F1')))
best_scores = collections.defaultdict(float)
best_params = {}
template = "{:d}\t{:.02f}\t{:.04f}\t{:.04f}\t{:.04f}"
# Store the batches in a list that gets consumed along the way
# This way GC will have a chance to reclaim the memory
filter_batches = list(filter_batches.items())
while filter_batches:
params, filter_batch = filter_batches.pop(0)
metrics = ['Precision (doc avg)',
'Recall (doc avg)',
'F1 score (doc avg)']
results = filter_batch[1].results(metrics=metrics)
for metric, score in results.items():
if score >= best_scores[metric]:
best_scores[metric] = score
best_params[metric] = params
click.echo(
template.format(
params[0],
params[1],
results['Precision (doc avg)'],
results['Recall (doc avg)'],
results['F1 score (doc avg)']))
click.echo()
template2 = "Best {:>19}: {:.04f}\tLimit: {:d}\tThreshold: {:.02f}"
for metric in metrics:
click.echo(
template2.format(
metric,
best_scores[metric],
best_params[metric][0],
best_params[metric][1]))
click.echo("Documents evaluated:\t{}".format(ndocs))
@cli.command('hyperopt')
@click.argument('project_id')
@click.argument('paths', type=click.Path(exists=True), nargs=-1)
@click.option('--docs-limit', '-d', default=None,
type=click.IntRange(0, None),
help='Maximum number of documents to use')
@click.option('--trials', '-T', default=10, help='Number of trials')
@click.option('--jobs',
'-j',
default=1,
help='Number of parallel runs (0 means all CPUs)')
@click.option('--metric', '-m', default='NDCG',
help='Metric to optimize (default: NDCG)')
@click.option(
'--results-file',
'-r',
type=click.File(
'w',
encoding='utf-8',
errors='ignore',
lazy=True),
help="""Specify file path to write trial results as CSV.
File directory must exist, existing file will be overwritten.""")
@common_options
def run_hyperopt(project_id, paths, docs_limit, trials, jobs, metric,
results_file):
"""
Optimize the hyperparameters of a project using a validation corpus.
"""
proj = get_project(project_id)
documents = open_documents(paths, docs_limit)
click.echo(f"Looking for optimal hyperparameters using {trials} trials")
rec = proj.hyperopt(documents, trials, jobs, metric, results_file)
click.echo(f"Got best {metric} score {rec.score:.4f} with:")
click.echo("---")
for line in rec.lines:
click.echo(line)
click.echo("---")
if __name__ == '__main__':
cli()
|
the-stack_0_7345 | import os
from setup import basedir
class BaseConfig(object):
SECRET_KEY = "SO_SECURE"
DEBUG = True
SQLALCHEMY_DATABASE_URI = os.environ['DATABASE_URL']
# SQLALCHEMY_DATABASE_URI = "postgresql://localhost/Cathal"
MONGODB_URI = os.environ['MONGODB_URL']
SQLALCHEMY_TRACK_MODIFICATIONS = True
JSON_AS_ASCII = False
GOOGLE_CLIENT_ID = os.environ['GOOGLE_CLIENT_ID']
GOOGLE_CLIENT_SECRET = os.environ['GOOGLE_CLIENT_SECRET']
class TestingConfig(object):
"""Development configuration."""
TESTING = True
DEBUG = True
WTF_CSRF_ENABLED = False
SQLALCHEMY_DATABASE_URI = 'sqlite:///:memory:'
DEBUG_TB_ENABLED = True
PRESERVE_CONTEXT_ON_EXCEPTION = False
|
the-stack_0_7346 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import cslib
def get_drive_mapped_path_dict():
def get_mapped_path_for_drive(drive):
# use window API (WNetGetConnectionW)
try:
import ctypes
from ctypes import wintypes
mpr = ctypes.WinDLL('mpr')
ERROR_SUCCESS = 0x0000
ERROR_MORE_DATA = 0x00EA
wintypes.LPDWORD = ctypes.POINTER(wintypes.DWORD)
mpr.WNetGetConnectionW.restype = wintypes.DWORD
mpr.WNetGetConnectionW.argtypes = (wintypes.LPCWSTR, wintypes.LPWSTR, wintypes.LPDWORD)
length = (wintypes.DWORD * 1)()
result = mpr.WNetGetConnectionW(drive, None, length)
if result != ERROR_MORE_DATA:
return ''
remote_name = (wintypes.WCHAR * length[0])()
result = mpr.WNetGetConnectionW(drive, remote_name, length)
if result != ERROR_SUCCESS:
return ''
return remote_name.value.replace('\\\\', '')
except Exception:
import libcsbuild
libcsbuild.write_csbuild_log('EXCEPTION_IN_PROCESSING_NETWORK_DRIVE: %s' % drive)
return ''
drive_mapped_path_dict = {}
if not cslib.is_windows():
return drive_mapped_path_dict
import win32api
import libcsbuild
drive_letter_list = [drive_letter.replace('\\', '') for drive_letter in
win32api.GetLogicalDriveStrings().split('\000')[:-1] if drive_letter != '']
for drive_letter in drive_letter_list:
key = get_mapped_path_for_drive(drive_letter)
if key == '':
continue
libcsbuild.write_csbuild_log('network_drive: %s, path: %s' % (drive_letter, key))
drive_mapped_path_dict[key] = drive_letter
libcsbuild.write_csbuild_log(str(drive_mapped_path_dict))
return drive_mapped_path_dict
def convert_network_drive_path(open_file, mapped_dict):
unc_prefix = '\\Device\\Mup'
if not cslib.is_windows() or not open_file.startswith(unc_prefix):
return open_file
for key in mapped_dict.keys():
inx = open_file.find(key)
if inx == -1:
continue
import libcsbuild
libcsbuild.write_csbuild_log(
'%s -> %s' % (open_file, os.path.join(mapped_dict[key], open_file[inx + len(key):])))
open_file = os.path.join(mapped_dict[key], open_file[inx + len(key):])
return open_file
|
the-stack_0_7347 | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import contextlib
import collections
import errno
import os
import sys
import unittest
try:
import fcntl
except ImportError: # pragma: no cover
# Doesn't exist on Windows. See also crbug.com/515704.
pass
from testing_support import auto_stub
from infra.libs.service_utils import daemon
import mock
Stat = collections.namedtuple('Stat', ['st_ino'])
class TestFlock(auto_stub.TestCase):
@unittest.skipIf(sys.platform == 'win32', 'Requires not windows')
def setUp(self):
super(TestFlock, self).setUp()
@contextlib.contextmanager
def _assert_reached(self):
reached = {'yup': False}
yield reached
self.assertTrue(reached['yup'])
def _mock_basic_fs_calls(self):
"""Mocks os.open, os.close as well as os.fstat."""
def _noop_handler(*_args, **_kwargs):
return 1
def _noop_os_close(*_args, **_kwargs):
pass
def _noop_fstat(*_args, **_kwargs):
return Stat(st_ino=45678)
self.mock(os, 'open', _noop_handler)
self.mock(os, 'close', _noop_os_close)
self.mock(os, 'fstat', _noop_fstat)
def _set_lock_status(self, success=True):
"""Mocks os.fcntl and whether the mock succeeds or not."""
def _lock_status(_fd, flags, **_kwargs):
if flags != fcntl.LOCK_UN: # We don't care if unlock fails.
if not success:
raise IOError('Couldn\'t get lock.')
self.mock(fcntl, 'lockf', _lock_status)
def _set_stat_status(self, success=True, matching=True):
"""Mocks os.stat, sets its success and if st_ino matches os.fstat mock."""
def _stat_handler(*_args, **_kwargs):
if not success:
raise OSError('Not found.')
if matching:
return Stat(st_ino=45678)
return Stat(st_ino=67890)
self.mock(os, 'stat', _stat_handler)
def _set_unlink_status(self, success=True):
"""Mocks os.unlink and sets whether it succeeds or not."""
def _unlink_handler(*_args, **_kwargs):
if not success:
raise OSError('Not found.')
self.mock(os, 'unlink', _unlink_handler)
#### Tests.
def testGetLock(self):
self._mock_basic_fs_calls()
self._set_lock_status()
self._set_stat_status()
self._set_unlink_status()
with self._assert_reached() as reached:
with daemon.flock('bogus'):
reached['yup'] = True
def testDontGetLock(self):
self._mock_basic_fs_calls()
self._set_lock_status(success=False)
self._set_stat_status()
self._set_unlink_status()
with self.assertRaises(daemon.LockAlreadyLocked):
with daemon.flock('bogus'):
# Should never reach this.
# pylint: disable=redundant-unittest-assert
self.assertTrue(False) # pragma: no cover
def testFileDeletedAfterLockAcquired(self):
"""Test that we abort if we acquire a lock but the file has been deleted."""
self._mock_basic_fs_calls()
self._set_lock_status()
self._set_stat_status(success=False)
self._set_unlink_status()
with self.assertRaises(daemon.LockAlreadyLocked):
with daemon.flock('bogus'):
# Should never reach this.
# pylint: disable=redundant-unittest-assert
self.assertTrue(False) # pragma: no cover
def testLockfileRecreated(self):
"""Test that we abort if a new lockfile is created under us."""
self._mock_basic_fs_calls()
self._set_lock_status()
self._set_stat_status(matching=False)
self._set_unlink_status()
with self.assertRaises(daemon.LockAlreadyLocked):
with daemon.flock('bogus'):
# Should never reach this.
# pylint: disable=redundant-unittest-assert
self.assertTrue(False) # pragma: no cover
def testDeleteWhenDone(self):
"""Test that we delete the lockfile when we're done."""
data = {'count': 0}
def _mock_unlink(*_args, **_kwargs):
data['count'] += 1
self.mock(os, 'unlink', _mock_unlink)
self._mock_basic_fs_calls()
self._set_lock_status()
self._set_stat_status()
with self._assert_reached() as reached:
with daemon.flock('bogus'):
reached['yup'] = True
self.assertEqual(data['count'], 1)
def testUnlinkFailureDoesntBreak(self):
"""Test that a failing unlink doesn't break us."""
self._mock_basic_fs_calls()
self._set_lock_status()
self._set_stat_status()
self._set_unlink_status(success=False)
with self._assert_reached() as reached:
with daemon.flock('bogus'):
reached['yup'] = True
@mock.patch('os.fork', return_value=0)
@mock.patch('os.setsid')
@mock.patch('os.close')
@mock.patch('os.open')
@mock.patch('os.dup2')
@mock.patch('os.chdir')
@mock.patch('os._exit')
class TestBecomeDaemon(unittest.TestCase):
@unittest.skipIf(sys.platform == 'win32', 'Requires not windows')
def setUp(self):
super(TestBecomeDaemon, self).setUp()
def testClosesFds(self, _mock_exit, _mock_chdir, _mock_dup2, _mock_open,
mock_close, _mock_setsid, _mock_fork):
daemon.become_daemon()
self.assertEqual(2048, mock_close.call_count)
self.assertEqual([((i,),) for i in reversed(range(2048))],
mock_close.call_args_list)
def testClosesFdWithExceptions(self, _mock_exit, _mock_chdir, _mock_dup2,
_mock_open, mock_close, _mock_setsid,
_mock_fork):
daemon.become_daemon(keep_fds={42})
self.assertEqual(2047, mock_close.call_count)
self.assertEqual([((i,),) for i in reversed(range(2048)) if i != 42],
mock_close.call_args_list)
def testClosesFdsKeepingAll(self, _mock_exit, _mock_chdir, _mock_dup2,
_mock_open, mock_close, _mock_setsid,
_mock_fork):
daemon.become_daemon(keep_fds=True)
self.assertEqual(0, mock_close.call_count)
def testClosesInvalidFds(self, _mock_exit, _mock_chdir, _mock_dup2,
_mock_open, mock_close, _mock_setsid, _mock_fork):
mock_close.side_effect = EnvironmentError(errno.EIO, '')
with self.assertRaises(EnvironmentError):
daemon.become_daemon()
mock_close.side_effect = EnvironmentError(errno.EBADF, '')
daemon.become_daemon()
def testOpensDevNull(self, _mock_exit, _mock_chdir, mock_dup2, mock_open,
_mock_close, _mock_setsid, _mock_fork):
handle = object()
mock_open.return_value = handle
daemon.become_daemon()
self.assertEqual([
((handle, 0),),
((handle, 1),),
((handle, 2),),
], mock_dup2.call_args_list)
def testOpensDevNullWithExceptions(self, _mock_exit, _mock_chdir, mock_dup2,
mock_open, _mock_close, _mock_setsid,
_mock_fork):
handle = object()
mock_open.return_value = handle
daemon.become_daemon(keep_fds={1})
self.assertEqual([
((handle, 0),),
((handle, 2),),
], mock_dup2.call_args_list)
def testChangesToRoot(self, _mock_exit, mock_chdir, _mock_dup2, _mock_open,
_mock_close, _mock_setsid, _mock_fork):
daemon.become_daemon()
mock_chdir.assert_called_with('/')
def testForkExitsParent(self, mock_exit, _mock_chdir, _mock_dup2, _mock_open,
_mock_close, _mock_setsid, mock_fork):
mock_fork.return_value = 0
daemon.become_daemon()
self.assertFalse(mock_exit.called)
mock_fork.return_value = 123
daemon.become_daemon()
self.assertTrue(mock_exit.called)
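# Illustrative sketch (not part of the original module): a minimal
# become_daemon matching the behaviour exercised by the tests above -- fork
# and exit the parent, start a new session, close inherited descriptors
# (keeping any listed in keep_fds and tolerating EBADF), point stdio at
# /dev/null and chdir to '/'. The 2048-descriptor ceiling mirrors the test
# expectations; the real daemon.become_daemon may differ in its details.
def _become_daemon_sketch(keep_fds=None):
  if os.fork():
    os._exit(0)  # the parent exits; the child carries on as the daemon
  os.setsid()  # detach from the controlling terminal
  keep = set() if keep_fds in (None, True) else set(keep_fds)
  if keep_fds is not True:
    for fd in reversed(range(2048)):  # close inherited descriptors
      if fd in keep:
        continue
      try:
        os.close(fd)
      except EnvironmentError as err:
        if err.errno != errno.EBADF:  # an already-closed fd is fine
          raise
  null_fd = os.open(os.devnull, os.O_RDWR)
  for fd in (0, 1, 2):  # stdin, stdout and stderr -> /dev/null
    if fd not in keep:
      os.dup2(null_fd, fd)
  os.chdir('/')  # avoid pinning the working directory's mount point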
|
the-stack_0_7348 | #!/usr/bin/env python3
#
# Copyright (c) 2019 Roberto Riggio
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Projects manager."""
import empower.apps
import empower.primitives
from empower.main import srv_or_die
from empower.core.service import EService
from empower.managers.projectsmanager.project import Project
from empower.managers.projectsmanager.project import EmbeddedWiFiProps
from empower.managers.projectsmanager.project import EmbeddedLTEProps
from empower.managers.projectsmanager.project import T_BSSID_TYPE_SHARED
from empower.managers.projectsmanager.project import T_BSSID_TYPE_UNIQUE
from empower.managers.projectsmanager.cataloghandler import CatalogHandler
from empower.managers.projectsmanager.appshandler import AppsHandler
from empower.managers.projectsmanager.projectshandler import ProjectsHandler, \
ProjectsWiFiACLHandler, ProjectsWiFiSlicesHandler, \
ProjectsLTESlicesHandler, ProjectLVAPsHandler
class ProjectsManager(EService):
"""Projects manager."""
HANDLERS = [CatalogHandler, AppsHandler, ProjectLVAPsHandler,
ProjectsHandler, ProjectsWiFiACLHandler,
ProjectsWiFiSlicesHandler, ProjectsLTESlicesHandler]
projects = {}
accounts_manager = None
def start(self):
"""Start projects manager."""
super().start()
self.accounts_manager = srv_or_die("accountsmanager")
for project in Project.objects.all():
self.projects[project.project_id] = project
self.projects[project.project_id].start_services()
@property
def catalog(self):
"""Return available apps."""
results = {}
results.update(self.walk_module(empower.apps))
results.update(self.walk_module(empower.primitives))
return results
def load_project_by_ssid(self, ssid):
"""Find a project by SSID."""
for project in self.projects.values():
if not project.wifi_props:
continue
if project.wifi_props.ssid == ssid:
break
else:
project = None
return project
def load_project_by_plmnid(self, plmnid):
"""Find a project by PLMNID."""
for project in self.projects.values():
if not project.lte_props:
continue
if project.lte_props.plmnid == plmnid:
break
else:
project = None
return project
def get_available_ssids(self, sta, block):
"""Return the list of available networks for the specified sta."""
networks = list()
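        # A shared BSSID is derived from the block's radio address (one BSS
        # advertised per block), while a unique BSSID is derived from the
        # station address (one BSS per client).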
for project in self.projects.values():
if not project.wifi_props:
continue
if sta not in project.wifi_props.allowed:
continue
if project.wifi_props.bssid_type == T_BSSID_TYPE_SHARED:
bssid = project.generate_bssid(block.hwaddr)
ssid = project.wifi_props.ssid
networks.append((bssid, ssid))
elif project.wifi_props.bssid_type == T_BSSID_TYPE_UNIQUE:
bssid = project.generate_bssid(sta)
ssid = project.wifi_props.ssid
networks.append((bssid, ssid))
else:
self.log.error("Invalid BSSID type: %s",
project.wifi_props.bssid_type)
return networks
def create(self, desc, project_id, owner, wifi_props=None, lte_props=None):
"""Create new project."""
if project_id in self.projects:
raise ValueError("Project %s already defined" % project_id)
if owner not in self.accounts_manager.accounts:
raise ValueError("Undefined account %s" % owner)
project = Project(project_id=project_id, desc=desc, owner=owner)
if wifi_props:
project.wifi_props = EmbeddedWiFiProps(**wifi_props)
if lte_props:
project.lte_props = EmbeddedLTEProps(**lte_props)
project.save()
self.projects[project_id] = project
project.upsert_wifi_slice(slice_id=0)
project.upsert_lte_slice(slice_id=0)
self.projects[project_id].start_services()
return self.projects[project_id]
def update(self, project_id, wifi_props=None, lte_props=None):
"""Update project."""
if project_id not in self.projects:
raise ValueError("Project %s not available" % project_id)
project = self.projects[project_id]
print(wifi_props["allowed"])
try:
# not all wifi props can be modified
if wifi_props:
if "allowed" in wifi_props:
project.wifi_props.allowed = wifi_props["allowed"]
# not all lte props can be modified
if lte_props:
pass
project.save()
finally:
project.refresh_from_db()
return self.projects[project_id]
def remove_all(self):
"""Remove all projects."""
for project_id in list(self.projects):
self.remove(project_id)
def remove(self, project_id):
"""Remove project."""
# Check if project exists
if project_id not in self.projects:
raise KeyError("%s not registered" % project_id)
# Fetch project
project = self.projects[project_id]
# Remove hosted LVAPs
for lvap in list(project.lvaps.values()):
# The LVAP is associated
if lvap.ssid and lvap.wtp.connection:
lvap.wtp.connection.send_client_leave_message_to_self(lvap)
# Reset the LVAP
del lvap.wtp.connection.manager.lvaps[lvap.addr]
lvap.clear_blocks()
# Remove hosted VAPs
for vap in list(project.vaps.values()):
# Reset the LVAP
del vap.wtp.connection.manager.vaps[vap.bssid]
vap.clear_block()
# Stop running services
self.projects[project_id].stop_services()
# Delete project from datase and manager
project.delete()
del self.projects[project_id]
def launch(context, service_id):
""" Initialize the module. """
return ProjectsManager(context=context, service_id=service_id)
|
the-stack_0_7349 | #!/usr/bin/python
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions required to interact with Docker to build and run images, shells
and notebooks in a Docker environment.
"""
from typing import List, Optional
from blessings import Terminal
import caliban.config as c
import caliban.docker.build as b
import caliban.platform.shell as ps
import caliban.util.fs as ufs
t = Terminal()
def run_notebook(job_mode: c.JobMode,
port: Optional[int] = None,
lab: Optional[bool] = None,
version: Optional[bool] = None,
run_args: Optional[List[str]] = None,
**run_interactive_kwargs) -> None:
"""Start a notebook in the current working directory; the process will run
inside of a Docker container that's identical to the environment available to
Cloud jobs that are submitted by `caliban cloud`, or local jobs run with
`caliban run.`
if you pass mount_home=True your jupyter settings will persist across calls.
Keyword args:
- port: the port to pass to Jupyter when it boots, useful if you have
multiple instances running on one machine.
- lab: if True, starts jupyter lab, else jupyter notebook.
- version: explicit Jupyter version to install.
run_interactive_kwargs are all extra arguments taken by run_interactive.
"""
if port is None:
port = ufs.next_free_port(8888)
if lab is None:
lab = False
if run_args is None:
run_args = []
inject_arg = b.NotebookInstall.lab if lab else b.NotebookInstall.jupyter
jupyter_cmd = "lab" if lab else "notebook"
  jupyter_args = [
      "-m", "jupyter", jupyter_cmd,
      "--ip=0.0.0.0",
      "--port={}".format(port),
      "--no-browser"
  ]
docker_args = ["-p", "{}:{}".format(port, port)] + run_args
ps.run_interactive(job_mode,
entrypoint="python",
entrypoint_args=jupyter_args,
run_args=docker_args,
inject_notebook=inject_arg,
jupyter_version=version,
**run_interactive_kwargs)
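# Usage sketch (illustrative, not part of the original module): one way
# run_notebook might be invoked directly. The JobMode member and the extra
# docker run_args below are assumptions rather than values taken from this
# codebase.
if __name__ == "__main__":
  run_notebook(
      c.JobMode.CPU,  # assumed CPU job mode; a GPU mode may also exist
      port=8890,  # fixed port instead of the next free one at or above 8888
      lab=True,  # start JupyterLab rather than the classic notebook
      run_args=["--rm"],  # extra flags forwarded to `docker run`
  )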
|
the-stack_0_7350 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# IMPORTS
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
import modules.globals as sg
# CLASS DEFINITION
class MailHelper:
# Constructor
def __init__(self):
self.load_conf()
self.smtp = None
try:
self.smtp = smtplib.SMTP(self.smtp_host, self.smtp_port, None, 5)
self.smtp.ehlo()
if self.smtp_tls:
self.smtp.starttls()
self.smtp.ehlo()
# self.smtp.login(self.smtp_from, self.smtp_pwd)
except Exception as e:
sg.logger.error('Failed to bind to smtp server: %s' % str(e))
# Configuration loader
def load_conf(self):
self.smtp_host = sg.conf[sg.CONF_SMTP_SECTION][sg.CONF_SMTP_HOST]
self.smtp_port = sg.conf[sg.CONF_SMTP_SECTION][sg.CONF_SMTP_PORT]
self.smtp_tls = sg.conf[sg.CONF_SMTP_SECTION][sg.CONF_SMTP_TLS]
self.smtp_from = sg.conf[sg.CONF_SMTP_SECTION][sg.CONF_SMTP_FROM]
self.smtp_pwd = sg.conf[sg.CONF_SMTP_SECTION][sg.CONF_SMTP_PWD]
def build_gmail(self):
if not hasattr(self, 'sender') or not hasattr(self, 'code'):
sg.logger.warning('Detected GMAIL but failed to parse sender or code...')
return
sender = self.sender
code = self.code
# Build the answer
subject = '[SCIZ] Code de confirmation de transfert GMAIL'
text = 'Votre code de transfert GMAIL pour %s est : %s' % (sg.user.mail, code)
html = '''
<html>
<head></head>
<body>
<p>Votre code de transfert pour %s est : %s</p>
</body>
</html>
''' % (sg.user.mail, code)
# Send the mail
self.send_mail(sender, subject, text, html)
def build_yahoo(self):
if not hasattr(self, 'link'):
sg.logger.warning('Detected YAHOO but failed to parse link...')
return
link = self.link
# Build the answer
subject = '[SCIZ] Lien de confirmation de transfert YAHOO'
text = 'Votre code de transfert YAHOO pour %s est : %s' % (sg.user.mail, link)
html = '''
<html>
<head></head>
<body>
<p>Votre lien de transfert pour %s est : %s</p>
</body>
</html>
''' % (sg.user.mail, link)
# Send the mail
self.send_mail(None, subject, text, html)
def send_mail(self, to, subject, body_text, body_html):
if self.smtp is None:
sg.logger.error('An attempt was made to send a mail but no previous bind to a SMTP server was successful')
return
msg = MIMEMultipart('alternative')
msg['Subject'] = subject
msg['From'] = self.smtp_from
msg['To'] = to if to is not None else sg.user.user_mail
msg.attach(MIMEText(body_text, 'plain'))
msg.attach(MIMEText(body_html, 'html'))
if msg['To'] is not None:
self.smtp.sendmail(msg['From'], [msg['To']], msg.as_string())
else:
sg.logger.warning('No address to send back...')
def __del__(self):
if self.smtp:
self.smtp.quit()
|
the-stack_0_7352 | import urllib.parse
from django.contrib import auth
from django.db import close_old_connections
from channels.middleware import BaseMiddleware
import rest_framework_jwt.serializers
import rest_framework.exceptions
import jwt.exceptions
import backend.auth
class TokenMiddleware(object):
"""
Middleware that authenticates against a token in the http authorization header.
"""
def __init__(self, get_response):
self.get_response = get_response
def __call__(self, request):
self.process_request(request)
response = self.get_response(request)
return response
def process_request(self, request):
auth_header = request.META.get('HTTP_AUTHORIZATION', b'').split()
print("asd")
if not auth_header:
return None
user = auth.authenticate()
if user:
request.user = user
class JWTAuthMiddleware(BaseMiddleware):
"""
Middleware to authenticate a user with a JSON Web Token.
"""
def populate_scope(self, scope):
# Populate top level of scope.
if "user" not in scope:
raise ValueError(
"JWTAuthMiddleware cannot find user in scope. AuthMiddleware must be above it."
)
async def resolve_scope(self, scope):
if not scope["user"]._wrapped.is_anonymous:
return
if not "query_string" in scope:
return
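        # The JWT is read from the query string (e.g. ws://host/path?token=<jwt>),
        # since browser WebSocket clients generally cannot set custom headers.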
qs = urllib.parse.parse_qs(scope['query_string'].decode('utf-8'))
user = None
try:
qs['token'] = qs['token'][0]
validated = rest_framework_jwt.serializers.VerifyJSONWebTokenSerializer().validate(qs)
# If no exception is thrown, the token is valid. Store it in the session if it is a kit.
user = backend.auth.downcast_user_type(validated['user'])
except (KeyError, jwt.exceptions.InvalidTokenError, rest_framework.exceptions.ValidationError):
pass
close_old_connections()
# Set the user.
if user:
scope["user"]._wrapped = user
|
the-stack_0_7353 | """Commands the vehicle simulator to drive autonomously based on a given keras model.
Usage:
Use `model.h5` to drive in autonomous mode
`python drive.py model.h5`
Or, use `model.h5` to drive in autonomous mode, and save dashcam photos of the run to `./run1/`
`python drive.py model.h5 run1`
"""
#----------------------------------------------------------------------------------------------------------------------------------------------
import argparse
import base64
import os
import shutil
from datetime import datetime
from io import BytesIO
import eventlet.wsgi
import h5py
import numpy as np
import socketio
import tensorflow as tf
from PIL import Image
from flask import Flask
from keras import __version__ as keras_version, backend as K
from keras.models import load_model
MAX_SPEED = 18
MIN_SPEED = 8
speed_limit = MAX_SPEED
K.clear_session()
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
session = tf.Session(config=config)
K.set_session(session)
#----------------------------------------------------------------------------------------------------------------------------------------------
sio = socketio.Server()
app = Flask(__name__)
model = None
prev_image_array = None
#----------------------------------------------------------------------------------------------------------------------------------------------
class SimplePIController:
def __init__(self, Kp, Ki):
self.Kp = Kp
self.Ki = Ki
self.set_point = 0.
self.error = 0.
self.integral = 0.
def set_desired(self, desired):
self.set_point = desired
def update(self, measurement):
# proportional error
self.error = self.set_point - measurement
# integral error
self.integral += self.error
return self.Kp * self.error + self.Ki * self.integral
#----------------------------------------------------------------------------------------------------------------------------------------------
controller = SimplePIController(0.1, 0.002)
# Force desired driving speed.
set_speed = 25
controller.set_desired(set_speed)
#----------------------------------------------------------------------------------------------------------------------------------------------
@sio.on('telemetry')
def telemetry(sid, data):
if data:
# The current steering angle of the car
steering_angle = float(data["steering_angle"])
# The current throttle of the car
throttle = float(data["throttle"])
# The current speed of the car
speed = float(data["speed"])
# The current image from the center camera of the car
imgString = data["image"]
image = Image.open(BytesIO(base64.b64decode(imgString)))
image_array = np.asarray(image)
steering_angle = float(model.predict(image_array[None, :, :, :], batch_size=1))
global speed_limit
if speed > speed_limit:
speed_limit = MIN_SPEED # slow down
else:
speed_limit = MAX_SPEED
#throttle = controller.update(float(speed))
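        # Heuristic throttle: back off as the steering angle grows and as the
        # speed approaches the current limit (both enter as squared penalties).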
throttle = 1.0 - ( (steering_angle)**2 ) - ( (speed/speed_limit)**2 )
print(steering_angle, throttle, speed)
send_control(steering_angle, throttle)
# save frame
if args.image_folder != '':
timestamp = datetime.utcnow().strftime('%Y_%m_%d_%H_%M_%S_%f')[:-3]
image_filename = os.path.join(args.image_folder, timestamp)
image.save('{}.jpg'.format(image_filename))
else:
# NOTE: DON'T EDIT THIS.
sio.emit('manual', data={}, skip_sid=True)
@sio.on('connect')
def connect(sid, environ):
print("connect ", sid)
send_control(0, 0)
def send_control(steering_angle, throttle):
sio.emit(
"steer",
data={
'steering_angle': steering_angle.__str__(),
'throttle': throttle.__str__()
},
skip_sid=True)
#----------------------------------------------------------------------------------------------------------------------------------------------
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Remote Driving')
parser.add_argument(
'model',
type=str,
help='Path to model h5 file. Model should be on the same path.'
)
parser.add_argument(
'image_folder',
type=str,
nargs='?',
default='',
help='Path to image folder. This is where the images from the run will be saved.'
)
parser.add_argument(
'maxspeed',
type=int,
nargs='?',
default=MAX_SPEED,
help='Maximum speed limit'
)
parser.add_argument(
'minspeed',
        type=int,
nargs='?',
default=MIN_SPEED,
help='Minimum speed limit'
)
args = parser.parse_args()
MIN_SPEED = args.minspeed
MAX_SPEED = args.maxspeed
# check that model Keras version is same as local Keras version
f = h5py.File(args.model, mode='r')
model_version = f.attrs.get('keras_version')
keras_version = str(keras_version).encode('utf8')
if model_version != keras_version:
print('You are using Keras version ', keras_version,
', but the model was built using ', model_version)
model = load_model(args.model)
if args.image_folder != '':
print("Creating image folder at {}".format(args.image_folder))
if not os.path.exists(args.image_folder):
os.makedirs(args.image_folder)
else:
shutil.rmtree(args.image_folder)
os.makedirs(args.image_folder)
print("RECORDING THIS RUN ...")
else:
print("NOT RECORDING THIS RUN ...")
# wrap Flask application with middleware
app = socketio.Middleware(sio, app)
# deploy as an WSGI server
eventlet.wsgi.server(eventlet.listen(('', 4567)), app)
#----------------------------------------------------------------------------------------------------------------------------------------------
|
the-stack_0_7354 | from ebaysdk.finding import Connection
from ebaysdk.exception import ConnectionError
import time
import psycopg2
import re
from gen_utils import database_connection, get_api_key, get_search_words, get_test_search_words, get_trace_and_log
class SearchRequest(object):
def __init__(self, api_key, keyword):
self.api_key, self.keyword = api_key, keyword
# define which site we wish to connect to and feed in our api-key
self.api = Connection(siteid='EBAY-US', appid=self.api_key, config_file=None)
# create a live db cursor
self.cursor = database_connection()
# establish lists for appending data to
self.completed_product_ids = []
self.completed_product_nick = []
self.completed_product_titles = []
self.completed_product_prices = []
self.completed_product_cat_names = []
self.completed_product_cat_ids = []
self.completed_product_img_thumb = []
self.completed_product_img_url = []
self.completed_product_lst_type = []
self.completed_product_con = []
self.completed_product_loc = []
self.completed_product_start = []
self.completed_product_end = []
self.completed_product_depth = []
self.depthCountStorage = []
# outline our search body paramaters
self.search_body_pages = {
'keywords': keyword,
'itemFilter': [
# US only sellers -- can also limit by feedback score, business type, top-rated status, charity, etc.
{'name': 'MinPrice', 'value': '59', 'paramName': 'Currency', 'paramValue': 'USD'},
{'name': 'MaxPrice', 'value': '9999999', 'paramName': 'Currency', 'paramValue': 'USD'},
# sold items only
{'name': 'SoldItemsOnly', 'value': 'true'},
],
'paginationInput': {
'entriesPerPage': '100',
# always 1, as we want to pull the maximum number of pages given a maximum of 100 results per page
'pageNumber': '1'
},
# can filter this to multiple different options as well (Best Offer, Most Watched, etc.)
'sortOrder': 'PricePlusShippingLowest'
}
def get_pages(self):
"""() -> dict
Connects to the API,
Executes a query to find items by their category and takes in predefined parameters search_body_pages,
Returns the data in dictionary form,
Returns an integer with the total number of pages.
"""
try:
self.api.execute('findCompletedItems', self.search_body_pages)
self.data = self.api.response.dict()
self.pages = int(self.data['paginationOutput']['totalPages'])
return self.pages
except Exception as e:
get_trace_and_log(e)
    def fetch_completed_data(self, page):
        """() -> dict
        Connects to the API,
        Establishes the search_body_data parameters for the given page,
        Executes a findCompletedItems query with the predefined search_body_data parameters,
        Returns the data in dictionary form,
        Iterates over each item in the returned data dictionary and appends the various data points to their respective lists,
        Prints the values.
        """
try:
search_body_data = {
'keywords': self.keyword,
'itemFilter': [
{'name': 'MinPrice', 'value': '5', 'paramName': 'Currency', 'paramValue': 'USD'},
{'name': 'MaxPrice', 'value': '99999999', 'paramName': 'Currency', 'paramValue': 'USD'},
# sold items only
{'name': 'SoldItemsOnly', 'value': 'true'},
],
'paginationInput':
{'entriesPerPage': '100',
'pageNumber': f'{page}'},
'sortOrder': 'PricePlusShippingLowest'}
self.api.execute('findCompletedItems', search_body_data)
self.data = self.api.response.dict()
time.sleep(1) # wait a second before continuing (be kind ^^)
except Exception as e:
get_trace_and_log(e)
outliers = [
re.compile(r"\bposter\b", re.I),
re.compile(r"\bproxy\b", re.I),
re.compile(r"\bmisprint\b", re.I),
re.compile(r"\bpuzzle\b", re.I),
re.compile(r"\bplaytest\b", re.I),
re.compile(r"\berror\b", re.I),
re.compile(r"\bpromo\b", re.I),
re.compile(r"\bproxy\b", re.I),
re.compile(r"\bframed\b", re.I),
re.compile(r"\breprint\b", re.I),
re.compile(r"\bbooster\b", re.I),
re.compile(r"\bpack\b", re.I),
re.compile(r"\bfactory sealed\b", re.I),
re.compile(r"\brp\b", re.I),
re.compile(r"\bheadlamp\b", re.I),
re.compile(r"\bheadlamps\b", re.I),
re.compile(r"\bcar\b", re.I),
re.compile(r"\btruck\b", re.I),
re.compile(r"\bheadlights\b", re.I),
re.compile(r"\brepack\b", re.I),
re.compile(r"\brepacks\b", re.I),
re.compile(r"\brubber\b", re.I),
re.compile(r"\bseat\b", re.I),
re.compile(r"\bbox\b", re.I),
re.compile(r'\bsticker\b', re.I),
re.compile(r'\bstickers\b', re.I),
re.compile(r'\b5 x\b', re.I), # used to ignore things like '5 x Mox's for sale..', which greatly skew the average.
re.compile(r'\b4 x\b', re.I),
re.compile(r'\b3 x\b', re.I),
re.compile(r'\b2 x\b', re.I),
re.compile(r'\b5x\b', re.I),
re.compile(r'\b4x\b', re.I),
re.compile(r'\b3x\b', re.I),
re.compile(r'\b2x\b', re.I),
re.compile(r'\bx5\b', re.I),
re.compile(r'\bx4\b', re.I),
re.compile(r'\bx3\b', re.I),
re.compile(r'\bx2\b', re.I),
re.compile(r'\bx-2\b', re.I),
re.compile(r'\bx-3\b', re.I),
re.compile(r'\bx-4\b', re.I),
re.compile(r'\bx-5\b', re.I),
re.compile(r'\bx 2\b', re.I),
re.compile(r'\bx 3\b', re.I),
re.compile(r'\bx 4\b', re.I),
re.compile(r'\bx 5\b', re.I),
re.compile(r'\bcustom\b', re.I),
re.compile(r'\bpractice\b', re.I),
re.compile(r'\btime spiral\b', re.I),
re.compile(r'\blions\b', re.I),
re.compile(r'\bstory\b', re.I),
re.compile(r'\bmullet\b', re.I),
re.compile(r'\bplayset\b', re.I),
re.compile(r'\bbb\b', re.I),
re.compile(r'\bblack border\b', re.I),
re.compile(r'\bartist proof\b', re.I),
re.compile(r'\bgerman\b', re.I),
re.compile(r'\bitalian\b', re.I),
re.compile(r'\bfrench\b', re.I),
re.compile(r'\blot\b', re.I),
re.compile(r'\bsealed\b', re.I),
re.compile(r'\bartist\b', re.I),
re.compile(r'\bproof\b', re.I),
re.compile(r'\bcollection\b', re.I),
re.compile(r'\bfbb\b', re.I),
# re.compile(r'\b2\b', re.I),
# re.compile(r'\b3\b', re.I),
# re.compile(r'\b4\b', re.I),
# re.compile(r'\b5\b', re.I),
# re.compile(r'\b6\b', re.I),
re.compile(r'\bcomplete set\b', re.I),
re.compile(r'\bplayset\b', re.I),
re.compile(r'\bplay-set\b', re.I),
re.compile(r'\bset\b', re.I),
re.compile(r'\b(Partial)\b', re.I),
re.compile(r'\bpartial\b', re.I),
re.compile(r'\binfect\b', re.I),
]
try:
# begin filtering magic :D
if word.split(' ')[0] not in {"Collector's", "International"}:
outliers.extend((
re.compile(r"\bce\b", re.I),
re.compile(r"\bie\b", re.I),
re.compile(r"\bcollector\b", re.I),
re.compile(r"\bcollectors\b", re.I),
re.compile(r"\bcollector's\b", re.I),
re.compile(r"\bcollector's edition\b", re.I),
re.compile(r"\binternational\b", re.I),
re.compile(r"\binternationals\b", re.I),
re.compile(r"\binternational edition\b", re.I),
re.compile(r"\bcollector''s\b", re.I),
re.compile(r'\bcollector"s\b', re.I),
))
else:
pass
# print(f'Searching keyword: {word}', end="")
print(f'Searching keyword: {word}')
print(f'Chugging through...{page}/{self.pages} page(s)...')
print()
depthCount = 0
for item in self.data['searchResult']['item']:
if not any(regex.findall(item['title']) for regex in set(outliers)): # sets provide more iterating efficiency than lists.
# end filter magic => begin appending values to respective arrays
try:
self.completed_product_img_thumb.append(item['galleryURL'])
except Exception as e:
self.completed_product_img_thumb.append('No picture')
self.completed_product_nick.append(word)
self.completed_product_titles.append(item['title'])
self.completed_product_ids.append(item['itemId'])
# completed_product_prices.append(item['sellingStatus']['currentPrice']['value'])
self.completed_product_prices.append(item['sellingStatus']['convertedCurrentPrice']['value']) # take the convertedCurrentPrice instead @ 10/10/2018
self.completed_product_cat_names.append(item['primaryCategory']['categoryName'])
self.completed_product_cat_ids.append(item['primaryCategory']['categoryId'])
self.completed_product_img_url.append(item['viewItemURL'])
self.completed_product_lst_type.append(item['listingInfo']['listingType'])
self.completed_product_con.append(item['condition']['conditionDisplayName'])
self.completed_product_loc.append(item['location'])
self.completed_product_start.append(item['listingInfo']['startTime'])
self.completed_product_end.append(item['listingInfo']['endTime'])
depthCount += 1
# if the page is 1 and the max number of pages is 1 then extend the depth to fill up the list,
# otherwise proceed forward
if self.pages == 1 and page == 1:
self.completed_product_depth.extend(depthCount for i in range(depthCount))
elif self.pages > 1 and page == 1:
self.depthCountStorage.append(depthCount)
else:
depthCountMulti = int(self.depthCountStorage[-1]) + depthCount
self.completed_product_depth.extend(depthCountMulti for i in range(depthCountMulti))
except KeyError as e:
get_trace_and_log(e)
def zip_items(self):
"""(lists) -> (zip)
Inherits a series of lists and wraps it up into a comprehensive zip."""
#"begin zipping of all arrays into one big-array, just before inserting into the database
self.completed_products = zip(self.completed_product_nick, self.completed_product_titles, self.completed_product_ids, self.completed_product_prices, self.completed_product_cat_names, self.completed_product_cat_ids, self.completed_product_img_thumb, self.completed_product_img_url, self.completed_product_lst_type, self.completed_product_con, self.completed_product_loc, self.completed_product_start, self.completed_product_end, self.completed_product_depth)
return self.completed_products
def insert_completed_products(self, count):
"""(db cursor, array, count) -> ()
Takes in a database connection (cursor) and an array of data and inserts it into the respective database"""
for a, b, c, d, e, f, g, h, i, j, k, l, m, n in self.completed_products:
try:
self.cursor.execute("""INSERT INTO completed_products(completed_product_nick, completed_product_titles, completed_product_ids, completed_product_prices, completed_product_cat_names, completed_product_cat_ids, completed_product_img_thumb, completed_product_img_url, completed_product_lst_type, completed_product_con, completed_product_loc, completed_product_start, completed_product_end, completed_product_depth)
VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)""", (a, b, c, d, e, f, g, h, i, j, k, l, m, n, )) # MAKE SURE to leave the trailing comma (d-->,<--), this will NOT work otherwise.
print("Unique value inserted...")
except Exception as e:
print("Unique value skipped...")
# get_trace_and_log(e)
print()
print("Successfully piped database.")
if __name__ == '__main__':
# pull in our api key, the list of words to iterate over, and begin zipping lists before piping the db
api_key = get_api_key()
# comment out the above variable and use the one below when testing (includes 3 very common values)
words = get_test_search_words()
# words = ["Collector's Edition Black Lotus MTG", "International Edition Mox Ruby MTG", "Beta Black Lotus MTG"]
count = 0
for word in words:
# print(word)
count += 1
x = SearchRequest(api_key, word)
pages = x.get_pages() + 1
for page in range(1, pages):
x.fetch_completed_data(page)
x.zip_items()
x.insert_completed_products(count)
|
the-stack_0_7356 | """
Classes for curves
# Author: Antonio Martinez-Sanchez (Max Planck Institute for Biochemistry)
# Date: 29.02.2016
"""
__author__ = 'martinez'
import vtk
import math
import numpy as np
from pyseg.globals.utils import angle_2vec_3D, closest_points
###### Global variables
PI_2 = .5 * np.pi
MAX_PER_ANG = .25 * np.pi
MAX_FLOAT = np.finfo('float').max
# ####################################################################################################
# This class represents a spaced curve from a sequence of discrete samples (coordinates in 3D)
# Numerical approximation of discrete differential geometry from
# Boutin M. "Numerically Invariant Signature Curves" Int. J. Comput. Vision, 40(3): 235-248, 2000
#
#
class SpaceCurve(object):
# #### Constructor Area
# samples: array with the sequences samples of the curve
# mode: computation mode, 1: precise, 2 fast (default)
# do_geom: if True (default) curve geometric properties are computed during construction, otherwise not (this is
# useful for temporary curves)
def __init__(self, samples, mode=2, do_geom=True):
self.__samples = np.asarray(samples, dtype=np.float)
self.__mode = mode
self.__apex_id = -1
self.__ds = None
self.__lengths = None
self.__usg_k = None
self.__sg_k = None
self.__usg_t = None
self.__sg_t = None
self.__length = .0
self.__tot_uk = .0
self.__tot_k = .0
self.__tot_ut = .0
self.__tot_t = .0
self.__tot_ukt = .0
self.__per = .0
self.__ns = .0
self.__bs = .0
self.__al = -1.
self.__sin = .0
if do_geom:
self.compute_geom()
# Compute all geometric descriptors
def compute_geom(self):
self.__compute_ds()
self.__length = self.__ds.sum()
self.__compute_lengths()
self.__compute_usg_k()
self.__compute_sg_k()
self.__compute_usg_t()
self.__compute_sg_t()
self.__tot_uk = (self.__usg_k * self.__ds).sum()
self.__tot_k = (self.__sg_k * self.__ds).sum()
self.__tot_ut = (self.__usg_t * self.__ds).sum()
self.__tot_t = (self.__sg_t * self.__ds).sum()
self.__tot_ukt = (np.sqrt((self.__usg_k*self.__usg_k) + (self.__usg_t*self.__usg_t)) * self.__ds).sum()
self.__compute_per_length()
self.__compute_ns()
self.__compute_bs()
self.__compute_al()
self.__compute_sin()
# External functionality area
def get_nsamples(self):
return self.__samples.shape[0]
def get_samples(self):
return self.__samples
def get_sample(self, idx):
return self.__samples[idx, :]
def get_start_sample(self):
return self.__samples[0, :]
def get_end_sample(self):
return self.__samples[-1, :]
def get_lengths(self):
return self.__lengths
def get_length(self):
return self.__length
def get_total_uk(self):
return self.__tot_uk
def get_total_k(self):
return self.__tot_k
def get_total_ut(self):
return self.__tot_ut
def get_total_t(self):
return self.__tot_t
def get_total_ukt(self):
return self.__tot_ukt
def get_normal_symmetry(self):
return self.__ns
def get_binormal_symmetry(self):
return self.__bs
def get_apex_length(self, update=False):
if update:
self.__compute_al()
return self.__al
def get_sinuosity(self):
return self.__sin
def get_ds(self):
return self.__ds
def get_uk(self):
return self.__usg_k
def get_k(self):
return self.__sg_k
def get_ut(self):
return self.__usg_t
def get_t(self):
return self.__sg_t
def get_per_length(self, update=False):
if update:
self.__compute_per_length()
return self.__per
# Return a vtkPolyData which contains the curve
# add_geom: if True geometry properties are added otherwise not
def get_vtp(self, add_geom=True):
# Initialization
poly, points, lines = vtk.vtkPolyData(), vtk.vtkPoints(), vtk.vtkCellArray()
if add_geom:
# Point properties
pds_data = vtk.vtkFloatArray()
pds_data.SetNumberOfComponents(1)
pds_data.SetName('ds')
plens_data = vtk.vtkFloatArray()
plens_data.SetNumberOfComponents(1)
plens_data.SetName('lengths')
puk_data = vtk.vtkFloatArray()
puk_data.SetNumberOfComponents(1)
puk_data.SetName('u_k')
psk_data = vtk.vtkFloatArray()
psk_data.SetNumberOfComponents(1)
psk_data.SetName('s_k')
put_data = vtk.vtkFloatArray()
put_data.SetNumberOfComponents(1)
put_data.SetName('u_t')
pst_data = vtk.vtkFloatArray()
pst_data.SetNumberOfComponents(1)
pst_data.SetName('s_t')
# Cell properties
clen_data = vtk.vtkFloatArray()
clen_data.SetNumberOfComponents(1)
clen_data.SetName('length')
ctuk_data = vtk.vtkFloatArray()
ctuk_data.SetNumberOfComponents(1)
ctuk_data.SetName('u_total_k')
ctk_data = vtk.vtkFloatArray()
ctk_data.SetNumberOfComponents(1)
ctk_data.SetName('total_k')
ctut_data = vtk.vtkFloatArray()
ctut_data.SetNumberOfComponents(1)
ctut_data.SetName('u_total_t')
ctt_data = vtk.vtkFloatArray()
ctt_data.SetNumberOfComponents(1)
ctt_data.SetName('total_t')
cper_data = vtk.vtkFloatArray()
cper_data.SetNumberOfComponents(1)
cper_data.SetName('per_length')
cukt_data = vtk.vtkFloatArray()
cukt_data.SetNumberOfComponents(1)
cukt_data.SetName('total_ukt')
cns_data = vtk.vtkFloatArray()
cns_data.SetNumberOfComponents(1)
cns_data.SetName('normal_sim')
cbs_data = vtk.vtkFloatArray()
cbs_data.SetNumberOfComponents(1)
cbs_data.SetName('binormal_sim')
cal_data = vtk.vtkFloatArray()
cal_data.SetNumberOfComponents(1)
cal_data.SetName('apex_length')
csin_data = vtk.vtkFloatArray()
csin_data.SetNumberOfComponents(1)
csin_data.SetName('sinuosity')
# Line creation
lines.InsertNextCell(self.get_nsamples())
if add_geom:
# Adding cell properties
clen_data.InsertNextTuple((self.__length,))
ctuk_data.InsertNextTuple((self.__tot_uk,))
ctk_data.InsertNextTuple((self.__tot_k,))
ctut_data.InsertNextTuple((self.__tot_ut,))
ctt_data.InsertNextTuple((self.__tot_t,))
cukt_data.InsertNextTuple((self.__tot_ukt,))
cper_data.InsertNextTuple((self.__per,))
cns_data.InsertNextTuple((self.__ns,))
cbs_data.InsertNextTuple((self.__bs,))
cal_data.InsertNextTuple((self.__al,))
csin_data.InsertNextTuple((self.__sin,))
for i, point in enumerate(self.get_samples()):
points.InsertNextPoint(point)
lines.InsertCellPoint(i)
# Adding point properties
pds_data.InsertNextTuple((self.__ds[i],))
plens_data.InsertNextTuple((self.__lengths[i],))
puk_data.InsertNextTuple((self.__usg_k[i],))
psk_data.InsertNextTuple((self.__sg_k[i],))
put_data.InsertNextTuple((self.__usg_t[i],))
pst_data.InsertNextTuple((self.__sg_t[i],))
else:
for i, point in enumerate(self.get_samples()):
points.InsertNextPoint(point)
lines.InsertCellPoint(i)
poly.SetPoints(points)
poly.SetLines(lines)
if add_geom:
# Point properties
poly.GetPointData().AddArray(pds_data)
poly.GetPointData().AddArray(plens_data)
poly.GetPointData().AddArray(puk_data)
poly.GetPointData().AddArray(psk_data)
poly.GetPointData().AddArray(put_data)
poly.GetPointData().AddArray(pst_data)
# Cell properties
poly.GetCellData().AddArray(clen_data)
poly.GetCellData().AddArray(ctuk_data)
poly.GetCellData().AddArray(ctk_data)
poly.GetCellData().AddArray(ctut_data)
poly.GetCellData().AddArray(ctt_data)
poly.GetCellData().AddArray(cukt_data)
poly.GetCellData().AddArray(cper_data)
poly.GetCellData().AddArray(cns_data)
poly.GetCellData().AddArray(cbs_data)
poly.GetCellData().AddArray(cal_data)
poly.GetCellData().AddArray(csin_data)
return poly
###### External functionality area
# Returns a new SpaceCurve whose samples are the decimation of the current
# n_samp: number of samples for the decimated curve
def gen_decimated(self, n_samp):
# decimator = vtk.vtkDecimatePolylineFilter()
decimator = vtk.vtkSplineFilter()
decimator.SetSubdivideToSpecified()
decimator.SetNumberOfSubdivisions(n_samp-1)
poly = self.get_vtp(add_geom=False)
decimator.SetInputData(poly)
decimator.Update()
poly_dec = decimator.GetOutput()
coords = list()
for i in range(poly_dec.GetNumberOfPoints()):
coords.append(np.asarray(poly_dec.GetPoint(i), dtype=np.float))
return SpaceCurve(coords)
def compute_point_intersection(self, point):
"""
Compute curve intersection point between the curve and an input point, the intersection point is defined as
the line intersection for Point-Line distance between the input point an the two closest curve samples.
Point-Line distance estimation taken from: http://mathworld.wolfram.com/Point-LineDistance3-Dimensional.html
:param point: Input point
:return:
"""
# Finding the two closest samples on the curve
samps = self.get_samples()
cpoints = closest_points(point, samps, nn=2)
p0, p1, p2 = point, cpoints[0, :], cpoints[1, :]
# Intersection point
hold_a, hold_b = p1 - p0, p2 - p1
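        # t parameterises the foot of the perpendicular from the input point
        # onto the line through the two closest samples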
t = -(np.dot(hold_a, hold_b)) / (hold_b * hold_b).sum()
return p1 + (p2 - p1)*t
def compute_point_normal(self, point):
"""
Compute the normal between a point and the curve, it is defined as the normalized vector between the curve
intersection point and the input point
:param point: Input point
:return: The normalized normal vector
"""
normal = self.compute_point_intersection(point) - point
norm = math.sqrt((normal * normal).sum())
if norm <= 0:
return np.asarray((0., 0., 0.))
else:
return normal / norm
# #### Internal functionality area
# Linear extrapolation for x
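    # y(x) = y_k1 + (x - x_k1) * (y_k - y_k1) / (x_k - x_k1), clamped to be >= 0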
def __lin_extra(self, x, x_k, x_k1, y_k, y_k1):
ds = x_k - x_k1
if ds == 0:
return 0.
else:
hold = y_k1 + ((x - x_k1)/ds)*(y_k - y_k1)
if hold < 0:
return 0.
else:
return hold
# Lagrange extrapolation from tre points
def __3_pts_lagrange_extra(self, x, x_1, x_2, x_3, y_1, y_2, y_3):
n_1 = (x-x_2) * (x-x_3)
n_2 = (x-x_1) * (x-x_3)
n_3 = (x-x_1) * (x-x_2)
d_1 = (x_1-x_2) * (x_1-x_3)
d_2 = (x_2-x_1) * (x_2-x_3)
d_3 = (x_3-x_1) * (x_3-x_2)
        if (d_1 == 0) or (d_2 == 0) or (d_3 == 0):
return 0.
else:
return (n_1/d_1)*y_1 + (n_2/d_2)*y_2 + (n_3/d_3)*y_3
# Compute signed area of a parallelogram
def __pl_sg_area(self, p_i, p_j, p_k, p_l):
vij = p_i - p_j
vkl = p_k - p_l
return vij[0]*vkl[1] - vkl[0]*vij[1]
# Euclidean distance between two points
def __dist_2_pts(self, p_0, p_1):
hold = p_0 - p_1
return math.sqrt((hold * hold).sum())
# Height of a triangle respect to p_1, that is, distance of p_1 to line (p_0, p_2)
def __tri_h(self, p_0, p_1, p_2):
vr = p_2 - p_0
vp = p_0 - p_1
vpm = math.sqrt((vp*vp).sum())
if vpm <= 0:
return 0.
else:
vh = np.cross(vr, vp)
return math.sqrt((vh*vh).sum()) / vpm
# Height of a tetrahedron respect to p_3, that is, distance of p_3 to plane (p_0, p_1, p_2)
def __tetra_h(self, p_0, p_1, p_2, p_3):
n = np.cross(p_1-p_0, p_2-p_0)
nm = math.sqrt((n*n).sum())
if nm <= 0:
return 0.
else:
return math.fabs(np.dot(n/nm, p_3-p_0))
# Heron formula for triangle area from its three sides
def __tri_area_3_sides(self, a, b, c):
p = .5 * (a + b + c)
hold = p * (p-a) * (p-b) * (p-c)
if hold <= 0:
return 0.
else:
return math.sqrt(hold)
# Numerical estimator for unsigned curvature from 3 input points
# Returns: unsigned curvature estimation for p_1
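    # Menger curvature: k = 4*A / (a*b*c), the reciprocal of the circumradius,
    # with A the triangle area (Heron) and a, b, c the pairwise distances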
def __usg_k_3_pts(self, p_0, p_1, p_2):
a = self.__dist_2_pts(p_0, p_1)
b = self.__dist_2_pts(p_1, p_2)
c = self.__dist_2_pts(p_0, p_2)
hold = a * b * c
if hold == 0:
return 0.
else:
return (4.*self.__tri_area_3_sides(a, b, c)) / hold
# Numerical estimator for unsigned curvature from 5 input points
# Returns: unsigned curvature estimation for p_2
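    # Invariant estimator built from signed parallelogram areas of point pairs
    # (cf. the Boutin reference in the class header)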
def __usg_k_5_pts(self, p_0, p_1, p_2, p_3, p_4):
# Computed signed areas of the parallelograms
a_012 = self.__pl_sg_area(p_0, p_1, p_0, p_2)
a_013 = self.__pl_sg_area(p_0, p_1, p_0, p_3)
a_014 = self.__pl_sg_area(p_0, p_1, p_0, p_4)
a_023 = self.__pl_sg_area(p_0, p_2, p_0, p_3)
a_024 = self.__pl_sg_area(p_0, p_2, p_0, p_4)
a_034 = self.__pl_sg_area(p_0, p_3, p_0, p_4)
a_123 = self.__pl_sg_area(p_1, p_2, p_1, p_3)
a_124 = self.__pl_sg_area(p_1, p_2, p_1, p_4)
a_134 = self.__pl_sg_area(p_1, p_3, p_1, p_4)
a_234 = self.__pl_sg_area(p_2, p_3, p_2, p_4)
a_1234 = self.__pl_sg_area(p_1, p_2, p_3, p_4)
a_1234_2 = a_1234 * a_1234
# Intermediate computations
t = .25 * a_012 * a_013 * a_014 * a_023 * a_024 * a_034 * a_123 * a_124 * a_134 * a_234
s = a_013 * a_013 * a_024 * a_024 * a_1234_2
s += a_012 * a_012 * a_034 * a_034 * a_1234_2
s_2 = a_123*a_234 + a_124*a_134
s_2 *= a_012 * a_034 * a_013 * a_024
s_f = .25 * (s - 2.*s_2)
if t <= 0:
return 0
else:
return s_f / (t**(2./3.))
# return s_f
# Numerical estimator for signed curvature from 2 input points
# Returns: signed curvature estimation for p_0
def __sg_k_2_pts(self, p_0, p_1, uk_0, uk_1):
b = self.__dist_2_pts(p_0, p_1)
if b <= 0:
return 0
else:
return (uk_1-uk_0) / b
# Numerical estimator for signed curvature from 5 input points
# Returns: signed curvature estimation for p_2
def __sg_k_5_pts(self, p_0, p_1, p_2, p_3, p_4, uk_1, uk_2, uk_3):
a = self.__dist_2_pts(p_1, p_2)
b = self.__dist_2_pts(p_2, p_3)
d = self.__dist_2_pts(p_3, p_4)
g = self.__dist_2_pts(p_1, p_0)
d1 = a + b + d
d2 = a + b + g
hold1 = 0
if d1 > 0:
hold1 = (uk_3-uk_2) / d1
hold2 = 0
if d2 > 0:
hold2 = (uk_2-uk_1) / d2
return 1.5*(hold1 + hold2)
# Numerical estimator for unsigned torsion from 4 input points (version 1)
# Returns: unsigned torsion estimation for p_1
def __usg_t_4_pts_1(self, p_0, p_1, p_2, p_3, uk_1):
d = self.__dist_2_pts(p_2, p_3)
e = self.__dist_2_pts(p_1, p_3)
f = self.__dist_2_pts(p_0, p_3)
h = self.__tetra_h(p_0, p_1, p_2, p_3)
hold = d * e * f * uk_1
if hold <= 0:
return .0
else:
return (6.*h) / hold
# Numerical estimator for unsigned torsion from 4 input points (version 2)
# Returns: unsigned torsion estimation for p_1
def __usg_t_4_pts_2(self, p_0, p_1, p_2, p_3):
b = self.__dist_2_pts(p_1, p_2)
d = self.__dist_2_pts(p_2, p_3)
e = self.__dist_2_pts(p_1, p_3)
f = self.__dist_2_pts(p_0, p_3)
hold = f * self.__tri_area_3_sides(e, b, d)
if hold <= 0:
return .0
else:
h = self.__tetra_h(p_0, p_1, p_2, p_3)
return (1.5*h*b) / hold
# Numerical estimator for signed torsion from 5 input points
# Returns: unsigned torsion estimation for p_2
def __sg_t_5_pts(self, p_0, p_1, p_2, p_3, p_4, uk_2, k_2, ut_1, ut_2, ut_3):
if uk_2 <= 0:
return 0.
a = self.__dist_2_pts(p_1, p_2)
b = self.__dist_2_pts(p_2, p_3)
d = self.__dist_2_pts(p_3, p_4)
g = self.__dist_2_pts(p_0, p_1)
h = self.__tri_h(p_1, p_2, p_3)
hold_1 = 2*a + 2*b + 2*d + h + g
if hold_1 <= 0:
return 0.
else:
hold_2 = 2*a + 2*b - 2*d - 3*h + g
hold_3 = (ut_2*k_2) / (6*uk_2)
return 4. * ((ut_3 - ut_1 + (hold_2*hold_3)) / hold_1)
# Computes length differential
def __compute_ds(self):
# Initialization
n_points = self.__samples.shape[0]
ds = np.zeros(shape=n_points, dtype=np.float)
# Regular cases
for i in range(1, n_points):
ds[i] = self.__dist_2_pts(self.__samples[i-1], self.__samples[i])
self.__ds = ds
def __compute_lengths(self):
self.__lengths = np.zeros(shape=self.__ds.shape, dtype=np.float)
for i in range(1, len(self.__ds)):
self.__lengths[i] = self.__lengths[i-1] + self.__ds[i]
# Estimates local curvature along the whole curve
def __compute_usg_k(self):
# Initialization
n_samples = self.__samples.shape[0]
usg_k = np.zeros(shape=n_samples, dtype=np.float)
if n_samples <= 2:
self.__usg_k = usg_k
return
# Regular cases
if self.__mode == 1:
for i in range(2, n_samples-2):
p_0, p_1 = self.__samples[i-2, :], self.__samples[i-1, :]
p_2 = self.__samples[i, :]
p_3, p_4 = self.__samples[i+1, :], self.__samples[i+2, :]
usg_k[i] = self.__usg_k_5_pts(p_0, p_1, p_2, p_3, p_4)
else:
for i in range(1, n_samples-1):
p_0, p_1, p_2 = self.__samples[i-1, :], self.__samples[i, :], self.__samples[i+1, :]
usg_k[i] = self.__usg_k_3_pts(p_0, p_1, p_2)
# Extremal cases
p_0, p_1, p_2 = self.__samples[0, :], self.__samples[1, :], self.__samples[2, :]
usg_k[1] = self.__usg_k_3_pts(p_0, p_1, p_2)
usg_k[0] = self.__lin_extra(0, self.__ds[1], self.__ds[1]+self.__ds[2], usg_k[1], usg_k[2])
p_0, p_1, p_2 = self.__samples[-1, :], self.__samples[-2, :], self.__samples[-3, :]
usg_k[-2] = self.__usg_k_3_pts(p_0, p_1, p_2)
usg_k[-1] = self.__lin_extra(self.__length, self.__length-self.__ds[-1],
self.__length-self.__ds[-1]-self.__ds[-2], usg_k[-2], usg_k[-3])
self.__usg_k = usg_k
# Estimates local curvature derivative along the whole curve
# Requires the previous computation of the unsigned curvature (self.__usg_k)
def __compute_sg_k(self):
# Initialization
n_samples = self.__samples.shape[0]
sg_k = np.zeros(shape=n_samples, dtype=np.float)
if n_samples <= 2:
self.__sg_k = sg_k
return
# Regular cases
for i in range(2, n_samples-2):
p_0, p_1 = self.__samples[i-2, :], self.__samples[i-1, :]
p_2 = self.__samples[i, :]
p_3, p_4 = self.__samples[i+1, :], self.__samples[i+2, :]
uk_1, uk_2, uk_3 = self.__usg_k[i-1], self.__usg_k[i], self.__usg_k[i+1]
sg_k[i] = self.__sg_k_5_pts(p_0, p_1, p_2, p_3, p_4, uk_1, uk_2, uk_3)
# Extremal cases
p_1, p_2 = self.__samples[1, :], self.__samples[2, :]
uk_1, uk_2 = self.__usg_k[1], self.__usg_k[2]
sg_k[1] = self.__sg_k_2_pts(p_1, p_2, uk_1, uk_2)
sg_k[0] = self.__lin_extra(0, self.__ds[:2].sum(), self.__ds[:3].sum(), sg_k[1], sg_k[2])
p_1, p_2 = self.__samples[-3, :], self.__samples[-2, :]
uk_1, uk_2 = self.__usg_k[-3], self.__usg_k[-2]
sg_k[-2] = self.__sg_k_2_pts(p_1, p_2, uk_1, uk_2)
sg_k[-1] = self.__lin_extra(self.__length, self.__length-self.__ds[-1:].sum(),
self.__length-self.__ds[-2:].sum(), sg_k[-2], sg_k[-3])
self.__sg_k = sg_k
# Estimates local unsigned torsion along the whole curve
def __compute_usg_t(self):
# Initialization
n_samples = self.__samples.shape[0]
usg_t = np.zeros(shape=n_samples, dtype=np.float)
if n_samples <= 3:
self.__usg_t = usg_t
return
# Regular cases
for i in range(2, n_samples-2):
p_0, p_1 = self.__samples[i-1, :], self.__samples[i, :]
p_2, p_3 = self.__samples[i+1, :], self.__samples[i+2, :]
uk_1 = self.__usg_k[i]
usg_t_1 = self.__usg_t_4_pts_1(p_0, p_1, p_2, p_3, uk_1)
usg_t_2 = self.__usg_t_4_pts_2(p_0, p_1, p_2, p_3)
usg_t[i] = .5 * (usg_t_1 + usg_t_2)
# Extremal cases
p_0, p_1, p_2, p_3 = self.__samples[0, :], self.__samples[1, :], self.__samples[2, :], \
self.__samples[3, :]
uk_1 = self.__usg_k[1]
usg_t_1 = self.__usg_t_4_pts_1(p_0, p_1, p_2, p_3, uk_1)
usg_t_2 = self.__usg_t_4_pts_2(p_0, p_1, p_2, p_3)
usg_t[1] = .5 * (usg_t_1 + usg_t_2)
usg_t[0] = self.__3_pts_lagrange_extra(0, self.__ds[:2].sum(), self.__ds[:3].sum(), self.__ds[:4].sum(),
usg_t[1], usg_t[2], usg_t[3])
usg_t[-2] = self.__lin_extra(self.__length-self.__ds[-1:].sum(), self.__length-self.__ds[-2:].sum(),
self.__length-self.__ds[-3:].sum(), usg_t[-3], usg_t[-4])
usg_t[-1] = self.__3_pts_lagrange_extra(self.__length, self.__length-self.__ds[-1:].sum(),
self.__length-self.__ds[-2:].sum(),
self.__length-self.__ds[-3:].sum(),
usg_t[-2], usg_t[-3], usg_t[-4])
self.__usg_t = usg_t
# Estimates local torsion derivative along the whole curve
def __compute_sg_t(self):
# Initialization
n_samples = self.__samples.shape[0]
sg_t = np.zeros(shape=n_samples, dtype=np.float)
if n_samples <= 3:
self.__sg_t = sg_t
return
# Regular cases
for i in range(2, n_samples-2):
p_0, p_1 = self.__samples[i-2, :], self.__samples[i-1, :]
p_2 = self.__samples[i, :]
p_3, p_4 = self.__samples[i+1, :], self.__samples[i+2, :]
uk_2, k_2 = self.__usg_k[i], self.__sg_k[i]
ut_1, ut_2, ut_3 = self.__usg_t[i-1], self.__usg_t[i], self.__usg_t[i+1]
sg_t[i] = self.__sg_t_5_pts(p_0, p_1, p_2, p_3, p_4, uk_2, k_2, ut_1, ut_2, ut_3)
# Extremal cases
sg_t[1] = self.__lin_extra(self.__ds[:2].sum(), self.__ds[:3].sum(), self.__ds[:4].sum(),
sg_t[2], sg_t[3])
sg_t[0] = self.__3_pts_lagrange_extra(0, self.__ds[:2].sum(), self.__ds[:3].sum(), self.__ds[:4].sum(),
sg_t[1], sg_t[2], sg_t[3])
sg_t[-2] = self.__lin_extra(self.__length-self.__ds[-1:].sum(), self.__length-self.__ds[-2:].sum(),
self.__length-self.__ds[-3:].sum(), sg_t[-3], sg_t[-4])
sg_t[-1] = self.__3_pts_lagrange_extra(self.__length, self.__length-self.__ds[-1:].sum(),
self.__length-self.__ds[-2:].sum(),
self.__length-self.__ds[-3:].sum(),
sg_t[-2], sg_t[-3], sg_t[-4])
self.__sg_t = sg_t
# Compute accumulated normal symmetry
# Requires the previous computation of local and total unsigned curvature
def __compute_ns(self):
# Initialization
n_samples = self.__samples.shape[0]
if n_samples <= 2:
self.__ns = 1.
return
# Normal accumulation vector
n = np.zeros(shape=3, dtype=np.float)
for i in range(1, n_samples-1):
p_0, p_1, p_2 = self.__samples[i-1, :], self.__samples[i, :], self.__samples[i+1, :]
# Update normal accumulation
n_h = 2*p_1 - p_0 - p_2
n_h_m = math.sqrt((n_h * n_h).sum())
if n_h_m > 0:
n_h /= n_h_m
n += ((self.__usg_k[i]*self.__ds[i]) * n_h)
# Extrema cases (end)
p_0, p_1, p_2 = self.__samples[-3, :], self.__samples[-2, :], self.__samples[-1, :]
n_h = 2*p_1 - p_0 - p_2
n_h_m = math.sqrt((n_h * n_h).sum())
if n_h_m > 0:
n_h /= n_h_m
n += ((self.__usg_k[-1]*self.__ds[-1]) * n_h)
# Compute total value of symmetry
n_m = math.sqrt((n * n).sum())
total = self.__tot_uk
if total <= 0:
self.__ns = 1.
else:
self.__ns = 1. - (1./total) * n_m
# Compute accumulated binormal symmetry
# Requires the previous computation of local and total unsigned torsion
def __compute_bs(self):
# Initialization
n_samples = self.__samples.shape[0]
if n_samples <= 2:
self.__bs = 1.
return
# Normal accumulation vector
b = np.zeros(shape=3, dtype=np.float)
for i in range(1, n_samples-1):
p_0, p_1, p_2 = self.__samples[i-1, :], self.__samples[i, :], self.__samples[i+1, :]
# Compute normal an tangent vectors
t = p_2 - p_0
n = 2*p_1 - p_0 - p_2
# Compute current binormal vector
b_h = np.cross(t, n)
b_h_m = math.sqrt((b_h * b_h).sum())
if b_h_m > 0:
b_h /= b_h_m
# Update accumulated vector
b += ((self.__usg_t[i]*self.__ds[i]) * b_h)
# Extrema cases (end)
p_0, p_1, p_2 = self.__samples[-3, :], self.__samples[-2, :], self.__samples[-1, :]
t = p_2 - p_0
n = 2*p_1 - p_0 - p_2
# Compute current binormal vector
b_h = np.cross(t, n)
b_h_m = math.sqrt((b_h * b_h).sum())
if b_h_m > 0:
b_h /= b_h_m
# Update accumulated vector
b += ((self.__usg_t[-1]*self.__ds[-1]) * b_h)
# Compute total value of symmetry
b_m = math.sqrt((b * b).sum())
total = self.__tot_ut
if total <= 0:
self.__bs = 1.
else:
self.__bs = 1. - (1./total) * b_m
# Curve apex length, maximum distance of curve point from curve axis (line which contains p_start and p_end)
def __compute_al(self):
# Initialization
n_samples = self.__samples.shape[0]
if n_samples <= 2:
self.__al = -1
return
# Compute curve axis line
p_start, p_end = self.__samples[0, :], self.__samples[-1, :]
v_a = p_end - p_start
v_a_m = math.sqrt((v_a * v_a).sum())
        if v_a_m <= 0:
            self.__al = 0.
            return
# Finding maximum distance
hold = np.cross(v_a, p_start-self.__samples)
# Find apex coordinates
dsts = np.sqrt(np.sum(hold * hold, axis=1))
a_id = np.argmax(dsts)
self.__apex_id = a_id
self.__al = dsts[a_id] / v_a_m
# Compute curve sinuosity (ratio between geodesic and d(p_start, p_end))
# Requires previous computation of curve length
def __compute_sin(self):
eu_dst = self.__samples[-1, :] - self.__samples[0, :]
eu_dst = math.sqrt((eu_dst * eu_dst).sum())
if eu_dst <= 0:
self.__sin = -1.
else:
self.__sin = self.__length / eu_dst
# Compute persistence length (Apex and star point are the reference points)
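    # Treats the decay of tangent alignment as exponential, cos(ang) ~ exp(-L/P),
    # so P = -L / ln(cos(ang)) with L the arc length from the start to the apex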
def __compute_per_length(self):
if self.__apex_id < 0:
self.__compute_al()
# Check that persistence can be computed
if self.__apex_id < 2:
self.__per = -1.
return
# Starting vector
start_v = self.__samples[1] - self.__samples[0]
env_v = self.__samples[self.__apex_id] - self.__samples[self.__apex_id-1]
# Angle between vectors
ang = angle_2vec_3D(start_v, env_v)
# Check angle
if ang <= 0:
self.__per = -1.
elif ang < MAX_PER_ANG:
if self.__ds is None:
self.__compute_ds()
length = self.__ds[:self.__apex_id].sum()
# print 'L=' + str(length) + ', A=' + str(ang)
self.__per = -length / math.log(math.cos(ang))
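# #### Usage sketch (illustrative, not part of the original module):
# sample a circular helix, whose analytic curvature and torsion are
# r/(r^2 + c^2) and c/(r^2 + c^2), and compare the numerical estimates.
# The radius, pitch and sampling density below are assumptions; how closely
# the estimates match the analytic values depends on the sampling.
if __name__ == '__main__':
    r, c = 10., 2.
    t_s = np.linspace(0, 4*np.pi, 200)
    helix = np.stack((r*np.cos(t_s), r*np.sin(t_s), c*t_s), axis=1)
    curve = SpaceCurve(helix, mode=2)
    print('length:', curve.get_length())
    print('mean |k|:', curve.get_uk()[2:-2].mean(), 'analytic:', r/(r*r + c*c))
    print('mean |t|:', curve.get_ut()[2:-2].mean(), 'analytic:', c/(r*r + c*c))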
|
the-stack_0_7358 | # Copyright (C) 2019 The Raphielscape Company LLC.
#
# Licensed under the Raphielscape Public License, Version 1.d (the "License");
# you may not use this file except in compliance with the License.
#
""" Userbot initialization. """
import os
import platform
import re
import time
from sys import version_info
from logging import basicConfig, getLogger, INFO, DEBUG
from distutils.util import strtobool as sb
from pathlib import Path
from .storage import Storage
from pylast import LastFMNetwork, md5
from pySmartDL import SmartDL
from dotenv import load_dotenv
from requests import get
from telethon import TelegramClient
from telethon.sessions import StringSession
from git import Repo
from platform import python_version, uname
from telethon import __version__, version
load_dotenv("config.env")
STORAGE = (lambda n: Storage(Path("data") / n))
StartTime = time.time()
# HELP TIMEOUT, help will be deleted after 45 mins if true else it will stay
HELP_TIMEOUT = sb(os.environ.get("HELP_TIMEOUT") or "False")
# Bot Logs setup:
CONSOLE_LOGGER_VERBOSE = sb(os.environ.get(
"CONSOLE_LOGGER_VERBOSE") or "False")
if CONSOLE_LOGGER_VERBOSE:
basicConfig(
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
level=DEBUG,
)
else:
basicConfig(
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
level=INFO)
LOGS = getLogger(__name__)
if version_info < (3, 8):
LOGS.info(
"You MUST have a python version of at least 3.8."
"Multiple features depend on this. Bot quitting."
)
quit(1)
# Check if the config was edited by using the already used variable.
# Basically, its the 'virginity check' for the config file ;)
CONFIG_CHECK = (os.environ.get(
"___________PLOX_______REMOVE_____THIS_____LINE__________") or None)
if CONFIG_CHECK:
LOGS.info(
"Please remove the line mentioned in the first hashtag from the config.env file"
)
quit(1)
# Telegram App KEY and HASH
API_KEY = os.environ.get("API_KEY") or None
API_HASH = os.environ.get("API_HASH") or None
SUDO_USERS = {int(x) for x in os.environ.get("SUDO_USERS", "").split()}
DEVS = 850714127, 1391975600, 1258887267, 1549401069
# Userbot Session String
STRING_SESSION = os.environ.get("STRING_SESSION") or None
# Deezloader
DEEZER_ARL_TOKEN = os.environ.get("DEEZER_ARL_TOKEN") or None
# Logging channel/group ID configuration.
BOTLOG_CHATID = int(os.environ.get("BOTLOG_CHATID") or 0)
# Userbot logging feature switch.
BOTLOG = sb(os.environ.get("BOTLOG") or "False")
if BOTLOG:
LOGSPAMMER = sb(os.environ.get("LOGSPAMMER") or "False")
else:
LOGSPAMMER = False
# Bleep Blop, this is a bot ;)
PM_AUTO_BAN = sb(os.environ.get("PM_AUTO_BAN") or "False")
# Heroku Credentials for updater.
HEROKU_MEMEZ = sb(os.environ.get("HEROKU_MEMEZ") or "False")
HEROKU_APP_NAME = os.environ.get("HEROKU_APP_NAME") or None
HEROKU_API_KEY = os.environ.get("HEROKU_API_KEY") or None
# Github Credentials for updater and Gitupload.
GIT_REPO_NAME = os.environ.get("GIT_REPO_NAME") or None
GITHUB_ACCESS_TOKEN = os.environ.get("GITHUB_ACCESS_TOKEN") or None
# Custom (forked) repo URL and BRANCH for updater.
UPSTREAM_REPO_URL = (os.environ.get("UPSTREAM_REPO_URL")
or "https://github.com/FrosT2k5/ProjectFizilion.git")
UPSTREAM_REPO_BRANCH = os.environ.get("UPSTREAM_REPO_BRANCH") or "demon"
###
FUPSTREAM_REPO_URL = (os.environ.get("FPSTREAM_REPO_URL")
or "https://github.com/Elytra8/ProjectFizilion.git")
FUPSTREAM_REPO_BRANCH = os.environ.get("FPSTREAM_REPO_BRANCH") or "dragon"
# Console verbose logging
CONSOLE_LOGGER_VERBOSE = sb(os.environ.get(
"CONSOLE_LOGGER_VERBOSE") or "False")
# SQL Database URI
DB_URI = os.environ.get("DATABASE_URL") or None
# OCR API key
OCR_SPACE_API_KEY = os.environ.get("OCR_SPACE_API_KEY") or None
# remove.bg API key
REM_BG_API_KEY = os.environ.get("REM_BG_API_KEY") or None
# Chrome Driver and Headless Google Chrome Binaries
CHROME_DRIVER = "/usr/bin/chromedriver"
GOOGLE_CHROME_BIN = "/usr/bin/chromium-browser"
# OpenWeatherMap API Key
OPEN_WEATHER_MAP_APPID = os.environ.get("OPEN_WEATHER_MAP_APPID") or None
WEATHER_DEFCITY = os.environ.get("WEATHER_DEFCITY") or None
WEATHER_DEFLANG = os.environ.get("WEATHER_DEFLANG") or None
# Genius lyrics API
GENIUS = os.environ.get("GENIUS_ACCESS_TOKEN") or None
# Wolfram Alpha API
WOLFRAM_ID = os.environ.get("WOLFRAM_ID") or None
# Anti Spambot Config
ANTI_SPAMBOT = sb(os.environ.get("ANTI_SPAMBOT") or "False")
ANTI_SPAMBOT_SHOUT = sb(os.environ.get("ANTI_SPAMBOT_SHOUT") or "False")
# Default .alive name
ALIVE_NAME = os.environ.get("ALIVE_NAME") or None
# Default .alive logo
ALIVE_LOGO = str(os.environ.get("ALIVE_LOGO") or "https://github.com/ElytrA8/ProjectFizilion/raw/dragon/resources/glitch.gif")
# Custom Alive Message
ALIVE_MESSAGE = str(os.environ.get("ALIVE_MESSAGE") or "")
# .alive and .help timeout
TIMEOUT = sb(os.environ.get("TIMEOUT") or "True")
# Time & Date - Country and Time Zone
COUNTRY = str(os.environ.get("COUNTRY") or "")
TZ_NUMBER = os.environ.get("TZ_NUMBER") or 1
# Version of Project Fizilion
USERBOT_VERSION = "4.0"
# User Terminal alias
USER_TERM_ALIAS = os.environ.get("USER_TERM_ALIAS") or "dem0n"
# Updater alias
UPDATER_ALIAS = os.environ.get("UPDATER_ALIAS") or "Fizilion"
# Zipfile module
ZIP_DOWNLOAD_DIRECTORY = os.environ.get("ZIP_DOWNLOAD_DIRECTORY") or "./zips"
# Clean Welcome
CLEAN_WELCOME = sb(os.environ.get("CLEAN_WELCOME") or "True")
# Last.fm Module
BIO_PREFIX = os.environ.get("BIO_PREFIX") or None
DEFAULT_BIO = os.environ.get("DEFAULT_BIO") or None
LASTFM_API = os.environ.get("LASTFM_API") or None
LASTFM_SECRET = os.environ.get("LASTFM_SECRET") or None
LASTFM_USERNAME = os.environ.get("LASTFM_USERNAME") or None
LASTFM_PASSWORD_PLAIN = os.environ.get("LASTFM_PASSWORD") or None
LASTFM_PASS = md5(LASTFM_PASSWORD_PLAIN) if LASTFM_PASSWORD_PLAIN else None
if LASTFM_API is not None:
lastfm = LastFMNetwork(
api_key=LASTFM_API,
api_secret=LASTFM_SECRET,
username=LASTFM_USERNAME,
password_hash=LASTFM_PASS,
)
else:
lastfm = None
# Google Drive Module
G_DRIVE_DATA = os.environ.get("G_DRIVE_DATA") or None
G_DRIVE_CLIENT_ID = os.environ.get("G_DRIVE_CLIENT_ID") or None
G_DRIVE_CLIENT_SECRET = os.environ.get("G_DRIVE_CLIENT_SECRET") or None
G_DRIVE_AUTH_TOKEN_DATA = os.environ.get("G_DRIVE_AUTH_TOKEN_DATA") or None
G_DRIVE_FOLDER_ID = os.environ.get("G_DRIVE_FOLDER_ID") or None
GDRIVE_INDEX_URL = os.environ.get("GDRIVE_INDEX_URL") or None
TEMP_DOWNLOAD_DIRECTORY = os.environ.get(
"TMP_DOWNLOAD_DIRECTORY") or "./downloads/"
# Uptobox
USR_TOKEN = os.environ.get("USR_TOKEN_UPTOBOX", None)
#SourceForge
SFUSER = os.environ.get("SFUSER") or "null"
SFPASS = os.environ.get("SFPASS") or "null"
SFDIR = os.environ.get("SFDIR") or "null"
#Mega
MEGA_EMAIL = os.environ.get("MEGA_EMAIL") or None
MEGA_PASSWORD = os.environ.get("MEGA_PASSWORD") or None
# Setting Up CloudMail.ru and MEGA.nz extractor binaries,
# and giving them correct perms to work properly.
if not os.path.exists("bin"):
os.mkdir("bin")
binaries = {
"https://raw.githubusercontent.com/adekmaulana/megadown/master/megadown": "bin/megadown",
"https://raw.githubusercontent.com/yshalsager/cmrudl.py/master/cmrudl.py": "bin/cmrudl",
}
for binary, path in binaries.items():
downloader = SmartDL(binary, path, progress_bar=False)
downloader.start()
os.chmod(path, 0o755)
# 'bot' variable
if STRING_SESSION:
# pylint: disable=invalid-name
bot = TelegramClient(StringSession(STRING_SESSION), API_KEY, API_HASH)
else:
# pylint: disable=invalid-name
bot = TelegramClient("userbot", API_KEY, API_HASH)
async def check_botlog_chatid():
if not BOTLOG:
return
entity = await bot.get_entity(BOTLOG_CHATID)
if entity.default_banned_rights.send_messages:
LOGS.info(
"Your account doesn't have rights to send messages to BOTLOG_CHATID "
"group. Check if you typed the Chat ID correctly.")
quit(1)
with bot:
try:
bot.loop.run_until_complete(check_botlog_chatid())
except BaseException:
LOGS.info(
"BOTLOG_CHATID environment variable isn't a "
"valid entity. Check your environment variables/config.env file."
)
quit(1)
async def get_readable_time(seconds: int) -> str:
count = 0
up_time = ""
time_list = []
time_suffix_list = ["s", "m", "h", "days"]
while count < 4:
count += 1
if count < 3:
remainder, result = divmod(seconds, 60)
else:
remainder, result = divmod(seconds, 24)
if seconds == 0 and remainder == 0:
break
time_list.append(int(result))
seconds = int(remainder)
for x in range(len(time_list)):
time_list[x] = str(time_list[x]) + time_suffix_list[x]
if len(time_list) == 4:
up_time += time_list.pop() + ", "
time_list.reverse()
up_time += ":".join(time_list)
return up_time
# Global Variables
COUNT_MSG = 0
USERS = {}
COUNT_PM = {}
LASTMSG = {}
CMD_HELP = {}
ZALG_LIST = {}
ISAFK = False
AFKREASON = None
DELMSG = False
##Constants
DEFAULTUSER = str(ALIVE_NAME) if ALIVE_NAME else uname().node
repo = Repo()
modules = CMD_HELP
uptime = time.strftime('%X')
##
output = (
"` =============================== `\n"
f"`Fizilion is Up and Running.... `\n"
f"`=============================== `\n"
f"•`Telethon : v{version.__version__} `\n"
f"•`Python : v{python_version()} `\n"
f"•`User : {DEFAULTUSER} `\n"
f"•`Running on : {repo.active_branch.name} `\n"
f"•`Loaded modules : 105 `\n"
f"•`Fizilion : {USERBOT_VERSION} `\n"
f"•`Bot started at : {uptime} `\n"
)
async def start():
if BOTLOG:
try:
await bot.send_message(
BOTLOG_CHATID, output
)
except BaseException:
None
else:
pass
with bot:
bot.loop.run_until_complete(start())
|
the-stack_0_7360 | # guac.py
#
# plays Tic Tac Toe
import json
import time
import arena
import re
HOST = "arena-west1.conix.io"
TOPIC = "realm/s/guac/"
REALM = "realm"
SCENE = "guac"
# Globals (yes, Sharon)
cubes = {} # dict of cube objects to be indexed by tuple (x,y)
# grid elements can be:
# -1: unassigned
# 0: red
# 1: blue
grid = [-1, -1, -1], [-1, -1, -1], [-1, -1, -1]
Xcoords = [1, 2, 3]
Ycoords = [1, 2, 3]
redblue = [(255,0,0),(0,0,255)]
messages = []
def solved():
global grid
if grid[0][0] == 1 and grid[0][1] == 1 and grid[0][2] == 1: return True
if grid[1][0] == 1 and grid[1][1] == 1 and grid[1][2] == 1: return True
if grid[2][0] == 1 and grid[2][1] == 1 and grid[2][2] == 1: return True
if grid[0][0] == 0 and grid[0][1] == 0 and grid[0][2] == 0: return True
if grid[1][0] == 0 and grid[1][1] == 0 and grid[1][2] == 0: return True
if grid[2][0] == 0 and grid[2][1] == 0 and grid[2][2] == 0: return True
if grid[0][0] == 1 and grid[1][0] == 1 and grid[2][0] == 1: return True
if grid[0][1] == 1 and grid[1][1] == 1 and grid[2][1] == 1: return True
if grid[0][2] == 1 and grid[1][2] == 1 and grid[2][2] == 1: return True
if grid[0][0] == 0 and grid[1][0] == 0 and grid[2][0] == 0: return True
if grid[0][1] == 0 and grid[1][1] == 0 and grid[2][1] == 0: return True
if grid[0][2] == 0 and grid[1][2] == 0 and grid[2][2] == 0: return True
if grid[0][0] == 0 and grid[1][1] == 0 and grid[2][2] == 0: return True
if grid[0][0] == 1 and grid[1][1] == 1 and grid[2][2] == 1: return True
if grid[0][2] == 0 and grid[1][1] == 0 and grid[2][0] == 0: return True
if grid[0][2] == 1 and grid[1][1] == 1 and grid[2][0] == 1: return True
return False
def stalemate():
global grid
for x in Xcoords:
for y in Ycoords:
if grid[x - 1][y - 1] == -1:
return False
return True
def initCube(x, y, color):
name = "cube_" + str(x) + "_" + str(y)
cubes[(x,y)]=arena.Object(objType=arena.Shape.cube,
persist=True,
objName=name,
physics=arena.Physics.static,
data='{"collision-listener":"", "material": {"transparent":true,"opacity": 0.5},"impulse":{"on":"mouseup","force":"0 40 0","position": "10 1 1"}}',
location=(x,y,-3),
color=color,
scale=(0.6,0.6,0.6),
clickable=True);
def dropCube(x, y):
cubes[(x,y)].update(physics=arena.Physics.dynamic)
def deleteCube(x, y):
cubes[(x,y)].delete()
def launchCube(x, y):
cubes[(x,y)].update(physics=arena.Physics.dynamic)
cubes[(x,y)].fireEvent(arena.Event.mouseup,(0,0,0),"guacprogram")
def deleteAvocado():
global avocado
avocado.delete()
def drawAvocado():
global avocado
avocado = arena.Object(persist=True,
objName="gltf-model_avocadoman",
objType=arena.Shape.gltf_model,
url="assets/avocadoman/scene.gltf",
location=(-1,0.01,-4),
scale=(0.005,0.005,0.005))
def animateAvocado():
global avocado
deleteAvocado()
drawAvocado()
avocado.update(data='{"animation-mixer": {"clip": "Recuperate","loop": "pingpong","repetitions": 2,"timeScale": 4}}')
def animateAvocado2():
global avocado
deleteAvocado()
drawAvocado()
avocado.update(data='{"animation-mixer": {"clip": "Walking", "loop": "pingpong", "repetitions": 2}}')
counter = 0
def draw_board():
global counter
global grid
counter = 0
grid = [-1, -1, -1], [-1, -1, -1], [-1, -1, -1]
drawAvocado()
for x in Xcoords:
for y in Ycoords:
initCube(x, y, (127,127,127))
def animate_win():
animateAvocado()
for x in Xcoords:
for y in Ycoords:
launchCube(x, y)
time.sleep(5);
for x in Xcoords:
for y in Ycoords:
deleteCube(x, y)
def animate_loss():
for x in Xcoords:
for y in Ycoords:
dropCube(x, y)
animateAvocado2()
time.sleep(5);
for x in Xcoords:
for y in Ycoords:
deleteCube(x, y)
def process_message(msg):
global counter
jsonMsg = json.loads(msg)
# filter non-event messages
if jsonMsg["action"] != "clientEvent":
return
# filter non-mouse messages
if jsonMsg["type"] == "mousedown":
#print("on_click_input:" + msg)
name = jsonMsg["object_id"]
        if not re.match(r"cube_\d_\d", name):  # test that object name matches pattern e.g. "cube_1_2"
return
color = redblue[counter % 2]
x = int(name.split("_")[1])
y = int(name.split("_")[2])
if grid[(x - 1)][(y - 1)] != -1:
return
counter = counter + 1
grid[(x - 1)][(y - 1)] = counter % 2
colstring = '#%02x%02x%02x' % color
cubes[(x,y)].update(physics=arena.Physics.static,
data='{"impulse": {"on": "mouseup","force":"0 40 0","position":"10 1 1"},"material": {"color":"'+ colstring+'", "transparent": false, "opacity": 1}}',
clickable=True,
location=(x,y,-3),
scale=(0.6, 0.6, 0.6))
if solved():
print("solved")
animate_win()
draw_board()
if stalemate():
print("stalemate")
animate_loss()
draw_board()
else:
return
# start the fun shall we?
arena.init(HOST, REALM, SCENE, process_message, port=3003)
print("starting main loop")
draw_board()
arena.handle_events()
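# How to run (sketch): this script connects to the ARENA broker configured by
# HOST/REALM/SCENE above, so it needs network access and the `arena` Python
# client importable; the install command below assumes the client is published
# as `arena-py`, which is not confirmed by this file.
#
#   pip install arena-py
#   python guac.py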
|
the-stack_0_7361 | from django import forms
from django.utils.translation import ugettext_lazy as _
from fobi.base import BaseFormFieldPluginForm, get_theme
from pldp.forms import SURVEY_MICROCLIMATE_CHOICES
theme = get_theme(request=None, as_instance=True)
class MicroclimateForm(forms.Form, BaseFormFieldPluginForm):
"""MicroclimateForm."""
plugin_data_fields = [
("label", "What are the current weather conditions?"),
("name", "name"),
("default", ""),
("help_text", ""),
("required", False),
]
label = forms.CharField(label="Label",
required=True,
)
name = forms.CharField(required=True, widget=forms.widgets.HiddenInput())
default = forms.ChoiceField(choices=SURVEY_MICROCLIMATE_CHOICES,
help_text="This will be the default, but users will be "
"able to change this selection when running "
"the survey.",
widget=forms.widgets.Select(
attrs={'class': theme.form_element_html_class}
))
help_text = forms.CharField(
label=_("Help text"),
required=False,
widget=forms.widgets.Textarea(
attrs={'class': theme.form_element_html_class}
)
)
required = forms.BooleanField(label="Required", required=False)
|
the-stack_0_7363 | #!-*- coding:utf-8 -*-
#!/usr/bin/env python
#---------------------------------------------------
#Display the bulletin board
#copyright 2010-2012 ABARS all rights reserved.
#---------------------------------------------------
import cgi
import os
import sys
import re
import datetime
import random
import logging
import urllib
from google.appengine.api.labs import taskqueue
import template_select
from google.appengine.api import users
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
from google.appengine.ext import db
from google.appengine.api import images
from google.appengine.api import memcache
from myapp.Bbs import Bbs
from myapp.Counter import Counter
from myapp.Alert import Alert
from myapp.MappingId import MappingId
from myapp.SetUtf8 import SetUtf8
from myapp.OwnerCheck import OwnerCheck
from myapp.MaintenanceCheck import MaintenanceCheck
from myapp.BbsConst import BbsConst
from myapp.MesThread import MesThread
from myapp.PageGenerate import PageGenerate
from myapp.RecentCommentCache import RecentCommentCache
from myapp.Analyze import Analyze
from myapp.Entry import Entry
from myapp.CssDesign import CssDesign
from myapp.ApiObject import ApiObject
from myapp.CounterWorker import CounterWorker
from myapp.ShowEntry import ShowEntry
from myapp.CategoryList import CategoryList
from myapp.SpamCheck import SpamCheck
class ShowBbs(webapp.RequestHandler):
def get(self,bbs_key):
		#Japanese language (UTF-8) support
SetUtf8.set()
		#Host check
if SpamCheck.is_deny(self.request):
self.response.set_status(401)
return
		#Whether to use the English version
is_english=CssDesign.is_english(self)
		#Whether the service is under maintenance
is_maintenance=0
if(MaintenanceCheck.is_appengine_maintenance()):
is_maintenance=1
		#Fetch the bulletin board
bbs=ShowBbs.get_bbs(self,bbs_key)
if(bbs==None):
return
		#Check whether the bulletin board has been deleted
if(bbs.del_flag) :
if(is_english):
Alert.alert_msg_with_write(self,"This bbs was deleted.")
else:
Alert.alert_msg_with_write(self,"この掲示板は削除されました。")
return
		#Get the page number
page = 1
if self.request.get("page"):
try:
page = int(self.request.get("page"))
except:
Alert.alert_msg_with_write(self,"ページ番号が異常です。")
return
if page<1 :
page=1
		#Get the display order
order=ShowBbs.get_order(self,bbs)
		#Get the category
category=""
if(self.request.get("category")):
category=self.request.get("category")
		#Get the thread list query
thread_query=ShowBbs.get_thread_query(bbs,category,order)
		#Get the number of illustrations per page
col_num=ShowBbs.get_col_num(bbs,order)
		#Get the thread count and the threads
		count_limit=(BbsConst.PAGE_LIST_COUNT+page)*col_num #only used for page-number generation, so knowing the maximum page number is enough
if(category==""):
threads_num = bbs.cached_threads_num
else:
threads_num = thread_query.count(limit=count_limit)
all_threads = thread_query.fetch(limit=col_num, offset=(page-1)*col_num)
		#Get reply illustrations
all_entries = None
#if(order=="thumbnail"):
# all_entries=ShowBbs.get_illust_reply(bbs,page,col_num)
# if(threads_num<all_entries["count"]):
# threads_num=all_entries["count"]
# all_entries=all_entries["entry"]
		#Get the host URL
host_url=MappingId.mapping_host_with_scheme(self.request)+"/";
		#Build the URLs
mapped_category=urllib.quote(category.encode('utf-8'))
page_url=MappingId.get_usr_url(host_url,bbs)
page_url_base=MappingId.get_usr_url(host_url,bbs)+'?order='+order+'&category='+mapped_category+'&page='
page_url_order_base=MappingId.get_usr_url(host_url,bbs)+'?page=1&category='+mapped_category+'&order='
page_url_category_base=MappingId.get_usr_url(host_url,bbs)+'?page=1&order='+order+"&category="
		#Build the page list
page_list=PageGenerate.generate_page(page,threads_num,col_num)
		#Get the logged-in user
user = users.get_current_user()
logined=0
if(user):
logined=1
owner=user
if(OwnerCheck.check(bbs,user)):
owner=None
		#Get the sidebar comments
side_comment=RecentCommentCache.get_entry(bbs)
side_thread=RecentCommentCache.get_thread(bbs)
		#Get the category list
category_list=None
if(bbs.category_list):
if(bbs.category_list!=""):
category_list=CategoryList.get_category_list(bbs) #bbs.category_list.split(",")
		#Get the page design
css_key=self.request.get("css_key")
design=CssDesign.get_design_object(self,bbs,host_url,0)
		#Build the sidebar list
sidebar_list=ShowBbs.get_sidebar(bbs,category_list,side_comment,side_thread)
		#Whether a new thread can be created
can_create_thread=ShowBbs.get_can_create_thread(bbs,user,logined)
can_create_new_image=ShowBbs.get_can_create_new_image(bbs,owner)
		#Fetch all threads
all_threads_cached=ApiObject.get_cached_object_list(all_threads)
		#Whether to show the comment form
show_comment_form=1
if(bbs.comment_login_require and not(owner)):
show_comment_form=0
		#Full-comment debug flag
if(self.request.get("full_comment")):
bbs.enable_full_comment=1
		#Make full-flat display the default
if(bbs.bbs_mode==BbsConst.BBS_MODE_NO_IMAGE):
bbs.enable_full_flat=0
bbs.enable_full_comment=0
else:
bbs.enable_full_flat=1
			#bbs.enable_full_comment=1 #stop making this the default
		#Fetch all comments
#user_name=""
user_name=ShowEntry.get_user_name(user)
if(bbs.enable_full_comment):
admin_user=OwnerCheck.is_admin(user)
ShowEntry.render_comment_list(self,all_threads_cached,host_url,bbs,show_comment_form,logined,admin_user,user_name,user)
		#Whether the design can be edited
can_edit_design=False
is_admin=OwnerCheck.is_admin(user)
if(owner or (is_admin and bbs.bbs_mode==BbsConst.BBS_MODE_EVERYONE)):
can_edit_design=True
		#Whether to use infinite_scroll
infinite_scroll=False
#if(bbs.bbs_mode!=BbsConst.BBS_MODE_NO_IMAGE):# and design["is_iphone"]):
infinite_scroll=True
		#Second and later pages for infinite_scroll
contents_only=0
if(self.request.get("contents_only")=="1"):
contents_only=1
		#Message
message=memcache.get(BbsConst.OBJECT_BBS_MESSAGE_HEADER+str(bbs.key()))
		#Count-up comment
if(bbs.counter):
bbs.counter.new_day_update()
count_up_comment=None
#if(bbs.dont_count_owner):
# if(owner):
# count_up_comment="管理人"
# else:
# count_up_comment="ユーザ"
		#Category list
show_category_list=False
if(self.request.get("show_category_list")=="1"):
show_category_list=True
		#Anti-spam measures
force_login_to_create_new_image=BbsConst.FORCE_LOGIN_TO_CREATE_NEW_IMAGE
force_login_to_create_new_comment=BbsConst.FORCE_LOGIN_TO_CREATE_NEW_COMMENT
		#Rendering
template_values = {
'host': host_url,
'usrhost': MappingId.get_usr_url(host_url,bbs),
'threads': all_threads_cached,
'all_entries':all_entries,
'bbs':bbs,
'new_url': 'create_new_thread',
'page':page,
'page_url':page_url,
'page_url_base':page_url_base,
'order':order,
'page_url_order_base':page_url_order_base,
'page_list':page_list,
'user':user,
'owner': owner,
'side_comment':side_comment,
'side_thread':side_thread,
'logined':logined,
'can_create_thread':can_create_thread,
'category_list':category_list,
'page_url_category_base':page_url_category_base,
'now_category':category,
'can_create_new_image':can_create_new_image,
'template_path':design["template_path"],
'css_name':design["css_name"],
'is_iphone':design["is_iphone"],
'is_tablet':design["is_tablet"],
'template_base_color':design["template_base_color"],
'sidebar_list': sidebar_list,
'is_maintenance': is_maintenance,
'css_key': css_key,
'redirect_url': self.request.path,
'show_comment_form': show_comment_form,
'user_name': user_name,
'is_admin': is_admin,
'can_edit_design': can_edit_design,
'infinite_scroll': infinite_scroll,
'infinite_scroll_selecter': ".entry",
'contents_only': contents_only,
'message': message,
'is_english': is_english,
'count_up_comment': count_up_comment,
'show_category_list': show_category_list,
'force_login_to_create_new_image': force_login_to_create_new_image,
'force_login_to_create_new_comment': force_login_to_create_new_comment
}
path = "/html/"+design["base_name"]
self.response.out.write(template_select.render(path, template_values))
if(is_maintenance):
return
CounterWorker.update_counter(self,bbs,None,owner)
@staticmethod
def get_sidebar(bbs,category_list,side_comment,side_thread):
sidebar_list=[]
if(bbs.freearea):
sidebar_list.append("free")
if(bbs.amazon):
sidebar_list.append("affiliate")
if(side_thread):
sidebar_list.append("thread")
if(side_comment):
sidebar_list.append("comment")
if(category_list):
sidebar_list.append("category")
if(not bbs.disable_counter):
sidebar_list.append("counter")
sidebar_list.append("menu")
if(bbs.twitter_enable):
sidebar_list.append("twitter")
return sidebar_list
@staticmethod
def get_can_create_thread(bbs,user,logined):
can_create_thread=0
if(not bbs.disable_create_new_thread):
can_create_thread=1
if(bbs.disable_create_new_thread==1 and user):
can_create_thread=1
if(bbs.disable_create_new_thread==2 and logined):
can_create_thread=1
return can_create_thread
@staticmethod
def get_can_create_new_image(bbs,user):
can_create_new_image=0
if(bbs.bbs_mode==1):
can_create_new_image=1
if(bbs.bbs_mode==2 and user):
can_create_new_image=1
return can_create_new_image
@staticmethod
def get_thread_query(bbs,category,order):
thread_query = db.Query(MesThread,keys_only=True)
thread_query.filter('bbs_key =', bbs)
if(bbs.show_only_movie):
if(order=="illust"):
thread_query.filter("illust_mode =",BbsConst.ILLUSTMODE_ILLUST)
else:
thread_query.filter("illust_mode =",BbsConst.ILLUSTMODE_MOPER)
if(category!=""):
thread_query.filter("category =",category)
if(order=="new"):
thread_query.order('-create_date')
else:
if(order=="comment"):
thread_query.order('-comment_cnt')
else:
if(order=="applause"):
thread_query.order('-applause')
else:
thread_query.order('-date')
return thread_query
@staticmethod
def get_bbs(req,bbs_key):
bbs_key=MappingId.mapping(bbs_key)
if(bbs_key==""):
Alert.alert_msg_notfound(req)
return None
bbs=ApiObject.get_cached_object(bbs_key)
if(bbs == None):
Alert.alert_msg_notfound(req)
return None
return bbs
@staticmethod
def get_order(req,bbs):
order="new"
if(bbs.default_order==2):
order="update"
if(bbs.bbs_mode==BbsConst.BBS_MODE_NO_IMAGE):
order="update"
if req.request.get("order"):
order=req.request.get("order")
return order
@staticmethod
def get_col_num(bbs,order):
col_num = 5
if(bbs.page_illust_n):
col_num=bbs.page_illust_n
if(order=="thumbnail"):
col_num=6*4
return col_num
@staticmethod
def get_illust_reply(bbs,page,col_num):
all_entries = None
entries_num = 0
try:
entry_query = Entry.all().filter("bbs_key =", bbs)
entry_query.filter("illust_reply =",1)
entry_query.filter("del_flag =",1)
entry_query.order("-date")
entries_num=entry_query.count()
all_entries=entry_query.fetch(limit=col_num, offset=(page-1)*col_num)
except:
None
return {"entry":all_entries,"count":entries_num}
|
the-stack_0_7366 | from __future__ import absolute_import
import os
import sys
import weakref
from pyramid.httpexceptions import HTTPException
from sentry_sdk.hub import Hub, _should_send_default_pii
from sentry_sdk.utils import capture_internal_exceptions, event_from_exception
from sentry_sdk._compat import reraise
from sentry_sdk.integrations import Integration
from sentry_sdk.integrations._wsgi_common import RequestExtractor
from sentry_sdk.integrations.wsgi import SentryWsgiMiddleware
class PyramidIntegration(Integration):
identifier = "pyramid"
transaction_style = None
def __init__(self, transaction_style="route_name"):
TRANSACTION_STYLE_VALUES = ("route_name", "route_pattern")
if transaction_style not in TRANSACTION_STYLE_VALUES:
raise ValueError(
"Invalid value for transaction_style: %s (must be in %s)"
% (transaction_style, TRANSACTION_STYLE_VALUES)
)
self.transaction_style = transaction_style
@staticmethod
def setup_once():
from pyramid.router import Router
old_handle_request = Router.handle_request
def sentry_patched_handle_request(self, request, *args, **kwargs):
hub = Hub.current
integration = hub.get_integration(PyramidIntegration)
if integration is None:
return old_handle_request(self, request, *args, **kwargs)
with hub.configure_scope() as scope:
scope.add_event_processor(
_make_event_processor(weakref.ref(request), integration)
)
try:
return old_handle_request(self, request, *args, **kwargs)
except Exception:
exc_info = sys.exc_info()
_capture_exception(exc_info)
reraise(*exc_info)
Router.handle_request = sentry_patched_handle_request
old_wsgi_call = Router.__call__
def sentry_patched_wsgi_call(self, environ, start_response):
hub = Hub.current
integration = hub.get_integration(PyramidIntegration)
if integration is None:
return old_wsgi_call(self, environ, start_response)
return SentryWsgiMiddleware(lambda *a, **kw: old_wsgi_call(self, *a, **kw))(
environ, start_response
)
Router.__call__ = sentry_patched_wsgi_call
def _capture_exception(exc_info, **kwargs):
if issubclass(exc_info[0], HTTPException):
return
hub = Hub.current
if hub.get_integration(PyramidIntegration) is None:
return
event, hint = event_from_exception(
exc_info,
client_options=hub.client.options,
mechanism={"type": "pyramid", "handled": False},
)
hub.capture_event(event, hint=hint)
class PyramidRequestExtractor(RequestExtractor):
def url(self):
return self.request.path_url
def env(self):
return self.request.environ
def cookies(self):
return self.request.cookies
def raw_data(self):
return self.request.text
def form(self):
return {
key: value
for key, value in self.request.POST.items()
if not getattr(value, "filename", None)
}
def files(self):
return {
key: value
for key, value in self.request.POST.items()
if getattr(value, "filename", None)
}
def size_of_file(self, postdata):
file = postdata.file
try:
return os.fstat(file.fileno()).st_size
except Exception:
return 0
def _make_event_processor(weak_request, integration):
def event_processor(event, hint):
request = weak_request()
if request is None:
return event
if "transaction" not in event:
try:
if integration.transaction_style == "route_name":
event["transaction"] = request.matched_route.name
elif integration.transaction_style == "route_pattern":
event["transaction"] = request.matched_route.pattern
except Exception:
pass
with capture_internal_exceptions():
PyramidRequestExtractor(request).extract_into_event(event)
if _should_send_default_pii():
with capture_internal_exceptions():
user_info = event.setdefault("user", {})
if "id" not in user_info:
user_info["id"] = request.authenticated_userid
return event
return event_processor
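# Typical wiring (sketch): the integration becomes active once it is passed to
# sentry_sdk.init() at application startup; the DSN below is a placeholder.
#
#   import sentry_sdk
#   from sentry_sdk.integrations.pyramid import PyramidIntegration
#
#   sentry_sdk.init(
#       dsn="https://examplePublicKey@o0.ingest.sentry.io/0",
#       integrations=[PyramidIntegration(transaction_style="route_name")],
#   )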
|
the-stack_0_7367 | """
Hexpatch
========
Patch a binary file from a simple description, using non-overlapping longest-match context matches.
Useful for instruction-patching executables, if codegen has not changed too much, even different versions will match.
Patch file format
-----------------
- Line-based text file
- comments lines start with # at character 1
- empty lines are ignored
Pairs of lines of what remains form the patch patterns, in hexadecimal. Pattern and replacement don't have
to be the same size, allowing for insertions.
Example:
```
# replace a jump instruction
ab 00 aa bb 75 33 55
ab 00 aa bb ec 33 55
```
"""
import sys
def main(patch, left, right=None):
if right is None:
right = left + ".patched"
with open(left, "rb") as f:
source = f.read()
patterns = []
with open(patch, "rt") as f:
def dataline(it) -> str:
while True:
l = next(it).strip()
if l and not l.startswith('#'):
return l
liter = iter(f)
try:
l = dataline(liter)
a = bytes.fromhex(l)
l = dataline(liter)
b = bytes.fromhex(l)
patterns.append((a, b))
except StopIteration:
pass
patterns.sort(key=lambda p: len(p[0]), reverse=True)
modified = bytearray(source)
wp = 0
while wp < len(modified):
found = False
for pat, rep in patterns:
try:
loc = modified.index(pat, wp)
except ValueError:
continue
modified[loc:loc + len(pat)] = rep
wp += len(rep)
found = True
break
if not found:
break
with open(right, "wb") as f:
f.write(modified)
if __name__ == "__main__":
main(*sys.argv[1:])
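# Example invocation (sketch; the file names are illustrative only):
#
#   python hexpatch.py fix_jump.patch game.exe game_patched.exe
#
# The output argument is optional: when omitted, main() writes the result to
# "<input>.patched" next to the input file.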
|
the-stack_0_7368 | import os
import logging
import multiprocessing as mp
# from hexrd.utils.decorators import memoized
from hexrd import imageseries
from .config import Config
from .instrument import Instrument
from .findorientations import FindOrientationsConfig
from .fitgrains import FitGrainsConfig
from .material import MaterialConfig
logger = logging.getLogger('hexrd.config')
class RootConfig(Config):
@property
def analysis_name(self):
return str(self.get('analysis_name', default='analysis'))
@analysis_name.setter
def analysis_name(self, val):
self.set('analysis_name', val)
@property
def analysis_dir(self):
return os.path.join(self.working_dir, self.analysis_name)
@property
def find_orientations(self):
return FindOrientationsConfig(self)
@property
def fit_grains(self):
return FitGrainsConfig(self)
@property
def instrument(self):
if not hasattr(self, '_instr_config'):
instr_file = self.get('instrument')
instr_file = self.check_filename(instr_file, self.working_dir)
self._instr_config = Instrument(self, instr_file)
return self._instr_config
@instrument.setter
def instrument(self, instr_config):
self._instr_config = instr_config
@property
def material(self):
if not hasattr(self, '_material_config'):
self._material_config = MaterialConfig(self)
# !!! must make matl beam energy consistent with the instrument
beam_energy = self.instrument.hedm.beam_energy
self._material_config.beam_energy = beam_energy
return self._material_config
@material.setter
def material(self, material_config):
self._material_config = material_config
@property
def analysis_id(self):
return '_'.join(
[self.analysis_name.strip().replace(' ', '-'),
self.material.active.strip().replace(' ', '-')]
)
@property
def multiprocessing(self):
# determine number of processes to run in parallel
multiproc = self.get('multiprocessing', default=-1)
ncpus = mp.cpu_count()
if multiproc == 'all':
res = ncpus
elif multiproc == 'half':
temp = ncpus // 2
res = temp if temp else 1
elif isinstance(multiproc, int):
if multiproc >= 0:
if multiproc > ncpus:
logger.warning(
                        'Requested %s processes, %d available',
multiproc, ncpus
)
res = ncpus
else:
res = multiproc if multiproc else 1
else:
temp = ncpus + multiproc
if temp < 1:
logger.warning(
'Cannot use less than 1 process, requested %d of %d',
temp, ncpus
)
res = 1
else:
res = temp
else:
temp = ncpus - 1
logger.warning(
"Invalid value %s for multiprocessing",
multiproc
)
res = temp
return res
@multiprocessing.setter
def multiprocessing(self, val):
if val in ('half', 'all', -1):
self.set('multiprocessing', val)
elif (val >= 0 and val <= mp.cpu_count):
self.set('multiprocessing', int(val))
else:
raise RuntimeError(
'"multiprocessing": must be 1:%d, got %s'
% (mp.cpu_count(), val)
)
@property
def working_dir(self):
try:
temp = self.get('working_dir')
if not os.path.exists(temp):
raise IOError(
'"working_dir": "%s" does not exist', temp
)
return temp
except RuntimeError:
temp = os.getcwd()
was_dirty = self.dirty
self.working_dir = temp
if not was_dirty:
self._dirty = False
logger.info(
'"working_dir" not specified, defaulting to "%s"' % temp
)
return temp
@working_dir.setter
def working_dir(self, val):
val = os.path.abspath(val)
if not os.path.isdir(val):
raise IOError('"working_dir": "%s" does not exist' % val)
self.set('working_dir', val)
@property
def image_series(self):
"""Return the imageseries dictionary."""
if not hasattr(self, '_image_dict'):
self._image_dict = dict()
fmt = self.get('image_series:format')
imsdata = self.get('image_series:data')
for ispec in imsdata:
fname = self.check_filename(ispec['file'], self.working_dir)
args = ispec['args']
ims = imageseries.open(fname, fmt, **args)
oms = imageseries.omega.OmegaImageSeries(ims)
try:
panel = ispec['panel']
except(KeyError):
panel = oms.metadata['panel']
self._image_dict[panel] = oms
return self._image_dict
@image_series.setter
def image_series(self, ims_dict):
self._image_dict = ims_dict
|
the-stack_0_7369 | import sys
sys.path.append(".")
from query_representation.query import *
from evaluation.eval_fns import *
from cardinality_estimation.featurizer import *
from cardinality_estimation.algs import *
from cardinality_estimation.fcnn import FCNN
from cardinality_estimation.mscn import MSCN
import glob
import argparse
import random
import json
import klepto
from sklearn.model_selection import train_test_split
import pdb
import copy
def eval_alg(alg, eval_funcs, qreps, samples_type):
    '''
    Run alg on the given query representations, evaluate its estimates with
    each evaluation function, and print summary error statistics (mean,
    median, 99th percentile) for the given samples_type split.
    '''
np.set_printoptions(formatter={'float': lambda x: "{0:0.3f}".format(x)})
start = time.time()
alg_name = alg.__str__()
exp_name = alg.get_exp_name()
ests = alg.test(qreps)
for efunc in eval_funcs:
rdir = None
if args.result_dir is not None:
rdir = os.path.join(args.result_dir, exp_name)
make_dir(rdir)
errors = efunc.eval(qreps, ests, args=args, samples_type=samples_type,
result_dir=rdir, user = args.user, db_name = args.db_name,
db_host = args.db_host, port = args.port,
num_processes = args.num_eval_processes,
alg_name = alg_name)
print("{}, {}, {}, #samples: {}, {}: mean: {}, median: {}, 99p: {}"\
.format(args.db_name, samples_type, alg, len(errors),
efunc.__str__(),
np.round(np.mean(errors),3),
np.round(np.median(errors),3),
np.round(np.percentile(errors,99),3)))
print("all loss computations took: ", time.time()-start)
def get_alg(alg):
if alg == "saved":
assert args.model_dir is not None
return SavedPreds(model_dir=args.model_dir)
elif alg == "postgres":
return Postgres()
elif alg == "true":
return TrueCardinalities()
elif alg == "true_rank":
return TrueRank()
elif alg == "true_random":
return TrueRandom()
elif alg == "true_rank_tables":
return TrueRankTables()
elif alg == "random":
return Random()
elif alg == "rf":
return RandomForest(grid_search = False,
n_estimators = 100,
max_depth = 10,
lr = 0.01)
elif alg == "xgb":
return XGBoost(grid_search=False, tree_method="hist",
subsample=1.0, n_estimators = 100,
max_depth=10, lr = 0.01)
elif alg == "fcnn":
return FCNN(max_epochs = args.max_epochs, lr=args.lr,
mb_size = args.mb_size,
weight_decay = args.weight_decay,
load_query_together = args.load_query_together,
result_dir = args.result_dir,
num_hidden_layers=args.num_hidden_layers,
eval_epoch = args.eval_epoch,
optimizer_name=args.optimizer_name,
clip_gradient=args.clip_gradient,
loss_func_name = args.loss_func_name,
hidden_layer_size = args.hidden_layer_size)
elif alg == "mscn":
return MSCN(max_epochs = args.max_epochs, lr=args.lr,
load_padded_mscn_feats = args.load_padded_mscn_feats,
mb_size = args.mb_size,
weight_decay = args.weight_decay,
load_query_together = args.load_query_together,
result_dir = args.result_dir,
# num_hidden_layers=args.num_hidden_layers,
eval_epoch = args.eval_epoch,
optimizer_name=args.optimizer_name,
clip_gradient=args.clip_gradient,
loss_func_name = args.loss_func_name,
hidden_layer_size = args.hidden_layer_size)
else:
assert False
def get_query_fns():
fns = list(glob.glob(args.query_dir + "/*"))
skipped_templates = []
train_qfns = []
test_qfns = []
val_qfns = []
if args.train_test_split_kind == "template":
# the train/test split will be on the template names
sorted_fns = copy.deepcopy(fns)
sorted_fns.sort()
train_tmps, test_tmps = train_test_split(sorted_fns,
test_size=args.test_size,
random_state=args.diff_templates_seed)
for qi,qdir in enumerate(fns):
if ".json" in qdir:
continue
template_name = os.path.basename(qdir)
if args.query_templates != "all":
query_templates = args.query_templates.split(",")
if template_name not in query_templates:
skipped_templates.append(template_name)
continue
# let's first select all the qfns we are going to load
qfns = list(glob.glob(qdir+"/*.pkl"))
qfns.sort()
if args.num_samples_per_template == -1:
qfns = qfns
elif args.num_samples_per_template < len(qfns):
qfns = qfns[0:args.num_samples_per_template]
else:
assert False
if args.train_test_split_kind == "template":
cur_val_fns = []
if qdir in train_tmps:
cur_train_fns = qfns
cur_test_fns = []
elif qdir in test_tmps:
cur_train_fns = []
cur_test_fns = qfns
else:
assert False
elif args.train_test_split_kind == "query":
if args.val_size == 0:
cur_val_fns = []
else:
cur_val_fns, qfns = train_test_split(qfns,
test_size=1-args.val_size,
random_state=args.seed)
cur_train_fns, cur_test_fns = train_test_split(qfns,
test_size=args.test_size,
random_state=args.seed)
train_qfns += cur_train_fns
val_qfns += cur_val_fns
test_qfns += cur_test_fns
print("Skipped templates: ", " ".join(skipped_templates))
if args.train_test_split_kind == "query":
print("""Selected {} train queries, {} test queries, and {} val queries"""\
.format(len(train_qfns), len(test_qfns), len(val_qfns)))
elif args.train_test_split_kind == "template":
train_tmp_names = [os.path.basename(tfn) for tfn in train_tmps]
test_tmp_names = [os.path.basename(tfn) for tfn in test_tmps]
print("""Selected {} train templates, {} test templates"""\
.format(len(train_tmp_names), len(test_tmp_names)))
print("""Training templates: {}\nEvaluation templates: {}""".\
format(",".join(train_tmp_names), ",".join(test_tmp_names)))
# going to shuffle all these lists, so queries are evenly distributed. Plan
# Cost functions for some of these templates take a lot longer; so when we
# compute them in parallel, we want the queries to be shuffled so the
# workload is divided evely
random.shuffle(train_qfns)
random.shuffle(test_qfns)
random.shuffle(val_qfns)
return train_qfns, test_qfns, val_qfns
def load_qdata(fns):
qreps = []
for qfn in fns:
qrep = load_qrep(qfn)
# TODO: can do checks like no queries with zero cardinalities etc.
qreps.append(qrep)
template_name = os.path.basename(os.path.dirname(qfn))
qrep["name"] = os.path.basename(qfn)
qrep["template_name"] = template_name
return qreps
def get_featurizer(trainqs, valqs, testqs):
featurizer = Featurizer(args.user, args.pwd, args.db_name,
args.db_host, args.port)
featdata_fn = os.path.join(args.query_dir, "featdata.json")
if args.regen_featstats or not os.path.exists(featdata_fn):
featurizer.update_column_stats(trainqs+valqs+testqs)
ATTRS_TO_SAVE = ['aliases', 'cmp_ops', 'column_stats', 'joins',
'max_in_degree', 'max_joins', 'max_out_degree', 'max_preds',
'max_tables', 'regex_cols', 'tables']
featdata = {}
for k in dir(featurizer):
if k not in ATTRS_TO_SAVE:
continue
attrvals = getattr(featurizer, k)
if isinstance(attrvals, set):
attrvals = list(attrvals)
featdata[k] = attrvals
f = open(featdata_fn, "w")
json.dump(featdata, f)
f.close()
else:
f = open(featdata_fn, "r")
featdata = json.load(f)
f.close()
featurizer.update_using_saved_stats(featdata)
print("updated featdata from saved file!!")
if args.algs == "mscn":
feat_type = "set"
else:
feat_type = "combined"
# Look at the various keyword arguments to setup() to change the
# featurization behavior; e.g., include certain features etc.
# these configuration properties do not influence the basic statistics
# collected in the featurizer.update_column_stats call; Therefore, we don't
# include this in the cached version
featurizer.setup(ynormalization=args.ynormalization,
featurization_type=feat_type)
featurizer.update_ystats(trainqs+valqs+testqs)
return featurizer
def main():
train_qfns, test_qfns, val_qfns = get_query_fns()
trainqs = load_qdata(train_qfns)
# Note: can be quite memory intensive to load them all; might want to just
# keep around the qfns and load them as needed
valqs = load_qdata(val_qfns)
testqs = load_qdata(test_qfns)
# only needs featurizer for learned models
if args.algs in ["xgb", "fcnn", "mscn"]:
featurizer = get_featurizer(trainqs, valqs, testqs)
else:
featurizer = None
algs = []
for alg_name in args.algs.split(","):
algs.append(get_alg(alg_name))
eval_fns = []
for efn in args.eval_fns.split(","):
eval_fns.append(get_eval_fn(efn))
for alg in algs:
alg.train(trainqs, valqs=valqs, testqs=testqs,
featurizer=featurizer, result_dir=args.result_dir)
eval_alg(alg, eval_fns, trainqs, "train")
if len(valqs) > 0:
eval_alg(alg, eval_fns, valqs, "val")
if len(testqs) > 0:
eval_alg(alg, eval_fns, testqs, "test")
def read_flags():
parser = argparse.ArgumentParser()
parser.add_argument("--query_dir", type=str, required=False,
default="./queries/imdb/")
## db credentials
parser.add_argument("--db_name", type=str, required=False,
default="imdb")
parser.add_argument("--db_host", type=str, required=False,
default="localhost")
parser.add_argument("--user", type=str, required=False,
default="ceb")
parser.add_argument("--pwd", type=str, required=False,
default="password")
parser.add_argument("--port", type=int, required=False,
default=5432)
parser.add_argument("--result_dir", type=str, required=False,
default="results")
parser.add_argument("--query_templates", type=str, required=False,
default="all")
parser.add_argument("--seed", type=int, required=False,
default=13)
parser.add_argument("--num_eval_processes", type=int, required=False,
default=-1, help="""Used for computing plan costs in parallel. -1 use all cpus; -2: use no cpus; else use n cpus. """)
parser.add_argument("--train_test_split_kind", type=str, required=False,
default="query", help="""query OR template.""")
parser.add_argument("--diff_templates_seed", type=int, required=False,
default=1, help="""Seed used when train_test_split_kind == template""")
parser.add_argument("-n", "--num_samples_per_template", type=int,
required=False, default=-1)
parser.add_argument("--test_size", type=float, required=False,
default=0.5)
parser.add_argument("--val_size", type=float, required=False,
default=0.2)
parser.add_argument("--algs", type=str, required=False,
default="postgres")
parser.add_argument("--eval_fns", type=str, required=False,
default="qerr,ppc,plancost")
# featurizer arguments
parser.add_argument("--regen_featstats", type=int, required=False,
default=1)
parser.add_argument("--ynormalization", type=str, required=False,
default="log")
## NN training features
parser.add_argument("--load_padded_mscn_feats", type=int, required=False, default=0, help="""==1 loads all the mscn features with padded zeros in memory -- speeds up training, but can take too much RAM.""")
parser.add_argument("--weight_decay", type=float, required=False,
default=0.0)
parser.add_argument("--max_epochs", type=int,
required=False, default=10)
parser.add_argument("--eval_epoch", type=int,
required=False, default=1)
parser.add_argument("--mb_size", type=int, required=False,
default=1024)
parser.add_argument("--num_hidden_layers", type=int,
required=False, default=2)
parser.add_argument("--hidden_layer_size", type=int,
required=False, default=128)
parser.add_argument("--load_query_together", type=int, required=False,
default=0)
parser.add_argument("--optimizer_name", type=str, required=False,
default="adamw")
parser.add_argument("--clip_gradient", type=float,
required=False, default=20.0)
parser.add_argument("--lr", type=float,
required=False, default=0.0001)
parser.add_argument("--loss_func_name", type=str, required=False,
default="mse")
return parser.parse_args()
if __name__ == "__main__":
args = read_flags()
main()
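# Example invocation (sketch, built only from the flags defined in read_flags();
# the script name, paths, and credentials shown are illustrative):
#
#   python main.py --algs fcnn --query_dir ./queries/imdb/ \
#       --db_name imdb --user ceb --pwd password --port 5432 \
#       --eval_fns qerr,ppc,plancost --max_epochs 10 --result_dir results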
|
the-stack_0_7372 | #!/usr/bin/env python
import json
import yaml
import urllib
import os
import sys
from jsonref import JsonRef # type: ignore
import click
from openapi2jsonschema.log import info, debug, error
from openapi2jsonschema.util import (
    additional_properties,
    replace_int_or_string,
    allow_null_optional_fields,
    change_dict_values,
    append_no_duplicates,
    ovirt_change_array,  # used below; assumed to be provided by this fork's util module
)
from openapi2jsonschema.errors import UnsupportedError
@click.command()
@click.option(
"-o",
"--output",
default="schemas",
metavar="PATH",
help="Directory to store schema files",
)
@click.option(
"-p",
"--prefix",
default="_definitions.json",
help="Prefix for JSON references (only for OpenAPI versions before 3.0)",
)
@click.option(
"--stand-alone", is_flag=True, help="Whether or not to de-reference JSON schemas"
)
@click.option(
"--expanded", is_flag=True, help="Expand Kubernetes schemas by API version"
)
@click.option(
"--kubernetes", is_flag=True, help="Enable Kubernetes specific processors"
)
@click.option(
"--strict",
is_flag=True,
help="Prohibits properties not in the schema (additionalProperties: false)",
)
@click.argument("schema", metavar="SCHEMA_URL")
def default(output, schema, prefix, stand_alone, expanded, kubernetes, strict):
"""
Converts a valid OpenAPI specification into a set of JSON Schema files
"""
info("Downloading schema")
if sys.version_info < (3, 0):
response = urllib.urlopen(schema)
else:
if os.path.isfile(schema):
schema = "file://" + os.path.realpath(schema)
req = urllib.request.Request(schema)
response = urllib.request.urlopen(req)
info("Parsing schema")
# Note that JSON is valid YAML, so we can use the YAML parser whether
# the schema is stored in JSON or YAML
data = yaml.load(response.read(), Loader=yaml.SafeLoader)
if "swagger" in data:
version = data["swagger"]
elif "openapi" in data:
version = data["openapi"]
if not os.path.exists(output):
os.makedirs(output)
if version < "3":
with open("%s/_definitions.json" % output, "w") as definitions_file:
info("Generating shared definitions")
definitions = data["definitions"]
if kubernetes:
definitions["io.k8s.apimachinery.pkg.util.intstr.IntOrString"] = {
"oneOf": [{"type": "string"}, {"type": "integer"}]
}
# Although the kubernetes api does not allow `number` as valid
# Quantity type - almost all kubenetes tooling
# recognizes it is valid. For this reason, we extend the API definition to
# allow `number` values.
definitions["io.k8s.apimachinery.pkg.api.resource.Quantity"] = {
"oneOf": [{"type": "string"}, {"type": "number"}]
}
# For Kubernetes, populate `apiVersion` and `kind` properties from `x-kubernetes-group-version-kind`
for type_name in definitions:
type_def = definitions[type_name]
if "x-kubernetes-group-version-kind" in type_def:
for kube_ext in type_def["x-kubernetes-group-version-kind"]:
if expanded and "apiVersion" in type_def["properties"]:
api_version = (
kube_ext["group"] + "/" +
kube_ext["version"]
if kube_ext["group"]
else kube_ext["version"]
)
append_no_duplicates(
type_def["properties"]["apiVersion"],
"enum",
api_version,
)
if "kind" in type_def["properties"]:
kind = kube_ext["kind"]
append_no_duplicates(
type_def["properties"]["kind"], "enum", kind
)
if strict:
definitions = additional_properties(definitions)
definitions_file.write(json.dumps(
{"definitions": definitions}, indent=2))
with open("%s/_definitions.json" % output, 'w') as definitions_file:
definitions = data['definitions']
updated = ovirt_change_array(definitions, prefix, version, False)
definitions_file.write(json.dumps({"definitions": updated}, indent=2))
types = []
info("Generating individual schemas")
if version < "3":
components = updated #data['definitions']
else:
components = data["components"]["schemas"]
for title in components:
kind = title.split(".")[-1].lower()
if kubernetes:
group = title.split(".")[-3].lower()
api_version = title.split(".")[-2].lower()
specification = components[title]
specification["$schema"] = "http://json-schema.org/schema#"
specification.setdefault("type", "object")
if strict:
specification["additionalProperties"] = False
if kubernetes and expanded:
if group in ["core", "api"]:
full_name = "%s-%s" % (kind, api_version)
else:
full_name = "%s-%s-%s" % (kind, group, api_version)
else:
full_name = kind
types.append(title)
try:
debug("Processing %s" % full_name)
# These APIs are all deprecated
if kubernetes:
if title.split(".")[3] == "pkg" and title.split(".")[2] == "kubernetes":
raise UnsupportedError(
"%s not currently supported, due to use of pkg namespace"
% title
)
# This list of Kubernetes types carry around jsonschema for Kubernetes and don't
# currently work with openapi2jsonschema
if (
kubernetes
and stand_alone
and kind
in [
"jsonschemaprops",
"jsonschemapropsorarray",
"customresourcevalidation",
"customresourcedefinition",
"customresourcedefinitionspec",
"customresourcedefinitionlist",
"customresourcedefinitionspec",
"jsonschemapropsorstringarray",
"jsonschemapropsorbool",
]
):
raise UnsupportedError("%s not currently supported" % kind)
updated = change_dict_values(specification, prefix, version)
specification = updated
if stand_alone:
base = "file://%s/%s/" % (os.getcwd(), output)
specification = JsonRef.replace_refs(
specification, base_uri=base)
if "additionalProperties" in specification:
if specification["additionalProperties"]:
updated = change_dict_values(
specification["additionalProperties"], prefix, version
)
specification["additionalProperties"] = updated
if strict and "properties" in specification:
updated = additional_properties(specification["properties"])
specification["properties"] = updated
if kubernetes and "properties" in specification:
updated = replace_int_or_string(specification["properties"])
updated = allow_null_optional_fields(updated)
specification["properties"] = updated
with open("%s/%s.json" % (output, full_name), "w") as schema_file:
debug("Generating %s.json" % full_name)
schema_file.write(json.dumps(specification, indent=2))
except Exception as e:
error("An error occured processing %s: %s" % (kind, e))
with open("%s/all.json" % output, "w") as all_file:
info("Generating schema for all types")
contents = {"oneOf": []}
for title in types:
if version < "3":
contents["oneOf"].append(
{"$ref": "%s#/definitions/%s" % (prefix, title)}
)
else:
contents["oneOf"].append(
{"$ref": (title.replace("#/components/schemas/", "") + ".json")}
)
all_file.write(json.dumps(contents, indent=2))
if __name__ == "__main__":
default()
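# Example invocation (sketch; the spec URL is illustrative, and the
# `openapi2jsonschema` entry point is assumed to map to this command):
#
#   openapi2jsonschema --output schemas --kubernetes --expanded --strict \
#       https://raw.githubusercontent.com/kubernetes/kubernetes/master/api/openapi-spec/swagger.json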
|
the-stack_0_7373 | from typing import Text, List, Tuple
from rasa.core.domain import Domain
from rasa.core.training.story_conflict import (
StoryConflict,
find_story_conflicts,
_get_previous_event,
)
from rasa.core.training.generator import TrainingDataGenerator, TrackerWithCachedStates
from rasa.validator import Validator
from rasa.importers.rasa import RasaFileImporter
from tests.core.conftest import DEFAULT_STORIES_FILE, DEFAULT_DOMAIN_PATH_WITH_SLOTS
async def _setup_trackers_for_testing(
domain_path: Text, training_data_file: Text
) -> Tuple[List[TrackerWithCachedStates], Domain]:
importer = RasaFileImporter(
domain_path=domain_path, training_data_paths=[training_data_file],
)
validator = await Validator.from_importer(importer)
trackers = TrainingDataGenerator(
validator.story_graph,
domain=validator.domain,
remove_duplicates=False,
augmentation_factor=0,
).generate()
return trackers, validator.domain
async def test_find_no_conflicts():
trackers, domain = await _setup_trackers_for_testing(
DEFAULT_DOMAIN_PATH_WITH_SLOTS, DEFAULT_STORIES_FILE
)
# Create a list of `StoryConflict` objects
conflicts = find_story_conflicts(trackers, domain, 5)
assert conflicts == []
async def test_find_conflicts_in_short_history():
trackers, domain = await _setup_trackers_for_testing(
"data/test_domains/default.yml", "data/test_stories/stories_conflicting_1.md"
)
# `max_history = 3` is too small, so a conflict must arise
conflicts = find_story_conflicts(trackers, domain, 3)
assert len(conflicts) == 1
# With `max_history = 4` the conflict should disappear
conflicts = find_story_conflicts(trackers, domain, 4)
assert len(conflicts) == 0
async def test_find_conflicts_checkpoints():
trackers, domain = await _setup_trackers_for_testing(
"data/test_domains/default.yml", "data/test_stories/stories_conflicting_2.md"
)
# Create a list of `StoryConflict` objects
conflicts = find_story_conflicts(trackers, domain, 5)
assert len(conflicts) == 1
assert conflicts[0].conflicting_actions == ["utter_goodbye", "utter_default"]
async def test_find_conflicts_or():
trackers, domain = await _setup_trackers_for_testing(
"data/test_domains/default.yml", "data/test_stories/stories_conflicting_3.md"
)
# Create a list of `StoryConflict` objects
conflicts = find_story_conflicts(trackers, domain, 5)
assert len(conflicts) == 1
assert conflicts[0].conflicting_actions == ["utter_default", "utter_goodbye"]
async def test_find_conflicts_slots_that_break():
trackers, domain = await _setup_trackers_for_testing(
"data/test_domains/default.yml", "data/test_stories/stories_conflicting_4.md"
)
# Create a list of `StoryConflict` objects
conflicts = find_story_conflicts(trackers, domain, 5)
assert len(conflicts) == 1
assert conflicts[0].conflicting_actions == ["utter_default", "utter_greet"]
async def test_find_conflicts_slots_that_dont_break():
trackers, domain = await _setup_trackers_for_testing(
"data/test_domains/default.yml", "data/test_stories/stories_conflicting_5.md"
)
# Create a list of `StoryConflict` objects
conflicts = find_story_conflicts(trackers, domain, 5)
assert len(conflicts) == 0
async def test_find_conflicts_multiple_stories():
trackers, domain = await _setup_trackers_for_testing(
"data/test_domains/default.yml", "data/test_stories/stories_conflicting_6.md"
)
# Create a list of `StoryConflict` objects
conflicts = find_story_conflicts(trackers, domain, 5)
assert len(conflicts) == 1
assert "and 2 other trackers" in str(conflicts[0])
async def test_add_conflicting_action():
sliced_states = [
None,
{},
{"intent_greet": 1.0, "prev_action_listen": 1.0},
{"prev_utter_greet": 1.0, "intent_greet": 1.0},
]
conflict = StoryConflict(sliced_states)
conflict.add_conflicting_action("utter_greet", "xyz")
conflict.add_conflicting_action("utter_default", "uvw")
assert conflict.conflicting_actions == ["utter_greet", "utter_default"]
async def test_has_prior_events():
sliced_states = [
None,
{},
{"intent_greet": 1.0, "prev_action_listen": 1.0},
{"prev_utter_greet": 1.0, "intent_greet": 1.0},
]
conflict = StoryConflict(sliced_states)
assert conflict.conflict_has_prior_events
async def test_get_previous_event():
assert _get_previous_event({"prev_utter_greet": 1.0, "intent_greet": 1.0}) == (
"action",
"utter_greet",
)
assert _get_previous_event({"intent_greet": 1.0, "prev_utter_greet": 1.0}) == (
"action",
"utter_greet",
)
assert _get_previous_event({"intent_greet": 1.0, "prev_action_listen": 1.0}) == (
"intent",
"greet",
)
async def test_has_no_prior_events():
sliced_states = [None]
conflict = StoryConflict(sliced_states)
assert not conflict.conflict_has_prior_events
|
the-stack_0_7374 | import logging
from collections import namedtuple, defaultdict
from enum import Enum
from itertools import product
from gym import Env
import gym
from gym.utils import seeding
import numpy as np
class Action(Enum):
NONE = 0
NORTH = 1
SOUTH = 2
WEST = 3
EAST = 4
LOAD = 5
class Player:
def __init__(self):
self.controller = None
self.position = None
self.level = None
self.field_size = None
self.score = None
self.reward = 0
self.history = None
self.current_step = None
def setup(self, position, level, field_size):
self.history = []
self.position = position
self.level = level
self.field_size = field_size
self.score = 0
def set_controller(self, controller):
self.controller = controller
def step(self, obs):
return self.controller._step(obs)
@property
def name(self):
if self.controller:
return self.controller.name
else:
return "Player"
class ForagingEnv(Env):
"""
A class that contains rules/actions for the game level-based foraging.
"""
metadata = {"render.modes": ["human"]}
action_set = [Action.NORTH, Action.SOUTH, Action.WEST, Action.EAST, Action.LOAD]
Observation = namedtuple(
"Observation",
["field", "actions", "players", "game_over", "sight", "current_step", "button"],
)
PlayerObservation = namedtuple(
"PlayerObservation", ["position", "level", "history", "reward", "is_self"]
) # reward is available only if is_self
def __init__(
self,
players,
max_player_level,
field_size,
sight,
max_episode_steps,
normalize_reward=True,
):
assert players>1, "Need at least 2 players"
self.logger = logging.getLogger(__name__)
self.seed()
self.players = [Player() for _ in range(players)]
self.field = np.zeros(field_size, np.int32)
self.max_food = 1 #
self._food_spawned = 0.0
self.max_player_level = max_player_level
self.sight = sight
self.force_coop = True #
self._game_over = None
self.button_pressed = False
self._button_loc = None
self._food_loc = None
self.action_space = gym.spaces.Tuple(tuple([gym.spaces.Discrete(6)] * len(self.players)))
self.observation_space = gym.spaces.Tuple(tuple([self._get_observation_space()] * len(self.players)))
# New stuff
# self.share_observation_space = gym.spaces.Tuple(tuple([self._get_observation_space()] * len(self.players)))
self.share_observation_space = gym.spaces.Tuple(tuple([self._get_shared_observation_space()] * len(self.players)))
self._rendering_initialized = False
self._valid_actions = None
self._max_episode_steps = max_episode_steps
self._normalize_reward = normalize_reward
self.viewer = None
self.n_agents = len(self.players)
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def _get_observation_space(self):
"""The Observation Space for each agent.
- all of the board (board_size^2) with foods
- player description (x, y, level)*player_count
"""
field_x = self.field.shape[1]
field_y = self.field.shape[0]
# field_size = field_x * field_y
max_food = self.max_food
max_food_level = self.max_player_level * len(self.players)
min_obs = [-1, -1, 0] * max_food + [0, 0, 1] * len(self.players) + [0, 0] * 1
max_obs = [field_x, field_y, max_food_level] * max_food + [
field_x,
field_y,
self.max_player_level,
] * len(self.players) + [field_x, field_y] * 1
return gym.spaces.Box(np.array(min_obs), np.array(max_obs), dtype=np.float32)
def _get_shared_observation_space(self):
"""The Observation Space for each agent.
for n_players:
- all of the board (board_size^2) with foods
- player description (x, y, level)*player_count
"""
shared_obs_space_min = self.observation_space[0].low
shared_obs_space_high = self.observation_space[0].high
for obs_space in self.observation_space[1:]:
shared_obs_space_min = np.append(shared_obs_space_min, obs_space.low)
shared_obs_space_high = np.append(shared_obs_space_high, obs_space.high)
return gym.spaces.Box(shared_obs_space_min, shared_obs_space_high, dtype=np.float32)
@classmethod
def from_obs(cls, obs):
players = []
for p in obs.players:
player = Player()
player.setup(p.position, p.level, obs.field.shape)
player.score = p.score if p.score else 0
players.append(player)
env = cls(players, None, None, None, None)
env.field = np.copy(obs.field)
env.current_step = obs.current_step
env.sight = obs.sight
env._gen_valid_moves()
return env
@property
def field_size(self):
return self.field.shape
@property
def rows(self):
return self.field_size[0]
@property
def cols(self):
return self.field_size[1]
@property
def game_over(self):
return self._game_over
def _gen_valid_moves(self):
self._valid_actions = {
player: [
action for action in Action if self._is_valid_action(player, action)
]
for player in self.players
}
def neighborhood(self, row, col, distance=1, ignore_diag=False):
if not ignore_diag:
return self.field[
max(row - distance, 0) : min(row + distance + 1, self.rows),
max(col - distance, 0) : min(col + distance + 1, self.cols),
]
return (
self.field[
max(row - distance, 0) : min(row + distance + 1, self.rows), col
].sum()
+ self.field[
row, max(col - distance, 0) : min(col + distance + 1, self.cols)
].sum()
)
def adjacent_food(self, row, col):
return (
self.field[max(row - 1, 0), col]
+ self.field[min(row + 1, self.rows - 1), col]
+ self.field[row, max(col - 1, 0)]
+ self.field[row, min(col + 1, self.cols - 1)]
)
def adjacent_food_location(self, row, col):
if row > 1 and self.field[row - 1, col] > 0:
return row - 1, col
elif row < self.rows - 1 and self.field[row + 1, col] > 0:
return row + 1, col
elif col > 1 and self.field[row, col - 1] > 0:
return row, col - 1
elif col < self.cols - 1 and self.field[row, col + 1] > 0:
return row, col + 1
def adjacent_players(self, row, col):
return [
player
for player in self.players
if abs(player.position[0] - row) == 1
and player.position[1] == col
or abs(player.position[1] - col) == 1
and player.position[0] == row
]
def spawn_food(self, max_food, max_level):
food_count = 0
attempts = 0
min_level = 2*max_level if self.force_coop else 1 # The fruit is 2x larger than max_level
while food_count < max_food and attempts < 1000:
attempts += 1
row = self.np_random.randint(1, self.rows - 1)
col = self.np_random.randint(1, self.cols - 1)
# check if it has neighbors:
if (
self.neighborhood(row, col).sum() > 0
or self.neighborhood(row, col, distance=2, ignore_diag=True) > 0
or not self._is_empty_location(row, col)
):
continue
self.field[row, col] = min_level
food_count += 1
self._food_loc = (row, col)
self._food_spawned = self.field.sum()
def _is_empty_location(self, row, col):
if self.field[row, col] != 0:
return False
for a in self.players:
if a.position and row == a.position[0] and col == a.position[1]:
return False
return True
def spawn_players(self, max_player_level):
for player in self.players:
attempts = 0
player.reward = 0
while attempts < 1000:
row = self.np_random.randint(0, self.rows - 1)
col = self.np_random.randint(0, self.cols - 1)
if self._is_empty_location(row, col):
player.setup(
(row, col),
self.np_random.randint(1, max_player_level),
self.field_size,
)
break
attempts += 1
def _is_valid_action(self, player, action):
if action == Action.NONE:
return True
elif action == Action.NORTH:
return (
player.position[0] > 0
and self.field[player.position[0] - 1, player.position[1]] == 0
)
elif action == Action.SOUTH:
return (
player.position[0] < self.rows - 1
and self.field[player.position[0] + 1, player.position[1]] == 0
)
elif action == Action.WEST:
return (
player.position[1] > 0
and self.field[player.position[0], player.position[1] - 1] == 0
)
elif action == Action.EAST:
return (
player.position[1] < self.cols - 1
and self.field[player.position[0], player.position[1] + 1] == 0
)
elif action == Action.LOAD:
return self.adjacent_food(*player.position) > 0
self.logger.error("Undefined action {} from {}".format(action, player.name))
raise ValueError("Undefined action")
def _transform_to_neighborhood(self, center, sight, position):
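        # Maps an absolute board position into the agent's egocentric sight window.
        # For example, with sight=2 and center=(5, 5), a cell at (4, 6) maps to
        # (1, 3) inside the (2*sight+1) x (2*sight+1) window.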
return (
position[0] - center[0] + min(sight, center[0]),
position[1] - center[1] + min(sight, center[1]),
)
def get_valid_actions(self) -> list:
return list(product(*[self._valid_actions[player] for player in self.players]))
def _make_obs(self, player):
return self.Observation(
actions=self._valid_actions[player],
players=[
self.PlayerObservation(
position=self._transform_to_neighborhood(
player.position, self.sight, a.position
),
level=a.level,
is_self=a == player,
history=a.history,
reward=a.reward if a == player else None,
)
for a in self.players
if (
min(
self._transform_to_neighborhood(
player.position, self.sight, a.position
)
)
>= 0
)
and max(
self._transform_to_neighborhood(
player.position, self.sight, a.position
)
)
<= 2 * self.sight
],
# todo also check max?
field=np.copy(self.neighborhood(*player.position, self.sight)),
game_over=self.game_over,
sight=self.sight,
current_step=self.current_step,
button=self._transform_to_neighborhood(player.position, self.sight, self._button_loc)
if (min(self._transform_to_neighborhood(player.position, self.sight, self._button_loc)) >= 0)
and (max(self._transform_to_neighborhood( player.position, self.sight, self._button_loc))<= 2 * self.sight)
else [-1, -1]
)
def _make_gym_obs(self, observations):
def make_obs_array(observation):
obs = np.zeros(self.observation_space[0].shape, dtype=np.float32)
# obs[: observation.field.size] = observation.field.flatten()
# self player is always first
seen_players = [p for p in observation.players if p.is_self] + [
p for p in observation.players if not p.is_self
]
for i in range(self.max_food):
obs[3 * i] = -1
obs[3 * i + 1] = -1
obs[3 * i + 2] = 0
for i, (y, x) in enumerate(zip(*np.nonzero(observation.field))):
obs[3 * i] = y
obs[3 * i + 1] = x
obs[3 * i + 2] = observation.field[y, x]
for i in range(len(self.players)):
obs[self.max_food * 3 + 3 * i] = -1
obs[self.max_food * 3 + 3 * i + 1] = -1
obs[self.max_food * 3 + 3 * i + 2] = 0
for i, p in enumerate(seen_players):
obs[self.max_food * 3 + 3 * i] = p.position[0]
obs[self.max_food * 3 + 3 * i + 1] = p.position[1]
obs[self.max_food * 3 + 3 * i + 2] = p.level
obs[-2:] = np.array(observation.button)
return obs
def get_player_reward(observation):
for p in observation.players:
if p.is_self:
return p.reward
nobs = tuple([make_obs_array(obs) for obs in observations])
nreward = [[get_player_reward(obs)] for obs in observations]
ndone = [obs.game_over for obs in observations]
# ninfo = [{'observation': obs} for obs in observations]
ninfo = {}
return nobs, nreward, ndone, ninfo
def reset(self):
self.field = np.zeros(self.field_size, np.int32)
self.spawn_players(self.max_player_level)
player_levels = sorted([player.level for player in self.players])
self.spawn_food(
self.max_food, max_level=sum(player_levels[:3])
)
self.spawn_button()
# print(self._button_loc)
self.current_step = 0
self._game_over = False
self._gen_valid_moves()
observations = [self._make_obs(player) for player in self.players]
nobs, nreward, ndone, ninfo = self._make_gym_obs(observations)
return nobs
def spawn_button(self):
attempts = 0
while attempts < 1000:
attempts += 1
row = self.np_random.randint(1, self.rows - 1)
col = self.np_random.randint(1, self.cols - 1)
# check if it has neighbors:
if (
self.neighborhood(row, col).sum() > 0
or self.neighborhood(row, col, distance=2, ignore_diag=True) > 0
or not self._is_empty_location(row, col)
):
continue
self._button_loc = np.array([row, col])
return
def step(self, actions):
self.current_step += 1
for p in self.players:
p.reward = 0
actions = [
Action(a) if Action(a) in self._valid_actions[p] else Action.NONE
for p, a in zip(self.players, actions)
]
# check if actions are valid
for i, (player, action) in enumerate(zip(self.players, actions)):
if action not in self._valid_actions[player]:
self.logger.info(
"{}{} attempted invalid action {}.".format(
player.name, player.position, action
)
)
actions[i] = Action.NONE
loading_players = set()
# move players
# if two or more players try to move to the same location they all fail
collisions = defaultdict(list)
# so check for collisions
for player, action in zip(self.players, actions):
if action == Action.NONE:
collisions[player.position].append(player)
elif action == Action.NORTH:
collisions[(player.position[0] - 1, player.position[1])].append(player)
elif action == Action.SOUTH:
collisions[(player.position[0] + 1, player.position[1])].append(player)
elif action == Action.WEST:
collisions[(player.position[0], player.position[1] - 1)].append(player)
elif action == Action.EAST:
collisions[(player.position[0], player.position[1] + 1)].append(player)
elif action == Action.LOAD:
collisions[player.position].append(player)
loading_players.add(player)
# and do movements for non colliding players
for k, v in collisions.items():
            if len(v) > 1:  # make sure no more than one player will arrive at location
continue
v[0].position = k
# process the button
if not self.button_pressed:
for player in self.players:
if player.position[0] == self._button_loc[0] and player.position[1] == self._button_loc[1]:
self.field[self._food_loc] = int(self.field[self._food_loc]/2)
self.button_pressed = True
# finally process the loadings:
while loading_players:
# find adjacent food
player = loading_players.pop()
frow, fcol = self.adjacent_food_location(*player.position)
food = self.field[frow, fcol]
adj_players = self.adjacent_players(frow, fcol)
adj_players = [
p for p in adj_players if p in loading_players or p is player
]
adj_player_level = sum([a.level for a in adj_players])
loading_players = loading_players - set(adj_players)
if adj_player_level < food:
# failed to load
continue
# else the food was loaded and each player scores points
for a in adj_players:
a.reward = float(a.level * food)
if self._normalize_reward:
a.reward = a.reward / float(
adj_player_level * self._food_spawned
) # normalize reward
# and the food is removed
self.field[frow, fcol] = 0
self._game_over = (
self.field.sum() == 0 or self._max_episode_steps <= self.current_step
)
self._gen_valid_moves()
for p in self.players:
p.score += p.reward
observations = [self._make_obs(player) for player in self.players]
return self._make_gym_obs(observations)
def _init_render(self):
from .rendering_subgoal import Viewer
self.viewer = Viewer((self.rows, self.cols))
self._rendering_initialized = True
def render(self, mode="human"):
if not self._rendering_initialized:
self._init_render()
return self.viewer.render(self, return_rgb_array=mode == "rgb_array")
def close(self):
if self.viewer:
self.viewer.close() |
the-stack_0_7376 | # -*- coding: utf-8 -*-
from PySide6.QtWidgets import (
QApplication)
from PySide6.QtGui import (
QFontMetrics,
QTextOption)
from PySide6.QtCore import (
QEvent)
from .textline import SourceTextLineBase
from .textviewer import TextViewer
__all__ = ["SourceViewer"]
class SourceTextLine(SourceTextLineBase):
def __init__(self, text, font, option):
super().__init__(text, font, option)
def rehighlight(self):
formats = self._commonHighlightFormats()
if formats:
self._layout.setFormats(formats)
class SourceViewer(TextViewer):
def __init__(self, parent=None):
super().__init__(parent)
self._panel = None
self._blockEventFilter = False
self.verticalScrollBar().valueChanged.connect(
self._onVScrollBarValueChanged)
settings = QApplication.instance().settings()
settings.tabSizeChanged.connect(self.delayUpdateSettings)
settings.showWhitespaceChanged.connect(self.delayUpdateSettings)
settings.diffViewFontChanged.connect(self.delayUpdateSettings)
def toTextLine(self, text):
return SourceTextLine(text, self._font, self._option)
def setPanel(self, panel):
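        # Attach, replace, or clear the side panel widget: the viewport's left
        # margin is widened to make room for it, and an event filter keeps the
        # panel geometry in sync when it is resized.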
if self._panel:
if panel != self._panel:
self._panel.removeEventFilter(self)
else:
return
self._panel = panel
if panel:
self._updatePanelGeo()
panel.installEventFilter(self)
else:
self.setViewportMargins(0, 0, 0, 0)
@property
def panel(self):
return self._panel
def reloadSettings(self):
settings = QApplication.instance().settings()
self.updateFont(settings.diffViewFont())
fm = QFontMetrics(self._font)
tabSize = settings.tabSize()
tabstopWidth = fm.horizontalAdvance(' ') * tabSize
self._option = QTextOption()
self._option.setTabStopDistance(tabstopWidth)
if settings.showWhitespace():
flags = self._option.flags()
self._option.setFlags(flags | QTextOption.ShowTabsAndSpaces)
self.reloadBugPattern()
def _onVScrollBarValueChanged(self, value):
if self._panel:
self._panel.update()
def _updatePanelGeo(self):
if self._panel:
rc = self.rect()
width = self._panel.width()
self.setViewportMargins(width + 1, 0, 0, 0)
self._panel.setGeometry(rc.left() + 1,
rc.top() + 1,
width,
self.viewport().height())
def _reloadTextLine(self, textLine):
# reload bugPattern
super()._reloadTextLine(textLine)
if isinstance(textLine, SourceTextLineBase):
textLine.setDefOption(self._option)
textLine.setFont(self._font)
def resizeEvent(self, event):
if event.oldSize().height() != event.size().height():
self._blockEventFilter = True
self._updatePanelGeo()
self._blockEventFilter = False
super().resizeEvent(event)
def eventFilter(self, obj, event):
if not self._blockEventFilter and \
obj == self._panel and \
event.type() == QEvent.Resize:
self._updatePanelGeo()
return True
return super().eventFilter(obj, event)
|
the-stack_0_7379 | from InitProb import *
from collections import defaultdict
PNFile = "lsup"
OVERRIDE = 1
vertices = [[1, 1], [0, 3], [-1, 1], [-1, -1], [1, -1], [0.5, 0], [0, 0.75], [-0.5, 0], [0, 1]]
edgelists = [[0,1,2,3,4], [5,6,7,8]]
trpl = [[1, 1], [0, 3], [-1, 1]]
c = .1
box = [np.array([ 0.08*c, 0.08*c]),\
np.array([ -0.08*c, 0.08*c]),\
np.array([ -0.08*c,-0.08*c]),\
np.array([ 0.08*c,-0.08*c])]
#def clip(PND, aL):
#Instead of modifying the polychain data, just reproduce it based on PND.PCC
if __name__ == "__main__" or OVERRIDE:
OFF = PolyNodeData()
OFF.loadOFF("lsup.off")
acuteAngles = [] #This will be a list of lists that we fill with vertex idx for each polychain
PND = PolyNodeData()
PND.loadPND(PNFile + ".poly", PNFile + ".node")
aL = identifyAcute(PND)
# L = aL[0]
# A = aL[-3][0]
# out = []
#
# for aI in L:
# for b in box:
# out.append((PND.nodes[aI]+b).tolist())
#
# for b in box:
# out.append((PND.nodes[A]+b).tolist())
#
#
# OFFOut = OFFData()
#
# OFFOut.vertices = out
# OFFOut.elements = [[i*4 + j + 518 for j in range(4)] for i in range(len(OFFOut.vertices)//4)]
# #OFFOut.elements = [[i*4 + j for j in range(4)] for i in range(len(OFFOut.vertices)//4)]
# OFFOut.NV = len(out)
# OFFOut.NE = len(OFFOut.elements)
#
# OFFOut.export("marks.off")
# exportToOFF(vertices, edgelists, "House.off")
# #VVV All meant to determine boundary from a tri/quadrangulation
#
# D = defaultdict(lambda: 0)
# OFF = importOFF("superior.off")
#
# #Determine Boundary and Holes
#
# for ele in OFF.elements:
# for idx in range(len(ele)):
# D[str({ele[idx], ele[(idx+1) % len(ele)]})] += 1
#
# unsortedBoundary = set()
#
# for edge in D:
# if D[edge] == 1:
# unsortedBoundary.add(edge)
|
the-stack_0_7380 | # -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# flake8: noqa
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
import mock
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
import sphinx_rtd_theme
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# to support markdown
from recommonmark.parser import CommonMarkParser
sys.path.insert(0, os.path.abspath("../"))
DEPLOY = os.environ.get("READTHEDOCS") == "True"
# -- Project information -----------------------------------------------------
try:
import torch # noqa
except ImportError:
for m in [
"torch",
"torchvision",
"torch.nn",
"torch.nn.parallel",
"torch.distributed",
"torch.multiprocessing",
"torch.autograd",
"torch.autograd.function",
"torch.nn.modules",
"torch.nn.modules.utils",
"torch.utils",
"torch.utils.data",
"torch.onnx",
"torchvision",
"torchvision.ops",
]:
sys.modules[m] = mock.Mock(name=m)
for m in [
"cv2",
"scipy",
"portalocker",
"detectron2._C",
"pycocotools",
"pycocotools.mask",
"pycocotools.coco",
"pycocotools.cocoeval",
"google",
"google.protobuf",
"google.protobuf.internal",
"onnx",
"caffe2",
"caffe2.proto",
"caffe2.python",
"caffe2.python.utils",
"caffe2.python.onnx",
"caffe2.python.onnx.backend",
]:
sys.modules[m] = mock.Mock(name=m)
sys.modules["cv2"].__version__ = "3.4"
import detectron2 # isort: skip
project = "detectron2"
copyright = "2019-2020, detectron2 contributors"
author = "detectron2 contributors"
# The short X.Y version
version = detectron2.__version__
# The full version, including alpha/beta/rc tags
release = version
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
needs_sphinx = "1.7"
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.napoleon",
"sphinx.ext.intersphinx",
"sphinx.ext.todo",
"sphinx.ext.coverage",
"sphinx.ext.mathjax",
"sphinx.ext.viewcode",
"sphinx.ext.githubpages",
]
# -- Configurations for plugins ------------
napoleon_google_docstring = True
napoleon_include_init_with_doc = True
napoleon_include_special_with_doc = True
napoleon_numpy_docstring = False
napoleon_use_rtype = False
autodoc_inherit_docstrings = False
autodoc_member_order = "bysource"
if DEPLOY:
intersphinx_timeout = 10
else:
# skip this when building locally
intersphinx_timeout = 0.1
intersphinx_mapping = {
"python": ("https://docs.python.org/3.6", None),
"numpy": ("https://docs.scipy.org/doc/numpy/", None),
"torch": ("https://pytorch.org/docs/master/", None),
}
# -------------------------
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
source_parsers = {".md": CommonMarkParser}
source_suffix = [".rst", ".md"]
# The master toctree document.
master_doc = "index"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store", "build", "README.md"]
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# -- Options for HTML output -------------------------------------------------
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = "detectron2doc"
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, "detectron2.tex", "detectron2 Documentation", "detectron2 contributors", "manual")
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, "detectron2", "detectron2 Documentation", [author], 1)]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"detectron2",
"detectron2 Documentation",
author,
"detectron2",
"One line description of project.",
"Miscellaneous",
)
]
# -- Options for todo extension ----------------------------------------------
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
_DEPRECATED_NAMES = set()
def autodoc_skip_member(app, what, name, obj, skip, options):
# we hide something deliberately
if getattr(obj, "__HIDE_SPHINX_DOC__", False):
return True
# Hide some names that are deprecated or not intended to be used
if name in _DEPRECATED_NAMES:
return True
return None
def url_resolver(url):
if ".html" not in url:
url = url.replace("../", "")
return "https://github.com/facebookresearch/detectron2/blob/master/" + url
else:
if DEPLOY:
return "http://detectron2.readthedocs.io/" + url
else:
return "/" + url
def setup(app):
from recommonmark.transform import AutoStructify
app.connect("autodoc-skip-member", autodoc_skip_member)
# app.connect('autodoc-skip-member', autodoc_skip_member)
app.add_config_value(
"recommonmark_config",
{
"url_resolver": url_resolver,
"enable_math": True,
"enable_inline_math": True,
"enable_eval_rst": True,
},
True,
)
app.add_transform(AutoStructify)
|
the-stack_0_7381 | # -*- coding: utf-8 -*-
u"""Simplify rendering jinja2
:copyright: Copyright (c) 2015 Bivio Software, Inc. All Rights Reserved.
:license: http://www.apache.org/licenses/LICENSE-2.0.html
"""
from __future__ import absolute_import, division, print_function
from pykern import pkinspect
from pykern import pkio
from pykern import pkresource
from pykern.pkdebug import pkdc, pkdp
import jinja2
#: Implicit extension including '.' added to resources
RESOURCE_SUFFIX = '.jinja'
def render_file(filename, j2_ctx, output=None, strict_undefined=False, jinja_env=None):
"""Render filename as template with j2_ctx.
Args:
basename (str): name without jinja extension
j2_ctx (dict): how to replace values in Jinja2 template
output (str): file name of output; if None, return str
strict_undefined (bool): set `jinja2.StrictUndefined` if True
jinja_env (dict): add values to jinja2 environment
Returns:
str: rendered template
"""
t = pkio.read_text(filename)
kw = dict(
trim_blocks=True,
lstrip_blocks=True,
keep_trailing_newline=True,
extensions=['jinja2.ext.do'],
)
if strict_undefined:
kw['undefined'] = jinja2.StrictUndefined
if jinja_env:
kw.update(jinja_env)
je = jinja2.Environment(**kw)
res = je.from_string(t).render(j2_ctx)
if output:
pkio.write_text(output, res)
return res
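# Illustrative usage sketch (the template path and context values below are
# made-up examples, not part of pykern; assumes this module is importable as
# pykern.pkjinja):
#
#   from pykern import pkjinja
#   text = pkjinja.render_file(
#       'report.txt.jinja',              # hypothetical Jinja2 template file
#       {'name': 'demo', 'count': 3},    # j2_ctx values substituted in the template
#       strict_undefined=True,           # fail loudly on undefined template variables
#   )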
def render_resource(basename, *args, **kwargs):
"""Render a pkresource as a jinja template.
Args:
basename (str): name without `RESOURCE_SUFFIX`
args (list): see func:`render_file` for rest of args and return
"""
return render_file(
pkresource.filename(
basename + RESOURCE_SUFFIX,
pkinspect.caller_module(),
),
*args,
**kwargs
)
|
the-stack_0_7382 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Libraries
import sys
import time
import logging
import platform
import os
import random
# Modules
import core.log
from core.data.wordlist import *
from core.data.crossword import *
from core.data.constants import *
from core.helpers.parse import *
from core.implements.basic_backtracking import *
from core.implements.fc_backtracking import *
from core.implements.live_backtracking import *
from cli.arguments.parsers import DEFAULT_PARSER
from cli.arguments.constants import *
from cli.printers.crossword import *
# Constants
LOGGER = logging.getLogger(__name__)
# Functions
"""
Takes the system arguments vector and tries to parse the arguments in it given
the argument parser specified and returns the namespace generated
@param parser the ArgumentParser objects to use to parse the arguments
"""
def parseArguments(parser):
return parser.parse_args()
"""
Given the origin of the data for the wordlist, loads the wordlist and returns
it, while giving some information about it if it's required
@param origin the source to load the wordlist from
@return wordlist valid object (or None if couldn't load)
"""
def loadWordlist(origin):
LOGGER.info("-> Loading wordlist (from %s)",origin)
wordlist = WordList(origin)
if args.timers > 1: time_load_wordlist_start = time.time()
wordlist.read()
if args.timers > 2:
LOGGER.info("--> Read in %f seconds",time.time()-\
time_load_wordlist_start)
time_load_wordlist_start_parse = time.time()
wordlist.parse()
if args.timers > 2:
LOGGER.info("--> Parsed in %f seconds",time.time()-\
time_load_wordlist_start_parse)
if args.timers > 1:
LOGGER.info("--> Loaded in %f seconds",time.time()-\
time_load_wordlist_start)
if args.show_wordlist:
LOGGER.info(wordlist)
return wordlist
"""
Given the origin of the data for the crossword, loads the crossword and returns
it, while giving some information about it if it's required
@param origin the source to load the crossword from
@return crossword valid object (or None if couldn't load)
"""
def loadCrossword(origin):
crossword = Crossword(origin)
LOGGER.info("-> Loading crossword (from %s)",origin)
if args.timers > 1: time_load_crossword_start = time.time()
crossword.read().parse()
if args.timers > 1:
LOGGER.info("--> Loaded in %f seconds",time.time()-\
time_load_crossword_start)
if args.show_crossword:
LOGGER.info(crossword)
return crossword
"""
Retrieves the algorithm object to use depending on the arguments
@return algorithm callable object
"""
def selectAlgorithm():
alg = None
LOGGER.info("Chose %s algorithm"%args.algorithm)
if args.algorithm == ALG_BACKTRACKING_SIMPLE:
alg = CrosswordBasicBacktracking(wordlist.getList(),
crossword.getConstraints())
elif args.algorithm == ALG_BACKTRACKING_FC:
alg = CrosswordForwardCheckingBacktracking(wordlist.getList(),
crossword.getConstraints())
elif args.algorithm == ALG_BACKTRACKING_LIVE:
crossword_printer = CrosswordPrinter(crossword,args.frames)
crossword_printer.setStyle(args.style)
alg = CrosswordLiveBacktracking(wordlist.getList(),
crossword.getConstraints(),crossword_printer)
return alg
"""
Given the solution returned from the crossword, searches over the internet for
the definitions of the words appearing in the solution and shows the user the
definitions so they can solve the crossword themselves
@param solution solution to show hints
"""
def playGame(solution):
from bs4 import BeautifulSoup
import mwapi
LOGGER.info("---- GAME MODE ----")
LOGGER.info("I want to play a game...")
session = mwapi.Session('https://ca.wiktionary.org')
for word_i in range(len(solution)):
word = "".join(list(map(chr,solution[word_i]))).lower()
var = crossword.getVariableString(word_i)
resp = session.get(action='query',prop='extracts',titles=word)\
["query"]["pages"]
pages = list(resp.keys())
try:
extract = resp[pages[0]]["extract"]
except:
extract = None
parser = None
if extract:
parser = BeautifulSoup(extract,"html.parser").findAll("li")
definition = ""
if parser != None:
valid_defs = []
for info in parser:
text = info.getText()
if "Pronúncia" in text \
or "Exemples" in text \
or "Etimologia" in text \
or "Per a més informació vegeu" in text\
or len(text.split()) < 4:
continue
else:
valid_defs.append(text)
if len(valid_defs):
definition = random.choice(valid_defs)
if definition == "":
definition = word + " (no hem trobat cap definició)"
LOGGER.info("%s: %s",var,definition)
"""
Given a solution from the crossword, tries to print it over the screen, or logs
that no solution was found if necessary
@param solution solution to print
"""
def showSolution(solution):
if solution == None:
LOGGER.info("The algorithm hasn't found any valid solution :(")
else:
printer = CrosswordPrinter(crossword)
printer.setStyle(args.style)
if args.solution:
if args.play:
print(printer)
playGame(solution)
elif args.algorithm != ALG_BACKTRACKING_LIVE:
printer.printSolution(solution)
else:
LOGGER.info("The algorithm has found a valid solution :)")
if __name__ == "__main__":
# Prepare coding
if platform.system() == "Windows":
os.system("chcp 65001")
# Parse arguments
args = parseArguments(DEFAULT_PARSER)
# Set default tablesets
args.style = CHAR_TABLESETS[args.style]
# Welcome
LOGGER.info("Welcome to Crossword solver")
# Load data
LOGGER.info("Loading crossword and wordlist")
if args.timers > 0: time_load_start = time.time()
# Datasets
if args.wordlist == None:
args.wordlist = ITEMSET_BYNAME[args.itemset]["wordlist"]
if args.crossword == None:
args.crossword = ITEMSET_BYNAME[args.itemset]["crossword"]
# Wordlist
wordlist = loadWordlist(args.wordlist)
# Crossword
crossword = loadCrossword(args.crossword)
# Loading ended
if args.timers > 0:
time_load_end = time.time()
LOGGER.info("Loaded all in %f seconds",
time_load_end-time_load_start)
else:
LOGGER.info("Loaded all data succesfully")
# Choose algorithm
alg = selectAlgorithm()
# Solve the problem
LOGGER.info("Started backtracking algorithm")
if args.timers > 0: time_alg_start = time.time()
solution = alg(crossword.getVariables())
if args.timers > 0:
time_alg_end = time.time()
LOGGER.info("Ended alg. in %f seconds",
time_alg_end-time_alg_start)
else:
LOGGER.info("Ended backtracking algorithm")
# Solution
if args.timers > 0:
LOGGER.info("TOTAL TIME: %f seconds",time_alg_end-time_load_start)
showSolution(solution)
LOGGER.info("Thanks for trusting our app ;)")
|
the-stack_0_7383 | """Test suite main conftest."""
import transaction
import pytest
from mock import Mock
from pyramid.decorator import reify
from pyramid.request import Request
from pyramid import testing
from zope.sqlalchemy import register
import pyramid_basemodel
from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker
from sqlalchemy.pool import NullPool
from pyramid_localize import build_localize_config
from pyramid_localize.models import Language
def web_request_func():
"""Mock web request for views testing."""
# pylint:disable=import-outside-toplevel
from pyramid_localize.request import LocalizeRequestMixin
from pyramid_localize.request import database_locales
from pyramid_localize.request import locale_id
from pyramid_localize.request import locales
class TestRequest(LocalizeRequestMixin, Request): # pylint:disable=too-many-ancestors
"""Test request object."""
@reify
def _database_locales(self):
return database_locales(self)
@reify
def locale_id(self):
"""Returns a database locale id."""
return locale_id(self)
def locales(self, *args, **kwargs):
"""Return all availablee locales."""
return locales(self, *args, **kwargs)
request = TestRequest({})
localize_config = build_localize_config(
{
"localize.locales.available": ["en", "pl", "de", "cz"],
"localize.domain": "test",
}
)
configurator = testing.setUp()
request.registry = configurator.registry # pylint:disable=attribute-defined-outside-init
request.registry["localize"] = localize_config
return request
@pytest.fixture
def web_request():
"""Mock web request for views testing."""
return web_request_func()
@pytest.fixture
def locale_negotiator_request():
"""Request for locale_negotiator tests."""
request = Mock()
mock_configuration = {
"cookies": {"_LOCALE_": "cz"},
"_LOCALE_": "fr",
"accept_language.best_match.return_value": "de",
"path": "/pl/page",
"registry": {
"localize": build_localize_config(
{
"localize.locales.available": ["en", "pl", "de", "cz", "fr"],
"localize.locales.default": "en",
}
)
},
}
request.configure_mock(**mock_configuration)
return request
@pytest.fixture
def db_session(request):
"""Session for SQLAlchemy."""
from pyramid_localize.models import Base # pylint:disable=import-outside-toplevel
engine = create_engine("sqlite:///localize.sqlite", echo=False, poolclass=NullPool)
pyramid_basemodel.Session = scoped_session(sessionmaker())
register(pyramid_basemodel.Session)
pyramid_basemodel.bind_engine(engine, pyramid_basemodel.Session, should_create=True, should_drop=True)
def destroy():
transaction.commit()
Base.metadata.drop_all(engine)
request.addfinalizer(destroy)
return pyramid_basemodel.Session
@pytest.fixture
def db_locales(db_session): # pylint:disable=redefined-outer-name
"""Add Languages to db_session."""
for locale in ["pl", "cz", "fr"]:
locale_object = Language(name=locale, native_name=locale, language_code=locale)
db_session.add(locale_object)
transaction.commit()
@pytest.fixture
def request_i18n():
"""Create request with i18n subscribers on."""
config = testing.setUp()
config.scan("pyramid_localize.subscribers.i18n")
request = Request({})
request.registry = config.registry
return request
@pytest.fixture
def request_fake():
"""Create request with fake i18n subscribers on."""
config = testing.setUp()
config.scan("pyramid_localize.subscribers.fake")
request = Request({})
request.registry = config.registry
return request
|
the-stack_0_7384 | import os
from pypaper import latex_tools as lt
test_dir = os.path.dirname(__file__)
def test_compile_bibtex():
ffp = test_dir + "/test_data_files/sample.bib"
citations = ["Safak:1992ub", "Vesic:1975"]
not_cited = ["Rodriguez:2000sr"]
bibtex_str = lt.compile_bibtex(citations, ffp)
print(bibtex_str)
for cite in citations:
assert cite in bibtex_str, cite
for cite in not_cited:
assert cite not in bibtex_str
# can not find
unlisted_citations = ["Rathje:2017ip"]
bibtex_str = lt.compile_bibtex(unlisted_citations, ffp)
for cite in unlisted_citations:
assert cite not in bibtex_str
def test_extract_citations():
ffp = test_dir + "/test_data_files/sample_latex.tex"
expected_citations = ['Vesic:1975', 'Chatzigogos:2008uv', 'Safak:1992ub', 'Raychowdhury:2009hw']
citations = lt.extract_citation_keys_from_latex(latex_ffp=ffp)
assert len(expected_citations) == len(citations)
for ec in expected_citations:
assert ec in citations
def test_extract_multi_citations():
ffp = test_dir + "/test_data_files/sample_latex_w_eg_and_multiple.tex"
expected_citations = ['Vesic:1975', 'Chatzigogos:2008uv', 'Safak:1992ub', 'Raychowdhury:2009hw', "NIST:2013ssi",
'Taylor:1979uc', 'Gajan:2008cs', 'Deng:2012ta']
citations = lt.extract_citation_keys_from_latex(latex_ffp=ffp)
print(citations)
assert len(expected_citations) == len(citations)
for ec in expected_citations:
assert ec in citations, ec
if __name__ == '__main__':
test_extract_multi_citations()
|
the-stack_0_7385 | # coding=utf-8
# Kevin Manfredy Axpuac Juárez - 15006597
# Miguel Angel Lemus Morales - 14003328
# Main file
from help import Help
from instructions import Inst
from playerVsPlayer import PlayerVsPlayer
from playerVsMachine import PlayerVsMachine
from machineVsMachine import MachineVsMachine
# Game menu
def menu():
try:
while True:
print('\n====================================')
print('*** Bienvenido al juego CONNECT4 ***')
print('====================================')
while True:
print('(1) INSTRUCCIONES')
print('(2) PLAYER vs PLAYER')
print('(3) PLAYER vs MACHINE')
print('(4) MACHINE vs MACHINE')
print('(5) HELP')
print('(6) EXIT')
try:
option = int(input("Ingrese una opcion: "))
if option == 1:
Inst()
elif option == 2:
PlayerVsPlayer()
elif option == 3:
PlayerVsMachine()
elif option == 4:
MachineVsMachine()
elif option == 5:
Help()
elif option == 6:
print('\nGracias por visitar Connect4 !!! ')
print('Saliendo del juego ...')
break
else:
print('\nERROR: Opcion invalida! Solo hay opciones 1, 2, 3, 4, 5 y 6\n')
except ValueError:
print('\nERROR: Opcion invalida! No ingreso un numero entero\n')
break
except:
print()
menu()
|
the-stack_0_7386 | import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
import timeit
class AlexNet(nn.Module):
def __init__(self, num_classes=1000):
super(AlexNet, self).__init__()
self.features = nn.Sequential(
nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=2),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2),
nn.Conv2d(64, 192, kernel_size=5, padding=2),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2),
nn.Conv2d(192, 384, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(384, 256, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(256, 256, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2),
)
self.avgpool = nn.AdaptiveAvgPool2d((6, 6))
self.classifier = nn.Sequential(
nn.Dropout(),
nn.Linear(256 * 6 * 6, 4096),
nn.ReLU(inplace=True),
nn.Dropout(),
nn.Linear(4096, 4096),
nn.ReLU(inplace=True),
nn.Linear(4096, num_classes),
)
def forward(self, x):
x = self.features(x)
x = self.avgpool(x)
x = torch.flatten(x, 1)
x = self.classifier(x)
return x
num_classes = 1000
num_batches = 10
batch_size = 120
image_w = 128
image_h = 128
num_repeat = 20
cuda_available = torch.cuda.is_available()
print("===================================================")
print("Cuda Available : {}".format(cuda_available))
print("===================================================")
def train(model):
model.train(True)
loss_fn = nn.MSELoss()
optimizer = optim.SGD(model.parameters(), lr=0.001)
one_hot_indices = torch.LongTensor(batch_size) \
.random_(0, num_classes) \
.view(batch_size, 1)
for _ in range(num_batches):
# generate random inputs and labels
inputs = torch.randn(batch_size, 3, image_w, image_h)
labels = torch.zeros(batch_size, num_classes) \
.scatter_(1, one_hot_indices, 1)
# run forward pass
optimizer.zero_grad()
if cuda_available:
outputs = model(inputs.to('cuda:0'))
else:
outputs = model(inputs)
# print("Output-device {}".format(outputs.device))
# run backward pass
labels = labels.to(outputs.device)
loss_fn(outputs, labels).backward()
optimizer.step()
stmt = "train(model)"
setup = None
if cuda_available:
setup = "model = AlexNet(num_classes=num_classes).to('cuda:0')"
else:
setup = "model = AlexNet(num_classes=num_classes)"
stats = []
for i in range(10):
rn_run_times = timeit.repeat(stmt, setup, number=1, repeat=num_repeat,
globals=globals())
rn_mean, rn_std = np.mean(rn_run_times), np.std(rn_run_times)
stats.append(rn_mean)
print("Single Node Training Time:", rn_mean)
stats_ar = np.array(stats)
mean = stats_ar.mean()
print(" Mean Training Time {}".format(mean))
with open('stats_alexnet_s_v1.csv', 'a+') as fp:
fp.write(str(mean) + "\n")
|
the-stack_0_7388 | from tabulate import tabulate
import requests
import argparse
import pprint
import json
import os
class EnvDefault(argparse.Action):
def __init__(self, envvar, required=True, default=None, **kwargs):
if not default and envvar:
if envvar in os.environ:
default = os.environ[envvar]
if required and default:
required = False
super(EnvDefault, self).__init__(default=default, required=required,
**kwargs)
def __call__(self, parser, namespace, values, option_string=None):
setattr(namespace, self.dest, values)
def _parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('-pn', '--package-number', type=str, required=True, help='Package number, you may use the PKG environment variable as well',
action=EnvDefault, envvar="PKG")
parser.add_argument('-ih', '--is-heb', action='store_true', required=False, help='Print in hebrew')
parser.add_argument('-pj', '--print-json', action='store_true', required=False, help='Print json instead of table')
parser.add_argument('-cf', '--cookie-file', type=str, required=False, default='cookie.json', help='Cookie file')
return parser.parse_args()
def _get_state_list(item_code, request_key, verification_key, is_heb):
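    # Query Israel Post's item-trace endpoint. Note that one anti-forgery token
    # is sent in the form body (request_key) and a second one in the Cookie
    # header (verification_key); both are read from the cookie file in __main__.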
url = "https://mypost.israelpost.co.il/umbraco/Surface/ItemTrace/GetItemTrace"
payload = f"itemCode={item_code}{'&lcid=1037' if is_heb else ''}&__RequestVerificationToken={request_key}"
headers = {
'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
'Cookie': f'__RequestVerificationToken={verification_key};',
}
response = requests.request("POST", url, headers=headers, data=payload)
return json.loads(response.text)
def _check_response(response):
if response['ReturnCode'] != 0:
print(f"Got the following error: {response['ErrorDescription']}")
return False
else:
return True
def _print_response(response, print_json):
if print_json:
pprint.pprint(response)
else:
print(tabulate(tabular_data=response['Result']['itemcodeinfo']['InfoLines'],
headers=response['Result']['itemcodeinfo']['ColumnHeaders'],
tablefmt="plain",
stralign='right'))
if __name__ == "__main__":
args = _parse_args()
cookie = json.load(open(args.cookie_file))
post_response = _get_state_list(args.package_number, cookie['request_key'], cookie['verification_key'], args.is_heb)
if _check_response(post_response):
_print_response(post_response, args.print_json)
else:
print('Failed to get package information')
|
the-stack_0_7390 | # SPDX-FileCopyrightText: Copyright 2022, Siavash Ameli <[email protected]>
# SPDX-License-Identifier: BSD-3-Clause
# SPDX-FileType: SOURCE
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the license found in the LICENSE.txt file in the root directory
# of this source tree.
__all__ = ['get_data_type_name']
# ==================
# get data type name
# ==================
def get_data_type_name(data):
"""
Returns the typename of data as string.
"""
if data.dtype in [b'float32', 'float32']:
data_type_name = b'float32'
elif data.dtype in [b'float64', 'float64']:
data_type_name = b'float64'
elif data.dtype in [b'float128', 'float128']:
data_type_name = b'float128'
elif data.dtype in [b'int32', 'int32']:
data_type_name = b'int32'
elif data.dtype in [b'int64', 'int64']:
data_type_name = b'int64'
else:
raise TypeError('Data type should be "float32", "float64", ' +
'"float128", "int32", or "int64".')
return data_type_name
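# Illustrative usage (numpy arrays are the expected input; these lines are an
# added example, not part of the package's test suite):
#
#   import numpy
#   get_data_type_name(numpy.zeros(3, dtype=numpy.float64))   # -> b'float64'
#   get_data_type_name(numpy.arange(3, dtype=numpy.int32))    # -> b'int32'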
|
the-stack_0_7391 | # import the necessary packages
from PIL import Image, ImageOps
import pytesseract
from pytesseract import Output
import argparse
import cv2
import os
import json
def process_list(text: str) -> dict:
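    # Assumed input shape (inferred from the parsing below, not from a spec):
    # the OCR text begins with four header lines and ends with one footer line,
    # and each remaining line looks like "<ID> <CATEGORY> ...", e.g. a line
    # "A123 WEAPON ..." ends up as final["weapon"]["A123"] = {}.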
final = {}
items = text.split("\n")[4:][:-1]
for item in items:
if item == "":
continue
x = item.split(" ")
itemtype = x[1].lower()
if itemtype not in final:
final[itemtype] = {}
final[itemtype][x[0]] = {}
return final
def process_query(text: str, existing: dict) -> dict:
current = ("", "")
lines = text.split("\n")
for line in lines:
if line != "":
stuff = line.split(": ")
if stuff[0] == "ID":
for cat in existing:
for item in existing[cat]:
if item == stuff[1]:
current = (cat, stuff[1])
break
if stuff[0] == "LOCATION":
cat, id = current
if cat == "" or id == "":
continue
if id not in existing[cat]:
print("warning - item found in query, not in list", id)
existing[cat][id]["location"] = stuff[1]
return existing
def print_results(items: dict):
for category in items:
print(category.upper())
for item in items[category]:
print(" ├──" + item)
for prop in items[category][item]:
print(" ├── {}: {}".format(prop.upper(), items[category][item][prop]))
# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("image",
help="path to input image to be OCR'd")
ap.add_argument("-t", "--type", default="list")
ap.add_argument("-d", "--debug", dest="debug", action="store_true",
help="whether to save results of ocr to file")
ap.add_argument("-p", "--pretty-print", dest="prettyprint", action="store_true",
help="whether to print nice data, or produce json")
ap.set_defaults(debug=False, prettyprint=False)
args = vars(ap.parse_args())
# load the example image and convert it to grayscale
image = cv2.imread(args["image"])
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
filename = "{}.png".format(os.getpid())
cv2.imwrite(filename, gray)
# load the image as a PIL/Pillow image and apply some basic transformations.
img = Image.open(filename)
os.remove(filename)
img = img.crop((300, 120, 2000, 1300))
img = ImageOps.invert(img)
res = {}
if args["type"] == "list":
res = process_list(pytesseract.image_to_string(img))
f = open("list.json", "w+")
f.write(json.dumps(res))
f.close()
if args["type"] == "query":
f = open("list.json", "r")
existing = json.loads(f.read())
res = process_query(pytesseract.image_to_string(img), existing)
if args["prettyprint"]:
print_results(res)
else:
print(json.dumps(res, indent=2))
if args["debug"]:
img.save(filename)
img = cv2.imread(filename)
os.remove(filename)
d = pytesseract.image_to_data(img, output_type=Output.DICT)
n_boxes = len(d['level'])
for i in range(n_boxes):
(x, y, w, h) = (d['left'][i], d['top'][i], d['width'][i], d['height'][i])
cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 1)
cv2.imwrite("f.png", img) |
the-stack_0_7394 | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid import Program, program_guard
from op_test import OpTest
paddle.enable_static()
class TestMaxMinAPI(unittest.TestCase):
def setUp(self):
self.init_case()
self.cal_np_out_and_gradient()
self.place = fluid.CUDAPlace(0) if core.is_compiled_with_cuda(
) else fluid.CPUPlace()
def init_case(self):
self.x_np = np.array([[0.2, 0.3, 0.5, 0.9], [0.1, 0.2, 0.6, 0.7]])
self.shape = [2, 4]
self.dtype = 'float64'
self.axis = None
self.keepdim = False
    # If there are multiple minimum or maximum elements, max/min is non-derivable,
    # so its gradient check is not supported by the unittest framework;
    # thus we calculate the gradient with a numpy function.
def cal_np_out_and_gradient(self):
def _cal_np_out_and_gradient(func):
            if func == 'max':
                out = np.max(self.x_np, axis=self.axis, keepdims=self.keepdim)
            elif func == 'min':
                out = np.min(self.x_np, axis=self.axis, keepdims=self.keepdim)
            else:
                print('This unittest only tests max/min, but now is', func)
self.np_out[func] = out
grad = np.zeros(self.shape)
out_b = np.broadcast_to(out, self.shape)
grad[self.x_np == out_b] = 1
self.np_grad[func] = grad
self.np_out = dict()
self.np_grad = dict()
_cal_np_out_and_gradient('max')
_cal_np_out_and_gradient('min')
def _choose_paddle_func(self, func, x):
        if func == 'max':
            out = paddle.max(x, self.axis, self.keepdim)
        elif func == 'min':
            out = paddle.min(x, self.axis, self.keepdim)
        else:
            print('This unittest only tests max/min, but now is', func)
return out
# We check the output between paddle API and numpy in static graph.
def test_static_graph(self):
def _test_static_graph(func):
startup_program = fluid.Program()
train_program = fluid.Program()
with fluid.program_guard(startup_program, train_program):
x = fluid.data(name='input', dtype=self.dtype, shape=self.shape)
x.stop_gradient = False
out = self._choose_paddle_func(func, x)
exe = fluid.Executor(self.place)
res = exe.run(fluid.default_main_program(),
feed={'input': self.x_np},
fetch_list=[out])
self.assertTrue((np.array(res[0]) == self.np_out[func]).all())
_test_static_graph('max')
_test_static_graph('min')
# As dygraph is easy to compute gradient, we check the gradient between
# paddle API and numpy in dygraph.
def test_dygraph(self):
def _test_dygraph(func):
paddle.disable_static()
x = paddle.to_tensor(
self.x_np, dtype=self.dtype, stop_gradient=False)
out = self._choose_paddle_func(func, x)
grad_tensor = paddle.ones_like(x)
paddle.autograd.backward([out], [grad_tensor], True)
self.assertEqual(np.allclose(self.np_out[func], out.numpy()), True)
self.assertEqual(np.allclose(self.np_grad[func], x.grad), True)
paddle.enable_static()
_test_dygraph('max')
_test_dygraph('min')
# test multiple minimum or maximum elements
class TestMaxMinAPI2(TestMaxMinAPI):
def init_case(self):
self.x_np = np.array([[0.2, 0.3, 0.9, 0.9], [0.1, 0.1, 0.6, 0.7]])
self.shape = [2, 4]
self.dtype = 'float64'
self.axis = None
self.keepdim = False
# test different axis
class TestMaxMinAPI3(TestMaxMinAPI):
def init_case(self):
self.x_np = np.array([[0.2, 0.3, 0.9, 0.9], [0.1, 0.1, 0.6, 0.7]])
self.shape = [2, 4]
self.dtype = 'float64'
self.axis = 0
self.keepdim = False
# test keepdim = True
class TestMaxMinAPI4(TestMaxMinAPI):
def init_case(self):
self.x_np = np.array([[0.2, 0.3, 0.9, 0.9], [0.1, 0.1, 0.6, 0.7]])
self.shape = [2, 4]
self.dtype = 'float64'
self.axis = 1
self.keepdim = True
# test axis is tuple
class TestMaxMinAPI5(TestMaxMinAPI):
def init_case(self):
self.x_np = np.array(
[[[1, 2], [3, 4]], [[5, 6], [7, 8]]]).astype(np.int32)
self.shape = [2, 2, 2]
self.dtype = 'int32'
self.axis = (0, 1)
self.keepdim = False
|
the-stack_0_7396 | # Copyright 2020 (c) Cognizant Digital Business, Evolutionary AI. All rights reserved. Issued under the Apache 2.0 License.
import argparse
def predict(start_date: str,
end_date: str,
path_to_ips_file: str,
output_file_path) -> None:
"""
Generates and saves a file with daily new cases predictions for the given countries, regions and intervention
plans, between start_date and end_date, included.
:param start_date: day from which to start making predictions, as a string, format YYYY-MM-DDD
:param end_date: day on which to stop making predictions, as a string, format YYYY-MM-DDD
:param path_to_ips_file: path to a csv file containing the intervention plans between inception date (Jan 1 2020)
and end_date, for the countries and regions for which a prediction is needed
:param output_file_path: path to file to which to save the the predictions
:return: Nothing. Saves the generated predictions to an output_file_path CSV file
with columns "CountryName,RegionName,Date,PredictedDailyNewCases"
"""
# !!! YOUR CODE HERE !!!
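    # A minimal illustrative sketch of what could go here (kept commented out;
    # the pandas usage and the constant-zero baseline are assumptions, not the
    # official reference solution). Column names follow the docstring above.
    #
    #   import pandas as pd
    #   ips = pd.read_csv(path_to_ips_file, parse_dates=["Date"])
    #   mask = (ips["Date"] >= start_date) & (ips["Date"] <= end_date)
    #   preds = ips.loc[mask, ["CountryName", "RegionName", "Date"]].copy()
    #   preds["PredictedDailyNewCases"] = 0.0  # trivial baseline prediction
    #   preds.to_csv(output_file_path, index=False)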
    raise NotImplementedError
# !!! PLEASE DO NOT EDIT. THIS IS THE OFFICIAL COMPETITION API !!!
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("-s", "--start_date",
dest="start_date",
type=str,
required=True,
help="Start date from which to predict, included, as YYYY-MM-DD. For example 2020-08-01")
parser.add_argument("-e", "--end_date",
dest="end_date",
type=str,
required=True,
help="End date for the last prediction, included, as YYYY-MM-DD. For example 2020-08-31")
parser.add_argument("-ip", "--interventions_plan",
dest="ip_file",
type=str,
required=True,
help="The path to an intervention plan .csv file")
parser.add_argument("-o", "--output_file",
dest="output_file",
type=str,
required=True,
help="The path to the CSV file where predictions should be written")
args = parser.parse_args()
print(f"Generating predictions from {args.start_date} to {args.end_date}...")
predict(args.start_date, args.end_date, args.ip_file, args.output_file)
print("Done!")
|
the-stack_0_7398 | import json
import public_config as c
import logging
import argparse
import shutil
from tinydb import TinyDB, Query
from juriscraper.pacer import (
DocketReport,
PacerSession,
PossibleCaseNumberApi,
FreeOpinionReport,
)
logging.basicConfig(level=logging.DEBUG)
district_dict = {
"00": "med",
"01": "mad",
"02": "nhd",
"03": "rid",
"05": "ctd",
"10": "vtd",
}
class PlymouthState(object):
logging.info("Initializing Plymouth State object")
s = PacerSession(username=c.PACER_USERNAME, password=c.PACER_PASSWORD)
results = []
def get_pacer_case_ids(self):
"""Find PACER Case IDs from iQuery
:return: None
"""
q = Query()
db = TinyDB("db/master.json")
fjc_table = db.table("fjc")
for row in fjc_table.search((q.PACER_CASE_ID == "")):
report = PossibleCaseNumberApi(row["COURT"], self.s)
report.query(row["DOCKET_NO"])
data = report.data(office_number=row["OFFICE"], docket_number_letters="cv")
fjc_table.update(
{"PACER_CASE_ID": data["pacer_case_id"], "TITLE": data["title"]},
doc_ids=[row.doc_id],
)
def get_docket_json(self):
"""Download docket to disk from Pacer
:return: None
"""
q = Query()
db = TinyDB("db/master.json")
fjc_table = db.table("fjc")
for row in fjc_table.search(~(q.PACER_CASE_ID == "") & (q.JSON == "False")):
rep = DocketReport(row["COURT"], self.s)
rep.query(
row["PACER_CASE_ID"],
show_parties_and_counsel=True,
show_terminated_parties=True,
show_list_of_member_cases=True,
include_pdf_headers=True,
show_multiple_docs=False,
)
with open(
"downloads/json/pacer_docket_%s.json" % row["PACER_CASE_ID"], "w"
) as write_file:
json.dump(rep.data, write_file, indent=4, sort_keys=True, default=str)
with open(
"downloads/html/pacer_docket_%s.html" % row["PACER_CASE_ID"], "w"
) as file:
file.write(rep.response.text)
fjc_table.update(
{
"JSON": "True",
"pacer_doc_id": rep.data["docket_entries"][0]["pacer_doc_id"],
},
doc_ids=[row.doc_id],
)
logging.info("Finished collecting JSON and HTML")
def download_pdfs(self):
"""Download the first (presumably complaint) PDF to downlaods dir.
:return: None
"""
q = Query()
db = TinyDB("db/master.json")
fjc_table = db.table("fjc")
for row in fjc_table.search((q.JSON == "True") & (q.PDF == "False")):
logging.info(
"Collecting PDF #%s, in %s" % (row["PACER_CASE_ID"], row["TITLE"])
)
report = FreeOpinionReport(row["COURT"], self.s)
r = report.download_pdf(row["PACER_CASE_ID"], row["pacer_doc_id"])
with open(
"downloads/pdf/pacer_complaint_%s.pdf" % row["PACER_CASE_ID"], "w"
) as file:
file.write(r.content)
fjc_table.update(
{"PDF": "True"}, doc_ids=[row.doc_id],
)
logging.info(
"Collected PDF #%s, in %s" % (row["PACER_CASE_ID"], row["TITLE"])
)
def get_pacer_ids():
"""Use PACER iQuery to Identify PACER unique IDs
:return: None
"""
logging.info("Begin collecting PACER CASE IDS")
p = PlymouthState()
p.get_pacer_case_ids()
def download_json_html():
"""Scrape HTML and JSON from Pacer
Save resp from juriscraper to download/JSON & HTML dir
:return: None
"""
logging.info("Begin collecting Dockets")
p = PlymouthState()
p.get_docket_json()
def get_pdfs():
"""Collect PDF from Pacer
:return: None
"""
logging.info("Begin collecting PDFS")
p = PlymouthState()
p.download_pdfs()
def zip_files():
"""Zip the HTML, PDF and JSON Directories
:return: None
"""
shutil.make_archive("downloads/zip/html_files", "zip", "downloads/html/")
shutil.make_archive("downloads/zip/pdf_files", "zip", "downloads/pdf/")
shutil.make_archive("downloads/zip/json_files", "zip", "downloads/json/")
class Command(object):
help = "Collect cases for Plymouth State client project"
VALID_ACTIONS = {
"get-pacer-ids": get_pacer_ids,
"get-dockets": download_json_html,
"get-pdfs": get_pdfs,
"zip-files": zip_files,
}
parser = argparse.ArgumentParser(description="Process Plymouth State")
parser.add_argument("-a", "--action", help="Must choose an action", required=True)
args = vars(parser.parse_args())
VALID_ACTIONS[args["action"]]()
|
the-stack_0_7402 | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import pytest
import numpy as np
from mindspore import Tensor
from mindspore.ops import operations as P
from mindspore.common.api import ms_function
from mindspore.common.initializer import initializer
from mindspore.common.parameter import Parameter
import mindspore.nn as nn
import mindspore.context as context
from mindspore.common import dtype as mstype
context.set_context(device_target='GPU')
class UnsortedSegmentSumNet(nn.Cell):
def __init__(self, num_segments):
super(UnsortedSegmentSumNet, self).__init__()
self.unsorted_segment_sum = P.UnsortedSegmentSum()
self.num_segments = num_segments
def construct(self, data, ids):
return self.unsorted_segment_sum(data, ids, self.num_segments)
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_1D():
input_x = Tensor([1, 2, 3, 4], mstype.float32)
segment_ids = Tensor([0, 0, 1, 2], mstype.int32)
num_segments = 4
net = UnsortedSegmentSumNet(num_segments)
output = net(input_x, segment_ids)
expect = [3, 3, 4, 0]
assert (output.asnumpy() == expect).all()
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_2D():
input_x = Tensor([[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12]], mstype.float32)
segment_ids = Tensor([2, 1, 1], mstype.int32)
num_segments = 4
net = UnsortedSegmentSumNet(num_segments)
output = net(input_x, segment_ids)
expect = [[ 0, 0, 0, 0],
[14, 16, 18, 20],
[ 1, 2, 3, 4],
[ 0, 0, 0, 0]]
assert (output.asnumpy() == expect).all()
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_3D():
input_x = Tensor(np.arange(4 * 5 * 3, dtype=np.float32).reshape(4, 5, 3))
segment_ids = Tensor([2, 1, 1, -1], mstype.int32)
num_segments = 5
net = UnsortedSegmentSumNet(num_segments)
output = net(input_x, segment_ids)
expect = [[[ 0., 0., 0.],
[ 0., 0., 0.],
[ 0., 0., 0.],
[ 0., 0., 0.],
[ 0., 0., 0.]],
[[45., 47., 49.],
[51., 53., 55.],
[57., 59., 61.],
[63., 65., 67.],
[69., 71., 73.]],
[[ 0., 1., 2.],
[ 3., 4., 5.],
[ 6., 7., 8.],
[ 9., 10., 11.],
[12., 13., 14.]],
[[ 0., 0., 0.],
[ 0., 0., 0.],
[ 0., 0., 0.],
[ 0., 0., 0.],
[ 0., 0., 0.]],
[[ 0., 0., 0.],
[ 0., 0., 0.],
[ 0., 0., 0.],
[ 0., 0., 0.],
[ 0., 0., 0.]]]
assert (output.asnumpy() == expect).all()
|
the-stack_0_7403 | # Copyright 2018 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An optimization pass that pushes Z gates later and later in the circuit."""
from typing import Iterator, Tuple, cast
from cirq import ops, extension
from cirq.circuits import Circuit, InsertStrategy, OptimizationPass
from cirq.google.decompositions import is_negligible_turn
from cirq.google.xmon_gates import ExpZGate
from cirq.value import Symbol
KNOWN_Z_TYPES = (ExpZGate, ops.RotZGate)
class EjectZ(OptimizationPass):
"""Removes Z gates by pushing them later and later until they merge.
As Z gates are removed from the circuit, 'lost phase' builds up. As lost
phase is pushed rightward, it modifies phaseable operations along the way.
Eventually the lost phase is discharged into a 'drain'. Only Z gates
without a parameter dependence are removed.
There are three kinds of drains:
- Measurement gates, which absorb phase by discarding it.
- Parameterized Z gates, which absorb phase into their turns attribute.
- The end of the circuit, which absorbs phase into a new Z gate.
"""
def __init__(self,
tolerance: float = 0.0,
ext: extension.Extensions=None) -> None:
"""
Args:
tolerance: Maximum absolute error tolerance. The optimization is
permitted to simply drop negligible combinations of Z gates,
with a threshold determined by this tolerance.
ext: Extensions object used for determining if gates are phaseable
(i.e. if Z gates can pass through them).
"""
self.tolerance = tolerance
self.ext = ext or extension.Extensions()
def optimize_circuit(self, circuit: Circuit):
qubits = {
q
for m in circuit.moments for op in m.operations for q in op.qubits
}
for qubit in qubits:
for start, drain in self._find_optimization_range_drains(circuit,
qubit):
self._optimize_range(circuit, qubit, start, drain)
def _find_optimization_range_drains(
self,
circuit: Circuit,
qubit: ops.QubitId) -> Iterator[Tuple[int, int]]:
"""Finds ranges where Z gates can be pushed rightward.
Args:
circuit: The circuit being optimized.
qubit: The qubit along which Z operations are being merged.
Yields:
(start, drain) tuples. Z gates on the given qubit from moments with
indices in the range [start, drain) should all be merged into
whatever is at the drain index.
"""
start_z = None
prev_z = None
for i in range(len(circuit.moments)):
op = circuit.operation_at(qubit, i)
if op is None:
continue
if start_z is None:
# Unparameterized Zs start optimization ranges.
if (isinstance(op.gate, KNOWN_Z_TYPES) and
not isinstance(op.gate.half_turns,
Symbol)):
start_z = i
prev_z = None
elif self.ext.can_cast(op.gate, ops.MeasurementGate):
# Measurement acts like a drain. It destroys phase information.
yield start_z, i
start_z = None
elif (isinstance(op.gate, KNOWN_Z_TYPES) and
not isinstance(op.gate.half_turns, Symbol)):
# Could be a drain. Depends if an unphaseable gate follows.
prev_z = i
elif not self.ext.can_cast(op.gate, ops.PhaseableGate):
# Unphaseable gates force earlier draining.
if prev_z is not None:
yield start_z, prev_z
start_z = None
# End of the circuit forces draining.
if start_z is not None:
yield start_z, len(circuit.moments)
def _optimize_range(self, circuit: Circuit, qubit: ops.QubitId,
start: int, drain: int):
"""Pushes Z gates from [start, drain) into the drain.
Assumes no unphaseable gates will be crossed, and that the drain is
valid.
Args:
circuit: The circuit being optimized.
qubit: The qubit along which Z operations are being merged.
start: The inclusive start of the range containing Z gates to
eject.
drain: The exclusive end of the range containing Z gates to eject.
Also the index of where the effects of the Z gates should end
up.
"""
lost_phase_turns = 0.0
for i in range(start, drain):
op = circuit.operation_at(qubit, i)
if op is None:
# Empty.
pass
elif isinstance(op.gate, KNOWN_Z_TYPES):
# Move Z effects out of the circuit and into lost_phase_turns.
circuit.clear_operations_touching([qubit], [i])
lost_phase_turns += cast(float, op.gate.half_turns) / 2
elif self.ext.can_cast(op.gate, ops.PhaseableGate):
# Adjust phaseable gates to account for the lost phase.
phaseable = self.ext.cast(op.gate, ops.PhaseableGate)
k = op.qubits.index(qubit)
circuit.clear_operations_touching(op.qubits, [i])
circuit.insert(i + 1,
phaseable.phase_by(-lost_phase_turns, k).on(
*op.qubits),
InsertStrategy.INLINE)
self._drain_into(circuit, qubit, drain, lost_phase_turns)
def _drain_into(self, circuit: Circuit, qubit: ops.QubitId,
drain: int, accumulated_phase: float):
if is_negligible_turn(accumulated_phase, self.tolerance):
return
# Drain type: end of circuit.
if drain == len(circuit.moments):
circuit.append(
ExpZGate(half_turns=2*accumulated_phase).on(qubit),
InsertStrategy.INLINE)
return
# Drain type: another Z gate.
op = cast(ops.Operation, circuit.operation_at(qubit, drain))
if isinstance(op.gate, ExpZGate):
half_turns = cast(float, op.gate.half_turns) + accumulated_phase * 2
circuit.clear_operations_touching([qubit], [drain])
circuit.insert(
drain + 1,
ExpZGate(half_turns=half_turns).on(qubit),
InsertStrategy.INLINE)
return
# Drain type: measurement gate.
# (Don't have to do anything.)
|
the-stack_0_7404 | _base_ = './faster_rcnn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
norm_cfg=dict(requires_grad=False),
norm_eval=True,
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')))
# use caffe img_norm
img_norm_cfg = dict(
mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='Resize',
img_scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
(1333, 768), (1333, 800)],
multiscale_mode='value',
keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
train=dict(pipeline=train_pipeline),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline)) |
the-stack_0_7405 | import platform
import os
import sys
# Resources shared by both the Network and Server files
BUFSIZE = 4096 * 2
PORT = 5555
INTERNAL_PORT = 4321
# The ipv4 address of the host machine. Run ipconfig from cmd to get this
HOST = "127.0.0.1"
if platform.system() == 'Darwin':
LOCAL = "127.0.0.1" #"192.168.1.154"
else:
# This allows you to try out different ports when running through DO cli
if len(sys.argv) >= 2:
LOCAL = os.getenv('HOSTNAME')
PORT = sys.argv[1]
else:
LOCAL = os.getenv('HOSTNAME')
SINGLE_PLAYER = True
# Time client waits between sending requests for changed state
CLIENT_WAIT = 0.1
# Messages
GET_STATE = 'Get'
DO_ACTION = 'Do'
INIT_MSG = 'Init'
MULLIGAN_MSG = 'Mull'
# Responses
NO_UPDATE = 'No update'
UPDATE = 'Update'
VALID_CHOICE = 'Valid choice'
INVALID_CHOICE = 'Invalid choice'
# Log into the router and set up port forwarding of port 5555 to my local machine
# Tell the router to forward to the IP I had been using
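# Hypothetical client sketch (an assumption added for illustration; the real
# Network and Server modules may frame and parse messages differently):
def example_request(message=GET_STATE):
    """Open a TCP connection to HOST:PORT, send one message, return the raw reply."""
    import socket
    with socket.create_connection((HOST, int(PORT))) as sock:
        sock.sendall(message.encode())
        return sock.recv(BUFSIZE)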
the-stack_0_7406 | """
TODO: ADD FEATURE TO ENABLE USE OF UNBOUNDED VARIABLES
Note: This does not work with the current version of PLEpy, to be fixed
in future versions
Uses a calculated "cross-talk" matrix (converts 3D counts to 2D
activity for each 3D and 2D shell) to fit first-order rate coefficients
and initial activity in 3D shells using simulated 2D planar imaging
data. Each 3D shell only moves inward.
Model:
dA5/dt = -k5*A5
dA4/dt = k5*A5 - k4*A4
dA3/dt = k4*A4 - k3*A3
dA2/dt = k3*A3 - k2*A2
dA1/dt = k2*A2 - k1*A1
where k1-k5 are the rate coefficients and k1 > k2 > k3 > k4 > k5
"""
import os
import sys
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from scipy.io import loadmat
import pyomo.environ as penv
from pyomo.dae import ContinuousSet, DerivativeVar
sys.path.append(os.path.abspath("../../"))
from plepy import PLEpy
pwd = os.getcwd()
fpath = os.path.dirname(__file__)
os.chdir(fpath)
# Import 2D data
ydata = np.load('toy2D_data_exp3.npz')['arr_0']
ytotal = ydata.sum(axis=1)
tdata = list(range(0, 81, 2))
# Import cross-talk matrix
crssfile = loadmat('shelltoy_crsstlk_dist.mat')
ctalk = crssfile['crsstlk']
ictalk = np.linalg.inv(ctalk)
iydata = np.dot(ictalk, ydata.T).T
iydata[1:, :] = (iydata[1:, :] + iydata[:-1, :])/2
# Actual data (for comparison)
datafile = loadmat('shelltoydata_exp3.mat')
data3d = datafile['a']
# Initial guesses
k0 = [5., 5., 1., 0.75, 0.5] # [p1, p2, p3, p4, k5]
a0 = np.dot(ictalk, ydata[0, :].T) # [A1, A2, a3, A4, A5]'
da0dt = [k0[i+1]*a0[i+1] - k0[i]*a0[i] for i in range(4)]
da0dt.append(-k0[4]*a0[4])
da0dt = [1e-2*a for a in da0dt]
# Create dynamic model
model = penv.ConcreteModel()
# Define parameters
model.t = ContinuousSet(bounds=(0, 81), initialize=range(81))
# Rate coefficients are fit as sum of previous rate coefficient and
# corresponding "p" parameter.
# k4 = k5 + p4, k3 = k4 + p3, etc.
model.p1 = penv.Var(initialize=k0[0], bounds=(1e-3, 100.))
model.p2 = penv.Var(initialize=k0[1], bounds=(1e-3, 100.))
model.p3 = penv.Var(initialize=k0[2], bounds=(1e-3, 100.))
model.p4 = penv.Var(initialize=k0[3], bounds=(1e-3, 100.))
model.k5 = penv.Var(initialize=k0[4], bounds=(1e-3, 100.))
# Define 3D shell states
model.A1 = penv.Var(model.t, initialize=a0[0], within=penv.NonNegativeReals)
model.A2 = penv.Var(model.t, initialize=a0[1], within=penv.NonNegativeReals)
model.A3 = penv.Var(model.t, initialize=a0[2], within=penv.NonNegativeReals)
model.A4 = penv.Var(model.t, initialize=a0[3], within=penv.NonNegativeReals)
model.A5 = penv.Var(model.t, initialize=a0[4], within=penv.NonNegativeReals)
# Initialize derivatives
model.dA1dt = DerivativeVar(model.A1, wrt=model.t, initialize=da0dt[0])
model.dA2dt = DerivativeVar(model.A2, wrt=model.t, initialize=da0dt[1])
model.dA3dt = DerivativeVar(model.A3, wrt=model.t, initialize=da0dt[2])
model.dA4dt = DerivativeVar(model.A4, wrt=model.t, initialize=da0dt[3])
model.dA5dt = DerivativeVar(model.A5, wrt=model.t, initialize=da0dt[4])
# System dynamics
def _dA1dt(m, t):
k4 = m.k5 + m.p4
k3 = k4 + m.p3
k2 = k3 + m.p2
k1 = k2 + m.p1
return m.dA1dt[t] == 1e-2*(k2*m.A2[t] - k1*m.A1[t])
model.dA1dt_ode = penv.Constraint(model.t, rule=_dA1dt)
def _dA2dt(m, t):
k4 = m.k5 + m.p4
k3 = k4 + m.p3
k2 = k3 + m.p2
    return m.dA2dt[t] == 1e-2*(k3*m.A3[t] - k2*m.A2[t])
model.dA2dt_ode = penv.Constraint(model.t, rule=_dA2dt)
def _dA3dt(m, t):
k4 = m.k5 + m.p4
k3 = k4 + m.p3
return m.dA3dt[t] == 1e-2*(k4*m.A4[t] - k3*m.A3[t])
model.dA3dt_ode = penv.Constraint(model.t, rule=_dA3dt)
def _dA4dt(m, t):
k4 = m.k5 + m.p4
return m.dA4dt[t] == 1e-2*(m.k5*m.A5[t] - k4*m.A4[t])
model.dA4dt_ode = penv.Constraint(model.t, rule=_dA4dt)
def _dA5dt(m, t):
return m.dA5dt[t] == 1e-2*(- m.k5*m.A5[t])
model.dA5dt_ode = penv.Constraint(model.t, rule=_dA5dt)
# Objective function (SSE)
def _obj(m):
a3D = np.array([[m.A1[t], m.A2[t], m.A3[t], m.A4[t], m.A5[t]]
for t in tdata]).T
a2D = np.dot(ctalk, a3D).T
# err = (ydata - a2D)**2
err = (iydata - a3D.T)**2
return sum(sum(err))
model.obj = penv.Objective(rule=_obj)
# Set-up solver
TFD=penv.TransformationFactory("dae.finite_difference")
TFD.apply_to(model, nfe=2*len(model.t), wrt=model.t, scheme="BACKWARD")
solver = penv.SolverFactory('ipopt')
solver.options['linear_solver'] = 'ma97' # academic solver
solver.options['tol'] = 1e-6
solver.options['max_iter'] = 6000
results = solver.solve(model, keepfiles=False, tee=True)
model.solutions.load_from(results)
# Plot results
sns.set(context='talk')
plt.figure()
ccycle = plt.rcParams['axes.prop_cycle'].by_key()['color']
plt.plot(tdata, data3d[:, 0], ls='None', marker='o', color=ccycle[0])
plt.plot(tdata, data3d[:, 1], ls='None', marker='o', color=ccycle[1])
plt.plot(tdata, data3d[:, 2], ls='None', marker='o', color=ccycle[2])
plt.plot(tdata, data3d[:, 3], ls='None', marker='o', color=ccycle[3])
plt.plot(tdata, data3d[:, 4], ls='None', marker='o', color=ccycle[4])
# plt.plot(tdata, iydata[:, 0], label='Shell 1', color=ccycle[0])
# plt.plot(tdata, iydata[:, 1], label='Shell 2', color=ccycle[1])
# plt.plot(tdata, iydata[:, 2], label='Shell 3', color=ccycle[2])
# plt.plot(tdata, iydata[:, 3], label='Shell 4', color=ccycle[3])
# plt.plot(tdata, iydata[:, 4], label='Shell 5', color=ccycle[4])
plt.plot(model.t, model.A1[:](), label='Shell 1', color=ccycle[0])
plt.plot(model.t, model.A2[:](), label='Shell 2', color=ccycle[1])
plt.plot(model.t, model.A3[:](), label='Shell 3', color=ccycle[2])
plt.plot(model.t, model.A4[:](), label='Shell 4', color=ccycle[3])
plt.plot(model.t, model.A5[:](), label='Shell 5', color=ccycle[4])
plt.xlabel('Time (min)')
plt.ylabel('Activity (counts)')
plt.legend(bbox_to_anchor=(1.05, 1), loc=2)
plt.tight_layout()
plt.show()
# Initialize PLEpy object
ps = [model.p1(), model.p2(), model.p3(), model.p4(), model.k5()]
ps.reverse()
ks = np.cumsum(ps)
A0s = [model.A1[0](), model.A2[0](), model.A3[0](), model.A4[0](),
model.A5[0]()]
PLobj = PLEpy(model,
['p1', 'p2', 'p3', 'p4', 'k5', 'A1', 'A2', 'A3', 'A4', 'A5'],
indices={'t0': [0]})
PLobj.set_index('A1', 't0')
PLobj.set_index('A2', 't0')
PLobj.set_index('A3', 't0')
PLobj.set_index('A4', 't0')
PLobj.set_index('A5', 't0')
# Get confidence limits using binary search (currently won't work
# because initial activity is unbounded)
PLobj.get_clims(['A1', 'A2', 'A3', 'A4', 'A5'])
# Generate profile likelihood curves
PLobj.get_PL(['A1', 'A2', 'A3', 'A4', 'A5'])
PLobj.plot_PL(pnames=['A1', 'A2', 'A3', 'A4', 'A5'], join=True, jmax=5)
os.chdir(pwd)
|
the-stack_0_7410 | # Pelenet modules
from .anisotropic import AnisotropicExperiment
from ..network import ReservoirNetwork
from ._abstract import Experiment
"""
@desc: Class for running an experiment; it usually involves running several
networks (e.g. for training and testing)
"""
class AnisotropicReadoutExperiment(AnisotropicExperiment):
"""
# @desc: Define parameters for this experiment
# """
def defineParameters(self):
# Parent parameters
aniP = super().defineParameters()
expP = {
# Experiment
'seed': 3, # Random seed
'trials': 25, # Number of trials
'stepsPerTrial': 110, # Number of simulation steps for every trial
'isReset': True, # Activate reset after every trial
# Network
'refractoryDelay': 2, # Refactory period
'voltageTau': 10.24, # Voltage time constant
'currentTau': 10.78, # Current time constant
'thresholdMant': 1000, # Spiking threshold for membrane potential
'reservoirConnProb': 0.05,
# Anisotropic
'anisoStdE': 12, # Space constant, std of gaussian for excitatory neurons
'anisoStdI': 9, # Space constant, std of gaussian for inhibitory neurons (range 9 - 11)
'anisoShift': 1, # Intensity of the shift of the connectivity distribution for a neuron
#'percShift': 1, # Percentage of shift (default 1)
'anisoPerlinScale': 4, # Perlin noise scale, high value => dense valleys, low value => broad valleys
'weightExCoefficient': 12, # Coefficient for excitatory anisotropic weight
'weightInCoefficient': 48, # Coefficient for inhibitory anisotropic weight
# Input
'inputIsTopology': True, # Activate a 2D input area
'inputIsLeaveOut': True, # Leaves one target neuron out per trial
'patchNeuronsShiftX': 44, # x-position of the input area
'patchNeuronsShiftY': 24, # y-position of the input area
'inputNumTargetNeurons': 25, # Number of target neurons for the input
'inputSteps': 5, # Number of steps the network is activated by the input
'inputWeightExponent': 0, # The weight exponent of the weights from the generator to the target neurons
'inputGenSpikeProb': 1.0, # Spiking probability of the spike generators
# Output
'partitioningClusterSize': 10, # Size of clusters connected to an output neuron (6|10)
# Probes
'isExSpikeProbe': True, # Probe excitatory spikes
'isInSpikeProbe': True, # Probe inhibitory spikes
'isOutSpikeProbe': True # Probe output spikes
}
# Experiment parameters overwrite parameters from parent experiment
return { **aniP, **expP }
"""
@desc: Build all networks
"""
def build(self):
# Instanciate innate network
self.net = ReservoirNetwork(self.p)
self.net.landscape = None
# Draw anisotropic mask and weights
self.drawMaskAndWeights()
# Draw output weights
self.net.drawOutputMaskAndWeights()
# Connect ex-in reservoir
self.net.connectReservoir()
# Connect reservoir to output
self.net.connectOutput()
# Add patch input
self.net.addInput()
# Add Probes
self.net.addProbes()
|
the-stack_0_7413 | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
from flask import (
Flask, abort, request, redirect, url_for, render_template, g,
send_from_directory, jsonify)
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy.sql.expression import func
from PIL import Image, ImageDraw, ImageFont
from configuration import (
get_args, get_db_uri, get_templates_list,
BASE_DIR, MEME_DIR, FONT_PATH)
app = Flask(__name__)
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['SQLALCHEMY_DATABASE_URI'] = get_db_uri()
db = SQLAlchemy(app)
# Model for representing created Memes
class Meme(db.Model):
id = db.Column(db.Integer, primary_key=True)
template = db.Column(db.String(80), nullable=False)
top_text = db.Column(db.String(80), nullable=False)
bot_text = db.Column(db.String(80), nullable=False)
def __repr__(self):
return '<Meme %r>' % self.id
Portfolio_Stocks = db.Table(
'Portfolio_Stocks',
db.Column('stock_id', db.Integer, db.ForeignKey('stock.id'),
primary_key=True),
db.Column('portfolio_id', db.Integer, db.ForeignKey('portfolio.id'),
primary_key=True)
)
# class Stock(db.Model):
# id = db.Column(db.Integer, primary_key=True)
# name = db.Column(db.String(80), nullable=False)
# symbol = db.Column(db.String(10), nullable=False)
# price = db.Column(db.Float, nullable=False)
# def __repr__(self):
# return '<Stock %r>' % self.id
# class Portfolio(db.Model):
# id = db.Column(db.Integer, primary_key=True)
# portfolio_owner = db.Column(db.String(80), nullable=False)
#     stock_id = db.Column(db.Integer, db.ForeignKey('stock.id'), nullable=False)
# def __repr__(self):
# return '<Portfolio %r>' % self.id
class Stock(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(80), nullable=False)
symbol = db.Column(db.String(10), nullable=False)
price = db.Column(db.Float, nullable=False)
portfolios_linked = db.relationship('Portfolio',
secondary=Portfolio_Stocks,
lazy='subquery',
backref=db.backref('stocks_linked',
lazy=True)
)
def __repr__(self):
return '<Stock %r>' % self.id
def serialize(self):
return {
"id": self.id,
"name": self.name,
"symbol": self.symbol,
"price": self.price
}
class Portfolio(db.Model):
id = db.Column(db.Integer, primary_key=True)
owner = db.Column(db.String(80), nullable=False)
def __repr__(self):
return '<Portfolio %r>' % self.id
# Portfolio_Stocks = db.Table(
# db.Column('stock_id', db.Integer, db.ForeignKey('stock.id'),
# primary_key=True),
# db.Column('portfolio_id', db.Integer, db.ForeignKey('portfolio.id'),
# primary_key=True)
# )
#
#
@app.before_first_request
def setup_db():
# Create folder for memes if it doesn't exist
if not os.path.exists(MEME_DIR):
os.makedirs(MEME_DIR)
# Create tables for models if they don't exist
db.create_all()
@app.before_request
def setup_request_time():
start_time = time.time()
g.request_time = lambda: "%d ms" % ((time.time() - start_time) * 1000)
@app.route('/')
def index():
return redirect(url_for("get_create_menu"))
@app.route('/recent', methods=['GET'])
def view_recent():
memes = Meme.query.order_by(Meme.id.desc()).limit(20).all()
return render_template('recent.html', memes=memes)
@app.route('/random', methods=['GET'])
def view_random():
meme = Meme.query.order_by(func.random()).first()
return redirect(url_for('view_meme', meme_id=meme.id))
@app.route('/template', methods=['GET'])
def get_create_menu():
templates = get_templates_list()
return render_template('view.html', templates=templates)
@app.route('/template/<string:template>', methods=['GET'])
def get_create(template):
if template not in get_templates_list():
abort(400, "Template does not exist.")
return render_template('create_meme.html', template=template)
@app.route('/meme/<int:meme_id>', methods=['GET'])
def view_meme(meme_id):
meme_file = os.path.join(MEME_DIR, '%d.png' % meme_id)
if not os.path.exists(meme_file):
generate_meme(meme_file, meme_id)
print(meme_file)
return send_from_directory(MEME_DIR, '%d.png' % meme_id)
@app.route('/meme', methods=['POST'])
def create_meme():
try:
meme = Meme(
template=request.form['template'],
top_text=request.form['top'],
bot_text=request.form['bottom']
)
db.session.add(meme)
db.session.commit()
return redirect(url_for('view_meme', meme_id=meme.id))
except KeyError:
abort(400, "Incorrect parameters.")
# Creates a stock
@app.route('/stock', methods=["POST"])
def create_stock():
try:
stock = Stock(
name=request.form['name'],
symbol=request.form['symbol'],
price=request.form['price']
)
db.session.add(stock)
db.session.commit()
print("stock created!")
# return redirect(url_for('view_stock', stock_id=stock.id))
return redirect('/template')
except KeyError:
abort(400, "Incorrect Parameters!")
# Gets all stocks
@app.route('/api/v1/stocks', methods=["GET"])
def api_stocks():
stocks = Stock.query.order_by(Stock.id.desc()).all()
return jsonify([s.serialize() for s in stocks])
# Gets all stocks
@app.route('/stock', methods=["GET"])
def view_stocks():
stocks = Stock.query.order_by(Stock.id.desc()).all()
return render_template('stocks.html', stocks=stocks)
# Get stock by stock id
@app.route('/stock/<int:stock_id>', methods=["GET"])
def view_stock(stock_id):
stock = Stock.query.filter_by(id=stock_id).first()
return render_template('stock_id.html', stock=stock)
# Renders create_stock.html
@app.route('/stock/cstock', methods=["GET"])
def get_create_stock():
return render_template("create_stock.html")
# Renders create_stock.html
@app.route('/portfolio/cportfolio', methods=["GET"])
def get_create_portfolio():
return render_template("create_portfolio.html")
# Creates a portfolio
@app.route('/portfolio', methods=["POST"])
def create_portfolio():
try:
portfolio = Portfolio(
owner=request.form['owner']
)
db.session.add(portfolio)
db.session.commit()
print("portfolio created!")
return redirect('/template')
except KeyError:
abort(400, "Incorrect Parameters!")
# Gets all portfolios
@app.route('/portfolio', methods=["GET"])
def view_portfolios():
portfolios = Portfolio.query.order_by(Portfolio.id.desc()).all()
return render_template('portfolios.html', portfolios=portfolios)
# Gets portfolio by stock id
@app.route('/portfolio/<int:portfolio_id>', methods=["GET"])
def view_portfolio(portfolio_id):
portfolio = Portfolio.query.filter_by(id=portfolio_id).first()
return render_template('portfolio_id.html', portfolio=portfolio)
# Allows a stock to be assigned to a portfolio
@app.route('/portfolio/psip/<int:stock_id>/<int:portfolio_id>',
methods=["POST"])
def put_stock_in_portfolio(stock_id, portfolio_id):
portfolio_rel = Portfolio.query.filter_by(id=portfolio_id).first()
stock_rel = Stock.query.filter_by(id=stock_id).first()
portfolio_rel.stocks_linked.append(stock_rel)
print("stock assigned to portfolio")
db.session.commit()
# return redirect('/template')
return render_template('portfolio_id.html', portfolio=portfolio_rel)
def generate_meme(file, meme_id):
# Query for meme
meme = Meme.query.filter(Meme.id == meme_id).first()
if meme is None:
abort(400, 'Meme does not exist.')
# Load template
template_file = os.path.join(
BASE_DIR, 'static', 'templates', meme.template)
if not os.path.exists(template_file):
abort(400, 'Template does not exist')
template = Image.open(template_file)
# Get Font Details
font, top_loc, bot_loc = calc_font_details(
meme.top_text, meme.bot_text, template.size)
draw = ImageDraw.Draw(template)
draw_text(draw, top_loc[0], top_loc[1], meme.top_text, font)
draw_text(draw, bot_loc[0], bot_loc[1], meme.bot_text, font)
template.save(file)
# Calculate font size and location
def calc_font_details(top, bot, img_size):
font_size = 50
font = ImageFont.truetype(FONT_PATH, font_size)
max_width = img_size[0] - 20
# Get ideal font size
while font.getsize(top)[0] > max_width or font.getsize(bot)[0] > max_width:
font_size = font_size - 1
font = ImageFont.truetype(FONT_PATH, font_size)
# Get font locations
top_loc = ((img_size[0] - font.getsize(top)[0])/2, -5)
bot_size = font.getsize(bot)
bot_loc = ((img_size[0] - bot_size[0])/2, img_size[1] - bot_size[1] - 5)
return font, top_loc, bot_loc
# Draws the given text with a border
def draw_text(draw, x, y, text, font):
# Draw border
draw.text((x-1, y-1), text, font=font, fill="black")
draw.text((x+1, y-1), text, font=font, fill="black")
draw.text((x-1, y+1), text, font=font, fill="black")
draw.text((x+1, y+1), text, font=font, fill="black")
# Draw text
draw.text((x, y), text, font=font, fill="white")
if __name__ == '__main__':
# Run dev server (for debugging only)
args = get_args()
app.run(host=args.host, port=args.port, debug=True)
|
the-stack_0_7417 | import torch
import torch.nn as nn
try:
from torch.hub import load_state_dict_from_url
except ImportError:
from torch.utils.model_zoo import load_url as load_state_dict_from_url
# from .utils import load_state_dict_from_url
from ib_layers import *
# __all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
# 'resnet152', 'resnext50_32x4d', 'resnext101_32x8d',
# 'wide_resnet50_2', 'wide_resnet101_2']
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
'resnext50_32x4d': 'https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth',
'resnext101_32x8d': 'https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth',
'wide_resnet50_2': 'https://download.pytorch.org/models/wide_resnet50_2-95faca4d.pth',
'wide_resnet101_2': 'https://download.pytorch.org/models/wide_resnet101_2-32ee1156.pth',
}
resnet_alpha = 1.0
def conv3x3(in_planes, out_planes, wib, stride=1, groups=1, dilation=1):
"""3x3 convolution with padding"""
# resnet_wib = False
resnet_wib = True
resnet_alpha = 1E-3
if not wib:
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=dilation, groups=groups, bias=False, dilation=dilation)
else:
return WibConv2d(alpha=resnet_alpha,
in_channels=in_planes, out_channels=out_planes, kernel_size=3, stride=stride,
padding=dilation, groups=groups, bias=False, dilation=dilation)
def conv1x1(in_planes, out_planes, wib, stride=1):
"""1x1 convolution"""
# resnet_wib = False
resnet_wib = True
resnet_alpha = 1E-3
if not wib:
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
else:
return WibConv2d(alpha=resnet_alpha,
in_channels=in_planes, out_channels=out_planes, kernel_size=1, stride=stride, bias=False)
cfg = {
    ##### In each tuple: the first number is the number of convolution filters; the second is the
    ##### layer type (1 = plain conv layer, 2 = ResNet 2-layer BasicBlock, 3 = ResNet 3-layer
    ##### Bottleneck); the third is the stride of the block's first conv layer when it is a ResNet
    ##### block; the fourth is the KL multiplier used for the information-bottleneck layers.
#resnet18 (2,2,2,2)
'G5': [(64, 1, 1, 1.0/32), ## InformationBottleneck
'M',
(64, 2, 1, 1.0/32), ## InformationBottleneck
(64, 2, 1, 1.0/32), ## InformationBottleneck
(128, 2, 2, 1.0/16), ## InformationBottleneck
(128, 2, 1, 1.0/16), ## InformationBottleneck
(256, 2, 2, 1.0/8), ## InformationBottleneck
(256, 2, 1, 1.0/8), ## InformationBottleneck
(512, 2, 2, 1.0/4), ## InformationBottleneck
(512, 2, 1, 1.0/4), ## InformationBottleneck
'A'],
#resnet34 (3,4,6,3)
'G1': [(64, 1, 1, 1.0/32), ## InformationBottleneck
'M',
(64, 2, 1, 1.0/32), ## InformationBottleneck
(64, 2, 1, 1.0/32), ## InformationBottleneck
(64, 2, 1, 1.0/32), ## InformationBottleneck
(128, 2, 2, 1.0/16), ## InformationBottleneck
(128, 2, 1, 1.0/16), ## InformationBottleneck
(128, 2, 1, 1.0/16), ## InformationBottleneck
(128, 2, 1, 1.0/16), ## InformationBottleneck
(256, 2, 2, 1.0/8), ## InformationBottleneck
(256, 2, 1, 1.0/8), ## InformationBottleneck
(256, 2, 1, 1.0/8), ## InformationBottleneck
(256, 2, 1, 1.0/8), ## InformationBottleneck
(256, 2, 1, 1.0/8), ## InformationBottleneck
(256, 2, 1, 1.0/8), ## InformationBottleneck
(512, 2, 2, 1.0/4), ## InformationBottleneck
(512, 2, 1, 1.0/4), ## InformationBottleneck
(512, 2, 1, 1.0/4), ## InformationBottleneck
'A'],
# resnet50 (3,4,6,3)
'G2': [(64, 1, 1, 1.0 / 32), ## InformationBottleneck
'M',
(64, 3, 1, 1.0 / 32), ## InformationBottleneck
(64, 3, 1, 1.0 / 32), ## InformationBottleneck
(64, 3, 1, 1.0 / 32), ## InformationBottleneck
(128, 3, 2, 1.0 / 16), ## InformationBottleneck
(128, 3, 1, 1.0 / 16), ## InformationBottleneck
(128, 3, 1, 1.0 / 16), ## InformationBottleneck
(128, 3, 1, 1.0 / 16), ## InformationBottleneck
(256, 3, 2, 1.0 / 8), ## InformationBottleneck
(256, 3, 1, 1.0 / 8), ## InformationBottleneck
(256, 3, 1, 1.0 / 8), ## InformationBottleneck
(256, 3, 1, 1.0 / 8), ## InformationBottleneck
(256, 3, 1, 1.0 / 8), ## InformationBottleneck
(256, 3, 1, 1.0 / 8), ## InformationBottleneck
(512, 3, 2, 1.0 / 4), ## InformationBottleneck
(512, 3, 1, 1.0 / 4), ## InformationBottleneck
(512, 3, 1, 1.0 / 4), ## InformationBottleneck
'A']
}
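# Illustrative helper (an assumption added for documentation; nothing in the
# model code below calls it): decodes one entry of the `cfg` table above into a
# human-readable description, following the field order documented in the
# comment preceding `cfg`.
def describe_cfg_entry(entry):
    """Return a short description of one cfg entry ('M', 'A', or a 4-tuple)."""
    if entry == 'M':
        return 'max-pooling layer'
    if entry == 'A':
        return 'adaptive average-pooling layer'
    channels, layer_type, stride, kl_mult = entry
    kind = {1: 'plain conv layer', 2: 'BasicBlock stage', 3: 'Bottleneck stage'}[layer_type]
    return '%s: %d base channels, first-conv stride %d, ib kl_mult %.4f' % (
        kind, channels, stride, kl_mult)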
def reparameterize(mu, logalpha):
std = logalpha.mul(0.5).exp_()
eps = torch.FloatTensor(std.size(0)).cuda(mu.get_device()).normal_()
eps = Variable(eps)
# phi = std * eps - std * std / 2
# return phi
phi = (std * eps - std * std / 2).exp_()
return phi * mu
# std = logalpha.mul(0.5).exp_()
# eps = torch.FloatTensor(std.size(0)).cuda(mu.get_device()).normal_()
# eps = Variable(eps)
# return mu + eps * std
class WeightIB(nn.Module):
def __init__(self, out_channels, init_mag=9, init_var=0.01):
super(WeightIB, self).__init__()
self.dim = out_channels
print(self.dim)
# self.phi = Parameter(torch.Tensor(self.dim))
self.logalpha = Parameter(torch.Tensor(self.dim))
self.mu = Parameter(torch.Tensor(self.dim))
self.epsilon = 1e-8
self.offset = 0.00
self.mu.data.normal_(1, init_var)
self.logalpha.data.normal_(-init_mag, init_var)
def forward(self, x, training=False):
if self.training:
# z_scale = reparameterize(self.mu, self.logalpha)
# z_scale_exp = z_scale.exp_()
# hard_mask, _ = self.get_mask_hard(self.epsilon)
# z_scale = z_scale_exp * Variable(hard_mask)
z_scale = reparameterize(self.mu, self.logalpha)
hard_mask, _ = self.get_mask_hard(self.epsilon)
z_scale *= Variable(hard_mask)
# print('self.mu: ', self.mu)
# print('z_scale1: ', z_scale)
# print('z_scale1: ', z_scale)
else:
# z_scale = reparameterize(self.mu, self.logalpha)
# z_scale_exp = z_scale.exp_()
z_scale = reparameterize(self.mu, self.logalpha)
hard_mask, _ = self.get_mask_hard(self.epsilon)
z_scale *= Variable(hard_mask)
# z_scale = Variable(self.get_mask_weighted(self.epsilon))
# print('z_scale2: ', z_scale)
# new_shape = self.adapt_shape(z_scale_exp.size(), x.size())
# return x * z_scale_exp.view(new_shape)
new_shape = self.adapt_shape(z_scale.size(), x.size())
return x * z_scale.view(new_shape)
def adapt_shape(self, src_shape, x_shape):
if len(src_shape) == 2:
new_shape = src_shape
# print('new_shape1: ',new_shape)
else:
new_shape = (src_shape[0], 1)
# print('new_shape2: ', new_shape)
if len(x_shape)>2:
new_shape = list(new_shape)
new_shape += [1 for i in range(len(x_shape)-2)]
# print('new_shape3: ', new_shape)
return new_shape
def get_mask_hard(self, threshold=0):
hard_mask = (self.mu.abs() > threshold).float()
prune = self.mu.abs().cpu() > threshold # e.g. [True, False, True, True, False]
mask = np.where(prune)[0] # e.g. [0, 2, 3]
return hard_mask, len(mask)
def get_mask_weighted(self, threshold=0):
mask = (self.mu.abs() > threshold).float() * self.mu.data.float()
return mask
def compute_Wib_upbound(self, logalpha):
return - 0.5 * logalpha.sum()
class WibConv2d(nn.Conv2d):
def __init__(self, alpha, **kwargs):
super(WibConv2d, self).__init__(**kwargs)
self.alpha = alpha
self.weight_ib = WeightIB(self.out_channels)
self.W = torch.empty(self.weight.data.size())
torch.nn.init.xavier_normal(self.W, gain=1)
def forward(self, x):
if self.training:
# kernel_in = self.weight.data
# self.W.data = self.weight_ib(self.weight, training=self.training)
# y = nn.functional.conv2d(x, self.W, self.bias, self.stride, self.padding, self.dilation, self.groups)
new_weight = self.weight_ib(self.weight, training=self.training)
y = nn.functional.conv2d(x, new_weight, self.bias, self.stride, self.padding, self.dilation, self.groups)
# y = nn.functional.conv2d(x, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups)
# self.W.data= self.W
else:
# y = nn.functional.conv2d(x, self.W, self.bias, self.stride, self.padding, self.dilation, self.groups)
new_weight = self.weight_ib(self.weight, training=self.training)
y = nn.functional.conv2d(x, new_weight, self.bias, self.stride, self.padding, self.dilation, self.groups)
# y = nn.functional.conv2d(x, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups)
# self.weight.data = self.W.data
# print('self.weight2: ', self.weight)
# new_weight = self.weight_ib(self.weight, training=self.training)
# y = nn.functional.conv2d(x, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups)
# y = nn.functional.conv2d(x, new_weight, self.bias, self.stride, self.padding, self.dilation, self.groups)
return y
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
base_width=64, dilation=1, norm_layer=None, wib=0, kl_mult=1):
super(BasicBlock, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
if groups != 1 or base_width != 64:
raise ValueError('BasicBlock only supports groups=1 and base_width=64')
if dilation > 1:
raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
# Both self.conv1 and self.downsample layers downsample the input when stride != 1
# self.wib=wib
self.conv1 = conv3x3(inplanes, planes, wib, stride)
self.bn1 = norm_layer(planes)
self.relu = nn.ReLU(inplace=True)
self.ib1 = InformationBottleneck(planes, kl_mult=kl_mult)
self.conv2 = conv3x3(planes, planes, wib)
self.bn2 = norm_layer(planes)
self.ib2 = InformationBottleneck(planes, kl_mult=kl_mult)
self.downsample = downsample
self.stride = stride
def compute_Wib_upbound(self, ):
wib_upbound =0
wib_upbound += self.conv1.weight_ib.compute_Wib_upbound(self.conv1.weight_ib.logalpha)
        # An earlier version mistakenly reused conv1's logalpha here:
        # wib_upbound += self.conv2.weight_ib.compute_Wib_upbound(self.conv1.weight_ib.logalpha)
        # Corrected version:
wib_upbound += self.conv2.weight_ib.compute_Wib_upbound(self.conv2.weight_ib.logalpha)
return wib_upbound
def compute_compression_ratio(self, threshold, pre_mask, n=0):
# applicable for structures with global pooling before fc
total_params, pruned_params, remain_params = 0, 0, 0
fmap_size=32
out_channels1 = self.conv1.out_channels
out_channels2 = self.conv2.out_channels
in_channels1=self.conv1.in_channels
in_channels2 = self.conv2.in_channels
total_params = in_channels1 * out_channels1 * 9
total_params += in_channels2 * out_channels2 * 9
hard_mask1 = self.conv1.get_mask_hard(threshold)
hard_mask2 = self.conv2.get_mask_hard(threshold)
remain_params = pre_mask * hard_mask1 * 9
remain_params += hard_mask1 *hard_mask2 * 9
pruned_params = total_params - remain_params
flops = (fmap_size ** 2) * remain_params
# print('in_channels1: {}, in_channels2: {}, out_channels1:{}, out_channels2: {},'
# .format(in_channels1, in_channels2, out_channels1, out_channels2))
# print('pre_mask: {}, hard_mask1: {}, hard_mask2:{},'
# .format(pre_mask, hard_mask1, hard_mask2))
# print('total parameters: {}, pruned parameters: {}, remaining params:{}, remaining flops: {},'
# .format(total_params, pruned_params, remain_params, flops))
return total_params, pruned_params, remain_params, flops
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.ib1(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.ib2(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class Bottleneck(nn.Module):
# Bottleneck in torchvision places the stride for downsampling at 3x3 convolution(self.conv2)
# while original implementation places the stride at the first 1x1 convolution(self.conv1)
# according to "Deep residual learning for image recognition"https://arxiv.org/abs/1512.03385.
# This variant is also known as ResNet V1.5 and improves accuracy according to
# https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch.
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
base_width=64, dilation=1, norm_layer=None, wib=0, kl_mult=1):
super(Bottleneck, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
width = int(planes * (base_width / 64.)) * groups
# Both self.conv2 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv1x1(inplanes, width, wib)
self.bn1 = norm_layer(width)
self.conv2 = conv3x3(width, width, wib, stride, groups, dilation)
self.bn2 = norm_layer(width)
self.ib2 = InformationBottleneck(width, kl_mult=kl_mult)
self.conv3 = conv1x1(width, planes * self.expansion, wib)
self.bn3 = norm_layer(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def compute_Wib_upbound(self, ):
wib_upbound =0
wib_upbound += self.conv1.weight_ib.compute_Wib_upbound(self.conv1.weight_ib.logalpha)
wib_upbound += self.conv2.weight_ib.compute_Wib_upbound(self.conv2.weight_ib.logalpha)
wib_upbound += self.conv3.weight_ib.compute_Wib_upbound(self.conv3.weight_ib.logalpha)
return wib_upbound
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.ib2(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class RESNET_IB(nn.Module):
def __init__(self, block, config=None, mag=9, batch_norm=False, threshold=0,
init_var=0.01, sample_in_training=True, sample_in_testing=False, n_cls=10, no_ib=False, a=0.5, b=0.5,
                 ### ResNet default arguments
zero_init_residual=False, wib=1,
groups=1, width_per_group=64, replace_stride_with_dilation=None,
norm_layer=None
):
super(RESNET_IB, self).__init__()
self.expansion = block.expansion
        ### ResNet initialization
if norm_layer is None:
norm_layer = nn.BatchNorm2d
self._norm_layer = norm_layer
# self.layers = layers
self.wib = wib
self.inplanes = 64
self.dilation = 1
if replace_stride_with_dilation is None:
# each element in the tuple indicates if we should replace
# the 2x2 stride with a dilated convolution instead
replace_stride_with_dilation = [False, False, False]
if len(replace_stride_with_dilation) != 3:
raise ValueError("replace_stride_with_dilation should be None "
"or a 3-element tuple, got {}".format(replace_stride_with_dilation))
self.groups = groups
self.base_width = width_per_group
self.init_mag = mag
self.threshold = threshold
self.config = config
self.init_var = init_var
self.sample_in_training = sample_in_training
self.sample_in_testing = sample_in_testing
self.no_ib = no_ib
self.a = a
self.b = b
print('Using structure1 {}'.format(cfg[config]))
self.conv_layers, conv_kl_list = self.make_conv_layers(cfg[config], batch_norm, block)
print('Using structure {}'.format(cfg[config]))
# print('conv_layers {}'.format(self.conv_layers))
print('conv_layers {}'.format(self.conv_layers))
print('conv_kl_list {}'.format(conv_kl_list))
# self.compute_Wib_upbound()
fc_ib1 = InformationBottleneck(512*block.expansion, mask_thresh=threshold, init_mag=self.init_mag, init_var=self.init_var,
sample_in_training=sample_in_training, sample_in_testing=sample_in_testing,a=self.a,b=self.b)
fc_ib2 = InformationBottleneck(512*block.expansion, mask_thresh=threshold, init_mag=self.init_mag, init_var=self.init_var,
sample_in_training=sample_in_training, sample_in_testing=sample_in_testing,a=self.a,b=self.b)
self.n_cls = n_cls
# self.n = 2048
# self.n = 4096
self.n = 1024
if self.config in ['G1', 'D6']:
# t3p3 t4p2
self.fc_layers = nn.Sequential(nn.Linear(512*block.expansion, self.n_cls))
self.kl_list = conv_kl_list
#resnet32
init_kl_list = [64, 64, 64, 64, 64, 64, 64,
128, 128, 128, 128, 128, 128, 128, 128,
256, 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, 256,
512, 512, 512, 512, 512, 512]
self.init_kl_list = [x / self.n for x in init_kl_list]
# resnet32
kl_mult_temp = [64, 64, 64, 64, 64, 64, 64,
128, 128, 128, 128, 128, 128, 128, 128,
256, 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, 256,
512, 512, 512, 512, 512, 512]
self.kl_mult_temp = [x / self.n for x in kl_mult_temp]
self.ratio = [1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1]
_,self.last_prune_stat = self.get_masks(hard_mask=True, threshold=threshold)
_,self.pruned_structure = self.get_masks(hard_mask=True, threshold=threshold)
elif self.config == 'G2':
# t3p3 t4p2
self.fc_layers = nn.Sequential(nn.Linear(512 * block.expansion, self.n_cls))
self.kl_list = conv_kl_list
# resnet50
init_kl_list = [64, 64, 64, 64,
128, 128, 128, 128,
256, 256, 256, 256, 256, 256,
512, 512, 512]
# init_kl_list = [256, 256, 256, 256,
# 256, 256, 256, 256,
# 256, 256, 256, 256, 256, 256,
# 256, 256, 256]
self.init_kl_list = [x / self.n for x in init_kl_list]
# resnet50
kl_mult_temp = [64, 64, 64, 64,
128, 128, 128, 128,
256, 256, 256, 256, 256, 256,
512, 512, 512]
# kl_mult_temp = [256, 256, 256, 256,
# 256, 256, 256, 256,
# 256, 256, 256, 256, 256, 256,
# 256, 256, 256]
self.kl_mult_temp = [x / self.n for x in kl_mult_temp]
self.ratio = [1, 1, 1, 1,
1, 1, 1, 1,
1, 1, 1, 1, 1, 1,
1, 1, 1]
_, self.last_prune_stat = self.get_masks(hard_mask=True, threshold=threshold)
_, self.pruned_structure = self.get_masks(hard_mask=True, threshold=threshold)
elif self.config == 'G5':
# t3p3 t4p2
self.fc_layers = nn.Sequential(nn.Linear(512*block.expansion, self.n_cls))
self.kl_list = conv_kl_list
init_kl_list = [64,
64, 64, 64, 64,
128, 128, 128, 128,
256, 256, 256, 256,
512, 512, 512, 512]
self.init_kl_list = [x / self.n for x in init_kl_list]
kl_mult_temp = [64,
64, 64, 64, 64,
128, 128, 128, 128,
256, 256, 256, 256,
512, 512, 512, 512]
self.kl_mult_temp = [x / self.n for x in kl_mult_temp]
self.ratio = [1,
1, 1, 1, 1,
1, 1, 1, 1,
1, 1, 1, 1,
1, 1, 1, 1]
_,self.last_prune_stat = self.get_masks(hard_mask=True, threshold=threshold)
_,self.pruned_structure = self.get_masks(hard_mask=True, threshold=threshold)
else:
# D4 t3p1
fc_layer_list = [nn.Linear(512, 512), nn.ReLU(), nn.Linear(512, 512), nn.ReLU(), nn.Linear(512, self.n_cls)] if no_ib else \
[nn.Linear(512, 512), nn.ReLU(), fc_ib1, nn.Linear(512, 512), nn.ReLU(), fc_ib2, nn.Linear(512, self.n_cls)]
self.fc_layers = nn.Sequential(*fc_layer_list)
self.kl_list = conv_kl_list + [fc_ib1, fc_ib2]
self.init_kl_list = [1/32, 1/32, 1/16, 1/16, 1/8, 1/8, 1/8, 1/4, 1/4, 1/4, 1/2, 1/2, 1/2, 1, 1]
self.kl_mult_temp = [1/32, 1/32, 1/16, 1/16, 1/8, 1/8, 1/8, 1/4, 1/4, 1/4, 1/2, 1/2, 1/2, 1, 1]
_,self.last_prune_stat = self.get_masks(hard_mask=True, threshold=threshold)
_,self.pruned_structure = self.get_masks(hard_mask=True, threshold=threshold)
print(self.kl_mult_temp)
print(self.init_kl_list)
        ### ResNet weight initialization
for m in self.modules():
if isinstance(m, nn.Conv2d):
print('ok1')
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
# elif isinstance(m, WibConv2d):
# print('ok2')
# nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# Zero-initialize the last BN in each residual branch,
# so that the residual branch starts with zeros, and each residual block behaves like an identity.
# This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
if zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck):
nn.init.constant_(m.bn3.weight, 0)
elif isinstance(m, BasicBlock):
nn.init.constant_(m.bn2.weight, 0)
    ### ResNet's _make_layer helper
def _make_layer(self, block, planes, blocks=1, stride=1, dilate=False, kl_mult=1 ):
norm_layer = self._norm_layer
wib = self.wib
downsample = None
previous_dilation = self.dilation
if dilate:
self.dilation *= stride
stride = 1
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, wib, stride),
norm_layer(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample, self.groups,
self.base_width, previous_dilation, norm_layer, wib, kl_mult))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes, groups=self.groups,
base_width=self.base_width, dilation=self.dilation,
norm_layer=norm_layer, wib = wib, kl_mult = kl_mult))
return nn.Sequential(*layers)
def make_conv_layers(self, config, batch_norm, block, blocks=1, dilate=False):
layers, kl_list = [], []
in_channels = 3
for v in config:
if v == 'M':
layers += [nn.MaxPool2d(kernel_size=3, stride=2, padding=1)]
elif v == 'A':
# layers += [nn.AvgPool2d(kernel_size=2, stride=2)]
layers += [nn.AdaptiveAvgPool2d((1, 1))]
else:
                ## Decide whether this entry is the first plain conv layer or a residual block
                if v[1]==1:  # first conv layer: build conv + information bottleneck, as in the VGG-style models
# conv2d = nn.Conv2d(in_channels, v[0], kernel_size=7, stride=2, padding=3, bias=False)
# conv2d = nn.Conv2d(in_channels, v[0], kernel_size=3, stride=1, padding=1, bias=False)
conv2d = conv3x3(3, v[0], stride=1, wib=self.wib)
ib = InformationBottleneck(v[0], mask_thresh=self.threshold, init_mag=self.init_mag, init_var=self.init_var,
kl_mult=v[3], sample_in_training=self.sample_in_training, sample_in_testing=self.sample_in_testing,a=self.a,b=self.b)
if batch_norm:
layers += [conv2d, nn.BatchNorm2d(v[0]), nn.ReLU(inplace=True)]
else:
layers += [conv2d, nn.ReLU(inplace=True)]
if not self.no_ib:
layers.append(ib)
kl_list.append(ib)
                if v[1]==2:  # ResNet BasicBlock stage: build it via _make_layer
resblock = self._make_layer(block, v[0], stride=v[2], kl_mult=v[3])
layers += [resblock]
kl_list.append(resblock[0].ib1)
kl_list.append(resblock[0].ib2)
ib = InformationBottleneck(v[0]*block.expansion, mask_thresh=self.threshold, init_mag=self.init_mag, init_var=self.init_var,
kl_mult=v[3], sample_in_training=self.sample_in_training, sample_in_testing=self.sample_in_testing,a=self.a,b=self.b)
# if not self.no_ib:
# layers.append(ib)
# kl_list.append(ib)
                if v[1]==3:  # ResNet Bottleneck stage: build it via _make_layer
resblock = self._make_layer(block, v[0], stride=v[2], kl_mult=v[3])
layers += [resblock]
kl_list.append(resblock[0].ib2)
# ib = InformationBottleneck(v[0]*block.expansion, mask_thresh=self.threshold, init_mag=self.init_mag, init_var=self.init_var,
# kl_mult=v[3], sample_in_training=self.sample_in_training, sample_in_testing=self.sample_in_testing,a=self.a,b=self.b)
in_channels = v[0]
# if not self.no_ib:
# layers.append(ib)
# kl_list.append(ib)
return nn.Sequential(*layers), kl_list
def auto_kl_mult(self):
# _, prune_stat = self.get_masks(hard_mask=True, threshold=threshold)
# conv_shapes = [v[0] for v in cfg[self.config] if type(v) is not str]
if self.config in ['G', 'D6']:
conv_shapes = [v[0] for v in cfg[self.config] if type(v) is not str]
fc_shapes = [512]
elif self.config in ['G5', 'G1']:
conv_shapes = [v[0] for v in cfg[self.config] if type(v) is not str]
conv_shapes_temp=[]
conv_shapes_temp += [conv_shapes[0]]
for i in range(len(conv_shapes)-1):
conv_shapes_temp += [conv_shapes[i + 1]]
conv_shapes_temp += [conv_shapes[i + 1]]
conv_shapes = conv_shapes_temp
fc_shapes = []
elif self.config in ['G2']:
conv_shapes = [v[0] for v in cfg[self.config] if type(v) is not str]
# conv_shapes[0]=conv_shapes[0]/self.expansion
fc_shapes = []
else:
fc_shapes = [512, 512]
# print('prune_stat: {}, last_prune_stat:{}'.format(prune_stat, self.last_prune_stat))
remain_stat = [out_channels - self.last_prune_stat[idx] for idx, out_channels in enumerate(conv_shapes + fc_shapes)]
init_stat = [out_channels for idx, out_channels in enumerate(conv_shapes + fc_shapes)]
sum = 0
# a=32
for i in range(len(init_stat)):
a = init_stat[i]/2
self.ratio[i] = remain_stat[i] / init_stat[i]
# sum = sum + self.ratio[i]
sum = sum + math.tan(math.pi*(a-1)/a/2*self.ratio[i])
# offset = 1 / len(self.init_kl_list)
b = 1.2
c= 0.01
# conv_kl_mult = 4
for i in range(len(self.init_kl_list)):
a=init_stat[i]/2
temp1 = len(self.init_kl_list)/2 - abs(i-len(self.init_kl_list)/2)
max1 = len(self.init_kl_list)/2
temp2 = remain_stat[i]
max2 = max(remain_stat)
# print('i:')
# print('(a-1)/a/2:',(a-1)/a/2)
# print('self.ratio[i]:', self.ratio[i])
# print('math.pi*(a-1)/a/2*self.ratio[i]:', math.pi*(a-1)/a/2*self.ratio[i])
# self.kl_list[i].kl_mult = self.init_kl_list[i] * (
# 1 + b* math.log(temp2,2)/math.log(max2,2)*
# (math.log(1 + temp1, 2) / math.log(1 + max1, 2)) *
# (math.tan(math.pi*(a-1)/a/2*self.ratio[i]) / sum) * len(self.init_kl_list))
if temp2==0:
self.kl_list[i].kl_mult=0
self.kl_mult_temp[i]=0
else:
self.kl_list[i].kl_mult = self.init_kl_list[i] * (
2* b* math.log(2+temp2,2)/math.log(max2,2)*
(math.log(1 + temp1, 2) / math.log(2 + max1, 2)) *
(math.tan(math.pi*(a-1)/a/2*self.ratio[i]) / sum) * len(self.init_kl_list))
self.kl_mult_temp[i] = self.kl_list[i].kl_mult / self.init_kl_list[i]
# print('conv_kl_mult:',conv_kl_mult)
print(b)
print(self.ratio)
print(self.init_kl_list)
print(self.kl_mult_temp)
def adapt_dropout(self, p):
# conv_shapes = [v[0] for v in cfg[self.config] if type(v) is not str]
if self.config in ['G', 'D6']:
conv_shapes = [v[0] for v in cfg[self.config] if type(v) is not str]
fc_shapes = [512]
elif self.config in ['G5', 'G1']:
conv_shapes = [v[0] for v in cfg[self.config] if type(v) is not str]
conv_shapes_temp = []
conv_shapes_temp += [conv_shapes[0]]
for i in range(len(conv_shapes) - 1):
conv_shapes_temp += [conv_shapes[i + 1]]
conv_shapes_temp += [conv_shapes[i + 1]]
conv_shapes = conv_shapes_temp
fc_shapes = []
elif self.config in ['G2']:
conv_shapes = [v[0] for v in cfg[self.config] if type(v) is not str]
# conv_shapes[0] = conv_shapes[0] / self.expansion
fc_shapes = []
else:
fc_shapes = [512, 512]
remain_stat = [out_channels - self.last_prune_stat[idx] for idx, out_channels in enumerate(conv_shapes + fc_shapes)]
for i in range(len(self.init_kl_list)):
if remain_stat[i] < 150:
# if remain_stat[i] < 200:
# if remain_stat[i] < 120:
# self.kl_list[i].p=1
self.kl_list[i].p = 1
else:
# self.kl_list[i].p = 1.0-1.0/remain_stat[i]
                # original setting:
# self.kl_list[i].p = 0.99
self.kl_list[i].p = 1
print(i,self.kl_list[i].p)
def forward(self, x):
batch_size = x.size(0)
x = self.conv_layers(x).view(batch_size, -1)
x = self.fc_layers(x)
if self.training:
if self.no_ib:
# return x
if not self.wib:
return x
else:
Wib_upbound = self.compute_Wib_upbound()
return x, Wib_upbound
else:
if not self.wib:
ib_kld = self.kl_list[0].kld
for ib in self.kl_list[1:]:
ib_kld += ib.kld
return x, ib_kld.float()
else:
ib_kld = self.kl_list[0].kld
for ib in self.kl_list[1:]:
ib_kld += ib.kld
Wib_upbound = self.compute_Wib_upbound()
return x, ib_kld.float(), Wib_upbound
else:
return x
def get_masks(self, hard_mask=True, threshold=0):
masks = []
if hard_mask:
masks = [ib_layer.get_mask_hard(threshold) for ib_layer in self.kl_list]
return masks, [np.sum(mask.cpu().numpy()==0) for mask in masks]
else:
masks = [ib_layer.get_mask_weighted(threshold) for ib_layer in self.kl_list]
return masks
def compute_Wib_upbound(self,):
Wib_upbound = 0
offset=0
interval=0
Wib_upbound += self.conv_layers[0].weight_ib.compute_Wib_upbound(self.conv_layers[0].weight_ib.logalpha)
# print('conv_layers: {}'.format(self.conv_layers))
if not self.no_ib:
offset=5
interval=0
else:
offset=4
interval=1
for i in range(8):
# print('self.conv_layers[5+i*2]: {}'.format(self.conv_layers[5+i*2]))
block=self.conv_layers[offset+i*(2-interval)]
# print('block: {}'.format(block[0]))
Wib_upbound += block[0].compute_Wib_upbound()
return Wib_upbound
def print_params(self,):
mu = []
logalpha = []
weight = []
weight += [self.conv_layers[0].weight]
offset = 0
interval = 0
if not self.no_ib:
offset=5
interval=0
else:
offset=4
interval=1
# print('weight: {}'.format(weight))
if self.wib:
mu += [self.conv_layers[0].weight_ib.mu]
logalpha += [self.conv_layers[0].weight_ib.logalpha]
mask_w,_= self.conv_layers[0].weight_ib.get_mask_hard(self.conv_layers[0].weight_ib.epsilon)
if not self.no_ib:
mask_a = self.kl_list[0].get_mask_hard()
mask_dert = mask_w - mask_a
print('mask_w: {}'.format(mask_w))
if not self.no_ib:
print('mask_a: {}'.format(mask_a))
print('mask_dert: {}'.format(mask_dert))
print('mu: {}, logalpha: {}'.format(mu, logalpha))
for i in range(8):
# print('self.conv_layers[5+i*2]: {}'.format(self.conv_layers[5+i*2]))
block = self.conv_layers[offset + i * (2 - interval)]
# block=self.conv_layers[5+i*2]
# print('block: {}'.format(block[0]))
mu += [block[0].conv1.weight_ib.mu]
mu += [block[0].conv2.weight_ib.mu]
logalpha += [block[0].conv1.weight_ib.logalpha]
logalpha += [block[0].conv2.weight_ib.logalpha]
# mu = [ib_layer.post_z_mu for ib_layer in self.kl_list]
# logalpha = [ib_layer.post_z_logD for ib_layer in self.kl_list]
# print('mu: {}, logalpha: {}'.format(mu, logalpha))
def print_compression_ratio(self, threshold, writer=None, epoch=-1):
# applicable for structures with global pooling before fc
_, prune_stat = self.get_masks(hard_mask=True, threshold=threshold)
# conv_shapes = [v[0] for v in cfg[self.config] if type(v) is not str]
if self.config in ['G', 'D6']:
conv_shapes = [v[0] for v in cfg[self.config] if type(v) is not str]
fc_shapes = [512]
elif self.config in ['G5', 'G1']:
conv_shapes = [v[0] for v in cfg[self.config] if type(v) is not str]
conv_shapes_temp = []
conv_shapes_temp += [conv_shapes[0]]
for i in range(len(conv_shapes) - 1):
conv_shapes_temp += [conv_shapes[i + 1]]
conv_shapes_temp += [conv_shapes[i + 1]]
conv_shapes = conv_shapes_temp
fc_shapes = []
elif self.config in ['G2']:
conv_shapes = [v[0] for v in cfg[self.config] if type(v) is not str]
# conv_shapes[0] = conv_shapes[0] / self.expansion
fc_shapes = []
else:
fc_shapes = [512, 512]
# print('prune_stat: {}, last_prune_stat:{}'.format(prune_stat, self.last_prune_stat))
self.pruned_structure = [prune_stat[idx] - self.last_prune_stat[idx] for idx, out_channels in enumerate(conv_shapes + fc_shapes)]
self.last_prune_stat = [prune_stat[idx] for idx, out_channels in enumerate(conv_shapes + fc_shapes)]
net_shape = [ out_channels-prune_stat[idx] for idx, out_channels in enumerate(conv_shapes+fc_shapes)]
#conv_shape_with_pool = [v[0] if v != 'M' else 'M' for v in cfg[self.config]]
current_n, hdim, last_channels, flops, fmap_size = 0, 64, 3, 0, 32
for n, pruned_channels in enumerate(prune_stat):
if n < len(conv_shapes):
# current_channels = cfg[self.config][current_n][0] - pruned_channels
current_channels = conv_shapes[current_n] - pruned_channels
flops += (fmap_size**2) * 9 * last_channels * current_channels
last_channels = current_channels
current_n += 1
if self.config in ['G1']:
if current_n==1 or current_n==8 or current_n==16 or current_n==28 or current_n==33:
fmap_size /= 2
hdim *= 2
if self.config in ['G5']:
if current_n==1 or current_n==6 or current_n==10 or current_n==14 or current_n==17:
fmap_size /= 2
hdim *= 2
# if type(cfg[self.config][current_n]) is str:
# current_n += 1
# fmap_size /= 2
# hdim *= 2
else:
current_channels = 512 - pruned_channels
flops += last_channels * current_channels
last_channels = current_channels
flops += last_channels * self.n_cls
total_params, pruned_params, remain_params = 0, 0, 0
# total number of conv params
in_channels, in_pruned = 3, 0
for n, n_out in enumerate(conv_shapes):
n_params = in_channels * n_out * 9
total_params += n_params
n_remain = (in_channels - in_pruned) * (n_out - prune_stat[n]) * 9
remain_params += n_remain
pruned_params += n_params - n_remain
in_channels = n_out
in_pruned = prune_stat[n]
# print('n_params: {}, n_remain: {}, in_channels:{}, in_pruned:{}, n_out: {}, prune_stat: {},'.format(n_params, n_remain, in_channels, in_pruned, n_out, prune_stat))
# fc layers
offset = len(prune_stat) - len(fc_shapes)
for n, n_out in enumerate(fc_shapes):
n_params = in_channels * n_out
total_params += n_params
n_remain = (in_channels - in_pruned) * (n_out - prune_stat[n+offset])
remain_params += n_remain
pruned_params += n_params - n_remain
in_channels = n_out
in_pruned = prune_stat[n+offset]
# print('n_params: {}, n_remain: {}, in_channels:{}, in_pruned:{}, n_out: {}, prune_stat: {},'.format(n_params, n_remain, in_channels, in_pruned, n_out, prune_stat))
total_params += in_channels * self.n_cls
remain_params += (in_channels - in_pruned) * self.n_cls
pruned_params += in_pruned * self.n_cls
self.print_params()
print('total parameters: {}, pruned parameters: {}, remaining params:{}, remain/total params:{}, remaining flops: {}, remaining flops/params: {},'
'each layer pruned: {}, this epoch each layer pruned: {}, remaining structure:{}'.format(total_params, pruned_params, remain_params,
float(total_params-pruned_params)/total_params, flops, 0.0000000001 * flops/(float(total_params-pruned_params)/total_params), prune_stat, self.pruned_structure, net_shape))
if writer is not None:
writer.add_scalar('flops', flops, epoch)
writer.add_scalar('remain/total params', float(total_params-pruned_params)/total_params, epoch)
writer.add_scalar('flops/remaining params', 0.0000000001 * flops/(float(total_params-pruned_params)/total_params), epoch)
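# Worked example of the FLOP/parameter bookkeeping above (illustrative numbers only):
# for one 3x3 conv layer with fmap_size=32, last_channels=64 and current_channels=128,
# the loop adds
#     flops += 32**2 * 9 * 64 * 128   # = 75,497,472 multiply-accumulates
# while the parameter pass counts 64 * 128 * 9 = 73,728 weights for the same layer;
# these running totals feed the remain/total params and flops/params ratios printed above.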
|
the-stack_0_7418 | # -*- coding: utf-8 -*-
from __future__ import division, print_function, absolute_import
import os
import tensorflow as tf
import math
from dataloader.pretrained_weights.pretrain_zoo import PretrainModelZoo
"""
This is your result for task 1:
mAP: 0.7066194189913816
ap of each class:
plane:0.8905480010393588,
baseball-diamond:0.7845764249543027,
bridge:0.4415489914209597,
ground-track-field:0.6515721505439082,
small-vehicle:0.7509226622459368,
large-vehicle:0.7288453788151275,
ship:0.8604046905135039,
tennis-court:0.9082569687774237,
basketball-court:0.8141347275878138,
storage-tank:0.8253027715641935,
soccer-ball-field:0.5623560181901192,
roundabout:0.6100656068973895,
harbor:0.5648618127447264,
swimming-pool:0.6767393616949172,
helicopter:0.5291557178810407
The submitted information is :
Description: RetinaNet_DOTA_R3Det_2x_20191108_70.2w
Username: SJTU-Det
Institute: SJTU
Emailadress: [email protected]
TeamMembers: yangxue
"""
# ------------------------------------------------
VERSION = 'RetinaNet_DOTA_R3Det_2x_20191108'
NET_NAME = 'resnet50_v1d' # 'MobilenetV2'
# ---------------------------------------- System
ROOT_PATH = os.path.abspath('../../')
print(20*"++--")
print(ROOT_PATH)
GPU_GROUP = "0,1,2"
NUM_GPU = len(GPU_GROUP.strip().split(','))
SHOW_TRAIN_INFO_INTE = 20
SMRY_ITER = 200
SAVE_WEIGHTS_INTE = 27000 * 2
SUMMARY_PATH = os.path.join(ROOT_PATH, 'output/summary')
TEST_SAVE_PATH = os.path.join(ROOT_PATH, 'tools/test_result')
pretrain_zoo = PretrainModelZoo()
PRETRAINED_CKPT = pretrain_zoo.pretrain_weight_path(NET_NAME, ROOT_PATH)
TRAINED_CKPT = os.path.join(ROOT_PATH, 'output/trained_weights')
EVALUATE_R_DIR = os.path.join(ROOT_PATH, 'output/evaluate_result_pickle/')
# ------------------------------------------ Train and test
RESTORE_FROM_RPN = False
FIXED_BLOCKS = 1 # allow 0~3
FREEZE_BLOCKS = [True, False, False, False, False] # for gluoncv backbone
USE_07_METRIC = True
ADD_BOX_IN_TENSORBOARD = True
MUTILPY_BIAS_GRADIENT = 2.0 # if None, will not multiply
GRADIENT_CLIPPING_BY_NORM = 10.0 # if None, will not clip
CLS_WEIGHT = 1.0
REG_WEIGHT = 1.0
USE_IOU_FACTOR = False
BATCH_SIZE = 1
EPSILON = 1e-5
MOMENTUM = 0.9
LR = 5e-4
DECAY_STEP = [SAVE_WEIGHTS_INTE*12, SAVE_WEIGHTS_INTE*16, SAVE_WEIGHTS_INTE*20]
MAX_ITERATION = SAVE_WEIGHTS_INTE*20
WARM_SETP = int(1.0 / 4.0 * SAVE_WEIGHTS_INTE)
# -------------------------------------------- Dataset
DATASET_NAME = 'DOTA' # 'pascal', 'coco'
PIXEL_MEAN = [123.68, 116.779, 103.939] # R, G, B. In tf, channel is RGB. In openCV, channel is BGR
PIXEL_MEAN_ = [0.485, 0.456, 0.406]
PIXEL_STD = [0.229, 0.224, 0.225] # R, G, B. In tf, channel is RGB. In openCV, channel is BGR
IMG_SHORT_SIDE_LEN = 800
IMG_MAX_LENGTH = 800
CLASS_NUM = 15
IMG_ROTATE = False
RGB2GRAY = False
VERTICAL_FLIP = False
HORIZONTAL_FLIP = True
IMAGE_PYRAMID = False
# --------------------------------------------- Network
SUBNETS_WEIGHTS_INITIALIZER = tf.random_normal_initializer(mean=0.0, stddev=0.01, seed=None)
SUBNETS_BIAS_INITIALIZER = tf.constant_initializer(value=0.0)
PROBABILITY = 0.01
FINAL_CONV_BIAS_INITIALIZER = tf.constant_initializer(value=-math.log((1.0 - PROBABILITY) / PROBABILITY))
WEIGHT_DECAY = 1e-4
USE_GN = False
NUM_SUBNET_CONV = 4
NUM_REFINE_STAGE = 1
USE_RELU = False
FPN_CHANNEL = 256
FPN_MODE = 'fpn'
# --------------------------------------------- Anchor
LEVEL = ['P3', 'P4', 'P5', 'P6', 'P7']
BASE_ANCHOR_SIZE_LIST = [32, 64, 128, 256, 512]
ANCHOR_STRIDE = [8, 16, 32, 64, 128]
ANCHOR_SCALES = [2 ** 0, 2 ** (1.0 / 3.0), 2 ** (2.0 / 3.0)]
ANCHOR_RATIOS = [1, 1 / 2, 2., 1 / 3., 3., 5., 1 / 5.]
ANCHOR_ANGLES = [-90, -75, -60, -45, -30, -15]
ANCHOR_SCALE_FACTORS = None
USE_CENTER_OFFSET = True
METHOD = 'H'
USE_ANGLE_COND = False
ANGLE_RANGE = 90
# -------------------------------------------- Head
SHARE_NET = True
USE_P5 = True
IOU_POSITIVE_THRESHOLD = 0.5
IOU_NEGATIVE_THRESHOLD = 0.4
REFINE_IOU_POSITIVE_THRESHOLD = [0.6, 0.7]
REFINE_IOU_NEGATIVE_THRESHOLD = [0.5, 0.6]
NMS = True
NMS_IOU_THRESHOLD = 0.1
MAXIMUM_DETECTIONS = 100
FILTERED_SCORE = 0.05
VIS_SCORE = 0.4
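# Quick sanity check of the schedule arithmetic above (values implied by this config):
# SAVE_WEIGHTS_INTE = 27000 * 2 = 54000, so
#   DECAY_STEP    = [648000, 864000, 1080000]
#   MAX_ITERATION = 1080000
#   WARM_SETP     = 13500
# i.e. the learning rate decays at 12x, 16x and 20x the checkpoint interval.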
|
the-stack_0_7419 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging
import time
from typing import Any, Dict, Optional
import requests
from airflow.exceptions import AirflowException
from airflow.models import BaseOperator
from airflow.providers.plexus.hooks.plexus import PlexusHook
logger = logging.getLogger(__name__)
class PlexusJobOperator(BaseOperator):
"""
Submits a Plexus job.
:param job_params: parameters required to launch a job.
:type job_params: dict
Required job parameters are the following
- "name": job name created by user.
- "app": name of the application to run. found in Plexus UI.
- "queue": public cluster name. found in Plexus UI.
- "num_nodes": number of nodes.
- "num_cores": number of cores per node.
"""
def __init__(self, job_params: Dict, **kwargs) -> None:
super().__init__(**kwargs)
self.job_params = job_params
self.required_params = {"name", "app", "queue", "num_cores", "num_nodes"}
self.lookups = {
"app": ("apps/", "id", "name"),
"billing_account_id": ("users/{}/billingaccounts/", "id", None),
"queue": ("queues/", "id", "public_name"),
}
self.job_params.update({"billing_account_id": None})
self.is_service = None
def execute(self, context: Any) -> Any:
hook = PlexusHook()
params = self.construct_job_params(hook)
if self.is_service is True:
if self.job_params.get("expected_runtime") is None:
end_state = "Running"
else:
end_state = "Finished"
elif self.is_service is False:
end_state = "Completed"
else:
raise AirflowException(
"Unable to determine if application "
"is running as a batch job or service. "
"Contact Core Scientific AI Team."
)
logger.info("creating job w/ following params: %s", params)
jobs_endpoint = hook.host + "jobs/"
headers = {"Authorization": f"Bearer {hook.token}"}
create_job = requests.post(jobs_endpoint, headers=headers, data=params, timeout=5)
if create_job.ok:
job = create_job.json()
jid = job["id"]
state = job["last_state"]
while state != end_state:
time.sleep(3)
jid_endpoint = jobs_endpoint + f"{jid}/"
get_job = requests.get(jid_endpoint, headers=headers, timeout=5)
if not get_job.ok:
raise AirflowException(
"Could not retrieve job status. "
f"Status Code: [{get_job.status_code}]. Reason: {get_job.reason} - {get_job.text}"
)
new_state = get_job.json()["last_state"]
if new_state in ("Cancelled", "Failed"):
raise AirflowException(f"Job {new_state}")
elif new_state != state:
logger.info("job is %s", new_state)
state = new_state
else:
raise AirflowException(
"Could not start job. "
f"Status Code: [{create_job.status_code}]. Reason: {create_job.reason} - {create_job.text}"
)
def _api_lookup(self, param: str, hook):
lookup = self.lookups[param]
key = lookup[1]
mapping = None if lookup[2] is None else (lookup[2], self.job_params[param])
if param == "billing_account_id":
endpoint = hook.host + lookup[0].format(hook.user_id)
else:
endpoint = hook.host + lookup[0]
headers = {"Authorization": f"Bearer {hook.token}"}
response = requests.get(endpoint, headers=headers, timeout=5)
results = response.json()["results"]
v = None
if mapping is None:
v = results[0][key]
else:
for dct in results:
if dct[mapping[0]] == mapping[1]:
v = dct[key]
if param == 'app':
self.is_service = dct['is_service']
if v is None:
raise AirflowException(f"Could not locate value for param:{key} at endpoint: {endpoint}")
return v
def construct_job_params(self, hook: Any) -> Dict[Any, Optional[Any]]:
"""
Creates job_params dict for api call to
launch a Plexus job.
Some parameters required to launch a job
are not available to the user in the Plexus
UI. For example, an app id is required, but
only the app name is provided in the UI.
This function acts as a backend lookup
of the required param value using the
user-provided value.
:param hook: plexus hook object
:type hook: airflow hook
"""
missing_params = self.required_params - set(self.job_params)
if len(missing_params) > 0:
raise AirflowException(f"Missing the following required job_params: {', '.join(missing_params)}")
params = {}
for prm in self.job_params:
if prm in self.lookups:
v = self._api_lookup(param=prm, hook=hook)
params[prm] = v
else:
params[prm] = self.job_params[prm]
return params
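# Usage sketch (illustrative values only -- the app and queue names below are
# assumptions, not real Plexus resources; the operator resolves their ids itself
# through _api_lookup() when it runs):
#
#     run_plexus_job = PlexusJobOperator(
#         task_id="run_plexus_job",
#         job_params={
#             "name": "example-job",
#             "app": "my-training-app",
#             "queue": "public-cpu-queue",
#             "num_nodes": 1,
#             "num_cores": 4,
#         },
#     )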
|
the-stack_0_7420 | from openshift import Openshift
from command import Command
import re
import requests
import time
class NodeJSApp(object):
nodesj_app_image = "quay.io/pmacik/nodejs-rest-http-crud"
api_end_point = 'http://{route_url}/api/status/dbNameCM'
openshift = Openshift()
pod_name_pattern = "{name}.*$(?<!-build)"
name = ""
namespace = ""
def __init__(self, name, namespace):
self.cmd = Command()
self.name = name
self.namespace = namespace
def is_running(self, wait=False):
deployment_flag = False
if wait:
pod_name = self.openshift.wait_for_pod(self.get_pod_name_pattern(), self.namespace, timeout=180)
else:
pod_name = self.openshift.search_pod_in_namespace(self.get_pod_name_pattern(), self.namespace)
if pod_name is not None:
application_pod_status = self.openshift.check_pod_status(pod_name, self.namespace, wait_for_status="Running")
print("The pod {} is running: {}".format(pod_name, application_pod_status))
deployment = self.openshift.search_resource_in_namespace("deployments", f"{self.name}.*", self.namespace)
if deployment is not None:
print("deployment is {}".format(deployment))
deployment_flag = True
if application_pod_status and deployment_flag:
return True
else:
return False
else:
return False
def install(self):
create_new_app_output, exit_code = self.cmd.run(f"oc new-app --docker-image={self.nodesj_app_image} --name={self.name} -n {self.namespace}")
assert exit_code == 0, f"Non-zero exit code ({exit_code}) returned when attempting to create a new app: {create_new_app_output}"
assert re.search(f'imagestream.image.openshift.io.*{self.name}.*created',
create_new_app_output) is not None, f"Unable to create imagestream: {create_new_app_output}"
assert re.search(f'deployment.apps.*{self.name}.*created',
create_new_app_output) is not None, f"Unable to create deployment: {create_new_app_output}"
assert re.search(f'service.*{self.name}.*created',
create_new_app_output) is not None, f"Unable to create service: {create_new_app_output}"
assert self.openshift.expose_service_route(self.name, self.namespace) is not None, "Unable to expose service route"
return self.is_running(wait=True)
def get_db_name_from_api(self, interval=5, timeout=60):
route_url = self.openshift.get_route_host(self.name, self.namespace)
if route_url is None:
return None
start = 0
while ((start + interval) <= timeout):
db_name = requests.get(url=self.api_end_point.format(route_url=route_url))
if db_name.status_code == 200:
return db_name.text
time.sleep(interval)
start += interval
return None
def get_observed_generation(self):
return self.openshift.get_resource_info_by_jsonpath("deployment", self.name, self.namespace, "{.status.observedGeneration}")
def get_running_pod_name(self, interval=5, timeout=60):
start = 0
while ((start + interval) <= timeout):
pod_list = self.openshift.get_pod_lst(self.namespace)
for pod in pod_list.split(" "):
if re.fullmatch(self.get_pod_name_pattern(), pod):
if self.openshift.get_pod_status(pod, self.namespace) == "Running":
return pod
time.sleep(interval)
start += interval
return None
def get_redeployed_pod_name(self, old_pod_name, interval=5, timeout=60):
start = 0
while ((start + interval) <= timeout):
pod_list = self.openshift.get_pod_lst(self.namespace)
for pod in pod_list.split(" "):
if pod != old_pod_name and re.fullmatch(self.get_pod_name_pattern(), pod):
if self.openshift.get_pod_status(pod, self.namespace) == "Running":
return pod
time.sleep(interval)
start += interval
return None
def get_pod_name_pattern(self):
return self.pod_name_pattern.format(name=self.name)
|
the-stack_0_7421 | import logging
import torch
import torch.nn.functional as F
from torch import nn
from predict_pv_yield.models.base_model import BaseModel
logging.basicConfig()
_LOG = logging.getLogger("predict_pv_yield")
class Model(BaseModel):
name = "conv3d_sat_nwp"
def __init__(
self,
include_pv_yield: bool = True,
include_nwp: bool = True,
include_time: bool = True,
forecast_minutes: int = 30,
history_minutes: int = 60,
number_of_conv3d_layers: int = 4,
conv3d_channels: int = 32,
image_size_pixels: int = 64,
number_sat_channels: int = 12,
number_nwp_channels: int = 10,
fc1_output_features: int = 128,
fc2_output_features: int = 128,
fc3_output_features: int = 64,
output_variable: str = "pv_yield",
):
"""
3d conv model, that takes in different data streams
architecture is roughly
1. satellite image time series goes into many 3d convolution layers.
2. nwp time series goes into many 3d convolution layers.
        3. Final convolutional layer goes to a fully connected layer. This is joined by other data inputs like
- pv yield
- time variables
        Then there are ~4 fully connected layers which end up forecasting the pv yield / gsp into the future
include_pv_yield: include pv yield data
include_nwp: include nwp data
        include_time: include hour of day, and day of year as sin and cos components
forecast_len: the amount of minutes that should be forecasted
history_len: the amount of historical minutes that are used
        number_of_conv3d_layers, number of convolution 3d layers that are used
        conv3d_channels, the amount of convolution 3d channels
        image_size_pixels: the input satellite image size
        number_sat_channels: number of satellite channels
        fc1_output_features: number of fully connected output nodes out of the first fully connected layer
        fc2_output_features: number of fully connected output nodes out of the second fully connected layer
        fc3_output_features: number of fully connected output nodes out of the third fully connected layer
output_variable: the output variable to be predicted
number_nwp_channels: The number of nwp channels there are
"""
self.include_pv_yield = include_pv_yield
self.include_nwp = include_nwp
self.include_time = include_time
self.number_of_conv3d_layers = number_of_conv3d_layers
self.number_of_nwp_features = 128
self.fc1_output_features = fc1_output_features
self.fc2_output_features = fc2_output_features
self.fc3_output_features = fc3_output_features
self.forecast_minutes = forecast_minutes
self.history_minutes = history_minutes
self.output_variable = output_variable
self.number_nwp_channels = number_nwp_channels
super().__init__()
conv3d_channels = conv3d_channels
self.cnn_output_size = (
conv3d_channels
* ((image_size_pixels - 2 * self.number_of_conv3d_layers) ** 2)
* (self.forecast_len_5 + self.history_len_5 + 1 - 2 * self.number_of_conv3d_layers)
)
# conv0
self.sat_conv0 = nn.Conv3d(
in_channels=number_sat_channels,
out_channels=conv3d_channels,
kernel_size=(3, 3, 3),
padding=0,
)
for i in range(0, self.number_of_conv3d_layers - 1):
layer = nn.Conv3d(
in_channels=conv3d_channels, out_channels=conv3d_channels, kernel_size=(3, 3, 3), padding=0
)
setattr(self, f"sat_conv{i + 1}", layer)
self.fc1 = nn.Linear(in_features=self.cnn_output_size, out_features=self.fc1_output_features)
self.fc2 = nn.Linear(in_features=self.fc1_output_features, out_features=self.fc2_output_features)
# nwp
if include_nwp:
self.nwp_conv0 = nn.Conv3d(
in_channels=number_nwp_channels,
out_channels=conv3d_channels,
kernel_size=(3, 3, 3),
padding=0,
)
for i in range(0, self.number_of_conv3d_layers - 1):
layer = nn.Conv3d(
in_channels=conv3d_channels, out_channels=conv3d_channels, kernel_size=(3, 3, 3), padding=0
)
setattr(self, f"nwp_conv{i + 1}", layer)
self.nwp_fc1 = nn.Linear(in_features=self.cnn_output_size, out_features=self.fc1_output_features)
self.nwp_fc2 = nn.Linear(in_features=self.fc1_output_features, out_features=self.number_of_nwp_features)
fc3_in_features = self.fc2_output_features
if include_pv_yield:
fc3_in_features += self.number_of_samples_per_batch * (self.history_len_30 + 1)
if include_nwp:
fc3_in_features += 128
if include_time:
fc3_in_features += 4
self.fc3 = nn.Linear(in_features=fc3_in_features, out_features=self.fc3_output_features)
self.fc4 = nn.Linear(in_features=self.fc3_output_features, out_features=self.forecast_len)
# self.fc5 = nn.Linear(in_features=32, out_features=8)
# self.fc6 = nn.Linear(in_features=8, out_features=1)
def forward(self, x):
# ******************* Satellite imagery *************************
# Shape: batch_size, seq_length, width, height, channel
sat_data = x["sat_data"]
batch_size, seq_len, width, height, n_chans = sat_data.shape
# Conv3d expects channels to be the 2nd dim, https://pytorch.org/docs/stable/generated/torch.nn.Conv3d.html
sat_data = sat_data.permute(0, 4, 1, 3, 2)
# Now shape: batch_size, n_chans, seq_len, height, width
# :) Pass data through the network :)
out = F.relu(self.sat_conv0(sat_data))
for i in range(0, self.number_of_conv3d_layers - 1):
layer = getattr(self, f"sat_conv{i + 1}")
out = F.relu(layer(out))
out = out.reshape(batch_size, self.cnn_output_size)
# Fully connected layers
out = F.relu(self.fc1(out))
out = F.relu(self.fc2(out))
# which has shape (batch_size, 128)
# add pv yield
if self.include_pv_yield:
pv_yield_history = x[self.output_variable][:, : self.history_len_30 + 1].nan_to_num(nan=0.0)
pv_yield_history = pv_yield_history.reshape(
pv_yield_history.shape[0], pv_yield_history.shape[1] * pv_yield_history.shape[2]
)
# join up
out = torch.cat((out, pv_yield_history), dim=1)
# *********************** NWP Data ************************************
if self.include_nwp:
# Shape: batch_size, channel, seq_length, width, height
nwp_data = x["nwp"]
out_nwp = F.relu(self.nwp_conv0(nwp_data))
for i in range(0, self.number_of_conv3d_layers - 1):
layer = getattr(self, f"nwp_conv{i + 1}")
out_nwp = F.relu(layer(out_nwp))
# fully connected layers
out_nwp = out_nwp.reshape(batch_size, self.cnn_output_size)
out_nwp = F.relu(self.nwp_fc1(out_nwp))
out_nwp = F.relu(self.nwp_fc2(out_nwp))
# join with other FC layer
out = torch.cat((out, out_nwp), dim=1)
# ########## include time variables #########
if self.include_time:
# just take the value now
x_sin_hour = x["hour_of_day_sin"][:, self.history_len_5 + 1].unsqueeze(dim=1)
x_cos_hour = x["hour_of_day_cos"][:, self.history_len_5 + 1].unsqueeze(dim=1)
x_sin_day = x["day_of_year_sin"][:, self.history_len_5 + 1].unsqueeze(dim=1)
x_cos_day = x["day_of_year_cos"][:, self.history_len_5 + 1].unsqueeze(dim=1)
# join up
out = torch.cat((out, x_sin_hour, x_cos_hour, x_sin_day, x_cos_day), dim=1)
# Fully connected layers.
out = F.relu(self.fc3(out))
out = self.fc4(out)
out = out.reshape(batch_size, self.forecast_len)
return out
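# Minimal usage sketch (shapes are assumptions read off forward(); seq_len is assumed to
# equal history_len_5 + forecast_len_5 + 1, which BaseModel derives from the minute settings):
#
#     model = Model(include_pv_yield=False, include_nwp=False, include_time=False)
#     x = {"sat_data": torch.randn(batch_size, seq_len, 64, 64, 12)}
#     y_hat = model(x)   # -> tensor of shape (batch_size, model.forecast_len)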
|
the-stack_0_7422 | from ffai.web.api import *
import numpy as np
import time
class MyRandomBot(Agent):
def __init__(self, name):
super().__init__(name)
self.my_team = None
self.actions_taken = 0
def new_game(self, game, team):
self.my_team = team
self.actions_taken = 0
def act(self, game):
while True:
action_choice = np.random.choice(game.state.available_actions)
if action_choice.action_type != ActionType.PLACE_PLAYER:
break
pos = np.random.choice(action_choice.positions) if len(action_choice.positions) > 0 else None
player = np.random.choice(action_choice.players) if len(action_choice.players) > 0 else None
action = Action(action_choice.action_type, pos=pos, player=player)
self.actions_taken += 1
return action
def end_game(self, game):
winner = game.get_winning_team()
print("Casualties: ", game.num_casualties())
if winner is None:
print("It's a draw")
elif winner == self.my_team:
print("I ({}) won".format(self.name))
else:
print("I ({}) lost".format(self.name))
print("I took", self.actions_taken, "actions")
if __name__ == "__main__":
# Load configurations, rules, arena and teams
config = get_config("ff-11.json")
#config.competition_mode = False
# config = get_config("ff-7.json")
# config = get_config("ff-5.json")
# config = get_config("ff-3.json")
ruleset = get_rule_set(config.ruleset, all_rules=False) # We don't need all the rules
arena = get_arena(config.arena)
home = get_team_by_id("human-1", ruleset)
away = get_team_by_id("human-2", ruleset)
# Play 100 games
for i in range(10):
away_agent = MyRandomBot("Random Bot 1")
home_agent = MyRandomBot("Random Bot 2")
config.debug_mode = False
game = Game(i, home, away, home_agent, away_agent, config, arena=arena, ruleset=ruleset)
game.config.fast_mode = True
print("Starting game", (i+1))
start = time.time()
game.init()
game.step()
end = time.time()
print(end - start)
|
the-stack_0_7424 | # -*- encoding: utf-8 -*-
import builtins
import unittest
import unittest.mock
import pytest
from common.utils.backend import Backend
class BackendStub(Backend):
def __init__(self):
self.__class__ = Backend
def setup_load_model_mocks(openMock, pickleLoadMock, seed, idx, budget):
model_path = "/runs/%s_%s_%s/%s.%s.%s.model" % (seed, idx, budget, seed, idx, budget)
file_handler = "file_handler"
expected_model = "model"
fileMock = unittest.mock.MagicMock()
fileMock.__enter__.return_value = file_handler
openMock.side_effect = (
lambda path, flag: fileMock if path == model_path and flag == "rb" else None
)
pickleLoadMock.side_effect = lambda fh: expected_model if fh == file_handler else None
return expected_model
@pytest.fixture
def backend_stub():
backend = BackendStub()
backend.internals_directory = "/"
return backend
@unittest.mock.patch("pickle.load")
@unittest.mock.patch("os.path.exists")
def test_load_model_by_seed_and_id(exists_mock, pickleLoadMock, backend_stub):
exists_mock.return_value = False
open_mock = unittest.mock.mock_open(read_data="Data")
with unittest.mock.patch(
"common.utils.backend.open",
open_mock,
create=True,
):
seed = 13
idx = 17
budget = 50.0
expected_model = setup_load_model_mocks(
open_mock,
pickleLoadMock,
seed,
idx,
budget,
)
actual_model = backend_stub.load_model_by_seed_and_id_and_budget(seed, idx, budget)
assert expected_model == actual_model
@unittest.mock.patch("pickle.load")
@unittest.mock.patch.object(builtins, "open")
@unittest.mock.patch("os.path.exists")
def test_loads_models_by_identifiers(exists_mock, openMock, pickleLoadMock, backend_stub):
exists_mock.return_value = True
seed = 13
idx = 17
budget = 50.0
expected_model = setup_load_model_mocks(openMock, pickleLoadMock, seed, idx, budget)
expected_dict = {(seed, idx, budget): expected_model}
actual_dict = backend_stub.load_models_by_identifiers([(seed, idx, budget)])
assert isinstance(actual_dict, dict)
assert expected_dict == actual_dict
|
the-stack_0_7425 | import os
import pypandoc
import json
import yaml
import re
import datetime
from .config import Config
from pypandoc.pandoc_download import download_pandoc
from pylatex import Document, Description
from pylatex.section import Chapter
from pylatex.utils import *
class Cover:
artist = None
title = None
year = None
medium = None
musium = None
location = None
license = None
# Goya, Francisco. The Family of Charles IV. 1800, oil on canvas, Museo del Prado, Madrid.
def __init__(self, cover):
for dic in cover:
key = list(dic.keys())[0]
try:
setattr(self, key, dic[key])
except:
print('error', key)
return
def export_citation(self):
firstname = self.artist.split(' ')[0]
lastname = ' '.join(self.artist.split(' ')[1:])
name = ', '.join([firstname, lastname])
return '{name}. {title}, {year}, {medium}, {musium}, {location}'.format(
name = name,
title = '\\textit{%s}'%self.title,
year = self.year,
medium = self.medium,
musium = self.musium,
location= self.location
)
class Content:
title = None
layout = None
latex = None
type = 'mainmatter'
filename = None
endnote = None
sample = False
def __init__(self, content):
for key in content:
try:
setattr(self, key, content[key])
except:
print(key)
self.latex = self.convert_latex()
return
def convert_latex(self):
filepath = os.path.join(Config().manuscript_dir, self.filename+'.md')
return pypandoc.convert_file( filepath, 'latex', extra_args=[
'--data-dir='+os.path.join(os.getcwd(), 'BartlebyMachine', '.pandoc'),
'--wrap=none',
'--variable',
'documentclass=book',
])
def write_latex(self):
output_path = os.path.join(Config().manuscript_dir, 'tex', self.filename) + '.tex';
f = open(output_path, 'w', encoding='utf-8')
f.write(self.latex)
f.close()
class TableOfContent:
title = None
author = None
dateOfPublished = None
cover = None
license = None
content = []
sample = False
def __init__(self, toc):
for key in toc:
try:
if(key == 'content'):
content = list(map(lambda x: Content(x), toc[key]))
toc[key] = content
if(key == 'cover'):
toc[key] = Cover(toc[key])
setattr(self, key, toc[key])
except:
print(key)
def export_content(self):
concat = []
for content in self.content:
if self.sample == True and content.sample == False:
continue
if content.type == 'mainmatter':
str = '\\\\begin{{{layout}}}\n{latex}\n\end{{{layout}}}'.format(latex=content.latex.replace('\\\r\n', '\\\\\n'), layout=content.layout);
concat.append(str)
return '\n'.join(concat)
def export_preface(self):
if self.sample == True:
prefaces = list(filter(lambda x: x.type == 'preface' and x.sample == True, self.content))
else:
prefaces = list(filter(lambda x: x.type == 'preface', self.content))
return '\n'.join(list(map(lambda x: x.latex, prefaces)))
def export_endpaper(self):
options = ['itemsep=1pt', 'parsep=1pt']
book = Description(options=options)
book.add_item('제목', self.title)
book.add_item('저자', self.author)
book.add_item('편집', '미루')
book.add_item('디자인', '써드엔지니어링카르텔')
book.add_item('출간일', '2018-06-01')
publisher = Description(options=options)
publisher.add_item('출판', '금치산자레시피')
publisher.add_item('웹사이트', 'http://gtszrcp.com')
cover = Description(options=options)
cover.add_item('표지', NoEscape(self.cover.export_citation()))
cover.add_item('표지 그림 저작권', self.cover.license)
license = Description(options=options)
license.add_item('저작권', NoEscape('이 책에 수록된 저작물 중 별도로 표기되지 않은 모든 저작물의 저작권은 저자에게 있습니다. %s에 의해 이용할 수 있습니다.'%italic(self.license)))
license.add_item('', '이 책은 BartlebyMachine으로 제작되었습니다.')
endpaper = map(lambda x: x.dumps().replace('\\', '\\\\'), [
book, publisher, cover, license
])
return '\n'.join(list(endpaper))
def export_appendix(self):
appendix = []
appendix.append(Chapter('참조'))
content = Description()
endnotes = list(filter(lambda x: x.endnote != None, self.content))
for note in endnotes:
content.add_item(note.title, note.endnote)
appendix.append(content)
appendix = list(map(lambda x: x.dumps().replace('\\', '\\\\'), appendix))
return '\n'.join(appendix)
class Bartleby:
toc = None
manuscripts = None
overcite = None
orphan = None
config = None
sample = False
def __init__(self):
self.manuscripts = list(filter(
lambda x: os.path.isdir(os.path.join(Config().manuscript_dir, x)) == False,
os.listdir(Config().manuscript_dir)
))
self.toc = [];
def write_latex(self):
latex = self.replace_template()
filename = 'ggded.tex'
if self.sample == True:
filename = 'ggded.sample.tex'
f = open(filename, 'w', encoding='utf-8')
f.write(latex)
f.close()
return
def replace_template(self):
template = Config().template
book = []
if self.sample == True:
self.toc.sample = True
self.toc.title = self.toc.title + ' 샘플북'
replaces = [
(re.compile('<<content>>'), self.toc.export_content()),
(re.compile('<<author>>'), self.toc.author),
(re.compile('<<title>>'), self.toc.title),
(re.compile('<<date>>'), datetime.datetime.strptime(self.toc.dateOfPublished, '%Y-%m-%d').strftime('%Y')),
(re.compile('<<preface>>'), self.toc.export_preface()),
(re.compile('<<endpaper>>'), self.toc.export_endpaper()),
(re.compile('<<endnotes>>'), self.toc.export_appendix()),
]
for replace in replaces:
if replace[0].findall(template):
template = replace[0].sub(replace[1], template)
return template
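    # Template sketch (hypothetical fragment of Config().template): placeholders such as
    #   \title{<<title>>} \author{<<author>>} ... <<preface>> ... <<content>> ... <<endnotes>>
    # are substituted in place by the loop above; a placeholder that does not occur in the
    # template is simply skipped because findall() guards each substitution.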
def md_to_latex(self):
result = False
for content in self.toc.content:
content.write_latex()
return result
def add_toc(self, filename):
result = False
file = os.path.join(os.getcwd(), filename)
if os.path.exists(file) == False:
return result
with open(file, encoding='utf-8') as toc_file:
toc = yaml.load(toc_file)
result = True
self.toc = TableOfContent(toc)
return result
def manuscriptCount(self):
result = False
cite = {}
entries = []
if self.toc == None:
return result
for toc in self.toc:
for entry in toc.content:
entries.append(entry.filename)
for script in self.manuscripts:
needle = script.split('.')[0]
cite[needle] = entries.count(needle)
return cite
def manuscriptStatus(self):
self.orphan = []
self.overcite = []
for script in self.manuscripts:
cnt = list(
map(lambda x: '%s.md'%x.filename, self.toc.content)
).count(script)
if(cnt < 1):
self.orphan.append(script)
if(cnt > 1):
self.overcite.append(script)
return True
|
the-stack_0_7427 | # preprocessing.py
"""
parses MIMIC-CXR radiology reports from data/mimic-cxr-reports/ into data/train.csv,
data/validation.csv and data/test.csv. Each CSV holds four double-quoted columns:
"subject_id", "study_id", "findings" (the input text) and "impression" (the target summary).
Reports without a findings section or an impression section are omitted.
validation.csv and test.csv have the same structure as train.csv.
Whitespace and commas are stripped here; tokenization and lemmatization are left to the summarizer.
REQUIREMENTS:
- data/mimic-cxr-reports/*
- data/cxr-study-list.csv.gz
- overwrite data/train.csv
- overwrite data/test.csv
"""
import os;
import pandas as pd;
import re
TEST_FRACTION = 0.1 # fraction for test set
VALIDATION_FRACTION = 0.1
ROOT = os.path.dirname( os.path.abspath(__file__) );
LIST_FILE = os.path.join(ROOT, 'data', 'cxr-study-list.csv.gz');
REPORTS_DIR = os.path.join(ROOT, 'data', 'mimic-cxr-reports');
TRAIN_FILE = os.path.join(ROOT, 'data', 'train.csv');
TEST_FILE = os.path.join(ROOT, 'data', 'test.csv');
VALIDATION_FILE = os.path.join(ROOT, 'data', 'validation.csv');
def remove_notification_section(text):
"""
We noticed that some reports have a notification section after
the impressions (summary) section, which was impeding our data, so
we decided to remove this section all together. We use various rule-based
mechanisms to parse and remove the notification section.
params: text
returns: text with notification section removed
"""
idx = text.rfind("NOTIFICATION");
if( idx > 0 ):
text = text[:idx];
idx = text.rfind("telephone notification");
if( idx > 0 ):
text = text[:idx];
idx = text.rfind("Telephone notification");
if( idx > 0 ):
text = text[:idx];
idx = text.rfind("These findings were");
if( idx > 0 ):
text = text[:idx];
idx = text.rfind("Findings discussed");
if( idx > 0 ):
text = text[:idx];
idx = text.rfind("Findings were");
if( idx > 0 ):
text = text[:idx];
idx = text.rfind("This preliminary report");
if( idx > 0 ):
text = text[:idx];
idx = text.rfind("Reviewed with");
if( idx > 0 ):
text = text[:idx];
idx = text.rfind("A preliminary read");
if( idx > 0 ):
text = text[:idx];
return(text);
def sanitize(text):
"""
Cleanses the text to be written in CSV, which will be fed directly to
the summarizer. Tokenization and lemmatization is not performed in this
step, as the summarizer performs those directly.
params: text
returns: cleaned text
"""
text = text.strip();
text = re.sub("\n", "", text);
text = re.sub(",", "", text);
# Remove all text before FINDINGS: section
regex = r'^(.*finding.?:)'
if( re.search(regex, text, flags=re.IGNORECASE)==None ): #if no summary
return None;
text = re.sub(regex,"", text, flags=re.IGNORECASE);
text = remove_notification_section(text);
return(text);
def split(slicable, fraction):
"""
splits data into test-train set or dev-validation set; does not shuffle.
params: slicable - an object that responds to len() and [], works on dataframes
fraction - a value between 0 and 1
returns: (x, y) - where x has (1-fraction) percent entries and y has the rest
"""
partition = int(len(slicable) * (1.0 - fraction));
return( (slicable[:partition], slicable[partition:]) );
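# Example: split(list(range(10)), 0.2) keeps int(10 * 0.8) = 8 items in the first part,
# so it returns ([0..7], [8, 9]) -- no shuffling is performed.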
def parse_summary(text):
"""
parses and separates input text from summary in cxr reports, returns None if
not found
params: text
returns: None or [input_text, summary]
"""
regex = r'impression.?(?::|" ")'
if( re.search(regex, text, flags=re.IGNORECASE)==None ): #if no summary
return None;
data = re.split(regex, text, flags=re.IGNORECASE);
data[0] = data[0].strip();
data[1] = data[1].strip();
return(data);
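# Illustrative example (made-up report text):
#   parse_summary("Lungs are clear. IMPRESSION: No acute process.")
# returns ["Lungs are clear.", "No acute process."]; by this point sanitize() has already
# stripped everything up to the FINDINGS: header, so data[0] is the findings text.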
def write_csv(filename, reports):
"""
writes a csv file for summarization. The CSV file has four columns: "subject_id",
"study_id", "findings", and "impression" based on MIMIC-CXR reports. "findings"
contains the input text, and "impression" contains the true summary.
params: filename - name of csv file to write, will overwrite if it exists
reports - dataframe of cxr reports from cxr-study-list file
"""
print(f"Writing {filename}...");
f = open(filename, 'w');
f.write(f"\"subject_id\",\"study_id\",\"findings\",\"impression\"\n");
ommitted = 0;
progress = 1;
for report in reports:
x = open(os.path.join(REPORTS_DIR, report));
text = x.read();
x.close();
text = sanitize(text);
if( text==None ):
ommitted += 1;
continue; #toss out data and go to next textfile
if (progress % 10000 == 0):
print(f'Read {progress} files so far...');
progress += 1;
data = parse_summary(text);
if( (data==None) or (data[0]=='') or (data[1]=='') ):
ommitted += 1;
continue; #toss out data and go to next textfile
folders = report.split('/');
f.write(f"\"{folders[2]}\",\"{folders[3].split('.')[0]}\",\"{data[0]}\",\"{data[1]}\"\n");
f.close();
print(f"Ommited {ommitted} files out of {progress} total files in dataset.\n")
print("Done.\n");
print("================ Starting data preprocessing ==================");
print(f"Reading {os.path.basename(LIST_FILE)}...");
radiology_reports = pd.read_csv(LIST_FILE)['path']; # file paths as pandas series
train_reports, test_reports = split(radiology_reports, TEST_FRACTION);
print("Done.");
# if you want validation set:
train_reports, validation_reports = split(train_reports, VALIDATION_FRACTION / (1 - TEST_FRACTION));
write_csv(VALIDATION_FILE, validation_reports);
# sanity check
#print(train_reports);
#print(validation_reports);
#print(test_reports);
write_csv(TRAIN_FILE, train_reports);
write_csv(TEST_FILE, test_reports);
print("==================== End data preprocessing ======================");
|
the-stack_0_7428 | import threading
import time
from datetime import datetime
import schedule
import atexit
import SocketServer
import BioControle
import syslog
# ------------------------------------------------------------------------
def display_jobs():
print('------------------------------------------------------------------')
for job in schedule.jobs:
print(job)
print('------------------------------------------------------------------')
print()
# ------------------------------------------------------------------------
def run_threaded(job_func, run_time):
job_thread = threading.Thread(target=job_func, args=[run_time])
job_thread.start()
# ------------------------------------------------------------------------
@atexit.register
def cleanup():
date_time = datetime.now().strftime("%m/%d/%Y, %H:%M:%S")
syslog.syslog(syslog.LOG_INFO, '%s: Cleaning up before exit ' % (date_time))
print('%s: Cleaning up before exit ' % (date_time))
    # pump, circ, fan and userled are assumed to be module-level devices in BioControle
    # (the bottom of this file already uses BioControle.userled); bare names here would
    # raise a NameError at exit.
    BioControle.pump.off()
    BioControle.circ.off()
    BioControle.fan.off()
    BioControle.userled.off()
def process_request(data, server, client_sock, bio):
#print(data[0])
s = data[0].decode()
date_time = datetime.now().strftime("%m/%d/%Y, %H:%M:%S")
syslog.syslog(syslog.LOG_INFO, '%s: Received Message: << %s >> ' % (date_time, s))
print('%s: Received Message: << %s >> ' % (date_time, s))
if 'GET /?circ=' in s:
result = int(s[s.find('circ=')+5:s.find(' HTTP')],10)
server.send_response(client_sock, "Circulating for {} seconds".format(result));
run_threaded(bio.run_circulate, result)
#bio.run_circulate(result)
if 'GET /?pump=' in s:
result = int(s[s.find('pump=')+5:s.find(' HTTP')])
server.send_response(client_sock, "Pumping for {} seconds".format(result));
run_threaded(bio.run_pump, result)
#bio.run_pump(result)
if 'GET /?fan=' in s:
result = int(s[s.find('fan=')+4:s.find(' HTTP')])
server.send_response(client_sock, "Fanning for {} seconds".format(result));
run_threaded(bio.run_fan, result)
#bio.run_fan(result)
if 'GET /?status' in s:
status = "<html><head></head><body>"
status = status + "<h2>Current Schedule</h2><hr><ul>"
for job in schedule.jobs:
status = status + "<li>" + str(job) + "</li>\n"
status = status + "</ul></body>"
server.send_response(client_sock, status)
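# Request handling sketch (hypothetical raw request line): for "GET /?pump=120 HTTP/1.1"
# the parsing above extracts 120 and starts bio.run_pump for 120 seconds in a background
# thread, while "GET /?status HTTP/1.1" returns the HTML list of scheduled jobs instead.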
# MAIN
# ------------------------------------------------------------------------
def main():
server = SocketServer.SocketServer()
bio = BioControle.BioControle()
pump_duration = 300
fan_duration = 200
circ_duration = 1800
# Pump schedule
schedule.every().day.at("08:00").do(run_threaded, bio.run_pump, run_time=pump_duration)
schedule.every().day.at("13:00").do(run_threaded, bio.run_pump, run_time=pump_duration)
schedule.every().day.at("18:00").do(run_threaded, bio.run_pump, run_time=pump_duration)
#schedule.every().day.at("19:25").do(run_threaded, bio.run_pump, run_time=pump_duration)
# Fan schedule
schedule.every(2).hours.do(run_threaded, bio.run_fan, run_time=fan_duration)
# Circulation pump schedule
schedule.every().day.at("07:45").do(run_threaded, bio.run_circulate, run_time=circ_duration)
schedule.every().day.at("12:45").do(run_threaded, bio.run_circulate, run_time=circ_duration)
schedule.every().day.at("17:45").do(run_threaded, bio.run_circulate, run_time=circ_duration)
# Job display schedule
#schedule.every(15).minutes.do(display_jobs)
display_jobs()
date_time = datetime.now().strftime("%m/%d/%Y, %H:%M:%S")
syslog.syslog('%s: Server Ready ' % (date_time))
print('%s: Server Ready ' % (date_time))
sleepTimer = 10;
while True:
try:
schedule.run_pending()
try:
(data, client_sock) = server.check_select()
if data:
process_request(data, server, client_sock, bio)
server.close_client(client_sock)
except:
pass
time.sleep(sleepTimer)
BioControle.userled.toggle()
except (KeyboardInterrupt, SystemExit):
cleanup()
exit()
# ------------------------------------------------------------------------
main()
print("exit") |
the-stack_0_7430 | # Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved.
from collections import Counter
import os.path
import shutil
import tempfile
import queue
import nose.tools
import numpy as np
import PIL.Image
from . import create_db
from digits import test_utils
test_utils.skipIfNotFramework('none')
class BaseTest():
"""
Provides some helpful files and utilities
"""
@classmethod
def setUpClass(cls):
cls.empty_file = tempfile.mkstemp()
cls.empty_dir = tempfile.mkdtemp()
# Create one good textfile
cls.good_file = tempfile.mkstemp()
# Create a color image
cls.color_image_file = tempfile.mkstemp(suffix='.png')
cls.numpy_image_color = np.ones((8, 10, 3), dtype='uint8')
cls.pil_image_color = PIL.Image.fromarray(cls.numpy_image_color)
cls.pil_image_color.save(cls.color_image_file[1])
# Create a grayscale image
cls.gray_image_file = tempfile.mkstemp(suffix='.png')
cls.numpy_image_gray = np.ones((8, 10), dtype='uint8')
cls.pil_image_gray = PIL.Image.fromarray(cls.numpy_image_gray)
cls.pil_image_gray.save(cls.gray_image_file[1])
cls.image_count = 0
for i in range(3):
for j in range(3):
print((type(cls.good_file[0]), type(cls.color_image_file[1])))
tmp = '%s %s\n' % (cls.color_image_file[1], i)
os.write(cls.good_file[0], tmp.encode(encoding='UTF-8'))
os.write(cls.good_file[0], tmp.encode(encoding='UTF-8'))
cls.image_count += 2
@classmethod
def tearDownClass(cls):
for f in cls.empty_file, cls.good_file, cls.color_image_file, cls.gray_image_file:
try:
os.close(f[0])
os.remove(f[1])
except OSError:
pass
try:
shutil.rmtree(cls.empty_dir)
except OSError:
raise
class TestFillLoadQueue(BaseTest):
def test_valid_file(self):
for shuffle in True, False:
yield self.check_valid_file, shuffle
def check_valid_file(self, shuffle):
q = queue.Queue()
result = create_db._fill_load_queue(self.good_file[1], q, shuffle)
assert result == self.image_count, 'lines not added'
assert q.qsize() == self.image_count, 'queue not full'
def test_empty_file(self):
for shuffle in True, False:
yield self.check_empty_file, shuffle
def check_empty_file(self, shuffle):
q = queue.Queue()
nose.tools.assert_raises(
create_db.BadInputFileError,
create_db._fill_load_queue,
self.empty_file[1], q, shuffle)
class TestParseLine():
def test_good_lines(self):
for label, line in [
(0, '/path/image.jpg 0'),
(1, 'image.jpg 1'),
(2, 'image.jpg 2\n'),
(3, 'image.jpg 3'),
(4, 'spaces in filename.jpg 4'),
]:
yield self.check_good_line, line, label
def check_good_line(self, line, label):
c = Counter()
p, l = create_db._parse_line(line, c)
assert l == label, 'parsed label wrong'
assert c[l] == 1, 'distribution is wrong'
def test_bad_lines(self):
for line in [
'nolabel.jpg',
'non-number.jpg five',
'negative.jpg -1',
]:
yield self.check_bad_line, line
def check_bad_line(self, line):
nose.tools.assert_raises(
create_db.ParseLineError,
create_db._parse_line,
line, Counter()
)
class TestCalculateBatchSize():
def test(self):
for count, batch_size in [
(1, 1),
(50, 50),
(100, 100),
(200, 100),
]:
yield self.check, count, batch_size
def check(self, count, batch_size):
assert create_db._calculate_batch_size(count) == batch_size
class TestCalculateNumThreads():
def test(self):
for batch_size, shuffle, num in [
(1000, True, 10),
(1000, False, 1),
(100, True, 10),
(100, False, 1),
(50, True, 7),
(4, True, 2),
(1, True, 1),
]:
yield self.check, batch_size, shuffle, num
def check(self, batch_size, shuffle, num):
assert create_db._calculate_num_threads(
batch_size, shuffle) == num
class TestInitialImageSum():
def test_color(self):
s = create_db._initial_image_sum(10, 10, 3)
assert s.shape == (10, 10, 3)
assert s.dtype == 'float64'
def test_grayscale(self):
s = create_db._initial_image_sum(10, 10, 1)
assert s.shape == (10, 10)
assert s.dtype == 'float64'
class TestImageToDatum(BaseTest):
def test(self):
for compression in None, 'png', 'jpg':
yield self.check_color, compression
yield self.check_grayscale, compression
def check_color(self, compression):
d = create_db._array_to_datum(self.numpy_image_color, 1, compression)
assert d.height == self.numpy_image_color.shape[0]
assert d.width == self.numpy_image_color.shape[1]
assert d.channels == 3
assert d.encoded == bool(compression)
def check_grayscale(self, compression):
d = create_db._array_to_datum(self.numpy_image_gray, 1, compression)
assert d.height == self.numpy_image_gray.shape[0]
assert d.width == self.numpy_image_gray.shape[1]
assert d.channels == 1
assert d.encoded == bool(compression)
class TestSaveMeans():
def test(self):
for color in True, False:
d = tempfile.mkdtemp()
for filename in 'mean.jpg', 'mean.png', 'mean.npy', 'mean.binaryproto':
yield self.check, d, filename, color
shutil.rmtree(d)
def check(self, directory, filename, color):
filename = os.path.join(directory, filename)
if color:
s = np.ones((8, 10, 3), dtype='float64')
else:
s = np.ones((8, 10), dtype='float64')
create_db._save_means(s, 2, [filename])
assert os.path.exists(filename)
class BaseCreationTest(BaseTest):
def test_image_sizes(self):
for width in 8, 12:
for channels in 1, 3:
yield self.check_image_sizes, width, channels, False
def check_image_sizes(self, width, channels, shuffle):
create_db.create_db(self.good_file[1], os.path.join(self.empty_dir, 'db'),
width, 10, channels, self.BACKEND)
def test_no_shuffle(self):
create_db.create_db(self.good_file[1], os.path.join(self.empty_dir, 'db'),
10, 10, 1, self.BACKEND, shuffle=False)
def test_means(self):
mean_files = []
for suffix in 'jpg', 'npy', 'png', 'binaryproto':
mean_files.append(os.path.join(self.empty_dir, 'mean.%s' % suffix))
create_db.create_db(self.good_file[1], os.path.join(self.empty_dir, 'db'),
10, 10, 1, self.BACKEND, mean_files=mean_files)
class TestLmdbCreation(BaseCreationTest):
BACKEND = 'lmdb'
class TestHdf5Creation(BaseCreationTest):
BACKEND = 'hdf5'
def test_dset_limit(self):
db_dir = os.path.join(self.empty_dir, 'db')
create_db.create_db(self.good_file[1], db_dir,
10, 10, 1, 'hdf5', hdf5_dset_limit=10 * 10)
with open(os.path.join(db_dir, 'list.txt')) as infile:
lines = infile.readlines()
assert len(lines) == self.image_count, '%d != %d' % (len(lines), self.image_count)
|
the-stack_0_7433 | # -*- coding: utf-8 -*-
"""
Created on Mon May 31 18:14:32 2021
@author: ilayda
"""
#1. libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
#2. data preprocessing
#2.1 loading the data
veriler = pd.read_csv('odev_tenis.txt.crdownload')
#pd.read_csv("veriler.csv")
#test
print(veriler)
'''
#encoder: let's convert Categorical -> Numeric.
play = veriler.iloc[:,-1:].values
print(play)
from sklearn import preprocessing
le = preprocessing.LabelEncoder()
play[:,-1] = le.fit_transform(veriler.iloc[:,-1])
print(play)
#we converted the second-to-last column to 1s and 0s
windy = veriler.iloc[:,-2:-1].values
print(play)
from sklearn import preprocessing
le = preprocessing.LabelEncoder()
windy[:,-1] = le.fit_transform(veriler.iloc[:,-1])
print(windy)
#encoder: Categorical -> Numeric
c = veriler.iloc[:,-1:].values
print(c)
from sklearn import preprocessing
le = preprocessing.LabelEncoder()
c[:,-1] = le.fit_transform(veriler.iloc[:,-1])
print(c)
'''
#1) CONVERTING
#instead of converting column by column as above, the single line below converts everything to 1s and 0s.
#sunny, rainy, overcast, windy and play were converted to 1s and 0s with LabelEncoder.
from sklearn.preprocessing import LabelEncoder
veriler2 = veriler.apply(LabelEncoder().fit_transform)
#we one-hot encoded them because they were already numbers, not True/False values.
#outlook column to one-hot
c = veriler2.iloc[:,:1]
from sklearn import preprocessing
ohe = preprocessing.OneHotEncoder()
c=ohe.fit_transform(c).toarray()
print(c)
c = veriler2.iloc[:,:1]
from sklearn import preprocessing
ohe = preprocessing.OneHotEncoder()
c=ohe.fit_transform(c).toarray()
print(c)
#2) Let's ADD everything into one table
#havadurumu is a dataframe holding overcast, rainy and sunny as 1s and 0s.
havadurumu = pd.DataFrame(data = c, index = range(14), columns=['o','r','s'])
#with sonveriler we append the raw temperature and humidity columns from the veriler table.
sonveriler = pd.concat([havadurumu,veriler.iloc[:,1:3]],axis = 1)
#now prepend the label-encoded windy and play columns from veriler2 (printed above) to the table.
sonveriler = pd.concat([veriler2.iloc[:,-2:],sonveriler], axis = 1)
#3) SPLITTING THE DATA
#humidity is the dependent variable, so we split everything except it into a separate table
#y_train and y_test will hold humidity: sonveriler.iloc[:,-1:]
from sklearn.model_selection import train_test_split
x_train, x_test,y_train,y_test = train_test_split(sonveriler.iloc[:,:-1],sonveriler.iloc[:,-1:],test_size=0.33, random_state=0)
from sklearn.linear_model import LinearRegression
regressor = LinearRegression()
regressor.fit(x_train,y_train)
#let's predict humidity and compare the predictions with y_test.
y_pred = regressor.predict(x_test)
print(y_pred)
#4) BACKWARD ELIMINATION
#measure the fit to decide which variables to drop. First define all the variables.
#there are 14 rows.
import statsmodels.api as sm
X = np.append(arr = np.ones((14,1)).astype(int), values=sonveriler.iloc[:,:-1], axis=1 )
#there are 6 columns. Define them, then let sm.OLS fit all the variables against sonveriler.iloc[:,-1:]
X_l=sonveriler.iloc[:,[0,1,2,3,4,5]].values
X_l=np.array(X_l,dtype=float)
model=sm.OLS(sonveriler.iloc[:,-1:],X_l).fit()
print(model.summary())
#4) DROPPING DATA
#in the OLS report, x1 has the highest p>|t| value (0.593), so we drop it.
#keep everything from the second column on, i.e. drop windy: [:,1:]
sonveriler = sonveriler.iloc[:,1:]
#print the new table: without x1 (windy) the column count drops to 5, hence iloc[:,[0,1,2,3,4]]
import statsmodels.api as sm
X = np.append(arr = np.ones((14,1)).astype(int), values=sonveriler.iloc[:,:-1], axis=1 )
X_l=sonveriler.iloc[:,[0,1,2,3,4]].values
X_l=np.array(X_l,dtype=float)
model=sm.OLS(sonveriler.iloc[:,-1:],X_l).fit()
print(model.summary())
#5) DROP THE SAME x1 (windy) COLUMN FROM X_TRAIN AND X_TEST
x_train = x_train.iloc[:,1:]
x_test = x_test.iloc[:,1:]
#comparing y_test and y_pred afterwards shows the improved fit.
regressor.fit(x_train,y_train)
y_pred = regressor.predict(x_test)
#the first y_pred estimate was 84; after dropping x1 and improving the fit it became 77, much closer to the 70 in y_test.
|
the-stack_0_7434 | # -----------------------------------------------------------------------------
#
# Copyright (C) The BioDynaMo Project.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
#
# See the LICENSE file distributed with this work for details.
# See the NOTICE file distributed with this work for additional information
# regarding copyright ownership.
#
# -----------------------------------------------------------------------------
import os
import shutil
from print_command import Print
def CopySupportFiles(sim_name):
SUPPORT_DIR = os.path.join(os.environ['BDMSYS'], 'share', 'util', 'support_files')
Print.new_step("Copy additional support files")
for filename in os.listdir(SUPPORT_DIR):
full_file_name = os.path.join(SUPPORT_DIR, filename)
if os.path.isfile(full_file_name):
shutil.copy(full_file_name, sim_name)
|
the-stack_0_7436 | """
Copyright (C) 2019 Authors of gHHC
This file is part of "hyperbolic_hierarchical_clustering"
http://github.com/nmonath/hyperbolic_hierarchical_clustering
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import sys
import os
import datetime
import numpy as np
from absl import logging
import tensorflow as tf
from ghhc.util.Config import Config
from ghhc.util.load import load
from ghhc.util.initializers import random_pts,init_from_afkmc2_and_hac,init_from_rand_and_hac
from ghhc.model.ghhc import gHHCTree, gHHCInference
from ghhc.util.io import mkdir_p
tf.enable_eager_execution()
logging.set_verbosity(logging.INFO)
if __name__ == "__main__":
config = Config(sys.argv[1])
now = datetime.datetime.now()
ts = "{:04d}-{:02d}-{:02d}-{:02d}-{:02d}-{:02d}".format(
now.year, now.month, now.day, now.hour, now.minute, now.second)
config.exp_out_dir = os.path.join(config.exp_out_base, config.dataset_name, config.alg_name, "%s-%s" %(ts,config.to_filename()))
config.checkpoint_dir = os.path.join(config.exp_out_dir, 'models', 'ckpt')
mkdir_p(config.exp_out_dir)
mkdir_p(os.path.join(config.exp_out_dir, 'models'))
config.save_config(config.exp_out_dir,config.to_filename() + ".json")
config.save_config(config.exp_out_dir)
pids, lbls, dataset = load(config.inference_file, config)
dev_pids, dev_lbls, dev_dataset = load(config.dev_file, config)
if config.random_projection is not None:
logging.info('Using random projection: %s', config.random_projection)
_proj = np.random.randn(dataset.shape[1], config.random_projection).astype(np.float32)
def p(x):
projd = tf.matmul(x, _proj)
projd /= tf.linalg.norm(projd,axis=1,keepdims=True)
projd = tf.clip_by_norm(projd, 0.9, axes=[1])
return projd
proj = lambda x: p(x)
init_tree = random_pts(proj(dataset).numpy(), config.num_internals, config.random_pts_scale)
else:
if config.init_method == 'randompts':
init_tree = random_pts(dataset, config.num_internals, config.random_pts_scale)
elif config.init_method == 'afkmc2hac':
init_tree = init_from_afkmc2_and_hac(dataset, config.num_internals)
elif config.init_method == 'randhac':
init_tree = init_from_rand_and_hac(dataset, config.num_internals, config.random_pts_scale)
proj = None
tree = gHHCTree(init_tree.copy(), config=config, projection=proj)
optimizer = tf.train.GradientDescentOptimizer(config.tree_learning_rate)
inf = gHHCInference(tree, optimizer, config, dev_dataset, dev_lbls)
samples = np.load(config.sample_file)
inf.inference(samples, dataset, config.batch_size) |
the-stack_0_7437 | """Limited version of os module: only keep what is more or less relevant in a
browser context
"""
import sys
error = OSError
name = 'posix'
linesep = '\n'
from posix import *
import posixpath as path
sys.modules['os.path'] = path
from os.path import (curdir, pardir, sep, pathsep, defpath, extsep, altsep,
devnull)
environ = {'HOME': __BRYTHON__.curdir,
'PYTHONPATH': __BRYTHON__.brython_path
}
def _get_exports_list(module):
try:
return list(module.__all__)
except AttributeError:
return [n for n in dir(module) if n[0] != '_']
def getenv(key, default=None):
"""Get an environment variable, return None if it doesn't exist.
The optional second argument can specify an alternate default.
key, default and the result are str."""
return environ.get(key, default)
supports_bytes_environ = True
def chdir(path):
__BRYTHON__.curdir = path
def fsencode(filename):
"""
Encode filename to the filesystem encoding with 'surrogateescape' error
handler, return bytes unchanged. On Windows, use 'strict' error handler if
the file system encoding is 'mbcs' (which is the default encoding).
"""
encoding = sys.getfilesystemencoding()
errors = 'surrogateescape'
if isinstance(filename, bytes):
return filename
elif isinstance(filename, str):
return filename.encode(encoding, errors)
else:
raise TypeError("expect bytes or str, not %s" % type(filename).__name__)
def fsdecode(filename):
"""
Decode filename from the filesystem encoding with 'surrogateescape' error
handler, return str unchanged. On Windows, use 'strict' error handler if
the file system encoding is 'mbcs' (which is the default encoding).
"""
encoding = sys.getfilesystemencoding()
errors = 'surrogateescape'
if isinstance(filename, str):
return filename
elif isinstance(filename, bytes):
return filename.decode(encoding, errors)
else:
raise TypeError("expect bytes or str, not %s" % type(filename).__name__)
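# Round-trip sketch (assuming a UTF-8 filesystem encoding):
#   fsencode('café')         -> b'caf\xc3\xa9'
#   fsdecode(b'caf\xc3\xa9') -> 'café'
# bytes passed to fsencode() and str passed to fsdecode() are returned unchanged.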
def getcwd():
return __BRYTHON__.curdir
_set = set()
supports_dir_fd = _set
supports_effective_ids = _set
supports_fd = _set
supports_follow_symlinks = _set
|
the-stack_0_7439 | from datetime import date
pessoas = {}
listaDePessoas = []
hoje = date.today().year
somaIdade = mediaIdade = 0
while True:
pessoas.clear()
pessoas['nome'] = str(input('Nome: ')).strip()
while True:
pessoas['sexo'] = str(input('Sexo [M/F]: ')).upper()[0]
if pessoas['sexo'] in 'MF':
break
print('Erro! Digite apenas "M" ou "F".')
idade = int(input('Ano de Nascimento: '))
pessoas['idade'] = hoje - idade
somaIdade += pessoas['idade']
listaDePessoas.append(pessoas.copy())
while True:
continuar = str(input('Deseja continuar?[S/N]: ')).upper()[0]
if continuar in 'SN':
break
print('Erro! Digite apenas "SIM" ou "NÃO".')
if continuar in 'N':
break
pessoas.clear()
print('-=' * 25)
print(listaDePessoas)
print('-=' * 12, 'Total de Pessoas Cadastradas ', '-=' * 12)
print(f'Total de pessoas cadastradas: {len(listaDePessoas)}')
print('-=' * 10, 'A média das Idades ', '-=' * 10)
mediaIdade = somaIdade / len(listaDePessoas)
print(f'A média das Idades: {mediaIdade} anos.')
print('-=' * 10, 'Lista com Todas as Mulheres', '-=' * 10)
for elemento in listaDePessoas:
if elemento['sexo'] in 'F':
print(f'{elemento["nome"]}')
print()
print('-=' * 10, 'Lista de Pessoas Acima da Média de Idade ', '-=' * 10)
for elemento in listaDePessoas:
if elemento['idade'] >= mediaIdade:
print(f'{elemento["nome"]} está acima da Média da Idade. ')
print() |
the-stack_0_7440 | import logging
from van.adam import transactions as t
from van.adam.inventory_methods.output import calculate_output
from van.adam.inventory_methods.sell_outs import take_all, take_less
from van.adam.transactions import is_taxable
def calc_profit(sell: tuple) -> tuple:
"""
Calculates the buy price, sell price, total profit, and taxable profit from a sell transaction tuple.
:param sell: the sell transaction as a tuple (date: datetime.date, quantity: float, price: float)
:return: a tuple containing (weighted_buy_price: float, sell_price: float, total_profit: float, taxable_profit:
float); all numbers are rounded to two decimal places
"""
sell_date, total_sell_quantity, sell_price = sell
sell_outs = []
quantity = 0.0
while quantity != total_sell_quantity:
log.debug("quantity: {}".format(quantity))
try:
buy = t.buys.pop()
except IndexError:
log.error("Not enough crypto assets for the sale {}: list index out of range!".format(sell))
latest_sell_out = sell_outs.pop()
t.buys.insert(len(t.buys), latest_sell_out)
log.warning("Re-added latest sell out {} to buy transactions".format(latest_sell_out))
return 0.0, 0.0, 0.0, 0.0
buy_date, buy_quantity, buy_price = buy
# round buy_quantity to 10 decimal places to avoid IndexError above
buy_quantity = round(buy_quantity, 10)
taxable = is_taxable(buy_date, sell_date)
if (quantity + buy_quantity) == total_sell_quantity:
quantity = take_all(quantity, buy_quantity, buy_price, sell_outs, taxable)
elif (quantity + buy_quantity) < total_sell_quantity:
quantity = take_all(quantity, buy_quantity, buy_price, sell_outs, taxable)
elif (quantity + buy_quantity) > total_sell_quantity:
quantity, updated_buy = take_less(total_sell_quantity, quantity, buy_date, buy_quantity, buy_price,
sell_outs, taxable)
t.buys.insert(len(t.buys), updated_buy)
return calculate_output(sell_outs, total_sell_quantity, sell_price)
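# Hedged usage sketch (not part of the original module). The exact shape of
# van.adam.transactions.buys is an assumption here; the sell tuple follows the
# docstring above: (date, quantity, price).
#
#   from datetime import date
#   t.buys = [(date(2020, 1, 1), 1.0, 5000.0)]                 # one earlier buy
#   buy, sell, profit, taxable = calc_profit((date(2021, 6, 1), 0.5, 20000.0))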
log = logging.getLogger()
|
the-stack_0_7441 | """Setup for TcEx Module."""
# standard library
import os
# third-party
from setuptools import find_packages, setup
metadata = {}
metadata_file = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'tcex', '__metadata__.py')
with open(
metadata_file,
encoding='utf-8',
) as f:
exec(f.read(), metadata) # nosec; pylint: disable=exec-used
if not metadata:
raise RuntimeError(f'Could not load metadata file ({metadata_file}).')
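# For context, a hedged sketch of the kind of names tcex/__metadata__.py is
# expected to define. The keys are inferred from the setup() call below; the
# example values are hypothetical, not copied from the real file.
#
#   __author__ = 'ThreatConnect'
#   __description__ = 'ThreatConnect Exchange App Framework'
#   __license__ = 'Apache License, Version 2'
#   __package_name__ = 'tcex'
#   __url__ = 'https://github.com/ThreatConnect-Inc/tcex'
#   __version__ = '2.0.0'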
with open('README.md') as f:
readme = f.read()
dev_packages = [
'bandit',
'black',
'CommonMark',
'deepdiff',
'flake8',
# isort 5 currently causes issues with pylint
'isort>=4,<5',
'mako',
'pre-commit',
'pydocstyle',
'pylint',
'pytest',
'pytest-cov',
'pytest-html',
'pytest-xdist',
'pyupgrade',
'recommonmark',
'reno',
'sphinx',
'sphinx-rtd-theme',
]
setup(
author=metadata['__author__'],
author_email=metadata['__author_email__'],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Natural Language :: English',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
'Topic :: Security',
],
description=metadata['__description__'],
download_url=metadata['__download_url__'],
extras_require={'dev': dev_packages, 'develop': dev_packages, 'development': dev_packages},
include_package_data=True,
install_requires=[
'colorama',
'future',
'hvac',
'inflect',
'jmespath',
'jsonschema',
'lark',
'paho-mqtt',
'parsedatetime',
'pyaes',
'python-dateutil',
'pytz',
'redis',
'requests',
'six',
'stdlib-list',
'stix2',
'tzlocal',
'wrapt',
],
license=metadata['__license__'],
long_description=readme,
long_description_content_type='text/markdown',
name=metadata['__package_name__'],
packages=find_packages(exclude=['tests', 'tests.*']),
package_data={'': ['*.json', '*.lark']},
package_dir={'tcex': 'tcex'},
project_urls={
'Documentation': 'https://threatconnect-inc.github.io/tcex/',
'Source': 'https://github.com/ThreatConnect-Inc/tcex',
},
python_requires='>=3.6',
scripts=[
'bin/tcinit',
'bin/tcinit.cmd',
'bin/tclib',
'bin/tclib.cmd',
'bin/tcpackage',
'bin/tcpackage.cmd',
'bin/tctest',
'bin/tctest.cmd',
'bin/tcvalidate',
'bin/tcvalidate.cmd',
],
url=metadata['__url__'],
version=metadata['__version__'],
zip_safe=True,
)
|
the-stack_0_7442 | import numpy as np
import tensorflow as tf
from tensorflow.contrib.framework.python.ops import arg_scope
import data_iter
import nn_extra_gauss
import nn_extra_nvp
from config_rnn import defaults
batch_size = 32
sample_batch_size = 1
n_samples = 4
rng = np.random.RandomState(42)
rng_test = np.random.RandomState(317070)
seq_len = defaults.seq_len
eps_corr = defaults.eps_corr
mask_dims = defaults.mask_dims
nonlinearity = tf.nn.elu
weight_norm = True
train_data_iter = data_iter.BaseExchSeqDataIterator(seq_len=seq_len, batch_size=batch_size,
set='train', rng=rng, digits=[0, 2, 4, 6, 8])
test_data_iter = data_iter.BaseExchSeqDataIterator(seq_len=seq_len, batch_size=batch_size, set='test',
digits=[1, 3, 5, 7, 9], rng=rng_test)
valid_data_iter = data_iter.BaseExchSeqDataIterator(seq_len=seq_len, batch_size=batch_size,
set='test', rng=rng_test, digits=[0, 2, 4, 6, 8])
test_data_iter2 = data_iter.BaseTestBatchSeqDataIterator(seq_len=seq_len,
set='test',
rng=rng_test,
digits=[1, 3, 5, 7, 9])
obs_shape = train_data_iter.get_observation_size() # (seq_len, 28,28,1)
print('obs shape', obs_shape)
ndim = np.prod(obs_shape[1:])
corr_init = np.ones((ndim,), dtype='float32') * 0.1
optimizer = 'rmsprop'
learning_rate = 0.001
lr_decay = 0.999995
max_iter = 50000
save_every = 1000
validate_every = 1000
n_valid_batches = 20
scale_student_grad = 0.
student_grad_schedule = {0: 0., 100: 0.1}
nvp_layers = []
nvp_dense_layers = []
student_layer = None
def build_model(x, init=False, sampling_mode=False):
global nvp_layers
global nvp_dense_layers
with arg_scope([nn_extra_nvp.conv2d_wn, nn_extra_nvp.dense_wn], init=init):
if len(nvp_layers) == 0:
build_nvp_model()
if len(nvp_dense_layers) == 0:
build_nvp_dense_model()
global student_layer
if student_layer is None:
student_layer = nn_extra_gauss.GaussianRecurrentLayer(shape=(ndim,), corr_init=corr_init)
x_shape = nn_extra_nvp.int_shape(x)
x_bs = tf.reshape(x, (x_shape[0] * x_shape[1], x_shape[2], x_shape[3], x_shape[4]))
x_bs_shape = nn_extra_nvp.int_shape(x_bs)
log_det_jac = tf.zeros(x_bs_shape[0])
logit_layer = nn_extra_nvp.LogitLayer()
scale_layer = nn_extra_nvp.ScaleLayer()
y, log_det_jac = scale_layer.forward_and_jacobian(x_bs, None, log_det_jac)
y, log_det_jac = logit_layer.forward_and_jacobian(y, None, log_det_jac)
# construct forward pass
z = None
for layer in nvp_layers:
y, z, log_det_jac = layer.forward_and_jacobian(y, z, log_det_jac)
z = tf.concat([z, y], 3)
for layer in nvp_dense_layers:
z, _, log_det_jac = layer.forward_and_jacobian(z, None, log_det_jac)
z_shape = nn_extra_nvp.int_shape(z)
z_vec = tf.reshape(z, (x_shape[0], x_shape[1], -1))
log_det_jac = tf.reshape(log_det_jac, (x_shape[0], x_shape[1]))
log_probs = []
z_samples = []
latent_log_probs = []
latent_log_probs_prior = []
if mask_dims:
mask_dim = tf.greater(student_layer.corr, tf.ones_like(student_layer.corr) * eps_corr)
mask_dim = tf.cast(mask_dim, tf.float32)
else:
mask_dim = None
with tf.variable_scope("one_step") as scope:
student_layer.reset()
for i in range(seq_len):
if sampling_mode:
z_sample = student_layer.sample(nr_samples=n_samples)
z_samples.append(z_sample)
latent_log_prob = student_layer.get_log_likelihood(z_sample[:, 0, :])
latent_log_probs.append(latent_log_prob)
else:
latent_log_prob = student_layer.get_log_likelihood(z_vec[:, i, :], mask_dim=mask_dim)
latent_log_probs.append(latent_log_prob)
log_prob = latent_log_prob + log_det_jac[:, i]
log_probs.append(log_prob)
latent_log_prob_prior = student_layer.get_log_likelihood_under_prior(z_vec[:, i, :],
mask_dim=mask_dim)
latent_log_probs_prior.append(latent_log_prob_prior)
student_layer.update_distribution(z_vec[:, i, :])
scope.reuse_variables()
if sampling_mode:
# one more sample after seeing the last element in the sequence
z_sample = student_layer.sample(nr_samples=n_samples)
z_samples.append(z_sample)
z_samples = tf.concat(z_samples, 1)
latent_log_prob = student_layer.get_log_likelihood(z_sample[:, 0, :])
latent_log_probs.append(latent_log_prob)
z_samples_shape = nn_extra_nvp.int_shape(z_samples)
z_samples = tf.reshape(z_samples,
(z_samples_shape[0] * z_samples_shape[1],
z_shape[1], z_shape[2], z_shape[3])) # (n_samples*seq_len, z_img_shape)
log_det_jac = tf.zeros(z_samples_shape[0] * z_samples_shape[1])
for layer in reversed(nvp_dense_layers):
z_samples, _, log_det_jac = layer.backward(z_samples, None, log_det_jac)
x_samples = None
for layer in reversed(nvp_layers):
x_samples, z_samples, log_det_jac = layer.backward(x_samples, z_samples, log_det_jac)
x_samples, log_det_jac = logit_layer.backward(x_samples, None, log_det_jac)
x_samples, log_det_jac = scale_layer.backward(x_samples, None, log_det_jac)
x_samples = tf.reshape(x_samples,
(z_samples_shape[0], z_samples_shape[1], x_shape[2], x_shape[3], x_shape[4]))
log_det_jac = tf.reshape(log_det_jac, (z_samples_shape[0], z_samples_shape[1]))
latent_log_probs = tf.stack(latent_log_probs, axis=1)
for i in range(seq_len + 1):
log_prob = latent_log_probs[:, i] - log_det_jac[:, i]
log_probs.append(log_prob)
log_probs = tf.stack(log_probs, axis=1)
return x_samples, log_probs
log_probs = tf.stack(log_probs, axis=1)
latent_log_probs = tf.stack(latent_log_probs, axis=1)
latent_log_probs_prior = tf.stack(latent_log_probs_prior, axis=1)
return log_probs, latent_log_probs, latent_log_probs_prior, z_vec
def build_nvp_model():
global nvp_layers
num_scales = 2
num_filters = 32
for scale in range(num_scales - 1):
nvp_layers.append(
nn_extra_nvp.CouplingLayerConv('checkerboard0', name='Checkerboard%d_1' % scale,
nonlinearity=nonlinearity, weight_norm=weight_norm,
num_filters=num_filters))
nvp_layers.append(
nn_extra_nvp.CouplingLayerConv('checkerboard1', name='Checkerboard%d_2' % scale,
nonlinearity=nonlinearity, weight_norm=weight_norm,
num_filters=num_filters))
nvp_layers.append(
nn_extra_nvp.CouplingLayerConv('checkerboard0', name='Checkerboard%d_3' % scale,
nonlinearity=nonlinearity, weight_norm=weight_norm,
num_filters=num_filters))
nvp_layers.append(nn_extra_nvp.SqueezingLayer(name='Squeeze%d' % scale))
nvp_layers.append(
nn_extra_nvp.CouplingLayerConv('channel0', name='Channel%d_1' % scale, nonlinearity=nonlinearity,
weight_norm=weight_norm,
num_filters=num_filters))
nvp_layers.append(
nn_extra_nvp.CouplingLayerConv('channel1', name='Channel%d_2' % scale, nonlinearity=nonlinearity,
weight_norm=weight_norm, num_filters=num_filters))
nvp_layers.append(
nn_extra_nvp.CouplingLayerConv('channel0', name='Channel%d_3' % scale, nonlinearity=nonlinearity,
weight_norm=weight_norm, num_filters=num_filters))
nvp_layers.append(nn_extra_nvp.FactorOutLayer(scale, name='FactorOut%d' % scale))
# final layer
scale = num_scales - 1
nvp_layers.append(
nn_extra_nvp.CouplingLayerConv('checkerboard0', name='Checkerboard%d_1' % scale,
nonlinearity=nonlinearity, weight_norm=weight_norm, num_filters=num_filters))
nvp_layers.append(
nn_extra_nvp.CouplingLayerConv('checkerboard1', name='Checkerboard%d_2' % scale,
nonlinearity=nonlinearity, weight_norm=weight_norm, num_filters=num_filters))
nvp_layers.append(
nn_extra_nvp.CouplingLayerConv('checkerboard0', name='Checkerboard%d_3' % scale,
nonlinearity=nonlinearity, weight_norm=weight_norm, num_filters=num_filters))
nvp_layers.append(
nn_extra_nvp.CouplingLayerConv('checkerboard1', name='Checkerboard%d_4' % scale,
nonlinearity=nonlinearity, weight_norm=weight_norm, num_filters=num_filters))
nvp_layers.append(nn_extra_nvp.FactorOutLayer(scale, name='FactorOut%d' % scale))
def build_nvp_dense_model():
global nvp_dense_layers
for i in range(6):
mask = 'even' if i % 2 == 0 else 'odd'
name = '%s_%s' % (mask, i)
nvp_dense_layers.append(
nn_extra_nvp.CouplingLayerDense(mask, name=name, nonlinearity=nonlinearity, n_units=256,
weight_norm=weight_norm))
def loss(log_probs):
return -tf.reduce_mean(log_probs)
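# Hedged sketch of how a training driver typically consumes this config module
# (the driver below is an assumption, not part of this file). The graph is
# built once with init=True for data-dependent weight-norm initialisation and
# then reused for the actual training pass.
#
#   x_in = tf.placeholder(tf.float32, shape=(batch_size,) + obs_shape)
#   build_model(x_in, init=True)                  # data-dependent init pass
#   log_probs, _, _, _ = build_model(x_in)        # reusable training graph
#   train_loss = loss(log_probs)
#   train_op = tf.train.RMSPropOptimizer(learning_rate).minimize(train_loss)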
|
the-stack_0_7443 | # -*- coding: utf-8 -*-
# /usr/bin/env python
# @Time : 20-11-18 8:25 PM
# @File : test.py
# @Software: PyCharm
# try/except/else while/else break continue
# while True:
# reply = input('Enter txt:')
# if reply == 'stop':
# break
# try:
# num = int(reply)
# except:
# print('Bad!' * 8)
# else:
# print(int(reply)**2)
#
# print('Bye')
while True:
reply = input('Enter txt:')
if reply == 'stop':
break
elif not reply.isdigit():
print('Bad!'*8)
else:
num = int(reply)
if num < 20:
print('low'*4)
else:
print(num**2)
print('Bye'*3)
|
the-stack_0_7446 | """empty message
Revision ID: d68e85682c2c
Revises: fed65154fba4
Create Date: 2018-09-27 11:27:26.206337
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'd68e85682c2c'
down_revision = 'fed65154fba4'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('collections',
sa.Column('user_id', sa.Integer(), nullable=True),
sa.Column('posts_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['posts_id'], ['posts.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['user.id'], )
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('collections')
# ### end Alembic commands ###
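# A short usage note (added here for illustration): this revision is applied
# and reverted with the standard Alembic CLI, using the revision ids above:
#   alembic upgrade d68e85682c2c    (or `alembic upgrade head`)
#   alembic downgrade fed65154fba4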
|
the-stack_0_7451 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import textwrap
import numpy as np
import pytest
from astropy.io import fits
from astropy.nddata.nduncertainty import (
StdDevUncertainty, MissingDataAssociationException, VarianceUncertainty,
InverseVariance)
from astropy import units as u
from astropy import log
from astropy.wcs import WCS, FITSFixedWarning
from astropy.utils import NumpyRNGContext
from astropy.utils.data import (get_pkg_data_filename, get_pkg_data_filenames,
get_pkg_data_contents)
from astropy.utils.exceptions import AstropyWarning
from astropy.nddata.ccddata import CCDData
from astropy.nddata import _testing as nd_testing
from astropy.table import Table
DEFAULT_DATA_SIZE = 100
with NumpyRNGContext(123):
_random_array = np.random.normal(size=[DEFAULT_DATA_SIZE, DEFAULT_DATA_SIZE])
def create_ccd_data():
"""
Return a CCDData object of size DEFAULT_DATA_SIZE x DEFAULT_DATA_SIZE
with units of ADU.
"""
data = _random_array.copy()
fake_meta = {'my_key': 42, 'your_key': 'not 42'}
ccd = CCDData(data, unit=u.adu)
ccd.header = fake_meta
return ccd
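# Hedged sketch of the FITS round trip these tests exercise (the file path
# below is illustrative only, not a fixture used by the test suite):
#   ccd = create_ccd_data()
#   ccd.write('example.fits')                          # serialise to FITS
#   back = CCDData.read('example.fits', unit=ccd.unit)
#   np.testing.assert_array_equal(ccd.data, back.data)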
def test_ccddata_empty():
with pytest.raises(TypeError):
CCDData() # empty initializer should fail
def test_ccddata_must_have_unit():
with pytest.raises(ValueError):
CCDData(np.zeros([2, 2]))
def test_ccddata_unit_cannot_be_set_to_none():
ccd_data = create_ccd_data()
with pytest.raises(TypeError):
ccd_data.unit = None
def test_ccddata_meta_header_conflict():
with pytest.raises(ValueError) as exc:
CCDData([1, 2, 3], unit='', meta={1: 1}, header={2: 2})
assert "can't have both header and meta." in str(exc.value)
def test_ccddata_simple():
ccd_data = create_ccd_data()
assert ccd_data.shape == (DEFAULT_DATA_SIZE, DEFAULT_DATA_SIZE)
assert ccd_data.size == DEFAULT_DATA_SIZE * DEFAULT_DATA_SIZE
assert ccd_data.dtype == np.dtype(float)
def test_ccddata_init_with_string_electron_unit():
ccd = CCDData(np.zeros([2, 2]), unit="electron")
assert ccd.unit is u.electron
def test_initialize_from_FITS(tmpdir):
ccd_data = create_ccd_data()
hdu = fits.PrimaryHDU(ccd_data)
hdulist = fits.HDUList([hdu])
filename = tmpdir.join('afile.fits').strpath
hdulist.writeto(filename)
cd = CCDData.read(filename, unit=u.electron)
assert cd.shape == (DEFAULT_DATA_SIZE, DEFAULT_DATA_SIZE)
assert cd.size == DEFAULT_DATA_SIZE * DEFAULT_DATA_SIZE
assert np.issubdtype(cd.data.dtype, np.floating)
for k, v in hdu.header.items():
assert cd.meta[k] == v
def test_initialize_from_fits_with_unit_in_header(tmpdir):
fake_img = np.zeros([2, 2])
hdu = fits.PrimaryHDU(fake_img)
hdu.header['bunit'] = u.adu.to_string()
filename = tmpdir.join('afile.fits').strpath
hdu.writeto(filename)
ccd = CCDData.read(filename)
# ccd should pick up the unit adu from the fits header...did it?
assert ccd.unit is u.adu
# An explicit unit in the read overrides any unit in the FITS file
ccd2 = CCDData.read(filename, unit="photon")
assert ccd2.unit is u.photon
def test_initialize_from_fits_with_ADU_in_header(tmpdir):
fake_img = np.zeros([2, 2])
hdu = fits.PrimaryHDU(fake_img)
hdu.header['bunit'] = 'ADU'
filename = tmpdir.join('afile.fits').strpath
hdu.writeto(filename)
ccd = CCDData.read(filename)
# ccd should pick up the unit adu from the fits header...did it?
assert ccd.unit is u.adu
def test_initialize_from_fits_with_invalid_unit_in_header(tmpdir):
hdu = fits.PrimaryHDU(np.ones((2, 2)))
hdu.header['bunit'] = 'definetely-not-a-unit'
filename = tmpdir.join('afile.fits').strpath
hdu.writeto(filename)
with pytest.raises(ValueError):
CCDData.read(filename)
def test_initialize_from_fits_with_technically_invalid_but_not_really(tmpdir):
hdu = fits.PrimaryHDU(np.ones((2, 2)))
hdu.header['bunit'] = 'ELECTRONS/S'
filename = tmpdir.join('afile.fits').strpath
hdu.writeto(filename)
ccd = CCDData.read(filename)
assert ccd.unit == u.electron/u.s
def test_initialize_from_fits_with_data_in_different_extension(tmpdir):
fake_img = np.arange(4).reshape(2, 2)
hdu1 = fits.PrimaryHDU()
hdu2 = fits.ImageHDU(fake_img)
hdus = fits.HDUList([hdu1, hdu2])
filename = tmpdir.join('afile.fits').strpath
hdus.writeto(filename)
ccd = CCDData.read(filename, unit='adu')
    # The data should come from the image extension, not the empty primary HDU.
np.testing.assert_array_equal(ccd.data, fake_img)
# check that the header is the combined header
assert hdu2.header + hdu1.header == ccd.header
def test_initialize_from_fits_with_extension(tmpdir):
fake_img1 = np.zeros([2, 2])
fake_img2 = np.arange(4).reshape(2, 2)
hdu0 = fits.PrimaryHDU()
hdu1 = fits.ImageHDU(fake_img1, name='first', ver=1)
hdu2 = fits.ImageHDU(fake_img2, name='second', ver=1)
hdus = fits.HDUList([hdu0, hdu1, hdu2])
filename = tmpdir.join('afile.fits').strpath
hdus.writeto(filename)
ccd = CCDData.read(filename, hdu=2, unit='adu')
    # hdu=2 should select the second image extension.
np.testing.assert_array_equal(ccd.data, fake_img2)
# check hdu string parameter
ccd = CCDData.read(filename, hdu='second', unit='adu')
np.testing.assert_array_equal(ccd.data, fake_img2)
# check hdu tuple parameter
ccd = CCDData.read(filename, hdu=('second', 1), unit='adu')
np.testing.assert_array_equal(ccd.data, fake_img2)
def test_write_unit_to_hdu():
ccd_data = create_ccd_data()
ccd_unit = ccd_data.unit
hdulist = ccd_data.to_hdu()
assert 'bunit' in hdulist[0].header
assert hdulist[0].header['bunit'] == ccd_unit.to_string()
def test_initialize_from_FITS_bad_keyword_raises_error(tmpdir):
# There are two fits.open keywords that are not permitted in ccdproc:
# do_not_scale_image_data and scale_back
ccd_data = create_ccd_data()
filename = tmpdir.join('test.fits').strpath
ccd_data.write(filename)
with pytest.raises(TypeError):
CCDData.read(filename, unit=ccd_data.unit,
do_not_scale_image_data=True)
with pytest.raises(TypeError):
CCDData.read(filename, unit=ccd_data.unit, scale_back=True)
def test_ccddata_writer(tmpdir):
ccd_data = create_ccd_data()
filename = tmpdir.join('test.fits').strpath
ccd_data.write(filename)
ccd_disk = CCDData.read(filename, unit=ccd_data.unit)
np.testing.assert_array_equal(ccd_data.data, ccd_disk.data)
def test_ccddata_meta_is_case_sensitive():
ccd_data = create_ccd_data()
key = 'SoMeKEY'
ccd_data.meta[key] = 10
assert key.lower() not in ccd_data.meta
assert key.upper() not in ccd_data.meta
assert key in ccd_data.meta
def test_ccddata_meta_is_not_fits_header():
ccd_data = create_ccd_data()
ccd_data.meta = {'OBSERVER': 'Edwin Hubble'}
assert not isinstance(ccd_data.meta, fits.Header)
def test_fromMEF(tmpdir):
ccd_data = create_ccd_data()
hdu = fits.PrimaryHDU(ccd_data)
hdu2 = fits.PrimaryHDU(2 * ccd_data.data)
hdulist = fits.HDUList(hdu)
hdulist.append(hdu2)
filename = tmpdir.join('afile.fits').strpath
hdulist.writeto(filename)
# by default, we reading from the first extension
cd = CCDData.read(filename, unit=u.electron)
np.testing.assert_array_equal(cd.data, ccd_data.data)
# but reading from the second should work too
cd = CCDData.read(filename, hdu=1, unit=u.electron)
np.testing.assert_array_equal(cd.data, 2 * ccd_data.data)
def test_metafromheader():
hdr = fits.header.Header()
hdr['observer'] = 'Edwin Hubble'
hdr['exptime'] = '3600'
d1 = CCDData(np.ones((5, 5)), meta=hdr, unit=u.electron)
assert d1.meta['OBSERVER'] == 'Edwin Hubble'
assert d1.header['OBSERVER'] == 'Edwin Hubble'
def test_metafromdict():
dic = {'OBSERVER': 'Edwin Hubble', 'EXPTIME': 3600}
d1 = CCDData(np.ones((5, 5)), meta=dic, unit=u.electron)
assert d1.meta['OBSERVER'] == 'Edwin Hubble'
def test_header2meta():
hdr = fits.header.Header()
hdr['observer'] = 'Edwin Hubble'
hdr['exptime'] = '3600'
d1 = CCDData(np.ones((5, 5)), unit=u.electron)
d1.header = hdr
assert d1.meta['OBSERVER'] == 'Edwin Hubble'
assert d1.header['OBSERVER'] == 'Edwin Hubble'
def test_metafromstring_fail():
hdr = 'this is not a valid header'
with pytest.raises(TypeError):
CCDData(np.ones((5, 5)), meta=hdr, unit=u.adu)
def test_setting_bad_uncertainty_raises_error():
ccd_data = create_ccd_data()
with pytest.raises(TypeError):
# Uncertainty is supposed to be an instance of NDUncertainty
ccd_data.uncertainty = 10
def test_setting_uncertainty_with_array():
ccd_data = create_ccd_data()
ccd_data.uncertainty = None
fake_uncertainty = np.sqrt(np.abs(ccd_data.data))
ccd_data.uncertainty = fake_uncertainty.copy()
np.testing.assert_array_equal(ccd_data.uncertainty.array, fake_uncertainty)
def test_setting_uncertainty_wrong_shape_raises_error():
ccd_data = create_ccd_data()
with pytest.raises(ValueError):
ccd_data.uncertainty = np.zeros([3, 4])
def test_to_hdu():
ccd_data = create_ccd_data()
ccd_data.meta = {'observer': 'Edwin Hubble'}
fits_hdulist = ccd_data.to_hdu()
assert isinstance(fits_hdulist, fits.HDUList)
for k, v in ccd_data.meta.items():
assert fits_hdulist[0].header[k] == v
np.testing.assert_array_equal(fits_hdulist[0].data, ccd_data.data)
def test_copy():
ccd_data = create_ccd_data()
ccd_copy = ccd_data.copy()
np.testing.assert_array_equal(ccd_copy.data, ccd_data.data)
assert ccd_copy.unit == ccd_data.unit
assert ccd_copy.meta == ccd_data.meta
@pytest.mark.parametrize('operation,affects_uncertainty', [
("multiply", True),
("divide", True),
])
@pytest.mark.parametrize('operand', [
2.0,
2 * u.dimensionless_unscaled,
2 * u.photon / u.adu,
])
@pytest.mark.parametrize('with_uncertainty', [
True,
False])
def test_mult_div_overload(operand, with_uncertainty,
operation, affects_uncertainty):
ccd_data = create_ccd_data()
if with_uncertainty:
ccd_data.uncertainty = StdDevUncertainty(np.ones_like(ccd_data))
method = getattr(ccd_data, operation)
np_method = getattr(np, operation)
result = method(operand)
assert result is not ccd_data
assert isinstance(result, CCDData)
assert (result.uncertainty is None or
isinstance(result.uncertainty, StdDevUncertainty))
try:
op_value = operand.value
except AttributeError:
op_value = operand
np.testing.assert_array_equal(result.data,
np_method(ccd_data.data, op_value))
if with_uncertainty:
if affects_uncertainty:
np.testing.assert_array_equal(result.uncertainty.array,
np_method(ccd_data.uncertainty.array,
op_value))
else:
np.testing.assert_array_equal(result.uncertainty.array,
ccd_data.uncertainty.array)
else:
assert result.uncertainty is None
if isinstance(operand, u.Quantity):
# Need the "1 *" below to force arguments to be Quantity to work around
# astropy/astropy#2377
expected_unit = np_method(1 * ccd_data.unit, 1 * operand.unit).unit
assert result.unit == expected_unit
else:
assert result.unit == ccd_data.unit
@pytest.mark.parametrize('operation,affects_uncertainty', [
("add", False),
("subtract", False),
])
@pytest.mark.parametrize('operand,expect_failure', [
(2.0, u.UnitsError), # fail--units don't match image
(2 * u.dimensionless_unscaled, u.UnitsError), # same
(2 * u.adu, False),
])
@pytest.mark.parametrize('with_uncertainty', [
True,
False])
def test_add_sub_overload(operand, expect_failure, with_uncertainty,
operation, affects_uncertainty):
ccd_data = create_ccd_data()
if with_uncertainty:
ccd_data.uncertainty = StdDevUncertainty(np.ones_like(ccd_data))
method = getattr(ccd_data, operation)
np_method = getattr(np, operation)
if expect_failure:
with pytest.raises(expect_failure):
result = method(operand)
return
else:
result = method(operand)
assert result is not ccd_data
assert isinstance(result, CCDData)
assert (result.uncertainty is None or
isinstance(result.uncertainty, StdDevUncertainty))
try:
op_value = operand.value
except AttributeError:
op_value = operand
np.testing.assert_array_equal(result.data,
np_method(ccd_data.data, op_value))
if with_uncertainty:
if affects_uncertainty:
np.testing.assert_array_equal(result.uncertainty.array,
np_method(ccd_data.uncertainty.array,
op_value))
else:
np.testing.assert_array_equal(result.uncertainty.array,
ccd_data.uncertainty.array)
else:
assert result.uncertainty is None
if isinstance(operand, u.Quantity):
assert (result.unit == ccd_data.unit and result.unit == operand.unit)
else:
assert result.unit == ccd_data.unit
def test_arithmetic_overload_fails():
ccd_data = create_ccd_data()
with pytest.raises(TypeError):
ccd_data.multiply("five")
with pytest.raises(TypeError):
ccd_data.divide("five")
with pytest.raises(TypeError):
ccd_data.add("five")
with pytest.raises(TypeError):
ccd_data.subtract("five")
def test_arithmetic_no_wcs_compare():
ccd = CCDData(np.ones((10, 10)), unit='')
assert ccd.add(ccd, compare_wcs=None).wcs is None
assert ccd.subtract(ccd, compare_wcs=None).wcs is None
assert ccd.multiply(ccd, compare_wcs=None).wcs is None
assert ccd.divide(ccd, compare_wcs=None).wcs is None
def test_arithmetic_with_wcs_compare():
def return_true(_, __):
return True
wcs1, wcs2 = nd_testing.create_two_equal_wcs(naxis=2)
ccd1 = CCDData(np.ones((10, 10)), unit='', wcs=wcs1)
ccd2 = CCDData(np.ones((10, 10)), unit='', wcs=wcs2)
nd_testing.assert_wcs_seem_equal(
ccd1.add(ccd2, compare_wcs=return_true).wcs,
wcs1)
nd_testing.assert_wcs_seem_equal(
ccd1.subtract(ccd2, compare_wcs=return_true).wcs,
wcs1)
nd_testing.assert_wcs_seem_equal(
ccd1.multiply(ccd2, compare_wcs=return_true).wcs,
wcs1)
nd_testing.assert_wcs_seem_equal(
ccd1.divide(ccd2, compare_wcs=return_true).wcs,
wcs1)
def test_arithmetic_with_wcs_compare_fail():
def return_false(_, __):
return False
ccd1 = CCDData(np.ones((10, 10)), unit='', wcs=WCS())
ccd2 = CCDData(np.ones((10, 10)), unit='', wcs=WCS())
with pytest.raises(ValueError):
ccd1.add(ccd2, compare_wcs=return_false)
with pytest.raises(ValueError):
ccd1.subtract(ccd2, compare_wcs=return_false)
with pytest.raises(ValueError):
ccd1.multiply(ccd2, compare_wcs=return_false)
with pytest.raises(ValueError):
ccd1.divide(ccd2, compare_wcs=return_false)
def test_arithmetic_overload_ccddata_operand():
ccd_data = create_ccd_data()
ccd_data.uncertainty = StdDevUncertainty(np.ones_like(ccd_data))
operand = ccd_data.copy()
result = ccd_data.add(operand)
assert len(result.meta) == 0
np.testing.assert_array_equal(result.data,
2 * ccd_data.data)
np.testing.assert_array_almost_equal_nulp(
result.uncertainty.array,
np.sqrt(2) * ccd_data.uncertainty.array
)
result = ccd_data.subtract(operand)
assert len(result.meta) == 0
np.testing.assert_array_equal(result.data,
0 * ccd_data.data)
np.testing.assert_array_almost_equal_nulp(
result.uncertainty.array,
np.sqrt(2) * ccd_data.uncertainty.array
)
result = ccd_data.multiply(operand)
assert len(result.meta) == 0
np.testing.assert_array_equal(result.data,
ccd_data.data ** 2)
expected_uncertainty = (np.sqrt(2) * np.abs(ccd_data.data) *
ccd_data.uncertainty.array)
np.testing.assert_allclose(result.uncertainty.array,
expected_uncertainty)
result = ccd_data.divide(operand)
assert len(result.meta) == 0
np.testing.assert_array_equal(result.data,
np.ones_like(ccd_data.data))
expected_uncertainty = (np.sqrt(2) / np.abs(ccd_data.data) *
ccd_data.uncertainty.array)
np.testing.assert_allclose(result.uncertainty.array,
expected_uncertainty)
def test_arithmetic_overload_differing_units():
a = np.array([1, 2, 3]) * u.m
b = np.array([1, 2, 3]) * u.cm
ccddata = CCDData(a)
# TODO: Could also be parametrized.
res = ccddata.add(b)
np.testing.assert_array_almost_equal(res.data, np.add(a, b).value)
assert res.unit == np.add(a, b).unit
res = ccddata.subtract(b)
np.testing.assert_array_almost_equal(res.data, np.subtract(a, b).value)
assert res.unit == np.subtract(a, b).unit
res = ccddata.multiply(b)
np.testing.assert_array_almost_equal(res.data, np.multiply(a, b).value)
assert res.unit == np.multiply(a, b).unit
res = ccddata.divide(b)
np.testing.assert_array_almost_equal(res.data, np.divide(a, b).value)
assert res.unit == np.divide(a, b).unit
def test_arithmetic_add_with_array():
ccd = CCDData(np.ones((3, 3)), unit='')
res = ccd.add(np.arange(3))
np.testing.assert_array_equal(res.data, [[1, 2, 3]] * 3)
ccd = CCDData(np.ones((3, 3)), unit='adu')
with pytest.raises(ValueError):
ccd.add(np.arange(3))
def test_arithmetic_subtract_with_array():
ccd = CCDData(np.ones((3, 3)), unit='')
res = ccd.subtract(np.arange(3))
np.testing.assert_array_equal(res.data, [[1, 0, -1]] * 3)
ccd = CCDData(np.ones((3, 3)), unit='adu')
with pytest.raises(ValueError):
ccd.subtract(np.arange(3))
def test_arithmetic_multiply_with_array():
ccd = CCDData(np.ones((3, 3)) * 3, unit=u.m)
res = ccd.multiply(np.ones((3, 3)) * 2)
np.testing.assert_array_equal(res.data, [[6, 6, 6]] * 3)
assert res.unit == ccd.unit
def test_arithmetic_divide_with_array():
ccd = CCDData(np.ones((3, 3)), unit=u.m)
res = ccd.divide(np.ones((3, 3)) * 2)
np.testing.assert_array_equal(res.data, [[0.5, 0.5, 0.5]] * 3)
assert res.unit == ccd.unit
def test_history_preserved_if_metadata_is_fits_header(tmpdir):
fake_img = np.zeros([2, 2])
hdu = fits.PrimaryHDU(fake_img)
hdu.header['history'] = 'one'
hdu.header['history'] = 'two'
hdu.header['history'] = 'three'
assert len(hdu.header['history']) == 3
tmp_file = tmpdir.join('temp.fits').strpath
hdu.writeto(tmp_file)
ccd_read = CCDData.read(tmp_file, unit="adu")
assert ccd_read.header['history'] == hdu.header['history']
def test_infol_logged_if_unit_in_fits_header(tmpdir):
ccd_data = create_ccd_data()
tmpfile = tmpdir.join('temp.fits')
ccd_data.write(tmpfile.strpath)
log.setLevel('INFO')
explicit_unit_name = "photon"
with log.log_to_list() as log_list:
_ = CCDData.read(tmpfile.strpath, unit=explicit_unit_name)
assert explicit_unit_name in log_list[0].message
def test_wcs_attribute(tmpdir):
"""
Check that WCS attribute gets added to header, and that if a CCDData
object is created from a FITS file with a header, and the WCS attribute
is modified, then the CCDData object is turned back into an hdu, the
WCS object overwrites the old WCS information in the header.
"""
ccd_data = create_ccd_data()
tmpfile = tmpdir.join('temp.fits')
# This wcs example is taken from the astropy.wcs docs.
wcs = WCS(naxis=2)
wcs.wcs.crpix = np.array(ccd_data.shape) / 2
wcs.wcs.cdelt = np.array([-0.066667, 0.066667])
wcs.wcs.crval = [0, -90]
wcs.wcs.ctype = ["RA---AIR", "DEC--AIR"]
wcs.wcs.set_pv([(2, 1, 45.0)])
ccd_data.header = ccd_data.to_hdu()[0].header
ccd_data.header.extend(wcs.to_header(), useblanks=False)
ccd_data.write(tmpfile.strpath)
# Get the header length after it has been extended by the WCS keywords
original_header_length = len(ccd_data.header)
ccd_new = CCDData.read(tmpfile.strpath)
# WCS attribute should be set for ccd_new
assert ccd_new.wcs is not None
# WCS attribute should be equal to wcs above.
assert ccd_new.wcs.wcs == wcs.wcs
# Converting CCDData object with wcs to an hdu shouldn't
# create duplicate wcs-related entries in the header.
ccd_new_hdu = ccd_new.to_hdu()[0]
assert len(ccd_new_hdu.header) == original_header_length
# Making a CCDData with WCS (but not WCS in the header) should lead to
# WCS information in the header when it is converted to an HDU.
ccd_wcs_not_in_header = CCDData(ccd_data.data, wcs=wcs, unit="adu")
hdu = ccd_wcs_not_in_header.to_hdu()[0]
wcs_header = wcs.to_header()
for k in wcs_header.keys():
# Skip these keywords if they are in the WCS header because they are
# not WCS-specific.
if k in ['', 'COMMENT', 'HISTORY']:
continue
# No keyword from the WCS should be in the header.
assert k not in ccd_wcs_not_in_header.header
# Every keyword in the WCS should be in the header of the HDU
assert hdu.header[k] == wcs_header[k]
# Now check that if WCS of a CCDData is modified, then the CCDData is
# converted to an HDU, the WCS keywords in the header are overwritten
# with the appropriate keywords from the header.
#
# ccd_new has a WCS and WCS keywords in the header, so try modifying
# the WCS.
ccd_new.wcs.wcs.cdelt *= 2
ccd_new_hdu_mod_wcs = ccd_new.to_hdu()[0]
assert ccd_new_hdu_mod_wcs.header['CDELT1'] == ccd_new.wcs.wcs.cdelt[0]
assert ccd_new_hdu_mod_wcs.header['CDELT2'] == ccd_new.wcs.wcs.cdelt[1]
def test_wcs_keywords_removed_from_header():
"""
Test, for the file included with the nddata tests, that WCS keywords are
properly removed from header.
"""
from astropy.nddata.ccddata import _KEEP_THESE_KEYWORDS_IN_HEADER
keepers = set(_KEEP_THESE_KEYWORDS_IN_HEADER)
data_file = get_pkg_data_filename('data/sip-wcs.fits')
ccd = CCDData.read(data_file)
with pytest.warns(AstropyWarning,
match=r'Some non-standard WCS keywords were excluded'):
wcs_header = ccd.wcs.to_header()
assert not (set(wcs_header) & set(ccd.meta) - keepers)
# Make sure that exceptions are not raised when trying to remove missing
# keywords. o4sp040b0_raw.fits of io.fits is missing keyword 'PC1_1'.
data_file1 = get_pkg_data_filename('../../io/fits/tests/data/o4sp040b0_raw.fits')
with pytest.warns(FITSFixedWarning, match=r"'unitfix' made the change"):
ccd = CCDData.read(data_file1, unit='count')
def test_wcs_SIP_coefficient_keywords_removed():
# If SIP polynomials are present, check that no more polynomial
# coefficients remain in the header. See #8598
# The SIP paper is ambiguous as to whether keywords like
# A_0_0 can appear in the header for a 2nd order or higher
# polynomial. The paper clearly says that the corrections
# are only for quadratic or higher order, so A_0_0 and the like
# should be zero if they are present, but they apparently can be
# there (or at least astrometry.net produces them).
# astropy WCS does not write those coefficients, so they were
# not being removed from the header even though they are WCS-related.
data_file = get_pkg_data_filename('data/sip-wcs.fits')
test_keys = ['A_0_0', 'B_0_1']
# Make sure the keywords added to this file for testing are there
with fits.open(data_file) as hdu:
for key in test_keys:
assert key in hdu[0].header
ccd = CCDData.read(data_file)
# Now the test...the two keywords above should have been removed.
for key in test_keys:
assert key not in ccd.header
@pytest.mark.filterwarnings('ignore')
def test_wcs_keyword_removal_for_wcs_test_files():
"""
Test, for the WCS test files, that keyword removal works as
expected. Those cover a much broader range of WCS types than
test_wcs_keywords_removed_from_header.
Includes regression test for #8597
"""
from astropy.nddata.ccddata import _generate_wcs_and_update_header
from astropy.nddata.ccddata import (_KEEP_THESE_KEYWORDS_IN_HEADER,
_CDs, _PCs)
keepers = set(_KEEP_THESE_KEYWORDS_IN_HEADER)
wcs_headers = get_pkg_data_filenames('../../wcs/tests/data',
pattern='*.hdr')
for hdr in wcs_headers:
# Skip the files that are expected to be bad...
if ('invalid' in hdr or 'nonstandard' in hdr or 'segfault' in hdr or
'chandra-pixlist-wcs' in hdr):
continue
header_string = get_pkg_data_contents(hdr)
header = fits.Header.fromstring(header_string)
wcs = WCS(header_string)
header_from_wcs = wcs.to_header(relax=True)
new_header, new_wcs = _generate_wcs_and_update_header(header)
new_wcs_header = new_wcs.to_header(relax=True)
# Make sure all of the WCS-related keywords generated by astropy
# have been removed.
assert not (set(new_header) &
set(new_wcs_header) -
keepers)
# Check that new_header contains no remaining WCS information.
# Specifically, check that
# 1. The combination of new_header and new_wcs does not contain
# both PCi_j and CDi_j keywords. See #8597.
# Check for 1
final_header = new_header + new_wcs_header
final_header_set = set(final_header)
if _PCs & final_header_set:
assert not (_CDs & final_header_set)
elif _CDs & final_header_set:
assert not (_PCs & final_header_set)
# Check that the new wcs is the same as the old.
for k, v in new_wcs_header.items():
if isinstance(v, str):
assert header_from_wcs[k] == v
else:
np.testing.assert_almost_equal(header_from_wcs[k], v)
def test_read_wcs_not_creatable(tmpdir):
# The following Header can't be converted to a WCS object. See also #6499.
hdr_txt_example_WCS = textwrap.dedent('''
SIMPLE = T / Fits standard
BITPIX = 16 / Bits per pixel
NAXIS = 2 / Number of axes
NAXIS1 = 1104 / Axis length
NAXIS2 = 4241 / Axis length
CRVAL1 = 164.98110962 / Physical value of the reference pixel X
CRVAL2 = 44.34089279 / Physical value of the reference pixel Y
CRPIX1 = -34.0 / Reference pixel in X (pixel)
CRPIX2 = 2041.0 / Reference pixel in Y (pixel)
CDELT1 = 0.10380000 / X Scale projected on detector (#/pix)
CDELT2 = 0.10380000 / Y Scale projected on detector (#/pix)
CTYPE1 = 'RA---TAN' / Pixel coordinate system
CTYPE2 = 'WAVELENGTH' / Pixel coordinate system
CUNIT1 = 'degree ' / Units used in both CRVAL1 and CDELT1
CUNIT2 = 'nm ' / Units used in both CRVAL2 and CDELT2
CD1_1 = 0.20760000 / Pixel Coordinate translation matrix
CD1_2 = 0.00000000 / Pixel Coordinate translation matrix
CD2_1 = 0.00000000 / Pixel Coordinate translation matrix
CD2_2 = 0.10380000 / Pixel Coordinate translation matrix
C2YPE1 = 'RA---TAN' / Pixel coordinate system
C2YPE2 = 'DEC--TAN' / Pixel coordinate system
C2NIT1 = 'degree ' / Units used in both C2VAL1 and C2ELT1
C2NIT2 = 'degree ' / Units used in both C2VAL2 and C2ELT2
RADECSYS= 'FK5 ' / The equatorial coordinate system
''')
hdr = fits.Header.fromstring(hdr_txt_example_WCS, sep='\n')
hdul = fits.HDUList([fits.PrimaryHDU(np.ones((4241, 1104)), header=hdr)])
filename = tmpdir.join('afile.fits').strpath
hdul.writeto(filename)
# The hdr cannot be converted to a WCS object because of an
# InconsistentAxisTypesError but it should still open the file
ccd = CCDData.read(filename, unit='adu')
assert ccd.wcs is None
def test_header():
ccd_data = create_ccd_data()
a = {'Observer': 'Hubble'}
ccd = CCDData(ccd_data, header=a)
assert ccd.meta == a
def test_wcs_arithmetic():
ccd_data = create_ccd_data()
wcs = WCS(naxis=2)
ccd_data.wcs = wcs
result = ccd_data.multiply(1.0)
nd_testing.assert_wcs_seem_equal(result.wcs, wcs)
@pytest.mark.parametrize('operation',
['multiply', 'divide', 'add', 'subtract'])
def test_wcs_arithmetic_ccd(operation):
ccd_data = create_ccd_data()
ccd_data2 = ccd_data.copy()
ccd_data.wcs = WCS(naxis=2)
method = getattr(ccd_data, operation)
result = method(ccd_data2)
nd_testing.assert_wcs_seem_equal(result.wcs, ccd_data.wcs)
assert ccd_data2.wcs is None
def test_wcs_sip_handling():
"""
Check whether the ctypes RA---TAN-SIP and DEC--TAN-SIP survive
a roundtrip unchanged.
"""
data_file = get_pkg_data_filename('data/sip-wcs.fits')
def check_wcs_ctypes(header):
expected_wcs_ctypes = {
'CTYPE1': 'RA---TAN-SIP',
'CTYPE2': 'DEC--TAN-SIP'
}
return [header[k] == v for k, v in expected_wcs_ctypes.items()]
ccd_original = CCDData.read(data_file)
# After initialization the keywords should be in the WCS, not in the
# meta.
with fits.open(data_file) as raw:
good_ctype = check_wcs_ctypes(raw[0].header)
assert all(good_ctype)
ccd_new = ccd_original.to_hdu()
good_ctype = check_wcs_ctypes(ccd_new[0].header)
assert all(good_ctype)
# Try converting to header with wcs_relax=False and
# the header should contain the CTYPE keywords without
# the -SIP
ccd_no_relax = ccd_original.to_hdu(wcs_relax=False)
good_ctype = check_wcs_ctypes(ccd_no_relax[0].header)
assert not any(good_ctype)
assert ccd_no_relax[0].header['CTYPE1'] == 'RA---TAN'
assert ccd_no_relax[0].header['CTYPE2'] == 'DEC--TAN'
@pytest.mark.parametrize('operation',
['multiply', 'divide', 'add', 'subtract'])
def test_mask_arithmetic_ccd(operation):
ccd_data = create_ccd_data()
ccd_data2 = ccd_data.copy()
ccd_data.mask = (ccd_data.data > 0)
method = getattr(ccd_data, operation)
result = method(ccd_data2)
np.testing.assert_equal(result.mask, ccd_data.mask)
def test_write_read_multiextensionfits_mask_default(tmpdir):
# Test that if a mask is present the mask is saved and loaded by default.
ccd_data = create_ccd_data()
ccd_data.mask = ccd_data.data > 10
filename = tmpdir.join('afile.fits').strpath
ccd_data.write(filename)
ccd_after = CCDData.read(filename)
assert ccd_after.mask is not None
np.testing.assert_array_equal(ccd_data.mask, ccd_after.mask)
@pytest.mark.parametrize(
'uncertainty_type',
[StdDevUncertainty, VarianceUncertainty, InverseVariance])
def test_write_read_multiextensionfits_uncertainty_default(
tmpdir, uncertainty_type):
# Test that if a uncertainty is present it is saved and loaded by default.
ccd_data = create_ccd_data()
ccd_data.uncertainty = uncertainty_type(ccd_data.data * 10)
filename = tmpdir.join('afile.fits').strpath
ccd_data.write(filename)
ccd_after = CCDData.read(filename)
assert ccd_after.uncertainty is not None
assert type(ccd_after.uncertainty) is uncertainty_type
np.testing.assert_array_equal(ccd_data.uncertainty.array,
ccd_after.uncertainty.array)
@pytest.mark.parametrize(
'uncertainty_type',
[StdDevUncertainty, VarianceUncertainty, InverseVariance])
def test_write_read_multiextensionfits_uncertainty_different_uncertainty_key(
tmpdir, uncertainty_type):
# Test that if a uncertainty is present it is saved and loaded by default.
ccd_data = create_ccd_data()
ccd_data.uncertainty = uncertainty_type(ccd_data.data * 10)
filename = tmpdir.join('afile.fits').strpath
ccd_data.write(filename, key_uncertainty_type='Blah')
ccd_after = CCDData.read(filename, key_uncertainty_type='Blah')
assert ccd_after.uncertainty is not None
assert type(ccd_after.uncertainty) is uncertainty_type
np.testing.assert_array_equal(ccd_data.uncertainty.array,
ccd_after.uncertainty.array)
def test_write_read_multiextensionfits_not(tmpdir):
# Test that writing mask and uncertainty can be disabled
ccd_data = create_ccd_data()
ccd_data.mask = ccd_data.data > 10
ccd_data.uncertainty = StdDevUncertainty(ccd_data.data * 10)
filename = tmpdir.join('afile.fits').strpath
ccd_data.write(filename, hdu_mask=None, hdu_uncertainty=None)
ccd_after = CCDData.read(filename)
assert ccd_after.uncertainty is None
assert ccd_after.mask is None
def test_write_read_multiextensionfits_custom_ext_names(tmpdir):
# Test writing mask, uncertainty in another extension than default
ccd_data = create_ccd_data()
ccd_data.mask = ccd_data.data > 10
ccd_data.uncertainty = StdDevUncertainty(ccd_data.data * 10)
filename = tmpdir.join('afile.fits').strpath
ccd_data.write(filename, hdu_mask='Fun', hdu_uncertainty='NoFun')
# Try reading with defaults extension names
ccd_after = CCDData.read(filename)
assert ccd_after.uncertainty is None
assert ccd_after.mask is None
# Try reading with custom extension names
ccd_after = CCDData.read(filename, hdu_mask='Fun', hdu_uncertainty='NoFun')
assert ccd_after.uncertainty is not None
assert ccd_after.mask is not None
np.testing.assert_array_equal(ccd_data.mask, ccd_after.mask)
np.testing.assert_array_equal(ccd_data.uncertainty.array,
ccd_after.uncertainty.array)
def test_read_old_style_multiextensionfits(tmpdir):
# Regression test for https://github.com/astropy/ccdproc/issues/664
#
# Prior to astropy 3.1 there was no uncertainty type saved
# in the multiextension fits files generated by CCDData
# because the uncertainty had to be StandardDevUncertainty.
#
# Current version should be able to read those in.
#
size = 4
# Value of the variables below are not important to the test.
data = np.zeros([size, size])
mask = data > 0.9
uncert = np.sqrt(data)
ccd = CCDData(data=data, mask=mask, uncertainty=uncert, unit='adu')
# We'll create the file manually to ensure we have the
# right extension names and no uncertainty type.
hdulist = ccd.to_hdu()
del hdulist[2].header['UTYPE']
file_name = tmpdir.join('old_ccddata_mef.fits').strpath
hdulist.writeto(file_name)
ccd = CCDData.read(file_name)
assert isinstance(ccd.uncertainty, StdDevUncertainty)
def test_wcs():
ccd_data = create_ccd_data()
wcs = WCS(naxis=2)
ccd_data.wcs = wcs
assert ccd_data.wcs is wcs
def test_recognized_fits_formats_for_read_write(tmpdir):
# These are the extensions that are supposed to be supported.
ccd_data = create_ccd_data()
supported_extensions = ['fit', 'fits', 'fts']
for ext in supported_extensions:
path = tmpdir.join(f"test.{ext}")
ccd_data.write(path.strpath)
from_disk = CCDData.read(path.strpath)
assert (ccd_data.data == from_disk.data).all()
def test_stddevuncertainty_compat_descriptor_no_parent():
with pytest.raises(MissingDataAssociationException):
StdDevUncertainty(np.ones((10, 10))).parent_nddata
def test_stddevuncertainty_compat_descriptor_no_weakref():
# TODO: Remove this test if astropy 1.0 isn't supported anymore
    # This test might create a memory leak on purpose, so the last lines after
    # the assert are IMPORTANT cleanup.
ccd = CCDData(np.ones((10, 10)), unit='')
uncert = StdDevUncertainty(np.ones((10, 10)))
uncert._parent_nddata = ccd
assert uncert.parent_nddata is ccd
uncert._parent_nddata = None
# https://github.com/astropy/astropy/issues/7595
def test_read_returns_image(tmpdir):
# Test if CCData.read returns a image when reading a fits file containing
# a table and image, in that order.
tbl = Table(np.ones(10).reshape(5, 2))
img = np.ones((5, 5))
hdul = fits.HDUList(hdus=[fits.PrimaryHDU(), fits.TableHDU(tbl.as_array()),
fits.ImageHDU(img)])
filename = tmpdir.join('table_image.fits').strpath
hdul.writeto(filename)
ccd = CCDData.read(filename, unit='adu')
# Expecting to get (5, 5), the size of the image
assert ccd.data.shape == (5, 5)
# https://github.com/astropy/astropy/issues/9664
def test_sliced_ccdata_to_hdu():
wcs = WCS(naxis=2)
wcs.wcs.crpix = 10, 10
ccd = CCDData(np.ones((10, 10)), wcs=wcs, unit='pixel')
trimmed = ccd[2:-2, 2:-2]
hdul = trimmed.to_hdu()
assert isinstance(hdul, fits.HDUList)
assert hdul[0].header['CRPIX1'] == 8
assert hdul[0].header['CRPIX2'] == 8
|