return outputs
def GenerateDescription(self, verb, message, fallback):
"""Generate and return a description of a build step.
|verb| is the short summary, e.g. ACTION or RULE.
|message| is a hand-written description, or None if not available.
|fallback| is the gyp-level name of the step, usable as a fallback.
"""
if self.toolset != 'target':
verb += '(%s)' % self.toolset
if message:
return '%s %s' % (verb, self.ExpandSpecial(message))
else:
return '%s %s: %s' % (verb, self.name, fallback)
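# For instance (the names here are hypothetical, not from the original file):
# with self.toolset == 'host' and self.name == 'base',
#   GenerateDescription('ACTION', None, 'version')
#     -> 'ACTION(host) base: version'
#   GenerateDescription('ACTION', 'Generating version header', 'version')
#     -> 'ACTION(host) Generating version header'   (the message wins)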
def WriteActions(self, actions, extra_sources, prebuild):
all_outputs = []
for action in actions:
# First write out a rule for the action.
name = action['action_name']
description = self.GenerateDescription('ACTION',
action.get('message', None),
name)
rule_name = self.WriteNewNinjaRule(name, action['action'], description)
inputs = [self.GypPathToNinja(i) for i in action['inputs']]
if int(action.get('process_outputs_as_sources', False)):
extra_sources += action['outputs']
outputs = [self.GypPathToNinja(o) for o in action['outputs']]
# Then write out an edge using the rule.
self.ninja.build(outputs, rule_name, inputs,
order_only=prebuild)
all_outputs += outputs
self.ninja.newline()
return all_outputs
def WriteRules(self, rules, extra_sources, prebuild):
all_outputs = []
for rule in rules:
# First write out a rule for the rule action.
name = rule['rule_name']
args = rule['action']
description = self.GenerateDescription(
'RULE',
rule.get('message', None),
('%s ' + generator_default_variables['RULE_INPUT_PATH']) % name)
rule_name = self.WriteNewNinjaRule(name, args, description)
# TODO: if the command references the outputs directly, we should
# simplify it to just use $out.
# Rules can potentially make use of some special variables which
# must vary per source file.
# Compute the list of variables we'll need to provide.
special_locals = ('source', 'root', 'ext', 'name')
needed_variables = set(['source'])
for argument in args:
for var in special_locals:
if ('${%s}' % var) in argument:
needed_variables.add(var)
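# For example, a hypothetical rule action such as
#   ['python', 'gen.py', '${source}', '--root', '${root}', '-o', '${root}.h']
# leaves needed_variables == {'source', 'root'}; 'ext' and 'name' are only
# bound below when the command actually references them.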
# For each source file, write an edge that generates all the outputs.
for source in rule.get('rule_sources', []):
basename = os.path.basename(source)
root, ext = os.path.splitext(basename)
# Gather the list of outputs, expanding $vars if possible.
outputs = []
for output in rule['outputs']:
outputs.append(output.replace(
generator_default_variables['RULE_INPUT_ROOT'], root))
if int(rule.get('process_outputs_as_sources', False)):
extra_sources += outputs
extra_bindings = []
for var in needed_variables:
if var == 'root':
extra_bindings.append(('root', root))
elif var == 'source':
# '$source' is a parameter to the rule action, which means
# it shouldn't be converted to a Ninja path. But we don't
# want $!PRODUCT_DIR in there either.
source_expanded = self.ExpandSpecial(source, self.base_to_build)
extra_bindings.append(('source', source_expanded))
elif var == 'ext':
extra_bindings.append(('ext', ext))
elif var == 'name':
extra_bindings.append(('name', basename))
else:
assert var == None, repr(var)
inputs = map(self.GypPathToNinja, rule.get('inputs', []))
outputs = map(self.GypPathToNinja, outputs)
self.ninja.build(outputs, rule_name, self.GypPathToNinja(source),
implicit=inputs,
order_only=prebuild,
variables=extra_bindings)
all_outputs.extend(outputs)
return all_outputs
def WriteCopies(self, copies, prebuild):
outputs = []
for copy in copies:
for path in copy['files']:
# Normalize the path so trailing slashes don't confuse us.
path = os.path.normpath(path)
basename = os.path.split(path)[1]
src = self.GypPathToNinja(path)
dst = self.GypPathToNinja(os.path.join(copy['destination'], basename))
outputs += self.ninja.build(dst, 'copy', src,
order_only=prebuild)
return outputs
def WriteSources(self, config, sources, predepends):
"""Write build rules to compile all of |sources|."""
if self.toolset == 'host':
self.ninja.variable('cc', '$cc_host')
self.ninja.variable('cxx', '$cxx_host')
self.WriteVariableList('defines',
['-D' + MaybeQuoteShellArgument(ninja_syntax.escape(d))
for d in config.get('defines', [])])
self.WriteVariableList('includes',
['-I' + self.GypPathToNinja(i)
for i in config.get('include_dirs', [])])
self.WriteVariableList('cflags', config.get('cflags'))
self.WriteVariableList('cflags_c', config.get('cflags_c'))
self.WriteVariableList('cflags_cc', config.get('cflags_cc'))
self.ninja.newline()
outputs = []
for source in sources:
filename, ext = os.path.splitext(source)
ext = ext[1:]
if ext in ('cc', 'cpp', 'cxx'):
command = 'cxx'
elif ext in ('c', 's', 'S'):
command = 'cc'
else:
# TODO: should we assert here on unexpected extensions?
continue
input = self.GypPathToNinja(source)
output = self.GypPathToUniqueOutput(filename + '.o')
self.ninja.build(output, command, input,
order_only=predepends)
outputs.append(output)
self.ninja.newline()
return outputs
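# Sketch of the extension dispatch above (the exact object path comes from
# GypPathToUniqueOutput and is shown only as '<objdir>/' here):
#   foo.cc  -> rule 'cxx' -> <objdir>/foo.o
#   bar.c   -> rule 'cc'  -> <objdir>/bar.o
#   baz.S   -> rule 'cc'  -> <objdir>/baz.o
#   qux.txt -> skipped (unhandled extension)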
def WriteTarget(self, spec, config, final_deps):
if spec['type'] == 'none':
# This target doesn't have any explicit final output, but is instead
# used for its effects before the final output (e.g. copies steps).
# Reuse the existing output if it's easy.
if len(final_deps) == 1:
return final_deps[0]
# Otherwise, fall through to writing out a stamp file.
output = self.ComputeOutput(spec)
output_uses_linker = spec['type'] in ('executable', 'loadable_module',
'shared_library')
implicit_deps = set()
if 'dependencies' in spec:
# Two kinds of dependencies:
# - Linkable dependencies (like a .a or a .so): add them to the link line.
# - Non-linkable dependencies (like a rule that generates a file
# and writes a stamp file): add them to implicit_deps
if output_uses_linker:
extra_deps = set()
for dep in spec['dependencies']:
input, linkable = self.target_outputs.get(dep, (None, False))
if not input:
continue
if linkable:
extra_deps.add(input)
else:
# TODO: Chrome-specific HACK. Chrome runs this lastchange rule on
# every build, but we don't want to rebuild when it runs.
if 'lastchange' not in input:
implicit_deps.add(input)
final_deps.extend(list(extra_deps))
command_map = {
'executable': 'link',
'static_library': 'alink',
'loadable_module': 'solink_module',
'shared_library': 'solink',
'none': 'stamp',
}
command = command_map[spec['type']]
if output_uses_linker:
self.WriteVariableList('ldflags',
gyp.common.uniquer(map(self.ExpandSpecial,
config.get('ldflags', []))))
self.WriteVariableList('libs',
gyp.common.uniquer(map(self.ExpandSpecial,
spec.get('libraries', []))))
extra_bindings = []
if command in ('solink', 'solink_module'):
extra_bindings.append(('soname', os.path.split(output)[1]))
self.ninja.build(output, command, final_deps,
implicit=list(implicit_deps),
variables=extra_bindings)
return output
def ComputeOutputFileName(self, spec):
"""Compute the filename of the final output for the current target."""
# Compute filename prefix: the product prefix, or a default for
# the product type.
DEFAULT_PREFIX = {
'loadable_module': 'lib',
'shared_library': 'lib',
}
prefix = spec.get('product_prefix', DEFAULT_PREFIX.get(spec['type'], ''))
# Compute filename extension: the product extension, or a default
# for the product type.
DEFAULT_EXTENSION = {
'static_library': 'a',
'loadable_module': 'so',
'shared_library': 'so',
}
extension = spec.get('product_extension',
DEFAULT_EXTENSION.get(spec['type'], ''))
if extension:
extension = '.' + extension
if 'product_name' in spec:
# If we were given an explicit name, use that.
target = spec['product_name']
else:
# Otherwise, derive a name from the target name.
target = spec['target_name']
if prefix == 'lib':
# Snip out an extra 'lib' from libs if appropriate.
target = StripPrefix(target, 'lib')
if spec['type'] in ('static_library', 'loadable_module', 'shared_library',
'executable'):
return '%s%s%s' % (prefix, target, extension)
elif spec['type'] == 'none':
return '%s.stamp' % target
else:
raise Exception('Unhandled output type: %s' % spec['type'])
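# Examples of the resulting names using the defaults above:
#   shared_library 'foo'  -> 'libfoo.so'
#   static_library 'foo'  -> 'foo.a'
#   executable     'foo'  -> 'foo'
#   none           'foo'  -> 'foo.stamp'
# A shared library already named 'libfoo' has the extra 'lib' snipped, so it
# still comes out as 'libfoo.so' rather than 'liblibfoo.so'.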
def ComputeOutput(self, spec):
"""Compute the path for the final output of the spec."""
filename = self.ComputeOutputFileName(spec)
if 'product_dir' in spec:
path = os.path.join(spec['product_dir'], filename)
return self.ExpandSpecial(path)
# Executables and loadable modules go into the output root,
# libraries go into shared library dir, and everything else
# goes into the normal place.
if spec['type'] in ('executable', 'loadable_module'):
return filename
elif spec['type'] == 'shared_library':
libdir = 'lib'
if self.toolset != 'target':
libdir = 'lib/%s' % self.toolset
return os.path.join(libdir, filename)
else:
return self.GypPathToUniqueOutput(filename, qualified=False)
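# For example, relative to the build directory: an executable 'foo' lands at
# 'foo', a target-toolset shared library at 'lib/libfoo.so', a host-toolset
# one at 'lib/host/libfoo.so', and everything else (static libraries, stamp
# files) goes through GypPathToUniqueOutput.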
def WriteVariableList(self, var, values):
if values is None:
values = []
self.ninja.variable(var, ' '.join(values))
def WriteNewNinjaRule(self, name, args, description):
"""Write out a new ninja "rule" statement for a given command.
Returns the name of the new rule."""
# TODO: we shouldn't need to qualify names; we do it because
# currently the ninja rule namespace is global, but it really
# should be scoped to the subninja.
rule_name = self.name
if self.toolset == 'target':
rule_name += '.' + self.toolset
rule_name += '.' + name
rule_name = rule_name.replace(' ', '_')
args = args[:]
# gyp dictates that commands are run from the base directory.
# cd into the directory before running, and adjust paths in
# the arguments to point to the proper locations.
cd = 'cd %s; ' % self.build_to_base
args = [self.ExpandSpecial(arg, self.base_to_build) for arg in args]
command = cd + gyp.common.EncodePOSIXShellList(args)
self.ninja.rule(rule_name, command, description)
self.ninja.newline()
return rule_name
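# Illustrative example (the target and script names are hypothetical): for a
# writer with self.name == 'my_target' and the 'target' toolset, an action
# named 'do codegen' becomes the ninja rule 'my_target.target.do_codegen',
# and its command is prefixed with a cd into the gyp base directory, e.g.
#   cd ../..; python gen.py input.idl output.h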
def CalculateVariables(default_variables, params):
"""Calculate additional variables for use in the build (called by gyp)."""
cc_target = os.environ.get('CC.target', os.environ.get('CC', 'cc'))
default_variables['LINKER_SUPPORTS_ICF'] = \
gyp.system_test.TestLinkerSupportsICF(cc_command=cc_target)
def OpenOutput(path):
"""Open |path| for writing, creating directories if necessary."""
try:
os.makedirs(os.path.dirname(path))
except OSError:
pass
return open(path, 'w')
def GenerateOutput(target_list, target_dicts, data, params):
options = params['options']
generator_flags = params.get('generator_flags', {})
if options.generator_output:
raise NotImplementedError, "--generator_output not implemented for ninja"
config_name = generator_flags.get('config', None)
if config_name is None:
# Guess which config we want to use: pick the first one from the
# first target.
config_name = target_dicts[target_list[0]]['default_configuration']
# builddir: relative path from source root to our output files.
# e.g. "out/Debug"
builddir = os.path.join(generator_flags.get('output_dir', 'out'), config_name)
master_ninja = ninja_syntax.Writer(
OpenOutput(os.path.join(options.toplevel_dir, builddir, 'build.ninja')),
width=120)
# TODO: compute cc/cxx/ld/etc. by command-line arguments and system tests.
master_ninja.variable('cc', os.environ.get('CC', 'gcc'))
master_ninja.variable('cxx', os.environ.get('CXX', 'g++'))
master_ninja.variable('ld', '$cxx')
NPY neurons in the Arc play a critical role in the control of energy homeostasis.",
{"entities": [(15, 18, NT), (34, 37, LABEL), (66, 95, FUNC)]}),
("In addition, NAc dysfunction is associated with many mental disorders,",
{"entities": [(13, 16, LABEL), (48, 69, FUNC)]}),
("The median number of whole-brain labeled neurons to the NAc subnuclei was 9,518.",
{"entities": [(56, 59, LABEL)]}),
("The 75 brain regions could be grouped into nine major brain areas, including the isocortex, olfactory areas (OLF), hippocampal formation (HPF), cortical subplate (CTXsp), striatum (STR), pallidum (PAL), and thalamus (TH).",
{"entities": [(81, 90, LABEL), (92, 107, LABEL), (109, 112, LABEL), (115, 136, LABEL), (138, 141, LABEL), (144, 161, LABEL), (163, 168, LABEL), (171, 179, LABEL), (181, 184, LABEL), (187, 195, LABEL), (203, 211, LABEL), (213, 215, LABEL)]}),
("Summary of Distribution of Input Neurons to Subregions of NAcC and NAcS",
{"entities": [(58, 62, LABEL), (67, 71, LABEL)]}),
("The critical cardiovascular features include hypotension and paradoxical sinus bradycardia, heart block, or sinus arrest after sympathetic excitation.",
{"entities": []}),
("The neuropeptide oxytocin is mainly produced in the hypothalamic paraventricular (PVN) and supraoptic nuclei (SON), and it is released from magnocellular cells by axonal and somatodendritic release.",
{"entities": [(17, 25, NT), (52, 64, LABEL), (65, 80, LABEL), (82, 85, LABEL), (91, 108, LABEL), (110, 113, LABEL)]}),
("The VTA projects primarily to NAcc/PFC and receives input from the lateral hypothalamus that detects the presence of food reward (Schultz, 1998).",
{"entities": [(4, 7, LABEL), (30, 34, LABEL), (35, 38, LABEL), (67, 87, LABEL), (130, 137, PER), (139, 143, "DATE"), (93, 128, FUNC)]}),
("Notwithstanding, the approach system must interact and even suppress the aversive system to attain reward.",
{"entities": []}),
("In brief, in instrumental conditioning, the response associated with the availability of a reward excites the pathway from the VTA to NAcc, which in turn triggers an approach response by acting on the motor system.",
{"entities": [(127, 130, LABEL), (134, 138, LABEL), (166, 183, FUNC)]}),
("BJ and MM contributed equally to the manuscript.",
{"entities": []}),
("Individuals affected by prenatal alcohol exposure (PAE) can present with a complex profile of cognitive, behavioral, physical, and mental health problems.",
{"entities": []}),
("All SES measures were based on the participants\u2019 current caregiver participating in the study.",
{"entities": []}),
("A secondary aim was to examine the potential cognitive relevance of observed differences in SES-brain associations between the Control and PAE groups.",
{"entities": []}),
("All analyses were conducted with the statistical program R (v3.0.2) (R Core Team, 2013).",
{"entities": []}),
###100
("Prescription opioid medication dispensings were also extracted for the matched samples.",
{"entities": []}),
("Homelessness, housing and health are intrinsically linked [10]. ",
{"entities": []}),
("Descriptive statistics are reported as percentages for dichotomous variables and means with standard deviations for all other variables.",
{"entities": []}),
("Imaging revealed enhanced soft tissue thickening around the right paraspinal site at T11–12 (Fig. 2A).",
{"entities": []}),
("In 2016, 40% of the opioid-related overdose deaths were associated with the use of prescription opioids.",
{"entities": [(3, 7, "DATE")]}),
("For quantification, pERK levels were normalized against corresponding tubulin levels determined in the same experiment, which served as a loading control.",
{"entities": [(20, 24, PHYS), (70, 77, PHYS)]}),
("Dopamine (DA) released by dopaminergic VTA and SNc inputs to striatum signals the difference between received and expected rewards the reward prediction error (RPE)",
{"entities": [(0, 8, NT), (10, 12, NT), (26, 38, NT), (39, 42, LABEL), (47, 50, LABEL), (61, 69, LABEL), (82, 131, FUNC), (135, 158, FUNC), (160, 163, FUNC)]}),
("The BG is suggested to remain involved in action selection after the action-reward association is learned [5,25].",
{"entities": [(4, 6, LABEL), (42, 58, FUNC)]}),
###TESTED TO HERE
("On the other hand, clinical interventions for Parkinson disease (PD) do not cause impairments in learned movements [26–28].",
{"entities": []}),
("Specifically, GPi lesions and deep brain stimulation (DBS) in the STN, which both thought to disrupt the main output of the BG, are used to improve motor functions.",
{"entities": [(14, 17, LABEL), (66, 69, LABEL), (148, 163, FUNC)]}),
#110
("Other models are constructed based on functional ideas and emulate how biophysical changes caused by a disorder violate the functions [35–39].",
{"entities": []}),
("We adopt rate model formalism extensively used to reproduce activity and function of numerous brain structures [46].",
{"entities": []}),
("The BG receives inputs from the prefrontal cortex (PFC) signaling the conditioning stimulus (CS) as well as reward inputs via substantia nigra pars compacta",
{"entities": [(4, 6, LABEL), (32, 49, LABEL), (51, 54, LABEL), (126, 156, LABEL), (56, 91, FUNC), (108, 121, FUNC)]}),
("The rest of the nuclei are the globus pallidus external (GPe), subthalamic nucleus (STN), and the output structures: substantia nigra pars reticulata and globus pallidus internal (SNr/GPi).",
{"entities": [(31, 55, LABEL), (57, 60, LABEL), (63, 82, LABEL), (84, 87, LABEL), (117, 149, LABEL), (154, 178, LABEL), (180, 183, LABEL), (184, 187, LABEL)]}),
("The activity of every neuron is governed by the following differential equation [42]:",
{"entities": []}),
("The pathology of Huntington’s Disease (HD) is less well-understood; however, it is clear that there is a progression of the disease from chorea (involuntary, jerky movement) at its onset to akinesia (loss of the power of voluntary movement) at its conclusion [76].",
{"entities": []}),
("For example, rates of cancer incidence and mortality [16], heart disease [17], obesity [18], diabetes [19], smoking [20], and drug use [21] are all higher in Appalachia compared to other US regions, and those living in Appalachia are at greater risk of low health literacy [22] and perceive their overall health status as poorer than those living in other areas [23].",
{"entities": []}),
("Elevated rates of opioid use in the region likely intersect with family planning practices in a variety of ways.",
{"entities": [(18, 24, NT)]}),
("3.1.2. Appropriate Outreach Materials",
{"entities": []}),
("Spinal epidural abscess (SEA) is a rare but life-threatening infection; recent epidemiologic studies report an increased incidence of SEA over the past two decades [1, 2].",
{"entities": []}),
#120
("Review of laboratory tests revealed a hemoglobin of 11.1 g/dL (nonpregnant reference range: 12-14 g/dL), hematocrit of 33.1% (nonpregnant reference range: 37%-48%),",
{"entities": []}),
("The use of cannabis during pregnancy could produce neurochemical alterations both in humans and in research animals.",
{"entities": []}),
("Chronic non-malignant pain (CNMP) affects between an estimated 11% and 20% of the population in Europe and USA and can impact heavily on people’s quality of life.",
{"entities": []}),
("Another review, which identified 11 patient survey studies, found that between 40 and 94% of prescribed pills went unused, with one outlier reporting roughly 10% unused (57).",
{"entities": []}),
("KORs are Gi/o protein-coupled receptors highly expressed in the midbrain dopamine system (Mansour et al., 1996)",
{"entities": [(0, 4, PHYS), (64, 88, LABEL), (90, 97, PER), (106, 110, "DATE")]}),
("Moreover, Margolis et al. (2006) found that KORs inhibit VTA dopamine neurons that project to the mPFC and basolateral amygdala, but not those that project to the NAc.",
{"entities": [(10, 18, PER), (27, 31, "DATE"), (44, 48, PHYS), (49, 56, FUNC), (57, 60, LABEL), (61, 69, NT)]}),
("Furthermore, the activation of KOR decreases the amplitude of excitatory (Margolis et al., 2005) and inhibitory (Ford et al., 2007) postsynaptic currents into midbrain dopamine neurons.",
{"entities": [(31, 34, PHYS), (35, 72, FUNC), (74, 82, PER), (91, 95, "DATE"), (113, 117, PER), (126, 130, "DATE"), (159, 184, LABEL)]}),
("How do KORs modulate dopamine signaling to elaborate motivated behaviors and when does it result in a sensitized compulsive behavior?",
{"entities": [(7, 11, PHYS), (21, 29, NT)]}),
("AE and JF wrote the first draft of the manuscript with input from MA.",
{"entities": []}),
<reponame>Zacharias030/ProGraML
# Copyright 2019 the ProGraML authors.
#
# Contact <NAME> <<EMAIL>>.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A module for analyzing log databases."""
import pathlib
from typing import Dict
from typing import Iterable
from typing import List
from typing import Optional
from typing import Tuple
import numpy as np
import pandas as pd
import sklearn.metrics
import sqlalchemy as sql
from matplotlib import pyplot as plt
from matplotlib import ticker
from deeplearning.ml4pl import run_id as run_id_lib
from deeplearning.ml4pl.graphs.labelled import graph_database_reader
from deeplearning.ml4pl.graphs.labelled import graph_tuple
from deeplearning.ml4pl.graphs.labelled import graph_tuple_database
from deeplearning.ml4pl.models import checkpoints
from deeplearning.ml4pl.models import epoch
from deeplearning.ml4pl.models import export_logs
from deeplearning.ml4pl.models import log_database
from labm8.py import app
from labm8.py import decorators
from labm8.py import progress
from labm8.py import sqlutil
# export_logs module is required to pull in dependencies required to construct
# summary tables.
del export_logs
FLAGS = app.FLAGS
app.DEFINE_output_path(
"log_analysis_outdir",
None,
"When //deeplearning/ml4pl/models:log_analysis is executed as a script, this "
"determines the directory to write files to.",
)
class LogAnalyzer(object):
"""Analyze the logs in a database."""
def __init__(
self,
log_db: log_database.Database,
run_ids: List[run_id_lib.RunId] = None,
ctx: progress.ProgressContext = progress.NullContext,
):
self.log_db = log_db
self.run_ids = run_ids
self.ctx = ctx
# Check that the requested run exists in the database.
if not self.log_db.run_ids:
raise ValueError("Log database is empty")
for run_id in self.run_ids:
if str(run_id) not in self.log_db.run_ids:
raise ValueError(f"Run ID not found: {run_id}")
@decorators.memoized_property
def tables(self) -> Dict[str, pd.DataFrame]:
"""Get the {parameters, epochs, runs} tables for the run."""
return {
name: df for name, df in self.log_db.GetTables(run_ids=self.run_ids)
}
def PlotEpochMetrics(
self, metric: str, epoch_types: List[str] = None, ax=None,
) -> plt.axis:
"""Plot a metric over epochs.
Args:
metric: The metric of interest.
epoch_types: The epoch types to plot. A list of {train,val,test} values.
ax: An axis to plot on.
"""
# Set default argument values.
epoch_typenames = epoch_types or ["train", "val", "test"]
ax = ax or plt.gca()
# Read the epochs table.
df = self.tables["epochs"]
# Check that the requested metric exists.
if f"train_{metric}" not in df:
available_metrics = sorted(
c[len("train_") :] for c in df.columns.values if c.startswith("train_")
)
raise ValueError(
f"No such metric: {metric}. Available metrics: {available_metrics}"
)
# Create the metric plot for each run.
for run_id in set(df["run_id"].values):
for epoch_type in epoch_typenames:
metric_name = f"{epoch_type}_{metric}"
run_df = df[(df["run_id"] == run_id) & (df[metric_name].notnull())]
if len(run_df):
x = run_df["epoch_num"].values
y = run_df[metric_name].values
ax.plot(x, y, label=f"{run_id}:{epoch_type}")
# Configure the Y axis.
ax.set_ylabel(metric.capitalize())
# Configure the X axis.
ax.set_xlabel("Epoch")
# Force integer axis for epoch number.
ax.xaxis.set_major_locator(ticker.MaxNLocator(integer=True))
# Force the legend.
plt.legend()
return ax
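# Usage sketch (the database handle and run IDs are hypothetical):
#
#   analyzer = LogAnalyzer(log_db=my_log_db, run_ids=[my_run_id])
#   ax = analyzer.PlotEpochMetrics("accuracy", epoch_types=["train", "val"])
#   plt.show()
#
# Any metric with train_/val_/test_ columns in the epochs table can be
# plotted; asking for anything else raises ValueError listing what exists.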
class RunLogAnalyzer(LogAnalyzer):
"""Analyse the logs of a single run."""
def __init__(
self,
log_db: log_database.Database,
run_id: run_id_lib.RunId,
graph_db: Optional[graph_tuple_database.Database] = None,
ctx: progress.ProgressContext = progress.NullContext,
):
super(RunLogAnalyzer, self).__init__(
log_db=log_db, run_ids=[run_id], ctx=ctx
)
self._graph_db = graph_db
@decorators.memoized_property
def graph_db(self) -> graph_tuple_database.Database:
"""Return the graph database for a run. This is reconstructed from the
--graph_db flag value recorded for the run."""
if self._graph_db:
return self._graph_db
with self.log_db.Session() as session:
graph_param: log_database.Parameter = session.query(
log_database.Parameter
).filter(
log_database.Parameter.type_num
== log_database.ParameterType.FLAG.value,
log_database.Parameter.run_id == str(self.run_ids[0]),
log_database.Parameter.name == "graph_db",
).scalar()
if not graph_param:
raise ValueError("Unable to determine graph_db flag")
graph_db_url = str(graph_param.value)
return graph_tuple_database.Database(graph_db_url, must_exist=True)
@decorators.memoized_property
def best_results(self) -> Dict[epoch.Type, epoch.BestResults]:
"""Get the best results dict.
Returns:
A mapping from <epoch_type, epoch.BestResults> for the best accuracy on
each of the epoch types.
"""
return self.log_db.GetBestResults(run_id=self.run_ids[0])
def GetBestEpochNum(self, metric="best accuracy") -> int:
"""Select the train/val/test epoch logs using the given metric.
Supported metrics are:
best accuracy
best precision
best recall
best f1
90% val acc
95% val acc
99% val acc
99.9% val acc
"""
epochs: pd.DataFrame = self.tables["epochs"]
if metric in {"best accuracy", "best precision", "best recall", "best f1"}:
column_name = "val_" + metric[len("best ") :]
best_epoch = epochs.iloc[epochs[column_name].idxmax()]
epoch_num = best_epoch.epoch_num
elif metric in {
"90% val acc",
"95% val acc",
"99% val acc",
"99.9% val acc",
}:
accuracy = float(metric.split("%")[0]) / 100
matching_rows = epochs[epochs["val_accuracy"] >= accuracy]
if not len(matching_rows):
raise ValueError(f"No {self.run_ids[0]} epochs reached {metric}")
# Select the first epoch when there are more than one matching rows.
epoch_num = epochs.iloc[matching_rows.index[0]].epoch_num
else:
raise ValueError(f"Unknown metric `{metric}`")
return epoch_num
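# Usage sketch (the run ID and the accuracies actually reached are
# hypothetical):
#
#   analyzer = RunLogAnalyzer(log_db=my_log_db, run_id=my_run_id)
#   best = analyzer.GetBestEpochNum("best f1")       # epoch with max val_f1
#   early = analyzer.GetBestEpochNum("95% val acc")  # first epoch reaching
#                                                    # val_accuracy >= 0.95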
def GetGraphsForBatch(
self, batch: log_database.Batch
) -> Iterable[graph_tuple_database.GraphTuple]:
"""Reconstruct the graphs for a batch.
Returns:
An iterable sequence of the unique graphs from a batch. Note that order may
not be the same as the order they appeared in the batch, and that
duplicate graphs in the batch will only be returned once.
"""
if not batch.details:
raise OSError("Cannot re-create batch without detailed logs")
filters = [lambda: graph_tuple_database.GraphTuple.id.in_(batch.graph_ids)]
return graph_database_reader.BufferedGraphReader(
self.graph_db, filters=filters
)
def GetInputOutputGraphs(
self, batch: log_database.Batch
) -> Iterable[Tuple[graph_tuple.GraphTuple, graph_tuple.GraphTuple]]:
"""Reconstruct the input/output graphs for a batch.
This returns the raw GraphTuples for a batch, with node_y or graph_y
attributes on the output graphs set to the raw model predictions.
"""
# Read the graphs from the batch.
unique_graphs: List[graph_tuple_database.GraphTuple] = list(
self.GetGraphsForBatch(batch)
)
id_to_graphs = {graph.id: graph for graph in unique_graphs}
# Re-construct the full set of graphs from the batch.
input_graphs: List[graph_tuple_database.GraphTuple] = [
id_to_graphs[id] for id in batch.graph_ids
]
# Reconstruct the output graphs.
predictions = batch.predictions
if input_graphs[0].node_y_dimensionality:
# Node-level predictions:
node_count = 0
for graph in input_graphs:
input_graph = graph.tuple
output_graph = input_graph.SetFeaturesAndLabels(
node_y=predictions[node_count : node_count + input_graph.node_count],
copy=False,
)
node_count += input_graph.node_count
yield input_graph, output_graph
elif input_graphs[0].graph_y_dimensionality:
# Graph-level predictions:
for graph_count, graph in enumerate(input_graphs):
input_graph = graph.tuple
output_graph = input_graph.SetFeaturesAndLabels(
graph_y=predictions[graph_count], copy=False
)
yield input_graph, output_graph
else:
raise NotImplementedError("neither node_y or graph_y set")
@staticmethod
def NodeConfusionMatrix(
input_graph: graph_tuple.GraphTuple, output_graph: graph_tuple.GraphTuple
) -> pd.DataFrame:
"""Build a confusion matrix for the given input/output graph pair."""
targets = input_graph.node_y
predictions = output_graph.node_y
confusion_matrix = BuildConfusionMatrix(
targets=targets, predictions=predictions
)
return pd.DataFrame(
confusion_matrix,
columns=[f"pred_{i}" for i in range(len(confusion_matrix))],
index=[f"true_{i}" for i in range(len(confusion_matrix))],
)
def SortGraphsByAccuracy(
input_output_graphs: Iterable[
Tuple[graph_tuple.GraphTuple, graph_tuple.GraphTuple]
]
) -> List[Tuple[graph_tuple.GraphTuple, graph_tuple.GraphTuple]]:
"""Sort the list of input/output graphs by their accuracy."""
input_output_graphs = list(input_output_graphs)
return sorted(input_output_graphs, key=lambda x: ComputeGraphAccuracy(*x))
def ComputeGraphAccuracy(
input_graph: graph_tuple.GraphTuple, output_graph: graph_tuple.GraphTuple,
):
"""Return the classification accuracy of the given input/output graph.
Supports node-level or graph-level labels.
Returns:
Accuracy in the range 0 <= x <= 1.
"""
if input_graph.has_node_y:
true_y = np.argmax(input_graph.node_y, axis=1)
pred_y = np.argmax(output_graph.node_y, axis=1)
elif input_graph.has_graph_y:
true_y = np.argmax(input_graph.graph_y)
pred_y = np.argmax(output_graph.graph_y)
else:
raise NotImplementedError("unreachable")
return sklearn.metrics.accuracy_score(true_y, pred_y)
def BuildConfusionMatrix(targets: np.array, predictions: np.array) -> np.array:
"""Build a confusion matrix.
Args:
targets: A list of 1-hot vectors with shape
(num_instances, num_classes), dtype int32.
predictions: A list of 1-hot vectors with shape
(num_instances, num_classes), dtype float32.
Returns:
A pickled confusion matrix, which is a matrix of shape
[num_classes, num_classes] where the rows indicate true target class,
the columns indicate predicted target class, and the element values are
the number of instances of this type in the batch.
"""
if targets.shape != predictions.shape:
raise TypeError(
f"Predictions shape {predictions.shape} must match targets "
f"shape {targets.shape}"
)
num_classes = targets.shape[1]
# Convert 1-hot vectors to dense lists of integers.
targets = np.argmax(targets, axis=1)
predictions = np.argmax(predictions, axis=1)
confusion_matrix = np.zeros((num_classes, num_classes), dtype=np.int32)
for target, prediction in zip(targets, predictions):
confusion_matrix[target][prediction] += 1
return confusion_matrix
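# Worked example (illustrative, not part of the original module): three
# instances, two classes.
#
#   targets     = np.array([[1, 0], [0, 1], [0, 1]], dtype=np.int32)
#   predictions = np.array([[0.9, 0.1], [0.2, 0.8], [0.7, 0.3]],
#                          dtype=np.float32)
#   BuildConfusionMatrix(targets, predictions)
#   # -> array([[1, 0],
#   #           [1, 1]], dtype=int32)
#
# Row 1 / column 0 records the class-1 instance that was predicted as class 0.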
class WriteGraphsToFile(progress.Progress):
"""Write graphs in a graph database to pickled files.
This is for debugging.
"""
def __init__(self, outdir: pathlib.Path):
self.outdir = outdir
self.outdir.mkdir(parents=True, exist_ok=True)
checkpoint_ref = checkpoints.CheckpointReference.FromString(FLAGS.run_id)
self.analysis = RunLogAnalyzer(
log_db=FLAGS.log_db(), run_id=checkpoint_ref.run_id,
)
self.epoch_num = 0
with self.analysis.log_db.Session() as session:
detailed_batch_graph_count = (
session.query(sql.func.sum(log_database.Batch.graph_count))
.filter(
log_database.Batch.run_id == str(checkpoint_ref.run_id),
log_database.Batch.epoch_num == self.epoch_num,
)
.join(log_database.BatchDetails)
.scalar()
)
super(WriteGraphsToFile, self).__init__(
"analyzer", i=0, n=detailed_batch_graph_count
)
self.analysis.ctx = self.ctx
def Run(self):
"""Read and write the graphs."""
# GetInputOutputGraphs
with self.analysis.log_db.Session() as session:
query = (
session.query(log_database.Batch)
.options(sql.orm.joinedload(log_database.Batch.details))
.filter(
log_database.Batch.run_id == str(self.analysis.run_id),
log_database.Batch.epoch_num == self.epoch_num,
)
.join(log_database.BatchDetails)
)
for i, batches in enumerate(
sqlutil.OffsetLimitBatchedQuery(query, batch_size=512)
):
<reponame>guiferviz/recipipe<gh_stars>1-10
from unittest import TestCase
from unittest.mock import MagicMock
from tests.fixtures import TransformerMock
from tests.fixtures import RecipipeTransformerMock
from tests.fixtures import create_df_3dtypes
import recipipe as r
class RecipipeTest(TestCase):
def test_no_error_empty_init(self):
"""Test the pipeline constructor.
No expecting any error with empty pipelines.
"""
r.recipipe()
def test_error_empty_fit(self):
"""An exception should be throw when trying to fit empty pipelines.
The type of the exception is not important.
"""
p = r.recipipe()
with self.assertRaises(Exception):
p.fit(None) # We check later that fit None should work.
def test_with_transformers(self):
"""Constructor with transformers. """
t = TransformerMock()
p = r.recipipe([t, t])
def test_len_empty(self):
"""Len of pipeline should give 0 when no transformers provided. """
p = r.recipipe()
self.assertEqual(len(p), 0)
def test_len_with_transformers(self):
"""Len of pipeline should give the number of transformers provided. """
t = TransformerMock()
p = r.recipipe([t, t])
self.assertEqual(len(p), 2)
def test_transformer_name_generic(self):
"""The steps should have a default name indicating the step index. """
t0 = TransformerMock()
t1 = TransformerMock()
p = r.recipipe([t0, t1])
self.assertEqual(p.steps[0][0], "step00")
self.assertEqual(p.steps[1][0], "step01")
self.assertEqual(p.steps[0][1], t0)
self.assertEqual(p.steps[1][1], t1)
def test_transformer_name_attribute(self):
"""The steps should have transformer attr "name" as key. """
t0 = TransformerMock()
t0.name = "t0_name"
t1 = TransformerMock()
t1.name = "t1_name"
p = r.recipipe([t0, t1])
print(p.steps)
self.assertEqual(p.steps[0][0], "t0_name")
self.assertEqual(p.steps[1][0], "t1_name")
self.assertEqual(p.steps[0][1], t0)
self.assertEqual(p.steps[1][1], t1)
def test_add_after_creation(self):
"""Check the add method. """
t = TransformerMock()
p = r.recipipe()
p.add(t)
self.assertEqual(p.steps[0][0], "step00")
self.assertEqual(p.steps[0][1], t)
def test_add_after_creation_operator(self):
"""Check the + operator. """
t = TransformerMock()
p = r.recipipe()
p + t
self.assertEqual(p.steps[0][0], "step00")
self.assertEqual(p.steps[0][1], t)
def test_fit_and_transform(self):
"""Test that all the transformer fit/transform methods are called. """
t = TransformerMock()
p = r.recipipe([t, t, t])
p.fit(None)
p.transform(None)
# Let's call n = number of steps of the pipeline.
n = 3
# The fit method is called n times.
self.assertEqual(t.n_fit, n)
# Called n * 2 - 1. Pipelines use the output of the previous step
# as input to the next one, so the transform method should be called
# while fitting to know which input the next step is going to receive.
# The last transformer does not need to be used while fitting because
# its output is not required by any other transformer, so - 1.
# Of course, the p.transform call adds n to the number of
# transformations performed.
self.assertEqual(t.n_transform, n * 2 - 1)
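# Concretely, with n == 3 as above: fitting triggers 3 fit calls plus 2
# intermediate transforms, and the explicit p.transform(None) adds 3 more,
# so n_transform == 5 == n * 2 - 1.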
class RecipipeTransformerTest(TestCase):
def test_inheritance(self):
"""Inherit and test constructor. """
class TestTransformer(r.RecipipeTransformer):
pass
TestTransformer()
def test_inheritance_var_args_sklearn_params_no_init(self):
class T(r.RecipipeTransformer):
pass
print(T(keep_original=False))
def test_inheritance_var_args_sklearn_params(self):
"""Params are used as SKLearn estimator params (they are inherit). """
class T1(r.RecipipeTransformer):
def __init__(self, *args, param1=1, **kwargs):
self.param1 = param1
super().__init__(*args, **kwargs)
class T2(T1):
def __init__(self, *args, param1=7, param2=2, **kwargs):
self.param2 = param2
super().__init__(*args, param1=param1, **kwargs)
params = T2(1, 2, param1=3, param2=4, name="<NAME>").get_params()
params_expected = dict(param1=3, param2=4, name="<NAME>",
col_format='{}', cols_init=[1, 2], cols_not_found_error=False,
exclude=[], dtype=None, keep_original=False)
self.assertDictEqual(params, params_expected)
def test_init_cols_mix(self):
t = RecipipeTransformerMock(cols_init=[
"c1", ["c2"], set(["c3"]), ("c4", "c5")])
self.assertEqual(len(t.cols_init), 5)
def test_init_args_mix(self):
"""Strs, lists, sets and tuples are allowed as var args. """
t = RecipipeTransformerMock("c1", ["c2"], set(["c3"]), ("c4", "c5"))
self.assertEqual(len(t.cols_init), 5)
def test_init_cols_args(self):
"""Cols is appended to args. """
t = RecipipeTransformerMock("c1", cols_init=["c2"])
self.assertListEqual(t.cols_init, ["c1", "c2"])
def test_fit_cols_and_dtype(self):
t = RecipipeTransformerMock("c*", dtype=int)
t.fit(create_df_3dtypes())
self.assertListEqual(t.cols, ["c1"])
self.assertEqual(t.n_fit, 1)
def test_fit_cols_and_dtype_exclude(self):
t = RecipipeTransformerMock("c*", dtype=dict(exclude=int))
t.fit(create_df_3dtypes())
self.assertListEqual(t.cols, ["c2"])
self.assertEqual(t.n_fit, 1)
def test_fit_cols_all(self):
"""When not cols are specified we need to fit all of them. """
t = RecipipeTransformerMock()
t.fit(create_df_3dtypes())
self.assertListEqual(t.cols, ["c1", "c2", "t1"])
self.assertEqual(t.n_fit, 1)
def test_fit_cols_keep_original_collision(self):
"""Keep original only works when no name collisions exist. """
t = RecipipeTransformerMock(keep_original=True)
with self.assertRaises(ValueError):
t.fit(create_df_3dtypes())
def test_fit_exclude(self):
t = RecipipeTransformerMock(exclude=["c1", ["c*"]])
t.fit(create_df_3dtypes())
self.assertListEqual(t.cols, ["t1"])
self.assertEqual(t.n_fit, 1)
# TODO: this test is not working, do we really need to check the returned
# column mapping?
def _test_fit_check_column_mapping(self):
t = r.RecipipeTransformer()
t.get_column_mapping = MagicMock(return_value={
"c1": ["c1", "c2"], "c2": []})
with self.assertRaises(ValueError):
t.fit(create_df_3dtypes())
def test_get_column_map_not_fitted(self):
"""Error in column map if no columns are fitted. """
t = RecipipeTransformerMock()
with self.assertRaises(ValueError):
t.get_column_mapping()
def test_get_column_map(self):
"""Default column mapping, 1:1 mapping. """
t = RecipipeTransformerMock()
t.cols = ["c2", "c1"]
cols_map = t.get_column_mapping()
self.assertDictEqual(cols_map, {"c2": "c2", "c1": "c1"})
def test_get_column_map_format(self):
"""Column mapping should use `cols_format`. """
t = RecipipeTransformerMock(col_format="{}_new")
t.cols = ["c2", "c1"]
cols_map = t.get_column_mapping()
self.assertDictEqual(cols_map, {"c2": "c2_new", "c1": "c1_new"})
def test_transform_all_columns(self):
"""Transform a df and return the same columns. """
t = RecipipeTransformerMock()
df = create_df_3dtypes()
t.fit(df)
df = t.transform(df)
self.assertListEqual(list(df.columns), ["c1", "c2", "t1"])
def test_transform_some_columns(self):
class C(r.RecipipeTransformer):
def _transform(self, df):
return df[self.cols]
t = C("c1", "c2")
df = create_df_3dtypes()
t.fit(df)
df = t.transform(df)
self.assertListEqual(list(df.columns), ["c1", "c2", "t1"])
def test_transform_keep_original(self):
class C(r.RecipipeTransformer):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def _transform(self, df):
df = df[self.cols]
df.columns = [self.col_format.format(i) for i in df.columns]
return df
t = C("c1", "c2", keep_original=True, col_format="{}_out")
df = create_df_3dtypes()
t.fit(df)
df = t.transform(df)
out_cols = ["c1", "c1_out", "c2", "c2_out", "t1"]
self.assertListEqual(list(df.columns), out_cols)
def test_transform_keep_original_false_and_format(self):
class C(r.RecipipeTransformer):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def _transform(self, df):
df = df[self.cols]
df.columns = [self.col_format.format(i) for i in df.columns]
return df
t = C("c1", "c2", keep_original=False, col_format="{}_out")
df = create_df_3dtypes()
t.fit(df)
df = t.transform(df)
out_cols = ["c1_out", "c2_out", "t1"]
self.assertListEqual(list(df.columns), out_cols)
def test_transform_cols_map_tuples(self):
class C(r.RecipipeTransformer):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def get_column_mapping(self):
return {"c1": ("c1_1", "c1_2"), ("c1", "t1"): "c1t1"}
def _transform(self, df):
df = df[["c1", "c1", "t1"]]
df.columns = ["c1_1", "c1_2", "c1t1"]
return df
t = C("c1", "t1")
df = create_df_3dtypes()
t.fit(df)
df = t.transform(df)
out_cols = ["c1_1", "c1_2", "c1t1", "c2"]
self.assertListEqual(list(df.columns), out_cols)
def test_transform_cols_map_str_and_tuples(self):
"""Test 1:1 and n:1 in the same map. """
class C(r.RecipipeTransformer):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def get_column_mapping(self):
return {"c1": "c1", ("c1", "t1"): "c1t1"}
def _transform(self, df):
df = df[["c1", "t1"]]
df.columns = ["c1", "c1t1"]
return df
t = C("c1", "t1")
df = create_df_3dtypes()
t.fit(df)
df = t.transform(df)
out_cols = ["c1", "c1t1", "c2"]
self.assertListEqual(list(df.columns), out_cols)
def test_cols_taken_from_col_map(self):
"""If no cols are given, the col_map should be used to obtain them. """
class C(r.RecipipeTransformer):
def get_column_mapping(self):
return {"c1": ["hi", "bye"]}
t = C()
t.fit(create_df_3dtypes())
self.assertListEqual(t.cols, ["c1"])
def test_inverse_transform_cols_map_tuples(self):
class C(r.RecipipeTransformer):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def get_column_mapping(self):
return {"c1": ("c1_1", "c1_2"), ("c1", "t1"): "c1t1"}
def _transform(self, df):
df = df[["c1", "c1", "t1"]]
df.columns = ["c1_1", "c1_2", "c1t1"]
return df
def _inverse_transform(self, df):
df = df[["c1_1", "c1t1"]]
df.columns = ["c1", "t1"]
return df
t = C("c1", "t1")
df = create_df_3dtypes()
t.fit(df)
df = t.transform(df)
df = t.inverse_transform(df)
out_cols = ["c1", "t1", "c2"]
self.assertListEqual(list(df.columns), out_cols)
def test_inverse_transform_cols_map_str_and_tuples(self):
"""Test 1:1 and n:1 in the same map. """
class C(r.RecipipeTransformer):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def get_column_mapping(self):
return {"c1": "c1", ("c1", "t1"): "c1t1"}
def _transform(self, df):
df = df[["c1", "t1"]]
df.columns = ["c1", "c1t1"]
return df
def _inverse_transform(self, df):
df = df[["c1", "c1t1"]]
df.columns = ["c1", "t1"]
return df
t = C("c1", "t1")
df = create_df_3dtypes()
t.fit(df)
df = t.transform(df)
df = t.inverse_transform(df)
out_cols = ["c1", "t1", "c2"]
self.assertListEqual(list(df.columns), out_cols)
def test_inverse_transform_keep_original_false_and_format(self):
class C(r.RecipipeTransformer):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def _transform(self, df):
df = df[self.cols]
df.columns = [self.col_format.format(i) for i in df.columns]
return df
def _inverse_transform(self, df):
df = df[["c1_out", "c2_out"]]
df.columns = ["c1", "c2"]
return df
t = C("c1", "c2", keep_original=False, col_format="{}_out")
df = create_df_3dtypes()
t.fit(df)
df = t.transform(df)
df = t.inverse_transform(df)
out_cols = ["c1", "c2", "t1"]
self.assertListEqual(list(df.columns), out_cols)
def test_inverse_transform_keep_original_true_and_format(self):
class C(r.RecipipeTransformer):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def _transform(self, df):
df = df[self.cols]
df.columns = ["c1_out", "c2_out"]
return df
def _inverse_transform(self, df):
df = df[["c1_out", "c2_out"]]
df.columns = ["c1", "c2"]
return df
t = C("c*", keep_original=True, col_format="{}_out")
df = create_df_3dtypes()
t.fit(df)
df = t.transform(df)
df = t.inverse_transform(df)
out_cols = ["c1", "c2", "t1"]
self.assertListEqual(list(df.columns), out_cols)
def test_inverse_transform_keep_original_without_original(self):
class C(r.RecipipeTransformer):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def _transform(self, df):
df = df[self.cols]
df.columns = ["c1_out", "c2_out"]
return df
def _inverse_transform(self, df):
df = df[["c1_out", "c2_out"]]
df.columns = ["c1", "c2"]
return df
t = C("c*", keep_original=True, col_format="{}_out")
df = create_df_3dtypes()
t.fit(df)
df = t.transform(df)
df = df.drop(["c1", "c2"], axis=1)
df = t.inverse_transform(df)
out_cols = ["c1", "c2", "t1"]
self.assertListEqual(list(df.columns), out_cols)
def test_transform_no_fit(self):
"""Raise exception if the transformer method | |
<filename>lrs/tests/test_AgentProfile.py
import hashlib
import urllib
import base64
import json
import ast
from django.test import TestCase
from django.conf import settings
from django.core.urlresolvers import reverse
from adl_lrs.views import register
class AgentProfileTests(TestCase):
testagent = '{"mbox":"mailto:<EMAIL>"}'
otheragent = '{"mbox":"mailto:<EMAIL>"}'
content_type = "application/json"
testprofileId1 = "http://profile.test.id/test/1"
testprofileId2 = "http://profile.test.id/test/2"
testprofileId3 = "http://profile.test.id/test/3"
otherprofileId1 = "http://profile.test.id/other/1"
@classmethod
def setUpClass(cls):
print "\n%s" % __name__
super(AgentProfileTests, cls).setUpClass()
def setUp(self):
self.username = "tester"
self.email = "<EMAIL>"
self.password = "<PASSWORD>"
self.auth = "Basic %s" % base64.b64encode(
"%s:%s" % (self.username, self.password))
form = {'username': self.username, 'email': self.email,
'password': self.password, 'password2': self.password}
self.client.post(reverse(register), form,
X_Experience_API_Version=settings.XAPI_VERSION)
self.testparams1 = {
"profileId": self.testprofileId1, "agent": self.testagent}
path = '%s?%s' % (reverse('lrs:agent_profile'),
urllib.urlencode(self.testparams1))
self.testprofile1 = {"test": "put profile 1", "obj": {"agent": "test"}}
self.put1 = self.client.put(path, self.testprofile1, content_type=self.content_type,
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.testparams2 = {
"profileId": self.testprofileId2, "agent": self.testagent}
path = '%s?%s' % (reverse('lrs:agent_profile'),
urllib.urlencode(self.testparams2))
self.testprofile2 = {"test": "put profile 2", "obj": {"agent": "test"}}
self.put2 = self.client.put(path, self.testprofile2, content_type=self.content_type,
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.testparams3 = {
"profileId": self.testprofileId3, "agent": self.testagent}
path = '%s?%s' % (reverse('lrs:agent_profile'),
urllib.urlencode(self.testparams3))
self.testprofile3 = {"test": "put profile 3", "obj": {"agent": "test"}}
self.put3 = self.client.put(path, self.testprofile3, content_type=self.content_type,
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.testparams4 = {
"profileId": self.otherprofileId1, "agent": self.otheragent}
path = '%s?%s' % (reverse('lrs:agent_profile'),
urllib.urlencode(self.testparams4))
self.otherprofile1 = {
"test": "put profile 1", "obj": {"agent": "other"}}
self.put4 = self.client.put(path, self.otherprofile1, content_type=self.content_type,
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
def tearDown(self):
self.client.delete(reverse('lrs:agent_profile'), self.testparams1,
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.client.delete(reverse('lrs:agent_profile'), self.testparams2,
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.client.delete(reverse('lrs:agent_profile'), self.testparams3,
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.client.delete(reverse('lrs:agent_profile'), self.testparams4,
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
def test_get_agent_not_found(self):
a = '{"mbox":"mailto:<EMAIL>"}'
p = 'http://agent.not.found'
param = {"profileId": p, "agent": a}
r = self.client.get(reverse('lrs:agent_profile'), param,
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(r.status_code, 404)
def test_put(self):
self.assertEqual(self.put1.status_code, 204)
self.assertEqual(self.put1.content, '')
self.assertEqual(self.put2.status_code, 204)
self.assertEqual(self.put2.content, '')
self.assertEqual(self.put3.status_code, 204)
self.assertEqual(self.put3.content, '')
self.assertEqual(self.put4.status_code, 204)
self.assertEqual(self.put4.content, '')
def test_put_etag_missing_on_change(self):
path = '%s?%s' % (reverse('lrs:agent_profile'),
urllib.urlencode(self.testparams1))
profile = {"test": "error - trying to put new profile w/o etag header",
"obj": {"agent": "test"}}
response = self.client.put(path, profile, content_type=self.content_type,
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(response.status_code, 409)
self.assertIn(
'If-Match and If-None-Match headers were missing', response.content)
r = self.client.get(reverse('lrs:agent_profile'), self.testparams1,
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(r.status_code, 200)
robj = ast.literal_eval(r.content)
self.assertEqual(robj['test'], self.testprofile1['test'])
self.assertEqual(robj['obj']['agent'],
self.testprofile1['obj']['agent'])
def test_put_etag_right_on_change(self):
path = '%s?%s' % (reverse('lrs:agent_profile'),
urllib.urlencode(self.testparams1))
profile = {"test": "good - trying to put new profile w/ etag header",
"obj": {"agent": "test"}}
thehash = '"%s"' % hashlib.sha1('%s' % self.testprofile1).hexdigest()
response = self.client.put(path, profile, content_type=self.content_type, If_Match=thehash,
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(response.status_code, 204)
self.assertEqual(response.content, '')
r = self.client.get(reverse('lrs:agent_profile'), self.testparams1,
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(r.status_code, 200)
robj = ast.literal_eval(r.content)
self.assertEqual(robj['test'], profile['test'])
self.assertEqual(robj['obj']['agent'], profile['obj']['agent'])
def test_put_etag_wrong_on_change(self):
path = '%s?%s' % (reverse('lrs:agent_profile'),
urllib.urlencode(self.testparams1))
profile = {"test": "error - trying to put new profile w/ wrong etag value",
"obj": {"agent": "test"}}
thehash = '"%s"' % hashlib.sha1('%s' % 'wrong hash').hexdigest()
response = self.client.put(path, profile, content_type=self.content_type, If_Match=thehash,
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(response.status_code, 412)
self.assertIn('No resources matched', response.content)
r = self.client.get(reverse('lrs:agent_profile'), self.testparams1,
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(r.status_code, 200)
robj = ast.literal_eval(r.content)
self.assertEqual(robj['test'], self.testprofile1['test'])
self.assertEqual(robj['obj']['agent'],
self.testprofile1['obj']['agent'])
def test_put_etag_if_none_match_good(self):
params = {"profileId": 'http://etag.nomatch.good',
"agent": self.testagent}
path = '%s?%s' % (reverse('lrs:agent_profile'),
urllib.urlencode(params))
profile = {"test": "good - trying to put new profile w/ if none match etag header",
"obj": {"agent": "test"}}
response = self.client.put(path, profile, content_type=self.content_type, If_None_Match='*',
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(response.status_code, 204)
self.assertEqual(response.content, '')
r = self.client.get(reverse('lrs:agent_profile'), params,
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(r.status_code, 200)
robj = ast.literal_eval(r.content)
self.assertEqual(robj['test'], profile['test'])
self.assertEqual(robj['obj']['agent'], profile['obj']['agent'])
r = self.client.delete(reverse('lrs:agent_profile'), params,
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
def test_put_etag_if_none_match_bad(self):
path = '%s?%s' % (reverse('lrs:agent_profile'),
urllib.urlencode(self.testparams1))
profile = {"test": "error - trying to put new profile w/ if none match etag but one exists",
"obj": {"agent": "test"}}
response = self.client.put(path, profile, content_type=self.content_type, If_None_Match='*',
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(response.status_code, 412)
self.assertEqual(response.content, 'Resource detected')
r = self.client.get(reverse('lrs:agent_profile'), self.testparams1,
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(r.status_code, 200)
robj = ast.literal_eval(r.content)
self.assertEqual(robj['test'], self.testprofile1['test'])
self.assertEqual(robj['obj']['agent'],
self.testprofile1['obj']['agent'])
def test_get_invalid_agent_structure(self):
r = self.client.get(reverse('lrs:agent_profile'), {
"profileId": self.testprofileId1, "agent": "wrong"}, Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(r.status_code, 400)
self.assertEqual(
r.content, "agent param for agent profile is not valid")
def test_get_invalid_agent(self):
r = self.client.get(reverse('lrs:agent_profile'), {"profileId": self.testprofileId1, "agent": {
"mbox": "foo"}}, Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(r.status_code, 400)
self.assertEqual(
r.content, "agent param for agent profile is not valid")
def test_get(self):
r = self.client.get(reverse('lrs:agent_profile'), self.testparams1,
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(r.status_code, 200)
robj = ast.literal_eval(r.content)
self.assertEqual(robj['test'], self.testprofile1['test'])
self.assertEqual(robj['obj']['agent'],
self.testprofile1['obj']['agent'])
self.assertEqual(r['etag'], '"%s"' % hashlib.sha1(
'%s' % self.testprofile1).hexdigest())
r2 = self.client.get(reverse('lrs:agent_profile'), self.testparams2,
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(r2.status_code, 200)
robj2 = ast.literal_eval(r2.content)
self.assertEqual(robj2['test'], self.testprofile2['test'])
self.assertEqual(robj2['obj']['agent'],
self.testprofile2['obj']['agent'])
self.assertEqual(r2['etag'], '"%s"' % hashlib.sha1(
'%s' % self.testprofile2).hexdigest())
r3 = self.client.get(reverse('lrs:agent_profile'), self.testparams3,
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(r3.status_code, 200)
robj3 = ast.literal_eval(r3.content)
self.assertEqual(robj3['test'], self.testprofile3['test'])
self.assertEqual(robj3['obj']['agent'],
self.testprofile3['obj']['agent'])
self.assertEqual(r3['etag'], '"%s"' % hashlib.sha1(
'%s' % self.testprofile3).hexdigest())
r4 = self.client.get(reverse('lrs:agent_profile'), self.testparams4,
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(r4.status_code, 200)
robj4 = ast.literal_eval(r4.content)
self.assertEqual(robj4['test'], self.otherprofile1['test'])
self.assertEqual(robj4['obj']['agent'],
self.otherprofile1['obj']['agent'])
self.assertEqual(r4['etag'], '"%s"' % hashlib.sha1(
'%s' % self.otherprofile1).hexdigest())
def test_get_no_params(self):
r = self.client.get(reverse('lrs:agent_profile'), Authorization=self.auth,
X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(r.status_code, 400)
self.assertIn('agent parameter missing', r.content)
def test_get_no_agent(self):
params = {"profileId": self.testprofileId1}
r = self.client.get(reverse('lrs:agent_profile'), params,
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(r.status_code, 400)
self.assertIn('agent parameter missing', r.content)
def test_get_no_profileId(self):
params = {"agent": self.testagent}
r = self.client.get(reverse('lrs:agent_profile'), params,
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(r.status_code, 200)
def test_delete(self):
prof_id = "http://deleteme"
params = {"profileId": prof_id, "agent": self.testagent}
path = '%s?%s' % (reverse('lrs:agent_profile'),
urllib.urlencode(params))
profile = {"test": "delete profile", "obj": {"agent": "test"}}
response = self.client.put(path, profile, content_type=self.content_type,
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(response.status_code, 204)
r = self.client.get(reverse('lrs:agent_profile'), params,
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(r.status_code, 200)
robj = ast.literal_eval(r.content)
self.assertEqual(robj['test'], profile['test'])
self.assertEqual(robj['obj']['agent'], profile['obj']['agent'])
r = self.client.delete(reverse('lrs:agent_profile'), params,
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(r.status_code, 204)
r = self.client.get(reverse('lrs:agent_profile'), params,
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(r.status_code, 404)
def test_get_agent_since(self):
prof_id = "http://oldprofile/time"
updated = "2012-06-12:T12:00:00Z"
params = {"profileId": prof_id, "agent": self.testagent}
path = '%s?%s' % (reverse('lrs:agent_profile'),
urllib.urlencode(params))
profile = {"test1": "agent profile since time: %s" %
updated, "obj": {"agent": "test"}}
response = self.client.put(path, profile, content_type=self.content_type, updated=updated,
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(response.status_code, 204)
r = self.client.get(reverse('lrs:agent_profile'), params,
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(r.status_code, 200)
robj = ast.literal_eval(r.content)
self.assertEqual(robj['test1'], profile['test1'])
self.assertEqual(robj['obj']['agent'], profile['obj']['agent'])
since = "2012-07-01T12:00:00Z"
params2 = {"agent": self.testagent, "since": since}
r2 = self.client.get(reverse('lrs:agent_profile'), params2,
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertNotIn(prof_id, r2.content)
self.client.delete(reverse('lrs:agent_profile'), params,
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
def test_get_agent_since_tz(self):
prof_id = "http://oldprofile/time"
updated = "2012-06-12:T12:00:00Z"
params = {"profileId": prof_id, "agent": self.testagent}
path = '%s?%s' % (reverse('lrs:agent_profile'),
urllib.urlencode(params))
profile = {"test2": "agent profile since time: %s" %
updated, "obj": {"agent": "test"}}
response = self.client.put(path, profile, content_type=self.content_type, updated=updated,
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
r = self.client.get(reverse('lrs:agent_profile'), params,
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(r.status_code, 200)
robj = ast.literal_eval(r.content)
self.assertEqual(robj['test2'], profile['test2'])
self.assertEqual(robj['obj']['agent'], profile['obj']['agent'])
prof_id2 = "http://newprofile/timezone"
updated2 = "2012-07-01T08:30:00-04:00"
params2 = {"profileId": prof_id2, "agent": self.testagent}
path2 = '%s?%s' % (reverse('lrs:agent_profile'),
urllib.urlencode(params2))
profile2 = {"test3": "agent profile since time: %s" %
updated2, "obj": {"agent": "test"}}
response = self.client.put(path2, profile2, content_type=self.content_type, updated=updated2,
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(response.status_code, 204)
r2 = self.client.get(reverse('lrs:agent_profile'), params2,
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(r2.status_code, 200)
robj2 = ast.literal_eval(r2.content)
self.assertEqual(robj2['test3'], profile2['test3'])
self.assertEqual(robj2['obj']['agent'], profile2['obj']['agent'])
since = "2012-07-01T12:00:00Z"
par = {"agent": self.testagent, "since": since}
r = self.client.get(reverse('lrs:agent_profile'), par,
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertNotIn(prof_id, r.content)
self.assertIn(prof_id2, r.content)
self.client.delete(reverse('lrs:agent_profile'), params,
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.client.delete(reverse('lrs:agent_profile'), params2,
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
def test_post_put_delete(self):
prof_id = "http://deleteme.too"
path = '%s?%s' % (reverse('lrs:agent_profile'),
urllib.urlencode({"method": "PUT"}))
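        # Note (added for clarity): this test exercises the xAPI alternate
        # request syntax for CORS-restricted clients - the real method is sent
        # as a query parameter while the headers and content ride in the
        # form-encoded POST body below.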
content = {"test": "delete profile", "obj": {
"actor": "test", "testcase": "ie cors post for put and delete"}}
thedata = "profileId=%s&agent=%s&content=%s&Authorization=%s&Content-Type=application/json&X-Experience-API-Version=1.0.0" % (
prof_id, self.testagent, content, self.auth)
response = self.client.post(
path, thedata, content_type="application/x-www-form-urlencoded")
self.assertEqual(response.status_code, 204)
r = self.client.get(reverse('lrs:agent_profile'), {
"profileId": prof_id, "agent": self.testagent}, Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(r.status_code, 200)
import ast
c = ast.literal_eval(r.content)
self.assertEqual(c['test'], content['test'])
thedata = "profileId=%s&agent=%s&Authorization=%s&X-Experience-API-Version=1.0" % (
prof_id, self.testagent, self.auth)
path = '%s?%s' % (reverse('lrs:agent_profile'),
urllib.urlencode({"method": "DELETE"}))
r = self.client.post(path, thedata, content_type="application/x-www-form-urlencoded",
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(r.status_code, 204)
r = self.client.get(reverse('lrs:agent_profile'), {
"profileId": prof_id, "agent": self.testagent}, Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(r.status_code, 404)
def test_group_as_agent(self):
ot = "Group"
name = "the group APT"
mbox = "mailto:<EMAIL>"
members = [{"name": "agentA", "mbox": "mailto:<EMAIL>"},
{"name": "agentB", "mbox": "mailto:<EMAIL>"}]
testagent = json.dumps(
{"objectType": ot, "name": name, "mbox": mbox, "member": members})
testprofileId = "http://profile.test.id/group.as.agent/"
testparams1 = {"profileId": testprofileId, "agent": testagent}
path = '%s?%s' % (reverse('lrs:agent_profile'),
urllib.urlencode(testparams1))
testprofile = {"test": "put profile - group as agent",
"obj": {"agent": "group"}}
put1 = self.client.put(path, testprofile, content_type=self.content_type,
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(put1.status_code, 204)
getr = self.client.get(reverse('lrs:agent_profile'), testparams1,
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(getr.status_code, 200)
robj = ast.literal_eval(getr.content)
self.assertEqual(robj['test'], testprofile['test'])
self.assertEqual(robj['obj']['agent'], testprofile['obj']['agent'])
self.client.delete(reverse('lrs:agent_profile'), testparams1,
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
def test_post_new_profile(self):
params = {"profileId": "prof:test_post_new_profile",
"agent": self.testagent}
path = '%s?%s' % (reverse('lrs:agent_profile'),
urllib.urlencode(params))
prof = {"test": "post new profile", "obj": {
"agent": "mailto:<EMAIL>"}}
post = self.client.post(path, prof, content_type="application/json",
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(post.status_code, 204)
get = self.client.get(path, Authorization=self.auth,
X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(get.status_code, 200)
self.assertEqual(ast.literal_eval(get.content), prof)
self.assertEqual(get.get('etag'), '"%s"' %
hashlib.sha1(get.content).hexdigest())
self.client.delete(path, Authorization=self.auth,
X_Experience_API_Version=settings.XAPI_VERSION)
def test_post_blank_profile(self):
params = {"profileId": "prof:test_post_new_profile",
"agent": self.testagent}
path = '%s?%s' % (reverse('lrs:agent_profile'),
urllib.urlencode(params))
prof = ""
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_container_classes = OrderedDict([("pir", ("pir", PlatformQos.Nodes.Node.Interfaces.Interface.Output.SkywarpQosPolicyClass.QosShowPclassSt.Shape.Pir)), ("pbs", ("pbs", PlatformQos.Nodes.Node.Interfaces.Interface.Output.SkywarpQosPolicyClass.QosShowPclassSt.Shape.Pbs))])
self._child_list_classes = OrderedDict([])
self._leafs = OrderedDict()
self.pir = PlatformQos.Nodes.Node.Interfaces.Interface.Output.SkywarpQosPolicyClass.QosShowPclassSt.Shape.Pir()
self.pir.parent = self
self._children_name_map["pir"] = "pir"
self._children_yang_names.add("pir")
self.pbs = PlatformQos.Nodes.Node.Interfaces.Interface.Output.SkywarpQosPolicyClass.QosShowPclassSt.Shape.Pbs()
self.pbs.parent = self
self._children_name_map["pbs"] = "pbs"
self._children_yang_names.add("pbs")
self._segment_path = lambda: "shape"
class Pir(Entity):
"""
PIR in kbps
.. attribute:: value
Config value
**type**\: int
**range:** 0..4294967295
.. attribute:: unit
Config unit
**type**\: :py:class:`QosUnit <ydk.models.cisco_ios_xr.Cisco_IOS_XR_skp_qos_oper.QosUnit>`
"""
_prefix = 'skp-qos-oper'
_revision = '2016-02-18'
def __init__(self):
super(PlatformQos.Nodes.Node.Interfaces.Interface.Output.SkywarpQosPolicyClass.QosShowPclassSt.Shape.Pir, self).__init__()
self.yang_name = "pir"
self.yang_parent_name = "shape"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([])
self._leafs = OrderedDict([
('value', YLeaf(YType.uint32, 'value')),
('unit', YLeaf(YType.enumeration, 'unit')),
])
self.value = None
self.unit = None
self._segment_path = lambda: "pir"
def __setattr__(self, name, value):
self._perform_setattr(PlatformQos.Nodes.Node.Interfaces.Interface.Output.SkywarpQosPolicyClass.QosShowPclassSt.Shape.Pir, ['value', 'unit'], name, value)
class Pbs(Entity):
"""
PBS in bytes
.. attribute:: value
Config value
**type**\: int
**range:** 0..4294967295
.. attribute:: unit
Config unit
**type**\: :py:class:`QosUnit <ydk.models.cisco_ios_xr.Cisco_IOS_XR_skp_qos_oper.QosUnit>`
"""
_prefix = 'skp-qos-oper'
_revision = '2016-02-18'
def __init__(self):
super(PlatformQos.Nodes.Node.Interfaces.Interface.Output.SkywarpQosPolicyClass.QosShowPclassSt.Shape.Pbs, self).__init__()
self.yang_name = "pbs"
self.yang_parent_name = "shape"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([])
self._leafs = OrderedDict([
('value', YLeaf(YType.uint32, 'value')),
('unit', YLeaf(YType.enumeration, 'unit')),
])
self.value = None
self.unit = None
self._segment_path = lambda: "pbs"
def __setattr__(self, name, value):
self._perform_setattr(PlatformQos.Nodes.Node.Interfaces.Interface.Output.SkywarpQosPolicyClass.QosShowPclassSt.Shape.Pbs, ['value', 'unit'], name, value)
class Wfq(Entity):
"""
QoS WFQ parameters
.. attribute:: committed_weight
Committed Weight
**type**\: :py:class:`CommittedWeight <ydk.models.cisco_ios_xr.Cisco_IOS_XR_skp_qos_oper.PlatformQos.Nodes.Node.Interfaces.Interface.Output.SkywarpQosPolicyClass.QosShowPclassSt.Wfq.CommittedWeight>`
.. attribute:: programmed_wfq
QoS Programmed WFQ parameters
**type**\: :py:class:`ProgrammedWfq <ydk.models.cisco_ios_xr.Cisco_IOS_XR_skp_qos_oper.PlatformQos.Nodes.Node.Interfaces.Interface.Output.SkywarpQosPolicyClass.QosShowPclassSt.Wfq.ProgrammedWfq>`
.. attribute:: excess_weight
Excess Weight
**type**\: int
**range:** 0..65535
"""
_prefix = 'skp-qos-oper'
_revision = '2016-02-18'
def __init__(self):
super(PlatformQos.Nodes.Node.Interfaces.Interface.Output.SkywarpQosPolicyClass.QosShowPclassSt.Wfq, self).__init__()
self.yang_name = "wfq"
self.yang_parent_name = "qos-show-pclass-st"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_container_classes = OrderedDict([("committed-weight", ("committed_weight", PlatformQos.Nodes.Node.Interfaces.Interface.Output.SkywarpQosPolicyClass.QosShowPclassSt.Wfq.CommittedWeight)), ("programmed-wfq", ("programmed_wfq", PlatformQos.Nodes.Node.Interfaces.Interface.Output.SkywarpQosPolicyClass.QosShowPclassSt.Wfq.ProgrammedWfq))])
self._child_list_classes = OrderedDict([])
self._leafs = OrderedDict([
('excess_weight', YLeaf(YType.uint16, 'excess-weight')),
])
self.excess_weight = None
self.committed_weight = PlatformQos.Nodes.Node.Interfaces.Interface.Output.SkywarpQosPolicyClass.QosShowPclassSt.Wfq.CommittedWeight()
self.committed_weight.parent = self
self._children_name_map["committed_weight"] = "committed-weight"
self._children_yang_names.add("committed-weight")
self.programmed_wfq = PlatformQos.Nodes.Node.Interfaces.Interface.Output.SkywarpQosPolicyClass.QosShowPclassSt.Wfq.ProgrammedWfq()
self.programmed_wfq.parent = self
self._children_name_map["programmed_wfq"] = "programmed-wfq"
self._children_yang_names.add("programmed-wfq")
self._segment_path = lambda: "wfq"
def __setattr__(self, name, value):
self._perform_setattr(PlatformQos.Nodes.Node.Interfaces.Interface.Output.SkywarpQosPolicyClass.QosShowPclassSt.Wfq, ['excess_weight'], name, value)
class CommittedWeight(Entity):
"""
Committed Weight
.. attribute:: value
Config value
**type**\: int
**range:** 0..4294967295
.. attribute:: unit
Config unit
**type**\: :py:class:`QosUnit <ydk.models.cisco_ios_xr.Cisco_IOS_XR_skp_qos_oper.QosUnit>`
"""
_prefix = 'skp-qos-oper'
_revision = '2016-02-18'
def __init__(self):
super(PlatformQos.Nodes.Node.Interfaces.Interface.Output.SkywarpQosPolicyClass.QosShowPclassSt.Wfq.CommittedWeight, self).__init__()
self.yang_name = "committed-weight"
self.yang_parent_name = "wfq"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([])
self._leafs = OrderedDict([
('value', YLeaf(YType.uint32, 'value')),
('unit', YLeaf(YType.enumeration, 'unit')),
])
self.value = None
self.unit = None
self._segment_path = lambda: "committed-weight"
def __setattr__(self, name, value):
self._perform_setattr(PlatformQos.Nodes.Node.Interfaces.Interface.Output.SkywarpQosPolicyClass.QosShowPclassSt.Wfq.CommittedWeight, ['value', 'unit'], name, value)
class ProgrammedWfq(Entity):
"""
QoS Programmed WFQ parameters
.. attribute:: bandwidth
Bandwidth
**type**\: :py:class:`Bandwidth <ydk.models.cisco_ios_xr.Cisco_IOS_XR_skp_qos_oper.PlatformQos.Nodes.Node.Interfaces.Interface.Output.SkywarpQosPolicyClass.QosShowPclassSt.Wfq.ProgrammedWfq.Bandwidth>`
.. attribute:: sum_of_bandwidth
Sum of Bandwidth
**type**\: :py:class:`SumOfBandwidth <ydk.models.cisco_ios_xr.Cisco_IOS_XR_skp_qos_oper.PlatformQos.Nodes.Node.Interfaces.Interface.Output.SkywarpQosPolicyClass.QosShowPclassSt.Wfq.ProgrammedWfq.SumOfBandwidth>`
.. attribute:: excess_ratio
Excess Ratio
**type**\: int
**range:** 0..65535
"""
_prefix = 'skp-qos-oper'
_revision = '2016-02-18'
def __init__(self):
super(PlatformQos.Nodes.Node.Interfaces.Interface.Output.SkywarpQosPolicyClass.QosShowPclassSt.Wfq.ProgrammedWfq, self).__init__()
self.yang_name = "programmed-wfq"
self.yang_parent_name = "wfq"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_container_classes = OrderedDict([("bandwidth", ("bandwidth", PlatformQos.Nodes.Node.Interfaces.Interface.Output.SkywarpQosPolicyClass.QosShowPclassSt.Wfq.ProgrammedWfq.Bandwidth)), ("sum-of-bandwidth", ("sum_of_bandwidth", PlatformQos.Nodes.Node.Interfaces.Interface.Output.SkywarpQosPolicyClass.QosShowPclassSt.Wfq.ProgrammedWfq.SumOfBandwidth))])
self._child_list_classes = OrderedDict([])
self._leafs = OrderedDict([
('excess_ratio', YLeaf(YType.uint16, 'excess-ratio')),
])
self.excess_ratio = None
self.bandwidth = PlatformQos.Nodes.Node.Interfaces.Interface.Output.SkywarpQosPolicyClass.QosShowPclassSt.Wfq.ProgrammedWfq.Bandwidth()
self.bandwidth.parent = self
self._children_name_map["bandwidth"] = "bandwidth"
self._children_yang_names.add("bandwidth")
self.sum_of_bandwidth = PlatformQos.Nodes.Node.Interfaces.Interface.Output.SkywarpQosPolicyClass.QosShowPclassSt.Wfq.ProgrammedWfq.SumOfBandwidth()
self.sum_of_bandwidth.parent = self
self._children_name_map["sum_of_bandwidth"] = "sum-of-bandwidth"
self._children_yang_names.add("sum-of-bandwidth")
self._segment_path = lambda: "programmed-wfq"
def __setattr__(self, name, value):
self._perform_setattr(PlatformQos.Nodes.Node.Interfaces.Interface.Output.SkywarpQosPolicyClass.QosShowPclassSt.Wfq.ProgrammedWfq, ['excess_ratio'], name, value)
class Bandwidth(Entity):
"""
Bandwidth
.. attribute:: value
Config value
**type**\: int
**range:** 0..4294967295
.. attribute:: unit
Config unit
**type**\: :py:class:`QosUnit <ydk.models.cisco_ios_xr.Cisco_IOS_XR_skp_qos_oper.QosUnit>`
"""
_prefix = 'skp-qos-oper'
_revision = '2016-02-18'
def __init__(self):
super(PlatformQos.Nodes.Node.Interfaces.Interface.Output.SkywarpQosPolicyClass.QosShowPclassSt.Wfq.ProgrammedWfq.Bandwidth, self).__init__()
self.yang_name = "bandwidth"
self.yang_parent_name = "programmed-wfq"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([])
self._leafs = OrderedDict([
('value', YLeaf(YType.uint32, 'value')),
('unit', YLeaf(YType.enumeration, 'unit')),
])
self.value = None
self.unit = None
self._segment_path = lambda: "bandwidth"
def __setattr__(self, name, value):
self._perform_setattr(PlatformQos.Nodes.Node.Interfaces.Interface.Output.SkywarpQosPolicyClass.QosShowPclassSt.Wfq.ProgrammedWfq.Bandwidth, ['value', 'unit'], name, value)
class SumOfBandwidth(Entity):
"""
Sum of Bandwidth
.. attribute:: value
Config value
**type**\: int
**range:** 0..4294967295
.. attribute:: unit
Config unit
**type**\: :py:class:`QosUnit <ydk.models.cisco_ios_xr.Cisco_IOS_XR_skp_qos_oper.QosUnit>`
"""
_prefix = 'skp-qos-oper'
_revision = '2016-02-18'
def __init__(self):
super(PlatformQos.Nodes.Node.Interfaces.Interface.Output.SkywarpQosPolicyClass.QosShowPclassSt.Wfq.ProgrammedWfq.SumOfBandwidth, self).__init__()
self.yang_name = "sum-of-bandwidth"
self.yang_parent_name = "programmed-wfq"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([])
self._leafs = OrderedDict([
('value', YLeaf(YType.uint32, 'value')),
('unit', YLeaf(YType.enumeration, 'unit')),
])
self.value = None
self.unit = None
self._segment_path = lambda: "sum-of-bandwidth"
def __setattr__(self, name, value):
self._perform_setattr(PlatformQos.Nodes.Node.Interfaces.Interface.Output.SkywarpQosPolicyClass.QosShowPclassSt.Wfq.ProgrammedWfq.SumOfBandwidth, ['value', 'unit'], name, value)
class Police(Entity):
"""
QoS Policer parameters
.. attribute:: cir
CIR
**type**\: :py:class:`Cir <ydk.models.cisco_ios_xr.Cisco_IOS_XR_skp_qos_oper.PlatformQos.Nodes.Node.Interfaces.Interface.Output.SkywarpQosPolicyClass.QosShowPclassSt.Police.Cir>`
.. attribute:: cbs
CBS
**type**\: :py:class:`Cbs <ydk.models.cisco_ios_xr.Cisco_IOS_XR_skp_qos_oper.PlatformQos.Nodes.Node.Interfaces.Interface.Output.SkywarpQosPolicyClass.QosShowPclassSt.Police.Cbs>`
.. attribute:: policer_id
policer ID
**type**\: int
**range:** 0..4294967295
.. attribute:: policer_type
Policer type
**type**\: :py:class:`TbAlgorithm <ydk.models.cisco_ios_xr.Cisco_IOS_XR_skp_qos_oper.TbAlgorithm>`
"""
_prefix = 'skp-qos-oper'
_revision = '2016-02-18'
def __init__(self):
super(PlatformQos.Nodes.Node.Interfaces.Interface.Output.SkywarpQosPolicyClass.QosShowPclassSt.Police, self).__init__()
self.yang_name = "police"
self.yang_parent_name = "qos-show-pclass-st"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_container_classes = OrderedDict([("cir", ("cir", PlatformQos.Nodes.Node.Interfaces.Interface.Output.SkywarpQosPolicyClass.QosShowPclassSt.Police.Cir)), ("cbs", ("cbs", PlatformQos.Nodes.Node.Interfaces.Interface.Output.SkywarpQosPolicyClass.QosShowPclassSt.Police.Cbs))])
self._child_list_classes = OrderedDict([])
self._leafs = OrderedDict([
('policer_id', YLeaf(YType.uint32, 'policer-id')),
('policer_type', YLeaf(YType.enumeration, 'policer-type')),
])
self.policer_id = None
self.policer_type = None
self.cir = PlatformQos.Nodes.Node.Interfaces.Interface.Output.SkywarpQosPolicyClass.QosShowPclassSt.Police.Cir()
self.cir.parent = self
self._children_name_map["cir"] = "cir"
self._children_yang_names.add("cir")
self.cbs = PlatformQos.Nodes.Node.Interfaces.Interface.Output.SkywarpQosPolicyClass.QosShowPclassSt.Police.Cbs()
self.cbs.parent = self
self._children_name_map["cbs"] = "cbs"
self._children_yang_names.add("cbs")
self._segment_path = lambda: "police"
def __setattr__(self, name, value):
self._perform_setattr(PlatformQos.Nodes.Node.Interfaces.Interface.Output.SkywarpQosPolicyClass.QosShowPclassSt.Police, ['policer_id', 'policer_type'], name, value)
class Cir(Entity):
"""
CIR
.. attribute:: value
Config value
**type**\: int
**range:** 0..4294967295
.. attribute:: unit
Config unit
**type**\: :py:class:`QosUnit <ydk.models.cisco_ios_xr.Cisco_IOS_XR_skp_qos_oper.QosUnit>`
"""
_prefix = 'skp-qos-oper'
_revision = '2016-02-18'
def __init__(self):
super(PlatformQos.Nodes.Node.Interfaces.Interface.Output.SkywarpQosPolicyClass.QosShowPclassSt.Police.Cir, self).__init__()
self.yang_name = "cir"
self.yang_parent_name = "police"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([])
self._leafs = OrderedDict([
('value', YLeaf(YType.uint32, 'value')),
('unit', YLeaf(YType.enumeration, 'unit')),
])
self.value = None
self.unit = None
self._segment_path = lambda: "cir"
def __setattr__(self, name, value):
self._perform_setattr(PlatformQos.Nodes.Node.Interfaces.Interface.Output.SkywarpQosPolicyClass.QosShowPclassSt.Police.Cir, ['value', 'unit'], name, value)
class Cbs(Entity):
"""
CBS
.. attribute:: value
Config value
**type**\: int
**range:** 0..4294967295
.. attribute:: unit
Config unit
**type**\: :py:class:`QosUnit <ydk.models.cisco_ios_xr.Cisco_IOS_XR_skp_qos_oper.QosUnit>`
"""
_prefix = 'skp-qos-oper'
_revision = '2016-02-18'
def __init__(self):
super(PlatformQos.Nodes.Node.Interfaces.Interface.Output.SkywarpQosPolicyClass.QosShowPclassSt.Police.Cbs, self).__init__()
self.yang_name = "cbs"
self.yang_parent_name = "police"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([])
self._leafs = OrderedDict([
('value', YLeaf(YType.uint32, 'value')),
('unit', YLeaf(YType.enumeration, 'unit')),
])
self.value = None
self.unit = None
self._segment_path = lambda: "cbs"
def __setattr__(self, name, value):
self._perform_setattr(PlatformQos.Nodes.Node.Interfaces.Interface.Output.SkywarpQosPolicyClass.QosShowPclassSt.Police.Cbs, ['value', 'unit'], name, value)
class Marking(Entity):
"""
QoS Mark parameters
.. attribute:: mark_only
Mark Only
**type**\: :py:class:`MarkOnly <ydk.models.cisco_ios_xr.Cisco_IOS_XR_skp_qos_oper.PlatformQos.Nodes.Node.Interfaces.Interface.Output.SkywarpQosPolicyClass.QosShowPclassSt.Marking.MarkOnly>`
.. attribute:: police_conform
Police conform mark
**type**\: :py:class:`PoliceConform <ydk.models.cisco_ios_xr.Cisco_IOS_XR_skp_qos_oper.PlatformQos.Nodes.Node.Interfaces.Interface.Output.SkywarpQosPolicyClass.QosShowPclassSt.Marking.PoliceConform>`
.. attribute:: police_exceed
Police exceed mark
**type**\: :py:class:`PoliceExceed <ydk.models.cisco_ios_xr.Cisco_IOS_XR_skp_qos_oper.PlatformQos.Nodes.Node.Interfaces.Interface.Output.SkywarpQosPolicyClass.QosShowPclassSt.Marking.PoliceExceed>`
"""
_prefix = 'skp-qos-oper'
_revision = '2016-02-18'
def __init__(self):
super(PlatformQos.Nodes.Node.Interfaces.Interface.Output.SkywarpQosPolicyClass.QosShowPclassSt.Marking, self).__init__()
self.yang_name = "marking"
self.yang_parent_name = "qos-show-pclass-st"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_container_classes = OrderedDict([("mark-only", ("mark_only", PlatformQos.Nodes.Node.Interfaces.Interface.Output.SkywarpQosPolicyClass.QosShowPclassSt.Marking.MarkOnly)), ("police-conform", ("police_conform", PlatformQos.Nodes.Node.Interfaces.Interface.Output.SkywarpQosPolicyClass.QosShowPclassSt.Marking.PoliceConform)), ("police-exceed", ("police_exceed", PlatformQos.Nodes.Node.Interfaces.Interface.Output.SkywarpQosPolicyClass.QosShowPclassSt.Marking.PoliceExceed))])
self._child_list_classes = OrderedDict([])
self._leafs = OrderedDict()
self.mark_only = PlatformQos.Nodes.Node.Interfaces.Interface.Output.SkywarpQosPolicyClass.QosShowPclassSt.Marking.MarkOnly()
self.mark_only.parent = self
self._children_name_map["mark_only"] = "mark-only"
self._children_yang_names.add("mark-only")
self.police_conform = PlatformQos.Nodes.Node.Interfaces.Interface.Output.SkywarpQosPolicyClass.QosShowPclassSt.Marking.PoliceConform()
self.police_conform.parent = self
self._children_name_map["police_conform"] = "police-conform"
self._children_yang_names.add("police-conform")
self.police_exceed = PlatformQos.Nodes.Node.Interfaces.Interface.Output.SkywarpQosPolicyClass.QosShowPclassSt.Marking.PoliceExceed()
self.police_exceed.parent = self
self._children_name_map["police_exceed"] = "police-exceed"
self._children_yang_names.add("police-exceed")
self._segment_path = lambda: "marking"
class MarkOnly(Entity):
"""
Mark Only
.. attribute:: action_type
Action type
**type**\: :py:class:`Action <ydk.models.cisco_ios_xr.Cisco_IOS_XR_skp_qos_oper.Action>`
.. attribute:: mark_detail
Mark value
**type**\: list of :py:class:`MarkDetail <ydk.models.cisco_ios_xr.Cisco_IOS_XR_skp_qos_oper.PlatformQos.Nodes.Node.Interfaces.Interface.Output.SkywarpQosPolicyClass.QosShowPclassSt.Marking.MarkOnly.MarkDetail>`
"""
_prefix = 'skp-qos-oper'
_revision = '2016-02-18'
def __init__(self):
super(PlatformQos.Nodes.Node.Interfaces.Interface.Output.SkywarpQosPolicyClass.QosShowPclassSt.Marking.MarkOnly, self).__init__()
self.yang_name = "mark-only"
self.yang_parent_name = "marking"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([("mark-detail", ("mark_detail", PlatformQos.Nodes.Node.Interfaces.Interface.Output.SkywarpQosPolicyClass.QosShowPclassSt.Marking.MarkOnly.MarkDetail))])
self._leafs = OrderedDict([
('action_type', YLeaf(YType.enumeration, 'action-type')),
])
self.action_type = None
self.mark_detail = YList(self)
self._segment_path = lambda: "mark-only"
def __setattr__(self, name, value):
self._perform_setattr(PlatformQos.Nodes.Node.Interfaces.Interface.Output.SkywarpQosPolicyClass.QosShowPclassSt.Marking.MarkOnly, ['action_type'], name, value)
class MarkDetail(Entity):
"""
Mark value
.. attribute:: mark_value
Mark value
**type**\: int
**range:** 0..255
.. attribute:: action_opcode
Action opcode
**type**\: | |
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# python3
"""Train seq-to-seq model on random supervised training tasks."""
# pytype: disable=wrong-arg-count
# pytype: disable=attribute-error
import collections
import functools
import json
import os
import random
import sys
import time
from absl import app
from absl import flags
from absl import logging
from flax import jax_utils
from flax import linen as nn
from flax import optim
from flax.metrics import tensorboard
from flax.training import checkpoints
from flax.training import common_utils
import jax
import jax.numpy as jnp
import numpy as np
import tensorflow.compat.v2 as tf
from latent_programmer import decode
from latent_programmer import models as base_models
from latent_programmer.decomposition_transformer_attention import decomposition_models as models
from latent_programmer.decomposition_transformer_attention import input_pipeline
from latent_programmer.tasks.robust_fill import dsl
from latent_programmer.tasks.robust_fill import tokens as dsl_tokens
sys.path.append('../../')
gfile = tf.io.gfile
FLAGS = flags.FLAGS
flags.DEFINE_integer('seed', 0, 'Fixed random seed for training.')
flags.DEFINE_float('lr', 1e-3, 'Learning rate.')
flags.DEFINE_float('weight_decay', 1e-1,
'Decay factor for AdamW-style weight decay.')
flags.DEFINE_integer('embedding_dim', 256, 'Embedding dimension.')
flags.DEFINE_integer('hidden_dim', 512, 'Hidden dimension.')
flags.DEFINE_integer('num_heads', 4, 'Number of Transformer heads.')
flags.DEFINE_integer('num_layers', 3, 'Number of layers.')
flags.DEFINE_boolean('slow_decode', True, 'Use slow decoding for prediction?')
flags.DEFINE_string('dataset_filepattern', None,
'Filepattern for TFRecord dataset.')
flags.DEFINE_integer('per_device_batch_size', 16,
'Number of program tasks in a batch.')
flags.DEFINE_integer('num_strings_per_task', 4,
'Number of input/output strings per task.')
flags.DEFINE_integer('max_program_length', 100,
'Maximum number of tokens in program.')
flags.DEFINE_integer('max_characters', 120,
'Maximum number of characters in input/output strings.')
flags.DEFINE_string('save_dir', None, 'Directory to save results to.')
flags.DEFINE_integer('num_train_steps', 2000000, 'Number of training steps.')
flags.DEFINE_integer('num_eval_steps', 10, 'Number of evaluation steps.')
flags.DEFINE_integer('log_freq', 1000, 'Number of steps between training logs.')
flags.DEFINE_integer('eval_freq', 2000, 'Number of steps between eval.')
flags.DEFINE_integer('predict_freq', 50000,
'Number of steps between prediction (beam search).')
flags.DEFINE_integer('checkpoint_freq', 50000,
'Number of steps between checkpoint saves.')
flags.DEFINE_integer('finetune_start_step', -1,
'Step the initial checkpoint should start at for '
'finetuning, or -1 if not finetuning.')
flags.DEFINE_bool('restore_checkpoints', True,
'Whether to restore from existing model checkpoints.')
flags.DEFINE_string('attention_mask_type', 'bos_full_attention',
'The kind of attention mask to use. Options are: baseline, '
'bos_to_bos, bos_full_attention')
flags.DEFINE_bool('use_relative_attention', True,
                  'Whether to use relative positional embeddings.')
flags.DEFINE_bool('bos_special_attention', False,
'Whether to use special relative attention computation for '
'BOS tokens.')
_internal = False
if not _internal:
flags.DEFINE_string('xm_parameters', None,
                      'String specifying hyperparameter search.')
def create_learning_rate_scheduler(
base_learning_rate=0.5,
factors='constant * linear_warmup * rsqrt_normalized_decay',
warmup_steps=16000,
decay_factor=0.5,
steps_per_decay=50000,
steps_per_cycle=100000):
"""Creates learning rate schedule.
Interprets factors in the factors string which can consist of:
* constant: interpreted as the constant value,
* linear_warmup: interpreted as linear warmup until warmup_steps,
* rsqrt_decay: divide by square root of max(step, warmup_steps)
* decay_every: Every k steps decay the learning rate by decay_factor.
* cosine_decay: Cyclic cosine decay, uses steps_per_cycle parameter.
Args:
base_learning_rate: float, the starting constant for the lr schedule.
factors: a string with factors separated by '*' that defines the schedule.
warmup_steps: how many steps to warm up for in the warmup schedule.
decay_factor: The amount to decay the learning rate by.
steps_per_decay: How often to decay the learning rate.
steps_per_cycle: Steps per cycle when using cosine decay.
Returns:
A function learning_rate(step): float -> {'learning_rate': float}, the
step-dependent lr.
"""
factors = [n.strip() for n in factors.split('*')]
def step_fn(step):
"""Step to learning rate function."""
ret = 1.0
for name in factors:
if name == 'constant':
ret *= base_learning_rate
elif name == 'linear_warmup':
ret *= jnp.minimum(1.0, step / warmup_steps)
elif name == 'rsqrt_decay':
ret /= jnp.sqrt(jnp.maximum(1.0, step - warmup_steps))
elif name == 'rsqrt_normalized_decay':
ret *= jnp.sqrt(warmup_steps)
ret /= jnp.sqrt(jnp.maximum(step, warmup_steps))
elif name == 'decay_every':
ret *= (decay_factor**(step // steps_per_decay))
elif name == 'cosine_decay':
progress = jnp.maximum(0.0,
(step - warmup_steps) / float(steps_per_cycle))
ret *= jnp.maximum(0.0,
0.5 * (1.0 + jnp.cos(jnp.pi * (progress % 1.0))))
else:
raise ValueError('Unknown factor %s.' % name)
return jnp.asarray(ret, dtype=jnp.float32)
return step_fn
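# Illustrative sketch (not part of the original script): how the factor string
# above composes into a schedule. The hyperparameter values are assumed purely
# for demonstration.
def _example_learning_rate_schedule():
  lr_fn = create_learning_rate_scheduler(
      base_learning_rate=1e-3,
      factors='constant * linear_warmup * rsqrt_normalized_decay',
      warmup_steps=16000)
  # At step 0 the linear warmup factor is 0, so the learning rate is 0; by step
  # 16000 warmup reaches 1 and the normalized rsqrt decay takes over.
  return [lr_fn(step) for step in (0, 8000, 16000, 100000)]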
def compute_weighted_cross_entropy(logits, targets, weights=None):
"""Compute weighted cross entropy and entropy for log probs and targets.
Args:
logits: `[batch, length, num_classes]` float array.
targets: categorical targets `[batch, length]` int array.
weights: None or array of shape [batch, length, 1]
Returns:
Tuple of scalar loss and batch normalizing factor.
"""
if logits.ndim != targets.ndim + 1:
raise ValueError('Incorrect shapes. Got shape %s logits and %s targets' %
(str(logits.shape), str(targets.shape)))
onehot_targets = common_utils.onehot(targets, logits.shape[-1])
loss = -jnp.sum(onehot_targets * nn.log_softmax(logits), axis=-1)
normalizing_factor = jnp.prod(jnp.asarray(targets.shape))
if weights is not None:
loss = loss * weights
normalizing_factor = weights.sum()
return loss.sum(), normalizing_factor
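# Illustrative sketch (not part of the original script): the weights argument is
# typically a 0/1 padding mask over target tokens (as in train_step below), so
# the mean loss divides by the number of real tokens, not by batch * length.
def _example_masked_loss(logits, programs):
  weights = jnp.where(programs > 0, 1, 0).astype(jnp.float32)
  loss_sum, num_tokens = compute_weighted_cross_entropy(logits, programs, weights)
  return loss_sum / num_tokens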
def compute_weighted_accuracy(logits, targets, weights=None):
"""Compute weighted accuracy for log probs and targets.
Args:
logits: `[batch, length, num_classes]` float array.
targets: categorical targets `[batch, length]` int array.
weights: None or array of shape [batch, length, 1]
Returns:
Tuple of scalar accuracy and batch normalizing factor.
"""
if logits.ndim != targets.ndim + 1:
raise ValueError('Incorrect shapes. Got shape %s logits and %s targets' %
(str(logits.shape), str(targets.shape)))
acc = jnp.equal(jnp.argmax(logits, axis=-1), targets)
normalizing_factor = jnp.prod(jnp.asarray(targets.shape))
if weights is not None:
acc = acc * weights
normalizing_factor = weights.sum()
return acc.sum(), normalizing_factor
def compute_metrics(logits, targets, weights):
"""Compute summary metrics."""
loss, weight_sum = compute_weighted_cross_entropy(logits, targets, weights)
acc, _ = compute_weighted_accuracy(logits, targets, weights)
metrics = {
'loss': loss,
'accuracy': acc,
'denominator': weight_sum,
}
metrics = jax.lax.psum(metrics, 'batch')
return metrics
# Train / eval / decode step functions.
# -----------------------------------------------------------------------------
def train_step(optimizer,
inputs,
outputs,
programs,
learning_rate_fn,
config,
dropout_rng):
"""Train on batch of program tasks."""
# We handle PRNG splitting inside the top pmap, rather
# than handling it outside in the training loop - doing the
# latter can add some stalls to the devices.
dropout_rng, new_dropout_rng = jax.random.split(dropout_rng)
weights = jnp.where(programs > 0, 1, 0).astype(jnp.float32)
def loss_fn(params):
"""Loss function used for training."""
logits = models.DecomposeAttentionTransformer(config).apply(
{'params': params},
inputs,
outputs,
programs,
rngs={'dropout': dropout_rng})
loss, weight_sum = compute_weighted_cross_entropy(logits, programs, weights)
mean_loss = loss / weight_sum
return mean_loss, logits
step = optimizer.state.step
lr = learning_rate_fn(step)
grad_fn = jax.value_and_grad(loss_fn, has_aux=True)
(_, logits), grad = grad_fn(optimizer.target)
grad = jax.lax.pmean(grad, 'batch')
new_optimizer = optimizer.apply_gradient(grad, learning_rate=lr)
# Get metrics.
metrics = compute_metrics(logits, programs, weights)
metrics['learning_rate'] = lr
return new_optimizer, metrics, new_dropout_rng
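# Illustrative sketch (not part of the original script): train_step is intended
# to run under jax.pmap with axis_name='batch', which is what makes the
# jax.lax.pmean/psum calls above reduce across devices. The exact wiring in the
# original main() is not shown here, so treat the partial() binding as an
# assumption.
def _example_pmap_train_step(learning_rate_fn, train_config):
  return jax.pmap(
      functools.partial(
          train_step, learning_rate_fn=learning_rate_fn, config=train_config),
      axis_name='batch')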
def eval_step(params, inputs, outputs, programs, eos_token, config):
"""Collect metrics for evaluation during training."""
weights = jnp.where(
jnp.logical_and(programs > 0,
jnp.logical_and(programs != config.base_config.bos_token,
programs != eos_token)),
1, 0).astype(jnp.float32)
logits = models.DecomposeAttentionTransformer(config).apply(
{'params': params}, inputs, outputs, programs)
return compute_metrics(logits, programs, weights)
def initialize_cache(inputs, outputs, programs, max_decode_len, config):
"""Initialize a cache for a given input shape and max decode length."""
target_shape = (programs.shape[0], max_decode_len)
dtype = config.base_config.dtype
initial_variables = models.DecomposeAttentionTransformer(config).init(
jax.random.PRNGKey(0),
jnp.ones(inputs.shape, dtype),
jnp.ones(outputs.shape, dtype),
jnp.ones(target_shape, dtype))
return initial_variables['cache']
def predict_step(params,
inputs,
outputs,
cache,
beam_size,
eos_token,
max_decode_len,
config,
slow_decode=True):
"""Predict translation with fast decoding beam search on a batch."""
# Prepare transformer fast-decoder call for beam search: for beam search, we
# need to set up our decoder model to handle a batch size equal to
# batch_size * beam_size, where each batch item's data is expanded in-place
# rather than tiled.
flat_encoded = decode.flat_batch_beam_expand(
models.DecomposeAttentionTransformer(config).apply(
{'params': params},
inputs,
outputs,
method=models.DecomposeAttentionTransformer.encode),
beam_size)
encoded_padding_mask = jnp.where(outputs > 0, 1, 0).astype(jnp.float32)
flat_encoded_padding_mask = decode.flat_batch_beam_expand(
encoded_padding_mask, beam_size)
if slow_decode:
def tokens_ids_to_logits(flat_ids):
"""Token slice to logits from decoder model."""
# --> [batch * beam, 1, vocab]
flat_logits = models.DecomposeAttentionTransformer(config=config).apply(
{'params': params},
flat_ids,
flat_encoded,
flat_encoded_padding_mask,
method=models.DecomposeAttentionTransformer.decode)
return flat_logits
else:
def tokens_ids_to_logits(flat_ids, flat_cache):
"""Token slice to logits from decoder model."""
# --> [batch * beam, 1, vocab]
flat_logits, new_vars = models.DecomposeAttentionTransformer(
config=config).apply(
{'params': params, 'cache': flat_cache},
flat_ids,
flat_encoded,
flat_encoded_padding_mask,
mutable=['cache'],
method=models.DecomposeAttentionTransformer.decode)
new_flat_cache = new_vars['cache']
# Remove singleton sequence-length dimension:
# [batch * beam, 1, vocab] --> [batch * beam, vocab]
flat_logits = flat_logits.squeeze(axis=1)
return flat_logits, new_flat_cache
# Using the above-defined single-step decoder function, run a
# beam search over possible sequences given input encoding.
beam_seqs, _ = decode.beam_search(
inputs,
cache,
tokens_ids_to_logits,
beam_size=beam_size,
alpha=0.6,
bos_token=config.base_config.bos_token,
eos_token=eos_token,
max_decode_len=max_decode_len,
slow_decode=slow_decode)
# Beam search returns [n_batch, n_beam, n_length] with beam dimension
# sorted in increasing order of log-probability.
return beam_seqs
# Util functions for prediction
# -----------------------------------------------------------------------------
def pad_examples(x, desired_batch_size):
"""Expand batch to desired size by repeating last slice."""
batch_pad = desired_batch_size - x.shape[0]
tile_dims = [1] * len(x.shape)
tile_dims[0] = batch_pad
return np.concatenate([x, np.tile(x[-1], tile_dims)], axis=0)
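# Illustrative sketch (not part of the original script): pad_examples lets a
# final, smaller batch still be split evenly across devices. Shapes below are
# assumed for demonstration only.
def _example_pad_examples():
  batch = np.zeros((10, 4, 120), dtype=np.int32)
  return pad_examples(batch, 16).shape  # -> (16, 4, 120)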
def tohost(x):
"""Collect batches from all devices | |
"""
Implementation of an async json-rpc client.
"""
from __future__ import annotations
import asyncio
from datetime import datetime
import json
import logging
import os
from pathlib import Path
import re
from typing import Any, Final
from aiohttp import ClientConnectorError, ClientError, ClientSession, TCPConnector
from hahomematic import config
from hahomematic.const import (
ATTR_ERROR,
ATTR_NAME,
ATTR_PASSWORD,
ATTR_RESULT,
ATTR_SESSION_ID,
ATTR_USERNAME,
DEFAULT_ENCODING,
PATH_JSON_RPC,
REGA_SCRIPT_FETCH_ALL_DEVICE_DATA,
REGA_SCRIPT_GET_SERIAL,
REGA_SCRIPT_PATH,
REGA_SCRIPT_SET_SYSTEM_VARIABLE,
REGA_SCRIPT_SYSTEM_VARIABLES_EXT_MARKER,
SYSVAR_HASEXTMARKER,
SYSVAR_HM_TYPE_FLOAT,
SYSVAR_HM_TYPE_INTEGER,
SYSVAR_ID,
SYSVAR_MAX_VALUE,
SYSVAR_MIN_VALUE,
SYSVAR_NAME,
SYSVAR_TYPE,
SYSVAR_TYPE_NUMBER,
SYSVAR_UNIT,
SYSVAR_VALUE,
SYSVAR_VALUE_LIST,
)
from hahomematic.exceptions import BaseHomematicException, HaHomematicException
from hahomematic.helpers import SystemVariableData, get_tls_context, parse_ccu_sys_var
_LOGGER = logging.getLogger(__name__)
class JsonRpcAioHttpClient:
"""Connection to CCU JSON-RPC Server."""
def __init__(
self,
loop: asyncio.AbstractEventLoop,
username: str,
password: str,
device_url: str,
client_session: ClientSession | None = None,
tls: bool = False,
verify_tls: bool = False,
):
"""Session setup."""
self._client_session: Final = (
client_session
if client_session
else ClientSession(connector=TCPConnector(limit=3), loop=loop)
)
self._session_id: str | None = None
self._last_session_id_refresh: datetime | None = None
self._username: Final = username
self._password: Final = password
self._tls: Final = tls
self._tls_context: Final = get_tls_context(verify_tls)
self._url: Final = f"{device_url}{PATH_JSON_RPC}"
@property
def is_activated(self) -> bool:
"""If session exists, then it is activated."""
return self._session_id is not None
async def _login_or_renew(self) -> bool:
"""Renew JSON-RPC session or perform login."""
if not self.is_activated:
self._session_id = await self._do_login()
self._last_session_id_refresh = datetime.now()
return self._session_id is not None
if self._session_id:
self._session_id = await self._do_renew_login(self._session_id)
return self._session_id is not None
async def _do_renew_login(self, session_id: str) -> str | None:
"""Renew JSON-RPC session or perform login."""
try:
if self._updated_within_seconds():
return session_id
method = "Session.renew"
response = await self._do_post(
session_id=session_id,
method=method,
extra_params={ATTR_SESSION_ID: session_id},
)
if response[ATTR_ERROR] is None and response[ATTR_RESULT]:
if response[ATTR_RESULT] is True:
self._last_session_id_refresh = datetime.now()
_LOGGER.debug(
"_do_renew_login: Method: %s [%s]", method, session_id
)
return session_id
return await self._do_login()
except ClientError as cer:
_LOGGER.error(
"_do_renew_login: ClientError [%s] while renewing JSON-RPC session",
cer.args,
)
return None
def _updated_within_seconds(self, age_seconds: int = 90) -> bool:
"""Check if session id has been updated within 90 seconds."""
if self._last_session_id_refresh is None:
return False
delta = datetime.now() - self._last_session_id_refresh
if delta.seconds < age_seconds:
return True
return False
async def _do_login(self) -> str | None:
"""Login to CCU and return session."""
session_id: str | None = None
try:
if not self._username:
_LOGGER.warning("_do_login: No username set.")
return None
if not self._password:
_LOGGER.warning("_do_login: No password set.")
return None
params = {
ATTR_USERNAME: self._username,
ATTR_PASSWORD: self._password,
}
method = "Session.login"
response = await self._do_post(
session_id=False,
method=method,
extra_params=params,
use_default_params=False,
)
if response[ATTR_ERROR] is None and response[ATTR_RESULT]:
session_id = response[ATTR_RESULT]
_LOGGER.debug("_do_login: Method: %s [%s]", method, session_id)
if not session_id:
_LOGGER.warning(
"_do_login: Unable to open session: %s", response[ATTR_ERROR]
)
return None
return session_id
except BaseHomematicException as hhe:
_LOGGER.error(
"_do_login: %s [%s] while logging in via JSON-RPC", hhe.name, hhe.args
)
return None
async def _post(
self,
method: str,
extra_params: dict[str, str] | None = None,
use_default_params: bool = True,
keep_session: bool = True,
) -> dict[str, Any] | Any:
"""Reusable JSON-RPC POST function."""
if keep_session:
await self._login_or_renew()
session_id = self._session_id
else:
session_id = await self._do_login()
if not session_id:
_LOGGER.warning("_post: Error while logging in via JSON-RPC.")
return {"error": "Unable to open session.", "result": {}}
_LOGGER.debug("_post: Method: %s, [%s]", method, extra_params)
response = await self._do_post(
session_id=session_id,
method=method,
extra_params=extra_params,
use_default_params=use_default_params,
)
if not keep_session:
await self._do_logout(session_id=session_id)
if (error := response["error"]) is not None:
raise HaHomematicException(f"post: error: {error}")
return response
async def _post_script(
self,
script_name: str,
extra_params: dict[str, str] | None = None,
keep_session: bool = True,
) -> dict[str, Any] | Any:
"""Reusable JSON-RPC POST_SCRIPT function."""
if keep_session:
await self._login_or_renew()
session_id = self._session_id
else:
session_id = await self._do_login()
if not session_id:
_LOGGER.warning("_post_script: Error while logging in via JSON-RPC.")
return {"error": "Unable to open session.", "result": {}}
script_file = os.path.join(
Path(__file__).resolve().parent, REGA_SCRIPT_PATH, script_name
)
script = Path(script_file).read_text(encoding=DEFAULT_ENCODING)
if extra_params:
for variable, value in extra_params.items():
script = script.replace(f"##{variable}##", value)
method = "ReGa.runScript"
response = await self._do_post(
session_id=session_id,
method=method,
extra_params={"script": script},
)
if not response[ATTR_ERROR]:
response[ATTR_RESULT] = json.loads(response[ATTR_RESULT])
_LOGGER.debug("_post_script: Method: %s [%s]", method, script_name)
if not keep_session:
await self._do_logout(session_id=session_id)
if (error := response["error"]) is not None:
raise HaHomematicException(f"_post_script: error: {error}")
return response
async def _do_post(
self,
session_id: bool | str,
method: str,
extra_params: dict[str, str] | None = None,
use_default_params: bool = True,
) -> dict[str, Any] | Any:
"""Reusable JSON-RPC POST function."""
if not self._username:
no_username = "_do_post: No username set."
_LOGGER.warning(no_username)
return {"error": str(no_username), "result": {}}
if not self._password:
            no_password = "_do_post: No password set."
_LOGGER.warning(no_password)
return {"error": str(no_password), "result": {}}
params = _get_params(session_id, extra_params, use_default_params)
try:
payload = json.dumps(
{"method": method, "params": params, "jsonrpc": "1.1", "id": 0}
).encode("utf-8")
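            # Example of the serialized request body (illustrative values only):
            #   {"method": "Session.login",
            #    "params": {"username": "Admin", "password": "..."},
            #    "jsonrpc": "1.1", "id": 0}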
headers = {
"Content-Type": "application/json",
"Content-Length": str(len(payload)),
}
if self._tls:
response = await self._client_session.post(
self._url,
data=payload,
headers=headers,
timeout=config.TIMEOUT,
ssl=self._tls_context,
)
else:
response = await self._client_session.post(
self._url, data=payload, headers=headers, timeout=config.TIMEOUT
)
if response.status == 200:
try:
return await response.json(encoding="utf-8")
except ValueError as ver:
_LOGGER.error(
"_do_post: ValueError [%s] Failed to parse JSON. Trying workaround",
ver.args,
)
# Workaround for bug in CCU
return json.loads(
                        (await response.text(encoding="utf-8")).replace("\\", "")
)
else:
_LOGGER.warning("_do_post: Status: %i", response.status)
return {"error": response.status, "result": {}}
except ClientConnectorError as err:
_LOGGER.error("_do_post: ClientConnectorError")
return {"error": str(err), "result": {}}
except ClientError as cce:
_LOGGER.error("_do_post: ClientError")
return {"error": str(cce), "result": {}}
except TypeError as ter:
_LOGGER.error("_do_post: TypeError")
return {"error": str(ter), "result": {}}
except OSError as oer:
_LOGGER.error("_do_post: OSError")
return {"error": str(oer), "result": {}}
except Exception as ex:
raise HaHomematicException from ex
async def logout(self) -> None:
"""Logout of CCU."""
await self._do_logout(self._session_id)
async def _do_logout(self, session_id: str | None) -> None:
"""Logout of CCU."""
if not session_id:
_LOGGER.debug("_do_logout: Not logged in. Not logging out.")
return
try:
method = "Session.logout"
params = {"_session_id_": session_id}
response = await self._do_post(
session_id=session_id,
method=method,
extra_params=params,
)
_LOGGER.debug("_do_logout: Method: %s [%s]", method, session_id)
if response[ATTR_ERROR]:
_LOGGER.warning("_do_logout: Logout error: %s", response[ATTR_RESULT])
except ClientError as cer:
_LOGGER.error(
"logout: ClientError [%s] while logging in via JSON-RPC", cer.args
)
return
def _has_credentials(self) -> bool:
"""Return if credentials are available."""
return self._username is not None and self._password is not None
async def set_system_variable(self, name: str, value: Any) -> None:
"""Set a system variable on CCU / Homegear."""
_LOGGER.debug("set_system_variable: Setting System variable via JSON-RPC")
try:
params = {
SYSVAR_NAME: name,
SYSVAR_VALUE: value,
}
if isinstance(value, bool):
params[SYSVAR_VALUE] = int(value)
response = await self._post("SysVar.setBool", params)
elif isinstance(value, str):
if re.findall("<.*?>|&([a-z0-9]+|#[0-9]{1,6}|#x[0-9a-f]{1,6});", value):
_LOGGER.warning(
"set_system_variable: value (%s) contains html tags. This is not allowed.",
value,
)
return
response = await self._post_script(
script_name=REGA_SCRIPT_SET_SYSTEM_VARIABLE, extra_params=params
)
else:
response = await self._post("SysVar.setFloat", params)
if json_result := response[ATTR_RESULT]:
res = json_result
_LOGGER.debug(
"set_system_variable: Result while setting variable: %s",
str(res),
)
except BaseHomematicException as hhe:
_LOGGER.warning("set_system_variable: %s [%s]", hhe.name, hhe.args)
async def delete_system_variable(self, name: str) -> None:
"""Delete a system variable from CCU / Homegear."""
_LOGGER.debug("delete_system_variable: Getting System variable via JSON-RPC")
try:
params = {SYSVAR_NAME: name}
response = await self._post(
"SysVar.deleteSysVarByName",
params,
)
if json_result := response[ATTR_RESULT]:
deleted = json_result
_LOGGER.debug("delete_system_variable: Deleted: %s", str(deleted))
except BaseHomematicException as hhe:
_LOGGER.warning("delete_system_variable: %s [%s]", hhe.name, hhe.args)
async def get_system_variable(self, name: str) -> Any:
"""Get single system variable from CCU / Homegear."""
var = None
_LOGGER.debug("get_system_variable: Getting System variable via JSON-RPC")
try:
params = {SYSVAR_NAME: name}
response = await self._post(
"SysVar.getValueByName",
params,
)
if json_result := response[ATTR_RESULT]:
# This does not yet support strings
try:
var = float(json_result)
except Exception:
var = json_result == "true"
except BaseHomematicException as hhe:
_LOGGER.warning("get_system_variable: %s [%s]", hhe.name, hhe.args)
return var
async def get_all_system_variables(self) -> list[SystemVariableData]:
"""Get all system variables from CCU / Homegear."""
variables: list[SystemVariableData] = []
_LOGGER.debug(
"get_all_system_variables: Getting all system variables via JSON-RPC"
)
try:
response = await self._post(
"SysVar.getAll",
)
if json_result := response[ATTR_RESULT]:
ext_markers = await self._get_system_variables_ext_markers()
for var in json_result:
var_id = var[SYSVAR_ID]
name = var[SYSVAR_NAME]
org_data_type = var[SYSVAR_TYPE]
raw_value = var[SYSVAR_VALUE]
if org_data_type == SYSVAR_TYPE_NUMBER:
data_type = (
SYSVAR_HM_TYPE_FLOAT
if "." in raw_value
else SYSVAR_HM_TYPE_INTEGER
)
else:
data_type = org_data_type
extended_sysvar = ext_markers.get(var_id, False)
unit = var[SYSVAR_UNIT]
value_list: list[str] | None = None
if val_list := var.get(SYSVAR_VALUE_LIST):
value_list = val_list.split(";")
try:
value = parse_ccu_sys_var(
data_type=data_type, raw_value=raw_value
)
max_value = None
if raw_max_value := var.get(SYSVAR_MAX_VALUE):
max_value = parse_ccu_sys_var(
data_type=data_type, raw_value=raw_max_value
)
min_value = | |
'''OpenGL extension EXT.direct_state_access
This module customises the behaviour of the
OpenGL.raw.GL.EXT.direct_state_access to provide a more
Python-friendly API
Overview (from the spec)
This extension introduces a set of new "direct state access"
commands (meaning no selector is involved) to access (update and
query) OpenGL state that previously depended on the OpenGL state
selectors for access. These new commands supplement the existing
selector-based OpenGL commands to access the same state.
The intent of this extension is to make it more efficient for
libraries to avoid disturbing selector and latched state. The
extension also allows more efficient command usage by eliminating
the need for selector update commands.
Two derivative advantages of this extension are 1) display lists
can be executed using these commands that avoid disturbing selectors
that subsequent commands may depend on, and 2) drivers implemented
with a dual-thread partitioning with OpenGL command buffering from
an application thread and then OpenGL command dispatching in a
concurrent driver thread can avoid thread synchronization created by
selector saving, setting, command execution, and selector restoration.
This extension does not itself add any new OpenGL state.
We call a state variable in OpenGL an "OpenGL state selector" or
simply a "selector" if OpenGL commands depend on the state variable
to determine what state to query or update. The matrix mode and
active texture are both selectors. Object bindings for buffers,
programs, textures, and framebuffer objects are also selectors.
We call OpenGL state "latched" if the state is set by one OpenGL
command but then that state is saved by a subsequent command or the
state determines how client memory or buffer object memory is accessed
by a subsequent command. The array and element array buffer bindings
are latched by vertex array specification commands to determine
which buffer a given vertex array uses. Vertex array state and pixel
pack/unpack state decides how client memory or buffer object memory is
accessed by subsequent vertex pulling or image specification commands.
The existence of selectors and latched state in the OpenGL API
reduces the number of parameters to various sets of OpenGL commands
but complicates the access to state for layered libraries which seek
to access state without disturbing other state, namely the state of
state selectors and latched state. In many cases, selectors and
latched state were introduced by extensions as OpenGL evolved to
minimize the disruption to the OpenGL API when new functionality,
particularly the pluralization of existing functionality as when
texture objects and later multiple texture units, was introduced.
The OpenGL API involves several selectors (listed in historical
order of introduction):
o The matrix mode.
o The current bound texture for each supported texture target.
o The active texture.
o The active client texture.
o The current bound program for each supported program target.
o The current bound buffer for each supported buffer target.
o The current GLSL program.
o The current framebuffer object.
The new selector-free update commands can be compiled into display
lists.
The OpenGL API has latched state for vertex array buffer objects
and pixel store state. When an application issues a GL command to
unpack or pack pixels (for example, glTexImage2D or glReadPixels
respectively), the current unpack and pack pixel store state
determines how the pixels are unpacked from/packed to client memory
or pixel buffer objects. For example, consider:
glPixelStorei(GL_UNPACK_SWAP_BYTES, GL_TRUE);
glPixelStorei(GL_UNPACK_ROW_LENGTH, 640);
glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 47);
glDrawPixels(100, 100, GL_RGB, GL_FLOAT, pixels);
The unpack swap bytes and row length state set by the preceding
glPixelStorei commands (as well as the 6 other unpack pixel store
state variables) control how data is read (unpacked) from buffer of
data pointed to by pixels. The glBindBuffer command also specifies
an unpack buffer object (47) so the pixel pointer is actually treated
as a byte offset into buffer object 47.
When an application issues a command to configure a vertex array,
the current array buffer state is latched as the binding for the
particular vertex array being specified. For example, consider:
glBindBuffer(GL_ARRAY_BUFFER, 23);
glVertexPointer(3, GL_FLOAT, 12, pointer);
The glBindBuffer command updates the array buffering binding
(GL_ARRAY_BUFFER_BINDING) to the buffer object named 23. The
subsequent glVertexPointer command specifies explicit parameters
for the size, type, stride, and pointer to access the position
vertex array BUT ALSO latches the current array buffer binding for
the vertex array buffer binding (GL_VERTEX_ARRAY_BUFFER_BINDING).
Effectively the current array buffer binding buffer object becomes
an implicit fifth parameter to glVertexPointer and this applies to
all the gl*Pointer vertex array specification commands.
Selectors and latched state create problems for layered libraries
using OpenGL because selectors require the selector state to be
modified to update some other state and latched state means implicit
state can affect the operation of commands specifying, packing, or
unpacking data through pointers/offsets. For layered libraries,
a state update performed by the library may attempt to save the
selector state, set the selector, update/query some state the
selector controls, and then restore the selector to its saved state.
Layered libraries can skip the selector save/restore but this risks
introducing uncertainty about the state of a selector after calling
layered library routines. Such selector side-effects are difficult
to document and lead to compatibility issues as the layered library
evolves or its usage varies. For latched state, layered libraries
may find commands such as glDrawPixels do not work as expected
because latched pixel store state is not what the library expects.
Querying or pushing the latched state, setting the latched state
explicitly, performing the operation involving latched state, and
then restoring or popping the latched state avoids entanglements
with latched state but at considerable cost.
EXAMPLE USAGE OF THIS EXTENSION'S FUNCTIONALITY
Consider the following routine to set the modelview matrix involving
the matrix mode selector:
void setModelviewMatrix(const GLfloat matrix[16])
{
GLenum savedMatrixMode;
glGetIntegerv(GL_MATRIX_MODE, (GLint*) &savedMatrixMode);
glMatrixMode(GL_MODELVIEW);
glLoadMatrixf(matrix);
glMatrixMode(savedMatrixMode);
}
Notice that four OpenGL commands are required to update the current
modelview matrix without disturbing the matrix mode selector.
OpenGL query commands can also substantially reduce the performance
of modern OpenGL implementations which may off-load OpenGL state
processing to another CPU core/thread or to the GPU itself.
An alternative to querying the selector is to use the
glPushAttrib/glPopAttrib commands. However this approach typically
involves pushing far more state than simply the one or two selectors
that need to be saved and restored. Because so much state is
associated with a given push/pop attribute bit, the glPushAttrib
and glPopAttrib commands are considerably more costly than the
save/restore approach. Additionally glPushAttrib risks overflowing
the attribute stack.
The reliability and performance of layered libraries and applications
can be improved by adding to the OpenGL API a new set of commands
that access OpenGL state directly, without going through selectors.
The above example can be reimplemented more efficiently and without
selector side-effects:
void setModelviewMatrix(const GLfloat matrix[16])
{
glMatrixLoadfEXT(GL_MODELVIEW, matrix);
}
Consider a layered library seeking to load a texture:
void loadTexture(GLint texobj, GLint width, GLint height,
void *data)
{
glBindTexture(GL_TEXTURE_2D, texobj);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB8,
width, height, 0, GL_RGB, GL_FLOAT, data);
}
The library expects the data to be packed into the buffer pointed
to by data. But what if the current pixel unpack buffer binding
is not zero so the current pixel unpack buffer, rather than client
memory, will be read? Or what if the application has modified
the GL_UNPACK_ROW_LENGTH pixel store state before loadTexture
is called?
We can fix the routine by calling glBindBuffer(GL_PIXEL_UNPACK_BUFFER,
0) and setting all the pixel store unpack state to the initial state
the loadTexture routine expects, but this is expensive. It also risks
disturbing other state: when loadTexture returns to the application,
the application may not realize that the current texture object (for
whatever texture unit the current active texture happens to be) and
the pixel store state have changed.
We can more efficiently implement this routine without disturbing
selector or latched state as follows:
void loadTexture(GLint texobj, GLint width, GLint height,
void *data)
{
glPushClientAttribDefaultEXT(GL_CLIENT_PIXEL_STORE_BIT);
glTextureImage2DEXT(texobj, GL_TEXTURE_2D, 0, GL_RGB8,
width, height, 0, GL_RGB, GL_FLOAT, data);
glPopClientAttrib();
}
Now loadTexture does not have to worry about inappropriately
configured pixel store state or a non-zero pixel unpack buffer
binding. And loadTexture has no unintended side-effects for
selector or latched state (assuming the client attrib state does
not overflow).
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/EXT/direct_state_access.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.EXT.direct_state_access import *
from OpenGL.raw.GL.EXT.direct_state_access import _EXTENSION_NAME
def glInitDirectStateAccessEXT():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
glMatrixLoadfEXT=wrapper.wrapper(glMatrixLoadfEXT).setInputArraySize(
'm', 16
)
glMatrixLoaddEXT=wrapper.wrapper(glMatrixLoaddEXT).setInputArraySize(
'm', 16
)
glMatrixMultfEXT=wrapper.wrapper(glMatrixMultfEXT).setInputArraySize(
'm', 16
)
glMatrixMultdEXT=wrapper.wrapper(glMatrixMultdEXT).setInputArraySize(
'm', 16
)
# INPUT glTextureParameterfvEXT.params size not checked against 'pname'
glTextureParameterfvEXT=wrapper.wrapper(glTextureParameterfvEXT).setInputArraySize(
'params', None
)
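# --- Hedged usage sketch (added for illustration; not part of the upstream module).
# It mirrors the setModelviewMatrix() example from the spec text above and assumes a
# current GL context has already been created elsewhere (e.g. by a windowing toolkit).
# The helper is defined but never called here.
def _example_load_modelview_identity():
    from OpenGL.GL import GL_MODELVIEW  # core GL enum, assumed importable here
    if not glInitDirectStateAccessEXT():
        return  # extension not exposed by the current context
    identity = [1.0, 0.0, 0.0, 0.0,
                0.0, 1.0, 0.0, 0.0,
                0.0, 0.0, 1.0, 0.0,
                0.0, 0.0, 0.0, 1.0]
    # Load the modelview matrix without touching the matrix-mode selector.
    glMatrixLoadfEXT(GL_MODELVIEW, identity)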
if not isinstance(v, list):
levels[d] = [v]
# Ensure each dimension specified by levels is valid
bad = [dim for dim in levels.keys() if dim not in dims]
if bad:
raise KeyError(f'Dimensions {bad} specified in *levels not found in *dims')
# Ensure each level is valid
bad = {k: v for k, vs in levels.items() for v in vs if v not in self.data.tidy[k].unique()}
if bad:
raise ValueError(f'Values specified in *levels not found in tidy: {bad}')
# Use all levels of remaining dims
levels.update({dim: list(self.data.tidy[dim].unique()) for dim in dims if dim not in levels.keys()}) # Fix once Python >= 3.9
else:
raise TypeError('`levels` must be of type str, list, or dict')
for dim in dims:
assert_is_subset(f'data[{dim}]', levels[dim], self.data.tidy[dim])
else:
levels = {}
return levels
def _parse_coordinates(self, dims: list, levels: dict, coords: None or list or dict) -> dict:
"""Check for consistency between supplied dims/levels/coords or generate coords automatically"""
if coords is not None:
if isinstance(coords, dict):
# Ensure all dim-level pairs in ``levels`` and ``coords`` match exactly
level_tuples = [(dim, level) for dim, levels_list in levels.items() for level in levels_list]
coord_tuples = [(dim, level) for dim, coord_dict in coords.items() for level in coord_dict.keys()]
assert_is_subset('coordinates', coord_tuples, level_tuples)
assert_is_subset('coordinates', level_tuples, coord_tuples)
elif isinstance(coords, list):
assert len(levels.keys()) == 1, \
'Non-dict argument for `continuous_coords` only allowed if `len(continuous_dims)==1`'
dim = dims[0]
assert len(coords) == len(levels[dim])
coords = {dim: {level: coord for level, coord in zip(levels[dim], coords)}}
else:
raise TypeError('Coordinates must be of type list or dict')
if not all(isinstance(coord, (int, float))
for coord_dict in coords.values()
for coord in coord_dict.values()):
raise TypeError('Coordinates must be numeric')
elif dims is not None and levels is not None:
coords = {dim: self._make_coordinates(dim, levels_list) for dim, levels_list in levels.items()}
else:
coords = {}
return coords
def _make_coordinates(self, dim: str, levels_list: list) -> dict:
"""Generate numerical coordinates for each level in each dim under consideration"""
df = self.data.tidy
col = df[df[dim].isin(levels_list)][dim]
if col.dtype in [np.float32, np.float64, np.int32, np.int64]:
coords = {level: level for level in levels_list}
else:
coords = {level: col.astype('category').cat.categories.to_list().index(level) for level in levels_list}
return coords
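# --- Hedged illustration (added; not part of the original class). For a non-numeric
# column, the mapping built above can be reproduced with plain pandas on a toy series
# (the level names here are hypothetical):
#
#     import pandas as pd
#     col = pd.Series(['low', 'high', 'low', 'mid'])
#     categories = col.astype('category').cat.categories.to_list()   # ['high', 'low', 'mid']
#     {level: categories.index(level) for level in ['low', 'mid', 'high']}
#     # -> {'low': 1, 'mid': 2, 'high': 0}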
def get_filtered_data(self, standardized=False, metric='mean'):
"""The portion of the dataset under consideration
A filter is built by comparing the values in the unstandardized dataframe with those in :attr:`filter_dims`,
:attr:`categorical_levels`, and :attr:`continuous_levels`, then the filter is applied to the standardized or
unstandardized dataframe as indicated by the `standardized` input argument.
Parameters
----------
standardized : bool, default False
Whether to return a subset of the raw tidy or the centered and scaled tidy
metric : str, default 'mean'
Which summary statistic to return (must be a value in the `Metric` column)
Returns
-------
tidy : pd.DataFrame
"""
df = self.data.tidy
allowed = df.isin(self.filter_dims)[self.filter_dims.keys()].all(axis=1)
if 'Metric' in df.columns:
assert_in('Metric', metric, self.data.tidy['Metric'].unique())
allowed &= df['Metric'] == metric
for dim, levels in self.levels.items():
allowed &= df[dim].isin(levels)
return df[allowed] if not standardized else self.data.tidy.z[allowed]
def get_structured_data(self, metric='mean'):
"""Formats input data and observations as parrays
Parameters
----------
metric : str, default 'mean'
Which summary statistic to return (must be a value in the `Metric` column)
Returns
-------
X : parray
A multilayered column vector of input coordinates.
y : parray
A multilayered (1D) vector of observations
See Also
--------
:meth:`get_filtered_data`
"""
df = self.get_filtered_data(standardized=False, metric=metric)
# Ensure same number of observations for every output (only possible if something broke)
assert len(set(sum(df[self.out_col] == output) for output in self.outputs)) == 1
# Assuming all parameters observed at the same points
# Extract the model dimensions from the dataframe for one of the parameters
dims = set(self.dims) - set([self.out_col])
dim_values = {dim: df[df[self.out_col] == self.outputs[0]].replace(self.coords)[dim].values for dim in dims}
X = self.parray(**dim_values, stdzd=False)
# List of parrays for each output
outputs = {output: df[df[self.out_col] == output]['Value'].values for output in self.outputs}
y = self.parray(**outputs, stdzd=False)
return X, y
def get_shaped_data(self, metric='mean'):
"""Formats input data and observations as plain numpy arrays
Parameters
----------
metric : str, default 'mean'
Which summary statistic to return (must be a value in the `Metric` column)
Returns
-------
X : np.ndarray
A tall matrix of input coordinates with shape (n_obs, n_dims).
y : np.ndarray
A (1D) vector of observations
See Also
--------
:meth:`get_filtered_data`
"""
self.X, self.y = self.get_structured_data(metric=metric)
# Convert ParameterArray into plain numpy tall array
if self.out_col in self.dims:
ordered_outputs = {k: v for k, v in sorted(self.coords[self.out_col].items(), key=lambda item: item[1])}
y = np.hstack([self.y.z[output+'_z'].values() for output in ordered_outputs.keys()])
X = self.X[:, None] # convert to column vector
X = parray.vstack([X.add_layers(**{self.out_col: coord}) for coord in ordered_outputs.values()])
X = np.atleast_2d(np.column_stack([X[dim].z.values().squeeze() for dim in self.dims]))
else:
y = self.y.z.values().squeeze()
X = np.atleast_2d(np.column_stack([self.X[dim].z.values().squeeze() for dim in self.dims]))
return X, y
################################################################################
# Prediction
################################################################################
@abstractmethod
def predict(self, points_array, with_noise=True, **kwargs):
"""Defined by subclass.
It is not recommended to call :meth:`predict` directly, since it requires a very specific formatting for inputs,
specifically a tall array of standardized coordinates in the same order as :attr:`dims`. Rather, one of the
convenience functions :meth:`predict_points` or :meth:`predict_grid` should be used, as these have a more
intuitive input structure and format the tidy appropriately prior to calling :meth:`predict`.
See Also
--------
:meth:`GP.predict`
:meth:`GLM.predict`
Returns
-------
prediction_mean, prediction_var : list of np.ndarray
Mean and variance of predictions at each of the supplied points
"""
pass
def _check_has_prediction(self):
"""Does what it says on the tin"""
if self.predictions is None:
raise ValueError('No predictions found. Run self.predict_grid or related method first.')
def _parse_prediction_output(self, output):
if self.out_col in self.categorical_dims:
# Multiple parameters are possible, determine which ones to predict
if output is None:
# predict all parameters in model
output = self.categorical_levels[self.out_col]
elif isinstance(output, list):
assert_is_subset('Outputs', output, self.categorical_levels[self.out_col])
elif isinstance(output, str):
output = [output]
assert_is_subset('Outputs', output, self.categorical_levels[self.out_col])
else:
raise ValueError('"output" must be list, string, or None')
else:
# If self.out_col is not in categorical_dims, it must be in filter_dims, and only one is possible
output = self.filter_dims[self.out_col]
return output
def _prepare_points_for_prediction(self, points: ParameterArray, output):
points = np.atleast_1d(points)
assert points.ndim == 1
assert set(self.dims) - set([self.out_col]) == set(points.names), \
'All model dimensions must be present in "points" parray.'
if self.out_col in self.categorical_dims:
# Multiple parameters are possible, determine which ones to predict
# Get model coordinates for each output to be predicted
param_coords = [self.categorical_coords[self.out_col][p] for p in output]
# Convert input points to tall array and tile once for each output, adding the respective coordinate
tall_points = parray.vstack([points.add_layers(**{self.out_col: coord})[:, None] for coord in param_coords])
else:
# If self.out_col is not in categorical_dims, it must be in filter_dims, and only one is possible
# Convert input points to tall array
param_coords = None
tall_points = points[:, None]
# Combine standardized coordinates into an ordinary tall numpy array for prediction
points_array = np.hstack([tall_points[dim].z.values() for dim in self.dims])
return points_array, tall_points, param_coords
def predict_points(self, points, output=None, with_noise=True, **kwargs):
"""Make predictions at supplied points
Parameters
----------
points : ParameterArray
1-D ParameterArray vector of coordinates for prediction, must have one layer per ``self.dims``
output : str or list of str, optional
Variable for which to make predictions
with_noise : bool, default True
Whether to incorporate aleatoric uncertainty into prediction error
**kwargs
Additional keyword arguments passed to subclass-specific :meth:`predict` method
Returns
-------
prediction : UncertainParameterArray
Predictions as a `uparray`
"""
output = self._parse_prediction_output(output)
points_array, tall_points, param_coords = self._prepare_points_for_prediction(points, output=output)
# Prediction means and variance as a list of numpy vectors
pred_mean, pred_variance = self.predict(points_array, with_noise=with_noise, **kwargs)
self.predictions_X = points
# Store predictions in appropriate structured array format
if len(output) == 1:
# Predicting one output, return an UncertainParameterArray
self.predictions = self.uparray(output[0], pred_mean, pred_variance, stdzd=True)
else:
# Predicting multiple parameters, return an MVUncertainParameterArray
# First split prediction into UncertainParameterArrays
uparrays = []
for i, name in enumerate(output):
idx = (tall_points[self.out_col].values() == param_coords[i]).squeeze()
μ = pred_mean[idx]
σ2 = pred_variance[idx]
uparrays.append(self.uparray(name, μ, σ2, stdzd=True))
# Calculate the correlation matrix from the hyperparameters of the coregion kernel
W = self.MAP[f'W_{self.out_col}'][param_coords, :]
κ = self.MAP[f'κ_{self.out_col}'][param_coords]
B = W @ W.T + np.diag(κ) # covariance matrix
D =
from collections import namedtuple
import os
import subprocess
import re
import boto3
import json
STAGE_VARIABLE_ALIAS = "lambdaAlias"
INTEGRATION_URI_APPENDER = ":${{stageVariables.{0}}}".format(STAGE_VARIABLE_ALIAS)
_INTEGRATION = namedtuple(
"INTEGRATION", [
"rest_api_id",
"resource_id",
"http_method",
"path"
]
)
_APIFNS = namedtuple(
"APIFNS", [
"rest_api_id",
"resource_id",
"http_method",
"uri",
"path",
"function_name",
"source_arn",
"aliases"
]
)
STATEMENT_ID = "f6803b46-df32-4504-8c40-567a0390f549"
CLIENT_GATEWAY = boto3.client('apigateway')
CLIENT_LAMBDA = boto3.client('lambda')
CLIENT_CLOUDFORMATION = boto3.client('cloudformation')
SERVICE_FROM_PATH_PATTERN = re.compile(r"^(?:\\.|[^/\\])*/((?:\\.|[^/\\])*)/")
class Mapper:
"""
This is a class that groups a collection of helper functions that
help in gathering information of lambda functions that are integrated
with an AWS api gateway
"""
@staticmethod
def _map_fn(rest_api_id, region, http_method, path, uri):
"""
Extracts the underlying (lambda) function name and
source arn of a particular resource attached to an api gateway.
"""
account_id = boto3.client('sts').get_caller_identity().get('Account')
regex_template = ".*/arn:aws:lambda:{0}:{1}:function:(.*)/invocations$"
source_arn_template = "arn:aws:execute-api:{0}:{1}:{2}/*/{3}{4}"
source_arn = source_arn_template.format(
region,
account_id,
rest_api_id,
http_method,
path
)
regex = regex_template.format(region, account_id)
function_name = re.search(regex, uri).group(1)
if function_name[-len(INTEGRATION_URI_APPENDER):] == INTEGRATION_URI_APPENDER:
function_name = function_name[:-len(INTEGRATION_URI_APPENDER)]
return function_name, source_arn
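# --- Hedged illustration (added; not part of the original class). With placeholder
# region/account values, the regex built above recovers the bare function name from an
# AWS_PROXY integration uri:
#
#     uri = ('arn:aws:apigateway:us-east-1:lambda:path/2015-03-31/functions/'
#            'arn:aws:lambda:us-east-1:111111111111:function:helloWorld/invocations')
#     re.search('.*/arn:aws:lambda:us-east-1:111111111111:function:(.*)/invocations$',
#               uri).group(1)
#     # -> 'helloWorld'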
@staticmethod
def _map_aliases(func_name):
"""
Returns all aliases associated with a given (lambda) function.
"""
list_aliases_res = CLIENT_LAMBDA.list_aliases(
FunctionName=func_name,
)
aliases = list_aliases_res["Aliases"]
return aliases
@staticmethod
def _get_integrations(rest_api_id):
"""
Gathers all resources for a given api gateway and returns
its integrations.
In particular, the `resource id`, `resource method`
and `resource path`
"""
# fetch api resources
resources = CLIENT_GATEWAY.get_resources(
restApiId=rest_api_id,
)
integrations = []
# traverse all the resources
for resource in resources['items']:
# we are only interested in "resource methods"
if "resourceMethods" not in resource.keys():
continue
resource_methods = resource["resourceMethods"].keys()
for resource_method in resource_methods:
integrations.append(
_INTEGRATION(
rest_api_id,
resource["id"],
resource_method,
resource["path"]))
return integrations
@staticmethod
def _assemble(region, integrations):
"""
Helper function that combines data.
"""
apifns = []
for integration in integrations:
get_integration_res = CLIENT_GATEWAY.get_integration(
restApiId=integration.rest_api_id,
resourceId=integration.resource_id,
httpMethod=integration.http_method
)
# we are only interested at AWS_PROXY integrations as those
# are integrations that are created by serverless framework
if get_integration_res["type"] != "AWS_PROXY":
continue
function_name, source_arn = Mapper._map_fn(
integration.rest_api_id,
region,
integration.http_method,
integration.path,
get_integration_res["uri"]
)
aliases = Mapper._map_aliases(function_name)
apifns.append(
_APIFNS(
integration.rest_api_id,
integration.resource_id,
integration.http_method,
get_integration_res["uri"],
integration.path,
function_name,
source_arn,
aliases
)
)
return apifns
@staticmethod
def run(rest_api_id, region):
"""
Gets the integrations of a given api gateway and appends
information to them (_assemble)
"""
integrations = Mapper._get_integrations(rest_api_id)
apifns = Mapper._assemble(region, integrations)
return apifns
def _add_permission(api_fn, stage_name):
"""
Add `lambda:InvokeFunction` permission to an (aliased) lambda function
that is wired to an api gateway.
"""
try:
CLIENT_LAMBDA.add_permission(
FunctionName="{0}:{1}".format(api_fn.function_name, stage_name),
StatementId=STATEMENT_ID,
Action='lambda:InvokeFunction',
Principal='apigateway.amazonaws.com',
SourceArn=api_fn.source_arn,
)
except CLIENT_LAMBDA.exceptions.ResourceConflictException as err:
error_msg = {
"exception": "CLIENT_LAMBDA.exceptions.ResourceConflictException",
"fn": api_fn,
"error": err
}
print(json.dumps(error_msg, indent=4))
except Exception as ex:
print(ex)
def _default_alias_is_added(fn_aliases, default_alias_name):
"""
Checks if the `default alias` is part of a list of aliases
"""
for alias in fn_aliases:
if alias["Name"] == default_alias_name:
return True
return False
def _create_alias(function_name, alias_name, function_version):
"""
Creates an alias and points to a specific version of a
(lambda) function.
"""
CLIENT_LAMBDA.create_alias(
FunctionName=function_name,
Name=alias_name,
FunctionVersion=function_version
)
def _integration_uri_is_already_updated(uri):
"""
A typical uri would look like this:
arn:aws:apigateway:{0}:lambda:path/2015-03-31/functions/arn:aws:lambda:{0}:{1}:function:{2}/invocations
{0}: region
{1}: account id
{2}: function name
Example of a function name:
helloWorld
We consider a uri integration to be `updated` when the function name is such:
helloWorld:${stageVariables.lambdaAlias}
This ensures that only a specific alias (therefore a specific locked version) can be called for
a (lambda) function.
"""
invocation_str = "/invocations"
uri_minus_invocation_str = uri[:-len(invocation_str)]
is_already_updated = uri_minus_invocation_str[-len(INTEGRATION_URI_APPENDER):] == INTEGRATION_URI_APPENDER
return is_already_updated, invocation_str, uri_minus_invocation_str
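# --- Hedged illustration (added; not part of the original script). An integration uri
# before and after the rewrite performed by update_integration_uri() below; the region,
# account id, and function name are placeholder values:
#
#     before = ('arn:aws:apigateway:us-east-1:lambda:path/2015-03-31/functions/'
#               'arn:aws:lambda:us-east-1:111111111111:function:helloWorld/invocations')
#     after = before.replace('/invocations',
#                            ':${stageVariables.lambdaAlias}/invocations')
#     _integration_uri_is_already_updated(before)[0]   # False
#     _integration_uri_is_already_updated(after)[0]    # True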
def _get_service_from_path(path):
"""
Resource paths are generally simple URLs such as:
/generalcalc/getHouseCostByCity
/subscription/signUp
/subscription/subscribeBasicPlan
/country/countries
/subscription/login
/subscription/confirmSignUp
/country/countries/{cca2}
This simply extracts the first path segment (the string between the first two slashes).
"""
return re.search(SERVICE_FROM_PATH_PATTERN, path).group(1)
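# --- Hedged illustration (added): SERVICE_FROM_PATH_PATTERN pulls out the first path
# segment, e.g.
#
#     _get_service_from_path('/subscription/signUp')        # -> 'subscription'
#     _get_service_from_path('/country/countries/{cca2}')   # -> 'country'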
def _tag_documentation_exists(rest_api_id, path, http_method):
"""
Checks if a specific resource path/method has a `tag` documentation.
"""
get_documentation_parts_res = CLIENT_GATEWAY.get_documentation_parts(
restApiId=rest_api_id,
type="METHOD",
path=path,
)
if not get_documentation_parts_res["items"]:
return False
for item in get_documentation_parts_res["items"]:
if item["location"]["method"] == http_method:
return True
return False
def _get_cloudformation_export(exportName):
exports = CLIENT_CLOUDFORMATION.list_exports()
if 'Exports' not in exports.keys():
exit()
for export in exports['Exports']:
if export['Name'] == exportName:
return export['Value']
exit()
def get_deployed_stages(rest_api_id):
"""
Returns all the stages that are deployed
"""
stages = []
get_stages_res = CLIENT_GATEWAY.get_stages(
restApiId=rest_api_id,
)
for get_stage_res in get_stages_res["item"]:
stages.append(get_stage_res["stageName"])
return stages
def create_alias_default(api_fns, default_alias_name):
"""
Creates the so called `default alias` which is just
an alias that points to the latest version of
(lambda) function(s).
"""
def create(function_name):
return _create_alias(function_name, default_alias_name, "$LATEST")
for api_fn in api_fns:
aliases = api_fn.aliases
function_name = api_fn.function_name
if not aliases:
create(function_name)
else:
default_is_added = _default_alias_is_added(aliases, default_alias_name)
if not default_is_added:
create(function_name)
def update_integration_uri(api_fns, default_alias_name):
"""
For every (lambda) function that is integrated with
an api gateway - update the integration uri such that
the api gateway can only call a specific version of
those (lambda) functions through an alias.
"""
for api_fn in api_fns:
is_already_updated, invocation_str, uri_minus_invocation_str = _integration_uri_is_already_updated(api_fn.uri)
if not is_already_updated:
new_uri = "{0}{1}{2}".format(
uri_minus_invocation_str,
INTEGRATION_URI_APPENDER,
invocation_str
)
CLIENT_GATEWAY.update_integration(
restApiId=api_fn.rest_api_id,
resourceId=api_fn.resource_id,
httpMethod=api_fn.http_method,
patchOperations=[
{
'op': 'replace',
'path': '/uri',
'value': new_uri,
},
]
)
_add_permission(api_fn, default_alias_name)
def create_domain_mapping_default(rest_api_id, domain_name, default_alias_name):
"""
Creates the domain name mapping for the default stage.
"""
def create():
CLIENT_GATEWAY.create_base_path_mapping(
domainName=domain_name,
basePath=default_alias_name,
restApiId=rest_api_id,
stage=default_alias_name
)
get_base_path_mappings_res = CLIENT_GATEWAY.get_base_path_mappings(
domainName=domain_name,
)
mappings = get_base_path_mappings_res['items']
if not mappings:
create()
else:
already_mapped = False
for mapping in mappings:
if mapping["basePath"] == default_alias_name and mapping["stage"] == default_alias_name:
already_mapped = True
if not already_mapped:
create()
def default_stage_contains_staged_variable(rest_api_id, default_alias_name):
"""
Checks if the default stage contains the `stage variable alias`.
"""
get_stage_res = CLIENT_GATEWAY.get_stage(
restApiId=rest_api_id,
stageName=default_alias_name
)
if "variables" not in get_stage_res.keys():
return False
stage_variables = get_stage_res["variables"]
if STAGE_VARIABLE_ALIAS not in stage_variables.keys():
return False
if stage_variables[STAGE_VARIABLE_ALIAS] != default_alias_name:
return False
return True
def create_tag_documentation(rest_api_id, api_fns):
"""
Creates tag documentation for a given list of functions.
"""
for api_fn in api_fns:
path = api_fn.path
http_method = api_fn.http_method
already_exists = _tag_documentation_exists(rest_api_id, path, http_method)
if not already_exists:
service_name = _get_service_from_path(path)
tag = {
"tags": [
service_name
]
}
CLIENT_GATEWAY.create_documentation_part(
restApiId=rest_api_id,
location={
"type": "METHOD",
"path": path,
"method": http_method
},
properties=json.dumps(tag),
)
def run_after_default_deployment(rest_api_id, region, default_alias_name, domain_name=None):
"""
Typically what you would need to run after a
`serverless deploy` deployment.
This ensures that every function integration is configured such that
the api gateway can only call a specific version of any
lambda function via an alias, using a stage variable.
"""
api_fns = Mapper.run(rest_api_id, region)
create_alias_default(api_fns, default_alias_name)
update_integration_uri(api_fns, default_alias_name)
if domain_name is not None:
create_domain_mapping_default(rest_api_id, domain_name, default_alias_name)
# create_tag_documentation(rest_api_id, api_fns)
contains_stage_var = default_stage_contains_staged_variable(rest_api_id, default_alias_name)
if not contains_stage_var:
print('''
PLEASE ADD THE FOLLOWING STAGE VARIABLE TO [STAGE: {0}]:
{1} : {2}
'''.format(default_alias_name, STAGE_VARIABLE_ALIAS, default_alias_name))
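# --- Hedged usage sketch (added; not part of the original script). A typical call after
# `serverless deploy`; the api id, region, alias, and domain below are placeholder values:
#
#     run_after_default_deployment(
#         rest_api_id='abc123defg',
#         region='us-east-1',
#         default_alias_name='dev',
#         domain_name='api.example.com',   # optional
#     )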
def freeze_functions(api_fns, stage_name):
"""
Creates an alias (that's the string for `stage_name`)
for a given list of functions.
"""
for api_fn in api_fns:
function_name = api_fn.function_name
publish_version_res = CLIENT_LAMBDA.publish_version(FunctionName=function_name)
try:
CLIENT_LAMBDA.create_alias(
FunctionName=function_name,
Name=stage_name,
FunctionVersion=publish_version_res["Version"]
)
_add_permission(api_fn, stage_name)
except Exception as ex:
print("[WARNING] error while freezing function from source arn: {0}".format(api_fn.source_arn))
print("DETAILS: {0}".format(ex))
def deploy(rest_api_id, region, version, stage_description, domain_name=None):
"""
Deploys a stage to an api gateway.
The definition of `deploy` here is:
- take all resources of an api gateway (those are lambda functions at the core)
- the integrations of those lambda functions are assumed to have been modified
such that only a specific version can be invoked
- `freeze` those functions and create an alias that points to those frozen versions
- creates a new stage under the api gateway and ensure that staged version calls only
those previously frozen functions
"""
# ensure version follows semantic versioning standard
if not re.match("\d+\.\d+\.\d+", version):
exit("{0} DOES NOT FOLLOW SEMANTIC VERSIONING. FOLLOW SEMANTIC VERSIONING!".format(version))
# ensure that version was not already deployed
stage_name = version.replace(".", "-")
stages = get_deployed_stages(rest_api_id)
if stage_name in stages:
exit("YOU ALREADY DEPLOYED {0}".format(version))
# extract functions' info and freeze them
api_fns = Mapper.run(rest_api_id, region)
freeze_functions(api_fns, stage_name)
# deploy frozen functions
CLIENT_GATEWAY.create_deployment(
restApiId=rest_api_id,
stageName=stage_name,
stageDescription=stage_description,
description=stage_description,
variables={
"{0}".format(STAGE_VARIABLE_ALIAS): stage_name
},
)
# map to domain
if domain_name is not None:
CLIENT_GATEWAY.create_base_path_mapping(
domainName=domain_name,
basePath=stage_name.replace("-", "."),
restApiId=rest_api_id,
stage=stage_name
)
def re_deploy(rest_api_id, region, version, stage_description):
"""
Deploys a stage to an api gateway.
The definition of `deploy` here is:
- take all resources of an api gateway (those are lambda functions at the core)
- the integrations of those lambda functions are assumed to have been modified
such that only
# Source: tommac7/hydroshare
"""
This model supports user labeling of resources in various ways.
For a User u, this instantiates a subobject u.ulabels (like u.uaccess)
that contains all the labeling functions.
Functions include:
* u.ulabels.label_resource(r, label)
instantiates a label for a resource. Resources can have multiple labels.
* u.ulabels.unlabel_resource(r, label)
removes a label; there can be many labels.
* u.ulabels.clear_resource_labels(r)
removes all labels for a resource
* u.ulabels.favorite_resource(r)
favorites a resource
* u.ulabels.unfavorite_resource(r)
removes a favorite
and the reporting functions
* u.ulabels.labeled_resources
A queryset of resources that are labeled.
* u.ulabels.favorited_resources
A queryset of resources that have been favorited
* u.ulabels.get_resources_with_label(label)
Get a queryset of resources possessing a specific label.
For a BaseResource r, this also adds a subobject rlabels that reports on labels for resources
* r.rlabels.get_labels(u)
* r.rlabels.is_favorite(u)
* r.rlabels.is_mine(u)
"""
# TODO: combine label filtering with access control
import re
from django.contrib.auth.models import User
from django.db import models
from django.db import transaction
from django.db.models import Q
from hs_core.models import BaseResource
class FlagCodes(object):
"""
Flag codes describe the meanings of per-user flags for a resource.
* 1 or FlagCodes.FAVORITE:
marked as a favorite on "My Resources" page
* 2 or FlagCodes.MINE:
marked as being part of "My Resources" on "Discover" page.
"""
FAVORITE = 1
MINE = 2
OPEN_WITH_APP = 3
FLAG_CHOICES = (
(FAVORITE, 'Favorite'), # marked as favorite in my resources page.
(MINE, 'Mine'), # marked as mine in discovery page.
(OPEN_WITH_APP, 'Open With App'), # marked as a open_with app
)
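# --- Hedged illustration (added; not part of the original module). With these codes,
# favoriting and "claiming" a resource are just UserResourceFlags rows that differ only
# in `kind` (assuming a User `u` and a BaseResource `r` in a configured Django project):
#
#     u.ulabels.favorite_resource(r)    # row with kind=FlagCodes.FAVORITE
#     u.ulabels.claim_resource(r)       # row with kind=FlagCodes.MINE
#     u.ulabels.favorited_resources     # queryset containing r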
class UserResourceLabels(models.Model):
"""
Labels of a user for a resource
This model stores labels of an individual user, like an access control list.
"""
start = models.DateTimeField(editable=False, auto_now=True)
user = models.ForeignKey(User, null=False, editable=False,
related_name='u2url', # unused but must be defined and unique
help_text='user assigning a label',
on_delete=models.CASCADE)
resource = models.ForeignKey(BaseResource, null=False, editable=False,
related_name='r2url', # unused but must be defined and unique
help_text='resource to which a label applies',
on_delete=models.CASCADE)
label = models.TextField(null=False)
class Meta:
unique_together = ('user', 'resource', 'label')
class UserResourceFlags(models.Model):
"""
Per-user flagging of resources.
This model stores per-resource flags of an individual user, like an access
control list; there are several kinds of flags, documented in FlagCodes.
These are similar in implementation but differ in semantics.
"""
kind = models.IntegerField(choices=FlagCodes.FLAG_CHOICES,
editable=False,
default=FlagCodes.FAVORITE)
start = models.DateTimeField(editable=False, auto_now=True)
user = models.ForeignKey(User, null=False, editable=False,
related_name='u2urf', # unused but must be defined and unique
help_text='user assigning a flag',
on_delete=models.CASCADE)
resource = models.ForeignKey(BaseResource, null=False, editable=False,
related_name="r2urf", # unused but must be defined and unique
help_text='resource to which a flag applies',
on_delete=models.CASCADE)
class Meta:
unique_together = ('user', 'resource', 'kind')
class UserStoredLabels(models.Model):
"""
Storage class for persistent labels that are reusable across different kinds of objects
"""
user = models.ForeignKey(User, null=False,
help_text='user who stored the label',
related_name='ul2usl',
on_delete=models.CASCADE)
label = models.TextField(help_text='label to be stored by user')
class Meta:
unique_together = ('user', 'label')
class UserLabels(models.Model):
"""
Projection class puts methods and content inside basic User object
so that one can access things easily from that context.
This model is injected into the User class as the related name "ulabels".
Thus for a User u, u.ulabels is this model.
"""
user = models.OneToOneField(User,
editable=False,
null=True,
related_name='ulabels', # induced field in User class.
related_query_name='ulabels',
on_delete=models.CASCADE)
##########################################
# PUBLIC FUNCTIONS: resources
##########################################
@property
def labeled_resources(self):
"""
Get a QuerySet of resources labeled by a user.
This eliminates duplicates.
"""
return BaseResource.objects.filter(r2url__user=self.user).distinct()
def get_flagged_resources(self, this_flagcode):
"""
Get resources with a specific flag.
"""
if __debug__: # during testing only, check argument types and preconditions
assert this_flagcode == FlagCodes.FAVORITE or this_flagcode == FlagCodes.MINE or \
this_flagcode == FlagCodes.OPEN_WITH_APP
return BaseResource.objects.filter(r2urf__user=self.user,
r2urf__kind=this_flagcode)
@property
def favorited_resources(self):
"""
Get a QuerySet of resources favorited by a user.
This eliminates duplicates.
"""
return self.get_flagged_resources(FlagCodes.FAVORITE)
@property
def my_resources(self):
"""
Get a QuerySet of resources marked as mine (add to my resources) by a user.
This eliminates duplicates.
"""
return self.get_flagged_resources(FlagCodes.MINE)
@property
def resources_of_interest(self):
"""
Get a QuerySet of resources the user has tagged in any way.
"""
return BaseResource.objects.filter(Q(r2url__user=self.user) | Q(r2urf__user=self.user)).distinct()
def get_resources_with_label(self, this_label):
"""
Get a QuerySet of resources with a specific label.
"""
if __debug__: # during testing only, check argument types and preconditions
assert isinstance(this_label, basestring)
label_string = UserLabels.clean_label(this_label) # remove leading and trailing spaces
return BaseResource.objects.filter(r2url__user=self.user,
r2url__label__exact=label_string)\
.distinct()\
.order_by('r2url__label')
@property
def user_labels(self):
"""
Get a QuerySet of labels in use now.
"""
return UserResourceLabels.objects.values_list('label', flat=True)\
.filter(user=self.user)\
.distinct().order_by('label')
######################################
# Label a resource
######################################
@staticmethod
def clean_label(name):
label_string = re.sub('/', r'', name) # no /'s
label_string = label_string.strip() # no leading or trailing whitespace
label_string = re.sub(r'\s+', r' ', label_string) # collapse multiple whitespace, including tabs
return label_string
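# --- Hedged illustration (added): slashes are dropped, then surrounding and repeated
# whitespace is collapsed, e.g.
#
#     UserLabels.clean_label('  favorite / papers\t2024 ')   # -> 'favorite papers 2024'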
def label_resource(self, this_resource, this_label):
"""
Assign a label to a resource
Users are allowed to label any resource, including resources to which they do not have access.
This is not an access control problem because labeling information is private.
"""
if __debug__: # during testing only, check argument types and preconditions
assert isinstance(this_resource, BaseResource)
assert isinstance(this_label, basestring)
# remove leading and trailing spaces
label_string = UserLabels.clean_label(this_label)
with transaction.atomic(): # empirically, get_or_create is not atomic.
UserResourceLabels.objects.get_or_create(resource=this_resource,
label=label_string,
user=self.user)
def unlabel_resource(self, this_resource, this_label):
"""
Remove one label from a resource
Users are allowed to label any resource, including resources to which they do not have access.
This is not an access control problem because labeling information is private.
"""
if __debug__: # during testing only, check argument types and preconditions
assert isinstance(this_resource, BaseResource)
assert isinstance(this_label, basestring)
# remove leading and trailing spaces
label_string = UserLabels.clean_label(this_label)
UserResourceLabels.objects.filter(resource=this_resource,
label__exact=label_string,
user=self.user).delete()
def clear_resource_labels(self, this_resource):
"""
Clear all labels for a resource
"""
if __debug__: # during testing only, check argument types and preconditions
assert isinstance(this_resource, BaseResource)
UserResourceLabels.objects.filter(resource=this_resource,
user=self.user).delete()
def remove_resource_label(self, this_label):
"""
clear a label from the labeling system.
"""
if __debug__: # during testing only, check argument types and preconditions
assert isinstance(this_label, basestring)
UserResourceLabels.objects.filter(label=this_label, user=self.user)\
.delete()
##########################################
# general flagging of resources
##########################################
def flag_resource(self, this_resource, this_flagcode):
"""
flag a resource with a specific flag code from FlagCodes
Users are allowed to flag any resource, including resources to which they do not have access.
This is not an access control problem because flagging information is private.
"""
if __debug__: # during testing only, check argument types and preconditions
assert isinstance(this_resource, BaseResource)
assert this_flagcode == FlagCodes.FAVORITE or this_flagcode == FlagCodes.MINE or \
this_flagcode == FlagCodes.OPEN_WITH_APP
with transaction.atomic(): # empirically, get_or_create is not atomic.
UserResourceFlags.objects.get_or_create(resource=this_resource,
kind=this_flagcode,
user=self.user)
def unflag_resource(self, this_resource, this_flagcode):
"""
unflag a resource with a specific flag.
Users are allowed to flag any resource, including resources to which they do not have access.
This is not an access control problem because flagging information is private.
"""
if __debug__: # during testing only, check argument types and preconditions
assert isinstance(this_resource, BaseResource)
assert this_flagcode == FlagCodes.FAVORITE or this_flagcode == FlagCodes.MINE or \
this_flagcode == FlagCodes.OPEN_WITH_APP
UserResourceFlags.objects.filter(user=self.user,
resource=this_resource,
kind=this_flagcode).delete()
def clear_all_flags(self, this_flagcode):
"""
remove all flags of a specific kind for a user
"""
UserResourceFlags.objects.filter(user=self.user,
kind=this_flagcode)\
.delete()
##########################################
# favorite resources
##########################################
def favorite_resource(self, this_resource):
"""
Mark a resource as favorite.
Users are allowed to flag any resource, including resources to which they do not have access.
This is not an access control problem because labeling information is private.
"""
self.flag_resource(this_resource, FlagCodes.FAVORITE)
def unfavorite_resource(self, this_resource):
"""
Clear favorite label for a resource
Users are allowed to flag any resource, including resources to which they do not have access.
This is not an access control problem because labeling information is private.
"""
self.unflag_resource(this_resource, FlagCodes.FAVORITE)
##########################################
# my resources
##########################################
def claim_resource(self, this_resource):
"""
Label a resource as 'MINE' (adds to my resources).
Users are allowed to flag any resource, including resources to which they do not have access.
This is not an access control problem because labeling information is private.
"""
self.flag_resource(this_resource, FlagCodes.MINE)
def unclaim_resource(self, this_resource):
"""
Clear 'MINE' label for a resource (removes from my resources)
Users are allowed to flag any resource, including resources to which they do not have access.
This is | |
# Source: sammdu/bot-tac-toe (python/tictactoe.py)
"""
The TicTacToe game class and peripheral functions.
--------------------------------------------------------------------------------
MIT License
Copyright (c) 2021 Mu "<NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from __future__ import annotations
from typing import Optional, Any, Union
import random
import copy
import game_tree as gt
################################################################################
# Tic Tac Toe game representation
################################################################################
def empty_board(side: int) -> list:
"""
generate an empty game board (list of lists) with sidelength `side`
>>> empty_board(3)
[['', '', ''], ['', '', ''], ['', '', '']]
"""
board = []
for _ in range(side):
row = [''] * side
board.append(row)
return board
class GameState():
"""
A class representing a Tic Tac Toe game state.
[c] This class took inspiration from "CSC111 Winter 2021 Assignment 2: Trees, Chess,
and Artificial Intelligence (Minichess Library)", by <NAME> and <NAME>;
though there are very few similarities due to the different nature of this game
Instance Attributes:
- next_player: the player from {'p1', 'p2'} that will place the next game piece
- empty_spots: a list of vacant spot on the game board available to be filled
- move_history: a history of moves that occured in this game
"""
next_player: str
empty_spots: list[Optional[str]]
move_history: list[Optional[str]]
# Private Instance Attributes:
# - _board: a nested list representing a tictactoe board
# - _board_side: the side length of the board
_board: list[list[str]]
_board_side: int
def __init__(
self,
board: list[list[str]],
next_player: str = 'p1',
move_hist: Optional[list] = None
) -> None:
self._board = board
self._board_side = len(self._board) # calculate the side length of the game board
self.move_history = move_hist if move_hist is not None else []
self.next_player = next_player
self.empty_spots = self._find_empty_spots()
def _find_empty_spots(self) -> list[Optional[str]]:
empty_spots = []
for row_idx in range(self._board_side):
for col_idx in range(self._board_side):
if self._board[row_idx][col_idx] == '':
empty_spots.append(str(row_idx) + str(col_idx))
return empty_spots
def get_side_length(self) -> int:
"""
return the board's side length
"""
return self._board_side
def place_piece(self, piece: str, spot: str) -> None:
"""
place the given piece on the given spot on the game board, if the spot is empty;
ensure that the spot given exists on the board (is not out of range)
Preconditions:
- `spot` must be a string of two integers, first representing the row, second
representing the column in the board. `ValueError`s will be raised if this
is not satisfied
- the spot must be empty, or a `ValueError` will be raised
- piece in {'x', 'o'}
"""
row = int(spot[0])
col = int(spot[1])
if row >= self._board_side or row < 0:
raise ValueError(f"[!] Given row {row} in spot {spot} is out of range.")
if col >= self._board_side or col < 0:
raise ValueError(f"[!] Given column {col} in spot {spot} is out of range.")
if spot in self.empty_spots: # check if the spot is empty
self._board[row][col] = piece
self.empty_spots.remove(spot)
self.next_player = 'p2' if self.next_player == 'p1' else 'p1'
self.move_history.append(spot)
else:
raise ValueError(f"[!] Given spot {spot} is not empty.")
def copy_and_place_piece(self, piece: str, spot: str) -> Any:
"""
make a copy of the current game state, make a move in the game state copy, and
return the game state copy object
"""
next_player = 'p2' if self.next_player == 'p1' else 'p1'
new_board = copy.deepcopy(self._board)
new_hist = copy.deepcopy(self.move_history)
new_game = GameState(new_board, next_player, new_hist)
new_game.place_piece(piece, spot)
return new_game
def get_winning_piece(self) -> Optional[str]:
"""
return 'x' or 'o' or `None` as the winner of the game in its current state
"""
# check each row
for row in self._board:
if all(spot == 'x' for spot in row):
return 'x'
elif all(spot == 'o' for spot in row):
return 'o'
# grab the side length of the game board
side = self._board_side
# if no winners in rows, check each column
for col_num in range(side):
if all(row2[col_num] == 'x' for row2 in self._board):
return 'x'
elif all(row2[col_num] == 'o' for row2 in self._board):
return 'o'
# if still no winners, check the two diagonals
# top-left to bottom-right
if all(self._board[i][i] == 'x' for i in range(side)):
return 'x'
elif all(self._board[i][i] == 'o' for i in range(side)):
return 'o'
# top-right to bottom-left
if all(self._board[i][side - i - 1] == 'x' for i in range(side)):
return 'x'
elif all(self._board[i][side - i - 1] == 'o' for i in range(side)):
return 'o'
# if there are no empty spots in the board then it's a tie
# [*] this can be improved by predicting early ties, but I won't implement it
# right now
if not self.empty_spots:
return "tie"
# otherwise there's no winner yet
return None
################################################################################
# Player Classes
################################################################################
class Player:
"""
An abstract class representing a Tic Tac Toe player.
[c] This class and its subclasses took inspiration from "CSC111 Winter 2021 Assignment
2: Trees, Chess, and Artificial Intelligence (Game Tree)", by <NAME> and <NAME>
"""
# Private Instance Attributes:
# - _piece: game piece of the current player, either `x` or `o`
_piece: str
def __init__(self, piece: str) -> None:
assert piece in {'x', 'o'}
self._piece = piece
def return_move(self, game: GameState, prev_move: str) -> tuple[str, str]:
"""
return the game piece {'x', 'o'} and a move in the given game state
`prev_move` is the opponent player's most recent move, or `None` if no moves
have been made
"""
raise NotImplementedError
class AIRandomPlayer(Player):
"""
An 'AI' player that simply makes random moves that are available in the game state.
"""
def return_move(self, game: GameState, prev_move: str) -> tuple[str, str]:
"""
return the game piece {'x', 'o'} and a move in the given game state;
for this player, the move will be chosen at random from the available empty spots
`prev_move` is the opponent player's most recent move, or `None` if no moves
have been made; not used by `AIRandomPlayer`
"""
return self._piece, random.choice(game.empty_spots)
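# --- Hedged usage sketch (added; not part of the original module). Plays a single random
# move on an empty 3x3 board; the helper is defined here but never called.
def _example_random_move() -> GameState:
    game = GameState(empty_board(3))          # 'p1' moves first by default
    player = AIRandomPlayer('x')
    piece, spot = player.return_move(game, prev_move=None)
    game.place_piece(piece, spot)             # places 'x' at a random empty spot
    return game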
class AIMinimaxPlayer(Player):
"""
An 'AI' player that employs a MiniMax algorithm on a game tree to make moves in the
game state.
Instance Attributes:
- `difficulty`: "easy" or "hard"; used to determine search depth of the algorithm
- `is_x`: True if my piece is 'x', False if my piece is 'o'
"""
difficulty: str
is_x: bool
# Private Instance Attributes:
# - _tree: game tree generated by the current player
_tree: gt.GameTree
_depth: int
def __init__(self, piece: str, difficulty: str) -> None:
super().__init__(piece)
self.difficulty = difficulty
self.is_x = (piece == 'x')
# initialize an empty game tree with my piece, and a 0 x win score
self._tree = gt.GameTree(None, self.is_x, 0)
@staticmethod
def _score_node(game: GameState) -> int:
"""
return a Minimax utility score based on the given game state
There is a scoring constant of '1' when 'x' wins, '-1' when 'x' loses, or '0'
otherwise; this constant is multiplied by the number of empty spots left in the
game, to incentivize victory in the fewest steps
The idea of multiplying the number of empty spots with the scoring constant
{1, -1, 0} to reward wins made in fewer steps came from this video:
https://youtu.be/fT3YWCKvuQE
NO OTHER IDEAS OR CODE CAME FROM THE ABOVE SOURCE
"""
piece = game.get_winning_piece()
if piece == 'x':
return 1 * len(game.empty_spots)
elif piece == 'o':
return -1 * | |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import types
import random
try:
from rest_framework_filters import MethodFilter
except ImportError:
from edw.rest.filters.common import MethodFilter
from django.core.exceptions import (
ObjectDoesNotExist,
MultipleObjectsReturned,
)
from django.db import models, transaction
from django.db.models.base import ModelBase
from django.utils import six
from django.utils.functional import cached_property
from django.utils.module_loading import import_string
from rest_framework import exceptions
from rest_framework import serializers
from rest_framework.fields import empty
class RESTOptions(object):
"""
ENG: Options class for REST models. Use this as an inner class called ``RESTMeta``::
class MyModel(Model):
class RESTMeta:
exclude = ['name']
include = {
'test_id': ('rest_framework.serializers.IntegerField', {
'write_only': True
'model': 'Full model name' | Class # optional parameter for EntityDetailSerializer class,
can be Class or full import class string (nash_region.models.person.private_person.ResponsiblePerson)
}),
}
filters = {
'published_at': filters.IsoDateTimeFilter(
name='published_at', lookup_expr='exact'),
'close_at': ('rest_framework_filters.IsoDateTimeFilter', {
'name': 'close_at',
'lookup_expr': 'exact',
# 'action': lambda qs, value: qs
}),
'is_id__in__18_19': ('rest_framework_filters.MethodFilter', {
})
}
def filter_is_id__in__18_19(self, name, qs, value):
return qs.filter(id__in=[18, 19])
def filter_queryset(self, request, queryset, view):
# if view.action == 'list':
# pass
return queryset
validators = []
def create(self, validated_data):
test_id = validated_data.pop('test_id', None)
# print ("Get test_id", test_id)
instance = super(self.__class__, self).create(validated_data)
# print("Created instance", instance)
return instance
def update(self, instance, validated_data):
test_id = validated_data.pop('test_id', None)
# print ("Get test_id", test_id)
instance = super(self.__class__, self).update(instance, validated_data)
# print("Updated instance", self.partial, instance)
return instance
def validate_key(self, value):
# print ("+ Validate key +", value)
if 'x' not in value.lower():
raise serializers.ValidationError("Key must have `x`")
return value
def validate(self, data):
try:
ControllerActivity.objects.get(key=data['key'])
except ControllerActivity.DoesNotExist:
raise serializers.ValidationError(_('Invalid controller key'))
return data
group_by = ['particularproblem__name', 'is_solved']
def get_group_by(self):
if self.data_mart is not None and self.queryset.count() <= self.data_mart.limit:
return []
return self.group_by
RUS: Options class for REST models.
"""
exclude = []
include = {}
permission_classes = []
lookup_fields = ('id',)
filters = {}
create = None
update = None
validate = None
DB_FOR_READ = None
@staticmethod
def db_for_read(self, model, **hints):
return model._rest_meta.DB_FOR_READ
validators = None
_fields_validators = []
group_by = []
get_group_by = None
def __init__(self, opts=None, **kwargs):
"""
ENG: Override defaults with options provided
RUS: Overrides the default option values.
"""
if opts:
opts = list(opts.__dict__.items())
else:
opts = []
opts.extend(list(kwargs.items()))
for key, value in opts:
if key[:2] == '__':
continue
setattr(self, key, value)
def __iter__(self):
"""
ENG: Iterate over the options provided.
RUS: Returns an iterator object; the default options are overridden by the provided data.
"""
return ((k, v) for k, v in self.__dict__.items() if k[0] != '_')
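# --- Hedged illustration (added; not part of the original module). RESTOptions copies
# public attributes from a plain options class and iterates over them; the class and
# attribute values below are hypothetical:
#
#     class _ExampleRESTMeta:
#         exclude = ['name']
#         lookup_fields = ('id', 'slug')
#
#     opts = RESTOptions(_ExampleRESTMeta)
#     opts.lookup_fields        # ('id', 'slug')
#     dict(opts)['exclude']     # ['name']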
class RESTModelBase(ModelBase):
"""
ENG: Metaclass for REST models.
RUS: Metaclass for REST models.
"""
def __new__(cls, name, bases, attrs):
"""
ENG: Create subclasses of Model. This:
- adds the RESTMeta fields to the class
RUS: Creates subclasses of Model.
Adds the RESTMeta fields to the class.
Extends RESTMeta with the base classes.
"""
new = super(RESTModelBase, cls).__new__(cls, name, bases, attrs)
# Grab `Model.RESTMeta`, and rename it `_rest_meta`
RESTMeta = attrs.pop('RESTMeta', None)
if not RESTMeta:
class RESTMeta:
pass
initial_options = frozenset(dir(RESTMeta))
# extend RESTMeta from base classes
for base in bases:
if hasattr(base, '_rest_meta'):
for name, value in base._rest_meta:
if name not in initial_options:
setattr(RESTMeta, name, value)
opts = RESTOptions(RESTMeta)
# parse field validators
include_fields = opts.include.copy()
for field_name in [field.name for field in new._meta.fields]:
include_fields[field_name] = None
for field_name in opts.exclude:
include_fields.pop(field_name, None)
field_level_validators_names = ["validate_{}".format(field_name) for field_name in include_fields.keys()]
fields_validators = []
for name in field_level_validators_names:
validator = getattr(opts, name, None)
if validator is not None:
fields_validators.append(name)
opts._fields_validators = fields_validators
setattr(new, '_rest_meta', opts)
return new
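# --- Hedged illustration (added; not part of the original module). A model opting in to
# this metaclass would, under the conventions above, look roughly like the sketch below
# (assumes a configured Django project; `MyEntity` and its fields are hypothetical):
#
#     class MyEntity(six.with_metaclass(RESTModelBase, models.Model)):
#         name = models.CharField(max_length=255)
#
#         class RESTMeta:
#             exclude = ['name']
#             lookup_fields = ('id',)
#
#     # After class creation the parsed options are available as:
#     # MyEntity._rest_meta.exclude == ['name']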
class RESTMetaSerializerMixin(object):
def __init__(self, *args, **kwargs):
"""
RUS: Class constructor.
Overrides the value of rest_meta.
"""
# This is needed so that a KeyError is not raised when the serializer receives a `model`
# parameter; popping it here is the quickest way to extract that data from kwargs. The
# `model` parameter makes it possible to specify a concrete model for the Detail
# serializer when constructing the serializer metadata.
kwargs.pop('model', None)
instance = args[0] if args else None
if instance is not None and hasattr(instance, '_rest_meta'):
self.rest_meta = instance._rest_meta
else:
if hasattr(self, 'Meta'):
self.rest_meta = getattr(self.Meta.model, '_rest_meta', None)
else:
self.rest_meta = None
context = kwargs.get('context', None)
if context is not None:
data_mart = context.get('data_mart', None)
if data_mart is not None:
model = data_mart.entities_model
self.rest_meta = model._rest_meta
super(RESTMetaSerializerMixin, self).__init__(*args, **kwargs)
def get_serializer_to_patch(self):
"""
RUS: Determines the serializer that should be patched.
"""
return self
class RESTMetaListSerializerPatchMixin(object):
def get_serializer_to_patch(self):
"""
RUS: For a list serializer, patch `self.child` instead.
"""
return self.child
class DynamicFieldsSerializerMixin(RESTMetaSerializerMixin):
"""
RUS: Mixin for dynamic serialization of database fields.
Allows fields to be removed from and added to the serialized representation of an object.
"""
def __init__(self, *args, **kwargs):
super(DynamicFieldsSerializerMixin, self).__init__(*args, **kwargs)
if self.rest_meta:
remove_fields, include_fields = self.rest_meta.exclude, self.rest_meta.include
patch_target = self.get_serializer_to_patch()
for field_name, field in include_fields.items():
# Serializer constructor in the format
# ('rest_framework.serializers.CharField', <(arg1, arg2)>, <{kwarg1: val1, kwarg2: val2}>)
if isinstance(field, (tuple, list)):
if isinstance(field[1], (tuple, list)):
if len(field) == 3:
field = import_string(field[0])(*field[1], **field[2])
else:
field = import_string(field[0])(*field[1])
else:
field = import_string(field[0])(**field[1])
if isinstance(field, serializers.SerializerMethodField):
default_method_name = 'get_{field_name}'.format(field_name=field_name)
if field.method_name is None:
method_name = default_method_name
else:
method_name = field.method_name
# hack for SerializerMethodField.bind method
if field.method_name == default_method_name:
field.method_name = None
method = getattr(self.rest_meta, method_name)
t_args = [method, patch_target]
six.PY2 and t_args.append(patch_target.__class__)
setattr(patch_target, method_name, types.MethodType(*t_args))
elif isinstance(field, serializers.ListField):
# hack for ListField.__init__ method
field.child.source = None
# elif getattr(field, 'many', False): # todo: does not work when the field description is passed as a string
# # hack for `many=True`
# field.source = None
patch_target.fields[field_name] = field
for field_name in remove_fields:
patch_target.fields.pop(field_name, None)
class DynamicFieldsListSerializerMixin(RESTMetaListSerializerPatchMixin, DynamicFieldsSerializerMixin):
pass
class DynamicCreateUpdateValidateSerializerMixin(RESTMetaSerializerMixin):
"""
RUS: Mixin for dynamically modifying the create, update, and validate logic in the serializer.
"""
def get_id_attrs(self):
"""
RUS: Returns the lookup fields from the metadata.
"""
return self.rest_meta.lookup_fields if self.rest_meta is not None else RESTOptions.lookup_fields
def __init__(self, *args, **kwargs):
"""
RUS: Class constructor.
Adds create, update, and validate methods based on the rest_meta metadata.
"""
super(DynamicCreateUpdateValidateSerializerMixin, self).__init__(*args, **kwargs)
if self.rest_meta:
patch_target = self.get_serializer_to_patch()
if self.rest_meta.validators is not None:
patch_target.validators = self.rest_meta.validators
for method_name in ('create', 'update', 'validate'):
method = getattr(self.rest_meta, method_name, None)
if method is not None:
t_args = [getattr(method, '__func__', method), patch_target]
six.PY2 and t_args.append(patch_target.__class__)
setattr(patch_target, method_name, types.MethodType(*t_args))
for method_name in self.rest_meta._fields_validators:
method = getattr(self.rest_meta, method_name)
setattr(patch_target, method_name, types.MethodType(method, patch_target))
class DynamicCreateUpdateValidateListSerializerMixin(RESTMetaListSerializerPatchMixin,
DynamicCreateUpdateValidateSerializerMixin):
pass
class BasePermissionsSerializerMixin(object):
"""
RUS: Базовы миксин для проверки разрешений в сериалайзере.
"""
@staticmethod
def _get_permissions(permission_classes):
"""
ENG: Instantiates and returns the list of permissions that view requires.
RUS: Создает и возвращает список разрешений.
"""
        # todo: pass information about the call into permission_class
return [permission() for permission in permission_classes]
def permission_denied(self, request, message=None):
"""
ENG: If request is not permitted, determine what kind of exception to raise.
RUS: Возбуждает исключение если доступ запрещен, либо при отсутствии аутентификации.
"""
if not request.successful_authenticator:
raise exceptions.NotAuthenticated()
raise exceptions.PermissionDenied(detail=message)
def get_permission_classes(self, data):
        # Try to get the permission classes from the '_rest_meta' metadata, falling back to the 'view'.
return data._rest_meta.permission_classes if hasattr(data, '_rest_meta') else self.__view.permission_classes
@cached_property
def __request(self):
request = self.context.get('request', None)
assert request is not None, (
"'%s' `.__init__()` method parameter `context` should include a `request` attribute."
% self.__class__.__name__
)
return request
@cached_property
def __view(self):
return self.context.get('view')
def check_object_permissions(self, data):
permission_classes = self.get_permission_classes(data)
for permission in self._get_permissions(permission_classes):
if not permission.has_object_permission(self.__request, self.__view, data):
self.permission_denied(
self.__request, message=getattr(permission, 'message', None)
)
return permission_classes
def check_permissions(self, data):
permission_classes = self.get_permission_classes(data)
for permission in self._get_permissions(permission_classes):
if not permission.has_permission(self.__request, self.__view):
self.permission_denied(
self.__request, message=getattr(permission, 'message', None)
)
return permission_classes
class CheckPermissionsSerializerMixin(BasePermissionsSerializerMixin):
"""
Миксин проверки прав доступа на уровне сериалайзера.
"""
def __init__(self, *args, **kwargs):
super(CheckPermissionsSerializerMixin, self).__init__(*args, **kwargs)
        # If the serializer is not being created for a specific object, initialise the permissions cache
self._permissions_cache = None if self.instance and isinstance(self.instance, models.Model) else {}
def to_representation(self, data):
"""
Check permissions
RUS: Проверка разрешений. Для конкретного объекта вызываем 'check_object_permissions',
иначе 'check_permissions'. Кешуруем результат проверки по классу переданных данных 'data'
"""
if self._permissions_cache is None:
"""
Check if the request should be permitted for a given object.
Raises an appropriate exception if the request is not permitted.
"""
self.check_object_permissions(data)
else:
"""
Check if the request should be permitted for list.
Raises an appropriate exception if the request is not permitted.
"""
permission_classes = self._permissions_cache.get(data.__class__, None)
if permission_classes is None:
                permission_classes = self.check_permissions(data)
                self._permissions_cache[data.__class__] = permission_classes
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
import matplotlib.animation as animation
from collections import deque
import scipy.ndimage.filters
import serial
import sys
from pynput import keyboard
import os
import threading
import math
import random
import time
import json
from termcolor import *
import argparse
import sys
import logging
import PSD
import DWT
import neural_net
import android_connect
from functools import reduce
# Configuration Dictionary
# Values can be overridden by using command-line arguments
config = {
'com_port' : '/dev/ttyUSB0',
'baud_rate' : 9600,
'sample_time_period' : 10, # in ms
'dip_detect': False,
'ch0_dip_enabled': True,
'ch1_dip_enabled': False,
'ch0_dip_up_threshold': 1700,
'ch0_dip_down_threshold': 400,
'ch1_dip_up_threshold': 2300,
'ch1_dip_down_threshold': -200,
'x_window' : 1000,
'psd_feature_size' : 100, # feature vector size will be 3*psd_feature_size (for PSD)
'ch0_gauss_filter_sigma' : 2,
'ch1_gauss_filter_sigma' : 2,
'sigma_delta' : 0.5,
'feat_method' : 'psd',
'nn_activate' : False,
'nn_single_input': True,
'nn_cluster_name' : 'biometric1',
'nn_learning_rate' : 0.01,
'nn_learning_epochs' : 1000,
'nn_training_batch_size' : 1,
'nn_evaluating_batch_size' : 1,
'nn_current_train_label' : np.array([0,0,1,0]),
'compute_concentration_energy' : True,
'ph_number_time_window' : 10, # 10 seconds timeout window
'ch0_label' : 'ch0',
'ch1_label' : 'ch1',
'ch0_gaussed_label' : 'ch0_gaussed',
'ch1_gaussed_label' : 'ch1_gaussed',
'app_description' : 'Thought Recognition stage3 module',
}
# Globals and pre-init
# Parser for command line arguments
parser = argparse.ArgumentParser(description=config['app_description'])
# serial object for communication with stage 2 module over UART
serial_obj = None
# specify figure plotting size in inches
fig = plt.figure(figsize=(18, 3))
# subplot for ch0 data
ax_ch0 = plt.subplot2grid((2,2),(0,0))
ax_ch0.set_xlim([0,config['x_window']])
ax_ch0.set_ylim([0,4095])
# subplot for ch1 data
ax_ch1 = plt.subplot2grid((2,2),(1,0))
ax_ch1.set_xlim([0,config['x_window']])
ax_ch1.set_ylim([-100,100])
# subplot for feature set (output from feature extractor)
ax_fft = plt.subplot2grid((2,2),(0,1))
# subplot for neural net output
ax_nn = plt.subplot2grid((2,2),(1,1))
ax_nn.set_xlim([0,config['x_window']])
ax_nn.set_ylim([0,10])
# adjust figure boundaries based on selected feature extractor
if config['feat_method']=='psd':
ax_fft.set_ylim([-2,5])
ax_fft.set_xlim([0,300])
elif config['feat_method']=='dwt':
ax_fft.set_ylim([0,300])
ax_fft.set_xlim([0,300])
else:
raise KeyError("invalid feat method")
# set plot line styles
ch0_line, = ax_ch0.plot([],[], linewidth=0.5, color="k", label=config['ch0_label'])
ch1_line, = ax_ch1.plot([],[], linewidth=0.5, color="k", label=config['ch1_label'])
ch0_grad_line, = ax_ch1.plot([],[], linewidth=2, color="b", label='gradient(ch0)')
ch0_line_gaussed, = ax_ch0.plot([],[], linewidth=2, color="r", label=config['ch0_gaussed_label'])
ch1_line_gaussed, = ax_ch1.plot([],[], linewidth=1, color="r", label=config['ch1_gaussed_label'])
ch0_fft_line, = ax_fft.plot([],[], linewidth=0.75, color="g", label='ch0 feat')
ch1_fft_line, = ax_fft.plot([],[], linewidth=0.75, color="b", label='ch1 feat')
ch0_nn_line, = ax_nn.plot([],[], linewidth=2, color="g", label='ch0 out')
ch1_nn_line, = ax_nn.plot([],[], linewidth=2, color="b", label='ch1 out')
# called after all axes are added
# figure will fill the entire window and leave minimal margin
fig.tight_layout()
# plt.legend()
# Keyboard controller object used to send keystrokes during dip detection
# dip detection is used to detect sudden dips in signal due to artifacts like blinking
kboard = keyboard.Controller()
paused = False
# plot animator object
anim = None
# feature extractor output lists
feat0 = None
feat1 = None
filtered_ch0 = None
filtered_ch1 = None
# some internal control variables
new_feature = True
nn_active = False
nn_train = False
input_counter = 0
nn0=None
nn1=None
# initialise neural net model based on selected feature extractor
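# (the shape tuple passed to neural_net_keras below is assumed to describe the layer
#  sizes: input, hidden1, hidden2, output)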
if config['feat_method']=='psd':
PSD.n = config['psd_feature_size'] # number of elements in feature-vector
nn0 = neural_net.neural_net_keras(config['nn_cluster_name']+'_ch0_psd',(config['psd_feature_size']*3,500,150,config['nn_current_train_label'].shape[0]))
nn1 = neural_net.neural_net_keras(config['nn_cluster_name']+'_ch1_psd',(config['psd_feature_size']*3,500,150,config['nn_current_train_label'].shape[0]))
elif config['feat_method']=='dwt':
nn0 = neural_net.neural_net_keras(config['nn_cluster_name']+'_ch0_dwt',(300,500,100,config['nn_current_train_label'].shape[0]))
nn1 = neural_net.neural_net_keras(config['nn_cluster_name']+'_ch1_dwt',(300,500,100,config['nn_current_train_label'].shape[0]))
else:
raise KeyError("invalid feat method")
# dip detection control variables
ch0_dipped_up = 0
ch0_dipped_down = 0
ch1_dipped_up = 0
ch1_dipped_down = 0
ph_number_active = False
ph_number_state = 0
ph_number_key = 0
ph_number_timeout = 0
phone = android_connect.android_com(1)
phone.set_lockscreen_coords(304,550,788,1279,1482,1726)
with open('phone_numbers.json','r') as f:
phone_number_data = json.load(f)
# Deque containers to store incoming data
ch0_list = deque([-1]*config['x_window'])
ch1_list = deque([-1]*config['x_window'])
x_list = deque(np.arange(config['x_window'],0,-1))
x_list_np = np.array(x_list)
nn0_out = deque([-1]*config['x_window'])
nn1_out = deque([-1]*config['x_window'])
# timer time counter (in seconds)
timer_time = 0
# status variable for color checking
check_color = False
check_color_end_time = 0
color1_sum = 0
color2_sum = 0
concentration_energy = 0.0
def check_args():
''' Parse command line arguments and update config dict accordingly
'''
global config
parser.add_argument('-b','--baud-rate', help="Baud rate for serial communication. (default=%d)"%(config['baud_rate']),default=config['baud_rate'],nargs=1,metavar=('baud'))
parser.add_argument('-p','--port', help="COM port to use for serial communication. (default=%s)"%(config['com_port']),default=config['com_port'],nargs=1,metavar=('port'))
parser.add_argument('-t','--sample-time', help="Time period (in ms) for sample acquisition by ADC. (default=%d)"%(config['sample_time_period']),default=config['sample_time_period'],nargs=1,metavar=('time'))
parser.add_argument('--train', help="Operate module in training mode only", action='store_const', const=True, default=False)
parser._optionals.title = 'Arguments'
# start parsing arguments
args = parser.parse_args()
# Adjust config dict according to arguments
if type(args.port)==list:
config['com_port'] = args.port[0]
config['baud_rate'] = args.baud_rate
config['sample_time_period'] = args.sample_time
return args
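# Example invocation (illustrative only; the script name is a guess):
#     python stage3.py --port /dev/ttyUSB0 -b 9600 -t 10 --train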
def serial_init():
''' Initialise serial communication and negotiate sampling time period
'''
global serial_obj
global config
try:
logging.debug('Attempting to open {}'.format(config['com_port']))
serial_obj = serial.Serial(config['com_port'], config['baud_rate'], timeout=100)
logging.debug('Opened port {}'.format(serial_obj.name))
# # Set sampling time period
# to_send = int((config['sample_time_period']-1)/4).to_bytes(1,byteorder='big')
# for i in range(3): # send 3 times just to be sure
# serial_obj.write(to_send)
except:
logging.debug('Error opening serial port')
serial_obj = None
def serial_worker():
    ''' A separately threaded function which reads from the serial port and fills the
        data deques.
        Also processes dips in the signal, which can be caused by artifacts such as blinking.
    '''
global serial_obj
global ch0_list, ch1_list
global ch0_dipped_down, ch0_dipped_up, ch1_dipped_down, ch1_dipped_up, filtered_ch0, filtered_ch1
global config, input_counter
if serial_obj!=None:
while (1):
if not paused:
# wait for one stray packet to end
while (serial_obj.read()!=b';'):
pass
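                # packet framing: a ';' terminator followed by 8 ASCII digits,
                # the first 4 being the ch0 sample and the last 4 the ch1 sample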
read_data = serial_obj.read(8)
ch0_val = int(read_data[0:4])
ch1_val = int(read_data[4:8])
# adding to data queue
ch0_list.pop()
ch1_list.pop()
ch0_list.appendleft(ch0_val)
ch1_list.appendleft(ch1_val)
# start neural net data recording only when data fills x-window completely
if input_counter==config['x_window']-1:
logging.debug(color.cyan("x-window full"))
if input_counter<config['x_window']:
input_counter += 1
if config['dip_detect']:
# Detect dip
if (filtered_ch0[200] < config['ch0_dip_down_threshold'] and not ch0_dipped_down):
ch0_dipped_down = 1
try:
dip_down_callback(0)
except NameError:
pass
# logging.debug(color.yellow("Dip down on {}".format(config['ch0_label'])))
elif (filtered_ch0[200] > config['ch0_dip_down_threshold'] and ch0_dipped_down):
ch0_dipped_down = 0
elif (filtered_ch1[200] < config['ch1_dip_down_threshold'] and not ch1_dipped_down):
ch1_dipped_down = 1
try:
dip_down_callback(1)
except NameError:
pass
# logging.debug("Dip down on {}".format(config['ch1_label']))
elif (filtered_ch1[200] > config['ch1_dip_down_threshold'] and ch1_dipped_down):
ch1_dipped_down = 0
# Detect upward rise
if (filtered_ch0[200] > config['ch0_dip_up_threshold'] and not ch0_dipped_up):
ch0_dipped_up = 1
try:
dip_up_callback(0)
except NameError:
pass
# logging.debug(color.yellow("Dip up on {}".format(config['ch0_label'])))
elif (filtered_ch0[200] < config['ch0_dip_up_threshold'] and ch0_dipped_up):
ch0_dipped_up = 0
elif (filtered_ch1[200] > config['ch1_dip_up_threshold'] and not ch1_dipped_up):
ch1_dipped_up = 1
try:
dip_up_callback(1)
except NameError:
pass
# logging.debug("Dip up on {}".format(config['ch1_label']))
elif (filtered_ch1[200] < config['ch1_dip_up_threshold'] and ch1_dipped_up):
ch1_dipped_up = 0
# sleep to sync incoming and outgoing datarates
# time.sleep(config['sample_time_period']/1000)
else:
# serial object not defined, running in debug mode
logging.debug('serial object not defined, running in debug mode')
while (1):
if not paused:
ch0_list.pop()
ch1_list.pop()
ch0_list.appendleft(math.floor(random.random()*4096))
ch1_list.appendleft(math.floor(random.random()*4096))
# sleep to sync incoming and outgoing datarates
# time.sleep(config['sample_time_period']/1000)
def plot_init():
''' Set initial data to blank, or else a spike is observed in plot
'''
global ch0_line, ch1_line
ch0_line.set_data([],[])
ch1_line.set_data([],[])
return (ch0_line,ch1_line)
def plot_worker(frame):
''' Calculate ch0 and ch1 filtered data. Then calculate feature vectors according to the method
selected
Raises:
KeyError -- Error raised if feat_method set wrong in config dict
'''
global ch0_line, ch1_line, ch0_line_gaussed, ch1_line_gaussed, ch0_fft_line, ch1_fft_line, ch0_grad_line
global ch0_list, ch1_list
global x_list, new_feature
global feat0, feat1, filtered_ch0, filtered_ch1
if not paused:
ch0_line.set_data(x_list,ch0_list)
ch1_line.set_data(x_list,ch1_list)
# Gaussian filtering
gauss_inp_ch0 = np.array(ch0_list)
filtered_ch0 = scipy.ndimage.filters.gaussian_filter1d(gauss_inp_ch0, sigma=config['ch0_gauss_filter_sigma'])
gauss_inp_ch1 = np.array(ch1_list)
filtered_ch1 = scipy.ndimage.filters.gaussian_filter1d(gauss_inp_ch1, sigma=config['ch1_gauss_filter_sigma'])
ch0_line_gaussed.set_data(x_list_np,filtered_ch0)
ch1_line_gaussed.set_data(x_list_np,filtered_ch1)
# ==========================================================================================
# # fft plot
# N = np.arange(config['x_window'])
# fft0 = np.fft.fft(filtered_ch0)
# fft1 = np.fft.fft(filtered_ch1)
# freq = np.fft.fftfreq(config['x_window'],d=(config['sample_time_period']/1000))*2000
# ch0_fft_line.set_data(freq, fft0.real)
# ch1_fft_line.set_data(freq, fft1.real)
# ==========================================================================================
if config['feat_method']=='psd':
# PSD extract
feat0 = PSD.PSD_extractor(ch0_list)
feat1 = PSD.PSD_extractor(ch1_list)
N = np.arange(config['psd_feature_size']*3)
ch0_fft_line.set_data(N, np.array(feat0))
ch1_fft_line.set_data(N, np.array(feat1))
# ==========================================================================================
elif config['feat_method']=='dwt':
# DWT extract
feat0 = DWT.DWT_extractor(ch0_list)
feat1 = DWT.DWT_extractor(ch1_list)
N = np.arange(len(feat0))
ch0_fft_line.set_data(N, np.array(feat0))
ch1_fft_line.set_data(N, np.array(feat1))
# ==========================================================================================
else:
raise KeyError("invalid feat method")
new_feature = True
if config['compute_concentration_energy']:
concentration_energy = np.trapz(feat0[20:30],dx=1)#/np.trapz(feat0[8:12],dx=1)
print(concentration_energy)
time.sleep(config['sample_time_period']/1000)
return ch0_line, ch1_line, ch0_line_gaussed, ch1_line_gaussed, ch0_fft_line, ch1_fft_line
def neural_net_worker():
''' Start neural net feed forward if the x_window is filled with incoming data
Backpropagate features extracted from current x_window when key pressed
'''
global nn0, nn1, feat0 ,feat1
global config
global nn0_out, nn1_out
global new_feature, nn_train, input_counter
global x_list, x_list_np, ch0_nn_line, ch1_nn_line, ch0_grad_line, ch0_list
global check_color, check_color_end_time, timer_time, color1_sum, color2_sum, concentration_energy
while (1):
# start neural net only when x-window is completely filled
if new_feature and (not paused) and input_counter==config['x_window']:
            # decode the class-probability output into a single scalar:
            # a probability-weighted sum of (class index + 1)
n0_p = nn0.predict([feat0])[0].tolist()
n1_p = nn1.predict([feat1])[0].tolist()
n0__ = reduce(lambda val,x: val+(x[0]+1)*x[1], enumerate(n0_p), 0)
n1__ = reduce(lambda val,x: val+(x[0]+1)*x[1], enumerate(n1_p), 0)
print(n0__,n0_p)
# adding neural net output data to the end of queue
nn0_out.pop()
nn1_out.pop()
nn0_out.appendleft(n0__)
nn1_out.appendleft(n1__)
new_feature = False
# plot neural net output
ch0_nn_line.set_data(x_list, np.array(nn0_out))
ch1_nn_line.set_data(x_list, np.array(nn1_out))
# check color
# if check_color:
# check_color_end_time = timer_time + 2
# check_color = False
# color1_sum = 0
# color2_sum = 0
print("BLUE={}, RED={}, GREEN={}".format(np.round(n0_p[0]*100),np.round(n0_p[1]*100),np.round(n0_p[2]*100)))
else:
pass
def key_listener_worker():
# Engage Keyboard listener
with keyboard.Listener(on_press=key_press_callback, on_release=key_release_callback) as listener:
listener.join()
# def dip_down_callback(ch):
# global ph_number_state, ph_number_timeout, timer_time, ph_number_time_window, config, ph_number_key
# if (ch==0) and config['ch0_dip_enabled']:
# # Dip in channel 0
# # kboard.press(keyboard.Key.space)
# # kboard.release(keyboard.Key.space)
# if ph_number_state == 0:
# ph_number_timeout = timer_time+config['ph_number_time_window']
# ph_number_state = 1
# print("dip down, state=1")
#         if ph_number_state>=1 and ph_number_state<4:
#!/usr/bin/env python
# TuftsCompArchLab/HotGauge - examples/floorplans.py
import sys
import os
import json
import copy
from collections import defaultdict
import itertools
import math
import logging
from HotGauge.utils import Floorplan, FloorplanElement
from HotGauge.configuration import mcpat_to_flp_name, MISSING_POWER_INFO, DERIVED_UNITS, \
REMOVED_UNITS, NODE_LENGTH_FACTORS
# TODO: These values are hard coded due to sim configuration issues
L3_CORRECTION_FACTOR = 4
# These values are used for scaling MCPAT's output
L2_AREA_MULTIPLIER = 2.908127478180562
# Miscellaneous constants
PADDING_SIZE = 500 # um
# Module-level logger (used when replace() drops a parent unit that still has extra area)
LOGGER = logging.getLogger(__name__)
def get_parents(unit):
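    # Yield every ancestor prefix of a '/'-separated unit name,
    # e.g. 'A/B/C' yields 'A' and then 'A/B'.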
heir_indices = [i for i,ch in enumerate(unit) if ch=='/']
for index in heir_indices:
yield unit[:index]
def load_14nm_stats(stats_file, num_cores=8):
# Loaded units are in mm**2 and converted to um**2
stats = json.load(open(stats_file, 'r'))
stats = {unit: {k: (1000**2)*v for k,v in areas.items()} for unit, areas in stats.items()}
# Make L3 part of each core (and quadruple its size??)
L3_area = stats['Processor']['Total L3s/Area'] * L3_CORRECTION_FACTOR
stats['Core']['L3/Area'] = L3_area / num_cores
stats['Core']['Area'] += stats['Core']['L3/Area']
# Make RBB Area instead of Area Overhead
unit_name = 'Execution Unit/Results Broadcast Bus/Area'
stats['Core'][unit_name] = stats['Core'][unit_name + ' Overhead']
del(stats['Core'][unit_name + ' Overhead'])
# Make L2 roughly 1/3 of size of core
stats['Core']['Area'] -= stats['Core']['L2/Area']
stats['Core']['L2/Area'] *= L2_AREA_MULTIPLIER
stats['Core']['Area'] += stats['Core']['L2/Area']
# add derived units (e.g. AVX512)
for unit_name, components in DERIVED_UNITS.items():
unit_area = 0.0
for component in components:
component_area= stats['Core']['{}/Area'.format(component.base)]
unit_area += component_area * component.ratio
stats['Core']['{}/Area'.format(unit_name)] = unit_area
stats['Core']['Area'] += unit_area
        # Also propagate the area up to the parents!
for parent_name in get_parents(unit_name):
parent_area_label = '{}/Area'.format(parent_name)
if parent_area_label in stats['Core']:
stats['Core'][parent_area_label] += unit_area
else:
stats['Core'][parent_area_label] = unit_area
# Remove units that are relocated/removed
for unit_name in REMOVED_UNITS:
unit_label = '{}/Area'.format(unit_name)
unit_area = stats['Core'][unit_label]
        # Also propagate the removed area up to the parents!
for parent_name in get_parents(unit_name):
parent_area_label = '{}/Area'.format(parent_name)
stats['Core'][parent_area_label] -= unit_area
# TODO: add this line back in! It's a bug from original HPCA submission
# stats['Core']['Area'] -= unit_area
del stats['Core'][unit_label]
# Delete unused units
for unit in ['Processor', 'NUCA', 'BUSES']:
del(stats[unit])
# Return the stats for a single core
assert len(stats) == 1, 'Only expected "Core" to be left'
return stats['Core']
def split_levels(stats):
stats_per_level = defaultdict(dict)
for k,v in stats.items():
tokens = k.split('/')
hierarchy, area = tokens[:-1], tokens[-1]
assert area == 'Area', 'Expected only Area stats but got {}'.format(area)
hierarchy_level = len(hierarchy)
# If top level, call it core
if hierarchy_level == 0:
hierarchy = ['Core']
# Make L2 and L3 part of the top level
if hierarchy_level == 1:
if hierarchy[0] in ['L2', 'L3']:
hierarchy_level = 0
unit_name = '/'.join(hierarchy)
stats_per_level[hierarchy_level][unit_name] = v
# Readjust the top level to account for adding L2 and L3
stats_per_level[0]['Core'] -= stats_per_level[0]['L2']
stats_per_level[0]['Core'] -= stats_per_level[0]['L3']
return stats_per_level
def get_base_floorplan(split_level_stats):
# The total of Core, L2, L3, and possibly more
total_area = sum(split_level_stats[0].values())
cache_area = split_level_stats[0]['L2'] + split_level_stats[0]['L3']
core_area = total_area - cache_area
# ratio of non-cache portion of die from IC photos
core_aspect_ratio = 4.0 / 6.0
core_width = core_area ** 0.5 / (core_aspect_ratio ** 0.5)
core_height = core_area / core_width
core = FloorplanElement('Core', core_width, core_height, 0.0, 0.0)
flp = Floorplan([core], frmt='3D-ICE')
flp.auto_place_element('L2', split_level_stats[0]['L2'], where='right')
flp.auto_place_element('L3', split_level_stats[0]['L3'], where='above')
return flp
def add_pipeline(flp, pipeline_stats):
total_area = sum(pipeline_stats.values())
EX_area = pipeline_stats['Execution Unit']
AVX_FPU_area = pipeline_stats['AVX_FPU']
not_EX_area = total_area - EX_area - AVX_FPU_area
# Place EX on the end, double wide
flp = replace(flp, 'Core', [('Execution Unit', EX_area), ('not_EX', not_EX_area)], vertical=False)
cols = [['Renaming Unit', 'Instruction Fetch Unit'],['Load Store Unit', 'Memory Management Unit']]
col_sizes = []
for col_els in cols:
col_sizes.append(sum(pipeline_stats[el] for el in col_els))
col_flp_els = [('not_EX{}'.format(idx), col_sizes[idx]) for idx in range(len(cols))]
flp = replace(flp, 'not_EX', col_flp_els, extra='none')
for col_idx, col_els in enumerate(cols):
new_els = [(el, pipeline_stats[el]) for el in col_els]
flp = replace(flp, 'not_EX{}'.format(col_idx), new_els, extra='none', vertical=False)
flp = replace(flp, 'Core', [('AVX_FPU', AVX_FPU_area)], vertical=False)
return flp
def replace(flp, unit, subunit_sizes, vertical=True, extra='before'):
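    # Replace `unit` in the floorplan with the elements in `subunit_sizes`, stacking them
    # along y (vertical=True) or along x (vertical=False) inside the parent's footprint.
    # `extra` controls what happens to leftover parent area: 'before'/'after' keep it as a
    # parent-named element, 'pad' spreads it between children, 'none'/'NONE' drop it.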
unit_idx = ([e.name for e in flp.elements]).index(unit)
unit = flp.elements[unit_idx]
del flp.elements[unit_idx]
total_size = sum(subunit[1] for subunit in subunit_sizes)
x,y = unit.minx, unit.miny
new_els = []
extra_area = unit.area - total_size
# Make sure the children are no larger than 0.1um**2 larger than parent
# Branch Predictor children are slightly larger than Branch Predictor
assert total_size <= unit.area + 1e-1
    # If there is more than 0.5um**2 missing, occupy the extra space
if extra_area > 5e-1:
if extra == 'pad': # Pad between each element
padding = extra_area / (len(subunit_sizes) + 1)
all_units = subunit_sizes
elif extra.lower() == 'none': # Don't pad
padding = 0.0
all_units = subunit_sizes
if extra == 'NONE':
msg = 'Parent unit, {}, has {} extra area, but is being removed'.format(unit.name, extra_area)
LOGGER.warn(msg)
else:
assert extra_area < 1e-8
elif extra == 'before': # Place extra (parent) element first
padding = 0.0
all_units = [(unit.name, extra_area)] + subunit_sizes
elif extra == 'after': # Place extra (parent) element last
padding = 0.0
all_units = subunit_sizes + [(unit.name, extra_area)]
else: # No need to pad
padding = 0.0
all_units = subunit_sizes
for subunit, area in all_units:
if vertical:
el_w = unit.width
el_h = area / el_w
y += padding / el_w
new_els.append(FloorplanElement(subunit, el_w, el_h, x, y))
y += el_h
else:
el_h = unit.height
el_w = area / el_h
x += padding / el_h
new_els.append(FloorplanElement(subunit, el_w, el_h, x, y))
x += el_w
flp.elements.extend(new_els)
return flp
def add_level2(flp, split_level_stats2):
avx_order = ['Floating Point Units', 'AVX512 Accelerator']
r_order = ['Int Front End RAT', 'FP Front End RAT', 'Free List']
ls_order = ['LoadQ', 'StoreQ', 'Data Cache']
mmu_order = ['Itlb', 'Dtlb']
# TODO: After adding AVX, should the IALU be moved too?
ex_order = ['Instruction Scheduler', 'Register Files',
'Results Broadcast Bus',
'Complex ALUs', 'Integer ALUs']
if_order = ['Branch Target Buffer', 'Branch Predictor',
'Instruction Decoder',
'Instruction Buffer', 'Instruction Cache']
ordering = {
'Renaming Unit' : r_order,
'Load Store Unit': ls_order,
'Memory Management Unit': mmu_order,
'Execution Unit': ex_order,
'Instruction Fetch Unit': if_order,
'AVX_FPU': avx_order
}
for el, sub_els in ordering.items():
# Force removal of missing elements
kwargs = {'extra' : 'NONE'} if el in MISSING_POWER_INFO else {}
if el == 'Instruction Fetch Unit':
kwargs['vertical'] = False
flp = replace(flp, el.split('/')[-1],
[(sub_el, split_level_stats2['{}/{}'.format(el, sub_el)])
for sub_el in sub_els], **kwargs)
return flp
# TODO: After adding AVX, should the RF be moved?
def add_level3(flp, split_level_stats3):
rf_order = ['Integer RF', 'Floating Point RF']
rf_els = [(sub_el, split_level_stats3['Execution Unit/Register Files/{}'.format(sub_el)]) for sub_el in rf_order]
flp = replace(flp, 'Register Files', rf_els, extra='none')
bp_order = ['Global Predictor', 'L2_Local Predictor',
'L1_Local Predictor', 'Chooser', 'RAS']
bp_els = [(sub_el, split_level_stats3['Instruction Fetch Unit/Branch Predictor/{}'.format(sub_el)]) for sub_el in bp_order]
flp = replace(flp, 'Branch Predictor', bp_els)
is_order = ['Instruction Window', 'FP Instruction Window', 'ROB']
is_els = [(sub_el, split_level_stats3['Execution Unit/Instruction Scheduler/{}'.format(sub_el)]) for sub_el in is_order]
flp = replace(flp, 'Instruction Scheduler', is_els)
return flp
def CORE_SUBSITUTE(core_flp, name):
width = core_flp.width
height = core_flp.height
return Floorplan([FloorplanElement(name, width, height, 0, 0)])
def make_7_core_processor(core_flp):
return make_processor(core_flp, 3, 3, {(1,0):'IMC',(1,2):'SoC'})
def make_processor(core_flp, width, length, core_substitutes):
processor = Floorplan([], frmt=core_flp.frmt)
core_idx=0
for y,x in itertools.product(range(width),range(length)):
if (x,y) in core_substitutes:
substitute_name = core_substitutes[(x,y)]
loc_n = CORE_SUBSITUTE(core_flp, substitute_name)
else:
loc_n = core_flp.create_numbered_instance(core_idx)
if x%2==1:
loc_n.mirror_horizontal()
core_idx+=1
loc_n.minx = x*core_flp.width
loc_n.miny = y*core_flp.height
processor += loc_n
return processor
def add_padding(flp, padding_width):
width = flp.width
flp.auto_place_element('N', width*padding_width, where='above')
flp.auto_place_element('S', width*padding_width, where='below')
height = flp.height
flp.auto_place_element('E', height*padding_width, where='right')
flp.auto_place_element('W', height*padding_width, where='left')
def add_IO(flp, padding_width):
width = flp.width
flp.auto_place_element('IO_N', width*padding_width, where='above')
flp.auto_place_element('IO_S', width*padding_width, where='below')
def generate_floorplans(output_dir, fname_frmt, flp, padding_width=PADDING_SIZE):
"""Saves floorplan for all tech nodes with all formats with added padding"""
old_frmt = flp.frmt
flp.frmt = '3D-ICE'
for node in ['14nm', '10nm', '7nm']:
core_flp = flp * NODE_LENGTH_FACTORS[node]
for el in core_flp.elements:
el.name = mcpat_to_flp_name(el.name)
# The core is now complete. Generate single core and 7 core flps, with padding
processor_flp = make_7_core_processor(core_flp)
if padding_width:
add_padding(core_flp, padding_width)
add_IO(processor_flp, padding_width)
core_flp.reset_to_origin()
processor_flp.reset_to_origin()
for frmt in ['3D-ICE', 'hotspot']:
core_flp.frmt = frmt
core_flp.to_file(os.path.join(output_dir, fname_frmt.format(frmt=frmt, node=node, suffix='core')))
processor_flp.frmt = frmt
processor_flp.to_file(os.path.join(output_dir, fname_frmt.format(frmt=frmt, node=node, suffix='7core')))
# Also save them in 3D-ICE format with power strs
core_flp.frmt = '3D-ICE'
core_flp.to_file(os.path.join(output_dir,
fname_frmt.format(frmt=core_flp.frmt+'_template',
node=node, suffix='core')
), element_powers=True)
processor_flp.frmt = '3D-ICE'
processor_flp.to_file(os.path.join(output_dir,
fname_frmt.format(frmt=processor_flp.frmt+'_template',
node=node, suffix='7core')
), element_powers=True)
flp.frmt = old_frmt
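# A minimal sketch (not part of the original file) of how the helpers above are assumed
# to compose into a floorplan-generation pipeline; the stats filename, output directory
# and filename format are hypothetical:
#
#     stats = load_14nm_stats('mcpat_area_stats.json')
#     levels = split_levels(stats)
#     flp = get_base_floorplan(levels)
#     flp = add_pipeline(flp, levels[1])
#     flp = add_level2(flp, levels[2])
#     flp = add_level3(flp, levels[3])
#     generate_floorplans('output', '{node}_{suffix}_{frmt}.flp', flp)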
def scale_units(configs):
def scale_fn(stats):
new_stats = dict(stats)
for name, factor in configs:
area_delta = stats['{}/Area'.format(name)] * (factor-1.0)
new_stats['{}/Area'.format(name)] += area_delta
= other.basis_matrix()
psi = X * phi
# Now psi is a matrix that defines an R-module morphism from other to some
# R-module, whose kernel defines the long sought for intersection of self and other.
L = psi.integer_kernel()
# Finally the kernel of the intersection has basis the linear combinations of
# the basis of other given by a basis for L.
G = L.basis_matrix() * other.basis_matrix()
return other.span(G.rows())
# dispense with the three easy cases
if self == self.ambient_vector_space():
return other
elif other == other.ambient_vector_space():
return self
elif self.dimension() == 0 or other.dimension() == 0:
return self.zero_submodule()
# standard algorithm for computing intersection of general subspaces
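        # Vectors v in the left kernel of S = [A1; A2] satisfy
        # v[:n]*A1 + v[n:]*A2 = 0, so v[:n]*A1 lies in both spans; these
        # linear combinations therefore span the intersection.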
if self.dimension() <= other.dimension():
V1 = self; V2 = other
else:
V1 = other; V2 = self
A1 = V1.basis_matrix()
A2 = V2.basis_matrix()
S = A1.stack(A2)
K = S.kernel()
n = int(V1.dimension())
B = [A1.linear_combination_of_rows(v.list()[:n]) for v in K.basis()]
return self.ambient_vector_space().submodule(B, check=False)
def is_subspace(self, other):
"""
True if this vector space is a subspace of other.
EXAMPLES::
sage: V = VectorSpace(QQ,3)
sage: W = V.subspace([V.gen(0), V.gen(0) + V.gen(1)])
sage: W2 = V.subspace([V.gen(1)])
sage: W.is_subspace(V)
True
sage: W2.is_subspace(V)
True
sage: W.is_subspace(W2)
False
sage: W2.is_subspace(W)
True
"""
return self.is_submodule(other)
def span(self, gens, base_ring=None, check=True, already_echelonized=False):
"""
Return the K-span of the given list of gens, where K is the
base field of self or the user-specified base_ring. Note that
this span is a subspace of the ambient vector space, but need
not be a subspace of self.
INPUT:
- ``gens`` - list of vectors
- ``check`` - bool (default: True): whether or not to
coerce entries of gens into base field
- ``already_echelonized`` - bool (default: False):
set this if you know the gens are already in echelon form
EXAMPLES::
sage: V = VectorSpace(GF(7), 3)
sage: W = V.subspace([[2,3,4]]); W
Vector space of degree 3 and dimension 1 over Finite Field of size 7
Basis matrix:
[1 5 2]
sage: W.span([[1,1,1]])
Vector space of degree 3 and dimension 1 over Finite Field of size 7
Basis matrix:
[1 1 1]
TESTS::
sage: V = FreeModule(RDF,3)
sage: W = V.submodule([V.gen(0)])
sage: W.span([V.gen(1)], base_ring=GF(7))
Vector space of degree 3 and dimension 1 over Finite Field of size 7
Basis matrix:
[0 1 0]
sage: v = V((1, pi, e)); v
(1.0, 3.14159265359, 2.71828182846)
sage: W.span([v], base_ring=GF(7))
Traceback (most recent call last):
...
ValueError: Argument gens (= [(1.0, 3.14159265359, 2.71828182846)]) is not compatible with base_ring (= Finite Field of size 7).
sage: W = V.submodule([v])
sage: W.span([V.gen(2)], base_ring=GF(7))
Vector space of degree 3 and dimension 1 over Finite Field of size 7
Basis matrix:
[0 0 1]
"""
if is_FreeModule(gens):
gens = gens.gens()
if base_ring is None or base_ring == self.base_ring():
return FreeModule_submodule_field(
self.ambient_module(), gens=gens, check=check, already_echelonized=already_echelonized)
else:
try:
M = self.ambient_module().change_ring(base_ring)
except TypeError:
raise ValueError, \
"Argument base_ring (= %s) is not compatible with the base field (= %s)." % (base_ring, self.base_field() )
try:
return M.span(gens)
except TypeError:
raise ValueError, \
"Argument gens (= %s) is not compatible with base_ring (= %s)." % (gens, base_ring)
def span_of_basis(self, basis, base_ring=None, check=True, already_echelonized=False):
r"""
Return the free K-module with the given basis, where K is the base
field of self or user specified base_ring.
Note that this span is a subspace of the ambient vector space, but
need not be a subspace of self.
INPUT:
- ``basis`` - list of vectors
- ``check`` - bool (default: True): whether or not to
coerce entries of gens into base field
- ``already_echelonized`` - bool (default: False):
set this if you know the gens are already in echelon form
EXAMPLES::
sage: V = VectorSpace(GF(7), 3)
sage: W = V.subspace([[2,3,4]]); W
Vector space of degree 3 and dimension 1 over Finite Field of size 7
Basis matrix:
[1 5 2]
sage: W.span_of_basis([[2,2,2], [3,3,0]])
Vector space of degree 3 and dimension 2 over Finite Field of size 7
User basis matrix:
[2 2 2]
[3 3 0]
The basis vectors must be linearly independent or an
ArithmeticError exception is raised.
::
sage: W.span_of_basis([[2,2,2], [3,3,3]])
Traceback (most recent call last):
...
ValueError: The given basis vectors must be linearly independent.
"""
if is_FreeModule(basis):
basis = basis.gens()
if base_ring is None:
return FreeModule_submodule_with_basis_field(
self.ambient_module(), basis=basis, check=check, already_echelonized=already_echelonized)
else:
try:
M = self.change_ring(base_ring)
except TypeError:
raise ValueError, \
"Argument base_ring (= %s) is not compatible with the base field (= %s)." % (
base_ring, self.base_field() )
try:
return M.span_of_basis(basis)
except TypeError:
raise ValueError, \
"Argument basis (= %s) is not compatible with base_ring (= %s)." % (basis, base_ring)
def subspace(self, gens, check=True, already_echelonized=False):
"""
Return the subspace of self spanned by the elements of gens.
INPUT:
- ``gens`` - list of vectors
- ``check`` - bool (default: True) verify that gens
are all in self.
- ``already_echelonized`` - bool (default: False) set
to True if you know the gens are in Echelon form.
EXAMPLES:
First we create a 1-dimensional vector subspace of an
ambient `3`-dimensional space over the finite field of
order `7`.
::
sage: V = VectorSpace(GF(7), 3)
sage: W = V.subspace([[2,3,4]]); W
Vector space of degree 3 and dimension 1 over Finite Field of size 7
Basis matrix:
[1 5 2]
Next we create an invalid subspace, but it's allowed since
``check=False``. This is just equivalent to computing
the span of the element.
::
sage: W.subspace([[1,1,0]], check=False)
Vector space of degree 3 and dimension 1 over Finite Field of size 7
Basis matrix:
[1 1 0]
With ``check=True`` (the default) the mistake is
correctly detected and reported with an
``ArithmeticError`` exception.
::
sage: W.subspace([[1,1,0]], check=True)
Traceback (most recent call last):
...
ArithmeticError: Argument gens (= [[1, 1, 0]]) does not generate a submodule of self.
"""
return self.submodule(gens, check=check, already_echelonized=already_echelonized)
def subspaces(self, dim):
"""
Iterate over all subspaces of dimension dim.
INPUT:
- ``dim`` - int, dimension of subspaces to be
generated
EXAMPLE::
sage: V = VectorSpace(GF(3), 5)
sage: len(list(V.subspaces(0)))
1
sage: len(list(V.subspaces(1)))
121
sage: len(list(V.subspaces(2)))
1210
sage: len(list(V.subspaces(3)))
1210
sage: len(list(V.subspaces(4)))
121
sage: len(list(V.subspaces(5)))
1
::
sage: V = VectorSpace(GF(3), 5)
sage: V = V.subspace([V([1,1,0,0,0]),V([0,0,1,1,0])])
sage: list(V.subspaces(1))
[Vector space of degree 5 and dimension 1 over Finite Field of size 3
Basis matrix:
[1 1 0 0 0],
Vector space of degree 5 and dimension 1 over Finite Field of size 3
Basis matrix:
[1 1 1 1 0],
Vector space of degree 5 and dimension 1 over Finite Field of size 3
Basis matrix:
[1 1 2 2 0],
Vector space of degree 5 and dimension 1 over Finite Field of size 3
Basis matrix:
[0 0 1 1 0]]
"""
if not self.base_ring().is_finite():
raise RuntimeError("Base ring must be finite.")
# First, we select which columns will be pivots:
from sage.combinat.subset import Subsets
BASE = self.basis_matrix()
for pivots in Subsets(range(self.dimension()), dim):
MAT = sage.matrix.matrix_space.MatrixSpace(self.base_ring(), dim,
self.dimension(), sparse = self.is_sparse())()
free_positions = []
for i in range(dim):
MAT[i, pivots[i]] = 1
for j in range(pivots[i]+1,self.dimension()):
if j not in pivots:
free_positions.append((i,j))
# Next, we fill in those entries that are not
# determined by the echelon form alone:
num_free_pos = len(free_positions)
ENTS = VectorSpace(self.base_ring(), num_free_pos)
for v in ENTS:
for k in range(num_free_pos):
MAT[free_positions[k]] = v[k]
# Finally, we have to multiply by the basis matrix
# to take corresponding linear combinations of the basis
yield self.subspace((MAT*BASE).rows())
def subspace_with_basis(self, gens, check=True, already_echelonized=False):
"""
Same as ``self.submodule_with_basis(...)``.
EXAMPLES:
We create a subspace with a user-defined basis.
::
sage: V = VectorSpace(GF(7), 3)
sage: W = V.subspace_with_basis([[2,2,2], [1,2,3]]); W
Vector space of degree 3 and dimension 2 over Finite Field of size 7
User basis matrix:
[2 2 2]
[1 2 3]
We then create a subspace of the subspace with user-defined basis.
::
sage: W1 = W.subspace_with_basis([[3,4,5]]); W1
Vector space of degree 3 and dimension 1 over Finite Field of size 7
        User basis matrix:
        [3 4 5]
"""
usernames = request.GET.get('username')
user_email = request.GET.get('email')
search_usernames = []
if usernames:
search_usernames = usernames.strip(',').split(',')
elif user_email:
user_email = user_email.strip('')
try:
user = User.objects.get(email=user_email)
except (UserNotFound, User.DoesNotExist):
return Response(status=status.HTTP_404_NOT_FOUND)
search_usernames = [user.username]
try:
account_settings = get_account_settings(
request, search_usernames, view=request.query_params.get('view'))
except UserNotFound:
return Response(status=status.HTTP_404_NOT_FOUND)
return Response(account_settings)
def search_emails(self, request):
"""
POST /api/user/v1/accounts/search_emails
Content Type: "application/json"
{
"emails": ["<EMAIL>", "<EMAIL>"]
}
Response:
[
{
"username": "edx",
"email": "<EMAIL>",
"id": 3,
},
{
"username": "staff",
"email": "<EMAIL>",
"id": 8,
}
]
"""
if not request.user.is_staff:
return Response(
{
'developer_message': 'not_found',
'user_message': 'Not Found'
},
status=status.HTTP_404_NOT_FOUND
)
try:
user_emails = request.data['emails']
except KeyError as error:
error_message = f'{error} field is required'
return Response(
{
'developer_message': error_message,
'user_message': error_message
},
status=status.HTTP_400_BAD_REQUEST
)
users = User.objects.filter(email__in=user_emails)
data = UserSearchEmailSerializer(users, many=True).data
return Response(data)
def retrieve(self, request, username):
"""
GET /api/user/v1/accounts/{username}/
"""
try:
account_settings = get_account_settings(
request, [username], view=request.query_params.get('view'))
except UserNotFound:
return Response(status=status.HTTP_404_NOT_FOUND)
return Response(account_settings[0])
def partial_update(self, request, username):
"""
PATCH /api/user/v1/accounts/{username}/
Note that this implementation is the "merge patch" implementation proposed in
https://tools.ietf.org/html/rfc7396. The content_type must be "application/merge-patch+json" or
else an error response with status code 415 will be returned.
"""
if request.content_type != MergePatchParser.media_type:
raise UnsupportedMediaType(request.content_type)
try:
with transaction.atomic():
update_account_settings(request.user, request.data, username=username)
account_settings = get_account_settings(request, [username])[0]
except UserNotAuthorized:
return Response(status=status.HTTP_403_FORBIDDEN)
except UserNotFound:
return Response(status=status.HTTP_404_NOT_FOUND)
except AccountValidationError as err:
return Response({"field_errors": err.field_errors}, status=status.HTTP_400_BAD_REQUEST)
except AccountUpdateError as err:
return Response(
{
"developer_message": err.developer_message,
"user_message": err.user_message
},
status=status.HTTP_400_BAD_REQUEST
)
return Response(account_settings)
class NameChangeView(APIView):
"""
Request a profile name change. This creates a PendingNameChange to be verified later,
rather than updating the user's profile name directly.
"""
authentication_classes = (JwtAuthentication, SessionAuthentication,)
permission_classes = (permissions.IsAuthenticated,)
def post(self, request):
"""
POST /api/user/v1/accounts/name_change/
Example request:
{
"name": "<NAME>"
}
"""
user = request.user
new_name = request.data.get('name', None)
rationale = f'Name change requested through account API by {user.username}'
serializer = PendingNameChangeSerializer(data={'new_name': new_name})
if serializer.is_valid():
pending_name_change = do_name_change_request(user, new_name, rationale)[0]
if pending_name_change:
return Response(status=status.HTTP_201_CREATED)
else:
return Response(
'The name given was identical to the current name.',
status=status.HTTP_400_BAD_REQUEST
)
return Response(status=status.HTTP_400_BAD_REQUEST, data=serializer.errors)
class AccountDeactivationView(APIView):
"""
Account deactivation viewset. Currently only supports POST requests.
Only admins can deactivate accounts.
"""
authentication_classes = (JwtAuthentication, )
permission_classes = (permissions.IsAuthenticated, CanDeactivateUser)
def post(self, request, username):
"""
POST /api/user/v1/accounts/{username}/deactivate/
Marks the user as having no password set for deactivation purposes.
"""
_set_unusable_password(User.objects.get(username=username))
return Response(get_account_settings(request, [username])[0])
class DeactivateLogoutView(APIView):
"""
POST /api/user/v1/accounts/deactivate_logout/
{
"password": "<PASSWORD>",
}
**POST Parameters**
A POST request must include the following parameter.
* password: Required. The current password of the user being deactivated.
**POST Response Values**
If the request does not specify a username or submits a username
for a non-existent user, the request returns an HTTP 404 "Not Found"
response.
If a user who is not a superuser tries to deactivate a user,
the request returns an HTTP 403 "Forbidden" response.
If the specified user is successfully deactivated, the request
returns an HTTP 204 "No Content" response.
If an unanticipated error occurs, the request returns an
HTTP 500 "Internal Server Error" response.
Allows an LMS user to take the following actions:
- Change the user's password permanently to Django's unusable password
- Log the user out
- Create a row in the retirement table for that user
"""
authentication_classes = (JwtAuthentication, SessionAuthentication, )
permission_classes = (permissions.IsAuthenticated, )
def post(self, request):
"""
POST /api/user/v1/accounts/deactivate_logout/
Marks the user as having no password set for deactivation purposes,
and logs the user out.
"""
user_model = get_user_model()
try:
# Get the username from the request and check that it exists
verify_user_password_response = self._verify_user_password(request)
if verify_user_password_response.status_code != status.HTTP_204_NO_CONTENT:
return verify_user_password_response
with transaction.atomic():
user_email = request.user.email
create_retirement_request_and_deactivate_account(request.user)
try:
# Send notification email to user
site = Site.objects.get_current()
notification_context = get_base_template_context(site)
notification_context.update({'full_name': request.user.profile.name})
language_code = request.user.preferences.model.get_value(
request.user,
LANGUAGE_KEY,
default=settings.LANGUAGE_CODE
)
notification = DeletionNotificationMessage().personalize(
recipient=Recipient(lms_user_id=0, email_address=user_email),
language=language_code,
user_context=notification_context,
)
ace.send(notification)
except Exception as exc:
log.exception('Error sending out deletion notification email')
raise
# Log the user out.
logout(request)
return Response(status=status.HTTP_204_NO_CONTENT)
except KeyError:
log.exception(f'Username not specified {request.user}')
return Response('Username not specified.', status=status.HTTP_404_NOT_FOUND)
except user_model.DoesNotExist:
log.exception(f'The user "{request.user.username}" does not exist.')
return Response(
f'The user "{request.user.username}" does not exist.', status=status.HTTP_404_NOT_FOUND
)
except Exception as exc: # pylint: disable=broad-except
log.exception(f'500 error deactivating account {exc}')
return Response(str(exc), status=status.HTTP_500_INTERNAL_SERVER_ERROR)
def _verify_user_password(self, request):
"""
        Verify that the logged-in user has submitted the correct password
        for a major account change (for example, retiring this user's account).
Args:
request (HttpRequest): A request object where the password should be included in the POST fields.
"""
try:
self._check_excessive_login_attempts(request.user)
user = authenticate(username=request.user.username, password=request.POST['password'], request=request)
if user:
if LoginFailures.is_feature_enabled():
LoginFailures.clear_lockout_counter(user)
return Response(status=status.HTTP_204_NO_CONTENT)
else:
self._handle_failed_authentication(request.user)
except AuthFailedError as err:
log.exception(
f"The user password to deactivate was incorrect. {request.user.username}"
)
return Response(str(err), status=status.HTTP_403_FORBIDDEN)
except Exception as err: # pylint: disable=broad-except
return Response(f"Could not verify user password: {err}", status=status.HTTP_400_BAD_REQUEST)
def _check_excessive_login_attempts(self, user):
"""
See if account has been locked out due to excessive login failures
"""
if user and LoginFailures.is_feature_enabled():
if LoginFailures.is_user_locked_out(user):
raise AuthFailedError(_('This account has been temporarily locked due '
'to excessive login failures. Try again later.'))
def _handle_failed_authentication(self, user):
"""
Handles updating the failed login count, inactive user notifications, and logging failed authentications.
"""
if user and LoginFailures.is_feature_enabled():
LoginFailures.increment_lockout_counter(user)
raise AuthFailedError(_('Email or password is incorrect.'))
def _set_unusable_password(user):
"""
Helper method for the shared functionality of setting a user's
password to the unusable password, thus deactivating the account.
"""
user.set_unusable_password()
user.save()
class AccountRetirementPartnerReportView(ViewSet):
"""
Provides API endpoints for managing partner reporting of retired
users.
"""
DELETION_COMPLETED_KEY = 'deletion_completed'
ORGS_CONFIG_KEY = 'orgs_config'
ORGS_CONFIG_ORG_KEY = 'org'
ORGS_CONFIG_FIELD_HEADINGS_KEY = 'field_headings'
ORIGINAL_EMAIL_KEY = 'original_email'
ORIGINAL_NAME_KEY = 'original_name'
STUDENT_ID_KEY = 'student_id'
authentication_classes = (JwtAuthentication,)
permission_classes = (permissions.IsAuthenticated, CanRetireUser,)
parser_classes = (JSONParser,)
serializer_class = UserRetirementStatusSerializer
@staticmethod
def _get_orgs_for_user(user):
"""
Returns a set of orgs that the user has enrollments with
"""
orgs = set()
for enrollment in user.courseenrollment_set.all():
org = enrollment.course_id.org
# Org can conceivably be blank or this bogus default value
if org and org != 'outdated_entry':
orgs.add(org)
try:
# if the user has ever launched a managed Zoom xblock,
# we'll notify Zoom to delete their records.
if user.launchlog_set.filter(managed=True).count():
orgs.add('zoom')
except AttributeError:
# Zoom XBlock not installed
pass
return orgs
def retirement_partner_report(self, request): # pylint: disable=unused-argument
"""
POST /api/user/v1/accounts/retirement_partner_report/
Returns the list of UserRetirementPartnerReportingStatus users
that are not already being processed and updates their status
to indicate they are currently being processed.
"""
retirement_statuses = UserRetirementPartnerReportingStatus.objects.filter(
is_being_processed=False
).order_by('id')
retirements = []
for retirement_status in retirement_statuses:
retirements.append(self._get_retirement_for_partner_report(retirement_status))
serializer = UserRetirementPartnerReportSerializer(retirements, many=True)
retirement_statuses.update(is_being_processed=True)
return Response(serializer.data)
def _get_retirement_for_partner_report(self, retirement_status):
"""
Get the retirement for this retirement_status. The retirement info will be included in the partner report.
"""
retirement = {
'user_id': retirement_status.user.pk,
'original_username': retirement_status.original_username,
AccountRetirementPartnerReportView.ORIGINAL_EMAIL_KEY: retirement_status.original_email,
AccountRetirementPartnerReportView.ORIGINAL_NAME_KEY: retirement_status.original_name,
'orgs': self._get_orgs_for_user(retirement_status.user),
'created': retirement_status.created,
}
# Some orgs have a custom list of headings and content for the partner report. Add this, if applicable.
self._add_orgs_config_for_user(retirement, retirement_status.user)
return retirement
def _add_orgs_config_for_user(self, retirement, user):
"""
        Check to see if the user's info was sent to any partners (orgs) that have a custom list of headings and
content for the partner report. If so, add this.
"""
# See if the MicroBachelors coaching provider needs to be notified of this user's retirement
if has_ever_consented_to_coaching is not None and has_ever_consented_to_coaching(user):
# See if the user has a MicroBachelors external id. If not, they were never sent to the
# coaching provider.
external_ids = ExternalId.objects.filter(
user=user,
external_id_type__name=ExternalIdType.MICROBACHELORS_COACHING
)
if external_ids.exists():
# User has an external id. Add the additional info.
external_id = str(external_ids[0].external_user_id)
self._add_coaching_orgs_config(retirement, external_id)
def _add_coaching_orgs_config(self, retirement, external_id):
"""
Add the orgs configuration for MicroBachelors coaching
"""
# Add the custom field headings
retirement[AccountRetirementPartnerReportView.ORGS_CONFIG_KEY] = [
{
AccountRetirementPartnerReportView.ORGS_CONFIG_ORG_KEY: 'mb_coaching',
AccountRetirementPartnerReportView.ORGS_CONFIG_FIELD_HEADINGS_KEY: [
AccountRetirementPartnerReportView.STUDENT_ID_KEY,
AccountRetirementPartnerReportView.ORIGINAL_EMAIL_KEY,
AccountRetirementPartnerReportView.ORIGINAL_NAME_KEY,
AccountRetirementPartnerReportView.DELETION_COMPLETED_KEY
]
}
]
# Add the custom field value
retirement[AccountRetirementPartnerReportView.STUDENT_ID_KEY] = external_id
@request_requires_username
def retirement_partner_status_create(self, request):
"""
PUT /api/user/v1/accounts/retirement_partner_report/
```
{
'username': 'user_to_retire'
}
```
Creates a UserRetirementPartnerReportingStatus object for the given user
as part of the retirement pipeline.
"""
username = request.data['username']
try:
retirement | |
# usienarl/agent.py
#
# Copyright (C) 2019 <NAME>
# University of Siena - Artificial Intelligence Laboratory - SAILab
#
#
# USienaRL is licensed under a BSD 3-Clause.
#
# You should have received a copy of the license along with this
# work. If not, see <https://opensource.org/licenses/BSD-3-Clause>.
# Import packages
import logging
import tensorflow
import numpy
# Import required src
from usienarl import Interface, SpaceType
class Agent:
"""
Base agent abstract class.
An agent defines who or what operates in a certain environment during a certain experiment.
    An agent can act and be updated, but the inner workings (model, policies and so on) are left to be decided by the
implementation.
An agent needs to be generated before running, and generation is done when executing agent setup.
Any agent can act in three different modes:
- warmup mode, before training (not all agents required that, it depends on the agent inner model)
- train mode, during training
- inference mode, when exploiting
One or more policies can be defined for each one of these modes.
To define your own agent, implement the abstract class in a specific child class.
"""
def __init__(self,
name: str):
# Define internal attributes
self._name: str = name
# Define empty attributes
self._saver = None
self._scope: str or None = None
self._summary_writer = None
self._parallel: int or None = None
self._summary_path: str or None = None
self._save_path: str or None = None
self._saves_to_keep: int or None = None
self._observation_space_type: SpaceType or None = None
self._observation_space_shape = None
self._agent_action_space_type: SpaceType or None = None
self._agent_action_space_shape = None
self._save_counter: int or None = None
def setup(self,
logger: logging.Logger,
scope: str,
parallel: int,
observation_space_type: SpaceType, observation_space_shape: (),
agent_action_space_type: SpaceType, agent_action_space_shape: (),
summary_path: str = None, save_path: str = None, saves_to_keep: int = 0) -> bool:
"""
Setup the agent, preparing all its components for execution.
:param logger: the logger used to print the agent information, warnings and errors
:param scope: the experiment scope encompassing the agent scope
        :param parallel: the number of parallel episodes run by the experiment, must be greater than zero
:param observation_space_type: the space type of the observation space
:param observation_space_shape: the shape of the observation space, as a tuple
:param agent_action_space_type: the space type of the agent action space
:param agent_action_space_shape: the shape of the agent action space, as a tuple
:param summary_path: the optional path of the summary writer of the agent
:param save_path: the optional path where to save metagraphs checkpoints of the agent's model
:param saves_to_keep: the optional number of checkpoint saves to keep, it does nothing if there is no save path
:return: True if setup is successful, False otherwise
"""
# Make sure parameters are correct
assert(parallel > 0 and saves_to_keep >= 0)
logger.info("Setup of agent " + self._name + " with scope " + scope + "...")
# Reset agent attributes
self._scope = scope
self._parallel = parallel
self._summary_path = summary_path
self._save_path = save_path
self._saves_to_keep = saves_to_keep
self._observation_space_type: SpaceType = observation_space_type
self._observation_space_shape = observation_space_shape
self._agent_action_space_type: SpaceType = agent_action_space_type
self._agent_action_space_shape = agent_action_space_shape
# Try to generate the agent inner model
if not self._generate(logger,
observation_space_type, observation_space_shape,
agent_action_space_type, agent_action_space_shape):
return False
# Define the summary writer if required
if summary_path is not None:
self._summary_writer = tensorflow.summary.FileWriter(summary_path, graph=tensorflow.get_default_graph())
logger.info("A Tensorboard summary for the agent will be updated during training of its internal model")
logger.info("Tensorboard summary path: " + summary_path)
# Define the saver if required
if self._save_path is not None and self._save_path and self._saves_to_keep > 0:
self._saver = tensorflow.train.Saver(self.saved_variables, max_to_keep=self._saves_to_keep)
if self._saves_to_keep > 1:
logger.info("Agent model metagraph will be saved after each training/validation pair. A set of " + str(self._saves_to_keep) + " models will be stored.")
else:
logger.info("Agent model metagraph will be saved after each training/validation pair")
logger.info("Agent model metagraphs are saved at " + self._save_path)
self._save_counter: int = 0
# Validate setup
return True
def restore(self,
logger: logging.Logger,
session,
path: str) -> bool:
"""
Restore the agent's model from the checkpoint at the given path.
:param logger: the logger used to print the agent information, warnings and errors
:param session: the session of tensorflow currently running
:param path: the path from which to restore, it is required
:return: True if restore is successful, False otherwise
"""
# Make sure parameters are correct
assert(path is not None and path)
# Get checkpoint from path
checkpoint = tensorflow.train.get_checkpoint_state(path)
# If no saver is defined, define one to restore from checkpoint
if self._saver is None:
self._saver = tensorflow.train.Saver(self.saved_variables)
# If checkpoint exists restore from checkpoint
if checkpoint and checkpoint.model_checkpoint_path:
self._saver.restore(session, tensorflow.train.latest_checkpoint(path))
logger.info("Model graph stored at " + path + " loaded successfully!")
return True
logger.error("Checkpoint path specified is wrong: no model can be accessed at " + path)
return False
def save(self,
logger: logging.Logger,
session):
"""
Save the agent's model metagraph. It does nothing if a saver is not defined.
:param logger: the logger used to print the agent information, warnings and errors
:param session: the session of tensorflow currently running
"""
# Check if the saver exists or something has to be saved
if self._saver is None or self._save_path is None or not self._save_path or self._saves_to_keep <= 0:
return
logger.info("Saving the agent " + self._name + " metagraph at path " + self._save_path + "...")
self._saver.save(session, self._save_path, self._save_counter)
self._save_counter += 1
logger.info("Agent " + self._name + " metagraph saved successfully")
def _generate(self,
logger: logging.Logger,
observation_space_type: SpaceType, observation_space_shape: tuple,
agent_action_space_type: SpaceType, agent_action_space_shape: tuple) -> bool:
"""
Generate the agent's model. Used to generate all custom components of the agent.
It is always called during setup.
:param logger: the logger used to print the agent information, warnings and errors
:param observation_space_type: the space type of the observation space
:param observation_space_shape: the shape of the observation space, as a tuple
:param agent_action_space_type: the space type of the agent action space
:param agent_action_space_shape: the shape of the agent action space, as a tuple
:return: True if generation is successful, False otherwise
"""
# Abstract method, it should be implemented on a child class basis
raise NotImplementedError()
def initialize(self,
logger: logging.Logger,
session):
"""
Initialize the agent before acting in the environment.
The environment at this stage is already initialized.
:param logger: the logger used to print the agent information, warnings and errors
:param session: the session of tensorflow currently running
"""
# Abstract method, it should be implemented on a child class basis
raise NotImplementedError()
def act_warmup(self,
logger: logging.Logger,
session,
interface: Interface,
agent_observation_current: numpy.ndarray,
warmup_step: int, warmup_episode: int) -> numpy.ndarray:
"""
Take an action given the current agent observation in warmup mode.
Usually it uses a random policy.
:param logger: the logger used to print the agent information, warnings and errors
:param session: the session of tensorflow currently running
:param interface: the interface between the agent and the environment
:param agent_observation_current: the current observation of the agent wrapped in a numpy array
:param warmup_step: the current absolute warm-up step of the experiment the agent is warming-up into
:param warmup_episode: the current absolute warm-up episode of the experiment the agent is warming-up into
:return: the action decided by the agent wrapped in a numpy array
"""
# Abstract method, it should be implemented on a child class basis
raise NotImplementedError()
def act_train(self,
logger: logging.Logger,
session,
interface: Interface,
agent_observation_current: numpy.ndarray,
train_step: int, train_episode: int) -> numpy.ndarray:
"""
Take an action given the current agent observation in train mode.
Usually it uses an exploring policy.
:param logger: the logger used to print the agent information, warnings and errors
:param session: the session of tensorflow currently running
:param interface: the interface between the agent and the environment
:param agent_observation_current: the current observation of the agent wrapped in a numpy array
:param train_step: the current absolute train step of the experiment the agent is training into
:param train_episode: the current absolute train episode of the experiment the agent is training into
:return: the action decided by the agent wrapped in a numpy array
"""
# Abstract method, it should be implemented on a child class basis
raise NotImplementedError()
# foij - forward old i+j
# lij is the smallest i+j of the end point of any search path in the current
# pass.
# oij is the value of lij of the previous pass. These values will be used
# to eliminate any entries in P that are no longer necessary.
flij = Me+Ne
foij = Mb+Nb
# the length of the longest LCS so far
max_len_lcs = 0
finished = (0 != 0)
# D - the number of non-diagonal steps
D = 0
while (D < (Me+Ne) and not(finished)):
#
# Work on the forward searches
#
D = D + 1
flij = Me+Ne
Fkeys = FV.keys()
Fkeys.sort()
while (len(Fkeys) > 0):
key = Fkeys.pop()
s = FV[key]
del FV[key]
(xi,yj) = s.lastpoint
opt_len = s.lcslength + min(Me-xi+1,Ne-yj+1)
if (opt_len > max_len_lcs):
#
# There (still) is hope that this search can beat the best search so far
#
#
# First try whether we are onto a snake
#
xi1 = xi+1
yj1 = yj+1
if yj1 <= Ne and xi1 <= Me:
(type,token,lineno,tokenno) = ref.token[yj1]
reftok = (type,token)
(type,token,lineno,tokenno) = dat.token[xi1]
dattok = (type,token)
if reftok == dattok:
# yes, we are onto a snake
xi2 = xi1 + 1
yj2 = yj1 + 1
while (yj2 <=Ne and xi2 <= Me):
(type,token,lineno,tokenno) = ref.token[yj2]
reftok = (type,token)
(type,token,lineno,tokenno) = dat.token[xi2]
dattok = (type,token)
if reftok == dattok:
xi2 = xi2 + 1
yj2 = yj2 + 1
else:
break
xi2 = xi2 - 1
yj2 = yj2 - 1
s.add_snake(xi1,yj1,xi2,yj2,1)
s.increase_length(xi2-xi1+1)
s.set_lastpoint(xi2,yj2)
xi = xi2
yj = yj2
finished = (yj2 == Ne and xi2 == Me)
if finished:
lcs = transform_snake_list(s.snakelist)
break
#
# update the maximum LCS length
#
max_len_lcs = max(max_len_lcs,s.lcslength)
#
# now explore the way forward, horizontal first
#
keep_horizontal = false
xih = xi+1
yjh = yj
if xih <= Me:
if FP.has_key((xih,yjh)):
if FP[(xih,yjh)] < s.lcslength:
keep_horizontal = true
else:
keep_horizontal = true
if xih+yjh < flij:
flij = xih+yjh
finished = (yjh == Ne and xih == Me)
if finished:
lcs = transform_snake_list(s.snakelist)
break
#
# now explore the vertical direction
#
keep_vertical = false
xiv = xi
yjv = yj+1
if yjv <= Ne:
if FP.has_key((xiv,yjv)):
if FP[(xiv,yjv)] < s.lcslength:
keep_vertical = true
else:
keep_vertical = true
if xiv+yjv < flij:
flij = xiv+yjv
finished = (yjv == Ne and xiv == Me)
if finished:
lcs = transform_snake_list(s.snakelist)
break
if keep_vertical:
if keep_horizontal:
# Keeping both horizontal and vertical search direction
# So generate a new search path
sa = copy.copy(s)
sa.set_lastpoint(xiv,yjv)
FV[(xiv,yjv)] = sa
FP[(xiv,yjv)] = sa.lcslength
#
s.set_lastpoint(xih,yjh)
FV[(xih,yjh)] = s
FP[(xih,yjh)] = s.lcslength
else:
# Keeping only the vertical search direction
# So simply update the current search path
s.set_lastpoint(xiv,yjv)
FV[(xiv,yjv)] = s
FP[(xiv,yjv)] = s.lcslength
else:
if keep_horizontal:
# Keeping only the horizontal search direction
# So simply update the current search path
s.set_lastpoint(xih,yjh)
FV[(xih,yjh)] = s
FP[(xih,yjh)] = s.lcslength
else:
# Keeping neither the horizontal or vertical search direction
# So remove the current path from the search list
pass
else:
pass
#
# now tidy up FP
#
ij = foij - (Mb+Nb)
while (ij <= flij - (Mb+Nb)):
xi = Mb + ij
yj = Nb
while (xi >= Mb):
if FP.has_key((xi,yj)):
del FP[(xi,yj)]
xi = xi - 1
yj = yj + 1
ij = ij + 1
foij = flij
return lcs
def tol_decode(tol_line,txt_line):
"""Decode a single line of the tolerance change section and mark the
relevant characters in the text line. Then return the modified
text line.
"""
clist = string.split(tol_line,',')
c1 = 0
c2 = len(clist)
while (c1 < c2):
p = clist[c1]
(s1,s2) = string.split(p,':')
s1 = int(s1)
s2 = int(s2)
s3 = len(txt_line)
while (s3 <= s2):
txt_line = txt_line + " "
s3 = s3 + 1
while (s1 <= s2):
txt_line = txt_line[:s1] + "#" + txt_line[s1+1:]
s1 = s1 + 1
c1 = c1 + 1
return txt_line
def tol_encode(txt_line):
"""We assume that the text line contains hash character in all places where
differences should be tolerated. This function will find the hash
characters and encode their locations in the format of a tolerance
change section line.
"""
#
# s1,s2 - beginning and end of a tolerance
# intol - whether we are in a tolerance part
# ntol - the number of tolerance parts found
#
tol_line = ""
true = (0==0)
false = not true
ntol = 0
i = 0
n = len(txt_line)
intol = false
while (i < n):
if txt_line[i] == "#":
if intol:
s2 = i
else:
s1 = i
s2 = i
intol = true
ntol = ntol + 1
else:
if intol:
if ntol > 1:
tol_line = tol_line + ","
tol_line = tol_line + str(s1)+":"+str(s2)
intol = false
else:
pass
i = i + 1
if intol:
if ntol > 1:
tol_line = tol_line + ","
tol_line = tol_line + str(s1)+":"+str(s2)
return tol_line
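# Worked example (added for illustration): for txt_line = "abc##fg#" this function returns
# "3:4,7:7", i.e. differences are tolerated at character positions 3-4 and 7. Conversely,
# tol_decode("3:4,7:7", "abcdefgh") marks those same positions with '#' again and yields
# "abc##fg#".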
def tol_compare_token(change,ref,yj,dat,xi,feps,ieps):
"""Compares two tokens taking into account tolerable differences
stored in 'tol' if any.
"""
reftok = ref.token[yj]
dattok = dat.token[xi]
if change.has_key(yj):
toltok = change[yj]
else:
toltok = ""
result = toldiff_tokens.tokens_match(reftok,dattok,toltok,feps,ieps)
return result
def find_lcs2(tol,ref,Nb,Ne,dat,Mb,Me,feps,ieps):
"""Compares the data stored in 'dat' against the data in 'ref',
and returns the longest common subsequence (LCS) in 'lcs'. The LCS
is stored as a list of snakes. A snake is a sequence of line pairs
(Xi,Yj) to (Xi+p,Yj+p) where the lines X and Y in every pair match.
Whatever happens between two snakes in a path is irrelevant.
In this particular routine the string comparison is modified based
on the information held in 'tol'. For every relevant line the tol
dictionary holds a string of splices of characters where differences
should be tolerated. As this routine uses a tolerant comparison it
generates type 2 snakes.
The algorithm used here is inspired by:
<NAME>, 'An O(ND) Difference Algorithm and Its Variations'
Algorithmica 1, 2 (1986), 251-266
http://www.cs.arizona.edu/people/gene/PAPERS/diff.ps
however I cannot guarantee that I understood it well enough to reproduce
the actual published algorithm.
<NAME>, SciTech Daresbury Laboratory, June 2006.
"""
lcs = { }
# FP - Forward Pij
# Records the maximum number of diagonal lines of all candidates paths that
# passed through node (i,j). P is a dictionary with tuples (i,j) as keys and
# the maximum number as data.
FP = { }
# FV - Forward search path vector
# Stores the forwards search paths.
FV = { }
# NF - counter for generating forward search path keys
#
s = search_path_linked()
s.set_lastpoint(Mb-1,Nb-1)
FV[(Mb-1,Nb-1)] = s
# flij - forward last i+j
# foij - forward old i+j
# lij is the smallest i+j of the end point of any search path in the current
# pass.
# oij is the value of lij of the previous pass. These values will be used
# to eliminate any entries in P that are no longer necessary.
flij = Me+Ne
foij = Mb+Nb
# the length of the longest LCS so far
max_len_lcs = 0
finished = (0 != 0)
# D - the number of non-diagonal steps
D = 0
while (D < (Me+Ne) and not(finished)):
#
# Work on the forward searches
#
D = D + 1
flij = Me+Ne
Fkeys = FV.keys()
Fkeys.sort()
while (len(Fkeys) > 0):
key = Fkeys.pop()
s = FV[key]
del FV[key]
(xi,yj) = s.lastpoint
opt_len = s.lcslength + min(Me-xi+1,Ne-yj+1)
if (opt_len > max_len_lcs):
#
# There (still) is hope that this search can beat the best search so far
#
# First try whether we are onto a snake
#
xi1 = xi+1
yj1 = yj+1
if yj1 <= Ne and xi1 <= Me:
# line comparison 1
if tol_compare_token(tol,ref,yj1,dat,xi1,feps,ieps):
# WS2812 LED Matrix Gamecontrol (Tetris, Snake, Pong)
# by <NAME>
# https://hackaday.io/project/11064-raspberry-pi-retro-gaming-led-display
# ported from
# Tetromino (a Tetris clone)
# By <NAME> <EMAIL>
# http://inventwithpython.com/pygame
# Released under a "Simplified BSD" license
import random, time, sys, socket, threading, queue, socketserver, os
from PIL import Image # tested with pillow-6.2.1
# If Pi = False the script runs in simulation mode using pygame lib
PI = True
import pygame
from pygame.locals import *
from random import randint # random numbers
import datetime
if PI:
import serial
from luma.led_matrix.device import max7219
from luma.core.interface.serial import spi, noop
from luma.core.render import canvas
from luma.core.legacy.font import proportional, SINCLAIR_FONT, TINY_FONT, CP437_FONT
from luma.core.legacy import show_message, text
import asyncio
from evdev import InputDevice, categorize, ecodes # PS4 inputs
import evdev
from select import select
# only modify these two values for size adaptation!
PIXEL_X=10
PIXEL_Y=20
#TODO implement MineSweeper?
MAX2719_DISPLAYS=4 # number of cascaded displays
MAX2719_ORIENTATION=90 # Corrects block orientation when wired vertically choices=[0, 90, -90]
MAX2719_ROTATION=0 # Rotate display 0=0°, 1=90°, 2=180°, 3=270° choices=[0, 1, 2, 3]
#PORT_NAME = "/dev/ttyAMA0"
PORT_NAME = "/dev/ttyS0"
SIZE= 20
FPS = 15
BOXSIZE = 20
WINDOWWIDTH = BOXSIZE * PIXEL_X
WINDOWHEIGHT = BOXSIZE * PIXEL_Y
BOARDWIDTH = PIXEL_X
BOARDHEIGHT = PIXEL_Y
BLANK = '.'
MOVESIDEWAYSFREQ = 0.15
MOVEDOWNFREQ = 0.15
FALLING_SPEED = 0.8
# R G B
WHITE = (255, 255, 255)
GRAY = (185, 185, 185)
BLACK = ( 0, 0, 0)
RED = (255, 0, 0)
LIGHTRED = (175, 20, 20)
GREEN = ( 0, 255, 0)
LIGHTGREEN = ( 20, 175, 20)
BLUE = ( 0, 0, 255)
LIGHTBLUE = ( 20, 20, 175)
YELLOW = (255, 255, 0)
LIGHTYELLOW = (175, 175, 20)
CYAN = ( 0, 255, 255)
MAGENTA = (255, 0, 255)
ORANGE = (255, 100, 0)
SCORES =(0,40,100,300,1200)
BORDERCOLOR = BLUE
BGCOLOR = BLACK
TEXTCOLOR = WHITE
TEXTSHADOWCOLOR = GRAY
COLORS = (BLUE,GREEN,RED,YELLOW,CYAN,MAGENTA,ORANGE)
LIGHTCOLORS = (LIGHTBLUE, LIGHTGREEN, LIGHTRED, LIGHTYELLOW)
#assert len(COLORS) == len(LIGHTCOLORS) # each color must have light color
# constants defining the keys/buttons on the controller
BUTTON_LEFT=0
BUTTON_RIGHT=1
BUTTON_UP=2
BUTTON_DOWN=3
BUTTON_BLUE=4
BUTTON_GREEN=5
BUTTON_RED=6
BUTTON_YELLOW=7
# Sony PS4 Controller Codes
# using evdev now; it might be better to use pygame.joystick, but I could not get it to work in the headless setup
PS4BTN_X=304
PS4BTN_CIRCLE=305
PS4BTN_TRIANGLE=307
PS4BTN_QUADRAT=308
PS4BTN_R2=313
PS4BTN_R1=311
PS4BTN_L2=312
PS4BTN_L1=310
#maps the evdev button code to the in-game button event name
# PS4 version --> maps a PS4 button to the in-game event name
# using predefined constants from evdev
if PI:
# controllerEventMapper = {
# BTN_SOUTH : BUTTON_DOWN,
# BTN_EAST : BUTTON_RIGHT,
# BTN_WEST : BUTTON_LEFT,
# BTN_NORTH: BUTTON_UP,
# BTN_TL : BUTTON_YELLOW,
# BTN_TL2 : BUTTON_RED,
# BTN_TR : BUTTON_GREEN,
# BTN_TR2 : BUTTON_BLUE
# }
controllerEventMapper = {
PS4BTN_X : BUTTON_DOWN,
PS4BTN_CIRCLE : BUTTON_RIGHT,
PS4BTN_QUADRAT : BUTTON_LEFT,
PS4BTN_TRIANGLE : BUTTON_UP,
PS4BTN_L1 : BUTTON_YELLOW,
PS4BTN_L2 : BUTTON_RED,
PS4BTN_R1 : BUTTON_GREEN,
PS4BTN_R2 : BUTTON_BLUE
}
keyboardEventMapper = {
pygame.K_DOWN : BUTTON_DOWN,
pygame.K_RIGHT : BUTTON_RIGHT,
pygame.K_LEFT : BUTTON_LEFT,
pygame.K_UP: BUTTON_UP,
pygame.K_4 : BUTTON_YELLOW,
pygame.K_3 : BUTTON_RED,
pygame.K_2 : BUTTON_GREEN,
pygame.K_1 : BUTTON_BLUE
}
#constants for the communication with the external display driver (Arduino) - only 4 commands are currently used
#COMMANDBYTE_SETBRIGHTNESS = 22 # command to set the LED Brightness of the Main Display; Followed by 1 Byte: Brightness value
COMMANDBYTE_DRAWPIXELRGB = 24 # command to set a pixel to an RGB color; followed by 5 bytes: X-pos, Y-pos, R-Value, G-Value, B-Value
COMMANDBYTE_DRAWPIXELCOLOR = 26 # command to set a pixel to a color selected from the internal palette; followed by 3 bytes: X-pos, Y-pos, Color-Index
#COMMANDBYTE_FULLSCREEN = 28 # command to set the full screen, followed by 200 bytes, one per pixel, selected from the internal palette
COMMANDBYTE_UPDATESCREEN = 30 # command to update the screen
COMMANDBYTE_CLEARSCREEN = 32 # command to clear the screen
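# Illustrative sketch (added; not part of the original script): how a single DRAWPIXELRGB
# command could be framed for the external display driver, based on the byte layout
# documented above (command byte followed by X, Y, R, G, B). The helper name is an
# assumption; the real script writes to the serial port elsewhere.
def _example_draw_pixel_rgb(port, x, y, r, g, b):
    # one command byte followed by five data bytes, each clamped to 0..255
    frame = bytes([COMMANDBYTE_DRAWPIXELRGB, x & 0xFF, y & 0xFF, r & 0xFF, g & 0xFF, b & 0xFF])
    port.write(frame)  # 'port' is expected to be an open serial.Serial instance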
# constants for the colors in the arduino matrix
COLORINDEX_BLUE = 0
COLORINDEX_GREEN = 1
COLORINDEX_RED = 2
COLORINDEX_YELLOW = 3
COLORINDEX_CYAN = 4
COLORINDEX_MAGENTA = 5
COLORINDEX_ORANGE = 6
COLORINDEX_WHITE = 7
COLORINDEX_BLACK = 8
TEMPLATEWIDTH = 5
TEMPLATEHEIGHT = 5
S_SHAPE_TEMPLATE = [['.....',
'.....',
'..OO.',
'.OO..',
'.....'],
['.....',
'..O..',
'..OO.',
'...O.',
'.....']]
Z_SHAPE_TEMPLATE = [['.....',
'.....',
'.OO..',
'..OO.',
'.....'],
['.....',
'..O..',
'.OO..',
'.O...',
'.....']]
I_SHAPE_TEMPLATE = [['..O..',
'..O..',
'..O..',
'..O..',
'.....'],
['.....',
'.....',
'OOOO.',
'.....',
'.....']]
O_SHAPE_TEMPLATE = [['.....',
'.....',
'.OO..',
'.OO..',
'.....']]
J_SHAPE_TEMPLATE = [['.....',
'.O...',
'.OOO.',
'.....',
'.....'],
['.....',
'..OO.',
'..O..',
'..O..',
'.....'],
['.....',
'.....',
'.OOO.',
'...O.',
'.....'],
['.....',
'..O..',
'..O..',
'.OO..',
'.....']]
L_SHAPE_TEMPLATE = [['.....',
'...O.',
'.OOO.',
'.....',
'.....'],
['.....',
'..O..',
'..O..',
'..OO.',
'.....'],
['.....',
'.....',
'.OOO.',
'.O...',
'.....'],
['.....',
'.OO..',
'..O..',
'..O..',
'.....']]
T_SHAPE_TEMPLATE = [['.....',
'..O..',
'.OOO.',
'.....',
'.....'],
['.....',
'..O..',
'..OO.',
'..O..',
'.....'],
['.....',
'.....',
'.OOO.',
'..O..',
'.....'],
['.....',
'..O..',
'.OO..',
'..O..',
'.....']]
PIECES = {'S': S_SHAPE_TEMPLATE,
'Z': Z_SHAPE_TEMPLATE,
'I': I_SHAPE_TEMPLATE,
'J': J_SHAPE_TEMPLATE,
'L': L_SHAPE_TEMPLATE,
'O': O_SHAPE_TEMPLATE,
'T': T_SHAPE_TEMPLATE}
PIECES_ORDER = {'S': 0,'Z': 1,'I': 2,'J': 3,'L': 4,'O': 5,'T': 6}
# snake constants #
UP = 'up'
DOWN = 'down'
LEFT = 'left'
RIGHT = 'right'
HEAD = 0 # syntactic sugar: index of the worm's head
# font clock #
clock_font = [
0x1F, 0x11, 0x1F,
0x00, 0x00, 0x1F,
0x1D, 0x15, 0x17,
0x15, 0x15, 0x1F,
0x07, 0x04, 0x1F,
0x17, 0x15, 0x1D,
0x1F, 0x15, 0x1D,
0x01, 0x01, 0x1F,
0x1F, 0x15, 0x1F,
0x17, 0x15, 0x1F]
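# Added note (interpretation, not from the original source): clock_font appears to pack one
# digit per three bytes, one byte per column, with the low five bits of each byte forming a
# 5-pixel-tall column. For example the first digit '0' = 0x1F, 0x11, 0x1F reads as a full
# column, a column with only the top and bottom pixels set, and another full column, i.e. a 3x5 zero.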
# serial port pi #
if PI:
serport=serial.Serial(PORT_NAME,baudrate=250000,timeout=3.0)
spiPort = spi(port=0, device=0, gpio=noop())
MAX2719device = max7219(spiPort, cascaded=MAX2719_DISPLAYS, block_orientation=MAX2719_ORIENTATION,
rotate=MAX2719_ROTATION or 0, blocks_arranged_in_reverse_order=False)
#creates object 'gamepad' to store the data
#gamepad = InputDevice(GAMEPAD_DEVICE)
#print(gamepad)
else:
MAX2719device = 0
# key server for controller #
#TODO simply use pygame events?
QKEYDOWN=0
QKEYUP=1
myQueue = queue.Queue()
mask = bytearray([1,2,4,8,16,32,64,128])
class qEvent:
def __init__(self, key, type):
self.key = key
self.type = type
class ThreadedTCPRequestHandler(socketserver.BaseRequestHandler):
def handle(self):
oldstr=b'\x80' #create event on connection start (0x80 != 0x00)
while RUNNING:
data = self.request.recv(1)
#cur_thread = threading.current_thread()
#response = bytes("{}: {}".format(cur_thread.name, data), 'ascii')
if data:
if data!=oldstr:
#print(str(time.time()) + ' -- ' + str(oldstr))
for i in range(0, 8):
# compare each button bit with the previous state and queue an event when it changed
if (data[0] & mask[i]) != (oldstr[0] & mask[i]):
if data[0] & mask[i]:
myQueue.put(qEvent(i, QKEYDOWN))
else:
myQueue.put(qEvent(i, QKEYUP))
oldstr = data
#print(data)
#self.request.sendall(response)
class ThreadedTCPServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
pass
def client(ip, port, message):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((ip, port))
try:
sock.sendall(bytes(message, 'ascii'))
response = str(sock.recv(1024), 'ascii')
print("Received: {}".format(response))
finally:
sock.close()
# returns the first device that does not contain Touchpad or motion (PS4)
def findController():
for fname in evdev.list_devices():
dev = evdev.InputDevice(fname)
print(dev.name)
if "Touchpad" in dev.name:
next
elif "Motion Sensor" in dev.name:
next
else:
return dev
def gamePadListener():
gamePadConnected = False
while True:
if gamePadConnected==False:
gamepad = findController()
if(gamepad):
print(gamepad)
gamePadConnected=True
else:
time.sleep(0.5)
else: # gamepad is available --> read it
r,w,x = select([gamepad], [], [],0)
if r:
try:
for event in gamepad.read():
#filters by event type
if event.type == ecodes.EV_KEY:
#print(event)
if event.value == 1: # button pressed
thisEventType = QKEYDOWN
else:
thisEventType = QKEYUP
# try to get the correct key mapping
mappedEventCode = controllerEventMapper.get(event.code,-1)
if mappedEventCode != -1: # only insert when button has a mapping
myQueue.put(qEvent(mappedEventCode,thisEventType))
except OSError:
time.sleep(0.5)
gamePadConnected=False
continue
time.sleep(0.01)
def pollKeyboardInput():
for event in pygame.event.get():
#if event.type == pygame.QUIT: # Usually wise to be able to close your program.
# raise SystemExit
if event.type == pygame.KEYDOWN or event.type == pygame.KEYUP:
if event.type == pygame.KEYDOWN:
thisEventType = QKEYDOWN
else:
thisEventType = QKEYUP
mappedEventCode = keyboardEventMapper.get(event.key,-1)
if mappedEventCode != -1: # only insert when button has a mapping
myQueue.put(qEvent(mappedEventCode,thisEventType))
# main #
SCREEN_CLOCK = 0
SCREEN_TETRIS = 1
SCREEN_SNAKE = 2
SCREEN_PONG = 3
def main():
global FPSCLOCK, DISPLAYSURF, BASICFONT, BIGFONT
global RUNNING
RUNNING=True
if not PI:
pygame.init()
FPSCLOCK = pygame.time.Clock()
DISPLAYSURF = pygame.display.set_mode((PIXEL_X*SIZE, PIXEL_Y*SIZE))
BASICFONT = pygame.font.Font('freesansbold.ttf', 18)
BIGFONT = pygame.font.Font('freesansbold.ttf', 100)
pygame.display.set_caption('Pi Games')
else:
#MAX2719device.brightness(1) TODO needs fix
MAX2719device.clear()
#MAX2719device.show_message("Waiting for controller...", font=proportional(CP437_FONT),delay=0.015)
gamePadThread = threading.Thread(target=gamePadListener,daemon=True)
gamePadThread.start()
# Port 0 means to select an arbitrary unused port
HOST, PORT = '', 4711
server = ThreadedTCPServer((HOST, PORT), ThreadedTCPRequestHandler)
ip, port = server.server_address
# Start a thread with the server -- that thread will then start one
# more thread for each request
server_thread = threading.Thread(target=server.serve_forever)
# Exit the server thread when the main thread terminates
server_thread.daemon = True
server_thread.start()
print("Server loop running in thread:", server_thread.name)
currentScreen = SCREEN_TETRIS
#nextScreen = -1
clearScreen()
drawClock(COLORINDEX_GREEN)
clearScreen()
# if PI:
# show_message(MAX2719device, "Let's play", fill="white", font=proportional(CP437_FONT),scroll_delay=0.03)
while True:
updateStartScreen(currentScreen)
while myQueue.empty():
if not PI:
pollKeyboardInput()
time.sleep(.1)
updateScreen()
if not PI:
checkForQuit()
time.sleep(.1)
# use the down key as enter and right, left to toggle between start screens
event = myQueue.get()
if event.type == QKEYDOWN:
if (event.key == BUTTON_LEFT): # goto previous start screen
currentScreen-=1
if(currentScreen==0):
currentScreen=3
elif (event.key == BUTTON_RIGHT): # goto next start screen
currentScreen+=1
if(currentScreen==4):
currentScreen=1
elif (event.key == BUTTON_DOWN): # start a game
if(currentScreen==SCREEN_TETRIS):
runTetrisGame()
drawGameOverScreen()
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import time
import logging
import simplejson as json
import platform
import socket
import decimal
import yaml
import serial
import paho.mqtt.client
import appdirs
if platform.system() == 'Linux':
import fcntl
class Gateway:
def __init__(self, config):
self._config = config
self._alias_list = {}
self._alias_action = {}
self._node_rename_id = config['rename'].copy()
self._node_rename_name = {v: k for k, v in config['rename'].items()}
self._name = None
self._data_dir = None
self._cache_nodes = {}
self._info = None
self._info_id = None
self._sub = set(['gateway/ping', 'gateway/all/info/get'])
self._nodes = {}
self._auto_rename_nodes = self._config['automatic_rename_nodes'] or self._config['automatic_rename_kit_nodes'] or self._config['automatic_rename_generic_nodes']
self._ser_error_cnt = 0
self.ser = None
self.mqttc = paho.mqtt.client.Client()
self.mqttc.on_connect = self.mqtt_on_connect
self.mqttc.on_message = self.mqtt_on_message
self.mqttc.on_disconnect = self.mqtt_on_disconnect
self.mqttc.message_callback_add(config['base_topic_prefix'] + "gateway/ping", self.gateway_ping)
self.mqttc.message_callback_add(config['base_topic_prefix'] + "gateway/all/info/get", self.gateway_all_info_get)
self._msg_retain = config['retain_node_messages']
self._msg_qos = config['qos_node_messages']
self.mqttc.username_pw_set(config['mqtt'].get('username'), config['mqtt'].get('password'))
if config['mqtt'].get('cafile'):
self.mqttc.tls_set(config['mqtt'].get('cafile'), config['mqtt'].get('certfile'), config['mqtt'].get('keyfile'))
self._rename()
def _serial_disconnect(self):
logging.info('Disconnect serial port')
self._info_id = None
self._info = None
self._rename()
self._alias_list = {}
self._alias_action = {}
for address in list(self._nodes.keys()):  # copy the keys, node_remove() mutates the dict
self.node_remove(address)
self.gateway_all_info_get()
def _run(self):
self.ser = serial.Serial(self._config['device'], baudrate=115200, timeout=3.0)
logging.info('Opened serial port: %s', self._config['device'])
self._ser_error_cnt = 0
if platform.system() == 'Linux':
fcntl.flock(self.ser.fileno(), fcntl.LOCK_EX | fcntl.LOCK_NB)
logging.debug('Exclusive lock on file descriptor: %d' % self.ser.fileno())
self.ser.reset_input_buffer()
self.ser.reset_output_buffer()
self.ser.write(b'\n')
self.write("/info/get", None)
while True:
try:
line = self.ser.readline()
except serial.SerialException:
self.ser.close()
self._serial_disconnect()
raise
if line:
logging.debug("read %s", line)
if line[0] == 0:
i = 1
while line[i] == 0 and i < len(line):
i += 1
line = line[i:]
line = line.decode()
if line[0] == '#':
self.log_message(line)
continue
try:
talk = json.loads(line, parse_float=decimal.Decimal)
if len(talk) != 2:
raise Exception
except Exception:
logging.warning('Invalid JSON message received from serial port: %s', line)
if self._info is None:
self.write("/info/get", None)
continue
subtopic = talk[0]
if subtopic[0] == "$":
self.sys_message(subtopic, talk[1])
elif subtopic[0] == "/":
self.gateway_message(subtopic, talk[1])
else:
self.node_message(subtopic, talk[1])
def start(self, reconect):
logging.info('Start')
logging.info('Serial port: %s', self._config['device'])
logging.info('MQTT broker host: %s, port: %d, use tls: %s',
self._config['mqtt']['host'],
int(self._config['mqtt']['port']),
bool(self._config['mqtt'].get('cafile')))
self.mqttc.connect_async(self._config['mqtt']['host'], int(self._config['mqtt']['port']), keepalive=10)
self.mqttc.loop_start()
while True:
try:
self._run()
except serial.serialutil.SerialException as e:
if e.errno == 2 and self._ser_error_cnt == 0:
logging.error('Could not open port %s' % self._config['device'])
self._ser_error_cnt += 1
except Exception as e:
logging.error(e)
if os.getenv('DEBUG', False):
raise e
if not reconect:
break
time.sleep(3)
def mqtt_on_connect(self, client, userdata, flags, rc):
logging.info('Connected to MQTT broker with code %s', rc)
lut = {paho.mqtt.client.CONNACK_REFUSED_PROTOCOL_VERSION: 'incorrect protocol version',
paho.mqtt.client.CONNACK_REFUSED_IDENTIFIER_REJECTED: 'invalid client identifier',
paho.mqtt.client.CONNACK_REFUSED_SERVER_UNAVAILABLE: 'server unavailable',
paho.mqtt.client.CONNACK_REFUSED_BAD_USERNAME_PASSWORD: 'bad username or password',
paho.mqtt.client.CONNACK_REFUSED_NOT_AUTHORIZED: 'not authorised'}
if rc != paho.mqtt.client.CONNACK_ACCEPTED:
logging.error('Connection refused from reason: %s', lut.get(rc, 'unknown code'))
if rc == paho.mqtt.client.CONNACK_ACCEPTED:
for topic in self._sub:
logging.debug('subscribe %s', topic)
client.subscribe(self._config['base_topic_prefix'] + topic)
def mqtt_on_disconnect(self, client, userdata, rc):
logging.info('Disconnect from MQTT broker with code %s', rc)
def mqtt_on_message(self, client, userdata, message):
payload = message.payload.decode('utf-8')
topic = message.topic[len(self._config['base_topic_prefix']):]
logging.debug('mqtt_on_message %s %s', message.topic, message.payload)
if payload == '':
payload = 'null'
try:
payload = json.loads(payload)
except Exception as e:
logging.error('parse json ' + str(message.topic) + ' ' + str(message.payload) + ' ' + str(e))
return
if topic.startswith("gateway"):
subtopic = topic[8 + len(self._name):]
if subtopic == '/alias/set':
if "id" in payload and "alias" in payload:
self.node_rename(payload["id"], payload["alias"])
return
elif subtopic == '/alias/remove':
if payload:
self.node_rename(payload, None)
return
else:
subtopic = topic[5:]
self.write(subtopic, payload)
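# Illustrative example (added; not from the original source): renaming a node over MQTT,
# assuming an empty base_topic_prefix and a gateway named "usb-dongle":
#   topic:   gateway/usb-dongle/alias/set
#   payload: {"id": "836d19833602", "alias": "kitchen-thermostat"}
# which resolves to the '/alias/set' subtopic handled above and calls node_rename() with
# that id and alias (the id and alias values are made up for the example).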
def write(self, topic, payload):
if not self.ser:
return
if isinstance(topic, list):
topic = '/'.join(topic)
if topic[0] != '/' or topic[0] == '$':
i = topic.find('/')
node_name = topic[:i]
node_id = self._node_rename_name.get(node_name, None)
if node_id:
topic = node_id + topic[i:]
line = json.dumps([topic, payload], use_decimal=True) + '\n'
line = line.encode('utf-8')
logging.debug("write %s", line)
self.ser.write(line)
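# Added note (illustration, not from the original source): every line written to the serial
# port is a newline-terminated JSON array of [topic, payload], for example
#   ["$eeprom/alias/list", 0]
# or, after an aliased node name has been resolved back to its id,
#   ["836d19833602/led/-/state/set", true]
# (the node id and subtopic in the second line are made-up examples).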
def publish(self, topic, payload):
if isinstance(topic, list):
topic = '/'.join(topic)
self.mqttc.publish(self._config['base_topic_prefix'] + topic, json.dumps(payload, use_decimal=True), qos=1)
def log_message(self, line):
logging.debug('log_message %s', line)
if self._name:
level_char = line[line.find("<") + 1]
self.publish(['log', self._name, log_level_lut[level_char]], line[1:].strip())
def gateway_ping(self, *args):
if self._name:
self.publish("gateway/pong", self._name)
def gateway_all_info_get(self, *args):
if self._name:
self.publish(["gateway", self._name, "info"], self._info)
def sys_message(self, topic, payload):
# logging.debug("on_sys_message %s %s", topic, payload)
if topic.startswith("$eeprom/alias/list/"):
topic, page = topic.rsplit('/', 1)
self._alias_list.update(payload)
if len(payload) == 8:
self.write("$eeprom/alias/list", int(page) + 1)
else:
logging.debug("alias_list: %s", self._alias_list)
for address, name in self._alias_list.items():
self.node_rename(address, name)
self.write("/nodes/get", None)
if topic == "$eeprom/alias/add/ok":
if self._alias_action[payload] == 'add':
del self._alias_action[payload]
self.publish(["gateway", self._name, "alias/set/ok"], {'id': payload, 'alias': self._alias_list.get(payload, None)})
self._alias_action_next()
elif topic == "$eeprom/alias/remove/ok":
if self._alias_action[payload] == 'remove':
del self._alias_action[payload]
self.publish(["gateway", self._name, "alias/remove/ok"], {'id': payload, 'alias': self._alias_list.get(payload, None)})
self._alias_action_next()
def gateway_message(self, topic, payload):
if "/info" == topic:
# TODO: remove in the future
if 'address' in payload:
payload['id'] = payload['address']
del payload['address']
if payload['id'] == '000000000000':
self.write("/info/get", None)
return
self._info_id = payload['id']
self._info = payload
self._rename()
if self._info["firmware"].startswith("bcf-gateway-core-module") or self._info["firmware"].startswith("bcf-usb-gateway"):
self._node_rename_id[self._info_id] = self._name
self._node_rename_name[self._name] = self._info_id
self.node_add(self._info_id)
self.write("$eeprom/alias/list", 0)
elif "/nodes" == topic:
for i, node in enumerate(payload):
if not isinstance(node, dict):
node = {"id": node}
self.node_add(node["id"])
name = self._node_rename_id.get(node["id"], None)
if name:
node["alias"] = name
info = self._nodes[node["id"]].get('info')
if info:
node['firmware'] = info.get('firmware')
node['version'] = info.get('version')
payload[i] = node
elif "/attach" == topic:
self.node_add(payload)
elif "/detach" == topic:
self.node_remove(payload)
if self._name:
self.publish(["gateway", self._name, topic[1:]], payload)
def node_message(self, subtopic, payload):
node_ide, topic = subtopic.split('/', 1)
try:
node_name = self._node_rename_id.get(node_ide, None)
if node_name:
subtopic = node_name + '/' + topic
self.mqttc.publish(self._config['base_topic_prefix'] + "node/" + subtopic, json.dumps(payload, use_decimal=True), qos=self._msg_qos, retain=self._msg_retain)
except Exception:
logging.error('Failed to publish MQTT message: %s, %s', subtopic, payload)
raise
logging.debug('topic %s', topic)
if topic == 'info' and 'firmware' in payload:
self._nodes[node_ide]['info'] = payload
self._save_nodes_json()
if self._auto_rename_nodes:
if node_ide not in self._node_rename_id:
name_base = None
if self._config['automatic_rename_generic_nodes'] and payload['firmware'].startswith("generic-node"):
name_base = 'generic-node'
elif self._config['automatic_rename_nodes']:
name_base = payload['firmware']
if self._config['automatic_remove_kit_from_names'] and name_base.startswith("kit-"):
name_base = name_base[4:]
if name_base:
for i in range(0, 32):
name = name_base + ':' + str(i)
if name not in self._node_rename_name:
self.node_rename(node_ide, name)
return
def sub_add(self, topic):
if isinstance(topic, list):
topic = '/'.join(topic)
if topic not in self._sub:
logging.debug('subscribe %s', topic)
self._sub.update([topic])
self.mqttc.subscribe(self._config['base_topic_prefix'] + topic)
def sub_remove(self, topic):
if isinstance(topic, list):
topic = '/'.join(topic)
if topic in self._sub:
logging.debug('unsubscribe %s', topic)
self._sub.remove(topic)
self.mqttc.unsubscribe(self._config['base_topic_prefix'] + topic)
def node_add(self, address):
if address in self._nodes:
return
logging.debug('node_add %s', address)
self._nodes[address] = {}
if address in self._cache_nodes:
info = self._cache_nodes[address].get('info')
if info:
self._nodes[address]['info'] = info
self.sub_add(['node', address, '+/+/+/+'])
name = self._node_rename_id.get(address, None)
if name:
self.sub_add(['node', name, '+/+/+/+'])
def node_remove(self, address):
logging.debug('node_remove %s', address)
if address not in self._nodes:
logging.debug('address not in self._nodes %s', address)
return
del self._nodes[address]
self.sub_remove(['node', address, '+/+/+/+'])
name = self._node_rename_id.get(address, None)
if name:
self.sub_remove(['node', name, '+/+/+/+'])
if address in self._alias_list and self._alias_list[address] == name:
self._alias_remove(address)
if address not in self._config['rename']:
self._node_rename_id.pop(address, None)
if self._node_rename_name[name] == address:
self._node_rename_name.pop(name, None)
def node_rename(self, address, name):
logging.debug('node_rename %s to %s', address, name)
if name in self._node_rename_name:
logging.debug('name already exists %s to %s', address, name)
return False
old_name = self._node_rename_id.get(address, None)
if old_name:
self.sub_remove(['node', old_name, '+/+/+/+'])
del self._node_rename_name[old_name]
if name:
self._node_rename_id[address] = name
self._node_rename_name[name] = address
if address in self._nodes:
self.sub_add(['node', name, '+/+/+/+'])
if address not in self._alias_list or self._alias_list[address] != name:
self._alias_add(address, name)
else:
if old_name:
del self._node_rename_id[address]
self.sub_add(['node', address, '+/+/+/+'])
self._alias_remove(address)
# if 'config_file' in self._config:
# with open(self._config['config_file'], 'r') as f:
# config_yaml = yaml.load(f)
# config_yaml['rename'] = self._node_rename_id
# with open(self._config['config_file'], 'w') as f:
# yaml.safe_dump(config_yaml, f, indent=2, default_flow_style=False)
return True
def _alias_add(self, address, alias):
if address in self._alias_list and self._alias_list[address] == alias:
return
self._alias_list[address] = alias
self._alias_action[address] = 'add'
if len(self._alias_action) == 1:
self.write('$eeprom/alias/add', {'id': address, 'name': alias})
def _alias_remove(self, address):
if address not in self._alias_list:
return
del self._alias_list[address]
self._alias_action[address] = 'remove'
if len(self._alias_action) == 1:
self.write('$eeprom/alias/remove', address)
def _alias_action_next(self):
if not self._alias_action:
return
for address in self._alias_action:
action = self._alias_action[address]
if action == 'add':
name = self._alias_list[address]
self.write('$eeprom/alias/add', {'id': address, 'name': name})
else:
self.write('$eeprom/alias/remove', address)
return
def _rename(self):
if self._name:
self.sub_remove(["gateway", self._name, '+/+'])
self._name = None
self._data_dir = None
self._cache_nodes = {}
name = self._config.get('name')
if name:
if "{ip}" in name:
ip = None
try:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("8.8.8.8", 80))
ip = s.getsockname()[0]
except Exception:
return
name = name.replace("{ip}", ip)
if "{id}" in name:
if not self._info_id:
return
name = name.replace("{id}", self._info_id)
elif name is None and self._info and 'firmware' in self._info:
name = self._info['firmware'].replace('bcf-gateway-', '', 1)
name = name.split(':', 1)[0]
self._name = name
# change layers widget to prevent recursion
self.w_layers.unobserve(self._on_change_layers, names="value")
self.w_layers.value = len(self.model.structure) - 2
self.w_layers.observe(self._on_change_layers, names="value")
self.w_layers.disabled = False
self.do_fit_button.disabled = False
self.to_code_button.disabled = False
self.save_model_button.disabled = False
self.load_model_button.disabled = False
self._varying_layers = False
self.view_redraw = time.time()
def refresh(self):
"""
Updates the widget values from the underlying `ReflectModel`.
"""
for par in [self.model.scale, self.model.bkg, self.model.dq]:
wid = self.param_widgets_link[id(par)]
wid[0].value = par.value
wid[1].value = par.vary
wid[2].value = par.bounds.lb
wid[3].value = par.bounds.ub
slab_views = self.structure_view.slab_views
for slab_view in slab_views:
slab_view.refresh()
@property
def model_box(self):
"""
`ipywidgets.Vbox` displaying model relevant widgets.
"""
output = [
self.w_layers,
widgets.HBox([self.w_scale, self.c_scale, self.w_dq, self.c_dq]),
widgets.HBox([self.w_bkg, self.c_bkg]),
self.structure_view.box,
widgets.HBox(
[
self.model_slider_min,
self.model_slider,
self.model_slider_max,
]
),
]
if self._varying_layers:
output.append(
widgets.HBox(
[self._location, self.ok_button, self.cancel_button]
)
)
output.append(
widgets.HBox(
[
self.do_fit_button,
self.to_code_button,
self.save_model_button,
self.load_model_button,
]
)
)
return widgets.VBox(output)
@property
def limits_box(self):
varying_pars = self.model.parameters.varying_parameters()
hboxes = [self.default_limits_button]
d = {}
d.update(self.param_widgets_link)
slab_views = self.structure_view.slab_views
for slab_view in slab_views:
d.update(slab_view.param_widgets_link)
for par in varying_pars:
name = widgets.Text(par.name)
name.disabled = True
val, check, ll, ul = d[id(par)]
hbox = widgets.HBox([name, ll, val, ul])
hboxes.append(hbox)
return widgets.VBox(hboxes)
class StructureView(object):
def __init__(self, structure):
self.structure = structure
self.slab_views = [SlabView(slab) for slab in structure]
@property
def box(self):
layout = widgets.Layout(flex="1 1 auto", width="auto")
label_row = widgets.HBox(
[
widgets.HTML("thick", layout=layout),
widgets.HTML("sld", layout=layout),
widgets.HTML("isld", layout=layout),
widgets.HTML("rough", layout=layout),
]
)
hboxes = [label_row]
hboxes.extend([view.box for view in self.slab_views])
# add in layer numbers
self.slab_views[0].w_thick.description = "fronting"
self.slab_views[-1].w_thick.description = "backing"
for i in range(1, len(self.slab_views) - 1):
self.slab_views[i].w_thick.description = str(i)
return widgets.VBox(hboxes)
class SlabView(HasTraits):
"""
An ipywidgets viewport of a `refnx.reflect.Slab`.
Parameters
----------
slab: refnx.reflect.Slab
Notes
-----
An ipywidgets viewport of a `refnx.reflect.Slab`.
Use the `box` property to view/modify the `Slab` parameters.
Observe the `view_changed` traitlet to determine when widget values are
changed.
"""
# traitlet to say when params were last altered
view_changed = traitlets.Float(time.time())
def __init__(self, slab):
self.slab = slab
self.param_widgets_link = {}
self.widgets_param_link = {}
self.param_being_varied = None
p = slab.thick
self.w_thick = widgets.FloatText(value=p.value, step=1)
self.widgets_param_link[self.w_thick] = p
self.c_thick = widgets.Checkbox(value=p.vary)
self.thick_low_limit = widgets.FloatText(value=p.bounds.lb, step=1)
self.thick_hi_limit = widgets.FloatText(value=p.bounds.ub, step=1)
p = slab.sld.real
self.w_sld = widgets.FloatText(value=p.value, step=0.01)
self.widgets_param_link[self.w_sld] = p
self.c_sld = widgets.Checkbox(value=p.vary)
self.sld_low_limit = widgets.FloatText(value=p.bounds.lb, step=0.01)
self.sld_hi_limit = widgets.FloatText(value=p.bounds.ub, step=0.01)
p = slab.sld.imag
self.w_isld = widgets.FloatText(value=p.value, step=0.01)
self.widgets_param_link[self.w_isld] = p
self.c_isld = widgets.Checkbox(value=p.vary)
self.isld_low_limit = widgets.FloatText(value=p.bounds.lb, step=0.01)
self.isld_hi_limit = widgets.FloatText(value=p.bounds.ub, step=0.01)
p = slab.rough
self.w_rough = widgets.FloatText(value=p.value, step=1)
self.widgets_param_link[self.w_rough] = p
self.c_rough = widgets.Checkbox(value=p.vary)
self.rough_low_limit = widgets.FloatText(value=p.bounds.lb, step=0.01)
self.rough_hi_limit = widgets.FloatText(value=p.bounds.ub, step=0.01)
self._widget_list = [
self.w_thick,
self.c_thick,
self.w_sld,
self.c_sld,
self.w_isld,
self.c_isld,
self.w_rough,
self.c_rough,
]
self._limits_list = [
self.thick_low_limit,
self.thick_hi_limit,
self.sld_low_limit,
self.sld_hi_limit,
self.isld_low_limit,
self.isld_hi_limit,
self.rough_low_limit,
self.rough_hi_limit,
]
# link widgets to observers
for widget in [self.w_thick, self.w_sld, self.w_isld, self.w_rough]:
widget.style.description_width = "0px"
widget.observe(self._on_slab_values_modified, names="value")
self.w_thick.style.description_width = "50px"
for widget in [self.c_thick, self.c_sld, self.c_isld, self.c_rough]:
widget.style.description_width = "0px"
widget.observe(self._on_slab_varies_modified, names="value")
for widget in self._limits_list:
widget.observe(self._on_slab_limits_modified, names="value")
self._link_param_widgets()
def _on_slab_values_modified(self, change):
d = self.widgets_param_link
d[change["owner"]].value = change["owner"].value
self.param_being_varied = change["owner"]
self.view_changed = time.time()
def _on_slab_varies_modified(self, change):
d = self.param_widgets_link
slab = self.slab
for par in flatten(slab.parameters):
if id(par) in d and change["owner"] in d[id(par)]:
wids = d[id(par)]
par.vary = wids[1].value
break
self.param_being_varied = change["owner"]
self.view_changed = time.time()
def _on_slab_limits_modified(self, change):
slab = self.slab
d = self.param_widgets_link
for par in flatten(slab.parameters):
if id(par) in d and change["owner"] in d[id(par)]:
wids = d[id(par)]
loc = wids.index(change["owner"])
if loc == 2:
par.bounds.lb = wids[loc].value
break
elif loc == 3:
par.bounds.ub = wids[loc].value
break
else:
return
def _link_param_widgets(self):
"""
Creates a dictionary of {parameter: (associated_widgets_tuple)}.
"""
# link parameters to widgets (value, checkbox,
# upperlim, lowerlim)
d = self.param_widgets_link
d[id(self.slab.thick)] = (
self.w_thick,
self.c_thick,
self.thick_low_limit,
self.thick_hi_limit,
)
d[id(self.slab.sld.real)] = (
self.w_sld,
self.c_sld,
self.sld_low_limit,
self.sld_hi_limit,
)
d[id(self.slab.sld.imag)] = (
self.w_isld,
self.c_isld,
self.isld_low_limit,
self.isld_hi_limit,
)
d[id(self.slab.rough)] = (
self.w_rough,
self.c_rough,
self.rough_low_limit,
self.rough_hi_limit,
)
def refresh(self):
"""
Updates the widget values from the underlying `Slab` parameters.
"""
d = self.param_widgets_link
ids = {id(p): p for p in flatten(self.slab.parameters) if id(p) in d}
for idx, par in ids.items():
widgets = d[idx]
widgets[0].value = par.value
widgets[1].value = par.vary
widgets[2].value = par.bounds.lb
widgets[3].value = par.bounds.ub
@property
def box(self):
return widgets.HBox(self._widget_list)
class Motofit(object):
"""
An interactive slab modeller (Jupyter/ipywidgets based) for Neutron and
X-ray reflectometry data.
The interactive modeller is designed to be used in a Jupyter notebook.
>>> # specify that plots are in a separate graph window
>>> %matplotlib qt
>>> # alternately if you want the graph to be embedded in the notebook use
>>> # %matplotlib notebook
>>> from refnx.reflect import Motofit
>>> # create an instance of the modeller
>>> app = Motofit()
>>> # display it in the notebook by calling the object with a datafile.
>>> app('dataset1.txt')
>>> # lets fit a different dataset
>>> app2 = Motofit()
>>> app2('dataset2.txt')
The `Motofit` instance has several useful attributes that can be used in
other cells. For example, one can access the `objective` and `curvefitter`
attributes for more advanced fitting functionality than is available in the
GUI. A `code` attribute can be used to retrieve a Python code fragment that
can be used as a basis for developing more complicated models, such as
interparameter constraints, global fitting, etc.
Attributes
----------
dataset: :class:`refnx.dataset.Data1D`
The dataset associated with the modeller
model: :class:`refnx.reflect.ReflectModel`
Calculates a theoretical model, from an interfacial structure
(`model.Structure`).
objective: :class:`refnx.analysis.Objective`
The Objective that allows one to compare the model against the data.
fig: :class:`matplotlib.figure.Figure`
Graph displaying the data.
"""
def __init__(self):
# attributes for the graph
# for the graph
self.qmin = 0.005
self.qmax = 0.5
self.qpnt = 1000
self.fig = None
self.ax_data = None
self.ax_residual = None
self.ax_sld = None
# gridspecs specify how the plots are laid out. Gridspec1 is when the
# residuals plot is displayed. Gridspec2 is when it's not visible
self._gridspec1 = gridspec.GridSpec(
2, 2, height_ratios=[5, 1], width_ratios=[1, 1], hspace=0.01
)
self._gridspec2 = gridspec.GridSpec(1, 2)
self.theoretical_plot = None
self.theoretical_plot_sld = None
# attributes for a user dataset
self.dataset = None
self.objective = None
self._curvefitter = None
self.data_plot = None
self.residuals_plot = None
self.data_plot_sld = None
self.dataset_name = widgets.Text(description="dataset:")
self.dataset_name.disabled = True
self.chisqr = widgets.FloatText(description="chi-squared:")
self.chisqr.disabled = True
# fronting
slab0 = Slab(0, 0, 0)
slab1 = Slab(25, 3.47, 3)
slab2 = Slab(0, 2.07, 3)
structure = slab0 | slab1 | slab2
rename_params(structure)
self.model = ReflectModel(structure)
# give some default parameter limits
self.model.scale.bounds = (0.1, 2)
self.model.bkg.bounds = (1e-8, 2e-5)
self.model.dq.bounds = (0, 20)
for slab in self.model.structure:
slab.thick.bounds = (0, 2 * slab.thick.value)
slab.sld.real.bounds = (0, 2 * slab.sld.real.value)
slab.sld.imag.bounds = (0, 2 * slab.sld.imag.value)
slab.rough.bounds = (0, 2 * slab.rough.value)
# the main GUI widget
self.display_box = widgets.VBox()
self.tab = widgets.Tab()
self.tab.set_title(0, "Model")
self.tab.set_title(1, "Limits")
self.tab.set_title(2, "Options")
self.tab.observe(self._on_tab_changed, names="selected_index")
# an output area for messages.
self.output = widgets.Output()
# options tab
self.plot_type = widgets.Dropdown(
options=["lin", "logY", "YX4", "YX2"],
value="lin",
description="Plot Type:",
disabled=False,
)
self.plot_type.observe(self._on_plot_type_changed, names="value")
self.use_weights = widgets.RadioButtons(
options=["Yes", "No"],
value="Yes",
description="use dataset weights?",
style={"description_width": "initial"},
)
self.use_weights.observe(self._on_use_weights_changed, names="value")
self.transform = Transform("lin")
self.display_residuals = widgets.Checkbox(
value=False, description="Display residuals"
)
self.display_residuals.observe(
self._on_display_residuals_changed, names="value"
)
self.model_view = None
self.set_model(self.model)
def save_model(self, *args, f=None):
"""
Serialise a model to a pickle file.
If `f` is not specified then the file name is constructed from the
current dataset name; if there is no current dataset then the filename
is constructed from the current time. These constructed filenames will
be in the current working directory, for a specific save location `f`
must be provided.
This method is only intended to be used to serialise models created by
this interactive Jupyter widget modeller.
Parameters
----------
f: file like or str, optional
File to save model to.
"""
if f is None:
f = "model_" + datetime.datetime.now().isoformat() + ".pkl"
if self.dataset is not None:
f = "model_" + self.dataset.name + ".pkl"
with possibly_open_file(f) as g:
pickle.dump(self.model, g)
def load_model(self, *args, f=None):
"""
Load a serialised model.
If `f` is | |
# animation frame to a static display list so set to false
incAnimFrame = False
animFrame_setting = ""
#END------------------------------------------Static/Dynamic Display List Settings-------------------------------------------
#testString = obj.obj_props.sort_Method
#---------------------------------------------------------------------------------------------------------
#EXPORTER_NOTES: Many of the most used variables in Poly_Xport64 are defined in __init__.py
#---------------------------------------------------------------------------------------------------------
bitshift = 6 # Bitshift mod
loadlim = 29 # Amount of verts the system will load at a time, 32 max limit
exportPolyList = True
path = 'Users\micha\Documents\Game Design' #TEST Exporting C files
#TO_REMOVE#filename = path + obj.name + '.c'
#TO_REMOVE#textureCounter = 0
#ANIMATION Test for animations in scene...
scene = bpy.context.scene
sceneprops = scene.scene_props
frame_current = scene.frame_current
r = range(scene.frame_start, scene.frame_end + 1)
l = len(str(len(r)))
#TO_REMOVE#obs = []
#TO_REMOVE#trans = bpy.context.object.matrix_world
name = self.clean_name(obj.name)
vert = obj.data.vertices
poly = obj.data.polygons
uv = obj.data.uv_layers.active
#---------------------------------------------------------------------------------------------------------
#EXPORTER_NOTES: obj.obj_props.anim_Method == "OBJ VARIABLES" checks for whether the user has chosen the Pos, Rot, Scl option for animation export
#---------------------------------------------------------------------------------------------------------
if obj.obj_props.anim_Method == "OBJ VARIABLES":
bpy.context.area.type = 'DOPESHEET_EDITOR'
bpy.context.space_data.mode = 'ACTION'
self.tempAnimAction = bpy.context.object.animation_data.action
#self.tempAnimAction = bpy.data.actions.name
print("Current Action on Export: %s " % self.tempAnimAction)
for animName in bpy.data.actions:
#NOTE ----- Store name of current action as well as the pos,rot, and scale of current frame
# Duplicate frames will be ignored by default and the number of duplicates is stored in animDupCount
animAction = ""
animLastPos = {0,0,0}
animLastRot = {0,0,0}
animLastScl = {0,0,0}
animDupCount = 0
keyFrames = 0
finalKeyFrame = 0
print("\n Action Name: %s \n" % animName.name)
animAction = animName.name
bpy.context.object.animation_data.action = bpy.data.actions[animAction]
#NOTE ----- Get Keyframes of the current animation action
for fcu in bpy.data.actions[animAction].fcurves:
finalKeyFrame = 0
#print(fcu.data_path + " channel " + str(fcu.array_index))
for keyframe in fcu.keyframe_points:
#keyFrames += 1
#keyframe.
finalKeyFrame = bpy.context.scene.frame_current
#exportFrame = bpy.context.scene.frame_current
#print("animAction Keyframe %i: %i" % (keyFrames, finalKeyFrame)) #coordinates x,y
o.write("RigAnimation %s_anim_%s[] = { \n \n" % (name,animAction)) #NOTE ----- begin animation structure in .c file:
#bpy.context.area.type = 'INFO'
#bpy.context.object.animation_data.action = bpy.data.action['playerStartJump']
#for action in obj.animation_data:
#obj.animation_data.action = playerStartJump
#bpy.context.animation_data.action = 'playerStartJump'
#o.write("\n //ANIMATIONS ON OBJECT: %s \n \n" % obj.animation_data.action.name) #NOTE ----- begin animation structure in .c file:
#---------------------------------------------------------------------------------------------------------
#EXPORTER_NOTES: Certain functions and variables can be defined/declared in a .h script if the user enables it with 'sceneprops.create_header_file'
#TO_DO_NOTE: This functionality will be fleshed out in future updates.
#---------------------------------------------------------------------------------------------------------
if sceneprops.create_header_file == True: #NOTE ----- Save these commands in a string for later use in .h file for defintions and declarations:
self.definitionsFile[self.commandCount] = ("extern RigAnimation %s_anim_%s[];\n" % (name,animAction))
self.definitionsFile.append([])
self.commandCount +=1
for f in r:
scene.update()
scene.frame_set(f)
objPos = obj.matrix_world.to_translation() #records position of bone to the object
objRot = obj.matrix_world.to_euler() #records rotation of bone to the object
objScl = obj.matrix_world.to_scale() #records scale of bone to the object
rot_angles_X = math.degrees(objRot[0]) #converts to degrees
rot_angles_Y = math.degrees(objRot[1]) #converts to degrees
rot_angles_Z = math.degrees(objRot[2]) #converts to degrees
# TO DO NOTE ----- Have a setting for a max number of printed duplicate frames?
# Allow user to skip "duplicate" frames or not?
# Or have "hold" frames as an option in the final export?
# Not sure how to implement but looking into it...
#NOTE ----- Check current frame against previous frames to see if a change occurs...
if self.skipDupFrames == True: #Runs if user chooses to skip any duplicate frames
if animLastPos == objPos and animLastRot == objRot and animLastScl == objScl:
if animDupCount <= 0:
print("\n")
animDupCount += 1
print("Found Duplicate Frame # %i..." % f)
else:
if animDupCount > 0:
o.write("\n//Duplicate Frames: %i \n" % animDupCount)
animDupCount = 0
o.write("%2f, %2f, %2f,\n %2f, %2f, %2f,\n %2f, %2f, %2f,\n " % (objPos.x*self.scale, objPos.y*self.scale, objPos.z*self.scale, rot_angles_X, rot_angles_Y, rot_angles_Z, objScl.x, objScl.y, objScl.z))
else: #Runs if user chooses to include any duplicate animation frames (self.skipDupFrames == False)
o.write("%2f, %2f, %2f,\n %2f, %2f, %2f,\n %2f, %2f, %2f,\n " % (objPos.x*self.scale, objPos.y*self.scale, objPos.z*self.scale, rot_angles_X, rot_angles_Y, rot_angles_Z, objScl.x, objScl.y, objScl.z))
#NOTE ----- store current pos, rot, scl for comparison with next frame to see if they are duplicate frames
animLastPos = objPos
animLastRot = objRot
animLastScl = objScl
if animDupCount > 0:
o.write("\n//Duplicate Frames: %i \n" % animDupCount)
animDupCount = 0
o.write("\n};\n\n")
#---------------------------------------------------------------------------------------------------------
#TO_DO_NOTE: Part of the "OBJ VARIABLES" is commented out as this is version. Right now user must use their own method for rigging.
# However, a simple rigging template is going to be available with the next version of the exporter and demo game.
# - Include more options for a "rig" name / structure
# - Finish setting up rig assignment section of *_UpdateFrame
#---------------------------------------------------------------------------------------------------------
#START------------------------------------------WIP Animation Rig Settings-------------------------------------------
if obj.obj_props.update_frame_function == True:
o.write("void %s_UpdateFrame( %s ){ \n" % (name, animFrame_setting))
o.write(" %s_TempVectorPos = %s_anim[animFrame].pos;\n" % (scene.scene_props.current_scene_id, name))
o.write(" %s_TempVectorRot = %s_anim[animFrame].rot;\n" % (scene.scene_props.current_scene_id, name))
o.write(" %s_TempVectorScl = %s_anim[animFrame].scl;\n\n" % (scene.scene_props.current_scene_id, name))
o.write(" %s_TempVectorPos = RotateAround(%s_TempVectorPos, playerAvatar.rigidBody.centerMass, playerAvatar.obj.rot.z );\n" % (scene.scene_props.current_scene_id, scene.scene_props.current_scene_id))
if obj.obj_props.rig_Method == "XPORT64 RIG":
#NOTE ----- If using the default Xport64 rig:
o.write(" SetVector3(&%s.animRig.joint.%s.pos, %s_TempVectorPos.x,%s_TempVectorPos.y,%s_TempVectorPos.z);\n" % (obj.obj_props.rig_Template_Name, obj.obj_props.joint_Template_Name, scene.scene_props.current_scene_id, scene.scene_props.current_scene_id, scene.scene_props.current_scene_id))
o.write(" SetVector3(&%s.animRig.joint.%s.rot, %s_TempVectorRot.x,%s_TempVectorRot.y,%s_TempVectorRot.z);\n" % (obj.obj_props.rig_Template_Name, obj.obj_props.joint_Template_Name, scene.scene_props.current_scene_id, scene.scene_props.current_scene_id, scene.scene_props.current_scene_id))
o.write(" SetVector3(&%s.animRig.joint.%s.scl, %s_TempVectorScl.x,%s_TempVectorScl.y,%s_TempVectorScl.z);\n" % (obj.obj_props.rig_Template_Name, obj.obj_props.joint_Template_Name, scene.scene_props.current_scene_id, scene.scene_props.current_scene_id, scene.scene_props.current_scene_id))
#o.write(" //NOTE: Assign values to your rig here. For instance: \n")
elif obj.obj_props.rig_Method == "CUSTOM FUNCTION":
o.write(" //NOTE: You're using a custom rig command: \n")
o.write(" %s \n" % obj.obj_props.custom_Rig_Function)
#o.write(" //SetVector3(&playerAvatar.animRig.joint.armR[0].pos, tempObjVectorPos.x,tempObjVectorPos.y,tempObjVectorPos.z);\n")
o.write("\n } \n")
if sceneprops.create_header_file == True: #NOTE ----- If user selects to export definitions / declarations to a header file:
self.definitionsFile[self.commandCount] = ("extern void %s_UpdateFrame( %s );\n" % (name, animFrame_setting))
self.definitionsFile.append([])
self.commandCount +=1
#END--------------------------------------------WIP Animation Rig Settings--------------------------------------------
#if SHOWTIPS == True or DEBUG == True:
#o.write("//>-- ALERT ------------------------------------------- RigAnimation includes objects pos, rot, and scl each frame. Rig template is coming in next version. --<\n")
#o.write("//>-- ALERT ------------------------------------------- Right, now user must provide their own method for rigging and animating in their application. --<\n\n")
if obj.obj_props.sort_Method == "COLLISION" :
o.write("MeshColliderTri %s_MColTri[] = \n { \n" % (name))
#START------------------------------------------Begin structure of polylist function (display list commands)-------------------------------------------
elif obj.obj_props.static_DL != True:
o.write("\nvoid %s_PolyList( %s ){ \n\n Vtx *%s_VTXPointer = &%s_VertList_%i[0]; \n\n" % (name, animFrame_setting, name, name, scene.frame_start))
if sceneprops.create_header_file == True: #NOTE ----- If user selects to export definitions / declarations to a header file:
self.definitionsFile[self.commandCount] = ("extern void %s_PolyList( %s );\n" % (name, animFrame_setting))
self.definitionsFile.append([])
self.commandCount +=1
#START------------------------------------------Begin structure of polylist function (display list commands)-------------------------------------------
#---------------------------------------------------------------------------------------------------------
#EXPORTER_NOTES: The following switch statement is used inside of a dynamic display list if the user selects the 'VTX DL' option for animation at export.
# This creates a unique VTX list for each frame of animation and the switch statement cycles through that list based on the current frame of
# animation that is given to it. It is up to the application to provide the correct frame to the struct for animation.
#---------------------------------------------------------------------------------------------------------
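#EXAMPLE (illustrative only; "myObject" is a placeholder name): the emitted C roughly looks like
#    switch(animFrame)
#    {
#        case 1:
#            myObject_VTXPointer = &myObject_VertList_1[0];
#            break;
#        case 2:
#            myObject_VTXPointer = &myObject_VertList_2[0];
#            break;
#        /* ...one case per exported animation frame... */
#    }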
#START-------------------------------------------------------SWITCH STATEMENT------------------------------------------------------------------
if obj.obj_props.anim_Method == "VTX DL" and obj.obj_props.static_DL == False and incAnimFrame == True and obj.obj_props.sort_Method != "COLLISION":
o.write(" switch(animFrame)\n {\n")
modifiedUVValues = [[[]]] #make note of an update to a UV value using modifyVertex command. Reset
modifiedUVValues = copy.deepcopy(self.usedUVValues)
for f in r:
scene.update() #NOTE ----- each cycle, update the scene to the next animation frame
if obj.obj_props.static_DL == False and obj.obj_props.sort_Method != "COLLISION":
if obj.obj_props.anim_Method == "OBJ VARIABLES" or obj.obj_props.anim_Method == "VTX DL":
scene.frame_set(f)
if obj.obj_props.anim_Method == "VTX DL" and incAnimFrame == True:
o.write(" case %i:\n" % f)
else:
f = frame_current
if obj.obj_props.anim_Method == "VTX DL" and incAnimFrame == True:
o.write(" %s_VTXPointer = &%s_VertList_%i[0]; \n break;\n" % (name, name, f))
if obj.obj_props.anim_Method == "NO ANIM":
break
if | |
__author__ = "<NAME>"
__copyright__ = "Copyright 2019, Bluegem Engine"
__credits__ = ["<NAME>"]
__license__ = "GPL"
__version__ = "1.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Production"
'''
IMPORTANT NOTE
This python FBX importer Only works for
the FBX-ASCII format of Version 6.1.0 from 2006
'''
import os
import time
import argparse
class FbxParser:
def _create_posenodes(self, strnodes):
'''
Create the pose-nodes from a list of posenode strings
:param strnodes:
:return: dictionary of pose-node dicts
'''
nodes = list()
for nodestr in strnodes:
node = { }
for line in nodestr['lines']:
if line.startswith("Node: "):
nodeline = line.strip()[7: line.__len__()-1]
node['name'] = nodeline
matrixline = ''
readmatrix = False
for line in nodestr['lines']:
if line.startswith("Matrix: "):
readmatrix = True
nodeline = line.strip()[8:]
matrixline += nodeline
continue
if readmatrix:
matrixline += line
if "}" in line: break
matrixl = matrixline.split(",")
floats = list()
for i in range(0,matrixl.__len__()):
floats.append(float(matrixl[i]))
node['matrix'] = floats
nodes.append(node)
return {'posenodes' : nodes }
def _extractArray(self, lines, indexmode ):
'''
Converts a list of lines to a list of floats/ints,
while also translating the "indexed mode" from the fbx file
to the actual index listing.
Hint: Fbx indexed points come in triangles/squares,
so the end of each polygon has to be tagged somehow; therefore,
for a triangle, the last index is stored as the negative of
the actual index with 1 added.
Example: 0,1,-3 (the 3rd index is negative -> triangle);
the actual index is -3 * (-1) = 3, then 3 - 1 = 2,
so it decodes to: 0, 1, 2
:param lines:
:param indexmode:
:return:
'''
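# Worked example (illustrative index stream): [0, 1, -3, 2, 3, -5] decodes to
# [0, 1, 2, 2, 3, 4], because each negative value v marks the end of a polygon
# and unpacks to (v * -1) - 1, as described in the docstring above.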
try:
resline = ''
for line in lines:
resline += line + ','
floats = resline.replace(' ','').split(",")
float_list = list()
integer_list = list()
theindex = 0
for f in floats:
if indexmode is False:
if not f == '':
float_list.append(float(f))
theindex += 1
else:
if not f == '':
thei = int(f)
if thei < 0:
integer_list.append(thei * (-1)-1)
if thei >= 0:
integer_list.append(thei)
if indexmode is True:
return integer_list
else:
return float_list
except Exception as e:
print('Failed to extract array')
print(theindex)
raise e
def _extractkeys(self, lines: list):
'''
Extract keys and convert the key times from FBX time units (KTime ticks) to frame numbers
:param lines:
:return:
'''
try:
box = {}
resultString = ""
read = False
for line in lines:
if line.strip().startswith("Key: "):
read = True
if line.strip().startswith("Color: "):
read = False
if read is True:
resultString += line.strip()
keystrings = resultString.strip()[4:].split(",")
vals = list()
keys = list()
try:
i = 0
while i < keystrings.__len__():
float_val = float(keystrings[i + 1].strip())
key_int = int(keystrings[i + 0].strip())
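# NOTE (assumption): 1924423250 appears to be the FBX KTime tick count for one frame
# (FBX key times are 1/46186158000-second ticks; 46186158000 / 24 = 1924423250), so this
# maps the raw key time onto a 24 fps frame number.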
int_val = ((key_int - 1924423250) / 1924423250) + 1
vals.append(float_val)
keys.append(int_val)
i += 7
if keys.__len__() < 2 and keys.__len__() > 0:
vals.append(vals[0])
keys.append(keys[0])
except Exception as e:
print("Couldn't resolve keys to frames, adding empty values for both")
box['keys'] = list()
box['values'] = list()
return box
box['keys'] = keys
box['values'] = vals
return box
except Exception as e:
print('Failed to resolve keys')
raise e
def _create_animated_deformer(self, def_string):
'''
This function takes deformer strings and turns them into full
deformer objects, which are ready to be written to file.
:param def_string:
:return:
'''
name_len = len(def_string[0].strip())
deformer_name = def_string[0].strip()[8: name_len - 3]
channel_T = list()
sub_t_x = list()
sub_t_y = list()
sub_t_z = list()
channel_R = list()
sub_r_x = list()
sub_r_y = list()
sub_r_z = list()
channel_S = list()
sub_s_x = list()
sub_s_y = list()
sub_s_z = list()
bracketcounter = 0
#for Translates
for line in def_string:
if line.strip().startswith('Channel: \"T\"'):
bracketcounter += 1
channel_T.append(line)
continue
if '{' in line.strip() and bracketcounter > 0:
bracketcounter += 1
if '}' in line.strip() and bracketcounter > 0:
bracketcounter -= 1
if bracketcounter > 0:
channel_T.append(line)
#for Rotations
for line in def_string:
if line.strip().startswith('Channel: \"R\"'):
bracketcounter += 1
channel_R.append(line)
continue
if '{' in line.strip() and bracketcounter > 0:
bracketcounter += 1
if '}' in line.strip() and bracketcounter > 0:
bracketcounter -= 1
if bracketcounter > 0:
channel_R.append(line)
#for Scale
for line in def_string:
if line.strip().startswith('Channel: \"S\"'):
bracketcounter += 1
channel_S.append(line)
continue
if '{' in line.strip() and bracketcounter > 0:
bracketcounter += 1
if '}' in line.strip() and bracketcounter > 0:
bracketcounter -= 1
if bracketcounter > 0:
channel_S.append(line)
#dissecting the translate lines from the transform channel
bracketcounter = 0
i = 0
while i < channel_T.__len__():
if channel_T[i].strip().startswith('Channel \"T\" {'):
continue
if channel_T[i].strip().startswith('Channel \"X\" {'):
bracketcounter+= 1
sub_t_x.append(channel_T[i])
continue
if "{" in channel_T[i].strip():
bracketcounter += 1
if "}" in channel_T[i].strip():
bracketcounter -= 1
if "Channel: \"Y\" {" in channel_T[i].strip():
break
if bracketcounter > 0:
sub_t_x.append(channel_T[i])
i += 1
while i < channel_T.__len__():
if channel_T[i].strip().startswith('Channel \"Y\" {'):
bracketcounter+= 1
sub_t_y.append(channel_T[i])
continue
if "{" in channel_T[i].strip():
bracketcounter += 1
if "}" in channel_T[i].strip():
bracketcounter -= 1
if "Channel: \"Z\" {" in channel_T[i].strip():
break
if bracketcounter > 0:
sub_t_y.append(channel_T[i])
i += 1
bracketcounter = 0 #reset bracket depth before scanning the Z channel
while i < channel_T.__len__():
if channel_T[i].strip().startswith('Channel \"Z\" {'):
bracketcounter+= 1
sub_t_z.append(channel_T[i])
continue
if "{" in channel_T[i].strip():
bracketcounter += 1
if "}" in channel_T[i].strip():
bracketcounter -= 1
if "Color: " in channel_T[i].strip():
break
if bracketcounter > 0:
sub_t_z.append(channel_T[i])
i += 1
#dissecting the rotate lines from the transform channel
bracketcounter = 0
i = 0
while i < channel_R.__len__():
if channel_R[i].strip().startswith('Channel \"R\" {'):
continue
if channel_R[i].strip().startswith('Channel \"X\" {'):
bracketcounter+= 1
sub_r_x.append(channel_R[i])
continue
if "{" in channel_R[i].strip():
bracketcounter += 1
if "}" in channel_R[i].strip():
bracketcounter -= 1
if "Channel: \"Y\" {" in channel_R[i].strip():
break
if bracketcounter > 0:
sub_r_x.append(channel_R[i])
i += 1
while i < channel_R.__len__():
if channel_R[i].strip().startswith('Channel \"Y\" {'):
bracketcounter+= 1
sub_r_y.append(channel_R[i])
continue
if "{" in channel_R[i].strip():
bracketcounter += 1
if "}" in channel_R[i].strip():
bracketcounter -= 1
if "Channel: \"Z\" {" in channel_R[i].strip():
break
if bracketcounter > 0:
sub_r_y.append(channel_R[i])
i += 1
bracketcounter = 0
while i < channel_R.__len__():
if channel_R[i].strip().startswith('Channel \"Z\" {'):
bracketcounter+= 1
sub_r_z.append(channel_R[i])
continue
if "{" in channel_R[i].strip():
bracketcounter += 1
if "}" in channel_R[i].strip():
bracketcounter -= 1
if "Color: " in channel_R[i].strip():
break
if bracketcounter > 0:
sub_r_z.append(channel_R[i])
i += 1
#dissecting the scale lines from the transform channel
bracketcounter = 0
i = 0
while i < channel_S.__len__():
if channel_S[i].strip().startswith('Channel \"R\" {'):
continue
if channel_S[i].strip().startswith('Channel \"X\" {'):
bracketcounter+= 1
sub_s_x.append(channel_S[i])
continue
if "{" in channel_S[i].strip():
bracketcounter += 1
if "}" in channel_S[i].strip():
bracketcounter -= 1
if "Channel: \"Y\" {" in channel_S[i].strip():
break
if bracketcounter > 0:
sub_s_x.append(channel_S[i])
i += 1
while i < channel_S.__len__():
if channel_S[i].strip().startswith('Channel \"Y\" {'):
bracketcounter+= 1
sub_s_y.append(channel_S[i])
continue
if "{" in channel_S[i].strip():
bracketcounter += 1
if "}" in channel_S[i].strip():
bracketcounter -= 1
if "Channel: \"Z\" {" in channel_S[i].strip():
break
if bracketcounter > 0:
sub_s_y.append(channel_S[i])
i += 1
bracketcounter = 0 #reset bracket depth before scanning the Z channel
while i < channel_S.__len__():
if channel_S[i].strip().startswith('Channel \"Z\" {'):
bracketcounter+= 1
sub_s_z.append(channel_S[i])
continue
if "{" in channel_S[i].strip():
bracketcounter += 1
if "}" in channel_S[i].strip():
bracketcounter -= 1
if "Color: " in channel_S[i].strip():
break
if bracketcounter > 0:
sub_s_z.append(channel_S[i])
i += 1
sub_t_x_bin = { 'keydistances' : list(), 'keyvalues' : list() }
sub_t_y_bin = { 'keydistances' : list(), 'keyvalues' : list() }
sub_t_z_bin = { 'keydistances' : list(), 'keyvalues' : list() }
sub_r_x_bin = { 'keydistances' : list(), 'keyvalues' : list() }
sub_r_y_bin = { 'keydistances' : list(), 'keyvalues' : list() }
sub_r_z_bin = { 'keydistances' : list(), 'keyvalues' : list() }
sub_s_x_bin = { 'keydistances' : list(), 'keyvalues' : list() }
sub_s_y_bin = { 'keydistances' : list(), 'keyvalues' : list() }
sub_s_z_bin = { 'keydistances' : list(), 'keyvalues' : list() }
box_t_x = self._extractkeys(sub_t_x)
box_t_y = self._extractkeys(sub_t_y)
box_t_z = self._extractkeys(sub_t_z)
sub_t_x_bin['keydistances'] = box_t_x['keys']
sub_t_x_bin['keyvalues'] = box_t_x['values']
sub_t_y_bin['keydistances'] = box_t_y['keys']
sub_t_y_bin['keyvalues'] = box_t_y['values']
sub_t_z_bin['keydistances'] = box_t_z['keys']
sub_t_z_bin['keyvalues'] = box_t_z['values']
box_r_x = self._extractkeys(sub_r_x)
box_r_y = self._extractkeys(sub_r_y)
box_r_z = self._extractkeys(sub_r_z)
sub_r_x_bin['keydistances'] = box_r_x['keys']
sub_r_x_bin['keyvalues'] = box_r_x['values']
sub_r_y_bin['keydistances'] = box_r_y['keys']
sub_r_y_bin['keyvalues'] = box_r_y['values']
sub_r_z_bin['keydistances'] = box_r_z['keys']
sub_r_z_bin['keyvalues'] = box_r_z['values']
box_s_x = self._extractkeys(sub_s_x)
box_s_y = self._extractkeys(sub_s_y)
box_s_z = self._extractkeys(sub_s_z)
sub_s_x_bin['keydistances'] = box_s_x['keys']
sub_s_x_bin['keyvalues'] = box_s_x['values']
sub_s_y_bin['keydistances'] = box_s_y['keys']
sub_s_y_bin['keyvalues'] = box_s_y['values']
sub_s_z_bin['keydistances'] = box_s_z['keys']
sub_s_z_bin['keyvalues'] = box_s_z['values']
T_bin = {}
R_bin = | |
SlideShow61 = 0x803D
SlideShow62 = 0x803E
SlideShow63 = 0x803F
SlideShow64 = 0x8040
SlideShow65 = 0x8041
SlideShow66 = 0x8042
SlideShow67 = 0x8043
SlideShow68 = 0x8044
SlideShow69 = 0x8045
SlideShow70 = 0x8046
SlideShow71 = 0x8047
SlideShow72 = 0x8048
SlideShow73 = 0x8049
SlideShow74 = 0x804A
SlideShow75 = 0x804B
SlideShow76 = 0x804C
SlideShow77 = 0x804D
SlideShow78 = 0x804E
SlideShow79 = 0x804F
SlideShow80 = 0x8050
SlideShow81 = 0x8051
SlideShow82 = 0x8052
SlideShow83 = 0x8053
SlideShow84 = 0x8054
SlideShow85 = 0x8055
SlideShow86 = 0x8056
SlideShow87 = 0x8057
SlideShow88 = 0x8058
SlideShow89 = 0x8059
SlideShow90 = 0x805A
SlideShow91 = 0x805B
SlideShow92 = 0x805C
SlideShow93 = 0x805D
SlideShow94 = 0x805E
SlideShow95 = 0x805F
SlideShow96 = 0x8060
SlideShow97 = 0x8061
SlideShow98 = 0x8062
SlideShow99 = 0x8063
SlideShow100 = 0x8064
SlideShow101 = 0x8065
SlideShow102 = 0x8066
SlideShow103 = 0x8067
SlideShow104 = 0x8068
SlideShow105 = 0x8069
SlideShow106 = 0x806A
SlideShow107 = 0x806B
SlideShow108 = 0x806C
SlideShow109 = 0x806D
SlideShow110 = 0x806E
SlideShow111 = 0x806F
SlideShow112 = 0x8070
SlideShow113 = 0x8071
SlideShow114 = 0x8072
SlideShow115 = 0x8073
SlideShow116 = 0x8074
SlideShow117 = 0x8075
SlideShow118 = 0x8076
SlideShow119 = 0x8077
SlideShow120 = 0x8078
SlideShow121 = 0x8079
SlideShow122 = 0x807A
SlideShow123 = 0x807B
SlideShow124 = 0x807C
SlideShow125 = 0x807D
SlideShow126 = 0x807E
SlideShow127 = 0x807F
SlideShow128 = 0x8080
SlideShow129 = 0x8081
SlideShow130 = 0x8082
SlideShow131 = 0x8083
SlideShow132 = 0x8084
SlideShow133 = 0x8085
SlideShow134 = 0x8086
SlideShow135 = 0x8087
SlideShow136 = 0x8088
SlideShow137 = 0x8089
SlideShow138 = 0x808A
SlideShow139 = 0x808B
SlideShow140 = 0x808C
SlideShow141 = 0x808D
SlideShow142 = 0x808E
SlideShow143 = 0x808F
SlideShow144 = 0x8090
SlideShow145 = 0x8091
SlideShow146 = 0x8092
SlideShow147 = 0x8093
SlideShow148 = 0x8094
SlideShow149 = 0x8095
SlideShow150 = 0x8096
SlideShow151 = 0x8097
SlideShow152 = 0x8098
SlideShow153 = 0x8099
SlideShow154 = 0x809A
SlideShow155 = 0x809B
SlideShow156 = 0x809C
SlideShow157 = 0x809D
SlideShow158 = 0x809E
SlideShow159 = 0x809F
SlideShow160 = 0x80A0
SlideShow161 = 0x80A1
SlideShow162 = 0x80A2
SlideShow163 = 0x80A3
SlideShow164 = 0x80A4
SlideShow165 = 0x80A5
SlideShow166 = 0x80A6
SlideShow167 = 0x80A7
SlideShow168 = 0x80A8
SlideShow169 = 0x80A9
SlideShow170 = 0x80AA
SlideShow171 = 0x80AB
SlideShow172 = 0x80AC
SlideShow173 = 0x80AD
SlideShow174 = 0x80AE
SlideShow175 = 0x80AF
SlideShow176 = 0x80B0
SlideShow177 = 0x80B1
SlideShow178 = 0x80B2
SlideShow179 = 0x80B3
SlideShow180 = 0x80B4
SlideShow181 = 0x80B5
SlideShow182 = 0x80B6
SlideShow183 = 0x80B7
SlideShow184 = 0x80B8
SlideShow185 = 0x80B9
SlideShow186 = 0x80BA
SlideShow187 = 0x80BB
SlideShow188 = 0x80BC
SlideShow189 = 0x80BD
SlideShow190 = 0x80BE
SlideShow191 = 0x80BF
SlideShow192 = 0x80C0
SlideShow193 = 0x80C1
SlideShow194 = 0x80C2
SlideShow195 = 0x80C3
SlideShow196 = 0x80C4
SlideShow197 = 0x80C5
SlideShow198 = 0x80C6
SlideShow199 = 0x80C7
SlideShow200 = 0x80C8
SlideShow201 = 0x80C9
SlideShow202 = 0x80CA
SlideShow203 = 0x80CB
SlideShow204 = 0x80CC
SlideShow205 = 0x80CD
SlideShow206 = 0x80CE
SlideShow207 = 0x80CF
SlideShow208 = 0x80D0
SlideShow209 = 0x80D1
SlideShow210 = 0x80D2
SlideShow211 = 0x80D3
SlideShow212 = 0x80D4
SlideShow213 = 0x80D5
SlideShow214 = 0x80D6
SlideShow215 = 0x80D7
SlideShow216 = 0x80D8
SlideShow217 = 0x80D9
SlideShow218 = 0x80DA
SlideShow219 = 0x80DB
SlideShow220 = 0x80DC
SlideShow221 = 0x80DD
SlideShow222 = 0x80DE
SlideShow223 = 0x80DF
SlideShow224 = 0x80E0
SlideShow225 = 0x80E1
SlideShow226 = 0x80E2
SlideShow227 = 0x80E3
SlideShow228 = 0x80E4
SlideShow229 = 0x80E5
SlideShow230 = 0x80E6
SlideShow231 = 0x80E7
SlideShow232 = 0x80E8
SlideShow233 = 0x80E9
SlideShow234 = 0x80EA
SlideShow235 = 0x80EB
SlideShow236 = 0x80EC
SlideShow237 = 0x80ED
SlideShow238 = 0x80EE
SlideShow239 = 0x80EF
SlideShow240 = 0x80F0
SlideShow241 = 0x80F1
SlideShow242 = 0x80F2
SlideShow243 = 0x80F3
SlideShow244 = 0x80F4
SlideShow245 = 0x80F5
SlideShow246 = 0x80F6
SlideShow247 = 0x80F7
SlideShow248 = 0x80F8
SlideShow249 = 0x80F9
SlideShow250 = 0x80FA
SlideShow251 = 0x80FB
SlideShow252 = 0x80FC
SlideShow253 = 0x80FD
SlideShow254 = 0x80FE
SlideShow255 = 0x80FF
# Animation List
Animation0 = 0x8100
Animation1 = 0x8101
Animation2 = 0x8102
Animation3 = 0x8103
Animation4 = 0x8104
Animation5 = 0x8105
Animation6 = 0x8106
Animation7 = 0x8107
Animation8 = 0x8108
Animation9 = 0x8109
Animation10 = 0x810A
Animation11 = 0x810B
Animation12 = 0x810C
Animation13 = 0x810D
Animation14 = 0x810E
Animation15 = 0x810F
Animation16 = 0x8110
Animation17 = 0x8111
Animation18 = 0x8112
Animation19 = 0x8113
Animation20 = 0x8114
Animation21 = 0x8115
Animation22 = 0x8116
Animation23 = 0x8117
Animation24 = 0x8118
Animation25 = 0x8119
Animation26 = 0x811A
Animation27 = 0x811B
Animation28 = 0x811C
Animation29 = 0x811D
Animation30 = 0x811E
Animation31 = 0x811F
Animation32 = 0x8120
Animation33 = 0x8121
Animation34 = 0x8122
Animation35 = 0x8123
Animation36 = 0x8124
Animation37 = 0x8125
Animation38 = 0x8126
Animation39 = 0x8127
Animation40 = 0x8128
Animation41 = 0x8129
Animation42 = 0x812A
Animation43 = 0x812B
Animation44 = 0x812C
Animation45 = 0x812D
Animation46 = 0x812E
Animation47 = 0x812F
Animation48 = 0x8130
Animation49 = 0x8131
Animation50 = 0x8132
Animation51 = 0x8133
Animation52 = 0x8134
Animation53 = 0x8135
Animation54 = 0x8136
Animation55 = 0x8137
Animation56 = 0x8138
Animation57 = 0x8139
Animation58 = 0x813A
Animation59 = 0x813B
Animation60 = 0x813C
Animation61 = 0x813D
Animation62 = 0x813E
Animation63 = 0x813F
Animation64 = 0x8140
Animation65 = 0x8141
Animation66 = 0x8142
Animation67 = 0x8143
Animation68 = 0x8144
Animation69 = 0x8145
Animation70 = 0x8146
Animation71 = 0x8147
Animation72 = 0x8148
Animation73 = 0x8149
Animation74 = 0x814A
Animation75 = 0x814B
Animation76 = 0x814C
Animation77 = 0x814D
Animation78 = 0x814E
Animation79 = 0x814F
Animation80 = 0x8150
Animation81 = 0x8151
Animation82 = 0x8152
Animation83 = 0x8153
Animation84 = 0x8154
Animation85 = 0x8155
Animation86 = 0x8156
Animation87 = 0x8157
Animation88 = 0x8158
Animation89 = 0x8159
Animation90 = 0x815A
Animation91 = 0x815B
Animation92 = 0x815C
Animation93 = 0x815D
Animation94 = 0x815E
Animation95 = 0x815F
Animation96 = 0x8160
Animation97 = 0x8161
Animation98 = 0x8162
Animation99 = 0x8163
Animation100 = 0x8164
Animation101 = 0x8165
Animation102 = 0x8166
Animation103 = 0x8167
Animation104 = 0x8168
Animation105 = 0x8169
Animation106 = 0x816A
Animation107 = 0x816B
Animation108 = 0x816C
Animation109 = 0x816D
Animation110 = 0x816E
Animation111 = 0x816F
Animation112 = 0x8170
Animation113 = 0x8171
Animation114 = 0x8172
Animation115 = 0x8173
Animation116 = 0x8174
Animation117 = 0x8175
Animation118 = 0x8176
Animation119 = 0x8177
Animation120 = 0x8178
Animation121 = 0x8179
Animation122 = 0x817A
Animation123 = 0x817B
Animation124 = 0x817C
Animation125 = 0x817D
Animation126 = 0x817E
Animation127 = 0x817F
Animation128 = 0x8180
Animation129 = 0x8181
Animation130 = 0x8182
Animation131 = 0x8183
Animation132 = 0x8184
Animation133 = 0x8185
Animation134 = 0x8186
Animation135 = 0x8187
Animation136 = 0x8188
Animation137 = 0x8189
Animation138 = 0x818A
Animation139 = 0x818B
Animation140 = 0x818C
Animation141 = 0x818D
Animation142 = 0x818E
Animation143 = 0x818F
Animation144 = 0x8190
Animation145 = 0x8191
Animation146 = 0x8192
Animation147 = 0x8193
Animation148 = 0x8194
Animation149 = 0x8195
Animation150 = 0x8196
Animation151 = 0x8197
Animation152 = 0x8198
Animation153 = 0x8199
Animation154 = 0x819A
Animation155 = 0x819B
Animation156 = 0x819C
Animation157 = 0x819D
Animation158 = 0x819E
Animation159 = 0x819F
Animation160 = 0x81A0
Animation161 = 0x81A1
Animation162 = 0x81A2
Animation163 = 0x81A3
Animation164 = 0x81A4
Animation165 = 0x81A5
Animation166 = 0x81A6
Animation167 = 0x81A7
Animation168 = 0x81A8
Animation169 = 0x81A9
Animation170 = 0x81AA
Animation171 = 0x81AB
Animation172 = 0x81AC
Animation173 = 0x81AD
Animation174 = 0x81AE
Animation175 = 0x81AF
Animation176 = 0x81B0
Animation177 = 0x81B1
Animation178 = 0x81B2
Animation179 = 0x81B3
Animation180 = 0x81B4
Animation181 = 0x81B5
Animation182 = 0x81B6
Animation183 = 0x81B7
Animation184 = 0x81B8
Animation185 = 0x81B9
Animation186 = 0x81BA
Animation187 = 0x81BB
Animation188 = 0x81BC
Animation189 = 0x81BD
Animation190 = 0x81BE
Animation191 = 0x81BF
Animation192 = 0x81C0
Animation193 = 0x81C1
Animation194 = 0x81C2
Animation195 = 0x81C3
Animation196 = 0x81C4
Animation197 = 0x81C5
Animation198 = 0x81C6
Animation199 = 0x81C7
Animation200 = 0x81C8
Animation201 = 0x81C9
Animation202 = 0x81CA
Animation203 = 0x81CB
Animation204 = 0x81CC
Animation205 = 0x81CD
Animation206 = 0x81CE
Animation207 = 0x81CF
Animation208 = 0x81D0
Animation209 = 0x81D1
Animation210 = 0x81D2
Animation211 = 0x81D3
Animation212 = 0x81D4
Animation213 = 0x81D5
Animation214 = 0x81D6
Animation215 = 0x81D7
Animation216 = 0x81D8
Animation217 = 0x81D9
Animation218 = 0x81DA
Animation219 = 0x81DB
Animation220 = 0x81DC
Animation221 = 0x81DD
Animation222 = 0x81DE
Animation223 = 0x81DF
Animation224 = 0x81E0
Animation225 = 0x81E1
Animation226 = 0x81E2
Animation227 = 0x81E3
Animation228 = 0x81E4
Animation229 = 0x81E5
Animation230 = 0x81E6
import abc
import struct
from datetime import datetime, timezone
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
from nanotime import nanotime
from hashkernel import BitMask, utf8_decode, utf8_encode
from hashkernel.files.buffer import FileBytes
from hashkernel.typings import is_NamedTuple, is_subclass
Buffer = Union[FileBytes, bytes]
class NeedMoreBytes(Exception):
def __init__(self, how_much: int = None):
self.how_much = how_much
@classmethod
def check_buffer(cls, buff_len, fragment_end) -> int:
if buff_len < fragment_end:
raise cls(fragment_end - buff_len)
return fragment_end
class Packer(metaclass=abc.ABCMeta):
cls: type
size: Optional[int] = None
def fixed_size(self) -> bool:
return self.size is not None
@abc.abstractmethod
def pack(self, v: Any) -> bytes:
raise NotImplementedError("subclasses must override")
@abc.abstractmethod
def unpack(self, buffer: Buffer, offset: int) -> Tuple[Any, int]:
raise NotImplementedError("subclasses must override")
def unpack_whole_buffer(self, buffer: Buffer) -> Any:
obj, offset = self.unpack(buffer, 0)
assert len(buffer) == offset
return obj
MARK_BIT = BitMask(7)
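# Encoding note: AdjustableSizePacker (below) stores an int as a little-endian base-128
# varint: each byte carries 7 payload bits, and MARK_BIT (the high bit) is set only on
# the terminating byte.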
class AdjustableSizePacker(Packer):
"""
>>> asp3 = AdjustableSizePacker(3)
>>> asp3.unpack(bytes([0x83]), 0)
(3, 1)
>>> asp3.unpack(bytes([0xff]), 0)
(127, 1)
>>> asp3.unpack(bytes([0x00,0x81]), 0)
(128, 2)
>>> asp3.unpack(bytes([0x7f,0x81]), 0)
(255, 2)
>>> asp3.unpack(bytes([0x00,0xfd]),0)
(16000, 2)
>>> asp3.unpack(bytes([0x69,0x04,0x81]),0)
(17001, 3)
>>> asp3.unpack(bytes([0x00,0x09,0xfa]),0)
(2000000, 3)
>>> asp3.unpack(bytes([0x00,0x09,0x7a,0x81]),0)
Traceback (most recent call last):
...
ValueError: No end bit
>>> asp3.unpack(bytes([0x00,0x09]),0)
Traceback (most recent call last):
...
hashkernel.packer.NeedMoreBytes: 1
>>> asp3.pack(3).hex()
'83'
>>> asp3.pack(127).hex()
'ff'
>>> asp3.pack(128).hex()
'0081'
>>> asp3.pack(255).hex()
'7f81'
>>> asp3.pack(16000).hex()
'00fd'
>>> asp3.pack(17001).hex()
'690481'
>>> asp3.pack(2000000).hex()
'0009fa'
>>> asp3.pack(3000000).hex()
Traceback (most recent call last):
...
ValueError: Size is too big: 3000000
"""
max_size: int
cls = int
def __init__(self, max_size: int):
self.max_size = max_size
def pack(self, v: int) -> bytes:
sz_bytes = []
shift = v
for _ in range(self.max_size):
numerical = shift & MARK_BIT.inverse
shift = shift >> MARK_BIT.position
if 0 == shift:
sz_bytes.append(numerical | MARK_BIT.mask)
return bytes(sz_bytes)
else:
sz_bytes.append(numerical)
raise ValueError(f"Size is too big: {v}")
def unpack(self, buffer: Buffer, offset: int) -> Tuple[int, int]:
"""
Returns:
size: Unpacked size
new_offset: new offset in buffer
"""
sz = 0
buff_len = len(buffer)
for i in range(self.max_size):
NeedMoreBytes.check_buffer(buff_len, offset + i + 1)
v = buffer[offset + i]
end = v & MARK_BIT.mask
sz += (v & MARK_BIT.inverse) << (i * MARK_BIT.position)
if end:
return sz, offset + i + 1
raise ValueError("No end bit")
class SizedPacker(Packer):
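# Wire format (as implied by pack/unpack below): a length prefix written by size_packer,
# followed by exactly that many raw payload bytes.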
cls = bytes
def __init__(self, size_packer):
self.size_packer = size_packer
def pack(self, v: bytes) -> bytes:
return self.size_packer.pack(len(v)) + v
def unpack(self, buffer: Buffer, offset: int) -> Tuple[bytes, int]:
"""
Returns:
value: unpacked value
new_offset: new offset in buffer
"""
size, data_offset = self.size_packer.unpack(buffer, offset)
new_offset = NeedMoreBytes.check_buffer(len(buffer), data_offset + size)
return buffer[data_offset:new_offset], new_offset
class GreedyBytesPacker(Packer):
"""
Reads the buffer to the end, assuming the end of the buffer is
aligned with the end of the last variable.
"""
cls = bytes
def pack(self, v: bytes) -> bytes:
return v
def unpack(self, buffer: Buffer, offset: int) -> Tuple[bytes, int]:
"""
Returns:
value: unpacked value
new_offset: new offset in buffer
"""
new_offset = len(buffer)
return buffer[offset:new_offset], new_offset
class FixedSizePacker(Packer):
cls = bytes
def __init__(self, size: int) -> None:
self.size = size
def pack(self, v: bytes) -> bytes:
assert len(v) == self.size, f"{len(v)} != {self.size}"
return v
def unpack(self, buffer: Buffer, offset: int) -> Tuple[bytes, int]:
"""
Returns:
value: unpacked value
new_offset: new offset in buffer
"""
new_offset = offset + self.size
NeedMoreBytes.check_buffer(len(buffer), new_offset)
return buffer[offset:new_offset], new_offset
class TypePacker(Packer):
def __init__(self, cls: type, fmt: str) -> None:
self.cls = cls
self.fmt = fmt
self.size = struct.calcsize(self.fmt)
def pack(self, v: Any) -> bytes:
return struct.pack(self.fmt, v)
def unpack(self, buffer: Buffer, offset: int) -> Tuple[Any, int]:
"""
Returns:
value: unpacked value
new_offset: new offset in buffer
"""
new_offset = self.size + offset
NeedMoreBytes.check_buffer(len(buffer), new_offset)
unpacked_values = struct.unpack(self.fmt, buffer[offset:new_offset])
return unpacked_values[0], new_offset
class ProxyPacker(Packer):
def __init__(
self,
cls: type,
packer: Packer,
to_proxy: Callable[[Any], Any] = bytes,
to_cls: Callable[[Any], Any] = None,
) -> None:
self.cls = cls
self.packer = packer
self.size = self.packer.size
self.to_proxy = to_proxy
if to_cls is None:
to_cls = cls
self.to_cls = to_cls
def pack(self, v: Any) -> bytes:
return self.packer.pack(self.to_proxy(v))
def unpack(self, buffer: Buffer, offset: int) -> Tuple[Any, int]:
"""
Returns:
value: unpacked value
new_offset: new offset in buffer
"""
v, new_offset = self.packer.unpack(buffer, offset)
return self.to_cls(v), new_offset
class GreedyListPacker(Packer):
def __init__(
self,
item_cls: type,
item_packer: Packer = None,
packer_lib: "PackerLibrary" = None,
) -> None:
self.cls = list
self.item_cls = item_cls
if item_packer is None:
self.item_packer = packer_lib.get_packer_by_type(item_cls)
else:
self.item_packer = item_packer
self.size = None
def pack(self, v: List[Any]) -> bytes:
return b"".join(map(self.item_packer.pack, v))
def unpack(self, buffer: Buffer, offset: int) -> Tuple[Any, int]:
items = []
while offset < len(buffer):
v, offset = self.item_packer.unpack(buffer, offset)
items.append(v)
assert offset == len(buffer)
return items, offset
class TuplePacker(Packer):
def __init__(self, *packers: Packer, cls=tuple) -> None:
self.packers = packers
self.cls = cls
if is_NamedTuple(cls):
self.factory = lambda values: cls(*values)
else:
self.factory = lambda values: cls(values)
try:
self.size = sum(map(lambda p: p.size, packers))
except TypeError: # expected on `size==None`
self.size = None
def pack(self, values: tuple) -> bytes:
tuple_size = len(self.packers)
if tuple_size == len(values):
return b"".join(self.packers[i].pack(values[i]) for i in range(tuple_size))
else:
raise AssertionError(f"size mismatch {tuple_size}: {values}")
def unpack(self, buffer: Buffer, offset: int) -> Tuple[tuple, int]:
"""
Returns:
value: unpacked value
new_offset: new offset in buffer
"""
values = []
for p in self.packers:
v, offset = p.unpack(buffer, offset)
values.append(v)
return self.factory(values), offset
INT_8 = TypePacker(int, "B")
INT_16 = TypePacker(int, "<H")
INT_32 = TypePacker(int, "<L")
INT_64 = TypePacker(int, "<Q")
BE_INT_64 = TypePacker(int, ">Q")
FLOAT = TypePacker(float, "<f")
DOUBLE = TypePacker(float, "<d")
ADJSIZE_PACKER_3 = AdjustableSizePacker(3)
ADJSIZE_PACKER_4 = AdjustableSizePacker(4)
SMALL_SIZED_BYTES = SizedPacker(ADJSIZE_PACKER_3) # up to 2Mb
SIZED_BYTES = SizedPacker(ADJSIZE_PACKER_4) # up to 256Mb
INT_32_SIZED_BYTES = SizedPacker(INT_32)
BOOL_AS_BYTE = ProxyPacker(bool, INT_8, int)
NANOTIME = ProxyPacker(nanotime, BE_INT_64, lambda nt: nt.nanoseconds(), nanotime)
UTC_DATETIME = ProxyPacker(
datetime,
DOUBLE,
lambda dt: dt.replace(tzinfo=timezone.utc).timestamp(),
datetime.utcfromtimestamp,
)
UTF8_STR = ProxyPacker(str, SIZED_BYTES, utf8_encode, utf8_decode)
GREEDY_BYTES = GreedyBytesPacker()
UTF8_GREEDY_STR = ProxyPacker(str, GREEDY_BYTES, utf8_encode, utf8_decode)
def build_code_enum_packer(code_enum_cls) -> Packer:
return ProxyPacker(code_enum_cls, INT_8, int)
def unpack_constraining_greed(
buffer: Buffer, offset: int, size: int, greedy_packer: Packer
) -> Tuple[Any, int]:
"""
>>> unpack_constraining_greed(b'abc', 0, 3, UTF8_GREEDY_STR)
('abc', 3)
>>> unpack_constraining_greed(b'abc', 1, 1, UTF8_GREEDY_STR)
('b', 2)
>>> unpack_constraining_greed(b'abc', 0, 2, UTF8_GREEDY_STR)
('ab', 2)
>>> unpack_constraining_greed(b'abc', 0, 10, UTF8_GREEDY_STR)
Traceback (most recent call last):
...
hashkernel.packer.NeedMoreBytes: 7
>>> UTF8_GREEDY_STR.pack('abc')
b'abc'
"""
new_buffer, new_offset = FixedSizePacker(size).unpack(buffer, offset)
return greedy_packer.unpack_whole_buffer(new_buffer), new_offset
PackerFactory = Callable[[type], Packer]
def named_tuple_packer(*parts: Packer):
def factory(cls: type):
return TuplePacker(*parts, cls=cls)
return factory
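# Example (illustrative; `Point` is a hypothetical NamedTuple, not defined in this module):
#     class Point(NamedTuple):
#         x: int
#         y: int
#     point_packer = named_tuple_packer(INT_32, INT_32)(Point)
#     data = point_packer.pack(Point(1, 2))        # 8 bytes: two little-endian uint32s
#     point, offset = point_packer.unpack(data, 0) # -> (Point(x=1, y=2), 8)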
class PackerLibrary:
factories: List[Tuple[type, PackerFactory]]
cache: Dict[type, Packer]
def __init__(self, next_lib: "PackerLibrary" = None):
self.factories = []
self.cache = {}
self.next_lib = next_lib
def __contains__(self, item):
    try:
        return self[item] is not None
    except KeyError:
        return False
def __getitem__(self, key: type) -> Packer:
return self.get_packer_by_type(key)
def get_packer_by_type(self, key: type) -> Packer:
packer = None
if key in self.cache:
return self.cache[key]
else:
for i in range(len(self.factories)):
factory_cls, factory = self.factories[i]
if is_subclass(key, factory_cls):
packer = factory(key)
self.cache[key] = packer
return packer
if packer is None and self.next_lib is not None:
return self.next_lib.get_packer_by_type(key)
raise KeyError(key)
def resolve(self, key_cls: type):
"""
Decorator that makes sure the PackerLibrary is capable of
building a packer for a particular `key_cls`.
:param key_cls:
:return:
"""
self.get_packer_by_type(key_cls)
return key_cls
def register(self, packer: Union[PackerFactory, Packer]):
"""
Decorator that registers the given `packer` for the decorated
`key_cls` in the library.
:param packer:
:return:
"""
def decorate(key_cls: type):
self.register_packer(key_cls, packer)
return key_cls
return decorate
def register_packer(self, key: type, packer: Union[PackerFactory, Packer]):
self.cache = {}
packer_factory = (lambda _: packer) if isinstance(packer, Packer) else packer
for i in range(len(self.factories)):
i_type = self.factories[i][0]
assert i_type != key, f"Conflict: Registering {key} twice"
if is_subclass(key, i_type):
self.factories.insert(i, (key, packer_factory))
return
self.factories.append((key, packer_factory))
def register_all(self, *pack_defs: Tuple[type, Union[PackerFactory, Packer]]):
for t in pack_defs:
self.register_packer(*t)
return self
def ensure_packer(o: Any, packerlib: PackerLibrary = None) -> Optional[Packer]:
"""
>>> class A:
... def __init__(self,i): self.i = int(i)
... def __int__(self): return self.i
... def __str__(self): return f'{self.i}'
...
>>> A.__packer__ = ProxyPacker(A,INT_32,int)
>>> ensure_packer(A) == A.__packer__
True
>>> ensure_packer(A.__packer__) == A.__packer__
True
>>> s_packer = ProxyPacker(A,UTF8_STR,str)
>>> l = PackerLibrary()
>>> l.register_packer(A,s_packer)
>>> ensure_packer(A,l) == s_packer
True
"""
if isinstance(o, Packer) or o is None:
return o
elif isinstance(o, type) and packerlib is not None and o in packerlib:
return packerlib[o]
elif hasattr(o, "__packer__") and isinstance(o.__packer__, Packer):
return o.__packer__
return None
class PackerDefinitions:
typed_packers: List[Tuple[type, PackerFactory]]
def __init__(self, *pack_defs: Tuple[type, PackerFactory]):
self.typed_packers = [*pack_defs]
def build_lib(self, | |
self.wv.get_normalized_weights(deepcopy=True)
H_postgEmbd_vocab_size = self.POSTagsEmbeddings.len()+1
H_postgEmbd_embedding_size = pembds
H_dptpsEmbd_vocab_size = self.DPTypesEmbeddings.len()+1
H_dptpsEmbd_embedding_size = dtembds
#Dense and Dropout Params
H_dense_out_dim = dod
H_dense_actv_func = d_actvfunc.lower() if len(d_actvfunc)>=3 else None #None:default value will be used
H_dropout_Rate = dr
#Model inputs / outputs with order
MODEL_Inputs = []
MODEL_Outputs = []
#---------------------------------------- BUILDING LAYERS -------------------------------------------------
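#OVERVIEW (descriptive note): each of the top-K shortest paths gets its own words/POS-tags/DP-types
#input of length MaxSPLength; the three LSTMs are shared across all paths, each path's LSTM output is
#scaled by that path's weight, and the per-path vectors are combined with a weighted average before
#the dense/softmax decision layers.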
#1) BASIC INPUTS: Entity-Types
if self.WhichFeaturesToUse["entitytypes"] == True:
entitytype_length = 2*len(self.PRJ.Configs["OneHotEncodingForValidEnityTypesForRelations"])
L_entitytypes = keras.layers.Input (shape=(entitytype_length,),name="inp_entity_types")
MODEL_Inputs.append (L_entitytypes)
#2) BASIC INPUTS: SP-Weights
L_sp_weights = keras.layers.Input (shape=(self.MaxSPCount,),name="inp_sp_weights")
MODEL_Inputs.append (L_sp_weights)
#3)WORDS EMBEDDINGS and LSTMs
L_shared_embd_words = keras.layers.Embedding(input_dim=H_wordEmbd_vocab_size, output_dim=H_wordEmbd_embedding_size, input_length=self.MaxSPLength , weights=[H_wordEmbd_weights], mask_zero=True , trainable=True, name = "shared_embd_words")
if H_lstm_actv_func == None:
L_shared_lstm_words = keras.layers.LSTM(units=H_W_lstm_out_dim,name="shared_lstm_words")
else:
L_shared_lstm_words = keras.layers.LSTM(units=H_W_lstm_out_dim,name="shared_lstm_words", activation=H_lstm_actv_func)
WORDS_LSTMS_Outputs = []
for path_index in range(self.MaxSPCount):
inp = keras.layers.Input (shape=(self.MaxSPLength,),name="inp_words_"+str(path_index))
embd = L_shared_embd_words(inp)
lstm = L_shared_lstm_words(embd)
X = keras.layers.Lambda(KERASHelper_Multiply_Weights,output_shape=(H_W_lstm_out_dim,), arguments={'PL_index':path_index}, name="lstm_words_weights_mul_"+str(path_index))([lstm,L_sp_weights])
WORDS_LSTMS_Outputs.append (X)
MODEL_Inputs.append (inp)
L_word_lstms_aggr = keras.layers.Lambda(KERASHelper_Weighted_average,output_shape=(H_W_lstm_out_dim,), name="aggr_lstm_words_WA")(WORDS_LSTMS_Outputs+[L_sp_weights])
#4)POSTAGS EMBEDDINGS and LSTMs
L_shared_embd_postags = keras.layers.Embedding(input_dim=H_postgEmbd_vocab_size, output_dim=H_postgEmbd_embedding_size, input_length=self.MaxSPLength , mask_zero=True , trainable=True, name = "shared_embd_postags")
if H_lstm_actv_func == None:
L_shared_lstm_postags = keras.layers.LSTM(units=H_P_lstm_out_dim,name="shared_lstm_postags")
else:
L_shared_lstm_postags = keras.layers.LSTM(units=H_P_lstm_out_dim,name="shared_lstm_postags", activation=H_lstm_actv_func)
POSTAGS_LSTMS_Outputs = []
for path_index in range(self.MaxSPCount):
inp = keras.layers.Input (shape=(self.MaxSPLength,),name="inp_postags_"+str(path_index))
embd = L_shared_embd_postags(inp)
lstm = L_shared_lstm_postags(embd)
X = keras.layers.Lambda(KERASHelper_Multiply_Weights,output_shape=(H_P_lstm_out_dim,), arguments={'PL_index':path_index}, name="lstm_postags_weights_mul_"+str(path_index))([lstm,L_sp_weights])
POSTAGS_LSTMS_Outputs.append (X)
MODEL_Inputs.append (inp)
L_postag_lstms_aggr = keras.layers.Lambda(KERASHelper_Weighted_average,output_shape=(H_P_lstm_out_dim,), name="aggr_lstm_postags_WA")(POSTAGS_LSTMS_Outputs+[L_sp_weights])
#5)DPTypes EMBEDDINGS and LSTMs
L_shared_embd_dptypes = keras.layers.Embedding(input_dim=H_dptpsEmbd_vocab_size, output_dim=H_dptpsEmbd_embedding_size, input_length=self.MaxSPLength , mask_zero=True , trainable=True, name = "shared_embd_dptypes")
if H_lstm_actv_func == None:
L_shared_lstm_dptypes = keras.layers.LSTM(units=H_D_lstm_out_dim,name="shared_lstm_dptypes")
else:
L_shared_lstm_dptypes = keras.layers.LSTM(units=H_D_lstm_out_dim,name="shared_lstm_dptypes", activation=H_lstm_actv_func)
DPTYPES_LSTMS_Outputs = []
for path_index in range(self.MaxSPCount):
inp = keras.layers.Input (shape=(self.MaxSPLength,),name="inp_dptypes_"+str(path_index))
embd = L_shared_embd_dptypes(inp)
lstm = L_shared_lstm_dptypes(embd)
X = keras.layers.Lambda(KERASHelper_Multiply_Weights,output_shape=(H_D_lstm_out_dim,), arguments={'PL_index':path_index}, name="lstm_dptypes_weights_mul_"+str(path_index))([lstm,L_sp_weights])
DPTYPES_LSTMS_Outputs.append (X)
MODEL_Inputs.append (inp)
L_dptype_lstms_aggr = keras.layers.Lambda(KERASHelper_Weighted_average,output_shape=(H_D_lstm_out_dim,), name="aggr_lstm_dptypes_WA")(DPTYPES_LSTMS_Outputs+[L_sp_weights])
#6) CONCATENATE ALL FEATURES
if self.WhichFeaturesToUse["entitytypes"] == True:
L_all_features = keras.layers.concatenate([L_word_lstms_aggr,L_postag_lstms_aggr,L_dptype_lstms_aggr,L_entitytypes])
else:
L_all_features = keras.layers.concatenate([L_word_lstms_aggr,L_postag_lstms_aggr,L_dptype_lstms_aggr])
#7) Dense/Dropout/Decision layers
Y_dim = len (self.PRJ.Configs["OneHotEncodingForMultiClass"])
if H_dense_out_dim > 0:
L_dense = keras.layers.Dense(units=H_dense_out_dim,activation=H_dense_actv_func)(L_all_features)
if H_dropout_Rate > 0:
L_drop = keras.layers.Dropout(H_dropout_Rate)(L_dense)
L_decision = keras.layers.Dense(units=Y_dim ,activation="softmax",name="decision_Y")(L_drop)
else:
L_decision = keras.layers.Dense(units=Y_dim, activation="softmax",name="decision_Y")(L_dense)
else:
if H_dropout_Rate > 0:
L_drop = keras.layers.Dropout(H_dropout_Rate)(L_all_features)
L_decision = keras.layers.Dense(units=Y_dim, activation="softmax",name="decision_Y")(L_drop)
else:
L_decision = keras.layers.Dense(units=Y_dim, activation="softmax",name="decision_Y")(L_all_features)
MODEL_Outputs.append(L_decision)
model = Model(inputs=MODEL_Inputs, outputs=MODEL_Outputs)
return model , self.WhichFeaturesToUse , self.WhichOutputsToPredit
def Arch_TopKP_3lstms_WA_wpd_DropBeforeDense (self,wlod,plod,dlod,l_actvfunc,pembds,dtembds,dod,d_actvfunc,dr):
#<<<CRITICAL>>> : Setting np random seed every time BEFORE IMPORTING FROM KERAS!
self.lp ("Building Neural Network Model. RandomSeed:" + str(self.RandomSeed) + " , Please wait ...");
import numpy as np ;
np.random.seed (self.RandomSeed)
from keras.models import Model
import keras.layers
self.WhichFeaturesToUse["weights"] = True
self.WhichFeaturesToUse["words"] = True
self.WhichFeaturesToUse["postags"] = True
self.WhichFeaturesToUse["dptypes"] = True
#LSTMs Params
H_W_lstm_out_dim = wlod #Words-LSTM Output Dimensionality
H_P_lstm_out_dim = plod #POSTags-LSTM Output Dimensionality
H_D_lstm_out_dim = dlod #DTypes-LSTM Output Dimensionality
H_lstm_actv_func = l_actvfunc.lower() if len(l_actvfunc)>=3 else None #Activation function for all 3 LSTMs. None:default value will be used
#Embeddings Params
H_wordEmbd_vocab_size = self.wv.shape()[0]
H_wordEmbd_embedding_size = self.wv.shape()[1]
H_wordEmbd_weights = self.wv.get_normalized_weights(deepcopy=True)
H_postgEmbd_vocab_size = self.POSTagsEmbeddings.len()+1
H_postgEmbd_embedding_size = pembds
H_dptpsEmbd_vocab_size = self.DPTypesEmbeddings.len()+1
H_dptpsEmbd_embedding_size = dtembds
#Dense and Dropout Params
H_dense_out_dim = dod
H_dense_actv_func = d_actvfunc.lower() if len(d_actvfunc)>=3 else None #None:default value will be used
H_dropout_Rate = dr
#Model inputs / outputs with order
MODEL_Inputs = []
MODEL_Outputs = []
#---------------------------------------- BUILDING LAYERS -------------------------------------------------
#1) BASIC INPUTS: Entity-Types
if self.WhichFeaturesToUse["entitytypes"] == True:
entitytype_length = 2*len(self.PRJ.Configs["OneHotEncodingForValidEnityTypesForRelations"])
L_entitytypes = keras.layers.Input (shape=(entitytype_length,),name="inp_entity_types")
MODEL_Inputs.append (L_entitytypes)
#2) BASIC INPUTS: SP-Weights
L_sp_weights = keras.layers.Input (shape=(self.MaxSPCount,),name="inp_sp_weights")
MODEL_Inputs.append (L_sp_weights)
#3)WORDS EMBEDDINGS and LSTMs
L_shared_embd_words = keras.layers.Embedding(input_dim=H_wordEmbd_vocab_size, output_dim=H_wordEmbd_embedding_size, input_length=self.MaxSPLength , weights=[H_wordEmbd_weights], mask_zero=True , trainable=True, name = "shared_embd_words")
if H_lstm_actv_func == None:
L_shared_lstm_words = keras.layers.LSTM(units=H_W_lstm_out_dim,name="shared_lstm_words")
else:
L_shared_lstm_words = keras.layers.LSTM(units=H_W_lstm_out_dim,name="shared_lstm_words", activation=H_lstm_actv_func)
WORDS_LSTMS_Outputs = []
for path_index in range(self.MaxSPCount):
inp = keras.layers.Input (shape=(self.MaxSPLength,),name="inp_words_"+str(path_index))
embd = L_shared_embd_words(inp)
lstm = L_shared_lstm_words(embd)
X = keras.layers.Lambda(KERASHelper_Multiply_Weights,output_shape=(H_W_lstm_out_dim,), arguments={'PL_index':path_index}, name="lstm_words_weights_mul_"+str(path_index))([lstm,L_sp_weights])
WORDS_LSTMS_Outputs.append (X)
MODEL_Inputs.append (inp)
L_word_lstms_aggr = keras.layers.Lambda(KERASHelper_Weighted_average,output_shape=(H_W_lstm_out_dim,), name="aggr_lstm_words_WA")(WORDS_LSTMS_Outputs+[L_sp_weights])
#4)POSTAGS EMBEDDINGS and LSTMs
L_shared_embd_postags = keras.layers.Embedding(input_dim=H_postgEmbd_vocab_size, output_dim=H_postgEmbd_embedding_size, input_length=self.MaxSPLength , mask_zero=True , trainable=True, name = "shared_embd_postags")
if H_lstm_actv_func == None:
L_shared_lstm_postags = keras.layers.LSTM(units=H_P_lstm_out_dim,name="shared_lstm_postags")
else:
L_shared_lstm_postags = keras.layers.LSTM(units=H_P_lstm_out_dim,name="shared_lstm_postags", activation=H_lstm_actv_func)
POSTAGS_LSTMS_Outputs = []
for path_index in range(self.MaxSPCount):
inp = keras.layers.Input (shape=(self.MaxSPLength,),name="inp_postags_"+str(path_index))
embd = L_shared_embd_postags(inp)
lstm = L_shared_lstm_postags(embd)
X = keras.layers.Lambda(KERASHelper_Multiply_Weights,output_shape=(H_P_lstm_out_dim,), arguments={'PL_index':path_index}, name="lstm_postags_weights_mul_"+str(path_index))([lstm,L_sp_weights])
POSTAGS_LSTMS_Outputs.append (X)
MODEL_Inputs.append (inp)
L_postag_lstms_aggr = keras.layers.Lambda(KERASHelper_Weighted_average,output_shape=(H_P_lstm_out_dim,), name="aggr_lstm_postags_WA")(POSTAGS_LSTMS_Outputs+[L_sp_weights])
#5)DPTypes EMBEDDINGS and LSTMs
L_shared_embd_dptypes = keras.layers.Embedding(input_dim=H_dptpsEmbd_vocab_size, output_dim=H_dptpsEmbd_embedding_size, input_length=self.MaxSPLength , mask_zero=True , trainable=True, name = "shared_embd_dptypes")
if H_lstm_actv_func == None:
L_shared_lstm_dptypes = keras.layers.LSTM(units=H_D_lstm_out_dim,name="shared_lstm_dptypes")
else:
L_shared_lstm_dptypes = keras.layers.LSTM(units=H_D_lstm_out_dim,name="shared_lstm_dptypes", activation=H_lstm_actv_func)
DPTYPES_LSTMS_Outputs = []
for path_index in range(self.MaxSPCount):
inp = keras.layers.Input (shape=(self.MaxSPLength,),name="inp_dptypes_"+str(path_index))
embd = L_shared_embd_dptypes(inp)
lstm = L_shared_lstm_dptypes(embd)
X = keras.layers.Lambda(KERASHelper_Multiply_Weights,output_shape=(H_D_lstm_out_dim,), arguments={'PL_index':path_index}, name="lstm_dptypes_weights_mul_"+str(path_index))([lstm,L_sp_weights])
DPTYPES_LSTMS_Outputs.append (X)
MODEL_Inputs.append (inp)
L_dptype_lstms_aggr = keras.layers.Lambda(KERASHelper_Weighted_average,output_shape=(H_D_lstm_out_dim,), name="aggr_lstm_dptypes_WA")(DPTYPES_LSTMS_Outputs+[L_sp_weights])
#6) CONCATENATE ALL FEATURES
if self.WhichFeaturesToUse["entitytypes"] == True:
L_all_features = keras.layers.concatenate([L_word_lstms_aggr,L_postag_lstms_aggr,L_dptype_lstms_aggr,L_entitytypes])
else:
L_all_features = keras.layers.concatenate([L_word_lstms_aggr,L_postag_lstms_aggr,L_dptype_lstms_aggr])
#7) Dropout if needed , BEFORE DENSE ....
if H_dropout_Rate > 0:
L_drop = keras.layers.Dropout(H_dropout_Rate)(L_all_features)
else:
L_drop = L_all_features
#8) Dense and Decision layers
Y_dim = len (self.PRJ.Configs["OneHotEncodingForMultiClass"])
if H_dense_out_dim > 0:
L_dense = keras.layers.Dense(units=H_dense_out_dim,activation=H_dense_actv_func)(L_drop)
L_decision = keras.layers.Dense(units=Y_dim, activation="softmax",name="decision_Y")(L_dense)
else:
L_decision = keras.layers.Dense(units=Y_dim, activation="softmax",name="decision_Y")(L_drop)
MODEL_Outputs.append(L_decision)
model = Model(inputs=MODEL_Inputs, outputs=MODEL_Outputs)
return model , self.WhichFeaturesToUse , self.WhichOutputsToPredit
def Arch_TopKP_3lstms_wpd_maxp (self,wlod,plod,dlod,l_actvfunc,pembds,dtembds,dod,d_actvfunc,dr):
#<<<CRITICAL>>> : Setting np random seed every time BEFORE IMPORTING FROM KERAS!
self.lp ("Building Neural Network Model. RandomSeed:" + str(self.RandomSeed) + " , Please wait ...");
import numpy as np ;
np.random.seed (self.RandomSeed)
from keras.models import Model
import keras.layers
self.WhichFeaturesToUse["weights"] = False
self.WhichFeaturesToUse["words"] = True
self.WhichFeaturesToUse["postags"] = True
self.WhichFeaturesToUse["dptypes"] = True
#LSTMs Params
H_W_lstm_out_dim = wlod #Words-LSTM Output Dimensionality
H_P_lstm_out_dim = plod #POSTags-LSTM Output Dimensionality
H_D_lstm_out_dim = dlod #DTypes-LSTM Output Dimensionality
H_lstm_actv_func = l_actvfunc.lower() if len(l_actvfunc)>=3 else None #Activation function for all 3 LSTMs. None:default value will be used
#Embeddings Params
H_wordEmbd_vocab_size = self.wv.shape()[0]
H_wordEmbd_embedding_size = self.wv.shape()[1]
H_wordEmbd_weights = self.wv.get_normalized_weights(deepcopy=True)
H_postgEmbd_vocab_size = self.POSTagsEmbeddings.len()+1
H_postgEmbd_embedding_size = pembds
H_dptpsEmbd_vocab_size = self.DPTypesEmbeddings.len()+1
H_dptpsEmbd_embedding_size = dtembds
#Dense and Dropout Params
H_dense_out_dim = dod
H_dense_actv_func = d_actvfunc.lower() if len(d_actvfunc)>=3 else None #None:default value will be used
H_dropout_Rate = dr
#Model inputs / outputs with order
MODEL_Inputs = []
MODEL_Outputs = []
#---------------------------------------- BUILDING LAYERS -------------------------------------------------
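#OVERVIEW (descriptive note): this variant ignores the shortest-path weights and instead aggregates
#the per-path LSTM outputs with an element-wise Maximum (max pooling over paths) before the
#dense/softmax decision layers.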
#1) BASIC INPUTS: Entity-Types
if self.WhichFeaturesToUse["entitytypes"] == True:
entitytype_length = 2*len(self.PRJ.Configs["OneHotEncodingForValidEnityTypesForRelations"])
L_entitytypes = keras.layers.Input (shape=(entitytype_length,),name="inp_entity_types")
MODEL_Inputs.append (L_entitytypes)
#2) BASIC INPUTS: SP-Weights
#L_sp_weights = keras.layers.Input (shape=(self.MaxSPCount,),name="inp_sp_weights")
#MODEL_Inputs.append (L_sp_weights)
#3)WORDS EMBEDDINGS and LSTMs
L_shared_embd_words = keras.layers.Embedding(input_dim=H_wordEmbd_vocab_size, output_dim=H_wordEmbd_embedding_size, input_length=self.MaxSPLength , weights=[H_wordEmbd_weights], mask_zero=True , trainable=True, name = "shared_embd_words")
if H_lstm_actv_func == None:
L_shared_lstm_words = keras.layers.LSTM(units=H_W_lstm_out_dim,name="shared_lstm_words")
else:
L_shared_lstm_words = keras.layers.LSTM(units=H_W_lstm_out_dim,name="shared_lstm_words", activation=H_lstm_actv_func)
WORDS_LSTMS_Outputs = []
for path_index in range(self.MaxSPCount):
inp = keras.layers.Input (shape=(self.MaxSPLength,),name="inp_words_"+str(path_index))
embd = L_shared_embd_words(inp)
lstm = L_shared_lstm_words(embd)
WORDS_LSTMS_Outputs.append (lstm)
MODEL_Inputs.append (inp)
L_word_lstms_aggr = keras.layers.Maximum(name="aggr_lstm_words_sum")(WORDS_LSTMS_Outputs)
#4)POSTAGS EMBEDDINGS and LSTMs
L_shared_embd_postags = keras.layers.Embedding(input_dim=H_postgEmbd_vocab_size, output_dim=H_postgEmbd_embedding_size, input_length=self.MaxSPLength , mask_zero=True , trainable=True, name = "shared_embd_postags")
if H_lstm_actv_func == None:
L_shared_lstm_postags = keras.layers.LSTM(units=H_P_lstm_out_dim,name="shared_lstm_postags")
else:
L_shared_lstm_postags = keras.layers.LSTM(units=H_P_lstm_out_dim,name="shared_lstm_postags", activation=H_lstm_actv_func)
POSTAGS_LSTMS_Outputs = []
for path_index in range(self.MaxSPCount):
inp = keras.layers.Input (shape=(self.MaxSPLength,),name="inp_postags_"+str(path_index))
embd = L_shared_embd_postags(inp)
lstm = L_shared_lstm_postags(embd)
POSTAGS_LSTMS_Outputs.append (lstm)
MODEL_Inputs.append (inp)
L_postag_lstms_aggr = keras.layers.Maximum(name="aggr_lstm_postags_sum")(POSTAGS_LSTMS_Outputs)
#5)DPTypes EMBEDDINGS and LSTMs
L_shared_embd_dptypes = keras.layers.Embedding(input_dim=H_dptpsEmbd_vocab_size, output_dim=H_dptpsEmbd_embedding_size, input_length=self.MaxSPLength , mask_zero=True , trainable=True, name = "shared_embd_dptypes")
if H_lstm_actv_func == None:
L_shared_lstm_dptypes = keras.layers.LSTM(units=H_D_lstm_out_dim,name="shared_lstm_dptypes")
else:
L_shared_lstm_dptypes = keras.layers.LSTM(units=H_D_lstm_out_dim,name="shared_lstm_dptypes", activation=H_lstm_actv_func)
DPTYPES_LSTMS_Outputs = []
for path_index in range(self.MaxSPCount):
inp = keras.layers.Input (shape=(self.MaxSPLength,),name="inp_dptypes_"+str(path_index))
embd = L_shared_embd_dptypes(inp)
lstm = L_shared_lstm_dptypes(embd)
DPTYPES_LSTMS_Outputs.append (lstm)
MODEL_Inputs.append (inp)
L_dptype_lstms_aggr = keras.layers.Maximum(name="aggr_lstm_dptypes_sum")(DPTYPES_LSTMS_Outputs)
#6) CONCATENATE ALL FEATURES
if self.WhichFeaturesToUse["entitytypes"] == True:
L_all_features = keras.layers.concatenate([L_word_lstms_aggr,L_postag_lstms_aggr,L_dptype_lstms_aggr,L_entitytypes])
else:
L_all_features = keras.layers.concatenate([L_word_lstms_aggr,L_postag_lstms_aggr,L_dptype_lstms_aggr])
#7) Dense/Dropout/Decision layers
Y_dim = len (self.PRJ.Configs["OneHotEncodingForMultiClass"])
if H_dense_out_dim > 0:
L_dense = keras.layers.Dense(units=H_dense_out_dim,activation=H_dense_actv_func)(L_all_features)
if H_dropout_Rate > 0:
L_drop = keras.layers.Dropout(H_dropout_Rate)(L_dense)
L_decision = keras.layers.Dense(units=Y_dim ,activation="softmax",name="decision_Y")(L_drop)
else:
L_decision = keras.layers.Dense(units=Y_dim, activation="softmax",name="decision_Y")(L_dense)
else:
if H_dropout_Rate > 0:
L_drop = keras.layers.Dropout(H_dropout_Rate)(L_all_features)
L_decision = keras.layers.Dense(units=Y_dim, activation="softmax",name="decision_Y")(L_drop)
else:
L_decision = keras.layers.Dense(units=Y_dim, activation="softmax",name="decision_Y")(L_all_features)
MODEL_Outputs.append(L_decision)
model = Model(inputs=MODEL_Inputs, outputs=MODEL_Outputs)
return model , self.WhichFeaturesToUse , self.WhichOutputsToPredit
def Arch_FS_1Bilstm (self,lod,pembds,cnkembs,dod,actfuncs,dr,maxPool):
#<<<CRITICAL>>> : Setting np random seed every time BEFORE IMPORTING FROM KERAS!
self.lp ("Building Neural Network Model. RandomSeed:" + str(self.RandomSeed) | |
import os
import csv
import re
import glob
import json
from luigi import Parameter, IntParameter, WrapperTask
from collections import OrderedDict
from lib.timespan import get_timespan
from tasks.base_tasks import ColumnsTask, RepoFileUnzipTask, TableTask, CSV2TempTableTask, MetaWrapper
from tasks.meta import current_session, OBSColumn, GEOM_REF
from tasks.au.geo import (SourceTags, LicenseTags, GEOGRAPHIES, GeographyColumns, Geography, GEO_MB, GEO_SA1)
from tasks.tags import SectionTags, SubsectionTags, UnitTags
from lib.columns import ColumnsDeclarations
from lib.logger import get_logger
LOGGER = get_logger(__name__)
PROFILES = {
2011: 'BCP',
2016: 'GCP',
}
STATES = ('NSW', 'Vic', 'Qld', 'SA', 'WA', 'Tas', 'NT', 'ACT', 'OT', )
CONVERT_TO_11_DIGIT = ['SA1', ]
TABLES = {
2011: ['B01', 'B02', 'B03', 'B04A', 'B04B', 'B05', 'B06', 'B07', 'B08A', 'B08B', 'B09', 'B10A', 'B10B', 'B10C',
'B11A', 'B11B', 'B12A', 'B12B', 'B13', 'B14', 'B15', 'B16A', 'B16B', 'B17A', 'B17B', 'B18', 'B19',
'B20A', 'B20B', 'B21', 'B22A', 'B22B', 'B23A', 'B23B', 'B24', 'B25', 'B26', 'B27', 'B28', 'B29', 'B30',
'B31', 'B32', 'B33', 'B34', 'B35', 'B36', 'B37', 'B38', 'B39', 'B40A', 'B40B', 'B41A', 'B41B', 'B41C',
'B42A', 'B42B', 'B43A', 'B43B', 'B43C', 'B43D', 'B44A', 'B44B', 'B45A', 'B45B', 'B46', ],
2016: ['G01', 'G02', 'G03', 'G04A', 'G04B', 'G05', 'G06', 'G07', 'G08',
'G09A', 'G09B', 'G09C', 'G09D', 'G09E', 'G09F', 'G09G', 'G09H', 'G10A', 'G10B', 'G10C',
'G11A', 'G11B', 'G11C', 'G12A', 'G12B', 'G13A', 'G13B', 'G13C', 'G14', 'G15', 'G16A', 'G16B',
'G17A', 'G17B', 'G17C', 'G18', 'G19', 'G20A', 'G20B', 'G21', 'G22A', 'G22B', 'G23A', 'G23B', 'G24', 'G25',
'G26', 'G27', 'G28', 'G29', 'G30', 'G31', 'G32', 'G33', 'G34', 'G35', 'G36', 'G37', 'G38', 'G39', 'G40',
'G41', 'G42', 'G43A', 'G43B', 'G44A', 'G44B', 'G44C', 'G44D', 'G44E', 'G44F', 'G45A', 'G45B', 'G46A', 'G46B',
'G47A', 'G47B', 'G47C', 'G48A', 'G48B', 'G48C', 'G49A', 'G49B', 'G49C', 'G50A', 'G50B', 'G50C',
'G51A', 'G51B', 'G51C', 'G51D', 'G52A', 'G52B', 'G52C', 'G52D', 'G53A', 'G53B', 'G54A', 'G54B',
'G55A', 'G55B', 'G56A', 'G56B', 'G57A', 'G57B', 'G58A', 'G58B', 'G59', ]
}
URL = 'http://www.censusdata.abs.gov.au/CensusOutput/copsubdatapacks.nsf/All%20docs%20by%20catNo/{year}_{profile}_{resolution}_for_{state}/$File/{year}_{profile}_{resolution}_for_{state}_short-header.zip'
class DownloadData(RepoFileUnzipTask):
year = IntParameter()
resolution = Parameter()
profile = Parameter()
state = Parameter()
def get_url(self):
return URL.format(year=self.year,
profile=self.profile,
resolution=self.resolution,
state=self.state,)
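# Example (illustrative values): year=2016, profile='GCP', resolution='SA1', state='NSW' expands to
# http://www.censusdata.abs.gov.au/CensusOutput/copsubdatapacks.nsf/All%20docs%20by%20catNo/2016_GCP_SA1_for_NSW/$File/2016_GCP_SA1_for_NSW_short-header.zip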
class ImportData(CSV2TempTableTask):
tablename = Parameter()
year = IntParameter()
resolution = Parameter()
state = Parameter()
profile = Parameter()
def requires(self):
return DownloadData(resolution=self.resolution, profile=self.profile,
state=self.state, year=self.year)
def input_csv(self):
return glob.glob(os.path.join(self.input().path, '**',
'{year}Census_{tablename}_{state}_{resolution}*.csv'.format(
path=self.input().path,
year=self.year,
tablename=self.tablename,
state=self.state.upper(),
resolution=self.resolution,)), recursive=True)[0]
def after_copy(self):
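# Rename any columns whose headers carry leading/trailing whitespace (some source
# CSV headers appear to be padded), so downstream queries can use the trimmed names.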
session = current_session()
query_columns = '''
SELECT column_name
FROM information_schema.columns
WHERE table_schema = '{schema}'
AND table_name = lower('{table}');
'''.format(schema=self.output().schema,
table=self.output().tablename)
columns = session.execute(query_columns).fetchall()
for column in columns:
column_name = column[0]
if column_name != column_name.strip():
alter_column = '''
ALTER TABLE "{schema}".{table}
RENAME COLUMN "{old_column}" TO "{new_column}";
'''.format(schema=self.output().schema,
table=self.output().tablename,
old_column=column_name,
new_column=column_name.strip())
session.execute(alter_column)
session.commit()
class ImportAllTables(WrapperTask):
year = IntParameter()
resolution = Parameter()
state = Parameter()
def requires(self):
for table in TABLES[self.year]:
yield ImportData(resolution=self.resolution, state=self.state,
year=self.year, tablename=table)
class ImportAllStates(WrapperTask):
year = IntParameter()
resolution = Parameter()
def requires(self):
for state in STATES:
yield ImportAllTables(resolution=self.resolution, state=state,
year=self.year)
class ImportAllResolutions(WrapperTask):
year = IntParameter()
state = Parameter()
def requires(self):
for resolution in GEOGRAPHIES[self.year]:
yield ImportAllTables(resolution=resolution, state=self.state, year=self.year)
class ImportAll(WrapperTask):
year = IntParameter()
def requires(self):
for resolution in GEOGRAPHIES[self.year]:
for state in STATES:
yield ImportAllTables(resolution=resolution, state=state, year=self.year)
class Columns(ColumnsTask):
year = IntParameter()
resolution = Parameter()
profile = Parameter()
tablename = Parameter()
def requires(self):
requirements = {
'sections': SectionTags(),
'subsections': SubsectionTags(),
'units': UnitTags(),
'source': SourceTags(),
'license': LicenseTags()
}
reqs = self._fetch_requirements()
col_reqs = reqs.get('all', [])
col_reqs.extend(reqs.get(self.tablename, []))
for col_req in col_reqs:
if col_req != self.tablename:
requirements[col_req] = Columns(tablename=col_req, resolution=self.resolution,
year=self.year, profile=self.profile)
return requirements
def version(self):
return 8
def columns(self):
cols = OrderedDict()
input_ = self.input()
subsectiontags = input_['subsections']
unittags = input_['units']
country = input_['sections']['au']
source = input_['source']['au-census']
licensetags = input_['license']['au-datapacks-license']
# column req's from other tables
column_reqs = {}
for key, value in input_.items():
if key.startswith(self.profile[0]):
column_reqs.update(value)
filepath = "meta/Metadata_{year}_{profile}_DataPack.csv".format(year=self.year, profile=self.profile)
rows = {}
session = current_session()
with open(os.path.join(os.path.dirname(__file__), filepath)) as csv_meta_file:
reader = csv.reader(csv_meta_file, delimiter=',', quotechar='"')
for line in reader:
id_ = line[0] # A: Sequential
tablename = line[4] # H: Tablename
# ignore tables we don't care about right now
if not id_.startswith(self.profile[0]) or \
not tablename.startswith(self.tablename):
continue
col_id = line[1]
rows[col_id] = line
for col_id in rows:
self._process_col(col_id, rows, session, column_reqs, cols, tablename, source, licensetags, country, unittags, subsectiontags)
columnsFilter = ColumnsDeclarations(os.path.join(os.path.dirname(__file__), 'census_columns.json'))
parameters = '{{"year":"{year}","resolution":"{resolution}", "tablename":"{tablename}"}}'.format(
year=self.year, resolution=self.resolution, tablename=self.tablename)
filtered_cols = columnsFilter.filter_columns(cols, parameters)
return filtered_cols
def _fetch_requirements(self):
dir_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'meta')
with (open('{}/{}'.format(dir_path, '{}_{}_requirements.json'.format(self.profile, self.year)))) as f:
return json.load(f)
def _process_col(self, col_id, rows, session, column_reqs, cols, tablename, source, licensetags, country, unittags, subsectiontags):
line = rows[col_id]
col_name = line[2] # C: name
denominators = line[3] # D: denominators
col_unit = line[5] # F: unit
col_subsections = line[6].split('|') # G: subsection
subsection_tags = [subsectiontags[s.strip()] for s in col_subsections]
if tablename == '{}02'.format(self.profile[0]):
col_agg = line[8] # I: AGG (for B02 only)
else:
col_agg = None
tabledesc = line[10] # K: Table description
denominators = denominators.split('|')
targets_dict = {}
for denom_id in denominators:
denom_id = denom_id.strip()
if not denom_id:
continue
reltype = 'denominator'
if col_agg in ['median', 'average']:
reltype = 'universe'
if denom_id in column_reqs:
targets_dict[column_reqs[denom_id].get(session)] = reltype
else:
if denom_id not in cols:
# we load denominators recursively to avoid having to order
# them in the source CSV file
self._process_col(denom_id, rows, session, column_reqs, cols, tablename, source, licensetags, country, unittags, subsectiontags)
targets_dict[cols[denom_id]] = reltype
targets_dict.pop(None, None)
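        # Illustrative example (hypothetical column ids): a median-type column whose metadata
        # lists denominator 'G5' ends up with targets_dict == {cols['G5']: 'universe'},
        # whereas an ordinary count column with the same denominator gets
        # {cols['G5']: 'denominator'}.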
cols[col_id] = OBSColumn(
id=col_id,
type='Numeric',
name=col_name,
description=tabledesc,
# Ranking of importance, sometimes used to favor certain measures in auto-selection
# Weight of 0 will hide this column from the user. We generally use between 0 and 10
weight=5,
aggregate=col_agg or 'sum',
# Tags are our way of noting aspects of this measure like its unit, the country
# it's relevant to, and which section(s) of the catalog it should appear in
tags=[source, licensetags, country, unittags[col_unit]] + subsection_tags,
targets=targets_dict
)
class AllColumnsResolution(WrapperTask):
year = IntParameter()
resolution = Parameter()
def requires(self):
for table in TABLES[self.year]:
yield Columns(year=self.year, resolution=self.resolution, profile=PROFILES[self.year], tablename=table)
class AllColumns(WrapperTask):
year = IntParameter()
def requires(self):
for resolution in GEOGRAPHIES[self.year]:
yield AllColumnsResolution(year=self.year, resolution=resolution)
#####################################
# COPY TO OBSERVATORY
#####################################
class XCP(TableTask):
tablename = Parameter()
year = IntParameter()
resolution = Parameter()
def version(self):
return 5
def targets(self):
return {
self.input()['geo'].obs_table: GEOM_REF,
}
def requires(self):
requirements = {
'geo': Geography(resolution=self.resolution, year=self.year),
'geometa': GeographyColumns(resolution=self.resolution, year=self.year),
'meta': Columns(year=self.year, resolution=self.resolution,
profile=PROFILES[self.year], tablename=self.tablename),
}
import_data = {}
if self.resolution == GEO_MB:
# We need to have the data from the parent geometries
# in order to interpolate
requirements['geo_sa1'] = Geography(resolution=GEO_SA1, year=self.year)
requirements['data'] = XCP(tablename=self.tablename, year=self.year, resolution=GEO_SA1)
else:
for state in STATES:
import_data[state] = ImportData(resolution=self.resolution,
state=state, profile=PROFILES[self.year],
tablename=self.tablename,
year=self.year)
requirements['data'] = import_data
return requirements
def table_timespan(self):
return get_timespan(str(self.year))
def columns(self):
cols = OrderedDict()
input_ = self.input()
cols['region_id'] = input_['geometa']['geom_id']
for colname, coltarget in input_['meta'].items():
cols[colname] = coltarget
return cols
def _get_geoid(self):
if self.year == 2011:
return 'region_id'
else:
if self.resolution == 'SA1':
return '{}_{}_{}'.format(self.resolution, '7DIGITCODE', self.year)
elif self.resolution == 'SA2':
return '{}_{}_{}'.format(self.resolution, 'MAINCODE', self.year)
else:
return '{}_{}_{}'.format(self.resolution, 'CODE', self.year)
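    # For example (hypothetical values): for year 2016 the geoid column is
    # 'SA1_7DIGITCODE_2016' for SA1, 'SA2_MAINCODE_2016' for SA2 and, say,
    # 'SA3_CODE_2016' for the other resolutions; for 2011 it is always 'region_id'.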
def populate(self):
if self.resolution == GEO_MB:
self.populate_mb()
else:
self.populate_general()
def populate_mb(self):
session = current_session()
column_targets = self.columns()
out_colnames = [oc.lower() for oc in list(column_targets.keys())]
in_colnames = ['mb.geom_id as region_id']
for ic in list(column_targets.keys()):
if ic != 'region_id':
in_colnames.append('round(cast(float8 ({ic} * (ST_Area(mb.the_geom)/ST_Area(sa1geo.the_geom))) as numeric), 2) as {ic}'.format(ic=ic.lower()))
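        # Each mesh-block value is an area-weighted share of its parent SA1 value,
        #   mb_value = sa1_value * ST_Area(mb.the_geom) / ST_Area(sa1geo.the_geom)
        # rounded to 2 decimals, i.e. a simple areal interpolation from SA1 down to MB.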
insert_query = '''
INSERT INTO {output} ("{out_colnames}")
SELECT {in_colnames}
FROM {input_geo_mb} mb
INNER JOIN {input_geo_sa1} sa1geo ON (mb.parent_id = sa1geo.geom_id)
INNER JOIN {input_data} sa1data ON (mb.parent_id = sa1data.region_id)
'''.format(output=self.output().table,
input_data=self.input()['data'].table,
input_geo_mb=self.input()['geo'].table,
input_geo_sa1=self.input()['geo_sa1'].table,
in_colnames=', '.join(in_colnames),
out_colnames='", "'.join(out_colnames))
try:
LOGGER.debug(insert_query)
session.execute(insert_query)
except Exception:
session.rollback()
def populate_general(self):
session = current_session()
column_targets = self.columns()
out_colnames = [oc.lower() for oc in list(column_targets.keys())]
failstates = []
for state, input_ in self.input()['data'].items():
intable = input_.table
in_colnames = []
for colname, target in column_targets.items():
                # weird trailing underscore in the Australia-wide files but not in the state files
if colname.endswith('Median_rent_weekly_') and \
((self.resolution == 'RA' and state.lower() != 'aust') or
(self.resolution == 'SA4' and state.lower() in ('vic', 'wa', 'ot')) or
(self.resolution == 'SA3' and state.lower() in ('vic', 'wa')) or
(self.resolution == 'SA2' and state.lower() in ('vic', 'wa', 'nsw')) or
(self.resolution == 'SA1' and state.lower() in ('vic', 'wa', 'qld', 'nt', 'sa', 'nsw')) or
(self.resolution == 'GCCSA' and state.lower() in ('vic', 'wa', 'ot')) or
                 (self.resolution == 'LGA' and state.lower() in ('wa',)) or
                 (self.resolution == 'SLA' and state.lower() in ('wa',)) or
(self.resolution == 'SSC' and state.lower() in ('vic', 'wa', 'qld', 'nt', 'sa', 'nsw')) or
(self.resolution == 'POA' and state.lower() in ('wa', 'qld', 'nsw')) or
(self.resolution == 'CED' and state.lower() in ('vic', 'wa')) or
| |
line in page.lines]
if page.lines
else [],
words=[DocumentWord._from_generated(word) for word in page.words]
if page.words
else [],
selection_marks=[
DocumentSelectionMark._from_generated(mark)
for mark in page.selection_marks
]
if page.selection_marks
else [],
spans=prepare_document_spans(page.spans),
)
def __repr__(self):
return (
"DocumentPage(page_number={}, angle={}, width={}, height={}, unit={}, lines={}, words={}, "
"selection_marks={}, spans={})".format(
self.page_number,
self.angle,
self.width,
self.height,
self.unit,
repr(self.lines),
repr(self.words),
repr(self.selection_marks),
repr(self.spans),
)
)
def to_dict(self):
# type: () -> dict
"""Returns a dict representation of DocumentPage.
:return: dict
:rtype: dict
"""
return {
"page_number": self.page_number,
"angle": self.angle,
"width": self.width,
"height": self.height,
"unit": self.unit,
"lines": [f.to_dict() for f in self.lines]
if self.lines
else [],
"words": [f.to_dict() for f in self.words]
if self.words
else [],
"selection_marks": [f.to_dict() for f in self.selection_marks]
if self.selection_marks
else [],
"spans": [f.to_dict() for f in self.spans]
if self.spans
else [],
}
@classmethod
def from_dict(cls, data):
# type: (dict) -> DocumentPage
"""Converts a dict in the shape of a DocumentPage to the model itself.
:param dict data: A dictionary in the shape of DocumentPage.
:return: DocumentPage
:rtype: DocumentPage
"""
return cls(
page_number=data.get("page_number", None),
angle=data.get("angle", None),
width=data.get("width", None),
height=data.get("height", None),
unit=data.get("unit", None),
lines=[DocumentLine.from_dict(v) for v in data.get("lines")] # type: ignore
if len(data.get("lines", [])) > 0
else [],
words=[DocumentWord.from_dict(v) for v in data.get("words")] # type: ignore
if len(data.get("words", [])) > 0
else [],
selection_marks=[DocumentSelectionMark.from_dict(v) for v in data.get("selection_marks")] # type: ignore
if len(data.get("selection_marks", [])) > 0
else [],
spans=[DocumentSpan.from_dict(v) for v in data.get("spans")] # type: ignore
if len(data.get("spans", [])) > 0
else [],
)
class DocumentSelectionMark(DocumentContentElement):
"""A selection mark object representing check boxes, radio buttons, and other elements indicating a selection.
:ivar state: State of the selection mark. Possible values include: "selected",
"unselected".
:vartype state: str
:ivar content: The text content - not returned for DocumentSelectionMark.
:vartype content: str
:ivar bounding_box: Bounding box of the selection mark.
:vartype bounding_box: list[Point]
:ivar span: Location of the selection mark in the reading order concatenated
content.
:vartype span: ~azure.ai.formrecognizer.DocumentSpan
:ivar confidence: Confidence of correctly extracting the selection mark.
:vartype confidence: float
:ivar str kind: For DocumentSelectionMark, this is "selectionMark".
"""
def __init__(self, **kwargs):
super(DocumentSelectionMark, self).__init__(kind="selectionMark", **kwargs)
self.state = kwargs.get("state", None)
@classmethod
def _from_generated(cls, mark):
return cls(
state=mark.state,
bounding_box=get_bounding_box(mark),
span=DocumentSpan._from_generated(mark.span)
if mark.span
else None,
confidence=mark.confidence,
)
def __repr__(self):
return "DocumentSelectionMark(state={}, content={}, span={}, confidence={}, bounding_box={}, kind={})".format(
self.state,
self.content,
repr(self.span),
self.confidence,
self.bounding_box,
self.kind,
)
def to_dict(self):
# type: () -> dict
"""Returns a dict representation of DocumentSelectionMark.
:return: dict
:rtype: dict
"""
return {
"state": self.state,
"content": self.content,
"bounding_box": [f.to_dict() for f in self.bounding_box]
if self.bounding_box
else [],
"span": self.span.to_dict() if self.span else None,
"confidence": self.confidence,
"kind": self.kind,
}
@classmethod
def from_dict(cls, data):
# type: (dict) -> DocumentSelectionMark
"""Converts a dict in the shape of a DocumentSelectionMark to the model itself.
:param dict data: A dictionary in the shape of DocumentSelectionMark.
:return: DocumentSelectionMark
:rtype: DocumentSelectionMark
"""
return cls(
state=data.get("state", None),
content=data.get("content", None),
bounding_box=[Point.from_dict(v) for v in data.get("bounding_box")] # type: ignore
if len(data.get("bounding_box", [])) > 0
else [],
span=DocumentSpan.from_dict(data.get("span")) if data.get("span") else None, # type: ignore
confidence=data.get("confidence", None),
)
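# Minimal round-trip sketch (illustrative only; the field values below are made up):
#
#     mark = DocumentSelectionMark.from_dict({"state": "selected", "confidence": 0.95})
#     assert mark.to_dict()["state"] == "selected"
#
# from_dict()/to_dict() are symmetric, which is what allows these models to be persisted
# (e.g. as JSON) and rebuilt later.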
class DocumentStyle(object):
"""An object representing observed text styles.
    :ivar is_handwritten: Whether the content is handwritten.
:vartype is_handwritten: bool
:ivar spans: Location of the text elements in the concatenated content the style
applies to.
:vartype spans: list[~azure.ai.formrecognizer.DocumentSpan]
:ivar confidence: Confidence of correctly identifying the style.
:vartype confidence: float
"""
def __init__(self, **kwargs):
self.is_handwritten = kwargs.get("is_handwritten", None)
self.spans = kwargs.get("spans", None)
self.confidence = kwargs.get("confidence", None)
@classmethod
def _from_generated(cls, style):
return cls(
is_handwritten=style.is_handwritten,
spans=[DocumentSpan._from_generated(span) for span in style.spans]
if style.spans
else [],
confidence=style.confidence,
)
def __repr__(self):
return "DocumentStyle(is_handwritten={}, spans={}, confidence={})".format(
self.is_handwritten,
repr(self.spans),
self.confidence,
)
def to_dict(self):
# type: () -> dict
"""Returns a dict representation of DocumentStyle.
:return: dict
:rtype: dict
"""
return {
"is_handwritten": self.is_handwritten,
"spans": [f.to_dict() for f in self.spans]
if self.spans
else [],
"confidence": self.confidence,
}
@classmethod
def from_dict(cls, data):
# type: (dict) -> DocumentStyle
"""Converts a dict in the shape of a DocumentStyle to the model itself.
:param dict data: A dictionary in the shape of DocumentStyle.
:return: DocumentStyle
:rtype: DocumentStyle
"""
return cls(
is_handwritten=data.get("is_handwritten", None),
spans=[DocumentSpan.from_dict(v) for v in data.get("spans")] # type: ignore
if len(data.get("spans", [])) > 0
else [],
confidence=data.get("confidence", None),
)
class DocumentTable(object):
"""A table object consisting table cells arranged in a rectangular layout.
:ivar row_count: Number of rows in the table.
:vartype row_count: int
:ivar column_count: Number of columns in the table.
:vartype column_count: int
:ivar cells: Cells contained within the table.
:vartype cells: list[~azure.ai.formrecognizer.DocumentTableCell]
:ivar bounding_regions: Bounding regions covering the table.
:vartype bounding_regions: list[~azure.ai.formrecognizer.BoundingRegion]
:ivar spans: Location of the table in the reading order concatenated content.
:vartype spans: list[~azure.ai.formrecognizer.DocumentSpan]
"""
def __init__(self, **kwargs):
self.row_count = kwargs.get("row_count", None)
self.column_count = kwargs.get("column_count", None)
self.cells = kwargs.get("cells", None)
self.bounding_regions = kwargs.get("bounding_regions", None)
self.spans = kwargs.get("spans", None)
@classmethod
def _from_generated(cls, table):
return cls(
row_count=table.row_count,
column_count=table.column_count,
cells=[DocumentTableCell._from_generated(cell) for cell in table.cells]
if table.cells
else [],
bounding_regions=prepare_bounding_regions(table.bounding_regions),
spans=prepare_document_spans(table.spans),
)
def __repr__(self):
return (
"DocumentTable(row_count={}, column_count={}, cells={}, bounding_regions={}, "
"spans={})".format(
self.row_count,
self.column_count,
repr(self.cells),
repr(self.bounding_regions),
repr(self.spans),
)
)
def to_dict(self):
# type: () -> dict
"""Returns a dict representation of DocumentTable.
:return: dict
:rtype: dict
"""
return {
"row_count": self.row_count,
"column_count": self.column_count,
"cells": [f.to_dict() for f in self.cells]
if self.cells
else [],
"bounding_regions": [f.to_dict() for f in self.bounding_regions]
if self.bounding_regions
else [],
"spans": [f.to_dict() for f in self.spans]
if self.spans
else [],
}
@classmethod
def from_dict(cls, data):
# type: (dict) -> DocumentTable
"""Converts a dict in the shape of a DocumentTable to the model itself.
:param dict data: A dictionary in the shape of DocumentTable.
:return: DocumentTable
:rtype: DocumentTable
"""
return cls(
row_count=data.get("row_count", None),
column_count=data.get("column_count", None),
cells=[DocumentTableCell.from_dict(v) for v in data.get("cells")] # type: ignore
if len(data.get("cells", [])) > 0
else [],
bounding_regions=[BoundingRegion.from_dict(v) for v in data.get("bounding_regions")] # type: ignore
if len(data.get("bounding_regions", [])) > 0
else [],
spans=[DocumentSpan.from_dict(v) for v in data.get("spans")] # type: ignore
if len(data.get("spans", [])) > 0
else [],
)
class DocumentTableCell(object):
"""An object representing the location and content of a table cell.
:ivar kind: Table cell kind. Possible values include: "content", "rowHeader", "columnHeader",
"stubHead", "description". Default value: "content".
:vartype kind: str
:ivar row_index: Row index of the cell.
:vartype row_index: int
:ivar column_index: Column index of the cell.
:vartype column_index: int
:ivar row_span: Number of rows spanned by this cell.
:vartype row_span: int
:ivar column_span: Number of columns spanned by this cell.
:vartype column_span: int
:ivar content: Concatenated content of the table cell in reading order.
:vartype content: str
:ivar bounding_regions: Bounding regions covering the table cell.
:vartype bounding_regions: list[~azure.ai.formrecognizer.BoundingRegion]
:ivar spans: Location of the table cell in the reading order concatenated content.
:vartype spans: list[~azure.ai.formrecognizer.DocumentSpan]
"""
def __init__(self, **kwargs):
self.kind = kwargs.get("kind", "content")
self.row_index = kwargs.get("row_index", None)
self.column_index = kwargs.get("column_index", None)
self.row_span = kwargs.get("row_span", 1)
self.column_span = kwargs.get("column_span", 1)
self.content = kwargs.get("content", None)
self.bounding_regions = kwargs.get("bounding_regions", None)
self.spans = kwargs.get("spans", None)
@classmethod
def _from_generated(cls, cell):
return cls(
kind=cell.kind if cell.kind else "content",
row_index=cell.row_index,
column_index=cell.column_index,
row_span=cell.row_span if cell.row_span else 1,
column_span=cell.column_span if cell.column_span else 1,
content=cell.content,
bounding_regions=[
BoundingRegion._from_generated(region)
for region in cell.bounding_regions
]
if cell.bounding_regions
else [],
spans=[DocumentSpan._from_generated(span) for span in cell.spans]
if cell.spans
else [],
)
def __repr__(self):
return (
"DocumentTableCell(kind={}, row_index={}, column_index={}, row_span={}, column_span={}, "
"content={}, bounding_regions={}, spans={})".format(
self.kind,
self.row_index,
self.column_index,
self.row_span,
self.column_span,
self.content,
repr(self.bounding_regions),
repr(self.spans),
)
)
def to_dict(self):
# type: () -> dict
"""Returns a dict representation of DocumentTableCell.
:return: dict
:rtype: dict
"""
return {
"kind": self.kind,
"row_index": self.row_index,
"column_index": self.column_index,
"row_span": self.row_span,
"column_span": self.column_span,
"content": self.content,
"bounding_regions": [f.to_dict() for f in self.bounding_regions]
if self.bounding_regions
else [],
"spans": [f.to_dict() for f in self.spans]
if self.spans
else [],
}
@classmethod
def from_dict(cls, data):
# type: (dict) -> DocumentTableCell
"""Converts a dict in the shape of a DocumentTableCell to the model itself.
:param dict data: A dictionary in the shape of DocumentTableCell.
:return: DocumentTableCell
:rtype: DocumentTableCell
"""
return cls(
kind=data.get("kind", "content"),
row_index=data.get("row_index", None),
column_index=data.get("column_index", None),
row_span=data.get("row_span", 1),
column_span=data.get("column_span", 1),
content=data.get("content", None),
bounding_regions=[BoundingRegion.from_dict(v) for v in data.get("bounding_regions")] # type: ignore
if len(data.get("bounding_regions", [])) > 0
else [],
spans=[DocumentSpan.from_dict(v) for v | |
thing here is that we don't know which of
# these edges will be removed by merge_edges - one
        # of them will get deleted, and that deletion will be picked up by our
        # delete handler.
# the other one will get modified, so by the time we get
# control again after trigrid, we won't know what to update
# so - save the nodes...
saved_nodes = self.edges[ [e1,e2],:2]
remaining = super(LiveDtGridBase,self).merge_edges(e1,e2)
if self.freeze:
pass
elif self.holding:
for n in saved_nodes.ravel():
self.holding_nodes[n] = 'merge_edges'
else:
if remaining == e1:
ab = saved_nodes[0]
else:
ab = saved_nodes[1]
# the one that is *not* remaining has already been deleted
# just update the other one.
try:
self.dt_remove_edge(remaining,nodes=ab)
except MissingConstraint:
print(" on merge_edges, may have an intervener")
raise
self.dt_add_edge(remaining)
return remaining
def unmerge_edges(self,e1,e2,*args,**kwargs):
check_dt_after = False
if self.freeze:
pass
elif self.holding:
pass
else:
# this can be problematic if the middle node is exactly on
# the line between them, because re-inserting that node
# will pre-emptively segment the constrained edge.
try:
self.dt_remove_edge(e1)
except MissingConstraint:
print(" got a missing constraint on merge edges - will verify that it's okay")
check_dt_after = True
#print " after pre-emptive remove_edge"
super(LiveDtGridBase,self).unmerge_edges(e1,e2,*args,**kwargs)
#print " after call to super()"
if self.freeze:
pass
elif self.holding:
n1,n2 = self.edges[e1,:2]
n3,n4 = self.edges[e2,:2]
for n in [n1,n2,n3,n4]:
self.holding_nodes[ n ] = 'unmerge_edges'
else:
if check_dt_after:
AB = self.edges[e1,:2]
BC = self.edges[e2,:2]
B = np.intersect1d(AB,BC)[0]
A = np.setdiff1d(AB,B)[0]
C = np.setdiff1d(BC,B)[0]
print("while unmerging edges, a constraint was pre-emptively created, but will verify that now %d-%d-%d."%(A,B,C))
for edge in self.dt_incident_constraints(self.vh[B]):
v1,v2 = edge.vertices()
if self.vh_info[v1] == A or self.vh_info[v2] == A:
A = None
                    elif self.vh_info[v1] == C or self.vh_info[v2] == C:
                        C = None
else:
print("while unmerging edge, the middle point has another constrained DT neighbor - surprising...")
                if A is not None or C is not None:
raise MissingConstraint("Failed to verify that implicit constraint was there")
else:
#print " adding reverted edge e1 and e2"
self.dt_add_edge(e1)
# even though trigrid.merge_edges() calls delete_edge()
# on e2, it doesn't register an undelete_edge() b/c
# rollback=0.
self.dt_add_edge(e2)
# def unsplit_edge(...): # not supported by trigrid
def split_edge(self,nodeA,nodeB,nodeC):
""" per trigrid updates, nodeB may be a node index or a tuple (coords, **add_node_opts)
"""
if self.freeze:
pass
elif self.holding:
self.holding_nodes[nodeA] = 'split_edge'
if not isinstance(nodeB,Iterable):
self.holding_nodes[nodeB] = 'split_edge'
self.holding_nodes[nodeC] = 'split_edge'
else:
if self.verbose > 2:
print(" split_edge: %d %d %d"%(nodeA,nodeB,nodeC))
e1 = self.find_edge([nodeA,nodeC])
try:
self.dt_remove_edge(e1)
except MissingConstraint:
if isinstance(nodeB,Iterable):
print(" got a missing constraint on split edge, and node has not been created!")
raise
else:
print(" got a missing constraint on split edge, but maybe the edge has already been split")
self.dt_remove_edge(e1,[nodeA,nodeB])
self.dt_remove_edge(e1,[nodeB,nodeC])
print(" Excellent. The middle node had become part of the constraint")
e2 = super(LiveDtGridBase,self).split_edge(nodeA,nodeB,nodeC)
if self.freeze:
pass
elif self.holding:
pass
else:
self.dt_add_edge(e1)
self.dt_add_edge(e2)
return e2
def delete_node_and_merge(self,n):
if self.freeze:
return super(LiveDtGridBase,self).delete_node_and_merge(n)
if self.holding:
self.holding_nodes[n] = 'delete_node_and_merge'
else:
# remove any constraints going to n -
self.dt_remove_constraints(self.vh[n])
self.dt_remove(n)
# note that this is going to call merge_edges, before it
# calls delete_node() - and merge_edges will try to add the new
# constraint, which will fail if the middle node is collinear with
# the outside nodes. so freeze LiveDT updates, then here we clean up
self.freeze = 1
new_edge = super(LiveDtGridBase,self).delete_node_and_merge(n)
if self.verbose > 2:
print(" Got new_edge=%s from trigrid.delete_node_and_merge"%new_edge)
self.freeze=0
if self.holding:
for n in self.edges[new_edge,:2]:
self.holding_nodes[n] = 'delete_node_and_merge'
else:
# while frozen we missed a merge_edges and a delete node.
# we just want to do them in the opposite order of what trigrid does.
self.dt_add_edge(new_edge)
return new_edge
def renumber(self):
mappings = super(LiveDtGridBase,self).renumber()
self.vh = self.vh[ mappings['valid_nodes'] ]
for i in range(len(self.vh)):
self.vh_info[self.vh[i]] = i
return mappings
def dt_interior_cells(self):
"""
        Only valid for a triangulation where all nodes lie on
        the boundary.  There will be some cells which fall inside
        the domain and others outside it.
        Returns the cells which are properly inside the domain,
        as triples of nodes.
"""
log.info("Finding interior cells from full Delaunay Triangulation")
interior_cells = []
for a,b,c in self.dt_cell_node_iter():
# going to be slow...
# How to test whether this face is internal:
# Arbitrarily choose a vertex: a
#
# Find an iter for which the face abc lies to the left of the boundary
internal = 0
for elt in self.all_iters_for_node(a):
d = self.points[elt.nxt.data] - self.points[a]
theta_afwd = np.arctan2(d[1],d[0])
d = self.points[b] - self.points[a]
theta_ab = np.arctan2(d[1],d[0])
d = self.points[elt.prv.data] - self.points[a]
theta_aprv = np.arctan2(d[1],d[0])
dtheta_b = (theta_ab - theta_afwd) % (2*np.pi)
dtheta_elt = (theta_aprv - theta_afwd) % (2*np.pi)
# if b==elt.nxt.data, then dtheta_b==0.0 - all good
if dtheta_b >= 0 and dtheta_b < dtheta_elt:
internal = 1
break
if internal:
interior_cells.append( [a,b,c] )
cells = np.array(interior_cells)
return cells
## DT-based "smoothing"
# First, make sure the boundary is sufficiently sampled
def subdivide(self,min_edge_length=1.0,edge_ids=None):
""" Like medial_axis::subdivide_iterate -
Add nodes along the boundary as needed to ensure that the boundary
is well represented in channels
[ from medial_axis ]
Find edges that need to be sampled with smaller
steps and divide them into two edges.
returns the number of new edges / nodes
method: calculate voronoi radii
iterate over edges in boundary
for each edge, find the voronoi point that they have
in common. So this edge should be part of a triangle,
and we are getting the center of that triangle.
          compare the voronoi radius with the distance between the voronoi
point and the edge. If the edge is too long and needs to
be subdivided, it will be long (and the voronoi radius large)
compared to the distance between the edge and the vor. center.
"""
if edge_ids is None:
print("Considering all edges for subdividing")
edge_ids = list(range(self.Nedges()))
else:
print("Considering only %d supplied edges for subdividing"%len(edge_ids))
to_subdivide = []
# Also keep a list of constrained edges of DT cells for which another edge
# has been selected for subdivision.
neighbors_of_subdivide = {}
print("Choosing edges to subdivide")
for ni,i in enumerate(edge_ids): # range(self.Nedges()):
if ni%500==0:
log.debug('.')
if self.edges[i,0] == -37:
continue # edge has been deleted
# this only works when one side is unpaved and the other boundary -
if self.edges[i,3] != trigrid.UNMESHED or self.edges[i,4] != trigrid.BOUNDARY:
print("Skipping edge %d because it has weird cell ids"%i)
continue
a,b = self.edges[i,:2]
# consult the DT to find who the third node is:
a_nbrs = self.delaunay_neighbors(a)
b_nbrs = self.delaunay_neighbors(b)
abc = np.array([self.points[a],self.points[b],[0,0]])
c = None
for nbr in a_nbrs:
if nbr in b_nbrs:
# does it lie to the left of the edge?
abc[2,:] = self.points[nbr]
if trigrid.is_ccw(abc):
c = nbr
break
if c is None:
print("While looking at edge %d, %s - %s"%(i,self.points[a],self.points[b]))
raise Exception("Failed to find the third node that makes up an interior triangle")
pntV = trigrid.circumcenter(abc[0],abc[1],abc[2])
# compute the point-line distance between
# this edge and the v center, then compare to
# the distance from the endpoint to that
# vcenter
pntA = self.points[a]
pntB = self.points[b]
v_radius = np.sqrt( ((pntA-pntV)**2).sum() )
# This calculates unsigned distance - with Triangle, that's fine because
# it takes care of the Steiner points, but with CGAL we do it ourselves.
# line_clearance = np.sqrt( (( 0.5*(pntA+pntB) - pntV)**2).sum() )
ab = (pntB - pntA)
ab = ab / np.sqrt( np.sum(ab**2) )
pos_clearance_dir = np.array( [-ab[1],ab[0]] )
av = pntV - pntA
line_clearance = av[0]*pos_clearance_dir[0] + av[1]*pos_clearance_dir[1]
# Why do I get some bizarrely short edges?
ab = np.sqrt( np.sum( (pntA - pntB)**2 ) )
if v_radius > 1.2*line_clearance and v_radius > min_edge_length and ab>min_edge_length:
to_subdivide.append(i)
# Also make note of the other edges of this same DT triangle
for maybe_nbr in [ [a,c], [b,c] ]:
# could be an internal DT edge, or a real edge
try:
nbr_edge = self.find_edge(maybe_nbr)
neighbors_of_subdivide[nbr_edge] = 1
except trigrid.NoSuchEdgeError:
pass
print()
print("Will subdivide | |
"ratio that would be suggested if neither provided (v1)"
# nested option --------
vis_nested = cow.patch(g0,cow.patch(g1,g2)+\
cow.layout(ncol=1, rel_heights = [1,2])) +\
cow.layout(nrow=1)
default_w_n, default_h_n = vis_nested._default_size(None,None)
static_aspect_ratio_n = default_h_n / default_w_n
# provide width ----
out_w, out_h = vis_nested._default_size(height=None,width=height_or_width)
assert np.allclose(out_w, height_or_width) and \
np.allclose(out_h, height_or_width * static_aspect_ratio_n), \
"if *only width* is provided, suggested height is relative to aspect "+\
"ratio that would be suggested if neither provided (v1)"
# provide height ----
out_w, out_h = vis_nested._default_size(height=height_or_width,width=None)
assert np.allclose(out_h, height_or_width) and \
np.allclose(out_w, height_or_width / static_aspect_ratio_n), \
"if *only height* is provided, suggested width is relative to aspect "+\
"ratio that would be suggested if neither provided (v1)"
def test_patch__svg_get_sizes():
g0 = p9.ggplot(p9_data.mpg) +\
p9.geom_bar(p9.aes(x="hwy")) +\
p9.labs(title = 'Plot 0')
g1 = p9.ggplot(p9_data.mpg) +\
p9.geom_point(p9.aes(x="hwy", y = "displ")) +\
p9.labs(title = 'Plot 1')
g2 = p9.ggplot(p9_data.mpg) +\
p9.geom_point(p9.aes(x="hwy", y = "displ", color="class")) +\
p9.labs(title = 'Plot 2')
g3 = p9.ggplot(p9_data.mpg[p9_data.mpg["class"].isin(["compact",
"suv",
"pickup"])]) +\
p9.geom_histogram(p9.aes(x="hwy"),bins=10) +\
p9.facet_wrap("class")
# basic option ----------
vis1 = cow.patch(g0,g1,g2) +\
cow.layout(design = np.array([[0,1],
[0,2]]),
rel_heights = [4,1])
# successful sizings ----
sizes, logics = vis1._svg_get_sizes(width_pt = 20 * 72,
height_pt = 20 * 72)
requested_sizes = [(10,20), (10,16), (10,4)]
assert np.all(logics), \
"expected all plotnine objects to be able to be sized correctly "+\
"in very large output (v1)"
assert type(sizes) is list and \
np.all([len(s) == 2 and type(s) is tuple for s in sizes]), \
"expected structure of sizes list is incorrect (v1)"
assert np.all([2/3 < (sizes[s_idx][0]/requested_sizes[s_idx][0]) < 1.5 and \
2/3 < (sizes[s_idx][1]/requested_sizes[s_idx][1]) < 1.5
for s_idx in [0,1,2]]), \
"suggested sizing in sizes isn't too extreme relative to true "+\
"requested sizes- this is just a sanity check, "+\
"not a robust test (v1)"
# failed sizings ------
sizes_f, logics_f = vis1._svg_get_sizes(width_pt = 10 * 72,
height_pt = 10 * 72)
requested_sizes_f = [(5,10), (5,8), (5,2)] # final one should fail...
assert not np.all(logics_f) and (logics_f == [True, True, False]), \
"expected not all plotnine objects to be able to be sized correctly "+\
"in small output (v1.1 - failed)"
assert type(sizes_f) is list and \
np.all([len(s) == 2 and type(s) is tuple for s in sizes_f]), \
"expected structure of sizes list is incorrect (v1.1 - failed)"
assert np.all([2/3 < (sizes_f[s_idx][0]/requested_sizes_f[s_idx][0]) < 1.5 and \
2/3 < (sizes_f[s_idx][1]/requested_sizes_f[s_idx][1]) < 1.5
for s_idx in [0,1]]), \
"suggested sizing in sizes (that didn't fail) isn't too extreme "+\
"relative to true "+\
"requested sizes- this is just a sanity check, "+\
"not a robust test (v1.1 - failed)"
assert sizes_f[2][0] < 1 and sizes_f[2][1] < 1, \
"expected failed sizing (due to being too small, to return a scaling" +\
"below 1 (note the correction to scaling should be 1/suggested scaling))," +\
"(v1.1 - failed)"
# nested option --------
vis_nested = cow.patch(g0,cow.patch(g1, g2)+\
cow.layout(ncol=1, rel_heights = [4,1])) +\
cow.layout(nrow=1)
# successful sizings ----
sizes_n, logics_n = vis_nested._svg_get_sizes(width_pt = 20 * 72,
height_pt = 20 * 72)
requested_sizes_n = [(10,20), (10,16), (10,4)]
assert np.all(_flatten_nested_list(logics_n)), \
"expected all plotnine objects to be able to be sized correctly "+\
"in very large output (v2 - nested)"
assert type(sizes_n) is list and len(sizes_n) == 2 and \
type(sizes_n[0]) is tuple and type(sizes_n[1]) is list and \
len(sizes_n[0]) == 2 and len(sizes_n[1]) == 2 and \
np.all([len(s) == 2 and type(s) is tuple for s in sizes_n[1]]), \
"expected structure of sizes list is incorrect (v2 - nested)"
sizes_n_flattened = _flatten_nested_list(sizes_n)
assert np.all([2/3 < (sizes_n_flattened[s_idx][0]/requested_sizes[s_idx][0]) < 1.5 and \
2/3 < (sizes_n_flattened[s_idx][1]/requested_sizes[s_idx][1]) < 1.5
for s_idx in [0,1,2]]), \
"suggested sizing in sizes isn't too extreme relative to true "+\
"requested sizes- this is just a sanity check, "+\
"not a robust test (v2 - nested)"
assert np.allclose(sizes_n_flattened, sizes), \
"expected nested and non-nested suggested sizes to be equal (v1 vs v2)"
# failed sizings ------
sizes_f_n, logics_f_n = vis_nested._svg_get_sizes(width_pt = 10 * 72,
height_pt = 10 * 72)
requested_sizes_f = [(5,10), (5,8), (5,2)] # final one should fail ...
logic_f_n_flat = _flatten_nested_list(logics_f_n)
sizes_f_n_flat = _flatten_nested_list(sizes_f_n)
assert not np.all(logic_f_n_flat) and \
(logic_f_n_flat == [True, True, False]), \
"expected not all plotnine objects to be able to be sized correctly "+\
"in smaller output (v2.1 - nested, failed)"
assert type(sizes_f_n) is list and len(sizes_f_n) == 2 and \
type(sizes_f_n[0]) is tuple and type(sizes_f_n[1]) is list and \
len(sizes_f_n[0]) == 2 and len(sizes_f_n[1]) == 2 and \
np.all([len(s) == 2 and type(s) is tuple for s in sizes_f_n[1]]), \
"expected structure of sizes list is incorrect (v2.1 - nested, failed)"
assert np.all([2/3 < (sizes_f_n_flat[s_idx][0]/requested_sizes_f[s_idx][0]) < 1.5 and \
2/3 < (sizes_f_n_flat[s_idx][1]/requested_sizes_f[s_idx][1]) < 1.5
for s_idx in [0,1]]), \
"suggested sizing in sizes (that didn't fail) isn't too extreme "+\
"relative to true "+\
"requested sizes- this is just a sanity check, "+\
"not a robust test (v2.1 - nested, failed)"
assert sizes_f_n_flat[2][0] < 1 and sizes_f_n_flat[2][1] < 1, \
"expected failed sizing (due to being too small, to return a scaling" +\
"below 1 (note the correction to scaling should be 1/suggested scaling))," +\
"(v2.1 - nested, failed)"
assert np.allclose(sizes_f_n_flat, sizes_f), \
"expected nested and non-nested suggested sizes to be equal (v1.1 vs v2.1 - failed)"
@given(st.floats(min_value=.5, max_value=49),
st.floats(min_value=.5, max_value=49),
st.floats(min_value=.5, max_value=49),
st.floats(min_value=.5, max_value=49),
st.floats(min_value=.5, max_value=49),
st.floats(min_value=.5, max_value=49))
def test_patch__process_sizes(w1,h1,w2,h2,w3,h3):
# default patch (not needed)
empty_patch = cow.patch()
# not nested -------
sizes = [(w1,h1),(w2,h2),(w3,h3)]
# all true ---
logics = [True, True, True]
out_s = empty_patch._process_sizes(sizes = sizes, logics = logics)
assert out_s == sizes, \
"expected sizes to return if all logics true"
# not all true ----
logics_f = [True, True, False]
out_s1 = empty_patch._process_sizes(sizes = sizes, logics = logics_f)
assert np.allclose(out_s1, 1/np.min(sizes[2])), \
"expected max_scaling should be the max of 1/width_scale and "+\
"1/height_scale assoicated with failed plot(s) (v1.1 - 1 plot failed)"
logics_f2 = [True, False, False]
out_s2 = empty_patch._process_sizes(sizes = sizes, logics = logics_f2)
assert np.allclose(out_s2, 1/np.min([w2,h2,w3,h3])), \
"expected max_scaling should be the max of 1/width_scale and "+\
"1/height_scale assoicated with failed plot(s) (v1.2 - 2 plot failed)"
# nested ---------
sizes_n = [(w1,h1),[(w2,h2),(w3,h3)]]
# all true ---
logics_n = [True, [True, True]]
out_s_n = empty_patch._process_sizes(sizes = sizes_n, logics = logics_n)
assert out_s_n == sizes_n, \
"expected unflatted sizes to return if all logics true (v2 - nested)"
# not all true ----
logics_n_f = [True, [True, False]]
out_s1 = empty_patch._process_sizes(sizes = sizes_n, logics = logics_n_f)
assert np.allclose(out_s1, 1/np.min(sizes_n[1][1])), \
"expected max_scaling should be the max of 1/width_scale and "+\
"1/height_scale assoicated with failed plot(s) (v2.1 - 1 plot failed)"
logics_f2 = [True, [False, False]]
out_s2 = empty_patch._process_sizes(sizes = sizes, logics = logics_f2)
assert np.allclose(out_s2, 1/np.min([w2,h2,w3,h3])), \
"expected max_scaling should be the max of 1/width_scale and "+\
"1/height_scale assoicated with failed plot(s) (v2.2 - 2 plot failed)"
# global saving and showing and creating ------
def _layouts_and_patches_patch_plus_layout(idx):
    # creation of some ggplot objects
g0 = p9.ggplot(p9_data.mpg) +\
p9.geom_bar(p9.aes(x="hwy")) +\
p9.labs(title = 'Plot 0')
g1 = p9.ggplot(p9_data.mpg) +\
p9.geom_point(p9.aes(x="hwy", y = "displ")) +\
p9.labs(title = 'Plot 1')
g2 = p9.ggplot(p9_data.mpg) +\
p9.geom_point(p9.aes(x="hwy", y = "displ", color="class")) +\
p9.labs(title = 'Plot 2')
g3 = p9.ggplot(p9_data.mpg[p9_data.mpg["class"].isin(["compact",
"suv",
"pickup"])]) +\
p9.geom_histogram(p9.aes(x="hwy"), bins=10) +\
p9.facet_wrap("class")
if idx == 0:
patch_obj = cow.patch(g0,g1,g2)
layout_obj = cow.layout(design = np.array([[0,0,0,1,1,1],
[0,0,0,2,2,2],
[0,0,0,2,2,2]]))
elif idx == 1:
patch_obj = cow.patch(g0,g1,g2)
layout_obj = cow.layout(design = """
AB
AC
AC
""")
elif idx == 2:
patch_obj = cow.patch(g0,g1,g2,g3)
layout_obj = cow.layout(ncol=3)
elif idx == 3:
patch_obj = cow.patch(g0,g1,g2,g3)
layout_obj = cow.layout(nrow=2)
elif idx == 4:
patch_obj = cow.patch(g0,g1,g2,g3)
layout_obj = cow.layout(nrow=2,ncol=3)
elif idx == 5:
patch_obj = cow.patch(g0,g1,g2)
layout_obj = cow.layout(nrow=1, rel_widths = [1,1,2])
elif idx == 6:
patch_obj = cow.patch(g0,g1,g2)
layout_obj = cow.layout(nrow=2, rel_widths = [1,2],
rel_heights = [1,2])
return patch_obj, layout_obj
@pytest.mark.parametrize("idx", np.arange(7,dtype=int))
def test_patch_plus_layout_second(image_regression, idx):
"""
test patch + layout (varying)
"""
patch_obj, layout_obj = _layouts_and_patches_patch_plus_layout(idx)
vis_patch = patch_obj + layout_obj
with io.BytesIO() as fid2:
vis_patch.save(filename=fid2, width=12, | |
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.base.exchange import Exchange
import hashlib
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import PermissionDenied
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import BadRequest
from ccxt.base.errors import BadSymbol
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidOrder
from ccxt.base.decimal_to_precision import TICK_SIZE
from ccxt.base.precise import Precise
class ascendex(Exchange):
def describe(self):
return self.deep_extend(super(ascendex, self).describe(), {
'id': 'ascendex',
'name': 'AscendEX',
'countries': ['SG'], # Singapore
# 8 requests per minute = 0.13333 per second => rateLimit = 750
# testing 400 works
'rateLimit': 400,
'certified': True,
'pro': True,
# new metainfo interface
'has': {
'CORS': None,
'spot': True,
'margin': True,
'swap': True,
'future': True,
'option': False,
'addMargin': True,
'cancelAllOrders': True,
'cancelOrder': True,
'createOrder': True,
'createReduceOnlyOrder': True,
'createStopLimitOrder': True,
'createStopMarketOrder': True,
'createStopOrder': True,
'fetchAccounts': True,
'fetchBalance': True,
'fetchClosedOrders': True,
'fetchCurrencies': True,
'fetchDepositAddress': True,
'fetchDepositAddresses': False,
'fetchDepositAddressesByNetwork': False,
'fetchDeposits': True,
'fetchFundingHistory': False,
'fetchFundingRate': False,
'fetchFundingRateHistory': False,
'fetchFundingRates': True,
'fetchIndexOHLCV': False,
'fetchLeverage': False,
'fetchLeverageTiers': True,
'fetchMarketLeverageTiers': 'emulated',
'fetchMarkets': True,
'fetchMarkOHLCV': False,
'fetchOHLCV': True,
'fetchOpenOrders': True,
'fetchOrder': True,
'fetchOrderBook': True,
'fetchOrders': False,
'fetchPosition': False,
'fetchPositions': True,
'fetchPositionsRisk': False,
'fetchPremiumIndexOHLCV': False,
'fetchTicker': True,
'fetchTickers': True,
'fetchTrades': True,
'fetchTradingFee': False,
'fetchTradingFees': True,
'fetchTransactionFee': False,
'fetchTransactionFees': False,
'fetchTransactions': True,
'fetchTransfer': False,
'fetchTransfers': False,
'fetchWithdrawal': False,
'fetchWithdrawals': True,
'reduceMargin': True,
'setLeverage': True,
'setMarginMode': True,
'setPositionMode': False,
'transfer': True,
},
'timeframes': {
'1m': '1',
'5m': '5',
'15m': '15',
'30m': '30',
'1h': '60',
'2h': '120',
'4h': '240',
'6h': '360',
'12h': '720',
'1d': '1d',
'1w': '1w',
'1M': '1m',
},
'version': 'v2',
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/112027508-47984600-8b48-11eb-9e17-d26459cc36c6.jpg',
'api': {
'rest': 'https://ascendex.com',
},
'test': {
'rest': 'https://api-test.ascendex-sandbox.com',
},
'www': 'https://ascendex.com',
'doc': [
'https://ascendex.github.io/ascendex-pro-api/#ascendex-pro-api-documentation',
],
'fees': 'https://ascendex.com/en/feerate/transactionfee-traderate',
'referral': {
'url': 'https://ascendex.com/en-us/register?inviteCode=EL6BXBQM',
'discount': 0.25,
},
},
'api': {
'v1': {
'public': {
'get': {
'assets': 1,
'products': 1,
'ticker': 1,
'barhist/info': 1,
'barhist': 1,
'depth': 1,
'trades': 1,
'cash/assets': 1, # not documented
'cash/products': 1, # not documented
'margin/assets': 1, # not documented
'margin/products': 1, # not documented
'futures/collateral': 1,
'futures/contracts': 1,
'futures/ref-px': 1,
'futures/market-data': 1,
'futures/funding-rates': 1,
'risk-limit-info': 1,
},
},
'private': {
'get': {
'info': 1,
'wallet/transactions': 1,
'wallet/deposit/address': 1, # not documented
'data/balance/snapshot': 1,
'data/balance/history': 1,
},
'accountCategory': {
'get': {
'balance': 1,
'order/open': 1,
'order/status': 1,
'order/hist/current': 1,
'risk': 1,
},
'post': {
'order': 1,
'order/batch': 1,
},
'delete': {
'order': 1,
'order/all': 1,
'order/batch': 1,
},
},
'accountGroup': {
'get': {
'cash/balance': 1,
'margin/balance': 1,
'margin/risk': 1,
'futures/collateral-balance': 1,
'futures/position': 1,
'futures/risk': 1,
'futures/funding-payments': 1,
'order/hist': 1,
'spot/fee': 1,
},
'post': {
'transfer': 1,
'futures/transfer/deposit': 1,
'futures/transfer/withdraw': 1,
},
},
},
},
'v2': {
'public': {
'get': {
'assets': 1,
'futures/contract': 1,
'futures/collateral': 1,
'futures/pricing-data': 1,
},
},
'private': {
'get': {
'account/info': 1,
},
'accountGroup': {
'get': {
'order/hist': 1,
'futures/position': 1,
'futures/free-margin': 1,
'futures/order/hist/current': 1,
'futures/order/open': 1,
'futures/order/status': 1,
},
'post': {
'futures/isolated-position-margin': 1,
'futures/margin-type': 1,
'futures/leverage': 1,
'futures/transfer/deposit': 1,
'futures/transfer/withdraw': 1,
'futures/order': 1,
'futures/order/batch': 1,
'futures/order/open': 1,
'subuser/subuser-transfer': 1,
'subuser/subuser-transfer-hist': 1,
},
'delete': {
'futures/order': 1,
'futures/order/batch': 1,
'futures/order/all': 1,
},
},
},
},
},
'fees': {
'trading': {
'feeSide': 'get',
'tierBased': True,
'percentage': True,
'taker': self.parse_number('0.002'),
'maker': self.parse_number('0.002'),
},
},
'precisionMode': TICK_SIZE,
'options': {
'account-category': 'cash', # 'cash', 'margin', 'futures' # obsolete
'account-group': None,
'fetchClosedOrders': {
'method': 'v1PrivateAccountGroupGetOrderHist', # 'v1PrivateAccountGroupGetAccountCategoryOrderHistCurrent'
},
'defaultType': 'spot', # 'spot', 'margin', 'swap'
'accountsByType': {
'spot': 'cash',
'future': 'futures',
'margin': 'margin',
},
'transfer': {
'fillResponseFromRequest': True,
},
},
'exceptions': {
'exact': {
# not documented
'1900': BadRequest, # {"code":1900,"message":"Invalid Http Request Input"}
'2100': AuthenticationError, # {"code":2100,"message":"ApiKeyFailure"}
'5002': BadSymbol, # {"code":5002,"message":"Invalid Symbol"}
'6001': BadSymbol, # {"code":6001,"message":"Trading is disabled on symbol."}
'6010': InsufficientFunds, # {'code': 6010, 'message': 'Not enough balance.'}
'60060': InvalidOrder, # {'code': 60060, 'message': 'The order is already filled or canceled.'}
'600503': InvalidOrder, # {"code":600503,"message":"Notional is too small."}
# documented
'100001': BadRequest, # INVALID_HTTP_INPUT Http request is invalid
'100002': BadRequest, # DATA_NOT_AVAILABLE Some required data is missing
'100003': BadRequest, # KEY_CONFLICT The same key exists already
'100004': BadRequest, # INVALID_REQUEST_DATA The HTTP request contains invalid field or argument
'100005': BadRequest, # INVALID_WS_REQUEST_DATA Websocket request contains invalid field or argument
                    '100006': BadRequest,  # INVALID_ARGUMENT The argument is invalid
'100007': BadRequest, # ENCRYPTION_ERROR Something wrong with data encryption
'100008': BadSymbol, # SYMBOL_ERROR Symbol does not exist or not valid for the request
                    '100009': AuthenticationError,  # AUTHORIZATION_NEEDED Authorization is required for the API access or request
'100010': BadRequest, # INVALID_OPERATION The action is invalid or not allowed for the account
'100011': BadRequest, # INVALID_TIMESTAMP Not a valid timestamp
'100012': BadRequest, # INVALID_STR_FORMAT String format does not
'100013': BadRequest, # INVALID_NUM_FORMAT Invalid number input
'100101': ExchangeError, # UNKNOWN_ERROR Some unknown error
'150001': BadRequest, # INVALID_JSON_FORMAT Require a valid json object
'200001': AuthenticationError, # AUTHENTICATION_FAILED Authorization failed
'200002': ExchangeError, # TOO_MANY_ATTEMPTS Tried and failed too many times
'200003': ExchangeError, # ACCOUNT_NOT_FOUND Account not exist
'200004': ExchangeError, # ACCOUNT_NOT_SETUP Account not setup properly
'200005': ExchangeError, # ACCOUNT_ALREADY_EXIST Account already exist
'200006': ExchangeError, # ACCOUNT_ERROR Some error related with error
'200007': ExchangeError, # CODE_NOT_FOUND
'200008': ExchangeError, # CODE_EXPIRED Code expired
'200009': ExchangeError, # CODE_MISMATCH Code does not match
                    '200010': AuthenticationError,  # PASSWORD_ERROR Wrong password
                    '200011': ExchangeError,  # CODE_GEN_FAILED Required code was not generated promptly
'200012': ExchangeError, # FAKE_COKE_VERIFY
'200013': ExchangeError, # SECURITY_ALERT Provide security alert message
                    '200014': PermissionDenied,  # RESTRICTED_ACCOUNT Account is restricted for certain activity, such as trading or withdrawal.
'200015': PermissionDenied, # PERMISSION_DENIED No enough permission for the operation
'300001': InvalidOrder, # INVALID_PRICE Order price is invalid
'300002': InvalidOrder, # INVALID_QTY Order size is invalid
'300003': InvalidOrder, # INVALID_SIDE Order side is invalid
'300004': InvalidOrder, # INVALID_NOTIONAL Notional is too small or too large
                    '300005': InvalidOrder,  # INVALID_TYPE Order type is invalid
'300006': InvalidOrder, # INVALID_ORDER_ID Order id is invalid
'300007': InvalidOrder, # INVALID_TIME_IN_FORCE Time In Force in order request is invalid
'300008': InvalidOrder, # INVALID_ORDER_PARAMETER Some order parameter is invalid
'300009': InvalidOrder, # TRADING_VIOLATION Trading violation on account or asset
'300011': InsufficientFunds, # INVALID_BALANCE No enough account or asset balance for the trading
'300012': BadSymbol, # INVALID_PRODUCT Not a valid product supported by exchange
'300013': InvalidOrder, # INVALID_BATCH_ORDER Some or all orders are invalid in batch order request
'300014': InvalidOrder, # {"code":300014,"message":"Order price doesn't conform to the required tick size: 0.1","reason":"TICK_SIZE_VIOLATION"}
'300020': InvalidOrder, # TRADING_RESTRICTED There is some trading restriction on account or asset
'300021': InvalidOrder, # TRADING_DISABLED Trading is disabled on account or asset
'300031': InvalidOrder, # NO_MARKET_PRICE No market price for market type order trading
'310001': InsufficientFunds, # INVALID_MARGIN_BALANCE No enough margin balance
'310002': InvalidOrder, # INVALID_MARGIN_ACCOUNT Not a valid account for margin trading
'310003': InvalidOrder, # MARGIN_TOO_RISKY Leverage is too high
'310004': BadSymbol, # INVALID_MARGIN_ASSET This asset does not support margin trading
'310005': InvalidOrder, # INVALID_REFERENCE_PRICE There is no valid reference price
'510001': ExchangeError, # SERVER_ERROR Something wrong with server.
                    '900001': ExchangeError,  # HUMAN_CHALLENGE Human challenge did not pass
},
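                # Note: ccxt conventionally matches 'exact' entries against the numeric error
                # code returned by the exchange, while 'broad' entries (empty here) are matched
                # as substrings of the error message.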
'broad': {},
},
'commonCurrencies': {
'BOND': 'BONDED',
'BTCBEAR': 'BEAR',
'BTCBULL': 'BULL',
'BYN': 'BeyondFi',
'PLN': 'Pollen',
},
})
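    # Note: ccxt expands the nested 'api' map above into implicit request methods, e.g. the
    # 'v1 -> public -> get -> products' entry becomes self.v1PublicGetProducts(params) and
    # 'v2 -> private -> accountGroup -> get -> futures/position' becomes
    # self.v2PrivateAccountGroupGetFuturesPosition(params) (method names shown for illustration).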
def get_account(self, params={}):
# get current or provided bitmax sub-account
account = self.safe_value(params, 'account', self.options['account'])
return account.lower().capitalize()
def fetch_currencies(self, params={}):
assets = self.v1PublicGetAssets(params)
#
# {
# "code":0,
# "data":[
# {
# "assetCode" : "LTCBULL",
# "assetName" : "3X Long LTC Token",
# "precisionScale" : 9,
# "nativeScale" : 4,
# "withdrawalFee" : "0.2",
# "minWithdrawalAmt" : "1.0",
# "status" : "Normal"
# },
# ]
# }
#
margin = self.v1PublicGetMarginAssets(params)
#
# {
# "code":0,
# "data":[
# {
# "assetCode":"BTT",
# "borrowAssetCode":"BTT-B",
# "interestAssetCode":"BTT-I",
# "nativeScale":0,
# "numConfirmations":1,
# "withdrawFee":"100.0",
# "minWithdrawalAmt":"1000.0",
# "statusCode":"Normal",
# "statusMessage":"",
# "interestRate":"0.001"
# }
# ]
# }
#
cash = self.v1PublicGetCashAssets(params)
#
# {
# "code":0,
# "data":[
# {
# "assetCode":"LTCBULL",
# "nativeScale":4,
# "numConfirmations":20,
# "withdrawFee":"0.2",
# "minWithdrawalAmt":"1.0",
# "statusCode":"Normal",
# "statusMessage":""
# }
# ]
# }
#
assetsData = self.safe_value(assets, 'data', [])
marginData = self.safe_value(margin, 'data', [])
cashData = | |
# utils/augmentation.py
import random
import numbers
import math
import collections.abc
import torchvision
from torchvision import transforms
import torchvision.transforms.functional as F
from PIL import ImageOps, Image, ImageFilter
import numpy as np
from joblib import Parallel, delayed
class Padding:
def __init__(self, pad):
self.pad = pad
def __call__(self, img):
return ImageOps.expand(img, border=self.pad, fill=0)
class Scale:
def __init__(self, size, interpolation=Image.BICUBIC):
        assert isinstance(size, int) or (isinstance(size, collections.abc.Iterable) and len(size) == 2)
self.size = size
self.interpolation = interpolation
def __call__(self, imgmap):
img1 = imgmap[0]
if isinstance(self.size, int):
w, h = img1.size
if (w <= h and w == self.size) or (h <= w and h == self.size):
return imgmap
if w < h:
ow = self.size
oh = int(self.size * h / w)
return [i.resize((ow, oh), self.interpolation) for i in imgmap]
else:
oh = self.size
ow = int(self.size * w / h)
return [i.resize((ow, oh), self.interpolation) for i in imgmap]
else:
return [i.resize(self.size, self.interpolation) for i in imgmap]
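# Example (illustrative): Scale(256) on a clip of 400x300 (w x h) frames keeps the aspect
# ratio and resizes every frame to 341x256, since the shorter side is mapped to 256.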
class CenterCrop:
def __init__(self, size, consistent=True):
if isinstance(size, numbers.Number):
self.size = (int(size), int(size))
else:
self.size = size
def __call__(self, imgmap):
img1 = imgmap[0]
w, h = img1.size
th, tw = self.size
x1 = int(round((w - tw) / 2.))
y1 = int(round((h - th) / 2.))
return [i.crop((x1, y1, x1 + tw, y1 + th)) for i in imgmap]
class FiveCrop:
def __init__(self, size, where=1):
# 1=topleft, 2=topright, 3=botleft, 4=botright, 5=center
if isinstance(size, numbers.Number):
self.size = (int(size), int(size))
else:
self.size = size
self.where = where
def __call__(self, imgmap):
img1 = imgmap[0]
w, h = img1.size
th, tw = self.size
if (th > h) or (tw > w):
raise ValueError("Requested crop size {} is bigger than input size {}".format(self.size, (h,w)))
if self.where == 1:
return [i.crop((0, 0, tw, th)) for i in imgmap]
elif self.where == 2:
return [i.crop((w-tw, 0, w, th)) for i in imgmap]
elif self.where == 3:
return [i.crop((0, h-th, tw, h)) for i in imgmap]
elif self.where == 4:
            return [i.crop((w-tw, h-th, w, h)) for i in imgmap]
elif self.where == 5:
x1 = int(round((w - tw) / 2.))
y1 = int(round((h - th) / 2.))
return [i.crop((x1, y1, x1 + tw, y1 + th)) for i in imgmap]
class RandomSizedCrop:
def __init__(self, size, interpolation=Image.BICUBIC, consistent=True, p=1.0, seq_len=0, bottom_area=0.2):
self.size = size
self.interpolation = interpolation
self.consistent = consistent
self.threshold = p
self.seq_len = seq_len
self.bottom_area = bottom_area
def __call__(self, imgmap):
img1 = imgmap[0]
if random.random() < self.threshold: # do RandomSizedCrop
for attempt in range(10):
area = img1.size[0] * img1.size[1]
target_area = random.uniform(self.bottom_area, 1) * area
aspect_ratio = random.uniform(3. / 4, 4. / 3)
w = int(round(math.sqrt(target_area * aspect_ratio)))
h = int(round(math.sqrt(target_area / aspect_ratio)))
if self.consistent:
if random.random() < 0.5:
w, h = h, w
if w <= img1.size[0] and h <= img1.size[1]:
x1 = random.randint(0, img1.size[0] - w)
y1 = random.randint(0, img1.size[1] - h)
imgmap = [i.crop((x1, y1, x1 + w, y1 + h)) for i in imgmap]
for i in imgmap: assert(i.size == (w, h))
return [i.resize((self.size, self.size), self.interpolation) for i in imgmap]
else:
result = []
if random.random() < 0.5:
w, h = h, w
for idx, i in enumerate(imgmap):
if w <= img1.size[0] and h <= img1.size[1]:
if idx % self.seq_len == 0:
x1 = random.randint(0, img1.size[0] - w)
y1 = random.randint(0, img1.size[1] - h)
result.append(i.crop((x1, y1, x1 + w, y1 + h)))
assert(result[-1].size == (w, h))
else:
result.append(i)
assert len(result) == len(imgmap)
return [i.resize((self.size, self.size), self.interpolation) for i in result]
# Fallback
scale = Scale(self.size, interpolation=self.interpolation)
crop = CenterCrop(self.size)
return crop(scale(imgmap))
else: #don't do RandomSizedCrop, do CenterCrop
crop = CenterCrop(self.size)
return crop(imgmap)
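# Hypothetical usage sketch: these transforms all operate on a *list* of PIL frames
# (imgmap). For an 8-frame clip one might compose, for example,
#
#     clip_transform = transforms.Compose([
#         RandomSizedCrop(size=128, consistent=False, p=1.0, seq_len=8),
#         RandomHorizontalFlip(consistent=False, seq_len=8),
#     ])
#     out_frames = clip_transform(frames)  # frames: list of 8 PIL.Image objects
#
# With consistent=False and seq_len=8, a fresh crop/flip is drawn once per block of 8 frames.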
class RandomHorizontalFlip:
def __init__(self, consistent=True, command=None, seq_len=0):
self.consistent = consistent
if seq_len != 0:
self.consistent = False
if command == 'left':
self.threshold = 0
elif command == 'right':
self.threshold = 1
else:
self.threshold = 0.5
self.seq_len = seq_len
def __call__(self, imgmap):
if self.consistent:
if random.random() < self.threshold:
return [i.transpose(Image.FLIP_LEFT_RIGHT) for i in imgmap]
else:
return imgmap
else:
result = []
for idx, i in enumerate(imgmap):
if idx % self.seq_len == 0: th = random.random()
if th < self.threshold:
result.append(i.transpose(Image.FLIP_LEFT_RIGHT))
else:
result.append(i)
assert len(result) == len(imgmap)
return result
class RandomGray:
    '''Actually channel splitting rather than a true grayscale conversion: one randomly chosen channel is copied into all three.'''
def __init__(self, consistent=True, p=0.5, dynamic=False, seq_len=0):
self.consistent = consistent
if seq_len != 0:
self.consistent = False
self.p = p # prob to grayscale
self.seq_len = seq_len
def __call__(self, imgmap):
tmp_p = self.p
if self.consistent:
if random.random() < tmp_p:
return [self.grayscale(i) for i in imgmap]
else:
return imgmap
else:
result = []
if self.seq_len == 0:
for i in imgmap:
if random.random() < tmp_p:
result.append(self.grayscale(i))
else:
result.append(i)
else:
for idx, i in enumerate(imgmap):
if idx % self.seq_len == 0:
do_gray = random.random() < tmp_p
if do_gray: result.append(self.grayscale(i))
else: result.append(i)
assert len(result) == len(imgmap)
return result
def grayscale(self, img):
channel = np.random.choice(3)
np_img = np.array(img)[:,:,channel]
np_img = np.dstack([np_img, np_img, np_img])
img = Image.fromarray(np_img, 'RGB')
return img
class ColorJitter(object):
"""Randomly change the brightness, contrast and saturation of an image.
Args:
brightness (float or tuple of float (min, max)): How much to jitter brightness.
brightness_factor is chosen uniformly from [max(0, 1 - brightness), 1 + brightness]
or the given [min, max]. Should be non negative numbers.
contrast (float or tuple of float (min, max)): How much to jitter contrast.
contrast_factor is chosen uniformly from [max(0, 1 - contrast), 1 + contrast]
or the given [min, max]. Should be non negative numbers.
saturation (float or tuple of float (min, max)): How much to jitter saturation.
saturation_factor is chosen uniformly from [max(0, 1 - saturation), 1 + saturation]
or the given [min, max]. Should be non negative numbers.
hue (float or tuple of float (min, max)): How much to jitter hue.
hue_factor is chosen uniformly from [-hue, hue] or the given [min, max].
Should have 0<= hue <= 0.5 or -0.5 <= min <= max <= 0.5.
"""
def __init__(self, brightness=0, contrast=0, saturation=0, hue=0, consistent=False, p=1.0, seq_len=0):
self.brightness = self._check_input(brightness, 'brightness')
self.contrast = self._check_input(contrast, 'contrast')
self.saturation = self._check_input(saturation, 'saturation')
self.hue = self._check_input(hue, 'hue', center=0, bound=(-0.5, 0.5),
clip_first_on_zero=False)
self.consistent = consistent
self.threshold = p
self.seq_len = seq_len
def _check_input(self, value, name, center=1, bound=(0, float('inf')), clip_first_on_zero=True):
if isinstance(value, numbers.Number):
if value < 0:
raise ValueError("If {} is a single number, it must be non negative.".format(name))
value = [center - value, center + value]
if clip_first_on_zero:
value[0] = max(value[0], 0)
elif isinstance(value, (tuple, list)) and len(value) == 2:
if not bound[0] <= value[0] <= value[1] <= bound[1]:
raise ValueError("{} values should be between {}".format(name, bound))
else:
raise TypeError("{} should be a single number or a list/tuple with lenght 2.".format(name))
# if value is 0 or (1., 1.) for brightness/contrast/saturation
# or (0., 0.) for hue, do nothing
if value[0] == value[1] == center:
value = None
return value
@staticmethod
def get_params(brightness, contrast, saturation, hue):
"""Get a randomized transform to be applied on image.
Arguments are same as that of __init__.
Returns:
Transform which randomly adjusts brightness, contrast and
saturation in a random order.
"""
transforms = []
if brightness is not None:
brightness_factor = random.uniform(brightness[0], brightness[1])
transforms.append(torchvision.transforms.Lambda(lambda img: F.adjust_brightness(img, brightness_factor)))
if contrast is not None:
contrast_factor = random.uniform(contrast[0], contrast[1])
transforms.append(torchvision.transforms.Lambda(lambda img: F.adjust_contrast(img, contrast_factor)))
if saturation is not None:
saturation_factor = random.uniform(saturation[0], saturation[1])
transforms.append(torchvision.transforms.Lambda(lambda img: F.adjust_saturation(img, saturation_factor)))
if hue is not None:
hue_factor = random.uniform(hue[0], hue[1])
transforms.append(torchvision.transforms.Lambda(lambda img: F.adjust_hue(img, hue_factor)))
random.shuffle(transforms)
transform = torchvision.transforms.Compose(transforms)
return transform
def __call__(self, imgmap):
if random.random() < self.threshold: # do ColorJitter
if self.consistent:
transform = self.get_params(self.brightness, self.contrast,
self.saturation, self.hue)
return [transform(i) for i in imgmap]
else:
if self.seq_len == 0:
return [self.get_params(self.brightness, self.contrast, self.saturation, self.hue)(img) for img in imgmap]
else:
result = []
for idx, img in enumerate(imgmap):
if idx % self.seq_len == 0:
transform = self.get_params(self.brightness, self.contrast,
self.saturation, self.hue)
result.append(transform(img))
return result
# result = []
# for img in imgmap:
# transform = self.get_params(self.brightness, self.contrast,
# self.saturation, self.hue)
# result.append(transform(img))
# return result
else: # don't do ColorJitter, do nothing
return imgmap
def __repr__(self):
format_string = self.__class__.__name__ + '('
format_string += 'brightness={0}'.format(self.brightness)
        format_string += ', contrast={0}'.format(self.contrast)
        format_string += ', saturation={0}'.format(self.saturation)
        format_string += ', hue={0})'.format(self.hue)
        return format_string
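# A minimal usage sketch, not part of the original module: the transforms above
# operate on a *list* of PIL frames so that a single random draw can be shared
# across a whole clip when `consistent=True`. The frame contents below are
# placeholder images, purely for illustration.
def _video_transform_demo():
    frames = [Image.new('RGB', (320, 240)) for _ in range(8)]
    for t in (RandomSizedCrop(size=128, consistent=True),
              RandomHorizontalFlip(consistent=True),
              ColorJitter(0.5, 0.5, 0.5, 0.25, consistent=True)):
        frames = t(frames)
    return frames  # eight 128x128 frames, all augmented identically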
# Copyright 2019 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime
import ast
import re
import textwrap
from .scanner import TextPos
from .ast import *
from .util import get_indentation, is_blank, starts_with_newline, ends_with_newline, escape
from sweetener.node import preorder, set_parent_nodes
def indent(text, indentation, until_prev_line_blank=True, start=0):
out = ''
for i in range(start, len(text)):
ch = text[i]
if ch == '\n':
until_prev_line_blank = True
elif until_prev_line_blank and not is_blank(ch):
out += indentation
until_prev_line_blank = False
out += ch
return out
def get_indentation(text, until_prev_line_blank=True, start=0):
min_indent = None
curr_indent = 0
for i in range(start, len(text)):
ch = text[i]
if ch == '\n':
until_prev_line_blank = True
curr_indent = 0
elif is_blank(ch):
if until_prev_line_blank:
curr_indent += 1
else:
if until_prev_line_blank:
if min_indent is None or curr_indent < min_indent:
min_indent = curr_indent
until_prev_line_blank = False
if min_indent is None:
min_indent = 0
return min_indent
def dedent(text, until_prev_line_blank=True, indentation=None, start=0):
out = ''
if indentation is None:
indentation = get_indentation(text, until_prev_line_blank=until_prev_line_blank, start=start)
curr_indent = 0
i = start
while i < len(text):
ch = text[i]
if ch == '\n':
until_prev_line_blank = True
curr_indent = 0
out += ch
elif is_blank(ch):
if until_prev_line_blank:
curr_indent += 1
if curr_indent > indentation:
out += ch
else:
out += ch
else:
if until_prev_line_blank:
until_prev_line_blank = False
out += ch
i += 1
return out
# def redent(text, indentation, until_prev_line_blank=True):
# return indent(dedent(text, until_prev_line_blank), indentation, until_prev_line_blank)
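# A small illustration, not part of the original module, of how `dedent` and
# `indent` cooperate: `dedent` strips the common leading indentation of the
# non-blank lines, while `indent` inserts the given prefix just before the
# first non-blank character of every line.
def _indentation_demo():
    text = '    foo\n      bar\n'
    stripped = dedent(text)          # 'foo\n  bar\n'
    return indent(stripped, '> ')    # '> foo\n  > bar\n'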
def skip(iterator, count):
for _i in range(0, count):
next(iterator)
return iterator
class Env:
def __init__(self, parent=None):
self.parent = parent
self._variables = {}
def lookup(self, name):
if name in self._variables:
return self._variables[name]
if self.parent is not None:
return self.parent.lookup(name)
return None
def __contains__(self, item):
return self.lookup(item) is not None
def set(self, name, value):
self._variables[name] = value
def fork(self):
return Env(self)
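# A quick sketch, not part of the original module, of the scoping rules above:
# a forked Env can read its parent's bindings, while new bindings stay local.
def _env_demo():
    parent = Env()
    parent.set('greeting', 'hello')
    child = parent.fork()
    assert child.lookup('greeting') == 'hello'
    child.set('name', 'world')
    assert parent.lookup('name') is None and 'name' in child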
def to_snake_case(name):
if '-' in name:
return name.replace('-', '_')
else:
s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
def to_camel_case(name, first_char_lowercase=False):
result = re.sub(r'[-_]', '', name.title())
return result[0].lower() + result[1:] if first_char_lowercase else result
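# Hedged examples, not from the original source, of the two casing helpers:
def _casing_demo():
    assert to_snake_case('FooBar') == 'foo_bar'
    assert to_snake_case('foo-bar') == 'foo_bar'
    assert to_camel_case('foo_bar') == 'FooBar'
    assert to_camel_case('foo_bar', first_char_lowercase=True) == 'fooBar'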
DEFAULT_BUILTINS = {
'repr': repr,
'zip': zip,
'enumerate': enumerate,
'isinstance': isinstance,
'range': range,
'reversed': reversed,
'+': lambda a, b: a + b,
'-': lambda a, b: a - b,
'*': lambda a, b: a * b,
'/': lambda a, b: a / b,
'%': lambda a, b: a % b,
'==': lambda a, b: a == b,
'!=': lambda a, b: a != b,
'camel': to_camel_case,
'snake': to_snake_case,
'upper': lambda s: s.upper(),
'lower': lambda s: s.lower(),
'|>': lambda val, f: f(val),
'in': lambda key, val: key in val
}
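# An illustration, not part of the original source, of how the table above is
# consumed by the evaluator further down: '|>' pipes a value into a callable,
# and 'in' tests membership.
def _builtins_demo():
    pipe = DEFAULT_BUILTINS['|>']
    assert pipe('ada', DEFAULT_BUILTINS['upper']) == 'ADA'
    assert DEFAULT_BUILTINS['in']('x', {'x': 1}) is True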
def is_inner_wrapped(node):
if isinstance(node, IfStatement):
return all(is_inner_wrapped(case) for case in node.cases)
elif isinstance(node, CodeBlock) \
or isinstance(node, TextStatement) \
or isinstance(node, ExpressionStatement):
return False
elif isinstance(node, ForInStatement) \
or isinstance(node, JoinStatement) \
or isinstance(node, SetIndentStatement) \
or isinstance(node, IfStatementCase) \
or isinstance(node, Template):
body = node.body
else:
raise RuntimeError(f'unable to deduce whether {node} is wrapped or not')
return len(body) > 0 \
and isinstance(body[0], TextStatement) \
and starts_with_newline(body[0].text) \
and isinstance(body[-1], TextStatement) \
and ends_with_newline(body[-1].text)
def is_outer_wrapped(node):
if isinstance(node, IfStatementCase):
node = node.parent
if node.prev_child is not None:
if not isinstance(node.prev_child, TextStatement):
return False
if not (ends_with_newline(node.prev_child.text) \
or (isinstance(node.parent, Template) and is_blank(node.prev_child.text))):
return False
if node.next_child is not None:
if not isinstance(node.next_child, TextStatement):
return False
if not (starts_with_newline(node.next_child.text) \
or (isinstance(node.parent, Template) and is_blank(node.next_child.text))):
return False
return True
def is_wrapped(node):
if isinstance(node, Template):
return False
if isinstance(node, ExpressionStatement):
return False
return is_inner_wrapped(node) and is_outer_wrapped(node)
def expand_body(node):
if isinstance(node, ForInStatement) \
or isinstance(node, JoinStatement) \
or isinstance(node, SetIndentStatement) \
or isinstance(node, IfStatementCase):
for i in range(0, len(node.body)):
yield ['body', i], node.body[i]
def get_inner_indentation(node, after_blank_line=True):
curr_indent = 0
min_indent = None
for child in skip(preorder(node, expand=expand_body), 1):
if isinstance(child, TextStatement):
for ch in child.text:
if ch == '\n':
after_blank_line = True
curr_indent = 0
elif is_blank(ch):
if after_blank_line:
curr_indent += 1
else:
if after_blank_line:
after_blank_line = False
if min_indent is None or curr_indent < min_indent:
min_indent = curr_indent
else:
if after_blank_line:
after_blank_line = False
if min_indent is None or curr_indent < min_indent:
min_indent = curr_indent
return curr_indent if min_indent is None else min_indent
def remove_last_newlines(result, count):
offset = len(result)-1
while count > 0 and offset >= 0:
ch = result[offset]
if ch == '\n':
count -= 1
elif not is_blank(ch):
break
offset -= 1
del result[offset+1:]
def find_first_line(text):
for i in range(0, len(text)):
if text[i] == '\n':
return i
return 0
def find_trailing_newline(text, start=None):
if start is None:
start = len(text)-1
offset = start
for ch in reversed(text[:start+1]):
if ch == '\n':
return offset
if not is_blank(ch):
break
offset -= 1
return len(text)
class Core(Record):
pass
class Join(Core):
separator: Optional[str]
elements: List[Core]
class Text(Core):
value: str
class SetIndent(Core):
indentation: str
expression: Core
def evaluate(ast, ctx={}, indentation=' ', filename="#<anonymous>"):
if isinstance(ast, str):
from .scanner import Scanner
from .parser import Parser
sc = Scanner(filename, ast)
p = Parser(sc)
ast = p.parse_all()
set_parent_nodes(ast)
out = ''
indentation = 0
indent_override = None
curr_indent = ''
after_blank_line = True
skip_next_chars = 0
written_curr_indent = 0
written_after_blank_line = True
def write_or_skip(text):
nonlocal out, skip_next_chars
if skip_next_chars < len(text):
out += text[skip_next_chars:]
skip_next_chars = 0
else:
skip_next_chars -= len(text)
def write(text, undent):
nonlocal out, written_after_blank_line, written_curr_indent
for ch in text:
if ch == '\n':
written_after_blank_line = True
write_or_skip(ch)
written_curr_indent = 0
elif is_blank(ch):
if written_after_blank_line:
written_curr_indent += 1
else:
write_or_skip(ch)
else:
if written_after_blank_line:
write_or_skip(' ' * (indent_override if indent_override is not None else max(0, written_curr_indent - undent)))
written_after_blank_line = False
write_or_skip(ch)
def eval_code_expr(e, env):
if isinstance(e, ConstExpression):
return e.value
elif isinstance(e, IndexExpression):
val = eval_code_expr(e.expression, env)
index = eval_code_expr(e.index, env)
return val[index]
elif isinstance(e, SliceExpression):
val = eval_code_expr(e.expression, env)
low = eval_code_expr(e.min, env)
high = eval_code_expr(e.max, env)
return val[low:high]
elif isinstance(e, MemberExpression):
out = eval_code_expr(e.expression, env)
for name in e.members:
out = getattr(out, name)
return out
elif isinstance(e, VarRefExpression):
value = env.lookup(e.name)
if value is not None:
return value
if e.name == 'globals':
return lambda: global_env
elif e.name == 'locals':
return lambda: env
else:
raise RuntimeError(f"variable '{e.name}' is not defined")
elif isinstance(e, AppExpression):
op = eval_code_expr(e.operator, env)
args = list(eval_code_expr(arg, env) for arg in e.operands)
if not callable(op):
raise RuntimeError("Could not evaluate Templately expression: result is not applicable.".format(op))
return op(*args)
else:
raise RuntimeError("Could not evaluate Templately expression: unknown expression {}.".format(e))
def eval_statement_list(stmts, env, parent, indent_delta):
nonlocal skip_next_chars, out
parent_wrapped = is_wrapped(parent)
outer_indent = parent.span.start_pos.column-1 - indent_delta
inner_indent = (get_inner_indentation(parent, after_blank_line) - (parent.span.start_pos.column-1)) if parent_wrapped else 0
all_empty = True
if parent_wrapped:
skip_next_chars += 1
for i, stmt in enumerate(stmts):
if i > 0 and is_wrapped(stmts[i-1]):
skip_next_chars += 1
is_empty = eval_statement(stmt, env, indent_delta + inner_indent)
if is_empty:
all_empty = False
# if all_empty and is_outer_wrapped(parent):
# skip_next_chars += 1
return all_empty
def update_locals(text):
nonlocal after_blank_line, curr_indent
last_indent = ''
has_newline = False
for ch in reversed(text):
if ch == ' ':
last_indent += ' '
elif ch == '\n':
has_newline = True
after_blank_line = True
break
else:
last_indent = ''
after_blank_line = False
if not has_newline:
if after_blank_line:
curr_indent = curr_indent + last_indent
else:
curr_indent = last_indent
def assign_patterns(env, pattern, value):
if isinstance(pattern, VarPattern):
env.set(pattern.name, value)
elif isinstance(pattern, TuplePattern):
for patt_2, val_2 in zip(pattern.elements, value):
assign_patterns(env, patt_2, val_2)
else:
raise RuntimeError(f'could not evaluate pattern {pattern}')
def eval_repeat(stmt, sep, env, indent_delta):
nonlocal out
all_empty = False
prev_empty = True
written_separator = False
# the actual Python value that is going to be iterated over
iterable = eval_code_expr(stmt.expression, env)
# we 'fork' the env so that variables defined inside it do not leak to
# the parent env
# this is slightly different than the way Python works, but helps in
        # avoiding unexpected behaviour
        self.image = random.choice(icy_images)
self.image.set_colorkey(BLACK)
self.rect = self.image.get_rect()
self.rect.x = x
self.rect.y = y
# Applying the sprites spawning on platform if wing pow is not initiated
if not self.game.player.has_wings:
if random.randrange(100) < POW_SPAWN_RATIO and not game.player.has_bubble and not game.player.has_jetpack \
and len(self.game.powerups) == 0 and self.game.score != 0 and not self.on_move_y:
Powerup(self.game, self)
self.has_pow = True
if random.randrange(100) < COIN_SPAWN_RATIO:
Coin(self.game, self)
self.has_coin = True
            # There shouldn't be too many mobs at once
if len(self.game.mobs) < 3:
if random.randrange(100) < SPIKEY_SPAWN_RATIO and self.image == normal_images[0] and not self.on_move \
and not self.has_mob and PLAT_SNOW_START > self.game.score > SPIKEY_SPAWN_SCORE:
Spikey(self.game, self)
self.has_spikey = True
self.has_mob = True
if random.randrange(100) < CLOUD_SPAWN_RATIO and not self.on_move and not self.has_mob and \
self.game.score > PLAT_STONE_START:
Cloud(self.game, self)
self.has_cloud = True
self.has_mob = True
if random.randrange(100) < WM_SPAWN_RATIO and (self.image == pink_images[0] or self.image == snowy_images[0]) and not self.on_move \
and not self.has_mob and self.game.score > PLAT_PINK_START:
Wingman(self.game, self)
self.has_wingman = True
self.has_mob = True
def update(self, *args):
# Moving left/right
if self.on_move_x:
self.rect.x += self.vel_x
if self.rect.right > WIDTH - 15:
self.vel_x = -1
if self.rect.left < 15:
self.vel_x = 1
# Moving up/down
if self.on_move_y:
self.rect.y += self.vel_y
self.count_vel_y += self.vel_y
if self.count_vel_y > 130:
self.vel_y = -1
if self.count_vel_y < 0:
self.vel_y = 1
class Powerup(pygame.sprite.Sprite):
def __init__(self, game, plat):
self._layer = POW_LAYER
self.groups = game.all_sprites, game.powerups
pygame.sprite.Sprite.__init__(self, self.groups)
self.game = game
self.plat = plat
# We define the type as boost and then we change it if needed
self.type = 'boost'
self.spawn_score = 0
self.spawn_ratio = random.randrange(100)
if 20 < self.spawn_ratio < 50:
self.type = 'bubble'
elif 7 < self.spawn_ratio < 20:
self.type = 'wings'
elif 0 < self.spawn_ratio < 7:
self.type = 'jetpack'
if self.type == 'boost':
self.image = self.game.spritesheet1.get_image(820, 1805, 71, 70)
elif self.type == 'bubble':
self.image = self.game.spritesheet1.get_image(826, 134, 71, 70)
elif self.type == 'jetpack':
self.image = self.game.spritesheet1.get_image(852, 1089, 65, 77)
elif self.type == 'wings':
self.image = self.game.spritesheet1.get_image(826, 1292, 71, 70)
self.image.set_colorkey(BLACK)
self.rect = self.image.get_rect()
# Position of the pow
self.rect.centerx = self.plat.rect.centerx
self.rect.bottom = self.plat.rect.top - 2
# Jumping var
self.jumpCount = 1.2
def update(self):
self.rect.centerx = self.plat.rect.centerx
# Jetpack does not jump
if self.type != 'jetpack':
# Checking if the powerup is out of the screen or on it
if self.rect.y >= 0:
if self.jumpCount >= -2:
self.jumpCount -= 0.1
self.rect.y -= (self.jumpCount * abs(self.jumpCount)) * 0.5
else:
self.jumpCount = 1.2
self.rect.bottom = self.plat.rect.top - 2
# Else if the powerup is above the screen we change the signs
else:
if self.jumpCount >= 2:
self.jumpCount -= 0.1
self.rect.y -= (self.jumpCount * abs(self.jumpCount)) * 0.5
else:
self.jumpCount = 1.2
self.rect.bottom = self.plat.rect.top - 2
# Jetpack always is still
else:
self.rect.bottom = self.plat.rect.top
# Killing the sprite
if not self.game.platforms.has(self.plat):
self.kill()
self.plat.has_pow = False
class Coin(pygame.sprite.Sprite):
def __init__(self, game, plat):
self._layer = POW_LAYER
self.groups = game.all_sprites, game.coins
pygame.sprite.Sprite.__init__(self, self.groups)
self.game = game
self.plat = plat
# Animation properties
self.last_update = 0
self.current_frame = 0
self.load_images()
self.image = self.gold_images[0]
self.rect = self.image.get_rect()
# Position
self.rect.centerx = self.plat.rect.centerx
self.rect.bottom = self.plat.rect.top - 5
# Images depending on the score
if PLAT_STONE_START > self.game.score >= 0:
self.type = 'bronze'
elif PLAT_PINK_START > self.game.score > PLAT_STONE_START:
self.type = 'silver'
else:
self.type = 'gold'
def load_images(self):
self.gold_images = (self.game.spritesheet1.get_image(698, 1931, 84, 84),
self.game.spritesheet1.get_image(829, 0, 66, 84),
self.game.spritesheet1.get_image(897, 1574, 50, 84),
self.game.spritesheet1.get_image(645, 651, 15, 84),
pygame.transform.flip(self.game.spritesheet1.get_image(897, 1574, 50, 84), True, False),
pygame.transform.flip(self.game.spritesheet1.get_image(829, 0, 66, 84), True, False))
for image in self.gold_images:
image.set_colorkey(BLACK)
self.silver_images = (self.game.spritesheet1.get_image(584, 406, 84, 84),
self.game.spritesheet1.get_image(852, 1003, 66, 84),
self.game.spritesheet1.get_image(899, 1219, 50, 84),
self.game.spritesheet1.get_image(662, 651, 14, 84),
pygame.transform.flip(self.game.spritesheet1.get_image(899, 1219, 50, 84), True, False),
pygame.transform.flip(self.game.spritesheet1.get_image(852, 1003, 66, 84), True, False))
for image in self.silver_images:
image.set_colorkey(BLACK)
self.bronze_images = (self.game.spritesheet1.get_image(707, 296, 84, 84),
self.game.spritesheet1.get_image(826, 206, 66, 84),
self.game.spritesheet1.get_image(899, 116, 50, 84),
self.game.spritesheet1.get_image(670, 406, 14, 84),
pygame.transform.flip(self.game.spritesheet1.get_image(899, 116, 50, 84), True, False),
pygame.transform.flip(self.game.spritesheet1.get_image(826, 206, 66, 84), True, False))
for image in self.bronze_images:
image.set_colorkey(BLACK)
def update(self):
time_passed = pygame.time.get_ticks()
self.rect.centerx = self.plat.rect.centerx
self.rect.bottom = self.plat.rect.top - 5
if time_passed - self.last_update > 100:
self.last_update = time_passed
self.current_frame = (self.current_frame + 1) % len(self.gold_images)
if self.type == 'bronze':
self.image = self.bronze_images[self.current_frame]
elif self.type == 'silver':
self.image = self.silver_images[self.current_frame]
else:
self.image = self.gold_images[self.current_frame]
self.rect = self.image.get_rect()
self.rect.centerx = self.plat.rect.centerx
self.rect.bottom = self.plat.rect.top - 5
# We kill the sprite when the plat is killed
if not self.game.platforms.has(self.plat):
self.kill()
self.plat.has_coin = False
class Flyman(pygame.sprite.Sprite):
def __init__(self, game):
self._layer = MOB_LAYER
self.groups = game.all_sprites, game.mobs
pygame.sprite.Sprite.__init__(self, self.groups)
self.game = game
# Images and animation
self.image_up = self.game.spritesheet1.get_image(566, 510, 122, 139)
self.image_up.set_colorkey(BLACK)
self.image_down = self.game.spritesheet1.get_image(568, 1534, 122, 135)
self.image_down.set_colorkey(BLACK)
self.image = self.image_up
self.rect = self.image.get_rect()
# Position
self.rect.centerx = random.choice([-100, WIDTH + 100])
self.rect.y = HEIGHT / 3
# Move properties
self.velx = random.randrange(1, 4)
self.vely = 0
self.dy = 0.5
def update(self):
# We apply movement
self.rect.x += self.velx
self.vely += self.dy
self.rect.y += self.vely
# We apply up and down movement
if self.vely > 3 or self.vely < -3:
self.dy *= -1
rect_center = self.rect.center
# We apply animation
if self.dy < 0:
self.image = self.image_up
else:
self.image = self.image_down
self.rect = self.image.get_rect()
self.mask = pygame.mask.from_surface(self.image)
self.rect.center = rect_center
        # Reverse horizontal direction once the sprite has moved past the screen edges
if self.rect.left > WIDTH + 100 or self.rect.right < -100:
self.velx *= -1
# Killing the sprite
if self.rect.centery > HEIGHT + 100:
self.game.has_flyman = False
self.kill()
class CloudBG(pygame.sprite.Sprite):
def __init__(self, game):
self._layer = CLOUD_LAYER
self.groups = game.all_sprites, game.clouds
pygame.sprite.Sprite.__init__(self, self.groups)
self.game = game
self.image = random.choice(self.game.cloud_images)
self.image.set_colorkey(BLACK)
self.rect = self.image.get_rect()
scale = random.randrange(50, 100) / 100
self.image = pygame.transform.scale(self.image, (int(self.rect.width * scale), int(self.rect.height * scale)))
self.rect.x = random.randrange(WIDTH - self.rect.width)
self.rect.y = random.randrange(-500, -50)
def update(self):
if self.rect.top > HEIGHT * 2:
self.kill()
class Spikey(pygame.sprite.Sprite):
def __init__(self, game, plat):
self._layer = MOB_LAYER
self.groups = game.all_sprites, game.mobs
pygame.sprite.Sprite.__init__(self, self.groups)
self.game = game
self.plat = plat
self.load_images()
self.current_frame = 0
self.last_update = 0
self.image = self.images_R[0]
self.rect = self.image.get_rect()
self.rect.centerx = self.plat.rect.centerx
self.rect.bottom = self.plat.rect.top - 1
self.acc_x = SPIKEY_ACC
self.facing_left = False
self.facing_right = True
def load_images(self):
self.images_R = (self.game.spritesheet1.get_image(704, 1256, 120, 159),
self.game.spritesheet1.get_image(812, 296, 90, 155))
for image in self.images_R:
image.set_colorkey(BLACK)
self.images_L = (pygame.transform.flip(self.game.spritesheet1.get_image(704, 1256, 120, 159), True, False),
pygame.transform.flip(self.game.spritesheet1.get_image(812, 296, 90, 155), True, False))
for image in self.images_L:
image.set_colorkey(BLACK)
def update(self):
self.animation()
if self.game.platforms.has(self.plat):
self.rect.bottom = self.plat.rect.top - 1
# Applying constant movement
if self.facing_left or self.facing_right:
self.rect.x += self.acc_x
# Moving from right to left
if self.rect.right > self.plat.rect.right:
self.facing_right = False
self.facing_left = True
self.acc_x = -SPIKEY_ACC
# Moving from left to right
if self.rect.left < self.plat.rect.left:
self.facing_right = True
self.facing_left = False
self.acc_x = SPIKEY_ACC
# Killing the sprite when it disappears off the screen
if self.rect.top > HEIGHT:
self.kill()
self.plat.has_spikey = False
self.plat.has_mob = False
def animation(self):
time_passed = pygame.time.get_ticks()
if time_passed - self.last_update > SPIKEY_FRAME_TIME:
self.last_update = time_passed
self.current_frame = (self.current_frame + 1) % len(self.images_R)
rect_bottom = self.rect.bottom
centerx = self.rect.centerx
if self.facing_right:
self.image = self.images_R[self.current_frame]
self.rect.x += self.acc_x
if self.facing_left:
self.image = self.images_L[self.current_frame]
self.rect.x += self.acc_x
self.rect = self.image.get_rect()
self.rect.centerx = centerx
self.rect.bottom = rect_bottom
class Cloud(pygame.sprite.Sprite):
def __init__(self, game, plat):
self._layer = 4
self.groups = game.all_sprites, game.passive_mobs
pygame.sprite.Sprite.__init__(self, self.groups)
self.game = game
self.plat = plat
# Defining the images
self.images = (self.game.spritesheet1.get_image(0, 1152, 260, 134),
Get_image_res(pygame.image.load('graphics/Cloud1.png'), 2),
Get_image_res(pygame.image.load('graphics/Cloud2.png'), 2),
Get_image_res(pygame.image.load('graphics/Cloud3.png'), 2),
Get_image_res(pygame.image.load('graphics/Cloud4.png'), 2))
self.image = self.images[0]
self.rect = self.image.get_rect()
self.rect.centerx = self.plat.rect.centerx
self.rect.bottom = self.plat.rect.top - 60
self.last_update = 0
self.last_struck = False
self.current_frame = 0
# The first image is from the spritesheet so we set the colorkey to black
if self.image == self.images[0]:
self.image.set_colorkey(BLACK)
def update(self, *args):
self.rect.centerx = self.plat.rect.centerx
if self.game.platforms.has(self.plat):
self.rect.bottom = self.plat.rect.top - 60
# Setting the animation
time_passed = pygame.time.get_ticks()
if time_passed - self.last_update > 500:
self.last_update = time_passed
self.current_frame = (self.current_frame + 1) % len(self.images)
self.image = self.images[self.current_frame]
            # Spawning the lightning at the peak image
            if self.image ==
# OP2/PyOP2: pyop2/types/dataset.py
import numbers
import numpy as np
from petsc4py import PETSc
from pyop2 import (
caching,
datatypes as dtypes,
exceptions as ex,
mpi,
utils
)
from pyop2.types.set import ExtrudedSet, GlobalSet, MixedSet, Set, Subset
class DataSet(caching.ObjectCached):
"""PyOP2 Data Set
Set used in the op2.Dat structures to specify the dimension of the data.
"""
@utils.validate_type(('iter_set', Set, ex.SetTypeError),
('dim', (numbers.Integral, tuple, list), ex.DimTypeError),
('name', str, ex.NameTypeError))
def __init__(self, iter_set, dim=1, name=None):
if isinstance(iter_set, ExtrudedSet):
raise NotImplementedError("Not allowed!")
if self._initialized:
return
if isinstance(iter_set, Subset):
raise NotImplementedError("Deriving a DataSet from a Subset is unsupported")
self._set = iter_set
self._dim = utils.as_tuple(dim, numbers.Integral)
self._cdim = np.prod(self._dim).item()
self._name = name or "dset_#x%x" % id(self)
self._initialized = True
@classmethod
def _process_args(cls, *args, **kwargs):
return (args[0], ) + args, kwargs
@classmethod
def _cache_key(cls, iter_set, dim=1, name=None):
return (iter_set, utils.as_tuple(dim, numbers.Integral))
@utils.cached_property
def _wrapper_cache_key_(self):
return (type(self), self.dim, self._set._wrapper_cache_key_)
def __getstate__(self):
"""Extract state to pickle."""
return self.__dict__
def __setstate__(self, d):
"""Restore from pickled state."""
self.__dict__.update(d)
# Look up any unspecified attributes on the _set.
def __getattr__(self, name):
"""Returns a Set specific attribute."""
value = getattr(self.set, name)
setattr(self, name, value)
return value
def __getitem__(self, idx):
"""Allow index to return self"""
assert idx == 0
return self
@utils.cached_property
def dim(self):
"""The shape tuple of the values for each element of the set."""
return self._dim
@utils.cached_property
def cdim(self):
"""The scalar number of values for each member of the set. This is
the product of the dim tuple."""
return self._cdim
@utils.cached_property
def name(self):
"""Returns the name of the data set."""
return self._name
@utils.cached_property
def set(self):
"""Returns the parent set of the data set."""
return self._set
def __iter__(self):
"""Yield self when iterated over."""
yield self
def __len__(self):
"""This is not a mixed type and therefore of length 1."""
return 1
def __str__(self):
return "OP2 DataSet: %s on set %s, with dim %s" % \
(self._name, self._set, self._dim)
def __repr__(self):
return "DataSet(%r, %r, %r)" % (self._set, self._dim, self._name)
def __contains__(self, dat):
"""Indicate whether a given Dat is compatible with this DataSet."""
return dat.dataset == self
@utils.cached_property
def lgmap(self):
"""A PETSc LGMap mapping process-local indices to global
indices for this :class:`DataSet`.
"""
lgmap = PETSc.LGMap()
if self.comm.size == 1:
lgmap.create(indices=np.arange(self.size, dtype=dtypes.IntType),
bsize=self.cdim, comm=self.comm)
else:
lgmap.create(indices=self.halo.local_to_global_numbering,
bsize=self.cdim, comm=self.comm)
return lgmap
@utils.cached_property
def scalar_lgmap(self):
if self.cdim == 1:
return self.lgmap
indices = self.lgmap.block_indices
return PETSc.LGMap().create(indices=indices, bsize=1, comm=self.comm)
@utils.cached_property
def unblocked_lgmap(self):
"""A PETSc LGMap mapping process-local indices to global
indices for this :class:`DataSet` with a block size of 1.
"""
if self.cdim == 1:
return self.lgmap
else:
indices = self.lgmap.indices
lgmap = PETSc.LGMap().create(indices=indices,
bsize=1, comm=self.lgmap.comm)
return lgmap
@utils.cached_property
def field_ises(self):
"""A list of PETSc ISes defining the global indices for each set in
the DataSet.
Used when extracting blocks from matrices for solvers."""
ises = []
nlocal_rows = 0
for dset in self:
nlocal_rows += dset.size * dset.cdim
offset = self.comm.scan(nlocal_rows)
offset -= nlocal_rows
for dset in self:
nrows = dset.size * dset.cdim
iset = PETSc.IS().createStride(nrows, first=offset, step=1,
comm=self.comm)
iset.setBlockSize(dset.cdim)
ises.append(iset)
offset += nrows
return tuple(ises)
@utils.cached_property
def local_ises(self):
"""A list of PETSc ISes defining the local indices for each set in the DataSet.
Used when extracting blocks from matrices for assembly."""
ises = []
start = 0
for dset in self:
bs = dset.cdim
n = dset.total_size*bs
iset = PETSc.IS().createStride(n, first=start, step=1,
comm=mpi.COMM_SELF)
iset.setBlockSize(bs)
start += n
ises.append(iset)
return tuple(ises)
@utils.cached_property
def layout_vec(self):
"""A PETSc Vec compatible with the dof layout of this DataSet."""
vec = PETSc.Vec().create(comm=self.comm)
size = (self.size * self.cdim, None)
vec.setSizes(size, bsize=self.cdim)
vec.setUp()
return vec
@utils.cached_property
def dm(self):
dm = PETSc.DMShell().create(comm=self.comm)
dm.setGlobalVector(self.layout_vec)
return dm
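# A hedged usage sketch, not part of the original module: assuming PyOP2 has
# been initialised (op2.init()) so that Sets can be constructed, a DataSet
# attaches a per-entity shape to a Set. The names below are illustrative only.
def _dataset_example():
    vertices = Set(10, name="vertices")   # 10 set entities
    coords = DataSet(vertices, dim=2)     # two values per entity
    assert coords.cdim == 2 and coords.dim == (2,)
    return coords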
class GlobalDataSet(DataSet):
"""A proxy :class:`DataSet` for use in a :class:`Sparsity` where the
matrix has :class:`Global` rows or columns."""
def __init__(self, global_):
"""
:param global_: The :class:`Global` on which this object is based."""
self._global = global_
self._globalset = GlobalSet(comm=self.comm)
self._name = "gdset_#x%x" % id(self)
@classmethod
def _cache_key(cls, *args):
return None
@utils.cached_property
def dim(self):
"""The shape tuple of the values for each element of the set."""
return self._global._dim
@utils.cached_property
def cdim(self):
"""The scalar number of values for each member of the set. This is
the product of the dim tuple."""
return self._global._cdim
@utils.cached_property
def name(self):
"""Returns the name of the data set."""
return self._global._name
@utils.cached_property
def comm(self):
"""Return the communicator on which the set is defined."""
return self._global.comm
@utils.cached_property
def set(self):
"""Returns the parent set of the data set."""
return self._globalset
@utils.cached_property
def size(self):
"""The number of local entries in the Dataset (1 on rank 0)"""
        return 1 if self.comm.rank == 0 else 0
def __iter__(self):
"""Yield self when iterated over."""
yield self
def __len__(self):
"""This is not a mixed type and therefore of length 1."""
return 1
def __str__(self):
return "OP2 GlobalDataSet: %s on Global %s" % \
(self._name, self._global)
def __repr__(self):
return "GlobalDataSet(%r)" % (self._global)
@utils.cached_property
def lgmap(self):
"""A PETSc LGMap mapping process-local indices to global
indices for this :class:`DataSet`.
"""
lgmap = PETSc.LGMap()
lgmap.create(indices=np.arange(1, dtype=dtypes.IntType),
bsize=self.cdim, comm=self.comm)
return lgmap
@utils.cached_property
def unblocked_lgmap(self):
"""A PETSc LGMap mapping process-local indices to global
indices for this :class:`DataSet` with a block size of 1.
"""
if self.cdim == 1:
return self.lgmap
else:
indices = self.lgmap.indices
lgmap = PETSc.LGMap().create(indices=indices,
bsize=1, comm=self.lgmap.comm)
return lgmap
@utils.cached_property
def field_ises(self):
"""A list of PETSc ISes defining the global indices for each set in
the DataSet.
Used when extracting blocks from matrices for solvers."""
ises = []
nlocal_rows = 0
for dset in self:
nlocal_rows += dset.size * dset.cdim
offset = self.comm.scan(nlocal_rows)
offset -= nlocal_rows
for dset in self:
nrows = dset.size * dset.cdim
iset = PETSc.IS().createStride(nrows, first=offset, step=1,
comm=self.comm)
iset.setBlockSize(dset.cdim)
ises.append(iset)
offset += nrows
return tuple(ises)
@utils.cached_property
def local_ises(self):
"""A list of PETSc ISes defining the local indices for each set in the DataSet.
Used when extracting blocks from matrices for assembly."""
raise NotImplementedError
@utils.cached_property
def layout_vec(self):
"""A PETSc Vec compatible with the dof layout of this DataSet."""
vec = PETSc.Vec().create(comm=self.comm)
size = (self.size * self.cdim, None)
vec.setSizes(size, bsize=self.cdim)
vec.setUp()
return vec
@utils.cached_property
def dm(self):
dm = PETSc.DMShell().create(comm=self.comm)
dm.setGlobalVector(self.layout_vec)
return dm
class MixedDataSet(DataSet):
r"""A container for a bag of :class:`DataSet`\s.
Initialized either from a :class:`MixedSet` and an iterable or iterator of
``dims`` of corresponding length ::
mdset = op2.MixedDataSet(mset, [dim1, ..., dimN])
or from a tuple of :class:`Set`\s and an iterable of ``dims`` of
corresponding length ::
mdset = op2.MixedDataSet([set1, ..., setN], [dim1, ..., dimN])
If all ``dims`` are to be the same, they can also be given as an
:class:`int` for either of above invocations ::
mdset = op2.MixedDataSet(mset, dim)
mdset = op2.MixedDataSet([set1, ..., setN], dim)
Initialized from a :class:`MixedSet` without explicitly specifying ``dims``
they default to 1 ::
mdset = op2.MixedDataSet(mset)
Initialized from an iterable or iterator of :class:`DataSet`\s and/or
:class:`Set`\s, where :class:`Set`\s are implicitly upcast to
:class:`DataSet`\s of dim 1 ::
mdset = op2.MixedDataSet([dset1, ..., dsetN])
"""
def __init__(self, arg, dims=None):
r"""
:param arg: a :class:`MixedSet` or an iterable or a generator
expression of :class:`Set`\s or :class:`DataSet`\s or a
mixture of both
:param dims: `None` (the default) or an :class:`int` or an iterable or
generator expression of :class:`int`\s, which **must** be
of same length as `arg`
.. Warning ::
When using generator expressions for ``arg`` or ``dims``, these
**must** terminate or else will cause an infinite loop.
"""
if self._initialized:
return
self._dsets = arg
self._initialized = True
@classmethod
def _process_args(cls, arg, dims=None):
        # If the second argument is not None it is expected to be a scalar dim
# or an iterable of dims and the first is expected to be a MixedSet or
# an iterable of Sets
if dims is not None:
# If arg is a MixedSet, get its Sets tuple
sets = arg.split if isinstance(arg, MixedSet) else tuple(arg)
# If dims is a scalar, turn it into a tuple of right length
dims = (dims,) * len(sets) if isinstance(dims, int) else tuple(dims)
if len(sets) != len(dims):
raise ValueError("Got MixedSet of %d Sets but %s dims" %
(len(sets), len(dims)))
            dsets = tuple(s ** d for s, d in zip(sets, dims))
from datetime import datetime
from zcrmsdk.src.com.zoho.crm.api import ParameterMap, HeaderMap
from zcrmsdk.src.com.zoho.crm.api.profiles import Profile
from zcrmsdk.src.com.zoho.crm.api.roles import Role
from zcrmsdk.src.com.zoho.crm.api.users import *
from zcrmsdk.src.com.zoho.crm.api.users import User as ZCRMUser
class User(object):
@staticmethod
def get_users():
"""
This method is used to retrieve the users data specified in the API request.
"""
# Get instance of UsersOperations Class
users_operations = UsersOperations()
# Get instance of ParameterMap Class
param_instance = ParameterMap()
# Possible parameters for Get Users operation
param_instance.add(GetUsersParam.page, 1)
param_instance.add(GetUsersParam.per_page, 200)
param_instance.add(GetUsersParam.type, 'ActiveConfirmedUsers')
# Get instance of ParameterMap Class
header_instance = HeaderMap()
# Possible headers for Get Users operation
header_instance.add(GetUsersHeader.if_modified_since, datetime.fromisoformat('2019-07-07T10:00:00+05:30'))
# Call get_users method that takes ParameterMap instance and HeaderMap instance as parameters
response = users_operations.get_users(param_instance, header_instance)
if response is not None:
# Get the status code from response
print('Status Code: ' + str(response.get_status_code()))
if response.get_status_code() in [204, 304]:
print('No Content' if response.get_status_code() == 204 else 'Not Modified')
return
# Get object from response
response_object = response.get_object()
if response_object is not None:
# Check if expected ResponseWrapper instance is received.
if isinstance(response_object, ResponseWrapper):
# Get the list of obtained User instances
user_list = response_object.get_users()
for user in user_list:
# Get the Country of each User
print("User Country: " + str(user.get_country()))
# Get the CustomizeInfo instance of each User
customize_info = user.get_customize_info()
# Check if customizeInfo is not None
if customize_info is not None:
if customize_info.get_notes_desc() is not None:
# Get the NotesDesc of each User
print("User CustomizeInfo NotesDesc: " + str(customize_info.get_notes_desc()))
if customize_info.get_show_right_panel() is not None:
# Get the ShowRightPanel of each User
print("User CustomizeInfo ShowRightPanel: " + str(customize_info.get_show_right_panel()))
if customize_info.get_bc_view() is not None:
# Get the BcView of each User
print("User CustomizeInfo BcView: " + str(customize_info.get_bc_view()))
if customize_info.get_show_home() is not None:
# Get the ShowHome of each User
print("User CustomizeInfo ShowHome: " + str(customize_info.get_show_home()))
if customize_info.get_show_detail_view() is not None:
# Get the ShowDetailView of each User
print("User CustomizeInfo ShowDetailView: " + str(customize_info.get_show_detail_view()))
if customize_info.get_unpin_recent_item() is not None:
# Get the UnpinRecentItem of each User
print("User CustomizeInfo UnpinRecentItem: " + str(customize_info.get_unpin_recent_item()))
# Get the Role instance of each User
role = user.get_role()
if role is not None:
# Get the Name of Role
print("User Role Name: " + str(role.get_name()))
# Get the ID of Role
print("User Role ID: " + str(role.get_id()))
# Get the Signature of each User
print("User Signature: " + str(user.get_signature()))
# Get the City of each User
print("User City: " + str(user.get_city()))
# Get the NameFormat of each User
print("User NameFormat: " + str(user.get_name_format()))
# Get the Language of each User
print("User Language: " + str(user.get_language()))
# Get the Locale of each User
print("User Locale: " + str(user.get_locale()))
# Get the Microsoft of each User
print("User Microsoft: " + str(user.get_microsoft()))
if user.get_personal_account() is not None:
# Get the PersonalAccount of each User
print("User PersonalAccount: " + str(user.get_personal_account()))
# Get the DefaultTabGroup of each User
print("User DefaultTabGroup: " + str(user.get_default_tab_group()))
# Get the Isonline of each User
print("User Isonline: " + str(user.get_isonline()))
# Get the modifiedBy User instance of each User
modified_by = user.get_modified_by()
# Check if modified_by is not null
if modified_by is not None:
# Get the Name of the modifiedBy User
print("User Modified By User-Name: " + str(modified_by.get_name()))
# Get the ID of the modifiedBy User
print("User Modified By User-ID: " + str(modified_by.get_id()))
# Get the Street of each User
print("User Street: " + str(user.get_street()))
# Get the Currency of each User
print("User Currency: " + str(user.get_currency()))
# Get the Alias of each User
print("User Alias: " + str(user.get_alias()))
# Get the Theme instance of each User
theme = user.get_theme()
# Check if theme is not None
if theme is not None:
# Get the TabTheme instance of Theme
normal_tab = theme.get_normal_tab()
# Check if normal_tab is not null
if normal_tab is not None:
# Get the FontColor of NormalTab
print("User Theme NormalTab FontColor: " + str(normal_tab.get_font_color()))
# Get the Background of NormalTab
print("User Theme NormalTab Background: " + str(normal_tab.get_background()))
# Get the TabTheme instance of Theme
selected_tab = theme.get_selected_tab()
# Check if selected_tab is not null
if selected_tab is not None:
# Get the FontColor of selected_tab
print("User Theme Selected Tab FontColor: " + str(selected_tab.get_font_color()))
# Get the Background of selected_tab
print("User Theme Selected Tab Background: " + str(selected_tab.get_background()))
# Get the NewBackground of each Theme
print("User Theme NewBackground: " + str(theme.get_new_background()))
# Get the Background of each Theme
print("User Theme Background: " + str(theme.get_background()))
# Get the Screen of each Theme
print("User Theme Screen: " + str(theme.get_screen()))
# Get the Type of each Theme
print("User Theme Type: " + str(theme.get_type()))
# Get the ID of each User
print("User ID: " + str(user.get_id()))
# Get the State of each User
print("User State: " + str(user.get_state()))
# Get the Fax of each User
print("User Fax: " + str(user.get_fax()))
# Get the CountryLocale of each User
print("User CountryLocale: " + str(user.get_country_locale()))
# Get the FirstName of each User
print("User FirstName: " + str(user.get_first_name()))
# Get the Email of each User
print("User Email: " + str(user.get_email()))
# Get the reportingTo User instance of each User
reporting_to = user.get_reporting_to()
# Check if reporting_to is not None
if reporting_to is not None:
# Get the Name of the reporting_to User
print("User ReportingTo User-Name: " + str(reporting_to.get_name()))
# Get the ID of the reporting_to User
print("User ReportingTo User-ID: " + str(reporting_to.get_id()))
# Get the DecimalSeparator of each User
print("User DecimalSeparator: " + str(user.get_decimal_separator()))
# Get the Zip of each User
print("User Zip: " + str(user.get_zip()))
# Get the CreatedTime of each User
print("User CreatedTime: " + str(user.get_created_time()))
# Get the Website of each User
print("User Website: " + str(user.get_website()))
if user.get_modified_time() is not None:
# Get the ModifiedTime of each User
print("User ModifiedTime: " + str(user.get_modified_time()))
# Get the TimeFormat of each User
print("User TimeFormat: " + str(user.get_time_format()))
# Get the Offset of each User
print("User Offset: " + str(user.get_offset()))
# Get the Profile instance of each User
profile = user.get_profile()
# Check if profile is not None
if profile is not None:
# Get the Name of the profile
print("User Profile Name: " + str(profile.get_name()))
# Get the ID of the profile
print("User Profile ID: " + str(profile.get_id()))
# Get the Mobile of each User
print("User Mobile: " + str(user.get_mobile()))
# Get the LastName of each User
print("User LastName: " + str(user.get_last_name()))
# Get the TimeZone of each User
print("User TimeZone: " + str(user.get_time_zone()))
# Get the Custom Fields, if any
print("Custom Field: " + str(user.get_key_value('Custom_Field')))
# Get the created_by User instance of each User
created_by = user.get_created_by()
# Check if created_by is not None
if created_by is not None:
# Get the Name of the created_by User
print("User Created By User-Name: " + str(created_by.get_name()))
# Get the ID of the created_by User
print("User Created By User-ID: " + str(created_by.get_id()))
# Get the Zuid of each User
print("User Zuid: " + str(user.get_zuid()))
# Get the Confirm of each User
print("User Confirm: " + str(user.get_confirm()))
# Get the FullName of each User
print("User FullName: " + str(user.get_full_name()))
# Get the list of obtained Territory instances
territories = user.get_territories()
# Check if territories is not None
if territories is not None:
for territory in territories:
# Get the Manager of the Territory
print("User Territory Manager: " + str(territory.get_manager()))
# Get the Name of the Territory
print("User Territory Name: " + str(territory.get_name()))
# Get the ID of the Territory
print("User Territory ID: " + str(territory.get_id()))
# Get the Phone of each User
print("User Phone: " + str(user.get_phone()))
# Get the DOB of each User
print("User DOB: " + str(user.get_dob()))
# Get the DateFormat of each User
print("User DateFormat: " + str(user.get_date_format()))
# Get the Status of each User
print("User Status: " + str(user.get_status()))
# Get the obtained Info object
info = response_object.get_info()
if info is not None:
if info.get_per_page() is not None:
# Get the PerPage of the Info
print("User Info | |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import argparse
import logging
import sys
import time
import traceback
from fabric import Connection
from patchwork.files import exists
from patchwork import files
conn = None
args = None
java_home = None
def create_user():
initial_user = 'ubuntu'
sudo_group = 'sudo'
with Connection(host=args.hostname, user=initial_user,
connect_kwargs={'key_filename': args.pkey}) as conn:
try:
if not exists(conn,
'/home/{}/.ssh_user_ensured'.format(initial_user)):
conn.sudo('useradd -m -G {1} -s /bin/bash {0}'
.format(args.os_user, sudo_group))
conn.sudo(
                    'bash -c \'echo "{} ALL = NOPASSWD:ALL" >> /etc/sudoers\''.format(args.os_user))
conn.sudo('mkdir /home/{}/.ssh'.format(args.os_user))
conn.sudo('chown -R {0}:{0} /home/{1}/.ssh/'
.format(initial_user, args.os_user))
conn.sudo('cat /home/{0}/.ssh/authorized_keys > '
'/home/{1}/.ssh/authorized_keys'
.format(initial_user, args.os_user))
conn.sudo(
'chown -R {0}:{0} /home/{0}/.ssh/'.format(args.os_user))
conn.sudo('chmod 700 /home/{0}/.ssh'.format(args.os_user))
conn.sudo('chmod 600 /home/{0}/.ssh/authorized_keys'
.format(args.os_user))
conn.sudo(
'touch /home/{}/.ssh_user_ensured'.format(initial_user))
except Exception as err:
            logging.error('Failed to create new os_user: %s', str(err))
sys.exit(1)
def copy_keys():
try:
conn.put(args.pkey, '/home/{0}/keys/'.format(args.os_user))
conn.sudo('chown -R {0}:{0} /home/{0}/keys'.format(args.os_user))
except Exception as err:
        logging.error('Failed to copy admin key: %s', str(err))
traceback.print_exc()
sys.exit(1)
def ensure_dir_endpoint():
try:
if not exists(conn, '/home/{}/.ensure_dir'.format(args.os_user)):
conn.sudo('mkdir /home/{}/.ensure_dir'.format(args.os_user))
except Exception as err:
        logging.error('Failed to create ~/.ensure_dir/: %s', str(err))
traceback.print_exc()
sys.exit(1)
def ensure_logs_endpoint():
log_root_dir = "/var/opt/datalab/log"
supervisor_log_file = "/var/log/application/provision-service.log"
try:
if not exists(conn, '/home/' + args.os_user + '/.ensure_dir/logs_ensured'):
if not exists(conn, args.datalab_path):
conn.sudo("mkdir -p " + args.datalab_path)
conn.sudo("chown -R " + args.os_user + ' ' + args.datalab_path)
if not exists(conn, log_root_dir):
conn.sudo('mkdir -p ' + log_root_dir + '/provisioning')
conn.sudo('touch ' + log_root_dir + '/provisioning/provisioning.log')
if not exists(conn, supervisor_log_file):
conn.sudo("mkdir -p /var/log/application")
conn.sudo("touch " + supervisor_log_file)
conn.sudo("chown -R {0} {1}".format(args.os_user, log_root_dir))
conn.sudo('touch /home/' + args.os_user + '/.ensure_dir/logs_ensured')
except Exception as err:
print('Failed to configure logs and DataLab directory: ', str(err))
traceback.print_exc()
sys.exit(1)
def ensure_jre_jdk_endpoint():
try:
if not exists(conn, '/home/{}/.ensure_dir/jre_jdk_ensured'.format(args.os_user)):
conn.sudo('apt-get install -y openjdk-8-jre-headless')
conn.sudo('apt-get install -y openjdk-8-jdk-headless')
conn.sudo('touch /home/{}/.ensure_dir/jre_jdk_ensured'
.format(args.os_user))
except Exception as err:
        logging.error('Failed to install Java JDK: %s', str(err))
traceback.print_exc()
sys.exit(1)
def ensure_supervisor_endpoint():
try:
if not exists(conn, '/home/{}/.ensure_dir/superv_ensured'.format(args.os_user)):
conn.sudo('apt-get -y install supervisor')
conn.sudo('update-rc.d supervisor defaults')
conn.sudo('update-rc.d supervisor enable')
conn.sudo('touch /home/{}/.ensure_dir/superv_ensured'
.format(args.os_user))
except Exception as err:
        logging.error('Failed to install Supervisor: %s', str(err))
traceback.print_exc()
sys.exit(1)
def ensure_docker_endpoint():
try:
if not exists(conn, '/home/{}/.ensure_dir/docker_ensured'.format(args.os_user)):
conn.sudo("bash -c "
"'curl -fsSL https://download.docker.com/linux/ubuntu/gpg"
" | apt-key add -'")
conn.sudo('add-apt-repository "deb [arch=amd64] '
'https://download.docker.com/linux/ubuntu '
'$(lsb_release -cs) stable"')
conn.sudo('apt-get update')
conn.sudo('apt-cache policy docker-ce')
conn.sudo('apt-get install -y docker-ce={}'
.format(args.docker_version))
if not exists(conn, '{}/tmp'.format(args.datalab_path)):
conn.run('mkdir -p {}/tmp'.format(args.datalab_path))
conn.put('./daemon.json',
'{}/tmp/daemon.json'.format(args.datalab_path))
conn.sudo('sed -i "s|REPOSITORY|{}:{}|g" {}/tmp/daemon.json'
.format(args.repository_address,
args.repository_port,
args.datalab_path))
if args.cloud_provider == "aws":
dns_ip_resolve = (conn.run("systemd-resolve --status "
"| grep -A 5 'Current Scopes: DNS' "
"| grep 'DNS Servers:' "
"| awk '{print $3}'")
.stdout.rstrip("\n\r"))
conn.sudo('sed -i "s|DNS_IP_RESOLVE|\"dns\": [{0}],|g" {1}/tmp/daemon.json'
.format(dns_ip_resolve, args.datalab_path))
elif args.cloud_provider == "gcp":
dns_ip_resolve = ""
conn.sudo('sed -i "s|DNS_IP_RESOLVE||g" {1}/tmp/daemon.json'
.format(dns_ip_resolve, args.datalab_path))
conn.sudo('mv {}/tmp/daemon.json /etc/docker'
.format(args.datalab_path))
conn.sudo('usermod -a -G docker ' + args.os_user)
conn.sudo('update-rc.d docker defaults')
conn.sudo('update-rc.d docker enable')
conn.sudo('service docker restart')
conn.sudo('touch /home/{}/.ensure_dir/docker_ensured'
.format(args.os_user))
except Exception as err:
        logging.error('Failed to install Docker: %s', str(err))
traceback.print_exc()
sys.exit(1)
def create_key_dir_endpoint():
try:
if not exists(conn, '/home/{}/keys'.format(args.os_user)):
conn.run('mkdir /home/{}/keys'.format(args.os_user))
except Exception as err:
        logging.error('Failed to create keys directory as ~/keys: %s', str(err))
traceback.print_exc()
sys.exit(1)
def configure_keystore_endpoint(os_user):
try:
        # TEMPORARILY COMMENTED!!!
if args.cloud_provider == "aws":
conn.sudo('apt-get install -y awscli')
if not exists(conn, '/home/' + args.os_user + '/keys/endpoint.keystore.jks'):
conn.sudo('aws s3 cp s3://{0}/datalab/certs/endpoint/endpoint.keystore.jks '
'/home/{1}/keys/endpoint.keystore.jks'
.format(args.ssn_bucket_name, args.os_user))
if not exists(conn, '/home/' + args.os_user + '/keys/datalab.crt'):
conn.sudo('aws s3 cp s3://{0}/datalab/certs/endpoint/endpoint.crt'
' /home/{1}/keys/endpoint.crt'.format(args.ssn_bucket_name, args.os_user))
# if not exists(conn, '/home/' + args.os_user + '/keys/ssn.crt'):
# conn.sudo('aws s3 cp '
# 's3://{0}/datalab/certs/ssn/ssn.crt /home/{1}/keys/ssn.crt'
# .format(args.ssn_bucket_name, args.os_user))
elif args.cloud_provider == "gcp":
if not exists(conn, '/home/' + args.os_user + '/keys/endpoint.keystore.jks'):
conn.sudo('gsutil -m cp -r gs://{0}/datalab/certs/endpoint/endpoint.keystore.jks '
'/home/{1}/keys/'
.format(args.ssn_bucket_name, args.os_user))
if not exists(conn, '/home/' + args.os_user + '/keys/datalab.crt'):
conn.sudo('gsutil -m cp -r gs://{0}/datalab/certs/endpoint/endpoint.crt'
' /home/{1}/keys/'.format(args.ssn_bucket_name, args.os_user))
# if not exists(conn, '/home/' + args.os_user + '/keys/ssn.crt'):
# conn.sudo('gsutil -m cp -r '
# 'gs://{0}/datalab/certs/ssn/ssn.crt /home/{1}/keys/'
# .format(args.ssn_bucket_name, args.os_user))
if not exists(conn, '/home/' + args.os_user + '/.ensure_dir/cert_imported'):
conn.sudo('keytool -importcert -trustcacerts -alias datalab -file /home/{0}/keys/endpoint.crt -noprompt \
-storepass changeit -keystore {1}/lib/security/cacerts'.format(os_user, java_home))
# conn.sudo('keytool -importcert -trustcacerts -file /home/{0}/keys/ssn.crt -noprompt \
# -storepass changeit -keystore {1}/lib/security/cacerts'.format(os_user, java_home))
conn.sudo('touch /home/' + args.os_user + '/.ensure_dir/cert_imported')
print("Certificates are imported.")
except Exception as err:
print('Failed to configure Keystore certificates: ', str(err))
traceback.print_exc()
sys.exit(1)
def configure_supervisor_endpoint():
try:
if not exists(conn,
'/home/{}/.ensure_dir/configure_supervisor_ensured'.format(args.os_user)):
supervisor_conf = '/etc/supervisor/conf.d/supervisor_svc.conf'
if not exists(conn, '{}/tmp'.format(args.datalab_path)):
conn.run('mkdir -p {}/tmp'.format(args.datalab_path))
conn.put('./supervisor_svc.conf',
'{}/tmp/supervisor_svc.conf'.format(args.datalab_path))
datalab_conf_dir = '{}/conf/'.format(args.datalab_path)
if not exists(conn, datalab_conf_dir):
conn.run('mkdir -p {}'.format(datalab_conf_dir))
web_path = '{}/webapp'.format(args.datalab_path)
if not exists(conn, web_path):
conn.run('mkdir -p {}'.format(web_path))
conn.sudo('sed -i "s|OS_USR|{}|g" {}/tmp/supervisor_svc.conf'
.format(args.os_user, args.datalab_path))
conn.sudo('sed -i "s|WEB_CONF|{}|g" {}/tmp/supervisor_svc.conf'
.format(datalab_conf_dir, args.datalab_path))
conn.sudo('sed -i \'s=WEB_APP_DIR={}=\' {}/tmp/supervisor_svc.conf'
.format(web_path, args.datalab_path))
conn.sudo('cp {}/tmp/supervisor_svc.conf {}'
.format(args.datalab_path, supervisor_conf))
conn.put('./provisioning.yml', '{}provisioning.yml'
.format(datalab_conf_dir))
conn.sudo('sed -i "s|KEYNAME|{}|g" {}provisioning.yml'
.format(args.key_name, datalab_conf_dir))
conn.sudo('sed -i "s|KEYSTORE_PASSWORD|{}|g" {}provisioning.yml'
.format(args.endpoint_keystore_password, datalab_conf_dir))
conn.sudo('sed -i "s|JRE_HOME|{}|g" {}provisioning.yml'
.format(java_home, datalab_conf_dir))
conn.sudo('sed -i "s|CLOUD_PROVIDER|{}|g" {}provisioning.yml'
.format(args.cloud_provider, datalab_conf_dir))
conn.sudo('sed -i "s|MONGO_HOST|{}|g" {}provisioning.yml'
.format(args.mongo_host, datalab_conf_dir))
conn.sudo('sed -i "s|MONGO_PORT|{}|g" {}provisioning.yml'
.format(args.mongo_port, datalab_conf_dir))
conn.sudo('sed -i "s|SS_HOST|{}|g" {}provisioning.yml'
.format(args.ss_host, datalab_conf_dir))
conn.sudo('sed -i "s|SS_PORT|{}|g" {}provisioning.yml'
.format(args.ss_port, datalab_conf_dir))
conn.sudo('sed -i "s|KEYCLOACK_HOST|{}|g" {}provisioning.yml'
.format(args.keycloack_host, datalab_conf_dir))
conn.sudo('sed -i "s|CLIENT_SECRET|{}|g" {}provisioning.yml'
.format(args.keycloak_client_secret, datalab_conf_dir))
# conn.sudo('sed -i "s|MONGO_PASSWORD|{}|g" {}provisioning.yml'
# .format(args.mongo_password, datalab_conf_dir))
conn.sudo('sed -i "s|CONF_OS|{}|g" {}provisioning.yml'
.format(args.conf_os, datalab_conf_dir))
conn.sudo('sed -i "s|SERVICE_BASE_NAME|{}|g" {}provisioning.yml'
.format(args.service_base_name, datalab_conf_dir))
conn.sudo('sed -i "s|EDGE_INSTANCE_SIZE|{}|g" {}provisioning.yml'
.format(args.edge_instence_size, datalab_conf_dir))
conn.sudo('sed -i "s|SUBNET_ID|{}|g" {}provisioning.yml'
.format(args.subnet_id, datalab_conf_dir))
conn.sudo('sed -i "s|REGION|{}|g" {}provisioning.yml'
.format(args.region, datalab_conf_dir))
conn.sudo('sed -i "s|ZONE|{}|g" {}provisioning.yml'
.format(args.zone, datalab_conf_dir))
conn.sudo('sed -i "s|TAG_RESOURCE_ID|{}|g" {}provisioning.yml'
.format(args.tag_resource_id, datalab_conf_dir))
conn.sudo('sed -i "s|SG_IDS|{}|g" {}provisioning.yml'
.format(args.sg_ids, datalab_conf_dir))
conn.sudo('sed -i "s|SSN_INSTANCE_SIZE|{}|g" {}provisioning.yml'
.format(args.ssn_instance_size, datalab_conf_dir))
conn.sudo('sed -i "s|VPC2_ID|{}|g" {}provisioning.yml'
.format(args.vpc2_id, datalab_conf_dir))
conn.sudo('sed -i "s|SUBNET2_ID|{}|g" {}provisioning.yml'
.format(args.subnet2_id, datalab_conf_dir))
conn.sudo('sed -i "s|CONF_KEY_DIR|{}|g" {}provisioning.yml'
.format(args.conf_key_dir, datalab_conf_dir))
conn.sudo('sed -i "s|VPC_ID|{}|g" {}provisioning.yml'
.format(args.vpc_id, datalab_conf_dir))
conn.sudo('sed -i "s|PEERING_ID|{}|g" {}provisioning.yml'
.format(args.peering_id, datalab_conf_dir))
conn.sudo('sed -i "s|AZURE_RESOURCE_GROUP_NAME|{}|g" {}provisioning.yml'
.format(args.azure_resource_group_name, datalab_conf_dir))
conn.sudo('sed -i "s|AZURE_SSN_STORAGE_ACCOUNT_TAG|{}|g" {}provisioning.yml'
.format(args.azure_ssn_storage_account_tag, datalab_conf_dir))
conn.sudo('sed -i "s|AZURE_SHARED_STORAGE_ACCOUNT_TAG|{}|g" {}provisioning.yml'
.format(args.azure_shared_storage_account_tag, datalab_conf_dir))
conn.sudo('sed -i "s|AZURE_DATALAKE_TAG|{}|g" {}provisioning.yml'
.format(args.azure_datalake_tag, datalab_conf_dir))
conn.sudo('sed -i "s|AZURE_CLIENT_ID|{}|g" {}provisioning.yml'
.format(args.azure_client_id, datalab_conf_dir))
conn.sudo('sed -i "s|GCP_PROJECT_ID|{}|g" {}provisioning.yml'
.format(args.gcp_project_id, datalab_conf_dir))
conn.sudo('sed -i "s|LDAP_HOST|{}|g" {}provisioning.yml'
.format(args.ldap_host, datalab_conf_dir))
conn.sudo('sed -i "s|LDAP_DN|{}|g" {}provisioning.yml'
.format(args.ldap_dn, datalab_conf_dir))
conn.sudo('sed -i "s|LDAP_OU|{}|g" {}provisioning.yml'
.format(args.ldap_ou, datalab_conf_dir))
conn.sudo('sed -i "s|LDAP_USER_NAME|{}|g" {}provisioning.yml'
.format(args.ldap_user_name, datalab_conf_dir))
conn.sudo('sed -i "s|LDAP_USER_PASSWORD|{}|g" {}provisioning.yml'
.format(args.ldap_user_password, datalab_conf_dir))
conn.sudo('touch /home/{}/.ensure_dir/configure_supervisor_ensured'
.format(args.os_user))
except Exception as err:
        logging.error('Failed to configure Supervisor: %s', str(err))
traceback.print_exc()
sys.exit(1)
def ensure_jar_endpoint():
try:
ensure_file = ('/home/{}/.ensure_dir/backend_jar_ensured'
.format(args.os_user))
if not exists(conn, ensure_file):
web_path = '{}/webapp'.format(args.datalab_path)
if not exists(conn, web_path):
conn.run('mkdir -p {}'.format(web_path))
if args.cloud_provider == "aws":
if 'Failed' in conn.run('wget -P {} --user={} --password={} '
'https://{}/repository/packages/aws/provisioning-service-'
'2.1.jar --no-check-certificate 2>&1 | tee /tmp/tee.tmp; if grep -w -i -E "ERROR|Failed" /tmp/tee.tmp; then echo -e "==============\nFailed jar download.\n=============="; fi'
.format(web_path, args.repository_user,
args.repository_pass, args.repository_address)).stdout:
sys.exit(1)
elif args.cloud_provider == "gcp":
if 'Failed' in conn.run('wget -P {} --user={} --password={} '
'https://{}/repository/packages/gcp/provisioning-service-'
'2.1.jar --no-check-certificate 2>&1 | tee /tmp/tee.tmp; if grep -w -i -E "ERROR|Failed" /tmp/tee.tmp; then echo -e "==============\nFailed jar download.\n=============="; fi'
.format(web_path, args.repository_user,
args.repository_pass, args.repository_address)).stdout:
sys.exit(1)
conn.run('mv {0}/*.jar {0}/provisioning-service.jar'
.format(web_path))
conn.sudo('touch {}'.format(ensure_file))
except Exception as err:
        logging.error('Failed to download jar-provisioner: %s', str(err))
traceback.print_exc()
sys.exit(1)
def start_supervisor_endpoint():
try:
conn.sudo("service supervisor restart")
except Exception as err:
        logging.error('Unable to start Supervisor: %s', str(err))
traceback.print_exc()
sys.exit(1)
def get_sources():
try:
conn.run("git clone https://github.com/apache/incubator-datalab.git {0}/sources".format(args.datalab_path))
if args.branch_name != "":
conn.run("cd {0}/sources && git checkout {1} && cd".format(args.datalab_path, args.branch_name))
except Exception as err:
        logging.error('Failed to download sources: %s', str(err))
traceback.print_exc()
sys.exit(1)
def pull_docker_images():
try:
ensure_file = ('/home/{}/.ensure_dir/docker_images_pulled'
.format(args.os_user))
if not exists(conn, ensure_file):
conn.sudo('docker login -u {} -p {} {}:{} 2>&1 | tee /tmp/tee.tmp; if grep -w -i -E "ERROR" /tmp/tee.tmp; then echo -e "==============\nFailed docker login.\n=============="; fi'
.format(args.repository_user,
args.repository_pass,
args.repository_address,
args.repository_port))
conn.sudo('docker pull {}:{}/docker.datalab-base-{}'
.format(args.repository_address, args.repository_port, args.cloud_provider))
conn.sudo('docker pull {}:{}/docker.datalab-edge-{}'
.format(args.repository_address, args.repository_port, args.cloud_provider))
conn.sudo('docker pull {}:{}/docker.datalab-project-{}'
.format(args.repository_address, args.repository_port, args.cloud_provider))
conn.sudo('docker pull {}:{}/docker.datalab-jupyter-{}'
.format(args.repository_address, args.repository_port, args.cloud_provider))
conn.sudo('docker pull {}:{}/docker.datalab-rstudio-{}'
.format(args.repository_address, args.repository_port, args.cloud_provider))
conn.sudo('docker pull {}:{}/docker.datalab-zeppelin-{}'
.format(args.repository_address, args.repository_port, args.cloud_provider))
conn.sudo('docker pull {}:{}/docker.datalab-tensor-{}'
| |
= 0.5 * self.img_size * np.array([f, 1, 1])
cam_trans = np.array([cam[1], cam[2], tz])
return cam_trans, cam_for_render, f
def get_depth_loss(self, verts, cams, f = 5.0, is_sigmoid = True):
"""
verts : N x 6890 x 3, where N is batch_size;
cams : N x 3, where 3 = S, tx, ty;
"""
# proj_vert2d: N x 6890 x 2;
# pred_depth : N x 6890
proj_vert2d, pred_depth, num_vert = get_proj_vert2d(verts, cams, f, self.img_size)
# GT depth
gt_depth = tf.squeeze(self.depth_loader, axis = -1) # N x H x W x 1 ==> N x H X W;
shape_dep = gt_depth.shape
# undo scale [-1, 1] gt depth to [0, dep_max];
gt_depth = self.depth_max_loader * tf.reshape((gt_depth + 1.0)*0.5, [self.batch_size,-1]) # N x -1
gt_depth = tf.reshape(gt_depth, shape_dep) # N x H x W
# indices along batch dimension
batch_idxs = tf.reshape(tf.range(0, self.batch_size), [-1, 1, 1]) # N x 1 x 1
batch_idxs = tf.tile(batch_idxs, [1, num_vert, 1]) # N x 6890 x 1
# > see https://riptutorial.com/tensorflow/example/29069/how-to-use-tf-gather-nd for details;
# to access elements of gt_depth which is a rank-3 tensor, i.e., 3 = (batch_idx, H_idx, W_idx)
# the innermost dimension of index_to_pick must have length 3;
# 6890 x N x 3, here 3 = (batch_idx, H_idx, W_idx)
#NOTE: updated on Aug 20, 2019!!!
#proj_vert2d: x, y, i.e, x -> imgW, y -> imgH, so if you want to
# get the index of (h_idx, w_idx), you have to change the order (x,y) to (y,x)
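        # Worked example (illustrative numbers only, not taken from this model):
        # for gt_depth of shape [2, 4, 4] and a single projected vertex at
        # (x, y) = (3, 1) in batch 0, the index row must read (batch_idx, H_idx, W_idx)
        # = [0, 1, 3], which is why proj_vert2d is reversed below:
        #   idx = tf.constant([[[0, 1, 3]]])   # 1 x 1 x 3
        #   tf.gather_nd(gt_depth, idx)        # -> gt_depth[0, 1, 3], shape 1 x 1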
index_to_pick = tf.concat([batch_idxs, proj_vert2d[:,:,::-1]], axis = 2) # N x 6890 x 3
gt_depth_picked = tf.gather_nd(gt_depth, index_to_pick) # N x 6890
print ('[???] gt_depth_picked.shape = ', gt_depth_picked.shape)
# get the loss
        # f(x) = (exp(alpha*x) - 1)/(exp(alpha*x) + 1)
# = 1.0 - 2.0*(1 / (1 + exp(alpha*x)))
# = 1.0 - 2.0* sigmoid(-alpha*x)
# where, sigmoid(x) = 1 / (1 + exp(-x))
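        # Equivalently (simple algebra, independent of this model):
        #   1 - 2*sigmoid(-alpha*x) = (exp(alpha*x) - 1)/(exp(alpha*x) + 1) = tanh(alpha*x/2),
        # so for a non-negative depth difference x the loss stays in [0, 1):
        # x = 0 gives 0 and x -> inf saturates at 1.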
#alpha = 20.0
alpha = 1.0
diff = tf.abs(gt_depth_picked - pred_depth)
if not is_sigmoid:
return tf.reduce_mean(diff), proj_vert2d, pred_depth
else:
sig_val = 1.0 - 2.0*tf.sigmoid( -diff*alpha)
return tf.reduce_mean(sig_val), proj_vert2d, pred_depth
""" added by CCJ on Jul 26, 2019:
a new 3d loss for 3d joints between gt_js and pred_js,
where gt_js = smpl(gt_pose, gt_shape) and
pred_js = smpl(pred_pose, pred_shape)
"""
def get_smpl_loss(self, Rs, shape, Js, pose):
"""
Rs is N x 24 x 3*3 rotation matrices of pose
Shape is N x 10
Js is N x 19 x 3 joints
Ground truth:
self.poseshape_loader is a long vector of:
relative rotation (24*9)
shape (10)
3D joints (14*3)
"""
Rs = tf.reshape(Rs, [self.batch_size, -1])
params_pred = tf.concat([Rs, shape], 1, name="prep_params_pred")
# 24*9+10 = 226
gt_params = self.poseshape_loader[:, :226]
#looss_poseshape = self.e_3d_weight * compute_3d_loss(params_pred, gt_params, self.has_gt3d_smpl)
#added by CCJ:
loss_poseshape = self.e_3d_weight_smpl * compute_3d_loss(params_pred, gt_params, self.has_gt3d_smpl)
        # shape (10) lives at [216:226]; the pose parameters start at 268 (after the 14*3 = 42 joints)
gt_shapes = self.poseshape_loader[:, 216:226]
gt_poses = self.poseshape_loader[:, 268:]
gt_joints,_ = self.smpl(gt_shapes, gt_poses, get_skin=False, trans = None, idx = 5)
gt_joints = gt_joints[:, :14, :]
pred_joints = Js[:, :14, :]
#NOTE: for debugging;
#gt_joints = tf.Print(gt_joints, [gt_joints[0,0:3,:], pred_joints[0,0:3,:]],
# "few joints gt, and pred joints: ")
#***********************************************************
#***********************************************************
self.smpl_dict_to_save['image']= self.image_loader[0,:,:,:]
self.smpl_dict_to_save['depth']= self.depth_loader[0,:,:,:]
self.smpl_dict_to_save['gt_shapes'] = gt_shapes[0,:]
self.smpl_dict_to_save['gt_poses'] = tf.reshape(gt_poses[0,:], [-1])
self.smpl_dict_to_save['gt_rots'] = tf.reshape(self.poseshape_loader[0,:216], [24, 9]) # 24 x 3*3;
self.smpl_dict_to_save['gt_joints3d'] = gt_joints[0,:,:]
self.smpl_dict_to_save['pred_shapes'] = shape[0,:]
self.smpl_dict_to_save['pred_rots'] = tf.reshape( Rs[0, :], [24,9]) # 24 x 3*3;
self.smpl_dict_to_save['pred_poses'] = tf.reshape( pose[0,:], [-1]) # 24 x 3*3;
self.smpl_dict_to_save['pred_joints3d']= pred_joints[0, :, :]
self.smpl_dict_to_save['fname'] = self.fname
#***********************************************************
#***********************************************************
# Align the joints by pelvis.
gt_joints = align_by_pelvis(gt_joints)
gt_joints = tf.reshape(gt_joints, [self.batch_size, -1])
# Align the joints by pelvis.
pred_joints = align_by_pelvis(pred_joints)
pred_joints = tf.reshape(pred_joints, [self.batch_size, -1])
#loss_joints = self.e_3d_weight * compute_3d_loss( pred_joints, gt_joints, self.has_gt3d_joints)
#added by CCJ:
loss_joints = self.e_3d_weight_js3d * compute_3d_loss( pred_joints, gt_joints, self.has_gt3d_joints)
return loss_poseshape, loss_joints
def save_to_json(self, global_step, sess):
param_path = join(self.model_dir, "smpl_iter%06d.json" % global_step)
dict_to_save = {}
for k in self.smpl_dict_to_save:
            dict_to_save[k] = sess.run(self.smpl_dict_to_save[k]).tolist() if k != "fname" else sess.run(self.smpl_dict_to_save[k])
print ("dict_to_save = {}".format(dict_to_save['fname']))
with open(param_path, 'w') as fp:
json.dump(dict_to_save, fp, indent=4, sort_keys=True)
def get_3d_loss(self, Rs, shape, Js):
"""
Rs is N x 24 x 3*3 rotation matrices of pose
Shape is N x 10
Js is N x 19 x 3 joints
Ground truth:
self.poseshape_loader is a long vector of:
relative rotation (24*9)
shape (10)
3D joints (14*3)
"""
Rs = tf.reshape(Rs, [self.batch_size, -1])
params_pred = tf.concat([Rs, shape], 1, name="prep_params_pred")
# 24*9+10 = 226
gt_params = self.poseshape_loader[:, :226]
loss_poseshape = self.e_3d_weight * compute_3d_loss(
params_pred, gt_params, self.has_gt3d_smpl)
# 14*3 = 42
gt_joints = self.poseshape_loader[:, 226:268]
pred_joints = Js[:, :14, :]
gt_joints = tf.reshape(gt_joints, [self.batch_size, 14, 3])
#NOTE: for debugging;
#gt_joints = tf.Print(gt_joints, [gt_joints[0,0:3,:], pred_joints[0,0:3,:]],
# "few joints gt, and pred joints: ")
# Align the joints by pelvis.
gt_joints = align_by_pelvis(gt_joints)
gt_joints = tf.reshape(gt_joints, [self.batch_size, -1])
# Align the joints by pelvis.
pred_joints = align_by_pelvis(pred_joints)
pred_joints = tf.reshape(pred_joints, [self.batch_size, -1])
loss_joints = self.e_3d_weight * compute_3d_loss(
pred_joints, gt_joints, self.has_gt3d_joints)
return loss_poseshape, loss_joints
#def visualize_img(self, img, gt_kp, vert, pred_kp, cam, renderer):
#NOTE: updated by CCJ on July 1st, 2019;
def visualize_img(self, img, gt_kp, vert, pred_kp, cam, renderer,
gt_vert = None, gt_cam = None,
# newly added on Aug 20, 2019
pred_depth = None, # (6890,)
proj_vert2d = None, # (6890, 2)
depth_max = None # (1,)
):
"""
Overlays gt_kp and pred_kp on img.
Draws vert with text.
Renderer is an instance of SMPLRenderer.
"""
gt_kp = gt_kp[0:self.joint_num,:]
gt_vis = gt_kp[:, 2].astype(bool)
loss = np.sum((gt_kp[gt_vis, :2] - pred_kp[gt_vis])**2)
debug_text = {"sc": cam[0], "tx": cam[1], "ty": cam[2], "kpl": loss}
        # Fix a focal length so I can render this with perspective-correct scale
#f = 5.
#tz = f / cam[0]
#cam_for_render = 0.5 * self.img_size * np.array([f, 1, 1])
#cam_trans = np.array([cam[1], cam[2], tz])
cam_trans, cam_for_render, f = self.get_cam_for_render(cam)
# Undo pre-processing.
input_img = (img + 1) * 0.5 # rescale to [0, 1]
rend_img = renderer(vert + cam_trans, cam_for_render, img=input_img)
rend_img = vis_util.draw_text(rend_img, debug_text)
#gt_rendering
if gt_vert is not None:
debug_text_gt = {"sc_gt": gt_cam[0], "tx_gt": gt_cam[1], "ty_gt": gt_cam[2], "kpl": loss}
cam_t_gt = np.array([gt_cam[1], gt_cam[2], f/ gt_cam[0]])
rend_img_gt = renderer(gt_vert + cam_t_gt, cam_for_render, img=input_img)
rend_img_gt = vis_util.draw_text(rend_img_gt, debug_text_gt)
# Draw skeleton
gt_joint = ((gt_kp[:, :2] + 1) * 0.5) * self.img_size
pred_joint = ((pred_kp + 1) * 0.5) * self.img_size
img_with_gt = vis_util.draw_skeleton(
input_img, gt_joint, draw_edges=False, vis=gt_vis)
skel_img = vis_util.draw_skeleton(img_with_gt, pred_joint)
# newly added for depth rendering;
if self.has_depth_loss:
rend_dep = renderer.depth_render(
depth_max,
vert + cam_trans,
cam_for_render,
img_size = [self.img_size, self.img_size]
)
# change it to color
rend_dep = cv2.cvtColor(rend_dep, cv2.COLOR_GRAY2RGB)
            # a white line boundary for visualization only
rend_dep[:, self.img_size-3:self.img_size] = (255, 255, 255)
rend_dep[self.img_size-3:self.img_size, :] = (255, 255, 255)
rend_dep_wigh_gt = vis_util.draw_skeleton(
rend_dep, gt_joint, draw_edges=False, vis=gt_vis)
skel_dep = vis_util.draw_skeleton(rend_dep_wigh_gt, pred_joint)
myproj_dep = np.zeros((self.img_size, self.img_size, 2), dtype= np.float32)
# pred_depth : (6890,)
# proj_vert2d : (6890, 2)
#print ("[???] shapes = {}, {}, {}, {}, {}".format(
# skel_img.shape,
# rend_img.shape,
# skel_dep.shape,
# myproj_dep.shape,
# pred_depth.shape))
for i in range(0, pred_depth.shape[0]):
x,y = proj_vert2d[i]
x = min(x, self.img_size - 1)
y = min(y, self.img_size - 1)
#print ("??? x,y = {}, {}".format(x, y))
myproj_dep[y, x, 0] += pred_depth[i]
myproj_dep[y, x, 1] += 1
nums = myproj_dep[:,:,1]
            nums[nums < 1.0] = 1.0
#print ("??? nums.shape = {}".format(nums.shape))
myproj_dep = myproj_dep[:,:, 0]/ nums
myproj_dep /= depth_max
myproj_dep *= 255.0
myproj_dep = myproj_dep.astype(np.uint8)
myproj_dep = cv2.cvtColor(myproj_dep, cv2.COLOR_GRAY2RGB)
            # a white line boundary for visualization only
myproj_dep[:, self.img_size-3:self.img_size] = (255, 255, 255)
myproj_dep[self.img_size-3:self.img_size, :] = (255, 255, 255)
#print ("[???] myproj_dep shape = {}".format(myproj_dep.shape))
# (H,W) -> (H, W, C)
to_combined = [skel_img, rend_img/ 255.,]
if gt_vert is not None:
to_combined.append(rend_img_gt / 255.)
if self.has_depth_loss:
to_combined.append( skel_dep)
to_combined.append( myproj_dep)
#print ("[???] shapes = | |
#!/usr/bin/env python
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
from pprint import pprint as pp # for debugging
import sys
import os
import shlex
import subprocess
import time
import socket
import logging
#sys.path.insert(0, os.path.dirname(__file__) + os.sep + '..')
import nori
#nori.pyversion_check(-1, -1)
#nori.pyversion_check(8, -1)
#nori.pyversion_check(-1, 4)
#nori.pyversion_check(8, 4)
#nori.pyversion_check(7, -1)
#nori.pyversion_check(6, -1)
#nori.pyversion_check(-1, 2)
#nori.pyversion_check(-1, 3)
#print(nori.script_name)
#print(nori.script_shortname)
#print(nori.running_as_email)
#print(nori.default_config_files)
#print(nori.char_name(' '))
#print(nori.char_name('\t'))
#print(nori.char_name('\n'))
#print(nori.char_name('\r'))
#print(nori.char_name('\f'))
#print(nori.char_name('\v'))
#print(nori.char_name('\b'))
#print(nori.char_name('\a'))
#print(nori.char_name('x'))
#print(nori.type_tuple_string(nori.STRING_TYPES))
#print(nori.type_tuple_string(nori.STRINGISH_TYPES))
#print(nori.type_tuple_string())
#print(nori.type_tuple_string(5))
#print(nori.scalar_to_tuple(5))
#print(nori.scalar_to_tuple('adsf'))
#print(nori.scalar_to_tuple(()))
#print(nori.scalar_to_tuple([]))
#print(nori.scalar_to_tuple([5]))
#print(nori.scalar_to_tuple((5,)))
#print(nori.scalar_to_list(5))
#print(nori.scalar_to_list('adsf'))
#print(nori.scalar_to_list(()))
#print(nori.scalar_to_list([]))
#print(nori.scalar_to_list([5]))
#print(nori.scalar_to_list((5,)))
#print(nori.re_repl_escape(''))
#print(nori.re_repl_escape('adsf'))
#print(nori.re_repl_escape('a\na'))
#print(nori.re_repl_escape('a\\na'))
#print(nori.re_repl_escape('\\1'))
#print(nori.str_to_bool(''))
#print(nori.str_to_bool('true'))
#print(nori.str_to_bool('on'))
#print(nori.str_to_bool('yes'))
#print(nori.str_to_bool('True'))
#print(nori.str_to_bool('On'))
#print(nori.str_to_bool('Yes'))
#print(nori.str_to_bool('1'))
#print(nori.str_to_bool('false'))
#print(nori.str_to_bool('off'))
#print(nori.str_to_bool('no'))
#print(nori.str_to_bool('False'))
#print(nori.str_to_bool('Off'))
#print(nori.str_to_bool('No'))
#print(nori.str_to_bool('0'))
#print(nori.str_to_bool('asdfads'))
#print(nori.is_legal_identifier(''))
#print(nori.is_legal_identifier('a'))
#print(nori.is_legal_identifier('_'))
#print(nori.is_legal_identifier('#'))
#print(nori.is_legal_identifier('aA'))
#print(nori.is_legal_identifier('_A'))
#print(nori.is_legal_identifier('#A'))
#print(nori.is_legal_identifier('AA'))
#print(nori.is_legal_identifier('A$'))
#print(nori.file_access_const('f'))
#print(nori.file_access_const('r'))
#print(nori.file_access_const('w'))
#print(nori.file_access_const('x'))
#print(nori.file_access_const('F'))
#print(nori.file_type_info('-'))
#print(nori.file_type_info('f'))
#print(nori.file_type_info('d'))
#print(nori.file_type_info('l'))
#print(nori.file_type_info('s'))
#print(nori.file_type_info('p'))
#print(nori.file_type_info('b'))
#print(nori.file_type_info('c'))
#print(nori.file_type_info('w'))
def run_mode_hook():
#try:
#open('/nonexistent')
#except (OSError, IOError) as e:
#print(nori.render_io_exception(e))
#print(nori.file_error_handler(
# e, 'foo', 'fiiile', '/nonexistent', must_exist=True,
# use_logger=False, warn_only=False
#))
#print(nori.file_error_handler(
# e, 'foo', 'fiiile', '/nonexistent', must_exist=False,
# use_logger=False, warn_only=False
#))
#print(nori.file_error_handler(
# e, 'foo', 'fiiile', '/nonexistent', must_exist=True,
# use_logger=False, warn_only=False
#))
#print(nori.file_error_handler(
# e, 'foo', 'fiiile', '/nonexistent', must_exist=True,
# use_logger=True, warn_only=False
#))
#print(nori.file_error_handler(
# e, 'foo', 'fiiile', '/nonexistent', must_exist=True,
# use_logger=False, warn_only=True
#))
#print(nori.file_error_handler(
# e, 'foo', 'fiiile', '/nonexistent', must_exist=True,
# use_logger=True, warn_only=True
#))
#print(nori.check_file_type(
# '/vmlinuz', 'kernel', type_char='l', follow_links=True,
# must_exist=True, use_logger=False, warn_only=False, exit_val=47
#))
#print(nori.check_file_type(
# '/vmlinuz', 'kernel', type_char='l', follow_links=False,
# must_exist=True, use_logger=False, warn_only=False, exit_val=47
#))
#print(nori.check_file_type(
# '/vvv', 'kernel', type_char='l', follow_links=False,
# must_exist=True, use_logger=False, warn_only=False, exit_val=47
#))
#print(nori.check_file_type(
# '/vvv', 'kernel', type_char='l', follow_links=False,
# must_exist=False, use_logger=False, warn_only=False, exit_val=47
#))
#print(nori.check_file_type(
# '/vmlinuz', 'kernel', type_char='g', follow_links=False,
# must_exist=True, use_logger=False, warn_only=False, exit_val=47
#))
#print(nori.check_file_type(
# '/vmlinuz', 'kernel', type_char='f', follow_links=True,
# must_exist=True, use_logger=False, warn_only=False, exit_val=47
#))
#print(nori.check_file_type(
# '/vmlinuz', 'kernel', type_char='s', follow_links=True,
# must_exist=True, use_logger=False, warn_only=False, exit_val=47
#))
#print(nori.check_file_type(
# '/vmlinuz', 'kernel', type_char='spf', follow_links=True,
# must_exist=True, use_logger=False, warn_only=False, exit_val=47
#))
#print(nori.check_file_type(
# '/vmlinuz', 'kernel', type_char='spd', follow_links=True,
# must_exist=True, use_logger=False, warn_only=False, exit_val=47
#))
#print(nori.check_file_type(
# '/vmlinuz', 'kernel', type_char='sd', follow_links=True,
# must_exist=True, use_logger=False, warn_only=False, exit_val=47
#))
#print(nori.check_file_access(
# '/vmlinuz', 'kernel', file_rwx='g', use_logger=False,
# warn_only=False, exit_val=47
#))
#print(nori.check_file_access(
# '/vmlinuz', 'kernel', file_rwx='f', use_logger=False,
# warn_only=True, exit_val=47
#))
#print(nori.check_file_access(
# '/vmlinuz', 'kernel', file_rwx='r', use_logger=False,
# warn_only=True, exit_val=47
#))
#print(nori.check_file_access(
# '/vmlinuz', 'kernel', file_rwx='w', use_logger=False,
# warn_only=True, exit_val=47
#))
#print(nori.check_file_access(
# '/vmlinuz', 'kernel', file_rwx='x', use_logger=False,
# warn_only=True, exit_val=47
#))
#print(nori.check_file_access(
# '/vvv', 'kernel', file_rwx='f', use_logger=False,
# warn_only=True, exit_val=47
#))
#print(nori.check_file_access(
# '/tmp', 'temp', file_rwx='r', use_logger=False,
# warn_only=True, exit_val=47
#))
#print(nori.check_file_access(
# '/tmp', 'temp', file_rwx='w', use_logger=False,
# warn_only=True, exit_val=47
#))
#print(nori.check_file_access(
# '/tmp', 'temp', file_rwx='x', use_logger=False,
# warn_only=True, exit_val=47
#))
#print(nori.check_file_access(
# '/root', 'root', file_rwx='x', use_logger=False,
# warn_only=True, exit_val=47
#))
#print(nori.check_file_access(
# '/root', 'root', file_rwx='', use_logger=False,
# warn_only=True, exit_val=47
#))
#print(nori.check_filedir_create(
# '/root', 'root', create_type='g', need_rotation=False,
# use_logger=False, warn_only=True, exit_val=47
#))
#print(nori.check_filedir_create(
# '/root', 'root', create_type='f', need_rotation=False,
# use_logger=False, warn_only=True, exit_val=47
#))
#print(nori.check_filedir_create(
# '/root', 'root', create_type='d', need_rotation=False,
# use_logger=False, warn_only=True, exit_val=47
#))
#print(nori.check_filedir_create(
# '/tmp', 'temp', create_type='d', need_rotation=False,
# use_logger=False, warn_only=True, exit_val=47
#))
#print(nori.check_filedir_create(
# '/asdf', 'asdf', create_type='d', need_rotation=False,
# use_logger=False, warn_only=True, exit_val=47
#))
#print(nori.check_filedir_create(
# '/tmp/asdf', 'tmpasdf', create_type='d', need_rotation=False,
# use_logger=False, warn_only=True, exit_val=47
#))
#print(nori.check_filedir_create(
# '/var/log/syslog/foo', 'vlsf', create_type='d', need_rotation=False,
# use_logger=False, warn_only=True, exit_val=47
#))
#print(nori.check_filedir_create(
# '/root/foo', 'rootfoo', create_type='d', need_rotation=False,
# use_logger=False, warn_only=True, exit_val=47
#))
#print(nori.check_filedir_create(
# '/root/foo', 'rootfoo', create_type='d', need_rotation=True,
# use_logger=False, warn_only=True, exit_val=47
#))
#print(nori.fix_path(''))
#print(nori.fix_path('~'))
#print(nori.fix_path('~root'))
#print(nori.fix_path('~root/asdfg'))
#print(nori.fix_path('/tmp/~'))
#print(nori.fix_path('/tmp/~sgsgfs'))
#print(nori.filemode(0o777))
#print(nori.filemode(0o773))
#print(nori.filemode(0o775))
#print(nori.filemode(0o776))
#print(nori.filemode(0o737))
#print(nori.filemode(0o757))
#print(nori.filemode(0o767))
#print(nori.filemode(0o377))
#print(nori.filemode(0o577))
#print(nori.filemode(0o677))
#print(nori.filemode(0o1777))
#print(nori.filemode(0o2777))
#print(nori.filemode(0o4777))
#print(nori.filemode(0o2707))
#print(nori.filemode(0o4077))
#print(nori.get_file_metadata('/asdf'))
#print(nori.get_file_metadata('/srv'))
#print(nori.get_file_metadata('/tmp'))
#print(nori.get_file_metadata('/vmlinuz'))
#print(nori.get_file_metadata('/var/log/syslog'))
#print(nori.get_file_metadata('/dev/null'))
#print(nori.get_file_metadata('/dev/sda'))
#print(nori.file_newer_than('/tmp', 1))
#nori.touch_file('/tmp', 'temp')
#print(nori.file_newer_than('/tmp', 1))
#print(nori.parentdir('//')) # /
#print(nori.parentdir('//foo')) # /
#print(nori.parentdir('//foo//')) # /
#print(nori.parentdir('//foo//bar')) # //foo
#print(nori.parentdir('//foo//bar//')) # //foo
#print(nori.parentdir('//foo//bar//baz')) # //foo//bar
#print(nori.parentdir('//foo//bar//baz//')) # //foo//bar
#print(nori.parentdir('.')) # ..
#print(nori.parentdir('.//')) # ..
#print(nori.parentdir('.//foo')) # .
#print(nori.parentdir('.//foo//')) # .
#print(nori.parentdir('.//foo//bar')) # .//foo
#print(nori.parentdir('.//foo//bar//')) # .//foo
#print(nori.parentdir('.//foo//bar//baz')) # .//foo//bar
#print(nori.parentdir('.//foo//bar//baz//')) # .//foo//bar
#print(nori.parentdir('..')) # ../..
#print(nori.parentdir('..//')) # ../..
#print(nori.parentdir('..//foo')) # ..
#print(nori.parentdir('..//foo//')) # ..
#print(nori.parentdir('..//foo//bar')) # ..//foo
#print(nori.parentdir('..//foo//bar//')) # ..//foo
#print(nori.parentdir('..//foo//bar//baz')) # ..//foo//bar
#print(nori.parentdir('..//foo//bar//baz//')) # ..//foo//bar
#print(nori.parentdir('foo')) # .
#print(nori.parentdir('foo//')) # .
#print(nori.parentdir('foo//bar')) # foo
#print(nori.parentdir('foo//bar//')) # foo
#print(nori.parentdir('foo//bar//baz')) # foo//bar
#print(nori.parentdir('foo//bar//baz//')) # foo//bar
#print(nori.open_create_only('/tmp/foo'))
#print(nori.open_create_only('/tmp/foo'))
#print(nori.rm_rf('/tmp/foo', 'tmpfoo'))
#print(nori.get_file_metadata('/tmp/foo'))
#print(nori.touch_file('/tmp/foo', 'tmpfoo'))
#print(nori.get_file_metadata('/tmp/foo'))
#print(nori.touch_file('/tmp/foo', 'tmpfoo'))
#print(nori.rm_rf('/tmp/foo', 'tmpfoo'))
#print(nori.touch_file('/root/asdf', 'rootadsf'))
#print(nori.get_file_metadata('/tmp'))
#print(nori.touch_file('/tmp', 'tmp'))
#print(nori.get_file_metadata('/tmp'))
#print(nori.touch_file('/tmp', 'tmp'))
#print(os.mkdir('/tmp/foo'))
#print(nori.touch_file('/tmp/foo/bar', 'tmpfoobar'))
#print(nori.get_file_metadata('/tmp/foo'))
#print(nori.get_file_metadata('/tmp/foo/bar'))
#print(nori.mkdir_p('/tmp/foo', 'tmpfoo'))
#print(nori.mkdir_p('/tmp/foo/../bar', 'tmpbar'))
#print(nori.rm_rf('/tmp/foo', 'tmpfoo'))
#print(nori.rm_rf('/tmp/foo', 'tmpfoo'))
#print(nori.rm_rf('/tmp/foo', 'tmpfoo', must_exist=True))
#nori.rotate_num_files('/root/asdf', '=', 'a')
# test with .gz, in cwd, in other dir
#nori.prune_num_files('/root/asdf', '=', 'a', 1, 1)
# test with .gz, in cwd, in other dir
#print(nori.pps('adsf'))
#nori.err_exit('adsfasdafg\nwhgsfhg', 47)
#print(nori.core.email_loggers)
#l = logging.getLogger('nori.core.status.email-report')
#if l is nori.core.email_loggers['report']:
# print('foo')
#nori.core.email_loggers['report'].error('asdf5a')
#nori.logging_stop_email_logging('report')
#nori.core.email_loggers['report'].error('asdf5b')
#nori.logging_start_email_logging('report')
#nori.core.email_loggers['report'].error('asdf5c')
#nori.logging_stop_syslog()
#nori.logging_stop_stdouterr()
#nori.core.status_logger.info('asdf1')
#nori.core.status_logger.debug('asdf2')
#nori.logging_start_syslog()
#nori.logging_start_stdouterr()
#nori.core.alert_logger.error('asdf3')
#nori.core.alert_logger.debug('asdf4')
#nori.core.email_logger.error('asdf5a')
#nori.logging_stop_email_logging()
#nori.core.email_logger.error('asdf5b')
#nori.logging_start_email_logging()
#nori.core.email_logger.error('asdf5c')
#nori.core.output_logger.info('asdf6')
#nori.core.output_log_fo.write('asdf7\n')
#nori.core.output_log_fo.flush()
#nori.logging_end_logfile('output')
#nori.core.output_log_fo.write('asdf7\n')
#nori.generic_error_handler(ValueError('foo\nbar'), 'broken')
#nori.generic_error_handler(None, 'broken')
#nori.generic_error_handler(ValueError('foo\nbar'), 'broken',
# lambda x: x.message.capitalize())
#nori.generic_error_handler(ValueError('foo\nbar'), 'broken',
# lambda x: x.message.capitalize(),
# warn_only=True)
#nori.generic_error_handler(None, 'broken', exit_val=47)
#print(nori.generic_error_handler(None, 'broken', warn_only=True))
#nori.generic_error_handler(None, 'broken1', use_logger=None)
#def foo(x, y):
# print('a {0} b {1}'.format(x, y))
#nori.generic_error_handler(None, 'broken2', use_logger=foo)
#nori.generic_error_handler(None, 'broken3', use_logger=True)
#nori.generic_error_handler(None, 'broken4', use_logger=False)
#nori.generic_error_handler(None, 'broken5', use_logger=True,
# exit_val=None)
#nori.generic_error_handler(None, 'broken6', use_logger=False,
# exit_val=None)
#nori.generic_error_handler(None, 'broken w1', use_logger=None,
# warn_only=True)
#nori.generic_error_handler(None, 'broken w2', use_logger=foo,
# warn_only=True)
#nori.generic_error_handler(None, 'broken w3', use_logger=True,
# warn_only=True)
#nori.generic_error_handler(None, 'broken w4', use_logger=False,
# warn_only=True)
#nori.generic_error_handler(None, 'broken w5', use_logger=True,
# warn_only=True, exit_val=None)
#nori.generic_error_handler(None, 'broken w6', use_logger=False,
# warn_only=True, exit_val=None)
#f1 = open('core.py')
#f2 = open('ssh.py')
#nori.multi_fan_out(
# [
# (f1, [sys.stdout, sys.stderr]),
# (f1, [sys.stdout, sys.stderr])
# ]
#)
#f1 = os.open('core.py', os.O_RDONLY)
#f2 = os.open('ssh.py', os.O_RDONLY)
#nori.multi_fan_out(
# [
# (f1, [sys.stdout, sys.stderr]),
# (f2, [sys.stdout, sys.stderr])
# ]
#)
#o1 = os.open('/dev/fd/1', os.O_WRONLY|os.O_APPEND)
#o2 = os.open('/dev/fd/2', os.O_WRONLY)
#nori.multi_fan_out(
# [
# (f1, [o1, o2]),
# (f1, [o1, o2])
# ]
#)
#try:
# nori.run_command(
# 'asdf', shlex.split('adsf'), stdin=None, stdout='devnull',
# stderr=subprocess.STDOUT, bg=False, atexit_reg=True,
# env_add=None
# )
#except (OSError, ValueError) as e:
# print(nori.render_command_exception(e))
#nori.run_command(
# 'asdf', shlex.split('adsf'), stdin=None, stdout='devnull',
# stderr=subprocess.STDOUT, bg=False, atexit_reg=True,
# env_add=None, use_logger=True, warn_only=False, exit_val=43
#)
#nori.run_command(
# 'asdf', shlex.split('adsf'), stdin=None, stdout='devnull',
# stderr=subprocess.STDOUT, bg=False, atexit_reg=True,
# env_add=None, use_logger=False, warn_only=False, exit_val=43
#)
#nori.run_command(
# 'asdf', shlex.split('adsf'), stdin=None, stdout='devnull',
# stderr=subprocess.STDOUT, bg=False, atexit_reg=True,
# env_add=None, use_logger=True, warn_only=True, exit_val=43
#)
#nori.run_command(
# 'listing', shlex.split('ls -la /varadsf'), stdin=subprocess.PIPE,
# stdout='devnull', stderr=subprocess.STDOUT, bg=False,
# atexit_reg=True, env_add=None
#)
#nori.run_command(
# 'listing', shlex.split('ls -la /varadsf'), stdin=None,
# stdout='devnull', stderr=subprocess.STDOUT, bg=False,
# atexit_reg=True, env_add=None
#)
#nori.run_command(
# 'env', shlex.split('env'), stdin=None,
# stdout=[sys.stdout, sys.stderr], stderr=[sys.stdout, sys.stderr],
# bg=False, atexit_reg=True, env_add={'ZZZ':'4'}
#)
#print(nori.run_command(
# 'listing', shlex.split('ls -la /varadsf'), stdin=None,
# stdout=[sys.stdout, sys.stderr], stderr=[sys.stdout, sys.stderr],
# bg=False, atexit_reg=True, env_add={'ZZZ':'4'}
#))
#print(nori.run_command(
# 'env', shlex.split('env'), stdin=None,
# stdout=[sys.stdout, sys.stderr], stderr=[sys.stdout, sys.stderr],
# bg=False, atexit_reg=True, env_add={'ZZZ':'4'}, env={'Z':'5'}
#))
#p1, t = nori.run_command(
# 'listing', shlex.split('ls -la /varadsf'), stdin=None,
# stdout=[sys.stdout, sys.stderr], stderr=[sys.stdout, sys.stderr],
# bg=True, atexit_reg=False, env_add={'ZZZ':'4'}
#)
#p2, t = nori.run_command(
# 'env', shlex.split('env'), stdin=None,
# stdout=[sys.stdout, sys.stderr], stderr=[sys.stdout, sys.stderr],
# bg=True, atexit_reg=True, env_add={'ZZZ':'4'}, env={'Z':'5'}
#)
#p1.wait()
#time.sleep(30)
#p, t = nori.run_command(
# 'sleep', shlex.split('/bin/sleep 60'), stdin=None,
# stdout=[sys.stdout, sys.stderr], stderr=[sys.stdout, sys.stderr],
# bg=True, atexit_reg=True, env_add={'ZZZ':'4'}, env={'Z':'5'}
#)
#p, t = nori.run_command(
# 'listing', shlex.split('ls /var'), stdin=None,
# stdout=[sys.stdout, sys.stderr], stderr=[sys.stdout, sys.stderr],
# bg=True, atexit_reg=True, env_add={'ZZZ':'4'}, env={'Z':'5'}
#)
#p, t = nori.run_command(
# 'listing', shlex.split('find /var'), stdin=None,
# stdout=[sys.stdout, sys.stderr], stderr=[sys.stdout, sys.stderr],
# bg=True, atexit_reg=True, daemon=False, env_add={'ZZZ':'4'}
#)
#print(p)
#print(t)
#time.sleep(3)
#print(nori.kill_bg_command(p, 10))
#nori.run_with_logging('listing', ['lsasdf', '/tmp'], True, True,
# env_add={'PATH':'/bin/:/usr/bin/', 'A':'B'},
# use_logger=True, warn_only=False, exit_val=42)
#nori.run_with_logging('listing', ['lsasdf', '/tmp'], True, True,
# env_add={'PATH':'/bin/:/usr/bin/', 'A':'B'},
# use_logger=False, warn_only=False, exit_val=42)
#nori.run_with_logging('listing', ['lsasdf', '/tmp'], True, True,
# env_add={'PATH':'/bin/:/usr/bin/', 'A':'B'},
# use_logger=True, warn_only=True, exit_val=42)
#nori.run_with_logging('listing', ['ls', '/tmp'], True, True,
# env_add={'PATH':'/bin/:/usr/bin/', 'A':'B'})
#nori.run_with_logging('listing', ['ls', '/tmp', '/adsf'], True, True)
#nori.run_with_logging('listing', ['ls', '/tmp', '/adsf'], True, False)
#nori.run_with_logging('listing', ['ls', '/tmp', '/adsf'], False, True)
#print(nori.run_with_logging('listing', ['ls', '/tmp', '/adsf'],
# False, False))
#print(nori.run_with_logging('listing', ['ls', '/tmp', '/adsf'],
# False, False, True))
#try:
# nori.run_with_logging('listing', ['find', '/'], True, True)
#except IOError as e:
# print('adfasdhhhhjhhhhhhfasdfa')
#print(nori.test_remote_port(
# 'porrrrt', ('127.0.0.1', 22), ('127.0.0.1', 5555),
# timeout=5, use_logger=False, warn_only=False
#))
#print(nori.test_remote_port(
# 'porrrrt', ('127.0.0.1', 82), ('127.0.0.1', 5556),
# timeout=5, use_logger=False, warn_only=True
#))
#print(nori.test_remote_port(
# 'porrrrt', ('127.0.0.1', 22),
# timeout=5, use_logger=False, warn_only=False
#))
#print(nori.test_remote_port(
# 'porrrrt', ('127.0.0.1', 82),
# timeout=5, use_logger=False, warn_only=True
#))
#print(nori.test_remote_port(
# 'porrrrt', ('127.0.0.1', 82), ('127.0.0.1', 5556),
# timeout=5, use_logger=False, warn_only=False
#))
#print(nori.test_remote_port(
# 'porrrrt', ('127.0.0.1', 82), ('127.0.0.1', 5556),
# timeout=5, use_logger=False, warn_only=False, exit_val=42
#))
#print(nori.config_settings['syslog_addr']['default'])
#print(nori.config_settings['syslog_sock_type']['default'])
#print(nori.setting_walk('alert_emails_host'))
#print(nori.setting_walk(('alert_emails_host',)))
#print(nori.setting_walk(('alert_emails_host',0)))
#print(nori.setting_walk(('alert_emails_host',2)))
#print(nori.setting_is_set('alert_emails_host'))
#print(nori.setting_is_set(('alert_emails_host',)))
#print(nori.setting_is_set(('alert_emails_host',0)))
#print(nori.setting_is_set(('alert_emails_host',2)))
#print(nori.setting_is_unset('alert_emails_host'))
#print(nori.setting_is_unset(('alert_emails_host',)))
#print(nori.setting_is_unset(('alert_emails_host',0)))
#print(nori.setting_is_unset(('alert_emails_host',2)))
#print(nori.setting_check_is_set('alert_emails_host'))
#print(nori.setting_check_is_set(('alert_emails_host',)))
#print(nori.setting_check_is_set(('alert_emails_host',0)))
#print(nori.setting_check_is_set(('alert_emails_host',2)))
#print(nori.setting_check_one_is_set(['alert_emails_host']))
#print(nori.setting_check_one_is_set([('alert_emails_host', )]))
#print(nori.setting_check_one_is_set([('alert_emails_host', 0)]))
#print(nori.setting_check_one_is_set([('alert_emails_host', 2)]))
#print(nori.setting_check_one_is_set(['alert_emails_host',
# ('alert_emails_host', 0)]))
#print(nori.setting_check_one_is_set([('alert_emails_host', )]))
#print(nori.setting_check_one_is_set([('alert_emails_host', 0)]))
#print(nori.setting_check_one_is_set([('alert_emails_host', 2)]))
#print(nori.setting_check_one_is_set([('alert_emails_host', 2),
# ('alert_emails_host', 0)]))
#print(nori.setting_check_type(('alert_emails_host', 2),
# nori.STRING_TYPES))
#print(nori.setting_check_type(('alert_emails_host', | |
#!/usr/bin/env python3
"""Generate Foundation SDK code from the SDK Foundation Definition file."""
import sys
import argparse
import os
import shutil
import yaml
import logging
import copy
import functools
import subprocess
import re
from collections import defaultdict
import jinja2
logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s')
logger = logging.getLogger(__name__)
TEMPLATE_DIR = os.path.join(os.path.dirname(__file__), 'templates')
SUPPORTED_FILTER_OPERATORS = ["eq", "neq", "gte", "lte", "in", "nin", "like"]
# The required parameters for a paginator
PRIVATE_PAGINATOR_PARAMETERS = {
"after": {
'_key': 'after',
'api_fieldname': 'after',
'description': 'Not supported by the API.',
'entity_fieldname': 'after',
'external_param': True,
'in': 'query',
'name': 'after',
'parameter_fieldname': 'after',
'required': False,
'type': 'string',
'python_type': 'str',
'python_field': 'StringField',
'default': None
},
"include": {
'_key': 'include',
'api_fieldname': 'include',
'description': 'Not supported by the API.',
'entity_fieldname': 'include',
'external_param': True,
'in': 'query',
'name': 'include',
'parameter_fieldname': 'include',
'required': False,
'type': 'string',
'python_type': 'str',
'python_field': 'StringField',
'default': None
},
"limit": {
'_key': 'limit',
'api_fieldname': 'limit',
'default': None,
'description': 'Not supported by the API.',
'entity_fieldname': 'limit',
'external_param': True,
'format': 'int32',
'in': 'query',
'name': 'limit',
'parameter_fieldname': 'limit',
'required': False,
'type': 'integer',
'python_type': 'int',
'python_field': 'IntegerField'
},
"order": {
'_key': 'order',
'api_fieldname': 'order',
'default': None,
'description': 'Not supported by the API.',
'entity_fieldname': 'order',
'external_param': True,
'in': 'query',
'name': 'order',
'parameter_fieldname': 'order',
'required': False,
'type': 'string',
'python_type': 'str',
'python_field': 'StringField'
},
"filter": {
'_key': 'filter',
'api_fieldname': 'filter',
'default': None,
'description': 'Optional API filter for listing resources.',
'entity_fieldname': 'filter',
'external_param': True,
'in': 'special_query',
'name': 'filter',
'parameter_fieldname': 'filter',
'required': False,
'type': 'mbed_cloud.client.api_filter.ApiFilter',
'python_type': 'mbed_cloud.client.api_filter.ApiFilter',
'python_field': 'mbed_cloud.client.api_filter.ApiFilter'
},
}
PUBLIC_PAGINATOR_PARAMETERS = {
'filter': {
'_key': 'filter',
'api_fieldname': 'filter',
'default': None,
'description': 'Filtering when listing entities is not supported by the API for this entity.',
'entity_fieldname': 'filter',
'external_param': True,
'in': 'query',
'name': 'after',
'parameter_fieldname': 'filter',
'python_field': 'mbed_cloud.client.api_filter.ApiFilter',
'python_type': 'mbed_cloud.client.api_filter.ApiFilter',
'required': False,
'type': 'mbed_cloud.client.api_filter.ApiFilter',
'_sort_order': "e",
},
'include': {
'_key': 'include',
'api_fieldname': 'include',
'default': None,
'description': 'Comma separated additional data to return.',
'entity_fieldname': 'include',
'external_param': True,
'in': 'query',
'name': 'include',
'parameter_fieldname': 'include',
'python_field': 'StringField',
'python_type': 'str',
'required': False,
'type': 'string',
'_sort_order': "a",
},
'page_size': {
'_key': 'page_size',
'api_fieldname': 'page_size',
'default': None,
'description': 'The number of results to return for each page.',
'entity_fieldname': 'page_size',
'external_param': True,
'format': 'int32',
'in': 'query',
'name': 'page_size',
'parameter_fieldname': 'page_size',
'python_field': 'IntegerField',
'python_type': 'int',
'required': False,
'type': 'integer',
'_sort_order': "c",
},
'order': {
'_key': 'order',
'api_fieldname': 'order',
'default': None,
'description': 'The order of the records based on creation time, ASC or DESC. Default value is ASC',
'entity_fieldname': 'order',
'enum': ['ASC', 'DESC'],
'external_param': True,
'in': 'query',
'name': 'order',
'parameter_fieldname': 'order',
'python_field': 'StringField',
'python_type': 'str',
'required': False,
'type': 'string',
'_sort_order': "d",
},
'max_results': {
'_key': 'max_results',
'api_fieldname': 'max_results',
'default': None,
'description': 'Total maximum number of results to retrieve',
'entity_fieldname': 'max_results',
'external_param': True,
'format': 'int32',
'in': 'query',
'name': 'max_results',
'parameter_fieldname': 'max_results',
'python_field': 'IntegerField',
'python_type': 'int',
'required': False,
'type': 'integer',
'_sort_order': "b",
},
}
# Define a sort order so that the parameters of listing methods appear in a fixed sort order before any
# endpoint-specific parameters
SORT_ORDER = {
"after": "1",
"filter": "2",
"order": "3",
"limit": "4",
"max_results": "5",
"page_size": "6",
"include": "7",
}
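# Illustrative sketch (not part of the generator itself) of how these keys can order the
# listing parameters ahead of endpoint-specific ones; unknown names fall back to a suffix
# key so they sort last:
#   sorted(["page_size", "after", "custom_param"], key=lambda n: SORT_ORDER.get(n, "9" + n))
#   # -> ["after", "page_size", "custom_param"]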
# Map from Swagger Types / Formats to Foundation field types
SWAGGER_FIELD_MAP = {
# Swagger types
"array": "ListField",
"boolean": "BooleanField",
"integer": "IntegerField",
"number": "FloatField",
"object": "DictField",
"file": "FileField",
"string": "StringField",
# Swagger formats (specialisation of types)
"byte": "BinaryField",
"binary": "BinaryField",
"date-time": "DateTimeField",
"date": "DateField",
# Custom filter field is stored as a dictionary
"filter": "DictField",
}
# Map from Swagger Types / Formats to native Python types
SWAGGER_TYPE_MAP = {
# Swagger types
"array": "list",
"boolean": "bool",
"integer": "int",
"number": "float",
"object": "dict",
"string": "str",
"file": "file",
# Swagger formats (specialisation of types)
"byte": "bytes",
"binary": "bytes",
"date-time": "datetime",
"date": "date",
    # Custom filter field uses the standard API filter builder
"filter": "mbed_cloud.client.api_filter.ApiFilter",
}
TEXT_CONTENT_TYPE = '"text/plain"'
CSV_CONTENT_TYPE = '"text/csv"'
JPEG_CONTENT_TYPE = '"image/jpeg"'
PNG_CONTENT_TYPE = '"image/png"'
BINARY_CONTENT_TYPE = '"application/octet-stream"'
# Map from Swagger Types / Formats to multipart MIME content types
SWAGGER_CONTENT_TYPE = {
# Swagger types
"array": TEXT_CONTENT_TYPE,
"boolean": TEXT_CONTENT_TYPE,
"integer": TEXT_CONTENT_TYPE,
"number": TEXT_CONTENT_TYPE,
"object": TEXT_CONTENT_TYPE,
"string": TEXT_CONTENT_TYPE,
"file": BINARY_CONTENT_TYPE,
# Swagger formats (specialisation of types)
"byte": BINARY_CONTENT_TYPE,
"binary": BINARY_CONTENT_TYPE,
"date-time": TEXT_CONTENT_TYPE,
"date": TEXT_CONTENT_TYPE,
    # Custom filter field uses the standard API filter builder
"filter": TEXT_CONTENT_TYPE,
}
def map_python_field_types(fields):
"""Add Python types and Foundation field types to definition file."""
for field in fields:
swagger_type = field.get("type")
swagger_format = field.get("format")
field["python_type"] = SWAGGER_TYPE_MAP.get(swagger_format) or SWAGGER_TYPE_MAP.get(swagger_type)
field["python_field"] = SWAGGER_FIELD_MAP.get(swagger_format) or SWAGGER_FIELD_MAP.get(swagger_type)
        # The content type is required if the field is part of a multipart upload; there is some guesswork involved,
        # so check the contents of the description. Unfortunately this breaks if we have to set different MIME types
        # for the same endpoint depending on the file type which is passed, e.g. upload branding image
if swagger_type == "file":
file_description = field.get("description", "").lower()
if "csv" in file_description:
field["content_type"] = CSV_CONTENT_TYPE
elif "png" in file_description:
field["content_type"] = PNG_CONTENT_TYPE
elif "jpg" in file_description or "jpeg" in file_description:
field["content_type"] = JPEG_CONTENT_TYPE
else:
field["content_type"] = BINARY_CONTENT_TYPE
else:
field["content_type"] = SWAGGER_CONTENT_TYPE.get(swagger_format) or SWAGGER_CONTENT_TYPE.get(swagger_type)
# The file name is also required for the multipart upload for file fields
if field["content_type"] == BINARY_CONTENT_TYPE:
field["file_name"] = '"%s.bin"' % field["_key"]
elif field["content_type"] == CSV_CONTENT_TYPE:
field["file_name"] = '"%s.csv"' % field["_key"]
elif field["content_type"] == JPEG_CONTENT_TYPE:
field["file_name"] = '"%s.jpg"' % field["_key"]
elif field["content_type"] == PNG_CONTENT_TYPE:
field["file_name"] = '"%s.png"' % field["_key"]
else:
field["file_name"] = None
@functools.lru_cache()
def to_pascal_case(string):
"""Convert from snake_case to PascalCase
    Using the standard library `title` doesn't help as it changes everything after the first letter to lowercase; we
    want the following:
- API -> API
- api_key -> ApiKey
- user -> User
- paginated_response(account) -> PaginatedResponse(Account)
:param str string: String to reformat.
:returns: Reformatted string.
:rtype: str
"""
string = string.replace(" ", "_")
string = string.replace("__", "_")
string = string.replace("(", "(_")
return string and "".join(n[0].upper() + n[1:] for n in string.split("_") if n)
@functools.lru_cache()
def to_snake_case(string):
"""Converts string to snake_case
we don't use title because that forces lowercase for the word, whereas we want:
PSK -> psk
api key -> api_key
content-length -> content_length
user -> user
"""
return re.sub("[ -]", "_", string).lower()
def to_singular_name(name):
"""Convert to snake case and remove and trailing `s` if present"""
return to_snake_case(name).rstrip('s')
def sort_parg_kwarg(items):
"""Very specific sort ordering for ensuring pargs, kwargs are in the correct order"""
return sorted(items, key=lambda x: not bool(x.get('required')))
class TemplateRenderer(object):
"""Foundation Interface Template Renderer for jinja2"""
def __init__(self, output_root_dir):
"""Setup the jinja2 environment
:param str output_root_dir: Root directory in which to write the Foundation interface.
"""
self.output_root_dir = output_root_dir
self.jinja_env = jinja2.Environment(loader=jinja2.FileSystemLoader(TEMPLATE_DIR))
self.jinja_env.filters['repr'] = repr
self.jinja_env.filters['pargs_kwargs'] = sort_parg_kwarg
self.jinja_env.filters.update(dict(
repr=repr,
sort_parg_kwarg=sort_parg_kwarg,
to_snake=to_snake_case,
to_pascal=to_pascal_case,
to_singular_name=to_singular_name,
))
def render_template(self, template_filename, group="group", entity="entity", template_data=None):
"""Render one or more jinja2 templates.
The output filename is relative to the `output_root_dir` defined in the class instance but is also defined by
the `template_filename`. The `template_filename` should be interspersed with `.` to indicate subdirectories.
Two place holders are also supported in the template filename:
- `group`: which will be replaced by the `group` parameter
- `entity`: which will be replaced by the `entity` parameter
:param str template_filename: name of template to render, this also defines the output path and filename.
:param str group: This should be supplied when the template filename contains `group` .
:param str entity: This should be supplied when the template filename contains `entity`.
:param dict template_data: Data to pass to the template.
"""
template = self.jinja_env.get_template(template_filename)
# Remove template extension (we'll append .py later).
output_path = template_filename.replace(".jinja2", "")
        # Convert the template filename to a path (which will be relative to the output_root_dir).
output_path = output_path.replace(".", os.path.sep)
# If `group` or `entity` exist in the path name replace them with the provided group and entity parameters
output_path = output_path.replace("group", to_snake_case(group))
output_path = output_path.replace("entity", to_snake_case(entity))
# Combine the root directory with the directory defined by the template filename
output_path = os.path.join(self.output_root_dir, output_path) + ".py"
output_dir = os.path.dirname(output_path)
if not os.path.exists(output_dir):
logger.info("Creating subdirectory '%s'", output_dir)
os.makedirs(output_dir)
logger.info("Rendering template from '%s' to '%s'", template_filename, output_path)
rendered = template.render(template_data)
with open(output_path, "w") as output_fh:
output_fh.write(rendered)
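# Usage sketch (hypothetical template name and output directory, not a real template in
# this repo): rendering "foundation.group.entity.entity.jinja2" with group="Devices" and
# entity="Device" writes to <output_root_dir>/foundation/devices/device/device.py, e.g.:
#   renderer = TemplateRenderer("generated")
#   renderer.render_template("foundation.group.entity.entity.jinja2",
#                            group="Devices", entity="Device", template_data={})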
def render_foundation_sdk(python_sdk_def_dict, output_dir):
"""Render the Foundation interface using the jinja templates
:param dict python_sdk_def_dict: SDK definitions dictionary post processed for Python
:param str output_dir: Directory in which | |
sym_t("v_out_flag" ,self.v_wei_ik.value)
self.v_out_inb = sym_t("v_out_inb" ,self.v_in_inb.value)
self.v_gemm_in = sym_t("v_gemm_in" ,vseq(1))
self.v_gemm_im = sym_t("v_gemm_im" ,vseq(1))
self.v_co_sub_m_index = sym_t("v_co_sub_m_index" ,self.v_gemm_im.value)
self.v_co_sub_n_index = sym_t("v_co_sub_n_index" ,self.v_gemm_in.value)
self.v_tmp = sym_t("v_tmp" ,vseq(6, 2))
self.v_wei_tmp_pack = sym_t("v_wei_tmp_pack" ,vseq(1) if outer.is_pad_c() else \
(self.v_gld_a.value - 1 if self.v_gld_a.value > 1 else vseq(1)))
if nk_per_thread <= 4 and IGEMM_FWD_GTC_NHWC_PACK_IN_FLAG == 0:
self.v_wei_flag = sym_t("v_wei_flag" ,self.v_tmp.value)
else:
self.v_wei_flag = sym_t("v_wei_flag" ,vseq(nk_per_thread))
total_vgpr = vseq()
self.accum_start = 0
if outer.tunable.fma_type == IGEMM_GTC_TUNABLE_FMA_TYPE_XDLOPS:
if self.mc.arch_config.arch == AMDGPU_ARCH_GFX90A:
                total_vgpr = (total_vgpr + 3) // 4 * 4 # round up to a multiple of 4
self.accum_start = total_vgpr
total_vgpr = total_vgpr + outer.tunable.num_agpr_accumulate_c
else:
# if xdlops agpr is larger than vgpr usage, must change vgpr count to agpr
total_vgpr = max(total_vgpr, outer.tunable.num_agpr_accumulate_c)
self.v_end = sym_t("v_end" ,total_vgpr)
def get_count(self):
return self.v_end.value
def get_accum_start(self):
return self.accum_start
def emit(self):
for k, v in self.__dict__.items():
if k.startswith('v_'):
self._emit(v.declare())
class kernel_agpr_t(mc_base_t):
def __init__(self, mc, outer):
mc_base_t.__init__(self, mc)
assert outer.tunable.fma_type == IGEMM_GTC_TUNABLE_FMA_TYPE_XDLOPS, 'only xdlops can use agpr'
self.outer = outer
if outer.is_accvgpr_unified():
vgpr = outer.kernel_vgpr_t(mc, outer)
aseq = gpr_sequencer_t(vgpr.get_accum_start())
else:
aseq = gpr_sequencer_t()
self.a_c = sym_t("a_c", aseq(outer.tunable.num_agpr_accumulate_c))
self.a_end = sym_t("a_end", aseq())
def get_count(self):
return self.a_end.value
def emit(self):
for k, v in self.__dict__.items():
if k.startswith('a_'):
self._emit(v.declare())
def get_num_vgpr_global_load_a(self):
ta_nb0, ta_nb1, ta_e, ta_c, tb_e, tb_c, tb_k0, tb_k1 = self.get_thread_lengths()
pack_factor = (4 // amdgpu_precision_data_byte(self.tunable.precision)) if ta_c != 1 else 1
return self.tunable.num_global_load_a // pack_factor
def get_num_vgpr_global_load_b(self):
ta_nb0, ta_nb1, ta_e, ta_c, tb_e, tb_c, tb_k0, tb_k1 = self.get_thread_lengths()
pack_factor = (4 // amdgpu_precision_data_byte(self.tunable.precision)) if tb_c != 1 else 1
return self.tunable.num_global_load_b // pack_factor
def get_thread_lengths(self):
t_ta = self.tunable.tensor_a_thread_lengths
t_tb = self.tunable.tensor_b_thread_lengths
assert len(t_ta) == 4 and len(t_tb) == 4
ta_e, ta_c, ta_nb0, ta_nb1 = t_ta[0], t_ta[1], t_ta[2], t_ta[3]
tb_e, tb_c, tb_k0, tb_k1 = t_tb[0], t_tb[1], t_tb[2], t_tb[3]
if self.tunable.tensor_a_pass_through or self.tunable.tensor_b_pass_through:
pass
else:
assert ta_e == tb_e and ta_c == tb_c
if self.tunable.precision == 'fp32':
assert ta_c in (1, 2, 4), "currently c will be used as LDS store/load vector size, now only support this"
elif self.tunable.precision in ('fp16', 'bf16'):
assert ta_c in (1, 2, 4, 8, 16, 32)
elif self.tunable.precision == 'int8':
assert ta_c in (1, 2, 4, 8, 16, 32, 64)
assert ta_e == 1, "currently not support >1 in e dimension"
        # there is no point in having both x0 and x1 carry copy values
if not self.tunable.tensor_a_pass_through:
assert not (ta_nb0 != 1 and ta_nb1 != 1)
if not self.tunable.tensor_b_pass_through:
assert not (tb_k0 != 1 and tb_k1 != 1)
return ta_nb0, ta_nb1, ta_e, ta_c, tb_e, tb_c, tb_k0, tb_k1 # M, K, N
def get_cluster_lengths(self):
c_ta = self.tunable.tensor_a_cluster_lengths
c_tb = self.tunable.tensor_b_cluster_lengths
assert len(c_ta) == 4 and len(c_tb) == 4
ca_e, ca_c, ca_nb0, ca_nb1 = c_ta[0], c_ta[1], c_ta[2], c_ta[3]
cb_e, cb_c, cb_k0, cb_k1 = c_tb[0], c_tb[1], c_tb[2], c_tb[3]
if not self.tunable.tensor_a_pass_through:
assert ca_nb1 != 1
assert ca_e == cb_e and ca_c == cb_c
assert ca_nb0 == 1
if not self.tunable.tensor_b_pass_through:
assert cb_k0 == 1
assert ca_e == 1
return ca_nb0, ca_nb1, ca_e, ca_c, cb_e, cb_c, cb_k0, cb_k1 # M, K, N
def get_dims_lengths(self):
ta_nb0, ta_nb1, ta_e, ta_c, tb_e, tb_c, tb_k0, tb_k1 = self.get_thread_lengths()
ca_nb0, ca_nb1, ca_e, ca_c, cb_e, cb_c, cb_k0, cb_k1 = self.get_cluster_lengths()
na_nb0, na_nb1, na_e, na_c = ta_nb0 * ca_nb0, ta_nb1 * ca_nb1, ta_e * ca_e, ta_c * ca_c
nb_k0, nb_k1 , nb_e, nb_c = tb_k0 * cb_k0, tb_k1 * cb_k1, tb_e * cb_e, tb_c * cb_c
return na_nb0, na_nb1, na_e, na_c, nb_e, nb_c, nb_k0, nb_k1 # M, K, N
def get_thread_copy_dims(self):
ta_nb0, ta_nb1, ta_e, ta_c, tb_e, tb_c, tb_k0, tb_k1 = self.get_thread_lengths()
in_thread_copy_dims = [ta_nb0, ta_nb1, ta_e, ta_c]
wei_thread_copy_dims = [tb_k0, tb_k1, tb_e, tb_c] # always reordered!
return in_thread_copy_dims, wei_thread_copy_dims
def get_thread_copy_index(self):
in_thread_copy_dims, wei_thread_copy_dims = self.get_thread_copy_dims()
in_thread_copy_index = _find_non_1_index_in_list(in_thread_copy_dims)
wei_thread_copy_index = _find_non_1_index_in_list(wei_thread_copy_dims)
'''
        if both thread-length dimensions are 1, every thread only copies one pixel.
        we need to support this as well
'''
return in_thread_copy_index, wei_thread_copy_index
def get_k_pack(self):
ta_nb0, ta_nb1, ta_e, ta_c, tb_e, tb_c, tb_k0, tb_k1 = self.get_thread_lengths()
data_byte = amdgpu_precision_data_byte(self.tunable.precision)
if (not self.tunable.tensor_a_pass_through and not self.tunable.tensor_b_pass_through) or \
(self.tunable.tensor_a_pass_through and self.tunable.tensor_b_pass_through):
assert ta_c == tb_c
return tb_c
else:
if self.tunable.tensor_a_pass_through:
assert ta_c % tb_c == 0
return utility_gcd(ta_c, 4 * (4 // data_byte)) if ta_c != 1 else 1
else:
assert tb_c % ta_c == 0
return utility_gcd(tb_c, 4 * (4 // data_byte)) if tb_c != 1 else 1
def is_pad_c(self):
'''
        The NHWC implementation always wants to vector-load c, but we can still pad c (like 3) up to a convenient number.
        Another consideration is the write-out: in fp32 we prefer non-vector stores, so there is no problem,
        but in fp16 we prefer vector stores. Hence the assumption is that if this function returns True,
        fp16 no longer uses vector stores.
        The same holds for int8.
        update:
        in a merge_e config, is_pad_c actually means is_pad_gemm_k
'''
ta_nb0, ta_nb1, ta_e, ta_c, tb_e, tb_c, tb_k0, tb_k1 = self.get_thread_lengths()
if ta_c == 1 and tb_c == 1:
assert self.tunable.vector_store == 0
return True
return False
def get_macro_global_load(self):
'''
NOTICE: input/wei always load gemm_k (e*c) first; in practice this means always loading c, with a vector load where possible
'''
inline = bool(self.tunable.fma_interleave)
ta_nb0, ta_nb1, ta_e, ta_c, tb_e, tb_c, tb_k0, tb_k1 = self.get_thread_lengths()
na_nb0, na_nb1, na_e, na_c, nb_e, nb_c, nb_k0, nb_k1 = self.get_dims_lengths()
in_thread_copy_dims, wei_thread_copy_dims = self.get_thread_copy_dims()
in_thread_copy_index, wei_thread_copy_index = self.get_thread_copy_index()
ctrl_wei_gld = ctrl_2d_global_load_t()
ctrl_in_gld = ctrl_2d_global_load_t()
data_byte = amdgpu_precision_data_byte(self.tunable.precision)
ctrl_wei_gld.precision = self.tunable.precision
ctrl_in_gld.precision = self.tunable.precision
ctrl_wei_gld.vector_d1 = utility_gcd(tb_c, 4 * (4 // data_byte)) if tb_c != 1 else 1
ctrl_in_gld.vector_d1 = utility_gcd(ta_c, 4 * (4 // data_byte)) if ta_c != 1 else 1
if self.tunable.tensor_b_pass_through:
ctrl_wei_gld.precache_ptn = GLOBAL_PTN_D0_S | GLOBAL_PTN_D1_S
ctrl_wei_gld.flag_on_d0 = 1
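# d0 length is whichever of tb_k0 / tb_k1 is non-unit (tb_k0 takes precedence)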
ctrl_wei_gld.length_d0 = tb_k0 if tb_k0 != 1 else tb_k1
ctrl_wei_gld.length_d1 = tb_c
ctrl_wei_gld.vector_d1 = self.get_k_pack()
# ctrl_wei_gld.flag_merge_v = 0 if self.tunable.tensor_b_pass_through_interleave_gld else 1
else:
ctrl_wei_gld.precache_ptn = GLOBAL_PTN_D0_S | GLOBAL_PTN_D1_S
if self.wei_thread_copy_ndim == 2:
ctrl_wei_gld.flag_on_d0 = 1
ctrl_wei_gld.length_d0 = wei_thread_copy_dims[wei_thread_copy_index[0]]
ctrl_wei_gld.length_d1 = wei_thread_copy_dims[wei_thread_copy_index[1]]
elif self.wei_thread_copy_ndim == 1:
if tb_k0 * tb_k1 != 1:
ctrl_wei_gld.flag_on_d0 = 0
ctrl_wei_gld.flag_on_d1 = 1
else:
ctrl_wei_gld.flag_on_d0 = 1
ctrl_wei_gld.flag_on_d1 = 0
ctrl_wei_gld.length_d0 = 1
ctrl_wei_gld.length_d1 = wei_thread_copy_dims[wei_thread_copy_index[0]]
else:
ctrl_wei_gld.length_d0 = 1
ctrl_wei_gld.flag_on_d1 = 1
ctrl_wei_gld.length_d1 = wei_thread_copy_dims[-1]
if self.tunable.tensor_a_pass_through:
#ctrl_in_gld.length_d0 = ta_c // self.get_k_pack()
#ctrl_in_gld.length_d1 = (ta_nb0 if ta_nb0 != 1 else ta_nb1) * self.get_k_pack()
#ctrl_in_gld.vector_d1 = self.get_k_pack()
#ctrl_in_gld.flag_merge_v = 0 if self.tunable.tensor_a_pass_through_interleave_gld else 1
ctrl_in_gld.length_d0 = ta_nb0 if ta_nb0 != 1 else ta_nb1
ctrl_in_gld.length_d1 = ta_c
ctrl_in_gld.vector_d1 = self.get_k_pack()
assert not self.tunable.tensor_a_pass_through_interleave_gld, "NHWC never uses an interleaved global load; this may reduce performance"
# ctrl_in_gld.flag_merge_v = 1
ctrl_in_gld.precache_ptn = GLOBAL_PTN_D0_V | GLOBAL_PTN_D1_K
ctrl_in_gld.flag_on_d0 = 1
else:
# ctrl_in_gld.vector_d1 = self.get_k_pack()
if self.in_thread_copy_ndim == 2:
ctrl_in_gld.flag_on_d0 = 1
ctrl_in_gld.precache_ptn = GLOBAL_PTN_D0_V | GLOBAL_PTN_D1_K
ctrl_in_gld.length_d0 = in_thread_copy_dims[in_thread_copy_index[0]]
ctrl_in_gld.length_d1 = in_thread_copy_dims[in_thread_copy_index[1]]
elif self.in_thread_copy_ndim == 1:
if ta_nb0 * ta_nb1 != 1:
ctrl_in_gld.precache_ptn = GLOBAL_PTN_D0_K | GLOBAL_PTN_D1_V
ctrl_in_gld.flag_on_d1 = 1
else:
ctrl_in_gld.precache_ptn = GLOBAL_PTN_D0_V | GLOBAL_PTN_D1_K
ctrl_in_gld.flag_on_d0 = 1
ctrl_in_gld.length_d0 = 1
ctrl_in_gld.length_d1 = in_thread_copy_dims[in_thread_copy_index[0]]
else:
ctrl_in_gld.length_d0 = 1
ctrl_in_gld.length_d1 = in_thread_copy_dims[-1]
ctrl_in_gld.use_flag = 1
ctrl_wei_gld.use_flag = 1
if self.tunable.nxe != 0:
if IGEMM_FWD_GTC_NHWC_PACK_IN_FLAG:
ctrl_wei_gld.bfe_flag = 1
ctrl_in_gld.bfe_flag = 1
if self.tunable.precache_soffset:
return macro_igemm_2d_global_load_precache_offset_t(self.mc, ctrl_wei_gld, inline), \
macro_igemm_2d_global_load_precache_offset_t(self.mc, ctrl_in_gld, inline)
else:
return macro_igemm_2d_global_load_t(self.mc, ctrl_wei_gld, inline), macro_igemm_2d_global_load_precache_voffset_t(self.mc, ctrl_in_gld, inline)
def get_macro_shared_store(self):
#in_thread_copy_dims, wei_thread_copy_dims = self.get_thread_copy_dims()
#in_thread_copy_index, wei_thread_copy_index = self.get_thread_copy_index()
na_nb0, na_nb1, na_e, na_c, nb_e, nb_c, nb_k0, nb_k1 = self.get_dims_lengths()
ta_nb0, ta_nb1, ta_e, ta_c, tb_e, tb_c, tb_k0, tb_k1 = self.get_thread_lengths()
data_byte = amdgpu_precision_data_byte(self.tunable.precision)
k_pack = self.get_k_pack()
k_pack_lanegroup = self.xdlops_mapping.ctrl.lanegroup_k_per_thread()
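# if k_pack is 1, fall back to the xdlops lanegroup k-per-thread (presumably so the LDS layout still packs along gemm_k)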
k_pack_src_mat = k_pack if k_pack != 1 else k_pack_lanegroup
m_wei_2d_global_load, m_in_2d_global_load = self.get_macro_global_load()
k_pack_gld_a = m_in_2d_global_load.ctrl.vector_d1
k_pack_gld_b = m_wei_2d_global_load.ctrl.vector_d1
if not self.tunable.tensor_a_pass_through:
# input is gemm_k * gemm_m * k_pack
in_sst_ctrl = ctrl_3d_shared_store_t()
in_sst_ctrl.precision = self.tunable.precision
in_sst_ctrl.length_d0 = ta_nb0
in_sst_ctrl.length_d1 = ta_nb1
in_sst_ctrl.length_dp = ta_c
in_sst_ctrl.vector_dp = k_pack_gld_a
in_sst_ctrl.stride_d0 = na_nb1 * k_pack_src_mat * data_byte
in_sst_ctrl.stride_d1 = k_pack_src_mat * data_byte
if not self.tunable.tensor_b_pass_through:
# wei is gemm_k * gemm_n * k_pack
wei_sst_ctrl = ctrl_3d_shared_store_t()
wei_sst_ctrl.precision = self.tunable.precision
wei_sst_ctrl.length_d0 = tb_k0
wei_sst_ctrl.length_d1 = tb_k1
wei_sst_ctrl.length_dp = tb_c
from os import getcwd, listdir
from sys import path
from time import sleep, time
from json import loads
from PyQt5.QtCore import pyqtSignal, QObject
from foo.pictureR import pictureFind
from foo.pictureR import bootyCount
from foo.win import toast
from common import schedule_data
from common2 import adb
class BattleSchedule(QObject):
errorSignal = pyqtSignal(str)
def __init__(self, cwd, ico):
super(BattleSchedule, self).__init__()
self.cwd = cwd
self.ico = ico
self.switch = False
self.switchB = False
self.autoRecMed = False
self.autoRecStone = False
self.isWaitingUser = False
self.isRecovered = False
self.stoneMaxNum = 0
self.BootyDetect = bootyCount.Booty(self.cwd)
self.imgInit()
def recChange(self, num, inputData):
if num == 0:
self.autoRecMed = inputData
elif num == 1:
self.autoRecStone = inputData
elif num == 2:
self.stoneMaxNum = inputData
def imgInit(self):
self.recMed = pictureFind.picRead(self.cwd + "/res/panel/recovery/medicament.png")
self.recStone = pictureFind.picRead(self.cwd + "/res/panel/recovery/stone.png")
self.confirm = pictureFind.picRead(self.cwd + "/res/panel/recovery/confirm.png")
self.sign = pictureFind.picRead(self.cwd + "/res/panel/level/sign.png")
#self.exPos = {'ex1':(220,280),'ex2':(845,580),'ex3':(1230,340)}
#self.screenShot = self.cwd + '/bin/adb/arktemp.png'
self.act = self.cwd + "/res/panel/other/act.png"
self.battle = self.cwd + "/res/panel/other/battle.png"
self.home = self.cwd + "/res/panel/other/home.png"
self.visitNext = self.cwd + "/res/panel/other/visitNext.png"
self.listBattleImg = pictureFind.picRead([self.cwd + "/res/battle/" + i for i in listdir(self.cwd + "/res/battle")])
self.startA = pictureFind.picRead(self.cwd + "/res/battle/startApart.png")
self.startB = pictureFind.picRead(self.cwd + "/res/battle/startBpart.png")
self.autoOff = pictureFind.picRead(self.cwd + "/res/panel/other/autoOff.png")
self.autoOn = pictureFind.picRead(self.cwd + "/res/panel/other/autoOn.png")
self.II = {'MAIN':self.cwd + "/res/panel/level/I/main.png", 'EX':self.cwd + "/res/panel/level/I/exterminate.png",\
'RS':self.cwd + "/res/panel/level/I/resource.png", 'PR':self.cwd + "/res/panel/level/I/chip.png"}
self.III = {'A':self.cwd + "/res/panel/level/II/A.png", 'B':self.cwd + "/res/panel/level/II/B.png",\
'C':self.cwd + "/res/panel/level/II/C.png", 'D':self.cwd + "/res/panel/level/II/D.png",\
'AP':self.cwd + "/res/panel/level/II/AP.png", 'CA':self.cwd + "/res/panel/level/II/CA.png",\
'CE':self.cwd + "/res/panel/level/II/CE.png", 'SK':self.cwd + "/res/panel/level/II/SK.png",\
'LS':self.cwd + "/res/panel/level/II/LS.png", \
'0':self.cwd + "/res/panel/level/II/ep0.png", '1':self.cwd + "/res/panel/level/II/ep1.png",\
'2':self.cwd + "/res/panel/level/II/ep2.png", '3':self.cwd + "/res/panel/level/II/ep3.png",\
'4':self.cwd + "/res/panel/level/II/ep4.png", '5':self.cwd + "/res/panel/level/II/ep5.png",\
'6':self.cwd + "/res/panel/level/II/ep6.png", '7':self.cwd + "/res/panel/level/II/ep7.png",\
'8':self.cwd + "/res/panel/level/II/ep8.png",\
'ex': self.cwd + "/res/panel/level/II/EX.png"}
self.exIV = {'ex1':self.cwd + "/res/panel/level/III/e01.png",'ex2':self.cwd + "/res/panel/level/III/e02.png", 'ex3':self.cwd + "/res/panel/level/III/e03.png",\
'ex4':self.cwd + "/res/panel/level/III/e04.png", 'exSwitch':self.cwd + "/res/panel/level/III/exSwitch.png"}
self.exSymbol = self.cwd + "/res/panel/other/exSymbol.png"
def goLevel(self, level):
tempCount = 0
part = level['part']
chap = level['chap']
objLevel = level['objLevel']
# navigate to the first-level menu
while self.switch:
picTh = pictureFind.matchImg(adb.getScreen_std(), self.sign)
if picTh != None:
break
picAct = pictureFind.matchImg(adb.getScreen_std(), self.act)
if picAct != None:
posAct = picAct['result']
adb.click(posAct[0], posAct[1])
else:
picHome = pictureFind.matchImg(adb.getScreen_std(), self.home)
if picHome != None:
posHome = picHome['result']
adb.click(posHome[0], posHome[1])
picBattle = pictureFind.matchImg(adb.getScreen_std(), self.battle)
if picBattle != None:
posBattle = picBattle['result']
adb.click(posBattle[0], posBattle[1])
else:
continue
else:
tempCount += 1
if tempCount > 5:
print('unable to init')
return False
else:
continue
# second-level menu selection
if part == 'MAIN':
adb.click(305,750)
elif part == 'EX':
adb.click(1125,750)
elif part == 'RS' or part == 'PR':
adb.click(920,750)
else:
return False
sleep(1)
# third-level menu selection
# main story MAIN, resources RS, chips PR
if not self.chooseChap(chap):
return False
# stage selection
if part == 'EX':
for i in range(5):
picEx = pictureFind.matchImg(adb.getScreen_std(), self.exSymbol)
if picEx != None:
break
else:
return False
for i in range(5):
adb.click(720, 405) # the annihilation stage may get opened by accident, making it impossible to switch stages
sleep(1)
picExChap = pictureFind.matchImg(adb.getScreen_std(), self.exIV["exSwitch"])
if picExChap != None:
adb.click(picExChap['result'][0], picExChap['result'][1])
sleep(0.5)
break
else:
return False
for i in range(5):
screenshot = adb.getScreen_std()
picLevelOn = pictureFind.matchImg(screenshot,self.startA)
if picLevelOn != None:
return True
picExObj = pictureFind.matchImg(screenshot, self.exIV[objLevel])
if picExObj != None:
if objLevel == 'ex4':
adb.click(picExObj['result'][0], picExObj['result'][1] + 80)
else:
adb.click(picExObj['result'][0], picExObj['result'][1])
return True
else:
return False
else:
adb.speedToLeft()
for i in range(25):
if not self.switch:
break
levelOnScreen = pictureFind.levelOcr(adb.getScreen_std())
if levelOnScreen != None:
if objLevel in levelOnScreen:
adb.click(levelOnScreen[objLevel][0],levelOnScreen[objLevel][1])
picLevelOn = pictureFind.matchImg(adb.getScreen_std(),self.startA)
if picLevelOn != None:
return True
else:
adb.onePageRight()
else:
print(f'skip {objLevel}')
return False
else:
return False
def backToOneLayer(self, layerMark):
'Return to a given menu layer.'
startTime = time()
while pictureFind.matchImg(adb.getScreen_std(), layerMark, confidencevalue = 0.7) is None:
if not self.switch:
break
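# (100, 50) is presumably the back button in the top-left corner of the screen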
adb.click(100, 50)
if time() - startTime > 30:
return -1
return 0
def chooseChap(self,chap):
if chap == 'external' or chap == 'tempE':
picChap = pictureFind.matchImg(adb.getScreen_std(), self.III['ex'])
if picChap != None:
adb.click(picChap['result'][0], picChap['result'][1])
self.backToOneLayer(self.III['ex'])
adb.click(picChap['result'][0], picChap['result'][1])
return True
elif chap.isdigit():
# main story
nowChap = -1
if int(chap) <= 3:
adb.click(165, 160)
elif int(chap) <= 8:
adb.click(165, 595)
for eachChap in range(0, 9): # chapters 0-8
picChap = pictureFind.matchImg(adb.getScreen_std(), self.III[str(eachChap)])
if picChap != None:
nowChap = eachChap
break
if nowChap < 0:
adb.mainToLeft()
else:
if int(chap) == nowChap:
adb.click(1050, 400)
return True
elif int(chap) > nowChap:
for i in range(10):
if not self.switch:
break
picChap = pictureFind.matchImg(adb.getScreen_std(), self.III[chap])
if not self.switch:
break
elif picChap == None:
adb.mainToNextChap()
else:
adb.click(1050, 400)
return True
elif int(chap) < nowChap:
for i in range(10):
if not self.switch:
break
picChap = pictureFind.matchImg(adb.getScreen_std(), self.III[chap])
if not self.switch:
break
elif picChap == None:
adb.mainToPreChap()
else:
adb.click(1050, 400)
return True
else:
#各类资源
adb.swipe(1050, 400, 1440, 400, 200) # swipe left, to avoid the case where all stages are already unlocked
for i in range(20):
if not self.switch:
break
picChap = pictureFind.matchImg(adb.getScreen_std(), self.III[chap])
if not self.switch:
break
elif picChap == None:
adb.onePageRight()
else:
adb.click(picChap['result'][0],picChap['result'][1])
return True
return False
def runTimes(self, times = 1):
bootyName = None
if isinstance(times, dict):
bootyMode = True
bootyName = times['bootyName']
times = int(times['bootyNum'])
else:
bootyMode = False
times = int(times)
isInBattle = False
countStep = 0
totalCount = 0
bootyTotalCount = 0
errorCount = 0
sleepTime = None
isFirstWait = False
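# countStep state machine: 0 = waiting for the battle to start (startBpart), 1 = in battle,
# waiting for the result screen (endNormal), 2 = waiting to return to the stage screen (startApart);
# completing the cycle counts as one finished run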
while self.switch and self.switchB:
screenshot = adb.getScreen_std()
# check whether the auto-deploy (proxy command) toggle is ticked
picAutoOn = pictureFind.matchImg(screenshot, self.autoOn)
if picAutoOn == None and self.switch and self.switchB:
picAutoOff = pictureFind.matchImg(screenshot, self.autoOff)
if picAutoOff != None and self.switch and self.switchB:
posAutoOff = picAutoOff['result']
adb.click(posAutoOff[0], posAutoOff[1])
continue
#sleep(1)
for eachObj in self.listBattleImg:
if self.switch and self.switchB:
confidence = adb.getTagConfidence()
picInfo = pictureFind.matchImg(screenshot, eachObj, confidence)
#print(eachObj+ ':', picInfo)
if picInfo != None:
if 'startApart' in picInfo['obj']:
BInfo = pictureFind.matchImg(screenshot, self.startB, confidence)
# avoid a false match on the "行动" (operation) text in the bottom bar of the squad setup screen
if BInfo != None:
picInfo = BInfo
if picInfo['result'][1] < 270:
continue
if picInfo['obj'] == "error.png" or picInfo['obj'] == "giveup.png":
errorCount += 1
if errorCount > 2:
self.errorSignal.emit('schedule')
sleep(1)
while self.isWaitingUser:
sleep(5)
if not self.isRecovered:
self.switch = False
self.switchB = False
self.isRecovered = False
break
else:
errorCount = 0
if picInfo['obj'] == "startBpart.png":
isInBattle = True
isFirstWait = True
startTime = time()
else:
if sleepTime == None and isInBattle:
sleepTime = int(time() - startTime)
isInBattle = False
picPos = picInfo['result']
if countStep == 0:
if picInfo['obj'] == 'startBpart.png':
countStep += 1
elif countStep == 1:
if picInfo['obj'] == 'endNormal.png':
countStep += 1
if bootyMode:
lastPic = None
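# grab screenshots until two consecutive frames match, i.e. the drop screen has settled, before counting booty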
for i in range(10):
nowPic = adb.getScreen_std()
if lastPic is not None:
if pictureFind.matchImg(lastPic, nowPic, confidencevalue=0.99) != None:
break
lastPic = nowPic
sleep(1)
bootyTotalCount += self.BootyDetect.bootyCheck(bootyName, nowPic)
print(f'{bootyName} 应获得:{times} 实获得:{bootyTotalCount}')
elif countStep == 2:
if picInfo['obj'] == 'startApart.png':
countStep += 1
if countStep == 3:
countStep = 0
totalCount += 1
if (totalCount == times) and (not bootyMode):
self.switchB = False
return True
if (bootyTotalCount >= times) and bootyMode:
adb.click(picPos[0], picPos[1], isSleep = True)
self.switchB = False
return True
if picInfo['obj'] == "cancel.png":
if self.autoRecMed or self.autoRecStone:
screenshot = adb.getScreen_std()
medInfo = pictureFind.matchImg(screenshot, self.recMed)
stoneInfo = pictureFind.matchImg(screenshot, self.recStone)
confirmInfo = pictureFind.matchImg(screenshot, self.confirm)
if (not self.autoRecMed) and (self.autoRecStone):
if medInfo != None and stoneInfo == None:
adb.click(medInfo['result'][0]+350, medInfo['result'][1], isSleep= True)
screenshot = adb.getScreen_std()
medInfo = pictureFind.matchImg(screenshot, self.recMed)
stoneInfo = pictureFind.matchImg(screenshot, self.recStone)
if medInfo == None and stoneInfo != None:
if self.restStone >0:
adb.click(confirmInfo['result'][0], confirmInfo['result'][1], isSleep= True)
self.restStone -= 1
break
elif medInfo == None and stoneInfo != None:
if self.restStone >0:
adb.click(confirmInfo['result'][0], confirmInfo['result'][1], isSleep= True)
self.restStone -= 1
break
adb.click(picPos[0], picPos[1], isSleep = True)
self.switch = False
self.switchB = False
toast.broadcastMsg("ArkHelper", "理智耗尽", self.ico)
return False
else:
if self.autoRecMed:
if medInfo != None:
adb.click(confirmInfo['result'][0], confirmInfo['result'][1], isSleep= True)
break
if self.autoRecStone:
if stoneInfo != None:
if self.restStone >0:
adb.click(confirmInfo['result'][0], confirmInfo['result'][1], isSleep= True)
self.restStone -= 1
break
adb.click(picPos[0], picPos[1], isSleep = True)
self.switch = False
self.switchB = False
toast.broadcastMsg("ArkHelper", "理智耗尽", self.ico)
return False
else:
adb.click(picPos[0], picPos[1], isSleep = True)
self.switch = False
self.switchB = False
toast.broadcastMsg("ArkHelper", "理智耗尽", self.ico)
return False
elif picInfo['obj'] == "stoneLack.png":
adb.click(picPos[0], picPos[1], isSleep = True)
self.switch = False
self.switchB = False
toast.broadcastMsg("ArkHelper", "理智耗尽", self.ico)
return False
elif picInfo['obj'] == 'levelup.png':
lackTem = False
for eachTem in self.listBattleImg:
if eachTem['obj'] == 'stoneLack.png':
lackTem = eachTem
break
if lackTem:
picLackInfo = pictureFind.matchImg(screenshot, lackTem, 0.9)
if picLackInfo:
adb.click(picLackInfo['result'][0], picLackInfo['result'][1], isSleep = True)
self.switch = False
toast.broadcastMsg("ArkHelper", "理智耗尽", self.ico)
else:
adb.click(picPos[0], picPos[1], isSleep = True)
if picInfo['obj'] == 'startApartOF.png':
OFend = pictureFind.matchImg(adb.getScreen_std(), self.cwd + '/res/act/OFend.png', 0.8)
if OFend != None:
self.switch = False
toast.broadcastMsg("ArkHelper", | |
" "), justify='center',
fill=POICOLOR, tag="#POI")
def unselect_allpoint(self):
""" Calling process that remove additionnal highlight on all selected nodes. """
FillMapWithNodes(self).node_selection_inactiveall()
def delete_point(self, n):
""" KnownPoint deletion process. """
FillMapWithNodes(self).delete_point(n.rsplit(' (')[0])
def onclickleft(self, event):
""" Left Mouse Click bind on the World map. """
global HOST, node_file, node_list
menu0 = Menu(self, tearoff=0, fg="black", bg=BGC, font='TkFixedFont 7') # node overlap list menu
menu1 = Menu(self, tearoff=0, fg="black", bg=BGC, font='TkFixedFont 7')
# search for overlapping nodes
overlap_range = ICONSIZE * 4
overlap_rect = (self.canvas.canvasx(event.x) - overlap_range), (self.canvas.canvasy(event.y) - overlap_range), (
self.canvas.canvasx(event.x) + overlap_range), (self.canvas.canvasy(event.y) + overlap_range)
node_overlap_match = self.canvas.find_enclosed(*overlap_rect)
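# canvas items lying completely inside the square of half-width overlap_range around the click;
# more than one hit means several node icons overlap at this spot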
overlap_list = []
for item_o in list(node_overlap_match):
if "$#" not in self.canvas.gettags(self.canvas.find_withtag(item_o))[0]:
overlap_list.append(item_o)
if len(node_overlap_match) > 1 and len(overlap_list) != 1: # node icon overlap found, displays menu0
for el1, el2 in enumerate(node_overlap_match):
if "$#" not in str(self.canvas.gettags(el2)): # dont display node highlight tags
HOST = self.canvas.gettags(self.canvas.find_withtag(el2))[0]
# mykeys = ['url', 'id', 'lat', 'lon', 'snr']
# n_field 0 1 2 3 4
n_field = HOST.rsplit("$", 4)
cbg = FillMapWithNodes.color_variant(snr=n_field[4])
dfg = FillMapWithNodes.get_font_color(cbg)
# check if node is already in the TDoA node listing
if len([el for el in fulllist if n_field[1] == el.rsplit("$", 3)[2]]) != 1:
name = n_field[1]
else:
name = "✔ " + n_field[1]
menu0.add_command(label=name, background=cbg, foreground=dfg,
command=lambda x=HOST: self.create_node_menu(x, event.x_root, event.y_root,
menu1))
else:
pass
menu0.tk_popup(event.x_root, event.y_root)
else:
HOST = self.canvas.gettags(self.canvas.find_withtag(CURRENT))[0]
self.create_node_menu(HOST, event.x_root, event.y_root, menu1)
def create_node_menu(self, kiwinodetag, popx, popy, menu):
n_field = kiwinodetag.rsplit("$", 5)
matches = [el for el in fulllist if n_field[1] == el.rsplit("$", 3)[2]]
cbg = FillMapWithNodes.color_variant(snr=n_field[4])
dfg = FillMapWithNodes.get_font_color(cbg)
# show IQ spectrogram in GUI (PlotIQ mode 0)
PlotIQ(node_file[node_list.index(n_field[1].replace("/", ""))], 0, 0).run()
if len(matches) != 1:
menu.add_command(label="Add " + n_field[1] + " for TDoA process", background=cbg, foreground=dfg,
font="TkFixedFont 7 bold", command=lambda *args: self.populate("add", n_field))
elif len(matches) == 1:
menu.add_command(label="Remove " + n_field[1] + " from TDoA process]", background=cbg, foreground=dfg,
font="TkFixedFont 7 bold", command=lambda: self.populate("del", n_field))
menu.tk_popup(int(popx), int(popy)) # popup placement // node icon
def populate(self, action, sel_node_tag):
""" TDoA listing node populate/depopulate process. """
if action == "add":
if len(fulllist) < 6:
fulllist.append(
sel_node_tag[0].rsplit(':')[0] + "$" + sel_node_tag[0].rsplit(':')[1] + "$" + sel_node_tag[
1].replace("/", ""))
FillMapWithNodes(self).node_sel_active(sel_node_tag[0])
else:
tkMessageBox.showinfo(title=" ¯\\_(ツ)_/¯", message="6 nodes Maximum !")
elif action == "del":
fulllist.remove(
sel_node_tag[0].rsplit(':')[0] + "$" + sel_node_tag[0].rsplit(':')[1] + "$" + sel_node_tag[1].replace(
"/", ""))
FillMapWithNodes(self).node_selection_inactive(sel_node_tag[0])
if fulllist:
APP.title(VERSION + "| " + FREQUENCY + ALEID + " - Selected nodes [" + str(
len(fulllist)) + "] : " + '/'.join(str(p).rsplit('$')[2] for p in fulllist))
else:
APP.title(VERSION + "| " + FREQUENCY + ALEID)
def move_from(self, event):
""" Move from. """
self.canvas.scan_mark(event.x, event.y)
def move_to(self, event):
""" Move to. """
if 'HOST' in globals() and "current" not in self.canvas.gettags(self.canvas.find_withtag(CURRENT))[0]:
pass
elif "current" in self.canvas.gettags(self.canvas.find_withtag(CURRENT))[0]:
self.canvas.scan_dragto(event.x, event.y, gain=1)
self.show_image() # redraw the image
def wheel(self, event):
""" Routine for mouse wheel actions. """
x_eve = self.canvas.canvasx(event.x)
y_eve = self.canvas.canvasy(event.y)
global image_scale
bbox = self.canvas.bbox(self.container) # get image area
if bbox[0] < x_eve < bbox[2] and bbox[1] < y_eve < bbox[3]:
pass # Ok! Inside the image
else:
return # zoom only inside image area
scale = 1.0
# Respond to Linux (event.num) or Windows (event.delta) wheel event
if event.num == 5 or event.delta == -120: # scroll down
i = min(self.width, self.height)
if int(i * self.imscale) < 2000:
return # block zoom if image is less than 2000 pixels
self.imscale /= self.delta
scale /= self.delta
if event.num == 4 or event.delta == 120: # scroll up
i = min(self.canvas.winfo_width(), self.canvas.winfo_height())
if i < self.imscale:
return # 1 pixel is bigger than the visible area
self.imscale *= self.delta
scale *= self.delta
# rescale all canvas objects
# scale = 2.0 or 0.5
image_scale = self.imscale
# APP.gui.label04.configure(text="Map Zoom : " + str(int(image_scale)))
self.canvas.scale('all', x_eve, y_eve, scale, scale)
# self.canvas.scale('')
self.show_image()
def show_image(self, event=None):
""" Creating the canvas with the picture. """
global b_box2
b_box1 = self.canvas.bbox(self.container) # get image area
# Remove 1 pixel shift at the sides of the bbox1
b_box1 = (b_box1[0] + 1, b_box1[1] + 1, b_box1[2] - 1, b_box1[3] - 1)
b_box2 = (self.canvas.canvasx(0), # get visible area of the canvas
self.canvas.canvasy(0),
self.canvas.canvasx(self.canvas.winfo_width()),
self.canvas.canvasy(self.canvas.winfo_height()))
bbox = [min(b_box1[0], b_box2[0]), min(b_box1[1], b_box2[1]), # get scroll region box
max(b_box1[2], b_box2[2]), max(b_box1[3], b_box2[3])]
if bbox[0] == b_box2[0] and bbox[2] == b_box2[2]: # whole image in the visible area
bbox[0] = b_box1[0]
bbox[2] = b_box1[2]
if bbox[1] == b_box2[1] and bbox[3] == b_box2[3]: # whole image in the visible area
bbox[1] = b_box1[1]
bbox[3] = b_box1[3]
self.canvas.configure(scrollregion=bbox) # set scroll region
x_1 = max(b_box2[0] - b_box1[0], 0) # get coordinates (x1,y1,x2,y2) of the image tile
y_1 = max(b_box2[1] - b_box1[1], 0)
x_2 = min(b_box2[2], b_box1[2]) - b_box1[0]
y_2 = min(b_box2[3], b_box1[3]) - b_box1[1]
if int(x_2 - x_1) > 0 and int(y_2 - y_1) > 0: # show image if it is in the visible area
x = min(int(x_2 / self.imscale), self.width) # sometimes it is larger on 1 pixel...
y = min(int(y_2 / self.imscale), self.height) # ...and sometimes not
image = self.image.crop((int(x_1 / self.imscale), int(y_1 / self.imscale), x, y))
imagetk = ImageTk.PhotoImage(image.resize((int(x_2 - x_1), int(y_2 - y_1))))
imageid = self.canvas.create_image(max(b_box2[0], b_box1[0]), max(b_box2[1], b_box1[1]),
anchor='nw', image=imagetk)
self.canvas.lower(imageid) # set image into background
self.canvas.imagetk = imagetk # keep an extra reference to prevent garbage-collection
class MainWindow(Frame):
""" GUI design definitions. """
def __init__(self, parent):
Frame.__init__(self, parent)
self.member1 = GuiCanvas(parent)
ReadKnownPointFile().start()
global image_scale, node_file
global map_preset, tdoa_in_progress, open_pdf
global lat_min_map, lat_max_map, lon_min_map, lon_max_map
dfgc = '#a3a3a3' # GUI (disabled) foreground color
image_scale = 1
la_f = Font(family="TkFixedFont", size=7, weight="bold")
map_preset = 0
tdoa_in_progress = 0
open_pdf = IntVar(self, value=1)
# Control panel background
self.label0 = Label(parent)
self.label0.place(relx=0, rely=0.64, relheight=0.4, relwidth=1)
self.label0.configure(bg=BGC, fg=FGC, width=214)
# Compute button
self.compute_button = Button(parent)
self.compute_button.place(relx=0.61, rely=0.65, height=64, relwidth=0.115)
self.compute_button.configure(activebackground="#d9d9d9", activeforeground="#000000", bg='#d9d9d9',
disabledforeground=dfgc, fg="#000000", highlightbackground="#d9d9d9",
highlightcolor="#000000", pady="0", text="Compute",
command=self.start_stop_tdoa)
# Trim_iq button
self.trim_iq_button = Button(parent)
self.trim_iq_button.place(relx=0.61, rely=0.75, height=24, relwidth=0.115)
self.trim_iq_button.configure(activebackground="#d9d9d9", activeforeground="#000000", bg="lightblue",
disabledforeground=dfgc, fg="#000000", highlightbackground="#d9d9d9",
highlightcolor="#000000", pady="0", text="Run trim_iq.py",
command=TrimIQ(os.getcwd()).start, state="normal")
# Purge node listing button
self.purge_button = Button(parent)
self.purge_button.place(relx=0.61, rely=0.8, height=24, relwidth=0.115)
self.purge_button.configure(activebackground="#d9d9d9", activeforeground="#000000", bg="orange",
disabledforeground=dfgc, fg="#000000", highlightbackground="#d9d9d9",
highlightcolor="#000000", pady="0", text="Purge Nodes", command=self.purgenode,
state="normal")
# Restart button
self.restart_button = Button(parent)
self.restart_button.place(relx=0.61, rely=0.85, height=24, relwidth=0.115)
self.restart_button.configure(activebackground="#d9d9d9", activeforeground="#000000", bg="red",
disabledforeground=dfgc, fg="#000000", highlightbackground="#d9d9d9",
highlightcolor="#000000", pady="0", text="Restart GUI", command=Restart().run,
state="normal")
# Auto open TDoA PDF result file
self.open_pdf_checkbox = Checkbutton(parent)
self.open_pdf_checkbox.place(relx=0.62, rely=0.9, height=21, relwidth=0.11)
self.open_pdf_checkbox.configure(bg=BGC, fg=FGC, activebackground=BGC, activeforeground=FGC,
font="TkFixedFont 8", width=214, selectcolor=BGC, text="auto-open result",
anchor="w", variable=open_pdf, command=None)
# Known places search textbox
self.choice = Entry(parent)
self.choice.place(relx=0.01, rely=0.95, height=21, relwidth=0.18)
self.choice.insert(0, "TDoA map city/site search here")
self.listbox = Listbox(parent)
self.listbox.place(relx=0.2, rely=0.95, height=21, relwidth=0.3)
# Known places found text label
self.label3 = Label(parent)
self.label3.place(relx=0.54, rely=0.95, height=21, relwidth=0.3)
self.label3.configure(bg=BGC, font="TkFixedFont", fg=FGC, width=214, text="", anchor="w")
# Console window
self.console_window = Text(parent)
self.console_window.place(relx=0.005, rely=0.65, relheight=0.285, relwidth=0.6)
self.console_window.configure(bg=CONS_B, font="TkTextFont", fg=CONS_F, highlightbackground=BGC,
highlightcolor=FGC, insertbackground=FGC, selectbackground="#c4c4c4",
selectforeground=FGC, undo="1", width=970, wrap="word")
# plot IQ preview window
self.plot_iq_button = Button(parent, command=lambda: APP.gui.openinbrowser(
[tag_list[tag_list.index(x)].rsplit("$", 4)[0] for x in tag_list if CLICKEDNODE in x],
''.join(re.match(r"(\d+.\d+)", FREQUENCY).group(1))))
self.plot_iq_button.place(relx=0.73, rely=0.65, height=240, width=320)
# Adding some texts to console window at program start
self.writelog("This is " + VERSION + ", a GUI written for python 2/3 with Tk")
# GUI topbar menus
menubar = Menu(self)
parent.config(menu=menubar)
menu_1 = Menu(menubar, tearoff=0)
menubar.add_cascade(label="Mapbox style", menu=menu_1)
menu_1.add_command(label="streets", command=lambda *args: self.mapbox_style("streets-v11"))
menu_1.add_command(label="outdoors", command=lambda *args: self.mapbox_style("outdoors-v11"))
menu_1.add_command(label="light", command=lambda *args: self.mapbox_style("light-v10"))
menu_1.add_command(label="dark", command=lambda *args: self.mapbox_style("dark-v10"))
menu_1.add_command(label="satellite", command=lambda *args: self.mapbox_style("satellite-v9"))
menu_1.add_command(label="satellite-streets", command=lambda *args: self.mapbox_style("satellite-streets-v11"))
menu_2 = Menu(menubar, tearoff=0)
menubar.add_cascade(label="Map Presets", menu=menu_2)
menu_2.add_command(label="Europe", command=lambda *args: self.map_preset("EU"))
menu_2.add_command(label="Africa", command=lambda *args: self.map_preset("AF"))
menu_2.add_command(label="Middle-East", command=lambda *args: self.map_preset("ME"))
menu_2.add_command(label="South Asia", command=lambda *args: self.map_preset("SAS"))
menu_2.add_command(label="South-East Asia", command=lambda *args: self.map_preset("SEAS"))
menu_2.add_command(label="East Asia", command=lambda *args: self.map_preset("EAS"))
menu_2.add_command(label="North America", command=lambda *args: self.map_preset("NAM"))
menu_2.add_command(label="Central America", command=lambda *args: self.map_preset("CAM"))
menu_2.add_command(label="South America", command=lambda *args: self.map_preset("SAM"))
menu_2.add_command(label="Oceania", command=lambda *args: self.map_preset("O"))
menu_2.add_command(label="West Russia", command=lambda *args: self.map_preset("WR"))
menu_2.add_command(label="East Russia", command=lambda *args: self.map_preset("ER"))
menu_2.add_command(label="USA", command=lambda *args: self.map_preset("US"))
menu_2.add_command(label="World (use with caution)", command=lambda *args: self.map_preset("W"))
# TDoA settings menu
menu_3 = Menu(menubar, tearoff=0)
menubar.add_cascade(label="TDoA settings", menu=menu_3)
sm8 = Menu(menu_3, tearoff=0)
sm9 | |
initialization_vector=b'\x39\x48\x74\x32\x49\x28\x34\xA3',
derivation_data=b'\xFA\xD9\x8B\x6A\xCA\x6D\x87\xDD'
)
)
args = (utils.BytearrayStream(), )
self.assertRaisesRegex(
ValueError,
"invalid payload missing template attribute",
payload.write,
*args
)
def test_equal_on_equal(self):
"""
Test that the equality operator returns True when comparing two
DeriveKey request payloads with the same data.
"""
a = payloads.DeriveKeyRequestPayload()
b = payloads.DeriveKeyRequestPayload()
self.assertTrue(a == b)
self.assertTrue(b == a)
a = payloads.DeriveKeyRequestPayload(
object_type=enums.ObjectType.SYMMETRIC_KEY,
unique_identifiers=[
'fb4b5b9c-6188-4c63-8142-fe9c328129fc',
'5c9b81ef-4ee5-42cd-ba2d-c002fdd0c7b3',
'1703250b-4d40-4de2-93a0-c494a1d4ae40'
],
derivation_method=enums.DerivationMethod.HASH,
derivation_parameters=attributes.DerivationParameters(
cryptographic_parameters=attributes.CryptographicParameters(
hashing_algorithm=enums.HashingAlgorithm.SHA_256
),
initialization_vector=b'\x39\x48\x74\x32\x49\x28\x34\xA3',
derivation_data=b'\xFA\xD9\x8B\x6A\xCA\x6D\x87\xDD'
),
template_attribute=objects.TemplateAttribute(
attributes=[
objects.Attribute(
attribute_name=objects.Attribute.AttributeName(
'Cryptographic Algorithm'
),
attribute_value=primitives.Enumeration(
enums.CryptographicAlgorithm,
value=enums.CryptographicAlgorithm.AES,
tag=enums.Tags.CRYPTOGRAPHIC_ALGORITHM
)
),
objects.Attribute(
attribute_name=objects.Attribute.AttributeName(
'Cryptographic Length'
),
attribute_value=primitives.Integer(
value=128,
tag=enums.Tags.CRYPTOGRAPHIC_LENGTH
)
)
]
)
)
b = payloads.DeriveKeyRequestPayload(
object_type=enums.ObjectType.SYMMETRIC_KEY,
unique_identifiers=[
'fb4b5b9c-6188-4c63-8142-fe9c328129fc',
'5c9b81ef-4ee5-42cd-ba2d-c002fdd0c7b3',
'1703250b-4d40-4de2-93a0-c494a1d4ae40'
],
derivation_method=enums.DerivationMethod.HASH,
derivation_parameters=attributes.DerivationParameters(
cryptographic_parameters=attributes.CryptographicParameters(
hashing_algorithm=enums.HashingAlgorithm.SHA_256
),
initialization_vector=b'\x39\x48\x74\x32\x49\x28\x34\xA3',
derivation_data=b'\xFA\xD9\x8B\x6A\xCA\x6D\x87\xDD'
),
template_attribute=objects.TemplateAttribute(
attributes=[
objects.Attribute(
attribute_name=objects.Attribute.AttributeName(
'Cryptographic Algorithm'
),
attribute_value=primitives.Enumeration(
enums.CryptographicAlgorithm,
value=enums.CryptographicAlgorithm.AES,
tag=enums.Tags.CRYPTOGRAPHIC_ALGORITHM
)
),
objects.Attribute(
attribute_name=objects.Attribute.AttributeName(
'Cryptographic Length'
),
attribute_value=primitives.Integer(
value=128,
tag=enums.Tags.CRYPTOGRAPHIC_LENGTH
)
)
]
)
)
self.assertTrue(a == b)
self.assertTrue(b == a)
def test_equal_on_not_equal_object_type(self):
"""
Test that the equality operator returns False when comparing two
DeriveKey request payloads with different object types.
"""
a = payloads.DeriveKeyRequestPayload(
object_type=enums.ObjectType.SYMMETRIC_KEY
)
b = payloads.DeriveKeyRequestPayload(
object_type=enums.ObjectType.SECRET_DATA
)
self.assertFalse(a == b)
self.assertFalse(b == a)
def test_equal_on_not_equal_unique_identifiers(self):
"""
Test that the equality operator returns False when comparing two
DeriveKey request payloads with different sets of unique identifiers.
"""
a = payloads.DeriveKeyRequestPayload(
unique_identifiers=['fb4b5b9c-6188-4c63-8142-fe9c328129fc']
)
b = payloads.DeriveKeyRequestPayload(
unique_identifiers=['<KEY>']
)
self.assertFalse(a == b)
self.assertFalse(b == a)
a = payloads.DeriveKeyRequestPayload(
unique_identifiers=[
'fb4b5b9c-6188-4c63-8142-fe9c328129fc',
'<KEY>',
'1703250b-4d40-4de2-93a0-c494a1d4ae40'
]
)
b = payloads.DeriveKeyRequestPayload(
unique_identifiers=[
'1703250b-4d40-4de2-93a0-c494a1d4ae40',
'<KEY>',
'fb4b5b9c-6188-4c63-8142-fe9c328129fc'
]
)
self.assertFalse(a == b)
self.assertFalse(b == a)
a = payloads.DeriveKeyRequestPayload(
unique_identifiers=[
'fb4b5b9c-6188-4c63-8142-fe9c328129fc',
'5c9b81ef-4ee5-42cd-ba2d-c002fdd0c7b3',
'1703250b-4d40-4de2-93a0-c494a1d4ae40'
]
)
b = payloads.DeriveKeyRequestPayload(unique_identifiers=[])
self.assertFalse(a == b)
self.assertFalse(b == a)
def test_equal_on_not_equal_derivation_method(self):
"""
Test that the equality operator returns False when comparing two
DeriveKey request payloads with different derivation methods.
"""
a = payloads.DeriveKeyRequestPayload(
derivation_method=enums.DerivationMethod.HASH
)
b = payloads.DeriveKeyRequestPayload(
derivation_method=enums.DerivationMethod.PBKDF2
)
self.assertFalse(a == b)
self.assertFalse(b == a)
def test_equal_on_not_equal_derivation_parameters(self):
"""
Test that the equality operator returns False when comparing two
DeriveKey request payloads with different derivation parameters.
"""
a = payloads.DeriveKeyRequestPayload(
derivation_parameters=attributes.DerivationParameters(
cryptographic_parameters=attributes.CryptographicParameters(
hashing_algorithm=enums.HashingAlgorithm.SHA_256
),
initialization_vector=b'\x39\x48\x74\x32\x49\x28\x34\xA3',
derivation_data=b'\xFA\xD9\x8B\x6A\xCA\x6D\x87\xDD'
)
)
b = payloads.DeriveKeyRequestPayload(
derivation_parameters=attributes.DerivationParameters(
cryptographic_parameters=attributes.CryptographicParameters(
hashing_algorithm=enums.HashingAlgorithm.SHA_1
),
initialization_vector=b'\xFA\xD9\x8B\x6A\xCA\x6D\x87\xDD',
derivation_data=b'\x39\x48\x74\x32\x49\x28\x34\xA3'
)
)
self.assertFalse(a == b)
self.assertFalse(b == a)
a = payloads.DeriveKeyRequestPayload(
derivation_parameters=attributes.DerivationParameters(
cryptographic_parameters=attributes.CryptographicParameters(
hashing_algorithm=enums.HashingAlgorithm.SHA_256
),
initialization_vector=b'\x39\x48\x74\x32\x49\x28\x34\xA3',
derivation_data=b'\xFA\xD9\x8B\x6A\xCA\x6D\x87\xDD'
)
)
b = payloads.DeriveKeyRequestPayload(
derivation_parameters=attributes.DerivationParameters()
)
self.assertFalse(a == b)
self.assertFalse(b == a)
a = payloads.DeriveKeyRequestPayload(derivation_parameters=None)
b = payloads.DeriveKeyRequestPayload(
derivation_parameters=attributes.DerivationParameters(
cryptographic_parameters=attributes.CryptographicParameters(
hashing_algorithm=enums.HashingAlgorithm.SHA_256
),
initialization_vector=b'\x39\x48\x74\x32\x49\x28\x34\xA3',
derivation_data=b'\xFA\xD9\x8B\x6A\xCA\x6D\x87\xDD'
)
)
self.assertFalse(a == b)
self.assertFalse(b == a)
def test_equal_on_not_equal_template_attribute(self):
"""
Test that the equality operator returns False when comparing two
DeriveKey request payloads with different template attributes.
"""
a = payloads.DeriveKeyRequestPayload(
template_attribute=objects.TemplateAttribute(
attributes=[
objects.Attribute(
attribute_name=objects.Attribute.AttributeName(
'Cryptographic Algorithm'
),
attribute_value=primitives.Enumeration(
enums.CryptographicAlgorithm,
value=enums.CryptographicAlgorithm.AES,
tag=enums.Tags.CRYPTOGRAPHIC_ALGORITHM
)
),
objects.Attribute(
attribute_name=objects.Attribute.AttributeName(
'Cryptographic Length'
),
attribute_value=primitives.Integer(
value=128,
tag=enums.Tags.CRYPTOGRAPHIC_LENGTH
)
)
]
)
)
b = payloads.DeriveKeyRequestPayload(
template_attribute=objects.TemplateAttribute(
attributes=[
objects.Attribute(
attribute_name=objects.Attribute.AttributeName(
'Cryptographic Algorithm'
),
attribute_value=primitives.Enumeration(
enums.CryptographicAlgorithm,
value=enums.CryptographicAlgorithm.BLOWFISH,
tag=enums.Tags.CRYPTOGRAPHIC_ALGORITHM
)
),
objects.Attribute(
attribute_name=objects.Attribute.AttributeName(
'Cryptographic Length'
),
attribute_value=primitives.Integer(
value=64,
tag=enums.Tags.CRYPTOGRAPHIC_LENGTH
)
)
]
)
)
self.assertFalse(a == b)
self.assertFalse(b == a)
a = payloads.DeriveKeyRequestPayload(
template_attribute=objects.TemplateAttribute(
attributes=[
objects.Attribute(
attribute_name=objects.Attribute.AttributeName(
'Cryptographic Algorithm'
),
attribute_value=primitives.Enumeration(
enums.CryptographicAlgorithm,
value=enums.CryptographicAlgorithm.AES,
tag=enums.Tags.CRYPTOGRAPHIC_ALGORITHM
)
),
objects.Attribute(
attribute_name=objects.Attribute.AttributeName(
'Cryptographic Length'
),
attribute_value=primitives.Integer(
value=128,
tag=enums.Tags.CRYPTOGRAPHIC_LENGTH
)
)
]
)
)
b = payloads.DeriveKeyRequestPayload(
template_attribute=objects.TemplateAttribute()
)
self.assertFalse(a == b)
self.assertFalse(b == a)
a = payloads.DeriveKeyRequestPayload(template_attribute=None)
b = payloads.DeriveKeyRequestPayload(
template_attribute=objects.TemplateAttribute(
attributes=[
objects.Attribute(
attribute_name=objects.Attribute.AttributeName(
'Cryptographic Algorithm'
),
attribute_value=primitives.Enumeration(
enums.CryptographicAlgorithm,
value=enums.CryptographicAlgorithm.AES,
tag=enums.Tags.CRYPTOGRAPHIC_ALGORITHM
)
),
objects.Attribute(
attribute_name=objects.Attribute.AttributeName(
'Cryptographic Length'
),
attribute_value=primitives.Integer(
value=128,
tag=enums.Tags.CRYPTOGRAPHIC_LENGTH
)
)
]
)
)
self.assertFalse(a == b)
self.assertFalse(b == a)
def test_equal_on_type_mismatch(self):
"""
Test that the equality operator returns False when comparing two
DeriveKey request payloads with different types.
"""
a = payloads.DeriveKeyRequestPayload()
b = 'invalid'
self.assertFalse(a == b)
self.assertFalse(b == a)
def test_not_equal_on_equal(self):
"""
Test that the inequality operator returns False when comparing two
DeriveKey request payloads with the same data.
"""
a = payloads.DeriveKeyRequestPayload()
b = payloads.DeriveKeyRequestPayload()
self.assertFalse(a != b)
self.assertFalse(b != a)
a = payloads.DeriveKeyRequestPayload(
object_type=enums.ObjectType.SYMMETRIC_KEY,
unique_identifiers=[
'fb4b5b9c-6188-4c63-8142-fe9c328129fc',
'5c9b81ef-4ee5-42cd-ba2d-c002fdd0c7b3',
'1703250b-4d40-4de2-93a0-c494a1d4ae40'
],
derivation_method=enums.DerivationMethod.HASH,
derivation_parameters=attributes.DerivationParameters(
cryptographic_parameters=attributes.CryptographicParameters(
hashing_algorithm=enums.HashingAlgorithm.SHA_256
),
initialization_vector=b'\x39\x48\x74\x32\x49\x28\x34\xA3',
derivation_data=b'\xFA\xD9\x8B\x6A\xCA\x6D\x87\xDD'
),
template_attribute=objects.TemplateAttribute(
attributes=[
objects.Attribute(
attribute_name=objects.Attribute.AttributeName(
'Cryptographic Algorithm'
),
attribute_value=primitives.Enumeration(
enums.CryptographicAlgorithm,
value=enums.CryptographicAlgorithm.AES,
tag=enums.Tags.CRYPTOGRAPHIC_ALGORITHM
)
),
objects.Attribute(
attribute_name=objects.Attribute.AttributeName(
'Cryptographic Length'
),
attribute_value=primitives.Integer(
value=128,
tag=enums.Tags.CRYPTOGRAPHIC_LENGTH
)
)
]
)
)
b = payloads.DeriveKeyRequestPayload(
object_type=enums.ObjectType.SYMMETRIC_KEY,
unique_identifiers=[
'fb4b5b9c-6188-4c63-8142-fe9c328129fc',
'5c9b81ef-4ee5-42cd-ba2d-c002fdd0c7b3',
'1703250b-4d40-4de2-93a0-c494a1d4ae40'
],
derivation_method=enums.DerivationMethod.HASH,
derivation_parameters=attributes.DerivationParameters(
cryptographic_parameters=attributes.CryptographicParameters(
hashing_algorithm=enums.HashingAlgorithm.SHA_256
),
initialization_vector=b'\x39\x48\x74\x32\x49\x28\x34\xA3',
derivation_data=b'\xFA\xD9\x8B\x6A\xCA\x6D\x87\xDD'
),
template_attribute=objects.TemplateAttribute(
attributes=[
objects.Attribute(
attribute_name=objects.Attribute.AttributeName(
'Cryptographic Algorithm'
),
attribute_value=primitives.Enumeration(
enums.CryptographicAlgorithm,
value=enums.CryptographicAlgorithm.AES,
tag=enums.Tags.CRYPTOGRAPHIC_ALGORITHM
)
),
objects.Attribute(
attribute_name=objects.Attribute.AttributeName(
'Cryptographic Length'
),
attribute_value=primitives.Integer(
value=128,
tag=enums.Tags.CRYPTOGRAPHIC_LENGTH
)
)
]
)
)
self.assertFalse(a != b)
self.assertFalse(b != a)
def test_not_equal_on_not_equal_object_type(self):
"""
Test that the inequality operator returns True when comparing two
DeriveKey request payloads with different object types.
"""
a = payloads.DeriveKeyRequestPayload(
object_type=enums.ObjectType.SYMMETRIC_KEY
)
b = payloads.DeriveKeyRequestPayload(
object_type=enums.ObjectType.SECRET_DATA
)
self.assertTrue(a != b)
self.assertTrue(b != a)
def test_not_equal_on_not_equal_unique_identifiers(self):
"""
Test that the inequality operator returns True when comparing two
DeriveKey request payloads with different sets of unique identifiers.
"""
a = payloads.DeriveKeyRequestPayload(
unique_identifiers=['fb4b5b9c-6188-4c63-8142-fe9c328129fc']
)
b = payloads.DeriveKeyRequestPayload(
unique_identifiers=['<KEY>']
)
self.assertTrue(a != b)
self.assertTrue(b != a)
a = payloads.DeriveKeyRequestPayload(
unique_identifiers=[
'fb4b5b9c-6188-4c63-8142-fe9c328129fc',
'5c9b81ef-4ee5-<KEY>',
'1703250b-4d40-4de2-93a0-c494a1d4ae40'
]
)
b = payloads.DeriveKeyRequestPayload(
unique_identifiers=[
'1703250b-4d40-4de2-93a0-c494a1d4ae40',
'<KEY>',
'fb4b5b9c-6188-4c63-8142-fe9c328129fc'
]
)
self.assertTrue(a != b)
self.assertTrue(b != a)
a = payloads.DeriveKeyRequestPayload(
unique_identifiers=[
'<KEY>',
'<KEY>',
'1703250b-4d40-4de2-93a0-c494a1d4ae40'
]
)
b = payloads.DeriveKeyRequestPayload(unique_identifiers=[])
self.assertTrue(a != b)
self.assertTrue(b != a)
def test_not_equal_on_not_equal_derivation_method(self):
"""
Test that the inequality operator returns True when comparing two
DeriveKey request payloads with different derivation methods.
"""
a = payloads.DeriveKeyRequestPayload(
derivation_method=enums.DerivationMethod.HASH
)
b = payloads.DeriveKeyRequestPayload(
derivation_method=enums.DerivationMethod.PBKDF2
)
self.assertTrue(a != b)
self.assertTrue(b != a)
def test_not_equal_on_not_equal_derivation_parameters(self):
"""
Test that the inequality operator returns True when comparing two
DeriveKey request payloads with different derivation parameters.
"""
a = payloads.DeriveKeyRequestPayload(
derivation_parameters=attributes.DerivationParameters(
cryptographic_parameters=attributes.CryptographicParameters(
hashing_algorithm=enums.HashingAlgorithm.SHA_256
),
initialization_vector=b'\x39\x48\x74\x32\x49\x28\x34\xA3',
derivation_data=b'\xFA\xD9\x8B\x6A\xCA\x6D\x87\xDD'
)
)
b = payloads.DeriveKeyRequestPayload(
derivation_parameters=attributes.DerivationParameters(
cryptographic_parameters=attributes.CryptographicParameters(
hashing_algorithm=enums.HashingAlgorithm.SHA_1
),
initialization_vector=b'\xFA\xD9\x8B\x6A\xCA\x6D\x87\xDD',
derivation_data=b'\x39\x48\x74\x32\x49\x28\x34\xA3'
)
)
self.assertTrue(a != b)
self.assertTrue(b != a)
a = payloads.DeriveKeyRequestPayload(
derivation_parameters=attributes.DerivationParameters(
cryptographic_parameters=attributes.CryptographicParameters(
hashing_algorithm=enums.HashingAlgorithm.SHA_256
),
initialization_vector=b'\x39\x48\x74\x32\x49\x28\x34\xA3',
derivation_data=b'\xFA\xD9\x8B\x6A\xCA\x6D\x87\xDD'
)
)
b = payloads.DeriveKeyRequestPayload(
derivation_parameters=attributes.DerivationParameters()
)
self.assertTrue(a != b)
self.assertTrue(b != a)
a = payloads.DeriveKeyRequestPayload(derivation_parameters=None)
b = payloads.DeriveKeyRequestPayload(
derivation_parameters=attributes.DerivationParameters(
cryptographic_parameters=attributes.CryptographicParameters(
hashing_algorithm=enums.HashingAlgorithm.SHA_256
),
initialization_vector=b'\x39\x48\x74\x32\x49\x28\x34\xA3',
derivation_data=b'\xFA\xD9\x8B\x6A\xCA\x6D\x87\xDD'
)
)
self.assertTrue(a != b)
self.assertTrue(b != a)
def test_not_equal_on_not_equal_template_attribute(self):
"""
Test that the inequality operator returns True when comparing two
DeriveKey request payloads with different template attributes.
"""
a = payloads.DeriveKeyRequestPayload(
template_attribute=objects.TemplateAttribute(
attributes=[
objects.Attribute(
attribute_name=objects.Attribute.AttributeName(
'Cryptographic Algorithm'
),
attribute_value=primitives.Enumeration(
enums.CryptographicAlgorithm,
value=enums.CryptographicAlgorithm.AES,
tag=enums.Tags.CRYPTOGRAPHIC_ALGORITHM
)
),
objects.Attribute(
attribute_name=objects.Attribute.AttributeName(
'Cryptographic Length'
),
attribute_value=primitives.Integer(
value=128,
tag=enums.Tags.CRYPTOGRAPHIC_LENGTH
)
)
]
)
)
b = payloads.DeriveKeyRequestPayload(
template_attribute=objects.TemplateAttribute(
attributes=[
objects.Attribute(
attribute_name=objects.Attribute.AttributeName(
'Cryptographic Algorithm'
),
attribute_value=primitives.Enumeration(
enums.CryptographicAlgorithm,
value=enums.CryptographicAlgorithm.BLOWFISH,
tag=enums.Tags.CRYPTOGRAPHIC_ALGORITHM
)
),
objects.Attribute(
attribute_name=objects.Attribute.AttributeName(
'Cryptographic Length'
),
attribute_value=primitives.Integer(
value=64,
tag=enums.Tags.CRYPTOGRAPHIC_LENGTH
)
)
]
)
)
self.assertTrue(a != b)
self.assertTrue(b != a)
a = payloads.DeriveKeyRequestPayload(
template_attribute=objects.TemplateAttribute(
attributes=[
objects.Attribute(
attribute_name=objects.Attribute.AttributeName(
'Cryptographic Algorithm'
),
attribute_value=primitives.Enumeration(
enums.CryptographicAlgorithm,
value=enums.CryptographicAlgorithm.AES,
tag=enums.Tags.CRYPTOGRAPHIC_ALGORITHM
)
),
objects.Attribute(
attribute_name=objects.Attribute.AttributeName(
'Cryptographic Length'
),
attribute_value=primitives.Integer(
value=128,
tag=enums.Tags.CRYPTOGRAPHIC_LENGTH
)
)
]
)
)
b = payloads.DeriveKeyRequestPayload(
template_attribute=objects.TemplateAttribute()
)
self.assertTrue(a != b)
self.assertTrue(b != a)
a = payloads.DeriveKeyRequestPayload(template_attribute=None)
b = payloads.DeriveKeyRequestPayload(
template_attribute=objects.TemplateAttribute(
attributes=[
objects.Attribute(
attribute_name=objects.Attribute.AttributeName(
'Cryptographic Algorithm'
),
attribute_value=primitives.Enumeration(
enums.CryptographicAlgorithm,
value=enums.CryptographicAlgorithm.AES,
tag=enums.Tags.CRYPTOGRAPHIC_ALGORITHM
)
),
objects.Attribute(
attribute_name=objects.Attribute.AttributeName(
'Cryptographic Length'
),
attribute_value=primitives.Integer(
value=128,
tag=enums.Tags.CRYPTOGRAPHIC_LENGTH
)
)
]
)
)
self.assertTrue(a != b)
self.assertTrue(b != a)
def test_not_equal_on_type_mismatch(self):
"""
Test that the inequality operator returns True when comparing two
DeriveKey request payloads with different types.
"""
a = payloads.DeriveKeyRequestPayload()
b = 'invalid'
self.assertTrue(a != b)
self.assertTrue(b != a)
def test_repr(self):
"""
Test that repr can be applied to a DeriveKey request payload.
"""
derivation_parameters = attributes.DerivationParameters(
cryptographic_parameters=attributes.CryptographicParameters(
hashing_algorithm=enums.HashingAlgorithm.SHA_256
),
initialization_vector=b'\x39\x48\x74\x32\x49\x28\x34\xA3',
derivation_data=b'\xFA\xD9\x8B\x6A\xCA\x6D\x87\xDD'
)
template_attribute = objects.TemplateAttribute(
attributes=[
objects.Attribute(
attribute_name=objects.Attribute.AttributeName(
'Cryptographic Algorithm'
),
attribute_value=primitives.Enumeration(
enums.CryptographicAlgorithm,
value=enums.CryptographicAlgorithm.AES,
tag=enums.Tags.CRYPTOGRAPHIC_ALGORITHM
)
),
objects.Attribute(
attribute_name=objects.Attribute.AttributeName(
'Cryptographic Length'
),
attribute_value=primitives.Integer(
value=128,
tag=enums.Tags.CRYPTOGRAPHIC_LENGTH
)
)
]
)
payload = payloads.DeriveKeyRequestPayload(
object_type=enums.ObjectType.SYMMETRIC_KEY,
unique_identifiers=[
'fb4b5b9c-6188-4c63-8142-fe9c328129fc',
'<KEY>',
'1703250b-4d40-4de2-93a0-c494a1d4ae40'
],
derivation_method=enums.DerivationMethod.HASH,
derivation_parameters=derivation_parameters,
template_attribute=template_attribute
)
# TODO(peter-hamilton) Update this test string when TemplateAttribute
# supports repr.
expected = (
"DeriveKeyRequestPayload("
"object_type=ObjectType.SYMMETRIC_KEY, "
"unique_identifiers=["
"'fb4b5b9c-6188-4c63-8142-fe9c328129fc', "
"'5c9b81ef-4ee5-42cd-ba2d-c002fdd0c7b3', "
"'1703250b-4d40-4de2-93a0-c494a1d4ae40'], "
"derivation_method=DerivationMethod.HASH, "
"derivation_parameters={0}, "
"template_attribute={1})".format(
repr(derivation_parameters),
repr(template_attribute)
)
)
observed = repr(payload)
self.assertEqual(expected, observed)
def test_str(self):
"""
Test that str can be applied to a DeriveKey request payload
"""
derivation_parameters = attributes.DerivationParameters(
cryptographic_parameters=attributes.CryptographicParameters(
hashing_algorithm=enums.HashingAlgorithm.SHA_256
),
initialization_vector=b'\x39\x48\x74\x32\x49\x28\x34\xA3',
derivation_data=b'\xFA\xD9\x8B\x6A\xCA\x6D\x87\xDD'
)
template_attribute = objects.TemplateAttribute(
attributes=[
| |
- sD2
eHD = I_HD - sHD
eH2 = I_H2 - sH2
eD2 = np.multiply(wMat_D2, eD2)
eHD = np.multiply(wMat_HD, eHD)
eH2 = np.multiply(wMat_H2, eH2)
eD2 = clean_mat(eD2)
eHD = clean_mat(eHD)
eH2 = clean_mat(eH2)
# choosing norm
if norm=='' or norm.lower()=='absolute' or norm =='a' or norm =='A':
E=np.sum(np.abs(eD2)) + np.sum(np.abs(eHD))
elif norm.lower()=='frobenius' or norm =='F' :
E=np.sqrt(np.sum(np.square(eD2))) + np.sqrt(np.sum(np.square(eHD)))
elif norm.lower()=='frobenius_square' or norm =='FS' :
E=np.sum(np.square(eD2)) + np.sum(np.square(eHD))
return(E)
#*******************************************************************
#*******************************************************************
def residual_quadratic_TF(param):
'''Function which computes the residual (as sum of squares) comparing the
ratio of expt to theoretical intensity ratio to the sensitivity profile
modelled as a quadratic polynomial, ( 1 + c1*x + c2*x**2 )
param : c1, c2 (temperature is fixed at T_fixed)
'''
# temperature is read from the variable `T_fixed` defined earlier
sosD2 = bp.sumofstate_D2(T_fixed )
sosHD = bp.sumofstate_HD(T_fixed )
sosH2 = bp.sumofstate_H2(T_fixed )
computed_D2 = compute_series_para.spectra_D2(T_fixed , OJ_D2, QJ_D2,
SJ_D2, sosD2)
computed_HD = compute_series_para.spectra_HD(T_fixed , OJ_HD, QJ_HD,
SJ_HD, sosHD)
computed_H2 = compute_series_para.spectra_H2_c(T_fixed , OJ_H2,
QJ_H2, sosH2)
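# gen_intensity_mat builds matrices of pairwise band-intensity ratios; each I_* below is the
# (experiment / theory) ratio matrix, which is compared against the modelled sensitivity ratio s_*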
# ------ D2 ------
trueR_D2=gen_intensity_mat (computed_D2, 2)
expt_D2=gen_intensity_mat (dataD2, 0)
I_D2=np.divide(expt_D2,trueR_D2 )
I_D2=clean_mat(I_D2)
# ----------------
# ------ HD ------
trueR_HD=gen_intensity_mat (computed_HD, 2)
expt_HD=gen_intensity_mat (dataHD, 0)
I_HD=np.divide(expt_HD,trueR_HD )
I_HD=clean_mat(I_HD)
# ----------------
# ------ H2 ------
trueR_H2=gen_intensity_mat (computed_H2, 2)
expt_H2=gen_intensity_mat (dataH2, 0)
I_H2=np.divide(expt_H2,trueR_H2 )
I_H2=clean_mat(I_H2)
# ----------------
# generate the RHS : sensitivity factor
sD2=gen_s_quadratic(computed_D2, param)
sHD=gen_s_quadratic(computed_HD, param)
sH2=gen_s_quadratic(computed_H2, param)
# residual matrix
eD2 = I_D2 - sD2
eHD = I_HD - sHD
eH2 = I_H2 - sH2
eD2 = np.multiply(wMat_D2, eD2)
eHD = np.multiply(wMat_HD, eHD)
eH2 = np.multiply(wMat_H2, eH2)
eD2 = clean_mat(eD2)
eHD = clean_mat(eHD)
eH2 = clean_mat(eH2)
# choosing norm
if norm=='' or norm.lower()=='absolute' or norm =='a' or norm =='A':
E=np.sum(np.abs(eD2)) + np.sum(np.abs(eHD))
elif norm.lower()=='frobenius' or norm =='F' :
E=np.sqrt(np.sum(np.square(eD2))) + np.sqrt(np.sum(np.square(eHD)))
elif norm.lower()=='frobenius_square' or norm =='FS' :
E=np.sum(np.square(eD2)) + np.sum(np.square(eHD))
return(E)
#*******************************************************************
#*******************************************************************
def residual_cubic_TF(param):
'''Function which computes the residual (as sum of squares) comparing the
ratio of expt to theoretical intensity ratio to the sensitivity profile
modelled as a cubic polynomial, ( 1 + c1*x + c2*x**2 + c3*x**3 )
param : c1, c2, c3 (temperature is fixed at T_fixed)
'''
# temperature is read from the variable `T_fixed` defined earlier
sosD2 = bp.sumofstate_D2(T_fixed )
sosHD = bp.sumofstate_HD(T_fixed )
sosH2 = bp.sumofstate_H2(T_fixed )
computed_D2 = compute_series_para.spectra_D2(T_fixed , OJ_D2, QJ_D2,
SJ_D2, sosD2)
computed_HD = compute_series_para.spectra_HD(T_fixed , OJ_HD, QJ_HD,
SJ_HD, sosHD)
computed_H2 = compute_series_para.spectra_H2_c(T_fixed , OJ_H2,
QJ_H2, sosH2)
# ------ D2 ------
trueR_D2=gen_intensity_mat (computed_D2, 2)
expt_D2=gen_intensity_mat (dataD2, 0)
I_D2=np.divide(expt_D2,trueR_D2 )
I_D2=clean_mat(I_D2)
# ----------------
# ------ HD ------
trueR_HD=gen_intensity_mat (computed_HD, 2)
expt_HD=gen_intensity_mat (dataHD, 0)
I_HD=np.divide(expt_HD,trueR_HD )
I_HD=clean_mat(I_HD)
# ----------------
# ------ H2 ------
trueR_H2=gen_intensity_mat (computed_H2, 2)
expt_H2=gen_intensity_mat (dataH2, 0)
I_H2=np.divide(expt_H2,trueR_H2 )
I_H2=clean_mat(I_H2)
# ----------------
# generate the RHS : sensitivity factor
sD2=gen_s_cubic(computed_D2, param)
sHD=gen_s_cubic(computed_HD, param)
sH2=gen_s_cubic(computed_H2, param)
# residual matrix
eD2 = I_D2 - sD2
eHD = I_HD - sHD
eH2 = I_H2 - sH2
eD2 = np.multiply(wMat_D2, eD2)
eHD = np.multiply(wMat_HD, eHD)
eH2 = np.multiply(wMat_H2, eH2)
eD2 = clean_mat(eD2)
eHD = clean_mat(eHD)
eH2 = clean_mat(eH2)
# choosing norm
if norm=='' or norm.lower()=='absolute' or norm =='a' or norm =='A':
E=np.sum(np.abs(eD2)) + np.sum(np.abs(eHD))
elif norm.lower()=='frobenius' or norm =='F' :
E=np.sqrt(np.sum(np.square(eD2))) + np.sqrt(np.sum(np.square(eHD)))
elif norm.lower()=='frobenius_square' or norm =='FS' :
E=np.sum(np.square(eD2)) + np.sum(np.square(eHD))
return(E)
#*******************************************************************
#*******************************************************************
def residual_quartic_TF(param):
'''Function which computes the residual (as sum of squares) comparing the
ratio of expt to theoretical intensity ratio to the sensitivity profile
modelled as a quartic polynomial, ( 1 + c1*x + c2*x**2 + c3*x**3 + c4*x**4 )
param : c1, c2, c3, c4 (temperature is fixed at T_fixed)
'''
# temperature is read from the variable `T_fixed` defined earlier
sosD2 = bp.sumofstate_D2( T_fixed )
sosHD = bp.sumofstate_HD( T_fixed )
sosH2 = bp.sumofstate_H2( T_fixed )
computed_D2 = compute_series_para.spectra_D2(T_fixed , OJ_D2, QJ_D2,
SJ_D2, sosD2)
computed_HD = compute_series_para.spectra_HD(T_fixed , OJ_HD, QJ_HD,
SJ_HD, sosHD)
computed_H2 = compute_series_para.spectra_H2_c(T_fixed , OJ_H2,
QJ_H2, sosH2)
# ------ D2 ------
trueR_D2=gen_intensity_mat (computed_D2, 2)
expt_D2=gen_intensity_mat (dataD2, 0)
I_D2=np.divide(expt_D2,trueR_D2 )
I_D2=clean_mat(I_D2)
# ----------------
# ------ HD ------
trueR_HD=gen_intensity_mat (computed_HD, 2)
expt_HD=gen_intensity_mat (dataHD, 0)
I_HD=np.divide(expt_HD,trueR_HD )
I_HD=clean_mat(I_HD)
# ----------------
# ------ H2 ------
trueR_H2=gen_intensity_mat (computed_H2, 2)
expt_H2=gen_intensity_mat (dataH2, 0)
I_H2=np.divide(expt_H2,trueR_H2 )
I_H2=clean_mat(I_H2)
# ----------------
# generate the RHS : sensitivity factor
sD2=gen_s_quartic(computed_D2, param)
sHD=gen_s_quartic(computed_HD, param)
sH2=gen_s_quartic(computed_H2, param)
# residual matrix
eD2 = I_D2 - sD2
eHD = I_HD - sHD
eH2 = I_H2 - sH2
eD2 = np.multiply(wMat_D2, eD2)
eHD = np.multiply(wMat_HD, eHD)
eH2 = np.multiply(wMat_H2, eH2)
eD2 = clean_mat(eD2)
eHD = clean_mat(eHD)
eH2 = clean_mat(eH2)
# choosing norm
if norm=='' or norm.lower()=='absolute' or norm =='a' or norm =='A':
E=np.sum(np.abs(eD2)) + np.sum(np.abs(eHD))
elif norm.lower()=='frobenius' or norm =='F' :
E=np.sqrt(np.sum(np.square(eD2))) + np.sqrt(np.sum(np.square(eHD)))
elif norm.lower()=='frobenius_square' or norm =='FS' :
E=np.sum(np.square(eD2)) + np.sum(np.square(eHD))
return(E)
#*******************************************************************
#***************************************************************
#***************************************************************
# Fit functions
#***************************************************************
#***************************************************************
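# Each run_fit_*_TF routine below minimizes the corresponding residual_*_TF
# function (temperature held fixed at T_fixed) over the polynomial coefficients,
# then writes the fitted wavenumber-sensitivity correction curve,
# 1 + (k1/scale1)*(xaxis - scenter) + (k2/scale2)*(xaxis - scenter)**2 + ... ,
# to a text file. `xaxis`, `scenter` and the `scale*` factors are assumed to be
# module-level variables defined earlier in this script.
# Illustrative calls (initial guesses are arbitrary):
#   run_fit_linear_TF(1.0)
#   run_fit_quadratic_TF(1.0, -0.5)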
def run_fit_linear_TF ( init_k1 ):
'''Function performing the actual fit using the residual_linear_TF function
defined earlier '''
# init_k1 : Initial guess
param_init = np.array([ init_k1 ])
print("**********************************************************")
print("\t\t -- Linear fit -- ")
print("\t\tNorm (defn of residual): ", norm)
#print("Testing the residual function with data")
print("Initial coef : k1={0} output = {1}".format( init_k1, \
(residual_linear_TF(param_init))))
print("\nOptimization run \n")
res = opt.minimize(residual_linear_TF, param_init, method='Nelder-Mead', \
options={'xatol': 1e-9, 'fatol': 1e-9})
print(res)
optk1 = res.x[0]
print("\nOptimized result : k1={0} \n".format(round(optk1, 6) ))
correction_curve= 1+(optk1/scale1)*(xaxis-scenter) # generate the correction curve
np.savetxt("correction_linear_TF.txt", correction_curve, fmt='%2.8f',\
header='corrn_curve_linear_TF', comments='')
print("**********************************************************")
# save log -----------
log.info('\n ******* Optimization run : Linear *******')
log.info('\n\t Initial : c1 = %4.8f\n', init_k1 )
log.info('\n\t %s\n', res )
log.info('\n Optimized result : c1 = %4.8f\n', optk1 )
log.info(' *******************************************')
# --------------------
return res.fun
# --------------------
#***************************************************************
def run_fit_quadratic_TF ( init_k1, init_k2 ):
'''Function performing the actual fit using the residual_quadratic_TF function
defined earlier '''
# init_k1, init_k2 : Initial guess
param_init = np.array([ init_k1 , init_k2 ])
print("**********************************************************")
print("\t\t -- Quadratic fit -- ")
print("\t\tNorm (defn of residual): ", norm)
#print("Testing the residual function with data")
print("Initial coef : k1={0}, k2={1} output = {2}".format( init_k1, \
init_k2, (residual_quadratic_TF(param_init))))
print("\nOptimization run \n")
res = opt.minimize(residual_quadratic_TF, param_init, method='Nelder-Mead', \
options={'xatol': 1e-9, 'fatol': 1e-9})
print(res)
optk1 = res.x[0]
optk2 = res.x[1]
print("\nOptimized result : k1={0}, k2={1} \n".format( round(optk1, 6), round(optk2, 6) ))
# generate the correction curve
correction_curve = 1+(optk1/scale1)*(xaxis-scenter) \
+ ((optk2/scale2)*(xaxis-scenter)**2)
np.savetxt("correction_quadratic_TF.txt", correction_curve, fmt='%2.8f',\
header='corrn_curve_quadratic_TF', comments='')
print("**********************************************************")
return res.fun
# --------------------
#***************************************************************
def run_fit_cubic_TF ( init_k1, init_k2, init_k3 ):
'''Function performing the actual fit using the residual_cubic_TF function
defined earlier '''
# init_k1, init_k2, init_k3 : Initial guess
param_init = np.array([ init_k1 , init_k2 , init_k3 ])
print("**********************************************************")
print("\t\t -- Cubic fit -- ")
print("\t\tNorm (defn of residual): ", norm)
#print("Testing the residual function with data")
print("Initial coef : k1={0}, k2={1}, k3={2}, output = {3}".format( init_k1, \
init_k2, init_k3, (residual_cubic_TF(param_init))))
print("\nOptimization run \n")
res = opt.minimize(residual_cubic_TF, param_init, method='Nelder-Mead', \
options={'xatol': 1e-9, 'fatol': 1e-9})
print(res)
optk1 = res.x[0]
optk2 = res.x[1]
optk3 = res.x[2]
print("\nOptimized result : k1={0}, k2={1}, k3={2} \n".format( round(optk1, 6), round(optk2, 6), round(optk3, 6)))
# generate the correction curve
correction_curve = (1+(optk1/scale1)*(xaxis-scenter)) \
+ ((optk2/scale2)*(xaxis-scenter)**2) + ((optk3/scale3)*(xaxis-scenter)**3)
np.savetxt("correction_cubic_TF.txt", correction_curve, fmt='%2.8f',\
header='corrn_curve_cubic_TF', comments='')
print("**********************************************************")
return res.fun
# --------------------
#***************************************************************
def run_fit_quartic_TF ( init_k1, init_k2, init_k3, init_k4 ):
'''Function performing the actual fit using the residual_quartic_TF function
defined earlier '''
# init_k1, init_k2, init_k3, init_k4 : Initial guess
param_init = np.array([ init_k1 , init_k2 , init_k3 , init_k4 ])
print("**********************************************************")
print("\t\t -- Quartic fit -- ")
print("\t\tNorm (defn of residual): ", norm)
#print("Testing the | |
<filename>source/pydwf-examples/DigitalOutShowStatusDuringPulsePlayback.py
#! /usr/bin/env python3
"""DigitalOut instrument demo.
Show the behavior of status, run_status, and repeat_status before, during, and after Pulse-mode playback is active.
"""
from typing import Optional, Tuple
import argparse
import time
import numpy as np
import matplotlib.pyplot as plt
from pydwf import (DwfLibrary, DwfEnumConfigInfo, DwfTriggerSource, DwfTriggerSlope, DwfDigitalOutOutput,
DwfDigitalOutType, DwfDigitalOutIdle, DwfState, PyDwfError)
from pydwf.utilities import openDwfDevice
def summarize(sequence, separator: str = " followed by ") -> str:
"""Summarize a sequence of values as a string."""
strings = []
current = None
current_count = 0
for e in sequence:
if current_count == 0:
current = e
current_count = 1
elif e == current:
current_count += 1
else:
strings.append("{} × {}".format(current_count, current))
current_count = 1
current = e
if current_count != 0:
strings.append("{} × {}".format(current_count, current))
return separator.join(strings) if strings else "(none)"
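# Example (illustrative): summarize([1, 1, 2]) -> "2 × 1 followed by 1 × 2"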
def get_channel_values(digitalOut, func) -> Tuple:
"""Get the result of applying 'func' to all DigitalOut channels."""
return tuple(func(channel_index) for channel_index in range(digitalOut.count()))
def enum_values_to_str(values):
"""Summarize a collection of enumeration values as a string."""
enum_type_name = None
for value in values:
if enum_type_name is None:
enum_type_name = value.__class__.__name__
elif enum_type_name != value.__class__.__name__:
raise RuntimeError("Enum values are of different types.")
return "{}.{{{}}}".format(enum_type_name, "|".join(value.name for value in values))
def print_digital_output_info(digitalOut):
"""Print static Info of the DigitalOut instrument.
Of the 11 queryable "Info" values, 5 are global, and 6 are channel-dependent.
"""
# pylint: disable=line-too-long, unnecessary-lambda
channel_count = digitalOut.count()
print("=== digitalOut global info:")
print()
print(" digitalOut.internalClockInfo() ...... : {:10} [Hz]".format(digitalOut.internalClockInfo()))
print(" digitalOut.triggerSourceInfo() ...... : {}".format(enum_values_to_str(digitalOut.triggerSourceInfo())))
print(" digitalOut.runInfo() ................ : {} [s]".format(digitalOut.runInfo()))
print(" digitalOut.waitInfo() ............... : {} [s]".format(digitalOut.waitInfo()))
print(" digitalOut.repeatInfo() ............. : {} [-]".format(digitalOut.repeatInfo()))
print()
print(" NOTE: digitalOut.triggerSourceInfo() is obsolete.")
print()
print("=== digitalOut per-channel info --- channel index in range {} .. {} (channel count = {}):".format(0, channel_count - 1, channel_count))
print()
print(" digitalOut.outputInfo(idx) .......... : {}".format(summarize(get_channel_values(digitalOut, lambda channel_index: enum_values_to_str(digitalOut.outputInfo(channel_index))))))
print(" digitalOut.typeInfo(idx) ............ : {}".format(summarize(get_channel_values(digitalOut, lambda channel_index: enum_values_to_str(digitalOut.typeInfo(channel_index))))))
print(" digitalOut.idleInfo(idx) ............ : {}".format(summarize(get_channel_values(digitalOut, lambda channel_index: enum_values_to_str(digitalOut.idleInfo(channel_index))))))
print(" digitalOut.dividerInfo(idx) ......... : {}".format(summarize(get_channel_values(digitalOut, lambda channel_index: digitalOut.dividerInfo(channel_index)))))
print(" digitalOut.counterInfo(idx) ......... : {}".format(summarize(get_channel_values(digitalOut, lambda channel_index: digitalOut.counterInfo(channel_index)))))
print(" digitalOut.dataInfo(idx) ............ : {}".format(summarize(get_channel_values(digitalOut, lambda channel_index: digitalOut.dataInfo(channel_index)))))
print()
def print_digital_output_settings(digitalOut):
"""Print regular settings of the DigitalOut instrument.
Note: a setting is considered "regular" if both a "Set" and "Get" exists for it.
Of the 14 queryable regular "Get" values, 6 are global, and 8 are channel-dependent.
"""
# pylint: disable=line-too-long, unnecessary-lambda
channel_count = digitalOut.count()
print("=== digitalOut global current settings:")
print()
print(" digitalOut.triggerSourceGet() ....... : {}".format(digitalOut.triggerSourceGet()))
print(" digitalOut.runGet() ................. : {}".format(digitalOut.runGet()))
print(" digitalOut.waitGet() ................ : {}".format(digitalOut.waitGet()))
print(" digitalOut.repeatGet() .............. : {}".format(digitalOut.repeatGet()))
print(" digitalOut.triggerSlopeGet() ........ : {}".format(digitalOut.triggerSlopeGet()))
print(" digitalOut.repeatTriggerGet() ....... : {}".format(digitalOut.repeatTriggerGet()))
print()
print("=== digitalOut per-channel current settings --- channel index in range {} .. {} (channel count = {}):".format(0, channel_count - 1, channel_count))
print()
print(" digitalOut.enableGet(idx) ........... : {}".format(summarize(get_channel_values(digitalOut, lambda channel_index: digitalOut.enableGet(channel_index)))))
print(" digitalOut.outputGet(idx) ........... : {}".format(summarize(get_channel_values(digitalOut, lambda channel_index: digitalOut.outputGet(channel_index)))))
print(" digitalOut.typeGet(idx) ............. : {}".format(summarize(get_channel_values(digitalOut, lambda channel_index: digitalOut.typeGet(channel_index)))))
print(" digitalOut.idleGet(idx) ............. : {}".format(summarize(get_channel_values(digitalOut, lambda channel_index: digitalOut.idleGet(channel_index)))))
print(" digitalOut.dividerInitGet(idx) ...... : {}".format(summarize(get_channel_values(digitalOut, lambda channel_index: digitalOut.dividerInitGet(channel_index)))))
print(" digitalOut.dividerGet(idx) .......... : {}".format(summarize(get_channel_values(digitalOut, lambda channel_index: digitalOut.dividerGet(channel_index)))))
print(" digitalOut.counterInitGet(idx) ...... : {}".format(summarize(get_channel_values(digitalOut, lambda channel_index: digitalOut.counterInitGet(channel_index)))))
print(" digitalOut.counterGet(idx) .......... : {}".format(summarize(get_channel_values(digitalOut, lambda channel_index: digitalOut.counterGet(channel_index)))))
print()
def change_digital_output_global_settings(
digitalOut,
run_duration : Optional[float],
wait_duration : Optional[float],
repeat_count : Optional[int],
repeat_trigger_flag : Optional[bool],
trigger_source : Optional[DwfTriggerSource],
trigger_slope : Optional[DwfTriggerSlope]
):
"""Change global DigitalOut instrument settings, if given."""
# The DigitalOut device has 14 regular "Set" functions (i.e., Set functions for which there is a Get counterpart).
# 6 of these are channel independent.
if trigger_source is not None:
digitalOut.triggerSourceSet(trigger_source)
if run_duration is not None:
digitalOut.runSet(run_duration)
if wait_duration is not None:
digitalOut.waitSet(wait_duration)
if repeat_count is not None:
digitalOut.repeatSet(repeat_count)
if trigger_slope is not None:
digitalOut.triggerSlopeSet(trigger_slope)
if repeat_trigger_flag is not None:
digitalOut.repeatTriggerSet(repeat_trigger_flag)
def change_digital_output_channel_settings(
digitalOut,
channel_index : int,
enable_flag : Optional[bool],
output : Optional[DwfDigitalOutOutput],
type_ : Optional[DwfDigitalOutType],
idle_mode : Optional[DwfDigitalOutIdle],
divider_init : Optional[int],
divider : Optional[int],
counter_init_high_and_value : Optional[Tuple[bool, int]],
counter_low_and_high : Optional[Tuple[int, int]]
):
"""Change channel-specific DigitalOut instrument settings, if given."""
# The DigitalOut device has 14 regular "Set" functions (i.e., Set functions for which there is a Get counterpart).
# 8 of these are channel-specific.
if enable_flag is not None:
digitalOut.enableSet(channel_index, enable_flag)
if output is not None:
digitalOut.outputSet(channel_index, output)
if type_ is not None:
digitalOut.typeSet(channel_index, type_)
if idle_mode is not None:
digitalOut.idleSet(channel_index, idle_mode)
if divider_init is not None:
digitalOut.dividerInitSet(channel_index, divider_init)
if divider is not None:
digitalOut.dividerSet(channel_index, divider)
if counter_init_high_and_value is not None:
digitalOut.counterInitSet(channel_index, *counter_init_high_and_value)
if counter_low_and_high is not None:
digitalOut.counterSet(channel_index, *counter_low_and_high)
def demo_digital_out_instrument_api(digitalOut):
"""Demonstrate DigitalOut instrument."""
# pylint: disable = too-many-locals, too-many-statements, too-many-branches
# - 11 "info" functions;
# - 14 "get" functions
# - 14 regular "get" functions (play data functions not included)
# - 1 "count" function
# - 8 "other" functions (below).
#
# total: 48 functions.
digitalOut.reset()
print("===========================================")
print("=== ===")
print("=== DigitalOut instrument static info ===")
print("=== ===")
print("===========================================")
print()
print_digital_output_info(digitalOut)
print("=========================================================")
print("=== ===")
print("=== DigitalOut instrument settings just after reset ===")
print("=== ===")
print("=========================================================")
print()
print_digital_output_settings(digitalOut)
change_digital_output_global_settings(
digitalOut,
run_duration = 0.800,
wait_duration = 0.200,
repeat_count = 4,
repeat_trigger_flag = False,
trigger_source = DwfTriggerSource.PC,
trigger_slope = DwfTriggerSlope.Rise
)
# Configure channel 0
change_digital_output_channel_settings(
digitalOut,
channel_index = 0,
enable_flag = True,
output = DwfDigitalOutOutput.PushPull,
type_ = DwfDigitalOutType.Pulse,
idle_mode = DwfDigitalOutIdle.Low,
divider_init = 0,
divider = 100000, # counter counts at 1 kHz
counter_init_high_and_value = (True, 0),
counter_low_and_high = (5, 5)
)
# Configure channel 1
change_digital_output_channel_settings(
digitalOut,
channel_index = 1,
enable_flag = True,
output = DwfDigitalOutOutput.PushPull,
type_ = DwfDigitalOutType.Pulse,
idle_mode = DwfDigitalOutIdle.Low,
divider_init = 10,
divider = 100000, # counter counts at 1 kHz
counter_init_high_and_value = (True, 0),
counter_low_and_high = (5, 5)
)
print("=================================================================")
print("=== ===")
print("=== DigitalOut instrument settings just after configuration ===")
print("=== ===")
print("=================================================================")
print()
print_digital_output_settings(digitalOut)
print("========================================")
print("=== ===")
print("=== DigitalOut instrument starting ===")
print("=== ===")
print("========================================")
print()
t_slack = 0.500 # slack before trigger and after DwfState.Done
t_max = 20.0
trigger_asserted = False
t_done_seen = None
# Start the device.
digitalOut.configure(True)
t0 = time.perf_counter()
status_list = []
while True:
# The 'status' call is needed to update the runStatus() and repeatStatus() values.
status = digitalOut.status()
t = time.perf_counter() - t0
if not trigger_asserted:
if t >= t_slack:
digitalOut.device.triggerPC()
trigger_asserted = True
actual_trigger_time = t
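# runStatus()/repeatStatus() return raw counter values; the decoding below is a
# heuristic: an all-ones 16-bit repeat status (65535) is treated as "not
# available" (-1), and run_status is reinterpreted as a signed 48-bit integer so
# sentinel values become negative and can be filtered out later.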
run_status = digitalOut.runStatus()
repeat_status = digitalOut.repeatStatus()
if repeat_status == 65535:
repeat_status = -1
repeat_status = round(repeat_status)
if run_status >= (2**47):
run_status -= (2**48)
print("[{:20.9f}] {:20} {:30} {:30}".format(t, status.name, run_status, repeat_status))
status_list.append((t, status.value, run_status, repeat_status))
if t_done_seen is None:
if status == DwfState.Done:
t_done_seen = t
t_max = min(t_max, t + t_slack)
if t > t_max:
break
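# Sleep until the next 5 ms wall-clock boundary so status samples land on a roughly regular grid.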
time.sleep(-time.time() % 0.005)
actual_duration = time.perf_counter() - t0
expected_duration = (digitalOut.runGet(), digitalOut.waitGet(), digitalOut.repeatGet())
print()
print("Sequence done. Total duration: {:.9} [s] (expected: {} [s])".format(actual_duration, expected_duration))
status_array_dtype = np.dtype([
('t', np.float64),
('status', np.int32),
('run_status', np.float64),
('rep_status', np.int32)
])
st = np.array(status_list, dtype=status_array_dtype)
st["t"] -= actual_trigger_time
if t_done_seen is not None:
t_done_seen -= actual_trigger_time
run_status_valid = (st["run_status"] >= 0)
print("Invalid run_status values:", np.unique(st["run_status"][~run_status_valid]))
st["run_status"][~run_status_valid] = np.nan
scatter_size = 4.0
plt.gcf().set_size_inches(16, 9)
plt.subplots_adjust(hspace=0.4)
plt.suptitle("DigitalOut status behavior before and during Pulse playback")
plt.subplot(411)
plt.grid()
plt.axvline(0.0, c='red')
if t_done_seen is not None:
plt.axvline(t_done_seen, c='red')
plt.scatter(st["t"], st["status"], s=scatter_size)
plt.xlim(-t_slack, t_max)
plt.ylabel("status [DwfState]")
plt.subplot(412)
plt.axvline(0.0, c='red')
if t_done_seen is not None:
plt.axvline(t_done_seen, c='red')
plt.scatter(st["t"], run_status_valid, s=scatter_size)
plt.xlim(-t_slack, t_max)
plt.ylabel("run_status_valid")
plt.subplot(413)
plt.axvline(0.0, c='red')
if t_done_seen is not None:
plt.axvline(t_done_seen, c='red')
plt.scatter(st["t"], st["run_status"] / digitalOut.internalClockInfo(), s=scatter_size)
plt.xlim(-t_slack, t_max)
plt.ylabel("run_status [s]")
plt.subplot(414)
plt.axvline(0.0, c='red')
if t_done_seen is not None:
plt.axvline(t_done_seen, c='red')
plt.scatter(st["t"], st["rep_status"], s=scatter_size)
plt.xlim(-t_slack, t_max)
plt.xlabel("time [s]")
plt.ylabel("rep_status")
plt.show()
# Remaining untested calls:
#
# digitalOut.dataSet(channel_index: int, bits: str, tristate: bool=False)
# digitalOut.playDataSet(rg_bits: int, bits_per_sample: int, count_of_samples: int)
# digitalOut.playRateSet(rate_hz: float)
def main():
"""Parse arguments and start demo."""
parser = argparse.ArgumentParser(description="Demonstrate DigitalOut instrument usage.")
parser.add_argument(
"-sn", "--serial-number-filter",
type=str,
nargs='?',
dest="serial_number_filter",
help="serial number filter to select a specific Digilent Waveforms device"
)
args = parser.parse_args()
def maximize_digital_out_buffer_size(configuration_parameters):
"""Select the configuration with the highest possible digital out buffer size."""
return configuration_parameters[DwfEnumConfigInfo.DigitalOutBufferSize]
try:
dwf = DwfLibrary()
with openDwfDevice(dwf, serial_number_filter=args.serial_number_filter,
score_func=maximize_digital_out_buffer_size) as device:
demo_digital_out_instrument_api(device.digitalOut)
except PyDwfError as exception:
print("PyDwfError:", exception)
except KeyboardInterrupt:
print("Keyboard interrupt, ending demo.")
if __name__ == "__main__":
main()
<gh_stars>10-100
# Python imports.
from collections import defaultdict
import Queue
import random
import os
import sys
import cPickle
# Other imports.
from ActionAbstractionClass import ActionAbstraction
from OptionClass import Option
from simple_rl.planning.ValueIterationClass import ValueIteration
from simple_rl.mdp.MDPClass import MDP
from EqPredicateClass import EqPredicate, NeqPredicate
from PolicyFromDictClass import *
from simple_rl.tasks import GridWorldMDP
# ----------------------
# -- Directed Options --
# ----------------------
def get_directed_options_for_sa(mdp_distr, state_abstr, incl_self_loops=True, max_options=100):
'''
Args:
mdp_distr (MDPDistribution)
state_abstr (StateAbstraction)
incl_self_loops (bool)
max_options (int)
Returns:
(ActionAbstraction)
'''
print " Computing directed options."
sys.stdout.flush()
abs_states = state_abstr.get_abs_states()
# Check max # options.
total_clique_options = len(abs_states) * (len(abs_states) - 1)
if total_clique_options > max_options:
print "\tToo many options (" + str(total_clique_options) + "), need < " + str(max_options) + ". Increasing compression rate and continuing.\n"
return False
g_start_state = mdp_distr.get_init_state()
# Compute all directed options that transition between abstract states.
options = []
state_pairs = []
random_policy = lambda s : random.choice(mdp_distr.get_actions())
# For each s_{a,1} s_{a,2} pair.
for s_a in abs_states:
for s_a_prime in abs_states:
if not(s_a == s_a_prime):
# Make a non-self loop option.
init_predicate = EqPredicate(y=s_a, func=state_abstr.phi)
term_predicate = EqPredicate(y=s_a_prime, func=state_abstr.phi)
o = Option(init_predicate=init_predicate,
term_predicate=term_predicate,
policy=random_policy)
options.append(o)
state_pairs.append((s_a, s_a_prime))
elif incl_self_loops:
# Self loop.
init_predicate = EqPredicate(y=s_a, func=state_abstr.phi)
term_predicate = NeqPredicate(y=s_a, func=state_abstr.phi) # Terminate in any other abstract state.
o = Option(init_predicate=init_predicate,
term_predicate=term_predicate,
policy=random_policy)
# Initialize with random policy, we'll update it later.
options.append(o)
state_pairs.append((s_a, s_a_prime))
print "\tMade", len(options), "options (formed clique over S_A)."
print "\tPruning..."
sys.stdout.flush()
# Prune.
pruned_option_set = _prune_non_directed_options(options, state_pairs, state_abstr, mdp_distr)
print "\tFinished Pruning. Reduced to", len(pruned_option_set), "options."
return pruned_option_set
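# Illustrative usage (assuming mdp_distr and state_abstr were built elsewhere):
#   opts = get_directed_options_for_sa(mdp_distr, state_abstr)
#   if opts is not False:
#       aa = ActionAbstraction(options=opts, prim_actions=mdp_distr.get_actions())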
def _prune_non_directed_options(options, state_pairs, state_abstr, mdp_distr):
'''
Args:
options (list)
state_pairs (list)
state_abstr (StateAbstraction)
mdp_distr (MDPDistribution)
Returns:
(list of Options)
Summary:
Removes redundant options. That is, if o_1 goes from s_A1 to s_A2, and
o_2 goes from s_A1 *through s_A2 to s_A3, then we get rid of o_2.
'''
good_options = set([])
bad_options = set([])
transition_func = mdp_distr.get_all_mdps()[0].get_transition_func()
# For each option we created, we'll check overlap.
for i, o in enumerate(options):
print "\t Option", i + 1, "of", len(options)
pre_abs_state, post_abs_state = state_pairs[i]
# Get init and terminal lower level states.
ground_init_states = state_abstr.get_lower_states_in_abs_state(pre_abs_state)
ground_term_states = state_abstr.get_lower_states_in_abs_state(post_abs_state)
rand_init_g_state = random.choice(ground_init_states)
# R and T for Option Mini MDP.
def _directed_option_reward_lambda(s, a):
s_prime = transition_func(s, a)
return int(s_prime in ground_term_states and not s in ground_term_states)
def new_trans_func(s, a):
original = s.is_terminal()
s.set_terminal(s in ground_term_states)
s_prime = transition_func(s,a)
# print s, s_prime, s.is_terminal(), s_prime.is_terminal(), pre_abs_state, post_abs_state, s == s_prime
s.set_terminal(original)
return s_prime
if pre_abs_state == post_abs_state:
# Self looping option.
mini_mdp_init_states = defaultdict(list)
# Self loop. Make an option per goal in the cluster.
goal_mdps = []
goal_state_action_pairs = defaultdict(list)
for i, mdp in enumerate(mdp_distr.get_all_mdps()):
add = False
# Check if there is a goal for this MDP in one of the ground states.
for s_g in ground_term_states:
for a in mdp.get_actions():
if mdp.get_reward_func()(s_g, a) > 0.0 and a not in goal_state_action_pairs[s_g]:
goal_state_action_pairs[s_g].append(a)
if isinstance(mdp, GridWorldMDP):
goals = tuple(mdp.get_goal_locs())
else:
goals = tuple(s_g)
mini_mdp_init_states[goals].append(s_g)
add = True
if add:
goal_mdps.append(mdp)
# For each goal.
for goal_mdp in goal_mdps:
def goal_new_trans_func(s, a):
original = s.is_terminal()
s.set_terminal(s not in ground_term_states or original)
s_prime = goal_mdp.get_transition_func()(s,a)
s.set_terminal(original)
return s_prime
if isinstance(goal_mdp, GridWorldMDP):
cluster_init_state = random.choice(mini_mdp_init_states[tuple(goal_mdp.get_goal_locs())])
else:
cluster_init_state = random.choice(ground_init_states)
# Make a new MDP.
mini_mdp = MDP(actions=goal_mdp.get_actions(),
init_state=cluster_init_state,
transition_func=goal_new_trans_func,
reward_func=goal_mdp.get_reward_func())
o_policy, mini_mdp_vi = _make_mini_mdp_option_policy(mini_mdp)
# Make new option.
new_option = Option(o.init_predicate, o.term_predicate, o_policy)
new_option.set_name(str(ground_init_states[0]) + "-sl")
good_options.add(new_option)
continue
else:
# This is a non-self looping option.
mini_mdp = MDP(actions=mdp_distr.get_actions(),
init_state=rand_init_g_state,
transition_func=new_trans_func,
reward_func=_directed_option_reward_lambda)
o_policy, mini_mdp_vi = _make_mini_mdp_option_policy(mini_mdp)
# Compute overlap w.r.t. plans from each state.
for init_g_state in ground_init_states:
# Prune overlapping ones.
plan, state_seq = mini_mdp_vi.plan(init_g_state)
opt_name = str(ground_init_states[0]) + "-" + str(ground_term_states[0])
o.set_name(opt_name)
options[i] = o
if not _check_overlap(o, state_seq, options, bad_options):
# Give the option the new directed policy and name.
o.set_policy(o_policy)
good_options.add(o)
break
else:
# The option overlaps, don't include it.
bad_options.add(o)
return good_options
def _make_mini_mdp_option_policy(mini_mdp):
'''
Args:
mini_mdp (MDP)
Returns:
Policy
'''
# Solve the MDP defined by the terminal abstract state.
mini_mdp_vi = ValueIteration(mini_mdp, delta=0.005, max_iterations=1000, sample_rate=30)
iters, val = mini_mdp_vi.run_vi()
o_policy_dict = make_dict_from_lambda(mini_mdp_vi.policy, mini_mdp_vi.get_states())
o_policy = PolicyFromDict(o_policy_dict)
return o_policy.get_action, mini_mdp_vi
def _check_overlap(option, state_seq, options, bad_options):
'''
Args:
option (Option)
state_seq (list of State)
options (list of Option)
bad_options (set)
Returns:
(bool): If true, we should remove this option.
'''
terminal_is_reachable = False
bad_options = set(bad_options)
for i, s_g in enumerate(state_seq):
for o_prime in options:
if o_prime in bad_options:
continue
is_in_middle = not (option.is_term_true(s_g) or option.is_init_true(s_g))
if is_in_middle and o_prime.is_init_true(s_g):
# We should get rid of @option, because its path goes through another init.
return True
# Only keep options whose terminal states are reachable from the initiation set.
if option.is_term_true(s_g):
terminal_is_reachable = True
if not terminal_is_reachable:
# Can't reach the terminal state.
return True
return False
def compute_sub_opt_func_for_mdp_distr(mdp_distr):
'''
Args:
mdp_distr (dict)
Returns:
(list): Contains the suboptimality function for each MDP in mdp_distr.
subopt: V^*(s) - Q^*(s,a)
'''
actions = mdp_distr.get_actions()
sub_opt_funcs = []
i = 0
for mdp in mdp_distr.get_mdps():
print "\t mdp", i + 1, "of", mdp_distr.get_num_mdps()
vi = ValueIteration(mdp, delta=0.001, max_iterations=1000)
iters, value = vi.run_vi()
new_sub_opt_func = defaultdict(float)
for s in vi.get_states():
max_q = float("-inf")
for a in actions:
next_q = vi.get_q_value(s, a)
if next_q > max_q:
max_q = next_q
for a in actions:
new_sub_opt_func[(s, a)] = max_q - vi.get_q_value(s,a)
sub_opt_funcs.append(new_sub_opt_func)
i+=1
return sub_opt_funcs
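# Each entry maps (state, action) -> V*(s) - Q*(s, a); an optimal action in a
# state therefore has suboptimality 0.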
def _compute_agreement(sub_opt_funcs, mdp_distr, state, action, epsilon=0.00):
'''
Args:
sub_opt_funcs (list of dicts)
mdp_distr (dict)
state (simple_rl.State)
action (str)
epsilon (float)
Returns:
(list)
Summary:
Computes the MDPs for which @action is epsilon-optimal in @state.
'''
all_sub_opt_vals = [sof[(state, action)] for sof in sub_opt_funcs]
eps_opt_mdps = [int(sov <= epsilon) for sov in all_sub_opt_vals]
return eps_opt_mdps
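# Example (illustrative): a return value of [1, 0, 1] means the action is
# epsilon-optimal in the first and third MDPs of the distribution.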
def add_next_option(mdp_distr, next_decis_state, sub_opt_funcs):
'''
Args:
mdp_distr (MDPDistribution)
next_decis_state (State)
sub_opt_funcs (list of dicts)
Returns:
(Option)
'''
# Init func and terminal func.
init_func = lambda s : s == next_decis_state
term_func = lambda s : True
term_func_states = []
# Misc.
reachable_states = Queue.Queue()
reachable_states.put(next_decis_state)
visited_states = set([next_decis_state])
policy_dict = defaultdict(str)
actions = mdp_distr.get_actions()
transition_func = mdp_distr.get_mdps()[0].get_transition_func()
# Tracks which MDPs share near-optimal action sequences.
mdps_active = [1 for m in range(len(sub_opt_funcs))]
while not reachable_states.empty():
# Pointers for this iteration.
cur_state = reachable_states.get()
next_action = random.choice(actions)
max_agreement = 0 # agreement for this state.
# Compute action with max agreement (num active MDPs with shared eps-opt action.)
for a in actions:
agreement_ls = _compute_agreement(sub_opt_funcs, mdp_distr, cur_state, a)
active_agreement_ls = [mdps_active[i] & agreement_ls[i] for i in range(len(agreement_ls))]
agreement = sum(active_agreement_ls)
if agreement > max_agreement:
next_action = a
max_agreement = agreement
# Set policy for this state to the action with maximal agreement.
policy_dict[cur_state] = next_action
max_agreement_ls = _compute_agreement(sub_opt_funcs, mdp_distr, cur_state, next_action)
mdps_active = [mdps_active[i] & max_agreement_ls[i] for i in range(len(max_agreement_ls))]
agreement = sum(mdps_active)
# Move to the next state according to max agreement action.
next_state = transition_func(cur_state, next_action)
if agreement <= 2 or next_state.is_terminal():
term_func_states.append(next_state)
if next_state not in visited_states:
reachable_states.put(next_state)
visited_states.add(next_state)
if len(term_func_states):
term_func_states.append(next_state)
# Turn policy dict into a function and make the option.
o = Option(init_func, term_func=term_func_states, policy=policy_dict)
return o
def make_greedy_options(mdp_distr):
'''
Assumptions:
Shared S, A, start state, T, gamma between all M in mdp_distr.
'''
if isinstance(mdp_distr, MDP):
print "Warning: attempting to create options for a single MDP."
mdp_distr = {1.0:mdp_distr}
# Grab relevant MDP distr. components.
init_state = mdp_distr.keys()[0].get_init_state()
transition_func = mdp_distr.keys()[0].get_transition_func()
actions = mdp_distr.keys()[0].get_actions()
# Setup data structures.
print "Computing advantage functions."
sub_opt_funcs = compute_sub_opt_func_for_mdp_distr(mdp_distr)
decision_states = Queue.Queue()
decision_states.put(init_state)
new_aa = ActionAbstraction(options=actions, prim_actions=actions)
visited_states = set([init_state])
# Loop over reachable states.
num_options = 0
print "Learning:"
while num_options < 2 and (not decision_states.empty()):
print "\toption", num_options + 1
# Add option as long as we have a decision state.
# A decision state is a state where we don't have a good option.
next_decis_state = decision_states.get()
o = add_next_option(mdp_distr, next_decis_state, sub_opt_funcs)
new_aa.add_option(o)
num_options += 1
new_state = o.act_until_terminal(next_decis_state, transition_func)
if new_state not in visited_states:
decision_states.put(new_state)
visited_states.add(new_state)
return new_aa
def print_aa(action_abstr, state_space):
'''
Args:
action_abstr (ActionAbstraction)
state_space (list of State)
Summary:
Prints out options in a convenient way.
'''
options = action_abstr.get_actions()
for o in options:
modifier)
@keyword
def click_link(self, locator, modifier=False):
self.base(locator, f'Clicked link "{locator}"', f" {locator}", modifier)
@keyword
def click_element(self, locator, modifier=False, action_chain=False):
self.base(locator, f'Clicked "{locator}"', f" {locator}", modifier, action_chain)
@keyword
def click_element_at_coordinates(self, locator, xoffset, yoffset):
self.base(locator, f'Clicked "{locator}" at X:{xoffset} , Y:{yoffset}', f" {locator}", xoffset, yoffset)
@keyword
def double_click_element(self, locator):
self.base(locator, f'Double clicked "{locator}"', f" {locator}")
@keyword
def set_focus_to_element(self, locator):
self.base(locator, f'Element "{locator}" was focused', f" {locator}")
@keyword
def scroll_element_into_view(self, locator):
self.base(locator, f'Element "{locator}" was scrolled into view', f" {locator}")
@keyword
def drag_and_drop(self, locator, target):
self.base(locator, f'Element "{locator}" was dragged to "{target}"', f"Origin: {locator}, Target: {target}", target)
@keyword
def drag_and_drop_by_offset(self, locator, xoffset, yoffset):
self.base(locator, f'Element "{locator}" was dragged to X:{xoffset} , Y:{yoffset}', f" {locator}", xoffset, yoffset)
@keyword
def mouse_down(self, locator):
self.base(locator, f'Mouse down on: "{locator}"', f" {locator}")
@keyword
def mouse_out(self, locator):
self.base(locator, f'Mouse out on: "{locator}"', f" {locator}")
@keyword
def mouse_over(self, locator):
self.base(locator, f'Mouse over on: "{locator}"', f" {locator}")
@keyword
def mouse_up(self, locator):
self.base(locator, f'Mouse up on: "{locator}"', f" {locator}")
@keyword
def open_context_menu(self, locator):
self.base(locator, f'Context menu opened on: "{locator}"', f" {locator}")
@keyword
def simulate_event(self, locator, event):
self.base(locator, f'Event: "{event}" was simulated on {locator}', f"Element: {locator}, Event: {event}", event)
@keyword
def press_key(self, locator, key):
self.base(locator, f'Key pressed: "{key}"', f"Locator: {locator}, Key: {key}", key)
@keyword
def press_keys(self, locator=None, *keys):
self.base(locator, f'Keys pressed: "{keys}"\non element found at "{locator}"', f"{keys}", keys)
@keyword
def get_all_links(self):
return self.base("", "links", "")
@keyword
def mouse_down_on_link(self, locator):
self.base(locator, f'Mouse was pressed on "{locator}"', f" {locator}")
@keyword
def page_should_contain_link(self, locator, message=None, loglevel="TRACE"):
self.base(locator, f'Page did contain link in "{locator}"', f" {locator}", message, loglevel)
@keyword
def page_should_not_contain_link(self, locator, message=None, loglevel="TRACE"):
self.base(locator, f'Page did not contain link in "{locator}"', f" {locator}", message, loglevel)
@keyword
def mouse_down_on_image(self, locator):
self.base(locator, f'Mouse was down on image found at "{locator}"', f" {locator}")
@keyword
def page_should_contain_image(self, locator, message=None, loglevel="TRACE"):
self.base(locator, f'Page did contain "{locator}"', f" {locator}", message, loglevel)
@keyword
def page_should_not_contain_image(self, locator, message=None, loglevel="TRACE"):
self.base(locator, f'Page did not contain "{locator}"', f" {locator}", message, loglevel)
@keyword
def get_element_count(self, locator):
return self.base(locator, f'element "{locator}" count', f"Element: {locator}")
@keyword
def add_location_strategy(self, strategy_name, strategy_keyword, persist=False):
self.base("", f"Strategy '{strategy_name} was added'", strategy_name, strategy_keyword, persist)
@keyword
def remove_location_strategy(self, strategy_name):
self.base("", f"Strategy '{strategy_name}' was removed", f"Removed {strategy_name}", strategy_name)
# ELEMENTS END #
# ALERTS #
@keyword
def input_text_into_alert(self, text, action=ACCEPT, timeout=None):
self.base("", f"Typed {text} into alert\nAction used: {action}", f"Text: {text}", text, action, timeout)
@keyword
def alert_should_be_present(self, text="", action=ACCEPT, timeout=None):
self.base("", f"Action used: {action}", "", text, action, timeout)
@keyword
def alert_should_not_be_present(self, action=ACCEPT, timeout=0):
self.base("", f"Action used: {action}", "", action, timeout)
@keyword
def handle_alert(self, action=ACCEPT, timeout=None):
return self.base("", f"Alert handled with action: {action}", "", action, timeout)
# ALERTS END #
# COOKIES #
@keyword
def delete_all_cookies(self):
self.base("", "Deleted all cookies", "")
@keyword
def delete_cookie(self, name):
self.base("", "Deleted all cookies", f"{name}", name)
@keyword
def get_cookies(self, as_dict=False):
return self.base("", "Cookies", "", as_dict)
@keyword
def get_cookie(self, name):
return self.base("", "Cookie", f"{name}", name)
@keyword
def add_cookie(self, name, value, path=None, domain=None, secure=None, expiry=None):
self.base("", f"Added cookie: {name} with value: {value}", f"{name}", name, value, path, domain, secure, expiry)
# COOKIES END #
# JAVASCRIPT #
@keyword
def execute_javascript(self, *code):
return self.base("", f"Executed {code}", f"{code}", *code)
@keyword
def execute_async_javascript(self, *code):
return self.base("", f"Executed {code} Asynchronously", f"{code}", *code)
# JAVASCRIPT END #
# RUN ON FAILURE #
@keyword
def register_keyword_to_run_on_failure(self, keyword):
return self.base("", "Previous keyword", f"{keyword}", keyword)
# RUN ON FAILURE END #
# TABLE ELEMENT #
@keyword
def get_table_cell(self, locator, row, column, loglevel="TRACE"):
return self.base(locator, "Cell text", f"{locator} at Row: {row}, Col: {column}", row, column, loglevel)
@keyword
def table_cell_should_contain(self, locator, row, column, expected, loglevel="TRACE"):
self.base(
locator,
f"Cell at row: {row} and column {column} contained {expected}",
f"{locator} at Row: {row}, Col: {column}",
row,
column,
expected,
loglevel,
)
@keyword
def table_column_should_contain(self, locator, column, expected, loglevel="TRACE"):
self.base(locator, f"Column {column} contained {expected}", f"{locator} Col: {column}", column, expected, loglevel)
@keyword
def table_footer_should_contain(self, locator, expected, loglevel="TRACE"):
self.base(locator, f"Footer contained {expected}", f"Footer: {locator}, Expected: {expected}", expected, loglevel)
@keyword
def table_header_should_contain(self, locator, expected, loglevel="TRACE"):
self.base(locator, f"Header contained {expected}", f"Header: {locator}, Expected: {expected}", expected, loglevel)
@keyword
def table_row_should_contain(self, locator, row, expected, loglevel="TRACE"):
self.base(locator, f"Row {row} contained {expected}", f"{locator} Row: {row}", row, expected, loglevel)
@keyword
def table_should_contain(self, locator, expected, loglevel="TRACE"):
self.base(locator, f"Table contained {expected}", f"Table: {locator}, Expected: {expected}", expected, loglevel)
# TABLE ELEMENT END #
# WAITING #
@keyword
def wait_for_condition(self, condition, timeout=None, error=None):
message = self._set_message(timeout)
self.base("", f"Condition: '{condition}' was met {message}", f"{condition}", condition, timeout, error)
@keyword
def wait_until_location_is(self, expected, timeout=None, message=None):
message = self._set_message(timeout)
self.base("", f"Location was '{expected}' {message}", f"{expected}", expected, timeout, message)
@keyword
def wait_until_location_is_not(self, location, timeout=None, message=None):
message = self._set_message(timeout)
self.base("", f"Location was not '{location}' {message}", f"{location}", location, timeout, message)
@keyword
def wait_until_location_contains(self, expected, timeout=None, message=None):
message = self._set_message(timeout)
self.base("", f"Location contained '{expected}' {message}", f"{expected}", expected, timeout, message)
@keyword
def wait_until_location_does_not_contain(self, location, timeout=None, message=None):
message = self._set_message(timeout)
self.base("", f"Location does not contain '{location}' {message}", f"{location}", location, timeout, message)
@keyword
def wait_until_page_contains(self, text, timeout=None, error=None):
message = self._set_message(timeout)
self.base("", f"Page contained '{text}' {message}", f" {text}", text, timeout, error)
@keyword
def wait_until_page_does_not_contain(self, text, timeout=None, error=None):
message = self._set_message(timeout)
self.base("", f"Page does not contain '{text}' {message}", f" {text}", text, timeout, error)
@keyword
def wait_until_page_contains_element(self, locator, timeout=None, error=None, limit=None):
message = self._set_message(timeout)
self.base(locator, f"Page contained '{locator}' {message}", f" {locator}", timeout, error, limit)
@keyword
def wait_until_page_does_not_contain_element(self, locator, timeout=None, error=None, limit=None):
message = self._set_message(timeout)
self.base(locator, f"Page does not contain '{locator}' {message}", f" {locator}", timeout, error, limit)
@keyword
def wait_until_element_is_visible(self, locator, timeout=None, error=None):
message = self._set_message(timeout)
self.base(locator, f"Element '{locator}' was visible {message}", f" {locator}", timeout, error)
@keyword
def wait_until_element_is_not_visible(self, locator, timeout=None, error=None):
message = self._set_message(timeout)
self.base(locator, f"Element '{locator}' was not visible {message}", f" {locator}", timeout, error)
@keyword
def wait_until_element_is_enabled(self, locator, timeout=None, error=None):
message = self._set_message(timeout)
self.base(locator, f"Element '{locator}' was enabled {message}", f" {locator}", timeout, error)
@keyword
def wait_until_element_contains(self, locator, text, timeout=None, error=None):
message = self._set_message(timeout)
self.base(locator, f"Element '{locator}' contained {text} {message}", f" {text}", text, timeout, error)
@keyword
def wait_until_element_does_not_contain(self, locator, text, timeout=None, error=None):
message = self._set_message(timeout)
self.base(locator, f"Element '{locator}' does not contain {text} {message}", f" {text}", text, timeout, error)
def _set_message(self, timeout):
return "" if timeout is None else f"(timeout: {timeout} seconds)"
# WAITING END #
# WINDOW #
@keyword
def select_window(self, locator="MAIN", timeout=None):
return self.base(locator, f"Switched to {locator}", "", timeout)
@keyword
def switch_window(self, locator="MAIN", timeout=None, browser="CURRENT"):
return self.base(locator, f"Switched to {locator}", f"{browser}", timeout, browser)
@keyword
def close_window(self):
self.base("", "Window closed", "")
@keyword
def get_window_handles(self, browser="CURRENT"):
return self.base("", "Window Handles", "", browser)
@keyword
def get_window_identifiers(self, browser="CURRENT"):
return self.base("", "Window Identifiers", "", browser)
@keyword
def get_window_names(self, browser="CURRENT"):
return self.base("", "Window Names", "", browser)
@keyword
def get_window_titles(self, browser="CURRENT"):
return self.base("", "Window Titles", "", browser)
@keyword
def get_locations(self, browser="CURRENT"):
return self.base("", "All Locations", "", browser)
@keyword
def maximize_browser_window(self):
self.base("", "Window Maximized", "")
@keyword
def get_window_size(self, inner=False):
return self.base("", "Window size", "", inner)
@keyword
def set_window_size(self, width, height, inner=False):
self.base("", f"Size set to {width}, {height}", f"Width: {width}, Height: {height}", width, height, inner)
@keyword
def get_window_position(self):
return self.base("", "Window position", "")
@keyword
def set_window_position(self, x, y):
self.base("", f"Position set to {x}, {y}", f"X: {x}, Y: {y}", x, y)
# WINDOW END #
# FRAMES #
@keyword
def select_frame(self, locator):
self.base(locator, f"Switched from to {locator}", f"{locator}")
@keyword
def unselect_frame(self):
self.base("", "Returned to main frame", "")
@keyword
def current_frame_should_contain(self, text, loglevel="TRACE"):
self.base("", f"Current frame contains {text}", f"{text}", text, loglevel)
@keyword
def current_frame_should_not_contain(self, text, loglevel="TRACE"):
self.base("", f"Current frame does not contain {text}", f"{text}", text, loglevel)
@keyword
def frame_should_contain(self, locator, text, loglevel="TRACE"):
self.base(locator, f"{locator} contains {text}", f"Frame: {locator}, Text: {text}", text, loglevel)
# FRAMES END #
# FORM ELEMENT #
@keyword
def submit_form(self, locator=None):
self.base(locator, "Form submitted", f"{locator}")
@keyword
def checkbox_should_be_selected(self, locator):
self.base(locator, f"Checked box {locator} is selected", f"{locator}")
@keyword
def checkbox_should_not_be_selected(self, locator):
self.base(locator, f"Checked box {locator} is not selected", f"{locator}")
@keyword
def page_should_contain_checkbox(self, locator, message=None, loglevel="TRACE"):
self.base(locator, f"Page contains checkbox {locator}", f"{locator}", message, loglevel)
@keyword
def page_should_not_contain_checkbox(self, locator, message=None, loglevel="TRACE"):
self.base(locator, f"Page does not contain checkbox {locator}", f"{locator}", message, loglevel)
@keyword
def select_checkbox(self, locator):
self.base(locator, "Checkbox selected", f"{locator}")
@keyword
def unselect_checkbox(self, locator):
self.base(locator, "Checkbox unselected", | |
= False
os.system("rm -rf ip_test_log")
print "\nTerminating switches..."
print "\nTerminating routers..."
print "\nTerminating UMLs..."
print "\nCleaning the interprocess message queues"
# batch_ipcrm.clean_ipc_queues()
if brute_force:
# since we are working on remote machines, we don't care about
# the process there, so, we just ssh and kill -9 -1 (this kills
# all the process linked to the user
for i in range(0, len(ips[0]), 2):
os.system("ssh" + SSHOPTS + " " + ips[0][i] + " kill -9 -1")
if (not options.keepOld):
print "\nDeleting GINI related files on remote machine %s...\n" % ips[0][i]
command = " rm -rf %s/GINI" % ips[0][i + 1]
os.system("ssh" + SSHOPTS + " " + ips[0][i] + command)
return True
else:
for i in range(0, len(ips[0]), 2):
os.system("ssh" + SSHOPTS + " " + ips[0][i] + " killall -13 -u %s -q uswitch" % os.getenv("USER"))
os.system("ssh" + SSHOPTS + " " + ips[0][i] + " killall -u %s -q grouter glinux" % os.getenv("USER"))
if (not options.keepOld):
print "\nDeleting GINI related files on remote machine %s...\n" % ips[0][i]
time.sleep(0.5)
command = " rm -rf %s/GINI" % ips[0][i + 1]
os.system("ssh" + SSHOPTS + " " + ips[0][i] + command)
return True
# adapted from gloader with modifications
def checkProcAlive(procName, ipdirs):
alive = False
# grep the GINI processes
command = " ps aux | grep %s > %s" % (procName, GINI_TMP_FILE)
for i in range(0, len(ipdirs), 2):
os.system("ssh" + SSHOPTS + ipdirs[i] + command)
# analyse the grepped output
inFile = open(GINI_TMP_FILE)
line = inFile.readline()
while (line):
if (line.find("grep") == -1):
# don't consider the "grep" line
userName = os.environ["USER"]
lineParts = line.split()
if (lineParts[0] == userName):
# consider only the instances with the current user
alive = True
print "There is a live GINI %s on machine %s" % (procName, ipdirs[i])
line = inFile.readline()
inFile.close()
# clean up
os.remove(GINI_TMP_FILE)
return alive
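# Example (illustrative): checkProcAlive(GR_PROG_BIN, ips[0]) is True when a GINI
# router process owned by the current user is running on any of the listed hosts.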
# adapted from gloader with modifications
def writeSrcFile(options):
"write the configuration in the setup file"
outFile = open(SRC_FILENAME, "w")
outFile.write("%s\n" % options.xmlFile)
outFile.write("%s\n" % options.switchDir)
outFile.write("%s\n" % options.routerDir)
outFile.write("%s\n" % options.umlDir)
outFile.write("%s\n" % options.binDir)
outFile.write("%s\n" % options.ipSpecs)
outFile.close()
# taken from gloader
def deleteSrcFile():
"delete the setup file"
if (os.access(SRC_FILENAME, os.W_OK)):
os.remove(SRC_FILENAME)
else:
print "Could not delete the GINI setup file"
# adapted from gloader with modifications
def checkAliveGini(ips):
"check any of the gini components already running"
# Modified to check every machine in our ipSpecs file
result = False
if checkProcAlive(VS_PROG_BIN, ips[0]):
result = True
if checkProcAlive(VM_PROG_BIN, ips[0]):
result = True
if checkProcAlive(GR_PROG_BIN, ips[0]):
result = True
return result
#### -------------- MAIN start ----------------####
# create the program processor. This
# 1. accepts and process the command line options
# 2. creates XML processing engine, that in turn
# a) validates the XML file
# b) extracts the DOM object tree
# c) populates the GINI network class library
# d) performs some semantic/syntax checkings on
# the extracted specification
# e) validates the IP file for distribution
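# `old` selects the legacy code path below that reads host/directory pairs from
# the ipSpecs file; when False, component placement is taken from the
# <topology>_rdist file generated alongside the XML specification.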
old = False
myProg = Start(sys.argv[0], SRC_FILENAME)
if (not myProg.processOptions(sys.argv[1:])):
sys.exit(1)
options = myProg.options
# Get the valid IPs and directories from the ipSpecs file
# Also check if the IPs and directories are valid
# we validate by: scp a file into given machine and directory
# ssh and remove the file. Once this is validated, we don't
# have to error-check these operations anymore
if old:
ipfilehandle = open(options.ipSpecs, 'r')
lines = ipfilehandle.readlines()
iptest = open("ip_test_log", 'w')
iptest.close()
ginitest = open("gini_ip_test", 'w')
ginitest.write("This is a test file\nIt should not be here\nIt should have been deleted automatically\nDelete it if you can read this!!!")
ginitest.close()
ipdircombos = []
res = False
for line in lines:
a = line.split("\n")
b = a[0].split(":")
ipdircombos.append(b[0])
ipdircombos.append(b[1])
if ((not myProg.undistOpt) or (not options.keepOld)):
os.system("ssh" + SSHOPTS + b[0] + " rm -rf " + b[1] + "/GINI")
time.sleep(GROUTER_WAIT)
os.system("ssh" + SSHOPTS + b[0] + " mkdir " + b[1] + "/GINI")
i = os.system("scp" + SSHOPTS + "gini_ip_test " + a[0] + "/GINI/ >> ip_test_log")
if (not i == 0):
print "Problem with machine or directory %s" % a[0]
res = True
if (i == 0):
os.system("ssh" + SSHOPTS + b[0] + " rm -rf " + b[1] + "/GINI/gini_ip_test >> ip_test_log")
print "Machine and directory valid on %s" % a[0]
os.system("rm -rf gini_ip_test")
ipfilehandle.close()
if (res):
sys.exit(1)
# get the populated GINI network class
# its structure is the same as the XML specification
myGINI = myProg.giniNW
# We don't distribute switches
if (len(myGINI.switches) > 0):
print "\nCannot distriute switches...sorry"
print "These cannot be in the topology"
sys.exit(1)
# Let the user know about the number of IPs
total_ips_req = len(myGINI.vr) + len(myGINI.vm)
total_ips_giv = len(ipdircombos) / 2
# if (total_ips_req > total_ips_giv):
# print "\nThe given IPs aren't enough"
# print "There will be more than one GINI component on some machines\n"
ipvrcombos = []
ipvmcombos = []
ipcompcombos = []
j = 0
for i in range(len(myGINI.vr)):
ipvrcombos.append(ipdircombos[j])
ipvrcombos.append(myGINI.vr[i])
ipcompcombos.append(ipdircombos[j])
ipcompcombos.append(myGINI.vr[i].name)
j = (j + 2) % len(ipdircombos)
for i in range(len(myGINI.vm)):
ipvmcombos.append(ipdircombos[j])
ipvmcombos.append(myGINI.vm[i])
ipcompcombos.append(ipdircombos[j])
ipcompcombos.append(myGINI.vm[i].name)
j = (j + 2) % len(ipdircombos)
else:
if debug_mode:
print "checkpoint 1"
ipdircombos = []
ipvrcombos = []
ipvmcombos = []
ipvscombos = []
ipcompcombos = []
dev_dic = {}
hosts = []
rdfile = options.xmlFile[0:len(options.xmlFile)-4] + "_rdist"
rdhandle = open(rdfile, "r")
for line in rdhandle.readlines():
parts = line.strip().split(",")
if int(parts[1]):
if hosts.count(parts[2]):
pass
else:
hosts.append(parts[2])
for i in range(3, len(parts)):
dev_dic[parts[i]] = parts[2].split(":")[0]
rdhandle.close()
ginitest = open("gini_ip_test", 'w')
ginitest.write("This is a test file\nIt should not be here\nIt should have been deleted automatically\nDelete it if you can read this!!!")
ginitest.close()
res = False
if debug_mode:
print "checkpoint 2"
for host in hosts:
hostpath = host.split(":")
ipdircombos.append(hostpath[0])
if len(hostpath) < 2:
hostlogin = hostpath[0].split("@")
if len(hostlogin) < 2:
whoami = os.getenv("USER")
else:
whoami = hostlogin[0]
newpath = "/home/%s/gtemp" % whoami
hostpath.append(newpath)
host += ":" + hostpath[1]
if not myProg.undistOpt:
print "Warning, invalid remote path specified, defaulting to %s" % newpath
os.system("ssh" + SSHOPTS + hostpath[0] + " mkdir " + hostpath[1] + " 2> /dev/null")
else:
#os.system("ssh" + SSHOPTS + hostpath[0] + " rm -rf " + hostpath[1] + " 2> /dev/null")
pass
ipdircombos.append(hostpath[1])
if ((not myProg.undistOpt) and (not options.keepOld)):
os.system("ssh" + SSHOPTS + hostpath[0] + " rm -rf " + hostpath[1] + "/GINI")
time.sleep(GROUTER_WAIT)
os.system("ssh" + SSHOPTS + hostpath[0] + " mkdir " + hostpath[1] + "/GINI")
i = os.system("scp" + SSHOPTS + "gini_ip_test " + host + "/GINI/ >> ip_test_log")
if (not i == 0):
print "Problem with machine or directory %s" % host
res = True
if (i == 0):
os.system("ssh" + SSHOPTS + hostpath[0] + " rm -rf " + hostpath[1] + "/GINI/gini_ip_test >> ip_test_log")
print "Machine and directory valid on %s" % host
os.system("rm -rf gini_ip_test")
if (res):
sys.exit(1)
# get the populated GINI network class
# its structure is the same as the XML specification
myGINI = myProg.giniNW
# We don't distribute wireless components
if len(myGINI.vwr) > 0 or len(myGINI.vmb) > 0:
print "\nCannot distriute wireless devices...sorry"
print "These cannot be in the topology"
sys.exit(1)
# Let the user know about the number of IPs
total_ips_req = len(myGINI.vr) + len(myGINI.vm)
total_ips_giv = len(ipdircombos) / 2
# if (total_ips_req > total_ips_giv):
# print "\nThe given IPs aren't enough"
# print "There will be more than one GINI component on some machines\n"
if debug_mode:
print "checkpoint 3"
for router in myGINI.vr:
ipvrcombos.append(dev_dic[router.name])
ipvrcombos.append(router)
ipcompcombos.append(dev_dic[router.name])
ipcompcombos.append(router.name)
for uml in myGINI.vm:
ipvmcombos.append(dev_dic[uml.name])
ipvmcombos.append(uml)
ipcompcombos.append(dev_dic[uml.name])
ipcompcombos.append(uml.name)
for switch in myGINI.switches:
ipvscombos.append(dev_dic[switch.name])
ipvscombos.append(switch)
ipcompcombos.append(dev_dic[switch.name])
ipcompcombos.append(switch.name)
# Calculate switch port properties. If there is a
# link in the GINI topology between two components,
# then the switches for these components must have
# the same port number and their respective remote
# addresses should refer to each other
if debug_mode:
print "checkpoint 4"
ipports = []
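# ipports is a flat list with four consecutive entries per interface:
# owning device name, interface name, link target, and a port number
# placeholder (initialised to 0). The switch loop below matches each
# switch name against the stored link targets.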
for i in myGINI.vm:
for j in i.interfaces:
ipports.append(i.name)
ipports.append(j.name)
ipports.append(j.target)
ipports.append(0)
for i in myGINI.vr:
for j in i.netIF:
ipports.append(i.name)
ipports.append(j.name)
ipports.append(j.target)
ipports.append(0)
for i in myGINI.switches:
for j in range(2, len(ipports), 4):
if ipports[j] == i.name:
<filename>masakari/hacking/checks.py<gh_stars>10-100
# Copyright (c) 2016, NTT Data
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
from hacking import core
"""
Guidelines for writing new hacking checks
- Use only for Masakari specific tests. OpenStack general tests
should be submitted to the common 'hacking' module.
- Pick numbers in the range M3xx. Find the current test with
the highest allocated number and then pick the next value.
- Keep the test method code in the source file ordered based
on the M3xx value.
- List the new rule in the top level HACKING.rst file
- Add test cases for each new rule to masakari/tests/unit/test_hacking.py
"""
UNDERSCORE_IMPORT_FILES = []
session_check = re.compile(r"\w*def [a-zA-Z0-9].*[(].*session.*[)]")
cfg_re = re.compile(r".*\scfg\.")
cfg_opt_re = re.compile(r".*[\s\[]cfg\.[a-zA-Z]*Opt\(")
rule_default_re = re.compile(r".*RuleDefault\(")
policy_enforce_re = re.compile(r".*_ENFORCER\.enforce\(")
asse_trueinst_re = re.compile(
r"(.)*assertTrue\(isinstance\((\w|\.|\'|\"|\[|\])+, "
r"(\w|\.|\'|\"|\[|\])+\)\)")
asse_equal_type_re = re.compile(
r"(.)*assertEqual\(type\((\w|\.|\'|\"|\[|\])+\), "
r"(\w|\.|\'|\"|\[|\])+\)")
asse_equal_in_end_with_true_or_false_re = re.compile(
r"assertEqual\("r"(\w|[][.'\"])+ in (\w|[][.'\", ])+, (True|False)\)")
asse_equal_in_start_with_true_or_false_re = re.compile(
r"assertEqual\("r"(True|False), (\w|[][.'\"])+ in (\w|[][.'\", ])+\)")
asse_equal_end_with_none_re = re.compile(
r"assertEqual\(.*?,\s+None\)$")
asse_equal_start_with_none_re = re.compile(
r"assertEqual\(None,")
# NOTE(abhishekk): The next two regexes are kept separate for readability.
# asse_true_false_with_in_or_not_in regex checks
# assertTrue/False(A in B) cases where B argument has no spaces
# asse_true_false_with_in_or_not_in_spaces regex checks cases
# where B argument has spaces and starts/ends with [, ', ".
# For example: [1, 2, 3], "some string", 'another string'.
# We have to keep these regexes separate to avoid false positive
# results. B argument should have spaces only if it starts
# with [, ", '. Otherwise checking of string
# "assertFalse(A in B and C in D)" will be false positives.
# In this case B argument is "B and C in D".
asse_true_false_with_in_or_not_in = re.compile(
r"assert(True|False)\("r"(\w|[][.'\"])+( not)? in (\w|[][.'\",])"
r"+(, .*)?\)")
asse_true_false_with_in_or_not_in_spaces = re.compile(
r"assert(True|False)"r"\((\w|[][.'\"])+( not)? in [\[|'|\"](\w|"
r"[][.'\", ])+[\[|'|\"](, .*)?\)")
asse_raises_regexp = re.compile(r"assertRaisesRegexp\(")
conf_attribute_set_re = re.compile(r"CONF\.[a-z0-9_.]+\s*=\s*\w")
translated_log = re.compile(
r"(.)*LOG\.(audit|error|info|critical|exception)"
r"\(\s*_\(\s*('|\")")
mutable_default_args = re.compile(r"^\s*def .+\((.+=\{\}|.+=\[\])")
string_translation = re.compile(r"[^_]*_\(\s*('|\")")
underscore_import_check = re.compile(r"(.)*import _(.)*")
import_translation_for_log_or_exception = re.compile(
r"(.)*(from\smasakari.i18n\simport)\s_")
# We need this for cases where they have created their own _ function.
custom_underscore_check = re.compile(r"(.)*_\s*=\s*(.)*")
dict_constructor_with_list_copy_re = re.compile(r".*\bdict\((\[)?(\(|\[)")
http_not_implemented_re = re.compile(r"raise .*HTTPNotImplemented\(")
spawn_re = re.compile(
r".*(eventlet|greenthread)\.(?P<spawn_part>spawn(_n)?)\(.*\)")
contextlib_nested = re.compile(r"^with (contextlib\.)?nested\(")
doubled_words_re = re.compile(
r"\b(then?|[iao]n|i[fst]|but|f?or|at|and|[dt]o)\s+\1\b")
yield_not_followed_by_space = re.compile(r"^\s*yield(?:\(|{|\[|\"|').*$")
_all_log_levels = {'critical', 'error', 'exception', 'info',
'warning', 'debug'}
_all_hints = {'_', '_LE', '_LI', '_LW', '_LC'}
log_translation_re = re.compile(
r".*LOG\.(%(levels)s)\(\s*(%(hints)s)\(" % {
'levels': '|'.join(_all_log_levels),
'hints': '|'.join(_all_hints),
})
@core.flake8ext
def no_db_session_in_public_api(logical_line, filename):
if "db/api.py" in filename:
if session_check.match(logical_line):
yield (0, "M301: public db api methods may not accept"
" session")
@core.flake8ext
def use_timeutils_utcnow(logical_line, filename):
# tools are OK to use the standard datetime module
if "/tools/" in filename:
return
msg = ("M302: timeutils.utcnow() must be used instead of "
"datetime.%s()")
datetime_funcs = ['now', 'utcnow']
for f in datetime_funcs:
pos = logical_line.find('datetime.%s' % f)
if pos != -1:
yield (pos, msg % f)
@core.flake8ext
def capital_cfg_help(logical_line, tokens):
msg = "M303: capitalize help string"
if cfg_re.match(logical_line):
for t in range(len(tokens)):
if tokens[t][1] == "help":
txt = tokens[t + 2][1]
if len(txt) > 1 and txt[1].islower():
yield (0, msg)
@core.flake8ext
def assert_true_instance(logical_line):
"""Check for assertTrue(isinstance(a, b)) sentences
M305
"""
if asse_trueinst_re.match(logical_line):
yield (0, "M305: assertTrue(isinstance(a, b)) sentences "
"not allowed")
@core.flake8ext
def assert_equal_type(logical_line):
"""Check for assertEqual(type(A), B) sentences
M306
"""
if asse_equal_type_re.match(logical_line):
yield (0, "M306: assertEqual(type(A), B) sentences not allowed")
@core.flake8ext
def no_translate_logs(logical_line):
"""Check for 'LOG.*(_*("'
OpenStack no longer supports log translation, so we shouldn't
translate logs.
* This check assumes that 'LOG' is a logger.
M308
"""
if log_translation_re.match(logical_line):
yield (0, "M308: Log messages should not be translated")
@core.flake8ext
def no_import_translation_in_tests(logical_line, filename):
"""Check for 'from masakari.i18n import _'
M309
"""
if 'masakari/tests/' in filename:
res = import_translation_for_log_or_exception.match(logical_line)
if res:
yield (0, "M309 Don't import translation in tests")
@core.flake8ext
def no_setting_conf_directly_in_tests(logical_line, filename):
"""Check for setting CONF.* attributes directly in tests
The value can leak out of tests affecting how subsequent tests run.
Using self.flags(option=value) is the preferred method to temporarily
set config options in tests.
M310
"""
if 'masakari/tests/' in filename:
res = conf_attribute_set_re.match(logical_line)
if res:
yield (0, "M310: Setting CONF.* attributes directly in "
"tests is forbidden. Use self.flags(option=value) "
"instead")
@core.flake8ext
def no_mutable_default_args(logical_line):
msg = "M315: Method's default argument shouldn't be mutable!"
if mutable_default_args.match(logical_line):
yield (0, msg)
@core.flake8ext
def check_explicit_underscore_import(logical_line, filename):
"""Check for explicit import of the _ function
We need to ensure that any files that are using the _() function
to translate logs are explicitly importing the _ function. We
can't trust unit test to catch whether the import has been
added so we need to check for it here.
"""
# Build a list of the files that have _ imported. No further
# checking needed once it is found.
if filename in UNDERSCORE_IMPORT_FILES:
pass
elif (underscore_import_check.match(logical_line) or
custom_underscore_check.match(logical_line)):
UNDERSCORE_IMPORT_FILES.append(filename)
elif (translated_log.match(logical_line) or
string_translation.match(logical_line)):
yield (0, "M316: Found use of _() without explicit "
"import of _ !")
@core.flake8ext
def use_jsonutils(logical_line, filename):
# tools are OK to use the standard json module
if "/tools/" in filename:
return
msg = "M317: jsonutils.%(fun)s must be used instead of json.%(fun)s"
if "json." in logical_line:
json_funcs = ['dumps(', 'dump(', 'loads(', 'load(']
for f in json_funcs:
pos = logical_line.find('json.%s' % f)
if pos != -1:
yield (pos, msg % {'fun': f[:-1]})
@core.flake8ext
def assert_true_or_false_with_in(logical_line):
"""Check for assertTrue/False(A in B), assertTrue/False(A not in B),
assertTrue/False(A in B, message) or assertTrue/False(A not in B, message)
sentences.
M318
"""
res = (asse_true_false_with_in_or_not_in.search(logical_line) or
asse_true_false_with_in_or_not_in_spaces.search(logical_line))
if res:
yield (0, "M318: Use assertIn/NotIn(A, B) rather than "
"assertTrue/False(A in/not in B) when checking collection "
"contents.")
@core.flake8ext
def assert_raises_regexp(logical_line):
"""Check for usage of deprecated assertRaisesRegexp
M319
"""
res = asse_raises_regexp.search(logical_line)
if res:
yield (0, "M319: assertRaisesRegex must be used instead "
"of assertRaisesRegexp")
@core.flake8ext
def dict_constructor_with_list_copy(logical_line):
msg = ("M320: Must use a dict comprehension instead of a dict "
"constructor with a sequence of key-value pairs.")
if dict_constructor_with_list_copy_re.match(logical_line):
yield (0, msg)
@core.flake8ext
def assert_equal_in(logical_line):
"""Check for assertEqual(A in B, True), assertEqual(True, A in B),
assertEqual(A in B, False) or assertEqual(False, A in B) sentences
M321
"""
res = (asse_equal_in_start_with_true_or_false_re.search(logical_line) or
asse_equal_in_end_with_true_or_false_re.search(logical_line))
if res:
yield (0, "M321: Use assertIn/NotIn(A, B) rather than "
"assertEqual(A in B, True/False) when checking collection "
"contents.")
@core.flake8ext
def check_greenthread_spawns(logical_line, filename):
"""Check for use of greenthread.spawn(), greenthread.spawn_n(),
eventlet.spawn(), and eventlet.spawn_n()
M322
"""
msg = ("M322: Use masakari.utils.%(spawn)s() rather than "
"greenthread.%(spawn)s() and eventlet.%(spawn)s()")
if "masakari/utils.py" in filename or "masakari/tests/" in filename:
return
match = re.match(spawn_re, logical_line)
if match:
yield (0, msg % {'spawn': match.group('spawn_part')})
@core.flake8ext
def check_no_contextlib_nested(logical_line, filename):
msg = ("M323: contextlib.nested is deprecated. With Python 2.7"
"and later the with-statement supports multiple nested objects. "
"See https://docs.python.org/2/library/contextlib.html"
"#contextlib.nested for more information. masakari.test.nested() "
"is an alternative as well.")
if contextlib_nested.match(logical_line):
yield (0, msg)
@core.flake8ext
def check_config_option_in_central_place(logical_line, filename):
msg = ("M324: Config options should be in the central location "
"'/masakari/conf/*'. Do not declare new config options outside "
"of that folder.")
# That's the correct location
if "masakari/conf/" in filename:
return
# (pooja_jadhav) All config options (with exceptions that are clarified
# in the list below) were moved to the central place. List below is for
# all options that were impossible to move without doing a major impact
# on code. Add full path to a module or folder.
conf_exceptions = [
# CLI opts are allowed to be outside of masakari/conf directory
'masakari/cmd/manage.py',
]
if any(f in filename for f in conf_exceptions):
return
if cfg_opt_re.match(logical_line):
yield (0, msg)
@core.flake8ext
def check_doubled_words(physical_line, filename):
"""Check for the common doubled-word typos
M325
"""
msg = ("M325: Doubled word '%(word)s' typo found")
match = re.search(doubled_words_re, physical_line)
if match:
return (0, msg % {'word': match.group(1)})
@core.flake8ext
def check_python3_no_iteritems(logical_line):
msg = ("M326: Use dict.items() instead of dict.iteritems().")
if re.search(r".*\.iteritems\(\)", logical_line):
yield (0, msg)
@core.flake8ext
def check_python3_no_iterkeys(logical_line):
msg = ("M327: Use 'for key in dict' instead of 'for key in "
"dict.iterkeys()'.")
if re.search(r".*\.iterkeys\(\)", logical_line):
yield (0, msg)
@core.flake8ext
def check_python3_no_itervalues(logical_line):
msg = ("M328: Use dict.values() instead of dict.itervalues().")
if re.search(r".*\.itervalues\(\)", logical_line):
yield (0, msg)
@core.flake8ext
def no_os_popen(logical_line):
"""Disallow 'os.popen('
    Deprecated library function os.popen(). Replace it using
self.dir == 'down':
self.x_exp = k.x - 26
self.y_exp = k.y
def draw_explosion_little(self, screen, elf):
if self.allow_explosion_little and elf:
if self.frame_l == 0:
screen.blit(EXPLOSION_1_IMG,(self.x_exp, self.y_exp))
if self.frame_l == 1:
screen.blit(EXPLOSION_2_IMG,(self.x_exp, self.y_exp))
if self.frame_l == 2:
screen.blit(EXPLOSION_1_IMG,(self.x_exp, self.y_exp))
if self.frame_l >= 2:
self.allow_explosion_little = False
elf = False
                self.frame_l = 0
else:
self.frame_l += 1
def draw_explosion_hard(self, screen, ehf):
if self.allow_explosion_hard and ehf:
if self.frame_h == 0:
screen.blit(EXPLOSION_2_IMG,(self.x_exp, self.y_exp))
if self.frame_h == 1:
screen.blit(EXPLOSION_3_IMG,(self.x_exp, self.y_exp))
if self.frame_h == 2:
screen.blit(EXPLOSION_2_IMG,(self.x_exp, self.y_exp))
if self.frame_h >= 2:
ehf = False
self.allow_explosion_hard = False
self.frame_h = 0
else:
self.frame_h += 1
class Mapping:
def __init__(self):
self.x = 0
self.y = 0
self.frames = 0
self.convert_entities()
def convert_entities(self):
for row in MAPPING:
for col in row:
if col == 'H':
BACKGROUND_RECT.append(pygame.Rect((self.x,self.y,SQM,SQM)))
elif col == 'G':
GRASS_RECT.append(pygame.Rect((self.x,self.y,SQM,SQM)))
elif col == 'W':
WATER_RECT.append(pygame.Rect((self.x,self.y,SQM,SQM)))
elif col == 'B':
#BRICK_RECT.append(pygame.Rect((self.x,self.y,SQM,SQM)))
#BRICK_RECT_MANY.append(BRICK_IMG)
#self.convert_entities_mini()
pass
elif col == 'S':
SOLID_RECT.append(pygame.Rect((self.x,self.y,SQM,SQM)))
elif col == '3':
EAGLE_Y.append(pygame.Rect((self.x,self.y,SQM,SQM)))
elif col == '4':
EAGLE_G.append(pygame.Rect((self.x,self.y,SQM,SQM)))
self.x+=SQM
self.y+=SQM
self.x=0
def convert_entities_mini(self):
self.x_mini = self.x
self.y_mini = self.y
for i in range(2):
for j in range(2):
BRICK_RECT_MINI.append(pygame.Rect((self.x_mini,self.y_mini,SQM/2,SQM/2)))
self.x_mini += SQM/2
self.y_mini += SQM/2
self.x_mini = self.x
def draw_props(self, screen):
for x in BACKGROUND_RECT:
#pygame.draw.rect(screen,(89, 89, 89),x)
screen.blit(BACKGROUND_IMG, (x.x,x.y))
for x in GRASS_RECT:
#pygame.draw.rect(screen,(51, 204, 51),x)
screen.blit(GRASS_IMG, (x.x,x.y))
for x in WATER_RECT:
#pygame.draw.rect(screen,(0, 153, 255),x)
if self.frames <= 30:
screen.blit(WATER_1_IMG, (x.x,x.y))
else:
screen.blit(WATER_2_IMG, (x.x,x.y))
'''
for x in BRICK_RECT:
screen.blit(BRICK_IMG, (x.x,x.y))
for x in BRICK_RECT_MINI:
screen.blit(BRICK_IMG_MINI, (x.x,x.y))
'''
for x in SOLID_RECT:
screen.blit(SOLIDWALL_IMG, (x.x,x.y))
for x in EAGLE_Y:
screen.blit(EAGLE_1_IMG, (x.x,x.y))
for x in EAGLE_G:
screen.blit(EAGLE_1_IMG, (x.x,x.y))
self.frames += 1
if self.frames == 60:
self.frames = 0
class Bullet_TY(object):
def __init__(self,x,y,dir):
self.dir = dir
self.x = x
self.y = y
self.vel = 22
if self.dir == 'right':
self.x = x+15
self.y = y+18
self.width = 22
self.height = 16
elif self.dir == 'left':
self.x = x+15
self.y = y+18
self.width = 22
self.height = 16
elif self.dir == 'down':
self.x = x+18
self.y = y+15
self.width = 16
self.height = 22
elif self.dir == 'up':
self.x = x+18
self.y = y+7
self.width = 16
self.height = 22
def move(self):
if self.dir == 'right':
self.x += self.vel
elif self.dir == 'left':
self.x -= self.vel
elif self.dir == 'down':
self.y += self.vel
elif self.dir == 'up':
self.y -= self.vel
def movehitbox(self, rect):
if self.dir == 'right':
rect.x += self.vel
elif self.dir == 'left':
rect.x -= self.vel
elif self.dir == 'down':
rect.y += self.vel
elif self.dir == 'up':
rect.y -= self.vel
def draw(self, screen):
if self.dir == 'right':
self.BULLET_DRAW = BULLET_IMG[3]
elif self.dir == 'left':
self.BULLET_DRAW = BULLET_IMG[2]
elif self.dir == 'down':
self.BULLET_DRAW = BULLET_IMG[1]
elif self.dir == 'up':
self.BULLET_DRAW = BULLET_IMG[0]
screen.blit(self.BULLET_DRAW, (self.x, self.y))
class Tank_Yellow:
def __init__(self):
self.x = 0
self.y = 0
self.actions = [False, False, False, False]
self.TY_face = TANK_YELLOW_IMG[3]
self.TY_face_txt = 'right'
self.tank_yellow_shoot_allow = True
self.tank_yellow_shoot_cooldown = False
self.explosion_l_flag = False
self.explosion_h_flag = False
self.yellow_tank_destroyed = False
self.yellow_tank_invicible = True
self.frames_inv = 0
self.bullet_dir = None
self.eagle_yellows_tank_on_hit_state = False
self.green_tank_on_hit_state = False
self.eagle_greens_tank_on_hit_state = False
self.AI_player = True
self.Human_player = True
for row in MAPPING:
for col in row:
if col == '1':
self.ty_pos_x = self.x
self.ty_pos_y = self.y
self.x+=SQM
self.y+=SQM
self.x=0
self.TY_mask = pygame.Rect(self.ty_pos_x, self.ty_pos_y, 52, 52)
def bind(self, event):
if event.type == KEYDOWN:
if event.key == K_d:
self.actions[0] = True
elif event.key == K_a:
self.actions[1] = True
elif event.key == K_s:
self.actions[2] = True
elif event.key == K_w:
self.actions[3] = True
if event.type == KEYUP:
if event.key == K_d:
self.actions[0] = False
elif event.key == K_a:
self.actions[1] = False
elif event.key == K_s:
self.actions[2] = False
elif event.key == K_w:
self.actions[3] = False
def move_tank(self, action):
self.movement = [0,0]
if action[0]:
self.movement[0] += 8
self.TY_face = TANK_YELLOW_IMG[3]
self.TY_face_txt = 'right'
elif action[1]:
self.movement[0] -= 8
self.TY_face = TANK_YELLOW_IMG[2]
self.TY_face_txt = 'left'
elif action[3]:
self.movement[1] -= 8
self.TY_face = TANK_YELLOW_IMG[0]
self.TY_face_txt = 'up'
elif action[2]:
self.movement[1] += 8
self.TY_face = TANK_YELLOW_IMG[1]
self.TY_face_txt = 'down'
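        # Resolve collisions one axis at a time: apply the horizontal movement
        # and push the hitbox out of any overlap, then do the same vertically,
        # so the tank slides along walls instead of sticking to them.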
self.TY_mask.x += self.movement[0]
self.collisions_h = self.collision_test()
for tile in self.collisions_h:
if self.movement[0] > 0:
self.TY_mask.right = tile.left
if self.movement[0] < 0:
self.TY_mask.left = tile.right
self.TY_mask.y += self.movement[1]
self.collisions_v = self.collision_test()
for tile in self.collisions_v:
if self.movement[1] > 0:
self.TY_mask.bottom = tile.top
if self.movement[1] < 0:
self.TY_mask.top = tile.bottom
self.collisions_sum = [self.collisions_h, self.collisions_v]
def collision_test(self):
colli = []
for back in BACKGROUND_RECT:
if self.TY_mask.colliderect(back):
colli.append(back)
for back in SOLID_RECT:
if self.TY_mask.colliderect(back):
colli.append(back)
for back in BRICK_RECT:
if self.TY_mask.colliderect(back):
colli.append(back)
for back in WATER_RECT:
if self.TY_mask.colliderect(back):
colli.append(back)
for back in EAGLE_Y:
if self.TY_mask.colliderect(back):
colli.append(back)
for back in EAGLE_G:
if self.TY_mask.colliderect(back):
colli.append(back)
for back in BRICK_RECT_MINI:
if self.TY_mask.colliderect(back):
colli.append(back)
return colli
def draw(self, screen, flag_1, flag_2):
if flag_1 is False:
screen.blit(self.TY_face,(self.TY_mask.x,self.TY_mask.y))
if flag_2:
if (self.frames_inv % 4) == 0 or (self.frames_inv % 4) == 1:
screen.blit(INVICIBLE_1_IMG,(self.TY_mask.x,self.TY_mask.y))
elif (self.frames_inv % 4) == 2 or (self.frames_inv % 4) == 3:
screen.blit(INVICIBLE_2_IMG,(self.TY_mask.x,self.TY_mask.y))
if self.frames_inv >= 45:
self.yellow_tank_invicible = False
self.frames_inv += 1
def bind_shoot(self, Flag):
if Flag:
keys = pygame.key.get_pressed()
if keys[pygame.K_r]:
flag_temp = True
self.execute_shoot(flag_temp)
def execute_shoot(self, Flag):
if Flag:
self.frames = 0
self.tank_yellow_shoot_cooldown = True
self.tank_yellow_shoot_allow = False
self.b_ty = Bullet_TY(self.TY_mask.x, self.TY_mask.y, self.TY_face_txt)
BULLETS_Y_objects.append(self.b_ty)
BULLETS_Y_RECT.append(pygame.Rect(self.b_ty.x,self.b_ty.y,self.b_ty.width,self.b_ty.height))
self.OHBY = On_Hit_By_Yellow(self.b_ty.dir)
self.bullet_dir = self.b_ty.dir
def shoot_delay(self, flag):
if flag:
if len(BULLETS_Y_RECT) == 0 and self.frames > 20:
self.tank_yellow_shoot_allow = True
self.tank_yellow_shoot_cooldown = False
self.bullet_dir = None
self.frames += 1
def bullets_onhit(self, TG_MASK, TG_CLASS, TY_CLASS, TG_DEST, TG_INVI, MAPPING, screen):
if len(BULLETS_Y_RECT) >= 1:
for i, e in enumerate(BULLETS_Y_RECT):
self.explosion_h_flag = True
self.explosion_l_flag = True
self.brick_on_hit_state = self.OHBY.brick_on_hit(i, e)
self.background_on_hit_state = self.OHBY.background_on_hit(i, e)
self.green_tank_on_hit_state = self.OHBY.green_tank_on_hit(i, e, TG_MASK, TG_CLASS, TG_DEST, TG_INVI)
self.solid_on_hit_state = self.OHBY.solid_on_hit(i, e)
self.eagle_greens_tank_on_hit_state = self.OHBY.eagle_greens_tank_on_hit(i, e, TG_CLASS, TY_CLASS, MAPPING)
self.eagle_yellows_tank_on_hit_state = self.OHBY.eagle_yellows_tank_on_hit(i, e, TG_CLASS, TY_CLASS, MAPPING)
self.enemys_bullet_on_hit_state = self.OHBY.enemys_bullet_on_hit(i, e)
self.states = [self.brick_on_hit_state,
self.background_on_hit_state,
self.green_tank_on_hit_state,
self.solid_on_hit_state,
self.eagle_greens_tank_on_hit_state,
self.eagle_yellows_tank_on_hit_state,
self.enemys_bullet_on_hit_state]
for xi in self.states:
if xi:
self.OHBY.break_bullet(i)
if self.explosion_l_flag or self.explosion_h_flag:
self.OHBY.draw_explosion_little(screen, self.explosion_l_flag)
self.OHBY.draw_explosion_hard(screen, self.explosion_h_flag)
def yellow_tank_position_relative_with_green_tank(self, TY_mask, TG_mask):
#flags [R,L,U,D]
flags = [False, False, False, False]
if TY_mask.x <= TG_mask.x:
flags[0] = True
if TY_mask.x >= TG_mask.x:
flags[1] = True
if TY_mask.y >= TG_mask.y:
flags[2] = True
if TY_mask.y <= TG_mask.y:
flags[3] = True
return flags
def yellow_eagle_position_relative_with_yellow_tank(self, TY_mask):
#flags [R,L,U,D]
flags = [False, False, False, False]
for i in EAGLE_Y:
if TY_mask.x <= i.x:
flags[0] = True
if TY_mask.x >= i.x:
flags[1] = True
if TY_mask.y >= i.y:
flags[2] = True
if TY_mask.y <= i.y:
flags[3] = True
return flags
def green_eagle_position_relative_with_yellow_tank(self, TY_mask):
#flags [R,L,U,D]
flags = [False, False, False, False]
for i in EAGLE_G:
if TY_mask.x <= i.x:
flags[0] = True
if TY_mask.x >= i.x:
flags[1] = True
if TY_mask.y >= i.y:
flags[2] = True
if TY_mask.y <= i.y:
flags[3] = True
return flags
def yellow_tank_direction(self):
#flags [R,L,U,D]
flags = [False, False, False, False]
if self.TY_face_txt == 'right':
flags[0] = True
elif self.TY_face_txt == 'left':
flags[1] = True
elif self.TY_face_txt == 'up':
flags[2] = True
elif self.TY_face_txt == 'down':
flags[3] = True
return flags
def yellow_tank_bullet_presence(self):
flag = False
if self.tank_yellow_shoot_allow is True:
flag = False
elif self.tank_yellow_shoot_allow is False:
flag = True
return [flag]
def yellow_tank_own_bullet_direction(self, dir, pres):
#flags [R,L,U,D]
flags = [False, False, False, False]
if pres:
if dir == 'right':
flags[0] = True
elif dir == 'left':
flags[1] = True
elif dir == 'up':
flags[2] = True
elif dir == 'down':
flags[3] = True
return flags
def yellow_tank_faced_to_entity_solid(self, dir, TY_MASK, TG_MASK, win):
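        # Cast a ray of 1x1 probe rects from the tank centre (offset +26, +26
        # into the 52x52 sprite) in the faced direction and report whether the
        # ray crosses background, a solid wall, or the enemy tank.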
self.xn = TY_MASK.x + 26
self.yn = TY_MASK.y + 26
if dir[0] is True:
for i in range(44):
self.xn += 16
self.sample = pygame.Rect(self.xn,self.yn,1,1)
pygame.draw.rect(win, (255, 0, 0), self.sample)
self.loop_logic_background = self.yellow_tank_faced_to_entity_loop(self.sample, BACKGROUND_RECT)
self.loop_logic_solid = self.yellow_tank_faced_to_entity_loop(self.sample, SOLID_RECT)
#self.loop_logic_own_eagle= self.yellow_tank_faced_to_entity_loop(self.sample, EAGLE_Y)
#self.loop_logic_enemys_eagle = self.yellow_tank_faced_to_entity_loop(self.sample, EAGLE_G)
self.loop_logic_enemy = self.yellow_tank_faced_to_enemy_loop(self.sample, TG_MASK)
self.logic_array = np.array([self.loop_logic_background, self.loop_logic_solid, self.loop_logic_enemy])
self.logic_array_single = np.where(self.logic_array == True)
if len(self.logic_array_single[0]) >= 1:
return [self.loop_logic_background, self.loop_logic_solid, self.loop_logic_enemy]
unnormalized_shape = shape[:-normalized_ndim]
# test that LN normalizes to mean 0 and stddev 1
ln = nn.LayerNorm(normalized_shape, eps=0).to(device, dtype)
ln.weight.data.fill_(1)
ln.bias.data.fill_(0)
output = ln(x)
out_reshaped = output.view(*(unnormalized_shape + [-1]))
mean = out_reshaped.mean(-1)
var = out_reshaped.var(-1, unbiased=False)
self.assertAlmostEqual(torch.abs(mean.data).mean(), 0, delta=1e-5)
self.assertAlmostEqual(torch.abs(var.data).mean(), 1, delta=1e-5)
# test that LN applies weight and bias correctly
scale, bias = torch.empty(2).uniform_(0.2, 2).tolist()
ln.weight.data.fill_(scale)
ln.bias.data.fill_(bias)
output = ln(x)
out_reshaped = output.view(*(unnormalized_shape + [-1]))
mean = out_reshaped.mean(-1)
var = out_reshaped.var(-1, unbiased=False)
self.assertAlmostEqual(torch.abs(mean.data).mean(), bias, delta=1e-5)
self.assertAlmostEqual(torch.abs(var.data).mean(), scale ** 2, delta=1e-5)
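        # Each entry below maps a normalized_shape to an incompatible input
        # shape; LayerNorm is expected to raise a RuntimeError for all of them.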
bad_norm_shape_input_shape = {
(): (),
(2, 3): (3,),
(2,): (1, 2, 3),
(10,): (2, 3),
10: (2, 3),
}
for norm_shape, input_shape in bad_norm_shape_input_shape.items():
ln = nn.LayerNorm(norm_shape)
input = torch.empty(input_shape, device=device, dtype=dtype).uniform_(0, 10)
self.assertRaises(RuntimeError, lambda: ln(input))
def _test_LayerNorm_cuda_half(self):
input = Variable(torch.empty(2, 3, 3, 2).to("cuda", torch.half).random_(1, 10), requires_grad=True)
m = nn.LayerNorm([3, 2]).to("cuda", torch.half)
output = m(input)
output.sum().backward()
self.assertEqual(output.type(), input.type())
def test_LayerNorm_general(self):
self._test_LayerNorm_general()
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
def test_LayerNorm_general_cuda(self):
self._test_LayerNorm_general("cuda")
self._test_LayerNorm_cuda_half()
def _test_GroupNorm_general(self, device="cpu", dtype=torch.float):
good_shape_g = {
(1, 2, 3, 4): 2,
(2, 3, 10): 3,
(3, 1, 1, 1, 2): 1,
(2, 6, 4, 2, 2): 3,
}
for shape, g in good_shape_g.items():
x = torch.empty(*shape, device=device, dtype=dtype).uniform_(0, 10)
b = shape[0]
c = shape[1]
# test that GN normalizes to mean 0 and stddev 1
gn = nn.GroupNorm(g, c, eps=0).to(device, dtype)
gn.weight.data.fill_(1)
gn.bias.data.fill_(0)
output = gn(x)
out_reshaped = output.view(b, g, -1)
mean = out_reshaped.mean(-1)
var = out_reshaped.var(-1, unbiased=False)
self.assertAlmostEqual(torch.abs(mean).mean(), 0, delta=1e-5)
self.assertAlmostEqual(torch.abs(var).mean(), 1, delta=1e-5)
# test that GN applies weight and bias correctly
scale = torch.empty(c, device=device, dtype=dtype).uniform_(0.2, 2)
bias = torch.empty(c, device=device, dtype=dtype).uniform_(0.2, 2)
gn.weight.data.copy_(scale)
gn.bias.data.copy_(bias)
output = gn(x)
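            # Undo the per-channel affine transform, then check that the
            # underlying normalized activations still have mean 0 and variance 1.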
out_reshaped = output.view(b, c, -1)
out_normed = (out_reshaped - bias.view(c, 1)) / scale.view(c, 1)
out_normed_reshaped = out_normed.view(b, g, -1)
mean = out_normed_reshaped.mean(-1)
var = out_normed_reshaped.var(-1, unbiased=False)
self.assertAlmostEqual(torch.abs(mean).mean(), 0, delta=1e-5)
self.assertAlmostEqual(torch.abs(var).mean(), 1, delta=1e-5)
bad_shape_g = {
(1, 2, 3, 4): 3,
(2, 3, 10): 2,
(3, 1, 1, 1, 2): 10,
(2, 6, 4, 2, 2): 4,
}
for shape, g in bad_shape_g.items():
gn = nn.GroupNorm(g, shape[1])
input = torch.empty(*shape, device=device, dtype=dtype).uniform_(0, 10)
self.assertRaises(RuntimeError, lambda: gn(input))
def _test_GroupNorm_cuda_half(self):
        input = torch.zeros(2, 4, 3, 2, requires_grad=True).cuda().half().random_(1, 10)
m = nn.GroupNorm(2, 4).to("cuda", torch.half)
output = m(input)
output.sum().backward()
self.assertEqual(output.type(), input.type())
def test_GroupNorm_general(self):
self._test_GroupNorm_general(dtype=torch.float)
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
def test_GroupNorm_general_cuda(self):
self._test_GroupNorm_general("cuda", torch.float)
self._test_GroupNorm_cuda_half()
def test_pad(self):
inputs = torch.randn(1, 3, 4, 4, requires_grad=True)
_assertGradAndGradgradChecks(self, lambda x: F.pad(x, (1, 1, 1, 1)), (inputs,))
_assertGradAndGradgradChecks(self, lambda x: F.pad(x, (-1, 1, -2, 1)), (inputs,))
_assertGradAndGradgradChecks(self, lambda x: F.pad(x, (-1, 1, -2, 1), value=2), (inputs,))
self.assertTrue(gradcheck(lambda x: F.pad(x, (-1, 1, -2, 1), mode='replicate'), (inputs,)))
self.assertTrue(gradcheck(lambda x: F.pad(x, (-1, 1, -2, 1), mode='reflect'), (inputs,)))
inputs = torch.randn(1, 2, 3, 4, 4, requires_grad=True)
self.assertTrue(gradcheck(lambda x: F.pad(x, (1, 1, 1, 1, 1, 1), mode='replicate'), (inputs,)))
        # assert that reflection padding errors when pad >= input size
expected_err_msg = r"Padding size should be less than the corresponding input dimension"
self.assertRaisesRegex(RuntimeError, expected_err_msg,
lambda: F.pad(torch.randn(1, 1, 2, 3), (1, 1, 3, 0), mode='reflect'))
self.assertRaisesRegex(RuntimeError, expected_err_msg,
lambda: F.pad(torch.randn(1, 1, 2), (2, 1), mode='reflect'))
def test_pad_scalar_error(self):
inputs = torch.tensor(0., requires_grad=True)
self.assertRaises(AssertionError, lambda: F.pad(inputs, (1, 1)))
self.assertRaises(AssertionError, lambda: F.pad(inputs, (1,)))
def test_normalize(self):
inputs = torch.randn(1, 3, 4, 4, requires_grad=True)
self.assertTrue(gradcheck(lambda x: F.normalize(x, p=1, dim=-1), (inputs,)))
self.assertTrue(gradcheck(lambda x: F.normalize(x, p=2, dim=-2), (inputs,)))
inputs = torch.randn((), requires_grad=True)
self.assertTrue(gradcheck(lambda x: F.normalize(x, p=1, dim=-1), (inputs,)))
def _test_maxpool_indices(self, num_dim, adaptive=False, device="cpu", dtype=torch.float):
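        # The expected_* helpers are hand-computed for an input counting
        # 1..numel laid out as (2, 2, 4, ..., 4): values increase with flat
        # index, so a kernel-2 max pool always selects the last element of each
        # window, which fixes the expected indices, outputs and gradients.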
def expected_indices(dim):
if dim == 1:
return torch.tensor([1, 3], dtype=torch.double).repeat(2, 2, 1)
if dim == 2:
return torch.tensor([[5, 7], [13, 15]], dtype=torch.double).repeat(2, 2, 1, 1)
def expected_grad(dim):
if dim == 1:
return torch.tensor([0, 1, 0, 1], dtype=torch.double).repeat(2, 2, 1)
grad = expected_grad(dim - 1)
zero = torch.zeros(grad.size())
return torch.stack((zero, grad, zero, grad), 2)
def expected_output(dim):
if dim == 1:
return torch.arange(2, 17, 2).view(2, 2, 2)
if dim == 2:
col = torch.arange(6, 63, 8)
return torch.stack([col, col + 2], 1).view(2, 2, 2, 2)
if adaptive:
cls_name = 'AdaptiveMaxPool{}d'.format(num_dim)
else:
cls_name = 'MaxPool{}d'.format(num_dim)
module_cls = getattr(nn, cls_name)
module = module_cls(2, return_indices=True).to(device, dtype=dtype)
numel = 4 ** (num_dim + 1)
input = torch.arange(1, numel + 1).view(2, 2, *repeat(4, num_dim)).to(device, dtype=dtype)
input_var = torch.tensor(input, requires_grad=True)
# Check forward
output, indices = module(input_var)
if num_dim != 3:
expected_indices = expected_indices(num_dim)
expected_output = expected_output(num_dim)
self.assertEqual(indices.dim(), input.dim())
self.assertEqual(indices.data.squeeze(), expected_indices)
self.assertEqual(output.data.squeeze(), expected_output)
self.assertTrue(output.requires_grad)
self.assertFalse(indices.requires_grad)
# Make sure backward works
grad_output = torch.ones(output.size(), device=device, dtype=dtype)
output.backward(grad_output, retain_graph=True)
expected_grad = expected_grad(num_dim)
self.assertEqual(input_var.grad.data, expected_grad.view_as(input))
# Make sure backward after changing indices will result in an error
indices.add_(1)
self.assertRaises(RuntimeError, lambda: output.backward(grad_output))
def test_adaptive_pooling_input_size(self):
for numel in (2, 3):
for pool_type in ('Max', 'Avg'):
cls_name = 'Adaptive{}Pool{}d'.format(pool_type, numel)
module_cls = getattr(nn, cls_name)
output_size = (2,) * numel
module = module_cls(output_size)
input = torch.randn(output_size)
self.assertRaises(ValueError, lambda: module(input))
def test_adaptive_pooling_size_none(self):
for numel in (2, 3):
for pool_type in ('Max', 'Avg'):
cls_name = 'Adaptive{}Pool{}d'.format(pool_type, numel)
module_cls = getattr(nn, cls_name)
output_size = (2,) * (numel - 1) + (None,)
module = module_cls(output_size)
input = torch.randn((4,) * (numel + 1))
output = module(input)
self.assertEqual(output.size(), (4,) + (2,) * (numel - 1) + (4,))
def test_Conv2d_naive_groups(self):
self._test_Conv2d_naive_groups()
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
@repeat_test_for_types(ALL_TENSORTYPES)
def test_Conv2d_naive_groups_cuda(self, dtype=torch.float):
self._test_Conv2d_naive_groups("cuda", dtype)
def test_batchnorm_eval(self):
self._test_batchnorm_eval()
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
def test_batchnorm_eval_cuda(self, dtype=torch.float):
self._test_batchnorm_eval("cuda", dtype)
def test_batchnorm_simple_average(self):
self._test_batchnorm_simple_average()
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
def test_batchnorm_simple_average_cuda(self):
self._test_batchnorm_simple_average(torch.cuda.FloatTensor)
def test_MaxPool1d_indices(self):
self._test_maxpool_indices(1)
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
@repeat_test_for_types(ALL_TENSORTYPES)
def test_MaxPool1d_indices_cuda(self, dtype=torch.float):
self._test_maxpool_indices(1, device="cuda", dtype=dtype)
def test_MaxPool2d_indices(self):
self._test_maxpool_indices(2)
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
@repeat_test_for_types(ALL_TENSORTYPES)
def test_MaxPool2d_indices_cuda(self, dtype=torch.float):
self._test_maxpool_indices(2, device="cuda", dtype=dtype)
def test_MaxPool3d_indices(self):
self._test_maxpool_indices(3)
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
@repeat_test_for_types(ALL_TENSORTYPES)
def test_MaxPool3d_indices_cuda(self, dtype=torch.float):
self._test_maxpool_indices(3, device="cuda", dtype=dtype)
def test_AdaptiveMaxPool1d_indices(self):
self._test_maxpool_indices(1, adaptive=True)
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
@repeat_test_for_types(ALL_TENSORTYPES)
def test_AdaptiveMaxPool1d_indices_cuda(self, dtype=torch.float):
self._test_maxpool_indices(1, adaptive=True, device="cuda", dtype=dtype)
def test_AdaptiveMaxPool2d_indices(self):
self._test_maxpool_indices(2, adaptive=True)
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
@repeat_test_for_types(ALL_TENSORTYPES)
def test_AdaptiveMaxPool2d_indices_cuda(self, dtype=torch.float):
self._test_maxpool_indices(2, adaptive=True, device="cuda", dtype=dtype)
def test_AdaptiveMaxPool3d_indices(self):
self._test_maxpool_indices(3, adaptive=True)
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
@repeat_test_for_types(ALL_TENSORTYPES)
def test_AdaptiveMaxPool3d_indices_cuda(self, dtype=torch.float):
self._test_maxpool_indices(3, adaptive=True, device="cuda", dtype=dtype)
@staticmethod
def _test_max_pool_nan(self, device, dtype=torch.float):
for adaptive in ['', 'adaptive_']:
for num_dim in [1, 2, 3]:
fn_name = '{}max_pool{}d'.format(adaptive, num_dim)
fn = getattr(F, fn_name)
x = torch.full([1, 1] + num_dim * [3], nan)
res = fn(x, 1 if adaptive else 3)
self.assertTrue(math.isnan(res.item()))
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
@repeat_test_for_types(ALL_TENSORTYPES)
def test_max_pool_nan_cuda(self, dtype=torch.float):
self._test_max_pool_nan(self, device="cuda", dtype=dtype)
def test_max_pool_nan(self, dtype=torch.float):
self._test_max_pool_nan(self, device="cpu")
def _test_scatter(self, tensor):
x = torch.tensor(tensor, requires_grad=True)
result = dp.scatter(x, (0, 1))
self.assertEqual(len(result), 2)
self.assertEqual(result[0], x[:2])
self.assertEqual(result[0].get_device(), 0)
self.assertEqual(result[1], x[2:])
self.assertEqual(result[1].get_device(), 1)
grad = result[0].data.clone().fill_(2)
result[0].backward(grad)
self.assertEqual(x.grad.data[:2], grad)
self.assertEqual(x.grad.data[2:], grad.clone().zero_())
_assertGradAndGradgradChecks(self, lambda y: dp.scatter(y, (0, 1)), (x,))
@unittest.skipIf(not TEST_MULTIGPU, "multi-GPU not supported")
def test_scatter_cpu(self):
self._test_scatter(torch.randn(4, 4))
@unittest.skipIf(not TEST_MULTIGPU, "multi-GPU not supported")
def test_scatter_gpu(self):
self._test_scatter(torch.randn(4, 4).cuda())
def _test_gather(self, output_device):
inputs = (
torch.randn(2, 4, device='cuda:0', requires_grad=True),
torch.randn(2, 4, device='cuda:1', requires_grad=True),
)
result = dp.gather(inputs, output_device)
self.assertEqual(result.size(), torch.Size([4, 4]))
self.assertEqual(result[:2], inputs[0])
self.assertEqual(result[2:], inputs[1])
if output_device != -1:
self.assertEqual(result.get_device(), output_device)
else:
self.assertFalse(result.is_cuda)
grad = torch.randn(4, 4)
if output_device != -1:
grad = grad.cuda(output_device)
result.backward(grad)
self.assertEqual(inputs[0].grad.data, grad[:2])
self.assertEqual(inputs[1].grad.data, grad[2:])
_assertGradAndGradgradChecks(self, lambda x, y: dp.gather((x, y), output_device), inputs)
# test scalar inputs, should stack into a vector in this case
inputs = (
torch.randn((), device='cuda:0', requires_grad=True),
torch.randn((), device='cuda:1', requires_grad=True),
)
result = dp.gather(inputs, output_device)
self.assertEqual(result.size(), torch.Size([2]))
self.assertEqual(result[0], inputs[0])
self.assertEqual(result[1], inputs[1])
if output_device != -1:
self.assertEqual(result.get_device(), output_device)
else:
self.assertFalse(result.is_cuda)
grad = torch.randn(2)
if output_device != -1:
grad = grad.cuda(output_device)
result.backward(grad)
self.assertEqual(inputs[0].grad, grad[0])
self.assertEqual(inputs[1].grad, grad[1])
_assertGradAndGradgradChecks(self, lambda x, y: dp.gather((x, y), output_device), inputs)
@unittest.skipIf(not TEST_MULTIGPU, "multi-GPU not supported")
def test_gather_cpu(self):
self._test_gather(-1)
@unittest.skipIf(not TEST_MULTIGPU, "multi-GPU not supported")
def test_gather_gpu(self):
self._test_gather(0)
@unittest.skipIf(not TEST_MULTIGPU, "multi-GPU not supported")
def test_gather_different_len_dicts(self):
inputs = (
{'a': Variable(torch.randn(1, 2).cuda(0), requires_grad=True)},
{
'b': Variable(torch.randn(1, 2).cuda(1), requires_grad=True),
'a': Variable(torch.randn(1, 2).cuda(1), requires_grad=True)
}
)
with self.assertRaises(ValueError):
_ = dp.gather(inputs, target_device=0)
def _test_broadcast_double_backwards(self, *tensors):
variables = tuple(torch.tensor(t, requires_grad=True) for t in tensors)
_assertGradAndGradgradChecks(self, lambda *i: Broadcast.apply((0, 1), *i), variables)
@unittest.skipIf(not TEST_MULTIGPU, "multi-GPU not supported")
def test_broadcast_double_backwards_gpu(self):
self._test_broadcast_double_backwards(torch.randn(4, 4).cuda(),
torch.randn(4, 4).cuda(),
torch.randn(4, 4).cuda())
@unittest.skipIf(not TEST_MULTIGPU, "multi-GPU not supported")
def test_broadcast_not_requiring_grad(self):
variables = [
Variable(torch.randn(1, 2).cuda(), requires_grad=True),
Variable(torch.randn(1, 2).cuda(), requires_grad=False),
Variable(torch.randn(1, 2).cuda(), requires_grad=False),
Variable(torch.randn(1, 2).cuda(), requires_grad=True),
Variable(torch.randn(1, 2).cuda(), requires_grad=True),
]
        broadcasted_variables =
"browse-folder": TT("Browse"),
"in": TT("In"),
"opt-download_dir": TT("Temporary Download Folder"),
"explain-download_dir": TT(
"Location to store unprocessed downloads.<br /><i>Can only be changed when queue is empty.</i>"
),
"opt-download_free": TT("Minimum Free Space for Temporary Download Folder"),
"explain-download_free": TT(
'Auto-pause when free space is beneath this value.<br /><i>In bytes, optionally follow with K,M,G,T. For example: "800M" or "8G"</i>'
),
"opt-complete_dir": TT("Completed Download Folder"),
"explain-complete_dir": TT(
"Location to store finished, fully processed downloads.<br /><i>Can be overruled by user-defined categories.</i>"
),
"opt-complete_free": TT("Minimum Free Space for Completed Download Folder"),
"explain-complete_free": TT("Will not work if a category folder is on a different disk."),
"opt-fulldisk_autoresume": TT("Auto resume"),
"explain-fulldisk_autoresume": TT(
"Downloading will automatically resume if the minimum free space is available again.<br />Applies to both the Temporary and Complete Download Folder.<br />Checked every few minutes."
),
"opt-permissions": TT("Permissions for completed downloads"),
"explain-permissions": TT(
'Set permissions pattern for completed files/folders.<br /><i>In octal notation. For example: "755" or "777"</i>'
),
"opt-dirscan_dir": TT("Watched Folder"),
"explain-dirscan_dir": TT(
"Folder to monitor for .nzb files.<br /><i>Also scans .zip .rar and .tar.gz archives for .nzb files.</i>"
),
"opt-dirscan_speed": TT("Watched Folder Scan Speed"),
"explain-dirscan_speed": TT("Number of seconds between scans for .nzb files."),
"opt-script_dir": TT("Scripts Folder"),
"explain-script_dir": TT("Folder containing user scripts."),
"opt-email_dir": TT("Email Templates Folder"),
"explain-email_dir": TT("Folder containing user-defined email templates."),
"opt-password_file": TT("Password file"),
"explain-password_file": TT("File containing all passwords to be tried on encrypted RAR files."),
"systemFolders": TT("System Folders"),
"opt-admin_dir": TT("Administrative Folder"),
"explain-admin_dir1": TT(
"Location for queue admin and history database.<br /><i>Can only be changed when queue is empty.</i>"
),
"explain-admin_dir2": TT("<i>Data will <b>not</b> be moved. Requires SABnzbd restart!</i>"),
"opt-log_dir": TT("Log Folder"),
"explain-log_dir": TT("Location of log files for SABnzbd.<br /><i>Requires SABnzbd restart!</i>"),
"opt-nzb_backup_dir": TT(".nzb Backup Folder"),
"explain-nzb_backup_dir": TT("Location where .nzb files will be stored."),
"base-folder": TT("Default Base Folder"),
# Config->Switches
"opt-enable_all_par": TT("Download all par2 files"),
"explain-enable_all_par": TT("This prevents multiple repair runs by downloading all par2 files when needed."),
"opt-enable_recursive": TT("Enable recursive unpacking"),
"explain-enable_recursive": TT("Unpack archives (rar, zip, 7z) within archives."),
"opt-flat_unpack": TT("Ignore any folders inside archives"),
"explain-flat_unpack": TT("All files will go into a single folder."),
"opt-top_only": TT("Only Get Articles for Top of Queue"),
"explain-top_only": TT("Enable for less memory usage. Disable to prevent slow jobs from blocking the queue."),
"opt-safe_postproc": TT("Post-Process Only Verified Jobs"),
"explain-safe_postproc": TT(
"Only unpack and run scripts on jobs that passed the verification stage. If turned off, all jobs will be marked as Completed even if they are incomplete."
),
"opt-pause_on_pwrar": TT("Action when encrypted RAR is downloaded"),
"explain-pause_on_pwrar": TT('In case of "Pause", you\'ll need to set a password and resume the job.'),
"opt-no_dupes": TT("Detect Duplicate Downloads"),
"explain-no_dupes": TT(
"Detect identical NZB files (based on items in your History or files in .nzb Backup Folder)"
),
"opt-no_series_dupes": TT("Detect duplicate episodes in series"),
"explain-no_series_dupes": TT(
'Detect identical episodes in series (based on "name/season/episode" of items in your History)'
),
"opt-series_propercheck": TT("Allow proper releases"),
"explain-series_propercheck": TT(
"Bypass series duplicate detection if PROPER, REAL or REPACK is detected in the download name"
),
"nodupes-off": TT("Off"), #: Three way switch for duplicates
"nodupes-ignore": TT("Discard"), #: Four way switch for duplicates
"nodupes-pause": TT("Pause"), #: Four way switch for duplicates
"nodupes-fail": TT("Fail job (move to History)"), #: Four way switch for duplicates
"nodupes-tag": TT("Tag job"), #: Four way switch for duplicates
"abort": TT("Abort"), #: Three way switch for encrypted posts
"opt-action_on_unwanted_extensions": TT("Action when unwanted extension detected"),
"explain-action_on_unwanted_extensions": TT("Action when an unwanted extension is detected in RAR files"),
"opt-unwanted_extensions": TT("Unwanted extensions"),
"explain-unwanted_extensions": TT("List all unwanted extensions. For example: <b>exe</b> or <b>exe, com</b>"),
"opt-sfv_check": TT("Enable SFV-based checks"),
"explain-sfv_check": TT("Do an extra verification based on SFV files."),
"opt-script_can_fail": TT("User script can flag job as failed"),
"explain-script_can_fail": TT(
"When the user script returns a non-zero exit code, the job will be flagged as failed."
),
"opt-new_nzb_on_failure": TT("On failure, try alternative NZB"),
"explain-new_nzb_on_failure": TT("Some servers provide an alternative NZB when a download fails."),
"opt-enable_meta": TT("Use tags from indexer"),
"explain-enable_meta": TT(
"When sorting, use tags from indexer for title, season, episode, etc. Otherwise all naming is derived from the NZB name."
),
"opt-folder_rename": TT("Enable folder rename"),
"explain-folder_rename": TT(
"Use temporary names during post processing. Disable when your system doesn't handle that properly."
),
"opt-pre_script": TT("Pre-queue user script"),
"explain-pre_script": TT("Used before an NZB enters the queue."),
"opt-par_option": TT("Extra PAR2 Parameters"),
"explain-par_option": TT("Read the Wiki Help on this!"),
"opt-nice": TT("Nice Parameters"),
"explain-nice": TT("Read the Wiki Help on this!"),
"opt-ionice": TT("IONice Parameters"),
"explain-ionice": TT("Read the Wiki Help on this!"),
"opt-win_process_prio": TT("External process priority"),
"explain-win_process_prio": TT("Read the Wiki Help on this!"),
"win_process_prio-high": TT("High"),
"win_process_prio-normal": TT("Normal"),
"win_process_prio-low": TT("Low"),
"win_process_prio-idle": TT("Idle"),
"opt-auto_disconnect": TT("Disconnect on Empty Queue"),
"explain-auto_disconnect": TT("Disconnect from Usenet server(s) when queue is empty or paused."),
"opt-auto_sort": TT("Automatically sort queue"),
"explain-auto_sort": TT("Automatically sort jobs in the queue when a new job is added."),
"opt-direct_unpack": TT("Direct Unpack"),
"explain-direct_unpack": TT(
"Jobs will start unpacking during the downloading to reduce post-processing time. Only works for jobs that do not need repair."
),
"opt-propagation_delay": TT("Propagation delay"),
"explain-propagation_delay": TT(
"Posts will be paused untill they are at least this age. Setting job priority to Force will skip the delay."
),
"opt-check_new_rel": TT("Check for New Release"),
"explain-check_new_rel": TT("Weekly check for new SABnzbd release."),
"also-test": TT("Also test releases"), #: Pick list for weekly test for new releases
"opt-replace_spaces": TT("Replace Spaces in Foldername"),
"explain-replace_spaces": TT("Replace spaces with underscores in folder names."),
"opt-replace_dots": TT("Replace dots in Foldername"),
"explain-replace_dots": TT("Replace dots with spaces in folder names."),
"opt-sanitize_safe": TT("Make Windows compatible"),
"explain-sanitize_safe": TT("For servers: make sure names are compatible with Windows."),
"opt-auto_browser": TT("Launch Browser on Startup"),
"explain-auto_browser": TT("Launch the default web browser when starting SABnzbd."),
"opt-pause_on_post_processing": TT("Pause Downloading During Post-Processing"),
"explain-pause_on_post_processing": TT(
"Pauses downloading at the start of post processing and resumes when finished."
),
"opt-ignore_samples": TT("Ignore Samples"),
"explain-ignore_samples": TT("Filter out sample files (e.g. video samples)."),
"igsam-del": TT("Delete after download"),
"opt-deobfuscate_final_filenames": TT("Deobfuscate final filenames"),
"explain-deobfuscate_final_filenames": TT(
"If filenames of (large) files in the final folder look obfuscated or meaningless they will be renamed to the job name."
),
"opt-enable_https_verification": TT("HTTPS certificate verification"),
"explain-enable_https_verification": TT(
"Verify certificates when connecting to indexers and RSS-sources using HTTPS."
),
"swtag-server": TT("Server"),
"swtag-queue": TT("Queue"),
"swtag-pp": TT("Post processing"),
"swtag-naming": TT("Naming"),
"swtag-quota": TT("Quota"),
"swtag-indexing": TT("Indexing"),
"opt-quota_size": TT("Size"), #: Size of the download quota
"explain-quota_size": TT("How much can be downloaded this month (K/M/G)"),
"opt-quota_day": TT("Reset day"), #: Reset day of the download quota
"explain-quota_day": TT(
"On which day of the month or week (1=Monday) does your ISP reset the quota? (Optionally with hh:mm)"
),
"opt-quota_resume": TT("Auto resume"), #: Auto-resume download on the reset day
"explain-quota_resume": TT("Should downloading resume after the quota is reset?"),
"opt-quota_period": TT("Quota period"), #: Does the quota get reset every day, week or month?
"explain-quota_period": TT("Does the quota get reset each day, week or month?"),
"opt-pre_check": TT("Check before download"),
"explain-pre_check": TT("Try to predict successful completion before actual download (slower!)"),
"opt-ssl_ciphers": TT("SSL Ciphers"),
"explain-ssl_ciphers": TT("Increase performance by forcing a lower SSL encryption strength."),
"opt-max_art_tries": TT("Maximum retries"),
"explain-max_art_tries": TT("Maximum number of retries per server"),
"opt-fail_hopeless_jobs": TT("Abort jobs that cannot be completed"),
"explain-fail_hopeless_jobs": TT(
"When during download it becomes clear that too much data is missing, abort the job"
),
"opt-rating_enable": TT("Enable Indexer Integration"),
"explain-rating_enable": TT(
"Indexers can supply rating information when a job is added and SABnzbd can report to the indexer if a job couldn't be completed."
),
"opt-rating_api_key": TT("API Key"),
"explain-rating_api_key": TT("This key provides identity to indexer. Check your profile on the indexer's website."),
"opt-rating_filter_enable": TT("Enable Filtering"),
"explain-rating_filter_enable": TT("Action downloads according to filtering rules."),
"opt-rating_filter_abort_if": TT("Abort If"),
"opt-rating_filter_pause_if": TT("Else Pause If"),
"opt-rating_filter_video": TT("Video rating"),
"opt-rating_filter_audio": TT("Audio rating"),
"opt-rating_filter_passworded": TT("Passworded"),
"opt-rating_filter_spam": TT("Spam"),
"opt-rating_filter_confirmed": TT("Confirmed"),
"opt-rating_filter_downvoted": TT("More thumbs down than up"),
"opt-rating_filter_keywords": TT("Title keywords"),
"explain-rating_filter_keywords": TT("Comma separated list"),
"opt-load_balancing": TT("Server IP address selection"),
"no-load-balancing": TT("First IP address"),
"load-balancing": TT("Randomly selected IP address"),
"load-balancing-happy-eyeballs": TT("Quickest IP address, preferring IPv6"),
"explain-load_balancing": TT("Useful if a newsserver has more than one IPv4/IPv6 address"),
# Config->Server
"addServer": TT("Add Server"), #: Caption
"srv-displayname": TT("Server description"), #: User defined name for server
"srv-host": TT("Host"), #: Server hostname or IP
# summarization_utils.py
import ast
import hashlib
import json
import os
from collections import defaultdict
from typing import Tuple, Sequence, Dict, Optional, Union, Any, Set
import compress_pickle
import matplotlib.pyplot as plt
import numpy as np
import pandas
import pandas as pd
from filelock import FileLock
from allenact.utils.misc_utils import (
bootstrap_max_of_subset_statistic,
expected_max_of_subset_statistic,
all_equal,
)
from minigrid_and_pd_scripts.compute_random_performance_for_task import (
TASK_TO_RANDOM_PERFORMANCE,
)
from projects.advisor.advisor_constants import (
METHOD_ORDER,
METHOD_TO_COLOR,
METHOD_TO_LINE_MARKER,
EXPERIMENT_STR_TO_LABEL_DICT,
)
from projects.advisor.lighthouse_scripts.summarize_pairwise_imitation_data import (
set_size,
)
from projects.advisor.minigrid_constants import ENV_NAMES_TO_TITLE
plt.rc("font", **{"family": "serif", "serif": ["CMU"], "size": 16})
plt.rc("xtick", labelsize=12)
plt.rc("ytick", labelsize=12)
plt.rc("text", usetex=True)
plt.rc("text.latex", preamble=r"\usepackage{amsmath}")
METRIC_TO_LABEL = {
"reward": "Reward",
"rewards": "Reward",
"avg_ep_length": "Avg. Ep. Length",
"success": "Success",
}
def unzip(xs):
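    # Transpose an iterable of equal-length sequences into per-position lists,
    # e.g. unzip([(1, 'a'), (2, 'b')]) -> [[1, 2], ['a', 'b']].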
a = None
n = None
for x in xs:
if n is None:
n = len(x)
a = [[] for _ in range(n)]
for i, y in enumerate(x):
a[i].append(y)
return a
def add_columns_to_df(df):
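    # Parse each row's config_kwargs_str (JSON or a Python literal) and expose
    # selected hyperparameters, plus a human-readable label, as new columns.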
keys = ["alpha_start", "alpha_stop", "fixed_alpha", "lr", "tf_ratio"]
for key in keys + ["pretty_label"]:
df[key] = [None] * df.shape[0]
def read_config_kwargs_str(config_kwargs_str):
if config_kwargs_str == "" or config_kwargs_str is None:
return {}
elif isinstance(config_kwargs_str, Dict):
return config_kwargs_str
else:
try:
return json.loads(config_kwargs_str)
except Exception:
return ast.literal_eval(config_kwargs_str)
df.loc[:, "config_kwargs"] = [
read_config_kwargs_str(config_kwargs_str)
for config_kwargs_str in df.loc[:, "config_kwargs_str"]
]
for i in range(df.shape[0]):
row = df.loc[i, :]
config_kwargs: Dict[str, Any] = row["config_kwargs"]
for key in keys:
df.loc[i, key] = config_kwargs.get(key.upper(), None)
for i in range(df.shape[0]):
df.loc[i, "pretty_label"] = run_info_to_pretty_label(dict(df.loc[i, :]))
return df
def plot_max_hp_curves(
x_to_y_list: Sequence[Dict[Union[int, float], float]],
x_to_bootstrap_ys_list: Sequence[Dict[Union[int, float], Sequence[float]]],
method_labels: Sequence[str],
colors: Sequence[Tuple[int, int, int]],
line_styles: Optional[Sequence] = None,
line_markers: Optional[Sequence] = None,
title: str = "",
xlabel: str = "",
ylabel: str = "",
fig_size=(4, 4 * 3.0 / 5.0),
save_path: Optional[str] = None,
put_legend_outside: bool = True,
include_legend: bool = False,
performance_of_random_agent: Optional[float] = None,
best_inds_to_highlight: Optional[Set] = None,
):
"""Plots E[max(metric | n hp runs)] curves.
For more information on studying sensitivity of methods to
hyperparameter tuning, refer to Dodge et al. EMNLP 2019
https://arxiv.org/abs/1909.03004
"""
line_styles = ["solid"] * len(colors) if line_styles is None else line_styles
line_markers = [""] * len(colors) if line_markers is None else line_markers
plt.grid(
b=True,
which="major",
color=np.array([0.93, 0.93, 0.93]),
linestyle="-",
zorder=-2,
)
plt.minorticks_on()
plt.grid(
b=True,
which="minor",
color=np.array([0.97, 0.97, 0.97]),
linestyle="-",
zorder=-2,
)
ax = plt.gca()
ax.set_axisbelow(True)
# Hide the right and top spines
ax.spines["right"].set_visible(False)
ax.spines["top"].set_visible(False)
if best_inds_to_highlight is None:
best_inds_to_highlight = set(range(len(x_to_y_list)))
xscaled = False
for (
index,
(x_to_y, x_to_bootstrap_ys, method_label, color, line_style, line_marker,),
) in enumerate(
zip(
x_to_y_list,
x_to_bootstrap_ys_list,
method_labels,
colors,
line_styles,
line_markers,
)
):
xvals = list(sorted(x_to_bootstrap_ys.keys()))
points_list = [x_to_bootstrap_ys[x] for x in xvals]
points = [x_to_y[x] for x in xvals]
should_highlight = index in best_inds_to_highlight
if max(xvals) > 1e3:
xscaled = True
xvals = [x / 1e6 for x in xvals]
try:
lower, _, upper = unzip(
[np.percentile(points, [25, 50, 75]) for points in points_list]
)
except Exception as _:
print(
"Could not generate max_hp_curve for {}, too few points".format(
method_label
)
)
continue
if performance_of_random_agent is not None:
xvals = [0] + xvals
points = [performance_of_random_agent] + points
lower = [performance_of_random_agent] + lower
upper = [performance_of_random_agent] + upper
plt.gca().fill_between(
xvals,
lower,
upper,
color=np.array(color + (25 if should_highlight else 0,)) / 255,
zorder=1,
)
plot_kwargs = dict(
lw=2.5,
linestyle=line_style,
marker=line_marker,
markersize=8,
markevery=4 if len(xvals) > 10 else 1,
zorder=2,
)
label = (
r"{}.{}".format(index + 1, "\ \ " if index + 1 < 10 else " ") + method_label
)
color = np.array(color + (255,)) / 255
plt.plot([], [], label=label, color=color, **plot_kwargs) # FOR LEGEND ONLY
if not should_highlight:
color = np.array(color)
color[3] = 0.1
plt.plot(xvals, points, color=color, **plot_kwargs)
plt.title(title)
plt.xlabel(xlabel + (r"(Millions)" if xscaled and len(xlabel) != 0 else r""))
plt.ylabel(ylabel)
plt.ticklabel_format(style="plain")
plt.tight_layout()
if include_legend:
if put_legend_outside:
ax = plt.gca()
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
ax.legend(loc="center left", bbox_to_anchor=(1, 0.5))
else:
plt.legend()
set_size(*fig_size)
if save_path is None:
plt.show()
else:
plt.savefig(
save_path, bbox_inches="tight",
)
plt.close()
print(f"Figure saved to {save_path}")
def create_comparison_hp_plots_from_tsv(
num_hp_evals_for_steps_plot: int,
tsv_file_path: str,
highlight_best: bool,
overwrite=True,
include_legend: bool = False,
hide_labels: bool = False,
):
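    # Read a tab-separated results file, group runs by exp_type and, for each
    # metric, compute (and cache under neurips21_plots/<task>/cache) the
    # expected-max-over-m-runs statistics before plotting the curves.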
assert os.path.exists(tsv_file_path)
file_dir, file_name = os.path.split(tsv_file_path)
with open(tsv_file_path, "r") as f:
tsv_hash = str(hashlib.md5(f.read().encode()).hexdigest())
df = pd.read_csv(tsv_file_path, sep="\t")
df = add_columns_to_df(df)
env_type_key = "env"
assert (
df[env_type_key] == df[env_type_key][0]
).all(), "env must be the same for all elements of df"
task_name = df[env_type_key][0]
del df[env_type_key]
df = df.sort_values(by=["exp_type", "seed"])
group_keys = ["exp_type"]
df_grouped = df.groupby(by=group_keys)
df_grouped_lists = df_grouped.agg(list)
# One sort index, based on the first metric
for metric_key in [
"reward",
# "success",
# "avg_ep_length",
]:
if not os.path.exists(file_dir):
print("IN WRONG DIRECTORY.")
else:
plots_dir = os.path.join(file_dir, "neurips21_plots", task_name)
os.makedirs(plots_dir, exist_ok=True)
box_save_path = os.path.join(
plots_dir,
"{}__box_{}_{}.pdf".format(
file_name.replace(".tsv", ""), task_name, metric_key,
),
)
if (not overwrite) and os.path.exists(box_save_path):
print(
"Plot {} exists and overwrite is `False`, skipping...".format(
box_save_path
)
)
continue
tsv_summary_dir = os.path.join(file_dir, "neurips21_summaries")
os.makedirs(tsv_summary_dir, exist_ok=True)
tsv_summary_save_path = os.path.join(
tsv_summary_dir, f"{metric_key}__all_results.tsv"
)
grouped_df_index = df_grouped_lists.index.to_frame(index=False)
method_keys = list(grouped_df_index["exp_type"])
sort_index = [
ind
for _, ind in sorted(
[
(METHOD_ORDER.index(method_key), sort_ind)
if method_key in METHOD_ORDER
else 1e6
for sort_ind, method_key in enumerate(method_keys)
if method_key in METHOD_ORDER
]
)
]
colors = [
METHOD_TO_COLOR.get(method_keys[ind], (0, 0, 0),) for ind in sort_index
]
line_styles = None
line_markers = [
METHOD_TO_LINE_MARKER.get(method_keys[ind], "",) for ind in sort_index
]
sorted_multi_index = [
tuple(grouped_df_index.loc[ind, :]) for ind in sort_index
]
sorted_multi_index = [
x if len(x) != 1 else x[0] for x in sorted_multi_index
]
result_lens = {
multi_ind: len(df_grouped_lists.loc[multi_ind, metric_key])
for multi_ind in sorted_multi_index
}
print(result_lens)
print(sum(result_lens.values()))
points_list = [
list(
map(ast.literal_eval, df_grouped_lists.loc[multi_ind, metric_key],)
)
for multi_ind in sorted_multi_index
]
exp_to_ckpt_training_steps_lists = [
df_grouped_lists.loc[multi_ind, "train_steps"]
for multi_ind in sorted_multi_index
]
assert all(all_equal(l) for l in exp_to_ckpt_training_steps_lists)
exp_ind_to_ckpt_training_steps = [
ast.literal_eval(training_steps_list[0])
for training_steps_list in exp_to_ckpt_training_steps_lists
]
pretty_label_lists = [
df_grouped_lists.loc[multi_ind, "pretty_label"]
for multi_ind in sorted_multi_index
]
assert all(all_equal(l) for l in pretty_label_lists)
yticklabels = [l[0] for l in pretty_label_lists]
subset_size_to_bootstrap_points_list = []
subset_size_to_expected_mas_est_list = []
ckpt_to_bootstrap_points_list = []
ckpt_to_expected_mas_est_list = []
print("Starting expected max reward computations")
for i in range(len(points_list)):
print(f"Computing expected max {metric_key} for {yticklabels[i]}")
vals_per_ckpt_mat = np.array(
points_list[i]
) # each col corresponds to a checkpoint
training_steps_inds_to_skip = []
training_steps = exp_ind_to_ckpt_training_steps[i]
cache_path = os.path.join(
plots_dir, "cache", f"{tsv_hash}_{i}_{metric_key}.pkl.gz"
)
os.makedirs(os.path.dirname(cache_path), exist_ok=True)
if os.path.exists(cache_path):
cache = compress_pickle.load(cache_path)
ckpt_to_expected_mas_est_list.append(
cache["ckpt_to_expected_mas_est"]
)
ckpt_to_bootstrap_points_list.append(
cache["ckpt_to_bootstrap_points"]
)
subset_size_to_expected_mas_est_list.append(
cache["subset_size_to_expected_mas_est"]
)
subset_size_to_bootstrap_points_list.append(
cache["subset_size_to_bootstrap_points"]
)
else:
for j in range(len(training_steps) - 1):
# Skip some weird cases where checkpoints were saved too closely
if (training_steps[j + 1] - training_steps[j]) / training_steps[
-1
] < 0.05:
training_steps_inds_to_skip.append(j)
ckpt_to_expected_mas_est_list.append(
{
training_steps: expected_max_of_subset_statistic(
vals_per_ckpt_mat[:, j], m=num_hp_evals_for_steps_plot
)
for j, training_steps in enumerate(training_steps)
if j not in training_steps_inds_to_skip
}
)
ckpt_to_bootstrap_points_list.append(
{
training_steps: bootstrap_max_of_subset_statistic(
vals_per_ckpt_mat[:, j],
m=num_hp_evals_for_steps_plot,
reps=500,
seed=j,
)
for j, training_steps in enumerate(training_steps)
if j not in training_steps_inds_to_skip
}
)
max_subset_size = len(points_list[i]) + 1 - 5
subset_size_to_expected_mas_est_list.append(
{
m: expected_max_of_subset_statistic(
vals_per_ckpt_mat[:, -1], m=m
)
for m in range(1, max_subset_size)
}
)
subset_size_to_bootstrap_points_list.append(
{
m: bootstrap_max_of_subset_statistic(
vals_per_ckpt_mat[:, -1], m=m, reps=500, seed=m
)
for m in range(1, max_subset_size)
}
)
cache = {}
cache["ckpt_to_expected_mas_est"] = ckpt_to_expected_mas_est_list[
-1
]
cache["ckpt_to_bootstrap_points"] = ckpt_to_bootstrap_points_list[
-1
]
cache[
"subset_size_to_expected_mas_est"
] = subset_size_to_expected_mas_est_list[-1]
cache[
"subset_size_to_bootstrap_points"
] = subset_size_to_bootstrap_points_list[-1]
compress_pickle.dump(cache, cache_path)
color_to_best_val_and_index = defaultdict(lambda: (-float("inf"), -1))
color_to_inds = defaultdict(lambda: [])
for ind, c0 in enumerate(colors):
color_to_inds[c0].append(ind)
final_y = list(sorted(ckpt_to_expected_mas_est_list[ind].items()))[-1][
1
]
if final_y > color_to_best_val_and_index[c0][0]:
color_to_best_val_and_index[c0] = (final_y, ind)
best_inds_to_highlight = set(
v[1] for v in color_to_best_val_and_index.values()
)
plot_max_hp_curves(
x_to_y_list=ckpt_to_expected_mas_est_list,
x_to_bootstrap_ys_list=ckpt_to_bootstrap_points_list,
method_labels=yticklabels,
xlabel=("Training Steps" if not hide_labels else ""),
ylabel=(
f"Expected {METRIC_TO_LABEL[metric_key]}" if not hide_labels else ""
),
colors=colors,
line_styles=line_styles,
line_markers=line_markers,
fig_size=(3 * 1.05, 3 * 1.05),
save_path=box_save_path.replace("_box_", "_train_steps_"),
put_legend_outside=True,
include_legend=include_legend,
title=(ENV_NAMES_TO_TITLE[task_name] if not hide_labels else ""),
performance_of_random_agent=TASK_TO_RANDOM_PERFORMANCE.get(
task_name, {}
).get(metric_key, None),
best_inds_to_highlight=best_inds_to_highlight
if highlight_best
else None,
)
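# Illustrative call (hypothetical paths/values): the combined results TSV is
# read, grouped by "exp_type", and per-metric plots are written next to it
# under neurips21_plots/<env name>/.
#
#   create_comparison_hp_plots_from_tsv(
#       num_hp_evals_for_steps_plot=10,
#       tsv_file_path="path/to/combined_results.tsv",
#       highlight_best=True,
#       overwrite=False,
#   )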
def save_expected_rewards_tsv(
task_name: str,
x_to_y_list: Sequence[Dict[Union[int, float], float]],
method_labels: Sequence[str],
save_path: str,
grouped_inds_list: Sequence[Sequence[int]],
):
def all_nearly_equal(seq):
s = seq[0]
return all(abs(s - ss) / min(s, ss) < 0.01 for ss in seq)
with FileLock(save_path + ".lock"):
if os.path.exists(save_path):
df = pandas.read_csv(save_path, sep="\t")
assert list(df["method"]) == method_labels
else:
df = pandas.DataFrame(data={"method": method_labels})
assert all_nearly_equal(
[max(x_to_y.keys()) for x_to_y in x_to_y_list]
)
if task_name in df.columns:
del df[task_name]
values_at_end_of_training = [
x_to_y[max(x_to_y.keys())] for x_to_y in x_to_y_list
]
        df[task_name] = values_at_end_of_training
# File: lib/DiskSpaceMonitor.py
# BSD Licence
# Copyright (c) 2012, Science & Technology Facilities Council (STFC)
# All rights reserved.
#
# See the LICENSE file in the source distribution of this software for
# the full license text.
"""
A disk space monitor
See doc string for class DiskSpaceMonitor for details
"""
import os
import time
import tempfile
from FileUtils import futils
import LoggerClient
class DiskState(object):
# note - the numerical values are arbitrary but the
# ordering of them matters
GOOD = 4 # restart transfers if previously stopped
OKAY = 3 # allow transfers if not stopped, but do not restart
LOW = 2 # stop transfers of low prio dsets
VLOW = 1 # delete files
class DiskSpaceMonitor(object):
"""
A disk space monitor.
Monitors the disk space in a given set of data_stream directories,
and applies three thresholds:
- fall below low disk space threshold:
stop arrival of all data_streams which are of priority <= base
priority
- below very low disk space threshold:
Delete transfer units from data_streams starting from the lowest
priority, until back above this threshold. Apply to data_streams with
increasing priority as necessary, but only as far as the base
priority. If still below this threshold, then stop high-priority
(> base priority) data_streams but do not delete transfer units
from them.
- rise above low disk threshold:
restart arrival of high priority (> base prio) data_streams
- rise above good disk threshold:
restart arrival of all data_streams
"""
def __init__(self, filesys, gconfig, dconfigs,
desc_short = "dsm",
desc_long = "disk space monitor",
debug_on = False):
"""
filesys (string) is the name of the filesystem being monitored.
(In fact it will work if it is any directory on that filesystem,
but in practice the calling code will pass the actual mount point.)
gconfig should be a single GlobalConfig object
dconfigs should be an array of DatasetConfig objects
"""
self.filesys = filesys
self.gconfig = gconfig
self.dconfigs = dconfigs
self.config = gconfig["disk_space_monitor"] # for convenience
self.poll_interval = (self.config["poll_interval"]
or gconfig["global"]["general_poll_interval"])
self.base_prio = self.config["base_priority"]
self.prio_sect = 'data_stream'
self.prio_item = 'priority'
self.stop_file_name = gconfig["incoming"]["stop_file"]
if self.gconfig.checkSet("global.debug_on"):
self.debug_on = self.gconfig.get("global.debug_on")
else:
self.debug_on = debug_on
self.initLogger(desc_short, desc_long)
# sort configs by priority
self.applyDefaultPriorities()
self.dconfigs.sort(self.cmpByPrio)
for dc in self.dconfigs:
self.debug("Dataset %s priority %s" % (dc.name,
self.getPriority(dc)))
self.monitor() # enter main loop
def initLogger(self, desc_short, desc_long):
"""
Initialise the logger client module
"""
self.logger = LoggerClient.LoggerClient(self.gconfig,
tag = desc_short,
name = desc_long)
# import some methods from the logger
self.logger.exportMethods(self)
self.info("startup")
def diskStateTransition(self, threshold, state, prev_state, direction):
"""
Test for transition to specified disk state in specified
direction (1 for rising (improving), -1 for falling (worsening)).
Also true if the current state is on the right side of the
threshold and it is the first iteration (prev_state == None).
"""
cmp1 = cmp(state, threshold)
if not (cmp1 == 0 or cmp1 == direction):
return False
if prev_state == None:
return True
cmp2 = cmp(threshold, prev_state)
return (cmp2 == direction)
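    # For example, diskStateTransition(DiskState.OKAY, DiskState.GOOD,
    # DiskState.LOW, 1) is True (we rose from below OKAY to at/above it),
    # whereas the same call with prev_state=DiskState.GOOD is False because
    # the threshold was already satisfied on the previous iteration; a
    # prev_state of None is treated as a transition.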
def monitor(self):
"""
The main loop
"""
prev = None
while True:
state = self.getDiskState()
self.debug("disk space state on %s: %s" %
(self.filesys, state))
if self.diskStateTransition(DiskState.GOOD, state, prev, 1):
self.restartAllDatasets()
if self.diskStateTransition(DiskState.OKAY, state, prev, 1):
self.restartHighPriorityDatasets()
if self.diskStateTransition(DiskState.LOW, state, prev, -1):
self.stopDatasetsExceptHighPrio()
# drasticAction called on every iteration if in VLOW state, not
# just on state transition, so that it can keep deleting files
# if more somehow arrive
if (state == DiskState.VLOW):
self.drasticAction()
time.sleep(self.poll_interval)
prev = state
def restartAllDatasets(self):
"""
Remove .stop files for all data_streams
"""
self.info("restartAllDatasets called")
for dc in self.dconfigs:
self.removeStopFile(dc)
def restartHighPriorityDatasets(self):
"""
Remove .stop files for data_streams whose priorities exceed the
base priority
"""
self.info("restartHighPriorityDatasets called")
for dc in self.dconfigs:
if self.getPriority(dc) > self.base_prio:
self.removeStopFile(dc)
def stopDatasetsExceptHighPrio(self):
"""
Create .stop files for data_streams whose priorities do not exceed the
base priority
"""
self.info("stopDatasetsExceptHighPrio called")
for dc in self.dconfigs:
if self.getPriority(dc) <= self.base_prio:
self.createStopFile(dc)
def drasticAction(self):
"""
Delete files from low or base priority data_streams if necessary,
until disk space is no longer VLOW,
and if still necessary then also stop high priority data_streams,
"""
self.info("drasticAction called")
deletions = []
for dc in self.dconfigs:
if self.getPriority(dc) <= self.base_prio:
# stop file should already have been created,
# but this is cheap, so repeat for good measure
self.createStopFile(dc)
if dc.checkSet("data_stream.deletion_enabled"):
if dc.get("data_stream.deletion_enabled") == True:
deleted_enough = \
self.deleteFilesWhileVeryLowDisk(
dc, deletions)
if deleted_enough:
break
else:
self.info("not deleting files - but need more disk space!")
else:
# deletions of items from low priority data_streams didn't fix
# the problem, so stop all arrivals
for dc in self.dconfigs:
self.createStopFile(dc)
self.error("Had to stop all arrivals")
if deletions:
self.error("Had to delete files: %s" % deletions)
def getTUsForDeletion(self, dconfig):
"""
Get list of candidate transfer units for deletion, in order,
for a data_streams (argument is a DatasetConfig object)
Looks in: arrivals directory (if there is one),
data_stream directory and quarantine directory.
        First sort key is that it does the arrivals dir before anything else,
        as this reduces checksumming. Apart from that, it does the most
        recently created files first, as these are likely to be the easiest to
        find another copy of. (NB uses change time, as this will more
        accurately reflect when it was really created on THIS system, whereas
        mtime can be set by rsync to match the modification time on another
        system)
"""
ds_dir = dconfig["data_stream"]["directory"]
q_dir = dconfig["outgoing"]["quarantine_dir"]
arr_dir = dconfig["incoming"]["directory"]
list_dir_func = lambda dir_path: \
futils.listDir(dir_path,
fullPaths = True,
emptyListOnException = True)
# add items in dataset dir
transfer_units = list_dir_func(ds_dir)
if q_dir:
# if there is a quarantine directory, add items in the quarantine
# directory, but first exclude the quarantine dir itself, which
# may be an entry under the dataset dir
transfer_units = filter(lambda path: path != q_dir,
transfer_units) \
+ list_dir_func(q_dir)
transfer_units.sort(key = futils.getCtimeOrNone,
reverse = True)
# add items in arrivals dir at start (if there is one)
if arr_dir:
arr_transfer_units = list_dir_func(arr_dir)
arr_transfer_units.sort(key = futils.getCtimeOrNone,
reverse = True)
transfer_units = arr_transfer_units + transfer_units
# Okay we're done, though for good measure check they all really
# exist
transfer_units = filter(os.path.exists, transfer_units)
return transfer_units
def deleteFilesWhileVeryLowDisk(self, dconfig, deletions):
"""
Keep deleting files from a data_stream while disk state is
very low. Return a True/False value for whether a
better (i.e. not VLOW) disk state was reached.
dconfig is the DatasetConfig object
'deletions' argument is an array provided by the caller;
it will be appended to with pathnames deleted, so that
the caller can log these
"""
for tu_path in self.getTUsForDeletion(dconfig):
# test disk space before deleting
if self.getDiskState() != DiskState.VLOW:
return True
deletions.append(tu_path)
if os.path.isdir(tu_path):
# recursive deletion - may take a while, so
# move it inside a temporary dot-dir first (and then
# delete from the level of the dot-dir itself) to
# reduce chance of races with TransferUnitController
# trying to transfer it
parent_dir = os.path.dirname(tu_path)
del_dir = tempfile.mkdtemp(dir = parent_dir,
prefix = ".del_tmp_")
os.rename(tu_path, del_dir)
status = futils.deleteDir(del_dir)
else:
status = futils.deleteFile(tu_path)
if not status:
self.warn("could not delete %s: %s" % \
(tu_path, status))
# repeat the test one final time (after last deletion)
# to determine return value
return (self.getDiskState() != DiskState.VLOW)
def createStopFile(self, dconfig):
"""
Create stop file for a given data_stream.
"""
path = self.getStopFilePath(dconfig)
if not os.path.exists(path):
self.info("Creating stop file %s" % path)
fh = open(path, "w")
fh.close()
def removeStopFile(self, dconfig):
"""
Remove stop file for a given data_stream.
"""
path = self.getStopFilePath(dconfig)
if os.path.exists(path):
self.info("Removing stop file %s" % path)
os.remove(path)
def getStopFilePath(self, dconfig):
"""
Get the stop file path for a given data_stream.
This will be in either the incoming directory (if there is an
arrival monitor) or the data_stream directory (if there is not).
"""
iconfig = dconfig["incoming"]
if iconfig["require_arrival_monitor"]:
stop_file_dir = iconfig["directory"]
else:
stop_file_dir = dconfig["data_stream"]["directory"]
27, "[?2l", "Pound sterling: #\n"]
data += [14, "Hash: #\n\n"]
data += [27, "<Push <RETURN>"]
write_test(filename, data)
def create_vt52_character_set2(filename):
# Behaviour of this checked with the 'vt102' emulator.
data = [27, "[2J", 27, "[HTest of character set for VT52 mode with graphics\n\n"]
# Set G0=UK, G1=US (remember SI (15)=G0, SO (14)=G1)
data += [27, "(A", 27, ")B"]
data += ["ANSI mode:\n"]
data += [15, "G0 (UK) code 35: ", 35, "\n"]
data += [14, "G1 (US) code 35: ", 35, "\n"]
data += [27, "[?2l"]
# Demonstrate that we're still in G1 after switching to VT52 mode, and that
# we can use SI/SO to change character sets.
data += ["VT52 mode:\n"]
data += ["G1 code 35: ", 35, "\n"]
data += [15, "G0 code 35: ", 35, "\n"]
data += [14, "G1 code 35: ", 35, "\n"]
# Demonstrate that switching into graphics mode and back out resets G0 and
# G1 to US.
data += [15, "G0 code 35 then plus/minus then US code 35: ", 35, 27, "F", "g", 27, "G", 35, "\n"]
# Demonstrate that switching back to ANSI mode doesn't alter the character
# sets.
data += [27, "<", "US code 35 twice: ", 14, 35, 15, 35, "\n"]
data += ["\nPush <RETURN>"]
write_test(filename, data)
def create_auto_wrap_scroll(filename):
data = [27, "[2J", 27, "[HTest of auto wrap with scrolling\n\n"]
data += [27, "[3;4r", 27, "[3H\n"]
data += ["This sentence should wrap neatly around at the right margin, without any strangespacing or other formatting errors. Push <RETURN>"]
data += [27, "[r"]
write_test(filename, data)
def create_reset(filename):
data = [27, "[3;10r", 27, "[?6h", 27, "H"]
data += [27, "[?5h"]
data += [27, "[?20h"]
data += [27, "[4h"]
data += [27, "[4m"]
data += [27, "c"]
data += ["Test of reset\n\n"]
# junk would be pushed right and not overwritten if insert mode weren't
# reset.
data += ["junk\rThis should be normal text with no reverse video on\n"]
data += ["the top four lines of the screen. Push <RETURN>"]
write_test(filename, data)
def create_control_sequence_intermediate_character1(filename):
data = [27, "[2J", 27, "[HTest of control sequences with intermediate characters"]
# Testing with the 'vt102' emulator suggests the presence of intermediate
# characters causes the escape sequence to be ignored.
data += [27, "[3H", 27, "[##H"]
data += ["This should appear at the left margin on line 3. Push <RETURN>"]
write_test(filename, data)
def create_not_quite_line_attributes(filename):
# Simplified version of failure found with fuzz tester; the line attribute
# code was updating the flags before it checked for the presence of the
# essential # intermediate character; this caused the question mark to be
# printed in single height, single width. (Nothing special about the
# question mark, just an arbitrary character chosen for this test.) FWIW,
# the seed which found it was 1529457 (on a Master 128); B% gets to about
# 45000 at the failure point, which is caught because *REDRAW changes the
# screen.
data = [27, "[2J", 27, "[HTest of not-quite-line-attributes\n\n"]
data += [27, "#3Double-height question mark: ", 27, "5?\n"]
data += [27, "#4Double-height question mark: ", 27, "5?\n\n"]
data += ["The double-height question mark should actually be double height. Push <RETURN>"]
write_test(filename, data)
def create_insert_line_line_attributes(filename):
# Simplified version of failure found with fuzz tester (Master
# 128, S%=13569, B%=507904); the fast path cursor position wasn't being
# updated when an insert line changed the line attributes.
data = [27, "[2J", 27, "[H", 27, "#3Double-height text"]
data += [27, "[Le with non-default line attributes"]
data += [13, "Test of insert lin"]
data += ["\n", 27, "[L", "\n\n", 27, "#4Double-height text\n\n"]
data += ["The top line should be the test title in single-width text with no gaps. Beneath"]
data += ["that should be some double-height text. Push <RETURN>"]
write_test(filename, data)
def create_delete_line_line_attributes(filename):
# Test of delete line inspired by create_insert_line_line_attributes()
data = [27, "[2J", 27, "[HTest of delete line with non-default line attributes\n\n"]
data += [27, "#3foo", 27, "[Ms text should be single-width with no gaps. Push <RETURN>"]
data += [27, "7", 13, "Thi", 27, "8"]
write_test(filename, data)
def create_insert_delete_characters_with_attributes1(filename):
# Simplified version of failure found with fuzz tester (Master 128,
# S%=60605376, B%=102400); insert characters was not correctly inserting
# characters with the underline attribute set. Behaviour of the delete
# character part of this checked with 'vt102' emulator (it doesn't support
# insert character).
data = [27, "[2J", 27, "[HTest of insert/delete characters with character attributes 1"]
data += [27, "[3g", 27, "[1;80H", 27, "H", "\n"]
# The ordering of these tests is important for verifying the output; this
# way the reverse video underline is visible as it forms a line in the two adjacent
# reverse video lines, and the non-reverse video underline is visible as
# it's not adjacent to any reverse video.
data += ["\nThis line contains a '", 27, "7' quoted reverse video underlined blank."]
data += [27, "8", 27, "[4;7m", 27, "[4@", 27, "[0m"]
data += ["\nThis line contains a '", 27, "7' quoted reverse video blank."]
data += [27, "8", 27, "[7m", 27, "[4@", 27, "[0m"]
data += ["\nThis line contains a '", 27, "7' quoted underlined blank."]
data += [27, "8", 27, "[4m", 27, "[4@", 27, "[0m"]
data += ["\nThis line has a reverse video underlined blank at the right margin."]
data += [27, "7", 27, "[4;7m", "\t ", 27, "8", 27, "[4P", 27, "[0m"]
data += ["\nThis line has a reverse video blank at the right margin."]
data += [27, "7", 27, "[7m", "\t ", 27, "8", 27, "[4P", 27, "[0m"]
data += ["\nThis line has an underlined blank at the right margin."]
data += [27, "7", 27, "[4m", "\t ", 27, "8", 27, "[4P", 27, "[0m"]
data += ["\n\nPush <RETURN>"]
write_test(filename, data)
def create_insert_delete_characters_with_attributes2(filename):
# Double-width version of create_insert_delete_characters_with_attributes1
data = [27, "[2J", 27, "[HTest of insert/delete characters with character attributes 2"]
data += [27, "[3g", 27, "[1;80H", 27, "H", "\n"]
# The ordering of these tests is important for verifying the output; this
# way the reverse video underline is visible as it forms a line in the two adjacent
# reverse video lines, and the non-reverse video underline is visible as
# it's not adjacent to any reverse video.
data += ["\n", 27, "#6'", 27, "7' reverse video underlined blank"]
data += [27, "8", 27, "[4;7m", 27, "[4@", 27, "[0m"]
data += ["\n", 27, "#6'", 27, "7' reverse video blank"]
data += [27, "8", 27, "[7m", 27, "[4@", 27, "[0m"]
data += ["\n", 27, "#6'", 27, "7' quoted underlined blank"]
data += [27, "8", 27, "[4m", 27, "[4@", 27, "[0m"]
data += ["\n", 27, "#6Reverse video underlined right"]
data += [27, "7", 27, "[4;7m", "\t ", 27, "8", 27, "[4P", 27, "[0m"]
data += ["\n", 27, "#6Reverse video blank right"]
data += [27, "7", 27, "[7m", "\t ", 27, "8", 27, "[4P", 27, "[0m"]
data += ["\n", 27, "#6Underlined blank right"]
data += [27, "7", 27, "[4m", "\t ", 27, "8", 27, "[4P", 27, "[0m"]
data += ["\n\nPush <RETURN>"]
write_test(filename, data)
def create_delete_line_high_count(filename):
    # Test case for a bug found in the implementation of delete_line where
# bottom_margin + 1 - count was negative.
data = [27, "[2J", 27, "[HTest of delete line with high count"]
data += [27, "[7HLine 5"]
data += [27, "[3;6r", 27, "[3HLine 1\nLine 2\nLine 3\nLine 4"]
data += [27, "[4H", 27, "[7M"]
data += [27, "[r", 27, "[4Hx\nx\nx\n\n\n"]
data += ["There should be three lines with just 'x' on between line 1 and line 5.\n"]
data += ["Push <RETURN>"]
write_test(filename, data)
def create_delete_character_high_count(filename):
# Test case for a bug found in the implementation of delete_character where
# line_length - count was negative; because the count is clamped at 80,
# this can only occur with double-width lines.
data = [27, "[2J", 27, "[HTest of delete character with high count\n\n"]
data += [27, "#6", "This sentence | |
from dea.models import Journal
from decimal import Decimal
from product.attributes import get_product_attributes_data
from django_extensions.db.fields import AutoSlugField
from django.contrib.postgres.fields import HStoreField
from django.db import models
from django.db.models import Sum
from django.db.models.functions import Coalesce
from django.urls import reverse
from django.utils.encoding import smart_text
from django.utils.text import slugify
# from django_measurement.models import MeasurementField
# from measurement.measures import Weight
from mptt.managers import TreeManager
from mptt.models import MPTTModel,TreeForeignKey
from versatileimagefield.fields import PPOIField, VersatileImageField
from .weight import WeightUnits, zero_weight
from .managers import StockManager
from utils.friendlyid import encode
class Category(MPTTModel):
# gold ,silver ,other
name = models.CharField(max_length=128,unique=True)
slug = AutoSlugField(populate_from='name', blank=True)
description = models.TextField(blank=True)
parent = TreeForeignKey('self', null=True, blank=True,
related_name='children',on_delete=models.CASCADE)
background_image = VersatileImageField(
upload_to='category-backgrounds', blank=True, null=True)
    class MPTTMeta:
order_insertion_by = ['name']
def __str__(self):
return self.name
def get_absolute_url(self):
return reverse('product_category_detail', args=(self.slug,))
def get_update_url(self):
return reverse('product_category_update', args=(self.slug,))
class ProductType(models.Model):
# ring,bracelet,chain,necklace,harem,dollar,urupudi,coin,kalkas,moppu,mugti,kamal,tops,kassaset,jhapaka,mattal
name = models.CharField(max_length=128)
has_variants = models.BooleanField(default=True)
product_attributes = models.ManyToManyField(
'Attribute', related_name='product_types', blank=True)
variant_attributes = models.ManyToManyField(
'Attribute', related_name='product_variant_types', blank=True)
class Meta:
app_label = 'product'
ordering=('name',)
def __str__(self):
return self.name
def get_absolute_url(self):
return reverse('product_producttype_detail', args=(self.pk,))
def get_update_url(self):
return reverse('product_producttype_update', args=(self.pk,))
def __repr__(self):
class_ = type(self)
return '<%s.%s(pk=%r, name=%r)>' % (
class_.__module__, class_.__name__, self.pk, self.name)
class Product(models.Model):
# tv ring,plate ring,dc chain,gc chain
product_type = models.ForeignKey(ProductType,
related_name='products', on_delete=models.CASCADE)
name = models.CharField(max_length=128,unique = True)
description = models.TextField()
category = models.ForeignKey(Category,
related_name='products', on_delete=models.CASCADE)
attributes = HStoreField(default=dict, blank=True)
jattributes = models.JSONField(default = dict,blank = True,null = True)
class Meta:
app_label = 'product'
ordering=('name',)
def __iter__(self):
if not hasattr(self, '__variants'):
setattr(self, '__variants', self.variants.all())
return iter(getattr(self, '__variants'))
def __repr__(self):
class_ = type(self)
return '<%s.%s(pk=%r, name=%r)>' % (
class_.__module__, class_.__name__, self.pk, self.name)
def __str__(self):
return self.name
def get_absolute_url(self):
return reverse('product_product_detail', args=(self.pk,))
def get_update_url(self):
return reverse('product_product_update', args=(self.pk,))
def is_in_stock(self):
return any(variant.is_in_stock() for variant in self)
def get_first_image(self):
images = list(self.images.all())
return images[0].image if images else None
def get_attributes(self):
return get_product_attributes_data(self)
class ProductVariant(models.Model):
sku = models.CharField(max_length=32, unique=True)
name = models.CharField(max_length=255,unique = True)
product = models.ForeignKey(Product,
related_name='variants', on_delete=models.CASCADE)
product_code = models.CharField(max_length=32,unique = True)
attributes = HStoreField(default=dict, blank=True)
jattributes = models.JSONField(default = dict)
images = models.ManyToManyField('ProductImage', through='VariantImage')
class Meta:
app_label = 'product'
def __str__(self):
return f"{self.name} {self.product_code}"
def get_attributes(self):
return get_product_attributes_data(self.product)
def get_bal(self):
st = StockTransaction.objects.filter(stock__variant_id = self.id)
ins = st.filter(activity_type__in=['P','SR','AR'])
i={}
o={}
if ins.exists():
i = ins.aggregate(
wt = Sum('weight'),qty=Sum('quantity'))
else:
i['wt']=0
i['qty']=0
out = st.filter(activity_type__in=['S', 'PR', 'A'])
if out.exists():
o = out.aggregate(
wt=Sum('weight'), qty=Sum('quantity'))
else:
o['wt']=0
o['qty']=0
total = {'wt':i['wt']-o['wt'],'qty':i['qty']-o['qty']}
return total
def get_absolute_url(self):
return reverse('product_productvariant_detail', args=(self.pk,))
def get_update_url(self):
return reverse('product_productvariant_update', args=(self.pk,))
def display_product(self, translated=False):
if translated:
product = self.product.translated
variant_display = str(self.translated)
else:
variant_display = str(self)
product = self.product
product_display = (
'%s (%s)' % (product, variant_display)
if variant_display else str(product))
return smart_text(product_display)
def get_first_image(self):
images = list(self.images.all())
if images:
return images[0].image
return self.product.get_first_image()
def get_ajax_label(self):
return '%s, %s' % (
self.sku, self.display_product())
# class ProductPrice(models.Model):
# product = models.ForeignKey('ProductVariant',on_delete = models.CASCADE)
# price = models.IntegerField()
# def __str__(self):
# return f"{self.price}"
# class CostPrice(models.Model):
# productprice = models.ForeignKey('ProductPrice',on_delete = models.CASCADE)
# contact = models.ForeignKey('contact.Customer',on_delete = models.CASCADE)
# def __str__(self):
# return f"{self.price}"
class Attribute(models.Model):
name = models.CharField(max_length=50)
slug = AutoSlugField(populate_from='name', blank=True)
class Meta:
ordering = ('id', )
def __str__(self):
return self.name
def get_absolute_url(self):
return reverse('product_attribute_detail', args=(self.slug,))
def get_update_url(self):
return reverse('product_attribute_update', args=(self.slug,))
def get_formfield_name(self):
return slugify('attribute-%s' % self.slug, allow_unicode=True)
def has_values(self):
return self.values.exists()
class AttributeValue(models.Model):
name = models.CharField(max_length=100)
value = models.CharField(max_length=100, default='')
slug = AutoSlugField(populate_from='name', blank=True)
attribute = models.ForeignKey(
Attribute, related_name='values', on_delete=models.CASCADE)
class Meta:
ordering = ('-id',)
unique_together = ('name', 'attribute')
def __str__(self):
return self.name
def get_absolute_url(self):
return reverse('product_attributevalue_detail', args=(self.slug,))
def get_update_url(self):
return reverse('product_attributevalue_update', args=(self.slug,))
def get_ordering_queryset(self):
return self.attribute.values.all()
class ProductImage(models.Model):
product = models.ForeignKey(Product,
related_name='images', on_delete=models.CASCADE)
image = VersatileImageField(
upload_to='product/', ppoi_field='ppoi', blank=False)
ppoi = PPOIField('Image PPOI')
alt = models.CharField(max_length=128, blank=True)
class Meta:
ordering = ('-id', )
app_label = 'product'
def get_absolute_url(self):
return reverse('product_productimage_detail', args=(self.pk,))
def get_update_url(self):
return reverse('product_productimage_update', args=(self.pk,))
def get_ordering_queryset(self):
return self.product.images.all()
class VariantImage(models.Model):
variant = models.ForeignKey(
'ProductVariant', related_name='variant_images',
on_delete=models.CASCADE)
image = models.ForeignKey(
ProductImage, related_name='variant_images', on_delete=models.CASCADE)
def get_absolute_url(self):
return reverse('product_variantimage_detail', args=(self.pk,))
def get_update_url(self):
return reverse('product_variantimage_update', args=(self.pk,))
# class StockItem(MPTTModel):
# created = models.DateTimeField(auto_now_add=True)
# name = models.CharField()
# barcode = models.CharField(unique = True)
# huid = models.CharField(unique = True,null = True,blank = True)
# variant = models.ForeignKey(
# Product,on_delete= models.CASCADE,
# related_name='variants',
# )
# is_batch = models.BooleanField(default = False)
# parent = TreeForeignKey(
# 'self',null = True,blank = True,related_name = 'batches',
# on_delete = models.CASCADE
# )
# melting = models.DecimalField(max_digits=10, decimal_places=3, default=100)
# cost = models.DecimalField(max_digits=10, decimal_places=3, default=100)
# touch = models.DecimalField(max_digits=10, decimal_places=3, default=0)
# wastage = models.DecimalField(max_digits=10, decimal_places=3, default=0)
# tracking_type = models.CharField(choices=(
# ('Lot', 'Lot'), ('Unique', 'Unique')),
# null=True, max_length=10,
# default='Lot')
# status = models.CharField(max_length=10, choices=(
# ('Empty', 'Empty'),
# ('Available', 'Available'), ('Sold', 'Sold'),
# ('Approval', 'Approval'), ('Return', 'Return'),
# ('Merged', 'Merged'),
# ),
# default='Empty')
# class MPPTMeta:
# order_insertion_by = ['name']
class Stock(models.Model):
created = models.DateTimeField(auto_now_add = True)
updated_on = models.DateTimeField(auto_now=True)
reorderat = models.IntegerField(default=1)
barcode = models.CharField(max_length=6, null = True,
blank=True, unique=True,editable = False)
huid = models.CharField(max_length=6,null=True,blank=True,unique = True)
variant = models.ForeignKey(ProductVariant, on_delete=models.CASCADE,
# related_name = 'stocks'
)
    # following attributes are not in DNF, i.e. duplicates of variant
melting = models.DecimalField(max_digits =10,decimal_places=3, default =100)
cost = models.DecimalField(max_digits = 10,decimal_places = 3,default = 100)
touch = models.DecimalField(max_digits =10,decimal_places =3,default = 0)
wastage = models.DecimalField(max_digits =10 ,decimal_places =3,default = 0)
tracking_type = models.CharField(choices = (('Lot','Lot'),
('Unique','Unique')),verbose_name='track_by',
null = True,max_length=10,default = 'Lot')
status = models.CharField(max_length=10,choices = (
('Empty','Empty'),('Available','Available'),('Sold','Sold'),
('Approval','Approval'),('Return','Return'),('Merged','Merged'),
),default = 'Empty')
objects = StockManager()
class Meta:
ordering=('-created',)
def __str__(self):
cb = self.current_balance()
return f"{self.variant} {self.barcode} {cb['wt']} {cb['qty']}"
def get_absolute_url(self):
return reverse('product_stock_detail', args=(self.pk,))
def get_update_url(self):
return reverse('product_stock_update', args=(self.pk,))
def get_pure_by_melting(self):
bal = self.current_balance()
return bal['wt']*self.melting
def get_pure_by_cost(self):
bal = self.current_balance()
return bal['wt']*self.cost
def audit(self):
        # get the last audit's closing balance, total in and total out, then append a new statement
try:
last_statement = self.stockstatement_set.latest()
except StockStatement.DoesNotExist:
last_statement = None
if last_statement is not None:
ls_wt = last_statement.Closing_wt
ls_qty = last_statement.Closing_qty
else:
ls_wt = 0
ls_qty = 0
stock_in = self.stock_in_txns(last_statement)
stock_out = self.stock_out_txns(last_statement)
cb_wt = ls_wt + (stock_in['wt'] - stock_out['wt'])
cb_qty = ls_qty + (stock_in['qty'] - stock_out['qty'])
return StockStatement.objects.create(stock = self,
Closing_wt = cb_wt,
Closing_qty = cb_qty,
total_wt_in = stock_in['wt'],
total_qty_in = stock_in['qty'],
total_wt_out = stock_out['wt'],
total_qty_out = stock_out['qty'])
def stock_in_txns(self,ls):
# filter since last audit
st = self.stocktransaction_set.all()
if ls :
st = st.filter(created__gte = ls.created)
st = st.filter(
activity_type__in=['P', 'SR', 'AR', 'AD'])
return st.aggregate(
qty=Coalesce(
Sum('quantity', output_field=models.IntegerField()), 0),
wt=Coalesce(
Sum('weight', output_field=models.DecimalField()), Decimal(0.0))
)
def stock_out_txns(self,ls):
# filter since last audit
st = self.stocktransaction_set.all()
if ls:
st = st.filter(created__gte = ls.created)
st = st.filter(
activity_type__in=['PR', 'S', 'A', 'RM'])
return st.aggregate(
qty=Coalesce(Sum('quantity', output_field=models.IntegerField()), 0),
wt=Coalesce(Sum('weight',output_field = models.DecimalField()), Decimal(0.0)))
def current_balance(self):
        # compute the current balance from the last audit statement plus transactions since then
bal = {}
try:
ls = self.stockstatement_set.latest()
Closing_wt = ls.Closing_wt
Closing_qty = ls.Closing_qty
except StockStatement.DoesNotExist:
ls = None
Closing_wt =0
Closing_qty =0
in_txns = self.stock_in_txns(ls)
out_txns = self.stock_out_txns(ls)
bal['wt'] = Closing_wt + (in_txns['wt'] - out_txns['wt'])
bal['qty'] = Closing_qty + (in_txns['qty'] - out_txns['qty'])
return bal
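    # Worked example: if the latest statement closed at wt=100.0, qty=10 and
    # since then inbound transactions added wt=20.0, qty=2 while outbound
    # transactions removed wt=5.0, qty=1, current_balance() returns
    # {'wt': 115.0, 'qty': 11}.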
def get_age(self):
pass
# if self.tracking_type == 'Lot':
# get average of purchase date from today and average of sale dates from today then sub both
# return 0
# else:
# check if sold then timedelta between created and last sales transaction
# else timedelta between today and date created
def add(self,weight,quantity,journal,activity_type):
print('came to create stxns')
StockTransaction.objects.create(journal = journal,
stock = self,weight = weight,quantity = quantity,activity_type=activity_type)
self.update_status()
        print('succeeded')
def remove(self,weight,quantity,journal,activity_type):
StockTransaction.objects.create(
journal = journal,
stock=self,
weight=weight,quantity=quantity,
activity_type=activity_type)
self.update_status()
def split(self,weight):
# split from stock:tracking_type::lot to unique
cb = self.current_balance()
if self.tracking_type == "Lot" and cb['wt'] >= weight and cb['qty'] >1:
uniq_stock = Stock.objects.create(variant = self.variant,
tracking_type = 'Unique')
uniq_stock.barcode='je'+ str(uniq_stock.id)
uniq_stock.melting = self.melting
uniq_stock.cost = self.cost
uniq_stock.touch = self.touch
uniq_stock.wastage = self.wastage
uniq_stock.add(weight,1,
None,'AD')
            self.remove(weight, 1, None, 'RM')
else:
            print("unique nodes can't be split. Hint: merge to lot and then split")
def merge(self):
# merge stock:tracking_type:unique to lot
if self.tracking_type == "Unique":
lot = Stock.objects.get(variant = self.variant,tracking_type = "Lot")
cb=self.current_balance()
lot.add(cb['wt'],cb['qty'],None,'AD')
            self.remove(cb['wt'], cb['qty'], None, "RM")
else:
            print("lot can't be merged further")
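    # Illustrative flow (hypothetical objects): given a "Lot" stock with enough
    # balance, lot.split(5.0) carves out a one-piece "Unique" stock of weight
    # 5.0; later unique.merge() moves its balance back into the variant's "Lot"
    # stock. Both operations record paired 'AD'/'RM' stock transactions.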
    def transfer(self):
pass
def update_status(self):
cb = self.current_balance()
if cb['wt'] <= 0.0 or cb['qty'] <=0:
self.status = "Empty"
else:
self.status = "Available"
self.save()
def save(self,*args,**kwargs):
super(Stock, self).save(*args, **kwargs)
if not self.barcode:
self.barcode = encode(self.pk)
self.save()
# class StockBatch(models.Model):
# created = models.DateTimeField(auto_now_add=True)
# qty = models.IntegerField(default =0)
False
>>> is_numpy_array(np.int64(3))
False
>>> is_numpy_array(3.5)
False
>>> is_numpy_array(np.float64(3.5))
False
>>> is_numpy_array('hi')
False
>>> is_numpy_array(None)
False
>>> is_numpy_array(None, allow_none=True)
True
"""
import numpy as np
return is_instance(arg, np.ndarray, allow_none)
def check_numpy_array(arg, allow_none=False, message='Argument "%(string)s" is not a NumPy array, but of type %(actual)s', level=1):
"""
>>> import numpy as np
>>> check_numpy_array(np.array([1, 2, 3]))
array([1, 2, 3])
>>> check_numpy_array(np.array([[1, 2, 3], [1, 2, 3]]))
array([[1, 2, 3],
[1, 2, 3]])
>>> check_numpy_array(np.array(3))
array(3)
>>> check_numpy_array([1, 2, 3])
Traceback (most recent call last):
...
AssertionError: Argument "[1, 2, 3]" is not a NumPy array, but of type <class 'list'>
>>> check_numpy_array(3)
Traceback (most recent call last):
...
AssertionError: Argument "3" is not a NumPy array, but of type <class 'int'>
>>> check_numpy_array(np.int64(3))
Traceback (most recent call last):
...
AssertionError: Argument "3" is not a NumPy array, but of type <class 'numpy.int64'>
>>> check_numpy_array(3.5)
Traceback (most recent call last):
...
AssertionError: Argument "3.5" is not a NumPy array, but of type <class 'float'>
>>> check_numpy_array(np.float64(3.5))
Traceback (most recent call last):
...
AssertionError: Argument "3.5" is not a NumPy array, but of type <class 'numpy.float64'>
>>> check_numpy_array('hi')
Traceback (most recent call last):
...
AssertionError: Argument "hi" is not a NumPy array, but of type <class 'str'>
>>> check_numpy_array(None)
Traceback (most recent call last):
...
AssertionError: Argument "None" is not a NumPy array, but of type <class 'NoneType'>
>>> check_numpy_array(None, allow_none=True)
"""
check(is_numpy_array(arg, allow_none), lambda: message % {'string': str(arg), 'actual': type(arg)}, level)
return arg
def is_string(arg, allow_none=False):
"""
>>> is_string([1, 2, 3])
False
>>> is_string(3)
False
>>> is_string(3.5)
False
>>> is_string('hi')
True
>>> is_string("hi")
True
>>> is_string(None)
False
>>> is_string(None, allow_none=True)
True
"""
return is_instance(arg, str, allow_none)
def check_string(arg, allow_none=False, message='Argument "%(string)s" of type %(actual)s is not a string', level=1):
"""
>>> check_string([1, 2, 3])
Traceback (most recent call last):
...
AssertionError: Argument "[1, 2, 3]" of type <class 'list'> is not a string
>>> check_string(3)
Traceback (most recent call last):
...
AssertionError: Argument "3" of type <class 'int'> is not a string
>>> check_string(3.5)
Traceback (most recent call last):
...
AssertionError: Argument "3.5" of type <class 'float'> is not a string
>>> check_string('hi')
'hi'
>>> check_string("hi")
'hi'
>>> check_string(None)
Traceback (most recent call last):
...
AssertionError: Argument "None" of type <class 'NoneType'> is not a string
>>> check_string(None, allow_none=True)
"""
check(is_string(arg, allow_none), lambda: message % {'string': str(arg), 'actual': type(arg)}, level)
return arg
def is_date(arg, allow_none=False):
"""
>>> is_date([1, 2, 3])
False
>>> is_date(3)
False
>>> is_date(3.5)
False
>>> is_date('hi')
False
>>> is_date("hi")
False
>>> import datetime as dt
>>> is_date(dt.date(2019, 9, 10))
True
>>> is_date(dt.time(12, 3))
False
>>> is_date(dt.datetime(2019, 9, 10, 12, 3))
False
>>> is_date(dt.timedelta(seconds=5))
False
>>> import numpy as np
>>> is_date(np.timedelta64(5, 's'))
False
>>> import pandas as pd
>>> is_date(pd.Timedelta(5, 's'))
False
>>> is_date(None)
False
>>> is_date(None, allow_none=True)
True
"""
import datetime as dt
if is_instance(arg, dt.datetime, allow_none=False): return False
return is_instance(arg, dt.date, allow_none)
def check_date(arg, allow_none=False, message='Argument "%(string)s" of type %(actual)s is not a date', level=1):
"""
>>> check_date([1, 2, 3])
Traceback (most recent call last):
...
AssertionError: Argument "[1, 2, 3]" of type <class 'list'> is not a date
>>> check_date(3)
Traceback (most recent call last):
...
AssertionError: Argument "3" of type <class 'int'> is not a date
>>> check_date(3.5)
Traceback (most recent call last):
...
AssertionError: Argument "3.5" of type <class 'float'> is not a date
>>> check_date('hi')
Traceback (most recent call last):
...
AssertionError: Argument "hi" of type <class 'str'> is not a date
>>> check_date("hi")
Traceback (most recent call last):
...
AssertionError: Argument "hi" of type <class 'str'> is not a date
>>> import datetime as dt
>>> check_date(dt.date(2019, 9, 10))
datetime.date(2019, 9, 10)
>>> check_date(dt.time(12, 3))
Traceback (most recent call last):
...
AssertionError: Argument "12:03:00" of type <class 'datetime.time'> is not a date
>>> check_date(dt.datetime(2019, 9, 10, 12, 3))
Traceback (most recent call last):
...
AssertionError: Argument "2019-09-10 12:03:00" of type <class 'datetime.datetime'> is not a date
>>> check_date(dt.timedelta(seconds=5))
Traceback (most recent call last):
...
AssertionError: Argument "0:00:05" of type <class 'datetime.timedelta'> is not a date
>>> import numpy as np
>>> check_date(np.timedelta64(5, 's'))
Traceback (most recent call last):
...
AssertionError: Argument "5 seconds" of type <class 'numpy.timedelta64'> is not a date
>>> import pandas as pd
>>> check_date(pd.Timedelta(5, 's'))
Traceback (most recent call last):
...
AssertionError: Argument "0 days 00:00:05" of type <class 'pandas._libs.tslibs.timedeltas.Timedelta'> is not a date
>>> check_date(None)
Traceback (most recent call last):
...
AssertionError: Argument "None" of type <class 'NoneType'> is not a date
>>> check_date(None, allow_none=True)
"""
check(is_date(arg, allow_none), lambda: message % {'string': str(arg), 'actual': type(arg)}, level)
return arg
def is_some_date(arg, allow_none=False):
"""
>>> is_some_date([1, 2, 3])
False
>>> is_some_date(3)
False
>>> is_some_date(3.5)
False
>>> is_some_date('hi')
False
>>> is_some_date("hi")
False
>>> import datetime as dt
>>> is_some_date(dt.date(2019, 9, 10))
True
>>> is_some_date(dt.time(12, 3))
False
>>> is_some_date(dt.datetime(2019, 9, 10, 12, 3))
False
>>> is_some_date(dt.timedelta(seconds=5))
False
>>> import numpy as np
>>> is_some_date(np.timedelta64(5, 's'))
False
>>> import pandas as pd
>>> is_some_date(pd.Timedelta(5, 's'))
False
>>> is_some_date(None)
False
>>> is_some_date(None, allow_none=True)
True
"""
return is_date(arg, allow_none)
def check_some_date(arg, allow_none=False, message='Argument "%(string)s" of type %(actual)s is not a date', level=1):
"""
>>> check_some_date([1, 2, 3])
Traceback (most recent call last):
...
AssertionError: Argument "[1, 2, 3]" of type <class 'list'> is not a date
>>> check_some_date(3)
Traceback (most recent call last):
...
AssertionError: Argument "3" of type <class 'int'> is not a date
>>> check_some_date(3.5)
Traceback (most recent call last):
...
AssertionError: Argument "3.5" of type <class 'float'> is not a date
>>> check_some_date('hi')
Traceback (most recent call last):
...
AssertionError: Argument "hi" of type <class 'str'> is not a date
>>> check_some_date("hi")
Traceback (most recent call last):
...
AssertionError: Argument "hi" of type <class 'str'> is not a date
>>> import datetime as dt
>>> check_some_date(dt.date(2019, 9, 10))
datetime.date(2019, 9, 10)
>>> check_some_date(dt.time(12, 3))
Traceback (most recent call last):
...
AssertionError: Argument "12:03:00" of type <class 'datetime.time'> is not a date
>>> check_some_date(dt.datetime(2019, 9, 10, 12, 3))
Traceback (most recent call last):
...
AssertionError: Argument "2019-09-10 12:03:00" of type <class 'datetime.datetime'> is not a date
>>> check_some_date(dt.timedelta(seconds=5))
Traceback (most recent call last):
...
AssertionError: Argument "0:00:05" of type <class 'datetime.timedelta'> is not a date
>>> import numpy as np
>>> check_some_date(np.timedelta64(5, 's'))
Traceback (most recent call last):
...
AssertionError: Argument "5 seconds" of type <class 'numpy.timedelta64'> is not a date
>>> import pandas as pd
>>> check_some_date(pd.Timedelta(5, 's'))
Traceback (most recent call last):
...
AssertionError: Argument "0 days 00:00:05" of type <class 'pandas._libs.tslibs.timedeltas.Timedelta'> is not a date
>>> check_some_date(None)
Traceback (most recent call last):
...
AssertionError: Argument "None" of type <class 'NoneType'> is not a date
>>> check_some_date(None, allow_none=True)
"""
check(is_some_date(arg, allow_none), lambda: message % {'string': str(arg), 'actual': type(arg)}, level)
return arg
def is_time(arg, allow_none=False):
"""
>>> is_time([1, 2, 3])
False
>>> is_time(3)
False
>>> is_time(3.5)
False
>>> is_time('hi')
False
>>> is_time("hi")
False
>>> import datetime as dt
>>> is_time(dt.date(2019, 9, 10))
False
>>> is_time(dt.time(12, 3))
True
>>> is_time(dt.datetime(2019, 9, 10, 12, 3))
False
>>> is_time(dt.timedelta(seconds=5))
False
>>> import numpy as np
>>> is_time(np.timedelta64(5, 's'))
False
>>> import pandas as pd
>>> is_time(pd.Timedelta(5, 's'))
False
>>> is_time(None)
False
>>> is_time(None, allow_none=True)
True
"""
import datetime as dt
return is_instance(arg, dt.time, allow_none)
def check_time(arg, allow_none=False, message='Argument "%(string)s" of type %(actual)s is not a time', level=1):
"""
>>> check_time([1, 2, 3])
Traceback (most recent call last):
...
AssertionError: Argument "[1, 2, 3]" of type <class 'list'> is not a time
>>> check_time(3)
Traceback (most recent call last):
...
AssertionError: Argument "3" of type <class 'int'> is not a time
>>> check_time(3.5)
Traceback (most recent call last):
...
AssertionError: Argument "3.5" of type <class 'float'> is not a time
>>> check_time('hi')
Traceback (most recent call last):
...
AssertionError: Argument "hi" of type <class 'str'> is not a time
>>> check_time("hi")
Traceback (most recent call last):
...
    AssertionError: Argument "hi" of type <class 'str'> is not a time
# File: auv_nav/process.py
# -*- coding: utf-8 -*-
"""
Copyright (c) 2020, University of Southampton
All rights reserved.
Licensed under the BSD 3-Clause License.
See LICENSE.md file in the project root for full license information.
"""
import copy
import json
import threading
import time
from pathlib import Path
import numpy as np
# Import libraries
import yaml
# fmt: off
from auv_nav.localisation.dead_reckoning import dead_reckoning
from auv_nav.localisation.ekf import (
ExtendedKalmanFilter,
save_ekf_to_list,
update_camera_list,
)
from auv_nav.localisation.pf import run_particle_filter
from auv_nav.localisation.usbl_filter import usbl_filter
from auv_nav.localisation.usbl_offset import usbl_offset
from auv_nav.plot.plot_process_data import (
plot_2d_deadreckoning,
plot_deadreckoning_vs_time,
plot_orientation_vs_time,
plot_pf_uncertainty,
plot_uncertainty,
plot_velocity_vs_time,
)
from auv_nav.sensors import (
Altitude,
BodyVelocity,
Camera,
Depth,
InertialVelocity,
Orientation,
Other,
SyncedOrientationBodyVelocity,
Usbl,
)
from auv_nav.tools.body_to_inertial import body_to_inertial
from auv_nav.tools.csv_tools import spp_csv, write_csv, write_sidescan_csv
from auv_nav.tools.dvl_level_arm import compute_angular_speeds, correct_lever_arm
from auv_nav.tools.interpolate import interpolate, interpolate_sensor_list
from auv_nav.tools.latlon_wgs84 import metres_to_latlon
from auv_nav.tools.time_conversions import (
epoch_from_json,
epoch_to_datetime,
string_to_epoch,
)
from oplab import (
Console,
Mission,
Vehicle,
get_config_folder,
get_processed_folder,
valid_dive,
)
# fmt: on
"""
Assumes the filenames for cameras 1, 2 and 3 contain the image number between
the last 11 and 4 characters, for the appropriate csv pose estimate file output.
e.g. 'Xviii/Cam51707923/0094853.raw' or 'LM165/001/image0001011.tif'
Scripts to extract data from nav_standard.json and combined.auv.raw, and save
csv files and, if plot is True, save plots
"""
def process(filepath, force_overwite, start_datetime, finish_datetime):
# placeholders
interpolate_remove_flag = False
# selected start and finish time
epoch_start_time = 0
epoch_finish_time = 0
# velocity body placeholders (DVL)
velocity_body_list = []
# velocity inertial placeholders
velocity_inertial_list = []
# orientation placeholders (INS)
orientation_list = []
# depth placeholders
depth_list = []
# altitude placeholders
altitude_list = []
# USBL placeholders
usbl_list = []
# camera1 placeholders
camera1_list = []
camera1_ekf_list = []
camera1_pf_list = []
# camera2 placeholders
camera2_list = []
camera2_ekf_list = []
camera2_pf_list = []
# camera3 placeholders
camera3_list = []
camera3_ekf_list = []
camera3_pf_list = []
ekf_list = []
# placeholders for interpolated velocity body measurements based on
# orientation and transformed coordinates
dead_reckoning_centre_list = []
dead_reckoning_dvl_list = []
# placeholders for dvl_imu_data fused with usbl_data using particle filter
pf_fusion_dvl_list = []
pf_fusion_centre_list = []
pf_usbl_datapoints = []
pf_particles_list = []
pf_northings_std = []
pf_eastings_std = []
pf_yaw_std = []
# placeholders for chemical data
chemical_list = []
# chemical_ekf_list = []
# chemical_pf_list = []
# load auv_nav.yaml for particle filter and other setup
filepath = Path(filepath).resolve()
filepath = get_processed_folder(filepath)
localisation_file = filepath / "auv_nav.yaml"
localisation_file = get_config_folder(localisation_file)
# check that it is a valid dive folder
if not valid_dive(filepath):
Console.error(
"The dive folder supplied does not contain any mission or vehicle",
"YAML files. Is the path correct?",
)
Console.quit("Invalid path")
# check if auv_nav.yaml file exist, if not, generate one with default
# settings
if localisation_file.exists():
Console.info(
"Loading existing auv_nav.yaml at {}".format(localisation_file)
)
else:
root = Path(__file__).parents[1]
default_localisation = root / "auv_nav/default_yaml" / "auv_nav.yaml"
Console.info("default_localisation: {}".format(default_localisation))
Console.warn(
"Cannot find {}, generating default from {}".format(
localisation_file, default_localisation
)
)
# save localisation yaml to processed directory
if not localisation_file.parent.exists():
localisation_file.parent.mkdir(parents=True)
default_localisation.copy(localisation_file)
# copy the configuration file
localisation_file_processed = get_processed_folder(localisation_file)
localisation_file.copy(localisation_file_processed)
# Default to no EKF and PF and SPP
particle_filter_activate = False
ekf_activate = False
spp_output_activate = False
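    # Expected auv_nav.yaml layout (illustrative values only; keys mirror the
    # ones read below):
    #
    #   usbl_filter:     {activate: true, max_auv_speed: 2, sigma_factor: 2}
    #   particle_filter: {activate: false, dvl_noise_sigma_factor: 0.3,
    #                     imu_noise_sigma_factor: 1.0, usbl_noise_sigma_factor: 0.01,
    #                     particles_number: 200, particles_plot_time_interval: 60}
    #   std:             {position_xy: {...}, position_z: {...}, speed: {...},
    #                     orientation: {...}}
    #   ekf:             {activate: true, process_noise_covariance: [...],
    #                     initial_estimate_covariance: [...]}
    #   csv_output:      {activate: true, usbl: true, dead_reckoning: {...},
    #                     particle_filter: {...}, ekf: {...}}
    #   spp_output:      {activate: false, ekf: {...}}
    #   plot_output:     {activate: true, html_plot: true}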
with localisation_file.open("r") as stream:
load_localisation = yaml.safe_load(stream)
if "usbl_filter" in load_localisation:
usbl_filter_activate = load_localisation["usbl_filter"]["activate"]
max_auv_speed = load_localisation["usbl_filter"]["max_auv_speed"]
sigma_factor = load_localisation["usbl_filter"]["sigma_factor"]
if "particle_filter" in load_localisation:
particle_filter_activate = load_localisation["particle_filter"][
"activate"
]
dvl_noise_sigma_factor = load_localisation["particle_filter"][
"dvl_noise_sigma_factor"
]
imu_noise_sigma_factor = load_localisation["particle_filter"][
"imu_noise_sigma_factor"
]
usbl_noise_sigma_factor = load_localisation["particle_filter"][
"usbl_noise_sigma_factor"
]
particles_number = load_localisation["particle_filter"][
"particles_number"
]
particles_time_interval = load_localisation["particle_filter"][
"particles_plot_time_interval"
]
if "std" in load_localisation:
sensors_std = load_localisation["std"]
if "position_xy" not in sensors_std:
sensors_std["position_xy"] = sensors_std["usbl"]
if "position_z" not in sensors_std:
sensors_std["position_z"] = sensors_std["depth"]
if "speed" not in sensors_std:
sensors_std["speed"] = sensors_std["dvl"]
# Default to use JSON uncertainties
if "model" not in sensors_std["position_xy"]:
Console.warn(
"No uncertainty model specified for position_xy, defaulting",
"to sensor (JSON).",
)
sensors_std["position_xy"]["model"] = "sensor"
if "model" not in sensors_std["speed"]:
Console.warn(
"No uncertainty model specified for speed, defaulting to",
"sensor (JSON).",
)
sensors_std["speed"]["model"] = "sensor"
if "model" not in sensors_std["position_z"]:
Console.warn(
"No uncertainty model specified for Depth, defaulting to",
"sensor (JSON).",
)
sensors_std["position_z"]["model"] = "sensor"
if "model" not in sensors_std["orientation"]:
Console.warn(
"No uncertainty model specified for Orientation, defaulting",
"to sensor (JSON).",
)
sensors_std["orientation"]["model"] = "sensor"
if "ekf" in load_localisation:
ekf_activate = load_localisation["ekf"]["activate"]
ekf_process_noise_covariance = load_localisation["ekf"][
"process_noise_covariance"
]
ekf_initial_estimate_covariance = load_localisation["ekf"][
"initial_estimate_covariance"
]
            # A full 15x15 (225-entry) covariance is cropped to the 12-state
            # model used here; otherwise a flat 12x12 (144-entry) matrix is
            # expected.
            if len(ekf_process_noise_covariance) != 144:
d = np.asarray(ekf_process_noise_covariance).reshape((15, 15))
ekf_process_noise_covariance = d[0:12, 0:12]
d = np.asarray(ekf_initial_estimate_covariance).reshape(
(15, 15)
)
ekf_initial_estimate_covariance = d[0:12, 0:12]
else:
ekf_process_noise_covariance = np.asarray(
ekf_process_noise_covariance
).reshape((12, 12))
ekf_initial_estimate_covariance = np.asarray(
ekf_initial_estimate_covariance
).reshape((12, 12))
if "csv_output" in load_localisation:
# csv_active
csv_output_activate = load_localisation["csv_output"]["activate"]
csv_usbl = load_localisation["csv_output"]["usbl"]
csv_dr_auv_centre = load_localisation["csv_output"][
"dead_reckoning"
]["auv_centre"]
csv_dr_auv_dvl = load_localisation["csv_output"]["dead_reckoning"][
"auv_dvl"
]
csv_dr_camera_1 = load_localisation["csv_output"][
"dead_reckoning"
]["camera_1"]
csv_dr_camera_2 = load_localisation["csv_output"][
"dead_reckoning"
]["camera_2"]
csv_dr_camera_3 = load_localisation["csv_output"][
"dead_reckoning"
]["camera_3"]
csv_dr_chemical = load_localisation["csv_output"][
"dead_reckoning"
]["chemical"]
csv_pf_auv_centre = load_localisation["csv_output"][
"particle_filter"
]["auv_centre"]
csv_pf_auv_dvl = load_localisation["csv_output"][
"particle_filter"
]["auv_dvl"]
csv_pf_camera_1 = load_localisation["csv_output"][
"particle_filter"
]["camera_1"]
csv_pf_camera_2 = load_localisation["csv_output"][
"particle_filter"
]["camera_2"]
csv_pf_camera_3 = load_localisation["csv_output"][
"particle_filter"
]["camera_3"]
csv_pf_chemical = load_localisation["csv_output"][
"particle_filter"
]["chemical"]
csv_ekf_auv_centre = load_localisation["csv_output"]["ekf"][
"auv_centre"
]
csv_ekf_camera_1 = load_localisation["csv_output"]["ekf"][
"camera_1"
]
csv_ekf_camera_2 = load_localisation["csv_output"]["ekf"][
"camera_2"
]
csv_ekf_camera_3 = load_localisation["csv_output"]["ekf"][
"camera_3"
]
else:
csv_output_activate = False
Console.warn(
"csv output undefined in auv_nav.yaml. Has been"
+ ' set to "False". To activate, add as per'
+ " default auv_nav.yaml found within auv_nav"
+ ' file structure and set values to "True".'
)
if "spp_output" in load_localisation:
# spp_active
spp_output_activate = load_localisation["spp_output"]["activate"]
spp_ekf_camera_1 = load_localisation["spp_output"]["ekf"][
"camera_1"
]
spp_ekf_camera_2 = load_localisation["spp_output"]["ekf"][
"camera_2"
]
spp_ekf_camera_3 = load_localisation["spp_output"]["ekf"][
"camera_3"
]
if spp_output_activate and not ekf_activate:
Console.warn(
"SLAM++ will be disabled due to EKF being disabled.",
"Enable EKF to make it work.",
)
spp_output_activate = False
else:
spp_output_activate = False
Console.warn(
"SLAM++ output undefined in auv_nav.yaml. Has been"
+ ' set to "False". To activate, add as per'
+ " default auv_nav.yaml found within auv_nav"
+ ' file structure and set values to "True".'
)
if "plot_output" in load_localisation:
plot_output_activate = load_localisation["plot_output"]["activate"]
# pdf_plot = load_localisation["plot_output"]["pdf_plot"]
html_plot = load_localisation["plot_output"]["html_plot"]
Console.info("Loading vehicle.yaml")
vehicle_file = filepath / "vehicle.yaml"
vehicle_file = get_processed_folder(vehicle_file)
vehicle = Vehicle(vehicle_file)
Console.info("Loading mission.yaml")
mission_file = filepath / "mission.yaml"
mission_file = get_processed_folder(mission_file)
mission = Mission(mission_file)
camera1_offsets = [
vehicle.camera1.surge,
vehicle.camera1.sway,
vehicle.camera1.heave,
]
camera2_offsets = [
vehicle.camera2.surge,
vehicle.camera2.sway,
vehicle.camera2.heave,
]
# For BioCam, camera 3 is grayscale camera recording laser
# For SeaXerocks, camera 3 is a separate camera
camera3_offsets = [
vehicle.camera3.surge,
vehicle.camera3.sway,
vehicle.camera3.heave,
]
if mission.image.format == "biocam":
if mission.image.cameras[0].type == "grayscale":
camera3_offsets = [
vehicle.camera1.surge,
vehicle.camera1.sway,
vehicle.camera1.heave,
]
elif mission.image.cameras[1].type == "grayscale":
camera3_offsets = [
vehicle.camera2.surge,
vehicle.camera2.sway,
vehicle.camera2.heave,
]
else:
Console.quit(
"BioCam format is expected to have a grayscale camera."
)
chemical_offset = [
vehicle.chemical.surge,
vehicle.chemical.sway,
vehicle.chemical.heave,
]
outpath = filepath / "nav"
nav_standard_file = outpath / "nav_standard.json"
nav_standard_file = get_processed_folder(nav_standard_file)
Console.info("Loading json file {}".format(nav_standard_file))
with nav_standard_file.open("r") as nav_standard:
parsed_json_data = json.load(nav_standard)
# setup start and finish date time
if start_datetime == "":
epoch_start_time = epoch_from_json(parsed_json_data[1])
start_datetime = epoch_to_datetime(epoch_start_time)
else:
epoch_start_time = string_to_epoch(start_datetime)
if finish_datetime == "":
epoch_finish_time = epoch_from_json(parsed_json_data[-1])
finish_datetime = epoch_to_datetime(epoch_finish_time)
else:
epoch_finish_time = string_to_epoch(finish_datetime)
# read in data from json file
# i here is the number of the data packet
for i in range(len(parsed_json_data)):
epoch_timestamp = parsed_json_data[i]["epoch_timestamp"]
if (
epoch_timestamp >= epoch_start_time
and epoch_timestamp <= epoch_finish_time
):
if "velocity" in parsed_json_data[i]["category"]:
if "body" in parsed_json_data[i]["frame"]:
# to check for corrupted data points which have inertial
# frame data values
if "epoch_timestamp_dvl" in parsed_json_data[i]:
# confirm time stamps of dvl are aligned with main
# clock (within a second)
if (
abs(
parsed_json_data[i]["epoch_timestamp"]
- parsed_json_data[i]["epoch_timestamp_dvl"]
)
) < 1.0:
velocity_body = BodyVelocity()
velocity_body.from_json(
parsed_json_data[i], sensors_std["speed"]
)
velocity_body_list.append(velocity_body)
if "inertial" in parsed_json_data[i]["frame"]:
velocity_inertial = InertialVelocity()
velocity_inertial.from_json(parsed_json_data[i])
velocity_inertial_list.append(velocity_inertial)
if "orientation" in parsed_json_data[i]["category"]:
orientation = Orientation()
orientation.from_json(
parsed_json_data[i], sensors_std["orientation"]
)
orientation_list.append(orientation)
if "depth" in parsed_json_data[i]["category"]:
depth = Depth()
depth.from_json(parsed_json_data[i], sensors_std["position_z"])
depth_list.append(depth)
if "altitude" in parsed_json_data[i]["category"]:
altitude = Altitude()
altitude.from_json(parsed_json_data[i])
altitude_list.append(altitude)
if "usbl" in parsed_json_data[i]["category"]:
usbl = Usbl()
usbl.from_json(parsed_json_data[i], sensors_std["position_xy"])
usbl_list.append(usbl)
if "image" in parsed_json_data[i]["category"]:
camera1 = Camera()
# LC
camera1.from_json(parsed_json_data[i], "camera1")
camera1_list.append(camera1)
camera2 = Camera()
camera2.from_json(parsed_json_data[i], "camera2")
camera2_list.append(camera2)
if "laser" in parsed_json_data[i]["category"]:
camera3 = Camera()
camera3.from_json(parsed_json_data[i], "camera3")
camera3_list.append(camera3)
if "chemical" in parsed_json_data[i]["category"]:
chemical = Other()
chemical.from_json(parsed_json_data[i])
chemical_list.append(chemical)
# make path for
# Copyright (c) 2017-2019 Soft8Soft LLC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import math
import bpy
import numpy as np
import mathutils
import pyosl.glslgen
ORTHO_EPS = 1e-5
DEFAULT_MAT_NAME = 'v3d_default_material'
selectedObject = None
selectedObjectsSave = []
prevActiveObject = None
def clamp(val, minval, maxval):
return max(minval, min(maxval, val))
def integerToBlSuffix(val):
suf = str(val)
for i in range(0, 3 - len(suf)):
suf = '0' + suf
return suf
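# For reference (added note): integerToBlSuffix(7) returns '007', mirroring the
# zero-padded numeric suffixes Blender appends to duplicated datablock names.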
def getLightCyclesStrength(bl_light):
return bl_light.energy
def getLightCyclesColor(bl_light):
col = bl_light.color
return [col[0], col[1], col[2]]
def setSelectedObject(bl_obj):
"""
Select object for NLA baking
"""
global prevActiveObject
global selectedObject, selectedObjectsSave
selectedObject = bl_obj
selectedObjectsSave = bpy.context.selected_objects.copy()
# NOTE: seems like we need both selection and setting active object
for o in selectedObjectsSave:
o.select_set(False)
prevActiveObject = bpy.context.view_layer.objects.active
bpy.context.view_layer.objects.active = bl_obj
bl_obj.select_set(True)
def restoreSelectedObjects():
global prevActiveObject
global selectedObject, selectedObjectsSave
selectedObject.select_set(False)
for o in selectedObjectsSave:
o.select_set(True)
bpy.context.view_layer.objects.active = prevActiveObject
prevActiveObject = None
selectedObject = None
selectedObjectsSave = []
def getSceneByObject(obj):
for scene in bpy.data.scenes:
index = scene.objects.find(obj.name)
if index > -1 and scene.objects[index] == obj:
return scene
return None
def getTexImage(bl_tex):
"""
Get texture image from a texture, avoiding AttributeError for textures
without an image (e.g. a texture of type 'NONE').
"""
return getattr(bl_tex, 'image', None)
def getTextureName(bl_texture):
if (isinstance(bl_texture, (bpy.types.ShaderNodeTexImage,
bpy.types.ShaderNodeTexEnvironment))):
tex_name = bl_texture.image.name
else:
tex_name = bl_texture.name
return tex_name
def mat4IsIdentity(mat4):
return mat4 == mathutils.Matrix.Identity(4)
def mat4IsTRSDecomposable(mat4):
# don't use mathutils.Matrix.is_orthogonal_axis_vectors property, because it
# doesn't normalize vectors before checking
mat = mat4.to_3x3().transposed()
v0 = mat[0].normalized()
v1 = mat[1].normalized()
v2 = mat[2].normalized()
return (abs(v0.dot(v1)) < ORTHO_EPS
and abs(v0.dot(v2)) < ORTHO_EPS
and abs(v1.dot(v2)) < ORTHO_EPS)
def mat4SvdDecomposeToMatrs(mat4):
"""
Decompose the given matrix into a pair of TRS-decomposable matrices.
Returns None in case of an error.
"""
try:
u, s, vh = np.linalg.svd(mat4.to_3x3())
mat_u = mathutils.Matrix(u)
mat_s = mathutils.Matrix([[s[0], 0, 0], [0, s[1], 0], [0, 0, s[2]]])
mat_vh = mathutils.Matrix(vh)
# NOTE: a potential reflection part in U and VH matrices isn't considered
mat_trans = mathutils.Matrix.Translation(mat4.to_translation())
mat_left = mat_trans @ (mat_u @ mat_s).to_4x4()
return (mat_left, mat_vh.to_4x4())
except np.linalg.LinAlgError:
# numpy failed to decompose the matrix
return None
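# Usage sketch (added for illustration, not part of the original exporter):
# splitting a shear matrix, which is not TRS-decomposable, into two factors
# that are. Assumes Blender's Python, where mathutils is available; the shear
# values below are made up.
#
#   m = mathutils.Matrix.Shear('XY', 4, (0.4, 0.0))
#   if not mat4IsTRSDecomposable(m):
#       parts = mat4SvdDecomposeToMatrs(m)
#       if parts is not None:
#           mat_left, mat_right = parts
#           # mat_left @ mat_right reproduces m up to floating-point error,
#           # and each factor decomposes cleanly into translation/rotation/scale.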
def findArmature(obj):
for mod in obj.modifiers:
if mod.type == 'ARMATURE' and mod.object is not None:
return mod.object
# use obj.find_armature as a last resort, because it doesn't work with many
# armature modifiers
return obj.find_armature()
def matHasBlendBackside(bl_mat):
return (matIsBlend(bl_mat) and
(hasattr(bl_mat, 'show_transparent_back') and bl_mat.show_transparent_back))
def matIsBlend(bl_mat):
return bl_mat.blend_method in ['BLEND', 'MULTIPLY', 'ADD']
def updateOrbitCameraView(cam_obj, scene):
target_obj = cam_obj.data.v3d.orbit_target_object
eye = cam_obj.matrix_world.to_translation()
target = (cam_obj.data.v3d.orbit_target if target_obj is None
else target_obj.matrix_world.to_translation())
quat = getLookAtAlignedUpMatrix(eye, target).to_quaternion()
quat.rotate(cam_obj.matrix_world.inverted())
quat.rotate(cam_obj.matrix_basis)
rot_mode = cam_obj.rotation_mode
cam_obj.rotation_mode = 'QUATERNION'
cam_obj.rotation_quaternion = quat
cam_obj.rotation_mode = rot_mode
# need to update the camera state (i.e. world matrix) immediately in case of
# several consecutive UI updates
bpy.context.view_layer.update()
def getLookAtAlignedUpMatrix(eye, target):
"""
This method uses camera axes for building the matrix.
"""
axis_z = (eye - target).normalized()
if axis_z.length == 0:
axis_z = mathutils.Vector((0, -1, 0))
axis_x = mathutils.Vector((0, 0, 1)).cross(axis_z)
if axis_x.length == 0:
axis_x = mathutils.Vector((1, 0, 0))
axis_y = axis_z.cross(axis_x)
return mathutils.Matrix([
axis_x,
axis_y,
axis_z,
]).transposed()
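# Usage sketch (added for illustration): build a world matrix that places an
# object at 'eye' looking at 'target', as updateOrbitCameraView() does above.
# Assumes Blender's mathutils; the coordinates are made up.
#
#   eye = mathutils.Vector((5.0, -5.0, 3.0))
#   target = mathutils.Vector((0.0, 0.0, 0.0))
#   rot = getLookAtAlignedUpMatrix(eye, target)          # 3x3 rotation
#   world = mathutils.Matrix.Translation(eye) @ rot.to_4x4()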
def objDataUsesLineRendering(bl_obj_data):
line_settings = getattr(getattr(bl_obj_data, 'v3d', None), 'line_rendering_settings', None)
return bool(line_settings and line_settings.enable)
def getObjectAllCollections(blObj):
return [coll for coll in bpy.data.collections if blObj in coll.all_objects[:]]
def getBlurPixelRadius(context, blLight):
if blLight.type == 'SUN':
relativeRadius = (blLight.shadow_buffer_soft / 100
* int(context.scene.eevee.shadow_cascade_size))
# blur strength doesn't increase after a certain point
return min(max(relativeRadius, 0), 100)
else:
blurGrade = math.floor(blLight.shadow_buffer_soft
* int(context.scene.eevee.shadow_cube_size) / 1000)
blurGrade = min(blurGrade, 9)
# some approximation of Blender blur radius
if blurGrade > 2:
return 4.22 * (blurGrade - 1.5)
else:
return blurGrade
def objHasExportedModifiers(obj):
"""
Check if an object has any modifiers that should be applied before export.
"""
return any([modifierNeedsExport(mod) for mod in obj.modifiers])
def obj_del_not_exported_modifiers(obj):
"""
Remove modifiers that shouldn't be applied before export from an object.
"""
for mod in obj.modifiers:
if not modifierNeedsExport(mod):
obj.modifiers.remove(mod)
def objAddTriModifier(obj):
mod = obj.modifiers.new('Temporary_Triangulation', 'TRIANGULATE')
mod.quad_method = 'FIXED'
mod.keep_custom_normals = True
def objApplyModifiers(obj):
"""
Creates a new mesh from applying modifiers to the mesh of the given object.
Assigns the newly created mesh to the given object. The old mesh's user
count will be decreased by 1.
"""
dg = bpy.context.evaluated_depsgraph_get()
need_linking = dg.scene.collection.objects.find(obj.name) == -1
need_showing = obj.hide_viewport
# NOTE: link the object if it's not in the 'Master Collection' and update
# the view layer to make the depsgraph able to apply modifiers to the object
if need_linking:
dg.scene.collection.objects.link(obj)
obj.update_tag()
# a hidden object doesn't get its modifiers applied, need to make it visible
# before updating the view layer
if need_showing:
obj.hide_viewport = False
bpy.context.view_layer.update()
# NOTE: some modifiers can remove UV layers from an object after applying
# (e.g. Skin), which is a consistent behavior regarding uv usage in the
# viewport (e.g. degenerate tangent space in the Normal Map node)
obj_eval = obj.evaluated_get(dg)
obj.data = bpy.data.meshes.new_from_object(obj_eval,
preserve_all_data_layers=True, depsgraph=dg)
obj.modifiers.clear()
if need_linking:
dg.scene.collection.objects.unlink(obj)
if need_showing:
obj.hide_viewport = True
def objTransferShapeKeys(obj_from, obj_to, depsgraph):
"""
Transfer shape keys from one object to another if it's possible:
- obj_from should be in the current view layer to be evaluated by depsgraph
- obj_to should not have shape keys
- obj_from (after evaluating) and obj_to should have the same amount of vertices
Returns a boolean flag indicating successful transfer.
"""
if obj_from.data.shape_keys is None:
return True
key_blocks_from = obj_from.data.shape_keys.key_blocks
keys_from = [key for key in key_blocks_from if key != key.relative_key
and key != obj_from.data.shape_keys.reference_key]
key_names = [key.name for key in keys_from]
key_values = [key.value for key in keys_from]
key_positions = []
for key in keys_from:
key.value = 0
same_vertex_count = True
for key in keys_from:
key.value = 1
obj_from.update_tag()
bpy.context.view_layer.update()
verts = obj_from.evaluated_get(depsgraph).data.vertices
if len(verts) != len(obj_to.data.vertices):
same_vertex_count = False
break
key_pos = [0] * 3 * len(verts)
verts.foreach_get('co', key_pos)
key_positions.append(key_pos)
key.value = 0
if same_vertex_count:
# basis shape key
obj_to.shape_key_add(name=obj_from.data.shape_keys.reference_key.name)
vert_co = [0] * 3 * len(obj_to.data.vertices)
for i in range(len(key_names)):
key_block = obj_to.shape_key_add(name=key_names[i])
key_block.value = key_values[i]
key_block.data.foreach_set('co', key_positions[i])
else:
# don't create anything if the vertex count doesn't match
pass
for i in range(len(keys_from)):
keys_from[i].value = key_values[i]
return same_vertex_count
def meshNeedTangentsForExport(mesh, optimize_tangents):
"""
Check if it's needed to export tangents for the given mesh.
"""
return (meshHasUvLayers(mesh) and (meshMaterialsUseTangents(mesh)
or not optimize_tangents))
def meshHasUvLayers(mesh):
return bool(mesh.uv_layers.active and len(mesh.uv_layers) > 0)
def meshMaterialsUseTangents(mesh):
for mat in mesh.materials:
if mat and mat.use_nodes and mat.node_tree != None:
node_trees = extractMaterialNodeTrees(mat.node_tree)
for node_tree in node_trees:
for bl_node in node_tree.nodes:
if matNodeUseTangents(bl_node):
return True
# HACK: in most cases this one indicates that object linking is used
# disable tangent optimizations for such cases
elif mat == None:
return True
return False
def matNodeUseTangents(bl_node):
if isinstance(bl_node, bpy.types.ShaderNodeNormalMap):
return True
if (isinstance(bl_node, bpy.types.ShaderNodeTangent)
and bl_node.direction_type == 'UV_MAP'):
return True
if isinstance(bl_node, bpy.types.ShaderNodeNewGeometry):
for out in bl_node.outputs:
if out.identifier == 'Tangent' and out.is_linked:
return True
return False
def extractMaterialNodeTrees(node_tree):
"""NOTE: located here since it's needed for meshMaterialsUseTangents()"""
out = [node_tree]
for bl_node in node_tree.nodes:
if isinstance(bl_node, bpy.types.ShaderNodeGroup):
out += extractMaterialNodeTrees(bl_node.node_tree)
return out
def meshHasNgons(mesh):
for poly in mesh.polygons:
if poly.loop_total > 4:
return True
return False
def modifierNeedsExport(mod):
"""
Modifiers that are applied before export shouldn't be:
- hidden during render (a way to disable export of a modifier)
- ARMATURE modifiers (used separately via skinning)
"""
return mod.show_render and mod.type != 'ARMATURE'
def getSocketDefvalCompat(socket, RGBAToRGB=False, isOSL=False):
"""
Get the default value of input/output sockets in some compatible form.
Vector types such as bpy_prop_aray, Vector, Euler, etc... are converted to lists,
primitive types are converted to int/float.
"""
if socket.type == 'VALUE' or socket.type == 'INT':
return socket.default_value
id='Filtro_Tipo', className='Dropdown2', style={
'background-color': '#c9c9c9',
'border-radius': '14px',
'border-color': 'transparent',
'margin-bottom': '1vh',
'margin-top': '1vh',
'cursor': 'pointer'
}),
]),
# MODAL FOOTER:
dbc.ModalFooter(
# THE FOOTER WILL HAVE A BUTTON:
dbc.Button(
"Fechar", id="closePrim1", className="ms-auto", n_clicks=0, color='dark', outline=True,
)
),
],
id="modalPrim1",
is_open=False,
size='lg'
),
]
)
# DECLARATION OF THE MODAL FOR THE 2ND OPTION OF THE SIDEBAR SELECT BOX:
modalPrim2 = html.Div(
[
dbc.Modal(
[
# MODAL TITLE:
dbc.ModalHeader(dbc.ModalTitle("Filtro: Segundo Gráfico (Barras em Grupos)", style={'color': 'white'})),
# MODAL BODY:
dbc.ModalBody([
# THE BODY WILL BE A 'P'ARAGRAPH AND THE FILTER DROPDOWN OF THE 2ND CHART:
html.P('Selecione a Localização da Receita Federal a Ser filtrada:', style={'color': 'white'}),
# DROPDOWN:
dcc.Dropdown(receita_filtragem, value='Todos', id='filtro4', className='Dropdown4', style={
'background-color': '#c9c9c9',
'border-radius': '14px',
'border-color': 'transparent',
'margin-bottom': '1vh',
'cursor': 'pointer'}),
]),
# MODAL FOOTER:
dbc.ModalFooter(
# THE FOOTER WILL HAVE A BUTTON:
dbc.Button(
"Fechar", id="closePrim2", className="ms-auto", n_clicks=0, color='dark', outline=True
)
),
],
id="modalPrim2",
is_open=False,
size='lg'
),
]
)
# DECLARATION OF THE MODAL FOR THE 3RD OPTION OF THE SIDEBAR SELECT BOX:
modalPrim3 = html.Div(
[
dbc.Modal(
[
# MODAL TITLE:
dbc.ModalHeader(dbc.ModalTitle("Filtro: Terceiro Gráfico (Linhas)", style={'color': 'white'})),
# MODAL BODY:
dbc.ModalBody([
# THE BODY WILL BE A 'P'ARAGRAPH AND THE FILTER DROPDOWN OF THE 3RD CHART:
html.P('Selecione o Tipo de Café a ser filtrado:', style={'color': 'white'}),
# DROPDOWN:
dcc.Dropdown(opcoes3, value='Todos os Tipos de Café', id='filtro3', className='Dropdown3', style={
'background-color': '#c9c9c9',
'border-radius': '14px',
'border-color': 'transparent',
'margin-bottom': '1vh',
'cursor': 'pointer'}),
]),
# MODAL FOOTER:
dbc.ModalFooter(
# THE FOOTER WILL HAVE A BUTTON:
dbc.Button(
"Fechar", id="closePrim3", className="ms-auto", n_clicks=0, color='dark', outline=True
)
),
],
id="modalPrim3",
is_open=False,
size='lg'
),
]
)
# DECLARATION OF THE 1ST MODAL:
modal1 = html.Div(
[
dbc.Modal(
[
# MODAL TITLE:
dbc.ModalHeader(dbc.ModalTitle("Compra de Café Brasileiro", style={'color': 'white'})),
# MODAL BODY:
dbc.ModalBody("Gráfico em barras, representa a quantidade exportada de café brasileiro entre os principais países compradores do produto", style={'color': 'white'}),
# MODAL FOOTER:
dbc.ModalFooter(
dbc.Button(
"Fechar", id="close1", className="ms-auto", n_clicks=0, color='dark', outline=True
)
),
],
id="modal1",
is_open=False,
size='lg',
),
]
)
# DECLARATION OF THE 2ND MODAL:
modal2 = html.Div(
[
dbc.Modal(
[ # MODAL TITLE:
dbc.ModalHeader(dbc.ModalTitle("Importação e Exportação por Receita Federal", style={'color': 'white'})),
# MODAL BODY:
dbc.ModalBody("Dividido entre as receitas federais, este gráfico de barras, divididos em grupos, relata a Exportação e Importação de café.", style={'color': 'white'}),
# MODAL FOOTER:
dbc.ModalFooter(
# THE FOOTER WILL HAVE A BUTTON:
dbc.Button(
"Fechar", id="close2", className="ms-auto", n_clicks=0, color='dark', outline=True
)
),
],
id="modal2",
is_open=False,
size='lg'
),
]
)
# DECLARATION OF THE 3RD MODAL:
modal3 = html.Div(
[
dbc.Modal(
[ # MODAL TITLE:
dbc.ModalHeader(dbc.ModalTitle("Preço Médio do Café Brasileiro", style={'color': 'white'})),
# MODAL BODY:
dbc.ModalBody("Preço médio calculado mensalmente do café brasileiro, estão representadas neste gráfico de Linhas. (Valores em Dólar US$).", style={'color': 'white'}),
# MODAL FOOTER:
dbc.ModalFooter(
# THE FOOTER WILL HAVE A BUTTON:
dbc.Button(
"Fechar", id="close3", className="ms-auto", n_clicks=0, color='dark', outline=True
)
),
],
id="modal3",
is_open=False,
size='lg'
),
]
)
# DECLARATION OF THE 4TH MODAL:
modal4 = html.Div(
[
dbc.Modal(
[ # MODAL TITLE:
dbc.ModalHeader(dbc.ModalTitle("Produção de Café entre Principais Países", style={'color': 'white'})),
# MODAL BODY:
dbc.ModalBody("Os dados de produção do mapa esta localizada em cada ponto de seu local, para navegar entre eles, gire o planeta pressionando e arrastando o mouse.", style={'color': 'white'}),
# MODAL FOOTER:
dbc.ModalFooter(
# THE FOOTER WILL HAVE A BUTTON:
dbc.Button(
"Fechar", id="close4", className="ms-auto", n_clicks=0, color='dark', outline=True
)
),
],
id="modal4",
is_open=False,
size='lg'
),
]
)
# DECLARATION OF THE DEVELOPERS BUTTON MODAL:
modalDev = html.Div(
[
dbc.Modal(
[ # MODAL TITLE:
dbc.ModalHeader(dbc.ModalTitle("Desenvolvedores:", style={'color': 'white'})),
# MODAL BODY:
dbc.ModalBody([
# LIST OF TEAM MEMBERS:
html.Ul([
html.Li('<NAME> - 211061583', style={'color': 'white'}),
html.Li('<NAME> - 211062910', style={'color': 'white'}),
html.Li('<NAME> - 211061592', style={'color': 'white'}),
html.Li('<NAME> - 211039564', style={'color': 'white'}),
html.Li('<NAME> - 211062956', style={'color': 'white'}),
html.Li('<NAME> - 211061897', style={'color': 'white'}),
html.Li('<NAME> - 211061743', style={'color': 'white'}),
html.Li('<NAME> - 190094711', style={'color': 'white'}),
html.Li('<NAME> - 211062938', style={'color': 'white'})
])
]),
# MODAL FOOTER:
dbc.ModalFooter(
# THE FOOTER WILL HAVE A BUTTON:
dbc.Button(
"Fechar", id="closeDev", className="ms-auto", n_clicks=0, color='dark', outline=True
)
),
],
id="modalDev",
is_open=False,
size='xl'
),
]
)
#-----------------------------------------------------------------------------------------
# DECLARATION OF THE SITE IN PARTS:
# C) CHARTS:
# DECLARATION OF THE DCC FOR THE 1ST CHART:
grafico1 = [
dcc.Graph(
id='Grafico_dados',
figure=fig1
)
]
# DECLARATION OF THE DCC FOR THE 2ND CHART:
grafico2 = [
dcc.Graph(
id='Grafico_dados2',
figure=fig2
)
]
# DECLARATION OF THE DCC FOR THE 3RD CHART:
grafico3 = [
dcc.Graph(
id='Grafico_dados3',
figure=fig3
),
]
# DECLARATION OF THE DCC FOR THE 4TH CHART:
grafico4 = [
dcc.Graph(
id='Grafico_dados4',
figure=fig4
)
]
# -----------------------------------------------------------------------------------
# DECLARATION OF THE SITE IN PARTS:
# D) SITE ROWS:
# ORGANIZATION OF THE SITE INTO ROWS, IN THIS CASE ROW 1:
Conteudo_Linha1 = [
# ROW 1 WILL BE MADE UP OF THE CHARTS "grafico1" AND "grafico2", WHICH ARE VARIABLES DECLARED JUST ABOVE:
dbc.Col(html.Div(grafico1), width=5),
dbc.Col(html.Div(grafico2), width=5),
]
# ORGANIZATION OF THE SITE INTO ROWS, IN THIS CASE ROW 2:
Conteudo_Linha2 = [
# ROW 2 WILL BE MADE UP OF THE CHARTS "grafico3" AND "grafico4", WHICH ARE VARIABLES DECLARED JUST ABOVE:
dbc.Col(html.Div(grafico3), width=5),
dbc.Col(html.Div(grafico4), width=5),
]
# --------------------------------------------------------------------------------------
# FINAL DECLARATION OF THE SITE:
# E) LAYOUT:
# DECLARATION OF HOW THE LAYOUT WILL LOOK:
app.layout = html.Div(className='Tudo', id='Tudo', children=[
html.Div(className='Base', children= [
# DIV FOR THE FIRST ROW:
html.Div(className='PrimeiraLinha' , children=[
# THE FIRST ROW WILL HOLD THE CONTENT OF THE VARIABLE 'Conteudo_Linha1':
dbc.Row(
Conteudo_Linha1,
justify="end",
style={'margin-right': '2vw'}
)
]),
# DIV FOR THE SECOND ROW:
html.Div(className='SegundaLinha', children=[
# THE SECOND ROW WILL HOLD THE CONTENT OF THE VARIABLE 'Conteudo_Linha2':
dbc.Row(
Conteudo_Linha2,
justify="end",
style={'margin-right': '2vw'}
),
# DIV FOR THE UnB LOGO IMAGE AT THE BOTTOM OF THE PAGE:
html.Div([
html.Img(src='./assets/logo.png', id='ImagemId', width=200, className='ImagemClass'),
html.P('Desenvolvido por Alunos da Universidade de Brasília - FGA', id='textofinal', className='textofinalClass', style={'font-weight': 'bold'})
], className='finalClass', style={'margin-top': '4vh'})
])
# INCLUSION OF THE VARIABLES CREATED ABOVE:
]), barralateral, modal1, modal2, modal3, modal4, modalPrim1, modalPrim2, modalPrim3, modalDev])
# =====================================================================================================================
# FUNCTION DEFINITION:
# DEFINITION OF A FILTERING FUNCTION THAT WILL REPLACE PANDAS' 'LOC' FUNCTION:
def filtragem(dataframe, pesquisa, coluna):
# ARGUMENTS: THE DATAFRAME TO BE FILTERED, THE REFERENCE VALUE TO SEARCH FOR IN THE DATA, AND WHETHER THE USER SPECIFIED A COLUMN.
Filtro = []
# IF THE USER DOES NOT SPECIFY A PARTICULAR COLUMN:
if coluna == None:
# ITERATE OVER THE 'dataframe' PASSED AS AN ARGUMENT:
for linha in dataframe:
# IF POSITION 0 OF THE ROW CONTAINS THE 'pesquisa' VALUE PASSED AS AN ARGUMENT:
if linha[0] == pesquisa:
# ADD COLUMNS 0, 1, 2, 3, 4, 5, 6 OF THE CURRENT ROW TO THE 'Filtro' VARIABLE:
Filtro += [[linha[0], linha[1], linha[2], linha[3], linha[4], linha[5], linha[6]]]
# CONDITION RESERVED FOR THE 2ND CHART'S CALLBACK:
elif coluna == 3:
# ITERATE OVER THE 'dataframe' PASSED AS AN ARGUMENT:
for linha in dataframe:
# IF POSITION 0 OF THE ROW CONTAINS THE 'pesquisa' VALUE PASSED AS AN ARGUMENT:
if linha[0] == pesquisa:
# ADD COLUMNS 0, 1, 2 OF THE CURRENT ROW TO THE 'Filtro' VARIABLE:
Filtro += [[linha[0], linha[1], linha[2]]]
# IF THE USER ALSO SPECIFIES THE COLUMN TO BE FILTERED:
else:
referencia = 2
# ITERATE OVER THE ELEMENTS OF THE 'opcoes2' LIST (LINE 49):
for alternativa in opcoes2:
# IF THE COLUMN CHOSEN BY THE USER EQUALS THE ELEMENT VISITED IN THE LOOP ABOVE:
if str(coluna) == str(alternativa):
# ITERATE OVER THE ROWS OF THE 'dataframe' PASSED AS AN ARGUMENT:
for linha in dataframe:
# IF POSITION 0 OF THE ROW CONTAINS THE 'pesquisa' VALUE PASSED AS AN ARGUMENT:
if linha[0] == pesquisa:
# ADD COLUMNS 0, 1 AND THE COLUMN AT THE CURRENT 'referencia' VALUE OF THE VISITED ROW TO THE 'Filtro' VARIABLE:
Filtro += [[linha[0], linha[1], linha[referencia]]]
# IF THE USER'S COLUMN DOES NOT MATCH THE ELEMENT VISITED BY THE LOOP AT LINE 706, 'referencia' IS INCREMENTED BY 1:
referencia += 1
return Filtro
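# Illustrative example (added note, not part of the original app): filtragem()
# mimics a pandas .loc lookup over the raw .values array. With dfFl1 = df1.values,
# as in the callback below, and 'Europa' standing in for any continent value:
#
#   linhas_europa = filtragem(dfFl1, 'Europa', None)   # rows whose column 0 == 'Europa', columns 0-6
#
# Passing a column name from 'opcoes2' instead of None keeps only columns 0, 1
# and the requested column.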
# =====================================================================================================================
# START OF THE CALLBACKS:
# CALLBACK FOR CHART 1 (BAR CHART):
@app.callback(
Output('Grafico_dados', 'figure'),
Input('Filtro_Tipo', 'value'),
Input('Filtro_Continentes', 'value')
)
def update_de_dash(tipo, continente):
dfFl1 = df1.values
if tipo == 'TOTAL':
if continente == 'Todos os Continentes':
fig1 = px.bar(df1, x="CONTINENTE", y="TOTAL", color="PAÍS DESTINO", title='Compra de Café Brasileiro por País por Continente')
else:
filtro = filtragem(dfFl1, str(continente), None)
fig1 = px.bar(filtro, x=0, y=6, color=1, title=f'Compra de Café Brasileiro ({continente})', labels={'0': 'CONTINENTE', '6': 'TOTAL', '1': 'PAÍS DESTINO'})
else:
if continente == 'Todos os Continentes':
fig1
# Repository: DarkCode01/rich
from collections.abc import Mapping, Sequence
from contextlib import contextmanager
from dataclasses import dataclass, field, replace
from enum import Enum
from functools import wraps
import inspect
from itertools import chain
import os
from operator import itemgetter
import platform
import re
import shutil
import sys
import threading
from typing import (
Any,
Callable,
Dict,
IO,
Iterable,
List,
Optional,
NamedTuple,
overload,
Tuple,
TYPE_CHECKING,
Union,
)
from typing_extensions import Protocol, runtime_checkable, Literal
from ._emoji_replace import _emoji_replace
from .markup import render as render_markup
from .measure import measure_renderables, Measurement
from ._log_render import LogRender
from .default_styles import DEFAULT_STYLES
from . import errors
from .color import ColorSystem
from .control import Control
from .highlighter import NullHighlighter, ReprHighlighter
from .pretty import Pretty
from .style import Style
from .tabulate import tabulate_mapping
from . import highlighter
from . import themes
from .pretty import Pretty
from .terminal_theme import TerminalTheme, DEFAULT_TERMINAL_THEME
from .segment import Segment
from .text import Text
from .theme import Theme
if TYPE_CHECKING: # pragma: no cover
from .text import Text
WINDOWS = platform.system() == "Windows"
HighlighterType = Callable[[Union[str, "Text"]], "Text"]
JustifyValues = Optional[Literal["left", "center", "right", "full"]]
CONSOLE_HTML_FORMAT = """\
<!DOCTYPE html>
<head>
<style>
{stylesheet}
body {{
color: {foreground};
background-color: {background};
}}
</style>
</head>
<html>
<body>
<code>
<pre style="font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace">{code}</pre>
</code>
</body>
</html>
"""
@dataclass
class ConsoleOptions:
"""Options for __console__ method."""
min_width: int
max_width: int
is_terminal: bool
encoding: str
justify: Optional[JustifyValues] = None
def update(
self,
width: int = None,
min_width: int = None,
max_width: int = None,
justify: JustifyValues = None,
) -> "ConsoleOptions":
"""Update values, return a copy."""
options = replace(self)
if width is not None:
options.min_width = options.max_width = width
if min_width is not None:
options.min_width = min_width
if max_width is not None:
options.max_width = max_width
if justify is not None:
options.justify = justify
return options
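# Example (added for illustration): update() returns a modified copy and leaves
# the original ConsoleOptions untouched.
#
#   options = ConsoleOptions(min_width=1, max_width=80,
#                            is_terminal=True, encoding="utf-8")
#   narrow = options.update(width=20)
#   assert narrow.min_width == narrow.max_width == 20
#   assert options.max_width == 80  # original is unchanged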
@runtime_checkable
class RichCast(Protocol):
"""An object that may be 'cast' to a console renderable."""
def __rich__(self) -> Union["ConsoleRenderable", str]: # pragma: no cover
...
@runtime_checkable
class ConsoleRenderable(Protocol):
"""An object that supports the console protocol."""
def __console__(
self, console: "Console", options: "ConsoleOptions"
) -> "RenderResult": # pragma: no cover
...
"""A type that may be rendered by Console."""
RenderableType = Union[ConsoleRenderable, RichCast, Control, str]
"""The result of calling a __console__ method."""
RenderResult = Iterable[Union[RenderableType, Segment, Control]]
_null_highlighter = NullHighlighter()
class RenderGroup:
"""Takes a group of renderables and returns a renderable object that renders the group.
Args:
renderables (Iterable[RenderableType]): An iterable of renderable objects.
"""
def __init__(self, *renderables: RenderableType, fit: bool = True) -> None:
self._renderables = renderables
self.fit = fit
self._render: Optional[List[RenderableType]] = None
@property
def renderables(self) -> List["RenderableType"]:
if self._render is None:
self._render = list(self._renderables)
return self._render
def __measure__(self, console: "Console", max_width: int) -> "Measurement":
if self.fit:
return measure_renderables(console, self.renderables, max_width)
else:
return Measurement(max_width, max_width)
def __console__(
self, console: "Console", options: "ConsoleOptions"
) -> RenderResult:
yield from self.renderables
def render_group(fit: bool = False) -> Callable:
def decorator(method):
"""Convert a method that returns an iterable of renderables in to a RenderGroup."""
@wraps(method)
def _replace(*args, **kwargs):
renderables = method(*args, **kwargs)
return RenderGroup(*renderables, fit=fit)
return _replace
return decorator
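# Usage sketch (added note): RenderGroup renders several renderables as one
# unit, and @render_group() turns a generator of renderables into a RenderGroup.
# Assumes rich.panel.Panel is importable in this version of the library.
#
#   from rich.panel import Panel
#
#   @render_group(fit=True)
#   def two_panels():
#       yield Panel("first")
#       yield Panel("second")
#
#   Console().print(two_panels())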
class ConsoleDimensions(NamedTuple):
"""Size of the terminal."""
width: int
height: int
COLOR_SYSTEMS = {
"standard": ColorSystem.STANDARD,
"256": ColorSystem.EIGHT_BIT,
"truecolor": ColorSystem.TRUECOLOR,
"windows": ColorSystem.WINDOWS,
}
_COLOR_SYSTEMS_NAMES = {system: name for name, system in COLOR_SYSTEMS.items()}
@dataclass
class ConsoleThreadLocals(threading.local):
"""Thread local values for Console context."""
buffer: List[Segment] = field(default_factory=list)
buffer_index: int = 0
control: List[str] = field(default_factory=list)
def _enable_legacy_windows_support() -> None:
"""Initialize Windows legacy support."""
from colorama import init
init()
class Console:
"""A high level console interface.
Args:
color_system (str, optional): The color system supported by your terminal,
either ``"standard"``, ``"256"`` or ``"truecolor"``. Leave as ``"auto"`` to autodetect.
theme (Theme, optional): An optional style theme object, or ``None`` for default theme.
file (IO, optional): A file object where the console should write to. Defaults to stdoutput.
width (int, optional): The width of the terminal. Leave as default to auto-detect width.
height (int, optional): The height of the terminal. Leave as default to auto-detect height.
record (bool, optional): Boolean to enable recording of terminal output,
required to call :meth:`export_html` and :meth:`export_text`. Defaults to False.
markup (bool, optional): Boolean to enable :ref:`console_markup`. Defaults to True.
emoji (bool, optional): Enable emoji code. Defaults to True.
highlight (bool, optional): Enable automatic highlighting. Defaults to True.
log_time (bool, optional): Boolean to enable logging of time by :meth:`log` methods. Defaults to True.
log_path (bool, optional): Boolean to enable the logging of the caller by :meth:`log`. Defaults to True.
log_time_format (str, optional): Log time format if ``log_time`` is enabled. Defaults to "[%X] ".
highlighter(HighlighterType, optional): Default highlighter.
"""
def __init__(
self,
color_system: Optional[
Literal["auto", "standard", "256", "truecolor", "windows"]
] = "auto",
force_terminal: bool = False,
theme: Theme = None,
file: IO[str] = None,
width: int = None,
height: int = None,
tab_size: int = 8,
record: bool = False,
markup: bool = True,
emoji: bool = True,
highlight: bool = True,
log_time: bool = True,
log_path: bool = True,
log_time_format: str = "[%X] ",
highlighter: Optional["HighlighterType"] = ReprHighlighter(),
):
self._styles = themes.DEFAULT.styles if theme is None else theme.styles
self._width = width
self._height = height
self.tab_size = tab_size
self.record = record
self._markup = markup
self._emoji = emoji
self._highlight = highlight
self.legacy_windows: bool = "WINDIR" in os.environ and not "WT_SESSION" in os.environ
self._color_system: Optional[ColorSystem]
self._force_terminal = force_terminal
if self.legacy_windows:
_enable_legacy_windows_support()
self.file = file or sys.stdout
self._color_system = COLOR_SYSTEMS["windows"]
else:
self.file = file or sys.stdout
if color_system is None:
self._color_system = None
elif color_system == "auto":
self._color_system = self._detect_color_system()
else:
self._color_system = COLOR_SYSTEMS[color_system]
self._log_render = LogRender(
show_time=log_time, show_path=log_path, time_format=log_time_format
)
self.highlighter: HighlighterType = highlighter or _null_highlighter
self._record_buffer_lock = threading.RLock()
self._thread_locals = ConsoleThreadLocals()
self._record_buffer: List[Segment] = []
def __repr__(self) -> str:
return f"<console width={self.width} {str(self._color_system)}>"
@property
def _buffer(self) -> List[Segment]:
"""Get a thread local buffer."""
return self._thread_locals.buffer
@property
def _buffer_index(self) -> int:
"""Get a thread local buffer."""
return self._thread_locals.buffer_index
@_buffer_index.setter
def _buffer_index(self, value: int) -> None:
self._thread_locals.buffer_index = value
@property
def _control(self) -> List[str]:
"""Get control codes buffer."""
return self._thread_locals.control
def _detect_color_system(self) -> Optional[ColorSystem]:
"""Detect color system from env vars."""
if not self.is_terminal:
return None
color_term = os.environ.get("COLORTERM", "").strip().lower()
return (
ColorSystem.TRUECOLOR
if color_term in ("truecolor", "24bit")
else ColorSystem.EIGHT_BIT
)
def _enter_buffer(self) -> None:
"""Enter in to a buffer context, and buffer all output."""
self._buffer_index += 1
def _exit_buffer(self) -> None:
"""Leave buffer context, and render content if required."""
self._buffer_index -= 1
self._check_buffer()
def __enter__(self) -> "Console":
"""Own context manager to enter buffer context."""
self._enter_buffer()
return self
def __exit__(self, exc_type, exc_value, traceback) -> None:
"""Exit buffer context."""
self._exit_buffer()
@property
def color_system(self) -> Optional[str]:
"""Get color system string.
Returns:
Optional[str]: "standard", "256" or "truecolor".
"""
if self._color_system is not None:
return _COLOR_SYSTEMS_NAMES[self._color_system]
else:
return None
@property
def encoding(self) -> str:
"""Get the encoding of the console file, e.g. ``"utf-8"``.
Returns:
str: A standard encoding string.
"""
return getattr(self.file, "encoding", "utf-8")
@property
def is_terminal(self) -> bool:
"""Check if the console is writing to a terminal.
Returns:
bool: True if the console is writing to a device capable of
understanding terminal codes, otherwise False.
"""
if self._force_terminal:
return True
isatty = getattr(self.file, "isatty", None)
return False if isatty is None else isatty()
@property
def options(self) -> ConsoleOptions:
"""Get default console options."""
return ConsoleOptions(
min_width=1,
max_width=self.width,
encoding=self.encoding,
is_terminal=self.is_terminal,
)
@property
def size(self) -> ConsoleDimensions:
"""Get the size of the console.
Returns:
ConsoleDimensions: A named tuple containing the dimensions.
"""
if self._width is not None and self._height is not None:
return ConsoleDimensions(self._width, self._height)
width, height = shutil.get_terminal_size()
if self.legacy_windows:
width -= 1
return ConsoleDimensions(
width if self._width is None else self._width,
height if self._height is None else self._height,
)
@property
def width(self) -> int:
"""Get the width of the console.
Returns:
int: The width (in characters) of the console.
"""
width, _ = self.size
return width
def line(self, count: int = 1) -> None:
"""Write new line(s).
Args:
count (int, optional): Number of new lines. Defaults to 1.
"""
assert count >= 0, "count must be >= 0"
if count:
self._buffer.append(Segment("\n" * count))
self._check_buffer()
def show_cursor(self, show: bool = True) -> None:
"""Show or hide the cursor.
Args:
show (bool, optional): Set visibility of the cursor.
"""
self._check_buffer()
self.file.write("\033[?25h" if show else "\033[?25l")
def _render(
self,
renderable: Union[RenderableType, Control],
options: Optional[ConsoleOptions],
) -> Iterable[Segment]:
"""Render an object in to an iterable of `Segment` instances.
This method contains the | |
"""Miscellaneous visualization tools.
These functions are similar to matplotlib functions like
:func:`~matplotlib.pyplot.scatter` and :func:`~matplotlib.pyplot.pcolormesh`.
When called, these functions default to creating plots on the current axis.
After plotting, functions like :func:`~matplotlib.pyplot.xlabel` and
:func:`~matplotlib.pyplot.title` may be used to further modify the axis.
Alternatively, if using matplotlib's object-oriented API, pass the `ax` parameter
to these functions.
.. note:: This module only works with ``ribs[all]`` installed. As such, it is
not imported with ``import ribs``, and it must be explicitly imported with
``import ribs.visualize``.
.. autosummary::
:toctree:
ribs.visualize.grid_archive_heatmap
ribs.visualize.cvt_archive_heatmap
ribs.visualize.sliding_boundaries_archive_heatmap
"""
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.cm import ScalarMappable
from scipy.spatial import Voronoi # pylint: disable=no-name-in-module
# Matplotlib functions tend to have a ton of args.
# pylint: disable = too-many-arguments
__all__ = [
"grid_archive_heatmap",
"cvt_archive_heatmap",
"sliding_boundaries_archive_heatmap",
]
def _retrieve_cmap(cmap):
"""Retrieves colormap from matplotlib."""
if isinstance(cmap, str):
return matplotlib.cm.get_cmap(cmap)
if isinstance(cmap, list):
return matplotlib.colors.ListedColormap(cmap)
return cmap
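# For reference (added note): _retrieve_cmap accepts a colormap name, a list of
# colors, or an existing Colormap object.
#
#   _retrieve_cmap("viridis")                        # looked up by name
#   _retrieve_cmap([[1, 0, 0], [0, 0, 1]])           # color list -> ListedColormap
#   _retrieve_cmap(matplotlib.cm.get_cmap("magma"))  # returned unchanged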
def _get_pt_to_obj(cvt_archive):
"""Creates a dict from centroid index to objective value in a CVTArchive."""
data = cvt_archive.as_pandas(include_solutions=False)
pt_to_obj = {}
for row in data.itertuples():
# row.index is the centroid index. The dataframe index is row.Index.
pt_to_obj[row.index] = row.objective
return pt_to_obj
def grid_archive_heatmap(archive,
ax=None,
transpose_bcs=False,
cmap="magma",
square=False,
vmin=None,
vmax=None,
pcm_kwargs=None):
"""Plots heatmap of a :class:`~ribs.archives.GridArchive` with 2D behavior
space.
Essentially, we create a grid of cells and shade each cell with a color
corresponding to the objective value of that cell's elite. This method uses
:func:`~matplotlib.pyplot.pcolormesh` to generate the grid. For further
customization, pass extra kwargs to :func:`~matplotlib.pyplot.pcolormesh`
through the ``pcm_kwargs`` parameter. For instance, to create black
boundaries of width 0.1, pass in ``pcm_kwargs={"edgecolor": "black",
"linewidth": 0.1}``.
Examples:
.. plot::
:context: close-figs
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> from ribs.archives import GridArchive
>>> from ribs.visualize import grid_archive_heatmap
>>> # Populate the archive with the negative sphere function.
>>> archive = GridArchive([20, 20], [(-1, 1), (-1, 1)])
>>> archive.initialize(solution_dim=2)
>>> for x in np.linspace(-1, 1, 100):
... for y in np.linspace(-1, 1, 100):
... archive.add(solution=np.array([x,y]),
... objective_value=-(x**2 + y**2),
... behavior_values=np.array([x,y]))
>>> # Plot a heatmap of the archive.
>>> plt.figure(figsize=(8, 6))
>>> grid_archive_heatmap(archive)
>>> plt.title("Negative sphere function")
>>> plt.xlabel("x coords")
>>> plt.ylabel("y coords")
>>> plt.show()
Args:
archive (GridArchive): A 2D GridArchive.
ax (matplotlib.axes.Axes): Axes on which to plot the heatmap. If None,
the current axis will be used.
transpose_bcs (bool): By default, the first BC in the archive will
appear along the x-axis, and the second will be along the y-axis. To
switch this (i.e. to transpose the axes), set this to True.
cmap (str, list, matplotlib.colors.Colormap): Colormap to use when
plotting intensity. Either the name of a colormap, a list of RGB or
RGBA colors (i.e. an Nx3 or Nx4 array), or a colormap object.
square (bool): If True, set the axes aspect ratio to be "equal".
vmin (float): Minimum objective value to use in the plot. If None, the
minimum objective value in the archive is used.
vmax (float): Maximum objective value to use in the plot. If None, the
maximum objective value in the archive is used.
pcm_kwargs (dict): Additional kwargs to pass to
:func:`~matplotlib.pyplot.pcolormesh`.
Raises:
ValueError: The archive is not 2D.
"""
if archive.behavior_dim != 2:
raise ValueError("Cannot plot heatmap for non-2D archive.")
# Try getting the colormap early in case it fails.
cmap = _retrieve_cmap(cmap)
# Retrieve data from archive.
lower_bounds = archive.lower_bounds
upper_bounds = archive.upper_bounds
x_dim, y_dim = archive.dims
x_bounds = np.linspace(lower_bounds[0], upper_bounds[0], x_dim + 1)
y_bounds = np.linspace(lower_bounds[1], upper_bounds[1], y_dim + 1)
# Color for each cell in the heatmap.
archive_data = archive.as_pandas(include_solutions=False)
colors = np.full((y_dim, x_dim), np.nan)
for row in archive_data.itertuples():
colors[row.index_1, row.index_0] = row.objective
objective_values = archive_data["objective"]
if transpose_bcs:
# Since the archive is 2D, transpose by swapping the x and y boundaries
# and by flipping the bounds (the bounds are arrays of length 2).
x_bounds, y_bounds = y_bounds, x_bounds
lower_bounds = np.flip(lower_bounds)
upper_bounds = np.flip(upper_bounds)
colors = colors.T
# Initialize the axis.
ax = plt.gca() if ax is None else ax
ax.set_xlim(lower_bounds[0], upper_bounds[0])
ax.set_ylim(lower_bounds[1], upper_bounds[1])
if square:
ax.set_aspect("equal")
# Create the plot.
pcm_kwargs = {} if pcm_kwargs is None else pcm_kwargs
vmin = np.min(objective_values) if vmin is None else vmin
vmax = np.max(objective_values) if vmax is None else vmax
t = ax.pcolormesh(x_bounds,
y_bounds,
colors,
cmap=cmap,
vmin=vmin,
vmax=vmax,
**pcm_kwargs)
# Create the colorbar.
ax.figure.colorbar(t, ax=ax, pad=0.1)
def cvt_archive_heatmap(archive,
ax=None,
plot_centroids=True,
plot_samples=False,
transpose_bcs=False,
cmap="magma",
square=False,
ms=1,
lw=0.5,
vmin=None,
vmax=None):
"""Plots heatmap of a :class:`~ribs.archives.CVTArchive` with 2D behavior
space.
Essentially, we create a Voronoi diagram and shade in each cell with a
color corresponding to the objective value of that cell's elite.
Depending on how many bins are in the archive, ``ms`` and ``lw`` may need to
be tuned. If there are too many bins, the Voronoi diagram and centroid
markers will make the entire image appear black. In that case, try turning
off the centroids with ``plot_centroids=False`` or even removing the lines
completely with ``lw=0``.
Examples:
.. plot::
:context: close-figs
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> from ribs.archives import CVTArchive
>>> from ribs.visualize import cvt_archive_heatmap
>>> # Populate the archive with the negative sphere function.
>>> archive = CVTArchive(100, [(-1, 1), (-1, 1)])
>>> archive.initialize(solution_dim=2)
>>> for x in np.linspace(-1, 1, 100):
... for y in np.linspace(-1, 1, 100):
... archive.add(solution=np.array([x,y]),
... objective_value=-(x**2 + y**2),
... behavior_values=np.array([x,y]))
>>> # Plot a heatmap of the archive.
>>> plt.figure(figsize=(8, 6))
>>> cvt_archive_heatmap(archive)
>>> plt.title("Negative sphere function")
>>> plt.xlabel("x coords")
>>> plt.ylabel("y coords")
>>> plt.show()
Args:
archive (CVTArchive): A 2D CVTArchive.
ax (matplotlib.axes.Axes): Axes on which to plot the heatmap. If None,
the current axis will be used.
plot_centroids (bool): Whether to plot the cluster centroids.
plot_samples (bool): Whether to plot the samples used when generating
the clusters.
transpose_bcs (bool): By default, the first BC in the archive will
appear along the x-axis, and the second will be along the y-axis. To
switch this (i.e. to transpose the axes), set this to True.
cmap (str, list, matplotlib.colors.Colormap): Colormap to use when
plotting intensity. Either the name of a colormap, a list of RGB or
RGBA colors (i.e. an Nx3 or Nx4 array), or a colormap object.
square (bool): If True, set the axes aspect ratio to be "equal".
ms (float): Marker size for both centroids and samples.
lw (float): Line width when plotting the voronoi diagram.
vmin (float): Minimum objective value to use in the plot. If None, the
minimum objective value in the archive is used.
vmax (float): Maximum objective value to use in the plot. If None, the
maximum objective value in the archive is used.
Raises:
ValueError: The archive is not 2D.
"""
# pylint: disable = too-many-locals
if archive.behavior_dim != 2:
raise ValueError("Cannot plot heatmap for non-2D archive.")
# Try getting the colormap early in case it fails.
cmap = _retrieve_cmap(cmap)
# Retrieve data from archive.
lower_bounds = archive.lower_bounds
upper_bounds = archive.upper_bounds
centroids = archive.centroids
samples = archive.samples
if transpose_bcs:
lower_bounds = np.flip(lower_bounds)
upper_bounds = np.flip(upper_bounds)
centroids = np.flip(centroids, axis=1)
samples = np.flip(samples, axis=1)
# Retrieve and initialize the axis.
ax = plt.gca() if ax is None else ax
ax.set_xlim(lower_bounds[0], upper_bounds[0])
ax.set_ylim(lower_bounds[1], upper_bounds[1])
if square:
ax.set_aspect("equal")
# Add faraway points so that the edge regions of the Voronoi diagram are
# filled in. Refer to
# https://stackoverflow.com/questions/20515554/colorize-voronoi-diagram
# for more info.
interval = upper_bounds - lower_bounds
scale = 1000
faraway_pts = [
upper_bounds + interval * scale, # Far upper right.
upper_bounds + interval * [-1, 1] * scale, # Far upper left.
lower_bounds + interval * [-1, -1] * scale, # Far bottom left.
lower_bounds + interval * [1, -1] * scale, # Far bottom right.
]
vor = Voronoi(np.append(centroids, faraway_pts, axis=0))
# Calculate objective value for each region. `vor.point_region` contains
# the region index of each point.
region_obj = [None] * len(vor.regions)
min_obj, max_obj = np.inf, -np.inf
pt_to_obj = _get_pt_to_obj(archive)
for pt_idx, region_idx in enumerate(
vor.point_region[:-4]): # Exclude faraway_pts.
if region_idx != -1
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
class forwarding(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module brocade-mpls-operational - based on the path /mpls-state/lsp/forwarding. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: MPLS LSP forwarding information
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__tunnel_vif_index','__lsp_id','__forwarding_up','__primary_active','__primary_up','__secondary_active','__secondary_up','__selected_secondary_active','__selected_secondary_up','__frr_active','__frr_up','__instance_id','__out_port_id','__out_port_name','__out_label',)
_yang_name = 'forwarding'
_rest_name = 'forwarding'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
path_helper_ = kwargs.pop("path_helper", None)
if path_helper_ is False:
self._path_helper = False
elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
self._path_helper = path_helper_
elif hasattr(self, "_parent"):
path_helper_ = getattr(self._parent, "_path_helper", False)
self._path_helper = path_helper_
else:
self._path_helper = False
extmethods = kwargs.pop("extmethods", None)
if extmethods is False:
self._extmethods = False
elif extmethods is not None and isinstance(extmethods, dict):
self._extmethods = extmethods
elif hasattr(self, "_parent"):
extmethods = getattr(self._parent, "_extmethods", None)
self._extmethods = extmethods
else:
self._extmethods = False
self.__selected_secondary_active = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="selected-secondary-active", rest_name="selected-secondary-active", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='boolean', is_config=False)
self.__tunnel_vif_index = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="tunnel-vif-index", rest_name="tunnel-vif-index", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)
self.__secondary_active = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="secondary-active", rest_name="secondary-active", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='boolean', is_config=False)
self.__out_port_id = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="out-port-id", rest_name="out-port-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)
self.__selected_secondary_up = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="selected-secondary-up", rest_name="selected-secondary-up", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='boolean', is_config=False)
self.__out_label = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="out-label", rest_name="out-label", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)
self.__forwarding_up = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="forwarding-up", rest_name="forwarding-up", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='boolean', is_config=False)
self.__instance_id = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="instance-id", rest_name="instance-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)
self.__primary_active = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="primary-active", rest_name="primary-active", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='boolean', is_config=False)
self.__out_port_name = YANGDynClass(base=unicode, is_leaf=True, yang_name="out-port-name", rest_name="out-port-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='string', is_config=False)
self.__primary_up = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="primary-up", rest_name="primary-up", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='boolean', is_config=False)
self.__lsp_id = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="lsp-id", rest_name="lsp-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)
self.__secondary_up = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="secondary-up", rest_name="secondary-up", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='boolean', is_config=False)
self.__frr_up = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="frr-up", rest_name="frr-up", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='boolean', is_config=False)
self.__frr_active = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="frr-active", rest_name="frr-active", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='boolean', is_config=False)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'mpls-state', u'lsp', u'forwarding']
def _rest_path(self):
if hasattr(self, "_parent"):
if self._rest_name:
return self._parent._rest_path()+[self._rest_name]
else:
return self._parent._rest_path()
else:
return [u'mpls-state', u'lsp', u'forwarding']
def _get_tunnel_vif_index(self):
"""
Getter method for tunnel_vif_index, mapped from YANG variable /mpls_state/lsp/forwarding/tunnel_vif_index (uint32)
YANG Description: lsp tunnel vif index
"""
return self.__tunnel_vif_index
def _set_tunnel_vif_index(self, v, load=False):
"""
Setter method for tunnel_vif_index, mapped from YANG variable /mpls_state/lsp/forwarding/tunnel_vif_index (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_tunnel_vif_index is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_tunnel_vif_index() directly.
YANG Description: lsp tunnel vif index
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="tunnel-vif-index", rest_name="tunnel-vif-index", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """tunnel_vif_index must be of a type compatible with uint32""",
'defined-type': "uint32",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="tunnel-vif-index", rest_name="tunnel-vif-index", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)""",
})
self.__tunnel_vif_index = t
if hasattr(self, '_set'):
self._set()
def _unset_tunnel_vif_index(self):
self.__tunnel_vif_index = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="tunnel-vif-index", rest_name="tunnel-vif-index", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)
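  # Editorial sketch (not generated by pyangbind): this leaf is config false, so
  # user code should only read it, while a backend holding the operational data
  # would populate it through the private setter described above, e.g.
  #
  #     fwd._set_tunnel_vif_index(17)    # assuming `fwd` is an instance of this container
  #     fwd._get_tunnel_vif_index()      # returns the value wrapped in the uint32 YANG type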
def _get_lsp_id(self):
"""
Getter method for lsp_id, mapped from YANG variable /mpls_state/lsp/forwarding/lsp_id (uint32)
YANG Description: lsp_forwarding_lsp_id
"""
return self.__lsp_id
def _set_lsp_id(self, v, load=False):
"""
Setter method for lsp_id, mapped from YANG variable /mpls_state/lsp/forwarding/lsp_id (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_lsp_id is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_lsp_id() directly.
YANG Description: lsp_forwarding_lsp_id
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="lsp-id", rest_name="lsp-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """lsp_id must be of a type compatible with uint32""",
'defined-type': "uint32",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="lsp-id", rest_name="lsp-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)""",
})
self.__lsp_id = t
if hasattr(self, '_set'):
self._set()
def _unset_lsp_id(self):
self.__lsp_id = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="lsp-id", rest_name="lsp-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)
def _get_forwarding_up(self):
"""
Getter method for forwarding_up, mapped from YANG variable /mpls_state/lsp/forwarding/forwarding_up (boolean)
YANG Description: lsp_forwarding_up
"""
return self.__forwarding_up
def _set_forwarding_up(self, v, load=False):
"""
Setter method for forwarding_up, mapped from YANG variable /mpls_state/lsp/forwarding/forwarding_up (boolean)
If this variable is read-only (config: false) in the
source YANG file, then _set_forwarding_up is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_forwarding_up() directly.
YANG Description: lsp_forwarding_up
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="forwarding-up", rest_name="forwarding-up", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='boolean', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """forwarding_up must be of a type compatible with boolean""",
'defined-type': "boolean",
'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="forwarding-up", rest_name="forwarding-up", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='boolean', is_config=False)""",
})
self.__forwarding_up = t
if hasattr(self, '_set'):
self._set()
def _unset_forwarding_up(self):
self.__forwarding_up = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="forwarding-up", rest_name="forwarding-up", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='boolean', is_config=False)
def _get_primary_active(self):
"""
Getter method for primary_active, mapped from YANG variable /mpls_state/lsp/forwarding/primary_active (boolean)
YANG Description: lsp_forwarding_primary_active
"""
return self.__primary_active
def _set_primary_active(self, v, load=False):
"""
Setter method for primary_active, mapped from YANG variable /mpls_state/lsp/forwarding/primary_active (boolean)
If this variable is read-only (config: false) in the
source YANG file, then _set_primary_active is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_primary_active() directly.
YANG Description: lsp_forwarding_primary_active
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="primary-active", rest_name="primary-active", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='boolean', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """primary_active must be of a type compatible with boolean""",
'defined-type': "boolean",
'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="primary-active", rest_name="primary-active", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='boolean', is_config=False)""",
})
self.__primary_active = t
if hasattr(self, '_set'):
self._set()
def _unset_primary_active(self):
self.__primary_active = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="primary-active", rest_name="primary-active", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='boolean', is_config=False)
def _get_primary_up(self):
"""
Getter method for primary_up, mapped from YANG variable /mpls_state/lsp/forwarding/primary_up (boolean)
YANG Description: lsp_forwarding_primary_up
"""
return self.__primary_up
def _set_primary_up(self, v, load=False):
"""
Setter method for primary_up, mapped from YANG variable /mpls_state/lsp/forwarding/primary_up (boolean)
If this variable is read-only (config: false) in the
source YANG file, then _set_primary_up is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_primary_up() directly.
YANG Description: lsp_forwarding_primary_up
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="primary-up", rest_name="primary-up", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='boolean', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """primary_up must be of a type compatible with boolean""",
'defined-type': "boolean",
'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="primary-up", rest_name="primary-up", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='boolean', is_config=False)""",
})
self.__primary_up = t
if hasattr(self, '_set'):
self._set()
def _unset_primary_up(self):
self.__primary_up = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="primary-up", rest_name="primary-up", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='boolean', is_config=False)
def _get_secondary_active(self):
"""
Getter method for secondary_active, mapped from YANG variable /mpls_state/lsp/forwarding/secondary_active (boolean)
YANG Description: lsp_forwarding_secondary_active
"""
return self.__secondary_active
  def _set_secondary_active(self,
# bot/cogs/img.py (from the AvocadoManYT/Test repository)
import discord
import aiohttp
from datetime import datetime
import io
from io import BytesIO  # the avatar commands below read image bytes via BytesIO
from discord.ext import commands
import PIL
from PIL import Image, ImageFont, ImageDraw, ImageFilter
err_color = discord.Color.red()
class Images(commands.Cog):
""" Category for image commands """
def __init__(self, client):
self.client = client
self.ses = aiohttp.ClientSession()
# events
@commands.Cog.listener()
async def on_ready(self):
print('Img cog is ready.')
# commands
@commands.command()
async def wanted(self, ctx, user : discord.Member = None):
if user == None:
user = ctx.author
wanted = Image.open("images/Wanted.jpeg")
asset = user.avatar_url_as(size = 128)
data = BytesIO(await asset.read())
pfp = Image.open(data)
pfp = pfp.resize((106,106))
wanted.paste(pfp, (41,85))
wanted.save("images/profile.jpg")
await ctx.send(file = discord.File("images/profile.jpg"))
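	# Editorial sketch: wanted/amogus/rip/naruto/amongus all follow the same recipe
	# (open a template, fetch the avatar into BytesIO, resize, paste at a fixed
	# offset, save, send). A hypothetical shared helper could look like this; the
	# name, arguments and offsets are illustrative only:
	#
	#     async def _paste_avatar(self, ctx, user, template, size, offset, out):
	#         img = Image.open(template)
	#         pfp = Image.open(BytesIO(await user.avatar_url_as(size=128).read()))
	#         img.paste(pfp.resize(size), offset)
	#         img.save(out)
	#         await ctx.send(file=discord.File(out))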
@commands.command()
async def amogus(self, ctx, user : discord.Member = None):
if user == None:
user = ctx.author
wanted = Image.open("images/amogu.png")
asset = user.avatar_url_as(size = 128)
data = BytesIO(await asset.read())
pfp = Image.open(data)
pfp = pfp.resize((48,48))
wanted.paste(pfp, (246, 310))
wanted.save("images/amog.png")
await ctx.send(file = discord.File("images/amog.png"))
@commands.command()
async def rip(self, ctx, member:discord.Member=None):
if member == None:
member = ctx.author
rip = Image.open('images/RIP.jpg')
asset = member.avatar_url_as(size=128)
data = BytesIO(await asset.read())
pfp = Image.open(data)
pfp = pfp.resize((87, 87))
rip.paste(pfp, (57, 124))
rip.save('images/prip.jpg')
await ctx.send(file = discord.File('images/prip.jpg'))
@commands.command()
async def naruto(self, ctx, user: discord.Member = None):
if not user:
user = ctx.author
naruto = Image.open("images/Naruto.png")
asset = user.avatar_url_as(size=128)
data = BytesIO(await asset.read())
pfp = Image.open(data)
pfp = pfp.resize((97, 97))
naruto.paste(pfp, (418, 120))
naruto.save("images/n.png")
await ctx.send(file=discord.File("images/n.png"))
@commands.command()
async def amongus(self, ctx, user: discord.Member = None):
if not user:
user = ctx.author
naruto = Image.open("images/Amongus.jpeg")
asset = user.avatar_url_as(size=128)
data = BytesIO(await asset.read())
pfp = Image.open(data)
pfp = pfp.resize((108, 108))
naruto.paste(pfp, (87, 25))
naruto.save("images/au.jpeg")
await ctx.send(file=discord.File("images/au.jpeg"))
@commands.command()
async def text(self, ctx, *, text = "No text entered"):
img = Image.open("images/White.jpeg")
draw = ImageDraw.Draw(img)
font = ImageFont.truetype("JosefinSans-Regular.ttf", 24)
draw.text((100,100), text, (0,17,0), font = font)
img.save("images/text.jpeg")
await ctx.send(file = discord.File("images/text.jpeg"))
@commands.command(aliases=['trigger', 'trg'])
async def triggered(self, ctx, member: discord.Member=None):
if not member: # if no member is mentioned
member = ctx.author # the user who ran the command will be the member
async with aiohttp.ClientSession() as wastedSession:
async with wastedSession.get(f'https://some-random-api.ml/canvas/triggered?avatar={member.avatar_url_as(format="png", size=1024)}') as wastedImage: # get users avatar as png with 1024 size
imageData = BytesIO(await wastedImage.read()) # read the image/bytes
await ctx.send(file=discord.File(imageData, 'triggered.gif')) # sending the file
@commands.command()
async def wasted(self, ctx, member: discord.Member=None):
if not member: # if no member is mentioned
member = ctx.author # the user who ran the command will be the member
async with aiohttp.ClientSession() as wastedSession:
async with wastedSession.get(f'https://some-random-api.ml/canvas/wasted?avatar={member.avatar_url_as(format="png", size=1024)}') as wastedImage: # get users avatar as png with 1024 size
imageData = BytesIO(await wastedImage.read()) # read the image/bytes
await ctx.send(file=discord.File(imageData, 'wasted.png')) # sending the file
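	# Editorial note: triggered/wasted above and the glass/rainbow/invert/colour
	# filter commands below differ only in the some-random-api canvas endpoint and
	# the output filename. A hedged sketch of a shared helper (name is hypothetical):
	#
	#     async def _canvas(self, ctx, member, endpoint, filename):
	#         url = f'https://some-random-api.ml/canvas/{endpoint}?avatar={member.avatar_url_as(format="png", size=1024)}'
	#         async with aiohttp.ClientSession() as ses, ses.get(url) as resp:
	#             await ctx.send(file=discord.File(BytesIO(await resp.read()), filename))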
@commands.command(aliases=['redpanda', 'redpandaimg'])
async def redpandaimage(self, ctx):
async with aiohttp.ClientSession() as ses:
async with ses.get('https://some-random-api.ml/img/red_panda') as f:
if f.status in range(200, 299):
dat = await f.json()
img = dat['link']
em = discord.Embed(
title='Red Panda',
color = ctx.author.color
)
em.set_image(url=f'{img}')
await ctx.send(embed=em)
await ses.close()
else:
await ctx.reply("Error when trying to get a red panda image.")
await ses.close()
@commands.command(aliases=['pikachu', 'pikachuimg', 'pika', 'pikaimg', 'pikaimage'])
async def pikachuimage(self, ctx):
async with aiohttp.ClientSession() as ses:
async with ses.get('https://some-random-api.ml/img/pikachu') as f:
if f.status in range(200, 299):
dat = await f.json()
img = dat['link']
em = discord.Embed(
title='Pikachu Image',
color = ctx.author.color
)
em.set_image(url=f'{img}')
await ctx.send(embed=em)
await ses.close()
else:
await ctx.reply("Error when trying to get a pikachu image.")
await ses.close()
@commands.command()
async def glass(self, ctx, member: discord.Member=None):
if not member: # if no member is mentioned
member = ctx.author # the user who ran the command will be the member
async with aiohttp.ClientSession() as wastedSession:
async with wastedSession.get(f'https://some-random-api.ml/canvas/glass?avatar={member.avatar_url_as(format="png", size=1024)}') as wastedImage: # get users avatar as png with 1024 size
imageData = BytesIO(await wastedImage.read()) # read the image/bytes
await ctx.send(file=discord.File(imageData, 'glass.png')) # sending the file
@commands.command()
async def rainbow(self, ctx, member: discord.Member=None):
if not member: # if no member is mentioned
member = ctx.author # the user who ran the command will be the member
async with aiohttp.ClientSession() as wastedSession:
async with wastedSession.get(f'https://some-random-api.ml/canvas/gay?avatar={member.avatar_url_as(format="png", size=1024)}') as wastedImage: # get users avatar as png with 1024 size
imageData = BytesIO(await wastedImage.read()) # read the image/bytes
await ctx.send(file=discord.File(imageData, 'rainbow.png')) # sending the
@commands.command()
async def invert(self, ctx, member: discord.Member=None):
if not member: # if no member is mentioned
member = ctx.author # the user who ran the command will be the member
async with aiohttp.ClientSession() as wastedSession:
async with wastedSession.get(f'https://some-random-api.ml/canvas/invert?avatar={member.avatar_url_as(format="png", size=1024)}') as wastedImage: # get users avatar as png with 1024 size
imageData = BytesIO(await wastedImage.read()) # read the image/bytes
await ctx.send(file=discord.File(imageData, 'inverted.png')) # sending the
@commands.command()
async def blue(self, ctx, member: discord.Member=None):
if not member: # if no member is mentioned
member = ctx.author # the user who ran the command will be the member
async with aiohttp.ClientSession() as wastedSession:
async with wastedSession.get(f'https://some-random-api.ml/canvas/blue?avatar={member.avatar_url_as(format="png", size=1024)}') as wastedImage: # get users avatar as png with 1024 size
imageData = BytesIO(await wastedImage.read()) # read the image/bytes
await ctx.send(file=discord.File(imageData, 'blue.png')) # sending the
@commands.command()
async def green(self, ctx, member: discord.Member=None):
if not member: # if no member is mentioned
member = ctx.author # the user who ran the command will be the member
async with aiohttp.ClientSession() as wastedSession:
async with wastedSession.get(f'https://some-random-api.ml/canvas/green?avatar={member.avatar_url_as(format="png", size=1024)}') as wastedImage: # get users avatar as png with 1024 size
imageData = BytesIO(await wastedImage.read()) # read the image/bytes
await ctx.send(file=discord.File(imageData, 'green.png')) # sending the
@commands.command()
async def red(self, ctx, member: discord.Member=None):
if not member: # if no member is mentioned
member = ctx.author # the user who ran the command will be the member
async with aiohttp.ClientSession() as wastedSession:
async with wastedSession.get(f'https://some-random-api.ml/canvas/red?avatar={member.avatar_url_as(format="png", size=1024)}') as wastedImage: # get users avatar as png with 1024 size
imageData = BytesIO(await wastedImage.read()) # read the image/bytes
await ctx.send(file=discord.File(imageData, 'red.png')) # sending the
@commands.command()
async def sepia(self, ctx, member: discord.Member=None):
if not member: # if no member is mentioned
member = ctx.author # the user who ran the command will be the member
async with aiohttp.ClientSession() as wastedSession:
async with wastedSession.get(f'https://some-random-api.ml/canvas/sepia?avatar={member.avatar_url_as(format="png", size=1024)}') as wastedImage: # get users avatar as png with 1024 size
imageData = BytesIO(await wastedImage.read()) # read the image/bytes
await ctx.send(file=discord.File(imageData, 'sepia.png')) # sending the
@commands.command(aliases=['baw'])
async def blackwhite(self, ctx, member: discord.Member=None):
if not member: # if no member is mentioned
member = ctx.author # the user who ran the command will be the member
async with aiohttp.ClientSession() as wastedSession:
async with wastedSession.get(f'https://some-random-api.ml/canvas/threshold?avatar={member.avatar_url_as(format="png", size=1024)}') as wastedImage: # get users avatar as png with 1024 size
imageData = BytesIO(await wastedImage.read()) # read the image/bytes
await ctx.send(file=discord.File(imageData, 'baw.png')) # sending the
@commands.command(aliases=['bright'])
async def brightness(self, ctx, member: discord.Member=None):
if not member: # if no member is mentioned
member = ctx.author # the user who ran the command will be the member
async with aiohttp.ClientSession() as wastedSession:
async with wastedSession.get(f'https://some-random-api.ml/canvas/brightness?avatar={member.avatar_url_as(format="png", size=1024)}') as wastedImage: # get users avatar as png with 1024 size
imageData = BytesIO(await wastedImage.read()) # read the image/bytes
await ctx.send(file=discord.File(imageData, 'bright.png')) # sending the
@commands.command()
async def tweet(self, ctx, *, msg="No text entered"):
async with aiohttp.ClientSession() as ses:
async with ses.get(f"https://nekobot.xyz/api/imagegen?type=tweet&username={ctx.author.name}&text={msg}") as f:
if f.status in range(200, 299):
dat = await f.json()
img = dat['message']
em = discord.Embed(
title='Tweeted!',
color = ctx.author.color
)
em.set_image(url=f'{img}')
await ctx.send(embed=em)
await ses.close()
else:
await ctx.reply("Error when trying to tweet.")
await ses.close()
@commands.command()
async def magik(self, ctx, member:discord.Member=None):
if member == None:
member = ctx.author
async with aiohttp.ClientSession() as ses:
async with ses.get(f"https://nekobot.xyz/api/imagegen?type=magik&image={member.avatar_url}") as f:
if f.status in range(200, 299):
dat = await f.json()
img = dat['message']
em = discord.Embed(
title='Magikified!',
color = ctx.author.color
)
em.set_image(url=f'{img}')
await ctx.send(embed=em)
await ses.close()
else:
await ctx.reply("Error when trying to magikify.")
await ses.close()
@commands.command()
async def iphonex(self, ctx, member:discord.Member=None):
if member == None:
member = ctx.author
		async with aiohttp.ClientSession() as ses:
# From the EldritchJS/inference_results_v0.5 repository
"""
mlperf inference benchmarking tool
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
# import array
# import collections
import json
import logging
import os
import sys
# import threading
import time
# from queue import Queue
import mlperf_schedule as sch
import numpy as np
import dataset
import imagenet
import coco
logging.basicConfig(level=logging.INFO)
log = logging.getLogger("main")
NANO_SEC = 1e9
MILLI_SEC = 1000
# pylint: disable=missing-docstring
# the datasets we support
SUPPORTED_DATASETS = {
"imagenet":
(imagenet.Imagenet, dataset.pre_process_vgg, dataset.PostProcessCommon(offset=1),
{"image_size": [224, 224, 3]}),
"imagenet_mobilenet":
(imagenet.Imagenet, dataset.pre_process_mobilenet, dataset.PostProcessCommon(offset=-1),
{"image_size": [224, 224, 3]}),
"coco-300":
(coco.Coco, dataset.pre_process_coco_mobilenet, coco.PostProcessCoco(),
{"image_size": [300, 300, 3]}),
}
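# Each entry above is a 4-tuple of (dataset class, pre-process fn, post-process
# object, extra kwargs); main() unpacks it as shown below (for orientation only):
#
#     wanted_dataset, pre_proc, post_proc, kwargs = SUPPORTED_DATASETS["imagenet"]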
# pre-defined command line options to simplify things. They are used as defaults and can be
# overwritten from the command line
SUPPORTED_PROFILES = {
"defaults": {
"dataset": "imagenet",
"backend": "tensorflow",
},
# resnet
"resnet50-tf": {
"inputs": "input_tensor:0",
"outputs": "ArgMax:0",
"dataset": "imagenet",
"backend": "tensorflow",
"model-name": "resnet50",
},
# mobilenet
"mobilenet-tf": {
"inputs": "input:0",
"outputs": "MobilenetV1/Predictions/Reshape_1:0",
"dataset": "imagenet_mobilenet",
"backend": "tensorflow",
"model-name": "mobilenet",
},
# ssd-mobilenet
"ssd-mobilenet-tf": {
"inputs": "image_tensor:0",
"outputs": "num_detections:0,detection_boxes:0,detection_scores:0,detection_classes:0",
"dataset": "coco-300",
"backend": "tensorflow",
"model-name": "ssd-mobilenet",
},
}
SCENARIO_MAP = {
"SingleStream": sch.TestScenario.SingleStream,
"MultiStream": sch.TestScenario.MultiStream,
"Server": sch.TestScenario.Server,
"Offline": sch.TestScenario.Offline,
}
CASE_SETTINGS_MAP = {
# resnet50-tf
"resnet50-tf-SingleStream": {
"schedule_config": "/root/wsh/schedule-benchmark/schedule/mlperf_inference_schedule.prototxt",
"config": "mlperf.conf",
"backend": "trt",
"data_format": "NCHW",
"cache_path": "/root/wsh/mlperf-data/preprocess",
"dataset_path": "/root/wsh/mlperf-data/dataset-imagenet-ilsvrc2012-val",
#"model_path": "/root/wsh/mlperf-trt-models-v6/resnet50_int8_bs1.trt",
"model_path": "/root/wsh/mlperf-trt-models-v6/fp16/resnet50_fp16_ws2_bt256.trt",
"model_name": "resnet50",
"dataset": "imagenet",
"profile": "resnet50-tf",
"scenario": "SingleStream",
},
"resnet50-tf-MultiStream": {
"schedule_config": "/root/schedule/mlperf_inference_schedule.prototxt",
"config": "mlperf.conf",
"backend": "trt",
"data_format": "NCHW",
"cache_path": "/data/dataset-imagenet-ilsvrc2012-valILSVRC2012_img_val/preprocess",
"dataset_path": "/data/dataset-imagenet-ilsvrc2012-val",
"model_path": "/media/sunhy/inference-master-test/inference-master/v0.5/classification_and_detection/engineer/resnet50_v1_16.trt",
"model_name": "resnet50",
"dataset": "imagenet",
"profile": "resnet50-tf",
"scenario": "MultiStream",
},
"resnet50-tf-Server": {
"schedule_config": "/root/schedule/mlperf_inference_schedule.prototxt",
"config": "mlperf.conf",
"backend": "trt",
"data_format": "NCHW",
"cache_path": "/data/dataset-imagenet-ilsvrc2012-valILSVRC2012_img_val/preprocess",
"dataset_path": "/data/dataset-imagenet-ilsvrc2012-val",
"model_path": "/media/sunhy/inference-master-test/inference-master/v0.5/classification_and_detection/engineer/resnet50_v1_16.trt",
"model_name": "resnet50",
"dataset": "imagenet",
"profile": "resnet50-tf",
"scenario": "Server",
},
"resnet50-tf-Offline": {
"schedule_config": "/root/wsh/schedule-benchmark/schedule/mlperf_inference_schedule.prototxt",
"config": "mlperf.conf",
"backend": "trt",
"data_format": "NCHW",
"cache_path": "/root/wsh/mlperf-data/preprocess",
"dataset_path": "/root/wsh/mlperf-data/dataset-imagenet-ilsvrc2012-val",
#"model_path": "/root/wsh/mlperf-trt-models-v6/resnet50_fp16_bs1024.trt",
#"model_path": "/root/wsh/mlperf-trt-models-v6/fp16/resnet50_fp16_ws2_bt256.trt",
"model_path": "/root/wsh/mlperf-trt-models-v6/fp16/resnet50_fp16_ws2_bt512.trt",
"model_name": "resnet50",
"dataset": "imagenet",
"profile": "resnet50-tf",
"scenario": "Offline",
},
# mobilenet-tf
"mobilenet-tf-SingleStream": {
"schedule_config": "/root/schedule/mlperf_inference_schedule.prototxt",
"config": "mlperf.conf",
"backend": "trt",
"data_format": "NCHW",
"cache_path": "/data/dataset-imagenet-ilsvrc2012-valILSVRC2012_img_val/preprocess",
"dataset_path": "/data/dataset-imagenet-ilsvrc2012-val",
"model_path": "/media/sunhy/inference-master-test/inference-master/v0.5/classification_and_detection/engineer/mobilenet_v1_1.0_224_1.trt",
"model_name": "mobilenet",
"dataset": "imagenet_mobilenet",
"profile": "mobilenet-tf",
"scenario": "SingleStream",
},
"mobilenet-tf-MultiStream": {
"schedule_config": "/root/schedule/mlperf_inference_schedule.prototxt",
"config": "mlperf.conf",
"backend": "trt",
"data_format": "NCHW",
"cache_path": "/data/dataset-imagenet-ilsvrc2012-valILSVRC2012_img_val/preprocess",
"dataset_path": "/data/dataset-imagenet-ilsvrc2012-val",
"model_path": "/media/sunhy/inference-master-test/inference-master/v0.5/classification_and_detection/engineer/mobilenet_v1_1.0_224_16.trt",
"model_name": "mobilenet",
"dataset": "imagenet_mobilenet",
"profile": "mobilenet-tf",
"scenario": "MultiStream",
},
"mobilenet-tf-Server": {
"schedule_config": "/root/schedule/mlperf_inference_schedule.prototxt",
"config": "mlperf.conf",
"backend": "trt",
"data_format": "NCHW",
"cache_path": "/data/dataset-imagenet-ilsvrc2012-valILSVRC2012_img_val/preprocess",
"dataset_path": "/data/dataset-imagenet-ilsvrc2012-val",
"model_path": "/media/sunhy/inference-master-test/inference-master/v0.5/classification_and_detection/engineer/mobilenet_v1_1.0_224_16.trt",
"model_name": "mobilenet",
"dataset": "imagenet_mobilenet",
"profile": "mobilenet-tf",
"scenario": "Server",
},
"mobilenet-tf-Offline": {
"schedule_config": "/root/schedule/mlperf_inference_schedule.prototxt",
"config": "mlperf.conf",
"backend": "trt",
"data_format": "NCHW",
"cache_path": "/data/dataset-imagenet-ilsvrc2012-valILSVRC2012_img_val/preprocess",
"dataset_path": "/data/dataset-imagenet-ilsvrc2012-val",
"model_path": "/media/sunhy/inference-master-test/inference-master/v0.5/classification_and_detection/engineer/mobilenet_v1_1.0_224_16.trt",
"model_name": "mobilenet",
"dataset": "imagenet_mobilenet",
"profile": "mobilenet-tf",
"scenario": "Offline",
},
# ssd-mobilenet-tf
"ssd-mobilenet-tf-SingleStream": {
"schedule_config": "/root/schedule/mlperf_inference_schedule.prototxt",
"config": "mlperf.conf",
"backend": "trt",
"data_format": "NCHW",
"cache_path": "/data/dataset-imagenet-ilsvrc2012-valILSVRC2012_img_val/preprocess",
"dataset_path": "/data/dataset-coco-2017-val",
"model_path": "/media/sunhy/inference-master-test/inference-master/v0.5/classification_and_detection/engineer/ssd_mobilenet_1.trt",
"model_name": "ssd-mobilenet",
"dataset": "coco-300",
"profile": "ssd-mobilenet-tf",
"scenario": "SingleStream",
},
"ssd-mobilenet-tf-MultiStream": {
"schedule_config": "/root/schedule/mlperf_inference_schedule.prototxt",
"config": "mlperf.conf",
"backend": "trt",
"data_format": "NCHW",
"cache_path": "/data/dataset-imagenet-ilsvrc2012-valILSVRC2012_img_val/preprocess",
"dataset_path": "/data/dataset-coco-2017-val",
"model_path": "/media/sunhy/inference-master-test/inference-master/v0.5/classification_and_detection/engineer/ssd_mobilenet_16.trt",
"model_name": "ssd-mobilenet",
"dataset": "coco-300",
"profile": "ssd-mobilenet-tf",
"scenario": "MultiStream",
},
"ssd-mobilenet-tf-Server": {
"schedule_config": "/root/schedule/mlperf_inference_schedule.prototxt",
"config": "mlperf.conf",
"backend": "trt",
"data_format": "NCHW",
"cache_path": "/data/dataset-imagenet-ilsvrc2012-valILSVRC2012_img_val/preprocess",
"dataset_path": "/data/dataset-coco-2017-val",
"model_path": "/media/sunhy/inference-master-test/inference-master/v0.5/classification_and_detection/engineer/ssd_mobilenet_16.trt",
"model_name": "ssd-mobilenet",
"dataset": "coco-300",
"profile": "ssd-mobilenet-tf",
"scenario": "Server",
},
"ssd-mobilenet-tf-Offline": {
"schedule_config": "/root/schedule/mlperf_inference_schedule.prototxt",
"config": "mlperf.conf",
"backend": "trt",
"data_format": "NCHW",
"cache_path": "/data/dataset-imagenet-ilsvrc2012-valILSVRC2012_img_val/preprocess",
"dataset_path": "/data/dataset-coco-2017-val",
"model_path": "/media/sunhy/inference-master-test/inference-master/v0.5/classification_and_detection/engineer/ssd_mobilenet_16.trt",
"model_name": "ssd-mobilenet",
"dataset": "coco-300",
"profile": "ssd-mobilenet-tf",
"scenario": "Offline",
},
}
last_timeing = []
def get_args():
"""Parse commandline."""
parser = argparse.ArgumentParser()
parser.add_argument("--test-case", choices=CASE_SETTINGS_MAP.keys(), help="test case")
parser.add_argument("--schedule-config", help="test case")
parser.add_argument("--dataset", choices=SUPPORTED_DATASETS.keys(), help="dataset")
parser.add_argument("--dataset-path", help="path to the dataset")
parser.add_argument("--dataset-list", help="path to the dataset list")
parser.add_argument("--data-format", choices=["NCHW", "NHWC"], help="data format")
parser.add_argument("--profile", choices=SUPPORTED_PROFILES.keys(), help="standard profiles")
parser.add_argument("--scenario", default="SingleStream",
help="mlperf benchmark scenario, one of " + str(list(SCENARIO_MAP.keys())))
parser.add_argument("--max-batchsize", type=int, help="max batch size in a single inference")
parser.add_argument("--model-name", help="name of the mlperf model, ie. resnet50")
parser.add_argument("--model-path", help="model file")
parser.add_argument("--output", help="test results")
parser.add_argument("--inputs", help="model inputs")
parser.add_argument("--outputs", help="model outputs")
parser.add_argument("--backend", help="runtime to use")
parser.add_argument("--threads", default=os.cpu_count(), type=int, help="threads")
parser.add_argument("--qps", type=int, help="target qps")
parser.add_argument("--cache", type=int, default=0, help="use cache")
parser.add_argument("--cache-path", default="", help="cache path")
parser.add_argument("--accuracy", action="store_true", help="enable accuracy pass")
parser.add_argument("--find-peak-performance", action="store_true", help="enable finding peak performance pass")
# file to use mlperf rules compliant parameters
parser.add_argument("--config", default="../mlperf.conf", help="mlperf rules config")
# below will override mlperf rules compliant settings - don't use for official submission
parser.add_argument("--time", type=int, help="time to scan in seconds")
parser.add_argument("--count", type=int, help="dataset items to use")
parser.add_argument("--max-latency", type=float, help="mlperf max latency in pct tile")
parser.add_argument("--samples-per-query", type=int, help="mlperf multi-stream sample per query")
args = parser.parse_args()
    # don't use defaults in argparse. Instead we default to a dict, override that with a profile
    # and take this as the default unless the command line gives a value
defaults = SUPPORTED_PROFILES["defaults"]
if args.profile:
profile = SUPPORTED_PROFILES[args.profile]
defaults.update(profile)
for k, v in defaults.items():
kc = k.replace("-", "_")
if getattr(args, kc) is None:
setattr(args, kc, v)
if args.inputs:
args.inputs = args.inputs.split(",")
if args.outputs:
args.outputs = args.outputs.split(",")
if args.scenario not in SCENARIO_MAP:
parser.error("valid scanarios:" + str(list(SCENARIO_MAP.keys())))
return args
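# Example invocation (editorial; the script name and output directory are
# assumptions, the test-case key must be one of CASE_SETTINGS_MAP):
#
#     python main.py --test-case resnet50-tf-Offline --accuracy --output ./results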
def add_results(final_results, name, result_dict, result_list, took, show_accuracy=False):
percentiles = [50., 80., 90., 95., 99., 99.9]
buckets = np.percentile(result_list, percentiles).tolist()
buckets_str = ",".join(["{}:{:.4f}".format(p, b) for p, b in zip(percentiles, buckets)])
if result_dict["total"] == 0:
result_dict["total"] = len(result_list)
# this is what we record for each run
result = {
"took": took,
"mean": np.mean(result_list),
"percentiles": {str(k): v for k, v in zip(percentiles, buckets)},
"qps": len(result_list) / took,
"count": len(result_list),
"good_items": result_dict["good"],
"total_items": result_dict["total"],
}
acc_str = ""
if show_accuracy:
result["accuracy"] = 100. * result_dict["good"] / result_dict["total"]
acc_str = ", acc={:.3f}%".format(result["accuracy"])
if "mAP" in result_dict:
result["mAP"] = 100. * result_dict["mAP"]
acc_str += ", mAP={:.3f}%".format(result["mAP"])
# add the result to the result dict
final_results[name] = result
# to stdout
print("{} qps={:.2f}, mean={:.4f}, time={:.3f}{}, queries={}, tiles={}".format(
name, result["qps"], result["mean"], took, acc_str,
len(result_list), buckets_str))
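# Editorial sketch, not part of the benchmark flow: a self-contained example of
# how add_results turns a handful of latencies into the percentile summary. The
# latency values are made up for illustration.
def _example_percentile_summary():
    demo_results = {}
    fake_latencies = [0.010, 0.012, 0.011, 0.030, 0.009]
    add_results(demo_results, "SingleStream-demo",
                {"good": 5, "total": 5}, fake_latencies,
                took=sum(fake_latencies), show_accuracy=True)
    return demo_results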
def main():
global last_timeing
args = get_args()
if args.test_case:
for key in CASE_SETTINGS_MAP[args.test_case]:
value = CASE_SETTINGS_MAP[args.test_case][key]
if key == "model_path" and args.max_batchsize:
import re
                to_be_replaced = re.compile(r"\d+\.trt")  # raw string avoids the invalid escape warning
value = to_be_replaced.sub(str(args.max_batchsize) + ".trt", value)
print("new model path: ", value)
setattr(args, key, value)
log.info(args)
config = os.path.abspath(args.config)
if not os.path.exists(config):
log.error("{} not found".format(config))
sys.exit(1)
# override image format if given
image_format = args.data_format
if not image_format:
log.error("image_format invalid: {}".format(image_format))
sys.exit(1)
# --count applies to accuracy mode only and can be used to limit the number of images
# for testing. For perf model we always limit count to 200.
count_override = False
count = args.count
# if count:
# count_override = True
# dataset to use
wanted_dataset, pre_proc, post_proc, kwargs = SUPPORTED_DATASETS[args.dataset]
ds = wanted_dataset(data_path=args.dataset_path,
image_list=args.dataset_list,
name=args.dataset,
image_format=image_format,
pre_process=pre_proc,
use_cache=args.cache,
cache_dir=args.cache_path,
count=count,
**kwargs)
final_results = {
"runtime": "TensorRT",
"version": "5.1.2",
"time": int(time.time()),
"cmdline": str(args),
}
if args.output:
output_dir = os.path.abspath(args.output)
os.makedirs(output_dir, exist_ok=True)
os.chdir(output_dir)
#
# make one pass over the dataset to validate accuracy
#
# count = ds.get_item_count()
scenario = SCENARIO_MAP[args.scenario]
def process_latencies(latencies_ns):
# called by loadgen to show us the recorded latencies
global last_timeing
last_timeing = [t / NANO_SEC for t in latencies_ns]
settings = sch.GetInferenceSettings()
settings.FromConfig(config, args.model_name, args.scenario)
settings.scenario = scenario
settings.mode = sch.TestMode.PerformanceOnly
if args.accuracy:
settings.mode = sch.TestMode.AccuracyOnly
if args.find_peak_performance:
settings.mode = sch.TestMode.FindPeakPerformance
if args.time:
# override the time we want to run
settings.min_duration_ms = args.time * MILLI_SEC
settings.max_duration_ms = args.time * MILLI_SEC
if args.qps:
qps = float(args.qps)
settings.server_target_qps = qps
settings.offline_expected_qps = qps
# if count_override:
# settings.min_query_count = count
# settings.max_query_count = count
    if args.scenario == 'Offline':  # compare the CLI string; `scenario` holds the enum value
settings.min_query_count = 1
settings.max_query_count = 1
if args.samples_per_query:
settings.multi_stream_samples_per_query = args.samples_per_query
if args.max_latency:
settings.server_target_latency_ns = int(args.max_latency * NANO_SEC)
settings.qsl_rng_seed = 0x2b7e151628aed2a6
settings.sample_index_rng_seed = 0x093c467e37db0c7a
settings.schedule_rng_seed = 0x3243f6a8885a308d
ds_label = []
if args.dataset=='coco-300' or args.dataset=='coco-1200-tf':
for item in ds.label_list:
ds_label.append(item[0])
else:
ds_label = ds.label_list
if not os.path.exists(args.schedule_config):
log.error("schedule config path not exist: {}".format(args.schedule_config))
sys.exit(1)
if not os.path.exists(args.dataset_path):
log.error("dataset path not exist: {}".format(args.dataset_path))
sys.exit(1)
if not os.path.exists(args.model_path):
log.error("cache dir not exist: {}".format(args.model_path))
sys.exit(1)
if not os.path.exists(ds.cache_dir):
log.error("cache dir not exist: {}".format(ds.cache_dir))
sys.exit(1)
sch.InitSchedule(args.schedule_config,
settings, args.dataset, args.dataset_path, ds.cache_dir, args.model_path, args.profile, args.backend,
args.accuracy,
[SUPPORTED_PROFILES[args.profile]["inputs"] if "inputs" in SUPPORTED_PROFILES[args.profile] else ""],
[SUPPORTED_PROFILES[args.profile]["outputs"] if "outputs" in SUPPORTED_PROFILES[args.profile] else ""],
ds.image_list,
ds_label)
sch.InitMLPerf(process_latencies)
log.info("starting {}".format(scenario))
sch.StartTest()
upload_results = sch.UploadResults()
post_proc.update_results(upload_results)
if args.dataset.startswith("coco"):
results_coco = []
upload_results_data = sch.UploadResultsCoco()
for batch in upload_results_data:
batch_detects = []
for image in batch:
batch_detects.extend(image)
results_coco.append(batch_detects)
post_proc.update_results_coco(results_coco)
result_dict = {"good": 0, "total": 0, "scenario": str(scenario)}
if args.accuracy:
post_proc.finalize(result_dict, ds, output_dir=args.output)
last_timeing.append(0.0)
else:
result_dict["good"] = post_proc.good
result_dict["total"] = post_proc.total
print(result_dict)
add_results(final_results, "{}".format(scenario),
result_dict, last_timeing, time.time() - sch.GetLastLoad(), args.accuracy)
# print(last_timeing)
#
# write final results
#
    if
import itertools
import typing
from abc import abstractmethod, ABC, ABCMeta
from collections import Counter
from dataclasses import dataclass, astuple, replace, field
from enum import unique, Enum, auto, EnumMeta
from functools import lru_cache
from typing import Tuple, Union, List, Generator, Dict, Generic, Optional
import gym
import numpy as np
import torch
from colored import fg
from gym import spaces
from utils import RESET
CoordType = Tuple[int, int]
IntGenerator = Generator[int, None, None]
IntListGenerator = Generator[List[int], None, None]
BoolGenerator = Generator[bool, None, None]
WORLD_SIZE = None
def move_from(origin: CoordType, toward: CoordType) -> CoordType:
origin = np.array(origin)
i, j = np.array(origin) + np.clip(
np.array(toward) - origin,
-1,
1,
)
return i, j
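# Editorial illustration: move_from advances at most one cell per axis toward
# the target (a single Chebyshev step), so these assertions hold.
def _example_move_from_step():
    assert move_from((0, 0), toward=(3, 5)) == (1, 1)
    assert move_from((4, 4), toward=(4, 4)) == (4, 4)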
class InvalidInput(Exception):
pass
""" abstract classes """
class WorldObject:
@property
@abstractmethod
def symbol(self):
pass
@abstractmethod
def __eq__(self, other):
pass
class ActionComponentMeta(type):
pass
class ActionComponentEnumMeta(ActionComponentMeta, EnumMeta):
pass
class ActionComponentABCMeta(ActionComponentMeta, ABCMeta):
pass
class ActionComponent(metaclass=ActionComponentMeta):
@staticmethod
@abstractmethod
def parse(n: int) -> "ActionComponent":
pass
@staticmethod
@abstractmethod
def space() -> spaces.Discrete:
pass
@abstractmethod
def to_int(self) -> int:
pass
ActionComponentGenerator = Generator[ActionComponent, None, None]
class Building(WorldObject, ActionComponent, ABC, metaclass=ActionComponentABCMeta):
def __eq__(self, other):
return type(self) == type(other)
def __lt__(self, other):
# noinspection PyArgumentList
return self.value < other.value
def __hash__(self):
        return hash(type(self))  # keep hashing consistent with the type-based __eq__ above
def __str__(self):
return self.__class__.__name__
def __repr__(self):
return f"({Buildings.index(self)}) {str(self)}: {self.cost}"
@property
@abstractmethod
def cost(self) -> "Resources":
pass
def on(self, coord: "CoordType", building_positions: "BuildingPositions"):
return self == building_positions.get(coord)
@staticmethod
def parse(n: int) -> "Building":
return Buildings[n]
@staticmethod
def space() -> spaces.Discrete:
return spaces.Discrete(len(Buildings))
@property
@abstractmethod
def symbol(self) -> str:
pass
def to_int(self) -> int:
return Buildings.index(self)
class Assignment:
@abstractmethod
def execute(
self,
positions: "Positions",
worker: "Worker",
assignments: "Assignments",
building_positions: "BuildingPositions",
pending_positions: "BuildingPositions",
required: typing.Counter["Building"],
resources: typing.Counter["Resource"],
carrying: "Carrying",
) -> Optional[str]:
raise NotImplementedError
""" world objects"""
@unique
class Worker(WorldObject, ActionComponent, Enum, metaclass=ActionComponentEnumMeta):
W1 = auto()
W2 = auto()
W3 = auto()
# W4 = auto()
# W5 = auto()
# W6 = auto()
# W7 = auto()
# W8 = auto()
# W9 = auto()
# W10 = auto()
# W11 = auto()
# W12 = auto()
def __eq__(self, other):
# noinspection PyArgumentList
return Enum.__eq__(self, other)
def __lt__(self, other):
assert isinstance(other, Worker)
# noinspection PyArgumentList
return self.value < other.value
def __hash__(self):
# noinspection PyArgumentList
return Enum.__hash__(self)
def on(
self,
coord: "CoordType",
positions: "Positions",
) -> bool:
return positions[self] == coord
@staticmethod
def parse(n: int) -> "Worker":
return Worker(n)
@staticmethod
def space() -> spaces.Discrete:
return spaces.Discrete(len(Worker)) # binary: in or out
@property
def symbol(self) -> str:
return str(self.value)
def to_int(self) -> int:
return self.value
WorkerGenerator = Generator[Worker, None, None]
@unique
class Resource(WorldObject, Assignment, Enum):
MINERALS = auto()
GAS = auto()
def __hash__(self):
return Enum.__hash__(self)
def __eq__(self, other):
return Enum.__eq__(self, other)
def execute(
self,
positions: "Positions",
worker: "Worker",
assignments: "Assignments",
building_positions: "BuildingPositions",
pending_positions: "BuildingPositions",
required: typing.Counter["Building"],
resources: typing.Counter["Resource"],
carrying: "Carrying",
) -> Optional[str]:
worker_pos = positions[worker]
if carrying[worker] is None:
resource_pos = positions[self]
positions[worker] = move_from(worker_pos, toward=resource_pos)
worker_pos = positions[worker]
if worker_pos == resource_pos:
if self is Resource.GAS and not isinstance(
building_positions.get(positions[worker]), Assimilator
):
return "Assimilator required for harvesting gas" # no op on gas unless Assimilator
carrying[worker] = self
else:
nexus_positions: List[CoordType] = [
p for p, b in building_positions.items() if isinstance(b, Nexus)
]
nexus = get_nearest(nexus_positions, to=worker_pos)
positions[worker] = move_from(
worker_pos,
toward=nexus,
)
if positions[worker] == nexus:
resource = carrying[worker]
assert isinstance(resource, Resource)
resources[resource] += 100
carrying[worker] = None
return None
def on(
self,
coord: "CoordType",
positions: "Positions",
) -> bool:
return positions[self] == coord
@property
def symbol(self) -> str:
if self is Resource.GAS:
return fg("green") + "g" + RESET
if self is Resource.MINERALS:
return fg("blue") + "m" + RESET
raise RuntimeError
@dataclass(frozen=True)
class Resources:
minerals: int
gas: int
def __iter__(self):
yield from [Resource.MINERALS] * self.minerals
yield from [Resource.GAS] * self.gas
assert set(Resources(0, 0).__annotations__.keys()) == {
r.lower() for r in Resource.__members__
}
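# Editorial sketch: iterating a Resources bundle yields one Resource entry per
# unit, which makes Counter-based bookkeeping straightforward.
def _example_resources_counter() -> typing.Counter[Resource]:
    # evaluates to Counter({Resource.MINERALS: 2, Resource.GAS: 1})
    return Counter(Resources(minerals=2, gas=1))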
""" action components """
@dataclass
class Coord(ActionComponent):
i: int
j: int
@staticmethod
def parse(n: int) -> "Coord":
assert isinstance(WORLD_SIZE, int)
ij = np.unravel_index(n, (WORLD_SIZE, WORLD_SIZE))
return Coord(*ij)
@staticmethod
def possible_values():
assert isinstance(WORLD_SIZE, int)
for i in range(WORLD_SIZE):
for j in range(WORLD_SIZE):
yield i, j
@staticmethod
def space() -> spaces.Discrete:
assert isinstance(WORLD_SIZE, int)
return spaces.Discrete(WORLD_SIZE ** 2)
def to_int(self) -> int:
return int(np.ravel_multi_index((self.i, self.j), (WORLD_SIZE, WORLD_SIZE)))
@staticmethod
def zeros() -> IntGenerator:
yield 0
yield 0
BuildingPositions = Dict[CoordType, Building]
Positions = Dict[Union[Resource, Worker], CoordType]
Carrying = Dict[Worker, Optional[Resource]]
Assignments = Dict[Worker, Assignment]
@dataclass(frozen=True)
class BuildOrder(Assignment):
building: Building
coord: CoordType
def execute(
self,
positions: "Positions",
worker: "Worker",
assignments: "Assignments",
building_positions: "BuildingPositions",
pending_positions: "BuildingPositions",
required: typing.Counter["Building"],
resources: typing.Counter["Resource"],
carrying: "Carrying",
) -> Optional[str]:
if positions[worker] == self.coord:
building_positions[self.coord] = self.building
assignments[worker] = DoNothing()
return
else:
if self.coord not in pending_positions:
pending_positions[self.coord] = self.building
resources.subtract(self.building.cost)
return GoTo(self.coord).execute(
positions=positions,
worker=worker,
assignments=assignments,
building_positions=building_positions,
pending_positions=pending_positions,
required=required,
resources=resources,
carrying=carrying,
)
@dataclass(frozen=True)
class GoTo(Assignment):
coord: CoordType
def execute(
self, positions: "Positions", worker: "Worker", assignments, *args, **kwargs
) -> Optional[str]:
positions[worker] = move_from(positions[worker], toward=self.coord)
return
class DoNothing(Assignment):
def execute(self, *args, **kwargs) -> Optional[str]:
return
Command = Union[BuildOrder, Resource]
O = typing.TypeVar("O", torch.Tensor, np.ndarray, int, gym.Space)
@dataclass(frozen=True)
class Obs(typing.Generic[O]):
action_mask: O
line_mask: O
lines: O
obs: O
partial_action: O
ptr: O
resources: O
X = typing.TypeVar("X")
@dataclass(frozen=True)
class RawAction:
delta: Union[np.ndarray, torch.Tensor, X]
dg: Union[np.ndarray, torch.Tensor, X]
ptr: Union[np.ndarray, torch.Tensor, X]
a: Union[np.ndarray, torch.Tensor, X]
@staticmethod
def parse(*xs) -> "RawAction":
delta, dg, ptr, *a = xs
if a == [None]:
a = None
return RawAction(delta, dg, ptr, a)
def flatten(self) -> Generator[any, None, None]:
yield from astuple(self)
Ob = Optional[bool]
OB = Optional[Building]
OC = Optional[Coord]
@dataclass(frozen=True)
class CompoundAction:
worker_values: List[Ob] = field(default_factory=lambda: [False for _ in Worker])
building: OB = None
coord: OC = None
@staticmethod
def _worker_values() -> List[Ob]:
return [False, True]
@classmethod
def input_space(cls):
return spaces.MultiDiscrete(
[
*[len(cls._worker_values())] * len(Worker),
1 + Building.space().n,
1 + Coord.space().n,
]
)
@classmethod
def parse(cls, *values: int) -> "CompoundAction":
*ws, b, c = map(int, values)
return CompoundAction(
worker_values=[cls._worker_values()[w] for w in ws],
building=None if b == 0 else Building.parse(b - 1),
coord=None if c == 0 else Coord.parse(c - 1),
)
@classmethod
def possible_worker_values(cls) -> Generator[Tuple[bool, bool], None, None]:
yield from itertools.product(cls._worker_values(), repeat=len(Worker))
@classmethod
def representation_space(cls):
return cls.input_space()
def to_input_int(self) -> IntGenerator:
for w in self.worker_values:
yield self._worker_values().index(w)
for attr in [self.building, self.coord]:
yield 0 if attr is None else 1 + attr.to_int()
def to_representation_ints(self) -> IntGenerator:
yield from self.to_input_int()
def workers(self) -> Generator[Worker, None, None]:
for worker, value in zip(Worker, self.worker_values):
if value:
yield worker
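# Editorial sketch: the flat action encoding is one slot per Worker followed by
# a building slot and a coord slot, where 0 means "not selected" for the last
# two; decoding a non-zero coord additionally requires WORLD_SIZE to be set.
def _example_parse_compound_action() -> CompoundAction:
    # selects W1 only, with no building and no coordinate
    return CompoundAction.parse(1, 0, 0, 0, 0)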
CompoundActionGenerator = Generator[CompoundAction, None, None]
@dataclass(frozen=True)
class ActionStage:
@staticmethod
def _children() -> List[type]:
return [
NoWorkersAction,
# WorkersAction,
# BuildingAction,
CoordAction,
BuildingCoordAction,
]
@staticmethod
@abstractmethod
def _gate_openers() -> CompoundActionGenerator:
pass
@staticmethod
@abstractmethod
def _parse_string(s: str) -> CompoundAction:
pass
@staticmethod
@abstractmethod
def _permitted_values() -> CompoundActionGenerator:
pass
@staticmethod
@abstractmethod
def _prompt() -> str:
pass
@staticmethod
@abstractmethod
def _update(action: CompoundAction) -> "ActionStage":
pass
@abstractmethod
def action_components(self) -> CompoundAction:
pass
@abstractmethod
def assignment(self, positions: Positions) -> Optional[Assignment]:
pass
def from_input(self) -> "ActionStage":
compound_action = None
while compound_action is None:
string = input(self._prompt() + "\n")
if string:
try:
compound_action = self._parse_string(string)
except InvalidInput as e:
print(e)
else:
compound_action = CompoundAction()
return self._update(compound_action)
@classmethod
@lru_cache
def gate_openers(cls) -> np.ndarray:
return np.array([list(o.to_input_int()) for o in cls._gate_openers()])
@classmethod
def gate_opener_max_size(cls):
def opener_size():
for c in cls._children():
assert issubclass(c, ActionStage)
yield len(c.gate_openers())
return max(opener_size())
@abstractmethod
def get_workers(self) -> WorkerGenerator:
raise NotImplementedError
def invalid(
self,
resources: typing.Counter[Resource],
dependencies: Dict[Building, Building],
building_positions: BuildingPositions,
pending_positions: BuildingPositions,
positions: Positions,
) -> Optional[str]:
return
@classmethod
@lru_cache
def mask(cls) -> np.ndarray:
nvec = CompoundAction.input_space().nvec
mask = np.ones((len(nvec), max(nvec)))
R = np.arange(len(nvec))
for permitted_values in cls._permitted_values():
unmask = [*permitted_values.to_input_int()]
mask[R, unmask] = 0
return mask
def to_ints(self):
return self.action_components().to_representation_ints()
def update(self, *components: int) -> "ActionStage":
return self._update(CompoundAction.parse(*components))
class CoordCanOpenGate(ActionStage, ABC):
@staticmethod
def _gate_openers() -> CompoundActionGenerator:
for i, j in Coord.possible_values():
yield CompoundAction(coord=Coord(i, j))
@dataclass(frozen=True)
class NoWorkersAction(ActionStage):
@staticmethod
def _gate_openers() -> CompoundActionGenerator:
# selecting no workers is a no-op that allows gate to open
yield CompoundAction()
for building in Buildings:
yield CompoundAction(building=building)
for i, j in Coord.possible_values():
yield CompoundAction(coord=Coord(i, j))
for building in Buildings:
yield CompoundAction(building=building, coord=Coord(i, j))
@staticmethod
def _parse_string(s: str) -> CompoundAction:
try:
*ws, b, i, j = map(int, s.split())
except ValueError:
raise InvalidInput
return CompoundAction(
worker_values=[w.value in ws for w in Worker],
building=Building.parse(b),
coord=Coord(i, j),
)
@staticmethod
def _permitted_values() -> CompoundActionGenerator:
for worker_values, building, coord in itertools.product(
CompoundAction.possible_worker_values(),
[None, *Buildings],
[None, *Coord.possible_values()],
):
            if coord:
-> bool:
return self.get_shortname(schema).name in {'id', '__type__'}
def generic(self, schema: s_schema.Schema) -> bool:
return self.get_source(schema) is None
def get_referrer(self, schema: s_schema.Schema) -> Optional[so.Object]:
return self.get_source(schema)
def is_exclusive(self, schema: s_schema.Schema) -> bool:
if self.generic(schema):
raise ValueError(f'{self!r} is generic')
exclusive = schema.get('std::exclusive', type=constraints.Constraint)
ptr = self.get_nearest_non_derived_parent(schema)
for constr in ptr.get_constraints(schema).objects(schema):
if (constr.issubclass(schema, exclusive) and
not constr.get_subjectexpr(schema)):
return True
return False
def singular(
self,
schema: s_schema.Schema,
direction: PointerDirection = PointerDirection.Outbound,
) -> bool:
# Determine the cardinality of a given endpoint set.
if direction == PointerDirection.Outbound:
return (self.get_cardinality(schema) is
qltypes.SchemaCardinality.ONE)
else:
return self.is_exclusive(schema)
def get_implicit_bases(self, schema: s_schema.Schema) -> List[Pointer]:
bases = super().get_implicit_bases(schema)
# True implicit bases for pointers will have a different source.
my_source = self.get_source(schema)
return [
b for b in bases
if b.get_source(schema) != my_source
]
def has_user_defined_properties(self, schema: s_schema.Schema) -> bool:
return False
def allow_ref_propagation(
self,
schema: s_schema.Schema,
constext: sd.CommandContext,
refdict: so.RefDict,
) -> bool:
object_type = self.get_source(schema)
assert isinstance(object_type, s_types.Type)
return not object_type.is_view(schema)
def get_schema_reflection_default(
self,
schema: s_schema.Schema,
) -> Optional[str]:
"""Return the default expression if this is a reflection of a
schema class field and the field has a defined default value.
"""
ptr = self.get_nearest_non_derived_parent(schema)
src = ptr.get_source(schema)
if src is None:
# This is an abstract pointer
return None
ptr_name = ptr.get_name(schema)
if ptr_name.module not in {'schema', 'sys', 'cfg'}:
# This isn't a reflection type
return None
if isinstance(src, Pointer):
# This is a link property
tgt = src.get_target(schema)
assert tgt is not None
schema_objtype = tgt
else:
assert isinstance(src, s_types.Type)
schema_objtype = src
assert isinstance(schema_objtype, so.QualifiedObject)
src_name = schema_objtype.get_name(schema)
mcls = so.ObjectMeta.get_schema_metaclass(src_name.name)
if mcls is None:
# This schema class is not (publicly) reflected.
return None
fname = ptr.get_shortname(schema).name
if not mcls.has_field(fname):
# This pointer is not a schema field.
return None
field = mcls.get_field(fname)
if not isinstance(field, so.SchemaField):
# Not a schema field, no default possible.
return None
f_default = field.default
if (
f_default is None
or f_default is so.NoDefault
):
# No explicit default value.
return None
tgt = ptr.get_target(schema)
if f_default is so.DEFAULT_CONSTRUCTOR:
if (
issubclass(
field.type,
(collections.abc.Set, collections.abc.Sequence),
)
and not issubclass(field.type, (str, bytes))
):
return f'<{tgt.get_displayname(schema)}>[]'
else:
return None
default = qlquote.quote_literal(json.dumps(f_default))
if tgt.is_enum(schema):
return f'<{tgt.get_displayname(schema)}><str>to_json({default})'
else:
return f'<{tgt.get_displayname(schema)}>to_json({default})'
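    # For orientation (editorial note): the string built above is an EdgeQL cast
    # expression, e.g. '<array<std::str>>[]' for a constructor default or
    # '<std::str>to_json("...")' for a literal default, with an extra <str> cast
    # inserted for enum targets.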
class PseudoPointer(s_abc.Pointer):
# An abstract base class for pointer-like objects, i.e.
# pseudo-links used by the compiler to represent things like
# tuple and type intersection.
def is_tuple_indirection(self) -> bool:
return False
def is_type_intersection(self) -> bool:
return False
def get_bases(self, schema: s_schema.Schema) -> so.ObjectList[Pointer]:
return so.ObjectList.create(schema, [])
def get_ancestors(self, schema: s_schema.Schema) -> so.ObjectList[Pointer]:
return so.ObjectList.create(schema, [])
def get_name(self, schema: s_schema.Schema) -> str:
raise NotImplementedError
def get_shortname(self, schema: s_schema.Schema) -> str:
return self.get_name(schema)
def get_displayname(self, schema: s_schema.Schema) -> str:
return self.get_name(schema)
def has_user_defined_properties(self, schema: s_schema.Schema) -> bool:
return False
def get_required(self, schema: s_schema.Schema) -> bool:
return True
def get_cardinality(
self,
schema: s_schema.Schema
) -> qltypes.SchemaCardinality:
raise NotImplementedError
def get_path_id_name(self, schema: s_schema.Schema) -> str:
return self.get_name(schema)
def get_is_derived(self, schema: s_schema.Schema) -> bool:
return False
def get_is_local(self, schema: s_schema.Schema) -> bool:
return True
def get_union_of(
self,
schema: s_schema.Schema,
) -> None:
return None
def get_default(
self,
schema: s_schema.Schema,
) -> Optional[s_expr.Expression]:
return None
def get_expr(self, schema: s_schema.Schema) -> Optional[s_expr.Expression]:
return None
def get_source(self, schema: s_schema.Schema) -> so.Object:
raise NotImplementedError
def get_target(self, schema: s_schema.Schema) -> s_types.Type:
raise NotImplementedError
def get_near_endpoint(
self,
schema: s_schema.Schema,
direction: PointerDirection,
) -> so.Object:
if direction is PointerDirection.Outbound:
return self.get_source(schema)
else:
raise AssertionError(
f'inbound direction is not valid for {type(self)}'
)
def get_far_endpoint(
self,
schema: s_schema.Schema,
direction: PointerDirection,
) -> so.Object:
if direction is PointerDirection.Outbound:
return self.get_target(schema)
else:
raise AssertionError(
f'inbound direction is not valid for {type(self)}'
)
def is_link_property(self, schema: s_schema.Schema) -> bool:
return False
def generic(self, schema: s_schema.Schema) -> bool:
return False
def singular(
self,
schema: s_schema.Schema,
direction: PointerDirection = PointerDirection.Outbound,
) -> bool:
raise NotImplementedError
def scalar(self) -> bool:
raise NotImplementedError
def material_type(self, schema: s_schema.Schema) -> PseudoPointer:
return self
def is_pure_computable(self, schema: s_schema.Schema) -> bool:
return False
def is_exclusive(self, schema: s_schema.Schema) -> bool:
return False
def get_schema_reflection_default(
self,
schema: s_schema.Schema,
) -> Optional[str]:
return None
PointerLike = Union[Pointer, PseudoPointer]
class ComputableRef(so.Object):
"""A shell for a computed target type."""
expr: qlast.Expr
def __init__(self, expr: qlast.Base) -> None:
super().__init__(_private_init=True)
self.__dict__['expr'] = expr
class PointerCommandContext(sd.ObjectCommandContext[Pointer],
s_anno.AnnotationSubjectCommandContext):
pass
class PointerCommandOrFragment(
referencing.ReferencedObjectCommandBase[Pointer]
):
def resolve_refs(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
schema = super().resolve_refs(schema, context)
target_ref = self.get_local_attribute_value('target')
if target_ref is not None:
srcctx = self.get_attribute_source_context('target')
if isinstance(target_ref, s_types.TypeExprShell):
cc_cmd = s_types.ensure_schema_type_expr_type(
schema,
target_ref,
parent_cmd=self,
src_context=srcctx,
context=context,
)
if cc_cmd is not None:
schema = cc_cmd.apply(schema, context)
if isinstance(target_ref, s_types.TypeShell):
try:
target = target_ref.resolve(schema)
except errors.InvalidReferenceError as e:
refname = target_ref.get_refname(schema)
if refname is not None:
utils.enrich_schema_lookup_error(
e,
refname,
modaliases=context.modaliases,
schema=schema,
item_type=s_types.Type,
context=srcctx,
)
raise
elif isinstance(target_ref, ComputableRef):
schema, target_t, base = self._parse_computable(
target_ref.expr, schema, context)
if base is not None:
self.set_attribute_value(
'bases', so.ObjectList.create(schema, [base]),
)
self.set_attribute_value(
'is_derived', True
)
if context.declarative:
self.set_attribute_value(
'declared_overloaded', True
)
target = target_t
else:
target = target_ref
self.set_attribute_value('target', target, source_context=srcctx)
return schema
def _parse_computable(
self,
expr: qlast.Base,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> Tuple[s_schema.Schema, s_types.Type, Optional[PointerLike]]:
from edb.ir import ast as irast
from edb.ir import typeutils as irtyputils
from edb.schema import objtypes as s_objtypes
# "source" attribute is set automatically as a refdict back-attr
parent_ctx = self.get_referrer_context(context)
assert parent_ctx is not None
source_name = parent_ctx.op.classname
source = schema.get(source_name, type=s_objtypes.ObjectType)
expression = s_expr.Expression.compiled(
s_expr.Expression.from_ast(expr, schema, context.modaliases),
schema=schema,
options=qlcompiler.CompilerOptions(
modaliases=context.modaliases,
anchors={qlast.Source().name: source},
path_prefix_anchor=qlast.Source().name,
singletons=frozenset([source]),
),
)
assert isinstance(expression.irast, irast.Statement)
base = None
target = expression.irast.stype
result_expr = expression.irast.expr.expr
if (isinstance(result_expr, irast.SelectStmt)
and result_expr.result.rptr is not None):
expr_rptr = result_expr.result.rptr
while isinstance(expr_rptr, irast.TypeIntersectionPointer):
expr_rptr = expr_rptr.source.rptr
is_ptr_alias = (
expr_rptr.direction is PointerDirection.Outbound
)
if is_ptr_alias:
new_schema, base = irtyputils.ptrcls_from_ptrref(
expr_rptr.ptrref, schema=schema
)
# Only pointers coming from the same source as the
# alias should be "inherited" (in order to preserve
# link props). Random paths coming from other sources
# get treated same as any other arbitrary expression
# in a computable.
if base.get_source(new_schema) != source:
base = None
else:
schema = new_schema
self.set_attribute_value('expr', expression)
required, card = expression.irast.cardinality.to_schema_value()
spec_required = self.get_attribute_value('required')
spec_card = self.get_attribute_value('cardinality')
if spec_required and not required:
ptr_name = sn.shortname_from_fullname(
self.get_attribute_value('name')).name
srcctx = self.get_attribute_source_context('target')
raise errors.SchemaDefinitionError(
f'possibly an empty set returned by an '
f'expression for the computable '
f'{ptr_name!r} '
f"declared as 'required'",
context=srcctx
)
if (
spec_card in {None, qltypes.SchemaCardinality.ONE} and
card is not qltypes.SchemaCardinality.ONE
):
ptr_name = sn.shortname_from_fullname(
self.get_attribute_value('name')).name
srcctx = self.get_attribute_source_context('target')
raise errors.SchemaDefinitionError(
f'possibly more than one element returned by an '
f'expression for the computable '
f'{ptr_name!r} '
f"declared as 'single'",
context=srcctx
)
if spec_card is None:
self.set_attribute_value('cardinality', card)
if not spec_required:
self.set_attribute_value('required', required)
self.set_attribute_value('computable', True)
return schema, target, base
def _deparse_name(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
name: str,
) -> qlast.ObjectRef:
ref = super()._deparse_name(schema, context, name)
referrer_ctx = self.get_referrer_context(context)
if referrer_ctx is None:
return ref
else:
ref.module = ''
return ref
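# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): the compatibility
# rule enforced in _parse_computable() above between the *declared*
# required/cardinality of a computable pointer and the required/cardinality
# *inferred* from its compiled expression.  ONE and MANY are plain-string
# stand-ins for qltypes.SchemaCardinality values.
ONE, MANY = 'ONE', 'MANY'


def _sketch_check_computable(spec_required, spec_card, inferred_required, inferred_card):
    """Return a list of schema-definition problems (an empty list means OK)."""
    problems = []
    if spec_required and not inferred_required:
        problems.append("declared 'required' but the expression may return an empty set")
    if spec_card in (None, ONE) and inferred_card != ONE:
        problems.append("declared 'single' but the expression may return more than one element")
    return problems


assert _sketch_check_computable(False, None, False, ONE) == []
assert _sketch_check_computable(True, ONE, False, ONE) != []   # possibly empty set
assert _sketch_check_computable(False, ONE, True, MANY) != []  # possibly more than one element
# ---------------------------------------------------------------------------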
class PointerCommand(
referencing.ReferencedInheritingObjectCommand[Pointer],
constraints.ConsistencySubjectCommand[Pointer],
s_anno.AnnotationSubjectCommand,
PointerCommandOrFragment,
):
def _set_pointer_type(
self,
schema: s_schema.Schema,
astnode: qlast.CreateConcretePointer,
context: sd.CommandContext,
target_ref: Union[so.Object, so.ObjectShell],
) -> None:
return None
def _create_begin(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
schema = super()._create_begin(schema, context)
if not context.canonical:
self._validate_pointer_def(schema, context)
return schema
def _alter_begin(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
schema = super()._alter_begin(schema, context)
if not context.canonical:
self._validate_pointer_def(schema, context)
return schema
def _validate_pointer_def(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> None:
"""Check that pointer definition is sound."""
from edb.ir import ast as irast
referrer_ctx = self.get_referrer_context(context)
if referrer_ctx is None:
return
scls = self.scls
if not scls.get_is_local(schema):
return
default_expr = scls.get_default(schema)
if default_expr is not None:
if default_expr.irast is None:
default_expr = default_expr.compiled(default_expr, schema)
assert isinstance(default_expr.irast, irast.Statement)
default_type = default_expr.irast.stype
assert default_type is not None
ptr_target = scls.get_target(schema)
assert ptr_target is not None
source_context = self.get_attribute_source_context('default')
if not default_type.assignment_castable_to(ptr_target, schema):
raise errors.SchemaDefinitionError(
f'default expression is of invalid type: '
f'{default_type.get_displayname(schema)}, '
f'expected {ptr_target.get_displayname(schema)}',
context=source_context,
)
# "required" status of defaults should not be enforced
# because it's impossible to actually
in state %s"%(time.time()-start_t, ))
Trace.trace(self.trace_level+4,"process_write_request: next write volume returned %s" % (v,))
# volume clerk returned error
if v["status"][0] != e_errors.OK:
rq.ticket["reject_reason"] = (v["status"][0],v["status"][1])
if v['status'][0] == e_errors.BROKEN: # too many volumes set to NOACCESS
if self.lm_lock != e_errors.BROKEN:
Trace.alarm(e_errors.ERROR,"LM %s goes to %s state" %
(self.name, e_errors.BROKEN))
self.lm_lock = e_errors.BROKEN
return None, None
if v["status"][0] == e_errors.NOVOLUME or v["status"][0] == e_errors.QUOTAEXCEEDED:
if not self.process_for_bound_vol:
#if wr_en > rq.ticket["vc"]["file_family_width"]:
# if volume veto list is not empty then work can be done later after
# the tape is available again
if not vol_veto_list or v["status"][0] == e_errors.QUOTAEXCEEDED:
# remove this request and send regret to the client
rq.ticket['status'] = v['status']
self.send_regret(rq.ticket)
self.pending_work.delete(rq)
rq = None
else:
rq.ticket["status"] = v["status"]
#rq.ticket["reject_reason"] = (v["status"][0],v["status"][1])
self.continue_scan = 1
return rq, key_to_check
else:
rq.ticket["status"] = v["status"]
external_label = v["external_label"]
else:
external_label = self.process_for_bound_vol
# found a volume that has write work pending - return it
rq.ticket["fc"]["external_label"] = external_label
rq.ticket["fc"]["size"] = rq.ticket["wrapper"]["size_bytes"]
# the request has passed just about all of the criteria
# check whether it also passes the fair-share criterion
# temporarily store the selected request so it can be used in case
# the other request(s) chosen on fair-share grounds do not get
# selected for some other reason(s)
# in any case, if this request's SG limit is 0 and the temporarily
# stored request's SG limit is not, do not update the stored request
rq_sg = volume_family.extract_storage_group(vol_family)
if (rq.ticket.get('ignore_fair_share', None)):
# do not count this request against fair share
# this is an automigration request
sg_limit = 0
else:
sg_limit = self.get_sg_limit(rq_sg)
self.postponed_requests.put(rq)
if self.tmp_rq:
if sg_limit != 0: # replace tmp_rq if rq SG limit is not 0
# replace tmp_rq based on priority
if rq.pri > self.tmp_rq.pri:
self.tmp_rq = rq
else: self.tmp_rq = rq
if self.sg_exceeded and self.process_for_bound_vol:
rq = None
self.continue_scan = 0
key_to_check = None
Trace.trace(self.trace_level+4, "process_write_request: returning %s %s"%(rq, key_to_check))
return rq, key_to_check
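# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): the rule used above
# for keeping a temporarily stored request (tmp_rq).  A newly selected
# request replaces tmp_rq only if its storage-group limit is non-zero and it
# has a higher priority; if nothing is stored yet it is kept unconditionally.
# _SketchReq is a stand-in for manage_queue.Request.
class _SketchReq(object):
    def __init__(self, name, pri):
        self.name = name
        self.pri = pri


def _sketch_update_tmp_rq(tmp_rq, rq, sg_limit):
    if tmp_rq is None:
        return rq
    if sg_limit != 0 and rq.pri > tmp_rq.pri:
        return rq
    return tmp_rq


_first = _SketchReq('first', pri=1)
_better = _SketchReq('better', pri=5)
_zero_limit = _SketchReq('zero-limit', pri=9)
assert _sketch_update_tmp_rq(None, _first, 10) is _first
assert _sketch_update_tmp_rq(_first, _better, 10) is _better
assert _sketch_update_tmp_rq(_better, _zero_limit, 0) is _better
# ---------------------------------------------------------------------------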
# is there any work for any volume?
def next_work_any_volume(self, requestor):
"""
Is there any work for any volume?
:type requestor: :obj:`dict`
:arg requestor: mover ticket
:rtype: :obj:`tuple` (:obj:`manage_queue.Request` - request or :obj:`None`,
:obj:`tuple` - (error, :obj:`str` or :obj:`None`) - status)
"""
Trace.trace(self.trace_level, "next_work_any_volume")
self.init_request_selection() # start request selection cycle
# The list of the active volumes.
# Read request for the file on the active volume
# can not get assigned to the idle mover.
active_vols = self.volumes_at_movers.active_volumes()
# look in pending work queue for reading or writing work
#rq=self.pending_work.get(active_volumes=active_vols)
rq=self._get_request(requestor, self.pending_work.get, active_volumes=active_vols)
Trace.trace(self.trace_level+10, "next_work_any_volume: RQ: %s"%(rq,))
while rq:
rej_reason = None
Trace.trace(self.trace_level+10, "next_work_any_volume: rq %s"%(rq.ticket,))
if rq.ticket.has_key('reject_reason'):
try:
rej_reason = rq.ticket['reject_reason'][0]
del(rq.ticket['reject_reason'])
except KeyError:
exc, msg, tb = sys.exc_info()
Trace.handle_error(exc, msg, tb)
Trace.trace(self.trace_level+10, "next_work_any_volume KeyError: rq %s"%(rq.ticket,))
continue
if rq.work == "read_from_hsm":
rq, key = self.process_read_request(rq, requestor)
Trace.trace(self.trace_level+41,"next_work_any_volume: process_read_request returned %s %s %s" % (rq, key,self.continue_scan))
if self.continue_scan:
if key:
#rq = self.pending_work.get(key, next=1, active_volumes=active_vols, disabled_hosts=self.disabled_hosts)
rq = self._get_request(requestor, self.pending_work.get, key, next=1, active_volumes=active_vols, disabled_hosts=self.disabled_hosts)
# if there are no more requests for a given volume
# rq will be None, but we do not want to stop here
if rq:
# continue check with current volume
continue
#rq = self.pending_work.get(next=1, active_volumes=active_vols, disabled_hosts=self.disabled_hosts) # get next request
rq = self._get_request(requestor, self.pending_work.get, next=1, active_volumes=active_vols, disabled_hosts=self.disabled_hosts) # get next request
Trace.trace(self.trace_level+41,"next_work_any_volume: new rq %s" % (rq,))
continue
break
elif rq.work == "write_to_hsm":
rq, key = self.process_write_request(rq, requestor)
Trace.trace(self.trace_level+10,"next_work_any_volume: process_write_request returned %s %s %s" % (rq, key,self.continue_scan))
if self.continue_scan:
if key:
#rq = self.pending_work.get(key, next=1, active_volumes=active_vols, disabled_hosts=self.disabled_hosts)
rq = self._get_request(requestor, self.pending_work.get, key, next=1, active_volumes=active_vols, disabled_hosts=self.disabled_hosts)
# if there are no more requests for a given volume family
# rq will be None, but we do not want to stop here
if rq:
# continue check with current volume
continue
#rq = self.pending_work.get(next=1, active_volumes=active_vols, disabled_hosts=self.disabled_hosts) # get next request
rq = self._get_request(requestor, self.pending_work.get, next=1, active_volumes=active_vols, disabled_hosts=self.disabled_hosts) # get next request
Trace.trace(self.trace_level+41,"next_work_any_volume: new rq %s" % (rq,))
continue
break
# alas, all I know about is reading and writing
else:
Trace.log(e_errors.ERROR,
"assertion error in next_work_any_volume %s"%(rq.ticket,))
raise AssertionError
Trace.trace(self.trace_level+41,"next_work_any_volume: continue")
#rq = self.pending_work.get(next=1, active_volumes=active_vols, disabled_hosts=self.disabled_hosts)
rq = self._get_request(requestor, self.pending_work.get, next=1, active_volumes=active_vols, disabled_hosts=self.disabled_hosts)
if not rq or (rq.ticket.has_key('reject_reason') and rq.ticket['reject_reason'][0] == 'PURSUING'):
saved_rq = rq
# see if there is a temporary stored request
Trace.trace(self.trace_level+10,"next_work_any_volume: using exceeded mover limit request")
rq, self.postponed_sg = self.postponed_requests.get()
Trace.trace(self.trace_level+10,"next_work_any_volume: get from postponed %s"%(rq,))
if rq:
self.postponed_rq = 1 # request comes from postponed requests list
# check postponed request
if rq.work == "read_from_hsm":
rq, key = self.process_read_request(rq, requestor)
Trace.trace(self.trace_level+10,"next_work_any_volume: process_read_request for postponed returned %s %s" %
(rq, key))
elif rq.work == "write_to_hsm":
rq, key = self.process_write_request(rq, requestor)
Trace.trace(self.trace_level+10,"next_work_any_volume: process_write_request for postponed returned %s %s" %
(rq, key))
else:
if saved_rq:
rq = saved_rq
if rq.ticket.has_key('reject_reason'):
del rq.ticket['reject_reason']
Trace.trace(self.trace_level+10,"next_work_any_volume: proceed with rejected %s"%(rq,))
elif self.tmp_rq:
rq = self.tmp_rq
Trace.trace(self.trace_level+10,"next_work_any_volume: get from tmp_rq %s"%(rq,))
if rq.work == "write_to_hsm":
rq, key = self.process_write_request(rq, requestor)
Trace.trace(self.trace_level+10, "next_work_any_volume: tmp_rq %s %s"%(rq.ticket, key))
# check if this volume is ok to work with
if rq:
w = rq.ticket
if w["status"][0] == e_errors.OK:
if self.mover_type(requestor) == 'DiskMover':
key = 'vc' if (w['work'] == 'read_from_hsm') else 'fc'
label = w[key]['external_label']
ret = self.is_disk_vol_available(rq.work,label, requestor)
else:
fsize = w['wrapper'].get('size_bytes', 0L)
method = w.get('method', None)
if method and method != "read_tape_start":
# size has a meaning only for general rq
fsize = fsize+self.min_file_size
try:
start_t=time.time()
ret = self.is_vol_available(rq.work,
w['fc']['external_label'],
w["vc"]["volume_family"],
fsize,
w['vc']['address'],
mover = requestor.get('mover'),
override_notallowed = bool(w.get("override_notallowed",0)))
Trace.trace(100, "next_work_any_volume: vcc.is_vol_available, time in state %s"%(time.time()-start_t, ))
except KeyError, msg:
ret = w
ret['status'] = (e_errors.ERROR, "KeyError")
Trace.log(e_errors.ERROR, "Keyerror calling is_vol_available %s %s"%(w, msg))
return (None, (e_errors.NOWORK, None))
if ret['status'][0] != e_errors.OK:
if ret['status'][0] == e_errors.BROKEN:
if self.lm_lock != e_errors.BROKEN:
Trace.alarm(e_errors.ERROR,"LM %s goes to %s state" %
(self.name, e_errors.BROKEN))
self.lm_lock = e_errors.BROKEN
return None, (e_errors.NOWORK, None)
Trace.trace(self.trace_level+10,"next_work_any_volume: work can not be done at this volume %s"%(ret,))
#w['status'] = ret['status']
if not (ret['status'][0] == e_errors.VOL_SET_TO_FULL or
ret['status'][0] == 'full' or
ret['status'][0] == e_errors.MEDIA_IN_ANOTHER_DEVICE):
w['status'] = ret['status']
self.pending_work.delete(rq)
self.send_regret(w)
Trace.log(e_errors.ERROR,
"next_work_any_volume: cannot do the work for %s status:%s" %
(rq.ticket['fc']['external_label'], rq.ticket['status'][0]))
return (None, (e_errors.NOWORK, None))
else:
if (w['work'] == 'write_to_hsm' and
(w['status'][0] == e_errors.VOL_SET_TO_FULL or
w['status'][0] == 'full')):
return None, (e_errors.NOWORK, None)
return (rq, rq.ticket['status'])
return (None, (e_errors.NOWORK, None))
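# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): a greatly simplified
# version of the fallback order used by next_work_any_volume() above when the
# regular pending-work scan comes up empty -- first a request postponed
# because its storage group exceeded the mover limit, then the temporarily
# stored fair-share candidate (tmp_rq).  The restore of a provisionally
# rejected regular request is omitted here.
def _sketch_pick_request(regular_rq, postponed_rq, tmp_rq):
    if regular_rq is not None:
        return regular_rq, 'pending'
    if postponed_rq is not None:
        return postponed_rq, 'postponed'
    if tmp_rq is not None:
        return tmp_rq, 'tmp_rq'
    return None, 'nowork'


assert _sketch_pick_request('r1', 'p1', 't1') == ('r1', 'pending')
assert _sketch_pick_request(None, 'p1', 't1') == ('p1', 'postponed')
assert _sketch_pick_request(None, None, 't1') == ('t1', 'tmp_rq')
assert _sketch_pick_request(None, None, None) == (None, 'nowork')
# ---------------------------------------------------------------------------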
def schedule(self, mover):
"""
What is next on our list of work?
:type mover: :obj:`dict`
:arg mover: mover ticket
:rtype: :obj:`tuple` (:obj:`manage_queue.Request` - request or :obj:`None`,
:obj:`tuple` - (error, :obj:`str` or :obj:`None`) - status)
"""
while 1:
rq, status = self.next_work_any_volume(mover)
if (status[0] == e_errors.OK or
status[0] == e_errors.NOWORK):
if rq and rq.ticket.has_key('reject_reason') and rq.ticket['reject_reason'][0] == "RESTRICTED_ACCESS":
Trace.trace(self.trace_level, "schedule: This request should not get here %s"%(rq,))
status = (e_errors.NOWORK, None)
rq = None
return rq, status
# some sort of error, like write work and no volume available
# so bounce. status is already bad...
self.pending_work.delete(rq)
self.send_regret(rq.ticket)
Trace.log(e_errors.INFO,"schedule: Error detected %s" % (rq.ticket,))
return None, status
def check_write_request(self, external_label, rq, requestor):
"""
Check if write request can be sent to the mover.
:type external_label: :obj:`str`
:arg external_label: label of the volume to check
:type rq: :obj:`manage_queue.Request`
:arg rq: request to process
:type requestor: :obj:`dict`
:arg requestor: mover ticket
:rtype: :obj:`tuple` (:obj:`manage_queue.Request` - request or :obj:`None`,
:obj:`str` - key to check next or :obj:`None`)
"""
Trace.trace(self.trace_level, "check_write_request: label %s rq %s requestor %s"%
(external_label, rq, requestor))
if self.mover_type(requestor) == 'DiskMover':
ret = self.is_disk_vol_available(rq.work, external_label, requestor)
else:
vol_veto_list, wr_en = self.busy_volumes(rq.ticket['vc']['volume_family'])
Trace.trace(self.trace_level+11, "check_write_request: vet_list %s wr_en %s"%(vol_veto_list, wr_en))
label = rq.ticket['fc'].get('external_label', external_label)
if label != external_label:
# this is a case with admin priority
# process it carefully
# check if tape is already mounted somewhere
if label in vol_veto_list:
rq.ticket["reject_reason"] = ("VOLS_IN_WORK","")
Trace.trace(self.trace_level+11, "check_write_request: request for volume %s rejected %s. Mounted somewhere else"%
(external_label, rq.ticket["reject_reason"]))
rq.ticket['status'] = ("VOLS_IN_WORK",None)
return rq, rq.ticket['status']
external_label = label
Trace.trace(self.trace_level+11, "check_write_request %s %s"%(external_label, rq.ticket))
if wr_en >= rq.ticket["vc"]["file_family_width"]:
if (not external_label in vol_veto_list) and (wr_en > rq.ticket["vc"]["file_family_width"]):
#!/usr/bin/python
#-*-coding: utf-8 -*-
'''
Axile -- Nervures paraglider design/simulation tool
ParamGeneraux class
@author: <NAME>
@copyright: 2013 Nervures. All rights reserved.
@license: LGPL
@contact: <EMAIL>
@deffield creation: 08 Jan 2013
__updated__ = "2019-02-06"
'''
import sys, os
from spleen.utilitaires.utilitairesdivers import debug, rdebug, stack
from spleen.preferences import ProfilPrefs
class ProfsParamNew(object):
u'''
Attributes:
----------
* Modifiable:
self.nptprof => number of profile points
self.iouvext, self.iouvint => opening (air-intake) indices
self.iba => leading-edge (BA) index = the point farthest from the trailing edge (BF)
self.nbpbf => number of points reserved at the trailing edge (BF) for darts ("pinces")
self.pourcentbf => % of chord at the BF taken up by the nbpbf reserved points
* NOT modifiable: the others
self.nptint => number of lower-surface (intrados) points
self.nptext => number of upper-surface (extrados) points
self.nptret => number of "return" points, INCLUDING the BA point (BA -> iouvext)
self.copy (property) returns a clone of self.
Methods & examples:
-------------------
>>> pp1 = ProfsParam(10, 6, 7)
>>> pp2 = ProfsParam(11, 5, 7)
>>> pp1 == pp2 # test whether two profparams are identical
False
>>> pp1 != pp2 # test whether two profparams differ
True
>>> print pp1
nb points [ext+int] : 16=[10+6]; iouverture=(9,10); iba=7, nb points retour=3
'''
# def __init__(self, npe=0, npi=0, iba=0):
def __init__(self, nptprof=0, iouvext=0, iouvint=0, iba=0):
"""Depending on when the project was created and where it is invoked from,
the instantiation can be done in several ways:
- ProfsParamNew(pp) where pp can be
* a dict {'nptprof':nptprof, 'iouvext':iouvext, 'iouvint':iouvint, 'iba':iba}
* a tuple pp=(nptprof, iouvext, iouvint, iba)
* a list pp=[nptprof, iouvext, iouvint, iba]
* a str or unicode or QString
- ProfsParamNew(nptprof, [iouvext, [iouvint, [iba]]])
"""
raise TypeError("Interdit d'instancier un ProfParamNew")
if isinstance(nptprof, (tuple,list)):
raise TypeError('Attention, il faut probablement ecrire pp=ProfsParamNew(*liste), au lieu de pp=ProfsParamNew(liste)')
elif isinstance(nptprof, (ProfsParam, ProfsParamNew)) :
raise TypeError('Attention, il faut probablement ecrire pp=x, au lieu de pp=ProfsParamNew(x)')
elif isinstance(nptprof, dict) :
raise TypeError('Attention, il faut probablement ecrire pp=ProfsParamNew(**x), au lieu de pp=ProfsParamNew(x)')
# elif isinstance(nptprof, (str, QString, unicode)) :
# raise TypeError('Attention, il faut probablement ecrire pp=ProfsParamNew(**x), au lieu de pp=ProfsParamNew(*x)')
self._nptprof = nptprof
self._iba = iba
self.iouverture = ((iouvext,iouvint))
# self._iouvext = iouvext
# self._iouvint = iouvint
def __eq__(self, other):
return self.nptprof == other.nptprof and\
self.iouvext == other.iouvext and\
self.iouvint == other.iouvint and\
self.iba==other.iba
def __ne__(self, other):
return not self.__eq__(other)
def castToProfsParam1(self):
""""""
return ProfsParam1(self.nptprof, self.iouvext, self.iouvint, self.iba,
ProfilPrefs.nbpbf, ProfilPrefs.pourcentbf)
def isSuspect(self):
suspect = [
self.nptext <= 0,
self.nptint <= 0,
self.iouvint <= 0,
self.iouvext <= 0,
self.nptret <= 0,#opening on the upper surface (extrados)
self.nptouv < 0,#overlap
self.iouvint < self.iouvext,#overlap
self.nptouv > self.nptprof,
self.iba >= self.nptprof,
self.iba <= 0
]
s = False
for k, v in enumerate(suspect) :
s = s or v
return s
def isForbiden(self):
forbiden = [
self.nptext <= 0,
self.nptint <= 0,
self.nptouv < 0,
self.iouvint <= 0,
self.iouvext <= 0,
self.nptret <= 0,
self.iba <= 0,
self.iouvint < self.iouvext,
self.nptouv > self.nptprof,
self.iba >= self.nptprof,
]
f = False
for v in forbiden :
f = f or v
return f
@property
def nptext(self):
return self.iouvext + 1
@property
def nptint(self):
return self.nptprof - self.iouvint
@property
def nptexttheorique(self):
return self.iba + 1
@property
def nptinttheorique(self):
return self.nptprof - self.iba
@property
def nptouv(self):# number of points in the opening, NOT counting the endpoints
return self.nptprof - self.nptext - self.nptint
@property
def nptret(self):# number of "return" points, INCLUDING the BA point and ouvext
return self.iouvext - self.iba + 1
@property
def iouverture(self):
return self.iouvext, self.iouvint
@iouverture.setter
def iouverture(self, (ie, ii)):
u"""
To avoid mistakes, the values of iouvext and iouvint can only be modified via
>>> iouverture = (ke, ki)
"""
# debug( iouvext=ie, iouvint=ii)
if ie < self.iba or ii < self.iba :
rdebug('Attention, ouverture (partiellement) sur l\'extrados, l\'echantillonnage est impossible (TODO)')
self._iouvext, self._iouvint = ie, ii
elif 0 <= ie <= ii <= self.nptprof :
self._iouvext, self._iouvint = ie, ii
else:
rdebug('Valeurs incorrectes, on devrait avoir 0 <= iouvext(=%d) <= iouvint(=%d) <= nptprof(=%d)'%(ie, ii, self.nptprof))
self._iouvext, self._iouvint = ie, ii
# raise ValueError('Valeurs incorrectes, on devrait avoir 0 <= iouvext(=%d) <= iouvint(=%d) <= nptprof(=%d)'%(ie, ii, self.nptprof))
@property
def iouvext(self):
return self._iouvext
@iouvext.setter
def iouvext(self, k):
raise NotImplementedError("On ne peut pas modifier iouvext seul.\
On doit modifier les deux points de l'ouverture en utilisant 'iouverture = (ke, ki)'.")
@property
def iouvint(self):
return self._iouvint
@iouvint.setter
def iouvint(self, k):
raise NotImplementedError("On ne peut pas modifier iouvint seul.\
On doit modifier les deux points de l'ouverture en utilisant 'iouverture = (ke, ki)'.")
@property
def nptprof(self):
return self._nptprof
@nptprof.setter
def nptprof(self, npt):
raise ValueError("c'est ici !!! : npt=%d"%npt)
@property
def iba(self):
# rdebug( "BUG : iba peut varier d'un profil a l'autre")
return self._iba
@iba.setter
def iba(self, k):
self._iba = k
# raise NotImplementedError
############################################################
@property
def copy(self):
return ProfsParamNew(self.nptprof, self.iouvext, self.iouvint, self.iba)
@property
def info(self):
return ['<%s>'%self.__class__.__name__,
' nb points profil = %d'%(self.nptprof),
' nb points extrados tissu = %d'%(self.nptext),
' nb points extrados theorique = %d'%(self.nptexttheorique),
' indice BA = %d'%(self.iba),
' nb points retour = %d'%(self.nptret),
' indices ouverture = (%d,%d)'%self.iouverture,
' nb points ouverture = %d'%(self.nptouv),
' nb points intrados tissu = %d'%(self.nptint),
' nb points intrados theorique = %d'%(self.nptinttheorique),
]
def shortinfo(self):
# return ' <%s> [nptprof=%d, iouverture=%s, iba=%d]'%(self.__class__.__name__, self.nptprof, str(self.iouverture), self.iba, )
return ' [nptprof=%d, iouverture=%s, iba=%d]'%(self.nptprof, str(self.iouverture), self.iba, )
def __str__(self):
return self.shortinfo()
return '\n'.join(self.info)
@property
def dump(self):
return self.toDump()
def toDump(self):
todump = {'nptprof': self.nptprof, 'iouvext':self.iouvext,
'iouvint':self.iouvint, 'iba':self.iba}
return todump
def load(self,dump):
self.__init__(**dump)
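# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): the derived point
# counts documented in the class docstring above, checked on the values used
# in its example output (nptprof=16, iouverture=(9, 10), iba=7).
def _sketch_derived_counts(nptprof, iouvext, iouvint, iba):
    nptext = iouvext + 1                 # upper-surface (extrados) points
    nptint = nptprof - iouvint           # lower-surface (intrados) points
    nptouv = nptprof - nptext - nptint   # points inside the opening
    nptret = iouvext - iba + 1           # "return" points, BA point included
    return nptext, nptint, nptouv, nptret


assert _sketch_derived_counts(16, 9, 10, 7) == (10, 6, 0, 3)
# ---------------------------------------------------------------------------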
class ProfsParam1(object):
u'''
Attributes:
----------
* Modifiable:
self.nptprof => number of profile points
self.iouvext, self.iouvint => opening (air-intake) indices
self.iba => leading-edge (BA) index = the point farthest from the trailing edge (BF)
self.nbpbf => number of points reserved at the trailing edge (BF) for darts ("pinces")
self.pourcentbf => % of chord at the BF taken up by the nbpbf reserved points
* NOT modifiable: the others
self.nptint => number of lower-surface (intrados) points
self.nptext => number of upper-surface (extrados) points
self.nptret => number of "return" points, INCLUDING the BA point (BA -> iouvext)
self.copy (property) returns a clone of self.
Methods & examples:
-------------------
>>> pp1 = ProfsParam1(10, 6, 7)
>>> pp2 = ProfsParam1(11, 5, 7)
>>> pp1 == pp2 # test whether two profparams are identical
False
>>> pp1 != pp2 # test whether two profparams differ
True
>>> print pp1
nb points [ext+int] : 16=[10+6]; iouverture=(9,10); iba=7, nb points retour=3
'''
# def __init__(self, npe=0, npi=0, iba=0):
def __init__(self, nptprof=0, iouvext=0, iouvint=0, iba=0,
nbpbf=0, pourcentbf=0.0):
"""Depending on when the project was created and where it is invoked from,
the instantiation can be done in several ways:
- ProfsParam1(pp) where pp can be
* a dict {'nptprof':nptprof, 'iouvext':iouvext, 'iouvint':iouvint,
'iba':iba, 'nbpbf':nbpbf, 'pourcentbf':pourcentbf}
* a tuple pp=(nptprof, iouvext, iouvint, iba, [nbpbf, [pourcentbf]])
* a list pp=[nptprof, iouvext, iouvint, iba, [nbpbf, [pourcentbf]]]
* a str or unicode or QString
- ProfsParam1(nptprof, [iouvext, [iouvint, [iba, [nbpbf, [pourcentbf]]]]])
"""
if isinstance(nptprof, (tuple,list)):
raise TypeError('Attention, il faut probablement ecrire pp=ProfsParam1(*liste), au lieu de pp=ProfsParam1(liste)')
elif isinstance(nptprof, (ProfsParam, ProfsParam1)) :
raise TypeError('Attention, il faut probablement ecrire pp=x, au lieu de pp=ProfsParam1(x)')
elif isinstance(nptprof, dict) :
raise TypeError('Attention, il faut probablement ecrire pp=ProfsParam1(**x), au lieu de pp=ProfsParam1(x)')
# elif isinstance(nptprof, (str, QString, unicode)) :
# raise TypeError('Attention, il faut probablement ecrire pp=ProfsParam1(**x), au lieu de pp=ProfsParam1(*x)')
self._nptprof = nptprof
self._iba = iba
self.iouverture = ((iouvext,iouvint))
try :
self.nbpbf = nbpbf
except AttributeError :
self.nbpbf = ProfilPrefs.nbpbf
try :
self.pourcentbf = pourcentbf
except AttributeError :
self.pourcentbf = ProfilPrefs.pourcentbf
# self._iouvext = iouvext
# self._iouvint = iouvint
def __eq__(self, other):
return self.nptprof == other.nptprof and\
self.iouvext == other.iouvext and\
self.iouvint == other.iouvint and\
self.iba == other.iba and\
self.nbpbf == other.nbpbf and\
self.pourcentbf == other.pourcentbf
def __ne__(self, other):
return not self.__eq__(other)
def castToProfsParam1(self):
"""For backward compatibility."""
return self
def isSuspect(self):
suspect = [
self.nptext <= 0,
self.nptint <= 0,
self.iouvint <= 0,
self.iouvext <= 0,
self.nptret <= 0,#opening on the upper surface (extrados)
self.nptouv < 0,#overlap
self.iouvint < self.iouvext,#overlap
self.nptouv > self.nptprof,
self.iba >= self.nptprof,
self.iba <= 0,
self.nbpbf >= self.nptext,
self.nbpbf >= self.nptint
]
msgs = [
'nptext=%-2d <= 0'%self.nptext,
'nptint=%-2d <= 0'%self.nptint,
'iouvint=%-2d <= 0'%self.iouvint,
'iouvext=%-2d
# repository: rboixaderg/guillotina
from collections import namedtuple
from guillotina import configure
from guillotina import schema
from guillotina.component import get_adapter
from guillotina.component import query_adapter
from guillotina.exceptions import ValueDeserializationError
from guillotina.fields.interfaces import IPatchField
from guillotina.fields.interfaces import IPatchFieldOperation
from guillotina.interfaces import IJSONToValue
from guillotina.schema.interfaces import IArrayJSONField
from guillotina.schema.interfaces import IDict
from guillotina.schema.interfaces import IInt
from guillotina.schema.interfaces import IList
from guillotina.schema.interfaces import IObjectJSONField
from guillotina.schema.interfaces import ITuple
from guillotina.utils import apply_coroutine
from zope.interface import implementer
@implementer(IPatchField)
class PatchField(schema.Field):
operation_type = IPatchFieldOperation
def __init__(self, field, max_ops=None, *args, **kwargs):
super().__init__(*args, **kwargs)
self.field = field
self._bound_field = kwargs.pop("bound_field", None)
self.required = field.required
self.max_ops = self.field.max_ops = max_ops
@property
def bound_field(self):
if self._bound_field is None:
bound = self.field.bind(self.field.context)
bound.__name__ = self.__name__
return bound
return self._bound_field
async def set(self, obj, value):
self.field.__name__ = self.__name__
await apply_coroutine(self.bound_field.set, obj, value)
obj.register()
def bind(self, object):
bound = super().bind(object)
bound.field = self.field
bound._bound_field = self.field.bind(object)
bound._bound_field.__name__ = self.__name__
return bound
def validate(self, value):
return self.bound_field.validate(value)
@configure.value_deserializer(IPatchField)
def field_converter(field, value, context):
field.field.__name__ = field.__name__
if isinstance(value, dict) and "op" in value:
if not isinstance(value, dict):
raise ValueDeserializationError(field, value, "Not an object")
operation_name = value.get("op", "undefined")
if operation_name == "multi":
operation = query_adapter(field, field.operation_type, name=operation_name)
if operation is None:
raise ValueDeserializationError(field, value, f'"{operation_name}" not a valid operation')
value = operation(context, value.get("value"))
else:
bound_field = field.field.bind(context)
operation = query_adapter(bound_field, field.operation_type, name=operation_name)
if operation is None:
raise ValueDeserializationError(field, value, f'"{operation_name}" not a valid operation')
value = operation(context, value.get("value"))
elif isinstance(value, (dict, list)):
value = get_adapter(field.field, IJSONToValue, args=[value, context])
return value
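# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): the wire format that
# field_converter() above dispatches on.  A patch value for a PatchField is a
# dict carrying an "op" name and an op-specific "value"; the op name selects
# an IPatchFieldOperation adapter registered for the underlying field type.
# The dispatch table below is a simplified stand-in for the adapter registry,
# covering only the list "append" and "del" operations.
def _sketch_append(existing, value):
    existing = list(existing or [])
    existing.append(value)
    return existing


def _sketch_del(existing, index):
    existing = list(existing or [])
    del existing[index]
    return existing


_SKETCH_LIST_OPS = {"append": _sketch_append, "del": _sketch_del}


def _sketch_apply(existing, patch):
    return _SKETCH_LIST_OPS[patch["op"]](existing, patch["value"])


assert _sketch_apply(["a"], {"op": "append", "value": "b"}) == ["a", "b"]
assert _sketch_apply(["a", "b"], {"op": "del", "value": 0}) == ["b"]
# ---------------------------------------------------------------------------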
@configure.adapter(for_=IPatchField, provides=IPatchFieldOperation, name="multi")
class MultiPatch:
def __init__(self, field):
super().__init__()
self.field = field
def __call__(self, context, value):
if self.field.max_ops and len(value) > self.field.max_ops:
raise ValueDeserializationError(
self.field, value, f"Exceeded max allowed operations for field: {self.field.max_ops}"
)
bound_field = self.field.field.bind(context)
resulting_value = None
for op in value:
if not isinstance(op, dict) or "op" not in op:
raise ValueDeserializationError(self.field, value, f"{op} not a valid operation")
resulting_value = field_converter(self.field, op, context)
bound_field.set(context, resulting_value)
return resulting_value
@configure.adapter(for_=IList, provides=IPatchFieldOperation, name="append")
class PatchListAppend:
def __init__(self, field):
super().__init__()
self.field = field
def get_value(self, value, existing=None, field_type=None):
if field_type is None:
if self.field.value_type:
field_type = self.field.value_type
if field_type:
field_type.__name__ = self.field.__name__
# for sub objects, we need to assign temp object type
# to work with json schema correctly
valid_type = namedtuple("temp_assign_type", [self.field.__name__])
ob = valid_type(**{field_type.__name__: existing})
value = get_adapter(field_type, IJSONToValue, args=[value, ob])
return value
def do_operation(self, existing, value):
existing.append(value)
return existing
def __call__(self, context, value):
value = self.get_value(value, None)
if self.field.value_type:
self.field.value_type.validate(value)
existing = self.field.query(context)
if existing is None:
existing = self.field.missing_value or []
return self.do_operation(existing, value)
@configure.adapter(for_=IList, provides=IPatchFieldOperation, name="appendunique")
class PatchListAppendUnique(PatchListAppend):
def do_operation(self, existing, value):
if value not in existing:
existing.append(value)
return existing
@configure.adapter(for_=IList, provides=IPatchFieldOperation, name="clear")
class PatchListClear(PatchListAppend):
def __call__(self, context, value):
return []
@configure.adapter(for_=ITuple, provides=IPatchFieldOperation, name="append")
class PatchTupleAppend(PatchListAppend):
def do_operation(self, existing, value):
return tuple(super().do_operation(list(existing), value))
@configure.adapter(for_=ITuple, provides=IPatchFieldOperation, name="appendunique")
class PatchTupleAppendUnique(PatchListAppendUnique):
def do_operation(self, existing, value):
return tuple(super().do_operation(list(existing), value))
@configure.adapter(for_=ITuple, provides=IPatchFieldOperation, name="clear")
class PatchTupleClear(PatchListClear):
def __call__(self, context, value):
return ()
@configure.adapter(for_=IList, provides=IPatchFieldOperation, name="extend")
class PatchListExtend(PatchListAppend):
def do_operation(self, existing, value):
existing.extend(value)
return existing
def __call__(self, context, value):
existing = self.field.query(context)
if existing is None:
existing = self.field.missing_value or []
if not isinstance(value, list): # pragma: no cover
raise ValueDeserializationError(self.field, value, "Not valid list")
if self.field.max_ops and len(value) > self.field.max_ops:
raise ValueDeserializationError(
self.field, value, f"Exceeded max allowed operations for field: {self.field.max_ops}"
)
values = []
for item in value:
if self.field.value_type:
item_value = self.get_value(item, None, field_type=self.field.value_type)
self.field.value_type.validate(item_value)
values.append(item_value)
return self.do_operation(existing, values)
@configure.adapter(for_=IList, provides=IPatchFieldOperation, name="extendunique")
class PatchListExtendUnique(PatchListExtend):
def do_operation(self, existing, value):
for item in value:
if item not in existing:
existing.append(item)
return existing
@configure.adapter(for_=ITuple, provides=IPatchFieldOperation, name="extend")
class PatchTupleExtend(PatchListExtend):
def do_operation(self, existing, value):
return tuple(super().do_operation(list(existing), value))
@configure.adapter(for_=ITuple, provides=IPatchFieldOperation, name="extendunique")
class PatchTupleExtendUnique(PatchListExtendUnique):
def do_operation(self, existing, value):
return tuple(super().do_operation(list(existing), value))
@configure.adapter(for_=IList, provides=IPatchFieldOperation, name="del")
class PatchListDel(PatchListAppend):
def do_operation(self, existing, value):
try:
del existing[value]
except (IndexError, TypeError): # pragma: no cover
raise ValueDeserializationError(self.field, value, "Not valid index value")
return existing
def __call__(self, context, value):
existing = self.field.query(context) or {}
return self.do_operation(existing, value)
@configure.adapter(for_=ITuple, provides=IPatchFieldOperation, name="del")
class PatchTupleDel(PatchListDel):
def do_operation(self, existing, value):
return tuple(super().do_operation(list(existing), value))
@configure.adapter(for_=IList, provides=IPatchFieldOperation, name="remove")
class PatchListRemove(PatchListAppend):
def do_operation(self, existing, value):
try:
existing.remove(value)
except ValueError:
raise ValueDeserializationError(self.field, value, "{} not in value".format(value))
return existing
def __call__(self, context, value):
existing = self.field.query(context) or {}
return self.do_operation(existing, value)
@configure.adapter(for_=ITuple, provides=IPatchFieldOperation, name="remove")
class PatchTupleRemove(PatchListRemove):
def do_operation(self, existing, value):
return tuple(super().do_operation(list(existing), value))
@configure.adapter(for_=IList, provides=IPatchFieldOperation, name="update")
class PatchListUpdate(PatchListAppend):
def do_operation(self, existing, index, result_value):
existing[index] = result_value
return existing
def __call__(self, context, value):
if "index" not in value or "value" not in value:
raise ValueDeserializationError(self.field, value, "Not valid patch value")
existing = self.field.query(context) or {}
try:
existing_item = existing[value["index"]]
except IndexError:
existing_item = None
result_value = self.get_value(value["value"], existing_item)
if self.field.value_type:
self.field.value_type.validate(result_value)
return self.do_operation(existing, value["index"], result_value)
@configure.adapter(for_=ITuple, provides=IPatchFieldOperation, name="update")
class PatchTupleUpdate(PatchListUpdate):
def do_operation(self, existing, index, result_value):
return tuple(super().do_operation(list(existing), index, result_value))
@configure.adapter(for_=IDict, provides=IPatchFieldOperation, name="assign")
class PatchDictSet(PatchListAppend):
def __call__(self, context, value):
if "key" not in value or "value" not in value:
raise ValueDeserializationError(self.field, value, "Not valid patch value")
existing = self.field.query(context)
if existing is None:
existing = self.field.missing_value or {}
existing_item = existing.get(value["key"])
new_value = self.get_value(value["value"], existing_item)
if self.field.key_type:
self.field.key_type.validate(value["key"])
if self.field.value_type:
self.field.value_type.validate(new_value)
existing[value["key"]] = new_value
return existing
@configure.adapter(for_=IDict, provides=IPatchFieldOperation, name="update")
class PatchDictUpdate(PatchListAppend):
def __call__(self, context, value):
if not isinstance(value, list):
raise ValueDeserializationError(
self.field, value, f"Invalid type patch data, must be list of updates"
)
existing = self.field.query(context)
if existing is None:
existing = self.field.missing_value or {}
if self.field.max_ops and len(value) > self.field.max_ops:
raise ValueDeserializationError(
self.field, value, f"Exceeded max allowed operations for field: {self.field.max_ops}"
)
for item in value:
if "key" not in item or "value" not in item:
raise ValueDeserializationError(self.field, value, "Not valid patch value")
existing_item = existing.get(item["key"])
new_value = self.get_value(item["value"], existing_item)
if self.field.key_type:
self.field.key_type.validate(item["key"])
if self.field.value_type:
self.field.value_type.validate(new_value)
existing[item["key"]] = new_value
return existing
@configure.adapter(for_=IDict, provides=IPatchFieldOperation, name="del")
class PatchDictDel(PatchListAppend):
def __call__(self, context, value):
if self.field.key_type:
self.field.key_type.validate(value)
existing = self.field.query(context)
try:
del existing[value]
except (IndexError, KeyError, TypeError):
raise ValueDeserializationError(self.field, value, "Not valid index value")
return existing
@configure.adapter(for_=IDict, provides=IPatchFieldOperation, name="clear")
class PatchDictClear(PatchListAppend):
def __call__(self, context, value):
return {}
class BasePatchIntOperation:
def __init__(self, field):
super().__init__()
self.field = field
@configure.adapter(for_=IInt, provides=IPatchFieldOperation, name="inc")
class PatchIntIncrement(BasePatchIntOperation):
def __call__(self, context, value):
if value:
self.field.validate(value)
# Increment one by default
to_increment = value or 1
existing = self.field.query(context)
if existing is None:
# Get default value or assume 0
existing = self.field.default or 0
return existing + to_increment
@configure.adapter(for_=IInt, provides=IPatchFieldOperation, name="dec")
class PatchIntDecrement(BasePatchIntOperation):
def __call__(self, context, value):
if value:
self.field.validate(value)
# Decrement one by default
to_decrement = value or 1
existing = self.field.query(context)
if existing is None:
# Get default value or assume 0
existing = self.field.default or 0
return existing - to_decrement
@configure.adapter(for_=IInt, provides=IPatchFieldOperation, name="reset")
class PatchIntReset(BasePatchIntOperation):
def __call__(self, context, value):
# This will reset to the passed value or to the field's
# default (if set) or 0.
if value:
self.field.validate(value)
return value or self.field.default or 0
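# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): the semantics of the
# "inc", "dec" and "reset" integer patch operations defined above, with the
# field's stored value and default modelled as plain arguments.
def _sketch_int_patch(op, existing, value=None, default=None):
    base = existing if existing is not None else (default or 0)
    if op == "inc":
        return base + (value or 1)
    if op == "dec":
        return base - (value or 1)
    if op == "reset":
        return value or default or 0
    raise ValueError("unknown op: %s" % op)


assert _sketch_int_patch("inc", None, default=10) == 11   # default value plus one
assert _sketch_int_patch("inc", 5, value=3) == 8
assert _sketch_int_patch("dec", 5) == 4
assert _sketch_int_patch("reset", 5, default=2) == 2
# ---------------------------------------------------------------------------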
@configure.adapter(for_=IArrayJSONField, provides=IPatchFieldOperation, name="append")
class PatchJSONArrayFieldAppend(PatchListAppend):
def do_operation(self, existing, value):
existing.append(value)
return existing
def __call__(self, context, value):
existing = self.field.query(context)
if existing is None:
existing = self.field.missing_value or []
return self.do_operation(existing, value)
@configure.adapter(for_=IArrayJSONField, provides=IPatchFieldOperation, name="appendunique")
class PatchJSONAppendUnique(PatchJSONArrayFieldAppend):
def do_operation(self, existing, value):
if value not in existing:
existing.append(value)
return existing
@configure.adapter(for_=IArrayJSONField, provides=IPatchFieldOperation, name="clear")
class PatchJSONArrayClear(PatchJSONArrayFieldAppend):
def __call__(self, context, value):
return []
@configure.adapter(for_=IObjectJSONField, provides=IPatchFieldOperation, name="clear")
class PatchJSONObjectClear(PatchJSONArrayFieldAppend):
def __call__(self, context, value):
return {}
@configure.adapter(for_=IArrayJSONField, provides=IPatchFieldOperation, name="extend")
class PatchJSONExtend(PatchJSONArrayFieldAppend):
def do_operation(self, existing, value):
existing.extend(value)
return existing
def __call__(self, context, value):
existing = self.field.query(context)
if existing is None:
existing = self.field.missing_value or []
if not isinstance(value, list): # pragma: no cover
raise ValueDeserializationError(self.field, value, "Not valid list")
if self.field.max_ops and len(value) > self.field.max_ops:
raise ValueDeserializationError(
self.field, value, f"Exceeded max allowed operations for field: {self.field.max_ops}"
)
return self.do_operation(existing, value)
@configure.adapter(for_=IArrayJSONField, provides=IPatchFieldOperation, name="assign")
class PatchJSONAssign(PatchJSONArrayFieldAppend):
def __call__(self, context, value):
if "key" not in value or "value" not in value:
raise ValueDeserializationError(self.field, value, "Not valid patch value")
existing = self.field.query(context)
if existing is None:
existing = self.field.missing_value or {}
existing[value["key"]] = value["value"]
return existing
@configure.adapter(for_=IObjectJSONField, provides=IPatchFieldOperation, name="assign")
class PatchJSONObjetAssign(PatchJSONArrayFieldAppend):
def __call__(self, context, value):
if "key" not in value or "value" not in value:
raise ValueDeserializationError(self.field, value, "Not valid patch value")
existing = self.field.query(context)
if existing is None:
existing = self.field.missing_value or {}
existing[value["key"]] = value["value"]
return existing
@configure.adapter(for_=IObjectJSONField, provides=IPatchFieldOperation, name="update")
class PatchJSONObjetUpdate(PatchJSONObjetAssign):
def __call__(self, context, value):
if not isinstance(value, list):
raise ValueDeserializationError(
self.field, value, f"Invalid type patch data, must be list of updates"
)
existing = self.field.query(context)
if existing is None:
existing = self.field.missing_value or {}
if self.field.max_ops and len(value) > self.field.max_ops:
raise ValueDeserializationError(
self.field, value,
#end def read_text
def write_text(self):
c=''
if self.filetype=='xsf': # only write structure/datagrid if present
if self.periodicity=='molecule' and 'elem' in self:
c += self.write_coord()
elif 'primvec' in self:
c += ' {0}\n'.format(self.periodicity.upper())
c += self.write_vec('primvec',self.primvec)
if 'convvec' in self:
c += self.write_vec('convvec',self.convvec)
#end if
if 'elem' in self:
c+= self.write_coord()
#end if
#end if
if 'data' in self:
c += self.write_data()
#end if
elif self.filetype=='axsf': # only write image structures
c += ' ANIMSTEPS {0}\n'.format(self.animsteps)
if self.periodicity!='molecule':
c += ' {0}\n'.format(self.periodicity.upper())
#end if
if 'primvec' in self:
c += self.write_vec('primvec',self.primvec)
#end if
if 'convvec' in self:
c += self.write_vec('convvec',self.convvec)
#end if
for i in range(1,len(self.images)+1):
image = self.images[i]
if 'primvec' in image:
c += self.write_vec('primvec',image.primvec,i)
#end if
if 'convvec' in image:
c += self.write_vec('convvec',image.convvec,i)
#end if
c += self.write_coord(image,i)
#end for
elif self.filetype=='bxsf': # only write bandgrid
c += self.write_band()
#end if
return c
#end def write_text
def write_coord(self,image=None,index=''):
if image is None:
s = self
else:
s = image
#end if
c = ''
if self.periodicity=='molecule':
c += ' ATOMS {0}\n'.format(index)
else:
c += ' PRIMCOORD {0}\n'.format(index)
c += ' {0} 1\n'.format(len(s.elem))
if not 'force' in s:
for i in range(len(s.elem)):
r = s.pos[i]
c += ' {0:>3} {1:12.8f} {2:12.8f} {3:12.8f}\n'.format(s.elem[i],r[0],r[1],r[2])
#end for
else:
for i in range(len(s.elem)):
r = s.pos[i]
f = s.force[i]
c += ' {0:>3} {1:12.8f} {2:12.8f} {3:12.8f} {4:12.8f} {5:12.8f} {6:12.8f}\n'.format(s.elem[i],r[0],r[1],r[2],f[0],f[1],f[2])
#end for
#end if
return c
#end def write_coord
def write_vec(self,name,vec,index=''):
c = ' {0} {1}\n'.format(name.upper(),index)
for v in vec:
c += ' {0:12.8f} {1:12.8f} {2:12.8f}\n'.format(v[0],v[1],v[2])
#end for
return c
#end def write_vec
def write_data(self):
c = ''
ncols = 4
data = self.data
for d in sorted(data.keys()):
bdg_xd = data[d] # all block datagrids 2 or 3 D
for bdgk in sorted(bdg_xd.keys()):
c += ' BEGIN_BLOCK_DATAGRID_{0}D\n'.format(d)
c += ' {0}\n'.format(bdgk)
bdg = bdg_xd[bdgk] # single named block data grid
for dgk in sorted(bdg.keys()):
c += ' BEGIN_DATAGRID_{0}D_{1}\n'.format(d,dgk)
dg = bdg[dgk] # single named data grid
if d==2:
c += ' {0} {1}\n'.format(*dg.grid)
elif d==3:
c += ' {0} {1} {2}\n'.format(*dg.grid)
#end if
c += ' {0:12.8f} {1:12.8f} {2:12.8f}\n'.format(*dg.corner)
for v in dg.cell:
c += ' {0:12.8f} {1:12.8f} {2:12.8f}\n'.format(*v)
#end for
c = c[:-1]
n=0
for v in dg.values.ravel():
if n%ncols==0:
c += '\n '
#end if
c += ' {0:12.8f}'.format(v)
n+=1
#end for
c += '\n END_DATAGRID_{0}D_{1}\n'.format(d,dgk)
#end for
c += ' END_BLOCK_DATAGRID_{0}D\n'.format(d)
#end for
#end for
return c
#end def write_data
def write_band(self):
c = ''
ncols = 4
band = self.band
for d in sorted(band.keys()):
bdg_xd = band[d] # all block bandgrids 2 or 3 D
for bdgk in sorted(bdg_xd.keys()):
c += ' BEGIN_BLOCK_BANDGRID_{0}D\n'.format(d)
c += ' {0}\n'.format(bdgk)
bdg = bdg_xd[bdgk] # single named block band grid
for dgk in sorted(bdg.keys()):
c += ' BEGIN_BANDGRID_{0}D_{1}\n'.format(d,dgk)
dg = bdg[dgk] # single named band grid
if d==2:
c += ' {0} {1}\n'.format(*dg.grid)
elif d==3:
c += ' {0} {1} {2}\n'.format(*dg.grid)
#end if
c += ' {0:12.8f} {1:12.8f} {2:12.8f}\n'.format(*dg.corner)
for v in dg.cell:
c += ' {0:12.8f} {1:12.8f} {2:12.8f}\n'.format(*v)
#end for
for bi in sorted(dg.bands.keys()):
c += ' BAND: {0}'.format(bi)
n=0
for v in dg.bands[bi].ravel():
if n%ncols==0:
c += '\n '
#end if
c += ' {0:12.8f}'.format(v)
n+=1
#end for
c += '\n'
#end for
c += ' END_BANDGRID_{0}D_{1}\n'.format(d,dgk)
#end for
c += ' END_BLOCK_BANDGRID_{0}D\n'.format(d)
#end for
#end for
return c
#end def write_band
def dimension(self):
if self.periodicity in self.dimensions:
return self.dimensions[self.periodicity]
else:
return None
#end if
#end def dimension
def initialized(self):
return self.filetype!=None
#end def initialized
def has_animation(self):
return self.filetype=='axsf' and 'animsteps' in self
#end def has_animation
def has_bands(self):
return self.filetype=='bxsf' and 'band' in self and 'info' in self
#end def has_bands
def has_structure(self):
hs = self.filetype=='xsf'
hs &= 'elem' in self and 'pos' in self
d = self.dimension()
if d!=0:
hs &= 'primvec' in self
#end if
return hs
#end def has_structure
def has_data(self):
return self.filetype=='xsf' and 'data' in self
#end def has_data
def validity_checks(self):
ha = self.has_animation()
hb = self.has_bands()
hs = self.has_structure()
hd = self.has_data()
v = ha or hb or hs or hd
if v:
return []
else:
return ['xsf file must have animation, bands, structure, or data\nthe current file is missing all of these']
#end if
#end def validity_checks
def incorporate_structure(self,structure):
s = structure.copy()
s.change_units('A')
s.recenter()
elem = []
for e in s.elem:
ne = len(e)
if ne>1:
if ne==2 and not e[1].isalpha():
e = e[0]
elif ne>2:
e = e[0:2]
#end if
#end if
elem.append(ptable.elements[e].atomic_number)
#end for
self.filetype = 'xsf'
self.periodicity = 'crystal' # assumed
self.primvec = s.axes
self.elem = array(elem,dtype=int)
self.pos = s.pos
#end def incorporate_structure
def add_density(self,cell,density,name='density',corner=None,grid=None,centered=False,add_ghost=False,transpose=False):
if corner is None:
corner = zeros((3,),dtype=float)
#end if
if grid is None:
grid = density.shape
#end if
grid = array(grid,dtype=int)
corner = array(corner,dtype=float)
cell = array(cell ,dtype=float)
density = array(density,dtype=float)
density.shape = tuple(grid)
if centered: # shift corner by half a grid cell to center it
dc = 0.5/grid
dc = dot(dc,cell)
corner += dc
#end if
if add_ghost: # add ghost points to make a 'general' xsf grid
g = grid # this is an extra shell of points in PBC
d = density
grid = g+1
density = zeros(tuple(grid),dtype=float)
density[:g[0],:g[1],:g[2]] = d[:,:,:] # volume copy
density[ -1,:g[1],:g[2]] = d[0,:,:] # face copies
density[:g[0], -1,:g[2]] = d[:,0,:]
density[:g[0],:g[1], -1] = d[:,:,0]
density[ -1, -1,:g[2]] = d[0,0,:] # edge copies
density[ -1,:g[1], -1] = d[0,:,0]
density[:g[0], -1, -1] = d[:,0,0]
density[ -1, -1, -1] = d[0,0,0] # corner copy
#end if
if transpose: # shift from row major to column major
g = grid
d = density
density = zeros((d.size,))
n = 0
for k in xrange(g[2]):
for j in xrange(g[1]):
for i in xrange(g[0]):
density[n] = d[i,j,k]
n+=1
#end for
#end for
#end for
density.shape = tuple(grid)
#end if
self.data = obj()
self.data[3] = obj()
self.data[3][name] = obj()
self.data[3][name][name] = obj(
grid = grid,
corner = corner,
cell = cell,
values = density
)
#end def add_density
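# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original class): the "ghost point"
# padding performed by add_density() above.  For a periodic grid, one extra
# plane is appended along each axis and filled with copies of the opposite
# faces/edges/corner so the xsf "general" grid wraps around.  The tiny 2x2x2
# array below is an assumption for the example only.
import numpy as np

_d = np.arange(8, dtype=float).reshape(2, 2, 2)   # tiny 2x2x2 periodic grid
_g = np.array(_d.shape, dtype=int)
_ghost = np.zeros(tuple(_g + 1), dtype=float)
_ghost[:_g[0], :_g[1], :_g[2]] = _d               # volume copy
_ghost[-1, :_g[1], :_g[2]] = _d[0, :, :]          # face copies
_ghost[:_g[0], -1, :_g[2]] = _d[:, 0, :]
_ghost[:_g[0], :_g[1], -1] = _d[:, :, 0]
_ghost[-1, -1, :_g[2]] = _d[0, 0, :]              # edge copies
_ghost[-1, :_g[1], -1] = _d[0, :, 0]
_ghost[:_g[0], -1, -1] = _d[:, 0, 0]
_ghost[-1, -1, -1] = _d[0, 0, 0]                  # corner copy
assert _ghost.shape == (3, 3, 3)
assert _ghost[-1, 0, 0] == _d[0, 0, 0]            # wrapped (periodic) value
# ---------------------------------------------------------------------------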
def get_density(self):
return self.data.first().first().first()
#end def get_density
def change_units(self,in_unit,out_unit):
fac = 1.0/convert(1.0,in_unit,out_unit)**3
density = self.get_density()
density.values *= fac
if 'values_noghost' in density:
density.values_noghost *= fac
#end if
#end def change_units
def remove_ghost(self,density=None,transpose=True):
if density is None:
density = self.get_density()
#end if
if 'values_noghost' in density:
return density.values_noghost
#end if
data = density.values
if transpose: # switch from column major to row major
g = data.shape
d = data.ravel()
data = zeros(g,dtype=float)
n = 0
for k in xrange(g[2]):
for j in xrange(g[1]):
for i in xrange(g[0]):
data[i,j,k] = d[n]
n+=1
#end for
#end for
#end for
#end if
# remove the ghost cells
d = data
g = array(d.shape,dtype=int)-1
data = zeros(tuple(g),dtype=float)
data[:,:,:] = d[:g[0],:g[1],:g[2]]
density.values_noghost = data
return data
#end def remove_ghost
def norm(self,density=None,vnorm=True):
if density is None:
density = self.get_density()
#end if
if 'values_noghost' not in density:
self.remove_ghost(density)
#end if
data = density.values_noghost
if vnorm:
dV = det(density.cell)/data.size
else:
dV = 1.0
#end if
return data.ravel().sum()*dV
#end def norm
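# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original class): the normalization
# integral computed by norm() above -- the grid sum times the volume element
# dV = det(cell)/npoints.  For a constant density of 1/V the integral is 1.
# The cell and grid shape below are assumptions for the example only.
import numpy as np

_cell = np.diag([2.0, 3.0, 4.0])          # orthorhombic cell, volume 24
_vals = np.full((4, 4, 4), 1.0 / 24.0)    # constant density 1/V
_dV = np.linalg.det(_cell) / _vals.size
assert abs(_vals.sum() * _dV - 1.0) < 1e-12
# ---------------------------------------------------------------------------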
def line_data(self,dim,density=None):
if density is None:
density = self.get_density()
#end if
if 'values_noghost' not in density:
self.remove_ghost(density)
#end if
data = density.values_noghost
dV = det(density.cell)/data.size
dr = norm(density.cell[dim])/data.shape[dim]
ndim = 3
permute = dim!=0
if permute:
r = range(0,ndim)
r.pop(dim)
permutation = tuple([dim]+r)
data = data.transpose(permutation)
#end if
s = data.shape
data.shape = s[0],s[1]*s[2]
line_data = data.sum(1)*dV/dr
r_data = density.corner[dim] + dr*arange(len(line_data),dtype=float)
return r_data,line_data
#end def line_data
def line_plot(self,dim,filepath):
r,d = self.line_data(dim)
savetxt(filepath,array(zip(r,d)))
#end def line_plot
#end class XsfFile
class PoscarFile(StandardFile):
sftype = 'POSCAR'
def __init__(self,filepath=None):
self.description = None
self.scale = None
self.axes = None
self.elem = None
self.elem_count = None
self.coord = None
self.pos = None
self.dynamic = None
self.vel_coord = None
self.vel = None
StandardFile.__init__(self,filepath)
#end def __init__
def assign_defaults(self):
if self.description is None:
self.description = 'System cell and coordinates'
#end if
#end def assign_defaults
def validity_checks(self):
msgs = []
if self.description
import pickle
import os
import tensorflow as tf
import numpy as np
from collections import defaultdict
from tqdm import tqdm
from capreolus.extractor import Extractor
from capreolus import Dependency, ConfigOption, get_logger
from capreolus.utils.common import padlist
from capreolus.utils.exceptions import MissingDocError
from capreolus.tokenizer.punkt import PunktTokenizer
logger = get_logger(__name__)
@Extractor.register
class BertPassage(Extractor):
"""
Extracts passages from the document to be later consumed by a BERT based model.
Does NOT use all the passages. The first passage is always used; the `prob` config controls the probability
of each remaining passage being selected.
Gotcha: In Tensorflow the train tfrecords have shape (batch_size, maxseqlen) while dev tf records have the shape
(batch_size, num_passages, maxseqlen). This is because during inference, we want to pool over the scores of the
passages belonging to a doc
"""
module_name = "bertpassage"
dependencies = [
Dependency(key="benchmark", module="benchmark", name=None),
Dependency(
key="index", module="index", name="anserini", default_config_overrides={"indexstops": True, "stemmer": "none"}
),
Dependency(key="tokenizer", module="tokenizer", name="berttokenizer"),
]
config_spec = [
ConfigOption("maxseqlen", 256, "Maximum input length (query+document)"),
ConfigOption("maxqlen", 20, "Maximum query length"),
ConfigOption("usecache", False, "Should the extracted features be cached?"),
ConfigOption("passagelen", 150, "Length of the extracted passage"),
ConfigOption("stride", 100, "Stride"),
ConfigOption("sentences", False, "Use a sentence tokenizer to form passages"),
ConfigOption("numpassages", 16, "Number of passages per document"),
ConfigOption(
"prob",
0.1,
"The probability that a passage from the document will be used for training " "(the first passage is always used)",
),
]
def build(self):
self.pad = self.tokenizer.bert_tokenizer.pad_token_id
self.cls = self.tokenizer.bert_tokenizer.cls_token_id
self.sep = self.tokenizer.bert_tokenizer.sep_token_id
self.pad_tok = self.tokenizer.bert_tokenizer.pad_token
self.cls_tok = self.tokenizer.bert_tokenizer.cls_token
self.sep_tok = self.tokenizer.bert_tokenizer.sep_token
def load_state(self, qids, docids):
cache_fn = self.get_state_cache_file_path(qids, docids)
logger.debug("loading state from: %s", cache_fn)
with open(cache_fn, "rb") as f:
state_dict = pickle.load(f)
self.qid2toks = state_dict["qid2toks"]
self.docid2passages = state_dict["docid2passages"]
def cache_state(self, qids, docids):
os.makedirs(self.get_cache_path(), exist_ok=True)
with open(self.get_state_cache_file_path(qids, docids), "wb") as f:
state_dict = {"qid2toks": self.qid2toks, "docid2passages": self.docid2passages}
pickle.dump(state_dict, f, protocol=-1)
def get_tf_feature_description(self):
feature_description = {
"pos_bert_input": tf.io.FixedLenFeature([], tf.string),
"pos_mask": tf.io.FixedLenFeature([], tf.string),
"pos_seg": tf.io.FixedLenFeature([], tf.string),
"neg_bert_input": tf.io.FixedLenFeature([], tf.string),
"neg_mask": tf.io.FixedLenFeature([], tf.string),
"neg_seg": tf.io.FixedLenFeature([], tf.string),
"label": tf.io.FixedLenFeature([], tf.string),
}
return feature_description
def create_tf_train_feature(self, sample):
"""
Returns a set of features from a doc.
Of the num_passages passages that are present in a document, we use only a subset of them.
params:
sample - A dict where each entry has the shape [batch_size, num_passages, maxseqlen]
Returns a list of features. Each feature is a dict, and each value in the dict has the shape [batch_size, maxseqlen].
Note that the output shape differs from the input shape because we sample from the passages.
"""
num_passages = self.config["numpassages"]
def _bytes_feature(value):
"""Returns a bytes_list from a string / byte. Our features are multi-dimensional tensors."""
if isinstance(value, type(tf.constant(0))): # if value is a tensor
value = value.numpy() # get value of tensor
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
posdoc, negdoc, negdoc_id = sample["pos_bert_input"], sample["neg_bert_input"], sample["negdocid"]
posdoc_mask, posdoc_seg, negdoc_mask, negdoc_seg = (
sample["pos_mask"],
sample["pos_seg"],
sample["neg_mask"],
sample["neg_seg"],
)
label = sample["label"]
features = []
for i in range(num_passages):
# Always use the first passage, then sample from the remaining passages
if i > 0 and self.rng.random() > self.config["prob"]:
continue
bert_input_line = posdoc[i]
bert_input_line = " ".join(self.tokenizer.bert_tokenizer.convert_ids_to_tokens(list(bert_input_line)))
passage = bert_input_line.split(self.sep_tok)[-2]
# Ignore empty passages as well
if passage.strip() == self.pad_tok:
continue
feature = {
"pos_bert_input": _bytes_feature(tf.io.serialize_tensor(posdoc[i])),
"pos_mask": _bytes_feature(tf.io.serialize_tensor(posdoc_mask[i])),
"pos_seg": _bytes_feature(tf.io.serialize_tensor(posdoc_seg[i])),
"neg_bert_input": _bytes_feature(tf.io.serialize_tensor(negdoc[i])),
"neg_mask": _bytes_feature(tf.io.serialize_tensor(negdoc_mask[i])),
"neg_seg": _bytes_feature(tf.io.serialize_tensor(negdoc_seg[i])),
"label": _bytes_feature(tf.io.serialize_tensor(label[i])),
}
features.append(feature)
return features
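# Sketch (not part of this extractor): the trainer that consumes these feature
# dicts is assumed to wrap each one in a tf.train.Example and write it to a
# TFRecord file, e.g.:
#   with tf.io.TFRecordWriter(record_path) as writer:
#       for feature in extractor.create_tf_train_feature(sample):
#           example = tf.train.Example(features=tf.train.Features(feature=feature))
#           writer.write(example.SerializeToString())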
def create_tf_dev_feature(self, sample):
"""
Unlike the train feature, the dev set uses all passages. Both the input and the output are dicts with the shape
[batch_size, num_passages, maxseqlen]
"""
posdoc, negdoc, negdoc_id = sample["pos_bert_input"], sample["neg_bert_input"], sample["negdocid"]
posdoc_mask, posdoc_seg, negdoc_mask, negdoc_seg = (
sample["pos_mask"],
sample["pos_seg"],
sample["neg_mask"],
sample["neg_seg"],
)
label = sample["label"]
def _bytes_feature(value):
"""Returns a bytes_list from a string / byte."""
if isinstance(value, type(tf.constant(0))): # if value is a tensor
value = value.numpy() # get value of tensor
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
feature = {
"pos_bert_input": _bytes_feature(tf.io.serialize_tensor(posdoc)),
"pos_mask": _bytes_feature(tf.io.serialize_tensor(posdoc_mask)),
"pos_seg": _bytes_feature(tf.io.serialize_tensor(posdoc_seg)),
"neg_bert_input": _bytes_feature(tf.io.serialize_tensor(negdoc)),
"neg_mask": _bytes_feature(tf.io.serialize_tensor(negdoc_mask)),
"neg_seg": _bytes_feature(tf.io.serialize_tensor(negdoc_seg)),
"label": _bytes_feature(tf.io.serialize_tensor(label)),
}
return [feature]
def parse_tf_train_example(self, example_proto):
feature_description = self.get_tf_feature_description()
parsed_example = tf.io.parse_example(example_proto, feature_description)
def parse_tensor_as_int(x):
parsed_tensor = tf.io.parse_tensor(x, tf.int64)
parsed_tensor.set_shape([self.config["maxseqlen"]])
return parsed_tensor
def parse_label_tensor(x):
parsed_tensor = tf.io.parse_tensor(x, tf.float32)
parsed_tensor.set_shape([2])
return parsed_tensor
pos_bert_input = tf.map_fn(parse_tensor_as_int, parsed_example["pos_bert_input"], dtype=tf.int64)
pos_mask = tf.map_fn(parse_tensor_as_int, parsed_example["pos_mask"], dtype=tf.int64)
pos_seg = tf.map_fn(parse_tensor_as_int, parsed_example["pos_seg"], dtype=tf.int64)
neg_bert_input = tf.map_fn(parse_tensor_as_int, parsed_example["neg_bert_input"], dtype=tf.int64)
neg_mask = tf.map_fn(parse_tensor_as_int, parsed_example["neg_mask"], dtype=tf.int64)
neg_seg = tf.map_fn(parse_tensor_as_int, parsed_example["neg_seg"], dtype=tf.int64)
label = tf.map_fn(parse_label_tensor, parsed_example["label"], dtype=tf.float32)
return (pos_bert_input, pos_mask, pos_seg, neg_bert_input, neg_mask, neg_seg), label
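# Sketch of the assumed input pipeline: because tf.io.parse_example is applied
# to a batch of serialized protos, the raw records are batched *before* this
# parser is mapped over them, e.g.:
#   dataset = (tf.data.TFRecordDataset(filenames)
#              .batch(batch_size)
#              .map(extractor.parse_tf_train_example))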
def parse_tf_dev_example(self, example_proto):
feature_description = self.get_tf_feature_description()
parsed_example = tf.io.parse_example(example_proto, feature_description)
def parse_tensor_as_int(x):
parsed_tensor = tf.io.parse_tensor(x, tf.int64)
parsed_tensor.set_shape([self.config["numpassages"], self.config["maxseqlen"]])
return parsed_tensor
def parse_label_tensor(x):
parsed_tensor = tf.io.parse_tensor(x, tf.float32)
parsed_tensor.set_shape([self.config["numpassages"], 2])
return parsed_tensor
pos_bert_input = tf.map_fn(parse_tensor_as_int, parsed_example["pos_bert_input"], dtype=tf.int64)
pos_mask = tf.map_fn(parse_tensor_as_int, parsed_example["pos_mask"], dtype=tf.int64)
pos_seg = tf.map_fn(parse_tensor_as_int, parsed_example["pos_seg"], dtype=tf.int64)
neg_bert_input = tf.map_fn(parse_tensor_as_int, parsed_example["neg_bert_input"], dtype=tf.int64)
neg_mask = tf.map_fn(parse_tensor_as_int, parsed_example["neg_mask"], dtype=tf.int64)
neg_seg = tf.map_fn(parse_tensor_as_int, parsed_example["neg_seg"], dtype=tf.int64)
label = tf.map_fn(parse_label_tensor, parsed_example["label"], dtype=tf.float32)
return (pos_bert_input, pos_mask, pos_seg, neg_bert_input, neg_mask, neg_seg), label
def _prepare_doc_psgs(self, doc):
"""
Extract passages from the doc.
If there are too many passages, keep the first and the last one and sample from the rest.
If there are not enough passages, pad.
"""
passages = []
numpassages = self.config["numpassages"]
doc = self.tokenizer.tokenize(doc)
for i in range(0, len(doc), self.config["stride"]):
if i >= len(doc):
assert len(passages) > 0, f"no passage can be built from empty document {doc}"
break
passages.append(doc[i : i + self.config["passagelen"]])
n_actual_passages = len(passages)
# If we have more passages than required, keep only the first numpassages (the original strategy of keeping the first and last and sampling from the rest is commented out below)
if n_actual_passages > numpassages:
if numpassages > 1:
# passages = [passages[0]] + list(self.rng.choice(passages[1:-1], numpassages - 2, replace=False)) + [passages[-1]]
passages = passages[:numpassages]
else:
passages = [passages[0]]
else:
# Pad until we have the required number of passages
passages.extend([[self.pad_tok] for _ in range(numpassages - n_actual_passages)])
assert len(passages) == self.config["numpassages"]
return passages
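# Worked example with the default config (passagelen=150, stride=100,
# numpassages=16): a 400-token document yields windows starting at tokens
# 0, 100, 200 and 300 (50 tokens of overlap), i.e. 4 real passages, and the
# remaining 12 slots are padded with [self.pad_tok].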
# from https://github.com/castorini/birch/blob/2dd0401ebb388a1c96f8f3357a064164a5db3f0e/src/utils/doc_utils.py#L73
def _chunk_sent(self, sent, max_len):
words = self.tokenizer.tokenize(sent)
if len(words) <= max_len:
return [words]
chunked_sents = []
size = int(len(words) / max_len)
for i in range(0, size):
seq = words[i * max_len : (i + 1) * max_len]
chunked_sents.append(seq)
return chunked_sents
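# Note: any trailing words beyond a multiple of max_len are dropped, e.g. a
# 7-word sentence with max_len=3 yields two 3-word chunks and discards the
# last word (mirroring the referenced birch implementation).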
def _build_passages_from_sentences(self, docids):
punkt = PunktTokenizer()
for docid in tqdm(docids, "extract passages"):
passages = []
numpassages = self.config["numpassages"]
for sentence in punkt.tokenize(self.index.get_doc(docid)):
if len(passages) >= numpassages:
break
passages.extend(self._chunk_sent(sentence, self.config["passagelen"]))
if numpassages != 0:
passages = passages[:numpassages]
n_actual_passages = len(passages)
for _ in range(numpassages - n_actual_passages):
# randomly use one of previous passages when the document is exhausted
# idx = random.randint(0, n_actual_passages - 1)
# passages.append(passages[idx])
# append empty passages
passages.append([""])
assert len(passages) == self.config["numpassages"]
self.docid2passages[docid] = sorted(passages, key=len)
def _build_vocab(self, qids, docids, topics):
if self.is_state_cached(qids, docids) and self.config["usecache"]:
self.load_state(qids, docids)
logger.info("Vocabulary loaded from cache")
elif self.config["sentences"]:
self.docid2passages = {}
self._build_passages_from_sentences(docids)
self.qid2toks = {qid: self.tokenizer.tokenize(topics[qid]) for qid in tqdm(qids, desc="querytoks")}
self.cache_state(qids, docids)
else:
logger.info("Building bertpassage vocabulary")
self.qid2toks = {qid: self.tokenizer.tokenize(topics[qid]) for qid in tqdm(qids, desc="querytoks")}
self.docid2passages = {
docid: self._prepare_doc_psgs(self.index.get_doc(docid)) for docid in tqdm(sorted(docids), "extract passages")
}
self.cache_state(qids, docids)
def exist(self):
return hasattr(self, "docid2passages") and len(self.docid2passages)
def preprocess(self, qids, docids, topics):
if self.exist():
return
self.index.create_index()
self.qid2toks = defaultdict(list)
self.docid2passages = None
self._build_vocab(qids, docids, topics)
def _prepare_bert_input(self, query_toks, psg_toks):
maxseqlen, maxqlen = self.config["maxseqlen"], self.config["maxqlen"]
if len(query_toks) > maxqlen:
logger.warning(f"Truncating query from {len(query_toks)} to {maxqlen}")
query_toks = query_toks[:maxqlen]
psg_toks = psg_toks[: maxseqlen - len(query_toks) - 3]
psg_toks = " ".join(psg_toks).split() # in case that psg_toks is np.array
input_line = [self.cls_tok] + query_toks + [self.sep_tok] + psg_toks + [self.sep_tok]
padded_input_line = padlist(input_line, padlen=maxseqlen, pad_token=self.pad_tok)
inp = self.tokenizer.convert_tokens_to_ids(padded_input_line)
mask = [1] * len(input_line) + [0] * (len(padded_input_line) - len(input_line))
seg = [0] * (len(query_toks) + 2) + [1] * (len(psg_toks) + 1) + [0] * (len(padded_input_line) - len(input_line))  # 0: [CLS]+query+[SEP], 1: passage+[SEP], 0: padding
return inp, mask, seg
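# Worked example (toy sizes, maxseqlen=16): query ["dog", "breeds"] with
# passage ["golden", "retriever", "temperament"] gives
#   tokens: [CLS] dog breeds [SEP] golden retriever temperament [SEP] [PAD]*8
#   mask:   [1]*8 + [0]*8
#   seg:    [0]*4 + [1]*4 + [0]*8   (0 = query half, 1 = passage half)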
def id2vec(self, qid, posid, negid=None, label=None):
"""
See parent class for docstring
"""
assert label is not None
maxseqlen = self.config["maxseqlen"]
numpassages = self.config["numpassages"]
query_toks = self.qid2toks[qid]
pos_bert_inputs, pos_bert_masks, pos_bert_segs = [], [], []
# N.B: The passages in self.docid2passages are not bert tokenized
pos_passages = self.docid2passages[posid]
for tokenized_passage in pos_passages:
inp, mask, seg = self._prepare_bert_input(query_toks, tokenized_passage)
pos_bert_inputs.append(inp)
pos_bert_masks.append(mask)
pos_bert_segs.append(seg)
# TODO: Rename the posdoc key in the below dict to 'pos_bert_input'
data = {
"qid": qid,
"posdocid": posid,
"pos_bert_input": | |
<filename>pydra/engine/tests/test_graph.py
from ..graph import DiGraph
from .utils import DOT_FLAG
import pytest
class ObjTest:
def __init__(self, name):
self.name = name
self.state = None
A = ObjTest("a")
B = ObjTest("b")
C = ObjTest("c")
D = ObjTest("d")
E = ObjTest("e")
def test_no_edges():
"""a, b"""
graph = DiGraph(nodes=[A, B])
# checking nodes and edges
assert [nd.name for nd in graph.nodes] == ["a", "b"]
assert [(edg[0].name, edg[1].name) for edg in graph.edges] == []
# checking names
assert set(graph.nodes_names_map.keys()) == {"a", "b"}
assert graph.edges_names == []
def test_edges_1():
"""a -> b"""
graph = DiGraph(nodes=[A, B], edges=[(A, B)])
assert [nd.name for nd in graph.nodes] == ["a", "b"]
assert [(edg[0].name, edg[1].name) for edg in graph.edges] == [("a", "b")]
assert set(graph.nodes_names_map.keys()) == {"a", "b"}
assert graph.edges_names == [("a", "b")]
def test_edges_1a():
"""a -> b (add_nodes and add_edges)"""
graph = DiGraph()
graph.add_nodes([A, B])
graph.add_edges((A, B))
assert set(graph.nodes_names_map.keys()) == {"a", "b"}
assert graph.edges_names == [("a", "b")]
def test_edges_2():
"""a -> b"""
graph = DiGraph(nodes=[B, A], edges=[(A, B)])
assert set(graph.nodes_names_map.keys()) == {"b", "a"}
assert graph.edges_names == [("a", "b")]
def test_edges_3():
"""a-> b -> c; a -> c; d"""
graph = DiGraph(nodes=[B, A, C, D], edges=[(A, B), (B, C), (A, C)])
assert set(graph.nodes_names_map.keys()) == {"b", "a", "c", "d"}
assert graph.edges_names == [("a", "b"), ("b", "c"), ("a", "c")]
def test_edges_exception_1():
with pytest.raises(Exception) as excinfo:
graph = DiGraph(nodes=[A, B, A], edges=[(A, B)])
assert "repeated elements" in str(excinfo.value)
def test_edges_exception_2():
with pytest.raises(Exception) as excinfo:
graph = DiGraph(nodes=[A, B], edges=[(A, C)])
assert "can't be added" in str(excinfo.value)
def test_sort_1():
"""a -> b"""
graph = DiGraph(nodes=[A, B], edges=[(A, B)])
assert set(graph.nodes_names_map.keys()) == {"a", "b"}
assert graph.edges_names == [("a", "b")]
graph.sorting()
assert graph.sorted_nodes_names == ["a", "b"]
def test_sort_2():
"""a -> b"""
graph = DiGraph(nodes=[B, A], edges=[(A, B)])
assert set(graph.nodes_names_map.keys()) == {"b", "a"}
assert graph.edges_names == [("a", "b")]
graph.sorting()
assert graph.sorted_nodes_names == ["a", "b"]
def test_sort_3():
"""a-> b -> c; a -> c; d"""
graph = DiGraph(nodes=[B, A, C, D], edges=[(A, B), (B, C), (A, C)])
assert set(graph.nodes_names_map.keys()) == {"b", "a", "c", "d"}
assert graph.edges_names == [("a", "b"), ("b", "c"), ("a", "c")]
graph.sorting()
assert graph.sorted_nodes_names == ["a", "d", "b", "c"]
def test_sort_4():
"""a-> b -> c; a -> c; a -> d"""
graph = DiGraph(nodes=[B, A, C, D], edges=[(A, B), (B, C), (A, C), (A, D)])
assert set(graph.nodes_names_map.keys()) == {"b", "a", "c", "d"}
assert graph.edges_names == [("a", "b"), ("b", "c"), ("a", "c"), ("a", "d")]
graph.sorting()
assert graph.sorted_nodes_names == ["a", "b", "d", "c"]
def test_sort_5():
"""a-> b -> c; a -> c; d -> c"""
graph = DiGraph(nodes=[B, A, C, D], edges=[(A, B), (B, C), (A, C), (D, C)])
assert set(graph.nodes_names_map.keys()) == {"b", "a", "c", "d"}
assert graph.edges_names == [("a", "b"), ("b", "c"), ("a", "c"), ("d", "c")]
graph.sorting()
assert graph.sorted_nodes_names == ["a", "d", "b", "c"]
def test_sort_5a():
"""a-> b -> c; a -> c; d -> c (add_nodes/edges)"""
graph = DiGraph(nodes=[A, C, D], edges=[(A, C), (D, C)])
graph.add_nodes(B)
graph.add_edges([(A, B), (B, C)])
assert set(graph.nodes_names_map.keys()) == {"a", "c", "d", "b"}
assert graph.edges_names == [("a", "c"), ("d", "c"), ("a", "b"), ("b", "c")]
graph.sorting()
assert graph.sorted_nodes_names == ["a", "d", "b", "c"]
def test_sort_5b():
"""a-> b -> c; a -> c; d -> c (add_nodes/edges)"""
graph = DiGraph(nodes=[A, C, D], edges=[(A, C), (D, C)])
assert set(graph.nodes_names_map.keys()) == {"a", "c", "d"}
assert graph.edges_names == [("a", "c"), ("d", "c")]
graph.sorting()
assert graph.sorted_nodes_names == ["a", "d", "c"]
graph.add_nodes(B)
assert set(graph.nodes_names_map.keys()) == {"a", "c", "d", "b"}
assert graph.edges_names == [("a", "c"), ("d", "c")]
assert graph.sorted_nodes_names == ["a", "d", "b", "c"]
graph.add_edges([(A, B), (B, C)])
assert set(graph.nodes_names_map.keys()) == {"a", "c", "d", "b"}
assert graph.edges_names == [("a", "c"), ("d", "c"), ("a", "b"), ("b", "c")]
assert graph.sorted_nodes_names == ["a", "d", "b", "c"]
def test_sort_6():
"""a -> b -> c -> e; a -> c -> e; a -> b -> d -> e"""
graph = DiGraph(
nodes=[D, E, C, B, A], edges=[(A, B), (B, C), (A, C), (B, D), (C, E), (D, E)]
)
assert set(graph.nodes_names_map.keys()) == {"d", "e", "c", "b", "a"}
assert graph.edges_names == [
("a", "b"),
("b", "c"),
("a", "c"),
("b", "d"),
("c", "e"),
("d", "e"),
]
graph.sorting()
assert graph.sorted_nodes_names == ["a", "b", "d", "c", "e"]
def test_remove_1():
"""a -> b (removing a node)"""
graph = DiGraph(nodes=[A, B], edges=[(A, B)])
assert set(graph.nodes_names_map.keys()) == {"a", "b"}
assert graph.edges_names == [("a", "b")]
graph.sorting()
assert graph.sorted_nodes_names == ["a", "b"]
# removing a node (e.g. after it is sent to run)
graph.remove_nodes(A)
assert set(graph.nodes_names_map.keys()) == {"b"}
assert graph.edges_names == [("a", "b")]
assert graph.sorted_nodes_names == ["b"]
# removing all connections (e.g. after the task is done)
graph.remove_nodes_connections(A)
assert set(graph.nodes_names_map.keys()) == {"b"}
assert graph.edges_names == []
assert graph.sorted_nodes_names == ["b"]
def test_remove_2():
"""a-> b -> c; a -> c; d (removing a node)"""
graph = DiGraph(nodes=[B, A, C, D], edges=[(A, B), (B, C), (A, C)])
assert set(graph.nodes_names_map.keys()) == {"b", "a", "c", "d"}
assert graph.edges_names == [("a", "b"), ("b", "c"), ("a", "c")]
graph.sorting()
assert graph.sorted_nodes_names == ["a", "d", "b", "c"]
graph.remove_nodes(A)
assert set(graph.nodes_names_map.keys()) == {"b", "c", "d"}
assert graph.edges_names == [("a", "b"), ("b", "c"), ("a", "c")]
assert graph.sorted_nodes_names == ["d", "b", "c"]
graph.remove_nodes_connections(A)
assert set(graph.nodes_names_map.keys()) == {"b", "c", "d"}
assert graph.edges_names == [("b", "c")]
assert graph.sorted_nodes_names == ["d", "b", "c"]
def test_remove_3():
"""a-> b -> c; a -> c; d (removing a node)"""
graph = DiGraph(nodes=[B, A, C, D], edges=[(A, B), (B, C), (A, C)])
assert set(graph.nodes_names_map.keys()) == {"b", "a", "c", "d"}
assert graph.edges_names == [("a", "b"), ("b", "c"), ("a", "c")]
graph.sorting()
assert graph.sorted_nodes_names == ["a", "d", "b", "c"]
graph.remove_nodes(D)
graph.remove_nodes_connections(D)
assert set(graph.nodes_names_map.keys()) == {"b", "a", "c"}
assert graph.edges_names == [("a", "b"), ("b", "c"), ("a", "c")]
assert graph.sorted_nodes_names == ["a", "b", "c"]
def test_remove_4():
"""a-> b -> c; a -> d -> e (removing A and later D)"""
graph = DiGraph(nodes=[B, A, C, D, E], edges=[(A, B), (B, C), (A, D), (D, E)])
assert set(graph.nodes_names_map.keys()) == {"b", "a", "c", "d", "e"}
assert graph.edges_names == [("a", "b"), ("b", "c"), ("a", "d"), ("d", "e")]
graph.sorting()
assert graph.sorted_nodes_names == ["a", "b", "d", "c", "e"]
graph.remove_nodes(A)
graph.remove_nodes_connections(A)
assert set(graph.nodes_names_map.keys()) == {"b", "c", "d", "e"}
assert graph.edges_names == [("b", "c"), ("d", "e")]
assert graph.sorted_nodes_names == ["b", "d", "c", "e"]
graph.remove_nodes(D)
graph.remove_nodes_connections(D)
assert set(graph.nodes_names_map.keys()) == {"b", "c", "e"}
assert graph.edges_names == [("b", "c")]
assert graph.sorted_nodes_names == ["b", "e", "c"]
def test_remove_5():
"""a-> b -> c; a -> d -> e (removing A, and [B, D] at the same time)"""
graph = DiGraph(nodes=[B, A, C, D, E], edges=[(A, B), (B, C), (A, D), (D, E)])
assert set(graph.nodes_names_map.keys()) == {"b", "a", "c", "d", "e"}
assert graph.edges_names == [("a", "b"), ("b", "c"), ("a", "d"), ("d", "e")]
graph.sorting()
assert graph.sorted_nodes_names == ["a", "b", "d", "c", "e"]
graph.remove_nodes(A)
graph.remove_nodes_connections(A)
assert set(graph.nodes_names_map.keys()) == {"b", "c", "d", "e"}
assert graph.edges_names == [("b", "c"), ("d", "e")]
assert graph.sorted_nodes_names == ["b", "d", "c", "e"]
graph.remove_nodes([B, D])
graph.remove_nodes_connections([B, D])
assert set(graph.nodes_names_map.keys()) == {"c", "e"}
assert graph.edges_names == []
assert graph.sorted_nodes_names == ["c", "e"]
def test_remove_exception_1():
"""a -> b (removing a node)"""
graph = DiGraph(nodes=[A, B], edges=[(A, B)])
assert set(graph.nodes_names_map.keys()) == {"a", "b"}
assert graph.edges_names == [("a", "b")]
graph.sorting()
assert graph.sorted_nodes_names == ["a", "b"]
with pytest.raises(Exception) as excinfo:
graph.remove_nodes(B)
assert "has to wait" in str(excinfo.value)
def test_remove_add_1():
"""a -> b (removing and adding nodes)"""
graph = DiGraph(nodes=[A, B], edges=[(A, B)])
assert set(graph.nodes_names_map.keys()) == {"a", "b"}
assert graph.edges_names == [("a", "b")]
graph.sorting()
assert graph.sorted_nodes_names == ["a", "b"]
# removing a node (e.g. after it is sent to run)
graph.remove_nodes(A)
graph.remove_nodes_connections(A)
assert set(graph.nodes_names_map.keys()) == {"b"}
assert graph.edges_names == []
assert graph.sorted_nodes_names == ["b"]
graph.add_nodes(A)
graph.add_edges((A, B))
assert set(graph.nodes_names_map.keys()) == {"b", "a"}
assert graph.edges_names == [("a", "b")]
assert graph.sorted_nodes_names == ["a", "b"]
def test_remove_add_2():
"""a-> b -> c; a -> c; d (removing and adding nodes)"""
graph = DiGraph(nodes=[B, A, C, D], edges=[(A, B), (B, C), (A, C)])
assert set(graph.nodes_names_map.keys()) == {"b", "a", "c", "d"}
assert graph.edges_names == [("a", "b"), ("b", "c"), ("a", "c")]
graph.sorting()
assert graph.sorted_nodes_names == ["a", "d", "b", "c"]
graph.remove_nodes(A)
graph.remove_nodes_connections(A)
assert set(graph.nodes_names_map.keys()) == {"b", "c", "d"}
assert graph.edges_names == [("b", "c")]
assert graph.sorted_nodes_names == ["d", "b", "c"]
graph.add_nodes(A)
graph.add_edges([(A, B), (A, C)])
assert set(graph.nodes_names_map.keys()) == {"b", "c", "d", "a"}
assert graph.edges_names == [("b", "c"), ("a", "b"), ("a", | |
really care that much
t = get_mtime(p.strValue)
except OSError:
return h
hasher = sha_hash()
hasher.update(h)
hasher.update(str(t))
return hasher.digest()
class File(Path):
"""File is a VisTrails Module that represents a file stored on a
file system local to the machine where VisTrails is running."""
_settings = ModuleSettings(constant_signature=path_parameter_hasher,
constant_widget=("%s:FileChooserWidget" % \
constant_config_path))
_input_ports = [IPort("value", "File"),
IPort("create_file", "Boolean", optional=True)]
_output_ports = [OPort("value", "File"),
OPort("local_filename", "String", optional=True)]
def compute(self):
n = self.get_name()
if (self.has_input("create_file") and self.get_input("create_file")):
vistrails.core.system.touch(n)
if not os.path.isfile(n):
raise ModuleError(self, 'File %r does not exist' % n)
self.set_results(n)
self.set_output("local_filename", n)
class Directory(Path):
_settings = ModuleSettings(constant_signature=path_parameter_hasher,
constant_widget=("%s:DirectoryChooserWidget" % \
constant_config_path))
_input_ports = [IPort("value", "Directory"),
IPort("create_directory", "Boolean", optional=True)]
_output_ports = [OPort("value", "Directory"),
OPort("itemList", "List")]
def compute(self):
n = self.get_name()
if (self.has_input("create_directory") and
self.get_input("create_directory")):
try:
vistrails.core.system.mkdir(n)
except Exception, e:
raise ModuleError(self, 'mkdir: %s' % format_exception(e))
if not os.path.isdir(n):
raise ModuleError(self, 'Directory "%s" does not exist' % n)
self.set_results(n)
dir_list = os.listdir(n)
output_list = []
for item in dir_list:
full_path = os.path.join(n, item)
output_list.append(PathObject(full_path))
self.set_output('itemList', output_list)
##############################################################################
class OutputPath(Path):
_settings = ModuleSettings(constant_widget=("%s:OutputPathChooserWidget" % \
constant_config_path))
_input_ports = [IPort("value", "OutputPath")]
_output_ports = [OPort("value", "OutputPath")]
def get_name(self):
n = None
if self.has_input("value"):
n = self.get_input("value").name
if n is None:
self.check_input("name")
n = self.get_input("name")
return n
def set_results(self, n):
self.set_output("value", PathObject(n))
self.set_output("value_as_string", n)
def compute(self):
n = self.get_name()
self.set_results(n)
class FileSink(NotCacheable, Module):
"""FileSink takes a file and writes it to a user-specified
location in the file system. The file is stored at location
specified by the outputPath. The overwrite flag allows users to
specify whether an existing path should be overwritten."""
_input_ports = [IPort("file", File),
IPort("outputPath", OutputPath),
IPort("overwrite", Boolean, optional=True,
default=True),
IPort("publishFile", Boolean, optional=True)]
def compute(self):
input_file = self.get_input("file")
output_path = self.get_input("outputPath")
full_path = output_path.name
if os.path.isfile(full_path):
if self.get_input('overwrite'):
try:
os.remove(full_path)
except OSError, e:
msg = ('Could not delete existing path "%s" '
'(overwrite on)' % full_path)
raise ModuleError(self, msg)
else:
raise ModuleError(self,
"Could not copy file to '%s': file already "
"exists" % full_path)
try:
vistrails.core.system.link_or_copy(input_file.name, full_path)
except OSError, e:
msg = "Could not create file '%s': %s" % (full_path, e)
raise ModuleError(self, msg)
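# Publish unless the optional publishFile port is explicitly set to False:
# the condition below reads (has_input and value) or (not has_input).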
if (self.has_input("publishFile") and
self.get_input("publishFile") or
not self.has_input("publishFile")):
if self.moduleInfo.has_key('extra_info'):
if self.moduleInfo['extra_info'].has_key('pathDumpCells'):
folder = self.moduleInfo['extra_info']['pathDumpCells']
base_fname = os.path.basename(full_path)
(base_fname, file_extension) = os.path.splitext(base_fname)
base_fname = os.path.join(folder, base_fname)
# make a unique filename
filename = base_fname + file_extension
counter = 2
while os.path.exists(filename):
filename = base_fname + "_%d%s" % (counter,
file_extension)
counter += 1
try:
vistrails.core.system.link_or_copy(input_file.name, filename)
except OSError, e:
msg = "Could not publish file '%s' \n on '%s':" % (
full_path, filename)
# I am not sure whether we should raise an error
# I will just print a warning for now (Emanuele)
debug.warning("%s" % msg, e)
class DirectorySink(NotCacheable, Module):
"""DirectorySink takes a directory and writes it to a
user-specified location in the file system. The directory is
stored at location specified by the outputPath. The overwrite
flag allows users to specify whether an existing path should be
overwritten."""
_input_ports = [IPort("dir", Directory),
IPort("outputPath", OutputPath),
IPort("overwrite", Boolean, optional=True, default="True")]
def compute(self):
input_dir = self.get_input("dir")
output_path = self.get_input("outputPath")
full_path = output_path.name
if os.path.exists(full_path):
if self.get_input("overwrite"):
try:
if os.path.isfile(full_path):
os.remove(full_path)
else:
shutil.rmtree(full_path)
except OSError, e:
msg = ('Could not delete existing path "%s" '
'(overwrite on)' % full_path)
raise ModuleError(
self,
'%s\n%s' % (msg, format_exception(e)))
else:
msg = ('Could not write to existing path "%s" '
'(overwrite off)' % full_path)
raise ModuleError(self, msg)
try:
shutil.copytree(input_dir.name, full_path)
except OSError, e:
msg = 'Could not copy path from "%s" to "%s"' % \
(input_dir.name, full_path)
raise ModuleError(self, '%s\n%s' % (msg, format_exception(e)))
##############################################################################
class WriteFile(Converter):
"""Writes a String to a temporary File.
"""
_input_ports = [IPort('in_value', String),
IPort('suffix', String, optional=True, default=""),
IPort('encoding', String, optional=True)]
_output_ports = [OPort('out_value', File)]
def compute(self):
contents = self.get_input('in_value')
suffix = self.force_get_input('suffix', '')
result = self.interpreter.filePool.create_file(suffix=suffix)
if self.has_input('encoding'):
contents = contents.decode('utf-8') # VisTrails uses UTF-8
# internally (I hope)
contents = contents.encode(self.get_input('encoding'))
with open(result.name, 'wb') as fp:
fp.write(contents)
self.set_output('out_value', result)
class ReadFile(Converter):
"""Reads a File to a String.
"""
_input_ports = [IPort('in_value', File),
IPort('encoding', String, optional=True)]
_output_ports = [OPort('out_value', String)]
def compute(self):
filename = self.get_input('in_value').name
with open(filename, 'rb') as fp:
contents = fp.read()
if self.has_input('encoding'):
contents = contents.decode(self.get_input('encoding'))
contents = contents.encode('utf-8') # VisTrails uses UTF-8
# internally (for now)
self.set_output('out_value', contents)
##############################################################################
class Color(Constant):
# We set the value of a color object to be an InstanceObject that
# contains a tuple because a tuple would be interpreted as a
# type(tuple) which messes with the interpreter
_settings = ModuleSettings(constant_widgets=[
'%s:ColorWidget' % constant_config_path,
ConstantWidgetConfig('%s:ColorEnumWidget' % \
constant_config_path,
widget_type='enum'),
QueryWidgetConfig('%s:ColorQueryWidget' % \
query_config_path),
ParamExpWidgetConfig('%s:RGBExploreWidget' % \
paramexp_config_path,
widget_type='rgb'),
ParamExpWidgetConfig('%s:HSVExploreWidget' % \
paramexp_config_path,
widget_type='hsv')])
_input_ports = [IPort("value", "Color")]
_output_ports = [OPort("value", "Color")]
default_value = InstanceObject(tuple=(1,1,1))
@staticmethod
def translate_to_python(x):
return InstanceObject(
tuple=tuple([float(a) for a in x.split(',')]))
@staticmethod
def translate_to_string(v):
return ','.join('%f' % c for c in v.tuple)
@staticmethod
def validate(x):
return isinstance(x, InstanceObject) and hasattr(x, 'tuple')
@staticmethod
def to_string(r, g, b):
return "%s,%s,%s" % (r,g,b)
@staticmethod
def query_compute(value_a, value_b, query_method):
# SOURCE: http://www.easyrgb.com/index.php?X=MATH
def rgb_to_xyz(r, g, b):
# r,g,b \in [0,1]
if r > 0.04045:
r = ( ( r + 0.055 ) / 1.055 ) ** 2.4
else:
r = r / 12.92
if g > 0.04045:
g = ( ( g + 0.055 ) / 1.055 ) ** 2.4
else:
g = g / 12.92
if b > 0.04045:
b = ( ( b + 0.055 ) / 1.055 ) ** 2.4
else:
b = b / 12.92
r *= 100
g *= 100
b *= 100
# Observer. = 2 deg, Illuminant = D65
x = r * 0.4124 + g * 0.3576 + b * 0.1805
y = r * 0.2126 + g * 0.7152 + b * 0.0722
z = r * 0.0193 + g * 0.1192 + b * 0.9505
return (x,y,z)
def xyz_to_cielab(x,y,z):
# Observer= 2 deg, Illuminant= D65
ref_x, ref_y, ref_z = (95.047, 100.000, 108.883)
x /= ref_x
y /= ref_y
z /= ref_z
if x > 0.008856:
x = x ** ( 1/3.0 )
else:
x = ( 7.787 * x ) + ( 16 / 116.0 )
if y > 0.008856:
y = y ** ( 1/3.0 )
else:
y = ( 7.787 * y ) + ( 16 / 116.0 )
if z > 0.008856:
z = z ** ( 1/3.0 )
else:
z = ( 7.787 * z ) + ( 16 / 116.0 )
L = ( 116 * y ) - 16
a = 500 * ( x - y )
b = 200 * ( y - z )
return (L, a, b)
def rgb_to_cielab(r,g,b):
return xyz_to_cielab(*rgb_to_xyz(r,g,b))
value_a_rgb = (float(a) for a in value_a.split(','))
value_b_rgb = (float(b) for b in value_b.split(','))
value_a_lab = rgb_to_cielab(*value_a_rgb)
value_b_lab = rgb_to_cielab(*value_b_rgb)
# cie76 difference
diff = sum((v_1 - v_2) ** 2
for v_1, v_2 in izip(value_a_lab, value_b_lab)) ** (0.5)
# print "CIE 76 DIFFERENCE:", diff
if query_method is None:
query_method = '2.3'
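# (a CIE76 distance of about 2.3 is commonly cited as the just-noticeable
# difference, hence the default threshold)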
return diff < float(query_method)
##############################################################################
class StandardOutput(NotCacheable, Module):
"""StandardOutput is a VisTrails Module that simply prints the
value connected on its port to standard output. It is intended
mostly as a debugging device."""
_input_ports = [IPort("value", 'Variant')]
def compute(self):
v = self.get_input("value")
if isinstance(v, PathObject):
try:
fp = open(v.name, 'rb')
except IOError:
print v
else:
try:
CHUNKSIZE = 2048
chunk = fp.read(CHUNKSIZE)
if chunk:
sys.stdout.write(chunk)
while len(chunk) == CHUNKSIZE:
chunk = fp.read(CHUNKSIZE)
if chunk:
sys.stdout.write(chunk)
sys.stdout.write('\n')
finally:
fp.close()
else:
print v
##############################################################################
# Tuple will be reasonably magic right now. We'll integrate it better
# with vistrails later.
# TODO: Check Tuple class, test, integrate.
class Tuple(Module):
"""Tuple represents a tuple of values. Tuple might not be well
integrated with the rest of VisTrails, so don't use it unless
you know what you're doing."""
_settings = ModuleSettings(configure_widget=
"vistrails.gui.modules.tuple_configuration:TupleConfigurationWidget")
def __init__(self):
Module.__init__(self)
self.input_ports_order = []
self.values = tuple()
def transfer_attrs(self, module):
Module.transfer_attrs(self, module)
self.input_ports_order = [p.name for p in module.input_port_specs]
def compute(self):
| |
<filename>third_party/frc971/control_loops/python/drivetrain.py
#!/usr/bin/python
from third_party.frc971.control_loops.python import control_loop
from third_party.frc971.control_loops.python import controls
import numpy
import sys
from matplotlib import pylab
import glog
class DrivetrainParams(object):
def __init__(self, J, mass, robot_radius, wheel_radius, G_high, G_low,
q_pos_low, q_pos_high, q_vel_low, q_vel_high,
motor_type = control_loop.CIM(), num_motors = 2, dt = 0.00500,
controller_poles=[0.90, 0.90], observer_poles=[0.02, 0.02], efficiency_high = 1.0, efficiency_low = 1.0):
"""Defines all constants of a drivetrain.
Args:
J: float, Moment of inertia of drivetrain in kg m^2
mass: float, Mass of the robot in kg.
robot_radius: float, Radius of the robot, in meters (requires tuning by
hand).
wheel_radius: float, Radius of the wheels, in meters.
G_high: float, Gear ratio for high gear.
G_low: float, Gear ratio for low gear.
dt: float, Control loop time step.
q_pos_low: float, q position low gear.
q_pos_high: float, q position high gear.
q_vel_low: float, q velocity low gear.
q_vel_high: float, q velocity high gear.
motor_type: object, class of values defining the motor in drivetrain.
num_motors: int, number of motors on one side of drivetrain.
controller_poles: array, An array of poles. (See control_loop.py)
observer_poles: array, An array of poles. (See control_loop.py)
"""
self.J = J
self.mass = mass
self.robot_radius = robot_radius
self.wheel_radius = wheel_radius
self.G_high = G_high
self.G_low = G_low
self.dt = dt
self.q_pos_low = q_pos_low
self.q_pos_high = q_pos_high
self.q_vel_low = q_vel_low
self.q_vel_high = q_vel_high
self.motor_type = motor_type
self.num_motors = num_motors
self.controller_poles = controller_poles
self.observer_poles = observer_poles
self.efficiency_high = efficiency_high
self.efficiency_low = efficiency_low
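# Example (all numbers hypothetical, not taken from any particular robot):
# building a DrivetrainParams for use with the Drivetrain/KFDrivetrain classes
# defined below.
#   example_params = DrivetrainParams(
#       J=6.0, mass=68.0, robot_radius=0.3, wheel_radius=0.0508,
#       G_high=5.0, G_low=12.0,
#       q_pos_low=0.12, q_pos_high=0.14, q_vel_low=1.0, q_vel_high=0.95)
#   drivetrain = Drivetrain(example_params, left_low=False, right_low=False)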
class Drivetrain(control_loop.ControlLoop):
def __init__(self, drivetrain_params, name="Drivetrain", left_low=True,
right_low=True):
"""Defines a base drivetrain for a robot.
Args:
drivetrain_params: DrivetrainParams, class of values defining the drivetrain.
name: string, Name of this drivetrain.
left_low: bool, Whether the left side is in low gear.
right_low: bool, Whether the right side is in low gear.
"""
super(Drivetrain, self).__init__(name)
self.right_efficiency = drivetrain_params.efficiency_low if right_low else drivetrain_params.efficiency_high
self.left_efficiency = drivetrain_params.efficiency_low if left_low else drivetrain_params.efficiency_high
# Moment of inertia of the drivetrain in kg m^2
self.J = drivetrain_params.J
# Mass of the robot, in kg.
self.mass = drivetrain_params.mass
# Radius of the robot, in meters (requires tuning by hand)
self.robot_radius = drivetrain_params.robot_radius
# Radius of the wheels, in meters.
self.r = drivetrain_params.wheel_radius
# Gear ratios
self.G_low = drivetrain_params.G_low
self.G_high = drivetrain_params.G_high
if left_low:
self.Gl = self.G_low
else:
self.Gl = self.G_high
if right_low:
self.Gr = self.G_low
else:
self.Gr = self.G_high
# Control loop time step
self.dt = drivetrain_params.dt
self.BuildDrivetrain(drivetrain_params.motor_type, drivetrain_params.num_motors)
if left_low or right_low:
q_pos = drivetrain_params.q_pos_low
q_vel = drivetrain_params.q_vel_low
else:
q_pos = drivetrain_params.q_pos_high
q_vel = drivetrain_params.q_vel_high
self.BuildDrivetrainController(q_pos, q_vel)
self.InitializeState()
def BuildDrivetrain(self, motor, num_motors_per_side):
self.motor = motor
# Number of motors per side
self.num_motors = num_motors_per_side
# Stall Torque in N m
self.stall_torque = motor.stall_torque * self.num_motors * 0.60
# Stall Current in Amps
self.stall_current = motor.stall_current * self.num_motors
# Free Speed in rad/s
self.free_speed = motor.free_speed
# Free Current in Amps
self.free_current = motor.free_current * self.num_motors
# Effective motor resistance in ohms.
self.resistance = 12.0 / self.stall_current
# Resistance of the motor, divided by the number of motors.
# Motor velocity constant
self.Kv = (self.free_speed / (12.0 - self.resistance * self.free_current))
# Torque constant
self.Kt = self.stall_torque / self.stall_current
# These describe the way that a given side of a robot will be influenced
# by the other side. Units of 1 / kg.
self.msp = 1.0 / self.mass + self.robot_radius * self.robot_radius / self.J
self.msn = 1.0 / self.mass - self.robot_radius * self.robot_radius / self.J
# The calculations which we will need for A and B.
self.tcl = self.Kt / self.Kv / (self.Gl * self.Gl * self.resistance * self.r * self.r)
self.tcr = self.Kt / self.Kv / (self.Gr * self.Gr * self.resistance * self.r * self.r)
self.mpl = self.Kt / (self.Gl * self.resistance * self.r)
self.mpr = self.Kt / (self.Gr * self.resistance * self.r)
# State feedback matrices
# X will be of the format
# [[positionl], [velocityl], [positionr], [velocityr]]
self.A_continuous = numpy.matrix(
[[0, 1, 0, 0],
[0, -self.msp * self.tcl, 0, -self.msn * self.tcr],
[0, 0, 0, 1],
[0, -self.msn * self.tcl, 0, -self.msp * self.tcr]])
self.B_continuous = numpy.matrix(
[[0, 0],
[self.left_efficiency * self.msp * self.mpl, self.left_efficiency * self.msn * self.mpr],
[0, 0],
[self.right_efficiency * self.msn * self.mpl, self.right_efficiency * self.msp * self.mpr]])
self.C = numpy.matrix([[1, 0, 0, 0],
[0, 0, 1, 0]])
self.D = numpy.matrix([[0, 0],
[0, 0]])
self.A, self.B = self.ContinuousToDiscrete(
self.A_continuous, self.B_continuous, self.dt)
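# (ContinuousToDiscrete above presumably performs a zero-order-hold
#  discretization: A = expm(A_continuous * dt) and
#  B = (integral of expm(A_continuous * tau) dtau over [0, dt]) * B_continuous)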
def BuildDrivetrainController(self, q_pos, q_vel):
# Tune the LQR controller
self.Q = numpy.matrix([[(1.0 / (q_pos ** 2.0)), 0.0, 0.0, 0.0],
[0.0, (1.0 / (q_vel ** 2.0)), 0.0, 0.0],
[0.0, 0.0, (1.0 / (q_pos ** 2.0)), 0.0],
[0.0, 0.0, 0.0, (1.0 / (q_vel ** 2.0))]])
self.R = numpy.matrix([[(1.0 / (12.0 ** 2.0)), 0.0],
[0.0, (1.0 / (12.0 ** 2.0))]])
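# The Q and R weights above appear to follow Bryson's rule: each diagonal
# entry is 1 / (maximum acceptable value)^2, with 12 V as the control limit.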
self.K = controls.dlqr(self.A, self.B, self.Q, self.R)
glog.debug('DT q_pos %f q_vel %s %s', q_pos, q_vel, self._name)
glog.debug(str(numpy.linalg.eig(self.A - self.B * self.K)[0]))
glog.debug('K %s', repr(self.K))
self.hlp = 0.3
self.llp = 0.4
self.PlaceObserverPoles([self.hlp, self.hlp, self.llp, self.llp])
self.U_max = numpy.matrix([[12.0], [12.0]])
self.U_min = numpy.matrix([[-12.0], [-12.0]])
class KFDrivetrain(Drivetrain):
def __init__(self, drivetrain_params, name="KFDrivetrain",
left_low=True, right_low=True):
"""Kalman filter values of a drivetrain.
Args:
drivetrain_params: DrivetrainParams, class of values defining the drivetrain.
name: string, Name of this drivetrain.
left_low: bool, Whether the left side is in low gear.
right_low: bool, Whether the right side is in low gear.
"""
super(KFDrivetrain, self).__init__(drivetrain_params, name, left_low, right_low)
self.unaugmented_A_continuous = self.A_continuous
self.unaugmented_B_continuous = self.B_continuous
# The practical voltage applied to the wheels is
# V_left = U_left + left_voltage_error
# V_right = U_right + right_voltage_error
#
# The states are
# [left position, left velocity, right position, right velocity,
# left voltage error, right voltage error, angular_error]
#
# The left and right positions are filtered encoder positions and are not
# adjusted for heading error.
# The turn velocity as computed by the left and right velocities is
# adjusted by the gyro velocity.
# The angular_error is the angular velocity error between the wheel speed
# and the gyro speed.
self.A_continuous = numpy.matrix(numpy.zeros((7, 7)))
self.B_continuous = numpy.matrix(numpy.zeros((7, 2)))
self.A_continuous[0:4,0:4] = self.unaugmented_A_continuous
self.A_continuous[0:4,4:6] = self.unaugmented_B_continuous
self.B_continuous[0:4,0:2] = self.unaugmented_B_continuous
self.A_continuous[0,6] = 1
self.A_continuous[2,6] = -1
self.A, self.B = self.ContinuousToDiscrete(
self.A_continuous, self.B_continuous, self.dt)
self.C = numpy.matrix([[1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[0, -0.5 / drivetrain_params.robot_radius, 0, 0.5 / drivetrain_params.robot_radius, 0, 0, 0]])
self.D = numpy.matrix([[0, 0],
[0, 0],
[0, 0]])
q_pos = 0.05
q_vel = 1.00
q_voltage = 10.0
q_encoder_uncertainty = 2.00
self.Q = numpy.matrix([[(q_pos ** 2.0), 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, (q_vel ** 2.0), 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, (q_pos ** 2.0), 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, (q_vel ** 2.0), 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, (q_voltage ** 2.0), 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, (q_voltage ** 2.0), 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, (q_encoder_uncertainty ** 2.0)]])
r_pos = 0.0001
r_gyro = 0.000001
self.R = numpy.matrix([[(r_pos ** 2.0), 0.0, 0.0],
[0.0, (r_pos ** 2.0), 0.0],
[0.0, 0.0, (r_gyro ** 2.0)]])
# Solving for kf gains.
self.KalmanGain, self.Q_steady = controls.kalman(
A=self.A, B=self.B, C=self.C, Q=self.Q, R=self.R)
self.L = self.A * self.KalmanGain
unaug_K = self.K
# Implement a nice closed loop controller for use by the closed loop
# controller.
self.K = numpy.matrix(numpy.zeros((self.B.shape[1], self.A.shape[0])))
self.K[0:2, 0:4] = unaug_K
self.K[0, 4] = 1.0
self.K[1, 5] = 1.0
self.Qff = numpy.matrix(numpy.zeros((4, 4)))
# qff_pos = 0.005
qff_vel = 1.00
# self.Qff[0, 0] = 1.0 / qff_pos ** 2.0
self.Qff[1, 1] = 1.0 / qff_vel ** 2.0
# self.Qff[2, 2] = 1.0 / qff_pos ** 2.0
self.Qff[3, 3] = 1.0 / qff_vel ** 2.0
self.Kff = numpy.matrix(numpy.zeros((2, 7)))
self.Kff[0:2, 0:4] = controls.TwoStateFeedForwards(self.B[0:4,:], self.Qff)
self.InitializeState()
def WriteDrivetrain(drivetrain_files, kf_drivetrain_files, year_namespace,
drivetrain_params):
WriteDrivetrainFullName(drivetrain_files, kf_drivetrain_files,
[year_namespace, 'drivetrain'],
[year_namespace, 'subsystems', 'drivetrain'],
drivetrain_params)
def WriteDrivetrainFullName(drivetrain_files, kf_drivetrain_files,
namespaces, directories, drivetrain_params):
# Write the generated constants out to a file.
drivetrain_low_low = Drivetrain(name="DrivetrainLowLow",
left_low=True, right_low=True, drivetrain_params=drivetrain_params)
drivetrain_low_high = Drivetrain(name="DrivetrainLowHigh",
left_low=True, right_low=False, drivetrain_params=drivetrain_params)
drivetrain_high_low = Drivetrain(name="DrivetrainHighLow",
left_low=False, right_low=True, drivetrain_params=drivetrain_params)
drivetrain_high_high = Drivetrain(name="DrivetrainHighHigh",
left_low=False, right_low=False, drivetrain_params=drivetrain_params)
kf_drivetrain_low_low = KFDrivetrain(name="KFDrivetrainLowLow",
left_low=True, right_low=True, drivetrain_params=drivetrain_params)
kf_drivetrain_low_high = KFDrivetrain(name="KFDrivetrainLowHigh",
left_low=True, right_low=False, drivetrain_params=drivetrain_params)
kf_drivetrain_high_low = KFDrivetrain(name="KFDrivetrainHighLow",
left_low=False, right_low=True, drivetrain_params=drivetrain_params)
kf_drivetrain_high_high = KFDrivetrain(name="KFDrivetrainHighHigh",
left_low=False, right_low=False, drivetrain_params=drivetrain_params)
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['APIKeyHeader'] # noqa: E501
return self.api_client.call_api(
'/servers/{server_id}/zones', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Zone', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_zone(self, server_id, zone_id, **kwargs): # noqa: E501
"""Deletes this zone, all attached metadata and rrsets. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_zone(server_id, zone_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str server_id: The id of the server to retrieve (required)
:param str zone_id: The id of the zone to retrieve (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_zone_with_http_info(server_id, zone_id, **kwargs) # noqa: E501
else:
(data) = self.delete_zone_with_http_info(server_id, zone_id, **kwargs) # noqa: E501
return data
def delete_zone_with_http_info(self, server_id, zone_id, **kwargs): # noqa: E501
"""Deletes this zone, all attached metadata and rrsets. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_zone_with_http_info(server_id, zone_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str server_id: The id of the server to retrieve (required)
:param str zone_id: The id of the zone to retrieve (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['server_id', 'zone_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_zone" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'server_id' is set
if ('server_id' not in params or
params['server_id'] is None):
raise ValueError("Missing the required parameter `server_id` when calling `delete_zone`") # noqa: E501
# verify the required parameter 'zone_id' is set
if ('zone_id' not in params or
params['zone_id'] is None):
raise ValueError("Missing the required parameter `zone_id` when calling `delete_zone`") # noqa: E501
collection_formats = {}
path_params = {}
if 'server_id' in params:
path_params['server_id'] = params['server_id'] # noqa: E501
if 'zone_id' in params:
path_params['zone_id'] = params['zone_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['APIKeyHeader'] # noqa: E501
return self.api_client.call_api(
'/servers/{server_id}/zones/{zone_id}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def list_zone(self, server_id, zone_id, **kwargs): # noqa: E501
"""zone managed by a server # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_zone(server_id, zone_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str server_id: The id of the server to retrieve (required)
:param str zone_id: The id of the zone to retrieve (required)
:param bool rrsets: “true” (default) or “false”, whether to include the “rrsets” in the response Zone object.
:return: Zone
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.list_zone_with_http_info(server_id, zone_id, **kwargs) # noqa: E501
else:
(data) = self.list_zone_with_http_info(server_id, zone_id, **kwargs) # noqa: E501
return data
def list_zone_with_http_info(self, server_id, zone_id, **kwargs): # noqa: E501
"""zone managed by a server # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_zone_with_http_info(server_id, zone_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str server_id: The id of the server to retrieve (required)
:param str zone_id: The id of the zone to retrieve (required)
:param bool rrsets: “true” (default) or “false”, whether to include the “rrsets” in the response Zone object.
:return: Zone
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['server_id', 'zone_id', 'rrsets'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_zone" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'server_id' is set
if ('server_id' not in params or
params['server_id'] is None):
raise ValueError("Missing the required parameter `server_id` when calling `list_zone`") # noqa: E501
# verify the required parameter 'zone_id' is set
if ('zone_id' not in params or
params['zone_id'] is None):
raise ValueError("Missing the required parameter `zone_id` when calling `list_zone`") # noqa: E501
collection_formats = {}
path_params = {}
if 'server_id' in params:
path_params['server_id'] = params['server_id'] # noqa: E501
if 'zone_id' in params:
path_params['zone_id'] = params['zone_id'] # noqa: E501
query_params = []
if 'rrsets' in params:
query_params.append(('rrsets', params['rrsets'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['APIKeyHeader'] # noqa: E501
return self.api_client.call_api(
'/servers/{server_id}/zones/{zone_id}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Zone', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def list_zones(self, server_id, **kwargs): # noqa: E501
"""List all Zones in a server # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_zones(server_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str server_id: The id of the server to retrieve (required)
:param str zone: When set to the name of a zone, only this zone is returned. If no zone with that name exists, the response is an empty array. This can e.g. be used to check if a zone exists in the database without having to guess/encode the zone's id or to check if a zone exists.
:param bool dnssec: “true” (default) or “false”, whether to include the “dnssec” and “edited_serial” fields in the Zone objects. Setting this to “false” will make the query a lot faster.
:return: list[Zone]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.list_zones_with_http_info(server_id, **kwargs) # noqa: E501
else:
(data) = self.list_zones_with_http_info(server_id, **kwargs) # noqa: E501
return data
def list_zones_with_http_info(self, server_id, **kwargs): # noqa: E501
"""List all Zones in a server # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_zones_with_http_info(server_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str server_id: The id of the server to retrieve (required)
:param str zone: When set to the name of a zone, only this zone is returned. If no zone with that name exists, the response is an empty array. This can e.g. be used to check if a zone exists in the database without having to guess/encode the zone's id or to check if a zone exists.
:param bool dnssec: “true” (default) or “false”, whether to include the “dnssec” and “edited_serial” fields in the Zone objects. Setting this to “false” will make the query a lot faster.
:return: list[Zone]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['server_id', 'zone', 'dnssec'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_zones" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'server_id' is set
if ('server_id' not in params or
params['server_id'] is None):
raise ValueError("Missing the required parameter `server_id` when calling `list_zones`") # | |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# This file is part of Karesansui Core.
#
# Copyright (C) 2009-2012 HDE, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import os
import re
import sys
import types
from karesansui.lib.conf import read_conf, write_conf
from karesansui.lib.utils import uniq_sort
from karesansui.lib.utils import preprint_r
from karesansui.lib.collectd.utils import create_plugin_selector, plugin_selector_to_dict
from karesansui.lib.const import VENDOR_PREFIX, KARESANSUI_PREFIX, \
VENDOR_DATA_DIR, \
COUNTUP_DATABASE_PATH, COLLECTD_LOG_DIR, \
COLLECTD_DF_RRPORT_BY_DEVICE
from karesansui.lib.parser.collectdplugin import PARSER_COLLECTD_PLUGIN_DIR
MODULE = "collectd"
DictOp = None
DEFAULT_KARESANSUI_CONF = "/etc/karesansui/application.conf"
COLLECTD_PLUGIN_DIR = "%s/lib64/collectd" % VENDOR_PREFIX
if not os.path.exists(COLLECTD_PLUGIN_DIR):  # fall back to lib/ when lib64/ is absent
COLLECTD_PLUGIN_DIR = "%s/lib/collectd" % VENDOR_PREFIX
COLLECTD_SHARE_DIR = "%s/share/collectd" % VENDOR_PREFIX
KARESANSUI_PYTHON_PATH = "%s/lib/python" % KARESANSUI_PREFIX
COLLECTD_PYTHON_MODULE_DIR = "%s/karesansui/lib/collectd" % KARESANSUI_PYTHON_PATH
COLLECTD_DATA_DIR = "%s/collectd" % VENDOR_DATA_DIR
COLLECTD_PID_FILE = "/var/run/collectd.pid"
#COLLECTD_PLUGINS = ["cpu", "df", "disk", "exec", "interface", "iptables", "libvirt", "load", "logfile", "memory", "network", "python", "rrdcached", "rrdtool", "sensors", "snmp", "syslog", "tail", "uptime", "users"]
#COLLECTD_PLUGINS = ["cpu", "df", "disk", "interface", "libvirt", "load", "logfile", "memory", "python", "rrdcached", "rrdtool", "sensors", "syslog"]
COLLECTD_PLUGINS = ["cpu", "df", "disk", "interface", "libvirt", "load", "logfile", "memory", "python", "rrdtool"]
def _get_collectd_config(webobj=None, host=None):
modules = ["collectd","collectdplugin"]
dop = read_conf(modules, webobj, host)
if dop is False:
return False
return dop
DictOp = _get_collectd_config()
def get_collectd_param(param=None, section=None, dop=None, webobj=None, host=None):
global DictOp
retval = None
if dop is None:
if isinstance(DictOp, types.InstanceType) and DictOp.__class__.__name__ == "DictOp":
dop = DictOp
else:
dop = _get_collectd_config(webobj, host)
if section is None:
if dop.cdp_isset("collectd",[param]) is True:
return dop.cdp_get("collectd",[param])
else:
if not "collectdplugin" in dop.ModuleNames:
dop.addconf("collectdplugin",{})
from karesansui.lib.parser.collectdplugin import collectdpluginParser
if dop.cdp_isset("collectdplugin",[section,"Plugin",section],multiple_file=True) is False:
extra_args = {"include":"^(%s)$" % section}
new_conf_arr = collectdpluginParser().read_conf(extra_args)
for _k,_v in new_conf_arr.items():
if _k[0:1] != "@":
dop.set("collectdplugin",[_k],_v['value'])
if dop.cdp_isset("collectdplugin",[section,"Plugin",section,param],multiple_file=True) is True:
return dop.cdp_get("collectdplugin",[section,"Plugin",section,param],multiple_file=True)
def plugin_list(webobj=None, host=None, dop=None):
global DictOp
retval = []
if dop is None:
if isinstance(DictOp, types.InstanceType) and DictOp.__class__.__name__ == "DictOp":
dop = DictOp
else:
dop = _get_collectd_config(webobj, host)
try:
load_plugins = dop.query("collectd",["LoadPlugin"])
if load_plugins is False:
load_plugins = []
except:
load_plugins = []
try:
plugins = dop.getconf("collectdplugin")
for _k,_v in plugins.items():
_load_plugins = dop.query("collectdplugin",[_k,"LoadPlugin"])
if type(_load_plugins) is list or (_load_plugins is not False and len(_load_plugins) > 0):
load_plugins = load_plugins + _load_plugins
del plugins
except:
pass
retval = uniq_sort(load_plugins)
return retval
def active_plugin_list(webobj=None, host=None, dop=None):
global DictOp
retval = []
if dop is None:
if isinstance(DictOp, types.InstanceType) and DictOp.__class__.__name__ == "DictOp":
dop = DictOp
else:
dop = _get_collectd_config(webobj, host)
    list = plugin_list(webobj=webobj, host=host, dop=dop)
#preprint_r(list)
for plugin_name in list:
iscomment = True
if dop.isset("collectd",["LoadPlugin",plugin_name]) is True:
iscomment = dop.iscomment("collectd",["LoadPlugin",plugin_name])
if iscomment is True:
plugins = dop.getconf("collectdplugin")
for _k,_v in plugins.items():
if dop.isset("collectdplugin",[_k,"LoadPlugin",plugin_name]) is True:
iscomment = dop.iscomment("collectdplugin",[_k,"LoadPlugin",plugin_name])
break
del plugins
if iscomment is False:
retval.append(plugin_name)
return retval
def inactive_plugin_list(dop=None, webobj=None, host=None):
global DictOp
retval = []
if dop is None:
if isinstance(DictOp, types.InstanceType) and DictOp.__class__.__name__ == "DictOp":
dop = DictOp
else:
dop = _get_collectd_config(webobj, host)
    list = plugin_list(webobj=webobj, host=host, dop=dop)
    active = active_plugin_list(webobj=webobj, host=host, dop=dop)
for plugin_name in list:
if not plugin_name in active:
retval.append(plugin_name)
return retval
def is_enabled_plugin(plugin_name, dop=None, webobj=None, host=None):
global DictOp
retval = False
if dop is None:
if isinstance(DictOp, types.InstanceType) and DictOp.__class__.__name__ == "DictOp":
dop = DictOp
else:
dop = _get_collectd_config(webobj, host)
    active = active_plugin_list(webobj=webobj, host=host, dop=dop)
if plugin_name in active:
retval = True
return retval
def enabled_plugin(plugin_name, dop=None, webobj=None, host=None):
global DictOp
retval = False
if dop is None:
if isinstance(DictOp, types.InstanceType) and DictOp.__class__.__name__ == "DictOp":
dop = DictOp
else:
dop = _get_collectd_config(webobj, host)
    active = active_plugin_list(webobj=webobj, host=host, dop=dop)
if not plugin_name in active:
if dop.cdp_isset("collectd",["LoadPlugin",plugin_name]) is True:
retval = dop.cdp_uncomment("collectd",["LoadPlugin",plugin_name])
else:
plugins = dop.getconf("collectdplugin")
for _k,_v in plugins.items():
if dop.cdp_isset("collectdplugin",[_k,"LoadPlugin",plugin_name],multiple_file=True) is True:
retval = dop.cdp_uncomment("collectdplugin",[_k,"LoadPlugin",plugin_name],multiple_file=True)
break
del plugins
return retval
def disabled_plugin(plugin_name, dop=None, webobj=None, host=None):
global DictOp
retval = False
if dop is None:
if isinstance(DictOp, types.InstanceType) and DictOp.__class__.__name__ == "DictOp":
dop = DictOp
else:
dop = _get_collectd_config(webobj, host)
    active = active_plugin_list(webobj=webobj, host=host, dop=dop)
if plugin_name in active:
if dop.cdp_isset("collectd",["LoadPlugin",plugin_name]) is True:
retval = dop.cdp_comment("collectd",["LoadPlugin",plugin_name])
else:
plugins = dop.getconf("collectdplugin")
for _k,_v in plugins.items():
if dop.cdp_isset("collectdplugin",[_k,"LoadPlugin",plugin_name],multiple_file=True) is True:
retval = dop.cdp_comment("collectdplugin",[_k,"LoadPlugin",plugin_name],multiple_file=True)
break
del plugins
return retval
def get_global_parameter(name, dop=None, webobj=None, host=None):
global DictOp
if dop is None:
if isinstance(DictOp, types.InstanceType) and DictOp.__class__.__name__ == "DictOp":
dop = DictOp
else:
dop = _get_collectd_config(webobj, host)
retval = dop.cdp_get("collectd",[name])
if retval is False:
retval = dop.get("collectd",[name])
return retval
def set_global_parameter(name, value, dop=None, webobj=None, host=None, is_cdp=True):
global DictOp
if dop is None:
if isinstance(DictOp, types.InstanceType) and DictOp.__class__.__name__ == "DictOp":
dop = DictOp
else:
dop = _get_collectd_config(webobj, host)
if is_cdp is True:
return dop.cdp_set("collectd",[name],value)
else:
return dop.set("collectd",[name],value)
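# Illustrative round trip (an assumption, not part of the original module): reading and
# updating a global collectd option with the two helpers above. Persisting the change to
# disk with write_conf() is shown in the sketch after disable_python_plugin() below.
def _example_update_interval(value="30", dop=None, webobj=None, host=None):
    current = get_global_parameter("Interval", dop=dop, webobj=webobj, host=host)
    if current != value:
        set_global_parameter("Interval", value, dop=dop, webobj=webobj, host=host)
    return get_global_parameter("Interval", dop=dop, webobj=webobj, host=host)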
def where_is_plugin(plugin_name, dop=None, webobj=None, host=None):
global DictOp
retval = False
if dop is None:
if isinstance(DictOp, types.InstanceType) and DictOp.__class__.__name__ == "DictOp":
dop = DictOp
else:
dop = _get_collectd_config(webobj, host)
keyword = "LoadPlugin"
keyword = "Plugin"
if dop.cdp_isset("collectd",[keyword,plugin_name]) is True:
retval = "@global"
plugins = dop.getconf("collectdplugin")
for _k,_v in plugins.items():
if dop.cdp_isset("collectdplugin",[_k,keyword,plugin_name]) is True:
retval = _k
break
del plugins
return retval
def switch_python_plugin(flag=True, dop=None, webobj=None, host=None):
global DictOp
retval = False
if dop is None:
if isinstance(DictOp, types.InstanceType) and DictOp.__class__.__name__ == "DictOp":
dop = DictOp
else:
dop = _get_collectd_config(webobj, host)
configName = "python"
    # Load the plugin configuration if it has not been read in yet
from karesansui.lib.parser.collectdplugin import collectdpluginParser
if dop.isset("collectdplugin",[configName]) is False:
extra_args = {"include":"^(%s)$" % configName}
new_conf_arr = collectdpluginParser().read_conf(extra_args)
for _k,_v in new_conf_arr.items():
if _k[0:1] != "@":
dop.set("collectdplugin",[_k],_v['value'])
dop.cdp_set("collectdplugin",[configName,"LoadPlugin","python","Globals"],"true",multiple_file=True)
_keys = [configName,"Plugin","python"]
keys = _keys + ["ModulePath"]
value = "\"%s\"" % COLLECTD_PYTHON_MODULE_DIR
dop.cdp_set("collectdplugin",keys,value,multiple_file=True)
keys = _keys + ["Encoding"]
value = "utf-8"
dop.cdp_set("collectdplugin",keys,value,multiple_file=True)
keys = _keys + ["LogTraces"]
value = "true"
dop.cdp_set("collectdplugin",keys,value,multiple_file=True)
keys = _keys + ["Interactive"]
value = "false"
dop.cdp_comment("collectdplugin",keys,multiple_file=True)
dop.cdp_set("collectdplugin",keys,value,multiple_file=True)
keys = _keys + ["Import"]
value = "\"notification\""
dop.cdp_set("collectdplugin",keys,value,multiple_file=True)
keys = _keys + ["Module","notification","CountupDBPath"]
value = "\"%s\"" % COUNTUP_DATABASE_PATH
dop.cdp_set("collectdplugin",keys,value,multiple_file=True)
keys = _keys + ["Module","notification","LogFile"]
value = "\"%s/notification.log\"" % COLLECTD_LOG_DIR
dop.cdp_set("collectdplugin",keys,value,multiple_file=True)
keys = _keys + ["Module","notification","LogLevel"]
value = "7"
dop.cdp_set("collectdplugin",keys,value,multiple_file=True)
keys = _keys + ["Module","notification","Environ"]
envs = []
try:
envs.append("LANG=%s" % os.environ["LANG"])
except:
pass
try:
envs.append("KARESANSUI_CONF=%s" % os.environ["KARESANSUI_CONF"])
except:
envs.append("KARESANSUI_CONF=%s" % DEFAULT_KARESANSUI_CONF)
pass
value = "\"" + "\" \"".join(envs) + "\""
dop.cdp_set("collectdplugin",keys,value,multiple_file=True)
if flag is True:
dop.cdp_uncomment("collectdplugin",[configName,"LoadPlugin","python"],recursive=True,multiple_file=True)
dop.cdp_uncomment("collectdplugin",_keys,recursive=True,multiple_file=True)
else:
dop.cdp_comment("collectdplugin",[configName,"LoadPlugin","python"],recursive=True,multiple_file=True)
dop.cdp_comment("collectdplugin",_keys,recursive=True,multiple_file=True)
def enable_python_plugin(dop=None, webobj=None, host=None):
global DictOp
retval = False
if dop is None:
if isinstance(DictOp, types.InstanceType) and DictOp.__class__.__name__ == "DictOp":
dop = DictOp
else:
dop = _get_collectd_config(webobj, host)
switch_python_plugin(flag=True, dop=dop, webobj=webobj, host=host)
def disable_python_plugin(dop=None, webobj=None, host=None):
global DictOp
retval = False
if dop is None:
if isinstance(DictOp, types.InstanceType) and DictOp.__class__.__name__ == "DictOp":
dop = DictOp
else:
dop = _get_collectd_config(webobj, host)
switch_python_plugin(flag=False, dop=dop, webobj=webobj, host=host)
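# Illustrative end-to-end sketch (an assumption, not part of the original module): the usual
# flow is to obtain a DictOp with _get_collectd_config(), toggle the plugin with the helpers
# above, then persist the change with write_conf() (imported at the top of this module); the
# exact write_conf() argument order used here is assumed.
def _example_enable_and_save_python_plugin(webobj=None, host=None):
    dop = _get_collectd_config(webobj, host)
    if dop is False:
        return False
    enable_python_plugin(dop=dop, webobj=webobj, host=host)
    return write_conf(dop, webobj, host)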
def switch_syslog_plugin(flag=True, dop=None, webobj=None, host=None):
global DictOp
retval = False
if dop is None:
if isinstance(DictOp, types.InstanceType) and DictOp.__class__.__name__ == "DictOp":
dop = DictOp
else:
dop = _get_collectd_config(webobj, host)
configName = "syslog"
dop.cdp_set("collectdplugin",[configName,"LoadPlugin","syslog"],"syslog",multiple_file=True,is_opt_multi=True)
_keys = [configName,"Plugin","syslog"]
keys = _keys + ["LogLevel"]
value = "\"info\"" # debug|info|notice|warning|err
dop.cdp_set("collectdplugin",keys,value,multiple_file=True)
if flag is True:
dop.cdp_uncomment("collectdplugin",[configName,"LoadPlugin","syslog"],recursive=True,multiple_file=True)
dop.cdp_uncomment("collectdplugin",_keys,recursive=True,multiple_file=True)
else:
dop.cdp_comment("collectdplugin",[configName,"LoadPlugin","syslog"],recursive=True,multiple_file=True)
dop.cdp_comment("collectdplugin",_keys,recursive=True,multiple_file=True)
def enable_syslog_plugin(dop=None, webobj=None, host=None):
global DictOp
retval = False
if dop is None:
if isinstance(DictOp, types.InstanceType) and DictOp.__class__.__name__ == "DictOp":
dop = DictOp
else:
dop = _get_collectd_config(webobj, host)
switch_syslog_plugin(flag=True, dop=dop, webobj=webobj, host=host)
def disable_syslog_plugin(dop=None, webobj=None, host=None):
global DictOp
retval = False
if dop is None:
if isinstance(DictOp, types.InstanceType) and DictOp.__class__.__name__ == "DictOp":
dop = DictOp
else:
dop = _get_collectd_config(webobj, host)
switch_syslog_plugin(flag=False, dop=dop, webobj=webobj, host=host)
def switch_logfile_plugin(flag=True, dop=None, webobj=None, host=None):
global DictOp
retval = False
if dop is None:
if isinstance(DictOp, types.InstanceType) and DictOp.__class__.__name__ == "DictOp":
dop = | |
from __future__ import print_function
import time
import numpy as np
import tqdm
import global_vars as Global
from datasets import MirroredDataset
from utils.iterative_trainer import IterativeTrainerConfig
from utils.logger import Logger
from termcolor import colored
from torch.utils.data.dataloader import DataLoader
import torch
import os
from torch.autograd import Variable
import torch.backends.cudnn as cudnn
import torchvision.transforms as trn
import torchvision.datasets as dset
import torch.nn.functional as F
import torch.nn as nn
from sklearn.metrics import roc_auc_score, auc, precision_recall_curve
from methods import AbstractMethodInterface
class Energy(AbstractMethodInterface):
def __init__(self, args):
super(Energy, self).__init__()
self.base_model = None
self.H_class = None
self.args = args
self.class_count = 0
self.default_model = 0
self.add_identifier = ""
self.known_loader = None
self.unknown_loader = None
self.train_loader = None
self.train_dataset_name = ""
self.valid_dataset_name = ""
self.test_dataset_name = ""
self.train_dataset_length = 0
self.seed = 1
self.model_name = ""
self.workspace_dir = "workspace/energy"
def propose_H(self, dataset, mirror=True):
config = self.get_H_config(dataset, mirror)
from models import get_ref_model_path
h_path = get_ref_model_path(self.args, config.model.__class__.__name__, dataset.name)
self.best_h_path = os.path.join(h_path, 'model.best.pth')
# trainer = IterativeTrainer(config, self.args)
if not os.path.isfile(self.best_h_path):
raise NotImplementedError("Please use model_setup to pretrain the networks first!")
else:
print(colored('Loading H1 model from %s' % self.best_h_path, 'red'))
config.model.load_state_dict(torch.load(self.best_h_path))
self.base_model = config.model
self.base_model.eval()
self.class_count = self.base_model.output_size()[1].item()
self.add_identifier = self.base_model.__class__.__name__
self.train_dataset_name = dataset.name
self.model_name = "VGG" if self.add_identifier.find("VGG") >= 0 else "Resnet"
if hasattr(self.base_model, 'preferred_name'):
self.add_identifier = self.base_model.preferred_name()
def method_identifier(self):
output = "Energy"
# if len(self.add_identifier) > 0:
# output = output + "/" + self.add_identifier
return output
def get_H_config(self, dataset, mirror):
if self.args.D1 in Global.mirror_augment and mirror:
print(colored("Mirror augmenting %s" % self.args.D1, 'green'))
new_train_ds = dataset + MirroredDataset(dataset)
dataset = new_train_ds
self.train_loader = DataLoader(dataset, batch_size=self.args.batch_size, num_workers=self.args.workers,
pin_memory=True, shuffle=True)
self.train_dataset_length = len(dataset)
self.input_shape = iter(dataset).__next__()[0].shape
# Set up the model
model = Global.get_ref_classifier(self.args.D1)[self.default_model]().to(self.args.device)
# model.forward()
# Set up the config
config = IterativeTrainerConfig()
base_model_name = self.base_model.__class__.__name__
if hasattr(self.base_model, 'preferred_name'):
base_model_name = self.base_model.preferred_name()
config.name = '_%s[%s](%s->%s)' % (self.__class__.__name__, base_model_name, self.args.D1, self.args.D2)
config.train_loader = self.train_loader
config.visualize = not self.args.no_visualize
config.model = model
config.logger = Logger()
return config
def train_H(self, dataset):
self.known_loader = DataLoader(dataset.datasets[0], batch_size=self.args.batch_size, shuffle=True,
num_workers=self.args.workers,
pin_memory=True)
self.unknown_loader = DataLoader(dataset.datasets[1], batch_size=self.args.batch_size, shuffle=False,
num_workers=self.args.workers,
pin_memory=True)
self.valid_dataset_name = dataset.datasets[1].name
self.valid_dataset_length = len(dataset.datasets[0])
best_acc = 0
epochs = 10
for m_in in [-23]:
for m_out in [-5]:
self._fine_tune_model(epochs=epochs, m_in=m_in, m_out=m_out)
acc = self._find_threshold()
self.base_model.load_state_dict(torch.load(self.best_h_path))
if acc > best_acc:
best_acc = acc
self.m_in = m_in
self.m_out = m_out
model_path = os.path.join(os.path.join(self.workspace_dir,
self.train_dataset_name + '_' + self.valid_dataset_name + '_' + self.model_name + '_s' + str(
self.seed) + '_min' + str(self.m_in) + '_mout' + str(self.m_out) +
'_epoch_' + str(epochs - 1) + '.pt'))
if os.path.exists(model_path):
self.base_model.load_state_dict(torch.load(model_path))
return
def test_H(self, dataset):
self.base_model.eval()
with tqdm.tqdm(total=len(dataset)) as pbar:
with torch.no_grad():
for t in [1]:
correct = 0.0
all_probs = np.array([])
labels = np.array([])
dataset_iter = DataLoader(dataset, batch_size=self.args.batch_size, shuffle=False,
num_workers=self.args.workers, pin_memory=True)
                    # NOTE: timing-only short-circuit: measure per-sample inference time and
                    # skip the accuracy/AUROC evaluation below.
                    self._generate_execution_times(dataset_iter)
                    return 0, 0, 0
counter = 0
for i, (image, label) in enumerate(dataset_iter):
pbar.update()
counter += 1
# Get and prepare data.
input, target = image.to(self.args.device), label.to(self.args.device)
logits = self.base_model(input, softmax=False)
scores = self._get_energy_score(logits, temperature=t)
classification = np.where(scores > self.threshold, 1, 0)
correct += (classification == label.numpy()).sum()
if all_probs.size:
labels = np.concatenate((labels, label))
all_probs = np.concatenate((all_probs, scores))
else:
labels = label
all_probs = scores
auroc = roc_auc_score(labels, all_probs)
p, r, _ = precision_recall_curve(labels, all_probs)
aupr = auc(r, p)
print("Final Test average accuracy %s" % (
colored('%.4f%%' % (correct / labels.shape[0] * 100), 'red')))
print(f"Auroc: {auroc} aupr: {aupr}")
print(counter)
return correct / labels.shape[0], auroc, aupr
def _cosine_annealing(self, step, total_steps, lr_max, lr_min):
return lr_min + (lr_max - lr_min) * 0.5 * (
1 + np.cos(step / total_steps * np.pi))
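    # Worked example (illustrative): with lr_max=1 and lr_min=1e-6/0.001, as used by the
    # LambdaLR scheduler in _fine_tune_model below, the factor returned above decays from
    #     step = 0             -> lr_max
    #     step = total_steps/2 -> (lr_max + lr_min) / 2
    #     step = total_steps   -> lr_min
    # and LambdaLR multiplies the optimizer's base learning rate (0.001) by it every step.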
def _fine_tune_model(self, epochs, m_in, m_out):
model_path = os.path.join(os.path.join(self.workspace_dir,
self.train_dataset_name + '_' + self.valid_dataset_name + '_' + self.model_name + '_s' + str(
self.seed) + '_min' + str(m_in) + '_mout' + str(
m_out) + '_epoch_' + str(epochs - 1) + '.pt'))
if os.path.exists(model_path):
self.base_model.load_state_dict(torch.load(model_path))
return
if not os.path.exists(self.workspace_dir):
os.makedirs(self.workspace_dir)
if not os.path.isdir(self.workspace_dir):
raise Exception('%s is not a dir' % self.workspace_dir)
torch.manual_seed(self.seed)
np.random.seed(self.seed)
with open(os.path.join(self.workspace_dir,
self.train_dataset_name + '_' + self.valid_dataset_name + '_' + self.model_name + '_s' + str(
self.seed) + '_min' + str(m_in) + '_mout' + str(
m_out) +
'_training_results.csv'), 'w') as f:
f.write('epoch,time(s),train_loss,test_loss,test_error(%)\n')
print('Beginning Training\n')
self.optimizer = torch.optim.SGD(
self.base_model.parameters(), 0.001, momentum=0.9,
weight_decay=0.0005, nesterov=True)
self.scheduler = torch.optim.lr_scheduler.LambdaLR(self.optimizer,
lr_lambda=lambda step: self._cosine_annealing(step,
10 * self.valid_dataset_length,
1,
1e-6 / 0.001))
# Main loop
for epoch in range(0, epochs):
self.epoch = epoch
begin_epoch = time.time()
self._train_epoch(m_in=m_in, m_out=m_out)
self._eval_model()
# Save model
torch.save(self.base_model.state_dict(),
os.path.join(os.path.join(self.workspace_dir,
self.train_dataset_name + '_' + self.valid_dataset_name + '_' + self.model_name + '_s' + str(
self.seed) + '_min' + str(m_in) + '_mout' + str(
m_out) +
'_epoch_' + str(epoch) + '.pt')))
# Let us not waste space and delete the previous model
prev_path = os.path.join(os.path.join(self.workspace_dir,
self.train_dataset_name + '_' + self.valid_dataset_name + '_' + self.model_name + '_s' + str(
self.seed) + '_min' + str(m_in) + '_mout' + str(
m_out) +
'_epoch_' + str(epoch - 1) + '.pt'))
if os.path.exists(prev_path): os.remove(prev_path)
# Show results
with open(
os.path.join(self.workspace_dir,
self.train_dataset_name + '_' + self.valid_dataset_name + '_' + self.model_name + '_s' + str(
self.seed) + '_min' + str(m_in) + '_mout' + str(
m_out) +
'_training_results.csv'), 'a') as f:
f.write('%03d,%05d,%0.6f,%0.5f,%0.2f\n' % (
(epoch + 1),
time.time() - begin_epoch,
self._train_loss,
self._test_loss,
100 - 100. * self._test_accuracy,
))
# # print state with rounded decimals
# print({k: round(v, 4) if isinstance(v, float) else v for k, v in state.items()})
print('Epoch {0:3d} | Time {1:5d} | Train Loss {2:.4f} | Test Loss {3:.3f} | Test Error {4:.2f}'.format(
(epoch + 1),
int(time.time() - begin_epoch),
self._train_loss,
self._test_loss,
100 - 100. * self._test_accuracy,
))
def _train_epoch(self, m_in, m_out):
self.base_model.train() # enter train mode
loss_avg = 0.0
# start at a random point of the outlier dataset; this induces more randomness without obliterating locality
self.unknown_loader.dataset.offset = np.random.randint(self.valid_dataset_length)
for in_set, out_set in zip(self.train_loader, self.unknown_loader):
data = torch.cat((in_set[0], out_set[0]), 0)
target = in_set[1]
data, target = data.cuda(), target.cuda()
# forward
x = self.base_model(data, softmax=False)
# backward
self.scheduler.step()
self.optimizer.zero_grad()
loss = F.cross_entropy(x[:len(in_set[0])], target)
# cross-entropy from softmax distribution to uniform distribution
Ec_out = -torch.logsumexp(x[len(in_set[0]):], dim=1)
Ec_in = -torch.logsumexp(x[:len(in_set[0])], dim=1)
loss += 0.1 * (torch.pow(F.relu(Ec_in - m_in), 2).mean() + torch.pow(F.relu(m_out - Ec_out),
2).mean())
loss.backward()
self.optimizer.step()
# exponential moving average
loss_avg = loss_avg * 0.8 + float(loss) * 0.2
self._train_loss = loss_avg
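    # Worked example (illustrative) for the energy-margin term in _train_epoch above: with
    # m_in=-23 and m_out=-5 (the values tried in train_H), an in-distribution sample with
    # energy Ec_in=-25 and an outlier with Ec_out=-3 contribute nothing (both margins are
    # already satisfied), whereas Ec_in=-20 adds relu(-20 - (-23))**2 = 9 and Ec_out=-7 adds
    # relu(-5 - (-7))**2 = 4, each scaled by 0.1 before being added to the cross-entropy loss.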
def _eval_model(self):
self.base_model.eval()
loss_avg = 0.0
correct = 0
with torch.no_grad():
for data, target in self.train_loader:
data, target = data.cuda(), target.cuda()
# forward
output = self.base_model(data, softmax=False)
loss = F.cross_entropy(output, target)
# accuracy
pred = output.data.max(1)[1]
# print(f"data {data.shape} output: {output.shape} pred: {pred.shape} targetL {target.shape} f {target.data} f {pred.eq(target.data).sum()}")
correct += pred.eq(target.data).sum().item()
# test loss average
loss_avg += float(loss.data)
self._test_loss = loss_avg / self.train_dataset_length
print(f"correct {correct} len: {self.train_dataset_length}")
self._test_accuracy = correct / self.train_dataset_length
def _find_threshold(self):
scores_known = np.array([])
scores_unknown = np.array([])
with torch.no_grad():
for i, (image, label) in enumerate(self.known_loader):
# Get and prepare data.
input, target = image.to(self.args.device), label.to(self.args.device)
logits = self.base_model(input, softmax=False)
scores = self._get_energy_score(logits, temperature=1)
if scores_known.size:
scores_known = np.concatenate((scores_known, scores))
else:
scores_known = scores
for i, (image, label) in enumerate(self.unknown_loader):
# Get and prepare data.
input, target = image.to(self.args.device), label.to(self.args.device)
logits = self.base_model(input, softmax=False)
scores = self._get_energy_score(logits, temperature=1)
if scores_unknown.size:
scores_unknown = np.concatenate((scores_unknown, scores))
else:
scores_unknown = scores
        score_min = np.max([scores_unknown.min(), scores_known.min()])
        score_max = np.min([scores_unknown.max(), scores_known.max()])
cut_threshold = np.quantile(scores_known, .95)
cut_correct_count = (scores_unknown > cut_threshold).sum()
cut_correct_count += (scores_known <= cut_threshold).sum()
best_correct_count = 0
best_threshold = 0
        for i in np.linspace(score_min, score_max, num=1000):
correct_count = 0
correct_count += (scores_unknown > i).sum()
correct_count += (scores_known <= i).sum()
if best_correct_count < correct_count:
best_correct_count = correct_count
best_threshold = i
if best_threshold > cut_threshold:
best_correct_count = cut_correct_count
best_threshold = cut_threshold
self.threshold = best_threshold
acc = best_correct_count / (scores_known.shape[0] * 2)
print(f"Best th: {best_threshold} acc: {acc}")
return acc
def _get_energy_score(self, logits, temperature=1):
scores = -(temperature * torch.logsumexp(logits.data.cpu() / temperature, dim=1).numpy())
return scores
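    # Worked example (illustrative): for a single sample with logits [2.0, 0.0, -1.0] and
    # temperature T=1, the score above is -logsumexp([2.0, 0.0, -1.0]) ~= -2.17; peaked,
    # confident logits give lower (more negative) energy while diffuse logits give scores
    # closer to zero, and _find_threshold/test_H flag a sample as OOD when score > threshold.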
def _generate_execution_times(self, loader):
import time
import numpy as np
n_times = 1000
exec_times = np.ones(n_times)
trainiter = iter(loader)
x = trainiter.__next__()[0][0].unsqueeze(0).to(self.args.device)
with torch.no_grad():
for i in range(n_times):
start_time = time.time()
logits = self.base_model(x, softmax=False)
scores = self._get_energy_score(logits, temperature=1)
_ = np.where(scores > self.threshold, 1, 0)
exec_times[i] = time.time() - start_time
exec_times = exec_times.mean()
np.savez("results/article_plots/execution_times/" + self.method_identifier() + "_" + | |
financial model storing input financial parameters
:return: float, LCOE in US dollars per kWh
"""
years = financials.analysis_years # length of financial life
if financials.third_party_ownership:
discount_pct = financials.owner_discount_pct
federal_tax_pct = financials.owner_tax_pct
else:
discount_pct = financials.offtaker_discount_pct
federal_tax_pct = financials.offtaker_tax_pct
new_kw = (tech_results_dict.get('size_kw') or 0) - (tech_inputs_dict.get('existing_kw') or 0) # new capacity
if new_kw == 0:
return None
capital_costs = new_kw * tech_inputs_dict['installed_cost_us_dollars_per_kw'] # pre-incentive capital costs
        annual_om = new_kw * tech_inputs_dict['om_cost_us_dollars_per_kw']  # year-one O&M cost; the escalated series is discounted to an NPV below
om_series = [annual_om * (1+financials.om_cost_escalation_pct)**yr for yr in range(1, years+1)]
npv_om = sum([om * (1.0/(1.0+discount_pct))**yr for yr, om in enumerate(om_series,1)])
#Incentives as calculated in the spreadsheet, note utility incentives are applied before state incentives
utility_ibi = min(capital_costs * tech_inputs_dict['utility_ibi_pct'], tech_inputs_dict['utility_ibi_max_us_dollars'])
utility_cbi = min(new_kw * tech_inputs_dict['utility_rebate_us_dollars_per_kw'], tech_inputs_dict['utility_rebate_max_us_dollars'])
state_ibi = min((capital_costs - utility_ibi - utility_cbi) * tech_inputs_dict['state_ibi_pct'], tech_inputs_dict['state_ibi_max_us_dollars'])
state_cbi = min(new_kw * tech_inputs_dict['state_rebate_us_dollars_per_kw'], tech_inputs_dict['state_rebate_max_us_dollars'])
federal_cbi = new_kw * tech_inputs_dict['federal_rebate_us_dollars_per_kw']
ibi = utility_ibi + state_ibi #total investment-based incentives
cbi = utility_cbi + federal_cbi + state_cbi #total capacity-based incentives
#calculate energy in the BAU case, used twice later on
if 'year_one_energy_produced_bau_kwh' in tech_results_dict.keys():
existing_energy_bau = tech_results_dict['year_one_energy_produced_bau_kwh'] or 0
else:
existing_energy_bau = 0
#calculate the value of the production-based incentive stream
npv_pbi = 0
if tech_inputs_dict['pbi_max_us_dollars'] > 0:
for yr in range(years):
if yr < tech_inputs_dict['pbi_years']:
                    degradation_factor = (1 - (tech_inputs_dict.get('degradation_pct') or 0))**yr
                    base_pbi = min(tech_inputs_dict['pbi_us_dollars_per_kwh'] * \
                        ((tech_results_dict['year_one_energy_produced_kwh'] or 0) - existing_energy_bau) * \
                        degradation_factor, tech_inputs_dict['pbi_max_us_dollars'] * degradation_factor)
base_pbi = base_pbi * (1.0/(1.0+discount_pct))**(yr+1)
npv_pbi += base_pbi
npv_federal_itc = 0
depreciation_schedule = np.array([0.0 for _ in range(years)])
if tech_inputs_dict['macrs_option_years'] in [5,7]:
if tech_inputs_dict['macrs_option_years'] == 5:
schedule = macrs_five_year
if tech_inputs_dict['macrs_option_years'] == 7:
schedule = macrs_seven_year
federal_itc_basis = capital_costs - state_ibi - utility_ibi - state_cbi - utility_cbi - federal_cbi
federal_itc_amount = tech_inputs_dict['federal_itc_pct'] * federal_itc_basis
npv_federal_itc = federal_itc_amount * (1.0/(1.0+discount_pct))
macrs_bonus_basis = federal_itc_basis - (federal_itc_basis * tech_inputs_dict['federal_itc_pct'] * tech_inputs_dict['macrs_itc_reduction'])
macrs_basis = macrs_bonus_basis * (1 - tech_inputs_dict['macrs_bonus_pct'])
for i,r in enumerate(schedule):
if i < len(depreciation_schedule):
depreciation_schedule[i] = macrs_basis * r
depreciation_schedule[0] += (tech_inputs_dict['macrs_bonus_pct'] * macrs_bonus_basis)
tax_deductions = (np.array(om_series) + np.array(depreciation_schedule)) * federal_tax_pct
npv_tax_deductions = sum([i* (1.0/(1.0+discount_pct))**yr for yr,i in enumerate(tax_deductions,1)])
#we only care about the energy produced by new capacity in LCOE calcs
annual_energy = (tech_results_dict['year_one_energy_produced_kwh'] or 0) - existing_energy_bau
npv_annual_energy = sum([annual_energy * ((1.0/(1.0+discount_pct))**yr) * \
(1- (tech_inputs_dict.get('degradation_pct') or 0))**(yr-1) for yr, i in enumerate(tax_deductions,1)])
#LCOE is calculated as annualized costs divided by annualized energy
lcoe = (capital_costs + npv_om - npv_pbi - cbi - ibi - npv_federal_itc - npv_tax_deductions ) / \
(npv_annual_energy)
return round(lcoe,4)
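    # Worked example (illustrative; every figure below is an assumption for illustration,
    # not a REopt default): 100 kW of new capacity at $1,600/kW installed gives
    # capital_costs = 160,000; $16/kW-yr O&M discounted over 25 years is roughly a 16,000
    # NPV; with no incentives or depreciation and ~150,000 new kWh/yr (about 1.5 million
    # discounted kWh), the expression above gives lcoe ~= (160,000 + 16,000) / 1,500,000
    # ~= 0.12 US dollars per kWh.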
def get_output(self):
self.get_nested()
output_dict = self.nested_outputs
return output_dict
@staticmethod
def setup_nested():
"""
Set up up empty nested dict for outputs.
:return: nested dict for outputs with values set to None. Results are filled in using "get_nested" method
"""
nested_outputs = dict()
nested_outputs["Scenario"] = dict()
nested_outputs["Scenario"]["Profile"] = dict()
nested_outputs["Scenario"]["Site"] = dict()
# Loop through all sub-site dicts and init
for name, d in nested_output_definitions["outputs"]["Scenario"]["Site"].items():
nested_outputs["Scenario"]["Site"][name] = dict()
for k in d.keys():
nested_outputs["Scenario"]["Site"][name].setdefault(k, None)
return nested_outputs
def get_nested(self):
"""
Translates the "flat" results_dict (which is just the JSON output from REopt mosel code)
into the nested output dict.
:return: None (modifies self.nested_outputs)
"""
# TODO: move the filling in of outputs to reopt.jl
self.nested_outputs["Scenario"]["status"] = self.results_dict["status"]
self.nested_outputs["Scenario"]["lower_bound"] = self.results_dict.get("lower_bound")
self.nested_outputs["Scenario"]["optimality_gap"] = self.results_dict.get("optimality_gap")
financials = FinancialModel.objects.filter(run_uuid=meta['run_uuid']).first() #getting financial inputs for wind and pv lcoe calculations
for name, d in nested_output_definitions["outputs"]["Scenario"]["Site"].items():
if name == "LoadProfile":
self.nested_outputs["Scenario"]["Site"][name]["year_one_electric_load_series_kw"] = self.dm["LoadProfile"].get("year_one_electric_load_series_kw")
self.nested_outputs["Scenario"]["Site"][name]["critical_load_series_kw"] = self.dm["LoadProfile"].get("critical_load_series_kw")
self.nested_outputs["Scenario"]["Site"][name]["annual_calculated_kwh"] = self.dm["LoadProfile"].get("annual_kwh")
self.nested_outputs["Scenario"]["Site"][name]["resilience_check_flag"] = self.dm["LoadProfile"].get("resilience_check_flag")
self.nested_outputs["Scenario"]["Site"][name]["sustain_hours"] = int(self.dm["LoadProfile"].get("bau_sustained_time_steps") / (len(self.dm["LoadProfile"].get("year_one_electric_load_series_kw"))/8760))
self.nested_outputs["Scenario"]["Site"][name]["bau_sustained_time_steps"] = self.dm["LoadProfile"].get("bau_sustained_time_steps")
self.nested_outputs["Scenario"]["Site"][name]['loads_kw'] = self.dm["LoadProfile"].get("year_one_electric_load_series_kw")
elif name == "LoadProfileBoilerFuel":
self.nested_outputs["Scenario"]["Site"][name]["annual_calculated_boiler_fuel_load_mmbtu_bau"] = \
self.dm["LoadProfile"].get("annual_heating_mmbtu")
self.nested_outputs["Scenario"]["Site"][name]["year_one_boiler_fuel_load_series_mmbtu_per_hr"] = \
self.dm["LoadProfile"].get("year_one_boiler_fuel_load_series_mmbtu_per_hr")
self.nested_outputs["Scenario"]["Site"][name]["year_one_boiler_thermal_load_series_mmbtu_per_hr"] = \
[x * self.dm.get("boiler_efficiency", 0) \
for x in self.dm["LoadProfile"].get("year_one_boiler_fuel_load_series_mmbtu_per_hr")]
elif name == "LoadProfileChillerThermal":
self.nested_outputs["Scenario"]["Site"][name]["annual_calculated_kwh_bau"] = \
self.dm["LoadProfile"].get("annual_cooling_kwh")
self.nested_outputs["Scenario"]["Site"][name]["year_one_chiller_electric_load_series_kw"] = \
self.dm["LoadProfile"].get("year_one_chiller_electric_load_series_kw")
self.nested_outputs["Scenario"]["Site"][name]["year_one_chiller_thermal_load_series_ton"] = \
[x * self.dm.get("elecchl_cop", 0) / TONHOUR_TO_KWHT \
for x in self.dm["LoadProfile"].get("year_one_chiller_electric_load_series_kw")]
elif name == "Financial":
self.nested_outputs["Scenario"]["Site"][name]["lcc_us_dollars"] = self.results_dict.get("lcc")
self.nested_outputs["Scenario"]["Site"][name]["lcc_bau_us_dollars"] = self.results_dict.get(
"lcc_bau")
self.nested_outputs["Scenario"]["Site"][name]["npv_us_dollars"] = self.results_dict.get("npv")
self.nested_outputs["Scenario"]["Site"][name][
"net_capital_costs_plus_om_us_dollars"] = self.results_dict.get("net_capital_costs_plus_om")
self.nested_outputs["Scenario"]["Site"][name]["net_om_us_dollars_bau"] = self.results_dict.get(
"net_capital_costs_plus_om_bau")
self.nested_outputs["Scenario"]["Site"][name]["net_capital_costs"] = self.results_dict.get(
"net_capital_costs")
self.nested_outputs["Scenario"]["Site"][name]["microgrid_upgrade_cost_us_dollars"] = \
self.results_dict.get("net_capital_costs") * financials.microgrid_upgrade_cost_pct
self.nested_outputs["Scenario"]["Site"][name]["total_om_costs_us_dollars"] = self.results_dict.get(
"total_om_costs_after_tax")
self.nested_outputs["Scenario"]["Site"][name]["year_one_om_costs_us_dollars"] = self.results_dict.get(
"year_one_om_costs_after_tax")
self.nested_outputs["Scenario"]["Site"][name]["year_one_om_costs_before_tax_us_dollars"] = \
self.results_dict.get("year_one_om_costs_before_tax")
elif name == "PV":
pv_models = list(PVModel.objects.filter(run_uuid=meta['run_uuid']).order_by('pv_number'))
template_pv = copy.deepcopy(self.nested_outputs['Scenario']["Site"][name])
self.nested_outputs['Scenario']["Site"][name] = []
for i, pv_model in enumerate(pv_models):
i += 1
pv = copy.deepcopy(template_pv)
pv["pv_number"] = i
pv["size_kw"] = self.results_dict.get("PV{}_kw".format(i)) or 0
pv["average_yearly_energy_produced_kwh"] = self.results_dict.get("average_yearly_energy_produced_PV{}".format(i))
pv["average_yearly_energy_produced_bau_kwh"] = self.results_dict.get("average_yearly_energy_produced_PV{}_bau".format(i))
pv["average_yearly_energy_exported_kwh"] = self.results_dict.get("average_annual_energy_exported_PV{}".format(i))
pv["year_one_energy_produced_kwh"] = self.results_dict.get("year_one_energy_produced_PV{}".format(i))
pv["year_one_energy_produced_bau_kwh"] = self.results_dict.get("year_one_PV{}_energy_produced_bau".format(i))
pv["year_one_to_battery_series_kw"] = self.results_dict.get("PV{}toBatt".format(i))
pv["year_one_to_load_series_kw"] = self.results_dict.get("PV{}toLoad".format(i))
pv["year_one_to_grid_series_kw"] = self.results_dict.get("PV{}toGrid".format(i))
pv['year_one_curtailed_production_series_kw'] = self.results_dict.get("PV{}toCurtail".format(i))
pv["year_one_power_production_series_kw"] = pv.get("year_one_to_grid_series_kw")
if not pv.get("year_one_to_battery_series_kw") is None:
if pv["year_one_power_production_series_kw"] is None:
pv["year_one_power_production_series_kw"] = pv.get("year_one_to_battery_series_kw")
else:
pv["year_one_power_production_series_kw"] = \
list(np.array(pv["year_one_power_production_series_kw"]) +
np.array(pv.get("year_one_to_battery_series_kw")))
if not pv.get("year_one_to_load_series_kw") is None:
if pv["year_one_power_production_series_kw"] is None:
pv["year_one_power_production_series_kw"] = pv.get("year_one_to_load_series_kw")
else:
pv["year_one_power_production_series_kw"] = \
list(np.array(pv["year_one_power_production_series_kw"]) +
np.array(pv.get("year_one_to_load_series_kw")))
if pv["year_one_power_production_series_kw"] is None:
pv["year_one_power_production_series_kw"] = []
pv["existing_pv_om_cost_us_dollars"] = self.results_dict.get("PV{}_net_fixed_om_costs_bau".format(i))
pv["station_latitude"] = pv_model.station_latitude
pv["station_longitude"] = pv_model.station_longitude
pv["station_distance_km"] = pv_model.station_distance_km
pv['lcoe_us_dollars_per_kwh'] = self.calculate_lcoe(pv, pv_model.__dict__, financials)
self.nested_outputs['Scenario']["Site"][name].append(pv)
elif name == "Wind":
self.nested_outputs["Scenario"]["Site"][name]["size_kw"] = self.results_dict.get("wind_kw", 0)
self.nested_outputs["Scenario"]["Site"][name][
"average_yearly_energy_produced_kwh"] = self.results_dict.get("average_wind_energy_produced")
self.nested_outputs["Scenario"]["Site"][name][
"average_yearly_energy_exported_kwh"] = self.results_dict.get(
"average_annual_energy_exported_wind")
self.nested_outputs["Scenario"]["Site"][name][
"year_one_energy_produced_kwh"] = self.results_dict.get("year_one_wind_energy_produced")
self.nested_outputs["Scenario"]["Site"][name][
"year_one_to_battery_series_kw"] = self.results_dict.get("WINDtoBatt")
self.nested_outputs["Scenario"]["Site"][name][
"year_one_to_load_series_kw"] = self.results_dict.get("WINDtoLoad")
self.nested_outputs["Scenario"]["Site"][name][
"year_one_to_grid_series_kw"] = self.results_dict.get("WINDtoGrid")
self.nested_outputs["Scenario"]["Site"][name][
"year_one_curtailed_production_series_kw"] = self.results_dict.get("WINDtoCurtail")
self.nested_outputs["Scenario"]["Site"][name][
"year_one_power_production_series_kw"] = self.compute_total_power(name)
if self.nested_outputs["Scenario"]["Site"][name]["size_kw"] > 0: #setting up
wind_model = WindModel.objects.get(run_uuid=meta['run_uuid'])
self.nested_outputs["Scenario"]["Site"][name]['lcoe_us_dollars_per_kwh'] = \
self.calculate_lcoe(self.nested_outputs["Scenario"]["Site"][name], wind_model.__dict__, financials)
data['inputs']['Scenario']["Site"]["Wind"]["installed_cost_us_dollars_per_kw"] = \
wind_model.installed_cost_us_dollars_per_kw
data['inputs']['Scenario']["Site"]["Wind"]["federal_itc_pct"] = wind_model.federal_itc_pct
else:
self.nested_outputs["Scenario"]["Site"][name]['lcoe_us_dollars_per_kwh'] = None
elif name == "Storage":
self.nested_outputs["Scenario"]["Site"][name]["size_kw"] = self.results_dict.get("batt_kw", 0)
self.nested_outputs["Scenario"]["Site"][name]["size_kwh"] = self.results_dict.get("batt_kwh", 0)
self.nested_outputs["Scenario"]["Site"][name][
"year_one_to_load_series_kw"] = self.results_dict.get("ElecFromBatt")
self.nested_outputs["Scenario"]["Site"][name][
"year_one_to_grid_series_kw"] = self.results_dict.get("ElecFromBattExport")
self.nested_outputs["Scenario"]["Site"][name]["year_one_soc_series_pct"] = \
self.results_dict.get("year_one_soc_series_pct")
elif name == "ElectricTariff":
self.nested_outputs["Scenario"]["Site"][name][
"year_one_energy_cost_us_dollars"] = self.results_dict.get("year_one_energy_cost")
self.nested_outputs["Scenario"]["Site"][name][
"year_one_demand_cost_us_dollars"] = self.results_dict.get("year_one_demand_cost")
self.nested_outputs["Scenario"]["Site"][name][
"year_one_fixed_cost_us_dollars"] = self.results_dict.get("year_one_fixed_cost")
self.nested_outputs["Scenario"]["Site"][name][
"year_one_min_charge_adder_us_dollars"] = self.results_dict.get("year_one_min_charge_adder")
self.nested_outputs["Scenario"]["Site"][name][
"year_one_coincident_peak_cost_us_dollars"] = self.results_dict.get("year_one_coincident_peak_cost")
self.nested_outputs["Scenario"]["Site"][name][
"year_one_energy_cost_bau_us_dollars"] = self.results_dict.get("year_one_energy_cost_bau")
self.nested_outputs["Scenario"]["Site"][name][
"year_one_demand_cost_bau_us_dollars"] = self.results_dict.get("year_one_demand_cost_bau")
self.nested_outputs["Scenario"]["Site"][name][
"year_one_fixed_cost_bau_us_dollars"] = self.results_dict.get("year_one_fixed_cost_bau")
self.nested_outputs["Scenario"]["Site"][name][
"year_one_min_charge_adder_bau_us_dollars"] = self.results_dict.get(
"year_one_min_charge_adder_bau")
self.nested_outputs["Scenario"]["Site"][name][
"year_one_coincident_peak_cost_bau_us_dollars"] = self.results_dict.get("year_one_coincident_peak_cost_bau")
self.nested_outputs["Scenario"]["Site"][name][
"total_energy_cost_us_dollars"] = self.results_dict.get("total_energy_cost")
self.nested_outputs["Scenario"]["Site"][name][
"total_demand_cost_us_dollars"] = self.results_dict.get("total_demand_cost")
self.nested_outputs["Scenario"]["Site"][name][
"total_fixed_cost_us_dollars"] = self.results_dict.get("total_fixed_cost")
self.nested_outputs["Scenario"]["Site"][name][
"total_min_charge_adder_us_dollars"] = self.results_dict.get("total_min_charge_adder")
self.nested_outputs["Scenario"]["Site"][name][
"total_coincident_peak_cost_us_dollars"] = self.results_dict.get("total_coincident_peak_cost")
self.nested_outputs["Scenario"]["Site"][name][
"total_energy_cost_bau_us_dollars"] = self.results_dict.get("total_energy_cost_bau")
self.nested_outputs["Scenario"]["Site"][name][
"total_demand_cost_bau_us_dollars"] = self.results_dict.get("total_demand_cost_bau")
self.nested_outputs["Scenario"]["Site"][name][
"total_fixed_cost_bau_us_dollars"] = self.results_dict.get("total_fixed_cost_bau")
self.nested_outputs["Scenario"]["Site"][name][
"total_min_charge_adder_bau_us_dollars"] = self.results_dict.get("total_min_charge_adder_bau")
self.nested_outputs["Scenario"]["Site"][name][
"total_coincident_peak_cost_bau_us_dollars"] = self.results_dict.get("total_coincident_peak_cost_bau")
self.nested_outputs["Scenario"]["Site"][name]["year_one_bill_us_dollars"] = self.results_dict.get(
"year_one_bill")
self.nested_outputs["Scenario"]["Site"][name][
"year_one_bill_bau_us_dollars"] = self.results_dict.get("year_one_bill_bau")
self.nested_outputs["Scenario"]["Site"][name][
"year_one_export_benefit_us_dollars"] = self.results_dict.get("year_one_export_benefit")
self.nested_outputs["Scenario"]["Site"][name][
"year_one_export_benefit_bau_us_dollars"] = self.results_dict.get("year_one_export_benefit_bau")
self.nested_outputs["Scenario"]["Site"][name][
"total_export_benefit_us_dollars"] = self.results_dict.get("total_export_benefit")
self.nested_outputs["Scenario"]["Site"][name][
"total_export_benefit_bau_us_dollars"] = self.results_dict.get("total_export_benefit_bau")
self.nested_outputs["Scenario"]["Site"][name][
"year_one_energy_cost_series_us_dollars_per_kwh"] = \
self.dm.get('year_one_energy_cost_series_us_dollars_per_kwh')
self.nested_outputs["Scenario"]["Site"][name][
"year_one_demand_cost_series_us_dollars_per_kw"] = \
self.dm.get('year_one_demand_cost_series_us_dollars_per_kw')
self.nested_outputs["Scenario"]["Site"][name][
"year_one_to_load_series_kw"] = self.results_dict.get('GridToLoad')
self.nested_outputs["Scenario"]["Site"][name][
"year_one_to_load_series_bau_kw"] = self.results_dict.get('GridToLoad_bau')
self.nested_outputs["Scenario"]["Site"][name][
"year_one_to_battery_series_kw"] = self.results_dict.get('GridToBatt')
self.nested_outputs["Scenario"]["Site"][name][
"year_one_energy_supplied_kwh"] = self.results_dict.get("year_one_utility_kwh")
self.nested_outputs["Scenario"]["Site"][name][
"year_one_energy_supplied_kwh_bau"] = self.results_dict.get("year_one_utility_kwh_bau")
self.nested_outputs["Scenario"]["Site"][name][
"year_one_chp_standby_cost_us_dollars"] = self.results_dict.get("year_one_chp_standby_cost")
self.nested_outputs["Scenario"]["Site"][name][
"total_chp_standby_cost_us_dollars"] = self.results_dict.get("total_chp_standby_cost")
elif name == "FuelTariff":
self.nested_outputs["Scenario"]["Site"][name][
"total_boiler_fuel_cost_us_dollars"] = self.results_dict.get("total_boiler_fuel_cost")
self.nested_outputs["Scenario"]["Site"][name][
"total_boiler_fuel_cost_bau_us_dollars"] = self.results_dict.get("total_boiler_fuel_cost_bau")
self.nested_outputs["Scenario"]["Site"][name][
"year_one_boiler_fuel_cost_us_dollars"] = self.results_dict.get("year_one_boiler_fuel_cost")
self.nested_outputs["Scenario"]["Site"][name][
"year_one_boiler_fuel_cost_bau_us_dollars"] = self.results_dict.get("year_one_boiler_fuel_cost_bau")
self.nested_outputs["Scenario"]["Site"][name][
"total_chp_fuel_cost_us_dollars"] = self.results_dict.get("total_chp_fuel_cost")
self.nested_outputs["Scenario"]["Site"][name][
"year_one_chp_fuel_cost_us_dollars"] = self.results_dict.get("year_one_chp_fuel_cost")
elif name == "Generator":
self.nested_outputs["Scenario"]["Site"][name]["size_kw"] = self.results_dict.get("generator_kw", 0)
self.nested_outputs["Scenario"]["Site"][name]["fuel_used_gal"] = self.results_dict.get(
"fuel_used_kwh") / GAL_DIESEL_TO_KWH
self.nested_outputs["Scenario"]["Site"][name]["fuel_used_gal_bau"] = self.results_dict.get(
"fuel_used_kwh_bau") / GAL_DIESEL_TO_KWH
self.nested_outputs["Scenario"]["Site"][name][
"year_one_to_load_series_kw"] = self.results_dict.get('GENERATORtoLoad')
self.nested_outputs["Scenario"]["Site"][name][
"year_one_to_battery_series_kw"] = self.results_dict.get('GENERATORtoBatt')
self.nested_outputs["Scenario"]["Site"][name][
"year_one_to_grid_series_kw"] = self.results_dict.get('GENERATORtoGrid')
self.nested_outputs["Scenario"]["Site"][name][
"average_yearly_energy_produced_kwh"] = self.results_dict.get(
"average_yearly_gen_energy_produced")
self.nested_outputs["Scenario"]["Site"][name][
"average_yearly_energy_exported_kwh"] = self.results_dict.get(
"average_annual_energy_exported_gen")
self.nested_outputs["Scenario"]["Site"][name][
"year_one_energy_produced_kwh"] = self.results_dict.get(
"year_one_gen_energy_produced")
self.nested_outputs["Scenario"]["Site"][name][
"year_one_power_production_series_kw"] = self.compute_total_power(name)
self.nested_outputs["Scenario"]["Site"][name][
"existing_gen_total_fixed_om_cost_us_dollars"] = self.results_dict.get(
"gen_net_fixed_om_costs_bau")
self.nested_outputs["Scenario"]["Site"][name][
"total_fixed_om_cost_us_dollars"] = self.results_dict.get("gen_net_fixed_om_costs")
self.nested_outputs["Scenario"]["Site"][name][
"year_one_fixed_om_cost_us_dollars"] = self.results_dict.get("gen_year_one_fixed_om_costs")
self.nested_outputs["Scenario"]["Site"][name][
"existing_gen_total_variable_om_cost_us_dollars"] = self.results_dict.get(
"gen_net_variable_om_costs_bau")
self.nested_outputs["Scenario"]["Site"][name][
"existing_gen_year_one_variable_om_cost_us_dollars"] = self.results_dict.get(
"gen_year_one_variable_om_costs_bau")
self.nested_outputs["Scenario"]["Site"][name][
"total_variable_om_cost_us_dollars"] = self.results_dict.get(
"gen_net_variable_om_costs")
self.nested_outputs["Scenario"]["Site"][name][
"year_one_variable_om_cost_us_dollars"] = self.results_dict.get(
"gen_year_one_variable_om_costs")
self.nested_outputs["Scenario"]["Site"][name][
"total_fuel_cost_us_dollars"] = self.results_dict.get(
"gen_total_fuel_cost")
self.nested_outputs["Scenario"]["Site"][name][
"year_one_fuel_cost_us_dollars"] = self.results_dict.get(
"gen_year_one_fuel_cost")
self.nested_outputs["Scenario"]["Site"][name][
"existing_gen_total_fuel_cost_us_dollars"] = self.results_dict.get(
"gen_total_fuel_cost_bau")
self.nested_outputs["Scenario"]["Site"][name][
"existing_gen_year_one_fuel_cost_us_dollars"] = self.results_dict.get(
"gen_year_one_fuel_cost_bau")
elif name == "CHP":
self.nested_outputs["Scenario"]["Site"][name][
"size_kw"] = self.results_dict.get("chp_kw")
self.nested_outputs["Scenario"]["Site"][name][
"year_one_fuel_used_mmbtu"] = self.results_dict.get("year_one_chp_fuel_used") / MMBTU_TO_KWH
self.nested_outputs["Scenario"]["Site"][name][
"year_one_electric_energy_produced_kwh"] = self.results_dict.get("year_one_chp_electric_energy_produced")
self.nested_outputs["Scenario"]["Site"][name][
"year_one_thermal_energy_produced_mmbtu"] = self.results_dict.get("year_one_chp_thermal_energy_produced") / MMBTU_TO_KWH
self.nested_outputs["Scenario"]["Site"][name][
"year_one_electric_production_series_kw"] = self.results_dict.get("chp_electric_production_series")
self.nested_outputs["Scenario"]["Site"][name][
"year_one_to_battery_series_kw"] = self.results_dict.get("chp_to_battery_series")
self.nested_outputs["Scenario"]["Site"][name][
"year_one_to_load_series_kw"] = self.results_dict.get("chp_electric_to_load_series")
self.nested_outputs["Scenario"]["Site"][name][
"year_one_to_grid_series_kw"] = self.results_dict.get("chp_to_grid_series")
self.nested_outputs["Scenario"]["Site"][name][
"year_one_thermal_to_load_series_mmbtu_per_hour"] = [x / MMBTU_TO_KWH for x in self.results_dict.get("chp_thermal_to_load_series")]
self.nested_outputs["Scenario"]["Site"][name][
"year_one_thermal_to_tes_series_mmbtu_per_hour"] = [x / MMBTU_TO_KWH for x in self.results_dict.get("chp_thermal_to_tes_series")]
self.nested_outputs["Scenario"]["Site"][name][
"year_one_thermal_to_waste_series_mmbtu_per_hour"] = [x / MMBTU_TO_KWH for x in self.results_dict.get("chp_thermal_to_waste_series")]
elif name == "Boiler":
self.nested_outputs["Scenario"]["Site"][name][
"year_one_boiler_fuel_consumption_series_mmbtu_per_hr"] = [x / MMBTU_TO_KWH for x in self.results_dict.get("fuel_to_boiler_series")]
self.nested_outputs["Scenario"]["Site"][name][
"year_one_boiler_thermal_production_series_mmbtu_per_hr"] = [x / MMBTU_TO_KWH for x in self.results_dict.get("boiler_thermal_production_series")]
self.nested_outputs["Scenario"]["Site"][name][
"year_one_thermal_to_load_series_mmbtu_per_hour"] = [x / MMBTU_TO_KWH for x in self.results_dict.get("boiler_thermal_to_load_series")]
self.nested_outputs["Scenario"]["Site"][name][
"year_one_thermal_to_tes_series_mmbtu_per_hour"] = [x / MMBTU_TO_KWH for x in self.results_dict.get("boiler_thermal_to_tes_series")]
self.nested_outputs["Scenario"]["Site"][name][
"year_one_boiler_fuel_consumption_mmbtu"] = self.results_dict.get("year_one_fuel_to_boiler_kwh") / | |
import os
import scipy as sp
import gzip
import h5py
import sys
from ldpred import sum_stats_parsers
from ldpred import reporting
from ldpred import util
from ldpred import plinkfiles
from plinkio import plinkfile
import time
def _verify_coord_data_(data_dict):
"""
Verify that merged data is ok
"""
num_snps = len(data_dict['raw_snps_ref'])
assert num_snps ==len(data_dict['snp_stds_ref']), 'Inconsistencies in coordinated data sizes'
assert num_snps ==len(data_dict['snp_means_ref']), 'Inconsistencies in coordinated data sizes'
assert num_snps ==len(data_dict['freqs_ref']), 'Inconsistencies in coordinated data sizes'
assert num_snps ==len(data_dict['ps']), 'Inconsistencies in coordinated data sizes'
assert num_snps ==len(data_dict['positions']), 'Inconsistencies in coordinated data sizes'
assert num_snps ==len(data_dict['nts']), 'Inconsistencies in coordinated data sizes'
assert num_snps ==len(data_dict['sids']), 'Inconsistencies in coordinated data sizes'
assert num_snps ==len(data_dict['betas']), 'Inconsistencies in coordinated data sizes'
assert num_snps ==len(data_dict['log_odds']), 'Inconsistencies in coordinated data sizes'
assert num_snps ==len(data_dict['ns']), 'Inconsistencies in coordinated data sizes'
if 'raw_snps_val' in data_dict:
assert num_snps ==len(data_dict['raw_snps_val']), 'Inconsistencies in coordinated data sizes'
assert num_snps ==len(data_dict['snp_stds_val']), 'Inconsistencies in coordinated data sizes'
assert num_snps ==len(data_dict['snp_means_val']), 'Inconsistencies in coordinated data sizes'
assert num_snps ==len(data_dict['freqs_val']), 'Inconsistencies in coordinated data sizes'
def write_coord_data(cord_data_g, coord_dict, debug=True):
_verify_coord_data_(coord_dict)
if debug:
print('Storing coordinated data to HDF5 file.')
ofg = cord_data_g.create_group(coord_dict['chrom'])
ofg.create_dataset('raw_snps_ref', data=coord_dict['raw_snps_ref'], compression='lzf')
ofg.create_dataset('snp_stds_ref', data=coord_dict['snp_stds_ref'])
ofg.create_dataset('snp_means_ref', data=coord_dict['snp_means_ref'])
ofg.create_dataset('freqs_ref', data=coord_dict['freqs_ref'])
if 'raw_snps_val' in coord_dict:
ofg.create_dataset('raw_snps_val', data=coord_dict['raw_snps_val'], compression='lzf')
ofg.create_dataset('snp_stds_val', data=coord_dict['snp_stds_val'])
ofg.create_dataset('snp_means_val', data=coord_dict['snp_means_val'])
ofg.create_dataset('freqs_val', data=coord_dict['freqs_val'])
ofg.create_dataset('log_odds_prs', data=coord_dict['log_odds_prs'])
ofg.create_dataset('ps', data=coord_dict['ps'])
ofg.create_dataset('positions', data=coord_dict['positions'])
ofg.create_dataset('nts', data=sp.array(coord_dict['nts'],dtype=util.nts_dtype))
ofg.create_dataset('sids', data=sp.array(coord_dict['sids'],dtype=util.sids_dtype))
ofg.create_dataset('betas', data=coord_dict['betas'])
ofg.create_dataset('log_odds', data=coord_dict['log_odds'])
ofg.create_dataset('ns', data=coord_dict['ns'])
if coord_dict['genetic_map'] is not None:
ofg.create_dataset('genetic_map', data=coord_dict['genetic_map'])
def write_parameter_data(p_dict, h5f, debug=True):
if debug:
print('Storing parameter information in coordinated file.')
print (p_dict)
pg = h5f.create_group('parameters')
if p_dict['N'] is not None:
pg.create_dataset('N', data=p_dict['N'])
pg.create_dataset('only_hm3', data=p_dict['only_hm3'])
pg.create_dataset('eff_type', data=p_dict['eff_type'])
pg.create_dataset('skip_coordination', data=p_dict['skip_coordination'])
pg.create_dataset('match_genomic_pos', data=p_dict['match_genomic_pos'])
pg.create_dataset('maf', data=p_dict['maf'])
pg.create_dataset('max_freq_discrep', data=p_dict['max_freq_discrep'])
pg.create_dataset('ssf_format', data=p_dict['ssf_format'])
pg.create_dataset('rs', data=p_dict['rs'])
pg.create_dataset('A1', data=p_dict['A1'])
pg.create_dataset('A2', data=p_dict['A2'])
pg.create_dataset('pos', data=p_dict['pos'])
pg.create_dataset('info', data=p_dict['info'])
pg.create_dataset('chr', data=p_dict['chr'])
pg.create_dataset('reffreq', data=p_dict['reffreq'])
pg.create_dataset('pval', data=p_dict['pval'])
pg.create_dataset('eff', data=p_dict['eff'])
pg.create_dataset('se', data=p_dict['se'])
pg.create_dataset('ncol', data=p_dict['ncol'])
if p_dict['case_freq'] is not None:
pg.create_dataset('case_freq', data=p_dict['case_freq'])
if p_dict['control_freq'] is not None:
pg.create_dataset('control_freq', data=p_dict['control_freq'])
if p_dict['case_n'] is not None:
pg.create_dataset('case_n', data=p_dict['case_n'])
if p_dict['control_n'] is not None:
pg.create_dataset('control_n', data=p_dict['control_n'])
pg.create_dataset('z_from_se', data=p_dict['z_from_se'])
def get_snp_stds(raw_snps):
return sp.std(raw_snps, axis=1, dtype='float32')
def get_mean_sample_size(n, cord_data_g):
if n is None:
all_ns = []
for chrom_str in util.chromosomes_list:
if chrom_str in cord_data_g:
g = cord_data_g[chrom_str]
all_ns.extend(g['ns'][...])
        assert len(all_ns) > 0, 'Sample size missing. Please use --N flag, or ensure they are parsed as part of the summary statistics.'
mean_n = sp.mean(all_ns)
else:
mean_n = n
return mean_n
def filter_coord_data(cd, filter_mask):
#data_keys = ['raw_snps', 'snp_stds', 'snp_means', 'freqs' ,'ps', 'ns', 'positions', 'nts', 'sids','betas','log_odds']
data_keys = ['raw_snps_ref', 'snp_stds_ref', 'snp_means_ref', 'freqs_ref' ,'ps', 'ns', 'positions', 'nts', 'sids','betas','log_odds']
if 'raw_snps_val' in cd.keys():
data_keys.extend(['raw_snps_val', 'snp_stds_val', 'snp_means_val', 'freqs_val'])
a = len(cd['raw_snps_ref'])
b = len(filter_mask)
    diff = b - a
    if diff != 0:
        # Trim the mask from the front so its length matches the coordinated arrays.
        filter_mask = filter_mask[diff:b]
for k in data_keys:
cd[k] = cd[k][filter_mask]
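# Illustrative sketch (an assumption, not part of LDpred): filter_coord_data() applies a
# boolean mask in place to every per-SNP array of a coordinated-data dict, e.g. dropping
# SNPs below a minor-allele-frequency threshold before the chromosome group is written out.
def _example_maf_filter(coord_dict, min_maf=0.01):
    freqs = sp.asarray(coord_dict['freqs_ref'])
    maf_mask = (freqs > min_maf) & (freqs < 1.0 - min_maf)
    filter_coord_data(coord_dict, maf_mask)
    return int(maf_mask.sum())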
def coordinate_datasets(reference_genotype_file, hdf5_file, summary_dict,
validation_genotype_file=None,
genetic_map_dir=None,
min_maf=0.01,
skip_coordination=False,
max_freq_discrep = 0.15,
debug=True):
summary_dict[3.9]={'name':'dash', 'value':'Coordination'}
t0 = time.time()
if validation_genotype_file is not None:
print('Coordinating datasets (Summary statistics, LD reference genotypes, and Validation genotypes).')
else:
print('Coordinating datasets (Summary statistics and LD reference genotypes).')
plinkf = plinkfile.PlinkFile(reference_genotype_file)
# Figure out chromosomes and positions.
if debug:
print('Parsing plinkf_dict_val reference genotypes')
loci = plinkf.get_loci()
plinkf.close()
summary_dict[4]={'name':'Num individuals in LD Reference data:','value':plinkfiles.get_num_indivs(reference_genotype_file)}
summary_dict[4.1]={'name':'SNPs in LD Reference data:','value':len(loci)}
gf_chromosomes = [l.chromosome for l in loci]
chromosomes = sp.unique(gf_chromosomes)
chromosomes.sort()
chr_dict = plinkfiles.get_chrom_dict(loci, chromosomes, debug)
if validation_genotype_file is not None:
if debug:
print('Parsing LD validation bim file')
plinkf_val = plinkfile.PlinkFile(validation_genotype_file)
# Loads only the individuals...
plinkf_dict_val = plinkfiles.get_phenotypes(plinkf_val)
loci_val = plinkf_val.get_loci()
plinkf_val.close()
summary_dict[5]={'name':'SNPs in Validation data:','value':len(loci_val)}
chr_dict_val = plinkfiles.get_chrom_dict(loci_val, chromosomes, debug)
# Open HDF5 file and prepare out data
assert 'iids' not in hdf5_file, 'Something is wrong with the HDF5 file: individual IDs are already present.'
if plinkf_dict_val['has_phenotype']:
hdf5_file.create_dataset('y', data=plinkf_dict_val['phenotypes'])
summary_dict[6]={'name':'Num validation phenotypes:','value':plinkf_dict_val['num_individs']}
hdf5_file.create_dataset('fids', data=sp.array(plinkf_dict_val['fids'], dtype=util.fids_dtype))
hdf5_file.create_dataset('iids', data=sp.array(plinkf_dict_val['iids'], dtype=util.iids_dtype))
maf_adj_risk_scores = sp.zeros(plinkf_dict_val['num_individs'])
# Now summary statistics
ssf = hdf5_file['sum_stats']
cord_data_g = hdf5_file.create_group('cord_data')
chromosomes_found = set()
num_snps_common_before_filtering =0
num_snps_common_after_filtering =0
tot_num_non_matching_nts = 0
tot_num_non_supported_nts = 0
tot_num_ambig_nts = 0
tot_num_freq_discrep_filtered_snps = 0
tot_num_maf_filtered_snps = 0
tot_g_ss_nt_concord_count = 0
tot_num_flipped_nts = 0
if validation_genotype_file is not None:
tot_g_vg_nt_concord_count = 0
tot_vg_ss_nt_concord_count = 0
# Now iterate over chromosomes
chrom_i = 0
for chrom in chromosomes:
chrom_i +=1
if not debug:
sys.stdout.write('\r%0.2f%%' % (100.0 * (float(chrom_i) / (len(chromosomes)+1))))
sys.stdout.flush()
try:
chr_str = 'chrom_%d' % chrom
ssg = ssf[chr_str]
except Exception as err_str:
if debug:
print(err_str)
print('Did not find chromosome %d in SS dataset.'%chrom)
continue
if debug:
print('Coordinating data for chromosome %s' % chr_str)
chromosomes_found.add(chrom)
#Get summary statistics chromosome group
ssg = ssf['chrom_%d' % chrom]
ss_sids = (ssg['sids'][...]).astype(util.sids_u_dtype)
if validation_genotype_file is not None:
chrom_d_val = chr_dict_val[chr_str]
vg_sids = chrom_d_val['sids']
common_sids = sp.intersect1d(ss_sids, vg_sids)
# A map from sid to index for validation data
vg_sid_dict = {}
for i, sid in enumerate(vg_sids):
vg_sid_dict[sid] = i
else:
common_sids = ss_sids
# A map from sid to index for summary stats
ss_sid_dict = {}
for i, sid in enumerate(ss_sids):
ss_sid_dict[sid] = i
#The indices to retain for the LD reference genotypes
chrom_d = chr_dict[chr_str]
g_sids = chrom_d['sids']
common_sids = sp.intersect1d(common_sids, g_sids)
# A map from sid to index for LD reference data
g_sid_dict = {}
for i, sid in enumerate(g_sids):
g_sid_dict[sid] = i
if debug:
print('Found %d SNPs on chrom %d that were common across all datasets' % (len(common_sids), chrom))
print('Ordering SNPs by genomic positions (based on LD reference genotypes).')
g_snp_map = []
for sid in common_sids:
g_snp_map.append(g_sid_dict[sid])
# order by positions (based on LD reference file)
g_positions = sp.array(chrom_d['positions'])[g_snp_map]
order = sp.argsort(g_positions)
g_snp_map = sp.array(g_snp_map)[order]
g_snp_map = g_snp_map.tolist()
common_sids = sp.array(common_sids)[order]
# Get the ordered sum stats SNPs indices.
ss_snp_map = []
for sid in common_sids:
ss_snp_map.append(ss_sid_dict[sid])
# Get the ordered validation SNPs indices
if validation_genotype_file is not None:
vg_snp_map = []
for sid in common_sids:
vg_snp_map.append(vg_sid_dict[sid])
vg_nts = sp.array(chrom_d_val['nts'])
vg_nts_ok = sp.array(vg_nts)[vg_snp_map]
g_nts = sp.array(chrom_d['nts'])
ss_nts = (ssg['nts'][...]).astype(util.nts_u_dtype)
betas = ssg['betas'][...]
log_odds = ssg['log_odds'][...]
if 'freqs' in ssg:
ss_freqs = ssg['freqs'][...]
g_ss_nt_concord_count = sp.sum(
g_nts[g_snp_map] == ss_nts[ss_snp_map]) / 2.0
if validation_genotype_file is not None:
vg_ss_nt_concord_count = sp.sum(vg_nts_ok == ss_nts[ss_snp_map]) / 2.0
g_vg_nt_concord_count = sp.sum(g_nts[g_snp_map] == vg_nts_ok) / 2.0
if debug:
print('Nucleotide concordance counts out of %d genotypes, vg-rg: %d ; vg-ss: %d' % (len(g_snp_map), g_vg_nt_concord_count, vg_ss_nt_concord_count))
tot_vg_ss_nt_concord_count += vg_ss_nt_concord_count
tot_g_vg_nt_concord_count += g_vg_nt_concord_count
tot_g_ss_nt_concord_count += g_ss_nt_concord_count
if debug:
print('Nucleotide concordance counts out of %d genotypes, rg-ss: %d' % (len(g_snp_map), g_ss_nt_concord_count))
num_freq_discrep_filtered_snps = 0
num_non_matching_nts = 0
num_non_supported_nts = 0
num_ambig_nts = 0
# Identifying which SNPs have nucleotides that are ok..
ok_nts = []
ok_indices = {'g': [], 'ss': []}
if validation_genotype_file is not None:
ok_indices['vg']=[]
#Now loop over SNPs to coordinate nucleotides.
if validation_genotype_file is not None:
for g_i, vg_i, ss_i in zip(g_snp_map, vg_snp_map, ss_snp_map):
# To make sure, is the SNP id the same?
assert g_sids[g_i] == vg_sids[vg_i] == ss_sids[ss_i], 'Some issues with coordinating the genotypes.'
g_nt = g_nts[g_i]
if not skip_coordination:
vg_nt = vg_nts[vg_i]
ss_nt = ss_nts[ss_i]
# Is the nucleotide ambiguous.
g_nt = [g_nts[g_i][0], g_nts[g_i][1]]
if tuple(g_nt) in util.ambig_nts:
num_ambig_nts += 1
continue
# Check that the nucleotides are valid.
if (not g_nt[0] in util.valid_nts) or (not g_nt[1] in util.valid_nts):
num_non_supported_nts += 1
continue
os_g_nt = sp.array(
[util.opp_strand_dict[g_nt[0]], util.opp_strand_dict[g_nt[1]]])
flip_nts = False
# Coordination is a bit more complicated when validation genotypes are provided.
if not ((sp.all(g_nt == ss_nt) or sp.all(os_g_nt == ss_nt)) and (sp.all(g_nt == vg_nt) or sp.all(os_g_nt == vg_nt))):
if sp.all(g_nt == vg_nt) or sp.all(os_g_nt == vg_nt):
flip_nts = (g_nt[1] == ss_nt[0] and g_nt[0] == ss_nt[1]) or (
os_g_nt[1] == ss_nt[0] and os_g_nt[0] == ss_nt[1])
# Try flipping the SS nt
if flip_nts:
tot_num_flipped_nts +=1
betas[ss_i] = -betas[ss_i]
log_odds[ss_i] = -log_odds[ss_i]
if 'freqs' in ssg:
ss_freqs[ss_i] = 1 - ss_freqs[ss_i]
else:
if debug:
print("Nucleotides don't match after all?: g_sid=%s, ss_sid=%s, g_i=%d, ss_i=%d, g_nt=%s, ss_nt=%s" % \
(g_sids[g_i], ss_sids[ss_i], g_i,
ss_i, str(g_nt), str(ss_nt)))
num_non_matching_nts += 1
continue
else:
num_non_matching_nts += 1
continue
# Opposite strand nucleotides
# everything seems ok.
ok_indices['g'].append(g_i)
then the global context is used.
>>> Single = FPSort(8, 24)
>>> Double = FPSort(11, 53)
>>> Single
FPSort(8, 24)
>>> x = Const('x', Single)
>>> eq(x, FP('x', FPSort(8, 24)))
True
"""
ctx = _get_ctx(ctx)
return FPSortRef(Z3_mk_fpa_sort(ctx.ref(), ebits, sbits), ctx)
def _to_float_str(val, exp=0):
if isinstance(val, float):
if math.isnan(val):
res = "NaN"
elif val == 0.0:
sone = math.copysign(1.0, val)
if sone < 0.0:
return "-0.0"
else:
return "+0.0"
elif val == float("+inf"):
res = "+oo"
elif val == float("-inf"):
res = "-oo"
else:
v = val.as_integer_ratio()
num = v[0]
den = v[1]
rvs = str(num) + '/' + str(den)
res = rvs + 'p' + _to_int_str(exp)
elif isinstance(val, bool):
if val:
res = "1.0"
else:
res = "0.0"
elif _is_int(val):
res = str(val)
elif isinstance(val, str):
inx = val.find('*(2**')
if inx == -1:
res = val
elif val[-1] == ')':
res = val[0:inx]
exp = str(int(val[inx+5:-1]) + int(exp))
else:
_z3_assert(False, "String does not have floating-point numeral form.")
elif z3_debug():
_z3_assert(False, "Python value cannot be used to create floating-point numerals.")
if exp == 0:
return res
else:
return res + 'p' + exp
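# A few example outputs of the helper above (illustrative; the values follow from
# float.as_integer_ratio and the branches above, they are not extra Z3 API behaviour):
#   _to_float_str(2.25)           -> "9/4p0"
#   _to_float_str(-0.0)           -> "-0.0"
#   _to_float_str(float("-inf"))  -> "-oo"
#   _to_float_str(7)              -> "7"
#   _to_float_str("1.25*(2**4)")  -> "1.25p4"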
def fpNaN(s):
"""Create a Z3 floating-point NaN term.
>>> s = FPSort(8, 24)
>>> set_fpa_pretty(True)
>>> fpNaN(s)
NaN
>>> pb = get_fpa_pretty()
>>> set_fpa_pretty(False)
>>> fpNaN(s)
fpNaN(FPSort(8, 24))
>>> set_fpa_pretty(pb)
"""
_z3_assert(isinstance(s, FPSortRef), "sort mismatch")
return FPNumRef(Z3_mk_fpa_nan(s.ctx_ref(), s.ast), s.ctx)
def fpPlusInfinity(s):
"""Create a Z3 floating-point +oo term.
>>> s = FPSort(8, 24)
>>> pb = get_fpa_pretty()
>>> set_fpa_pretty(True)
>>> fpPlusInfinity(s)
+oo
>>> set_fpa_pretty(False)
>>> fpPlusInfinity(s)
fpPlusInfinity(FPSort(8, 24))
>>> set_fpa_pretty(pb)
"""
_z3_assert(isinstance(s, FPSortRef), "sort mismatch")
return FPNumRef(Z3_mk_fpa_inf(s.ctx_ref(), s.ast, False), s.ctx)
def fpMinusInfinity(s):
"""Create a Z3 floating-point -oo term."""
_z3_assert(isinstance(s, FPSortRef), "sort mismatch")
return FPNumRef(Z3_mk_fpa_inf(s.ctx_ref(), s.ast, True), s.ctx)
def fpInfinity(s, negative):
"""Create a Z3 floating-point +oo or -oo term."""
_z3_assert(isinstance(s, FPSortRef), "sort mismatch")
_z3_assert(isinstance(negative, bool), "expected Boolean flag")
return FPNumRef(Z3_mk_fpa_inf(s.ctx_ref(), s.ast, negative), s.ctx)
def fpPlusZero(s):
"""Create a Z3 floating-point +0.0 term."""
_z3_assert(isinstance(s, FPSortRef), "sort mismatch")
return FPNumRef(Z3_mk_fpa_zero(s.ctx_ref(), s.ast, False), s.ctx)
def fpMinusZero(s):
"""Create a Z3 floating-point -0.0 term."""
_z3_assert(isinstance(s, FPSortRef), "sort mismatch")
return FPNumRef(Z3_mk_fpa_zero(s.ctx_ref(), s.ast, True), s.ctx)
def fpZero(s, negative):
"""Create a Z3 floating-point +0.0 or -0.0 term."""
_z3_assert(isinstance(s, FPSortRef), "sort mismatch")
_z3_assert(isinstance(negative, bool), "expected Boolean flag")
return FPNumRef(Z3_mk_fpa_zero(s.ctx_ref(), s.ast, negative), s.ctx)
def FPVal(sig, exp=None, fps=None, ctx=None):
"""Return a floating-point value of value `val` and sort `fps`. If `ctx=None`, then the global context is used.
>>> v = FPVal(20.0, FPSort(8, 24))
>>> v
1.25*(2**4)
>>> print("0x%.8x" % v.exponent_as_long(False))
0x00000004
>>> v = FPVal(2.25, FPSort(8, 24))
>>> v
1.125*(2**1)
>>> v = FPVal(-2.25, FPSort(8, 24))
>>> v
-1.125*(2**1)
>>> FPVal(-0.0, FPSort(8, 24))
-0.0
>>> FPVal(0.0, FPSort(8, 24))
+0.0
>>> FPVal(+0.0, FPSort(8, 24))
+0.0
"""
ctx = _get_ctx(ctx)
if is_fp_sort(exp):
fps = exp
exp = None
elif fps is None:
fps = _dflt_fps(ctx)
_z3_assert(is_fp_sort(fps), "sort mismatch")
if exp is None:
exp = 0
val = _to_float_str(sig)
if val == "NaN" or val == "nan":
return fpNaN(fps)
elif val == "-0.0":
return fpMinusZero(fps)
elif val == "0.0" or val == "+0.0":
return fpPlusZero(fps)
elif val == "+oo" or val == "+inf" or val == "+Inf":
return fpPlusInfinity(fps)
elif val == "-oo" or val == "-inf" or val == "-Inf":
return fpMinusInfinity(fps)
else:
return FPNumRef(Z3_mk_numeral(ctx.ref(), val, fps.ast), ctx)
def FP(name, fpsort, ctx=None):
"""Return a floating-point constant named `name`.
`fpsort` is the floating-point sort.
If `ctx=None`, then the global context is used.
>>> x = FP('x', FPSort(8, 24))
>>> is_fp(x)
True
>>> x.ebits()
8
>>> x.sort()
FPSort(8, 24)
>>> word = FPSort(8, 24)
>>> x2 = FP('x', word)
>>> eq(x, x2)
True
"""
if isinstance(fpsort, FPSortRef) and ctx is None:
ctx = fpsort.ctx
else:
ctx = _get_ctx(ctx)
return FPRef(Z3_mk_const(ctx.ref(), to_symbol(name, ctx), fpsort.ast), ctx)
def FPs(names, fpsort, ctx=None):
"""Return an array of floating-point constants.
>>> x, y, z = FPs('x y z', FPSort(8, 24))
>>> x.sort()
FPSort(8, 24)
>>> x.sbits()
24
>>> x.ebits()
8
>>> fpMul(RNE(), fpAdd(RNE(), x, y), z)
fpMul(RNE(), fpAdd(RNE(), x, y), z)
"""
ctx = _get_ctx(ctx)
if isinstance(names, str):
names = names.split(" ")
return [FP(name, fpsort, ctx) for name in names]
def fpAbs(a, ctx=None):
"""Create a Z3 floating-point absolute value expression.
>>> s = FPSort(8, 24)
>>> rm = RNE()
>>> x = FPVal(1.0, s)
>>> fpAbs(x)
fpAbs(1)
>>> y = FPVal(-20.0, s)
>>> y
-1.25*(2**4)
>>> fpAbs(y)
fpAbs(-1.25*(2**4))
>>> fpAbs(-1.25*(2**4))
fpAbs(-1.25*(2**4))
>>> fpAbs(x).sort()
FPSort(8, 24)
"""
ctx = _get_ctx(ctx)
[a] = _coerce_fp_expr_list([a], ctx)
return FPRef(Z3_mk_fpa_abs(ctx.ref(), a.as_ast()), ctx)
def fpNeg(a, ctx=None):
"""Create a Z3 floating-point addition expression.
>>> s = FPSort(8, 24)
>>> rm = RNE()
>>> x = FP('x', s)
>>> fpNeg(x)
-x
>>> fpNeg(x).sort()
FPSort(8, 24)
"""
ctx = _get_ctx(ctx)
[a] = _coerce_fp_expr_list([a], ctx)
return FPRef(Z3_mk_fpa_neg(ctx.ref(), a.as_ast()), ctx)
def _mk_fp_unary(f, rm, a, ctx):
ctx = _get_ctx(ctx)
[a] = _coerce_fp_expr_list([a], ctx)
if z3_debug():
_z3_assert(is_fprm(rm), "First argument must be a Z3 floating-point rounding mode expression")
_z3_assert(is_fp(a), "Second argument must be a Z3 floating-point expression")
return FPRef(f(ctx.ref(), rm.as_ast(), a.as_ast()), ctx)
def _mk_fp_unary_pred(f, a, ctx):
ctx = _get_ctx(ctx)
[a] = _coerce_fp_expr_list([a], ctx)
if z3_debug():
_z3_assert(is_fp(a), "First argument must be a Z3 floating-point expression")
return BoolRef(f(ctx.ref(), a.as_ast()), ctx)
def _mk_fp_bin(f, rm, a, b, ctx):
ctx = _get_ctx(ctx)
[a, b] = _coerce_fp_expr_list([a, b], ctx)
if z3_debug():
_z3_assert(is_fprm(rm), "First argument must be a Z3 floating-point rounding mode expression")
_z3_assert(is_fp(a) or is_fp(b), "Second or third argument must be a Z3 floating-point expression")
return FPRef(f(ctx.ref(), rm.as_ast(), a.as_ast(), b.as_ast()), ctx)
def _mk_fp_bin_norm(f, a, b, ctx):
ctx = _get_ctx(ctx)
[a, b] = _coerce_fp_expr_list([a, b], ctx)
if z3_debug():
_z3_assert(is_fp(a) or is_fp(b), "First or second argument must be a Z3 floating-point expression")
return FPRef(f(ctx.ref(), a.as_ast(), b.as_ast()), ctx)
def _mk_fp_bin_pred(f, a, b, ctx):
ctx = _get_ctx(ctx)
[a, b] = _coerce_fp_expr_list([a, b], ctx)
if z3_debug():
_z3_assert(is_fp(a) or is_fp(b), "First or second argument must be a Z3 floating-point expression")
return BoolRef(f(ctx.ref(), a.as_ast(), b.as_ast()), ctx)
def _mk_fp_tern(f, rm, a, b, c, ctx):
ctx = _get_ctx(ctx)
[a, b, c] = _coerce_fp_expr_list([a, b, c], ctx)
if z3_debug():
_z3_assert(is_fprm(rm), "First argument must be a Z3 floating-point rounding mode expression")
_z3_assert(is_fp(a) or is_fp(b) or is_fp(c), "Second, third or fourth argument must be a Z3 floating-point expression")
return FPRef(f(ctx.ref(), rm.as_ast(), a.as_ast(), b.as_ast(), c.as_ast()), ctx)
def fpAdd(rm, a, b, ctx=None):
"""Create a Z3 floating-point addition expression.
>>> s = FPSort(8, 24)
>>> rm = RNE()
>>> x = FP('x', s)
>>> y = FP('y', s)
>>> fpAdd(rm, x, y)
fpAdd(RNE(), x, y)
>>> fpAdd(RTZ(), x, y) # default rounding mode is RTZ
x + y
>>> fpAdd(rm, x, y).sort()
FPSort(8, 24)
"""
return _mk_fp_bin(Z3_mk_fpa_add, rm, a, b, ctx)
def fpSub(rm, a, b, ctx=None):
"""Create a Z3 floating-point subtraction expression.
>>> s = FPSort(8, 24)
>>> rm = RNE()
>>> x = FP('x', s)
>>> y = FP('y', s)
>>> fpSub(rm, x, y)
fpSub(RNE(), x, y)
>>> fpSub(rm, x, y).sort()
FPSort(8, 24)
"""
return _mk_fp_bin(Z3_mk_fpa_sub, rm, a, b, ctx)
def fpMul(rm, a, b, ctx=None):
"""Create a Z3 floating-point multiplication expression.
>>> s = FPSort(8, 24)
>>> rm = RNE()
>>> x = FP('x', s)
>>> y = FP('y', s)
>>> fpMul(rm, x, y)
fpMul(RNE(), x, y)
>>> fpMul(rm, x, y).sort()
FPSort(8, 24)
"""
return _mk_fp_bin(Z3_mk_fpa_mul, rm, a, b, ctx)
def fpDiv(rm, a, b, ctx=None):
"""Create a Z3 floating-point division expression.
>>> s = FPSort(8, 24)
>>> rm = RNE()
>>> x = FP('x', s)
>>> y = FP('y', s)
>>> fpDiv(rm, x, y)
fpDiv(RNE(), x, y)
>>> fpDiv(rm, x, y).sort()
FPSort(8, 24)
"""
return _mk_fp_bin(Z3_mk_fpa_div, rm, a, b, ctx)
def fpRem(a, b, ctx=None):
"""Create a Z3 floating-point remainder expression.
>>> s = FPSort(8, 24)
>>> x = FP('x', s)
>>> y = FP('y', s)
>>> fpRem(x, y)
fpRem(x, y)
>>> fpRem(x, y).sort()
FPSort(8, 24)
"""
return _mk_fp_bin_norm(Z3_mk_fpa_rem, a, b, ctx)
def fpMin(a, b, ctx=None):
"""Create a Z3 floating-point minimum expression.
>>> s = FPSort(8, 24)
>>> rm = RNE()
>>> x = FP('x', s)
>>> y = FP('y', s)
>>> fpMin(x, y)
fpMin(x, y)
>>> fpMin(x, y).sort()
FPSort(8, 24)
"""
return _mk_fp_bin_norm(Z3_mk_fpa_min, a, b, ctx)
def fpMax(a, b, ctx=None):
"""Create a Z3 floating-point maximum expression.
>>> s = FPSort(8, 24)
>>> rm = RNE()
>>> x = FP('x', s)
>>> y = FP('y', s)
>>> fpMax(x, y)
fpMax(x, y)
>>> fpMax(x, y).sort()
FPSort(8, 24)
"""
return _mk_fp_bin_norm(Z3_mk_fpa_max, a, b, ctx)
"Functions implementing widget editing"
import re, html, json
from ... import skilift
from ....skilift import fromjson, editsection, editpage, editwidget, versions
from .. import utils
from ... import FailPage, ValidateError, ServerError, GoTo
from ....ski.project_class_definition import SectionData
# a search for anything non-alphanumeric and not an underscore
_AN = re.compile(r'[^\w]')
def _field_name(widget, field_argument):
"Returns a field name"
if "set_names" not in widget:
return field_argument
name_dict = widget["set_names"]
if field_argument in name_dict:
return name_dict[field_argument]
return field_argument
def _field_value(widget, field_argument):
"Returns value,string value"
value = widget["fields"][field_argument]
if value is None:
field_value = ''
elif isinstance(value, list):
if value:
field_value = ','.join(str(val) for val in value)
else:
field_value = ''
else:
field_value = str(value)
return value, field_value
def _field_ref(widgetdescription, field_argument):
"Returns a field textblock reference string"
if field_argument == 'show':
return 'widgets.show'
elif field_argument == 'widget_class':
return 'widgets.widget_class'
elif field_argument == 'widget_style':
return 'widgets.widget_style'
elif field_argument == 'show_error':
return 'widgets.show_error'
elif field_argument == 'clear_error':
return 'widgets.clear_error'
else:
return ".".join(("widgets", widgetdescription.modulename, widgetdescription.classname, field_argument))
def retrieve_widget(skicall):
"Fills in the edit a widget page"
call_data = skicall.call_data
pd = call_data['pagedata']
sd = SectionData("adminhead")
# get the widget name
if ("left_nav","navbuttons","nav_links") in call_data:
# should be submitted as widgetname from left navigation links
widget_name = call_data["left_nav","navbuttons","nav_links"]
elif 'widget_name' in call_data:
widget_name = call_data['widget_name']
elif 'part_tuple' in call_data:
# called from dom table, via responder that finds what is being edited
# and has set it into part_tuple
part_tuple = call_data['part_tuple']
widget_name = part_tuple.name
else:
raise FailPage(message="Invalid widget")
if not widget_name:
raise FailPage(message="Invalid widget")
# and this is the widget to be edited, it is now set into session data
call_data['widget_name'] = widget_name
# Fill in header
sd["page_head","large_text"] = "Widget " + widget_name
pd.update(sd)
project = call_data['editedprojname']
section_name = None
pagenumber = None
if 'section_name' in call_data:
section_name = call_data['section_name']
elif 'page_number' in call_data:
pagenumber = call_data['page_number']
else:
raise FailPage(message="No section or page given")
try:
if section_name:
widgetdescription = editwidget.section_widget_description(project, section_name, call_data['schange'], widget_name)
widget = editwidget.section_widget(project, section_name, call_data['schange'], widget_name)
else:
widgetdescription = editwidget.page_widget_description(project, pagenumber, call_data['pchange'], widget_name)
widget = editwidget.page_widget(project, pagenumber, call_data['pchange'], widget_name)
except ServerError as e:
raise FailPage(e.message)
pd['widget_type','para_text'] = "This widget is of type %s.%s." % (widgetdescription.modulename, widgetdescription.classname)
pd['widget_textblock','textblock_ref'] = ".".join(("widgets", widgetdescription.modulename, widgetdescription.classname))
pd['widget_name','input_text'] = widget_name
pd['widget_brief','input_text'] = widgetdescription.brief
# widgetdescription.fields_single is a list of namedtuples, each inner namedtuple representing a field
# with items ['field_arg', 'field_type', 'valdt', 'jsonset', 'cssclass', 'cssstyle']
args = widgetdescription.fields_single
arg_list = widgetdescription.fields_list
arg_table = widgetdescription.fields_table
arg_dict = widgetdescription.fields_dictionary
if arg_list or arg_table or arg_dict:
pd['args_multi','show'] = True
else:
pd['args_multi','show'] = False
# args is shown on a LinkTextBlockTable2
# contents row is
# col 0 is the visible text to place in the link,
# col 1 is the get field of the link
# col 2 is the second get field of the link
# col 3 is text appearing in the second table column
# col 4 is the reference string of a textblock to appear in the third table column
# col 5 is text to appear if the reference cannot be found in the database
# col 6 normally empty string, if set to text it will replace the textblock
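# For example (hypothetical widget module/class names, as shown below), a validated
# field "hide" whose current value is True would contribute a row such as:
#   ["* hide", "hide", "", "True", "widgets.checkbox.CheckBox1.hide",
#    "No description for widgets.checkbox.CheckBox1.hide", ""]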
args_valdt = False
args_content = []
if args:
for arg in args:
name = _field_name(widget, arg.field_arg)
ref = _field_ref(widgetdescription, arg.field_arg)
if arg.valdt:
name = "* " + name
args_valdt = True
# field value
value,field_value = _field_value(widget, arg.field_arg)
if len(field_value) > 20:
field_value = field_value[:18]
field_value += '...'
arg_row = [ name, arg.field_arg, '',field_value, ref, 'No description for %s' % (ref,), '']
args_content.append(arg_row)
pd['args','link_table'] = args_content
else:
pd['args','show'] = False
pd['args_description','show'] = False
# arg_list, arg_table and arg_dict are shown on LinkTextBlockTable widgets
# contents row is
# col 0 is the visible text to place in the link,
# col 1 is the get field of the link
# col 2 is the second get field of the link
# col 3 is the reference string of a textblock to appear in the column adjacent to the link
# col 4 is text to appear if the reference cannot be found in the database
# col 5 normally empty string, if set to text it will replace the textblock
arg_list_content = []
if arg_list:
for arg in arg_list:
name = _field_name(widget, arg.field_arg)
ref = _field_ref(widgetdescription, arg.field_arg)
if arg.valdt:
name = "* " + name
args_valdt = True
arg_row = [ name, arg.field_arg, '', ref, 'No description for %s' % (ref,), '']
arg_list_content.append(arg_row)
pd['arg_list','link_table'] = arg_list_content
else:
pd['arg_list','show'] = False
pd['arg_list_description','show'] = False
arg_table_content = []
if arg_table:
for arg in arg_table:
name = _field_name(widget, arg.field_arg)
ref = _field_ref(widgetdescription, arg.field_arg)
if arg.valdt:
name = "* " + name
args_valdt = True
arg_row = [ name, arg.field_arg, '', ref, 'No description for %s' % (ref,), '']
arg_table_content.append(arg_row)
pd['arg_table','link_table'] = arg_table_content
else:
pd['arg_table','show'] = False
pd['arg_table_description','show'] = False
arg_dict_content = []
if arg_dict:
for arg in arg_dict:
name = _field_name(widget, arg.field_arg)
ref = _field_ref(widgetdescription, arg.field_arg)
if arg.valdt:
name = "* " + name
args_valdt = True
arg_row = [ name, arg.field_arg, '', ref, 'No description for %s' % (ref,), '']
arg_dict_content.append(arg_row)
pd['arg_dict','link_table'] = arg_dict_content
else:
pd['arg_dict','show'] = False
pd['arg_dict_description','show'] = False
pd['args_valdt','show'] = args_valdt
# display the widget html
pd['widget_code','pre_text'] = widgetdescription.illustration
if widgetdescription.containers:
pd['containerdesc','show'] = True
# remove any unwanted fields from session call_data
if 'container' in call_data:
del call_data['container']
if 'location' in call_data:
del call_data['location']
if 'part' in call_data:
del call_data['part']
if 'field_arg' in call_data:
del call_data['field_arg']
if 'validx' in call_data:
del call_data['validx']
def set_widget_params(skicall):
"Sets widget name and brief"
call_data = skicall.call_data
project = call_data['editedprojname']
section_name = None
pagenumber = None
if 'section_name' in call_data:
section_name = call_data['section_name']
elif 'page_number' in call_data:
pagenumber = call_data['page_number']
else:
raise FailPage(message="No section or page given")
if 'widget_name' in call_data:
widget_name = call_data['widget_name']
else:
raise FailPage(message="Widget not identified")
new_name = None
brief = None
if 'new_widget_name' in call_data:
new_name = call_data['new_widget_name']
elif 'widget_brief' in call_data:
brief = call_data['widget_brief']
else:
raise FailPage(message="No new name or brief given")
try:
if section_name:
if new_name:
call_data['schange'] = editwidget.rename_section_widget(project, section_name, call_data['schange'], widget_name, new_name)
call_data['status'] = "Widget name changed"
call_data['widget_name'] = new_name
else:
call_data['schange'] = editwidget.new_brief_in_section_widget(project, section_name, call_data['schange'], widget_name, brief)
call_data['status'] = "Widget brief changed"
else:
if new_name:
call_data['pchange'] = editwidget.rename_page_widget(project, pagenumber, call_data['pchange'], widget_name, new_name)
call_data['status'] = "Widget name changed"
call_data['widget_name'] = new_name
else:
call_data['pchange'] = editwidget.new_brief_in_page_widget(project, pagenumber, call_data['pchange'], widget_name, brief)
call_data['status'] = "Widget brief changed"
except ServerError as e:
raise FailPage(e.message)
def retrieve_editfield(skicall):
"Fills in the edit a widget field page"
call_data = skicall.call_data
pd = call_data['pagedata']
sd = SectionData("adminhead")
project = call_data['editedprojname']
section_name = None
pagenumber = None
if 'section_name' in call_data:
section_name = call_data['section_name']
elif 'page_number' in call_data:
pagenumber = call_data['page_number']
else:
raise FailPage(message="No section or page given")
if 'widget_name' in call_data:
widget_name = call_data['widget_name']
else:
raise FailPage(message="Widget not identified")
if 'field_arg' in call_data:
field_arg = call_data['field_arg']
else:
raise FailPage("Field not identified")
try:
if section_name:
widgetdescription = editwidget.section_widget_description(project, section_name, call_data['schange'], widget_name)
widget = editwidget.section_widget(project, section_name, call_data['schange'], widget_name)
else:
widgetdescription = editwidget.page_widget_description(project, pagenumber, call_data['pchange'], widget_name)
widget = editwidget.page_widget(project, pagenumber, call_data['pchange'], widget_name)
except ServerError as e:
raise FailPage(e.message)
pd['widget_type','para_text'] = "Widget type : %s.%s" % (widgetdescription.modulename, widgetdescription.classname)
pd['widget_name','para_text'] = "Widget name : %s" % (widget_name,)
pd['field_type','para_text'] = "Field type : %s" % (field_arg,)
# widgetdescription.fields_single is a list of namedtuples, each inner namedtuple representing a field
# with items ['field_arg', 'field_type', 'valdt', 'jsonset', 'cssclass', 'cssstyle']
# create dictionaries of {field_arg : namedtuples }
fields_single = { arg.field_arg:arg for arg in widgetdescription.fields_single }
fields_list = { arg.field_arg:arg for arg in widgetdescription.fields_list }
fields_table = { arg.field_arg:arg for arg in widgetdescription.fields_table }
fields_dictionary = { arg.field_arg:arg for arg in widgetdescription.fields_dictionary }
if field_arg in fields_single:
field_datalist = fields_single[field_arg]
elif field_arg in fields_list:
field_datalist = fields_list[field_arg]
elif field_arg in fields_table:
field_datalist = fields_table[field_arg]
elif field_arg in fields_dictionary:
field_datalist = fields_dictionary[field_arg]
else:
raise FailPage("Field not identified")
if field_datalist.jsonset:
pd['json_enabled','para_text'] = "JSON Enabled : Yes"
else:
pd['json_enabled','para_text'] = "JSON Enabled : No"
if field_arg | |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['QosPolicyArgs', 'QosPolicy']
@pulumi.input_type
class QosPolicyArgs:
def __init__(__self__, *,
dest_cidr: pulumi.Input[str],
dest_port_range: pulumi.Input[str],
ip_protocol: pulumi.Input[str],
priority: pulumi.Input[int],
qos_id: pulumi.Input[str],
source_cidr: pulumi.Input[str],
source_port_range: pulumi.Input[str],
description: Optional[pulumi.Input[str]] = None,
end_time: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
start_time: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a QosPolicy resource.
:param pulumi.Input[str] dest_cidr: The destination CIDR block.
:param pulumi.Input[str] dest_port_range: The destination port range.
:param pulumi.Input[str] ip_protocol: The transport layer protocol.
:param pulumi.Input[int] priority: The priority of the quintuple rule. A smaller value indicates a higher priority. If the priorities of two quintuple rules are the same, the rule created earlier is applied first. Value range: 1 to 7.
:param pulumi.Input[str] qos_id: The instance ID of the QoS policy to which the quintuple rule is created.
:param pulumi.Input[str] source_cidr: The source CIDR block.
:param pulumi.Input[str] source_port_range: The source port range of the transport layer.
:param pulumi.Input[str] description: The description of the QoS policy.
:param pulumi.Input[str] end_time: The expiration time of the quintuple rule.
:param pulumi.Input[str] name: The name of the QoS policy.
:param pulumi.Input[str] start_time: The time when the quintuple rule takes effect.
"""
pulumi.set(__self__, "dest_cidr", dest_cidr)
pulumi.set(__self__, "dest_port_range", dest_port_range)
pulumi.set(__self__, "ip_protocol", ip_protocol)
pulumi.set(__self__, "priority", priority)
pulumi.set(__self__, "qos_id", qos_id)
pulumi.set(__self__, "source_cidr", source_cidr)
pulumi.set(__self__, "source_port_range", source_port_range)
if description is not None:
pulumi.set(__self__, "description", description)
if end_time is not None:
pulumi.set(__self__, "end_time", end_time)
if name is not None:
pulumi.set(__self__, "name", name)
if start_time is not None:
pulumi.set(__self__, "start_time", start_time)
@property
@pulumi.getter(name="destCidr")
def dest_cidr(self) -> pulumi.Input[str]:
"""
The destination CIDR block.
"""
return pulumi.get(self, "dest_cidr")
@dest_cidr.setter
def dest_cidr(self, value: pulumi.Input[str]):
pulumi.set(self, "dest_cidr", value)
@property
@pulumi.getter(name="destPortRange")
def dest_port_range(self) -> pulumi.Input[str]:
"""
The destination port range.
"""
return pulumi.get(self, "dest_port_range")
@dest_port_range.setter
def dest_port_range(self, value: pulumi.Input[str]):
pulumi.set(self, "dest_port_range", value)
@property
@pulumi.getter(name="ipProtocol")
def ip_protocol(self) -> pulumi.Input[str]:
"""
The transport layer protocol.
"""
return pulumi.get(self, "ip_protocol")
@ip_protocol.setter
def ip_protocol(self, value: pulumi.Input[str]):
pulumi.set(self, "ip_protocol", value)
@property
@pulumi.getter
def priority(self) -> pulumi.Input[int]:
"""
The priority of the quintuple rule. A smaller value indicates a higher priority. If the priorities of two quintuple rules are the same, the rule created earlier is applied first. Value range: 1 to 7.
"""
return pulumi.get(self, "priority")
@priority.setter
def priority(self, value: pulumi.Input[int]):
pulumi.set(self, "priority", value)
@property
@pulumi.getter(name="qosId")
def qos_id(self) -> pulumi.Input[str]:
"""
The instance ID of the QoS policy to which the quintuple rule is created.
"""
return pulumi.get(self, "qos_id")
@qos_id.setter
def qos_id(self, value: pulumi.Input[str]):
pulumi.set(self, "qos_id", value)
@property
@pulumi.getter(name="sourceCidr")
def source_cidr(self) -> pulumi.Input[str]:
"""
The source CIDR block.
"""
return pulumi.get(self, "source_cidr")
@source_cidr.setter
def source_cidr(self, value: pulumi.Input[str]):
pulumi.set(self, "source_cidr", value)
@property
@pulumi.getter(name="sourcePortRange")
def source_port_range(self) -> pulumi.Input[str]:
"""
The source port range of the transport layer.
"""
return pulumi.get(self, "source_port_range")
@source_port_range.setter
def source_port_range(self, value: pulumi.Input[str]):
pulumi.set(self, "source_port_range", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
The description of the QoS policy.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter(name="endTime")
def end_time(self) -> Optional[pulumi.Input[str]]:
"""
The expiration time of the quintuple rule.
"""
return pulumi.get(self, "end_time")
@end_time.setter
def end_time(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "end_time", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the QoS policy.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="startTime")
def start_time(self) -> Optional[pulumi.Input[str]]:
"""
The time when the quintuple rule takes effect.
"""
return pulumi.get(self, "start_time")
@start_time.setter
def start_time(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "start_time", value)
@pulumi.input_type
class _QosPolicyState:
def __init__(__self__, *,
description: Optional[pulumi.Input[str]] = None,
dest_cidr: Optional[pulumi.Input[str]] = None,
dest_port_range: Optional[pulumi.Input[str]] = None,
end_time: Optional[pulumi.Input[str]] = None,
ip_protocol: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
priority: Optional[pulumi.Input[int]] = None,
qos_id: Optional[pulumi.Input[str]] = None,
source_cidr: Optional[pulumi.Input[str]] = None,
source_port_range: Optional[pulumi.Input[str]] = None,
start_time: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering QosPolicy resources.
:param pulumi.Input[str] description: The description of the QoS policy.
:param pulumi.Input[str] dest_cidr: The destination CIDR block.
:param pulumi.Input[str] dest_port_range: The destination port range.
:param pulumi.Input[str] end_time: The expiration time of the quintuple rule.
:param pulumi.Input[str] ip_protocol: The transport layer protocol.
:param pulumi.Input[str] name: The name of the QoS policy.
:param pulumi.Input[int] priority: The priority of the quintuple rule. A smaller value indicates a higher priority. If the priorities of two quintuple rules are the same, the rule created earlier is applied first. Value range: 1 to 7.
:param pulumi.Input[str] qos_id: The instance ID of the QoS policy to which the quintuple rule is created.
:param pulumi.Input[str] source_cidr: The source CIDR block.
:param pulumi.Input[str] source_port_range: The source port range of the transport layer.
:param pulumi.Input[str] start_time: The time when the quintuple rule takes effect.
"""
if description is not None:
pulumi.set(__self__, "description", description)
if dest_cidr is not None:
pulumi.set(__self__, "dest_cidr", dest_cidr)
if dest_port_range is not None:
pulumi.set(__self__, "dest_port_range", dest_port_range)
if end_time is not None:
pulumi.set(__self__, "end_time", end_time)
if ip_protocol is not None:
pulumi.set(__self__, "ip_protocol", ip_protocol)
if name is not None:
pulumi.set(__self__, "name", name)
if priority is not None:
pulumi.set(__self__, "priority", priority)
if qos_id is not None:
pulumi.set(__self__, "qos_id", qos_id)
if source_cidr is not None:
pulumi.set(__self__, "source_cidr", source_cidr)
if source_port_range is not None:
pulumi.set(__self__, "source_port_range", source_port_range)
if start_time is not None:
pulumi.set(__self__, "start_time", start_time)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
The description of the QoS policy.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter(name="destCidr")
def dest_cidr(self) -> Optional[pulumi.Input[str]]:
"""
The destination CIDR block.
"""
return pulumi.get(self, "dest_cidr")
@dest_cidr.setter
def dest_cidr(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "dest_cidr", value)
@property
@pulumi.getter(name="destPortRange")
def dest_port_range(self) -> Optional[pulumi.Input[str]]:
"""
The destination port range.
"""
return pulumi.get(self, "dest_port_range")
@dest_port_range.setter
def dest_port_range(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "dest_port_range", value)
@property
@pulumi.getter(name="endTime")
def end_time(self) -> Optional[pulumi.Input[str]]:
"""
The expiration time of the quintuple rule.
"""
return pulumi.get(self, "end_time")
@end_time.setter
def end_time(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "end_time", value)
@property
@pulumi.getter(name="ipProtocol")
def ip_protocol(self) -> Optional[pulumi.Input[str]]:
"""
The transport layer protocol.
"""
return pulumi.get(self, "ip_protocol")
@ip_protocol.setter
def ip_protocol(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "ip_protocol", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the QoS policy.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def priority(self) -> Optional[pulumi.Input[int]]:
"""
The priority of the quintuple rule. A smaller value indicates a higher priority. If the priorities of two quintuple rules are the same, the rule created earlier is applied first. Value range: 1 to 7.
"""
return pulumi.get(self, "priority")
@priority.setter
def priority(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "priority", value)
@property
@pulumi.getter(name="qosId")
def qos_id(self) -> Optional[pulumi.Input[str]]:
"""
The instance ID of the QoS policy to which the quintuple rule is created.
"""
return pulumi.get(self, "qos_id")
@qos_id.setter
def qos_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "qos_id", value)
@property
@pulumi.getter(name="sourceCidr")
def source_cidr(self) -> Optional[pulumi.Input[str]]:
"""
The source CIDR block.
"""
return pulumi.get(self, "source_cidr")
@source_cidr.setter
def source_cidr(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "source_cidr", value)
@property
@pulumi.getter(name="sourcePortRange")
def source_port_range(self) -> Optional[pulumi.Input[str]]:
"""
The source port range of the transport layer.
"""
return pulumi.get(self, "source_port_range")
@source_port_range.setter
def source_port_range(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "source_port_range", value)
@property
@pulumi.getter(name="startTime")
def start_time(self) -> Optional[pulumi.Input[str]]:
"""
The time when the quintuple rule takes effect.
"""
return pulumi.get(self, "start_time")
@start_time.setter
def start_time(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "start_time", value)
class QosPolicy(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
description: Optional[pulumi.Input[str]] = None,
dest_cidr: Optional[pulumi.Input[str]] = None,
dest_port_range: Optional[pulumi.Input[str]] = None,
end_time: Optional[pulumi.Input[str]] = None,
ip_protocol: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
priority: Optional[pulumi.Input[int]] = None,
qos_id: Optional[pulumi.Input[str]] = None,
source_cidr: Optional[pulumi.Input[str]] = None,
source_port_range: Optional[pulumi.Input[str]] = None,
start_time: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Provides a Sag qos policy resource.
You need to create a QoS policy to set priorities, rate limits, and quintuple rules for different messages.
For information about Sag Qos Policy and how to use it, see [What is Qos Policy](https://www.alibabacloud.com/help/doc-detail/140065.htm).
> **NOTE:** Available in 1.60.0+
> **NOTE:** Only the | |
j*deg, **fmtspec)
ret += 'ret.v{i}.v{k} = vec_ld(0, buf);\n\n'.\
format(i=i, k=k, **fmtspec)
ret += 'return ret;'
return ret
# Load 1 for every supported type
if deg == 1:
if aligned:
return 'return vec_ld(0, {in0});'.format(**fmtspec)
else:
return 'return *(({ppc_typ}*) {in0});'.\
format(ppc_typ=ppc_vec_type(typ), **fmtspec)
# Code to load aligned/unaligned vectors
if aligned:
load = 'nsimd_{simd_ext}_v{typ}x{deg} ret;\n'.format(deg=deg, **fmtspec) + \
'\n'.join(['nsimd_{simd_ext}_v{typ} in{i} = vec_ld({i} * 16, {in0});'. \
format(i=i, **fmtspec) \
for i in range (0, deg)])
else:
load = 'nsimd_{simd_ext}_v{typ}x{deg} ret;\n'.format(deg=deg, **fmtspec) + \
'\n'.join(['nsimd_{simd_ext}_v{typ} in{i} = *(({ppc_typ}*) ({in0} + {i}*{vec_size}));'. \
format(vec_size=str(128//int(typ[1:])),
ppc_typ=ppc_vec_type(typ), i=i, **fmtspec) \
for i in range (0, deg)])
# Load 2 for every supported type
if deg == 2:
if typ[1:] == '32':
return '''
{load}
nsimd_{simd_ext}_v{typ} tmp0 = vec_mergeh(in0, in1);
nsimd_{simd_ext}_v{typ} tmp1 = vec_mergel(in0, in1);
ret.v0 = vec_mergeh(tmp0, tmp1);
ret.v1 = vec_mergel(tmp0, tmp1);
return ret;
'''.format(load=load, **fmtspec)
elif typ[1:] == '16':
return '''
{load}
nsimd_{simd_ext}_v{typ} tmp0 = vec_mergeh(in0, in1);
nsimd_{simd_ext}_v{typ} tmp1 = vec_mergel(in0, in1);
in0 = vec_mergeh(tmp0, tmp1);
in1 = vec_mergel(tmp0, tmp1);
ret.v0 = vec_mergeh(in0, in1);
ret.v1 = vec_mergel(in0, in1);
return ret;
'''.format(load=load, **fmtspec)
elif typ[1:] == '8':
return '''
__vector unsigned char perm1 = NSIMD_PERMUTE_MASK_8(
0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30);
__vector unsigned char perm2 = NSIMD_PERMUTE_MASK_8(
1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31);
{load}
ret.v0 = vec_perm(in0, in1, perm1);
ret.v1 = vec_perm(in0, in1, perm2);
return ret;
'''.format(load=load, **fmtspec)
# Load 3 for every supported type
elif deg == 3:
if typ[1:] == '32':
return '''
__vector char perm1 = NSIMD_PERMUTE_MASK_32(0, 3, 6, 0);
{load}
nsimd_{simd_ext}_v{typ} tmp0 = vec_perm(in0, in1, perm1);
nsimd_{simd_ext}_v{typ} tmp1 = vec_perm(in1, in2, perm1);
nsimd_{simd_ext}_v{typ} tmp2 = vec_perm(in2, in0, perm1);
__vector char perm2 = NSIMD_PERMUTE_MASK_32(0, 1, 2, 5);
__vector char perm3 = NSIMD_PERMUTE_MASK_32(5, 0, 1, 2);
__vector char perm4 = NSIMD_PERMUTE_MASK_32(2, 5, 0, 1);
ret.v0 = vec_perm(tmp0, in2, perm2);
ret.v1 = vec_perm(tmp1, in0, perm3);
ret.v2 = vec_perm(tmp2, in1, perm4);
return ret;
'''.format(load=load, **fmtspec)
elif typ[1:] == '16':
return '''
{load}
__vector char permRAB = NSIMD_PERMUTE_MASK_16(0, 3, 6, 9, 12, 15, 0, 0);
__vector char permRDC = NSIMD_PERMUTE_MASK_16(0, 1, 2, 3, 4, 5, 10, 13);
nsimd_{simd_ext}_v{typ} tmp0 = vec_perm(in0, in1, permRAB);
ret.v0 = vec_perm(tmp0, in2, permRDC);
__vector char permGAB = NSIMD_PERMUTE_MASK_16(1, 4, 7, 10, 13, 0, 0, 0);
__vector char permGEC = NSIMD_PERMUTE_MASK_16(0, 1, 2, 3, 4, 8, 11, 14);
nsimd_{simd_ext}_v{typ} tmp1 = vec_perm(in0, in1, permGAB);
ret.v1 = vec_perm(tmp1, in2, permGEC);
__vector char permBAB = NSIMD_PERMUTE_MASK_16(2, 5, 8, 11, 14, 0, 0, 0);
__vector char permBFC = NSIMD_PERMUTE_MASK_16(0, 1, 2, 3, 4, 9, 12, 15);
nsimd_{simd_ext}_v{typ} tmp2 = vec_perm(in0, in1, permBAB);
ret.v2 = vec_perm(tmp2, in2, permBFC);
return ret;
'''.format(load=load, **fmtspec)
elif typ[1:] == '8':
return '''
{load}
__vector char permRAB = NSIMD_PERMUTE_MASK_8(0, 3, 6, 9, 12, 15,
18, 21, 24, 27, 30, 0, 0, 0, 0, 0);
__vector char permRDC = NSIMD_PERMUTE_MASK_8(0, 1, 2, 3, 4, 5, 6,
7, 8, 9, 10, 17, 20, 23, 26, 29);
nsimd_{simd_ext}_v{typ} tmp0 = vec_perm(in0, in1, permRAB);
ret.v0 = vec_perm(tmp0, in2, permRDC);
__vector char permGAB = NSIMD_PERMUTE_MASK_8(1, 4, 7, 10, 13, 16,
19, 22, 25, 28, 31, 0, 0, 0, 0, 0);
__vector char permGEC = NSIMD_PERMUTE_MASK_8(0, 1, 2, 3, 4, 5, 6,
7, 8, 9, 10, 18, 21, 24, 27, 30);
nsimd_{simd_ext}_v{typ} tmp1 = vec_perm(in0, in1, permGAB);
ret.v1 = vec_perm(tmp1, in2, permGEC);
__vector char permBAB = NSIMD_PERMUTE_MASK_8(2, 5, 8, 11, 14, 17,
20, 23, 26, 29, 0, 0, 0, 0, 0, 0);
__vector char permBFC = NSIMD_PERMUTE_MASK_8(0, 1, 2, 3, 4, 5, 6,
7, 8, 9, 16, 19, 22, 25, 28, 31);
nsimd_{simd_ext}_v{typ} tmp2 = vec_perm(in0, in1, permBAB);
ret.v2 = vec_perm(tmp2, in2, permBFC);
return ret;
'''.format(load=load, **fmtspec)
# Load 4 for every supported type
else:
if typ[1:] == '32':
return '''
{load}
nsimd_{simd_ext}_v{typ} tmp0 = vec_mergeh(in0, in2);
nsimd_{simd_ext}_v{typ} tmp1 = vec_mergel(in0, in2);
nsimd_{simd_ext}_v{typ} tmp2 = vec_mergeh(in1, in3);
nsimd_{simd_ext}_v{typ} tmp3 = vec_mergel(in1, in3);
ret.v0 = vec_mergeh(tmp0, tmp2);
ret.v1 = vec_mergel(tmp0, tmp2);
ret.v2 = vec_mergeh(tmp1, tmp3);
ret.v3 = vec_mergel(tmp1, tmp3);
return ret;
'''.format (load=load, **fmtspec)
elif typ[1:] == '16':
return '''
{load}
ret.v0 = vec_mergeh(in0, in2);
ret.v1 = vec_mergel(in0, in2);
ret.v2 = vec_mergeh(in1, in3);
ret.v3 = vec_mergel(in1, in3);
nsimd_{simd_ext}_v{typ} tmp0 = vec_mergeh(ret.v0, ret.v2);
nsimd_{simd_ext}_v{typ} tmp1 = vec_mergel(ret.v0, ret.v2);
nsimd_{simd_ext}_v{typ} tmp2 = vec_mergeh(ret.v1, ret.v3);
nsimd_{simd_ext}_v{typ} tmp3 = vec_mergel(ret.v1, ret.v3);
ret.v0 = vec_mergeh(tmp0, tmp2);
ret.v1 = vec_mergel(tmp0, tmp2);
ret.v2 = vec_mergeh(tmp1, tmp3);
ret.v3 = vec_mergel(tmp1, tmp3);
return ret;
'''.format(load=load, **fmtspec)
elif typ[1:] == '8':
return '''
{load}
nsimd_{simd_ext}_v{typ} tmp0 = vec_mergeh(in0, in2);
nsimd_{simd_ext}_v{typ} tmp1 = vec_mergel(in0, in2);
nsimd_{simd_ext}_v{typ} tmp2 = vec_mergeh(in1, in3);
nsimd_{simd_ext}_v{typ} tmp3 = vec_mergel(in1, in3);
ret.v0 = vec_mergeh(tmp0, tmp2);
ret.v1 = vec_mergel(tmp0, tmp2);
ret.v2 = vec_mergeh(tmp1, tmp3);
ret.v3 = vec_mergel(tmp1, tmp3);
tmp0 = vec_mergeh(ret.v0, ret.v2);
tmp1 = vec_mergel(ret.v0, ret.v2);
tmp2 = vec_mergeh(ret.v1, ret.v3);
tmp3 = vec_mergel(ret.v1, ret.v3);
ret.v0 = vec_mergeh(tmp0, tmp2);
ret.v1 = vec_mergel(tmp0, tmp2);
ret.v2 = vec_mergeh(tmp1, tmp3);
ret.v3 = vec_mergel(tmp1, tmp3);
return ret;
'''.format(load=load, **fmtspec)
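# Illustrative summary of what the generated load code above computes: for deg == 2
# on f32 the interleaved memory block
#   a0 b0 a1 b1 | a2 b2 a3 b3
# comes back de-interleaved as ret.v0 = {a0,a1,a2,a3} and ret.v1 = {b0,b1,b2,b3};
# the two vec_mergeh/vec_mergel passes perform that transpose without any gather.
# The generated C for the matching store can be inspected directly, e.g.
#   print(store1234('power7', 'f32', 2, True))
# ('power7' is an assumed SIMD extension name used here only for illustration).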
## Stores of degree 1, 2, 3 and 4
def store1234(simd_ext, typ, deg, aligned):
# store n for 64-bit types
if typ[1:] == '64':
return \
'\n'.join(['*({{in0}} + {}) = {{in{}}}.v0;'. \
format(i - 1, i).format(**fmtspec) \
for i in range(1, deg + 1)]) + '\n' + \
'\n'.join(['*({{in0}} + {}) = {{in{}}}.v1;'. \
format(i + deg - 1, i).format(**fmtspec) \
for i in range(1, deg + 1)])
if typ == 'f16':
if deg == 1:
return \
'''f32 buf[4];
vec_st({in1}.v0, 0, buf);
*((u16*){in0} ) = nsimd_f32_to_u16(buf[0]);
*((u16*){in0} + 1) = nsimd_f32_to_u16(buf[1]);
*((u16*){in0} + 2) = nsimd_f32_to_u16(buf[2]);
*((u16*){in0} + 3) = nsimd_f32_to_u16(buf[3]);
vec_st({in1}.v1, 0, buf);
*((u16*){in0} + 4) = nsimd_f32_to_u16(buf[0]);
*((u16*){in0} + 5) = nsimd_f32_to_u16(buf[1]);
*((u16*){in0} + 6) = nsimd_f32_to_u16(buf[2]);
*((u16*){in0} + 7) = nsimd_f32_to_u16(buf[3]);
'''.format(**fmtspec)
else:
ret = 'f32 buf[4];\n'
for i in range(0, deg):
for k in range(0, 2):
ret += 'vec_st({{in{i}}}.v{k}, 0, buf);\n'.\
format(i=i+1, k=k).format(**fmtspec)
for j in range (0, 4):
ret += '*((u16*){in0} + {shift}) = nsimd_f32_to_u16(buf[{j}]);\n'. \
format(j=j, shift=i + k*4*deg + j*deg, **fmtspec)
return ret
# store 1 for every supported type
if deg == 1:
if aligned:
return 'vec_st({in1}, 0, {in0});'.format(**fmtspec)
else:
return '*(({ppc_typ}*) {in0}) = {in1};'.\
format(ppc_typ=ppc_vec_type(typ), **fmtspec)
# Code to store aligned/unaligned vectors
if aligned:
store = '\n'.join(['vec_st(ret{i}, 16*{i}, {in0});'. \
format(i=i, **fmtspec) \
for i in range (0, deg)])
else:
store = '\n'.join(['*({ppc_typ}*) ({in0} + {i}*{vec_size}) = ret{i};'. \
format(vec_size=str(128//int(typ[1:])),
ppc_typ=ppc_vec_type(typ), i=i, **fmtspec) \
for i in range (0, deg)])
# store 2 for every supported type
if deg == 2:
return '''
nsimd_{simd_ext}_v{typ} ret0 = vec_mergeh({in1}, {in2});
nsimd_{simd_ext}_v{typ} ret1 = vec_mergel({in1}, {in2});
{store}
'''.format(store=store, **fmtspec)
# store 3 for every supported type
elif deg == 3:
if typ[1:] == '32':
return '''
__vector char perm1 = NSIMD_PERMUTE_MASK_32(0, 2, 4, 6);
__vector char perm2 = NSIMD_PERMUTE_MASK_32(0, 2, 5, 7);
__vector char perm3 = NSIMD_PERMUTE_MASK_32(1, 3, 5, 7);
nsimd_{simd_ext}_v{typ} tmp0 = vec_perm({in1}, {in2}, perm1);
nsimd_{simd_ext}_v{typ} tmp1 = vec_perm({in3}, {in1}, perm2);
nsimd_{simd_ext}_v{typ} tmp2 = vec_perm({in2}, {in3}, perm3);
nsimd_{simd_ext}_v{typ} ret0 = vec_perm(tmp0, tmp1, perm1);
nsimd_{simd_ext}_v{typ} ret1 = vec_perm(tmp2, tmp0, perm2);
nsimd_{simd_ext}_v{typ} ret2 = vec_perm(tmp1, tmp2, perm3);
{store}
'''.format(store=store, **fmtspec)
elif typ[1:] == '16':
return '''
__vector char permARG = NSIMD_PERMUTE_MASK_16(0, 8, 0, 1, 9, 0, 2, 10);
__vector char permAXB = NSIMD_PERMUTE_MASK_16(0, 1, 8, 3, 4, 9, 6, 7);
nsimd_{simd_ext}_v{typ} tmp0 = vec_perm({in1}, {in2}, permARG);
nsimd_{simd_ext}_v{typ} ret0 = vec_perm(tmp0, {in3}, permAXB);
__vector char permBRG = NSIMD_PERMUTE_MASK_16(0, 3, 11, 0, 4, 12, 0, 5);
__vector char permBYB = NSIMD_PERMUTE_MASK_16(10, 1, 2, 11, 4, 5, 12, 7);
nsimd_{simd_ext}_v{typ} tmp1 = vec_perm({in1}, {in2}, permBRG);
nsimd_{simd_ext}_v{typ} ret1 = vec_perm(tmp1, {in3}, permBYB);
__vector char permCRG = NSIMD_PERMUTE_MASK_16(13, 0, 6, 14, 0, 7, 15, 0);
__vector char permCZB = NSIMD_PERMUTE_MASK_16(0, 13, 2, 3, 14, 5, 6, 15);
nsimd_{simd_ext}_v{typ} tmp2 = vec_perm({in1}, {in2}, permCRG);
nsimd_{simd_ext}_v{typ} ret2 = vec_perm(tmp2, {in3}, permCZB);
{store}
'''.format(store=store, **fmtspec)
elif typ[1:] == '8':
return '''
__vector char mARG = NSIMD_PERMUTE_MASK_8(0, | |
# Copyright (c) 2011, <NAME> All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer. Redistributions in binary
# form must reproduce the above copyright notice, this list of conditions and
# the following disclaimer in the documentation and/or other materials provided
# with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS
# AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
# BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER
# OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import copy
import fnmatch
import functools
import itertools
import json
import math
import os
import platform
import random
import re
import signal
import socket
import string
import subprocess
import sys
import threading
import time
import traceback
import urllib.request
from collections import defaultdict, deque, Counter
from datetime import datetime, timedelta
from typing import Set
from oyoyo.parse import parse_nick
import botconfig
import src
import src.settings as var
from src.utilities import *
from src import db, events, dispatcher, channels, users, hooks, logger, debuglog, errlog, plog, cats
from src.users import User
from src.containers import UserList, UserSet, UserDict, DefaultUserDict
from src.decorators import command, cmd, hook, handle_error, event_listener, COMMANDS
from src.messages import messages
from src.warnings import *
from src.context import IRCContext
from src.status import try_protection, add_dying, is_dying, kill_players, get_absent, is_silent
from src.votes import chk_decision
from src.cats import All, Wolf, Wolfchat, Wolfteam, Killer, Neutral, Hidden
from src.functions import (
get_players, get_all_players, get_participants,
get_main_role, get_all_roles, get_reveal_role,
get_target, change_role
)
# done this way so that events is accessible in !eval (useful for debugging)
Event = events.Event
# Game Logic Begins:
var.LAST_STATS = None
var.LAST_ADMINS = None
var.LAST_GSTATS = None
var.LAST_PSTATS = None
var.LAST_RSTATS = None
var.LAST_TIME = None
var.LAST_GOAT = {}
var.USERS = {}
var.ADMIN_PINGING = False
var.DCED_LOSERS = UserSet() # type: Set[users.User]
var.PLAYERS = {}
var.DCED_PLAYERS = {}
var.ADMIN_TO_PING = None
var.AFTER_FLASTGAME = None
var.PINGING_IFS = False
var.TIMERS = {}
var.PHASE = "none"
var.OLD_MODES = defaultdict(set)
var.ROLES = UserDict() # type: Dict[str, Set[users.User]]
var.ORIGINAL_ROLES = UserDict() # type: Dict[str, Set[users.User]]
var.MAIN_ROLES = UserDict() # type: Dict[users.User, str]
var.ORIGINAL_MAIN_ROLES = UserDict() # type: Dict[users.User, str]
var.ALL_PLAYERS = UserList()
var.FORCE_ROLES = DefaultUserDict(UserSet)
var.DEAD = UserSet()
var.DEADCHAT_PLAYERS = UserSet()
var.SPECTATING_WOLFCHAT = UserSet()
var.SPECTATING_DEADCHAT = UserSet()
var.ORIGINAL_SETTINGS = {}
var.LAST_SAID_TIME = {}
var.GAME_START_TIME = datetime.now() # for idle checker only
var.CAN_START_TIME = 0
var.STARTED_DAY_PLAYERS = 0
var.DISCONNECTED = {} # players who are still alive but disconnected
var.RESTARTING = False
if botconfig.DEBUG_MODE and var.DISABLE_DEBUG_MODE_TIMERS:
var.NIGHT_TIME_LIMIT = 0 # 120
var.NIGHT_TIME_WARN = 0 # 90
var.DAY_TIME_LIMIT = 0 # 720
var.DAY_TIME_WARN = 0 # 600
var.SHORT_DAY_LIMIT = 0 # 520
var.SHORT_DAY_WARN = 0 # 400
if botconfig.DEBUG_MODE and var.DISABLE_DEBUG_MODE_REAPER:
var.KILL_IDLE_TIME = 0 # 300
var.WARN_IDLE_TIME = 0 # 180
var.PM_WARN_IDLE_TIME = 0 # 240
var.JOIN_TIME_LIMIT = 0 # 3600
if botconfig.DEBUG_MODE and var.DISABLE_DEBUG_MODE_STASIS:
var.LEAVE_PENALTY = 0
var.IDLE_PENALTY = 0
var.PART_PENALTY = 0
var.ACC_PENALTY = 0
if botconfig.DEBUG_MODE and var.DISABLE_DEBUG_MODE_TIME_LORD:
var.TIME_LORD_DAY_LIMIT = 0 # 60
var.TIME_LORD_DAY_WARN = 0 # 45
var.TIME_LORD_NIGHT_LIMIT = 0 # 30
var.TIME_LORD_NIGHT_WARN = 0 # 20
plog("Loading Werewolf IRC bot")
def connect_callback():
db.init_vars()
SIGUSR1 = getattr(signal, "SIGUSR1", None)
SIGUSR2 = getattr(signal, "SIGUSR2", None)
def sighandler(signum, frame):
wrapper = dispatcher.MessageDispatcher(users.FakeUser.from_nick("<console>"), channels.Main)
if signum == signal.SIGINT:
# Exit immediately if Ctrl-C is pressed twice
signal.signal(signal.SIGINT, signal.SIG_DFL)
if signum in (signal.SIGINT, signal.SIGTERM):
forced_exit.func(var, wrapper, "")
elif signum == SIGUSR1:
restart_program.func(var, wrapper, "")
elif signum == SIGUSR2:
plog("Scheduling aftergame restart")
aftergame.func(var, wrapper, "frestart")
signal.signal(signal.SIGINT, sighandler)
signal.signal(signal.SIGTERM, sighandler)
if SIGUSR1:
signal.signal(SIGUSR1, sighandler)
if SIGUSR2:
signal.signal(SIGUSR2, sighandler)
def who_end(event, var, request):
if request is channels.Main:
if "WHOX" not in hooks.Features:
if not var.DISABLE_ACCOUNTS:
plog("IRCd does not support WHOX, disabling account-related features.")
var.DISABLE_ACCOUNTS = True
var.ACCOUNTS_ONLY = False
# Devoice all on connect
mode = hooks.Features["PREFIX"]["+"]
pending = []
for user in channels.Main.modes.get(mode, ()):
pending.append(("-" + mode, user))
accumulator.send(pending)
next(accumulator, None)
# Expire tempbans
expire_tempbans()
players = db.get_pre_restart_state()
if players:
channels.Main.send(*players, first="PING! ")
channels.Main.send(messages["game_restart_cancel"])
var.CURRENT_GAMEMODE = var.GAME_MODES["default"][0]()
reset()
events.remove_listener("who_end", who_end)
def end_listmode(event, var, chan, mode):
if chan is channels.Main and mode == var.QUIET_MODE:
pending = []
for quiet in chan.modes.get(mode, ()):
if re.search(r"^{0}.+\!\*@\*$".format(var.QUIET_PREFIX), quiet):
pending.append(("-" + mode, quiet))
accumulator.send(pending)
next(accumulator, None)
events.remove_listener("end_listmode", end_listmode)
def mode_change(event, var, actor, target):
if target is channels.Main: # we may or may not be opped; assume we are
accumulator.send([])
next(accumulator, None)
events.remove_listener("mode_change", mode_change)
events.add_listener("who_end", who_end)
events.add_listener("end_listmode", end_listmode)
events.add_listener("mode_change", mode_change)
def accumulate_cmodes(count):
modes = []
for i in range(count):
item = yield
modes.extend(item)
yield i
if modes:
channels.Main.mode(*modes)
accumulator = accumulate_cmodes(3)
accumulator.send(None)
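# How the accumulator above works (explanatory sketch; it does not change bot
# behaviour): it is a coroutine primed with send(None); each of the three
# connect-time listeners (who_end, end_listmode, mode_change) sends a list of
# pending mode changes and then advances it with next(accumulator, None). Once all
# three have reported, the final next() applies the collected modes in a single
# channels.Main.mode(...) call. A stand-alone analogue of the same protocol:
#   def _collect(count):
#       items = []
#       for i in range(count):
#           items.extend((yield))
#           yield i
#       print(items)              # here the bot calls channels.Main.mode(*items)
#   c = _collect(3)
#   c.send(None)                  # prime
#   c.send([("-v", "alice")]); next(c, None)
#   c.send([]); next(c, None)
#   c.send([("+o", "bot")]); next(c, None)   # the final next() flushes the modes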
@hook("mode") # XXX Get rid of this when the user/channel refactor is done
def check_for_modes(cli, rnick, chan, modeaction, *target):
nick = parse_nick(rnick)[0]
if chan != botconfig.CHANNEL:
return
oldpref = ""
trgt = ""
keeptrg = False
target = list(target)
if target and target != [users.Bot.nick]:
while modeaction:
if len(modeaction) > 1:
prefix = modeaction[0]
change = modeaction[1]
else:
prefix = oldpref
change = modeaction[0]
if not keeptrg:
if target:
trgt = target.pop(0)
else:
trgt = "" # Last item, no target
keeptrg = False
if not prefix in ("-", "+"):
change = prefix
prefix = oldpref
else:
oldpref = prefix
modeaction = modeaction[modeaction.index(change)+1:]
if change in var.MODES_NOSET:
keeptrg = True
if prefix == "-" and change in var.MODES_ONLYSET:
keeptrg = True
if change not in var.MODES_PREFIXES.values():
continue
if trgt in var.USERS:
if prefix == "+":
var.USERS[trgt]["modes"].add(change)
if change in var.USERS[trgt]["moded"]:
var.USERS[trgt]["moded"].remove(change)
elif change in var.USERS[trgt]["modes"]:
var.USERS[trgt]["modes"].remove(change)
def reset_settings():
var.CURRENT_GAMEMODE.teardown()
var.CURRENT_GAMEMODE = var.GAME_MODES["default"][0]()
for attr in list(var.ORIGINAL_SETTINGS.keys()):
setattr(var, attr, var.ORIGINAL_SETTINGS[attr])
var.ORIGINAL_SETTINGS.clear()
def reset_modes_timers(var):
# Reset game timers
with var.WARNING_LOCK: # make sure it isn't being used by the ping join handler
for x, timr in var.TIMERS.items():
timr[0].cancel()
var.TIMERS = {}
# Reset modes
cmodes = []
for plr in get_players():
if not plr.is_fake:
cmodes.append(("-v", plr.nick))
for user, modes in var.OLD_MODES.items():
for mode in modes:
cmodes.append(("+" + mode, user))
var.OLD_MODES.clear()
if var.QUIET_DEAD_PLAYERS:
for deadguy in var.DEAD:
if not deadguy.is_fake:
cmodes.append(("-{0}".format(var.QUIET_MODE), var.QUIET_PREFIX + deadguy.nick + "!*@*"))
channels.Main.mode("-m", *cmodes)
def reset():
var.PHASE = "none" # "join", "day", or "night"
var.GAME_ID = 0
var.ALL_PLAYERS.clear()
var.RESTART_TRIES = 0
var.DEAD.clear()
var.JOINED_THIS_GAME = set() # keeps track of who already joined this game at least once (hostmasks)
var.JOINED_THIS_GAME_ACCS = set() # same, except accounts
var.PINGED_ALREADY = set()
var.PINGED_ALREADY_ACCS = set()
var.FGAMED = False
var.GAMEMODE_VOTES = {} # dict of players who have used !game
var.ROLE_STATS = frozenset() # type: FrozenSet[FrozenSet[Tuple[str, int]]]
reset_settings()
var.LAST_SAID_TIME.clear()
var.PLAYERS.clear()
var.DCED_PLAYERS.clear()
var.DISCONNECTED.clear()
var.DCED_LOSERS.clear()
var.SPECTATING_WOLFCHAT.clear()
var.SPECTATING_DEADCHAT.clear()
var.ROLES.clear()
var.ORIGINAL_ROLES.clear()
var.ROLES["person"] = UserSet()
var.MAIN_ROLES.clear()
var.ORIGINAL_MAIN_ROLES.clear()
var.FORCE_ROLES.clear()
evt = Event("reset", {})
evt.dispatch(var)
@command("sync", "fsync", flag="m", pm=True)
def fsync(var, wrapper, message):
"""Makes the bot apply the currently appropriate channel modes."""
sync_modes(var)
@event_listener("sync_modes")
def on_sync_modes(evt, var):
sync_modes(var)
def sync_modes(var):
voices = [None]
mode = hooks.Features["PREFIX"]["+"]
pl = get_players()
for user in channels.Main.users:
if var.DEVOICE_DURING_NIGHT and var.PHASE == "night":
if mode in user.channels[channels.Main]:
voices.append(("-" + mode, user))
elif user in pl and mode not in user.channels[channels.Main]:
voices.append(("+" + mode, user))
elif user not in pl and mode in user.channels[channels.Main]:
voices.append(("-" + mode, user))
if var.PHASE in var.GAME_PHASES:
voices[0] = "+m"
else:
voices[0] = "-m"
channels.Main.mode(*voices)
@command("refreshdb", flag="m", pm=True)
def refreshdb(var, wrapper, message):
"""Updates our tracking vars to the current db state."""
db.expire_stasis()
db.init_vars()
expire_tempbans()
wrapper.reply("Done.")
@command("fdie", "fbye", flag="F", pm=True)
def forced_exit(var, wrapper, message):
"""Forces the bot to close."""
args = message.split()
# Force in debug mode by default
force = botconfig.DEBUG_MODE
if args and args[0] == "-dirty":
# use as a last resort
os.abort()
elif args and args[0] == "-force":
force = True
message = " ".join(args[1:])
if var.PHASE in var.GAME_PHASES:
if var.PHASE == "join" or force or wrapper.source.nick == "<console>":
stop_game(var, log=False)
else:
wrapper.pm(messages["stop_bot_ingame_safeguard"].format(
what="stop", cmd="fdie", prefix=botconfig.CMD_CHAR))
return
reset_modes_timers(var)
reset()
msg = "{0} quit from {1}"
if message.strip():
msg += " ({2})"
hooks.quit(wrapper, msg.format("Scheduled" if forced_exit.aftergame else "Forced",
wrapper.source, message.strip()))
def _restart_program(mode=None):
plog("RESTARTING")
python = sys.executable
if mode is not None:
print(mode)
assert mode in ("normal", "verbose",
raise Exception("Expected source_model_tag_ to be a str, received: {}".format(type(source_model_tag_)))
if spaces_ is not None and not isinstance(spaces_, (bytes, str, list)):
raise Exception("Expected spaces_ to be a Sequence, received: {}".format(type(spaces_)))
if users_ is not None and not isinstance(users_, (bytes, str, list)):
raise Exception("Expected users_ to be a Sequence, received: {}".format(type(users_)))
self.application_description = application_description_
self.bindings = bindings_
self.endpoints = endpoints_
self.offer_name = offer_name_
self.offer_url = offer_url_
self.offer_uuid = offer_uuid_
self.source_model_tag = source_model_tag_
self.spaces = spaces_
self.users = users_
self.unknown_fields = unknown_fields
class ApplicationOfferResult(Type):
_toSchema = {'error': 'error', 'result': 'result'}
_toPy = {'error': 'error', 'result': 'result'}
def __init__(self, error=None, result=None, **unknown_fields):
'''
error : Error
result : ApplicationOfferAdminDetails
'''
error_ = Error.from_json(error) if error else None
result_ = ApplicationOfferAdminDetails.from_json(result) if result else None
# Validate arguments against known Juju API types.
if error_ is not None and not isinstance(error_, (dict, Error)):
raise Exception("Expected error_ to be a Error, received: {}".format(type(error_)))
if result_ is not None and not isinstance(result_, (dict, ApplicationOfferAdminDetails)):
raise Exception("Expected result_ to be a ApplicationOfferAdminDetails, received: {}".format(type(result_)))
self.error = error_
self.result = result_
self.unknown_fields = unknown_fields
class ApplicationOfferStatus(Type):
_toSchema = {'active_connected_count': 'active-connected-count', 'application_name': 'application-name', 'charm': 'charm', 'endpoints': 'endpoints', 'err': 'err', 'offer_name': 'offer-name', 'total_connected_count': 'total-connected-count'}
_toPy = {'active-connected-count': 'active_connected_count', 'application-name': 'application_name', 'charm': 'charm', 'endpoints': 'endpoints', 'err': 'err', 'offer-name': 'offer_name', 'total-connected-count': 'total_connected_count'}
def __init__(self, active_connected_count=None, application_name=None, charm=None, endpoints=None, err=None, offer_name=None, total_connected_count=None, **unknown_fields):
'''
active_connected_count : int
application_name : str
charm : str
endpoints : typing.Mapping[str, ~RemoteEndpoint]
err : Error
offer_name : str
total_connected_count : int
'''
active_connected_count_ = active_connected_count
application_name_ = application_name
charm_ = charm
endpoints_ = endpoints
err_ = Error.from_json(err) if err else None
offer_name_ = offer_name
total_connected_count_ = total_connected_count
# Validate arguments against known Juju API types.
if active_connected_count_ is not None and not isinstance(active_connected_count_, int):
raise Exception("Expected active_connected_count_ to be a int, received: {}".format(type(active_connected_count_)))
if application_name_ is not None and not isinstance(application_name_, (bytes, str)):
raise Exception("Expected application_name_ to be a str, received: {}".format(type(application_name_)))
if charm_ is not None and not isinstance(charm_, (bytes, str)):
raise Exception("Expected charm_ to be a str, received: {}".format(type(charm_)))
if endpoints_ is not None and not isinstance(endpoints_, dict):
raise Exception("Expected endpoints_ to be a Mapping, received: {}".format(type(endpoints_)))
if err_ is not None and not isinstance(err_, (dict, Error)):
raise Exception("Expected err_ to be a Error, received: {}".format(type(err_)))
if offer_name_ is not None and not isinstance(offer_name_, (bytes, str)):
raise Exception("Expected offer_name_ to be a str, received: {}".format(type(offer_name_)))
if total_connected_count_ is not None and not isinstance(total_connected_count_, int):
raise Exception("Expected total_connected_count_ to be a int, received: {}".format(type(total_connected_count_)))
self.active_connected_count = active_connected_count_
self.application_name = application_name_
self.charm = charm_
self.endpoints = endpoints_
self.err = err_
self.offer_name = offer_name_
self.total_connected_count = total_connected_count_
self.unknown_fields = unknown_fields
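# Illustrative sketch (added comment, not part of the generated schema): these Type
# subclasses are normally built from wire-format dicts via Type.from_json, with the
# _toPy mapping translating dashed JSON keys into Python attribute names, e.g.
#   raw = {'offer-name': 'db', 'application-name': 'mysql', 'total-connected-count': 2}
#   status = ApplicationOfferStatus.from_json(raw)
#   status.offer_name              # 'db'
#   status.total_connected_count   # 2
# Keys that are not listed in _toPy are kept in .unknown_fields rather than raising.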
class ApplicationOffersResults(Type):
_toSchema = {'results': 'results'}
_toPy = {'results': 'results'}
def __init__(self, results=None, **unknown_fields):
'''
results : typing.Sequence[~ApplicationOfferResult]
'''
results_ = [ApplicationOfferResult.from_json(o) for o in results or []]
# Validate arguments against known Juju API types.
if results_ is not None and not isinstance(results_, (bytes, str, list)):
raise Exception("Expected results_ to be a Sequence, received: {}".format(type(results_)))
self.results = results_
self.unknown_fields = unknown_fields
class ApplicationRelationsChange(Type):
_toSchema = {'changed': 'changed', 'removed': 'removed'}
_toPy = {'changed': 'changed', 'removed': 'removed'}
def __init__(self, changed=None, removed=None, **unknown_fields):
'''
changed : typing.Sequence[~RelationChange]
removed : typing.Sequence[int]
'''
changed_ = [RelationChange.from_json(o) for o in changed or []]
removed_ = removed
# Validate arguments against known Juju API types.
if changed_ is not None and not isinstance(changed_, (bytes, str, list)):
raise Exception("Expected changed_ to be a Sequence, received: {}".format(type(changed_)))
if removed_ is not None and not isinstance(removed_, (bytes, str, list)):
raise Exception("Expected removed_ to be a Sequence, received: {}".format(type(removed_)))
self.changed = changed_
self.removed = removed_
self.unknown_fields = unknown_fields
class ApplicationRelationsWatchResult(Type):
_toSchema = {'applicationrelationswatcherid': 'ApplicationRelationsWatcherId', 'changes': 'changes', 'error': 'error'}
_toPy = {'ApplicationRelationsWatcherId': 'applicationrelationswatcherid', 'changes': 'changes', 'error': 'error'}
def __init__(self, applicationrelationswatcherid=None, changes=None, error=None, **unknown_fields):
'''
applicationrelationswatcherid : str
changes : ApplicationRelationsChange
error : Error
'''
applicationrelationswatcherid_ = applicationrelationswatcherid
changes_ = ApplicationRelationsChange.from_json(changes) if changes else None
error_ = Error.from_json(error) if error else None
# Validate arguments against known Juju API types.
if applicationrelationswatcherid_ is not None and not isinstance(applicationrelationswatcherid_, (bytes, str)):
raise Exception("Expected applicationrelationswatcherid_ to be a str, received: {}".format(type(applicationrelationswatcherid_)))
if changes_ is not None and not isinstance(changes_, (dict, ApplicationRelationsChange)):
raise Exception("Expected changes_ to be a ApplicationRelationsChange, received: {}".format(type(changes_)))
if error_ is not None and not isinstance(error_, (dict, Error)):
raise Exception("Expected error_ to be a Error, received: {}".format(type(error_)))
self.applicationrelationswatcherid = applicationrelationswatcherid_
self.changes = changes_
self.error = error_
self.unknown_fields = unknown_fields
class ApplicationSet(Type):
_toSchema = {'application': 'application', 'branch': 'branch', 'options': 'options'}
_toPy = {'application': 'application', 'branch': 'branch', 'options': 'options'}
def __init__(self, application=None, branch=None, options=None, **unknown_fields):
'''
application : str
branch : str
options : typing.Mapping[str, str]
'''
application_ = application
branch_ = branch
options_ = options
# Validate arguments against known Juju API types.
if application_ is not None and not isinstance(application_, (bytes, str)):
raise Exception("Expected application_ to be a str, received: {}".format(type(application_)))
if branch_ is not None and not isinstance(branch_, (bytes, str)):
raise Exception("Expected branch_ to be a str, received: {}".format(type(branch_)))
if options_ is not None and not isinstance(options_, dict):
raise Exception("Expected options_ to be a Mapping, received: {}".format(type(options_)))
self.application = application_
self.branch = branch_
self.options = options_
self.unknown_fields = unknown_fields
class ApplicationSetCharm(Type):
_toSchema = {'application': 'application', 'channel': 'channel', 'charm_url': 'charm-url', 'config_settings': 'config-settings', 'config_settings_yaml': 'config-settings-yaml', 'force': 'force', 'force_series': 'force-series', 'force_units': 'force-units', 'generation': 'generation', 'resource_ids': 'resource-ids', 'storage_constraints': 'storage-constraints'}
_toPy = {'application': 'application', 'channel': 'channel', 'charm-url': 'charm_url', 'config-settings': 'config_settings', 'config-settings-yaml': 'config_settings_yaml', 'force': 'force', 'force-series': 'force_series', 'force-units': 'force_units', 'generation': 'generation', 'resource-ids': 'resource_ids', 'storage-constraints': 'storage_constraints'}
def __init__(self, application=None, channel=None, charm_url=None, config_settings=None, config_settings_yaml=None, force=None, force_series=None, force_units=None, generation=None, resource_ids=None, storage_constraints=None, **unknown_fields):
'''
application : str
channel : str
charm_url : str
config_settings : typing.Mapping[str, str]
config_settings_yaml : str
force : bool
force_series : bool
force_units : bool
generation : str
resource_ids : typing.Mapping[str, str]
storage_constraints : typing.Mapping[str, ~StorageConstraints]
'''
application_ = application
channel_ = channel
charm_url_ = charm_url
config_settings_ = config_settings
config_settings_yaml_ = config_settings_yaml
force_ = force
force_series_ = force_series
force_units_ = force_units
generation_ = generation
resource_ids_ = resource_ids
storage_constraints_ = storage_constraints
# Validate arguments against known Juju API types.
if application_ is not None and not isinstance(application_, (bytes, str)):
raise Exception("Expected application_ to be a str, received: {}".format(type(application_)))
if channel_ is not None and not isinstance(channel_, (bytes, str)):
raise Exception("Expected channel_ to be a str, received: {}".format(type(channel_)))
if charm_url_ is not None and not isinstance(charm_url_, (bytes, str)):
raise Exception("Expected charm_url_ to be a str, received: {}".format(type(charm_url_)))
if config_settings_ is not None and not isinstance(config_settings_, dict):
raise Exception("Expected config_settings_ to be a Mapping, received: {}".format(type(config_settings_)))
if config_settings_yaml_ is not None and not isinstance(config_settings_yaml_, (bytes, str)):
raise Exception("Expected config_settings_yaml_ to be a str, received: {}".format(type(config_settings_yaml_)))
if force_ is not None and not isinstance(force_, bool):
raise Exception("Expected force_ to be a bool, received: {}".format(type(force_)))
if force_series_ is not None and not isinstance(force_series_, bool):
raise Exception("Expected force_series_ to be a bool, received: {}".format(type(force_series_)))
if force_units_ is not None and not isinstance(force_units_, bool):
raise Exception("Expected force_units_ to be a bool, received: {}".format(type(force_units_)))
if generation_ is not None and not isinstance(generation_, (bytes, str)):
raise Exception("Expected generation_ to be a str, received: {}".format(type(generation_)))
if resource_ids_ is not None and not isinstance(resource_ids_, dict):
raise Exception("Expected resource_ids_ to be a Mapping, received: {}".format(type(resource_ids_)))
if storage_constraints_ is not None and not isinstance(storage_constraints_, dict):
raise Exception("Expected storage_constraints_ to be a Mapping, received: {}".format(type(storage_constraints_)))
self.application = application_
self.channel = channel_
self.charm_url = charm_url_
self.config_settings = config_settings_
self.config_settings_yaml = config_settings_yaml_
self.force = force_
self.force_series = force_series_
self.force_units = force_units_
self.generation = generation_
self.resource_ids = resource_ids_
self.storage_constraints = storage_constraints_
self.unknown_fields = unknown_fields
class ApplicationSetCharmProfile(Type):
_toSchema = {'application': 'application', 'charm_url': 'charm-url'}
_toPy = {'application': 'application', 'charm-url': 'charm_url'}
def __init__(self, application=None, charm_url=None, **unknown_fields):
'''
application : str
charm_url : str
'''
application_ = application
charm_url_ = charm_url
# Validate arguments against known Juju API types.
if application_ is not None and not isinstance(application_, (bytes, str)):
raise Exception("Expected application_ to be a str, received: {}".format(type(application_)))
if charm_url_ is not None and not isinstance(charm_url_,
b c2.')
>>> s.makeMeasures(inPlace=True)
>>> s.measure(2).leftBarline = bar.Repeat(direction='start')
>>> s.measure(2).rightBarline = bar.Repeat(direction='end', times=3)
>>> s.measure(4).leftBarline = bar.Repeat(direction='start')
>>> s.measure(4).rightBarline = bar.Repeat(direction='end', times=2)
processInnermostRepeatBars only will expand the first set of repeats.
>>> e = repeat.Expander(s)
>>> s2 = e.processInnermostRepeatBars(s)
>>> s2.show('text')
{0.0} <music21.stream.Measure 1 offset=0.0>
{0.0} <music21.clef.BassClef>
{0.0} <music21.meter.TimeSignature 3/4>
{0.0} <music21.note.Note A>
{3.0} <music21.stream.Measure 2 offset=3.0>
{0.0} <music21.bar.Barline type=double>
{0.0} <music21.note.Note C>
{1.0} <music21.note.Note D>
{2.0} <music21.note.Note E>
{3.0} <music21.bar.Barline type=double>
{6.0} <music21.stream.Measure 2a offset=6.0>
{0.0} <music21.bar.Barline type=double>
{0.0} <music21.note.Note C>
{1.0} <music21.note.Note D>
{2.0} <music21.note.Note E>
{3.0} <music21.bar.Barline type=double>
{9.0} <music21.stream.Measure 2b offset=9.0>
{0.0} <music21.bar.Barline type=double>
{0.0} <music21.note.Note C>
{1.0} <music21.note.Note D>
{2.0} <music21.note.Note E>
{3.0} <music21.bar.Barline type=double>
{12.0} <music21.stream.Measure 3 offset=12.0>
{0.0} <music21.note.Note F>
{15.0} <music21.stream.Measure 4 offset=15.0>
{0.0} <music21.bar.Repeat direction=start>
{0.0} <music21.note.Note G>
{1.0} <music21.note.Note A>
{2.0} <music21.note.Note B>
{3.0} <music21.bar.Repeat direction=end times=2>
{18.0} <music21.stream.Measure 5 offset=18.0>
{0.0} <music21.note.Note C>
{3.0} <music21.bar.Barline type=final>
Calling it again will complete the job, as .process() does
>>> s3 = e.processInnermostRepeatBars(s2)
>>> s3.show('text')
{0.0} <music21.stream.Measure 1 offset=0.0>
...
{3.0} <music21.stream.Measure 2 offset=3.0>
...
{6.0} <music21.stream.Measure 2a offset=6.0>
...
{9.0} <music21.stream.Measure 2b offset=9.0>
...
{12.0} <music21.stream.Measure 3 offset=12.0>
...
{15.0} <music21.stream.Measure 4 offset=15.0>
{0.0} <music21.bar.Barline type=double>
{0.0} <music21.note.Note G>
{1.0} <music21.note.Note A>
{2.0} <music21.note.Note B>
{3.0} <music21.bar.Barline type=double>
{18.0} <music21.stream.Measure 4a offset=18.0>
{0.0} <music21.bar.Barline type=double>
{0.0} <music21.note.Note G>
{1.0} <music21.note.Note A>
{2.0} <music21.note.Note B>
{3.0} <music21.bar.Barline type=double>
{21.0} <music21.stream.Measure 5 offset=21.0>
...
Should work even if no start repeat is given:
>>> s = converter.parse('tinynotation: 3/4 A2. C4 D E F2. G4 a b c2.')
>>> s.makeMeasures(inPlace=True)
>>> s.measure(2).rightBarline = bar.Repeat(direction='end')
>>> e = repeat.Expander(s)
>>> s2 = e.processInnermostRepeatBars(s)
>>> s2.show('text')
{0.0} <music21.stream.Measure 1 offset=0.0>
...
{3.0} <music21.stream.Measure 2 offset=3.0>
...
{6.0} <music21.stream.Measure 1a offset=6.0>
...
{9.0} <music21.stream.Measure 2a offset=9.0>
...
{3.0} <music21.bar.Barline type=double>
{12.0} <music21.stream.Measure 3 offset=12.0>
...
{15.0} <music21.stream.Measure 4 offset=15.0>
...
{18.0} <music21.stream.Measure 5 offset=18.0>
...
{3.0} <music21.bar.Barline type=final>
'''
lowercase_alphabet = string.ascii_lowercase
# get class from src
new = streamObj.__class__()
# can provide indices
forcedIndices = False
if repeatIndices is None:
# find innermost
repeatIndices = self.findInnermostRepeatIndices(streamObj)
else: # use passed
forcedIndices = True
# environLocal.printDebug(['got new repeat indices:', repeatIndices])
# renumber measures starting with the first number found here
# number = streamObj[0].number
# if number is None:
# number = 1
# handling of end repeat as left barline
stripFirstNextMeasure = False
# use index values instead of an iterator
i = 0
repeatTimesFound = 0
while i < len(streamObj):
if not repeatIndices:
break
# environLocal.printDebug(['processing measure index:', i,
# 'repeatIndices', repeatIndices])
# if this index is the start of the repeat
if i == repeatIndices[0]:
mEndBarline = None
mLast = None
try:
mLast, mEndBarline, repeatTimesFound = self._getEndRepeatBar(streamObj,
repeatIndices[-1])
except ExpanderException:
# this may fail if we have supplied arbitrary repeat indices
if not forcedIndices:
raise # raise error
# otherwise let pass; mLast is mEndBarline, as we want
if repeatTimes is None: # if not passed as arg
repeatTimes = repeatTimesFound
for times in range(repeatTimes):
# environLocal.printDebug(['repeat times:', times])
# copy the range of measures; this will include the first
# always copying from the same source
for j in repeatIndices:
mSub = copy.deepcopy(streamObj[j])
# must do for each pass, b/c not changing source
# stream
# environLocal.printDebug(['got j, repeatIndices', j, repeatIndices])
if j in [repeatIndices[0], repeatIndices[-1]]:
self._stripRepeatBarlines(mSub)
# mSub.number = number
# only keep repeat expressions found at the end
# only remove anything if we have 2 or more repeats
# and this is not the last time
if repeatTimes >= 2 and times < repeatTimes - 1:
self._stripRepeatExpressions(mSub)
if times != 0:
mSub.numberSuffix = lowercase_alphabet[(times - 1) % 26] # just in case
new.append(mSub)
# renumber at end
# number += 1
# check if need to clear repeats from next bar
if mLast is not mEndBarline:
stripFirstNextMeasure = True
# set i to next measure after mLast
i = repeatIndices[-1] + 1
# if is not in repeat indices, just add this measure
else:
# iterate through each measure, always add first
if not returnExpansionOnly:
# TODO: this deepcopy is necessary to avoid a problem in
# testExpandRepeatH; the copy is not actually used, but
# for some reason removes an id clash when inserting into
# new
junk = copy.deepcopy(streamObj[i])
# cannot deepcopy here, as we might orphan a spanner
# attached to bracket after this repeat segment
m = streamObj[i]
# environLocal.printDebug(['about to insert m into new', 'id(m)', id(m),
# 'new', id(new), 'all ids:', [id(e) for e in new]])
# m = copy.deepcopy(streamObj[i])
# new.show('t')
if stripFirstNextMeasure:
# environLocal.printDebug(['got stripFirstNextMeasure'])
self._stripRepeatBarlines(m)
# change in source too
self._stripRepeatBarlines(streamObj[i])
stripFirstNextMeasure = False
# renumber at end
# m.number = number
new.append(m) # this may be the first version
# number += 1
i += 1
# return the complete stream with just the expanded measures
return new
def _processInnermostRepeatsAndBrackets(self,
streamObj,
repeatBracketsMemo=None):
'''
Return a new complete Stream with repeats and brackets
expanded.
The `repeatBracketsMemo` is a dictionary that stores
id(rb): rb entries for all RepeatBrackets.
This is not recursively applied here, but done in __processRecursiveRepeatBars
'''
if repeatBracketsMemo is None:
repeatBracketsMemo = {}
# possible replace calls to above with this
# need to remove groups that are already used
groups = self._groupRepeatBracketIndices(streamObj)
# if we do not find groups when expected it is probably b/c spanners have
# been orphaned
# environLocal.printDebug(['got groups:', groups])
if not groups: # none found:
return self.processInnermostRepeatBars(streamObj)
# need to find innermost repeat, and then see if it has any
# repeat brackets that are relevant for the span
# this group may ultimately extend beyond the innermost, as this may
# be the first of a few brackets
innermost = self.findInnermostRepeatIndices(streamObj)
groupFocus = None # find a group to apply to, or None
for group in groups:
if not innermost:
continue
rBrackets = group['repeatBrackets']
mStart = streamObj[innermost[0]]
mEnd = streamObj[innermost[-1]]
for rb in rBrackets:
if id(rb) in repeatBracketsMemo:
# environLocal.printDebug(['skipping rb as in memo keys:', rb])
break # do not need to look at components
elif rb.hasSpannedElement(mStart) or rb.hasSpannedElement(mEnd):
# environLocal.printDebug(['matched rb as component' ])
groupFocus = group
break
else:
pass
# environLocal.printDebug(['rb does not have measure as a spanned element',
# 'rb', rb, 'mEnd', mEnd])
if groupFocus is not None:
break
# if the innermost measures are not part of a group, process normally
if groupFocus is None:
# environLocal.printDebug(['cannot find innermost in a group:',
# 'innermost', innermost, 'groupFocus', groupFocus])
return self.processInnermostRepeatBars(streamObj)
# else: # have innermost in a bracket
rBrackets = groupFocus['repeatBrackets']
# get all measures before bracket
streamObjPre = streamObj[:innermost[0]]
streamBracketRepeats = [] # store a list
# will need to know index of last measure copied
highestIndexRepeated = None
# store for each rb {repeatBracket:[], validIndices=[]}
boundaries = []
# it is critical that the brackets are in order as presented
for rb in rBrackets:
repeatBracketsMemo[id(rb)] = rb
startIndex = innermost[0]
# first measure under spanner is not the start index, but the
# measure that begins the spanned repeat bracket
mFirst = rb.getFirst()
mLast = rb.getLast()
endIndex = None
bracketStartIndex = None # not the startIndex
# iterate over all provided measures to find the last index
for i, m in enumerate(streamObj):
if id(m) == id(mLast):
endIndex = i
# use if: start and end may be the same
if id(m) == id(mFirst):
bracketStartIndex = i
# error check: probably orphaned spanners
if endIndex is None or bracketStartIndex is None:
raise ExpanderException(
'failed to find start or end index of bracket expansion')
# if mLast does not have a repeat bar, it's probably not a repeat
mLastRightBar = mLast.rightBarline
if (mLastRightBar is not None
and 'music21.bar.Repeat' in mLastRightBar.classSet):
indices = list(range(startIndex, endIndex + 1))
# condition of when to repeat next is not always clear
# if we have [1 x :|[2
# seam_carving.py
import numpy as np
import cv2
class SeamCarver:
def __init__(self, filename, out_height, out_width, protect_mask='', object_mask=''):
# initialize parameter
self.filename = filename
self.out_height = out_height
self.out_width = out_width
# read in image and store as np.float64 format
self.in_image = cv2.imread(filename).astype(np.float64)
self.in_height, self.in_width = self.in_image.shape[: 2]
# keep tracking resulting image
self.out_image = np.copy(self.in_image)
# object removal --> self.object = True
self.object = (object_mask != '')
if self.object:
# read in object mask image file as np.float64 format in gray scale
self.mask = cv2.imread(object_mask, 0).astype(np.float64)
self.protect = False
# image re-sizing with or without protect mask
else:
self.protect = (protect_mask != '')
if self.protect:
# if protect_mask filename is provided, read in protect mask image file as np.float64 format in gray scale
self.mask = cv2.imread(protect_mask, 0).astype(np.float64)
# kernel for forward energy map calculation
self.kernel_x = np.array([[0., 0., 0.], [-1., 0., 1.], [0., 0., 0.]], dtype=np.float64)
self.kernel_y_left = np.array([[0., 0., 0.], [0., 0., 1.], [0., -1., 0.]], dtype=np.float64)
self.kernel_y_right = np.array([[0., 0., 0.], [1., 0., 0.], [0., -1., 0.]], dtype=np.float64)
# constant for covered area by protect mask or object mask
self.constant = 1000
# starting program
self.start()
def start(self):
"""
:return:
If object mask is provided --> object removal function will be executed
else --> seam carving function (image retargeting) will be processed
"""
if self.object:
self.object_removal()
else:
self.seams_carving()
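# Illustrative usage (added comment; file names below are placeholders, not files
# shipped with this module):
#   carver = SeamCarver('in.jpg', 384, 512)                                 # plain retargeting
#   carver = SeamCarver('in.jpg', 384, 512, protect_mask='keep_mask.png')   # protected resize
#   carver = SeamCarver('in.jpg', 384, 512, object_mask='remove_mask.png')  # object removal
#                                                                           # (out_height/out_width
#                                                                           # are unused in this mode)
#   cv2.imwrite('out.jpg', carver.out_image)
# The constructor calls start() itself, so the carved result is already available in
# carver.out_image once construction returns.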
def seams_carving(self):
"""
:return:
We first process seam insertion or removal in the vertical direction, followed by the horizontal direction.
If the target height or width is greater than the original one --> seam insertion,
else --> seam removal
The algorithm is written for seam processing in the vertical direction (columns), so the image is rotated 90 degrees
counter-clockwise for seam processing in the horizontal direction (rows)
"""
# calculate number of rows and columns needed to be inserted or removed
delta_row, delta_col = int(self.out_height - self.in_height), int(self.out_width - self.in_width)
# remove column
if delta_col < 0:
self.seams_removal(delta_col * -1)
# insert column
elif delta_col > 0:
self.seams_insertion(delta_col)
# remove row
if delta_row < 0:
self.out_image = self.rotate_image(self.out_image, 1)
if self.protect:
self.mask = self.rotate_mask(self.mask, 1)
self.seams_removal(delta_row * -1)
self.out_image = self.rotate_image(self.out_image, 0)
# insert row
elif delta_row > 0:
self.out_image = self.rotate_image(self.out_image, 1)
if self.protect:
self.mask = self.rotate_mask(self.mask, 1)
self.seams_insertion(delta_row)
self.out_image = self.rotate_image(self.out_image, 0)
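# Worked example (added comment): for a 400x600 input (in_height=400, in_width=600)
# and a requested out_height=350, out_width=650, delta_col = +50 and delta_row = -50,
# so 50 vertical seams are inserted first (widening the image to 650 columns), then
# the image is rotated 90 degrees CCW, 50 seams are removed to reach 350 rows, and
# the image is rotated back.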
def object_removal(self):
"""
:return:
The object covered by the mask is removed first, and seams are inserted to return to the original image dimension
"""
rotate = False
object_height, object_width = self.get_object_dimension()
if object_height < object_width:
self.out_image = self.rotate_image(self.out_image, 1)
self.mask = self.rotate_mask(self.mask, 1)
rotate = True
while len(np.where(self.mask[:, :] > 0)[0]) > 0:
energy_map = self.calc_energy_map()
energy_map[np.where(self.mask[:, :] > 0)] *= -self.constant
cumulative_map = self.cumulative_map_forward(energy_map)
seam_idx = self.find_seam(cumulative_map)
self.delete_seam(seam_idx)
self.delete_seam_on_mask(seam_idx)
if not rotate:
num_pixels = self.in_width - self.out_image.shape[1]
else:
num_pixels = self.in_height - self.out_image.shape[1]
self.seams_insertion(num_pixels)
if rotate:
self.out_image = self.rotate_image(self.out_image, 0)
def seams_removal(self, num_pixel):
if self.protect:
for dummy in range(num_pixel):
energy_map = self.calc_energy_map()
energy_map[np.where(self.mask > 0)] *= self.constant
cumulative_map = self.cumulative_map_forward(energy_map)
seam_idx = self.find_seam(cumulative_map)
self.delete_seam(seam_idx)
self.delete_seam_on_mask(seam_idx)
else:
for dummy in range(num_pixel):
energy_map = self.calc_energy_map()
cumulative_map = self.cumulative_map_forward(energy_map)
seam_idx = self.find_seam(cumulative_map)
self.delete_seam(seam_idx)
def seams_insertion(self, num_pixel):
if self.protect:
temp_image = np.copy(self.out_image)
temp_mask = np.copy(self.mask)
seams_record = []
for dummy in range(num_pixel):
energy_map = self.calc_energy_map()
energy_map[np.where(self.mask[:, :] > 0)] *= self.constant
cumulative_map = self.cumulative_map_backward(energy_map)
seam_idx = self.find_seam(cumulative_map)
seams_record.append(seam_idx)
self.delete_seam(seam_idx)
self.delete_seam_on_mask(seam_idx)
self.out_image = np.copy(temp_image)
self.mask = np.copy(temp_mask)
n = len(seams_record)
for dummy in range(n):
seam = seams_record.pop(0)
self.add_seam(seam)
self.add_seam_on_mask(seam)
seams_record = self.update_seams(seams_record, seam)
else:
temp_image = np.copy(self.out_image)
seams_record = []
for dummy in range(num_pixel):
energy_map = self.calc_energy_map()
cumulative_map = self.cumulative_map_backward(energy_map)
seam_idx = self.find_seam(cumulative_map)
seams_record.append(seam_idx)
self.delete_seam(seam_idx)
self.out_image = np.copy(temp_image)
n = len(seams_record)
for dummy in range(n):
seam = seams_record.pop(0)
self.add_seam(seam)
seams_record = self.update_seams(seams_record, seam)
def calc_energy_map(self):
b, g, r = cv2.split(self.out_image)
b_energy = np.absolute(cv2.Scharr(b, -1, 1, 0)) + np.absolute(cv2.Scharr(b, -1, 0, 1))
g_energy = np.absolute(cv2.Scharr(g, -1, 1, 0)) + np.absolute(cv2.Scharr(g, -1, 0, 1))
r_energy = np.absolute(cv2.Scharr(r, -1, 1, 0)) + np.absolute(cv2.Scharr(r, -1, 0, 1))
return b_energy + g_energy + r_energy
def cumulative_map_backward(self, energy_map):
m, n = energy_map.shape
output = np.copy(energy_map)
for row in range(1, m):
for col in range(n):
output[row, col] = \
energy_map[row, col] + np.amin(output[row - 1, max(col - 1, 0): min(col + 2, n)])  # upper bound n keeps the column directly above in range at the right edge
return output
def cumulative_map_forward(self, energy_map):
matrix_x = self.calc_neighbor_matrix(self.kernel_x)
matrix_y_left = self.calc_neighbor_matrix(self.kernel_y_left)
matrix_y_right = self.calc_neighbor_matrix(self.kernel_y_right)
m, n = energy_map.shape
output = np.copy(energy_map)
for row in range(1, m):
for col in range(n):
if col == 0:
e_right = output[row - 1, col + 1] + matrix_x[row - 1, col + 1] + matrix_y_right[row - 1, col + 1]
e_up = output[row - 1, col] + matrix_x[row - 1, col]
output[row, col] = energy_map[row, col] + min(e_right, e_up)
elif col == n - 1:
e_left = output[row - 1, col - 1] + matrix_x[row - 1, col - 1] + matrix_y_left[row - 1, col - 1]
e_up = output[row - 1, col] + matrix_x[row - 1, col]
output[row, col] = energy_map[row, col] + min(e_left, e_up)
else:
e_left = output[row - 1, col - 1] + matrix_x[row - 1, col - 1] + matrix_y_left[row - 1, col - 1]
e_right = output[row - 1, col + 1] + matrix_x[row - 1, col + 1] + matrix_y_right[row - 1, col + 1]
e_up = output[row - 1, col] + matrix_x[row - 1, col]
output[row, col] = energy_map[row, col] + min(e_left, e_right, e_up)
return output
def calc_neighbor_matrix(self, kernel):
b, g, r = cv2.split(self.out_image)
output = np.absolute(cv2.filter2D(b, -1, kernel=kernel)) + \
np.absolute(cv2.filter2D(g, -1, kernel=kernel)) + \
np.absolute(cv2.filter2D(r, -1, kernel=kernel))
return output
def find_seam(self, cumulative_map):
m, n = cumulative_map.shape
output = np.zeros((m,), dtype=np.uint32)
output[-1] = np.argmin(cumulative_map[-1])
for row in range(m - 2, -1, -1):
prv_x = output[row + 1]
if prv_x == 0:
output[row] = np.argmin(cumulative_map[row, : 2])
else:
output[row] = np.argmin(cumulative_map[row, prv_x - 1: min(prv_x + 2, n)]) + prv_x - 1  # upper bound n so the column directly above is included at the right edge
return output
def delete_seam(self, seam_idx):
m, n = self.out_image.shape[: 2]
output = np.zeros((m, n - 1, 3))
for row in range(m):
col = seam_idx[row]
output[row, :, 0] = np.delete(self.out_image[row, :, 0], [col])
output[row, :, 1] = np.delete(self.out_image[row, :, 1], [col])
output[row, :, 2] = np.delete(self.out_image[row, :, 2], [col])
self.out_image = np.copy(output)
def add_seam(self, seam_idx):
m, n = self.out_image.shape[: 2]
output = np.zeros((m, n + 1, 3))
for row in range(m):
col = seam_idx[row]
for ch in range(3):
if col == 0:
p = np.average(self.out_image[row, col: col + 2, ch])
output[row, col, ch] = self.out_image[row, col, ch]
output[row, col + 1, ch] = p
output[row, col + 1:, ch] = self.out_image[row, col:, ch]
else:
p = np.average(self.out_image[row, col - 1: col + 1, ch])
output[row, : col, ch] = self.out_image[row, : col, ch]
output[row, col, ch] = p
output[row, col + 1:, ch] = self.out_image[row, col:, ch]
self.out_image = np.copy(output)
def update_seams(self, remaining_seams, current_seam):
output = []
for seam in remaining_seams:
seam[np.where(seam >= current_seam)] += 2
output.append(seam)
return output
def rotate_image(self, image, ccw):
m, n, ch = image.shape
output = np.zeros((n, m, ch))
if ccw:
image_flip = np.fliplr(image)
for c in range(ch):
for row in range(m):
output[:, row, c] = image_flip[row, :, c]
else:
for c in range(ch):
for row in range(m):
output[:, m - 1 - row, c] = image[row, :, c]
return output
def rotate_mask(self, mask, ccw):
m, n = mask.shape
output = np.zeros((n, m))
if ccw > 0:
image_flip = np.fliplr(mask)
for row in range(m):
output[:, row] = image_flip[row, : ]
else:
for row in range(m):
output[:, m - 1 - row] = mask[row, : ]
return output
def delete_seam_on_mask(self, seam_idx):
m, n = self.mask.shape
output = np.zeros((m, n - 1))
for row in range(m):
col = seam_idx[row]
output[row, : ] = np.delete(self.mask[row, : ], [col])
self.mask = np.copy(output)
def add_seam_on_mask(self, seam_idx):
m, n = self.mask.shape
output = np.zeros((m, n + 1))
for row in range(m):
col = seam_idx[row]
if col == 0:
p = np.average(self.mask[row, col: col + 2])
output[row, col] = self.mask[row,
atom = res.atoms[atomname]
aname = atom.name
rname = atom.resname
return rname, aname
def getGroup(self, resname, atomname):
"""
Get the group/type associated with the input
fields. If not found, return a null string.
Parameters:
resname: The residue name (string)
atomname: The atom name (string)
"""
group = ""
if resname in self.map:
resid = self.map[resname]
if resid.hasAtom(atomname):
atom = resid.atoms[atomname]
group = atom.group
return group
def getParams(self, resname, atomname):
"""
Get the parameters associated with the input fields.
The residue itself is needed instead of simply its name
because the forcefield may use a different residue name
than the standard amino acid name.
Parameters
resname: The residue name (string)
atomname: The atom name (string)
Returns
charge: The charge on the atom (float)
radius: The radius of the atom (float)
"""
charge = None
radius = None
#print self.map.keys()
if resname in self.map:
resid = self.map[resname]
if resid.hasAtom(atomname):
atom = resid.atoms[atomname]
charge = atom.charge
radius = atom.radius
return charge, radius
def getParams1(self, residue, name):
"""
Get the parameters associated with the input fields.
The residue itself is needed instead of simply its name
because the forcefield may use a different residue name
than the standard amino acid name.
Parameters
residue: The residue (residue)
name: The atom name (string)
Returns
charge: The charge on the atom (float)
radius: The radius of the atom (float)
"""
charge = None
radius = None
resname = ""
atomname = ""
if self.name == "amber":
resname, atomname = self.getAmberParams(residue, name)
elif self.name == "charmm":
resname, atomname = self.getCharmmParams(residue, name)
elif self.name == "parse":
resname, atomname = self.getParseParams(residue, name)
else:
resname = residue.name
atomname = name
defresidue = self.getResidue(resname)
# print "resname: %s, defresidue: %s" % (resname, defresidue)
if defresidue == None:
return charge, radius
atom = defresidue.getAtom(atomname)
if atom != None:
charge = atom.get("charge")
radius = atom.get("radius")
return charge, radius
def getAmberParams(self, residue, name):
"""
Get the forcefield definitions from the Amber database
Parameters
residue: The residue (residue)
name: The atom name (string)
Returns
resname: The name of the amber residue
atomname: The name of the amber atom
"""
atomname = name
type = residue.get("type")
if type == 4:
resname = residue.get("naname")
else:
resname = residue.get("name")
# Residue Substitutions
if residue.get("name") == "CYS" and "HG" not in residue.get("map"):
resname = "CYX"
elif residue.get("name") == "HIS":
if "HD1" in residue.get("map") and "HE2" in residue.get("map"):
resname = "HIP"
elif "HD1" in residue.get("map"):
resname = "HID"
elif "HE2" in residue.get("map"):
resname = "HIE"
else:
resname = "HID" # Default for no hydrogens
elif residue.get("name") == "HSP":
resname = "HIP"
elif residue.get("name") == "HSE":
resname = "HIE"
elif residue.get("name") == "HSD":
resname = "HID"
elif residue.get("name") == "GLU" or residue.get("name") == "GLH":
if "HE1" in residue.get("map"):
resname = "GLH"
if atomname == "HE1": atomname = "HE2"
elif atomname == "OE1": atomname = "OE2"
elif atomname == "OE2": atomname = "OE1"
elif "HE2" in residue.get("map"): resname = "GLH"
elif residue.get("name") == "ASP" or residue.get("name") == "ASH":
if "HD1" in residue.get("map"):
resname = "ASH"
if atomname == "HD1": atomname = "HD2"
elif atomname == "OD1": atomname = "OD2"
elif atomname == "OD2": atomname = "OD1"
elif "HD2" in residue.get("map"): resname = "ASH"
if residue.get("isCterm"):
resname = "C" + resname
elif residue.get("isNterm"):
resname = "N" + resname
# Atom Substitutions
if resname == "WAT":
if atomname == "O": atomname = "OW"
elif atomname == "H1": atomname = "HW"
elif atomname == "H2": atomname = "HW"
elif resname == "ILE":
if atomname == "CD": atomname = "CD1"
if resname[0] == "N" and resname != "NME": # N-terminal
if atomname == "H": atomname = "H1"
if (resname == "CCYS" or resname == "NCYS") and atomname == "HG": atomname = "HSG"
if resname == "CYM" and atomname == "H": atomname = "HN"
if residue.get("isNterm") and resname == "NPRO" and atomname == "HN2":
atomname = "H2"
if residue.get("isNterm") and resname == "NPRO" and atomname == "HN1":
atomname = "H3"
return resname, atomname
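# Example of the substitutions above (added comment; the values follow directly from
# this method): a HIS residue whose atom map contains both HD1 and HE2 is looked up
# as the AMBER residue "HIP", one with only HD1 as "HID", and one with only HE2 as
# "HIE"; if the residue is also flagged isCterm the name gains a "C" prefix
# (e.g. "CHIP"). For water ("WAT"), atom "O" is renamed "OW" and both "H1" and "H2"
# become "HW".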
def getParseParams(self, residue, name):
"""
Get the forcefield definitions from the Parse database
Parameters
residue: The residue (residue)
name: The atom name (string)
Returns
resname: The name of the amber residue
atomname: The name of the amber atom
"""
atomname = name
resname = residue.name
# Terminal/Water Substitutions
nterm = residue.get("isNterm")
cterm = residue.get("isCterm")
if nterm and resname != "ACE":
if resname == "PRO" and nterm == 2:
resname = "PR+"
if atomname == "H2": atomname = "HN1"
elif atomname == "H3": atomname = "HN2"
elif resname == "PRO" and nterm == 1:
resname = "PRN"
if atomname == "H2" or atomname == "H3": atomname = "HN"
elif nterm == 2: # Neutral
if atomname in ["N","H","H2","H3","CA","HA","C","O"]:
resname = "BKN"
if atomname == "H":
atomname = "H1"
if atomname == 'H3':
atomname='H2'
elif nterm == 3: # Positive
if atomname in ["N","H","H2","H3","CA","HA","C","O"]:
resname = "BK+"
if atomname == "H": atomname = "H1"
elif cterm:
if atomname == "O": atomname = "O1"
elif atomname == "OXT": atomname = "O2"
if cterm == 1 and atomname in ["N","H","HA","CA","C","O1","O2"]:
resname = "BK-"
elif cterm == 2 and atomname in ["N","H","HA","CA","C","O1","O2","HO"]:
if atomname == "HO": atomname = "H2"
resname = "BKC"
#print 'Cterm resname is',resname
elif residue.get("type") == 3:
resname = "H2O"
if atomname == "O": atomname = "OH"
elif atomname == "H1": atomname = "HH1"
elif atomname == "H2": atomname = "HH2"
# Residue Substitutions
if resname == "HSD": resname = "HID"
elif resname in ["HIE","HSE"]: resname = "HIS"
elif resname in ["HIP","HSP"]: resname = "HI+"
elif resname == "ILE":
if atomname == "HG12": atomname = "HG11"
elif atomname == "HG13": atomname = "HG12"
elif atomname == "CD": atomname = "CD1"
elif resname == "CYS" and "HG" not in residue.get("map"):
resname = "CSS"
#
# Histidine
#
elif resname == "HIS":
if "HD1" in residue.get("map") and "HE2" in residue.get("map"):
resname = "HI+"
elif "HD1" in residue.get("map"):
resname = "HID"
elif "HE2" in residue.get("map"):
resname = "HIS"
elif resname == "GLU" or resname == "GLH":
if "HE1" in residue.get("map"):
resname = "GL0"
if atomname == "HE1": atomname = "HE2"
elif atomname == "OE1": atomname = "OE2"
elif atomname == "OE2": atomname = "OE1"
elif "HE2" in residue.get("map"): resname = "GL0"
elif resname == "ASP" or resname == "ASH":
if "HD1" in residue.get("map"):
resname = "AS0"
if atomname == "HD1": atomname = "HD2"
elif atomname == "OD1": atomname = "OD2"
elif atomname == "OD2": atomname = "OD1"
elif "HD2" in residue.get("map"): resname = "AS0"
elif resname == "ACE":
if atomname == "HH31": atomname = "HA1"
elif atomname == "HH32": atomname = "HA2"
elif atomname == "HH33": atomname = "HA3"
elif atomname == "CH3": atomname = "CA"
elif resname == "TYR":
if not "HH" in residue.get("map"):
resname="TYM"
elif resname == "TYM": resname = "TY-"
elif resname == "CYM": resname = "CY-"
elif resname == "LYN": resname = "LY0"
#
# Neutral LYS and neutral ARG detection based on hydrogens - added by Jens
#
elif resname == "LYS":
if not "HZ3" in residue.get("map"):
resname="LY0"
elif resname == "ARG":
if not "HE" in residue.get("map"):
resname="AR0"
elif resname == "NME":
resname = "N-M"
if atomname == "CH3": atomname = "CA"
elif atomname == "H": atomname = "H1"
elif atomname.startswith("HH"): atomname = "HA" + atomname[-1]
# Hydrogen Substitutions
if atomname == "H": atomname = "HN"
elif atomname == "HA2": atomname = "HA1"
elif atomname == "HA3": atomname = "HA2"
elif atomname == "HB2" and resname not in ["ALA"]: atomname = "HB1"
elif atomname == "HB3" and resname not in ["ALA"]: atomname = "HB2"
elif atomname == "HD2" and resname not in ["HIS","HI+","HID","AS0"]: atomname = "HD1"
elif atomname == "HD3" and resname not in ["HIS","HI+","HID"]: atomname = "HD2"
elif atomname == "HE2" and resname not in ["TRP","HIS","HI+","HID","GL0"]: atomname = "HE1"
elif atomname == "HE3" and resname not in | |
"""
Checks whether a sample matches PHE-defined recipes for VOCs/VUIs. Outputs to stdout
a tab delimited list of the following:
- PHE name for the matching VOC/VUI. "none" if no match. "multiple" if multiple matches.
- pangolin name for the matching VOC/VUI. "none" if no match. "multiple" if multiple matches.
- confidence of the match. "NA" if no match. "multiple" if multiple matches.
- current time on system
Logs debugging information to stderr
"""
from csv import reader
from argparse import ArgumentParser
from yaml import full_load as load_yaml
from datetime import datetime
from sys import exit, stderr
import logging
from recipe_graph import RecipeDirectedGraph
from typing import Tuple
WUHAN_REFERENCE_LENGTH = 29903
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
# create console handler and set level to debug
ch = logging.StreamHandler(stderr)
# create formatter
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# add formatter to ch
ch.setFormatter(formatter)
def get_recipe_match_confidence(recipe: dict, sequence: str, cached_results: dict) -> str:
"""
Calculate the confidence of a sample matching a given PHE VOC/VUI recipe.
PHE VOC/VUI recipes have a list of defining mutations and
different confidence thresholds based on the number of defining mutations that are found
in the sample. A PHE recipe typically defines only a single lineage,
and can depend on other PHE recipes to define required mutations from the ancestral lineages.
Assumes that a recipe can only depend on a single other recipe.
We use child to refer to a recipe that depends on another recipe,
and parent to refer to the other recipe being depended on by the recipe.
Ancestors are the chain of recipes being depended on by a child.
A child returns "confirmed" confidence if it passes "confirmed" threshold of it's own mutation definition,
AND all it's ancestors return "confirmed" or "probable".
A child returns "probable" confidence if it only passes the "probable" threshold of it's own mutation definition,
AND all it's ancestors return "confirmed" or "probable".
A child returns "NA" confidence if it fails all thresholds of it's own mutation definition OR
any of it's ancestors return "NA"
NB: If the recipe depends on another recipe, then calls get_recipe_match_confidence()
recursively. Does not check for cyclic dependencies in the recipes.
If you notice that this method is taking forever, check that there isn't a cycle in the
recipe dependencies causing an infinite recursion loop.
See more details on PHE recipes at https://github.com/phe-genomics/variant_definitions
Parameters:
-------------
recipe : dict
a dict representation of a single PHE recipe.
Expects the recipe dict to contain items:
- unique-id (str): the recipe name
- variants (list): list of SNPs, MNPs, deletions, insertions that define the lineage.
Each mutation will have nested items:
- one-based-reference-position (int): reference position, 1 based
- type: one of [SNP, MNP, deletion, insertion]
- reference-base: ref base if type=SNP, contiguous ref bases if type=MNP, ref base
before insertion if type=insertion, ref base before deletion and deleted ref bases if type=deletion
- variant-base: alt base if type=SNP, contiguous alt bases if type=MNP,
ref base before deletion if type=deletion, ref base before insertion followed by inserted bases if
type=insertion
- special (bool): only if the mutation is absolutely required
- calling-definition (dict): dict of how many mutations are required
for confirmed or probable confidence
- belongs-to-lineage (dict): nested dict containing item {"PANGO" => pangolin lineage}
- phe-label (str): PHE name for the VOC/VUI
- requires (str): the name of the recipe that the current recipe depends on. Can be missing if no dependencies.
sequence: str
Wuhan aligned sequence of sample. Deletions with respect to the reference must be padded with "-",
insertions with respect to the reference must be excised.
cached_results: dict
Hack to keep track of previous results in case we need to recursively call
get_recipe_match_confidence() to get the confidence of nested recipes.
Should have format {recipe_name => confidence}
Returns: str
------------
The confidence of the match for the recipe, taking into account all ancestral recipes if any
"""
recipe_name = recipe["unique-id"]
if recipe_name in cached_results:
logger.debug("Using cached results: " + cached_results[recipe_name])
return cached_results[recipe_name]
alt_match = 0
ref_match = 0
#if a "special mutation", e.g. E484K is required, but not present, this will be flipped to False
special_mutations = True
# Keep track of matched variants for logging
log_alt_match = []
log_ref_match = []
log_wrong_alt = []
pango_alias = "none"
phe_label = "none"
confidence = "NA"
req_recipe_confidence = None
for lineage_mutation in recipe['variants']:
pos = int(lineage_mutation['one-based-reference-position'])-1
if lineage_mutation['type'] == "MNP":
size = len(lineage_mutation['reference-base'])
seq_val = sequence[pos:pos+size]
elif lineage_mutation['type'] == "SNP":
seq_val = sequence[pos]
else:
#not considering indels at present
continue
log_is_special = "_spec" if "special" in lineage_mutation else ""
if seq_val == lineage_mutation['variant-base']:
alt_match += 1
log_alt_match.append("{}{}{}{}".format(
lineage_mutation['reference-base'],
lineage_mutation['one-based-reference-position'],
seq_val,
log_is_special
))
elif seq_val == lineage_mutation['reference-base']:
ref_match += 1
if "special" in lineage_mutation:
special_mutations = False
log_ref_match.append("{}{}{}".format(
lineage_mutation['reference-base'],
lineage_mutation['one-based-reference-position'],
log_is_special))
else:
if "special" in lineage_mutation:
special_mutations = False
log_wrong_alt.append("{}{}{}/{}{}".format(
lineage_mutation['reference-base'],
lineage_mutation['one-based-reference-position'],
lineage_mutation['variant-base'],
seq_val,
log_is_special))
calling_definition = recipe['calling-definition']
confidence = "NA"
pango_alias = recipe['belongs-to-lineage']['PANGO']
phe_label = recipe['phe-label']
if (special_mutations and
alt_match >= calling_definition['confirmed']['mutations-required'] and
ref_match <= calling_definition['confirmed']['allowed-wildtype']):
confidence = "confirmed"
elif ('probable' in calling_definition and
special_mutations and
alt_match >= calling_definition['probable']['mutations-required'] and
ref_match <= calling_definition['probable']['allowed-wildtype']):
confidence = "probable"
overall_confidence = confidence
if "requires" in recipe and confidence in ["confirmed", "probable"]:
req_recipe_name = recipe["requires"]
req_recipe_pango_alias = recipes[req_recipe_name]['belongs-to-lineage']['PANGO']
req_recipe_phe_label = recipes[req_recipe_name]['phe-label']
logger.debug(f"Checking required recipe {req_recipe_name} - {req_recipe_pango_alias} " +
f"of dependent recipe {recipe_name} - {pango_alias} ")
req_recipe_confidence = get_recipe_match_confidence(
recipe=recipes[req_recipe_name],
sequence=sequence,
cached_results=cached_results)
logger.debug(f"Required recipe pango: {req_recipe_pango_alias}" +
f", confidence: {req_recipe_confidence}" +
f", phe-label: {req_recipe_phe_label}" +
f" for reciped recipe {req_recipe_name} - {req_recipe_pango_alias} " +
f" of dependent recipe {recipe_name} - {pango_alias} ")
if req_recipe_confidence not in ["confirmed", "probable"]:
overall_confidence = "NA"
if confidence in ["confirmed", "probable"]:
logger.debug(f"Matched pango: {pango_alias} " +
f", confidence: {confidence} " +
f", overall-confidence: {overall_confidence} " +
f", phe-label: {phe_label}. " )
logger.debug("Alt matches: " + ", ".join(log_alt_match))
logger.debug("Ref matches: " + ", ".join(log_ref_match))
logger.debug("Wrong Alt: " + ", ".join(log_wrong_alt))
return overall_confidence
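# Minimal illustration (added comment; a toy definition, not an official PHE recipe).
# A recipe dict shaped like the variant_definitions YAML might look like:
#
#   toy_recipe = {
#       "unique-id": "toy-example",
#       "phe-label": "VUI-00/00",
#       "belongs-to-lineage": {"PANGO": "X.1"},
#       "variants": [
#           {"type": "SNP", "one-based-reference-position": 100,
#            "reference-base": "A", "variant-base": "G", "special": True},
#           {"type": "MNP", "one-based-reference-position": 200,
#            "reference-base": "AT", "variant-base": "CG"},
#       ],
#       "calling-definition": {
#           "confirmed": {"mutations-required": 2, "allowed-wildtype": 0},
#           "probable": {"mutations-required": 1, "allowed-wildtype": 1},
#       },
#   }
#
# For a Wuhan-aligned sequence carrying G at position 100 and CG at positions 200-201,
# get_recipe_match_confidence(toy_recipe, sequence, {}) returns "confirmed"; if only
# the position-100 SNP is present and position 200 is still wildtype, it falls back
# to "probable".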
def find_all_matching_recipes(recipes: dict, sequence: str) -> Tuple[str, str, str]:
"""
Traverse through all PHE VOC/VUI recipes and find all matches.
If a sample matches multiple PHE recipes, and
the recipes are not along the same branch in the recipe dependency graph,
then the sample is marked as matching "multiple" recipes.
If the sample matches multiple PHE lineage recipes, and
the lineages are related along the same tree branch,
(EG AY.4.2 is a child of B.1.617.2),
then the sample is marked as the lowest lineage along the branch.
Parameters:
--------------------
recipes : dict
{recipe_name => recipe_dict}
Load the dict of recipes from phe_recipes.yaml.
sequence: str
wuhan aligned sequence of sample. Deletions padded with "-". Insertions removed.
Returns: tuple (str, str, str)
---------------------------------
- matched_recipe_phe_label: str
PHE name for the VOC/VUI. "none" if no match. "multiple" if multiple matches.
- matched_recipe_pango_alias: str
pangolin name for the VOC/VUI. "none" if no match. "multiple" if multiple matches.
- matched_confidence: str
confidence of the match. "NA" if no match. "multiple" if multiple matches.
"""
# traverse the recipes and cache any matching recipes and
# associated confidence in dict matched_recipe_name_to_conf
matched_recipe_name_to_conf = {}
for recipe in recipes.values():
confidence = get_recipe_match_confidence(
recipe=recipe,
sequence=sequence,
cached_results=matched_recipe_name_to_conf)
if confidence != "NA":
recipe_name = recipe["unique-id"]
matched_recipe_name_to_conf[recipe_name] = confidence
# If there are multiple matching recipes, but they are all recipes for related lineages
# along the same branch in the lineage tree, then
# we return the lineage recipe for leaf-most lineage.
# If the matching lineages are from different branches in the lineage tree,
# then we mark the sample as "multiple", indicating that there are
# multiple conflicting lineage matches
if len(matched_recipe_name_to_conf.keys()) > 1:
matched_recipes = [recipes[recipe_name] for recipe_name in matched_recipe_name_to_conf.keys()]
matched_recipe_graph = RecipeDirectedGraph(matched_recipes)
if matched_recipe_graph.is_single_branch():
leaf_recipe_name = matched_recipe_graph.get_leaf_name()
leaf_recipe = recipes[leaf_recipe_name]
matched_recipe_pango_alias = leaf_recipe['belongs-to-lineage']['PANGO']
matched_recipe_phe_label = leaf_recipe['phe-label']
matched_confidence = matched_recipe_name_to_conf[leaf_recipe_name]
md5hash="323",
artifactfile=self.test_file,
)
Artifact.objects.create(
project=self.project,
revision=self.revision1,
md5hash="324",
artifactfile=self.test_file,
)
self.revision1.delete()
with self.assertRaises(Revision.DoesNotExist):
Revision.objects.get(revision="1")
with self.assertRaises(Artifact.DoesNotExist):
Artifact.objects.get(md5hash="321")
with self.assertRaises(Artifact.DoesNotExist):
Artifact.objects.get(md5hash="322")
with self.assertRaises(Artifact.DoesNotExist):
Artifact.objects.get(md5hash="323")
with self.assertRaises(Artifact.DoesNotExist):
Artifact.objects.get(md5hash="324")
# @todo(Stephan):
# These commented out tests need to be restructured once we can enforce
# that only the latest N Revisions are kept.
# This is not done right now
# def test_branch_revision_limit(self):
# """Tests that the revision limit of a branch is respected.
# """
# n_revisions_on_master = self.branch_master.revisions.count()
# n_revisions_kept_on_master = self.branch_master.nr_of_revisions_kept
# # Add exactly the amount of Revisions that the master branch allows
# for i in xrange(n_revisions_kept_on_master - n_revisions_on_master):
# new_revision = Revision.objects.create(revision=str(Revision.objects.count() + i + 1),
# project=self.project)
# new_revision.branches.add(self.branch_master)
# self.assertEqual(self.branch_master.revisions.count(),
# self.branch_master.nr_of_revisions_kept)
# # Add one more revision, the revision count of the branch should not go up
# new_revision = Revision.objects.create(revision='tooMuch', project=self.project)
# new_revision.branches.add(self.branch_master)
# self.assertEqual(self.branch_master.revisions.count(),
# self.branch_master.nr_of_revisions_kept)
# def test_change_branch_revision_limit(self):
# """Tests if we can change the number of Revisions a branch is
# allowed to have and immediately enforce this change.
# """
# self.branch_master.nr_of_revisions_kept = 1
# self.branch_master.save()
# self.assertEqual(self.branch_master.revisions.count(),
# self.branch_master.nr_of_revisions_kept)
def test_no_remove_earliest_revision_if_no_limit(self):
"""Tests that the earliest revision added for a branch is deleted if
there are too many of them.
In this case the revisions do not belong to any branch
"""
now = datetime.datetime.now()
nb_revs = 4
revisions = []
artifacts = []
for i in range(nb_revs):
rev = Revision.objects.create(
revision="%s" % (i + 9000),
project=self.project,
commit_time=now + datetime.timedelta(seconds=-i),
)
art = Artifact.objects.create(
project=self.project,
revision=rev,
md5hash="%s" % i,
artifactfile=self.test_file,
)
artifacts.append(art)
revisions.append(rev)
self.new_series.artifacts.add(art)
try:
for i in range(nb_revs):
self.assertIsNotNone(Revision.objects.get(revision="%s" % (i + 9000)))
except Revision.DoesNotExist:
self.fail(
"[Revision.DoesNotExist] One of the Revisions returned no object from the get query"
)
except Revision.MultipleObjectsReturned:
self.fail(
"[Revision.MultipleObjectsReturned] One of the Revisions returned more than one object from the get query"
)
except Exception as e:
self.fail("Unexpected Exception in get query %s" % e)
raise
def test_remove_earliest_revision_no_branch(self):
"""Tests that the earliest revision added for a branch is deleted if
there are too many of them.
In this case the revisions do not belong to any branch
"""
now = datetime.datetime.now()
nb_revs = 4
revisions = []
artifacts = []
# setting up the limit
self.new_series.nb_revisions_to_keep = 2
self.new_series.save() # save is needed
for i in range(nb_revs):
rev = Revision.objects.create(
revision="%s" % (i + 9000),
project=self.project,
commit_time=now + datetime.timedelta(seconds=i),
)
art = Artifact.objects.create(
project=self.project,
revision=rev,
md5hash="%s" % i,
artifactfile=self.test_file,
)
artifacts.append(art)
revisions.append(rev)
self.new_series.artifacts.add(art)
# print self.new_series.artifacts.all()
# self.assertEqual(Revision.objects.prefetch_related('artifacts__serie').count(), 2)
self.assertEqual(self.new_series.artifacts.count(), 2)
# there should be a better way to manipulate this expression
# self.assertEqual(Revision.objects.filter(artifacts__serie=self.new_series).all().distinct().count(), 2)
self.assertSetEqual(
set([art.revision for art in self.new_series.artifacts.all()]),
set(
[
Revision.objects.get(revision="9002"),
Revision.objects.get(revision="9003"),
]
),
)
try:
for i in range(nb_revs)[2:]:
self.assertIsNotNone(Revision.objects.get(revision="%s" % (i + 9000)))
except Revision.DoesNotExist:
self.fail(
"[Revision.DoesNotExist] One of the Revisions returned no object from the get query"
)
except Revision.MultipleObjectsReturned:
self.fail(
"[Revision.MultipleObjectsReturned] One of the Revisions returned more than one object from the get query"
)
except Exception as e:
self.fail("Unexpected Exception in get query %s" % e)
raise
# those revisions are the oldest, and should have been removed
with self.assertRaises(Revision.DoesNotExist):
Revision.objects.get(revision="9000")
with self.assertRaises(Revision.DoesNotExist):
Revision.objects.get(revision="9001")
def test_remove_earliest_revision_no_branch_several_artifacts(self):
"""Tests that the earliest revision added for a branch is deleted if
there are too many of them.
Some revisions may contain several artifacts.
"""
now = datetime.datetime.now()
nb_revs = 4
revisions = []
artifacts = []
# setting up the limit
self.new_series.nb_revisions_to_keep = 2
self.new_series.save() # save is needed
for i in range(nb_revs):
rev = Revision.objects.create(
revision="%s" % (i + 9000),
project=self.project,
commit_time=now + datetime.timedelta(seconds=i),
)
art = Artifact.objects.create(
project=self.project,
revision=rev,
md5hash="%s" % i,
artifactfile=self.test_file,
)
artifacts.append(art)
if (i % 2) == 0:
art = Artifact.objects.create(
project=self.project,
revision=rev,
md5hash="x%s" % i,
artifactfile=self.test_file,
)
artifacts.append(art)
revisions.append(rev)
for art in artifacts:
self.new_series.artifacts.add(art)
# print self.new_series.artifacts.all()
# self.assertEqual(Revision.objects.prefetch_related('artifacts__serie').count(), 2)
self.assertEqual(self.new_series.artifacts.count(), 3)
try:
for i in range(nb_revs)[2:]:
self.assertIsNotNone(Revision.objects.get(revision="%s" % (i + 9000)))
except Revision.DoesNotExist:
self.fail(
"[Revision.DoesNotExist] One of the Revisions returned no object from the get query"
)
except Revision.MultipleObjectsReturned:
self.fail(
"[Revision.MultipleObjectsReturned] One of the Revisions returned more than one object from the get query"
)
except Exception as e:
self.fail("Unexpected Exception in get query %s" % e)
raise
# those revisions are the oldest, and should have been removed
with self.assertRaises(Revision.DoesNotExist):
Revision.objects.get(revision="9000")
with self.assertRaises(Revision.DoesNotExist):
Revision.objects.get(revision="9001")
# there should be a better way to manipulate this expression
# self.assertEqual(Revision.objects.filter(artifacts__serie=self.new_series).all().distinct().count(), 2)
self.assertSetEqual(
set([art.revision for art in self.new_series.artifacts.all()]),
set(
[
Revision.objects.get(revision="9002"),
Revision.objects.get(revision="9003"),
]
),
)
def test_remove_earliest_revision_no_branch_several_artifacts_several_series(self):
"""Tests that the earliest revision added for a branch is deleted if
there are too many of them.
Some revisions may contain several artifacts, and some artifacts may be part
of several series.
"""
s2 = ProjectSeries.objects.create(
series="123456", project=self.project, release_date=datetime.datetime.now()
)
now = datetime.datetime.now()
nb_revs = 4
all_revisions = []
all_artifacts = []
# setting up the limit
self.new_series.nb_revisions_to_keep = 2
self.new_series.save() # save is needed
for i in range(nb_revs):
rev = Revision.objects.create(
revision="%s" % (i + 9000),
project=self.project,
commit_time=now + datetime.timedelta(seconds=i),
)
art = Artifact.objects.create(
project=self.project,
revision=rev,
md5hash="%s" % i,
artifactfile=self.test_file,
)
all_artifacts.append(art)
if (i % 2) == 0:
art = Artifact.objects.create(
project=self.project,
revision=rev,
md5hash="x%s" % i,
artifactfile=self.test_file,
)
all_artifacts.append(art)
all_revisions.append(rev)
# no limit on s2
for art in all_artifacts:
s2.artifacts.add(art)
# limit on self.new_series
for art in all_artifacts:
self.new_series.artifacts.add(art)
# print self.new_series.all_artifacts.all()
# self.assertEqual(Revision.objects.prefetch_related('artifacts__serie').count(), 2)
self.assertEqual(s2.artifacts.count(), 6)
self.assertEqual(self.new_series.artifacts.count(), 3)
# in this test, no revision has been removed:
# the oldest revisions are still referenced by s2, which has no limit set
self.assertIsNotNone(Revision.objects.get(revision="9000"))
self.assertIsNotNone(Revision.objects.get(revision="9001"))
# there should be a better way to manipulate this expression
# self.assertEqual(Revision.objects.filter(artifacts__serie=self.new_series).all().distinct().count(), 2)
self.assertSetEqual(
set([art.revision for art in self.new_series.artifacts.all()]),
set(
[
Revision.objects.get(revision="9002"),
Revision.objects.get(revision="9003"),
]
),
)
self.assertSetEqual(
set([art.revision for art in s2.artifacts.all()]), set(all_revisions)
)
def create_several_artifacts(self):
from django.utils.timezone import now as now_
now = now_()
if not hasattr(self, "nb_revs"):
self.nb_revs = 4
all_artifacts = []
for i in range(self.nb_revs):
art = Artifact.objects.create(
project=self.project,
md5hash="%s" % i,
artifactfile=self.test_file,
upload_date=now + datetime.timedelta(seconds=i),
)
all_artifacts.append((art, art.md5hash))
if (i % 2) == 0:
art = Artifact.objects.create(
project=self.project,
md5hash="x%s" % i,
artifactfile=self.test_file,
upload_date=now + datetime.timedelta(seconds=i + 100),
)
all_artifacts.append((art, art.md5hash))
return all_artifacts
def create_several_revisions(self):
from django.utils.timezone import now as now_
now = now_()
all_revisions = []
if not hasattr(self, "nb_revs"):
self.nb_revs = 4
for i in range(self.nb_revs):
rev = Revision.objects.create(
revision="%s" % (i + 9000),
project=self.project,
commit_time=now + datetime.timedelta(seconds=i),
)
all_revisions.append(rev)
return all_revisions
def test_remove_oldest_artifacts_without_revision(self):
"""Tests that the earliest artifacts added for a series are removed if
there are too many of them.
The artifacts do not have any revision; each artifact acts as a revision on its own.
Since the pruning is based on the date of the artifact, a grouping by
time is performed: artifacts that share the same creation date are
treated as one unique artifact.
"""
self.nb_revs = 6
all_artifacts = self.create_several_artifacts()
# setting up the limit
self.new_series.nb_revisions_to_keep = 3
self.new_series.save() # save is needed
for art in all_artifacts:
self.new_series.artifacts.add(art[0])
self.assertEqual(self.new_series.artifacts.count(), 3)
# all the ones with an 'x' prefix are the newest
kept_artifacts_index = [1, 4, 7]
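# With nb_revs = 6, create_several_artifacts() appends artifacts in the order
# ['0', 'x0', '1', '2', 'x2', '3', '4', 'x4', '5']; the 'x*' artifacts get an
# upload_date offset of +100 seconds, so the three newest artifacts are
# 'x0', 'x2' and 'x4', i.e. list indices 1, 4 and 7.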
try:
for k in kept_artifacts_index:
self.assertIsNotNone(Artifact.objects.get(md5hash=all_artifacts[k][1]))
except Artifact.DoesNotExist as e:
self.fail(
"[Artifact.DoesNotExist] One of the Artifacts returned no object from the get query %s"
% e
)
except Artifact.MultipleObjectsReturned:
self.fail(
"[Revision.MultipleObjectsReturned] One of the Revisions returned more than one object from the get query"
)
except Exception as e:
self.fail("Unexpected Exception in get query %s" % e)
raise
for k, art in enumerate(all_artifacts):
if k in kept_artifacts_index:
continue
with self.assertRaises(Artifact.DoesNotExist):
Artifact.objects.get(md5hash=art[1])
# @skip('to fix')
# def test_remove_earliest_revision_with_branch(self):
# """Tests that the earliest revision added for a branch is deleted if
# there are too many of them.
#
# In this case the all_revisions do not belong to any branch
# """
# branch = Branch.objects.create(name='branch', nb_revisions_to_keep=3)
#
# revision1 = Revision.objects.create(revision='9991', project=self.project)
# revision2 = Revision.objects.create(revision='9992', project=self.project)
# revision3 = Revision.objects.create(revision='9993', project=self.project)
# revision4 = Revision.objects.create(revision='9994', project=self.project)
#
# branch.all_revisions.add(revision1, revision2, revision3)
#
# branch.all_revisions.add(revision4)
#
# self.assertEqual(branch.all_revisions.count(), 3)
#
# try:
# | |
self.api_client.select_header_accept(
['application/json', 'application/problem+json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['basicAuth'] # noqa: E501
return self.api_client.call_api(
'/lists/{list_id}/interest-categories/{interest_category_id}/interests/{interest_id}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_list_member(self, list_id, subscriber_hash, **kwargs): # noqa: E501
"""Archive list member # noqa: E501
Archive a list member. To permanently delete, use the delete-permanent action. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_list_member(list_id, subscriber_hash, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str list_id: The unique ID for the list. (required)
:param str subscriber_hash: The MD5 hash of the lowercase version of the list member's email address. (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_list_member_with_http_info(list_id, subscriber_hash, **kwargs) # noqa: E501
else:
(data) = self.delete_list_member_with_http_info(list_id, subscriber_hash, **kwargs) # noqa: E501
return data
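# Illustrative usage sketch (the `api` instance, list id and email address are
# assumed): the subscriber hash is the MD5 digest of the lowercase email address,
# as described in the docstring above.
#   import hashlib
#   subscriber_hash = hashlib.md5("user@example.com".lower().encode()).hexdigest()
#   api.delete_list_member("4ca5becb80", subscriber_hash)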
def delete_list_member_with_http_info(self, list_id, subscriber_hash, **kwargs): # noqa: E501
"""Archive list member # noqa: E501
Archive a list member. To permanently delete, use the delete-permanent action. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_list_member_with_http_info(list_id, subscriber_hash, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str list_id: The unique ID for the list. (required)
:param str subscriber_hash: The MD5 hash of the lowercase version of the list member's email address. (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['list_id', 'subscriber_hash'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_list_member" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'list_id' is set
if ('list_id' not in params or
params['list_id'] is None):
raise ValueError("Missing the required parameter `list_id` when calling ``") # noqa: E501
# verify the required parameter 'subscriber_hash' is set
if ('subscriber_hash' not in params or
params['subscriber_hash'] is None):
raise ValueError("Missing the required parameter `subscriber_hash` when calling ``") # noqa: E501
collection_formats = {}
path_params = {}
if 'list_id' in params:
path_params['list_id'] = params['list_id'] # noqa: E501
if 'subscriber_hash' in params:
path_params['subscriber_hash'] = params['subscriber_hash'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/problem+json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['basicAuth'] # noqa: E501
return self.api_client.call_api(
'/lists/{list_id}/members/{subscriber_hash}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_list_member_note(self, list_id, subscriber_hash, note_id, **kwargs): # noqa: E501
"""Delete note # noqa: E501
Delete a specific note for a specific list member. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_list_member_note(list_id, subscriber_hash, note_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str list_id: The unique ID for the list. (required)
:param str subscriber_hash: The MD5 hash of the lowercase version of the list member's email address. (required)
:param str note_id: The id for the note. (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_list_member_note_with_http_info(list_id, subscriber_hash, note_id, **kwargs) # noqa: E501
else:
(data) = self.delete_list_member_note_with_http_info(list_id, subscriber_hash, note_id, **kwargs) # noqa: E501
return data
def delete_list_member_note_with_http_info(self, list_id, subscriber_hash, note_id, **kwargs): # noqa: E501
"""Delete note # noqa: E501
Delete a specific note for a specific list member. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_list_member_note_with_http_info(list_id, subscriber_hash, note_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str list_id: The unique ID for the list. (required)
:param str subscriber_hash: The MD5 hash of the lowercase version of the list member's email address. (required)
:param str note_id: The id for the note. (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['list_id', 'subscriber_hash', 'note_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_list_member_note" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'list_id' is set
if ('list_id' not in params or
params['list_id'] is None):
raise ValueError("Missing the required parameter `list_id` when calling ``") # noqa: E501
# verify the required parameter 'subscriber_hash' is set
if ('subscriber_hash' not in params or
params['subscriber_hash'] is None):
raise ValueError("Missing the required parameter `subscriber_hash` when calling ``") # noqa: E501
# verify the required parameter 'note_id' is set
if ('note_id' not in params or
params['note_id'] is None):
raise ValueError("Missing the required parameter `note_id` when calling ``") # noqa: E501
collection_formats = {}
path_params = {}
if 'list_id' in params:
path_params['list_id'] = params['list_id'] # noqa: E501
if 'subscriber_hash' in params:
path_params['subscriber_hash'] = params['subscriber_hash'] # noqa: E501
if 'note_id' in params:
path_params['note_id'] = params['note_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/problem+json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['basicAuth'] # noqa: E501
return self.api_client.call_api(
'/lists/{list_id}/members/{subscriber_hash}/notes/{note_id}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_list_merge_field(self, list_id, merge_id, **kwargs): # noqa: E501
"""Delete merge field # noqa: E501
Delete a specific merge field in a list. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_list_merge_field(list_id, merge_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str list_id: The unique ID for the list. (required)
:param str merge_id: The id for the merge field. (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_list_merge_field_with_http_info(list_id, merge_id, **kwargs) # noqa: E501
else:
(data) = self.delete_list_merge_field_with_http_info(list_id, merge_id, **kwargs) # noqa: E501
return data
def delete_list_merge_field_with_http_info(self, list_id, merge_id, **kwargs): # noqa: E501
"""Delete merge field # noqa: E501
Delete a specific merge field in a list. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_list_merge_field_with_http_info(list_id, merge_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str list_id: The unique ID for the list. (required)
:param str merge_id: The id for the merge field. (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['list_id', 'merge_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_list_merge_field" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'list_id' is set
if ('list_id' not in params or
params['list_id'] is None):
raise ValueError("Missing the required parameter `list_id` when calling ``") # noqa: E501
# verify the required parameter 'merge_id' is set
if ('merge_id' not in params or
params['merge_id'] is None):
raise ValueError("Missing the required parameter `merge_id` when calling ``") # noqa: E501
collection_formats = {}
path_params = {}
if 'list_id' in params:
path_params['list_id'] = params['list_id'] # noqa: E501
if 'merge_id' in params:
path_params['merge_id'] = params['merge_id'] # noqa: E501
query_params = []
| |
grids (e.g., travel time, azimuth and take off angle)
"""
__valid_grid_type__ = ['TIME', 'TIME2D', 'ANGLE', 'ANGLE2D']
def __init__(self, network_code, data_or_dims, origin, spacing, seed,
seed_label, phase='P', value=0,
grid_units=__default_grid_units__,
grid_type='TIME', float_type="FLOAT", model_id=None):
self.seed = seed
self.seed_label = seed_label
self.network_code = network_code
if grid_type not in self.__valid_grid_type__:
raise ValueError(f'{grid_type} is not a valid grid type')
self.grid_type = grid_type
super().__init__(data_or_dims, origin, spacing,
phase=phase, value=value,
grid_type='TIME', grid_units=grid_units,
float_type=float_type, model_id=model_id)
def __repr__(self):
line = f'Travel Time Grid\n' \
f' origin : {self.origin}\n' \
f' spacing : {self.spacing}\n' \
f' dimensions : {self.shape}\n' \
f' seed label : {self.seed_label}\n' \
f' seed location : {self.seed}'
return line
@classmethod
def get_base_name(cls, network_code, phase, seed_label, grid_type):
validate_phase(phase)
if grid_type not in cls.__valid_grid_type__:
raise ValueError(f'{grid_type} is not a valid grid type')
base_name = f'{network_code}.{phase}.{seed_label}.' \
f'{grid_type.lower()}'
return base_name
@property
def base_name(self):
base_name = self.get_base_name(self.network_code, self.phase,
self.seed_label, self.grid_type)
return base_name
def write(self, path='.'):
base_name = self.base_name
self._write_grid_data(base_name, path=path)
self._write_grid_header(base_name, path=path, seed=self.seed,
seed_label=self.seed_label,
seed_units=self.grid_units)
self._write_grid_model_id(base_name, path=path)
class TTGrid(SeededGrid):
def __init__(self, network_code, data_or_dims, origin, spacing, seed,
seed_label, phase='P', value=0, float_type="FLOAT",
model_id=None, grid_units='METER'):
super().__init__(network_code, data_or_dims, origin, spacing, seed,
seed_label, phase=phase, value=value,
grid_type='TIME', float_type=float_type,
model_id=model_id, grid_units=grid_units)
def to_azimuth(self):
"""
This function calculates the azimuth for every grid point of a travel
time grid calculated using an Eikonal solver
:return: azimuth angle grid
.. Note: The convention for the azimuth is that 0 degree points north.
"""
gds_tmp = np.gradient(self.data)
gds = [-gd for gd in gds_tmp]
azimuth = np.arctan2(gds[0], gds[1]) * 180 / np.pi
# azimuth is zero northwards
return AngleGrid(self.network_code, azimuth, self.origin, self.spacing,
self.seed, self.seed_label, 'AZIMUTH',
phase=self.phase, float_type=self.float_type,
model_id=self.model_id, grid_units=self.grid_units)
def to_takeoff(self):
gds_tmp = np.gradient(self.data)
gds = [-gd for gd in gds_tmp]
hor = np.sqrt(gds[0] ** 2 + gds[1] ** 2)
takeoff = np.arctan2(hor, -gds[2]) * 180 / np.pi
# takeoff is zero pointing down
return AngleGrid(self.network_code, takeoff, self.origin, self.spacing,
self.seed, self.seed_label, 'TAKEOFF',
phase=self.phase, float_type=self.float_type,
model_id=self.model_id, grid_units=self.grid_units)
def to_azimuth_point(self, coord, grid_space=False, mode='nearest',
order=1, **kwargs):
"""
calculate the azimuth angle at a particular point on the grid for a
given seed location
:param coord: coordinates at which to calculate the azimuth angle
:param grid_space: true if the coordinates are expressed in
grid space (indices can be fractional) as opposed to model space
(x, y, z)
:param mode: interpolation mode
:param order: interpolation order
:return: azimuth angle at the location coord
"""
return self.to_azimuth().interpolate(coord,
grid_space=grid_space,
mode=mode, order=order,
**kwargs)[0]
def to_takeoff_point(self, coord, grid_space=False, mode='nearest',
order=1, **kwargs):
"""
calculate the takeoff angle at a particular point on the grid for a
given seed location
:param coord: coordinates at which to calculate the takeoff angle
:param grid_space: true if the coordinates are expressed in
grid space (indices can be fractional) as opposed to model space
(x, y, z)
:param mode: interpolation mode
:param order: interpolation order
:return: takeoff angle at the location coord
"""
return self.to_takeoff().interpolate(coord,
grid_space=grid_space,
mode=mode, order=order,
**kwargs)[0]
def ray_tracer(self, start, grid_space=False, max_iter=1000,
arrival_id=None):
"""
This function calculates the ray between a starting point (start) and an
end point, which should be the seed of the travel_time grid, using the
gradient descent method.
:param start: the starting point (usually event location)
:type start: tuple, list or numpy.array
:param grid_space: true if the coordinates are expressed in
grid space (indices can be fractional) as opposed to model space
(x, y, z)
:param max_iter: maximum number of iterations
:param arrival_id: id of the arrival associated to the ray if
applicable
:rtype: numpy.array
"""
return ray_tracer(self, start, grid_space=grid_space,
max_iter=max_iter, arrival_id=arrival_id,
earth_model_id=self.model_id,
network=self.network_code)
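# Illustrative usage sketch (coordinates are assumed): trace a ray from a trial
# event location back to the seed (receiver) of this travel time grid.
#   ray = tt_grid.ray_tracer([650000., 4520000., -500.], grid_space=False)
# Here tt_grid is a TTGrid instance; per the docstring above, the result is the
# ray path produced by the module-level ray_tracer function.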
@classmethod
def from_velocity(cls, seed, seed_label, velocity_grid):
return velocity_grid.to_time(seed, seed_label)
def write(self, path='.'):
return super().write(path=path)
@property
def site(self):
return self.seed_label
class TravelTimeEnsemble:
def __init__(self, travel_time_grids):
"""
Combine a list of travel time grids together providing meta
functionality (multi-threaded ray tracing, sorting, travel-time
calculation for a specific location etc.). It is assumed that
all grids are compatible, i.e., that all the grids have the same
origin, spacing and dimensions.
:param travel_time_grids: a list of TTGrid objects
"""
self.travel_time_grids = travel_time_grids
self.__i__ = 0
for tt_grid in self.travel_time_grids:
try:
assert tt_grid.check_compatibility(travel_time_grids[0])
except:
raise AssertionError('grids are not all compatible')
def __len__(self):
return len(self.travel_time_grids)
def __add__(self, other):
for travel_time_grid in other.travel_time_grids:
self.travel_time_grids.append(travel_time_grid)
return TravelTimeEnsemble(self.travel_time_grids)
def __iter__(self):
self.__i__ = 0
return self
def __next__(self):
if self.__i__ < len(self):
result = self.travel_time_grids[self.__i__]
self.__i__ += 1
return result
else:
raise StopIteration
def __getitem__(self, item):
if isinstance(item, int):
return self.travel_time_grids[item]
if isinstance(item, str):
for travel_time_grid in self.travel_time_grids:
if travel_time_grid.seed_label == item:
return travel_time_grid
raise KeyError(f'{item} not found')
def __repr__(self):
line = f'Number of travel time grids: {len(self)}'
return line
@classmethod
def from_files(cls, path):
"""
create a travel time ensemble from files located in a directory
:param path: the base path to the directory containing the travel time
files.
:return:
"""
tt_grids = []
for fle in Path(path).glob('*time*.hdr'):
path = fle.parent
base_name = '.'.join(fle.name.split('.')[:-1])
fname = str(Path(path) / base_name)
tt_grid = read_grid(fname, format='NLLOC',
float_type=__default_float_type__)
tt_grids.append(tt_grid)
return cls(tt_grids)
def select(self, seed_labels: Optional[list] = None,
phase: Optional[list] = None):
"""
return the list of grids corresponding to seed_labels.
:param seed_labels: seed labels of the travel time grids to return
:param phase: the phase {'P' or 'S'}, both if None.
:return: a list of travel time grids
:rtype: TravelTimeEnsemble
"""
if (seed_labels is None) and (phase is None):
return self
if seed_labels is None:
seed_labels = np.unique(self.seed_labels)
if phase is None:
phase = ['P', 'S']
returned_grids = []
for travel_time_grid in self.travel_time_grids:
if travel_time_grid.seed_label in seed_labels:
if travel_time_grid.phase in phase:
returned_grids.append(travel_time_grid)
return TravelTimeEnsemble(returned_grids)
def sort(self, ascending:bool = True):
"""
sorting the travel time grids by seed_label
:param ascending: if true the grids are sorted in ascending order
:type ascending: bool
:return: sorted travel time grids.
:rtype: TravelTimeEnsemble
"""
i = np.argsort(self.seed_labels)
if not ascending:
i = i[-1::-1]
sorted_tt_grids = np.array(self.travel_time_grids)[i]
return TravelTimeEnsemble(sorted_tt_grids)
def travel_time(self, seed, grid_space: bool = False,
seed_labels: Optional[list] = None,
phase: Optional[list] = None):
"""
calculate the travel time at a specific point for a series of site
ids
:param seed: travel time seed
:param grid_space: true if the coordinates are expressed in
grid space (indices can be fractional) as opposed to model space
(x, y, z)
:param seed_labels: a list of sites from which to calculate the
travel time.
:param phase: a list of phases for which the travel time needs to be
calculated
:return: a dict of dicts of travel times keyed by phase and seed label
"""
if isinstance(seed, list):
seed = np.array(seed)
if grid_space:
seed = self.travel_time_grids[0].transform_from(seed)
if not self.travel_time_grids[0].in_grid(seed):
raise ValueError('seed is outside the grid')
tt_grids = self.select(seed_labels=seed_labels, phase=phase)
tts = []
labels = []
phases = []
for tt_grid in tt_grids:
labels.append(tt_grid.seed_label)
tts.append(tt_grid.interpolate(seed.T,
grid_space=False)[0])
phases.append(tt_grid.phase)
tts_dict = {}
for phase in np.unique(phases):
tts_dict[phase] = {}
for label, tt, phase in zip(labels, tts, phases):
tts_dict[phase][label] = tt
return tts_dict
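# Illustrative usage sketch (path, coordinates and station labels are assumed):
# build the ensemble from NLLOC grid files on disk and query travel times at a
# trial location; the result is nested as {phase: {seed_label: travel_time}}.
#   ensemble = TravelTimeEnsemble.from_files('/path/to/time_grids')
#   tts = ensemble.travel_time([650000., 4520000., -500.],
#                              seed_labels=['STA01', 'STA02'], phase=['P'])
#   p_time_sta01 = tts['P']['STA01']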
def angles(self, seed, grid_space: bool = False,
seed_labels: Optional[list] = None,
phase: Optional[list] = None, **kwargs):
"""
calculate the azimuth and takeoff angles at a specific point for a series of site
ids
:param seed: travel time seed
:param grid_space: true if the coordinates are expressed in
grid space (indices can be fractional) as opposed to model space
(x, y, z)
:param seed_labels: a list of sites from which to calculate the
travel time.
:param phase: a list of phases for which the travel time need to be
calculated
:return: dictionaries of azimuth and takeoff angles keyed by phase and seed label
"""
if isinstance(seed, list):
seed = np.array(seed)
if grid_space:
seed = self.travel_time_grids[0].transform_from(seed)
if not self.travel_time_grids[0].in_grid(seed):
raise ValueError('seed is outside the grid')
tt_grids = self.select(seed_labels=seed_labels, phase=phase)
azimuths = []
takeoffs = []
labels = []
phases = []
for tt_grid in tt_grids:
labels.append(tt_grid.seed_label)
azimuths.append(tt_grid.to_azimuth_point(seed.T,
grid_space=False,
**kwargs))
takeoffs.append(tt_grid.to_takeoff_point(seed.T,
grid_space=False,
**kwargs))
phases.append(tt_grid.phase)
azimuth_dict = {}
takeoff_dict = {}
for phase in np.unique(phases):
azimuth_dict[phase] = {}
takeoff_dict[phase] = {}
for label, azimuth, takeoff, phase in zip(labels, azimuths, takeoffs,
phases):
takeoff_dict[phase][label] = takeoff
azimuth_dict[phase][label] = azimuth
angle_dict = | |
"grafana",
"image": "x",
"cpu": 64,
"memoryReservation": 128,
"links": ["loki"],
"portMappings": [
{"containerPort": 3000, "hostPort": 0, "protocol": "tcp"}
],
"essential": True,
"entryPoint": [],
"environment": [],
"mountPoints": [
{"sourceVolume": "grafana", "containerPath": "/var/lib/grafana"}
],
"volumesFrom": [],
"user": "0",
"logConfiguration": {
"logDriver": "awslogs",
"options": {
"awslogs-group": "loggroup",
"awslogs-region": "eu-central-1",
"awslogs-stream-prefix": "promstack",
},
},
},
{
"name": "prometheus",
"image": "x",
"cpu": 64,
"memoryReservation": 256,
"links": ["loki"],
"portMappings": [
{"containerPort": 9090, "hostPort": 0, "protocol": "tcp"}
],
"essential": True,
"entryPoint": [],
"environment": [],
"mountPoints": [
{"sourceVolume": "discovery", "containerPath": "/discovery"}
],
"volumesFrom": [],
"user": "0",
"logConfiguration": {
"logDriver": "awslogs",
"options": {
"awslogs-group": "loggroup",
"awslogs-region": "eu-central-1",
"awslogs-stream-prefix": "promstack",
},
},
},
{
"name": "prometheus_discovery",
"image": "x",
"cpu": 8,
"memoryReservation": 32,
"portMappings": [],
"essential": True,
"command": [
"--interval",
"15",
"--directory",
"/output",
"--region",
"eu-central-1",
],
"environment": [],
"mountPoints": [
{"sourceVolume": "discovery", "containerPath": "/output"}
],
"volumesFrom": [],
},
{
"name": "loki",
"image": "x",
"cpu": 64,
"memoryReservation": 128,
"portMappings": [
{"containerPort": 3100, "hostPort": 0, "protocol": "tcp"}
],
"essential": True,
"environment": [],
"mountPoints": [],
"volumesFrom": [],
"logConfiguration": {
"logDriver": "awslogs",
"options": {
"awslogs-group": "loggroup",
"awslogs-region": "eu-central-1",
"awslogs-stream-prefix": "promstack",
},
},
},
],
"family": "promstack",
"taskRoleArn": "arn:aws:iam::123456789123:role/promstack-discovery",
"executionRoleArn": "arn:aws:iam::123456789123:role/cluster-name_ecs_task_execution_role",
"revision": 25,
"volumes": [],
"status": "ACTIVE",
"requiresAttributes": [
{"name": "com.amazonaws.ecs.capability.logging-driver.awslogs"},
{"name": "ecs.capability.execution-role-awslogs"},
{"name": "com.amazonaws.ecs.capability.docker-remote-api.1.19"},
{"name": "ecs.capability.efs"},
{"name": "com.amazonaws.ecs.capability.docker-remote-api.1.17"},
{"name": "com.amazonaws.ecs.capability.docker-remote-api.1.21"},
{"name": "com.amazonaws.ecs.capability.task-iam-role"},
{"name": "ecs.capability.container-ordering"},
{"name": "com.amazonaws.ecs.capability.docker-remote-api.1.25"},
],
"placementConstraints": [],
"compatibilities": ["EC2"],
},
"ResponseMetadata": {
"RequestId": "01234567-8901-2345-6789-012345678901",
"HTTPStatusCode": 200,
"HTTPHeaders": {
"x-amzn-requestid": "01234567-8901-2345-6789-012345678901",
"content-type": "application/x-amz-json-1.1",
"content-length": "11380",
"date": "Tue, 20 Jul 2020 20:00:00 GMT",
},
"RetryAttempts": 0,
},
},
{
"taskDefinition": {
"taskDefinitionArn": "arn:aws:ecs:eu-central-1:123456789123:task-definition/what:13",
"containerDefinitions": [
{
"name": "ssh-forward",
"image": "x",
"cpu": 8,
"memoryReservation": 50,
"portMappings": [],
"essential": True,
"environment": [
{
"name": "SSH_PARM",
"value": "-v -oStrictHostKeyChecking=accept-new -oExitOnForwardFailure=yes -oServerAliveInterval=60 -R 5434:main.cr5lxi0fhhms.eu-central-1.rds.amazonaws.com:5432 -N",
},
{"name": "SSH_TARGET", "value": "[email protected]"},
{"name": "SSH_TARGET_CMD", "value": ""},
],
"mountPoints": [],
"volumesFrom": [],
"secrets": [],
"logConfiguration": {
"logDriver": "awslogs",
"options": {
"awslogs-group": "loggroup",
"awslogs-region": "eu-central-1",
"awslogs-stream-prefix": "ecs",
},
},
}
],
"family": "ssh-forward",
"executionRoleArn": "arn:aws:iam::123456789123:role/cluster-name_ecs_task_execution_role",
"revision": 13,
"volumes": [],
"status": "ACTIVE",
"requiresAttributes": [
{"name": "com.amazonaws.ecs.capability.logging-driver.awslogs"},
{"name": "ecs.capability.execution-role-awslogs"},
{"name": "com.amazonaws.ecs.capability.ecr-auth"},
{"name": "com.amazonaws.ecs.capability.docker-remote-api.1.19"},
{"name": "com.amazonaws.ecs.capability.docker-remote-api.1.21"},
{"name": "ecs.capability.execution-role-ecr-pull"},
{"name": "ecs.capability.secrets.ssm.environment-variables"},
],
"placementConstraints": [],
"compatibilities": ["EC2"],
},
"ResponseMetadata": {
"RequestId": "01234567-8901-2345-6789-012345678901",
"HTTPStatusCode": 200,
"HTTPHeaders": {
"x-amzn-requestid": "01234567-8901-2345-6789-012345678901",
"content-type": "application/x-amz-json-1.1",
"content-length": "1546",
"date": "Tue, 20 Jul 2020 20:00:00 GMT",
},
"RetryAttempts": 0,
},
},
{
"taskDefinition": {
"taskDefinitionArn": "arn:aws:ecs:eu-central-1:123456789123:task-definition/eoifjioejffew:41",
"containerDefinitions": [
{
"name": "testapp",
"image": "x",
"cpu": 8,
"memoryReservation": 500,
"portMappings": [
{"containerPort": 80, "hostPort": 0, "protocol": "tcp"}
],
"essential": True,
"environment": [{"name": "PORT", "value": "80"}],
"mountPoints": [],
"volumesFrom": [],
"logConfiguration": {
"logDriver": "awslogs",
"options": {
"awslogs-group": "loggroup",
"awslogs-region": "eu-central-1",
"awslogs-stream-prefix": "ecs",
},
},
}
],
"family": "testapp",
"executionRoleArn": "arn:aws:iam::123456789123:role/cluster-name_ecs_task_execution_role",
"revision": 41,
"volumes": [],
"status": "ACTIVE",
"requiresAttributes": [
{"name": "com.amazonaws.ecs.capability.logging-driver.awslogs"},
{"name": "ecs.capability.execution-role-awslogs"},
{"name": "com.amazonaws.ecs.capability.docker-remote-api.1.19"},
{"name": "com.amazonaws.ecs.capability.docker-remote-api.1.21"},
],
"placementConstraints": [],
"compatibilities": ["EC2"],
},
"ResponseMetadata": {
"RequestId": "01234567-8901-2345-6789-012345678901",
"HTTPStatusCode": 200,
"HTTPHeaders": {
"x-amzn-requestid": "01234567-8901-2345-6789-012345678901",
"content-type": "application/x-amz-json-1.1",
"content-length": "998",
"date": "Tue, 20 Jul 2020 20:00:00 GMT",
},
"RetryAttempts": 0,
},
},
]
describe_container_instances_parameters = {
"cluster": list_clusters_response["clusterArns"][0],
"containerInstances": list_container_instances_response["containerInstanceArns"],
}
describe_container_instances_response = {
"containerInstances": [
{
"containerInstanceArn": "arn:aws:ecs:eu-central-1:123456789123:container-instance/01234567-8901-8325-6789-012345678901",
"ec2InstanceId": "i-08c7123ef038a7cc4",
"version": 13,
"versionInfo": {
"agentVersion": "1.40.0",
"agentHash": "17e8d834",
"dockerVersion": "DockerVersion: 19.03.6-ce",
},
"remainingResources": [],
"registeredResources": [],
"status": "ACTIVE",
"agentConnected": True,
"runningTasksCount": 7,
"pendingTasksCount": 0,
"attributes": [
{"name": "ecs.capability.secrets.asm.environment-variables"},
{
"name": "ecs.capability.branch-cni-plugin-version",
"value": "ee068761-",
},
{"name": "ecs.ami-id", "value": "ami-08c4be469fbdca0fa"},
{"name": "ecs.capability.secrets.asm.bootstrap.log-driver"},
{"name": "ecs.capability.task-eia.optimized-cpu"},
{"name": "com.amazonaws.ecs.capability.logging-driver.none"},
{"name": "ecs.capability.ecr-endpoint"},
{"name": "ecs.capability.docker-plugin.local"},
{"name": "ecs.capability.task-cpu-mem-limit"},
{"name": "ecs.capability.secrets.ssm.bootstrap.log-driver"},
{"name": "ecs.capability.efsAuth"},
{"name": "com.amazonaws.ecs.capability.docker-remote-api.1.30"},
{"name": "ecs.capability.full-sync"},
{"name": "com.amazonaws.ecs.capability.docker-remote-api.1.31"},
{"name": "com.amazonaws.ecs.capability.docker-remote-api.1.32"},
{"name": "ecs.capability.firelens.options.config.file"},
{"name": "ecs.availability-zone", "value": "eu-central-1a"},
{"name": "ecs.capability.aws-appmesh"},
{"name": "com.amazonaws.ecs.capability.logging-driver.awslogs"},
{"name": "com.amazonaws.ecs.capability.docker-remote-api.1.24"},
{"name": "com.amazonaws.ecs.capability.docker-remote-api.1.25"},
{"name": "ecs.capability.task-eni-trunking"},
{"name": "com.amazonaws.ecs.capability.docker-remote-api.1.26"},
{"name": "com.amazonaws.ecs.capability.docker-remote-api.1.27"},
{"name": "com.amazonaws.ecs.capability.privileged-container"},
{"name": "com.amazonaws.ecs.capability.docker-remote-api.1.28"},
{"name": "com.amazonaws.ecs.capability.docker-remote-api.1.29"},
{"name": "ecs.cpu-architecture", "value": "x86_64"},
{"name": "ecs.capability.firelens.fluentbit"},
{"name": "com.amazonaws.ecs.capability.ecr-auth"},
{"name": "ecs.os-type", "value": "linux"},
{"name": "com.amazonaws.ecs.capability.docker-remote-api.1.20"},
{"name": "com.amazonaws.ecs.capability.docker-remote-api.1.21"},
{"name": "com.amazonaws.ecs.capability.docker-remote-api.1.22"},
{"name": "com.amazonaws.ecs.capability.docker-remote-api.1.23"},
{"name": "ecs.capability.task-eia"},
{"name": "ecs.capability.private-registry-authentication.secretsmanager"},
{"name": "com.amazonaws.ecs.capability.logging-driver.syslog"},
{"name": "com.amazonaws.ecs.capability.logging-driver.awsfirelens"},
{"name": "ecs.capability.firelens.options.config.s3"},
{"name": "com.amazonaws.ecs.capability.logging-driver.json-file"},
{"name": "ecs.vpc-id", "value": "vpc-0e98052f61664466c"},
{"name": "ecs.capability.execution-role-awslogs"},
{"name": "com.amazonaws.ecs.capability.docker-remote-api.1.17"},
{"name": "com.amazonaws.ecs.capability.docker-remote-api.1.18"},
{"name": "com.amazonaws.ecs.capability.docker-remote-api.1.19"},
{"name": "ecs.capability.docker-plugin.amazon-ecs-volume-plugin"},
{"name": "ecs.capability.task-eni"},
{"name": "ecs.capability.firelens.fluentd"},
{"name": "ecs.capability.efs"},
{"name": "ecs.capability.execution-role-ecr-pull"},
{"name": "ecs.capability.container-health-check"},
{"name": "ecs.subnet-id", "value": "subnet-017a912d7d7b5fecc"},
{"name": "ecs.instance-type", "value": "t3a.medium"},
{"name": "com.amazonaws.ecs.capability.task-iam-role-network-host"},
{"name": "ecs.capability.container-ordering"},
{
"name": "ecs.capability.cni-plugin-version",
"value": "9066095f-2019.10.0",
},
{"name": "ecs.capability.env-files.s3"},
{"name": "ecs.capability.secrets.ssm.environment-variables"},
{"name": "ecs.capability.pid-ipc-namespace-sharing"},
{"name": "com.amazonaws.ecs.capability.task-iam-role"},
],
"registeredAt": datetime.datetime(
2020, 7, 29, 8, 57, 39, 384000, tzinfo=tzlocal()
),
"attachments": [],
"tags": [],
},
{
"containerInstanceArn": "arn:aws:ecs:eu-central-1:123456789123:container-instance/e0e32bca-2890-4988-9c04-ebdef8085892",
"ec2InstanceId": "i-0b967c2479dd4f5af",
"version": 14,
"versionInfo": {
"agentVersion": "1.40.0",
"agentHash": "17e8d834",
"dockerVersion": "DockerVersion: 19.03.6-ce",
},
"remainingResources": [
{
"name": "CPU",
"type": "INTEGER",
"doubleValue": 0.0,
"longValue": 0,
"integerValue": 1638,
},
{
"name": "MEMORY",
"type": "INTEGER",
"doubleValue": 0.0,
"longValue": 0,
"integerValue": 1956,
},
{
"name": "PORTS",
"type": "STRINGSET",
"doubleValue": 0.0,
"longValue": 0,
"integerValue": 0,
"stringSetValue": ["22", "2376", "2375", "51678", "51679"],
},
{
"name": "PORTS_UDP",
"type": "STRINGSET",
"doubleValue": 0.0,
"longValue": 0,
"integerValue": 0,
"stringSetValue": [],
},
],
"registeredResources": [
{
"name": "CPU",
"type": "INTEGER",
"doubleValue": 0.0,
"longValue": 0,
"integerValue": 2048,
},
{
"name": "MEMORY",
"type": "INTEGER",
"doubleValue": 0.0,
"longValue": 0,
"integerValue": 3896,
},
{
"name": "PORTS",
"type": "STRINGSET",
"doubleValue": 0.0,
"longValue": 0,
"integerValue": 0,
"stringSetValue": ["22", "2376", "2375", "51678", "51679"],
},
{
"name": "PORTS_UDP",
"type": "STRINGSET",
"doubleValue": 0.0,
"longValue": 0,
"integerValue": 0,
"stringSetValue": [],
},
],
"status": "ACTIVE",
"agentConnected": True,
"runningTasksCount": 9,
"pendingTasksCount": 0,
"attributes": [
{"name": "ecs.capability.secrets.asm.environment-variables"},
{
"name": "ecs.capability.branch-cni-plugin-version",
"value": "ee068761-",
},
{"name": "ecs.ami-id", "value": "ami-08c4be469fbdca0fa"},
{"name": "ecs.capability.secrets.asm.bootstrap.log-driver"},
{"name": "ecs.capability.task-eia.optimized-cpu"},
{"name": "com.amazonaws.ecs.capability.logging-driver.none"},
{"name": "ecs.capability.ecr-endpoint"},
{"name": "ecs.capability.docker-plugin.local"},
{"name": "ecs.capability.task-cpu-mem-limit"},
{"name": "ecs.capability.secrets.ssm.bootstrap.log-driver"},
{"name": "ecs.capability.efsAuth"},
{"name": "com.amazonaws.ecs.capability.docker-remote-api.1.30"},
{"name": "ecs.capability.full-sync"},
{"name": "com.amazonaws.ecs.capability.docker-remote-api.1.31"},
{"name": "com.amazonaws.ecs.capability.docker-remote-api.1.32"},
{"name": "ecs.capability.firelens.options.config.file"},
{"name": "ecs.availability-zone", "value": "eu-central-1b"},
{"name": "ecs.capability.aws-appmesh"},
{"name": "com.amazonaws.ecs.capability.logging-driver.awslogs"},
{"name": "com.amazonaws.ecs.capability.docker-remote-api.1.24"},
{"name": "ecs.capability.task-eni-trunking"},
{"name": "com.amazonaws.ecs.capability.docker-remote-api.1.25"},
{"name": "com.amazonaws.ecs.capability.docker-remote-api.1.26"},
{"name": "com.amazonaws.ecs.capability.docker-remote-api.1.27"},
{"name": "com.amazonaws.ecs.capability.privileged-container"},
{"name": "com.amazonaws.ecs.capability.docker-remote-api.1.28"},
{"name": "com.amazonaws.ecs.capability.docker-remote-api.1.29"},
{"name": "ecs.cpu-architecture", "value": "x86_64"},
{"name": "com.amazonaws.ecs.capability.ecr-auth"},
{"name": "ecs.capability.firelens.fluentbit"},
{"name": "ecs.os-type", "value": "linux"},
{"name": "com.amazonaws.ecs.capability.docker-remote-api.1.20"},
{"name": "com.amazonaws.ecs.capability.docker-remote-api.1.21"},
{"name": "com.amazonaws.ecs.capability.docker-remote-api.1.22"},
{"name": "com.amazonaws.ecs.capability.docker-remote-api.1.23"},
{"name": "ecs.capability.task-eia"},
{"name": "ecs.capability.private-registry-authentication.secretsmanager"},
{"name": "com.amazonaws.ecs.capability.logging-driver.syslog"},
{"name": "com.amazonaws.ecs.capability.logging-driver.awsfirelens"},
{"name": "ecs.capability.firelens.options.config.s3"},
{"name": "com.amazonaws.ecs.capability.logging-driver.json-file"},
{"name": "ecs.vpc-id", "value": "vpc-0e98052f61664466c"},
{"name": "ecs.capability.execution-role-awslogs"},
{"name": "com.amazonaws.ecs.capability.docker-remote-api.1.17"},
{"name": "com.amazonaws.ecs.capability.docker-remote-api.1.18"},
{"name": "com.amazonaws.ecs.capability.docker-remote-api.1.19"},
{"name": "ecs.capability.docker-plugin.amazon-ecs-volume-plugin"},
{"name": "ecs.capability.task-eni"},
{"name": "ecs.capability.firelens.fluentd"},
{"name": "ecs.capability.efs"},
{"name": "ecs.capability.execution-role-ecr-pull"},
{"name": "ecs.capability.container-health-check"},
{"name": "ecs.subnet-id", "value": "subnet-032157d2370144a5c"},
{"name": "ecs.instance-type", "value": "t3a.medium"},
{"name": "com.amazonaws.ecs.capability.task-iam-role-network-host"},
{"name": "ecs.capability.container-ordering"},
{
"name": "ecs.capability.cni-plugin-version",
"value": "9066095f-2019.10.0",
},
{"name": "ecs.capability.env-files.s3"},
{"name": "ecs.capability.secrets.ssm.environment-variables"},
{"name": "ecs.capability.pid-ipc-namespace-sharing"},
{"name": "com.amazonaws.ecs.capability.task-iam-role"},
],
"registeredAt": datetime.datetime(
2020, 7, 29, 8, 57, 40, 648000, tzinfo=tzlocal()
),
"attachments": [],
"tags": [],
},
],
"failures": [],
"ResponseMetadata": {
"RequestId": "01234567-8901-2345-6789-012345678901",
"HTTPStatusCode": 200,
"HTTPHeaders": {
"x-amzn-requestid": "01234567-8901-2345-6789-012345678901",
"content-type": "application/x-amz-json-1.1",
"content-length": "9386",
"date": "Tue, 20 Jul 2020 20:00:00 GMT",
},
"RetryAttempts": 0,
},
}
describe_instances_parameters = {"InstanceIds": []}
for container_instance in describe_container_instances_response["containerInstances"]:
describe_instances_parameters["InstanceIds"].append(
container_instance["ec2InstanceId"]
)
describe_instances_response = {
"Reservations": [
{
"Groups": [],
"Instances": [
{
"AmiLaunchIndex": 0,
"ImageId": "ami-08c4be469fbdca0fa",
"InstanceId": "i-08c7123ef038a7cc4",
"InstanceType": "t3a.medium",
"KeyName": "data-dev",
"LaunchTime": datetime.datetime(
2020, 7, 29, 6, 56, 33, tzinfo=tzutc()
),
"Monitoring": {"State": "enabled"},
"Placement": {
"AvailabilityZone": "eu-central-1a",
"GroupName": "",
"Tenancy": "default",
},
"PrivateDnsName": "ip-10-0-1-73.eu-central-1.compute.internal",
"PrivateIpAddress": "10.0.1.73",
"ProductCodes": [],
"PublicDnsName": "",
"State": {"Code": 16, "Name": "running"},
"StateTransitionReason": "",
"SubnetId": "subnet-017a912d7d7b5fecc",
"VpcId": "vpc-0e98052f61664466c",
"Architecture": "x86_64",
"BlockDeviceMappings": [
{
"DeviceName": "/dev/xvda",
"Ebs": {
"AttachTime": datetime.datetime(
2020, 7, 29, 6, 56, 34, tzinfo=tzutc()
),
"DeleteOnTermination": True,
"Status": "attached",
"VolumeId": "vol-09595d7c24c463548",
},
},
{
"DeviceName": "/dev/xvds",
"Ebs": {
"AttachTime": datetime.datetime(
2020, 7, 29, 6, 56, 34, tzinfo=tzutc()
),
"DeleteOnTermination": True,
"Status": "attached",
"VolumeId": "vol-097b3fd039a2062b6",
},
},
],
"ClientToken": "<PASSWORD>",
"EbsOptimized": False,
"EnaSupport": True,
"Hypervisor": "xen",
"IamInstanceProfile": {
"Arn": "arn:aws:iam::123456789123:instance-profile/cluster-name",
"Id": "AIPA3ZZY5WMSYDKBUKM3U",
},
"NetworkInterfaces": [
{
"Attachment": {
"AttachTime": datetime.datetime(
2020, 7, 29, 6, 56, 33, tzinfo=tzutc()
),
"AttachmentId": "eni-attach-06bbff614335d2a60",
"DeleteOnTermination": True,
"DeviceIndex": 0,
"Status": "attached",
},
"Description": "",
"Groups": [
{
"GroupName": "terraform-20190906062328155300000001",
"GroupId": "sg-0b6a05f14d26f4be8",
}
],
"Ipv6Addresses": [],
"MacAddress": "02:3b:d2:1d:af:9e",
"NetworkInterfaceId": "eni-09de93eb056ea7c7f",
"OwnerId": "123456789123",
"PrivateDnsName": "ip-10-0-1-73.eu-central-1.compute.internal",
"PrivateIpAddress": "10.0.1.73",
"PrivateIpAddresses": [
{
"Primary": True,
"PrivateDnsName": "ip-10-0-1-73.eu-central-1.compute.internal",
"PrivateIpAddress": "10.0.1.73",
}
],
"SourceDestCheck": True,
"Status": "in-use",
"SubnetId": "subnet-017a912d7d7b5fecc",
"VpcId": "vpc-0e98052f61664466c",
"InterfaceType": "interface",
}
],
"RootDeviceName": "/dev/xvda",
"RootDeviceType": "ebs",
"SecurityGroups": [],
"SourceDestCheck": True,
"Tags": [],
"VirtualizationType": "hvm",
"CpuOptions": {"CoreCount": 1, "ThreadsPerCore": 2},
"CapacityReservationSpecification": {
"CapacityReservationPreference": "open"
},
"HibernationOptions": {"Configured": False},
"MetadataOptions": {
"State": "applied",
"HttpTokens": "optional",
"HttpPutResponseHopLimit": 1,
"HttpEndpoint": "enabled",
},
}
],
"OwnerId": "123456789123",
"RequesterId": "053592188284",
"ReservationId": "r-0d8935cfaa871752b",
},
{
"Groups": [],
"Instances": [
{
"AmiLaunchIndex": 0,
"ImageId": "ami-08c4be469fbdca0fa",
"InstanceId": "i-0b967c2479dd4f5af",
"InstanceType": "t3a.medium",
"KeyName": "data-dev",
"LaunchTime": datetime.datetime(
2020, 7, 29, 6, 56, 33, | |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import math
import numpy as np
import tensorflow as tf
from sklearn import preprocessing
import os
import inspect
import sys
import datetime
import cProfile
from enum import Enum
from sklearn import metrics
from sklearn.metrics import recall_score
from sklearn.metrics import average_precision_score
import threading
from tensorflow.python.platform import flags
from sklearn.metrics import confusion_matrix
from tensorflow.python.client import timeline
from customized_activations import maxout
from customized_rnncell import NewGRUCell
import Learning_rate
import Monitor
from param import RNNType
from MyThread import MyThread
from Log import Log
from Evaluate import evaluate_accuracy, evaluate_stat, evaluate_confusion
import Data
import Config
# for testing only
import cProfile
# check num of parameters
if len(sys.argv) < 2:
dconfile = 'config.json'
elif (sys.argv[1].isdigit()):
dconfile = 'config.json'
test_task = int(sys.argv[1]) # speed up testing
else:
dconfile = sys.argv[1]
logPath = './log/'
dataPath = './data/'
conf = Config.DataConfig(confile=dconfile)
task = conf.task
# overwrite testing task
try:
test_task
except NameError:
pass
else:
conf.test_id = [test_task]
# these data are generated by create_npy.py
x_file = 'x_mobility_context.npy'
y_file = 'y_mobility_point.npy'
mmsi_file = 'mmsi_mobility_point.npy'
# selection of cell type
rnnType = RNNType.GRU_b
gpuMode = conf.useGPU
exp_seq_len = conf.truncated_seq_len
deep_output = False
use_dropout = False
weight_initializer = conf.weight_initializer
evaluate_freq = conf.evaluate_freq
bias_initializer = tf.random_uniform_initializer(0, 0.001)
if conf.activation == "maxout":
rnnCell = NewGRUCell
activation_function = tf.nn.tanh
else:
rnnCell = tf.contrib.rnn.GRUCell
if conf.activation == "sigmoid":
activation_function = tf.nn.sigmoid
elif conf.activation == "relu":
activation_function = tf.nn.relu
else:
activation_function = tf.nn.tanh
lr = Learning_rate.Learning_rate(global_lr=0.001, decay_rate=0.999, decay_step=50)
# load data
x = np.load(dataPath + x_file)
y = np.load(dataPath+y_file)
mmsi = np.load(dataPath+mmsi_file)
# feature selection
def filter_features(x):
print("warning: not all featuers are used")
x = x[:, :, 0:40]
return x
#x = filter_features(x)
def filter_classes(x, y, mmsi, cls):
valid_index = np.concatenate([np.where(mmsi == i) for i in cls], axis=1)[0]
num_features = x.shape[2]
(x, y, mmsi) = Data.Data.reorganizeSeq(x, y, mmsi, exp_seq_len)
num_examples = x.shape[0]
unique_mmsi = np.unique(mmsi[0])
num_classes = len(np.unique(y))
test_vessel = conf.test_id
val_vessel = conf.val_id
if conf.testmode == "lobo":
(train_index, test_index, valid_index) = Data.Data.splitDataset(mmsi[0], test_vessel, val_vessel)
elif conf.testmode == "random":
(train_index, test_index, valid_index) = Data.Data.randomSplitDataset(mmsi[0], train_perc = conf.train_ratio, val_perc = conf.val_ratio)
print(train_index)
train_seq_len = mmsi[1][train_index]
test_seq_len = mmsi[1][test_index]
valid_seq_len = mmsi[1][valid_index]
num_class = np.unique(y).size
log = Log(task, logPath, num_class)
monitor = Monitor.Monitor(loss=True, num_class=num_class)
def encode_label(y):
"""encode label into a matrix based on the number of classes"""
num_class = np.unique(y).size
if num_class > 2: # multi-class
lb = preprocessing.LabelBinarizer()
lb.fit(range(num_class))
labels = np.array([lb.transform(i) for i in y])
#labels = lb.transform(y)
else: # 2-class
# the labels are stored in reverse in the numpy array
# fishing is labeled 0
Y0 = np.logical_not(y) * 1 # Y0 represents fishing
Y1 = y # Y1 represents non-fishing
labels = np.array([Y0, Y1])
labels = labels.transpose(1,2,0) # dim: [example; length; classes]
return labels
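# Worked example of the 2-class branch above (values assumed): for a single
# sequence y = [[0, 1, 1]] where 0 marks fishing, Y0 = [[1, 0, 0]] flags fishing
# and Y1 = [[0, 1, 1]] flags non-fishing, so encode_label returns an array of
# shape (1, 3, 2) with one one-hot pair per timestep.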
#labels = encode_label(y) # no need to encode y
labels = y
def get_all_data(conf):
"""generate data for all vessels"""
early = mmsi[1]
X = x.transpose((1, 0, 2))
return (X, labels, early)
class VesselModel(object):
"""The vessel classification lstm model."""
def __init__(self, config):
self.num_threads = conf.num_threads
self.hidden_size = conf.hidden_size
self.learning_rate = conf.learning_rate
self.num_layers = conf.num_layers
self.num_epochs = conf.num_epochs
self.batch_size = config.batch_size
self.is_training = config.is_training
self.is_validation = config.is_validation
self.current_step = tf.Variable(0)
# place holder for sequence that we will provide at runtime
# batch size will be different for training and testing set
self._input_data = tf.placeholder(tf.float32, [exp_seq_len, self.batch_size, num_features], name="input-data")
# target for one batch
self._targets = tf.placeholder(tf.int64, [self.batch_size, exp_seq_len], name = "y-target")
# get the length of all training and test sequences
if self.is_training:
self.seq_len = exp_seq_len*self.batch_size #sum(train_seq_len)
elif self.is_validation:
self.seq_len = sum(valid_seq_len)
else:
self.seq_len = sum(test_seq_len)
with tf.name_scope("lstm-cell") as scope:
rnn_cell = self.get_rnn_cell()
with tf.name_scope("multi-rnn-cell") as scope:
cell = self.get_multi_rnn_cell(rnn_cell)
# what timesteps we want to stop at, notice it's different for each batch
self._early_stop = tf.placeholder(tf.int64, shape=[self.batch_size], name = "early-stop")
self.set_initial_states(cell)
#with tf.name_scope("dropout") as scope:
# if self.is_training and config.keep_prob < 1:
# self._input_data = tf.nn.dropout(self._input_data, config.keep_prob)
outputs = []
# Creates a recurrent neural network specified by RNNCell "cell".
# inputs for rnn needs to be a list, each item being a timestep.
# Args:
# cell: An instance of RNNCell.
# inputs: A length T list of inputs, each a tensor of shape
# [batch_size, cell.input_size].
# initial_state: (optional) An initial state for the RNN. This must be
# a tensor of appropriate type and shape [batch_size x cell.state_size].
# dtype: (optional) The data type for the initial state. Required if
# initial_state is not provided.
# sequence_length: Specifies the length of each sequence in inputs.
# An int32 or int64 vector (tensor) size [batch_size]. Values in [0, T).
# scope: VariableScope for the created subgraph; defaults to "RNN".
#
# Returns:
# A pair (outputs, state) where:
# outputs is a length T list of outputs (one for each input)
# state is the final state
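# A minimal sketch of the list-based API described above (illustrative only, for
# the unidirectional case; get_outputs below builds the actual graph):
#   step_inputs = tf.unstack(self._input_data, axis=0)  # T tensors of [batch, features]
#   outputs, state = tf.nn.static_rnn(cell, step_inputs,
#                                     sequence_length=self._early_stop,
#                                     dtype=tf.float32)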
with tf.name_scope("rnn-outputs") as scope:
self.get_outputs(cell)
self.valid_target = self.get_valid_sequence(tf.reshape(self._targets, [exp_seq_len * self.batch_size]), num_classes) # valid digit target
self.lstm_output = self.valid_output
if deep_output:
with tf.name_scope("deep-output-layer") as scope:
softmax_size = self.hidden_size * 2 if rnnType == RNNType.LSTM_b or rnnType == RNNType.GRU_b else self.hidden_size
softmax_wout = tf.get_variable("softmax_w_deepout", [softmax_size, self.higher_hidden_size])
softmaxb_dout = tf.get_variable("softmax_b_deepout", [self.higher_hidden_size])
self.valid_output = tf.sigmoid(tf.matmul(self.valid_output, softmax_wout) + softmaxb_dout)
if use_dropout:
self.valid_output = tf.nn.dropout(self.valid_output, keep_prob = 0.5)
#softmax_wout2 = tf.get_variable("softmax_w_deepout2", [self.hidden_size, self.hidden_size])
#softmaxb_dout2 = tf.get_variable("softmax_b_deepout2", [self.hidden_size])
#self.valid_output = tf.matmul(self.valid_output, softmax_wout2) + softmaxb_dout2
#if use_dropout:
# self.valid_output = tf.nn.dropout(self.valid_output, keep_prob = 0.5)
with tf.name_scope("softmax-W") as scope:
softmax_w = self.get_softmax_layer()
self.w = softmax_w
with tf.name_scope("softmax-b") as scope:
softmax_b = tf.get_variable("softmax_b", [num_classes], initializer=bias_initializer)
with tf.name_scope("softmax-predictions") as scope:
self._predictions = tf.matmul(self.valid_output, softmax_w) + softmax_b
self._prob_predictions = tf.nn.softmax(self._predictions)
self.digit_predictions = tf.argmax(self._prob_predictions, axis=1)
with tf.name_scope("confusion-matrix") as scope:
self.confusion_matrix = tf.confusion_matrix(self.valid_target, self.digit_predictions)
# Weighted cross-entropy loss for a sequence of logits (per example).
# at: tensorflow/python/ops/seq2seq.py
# Args:
# logits: List of 2D Tensors of shape [batch_size x num_decoder_symbols].
# targets: List of 1D batch-sized int32 Tensors of the same length as logits.
# weights: List of 1D batch-sized float-Tensors of the same length as logits.
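# Here the flattened batch is passed as a single "time step": one logits tensor,
# one target tensor, and a matching vector of ones as the per-example weights.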
with tf.name_scope("seq2seq-loss-by-example") as scpoe:
self.loss = tf.contrib.legacy_seq2seq.sequence_loss_by_example(
[self._predictions],
[self.valid_target],
[tf.ones([int(self.getTensorShape(self.valid_target)[0])])])
self._cost = tf.reduce_mean(self.loss)
self._accuracy = tf.contrib.metrics.accuracy(self.digit_predictions, self.valid_target)
# Add summary ops to collect data
if conf.tensorboard:
self.w_hist = tf.summary.histogram("weights", softmax_w)
self.b_hist = tf.summary.histogram("biases", softmax_b)
self.y_hist_train = tf.summary.histogram("train-predictions", self._predictions)
self.y_hist_test = tf.summary.histogram("test-predictions", self._predictions)
self.mse_summary_train = tf.summary.scalar("train-cross-entropy-cost", self._cost)
self.mse_summary_test = tf.summary.scalar("test-cross-entropy-cost", self._cost)
with tf.name_scope("optimization") as scope:
self._train_op = tf.train.AdamOptimizer(self.learning_rate).minimize(self._cost, global_step=self.current_step)
#self._train_op = tf.train.GradientDescentOptimizer(self.learning_rate).minimize(self._cost, global_step=self.current_step)
def get_rnn_cell(self):
"""Create rnn_cell based on RNN type"""
if rnnType == RNNType.LSTM_b:
lstm_cell_fw = tf.contrib.rnn.LSTMCell(self.hidden_size, state_is_tuple=True, use_peepholes=conf.peephole)
lstm_cell_bw = tf.contrib.rnn.LSTMCell(self.hidden_size, state_is_tuple=True, use_peepholes=conf.peephole)
return (lstm_cell_fw, lstm_cell_bw)
elif rnnType == RNNType.LSTM_u:
# LSTMCell is used here because it accepts a weight initializer (BasicLSTMCell does not).
lstm_cell = tf.contrib.rnn.LSTMCell(self.hidden_size, forget_bias=1, state_is_tuple=True, initializer=weight_initializer)
return lstm_cell
elif rnnType == RNNType.GRU:
gru_cell = rnnCell(self.hidden_size, activation=activation_function)
return gru_cell
else:
lstm_cell_fw = rnnCell(self.hidden_size, activation=activation_function)
lstm_cell_bw = rnnCell(self.hidden_size, activation=activation_function)
return (lstm_cell_fw, lstm_cell_bw)
def get_multi_rnn_cell(self, rnn_cell):
"""Create multiple layers of rnn_cell based on RNN type"""
if rnnType == RNNType.LSTM_b or rnnType == RNNType.GRU_b:
(lstm_cell_fw, lstm_cell_bw) = rnn_cell
cell_fw = tf.contrib.rnn.MultiRNNCell([rnnCell(self.hidden_size, activation=activation_function) for _ in range(self.num_layers)])
cell_bw = tf.contrib.rnn.MultiRNNCell([rnnCell(self.hidden_size, activation=activation_function) for _ in range(self.num_layers)])
return (cell_fw, cell_bw)  # return the stacked multi-layer cells, not the single-layer inputs
elif rnnType == RNNType.LSTM_u or rnnType == RNNType.GRU:
cell = tf.contrib.rnn.MultiRNNCell([rnnCell(self.hidden_size, activation=activation_function) for _ in range(self.num_layers)])
return cell
def set_initial_states(self, cell):
"""set initial states based on RNN types"""
# Initial state of the LSTM memory
# If `state_size` is an int or TensorShape, then the return value is a
# `N-D` tensor of shape `[batch_size x state_size]` filled with zeros.
# If `state_size` is a nested list or tuple, then the return value is
# a nested list or tuple (of the same structure) of `2-D` tensors with
# the shapes `[batch_size x s]` for each s in `state_size`.
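# For example, with a MultiRNNCell of num_layers GRU cells, zero_state returns a
# tuple of num_layers zero tensors, each of shape [batch_size, hidden_size].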
if rnnType == RNNType.LSTM_b or rnnType == RNNType.GRU_b:
(cell_fw, cell_bw) = cell
self.initial_state_fw = cell_fw.zero_state(self.batch_size, tf.float32)
self.initial_state_bw = cell_bw.zero_state(self.batch_size, tf.float32)
elif rnnType == RNNType.LSTM_u or rnnType == RNNType.GRU:
self._initial_state = cell.zero_state(self.batch_size, tf.float32)
def get_outputs(self, cell):
""" get output tensor of the RNN"""
# At: tensorflow/tensorflow/python/ops/rnn.py
# Args:
# Unlike `rnn`, the input `inputs` is not a Python list of `Tensors`. Instead,
# it is a single `Tensor` where the maximum time is either the first or second
# dimension (see the parameter `time_major`). The corresponding output is
# a single `Tensor` having the same number of time steps and batch size.
#
# If time_major == False (default), this must be a tensor of shape:
# `[batch_size, max_time, input_size]`, or a nested tuple | |
pulumi.Input[str],
value: pulumi.Input[str]):
pulumi.set(__self__, "key", key)
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def key(self) -> pulumi.Input[str]:
return pulumi.get(self, "key")
@key.setter
def key(self, value: pulumi.Input[str]):
pulumi.set(self, "key", value)
@property
@pulumi.getter
def value(self) -> pulumi.Input[str]:
return pulumi.get(self, "value")
@value.setter
def value(self, value: pulumi.Input[str]):
pulumi.set(self, "value", value)
@pulumi.input_type
class DestinationOptionsPropertiesArgs:
def __init__(__self__, *,
file_format: pulumi.Input['FlowLogDestinationOptionsPropertiesFileFormat'],
hive_compatible_partitions: pulumi.Input[bool],
per_hour_partition: pulumi.Input[bool]):
pulumi.set(__self__, "file_format", file_format)
pulumi.set(__self__, "hive_compatible_partitions", hive_compatible_partitions)
pulumi.set(__self__, "per_hour_partition", per_hour_partition)
@property
@pulumi.getter(name="fileFormat")
def file_format(self) -> pulumi.Input['FlowLogDestinationOptionsPropertiesFileFormat']:
return pulumi.get(self, "file_format")
@file_format.setter
def file_format(self, value: pulumi.Input['FlowLogDestinationOptionsPropertiesFileFormat']):
pulumi.set(self, "file_format", value)
@property
@pulumi.getter(name="hiveCompatiblePartitions")
def hive_compatible_partitions(self) -> pulumi.Input[bool]:
return pulumi.get(self, "hive_compatible_partitions")
@hive_compatible_partitions.setter
def hive_compatible_partitions(self, value: pulumi.Input[bool]):
pulumi.set(self, "hive_compatible_partitions", value)
@property
@pulumi.getter(name="perHourPartition")
def per_hour_partition(self) -> pulumi.Input[bool]:
return pulumi.get(self, "per_hour_partition")
@per_hour_partition.setter
def per_hour_partition(self, value: pulumi.Input[bool]):
pulumi.set(self, "per_hour_partition", value)
@pulumi.input_type
class EC2FleetAcceleratorCountRequestArgs:
def __init__(__self__, *,
max: Optional[pulumi.Input[int]] = None,
min: Optional[pulumi.Input[int]] = None):
if max is not None:
pulumi.set(__self__, "max", max)
if min is not None:
pulumi.set(__self__, "min", min)
@property
@pulumi.getter
def max(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "max")
@max.setter
def max(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "max", value)
@property
@pulumi.getter
def min(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "min")
@min.setter
def min(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "min", value)
@pulumi.input_type
class EC2FleetAcceleratorTotalMemoryMiBRequestArgs:
def __init__(__self__, *,
max: Optional[pulumi.Input[int]] = None,
min: Optional[pulumi.Input[int]] = None):
if max is not None:
pulumi.set(__self__, "max", max)
if min is not None:
pulumi.set(__self__, "min", min)
@property
@pulumi.getter
def max(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "max")
@max.setter
def max(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "max", value)
@property
@pulumi.getter
def min(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "min")
@min.setter
def min(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "min", value)
@pulumi.input_type
class EC2FleetBaselineEbsBandwidthMbpsRequestArgs:
def __init__(__self__, *,
max: Optional[pulumi.Input[int]] = None,
min: Optional[pulumi.Input[int]] = None):
if max is not None:
pulumi.set(__self__, "max", max)
if min is not None:
pulumi.set(__self__, "min", min)
@property
@pulumi.getter
def max(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "max")
@max.setter
def max(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "max", value)
@property
@pulumi.getter
def min(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "min")
@min.setter
def min(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "min", value)
@pulumi.input_type
class EC2FleetCapacityRebalanceArgs:
def __init__(__self__, *,
replacement_strategy: Optional[pulumi.Input['EC2FleetCapacityRebalanceReplacementStrategy']] = None,
termination_delay: Optional[pulumi.Input[int]] = None):
if replacement_strategy is not None:
pulumi.set(__self__, "replacement_strategy", replacement_strategy)
if termination_delay is not None:
pulumi.set(__self__, "termination_delay", termination_delay)
@property
@pulumi.getter(name="replacementStrategy")
def replacement_strategy(self) -> Optional[pulumi.Input['EC2FleetCapacityRebalanceReplacementStrategy']]:
return pulumi.get(self, "replacement_strategy")
@replacement_strategy.setter
def replacement_strategy(self, value: Optional[pulumi.Input['EC2FleetCapacityRebalanceReplacementStrategy']]):
pulumi.set(self, "replacement_strategy", value)
@property
@pulumi.getter(name="terminationDelay")
def termination_delay(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "termination_delay")
@termination_delay.setter
def termination_delay(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "termination_delay", value)
@pulumi.input_type
class EC2FleetCapacityReservationOptionsRequestArgs:
def __init__(__self__, *,
usage_strategy: Optional[pulumi.Input['EC2FleetCapacityReservationOptionsRequestUsageStrategy']] = None):
if usage_strategy is not None:
pulumi.set(__self__, "usage_strategy", usage_strategy)
@property
@pulumi.getter(name="usageStrategy")
def usage_strategy(self) -> Optional[pulumi.Input['EC2FleetCapacityReservationOptionsRequestUsageStrategy']]:
return pulumi.get(self, "usage_strategy")
@usage_strategy.setter
def usage_strategy(self, value: Optional[pulumi.Input['EC2FleetCapacityReservationOptionsRequestUsageStrategy']]):
pulumi.set(self, "usage_strategy", value)
@pulumi.input_type
class EC2FleetFleetLaunchTemplateConfigRequestArgs:
def __init__(__self__, *,
launch_template_specification: Optional[pulumi.Input['EC2FleetFleetLaunchTemplateSpecificationRequestArgs']] = None,
overrides: Optional[pulumi.Input[Sequence[pulumi.Input['EC2FleetFleetLaunchTemplateOverridesRequestArgs']]]] = None):
if launch_template_specification is not None:
pulumi.set(__self__, "launch_template_specification", launch_template_specification)
if overrides is not None:
pulumi.set(__self__, "overrides", overrides)
@property
@pulumi.getter(name="launchTemplateSpecification")
def launch_template_specification(self) -> Optional[pulumi.Input['EC2FleetFleetLaunchTemplateSpecificationRequestArgs']]:
return pulumi.get(self, "launch_template_specification")
@launch_template_specification.setter
def launch_template_specification(self, value: Optional[pulumi.Input['EC2FleetFleetLaunchTemplateSpecificationRequestArgs']]):
pulumi.set(self, "launch_template_specification", value)
@property
@pulumi.getter
def overrides(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['EC2FleetFleetLaunchTemplateOverridesRequestArgs']]]]:
return pulumi.get(self, "overrides")
@overrides.setter
def overrides(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['EC2FleetFleetLaunchTemplateOverridesRequestArgs']]]]):
pulumi.set(self, "overrides", value)
@pulumi.input_type
class EC2FleetFleetLaunchTemplateOverridesRequestArgs:
def __init__(__self__, *,
availability_zone: Optional[pulumi.Input[str]] = None,
instance_requirements: Optional[pulumi.Input['EC2FleetInstanceRequirementsRequestArgs']] = None,
instance_type: Optional[pulumi.Input[str]] = None,
max_price: Optional[pulumi.Input[str]] = None,
placement: Optional[pulumi.Input['EC2FleetPlacementArgs']] = None,
priority: Optional[pulumi.Input[float]] = None,
subnet_id: Optional[pulumi.Input[str]] = None,
weighted_capacity: Optional[pulumi.Input[float]] = None):
if availability_zone is not None:
pulumi.set(__self__, "availability_zone", availability_zone)
if instance_requirements is not None:
pulumi.set(__self__, "instance_requirements", instance_requirements)
if instance_type is not None:
pulumi.set(__self__, "instance_type", instance_type)
if max_price is not None:
pulumi.set(__self__, "max_price", max_price)
if placement is not None:
pulumi.set(__self__, "placement", placement)
if priority is not None:
pulumi.set(__self__, "priority", priority)
if subnet_id is not None:
pulumi.set(__self__, "subnet_id", subnet_id)
if weighted_capacity is not None:
pulumi.set(__self__, "weighted_capacity", weighted_capacity)
@property
@pulumi.getter(name="availabilityZone")
def availability_zone(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "availability_zone")
@availability_zone.setter
def availability_zone(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "availability_zone", value)
@property
@pulumi.getter(name="instanceRequirements")
def instance_requirements(self) -> Optional[pulumi.Input['EC2FleetInstanceRequirementsRequestArgs']]:
return pulumi.get(self, "instance_requirements")
@instance_requirements.setter
def instance_requirements(self, value: Optional[pulumi.Input['EC2FleetInstanceRequirementsRequestArgs']]):
pulumi.set(self, "instance_requirements", value)
@property
@pulumi.getter(name="instanceType")
def instance_type(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "instance_type")
@instance_type.setter
def instance_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "instance_type", value)
@property
@pulumi.getter(name="maxPrice")
def max_price(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "max_price")
@max_price.setter
def max_price(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "max_price", value)
@property
@pulumi.getter
def placement(self) -> Optional[pulumi.Input['EC2FleetPlacementArgs']]:
return pulumi.get(self, "placement")
@placement.setter
def placement(self, value: Optional[pulumi.Input['EC2FleetPlacementArgs']]):
pulumi.set(self, "placement", value)
@property
@pulumi.getter
def priority(self) -> Optional[pulumi.Input[float]]:
return pulumi.get(self, "priority")
@priority.setter
def priority(self, value: Optional[pulumi.Input[float]]):
pulumi.set(self, "priority", value)
@property
@pulumi.getter(name="subnetId")
def subnet_id(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "subnet_id")
@subnet_id.setter
def subnet_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "subnet_id", value)
@property
@pulumi.getter(name="weightedCapacity")
def weighted_capacity(self) -> Optional[pulumi.Input[float]]:
return pulumi.get(self, "weighted_capacity")
@weighted_capacity.setter
def weighted_capacity(self, value: Optional[pulumi.Input[float]]):
pulumi.set(self, "weighted_capacity", value)
@pulumi.input_type
class EC2FleetFleetLaunchTemplateSpecificationRequestArgs:
def __init__(__self__, *,
launch_template_id: Optional[pulumi.Input[str]] = None,
launch_template_name: Optional[pulumi.Input[str]] = None,
version: Optional[pulumi.Input[str]] = None):
if launch_template_id is not None:
pulumi.set(__self__, "launch_template_id", launch_template_id)
if launch_template_name is not None:
pulumi.set(__self__, "launch_template_name", launch_template_name)
if version is not None:
pulumi.set(__self__, "version", version)
@property
@pulumi.getter(name="launchTemplateId")
def launch_template_id(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "launch_template_id")
@launch_template_id.setter
def launch_template_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "launch_template_id", value)
@property
@pulumi.getter(name="launchTemplateName")
def launch_template_name(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "launch_template_name")
@launch_template_name.setter
def launch_template_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "launch_template_name", value)
@property
@pulumi.getter
def version(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "version")
@version.setter
def version(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "version", value)
@pulumi.input_type
class EC2FleetInstanceRequirementsRequestArgs:
def __init__(__self__, *,
accelerator_count: Optional[pulumi.Input['EC2FleetAcceleratorCountRequestArgs']] = None,
accelerator_manufacturers: Optional[pulumi.Input[Sequence[pulumi.Input['EC2FleetInstanceRequirementsRequestAcceleratorManufacturersItem']]]] = None,
accelerator_names: Optional[pulumi.Input[Sequence[pulumi.Input['EC2FleetInstanceRequirementsRequestAcceleratorNamesItem']]]] = None,
accelerator_total_memory_mi_b: Optional[pulumi.Input['EC2FleetAcceleratorTotalMemoryMiBRequestArgs']] = None,
accelerator_types: Optional[pulumi.Input[Sequence[pulumi.Input['EC2FleetInstanceRequirementsRequestAcceleratorTypesItem']]]] = None,
bare_metal: Optional[pulumi.Input['EC2FleetInstanceRequirementsRequestBareMetal']] = None,
baseline_ebs_bandwidth_mbps: Optional[pulumi.Input['EC2FleetBaselineEbsBandwidthMbpsRequestArgs']] = None,
burstable_performance: Optional[pulumi.Input['EC2FleetInstanceRequirementsRequestBurstablePerformance']] = None,
cpu_manufacturers: Optional[pulumi.Input[Sequence[pulumi.Input['EC2FleetInstanceRequirementsRequestCpuManufacturersItem']]]] = None,
excluded_instance_types: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
instance_generations: Optional[pulumi.Input[Sequence[pulumi.Input['EC2FleetInstanceRequirementsRequestInstanceGenerationsItem']]]] = None,
local_storage: Optional[pulumi.Input['EC2FleetInstanceRequirementsRequestLocalStorage']] = None,
local_storage_types: Optional[pulumi.Input[Sequence[pulumi.Input['EC2FleetInstanceRequirementsRequestLocalStorageTypesItem']]]] = None,
memory_gi_b_per_v_cpu: Optional[pulumi.Input['EC2FleetMemoryGiBPerVCpuRequestArgs']] = None,
memory_mi_b: Optional[pulumi.Input['EC2FleetMemoryMiBRequestArgs']] = None,
network_interface_count: Optional[pulumi.Input['EC2FleetNetworkInterfaceCountRequestArgs']] = None,
on_demand_max_price_percentage_over_lowest_price: Optional[pulumi.Input[int]] = None,
require_hibernate_support: Optional[pulumi.Input[bool]] = None,
spot_max_price_percentage_over_lowest_price: Optional[pulumi.Input[int]] = None,
total_local_storage_gb: Optional[pulumi.Input['EC2FleetTotalLocalStorageGBRequestArgs']] = None,
v_cpu_count: Optional[pulumi.Input['EC2FleetVCpuCountRangeRequestArgs']] = None):
if accelerator_count is not None:
pulumi.set(__self__, "accelerator_count", accelerator_count)
if accelerator_manufacturers is not None:
pulumi.set(__self__, "accelerator_manufacturers", accelerator_manufacturers)
if accelerator_names is not None:
pulumi.set(__self__, "accelerator_names", accelerator_names)
if accelerator_total_memory_mi_b is not None:
pulumi.set(__self__, "accelerator_total_memory_mi_b", accelerator_total_memory_mi_b)
if accelerator_types is not None:
pulumi.set(__self__, "accelerator_types", accelerator_types)
if bare_metal is not None:
pulumi.set(__self__, "bare_metal", bare_metal)
if baseline_ebs_bandwidth_mbps is not None:
pulumi.set(__self__, "baseline_ebs_bandwidth_mbps", baseline_ebs_bandwidth_mbps)
if burstable_performance is not None:
pulumi.set(__self__, "burstable_performance", burstable_performance)
if cpu_manufacturers is not None:
pulumi.set(__self__, "cpu_manufacturers", cpu_manufacturers)
if excluded_instance_types is not None:
pulumi.set(__self__, "excluded_instance_types", excluded_instance_types)
if instance_generations is not None:
pulumi.set(__self__, "instance_generations", instance_generations)
if local_storage is not None:
pulumi.set(__self__, "local_storage", local_storage)
if local_storage_types is not None:
pulumi.set(__self__, "local_storage_types", local_storage_types)
if memory_gi_b_per_v_cpu is not None:
pulumi.set(__self__, "memory_gi_b_per_v_cpu", memory_gi_b_per_v_cpu)
if memory_mi_b is not None:
pulumi.set(__self__, "memory_mi_b", memory_mi_b)
if network_interface_count is not None:
pulumi.set(__self__, "network_interface_count", network_interface_count)
if on_demand_max_price_percentage_over_lowest_price is not None:
pulumi.set(__self__, "on_demand_max_price_percentage_over_lowest_price", on_demand_max_price_percentage_over_lowest_price)
if require_hibernate_support is not None:
pulumi.set(__self__, "require_hibernate_support", require_hibernate_support)
if spot_max_price_percentage_over_lowest_price is not None:
pulumi.set(__self__, "spot_max_price_percentage_over_lowest_price", spot_max_price_percentage_over_lowest_price)
if total_local_storage_gb is not None:
pulumi.set(__self__, "total_local_storage_gb", total_local_storage_gb)
if v_cpu_count is not None:
pulumi.set(__self__, "v_cpu_count", v_cpu_count)
@property
@pulumi.getter(name="acceleratorCount")
def accelerator_count(self) -> Optional[pulumi.Input['EC2FleetAcceleratorCountRequestArgs']]:
return pulumi.get(self, "accelerator_count")
@accelerator_count.setter
def accelerator_count(self, value: Optional[pulumi.Input['EC2FleetAcceleratorCountRequestArgs']]):
pulumi.set(self, "accelerator_count", value)
@property
@pulumi.getter(name="acceleratorManufacturers")
def accelerator_manufacturers(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['EC2FleetInstanceRequirementsRequestAcceleratorManufacturersItem']]]]:
return pulumi.get(self, "accelerator_manufacturers")
@accelerator_manufacturers.setter
def accelerator_manufacturers(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['EC2FleetInstanceRequirementsRequestAcceleratorManufacturersItem']]]]):
pulumi.set(self, "accelerator_manufacturers", value)
@property
@pulumi.getter(name="acceleratorNames")
def accelerator_names(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['EC2FleetInstanceRequirementsRequestAcceleratorNamesItem']]]]:
return pulumi.get(self, "accelerator_names")
@accelerator_names.setter
def accelerator_names(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['EC2FleetInstanceRequirementsRequestAcceleratorNamesItem']]]]):
pulumi.set(self, "accelerator_names", value)
@property
@pulumi.getter(name="acceleratorTotalMemoryMiB")
def accelerator_total_memory_mi_b(self) -> Optional[pulumi.Input['EC2FleetAcceleratorTotalMemoryMiBRequestArgs']]:
return pulumi.get(self, "accelerator_total_memory_mi_b")
@accelerator_total_memory_mi_b.setter
def accelerator_total_memory_mi_b(self, value: Optional[pulumi.Input['EC2FleetAcceleratorTotalMemoryMiBRequestArgs']]):
pulumi.set(self, "accelerator_total_memory_mi_b", value)
@property
@pulumi.getter(name="acceleratorTypes")
def accelerator_types(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['EC2FleetInstanceRequirementsRequestAcceleratorTypesItem']]]]:
return pulumi.get(self, "accelerator_types")
@accelerator_types.setter
def accelerator_types(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['EC2FleetInstanceRequirementsRequestAcceleratorTypesItem']]]]):
pulumi.set(self, "accelerator_types", value)
@property
@pulumi.getter(name="bareMetal")
def bare_metal(self) -> Optional[pulumi.Input['EC2FleetInstanceRequirementsRequestBareMetal']]:
return pulumi.get(self, "bare_metal")
@bare_metal.setter
def bare_metal(self, value: Optional[pulumi.Input['EC2FleetInstanceRequirementsRequestBareMetal']]):
pulumi.set(self, "bare_metal", value)
@property
@pulumi.getter(name="baselineEbsBandwidthMbps")
def baseline_ebs_bandwidth_mbps(self) -> Optional[pulumi.Input['EC2FleetBaselineEbsBandwidthMbpsRequestArgs']]:
return pulumi.get(self, "baseline_ebs_bandwidth_mbps")
@baseline_ebs_bandwidth_mbps.setter
def baseline_ebs_bandwidth_mbps(self, value: Optional[pulumi.Input['EC2FleetBaselineEbsBandwidthMbpsRequestArgs']]):
pulumi.set(self, "baseline_ebs_bandwidth_mbps", value)
@property
@pulumi.getter(name="burstablePerformance")
def burstable_performance(self) -> Optional[pulumi.Input['EC2FleetInstanceRequirementsRequestBurstablePerformance']]:
return pulumi.get(self, "burstable_performance")
@burstable_performance.setter
def burstable_performance(self, value: Optional[pulumi.Input['EC2FleetInstanceRequirementsRequestBurstablePerformance']]):
pulumi.set(self, "burstable_performance", value)
@property
@pulumi.getter(name="cpuManufacturers")
def cpu_manufacturers(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['EC2FleetInstanceRequirementsRequestCpuManufacturersItem']]]]:
return pulumi.get(self, "cpu_manufacturers")
@cpu_manufacturers.setter
def cpu_manufacturers(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['EC2FleetInstanceRequirementsRequestCpuManufacturersItem']]]]):
pulumi.set(self, "cpu_manufacturers", value)
@property
@pulumi.getter(name="excludedInstanceTypes")
def excluded_instance_types(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
return pulumi.get(self, "excluded_instance_types")
@excluded_instance_types.setter
def excluded_instance_types(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "excluded_instance_types", value)
@property
@pulumi.getter(name="instanceGenerations")
def instance_generations(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['EC2FleetInstanceRequirementsRequestInstanceGenerationsItem']]]]:
return pulumi.get(self, "instance_generations")
@instance_generations.setter
def instance_generations(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['EC2FleetInstanceRequirementsRequestInstanceGenerationsItem']]]]):
pulumi.set(self, "instance_generations", value)
@property
@pulumi.getter(name="localStorage")
def local_storage(self) -> Optional[pulumi.Input['EC2FleetInstanceRequirementsRequestLocalStorage']]:
return pulumi.get(self, "local_storage")
@local_storage.setter
def local_storage(self, value: Optional[pulumi.Input['EC2FleetInstanceRequirementsRequestLocalStorage']]):
pulumi.set(self, "local_storage", value)
@property
@pulumi.getter(name="localStorageTypes")
def local_storage_types(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['EC2FleetInstanceRequirementsRequestLocalStorageTypesItem']]]]:
return pulumi.get(self, "local_storage_types")
@local_storage_types.setter
def local_storage_types(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['EC2FleetInstanceRequirementsRequestLocalStorageTypesItem']]]]):
pulumi.set(self, "local_storage_types", value)
@property
@pulumi.getter(name="memoryGiBPerVCpu")
def memory_gi_b_per_v_cpu(self) -> Optional[pulumi.Input['EC2FleetMemoryGiBPerVCpuRequestArgs']]:
return pulumi.get(self, "memory_gi_b_per_v_cpu")
@memory_gi_b_per_v_cpu.setter
def memory_gi_b_per_v_cpu(self, value: Optional[pulumi.Input['EC2FleetMemoryGiBPerVCpuRequestArgs']]):
pulumi.set(self, "memory_gi_b_per_v_cpu", value)
@property
@pulumi.getter(name="memoryMiB")
def | |
self.Label64_5.place(relx=0.25, rely=0.625, height=21, width=200)
self.Label64_5.configure(activebackground="#f9f9f9")
self.Label64_5.configure(activeforeground="black")
self.Label64_5.configure(background="#86bad8")
self.Label64_5.configure(disabledforeground="#a3a3a3")
self.Label64_5.configure(foreground="#000000")
self.Label64_5.configure(highlightbackground="#d9d9d9")
self.Label64_5.configure(highlightcolor="black")
self.Label64_5.configure(text='''Fracture Density Vs Beta''')
# self.FracBetaCanvas.configure(width=323)
# self.FracDensityExport = tk.Button(self.TNotebook1_t3)
# self.FracDensityExport.place(relx=0.279, rely=0.017, height=24, width=44)
# self.FracDensityExport.configure(activebackground="#ececec")
# self.FracDensityExport.configure(activeforeground="#000000")
# self.FracDensityExport.configure(background="#ffae21")
# self.FracDensityExport.configure(disabledforeground="#a3a3a3")
# self.FracDensityExport.configure(foreground="#000000")
# self.FracDensityExport.configure(highlightbackground="#d9d9d9")
# self.FracDensityExport.configure(highlightcolor="black")
# self.FracDensityExport.configure(pady="0")
# self.FracDensityExport.configure(text='''Export''')
# self.LogFracDensityExport = tk.Button(self.TNotebook1_t3)
# self.LogFracDensityExport.place(relx=0.623, rely=0.017, height=24
# , width=44)
# self.LogFracDensityExport.configure(activebackground="#ececec")
# self.LogFracDensityExport.configure(activeforeground="#000000")
# self.LogFracDensityExport.configure(background="#ffae21")
# self.LogFracDensityExport.configure(disabledforeground="#a3a3a3")
# self.LogFracDensityExport.configure(foreground="#000000")
# self.LogFracDensityExport.configure(highlightbackground="#d9d9d9")
# self.LogFracDensityExport.configure(highlightcolor="black")
# self.LogFracDensityExport.configure(pady="0")
# self.LogFracDensityExport.configure(text='''Export''')
# self.UniformExport = tk.Button(self.TNotebook1_t3)
# self.UniformExport.place(relx=0.926, rely=0.5, height=24, width=44)
# self.UniformExport.configure(activebackground="#ececec")
# self.UniformExport.configure(activeforeground="#000000")
# self.UniformExport.configure(background="#ffae21")
# self.UniformExport.configure(disabledforeground="#a3a3a3")
# self.UniformExport.configure(foreground="#000000")
# self.UniformExport.configure(highlightbackground="#d9d9d9")
# self.UniformExport.configure(highlightcolor="black")
# self.UniformExport.configure(pady="0")
# self.UniformExport.configure(text='''Export''')
# self.LogFracDensityExport = tk.Button(self.TNotebook1_t3)
# self.LogFracDensityExport.place(relx=0.157, rely=0.617, height=24
# , width=44)
# self.LogFracDensityExport.configure(activebackground="#ececec")
# self.LogFracDensityExport.configure(activeforeground="#000000")
# self.LogFracDensityExport.configure(background="#ffae21")
# self.LogFracDensityExport.configure(disabledforeground="#a3a3a3")
# self.LogFracDensityExport.configure(foreground="#000000")
# self.LogFracDensityExport.configure(highlightbackground="#d9d9d9")
# self.LogFracDensityExport.configure(highlightcolor="black")
# self.LogFracDensityExport.configure(pady="0")
# self.LogFracDensityExport.configure(text='''Export''')
# self.LogFracDensityExport = tk.Button(self.TNotebook1_t3)
# self.LogFracDensityExport.place(relx=0.623, rely=0.625, height=24
# , width=44)
# self.LogFracDensityExport.configure(activebackground="#ececec")
# self.LogFracDensityExport.configure(activeforeground="#000000")
# self.LogFracDensityExport.configure(background="#ffae21")
# self.LogFracDensityExport.configure(disabledforeground="#a3a3a3")
# self.LogFracDensityExport.configure(foreground="#000000")
# self.LogFracDensityExport.configure(highlightbackground="#d9d9d9")
# self.LogFracDensityExport.configure(highlightcolor="black")
# self.LogFracDensityExport.configure(pady="0")
# self.LogFracDensityExport.configure(text='''Export''')
self.Label4_4 = tk.Label(self.TNotebook1_t3, anchor='w')
self.Label4_4.place(relx=0.01, rely=0.625, height=21, width=200)
self.Label4_4.configure(activebackground="#f9f9f9")
self.Label4_4.configure(activeforeground="black")
self.Label4_4.configure(background="#86bad8")
self.Label4_4.configure(disabledforeground="#a3a3a3")
self.Label4_4.configure(foreground="#000000")
self.Label4_4.configure(highlightbackground="#d9d9d9")
self.Label4_4.configure(highlightcolor="black")
self.Label4_4.configure(text='''Fracture Density vs Multinomial''')
self.CorrToolbarFrame = tk.Frame(self.TNotebook1_t4)
self.HistToolbarFrame = tk.Frame(self.TNotebook1_t4)
self.CorrelationCanvas = tk.Canvas(self.TNotebook1_t4)
self.CorrelationCanvas.place(relx=0.029, rely=0.18, relheight=0.75
, relwidth=0.441)
self.CorrelationCanvas.configure(background="#d9d9d9")
self.CorrelationCanvas.configure(borderwidth="2")
self.CorrelationCanvas.configure(highlightbackground="#d9d9d9")
self.CorrelationCanvas.configure(highlightcolor="black")
self.CorrelationCanvas.configure(insertbackground="black")
self.CorrelationCanvas.configure(relief='ridge')
self.CorrelationCanvas.configure(selectbackground="#c4c4c4")
self.CorrelationCanvas.configure(selectforeground="black")
self.CorrelationCanvas.configure(width=125)
self.HistogramCanvas = tk.Canvas(self.TNotebook1_t4)
self.HistogramCanvas.place(relx=0.529, rely=0.18, relheight=0.75
, relwidth=0.441)
self.HistogramCanvas.configure(background="#d9d9d9")
self.HistogramCanvas.configure(borderwidth="2")
self.HistogramCanvas.configure(highlightbackground="#d9d9d9")
self.HistogramCanvas.configure(highlightcolor="black")
self.HistogramCanvas.configure(insertbackground="black")
self.HistogramCanvas.configure(relief='ridge')
self.HistogramCanvas.configure(selectbackground="#c4c4c4")
self.HistogramCanvas.configure(selectforeground="black")
self.HistogramCanvas.configure(width=125)
self.Label5_1 = tk.Label(self.TNotebook1_t4, anchor='w')
self.Label5_1.place(relx=0.025, rely=0.04, height=21, width=350)
self.Label5_1.configure(activebackground="#f9f9f9")
self.Label5_1.configure(activeforeground="black")
self.Label5_1.configure(background="#86bad8")
self.Label5_1.configure(disabledforeground="#a3a3a3")
self.Label5_1.configure(foreground="#000000")
self.Label5_1.configure(highlightbackground="#d9d9d9")
self.Label5_1.configure(highlightcolor="black")
self.Label5_1.configure(text='''Correlation of Beta and Multinomial''')
self.Label5_1.configure(font=font17)
self.PearsonLabel = tk.Label(self.TNotebook1_t4,justify='left', anchor="w")
self.PearsonLabel.place(relx=0.025, rely=0.08, height=21, width=350)
self.PearsonLabel.configure(activebackground="#f9f9f9")
self.PearsonLabel.configure(activeforeground="black")
self.PearsonLabel.configure(background="#86bad8")
self.PearsonLabel.configure(disabledforeground="#a3a3a3")
self.PearsonLabel.configure(foreground="#000000")
self.PearsonLabel.configure(highlightbackground="#d9d9d9")
self.PearsonLabel.configure(highlightcolor="black")
self.PearsonLabel.configure(text='''Pearson Correlation (Beta vs Multinomial):''')
self.SpearmanLabel = tk.Label(self.TNotebook1_t4,justify='left', anchor="w")
self.SpearmanLabel.place(relx=0.025, rely=0.12, height=21, width=350)
self.SpearmanLabel.configure(activebackground="#f9f9f9")
self.SpearmanLabel.configure(activeforeground="black")
self.SpearmanLabel.configure(background="#86bad8")
self.SpearmanLabel.configure(disabledforeground="#a3a3a3")
self.SpearmanLabel.configure(foreground="#000000")
self.SpearmanLabel.configure(highlightbackground="#d9d9d9")
self.SpearmanLabel.configure(highlightcolor="black")
self.SpearmanLabel.configure(text='''Spearman Correlation (Beta vs Multinomial):''')
self.HistogramLabel = tk.Label(self.TNotebook1_t4,anchor='w')
self.HistogramLabel.place(relx=0.529, rely=0.12, height=21, width=350)
self.HistogramLabel.configure(activebackground="#f9f9f9")
self.HistogramLabel.configure(activeforeground="black")
self.HistogramLabel.configure(background="#86bad8")
self.HistogramLabel.configure(disabledforeground="#a3a3a3")
self.HistogramLabel.configure(foreground="#000000")
self.HistogramLabel.configure(highlightbackground="#d9d9d9")
self.HistogramLabel.configure(highlightcolor="black")
self.HistogramLabel.configure(text='''Histogram of Sample vs Uniform''')
self.HistogramLabel.configure(font=font17)
self.HistogramLabel.configure(width=186)
# self.CorrelationExport = tk.Button(self.TNotebook1_t4)
# self.CorrelationExport.place(relx=0.422, rely=0.167, height=24, width=44)
# self.CorrelationExport.configure(activebackground="#ececec")
# self.CorrelationExport.configure(activeforeground="#000000")
# self.CorrelationExport.configure(background="#ffae21")
# self.CorrelationExport.configure(disabledforeground="#a3a3a3")
# self.CorrelationExport.configure(foreground="#000000")
# self.CorrelationExport.configure(highlightbackground="#d9d9d9")
# self.CorrelationExport.configure(highlightcolor="black")
# self.CorrelationExport.configure(pady="0")
# self.CorrelationExport.configure(text='''Export''')
# self.HistogramExport = tk.Button(self.TNotebook1_t4)
# self.HistogramExport.place(relx=0.922, rely=0.167, height=24, width=44)
# self.HistogramExport.configure(activebackground="#ececec")
# self.HistogramExport.configure(activeforeground="#000000")
# self.HistogramExport.configure(background="#ffae21")
# self.HistogramExport.configure(disabledforeground="#a3a3a3")
# self.HistogramExport.configure(foreground="#000000")
# self.HistogramExport.configure(highlightbackground="#d9d9d9")
# self.HistogramExport.configure(highlightcolor="black")
# self.HistogramExport.configure(pady="0")
# self.HistogramExport.configure(text='''Export''')
self.Label6_1 = tk.Label(self.TNotebook1_t5)
self.Label6_1.place(relx=0.01, rely=0.033, height=21, width=272)
self.Label6_1.configure(activebackground="#f9f9f9")
self.Label6_1.configure(activeforeground="black")
self.Label6_1.configure(background="#d9d9d9")
self.Label6_1.configure(disabledforeground="#a3a3a3")
self.Label6_1.configure(foreground="#000000")
self.Label6_1.configure(highlightbackground="#d9d9d9")
self.Label6_1.configure(highlightcolor="black")
self.Label6_1.configure(text='''Don't Need Any Settings for Now...But Just in Case''')
self.LabelSetting1 = tk.Label(self.TNotebook1_t5)
self.LabelSetting1.place(relx=0.01, rely=0.1, height=21, width=34)
self.LabelSetting1.configure(activebackground="#f9f9f9")
self.LabelSetting1.configure(activeforeground="black")
self.LabelSetting1.configure(background="#d9d9d9")
self.LabelSetting1.configure(disabledforeground="#a3a3a3")
self.LabelSetting1.configure(foreground="#000000")
self.LabelSetting1.configure(highlightbackground="#d9d9d9")
self.LabelSetting1.configure(highlightcolor="black")
self.LabelSetting1.configure(text='''Label''')
self.LabelSetting2 = tk.Label(self.TNotebook1_t5)
self.LabelSetting2.place(relx=0.01, rely=0.15, height=21, width=34)
self.LabelSetting2.configure(activebackground="#f9f9f9")
self.LabelSetting2.configure(activeforeground="black")
self.LabelSetting2.configure(background="#d9d9d9")
self.LabelSetting2.configure(disabledforeground="#a3a3a3")
self.LabelSetting2.configure(foreground="#000000")
self.LabelSetting2.configure(highlightbackground="#d9d9d9")
self.LabelSetting2.configure(highlightcolor="black")
self.LabelSetting2.configure(text='''Label''')
self.LabelSetting3 = tk.Label(self.TNotebook1_t5)
self.LabelSetting3.place(relx=0.01, rely=0.2, height=21, width=34)
self.LabelSetting3.configure(activebackground="#f9f9f9")
self.LabelSetting3.configure(activeforeground="black")
self.LabelSetting3.configure(background="#d9d9d9")
self.LabelSetting3.configure(disabledforeground="#a3a3a3")
self.LabelSetting3.configure(foreground="#000000")
self.LabelSetting3.configure(highlightbackground="#d9d9d9")
self.LabelSetting3.configure(highlightcolor="black")
self.LabelSetting3.configure(text='''Label''')
self.Label12 = tk.Label(self.TNotebook1_t5)
self.Label12.place(relx=0.01, rely=0.25, height=21, width=34)
self.Label12.configure(activebackground="#f9f9f9")
self.Label12.configure(activeforeground="black")
self.Label12.configure(background="#d9d9d9")
self.Label12.configure(disabledforeground="#a3a3a3")
self.Label12.configure(foreground="#000000")
self.Label12.configure(highlightbackground="#d9d9d9")
self.Label12.configure(highlightcolor="black")
self.Label12.configure(text='''Label''')
self.LabelSetting5 = tk.Label(self.TNotebook1_t5)
self.LabelSetting5.place(relx=0.01, rely=0.3, height=21, width=34)
self.LabelSetting5.configure(activebackground="#f9f9f9")
self.LabelSetting5.configure(activeforeground="black")
self.LabelSetting5.configure(background="#d9d9d9")
self.LabelSetting5.configure(disabledforeground="#a3a3a3")
self.LabelSetting5.configure(foreground="#000000")
self.LabelSetting5.configure(highlightbackground="#d9d9d9")
self.LabelSetting5.configure(highlightcolor="black")
self.LabelSetting5.configure(text='''Label''')
self.LabelSetting7 = tk.Label(self.TNotebook1_t5)
self.LabelSetting7.place(relx=0.01, rely=0.35, height=21, width=34)
self.LabelSetting7.configure(activebackground="#f9f9f9")
self.LabelSetting7.configure(activeforeground="black")
self.LabelSetting7.configure(background="#d9d9d9")
self.LabelSetting7.configure(disabledforeground="#a3a3a3")
self.LabelSetting7.configure(foreground="#000000")
self.LabelSetting7.configure(highlightbackground="#d9d9d9")
self.LabelSetting7.configure(highlightcolor="black")
self.LabelSetting7.configure(text='''Label''')
self.LabelSetting8_ = tk.Label(self.TNotebook1_t5)
self.LabelSetting8_.place(relx=0.01, rely=0.45, height=21, width=34)
self.LabelSetting8_.configure(activebackground="#f9f9f9")
self.LabelSetting8_.configure(activeforeground="black")
self.LabelSetting8_.configure(background="#d9d9d9")
self.LabelSetting8_.configure(disabledforeground="#a3a3a3")
self.LabelSetting8_.configure(foreground="#000000")
self.LabelSetting8_.configure(highlightbackground="#d9d9d9")
self.LabelSetting8_.configure(highlightcolor="black")
self.LabelSetting8_.configure(text='''Label''')
self.EntrySetting1 = tk.Entry(self.TNotebook1_t5)
self.EntrySetting1.place(relx=0.118, rely=0.1, height=20, relwidth=0.161)
self.EntrySetting1.configure(background="white")
self.EntrySetting1.configure(disabledforeground="#a3a3a3")
self.EntrySetting1.configure(font=font10)
self.EntrySetting1.configure(foreground="#000000")
self.EntrySetting1.configure(highlightbackground="#d9d9d9")
self.EntrySetting1.configure(highlightcolor="black")
self.EntrySetting1.configure(insertbackground="black")
self.EntrySetting1.configure(selectbackground="#c4c4c4")
self.EntrySetting1.configure(selectforeground="black")
self.EntrySetting2 = tk.Entry(self.TNotebook1_t5)
self.EntrySetting2.place(relx=0.118, rely=0.15, height=20
, relwidth=0.161)
self.EntrySetting2.configure(background="white")
self.EntrySetting2.configure(disabledforeground="#a3a3a3")
self.EntrySetting2.configure(font=font10)
self.EntrySetting2.configure(foreground="#000000")
self.EntrySetting2.configure(highlightbackground="#d9d9d9")
self.EntrySetting2.configure(highlightcolor="black")
self.EntrySetting2.configure(insertbackground="black")
self.EntrySetting2.configure(selectbackground="#c4c4c4")
self.EntrySetting2.configure(selectforeground="black")
self.EntrySetting3 = tk.Entry(self.TNotebook1_t5)
self.EntrySetting3.place(relx=0.118, rely=0.2, height=20, relwidth=0.161)
self.EntrySetting3.configure(background="white")
self.EntrySetting3.configure(disabledforeground="#a3a3a3")
self.EntrySetting3.configure(font=font10)
self.EntrySetting3.configure(foreground="#000000")
self.EntrySetting3.configure(highlightbackground="#d9d9d9")
self.EntrySetting3.configure(highlightcolor="black")
self.EntrySetting3.configure(insertbackground="black")
self.EntrySetting3.configure(selectbackground="#c4c4c4")
self.EntrySetting3.configure(selectforeground="black")
self.EntrySetting4 = tk.Entry(self.TNotebook1_t5)
self.EntrySetting4.place(relx=0.118, rely=0.25, height=20
, relwidth=0.161)
self.EntrySetting4.configure(background="white")
self.EntrySetting4.configure(disabledforeground="#a3a3a3")
self.EntrySetting4.configure(font=font10)
self.EntrySetting4.configure(foreground="#000000")
self.EntrySetting4.configure(highlightbackground="#d9d9d9")
self.EntrySetting4.configure(highlightcolor="black")
self.EntrySetting4.configure(insertbackground="black")
self.EntrySetting4.configure(selectbackground="#c4c4c4")
self.EntrySetting4.configure(selectforeground="black")
self.TComboboxSetting5 = ttk.Combobox(self.TNotebook1_t5)
self.TComboboxSetting5.place(relx=0.118, rely=0.3, relheight=0.035
, relwidth=0.14)
self.TComboboxSetting5.configure(takefocus="")
self.TComboboxSetting6 = ttk.Combobox(self.TNotebook1_t5)
self.TComboboxSetting6.place(relx=0.118, rely=0.35, relheight=0.035
, relwidth=0.14)
self.TComboboxSetting6.configure(takefocus="")
self.TCombobox3 = ttk.Combobox(self.TNotebook1_t5)
self.TCombobox3.place(relx=0.118, rely=0.4, relheight=0.035
, relwidth=0.14)
self.TCombobox3.configure(takefocus="")
self.TComboboxSetting8 = ttk.Combobox(self.TNotebook1_t5)
self.TComboboxSetting8.place(relx=0.118, rely=0.45, relheight=0.035
, relwidth=0.14)
self.TComboboxSetting8.configure(takefocus="")
self.SettingCheck1 = tk.Checkbutton(self.TNotebook1_t5)
self.SettingCheck1.place(relx=0.01, rely=0.517, relheight=0.042
, relwidth=0.06)
self.SettingCheck1.configure(activebackground="#ececec")
self.SettingCheck1.configure(activeforeground="#000000")
self.SettingCheck1.configure(background="#d9d9d9")
self.SettingCheck1.configure(disabledforeground="#a3a3a3")
self.SettingCheck1.configure(foreground="#000000")
self.SettingCheck1.configure(highlightbackground="#d9d9d9")
self.SettingCheck1.configure(highlightcolor="black")
self.SettingCheck1.configure(justify='left')
self.SettingCheck1.configure(text='''Check''')
self.SettingCheck1.configure(variable=GUI_support.che103)
self.SettingCheck2 = tk.Checkbutton(self.TNotebook1_t5)
self.SettingCheck2.place(relx=0.01, rely=0.567, relheight=0.042
, relwidth=0.06)
self.SettingCheck2.configure(activebackground="#ececec")
self.SettingCheck2.configure(activeforeground="#000000")
self.SettingCheck2.configure(background="#d9d9d9")
self.SettingCheck2.configure(disabledforeground="#a3a3a3")
self.SettingCheck2.configure(foreground="#000000")
self.SettingCheck2.configure(highlightbackground="#d9d9d9")
self.SettingCheck2.configure(highlightcolor="black")
self.SettingCheck2.configure(justify='left')
self.SettingCheck2.configure(text='''Check''')
self.SettingCheck2.configure(variable=GUI_support.che104)
self.SettingCheck3 = tk.Checkbutton(self.TNotebook1_t5)
self.SettingCheck3.place(relx=0.01, rely=0.617, relheight=0.042
, relwidth=0.06)
self.SettingCheck3.configure(activebackground="#ececec")
self.SettingCheck3.configure(activeforeground="#000000")
self.SettingCheck3.configure(background="#d9d9d9")
self.SettingCheck3.configure(disabledforeground="#a3a3a3")
self.SettingCheck3.configure(foreground="#000000")
self.SettingCheck3.configure(highlightbackground="#d9d9d9")
self.SettingCheck3.configure(highlightcolor="black")
self.SettingCheck3.configure(justify='left')
self.SettingCheck3.configure(text='''Check''')
self.SettingCheck3.configure(variable=GUI_support.che105)
self.SettingCheck4 = tk.Checkbutton(self.TNotebook1_t5)
self.SettingCheck4.place(relx=0.01, rely=0.667, relheight=0.042
, relwidth=0.06)
self.SettingCheck4.configure(activebackground="#ececec")
self.SettingCheck4.configure(activeforeground="#000000")
self.SettingCheck4.configure(background="#d9d9d9")
self.SettingCheck4.configure(disabledforeground="#a3a3a3")
self.SettingCheck4.configure(foreground="#000000")
self.SettingCheck4.configure(highlightbackground="#d9d9d9")
self.SettingCheck4.configure(highlightcolor="black")
self.SettingCheck4.configure(justify='left')
self.SettingCheck4.configure(text='''Check''')
self.SettingCheck4.configure(variable=GUI_support.che107)
self.Radiobutton1 = tk.Radiobutton(self.TNotebook1_t5)
self.Radiobutton1.place(relx=0.52, rely=0.15, relheight=0.042
, relwidth=0.057)
self.Radiobutton1.configure(activebackground="#ececec")
self.Radiobutton1.configure(activeforeground="#000000")
self.Radiobutton1.configure(background="#d9d9d9")
self.Radiobutton1.configure(disabledforeground="#a3a3a3")
self.Radiobutton1.configure(foreground="#000000")
self.Radiobutton1.configure(highlightbackground="#d9d9d9")
self.Radiobutton1.configure(highlightcolor="black")
self.Radiobutton1.configure(justify='left')
self.Radiobutton1.configure(text='''Radio''')
self.Radiobutton2 = tk.Radiobutton(self.TNotebook1_t5)
self.Radiobutton2.place(relx=0.52, rely=0.2, relheight=0.042
, relwidth=0.057)
self.Radiobutton2.configure(activebackground="#ececec")
self.Radiobutton2.configure(activeforeground="#000000")
self.Radiobutton2.configure(background="#d9d9d9")
self.Radiobutton2.configure(disabledforeground="#a3a3a3")
self.Radiobutton2.configure(foreground="#000000")
self.Radiobutton2.configure(highlightbackground="#d9d9d9")
self.Radiobutton2.configure(highlightcolor="black")
self.Radiobutton2.configure(justify='left')
self.Radiobutton2.configure(text='''Radio''')
self.Radiobutton3 = tk.Radiobutton(self.TNotebook1_t5)
self.Radiobutton3.place(relx=0.52, rely=0.25, relheight=0.042
, relwidth=0.057)
self.Radiobutton3.configure(activebackground="#ececec")
self.Radiobutton3.configure(activeforeground="#000000")
self.Radiobutton3.configure(background="#d9d9d9")
self.Radiobutton3.configure(disabledforeground="#a3a3a3")
self.Radiobutton3.configure(foreground="#000000")
self.Radiobutton3.configure(highlightbackground="#d9d9d9")
self.Radiobutton3.configure(highlightcolor="black")
self.Radiobutton3.configure(justify='left')
self.Radiobutton3.configure(text='''Radio''')
self.Radiobutton4 = tk.Radiobutton(self.TNotebook1_t5)
self.Radiobutton4.place(relx=0.52, rely=0.3, relheight=0.042
, relwidth=0.057)
self.Radiobutton4.configure(activebackground="#ececec")
self.Radiobutton4.configure(activeforeground="#000000")
self.Radiobutton4.configure(background="#d9d9d9")
self.Radiobutton4.configure(disabledforeground="#a3a3a3")
self.Radiobutton4.configure(foreground="#000000")
self.Radiobutton4.configure(highlightbackground="#d9d9d9")
self.Radiobutton4.configure(highlightcolor="black")
self.Radiobutton4.configure(justify='left')
self.Radiobutton4.configure(text='''Radio''')
self.LabelSetting7 = tk.Label(self.TNotebook1_t5)
self.LabelSetting7.place(relx=0.01, rely=0.4, height=21, width=34)
self.LabelSetting7.configure(activebackground="#f9f9f9")
self.LabelSetting7.configure(activeforeground="black")
self.LabelSetting7.configure(background="#d9d9d9")
self.LabelSetting7.configure(disabledforeground="#a3a3a3")
self.LabelSetting7.configure(foreground="#000000")
self.LabelSetting7.configure(highlightbackground="#d9d9d9")
self.LabelSetting7.configure(highlightcolor="black")
self.LabelSetting7.configure(text='''Label''')
self.Label6_10 = tk.Label(self.TNotebook1_t5)
self.Label6_10.place(relx=0.51, rely=0.117, height=21, width=34)
self.Label6_10.configure(activebackground="#f9f9f9")
self.Label6_10.configure(activeforeground="black")
self.Label6_10.configure(background="#d9d9d9")
self.Label6_10.configure(disabledforeground="#a3a3a3")
self.Label6_10.configure(foreground="#000000")
self.Label6_10.configure(highlightbackground="#d9d9d9")
self.Label6_10.configure(highlightcolor="black")
self.Label6_10.configure(text='''Label''')
self.c1_n.delete(0, 'end')
self.c1_mean.delete(0, 'end')
self.c1_sigma.delete(0, 'end')
self.c1_xmin.delete(0, 'end')
self.c1_xmax.delete(0, 'end')
self.c1_ymin.delete(0, 'end')
self.c1_ymax.delete(0, 'end')
self.c2_n.delete(0, 'end')
self.c2_mean.delete(0, 'end')
self.c2_sigma.delete(0, 'end')
self.c2_xmin.delete(0, 'end')
self.c2_xmax.delete(0, 'end')
self.c2_ymin.delete(0, 'end')
self.c2_ymax.delete(0, 'end')
self.c3_n.delete(0, 'end')
self.c3_mean.delete(0, 'end')
self.c3_sigma.delete(0, 'end')
self.c3_xmin.delete(0, 'end')
self.c3_xmax.delete(0, 'end')
self.c3_ymin.delete(0, 'end')
self.c3_ymax.delete(0, 'end')
self.c4_n.delete(0, 'end')
self.c4_mean.delete(0, 'end')
self.c4_sigma.delete(0, 'end')
self.c4_xmin.delete(0, 'end')
self.c4_xmax.delete(0, 'end')
self.c4_ymin.delete(0, 'end')
self.c4_ymax.delete(0, 'end')
self.c5_n.delete(0, 'end')
self.c5_mean.delete(0, 'end')
self.c5_sigma.delete(0, 'end')
self.c5_xmin.delete(0, 'end')
self.c5_xmax.delete(0, 'end')
self.c5_ymin.delete(0, 'end')
self.c5_ymax.delete(0, 'end')
self.c6_n.delete(0, 'end')
self.c6_mean.delete(0, 'end')
self.c6_sigma.delete(0, 'end')
self.c6_xmin.delete(0, 'end')
self.c6_xmax.delete(0, 'end')
self.c6_ymin.delete(0, 'end')
self.c6_ymax.delete(0, 'end')
self.c1_n.insert('end',0)
self.c1_mean.insert('end',0)
self.c1_sigma.insert('end',0)
self.c1_xmin.insert('end',0)
self.c1_xmax.insert('end',0)
self.c1_ymin.insert('end',0)
self.c1_ymax.insert('end',0)
self.c2_n.insert('end',0)
self.c2_mean.insert('end',0)
self.c2_sigma.insert('end',0)
self.c2_xmin.insert('end',0)
self.c2_xmax.insert('end',0)
self.c2_ymin.insert('end',0)
self.c2_ymax.insert('end',0)
self.c3_n.insert('end',0)
self.c3_mean.insert('end',0)
self.c3_sigma.insert('end',0)
self.c3_xmin.insert('end',0)
self.c3_xmax.insert('end',0)
self.c3_ymin.insert('end',0)
self.c3_ymax.insert('end',0)
self.c4_n.insert('end',0)
self.c4_mean.insert('end',0)
self.c4_sigma.insert('end',0)
self.c4_xmin.insert('end',0)
self.c4_xmax.insert('end',0)
self.c4_ymin.insert('end',0)
self.c4_ymax.insert('end',0)
self.c5_n.insert('end',0)
self.c5_mean.insert('end',0)
self.c5_sigma.insert('end',0)
self.c5_xmin.insert('end',0)
self.c5_xmax.insert('end',0)
self.c5_ymin.insert('end',0)
self.c5_ymax.insert('end',0)
self.c6_n.insert('end',0)
self.c6_mean.insert('end',0)
self.c6_sigma.insert('end',0)
self.c6_xmin.insert('end',0)
self.c6_xmax.insert('end',0)
self.c6_ymin.insert('end',0)
self.c6_ymax.insert('end',0)
###############################################################################
###############################################################################
#need to add in
self._job = None #use to tell if need to update
###############################################################################
###############################################################################
def updateValue(self, event): # get updated value of slider 1
if self._job:
self.TNotebook1_t0.after_cancel(self._job)
self._job = self.TNotebook1_t0.after(1, self._update_bar2)
###############################################################################
###############################################################################
    def keep_original(self, event):  # recompute sliders 2 and 3 from slider 1 so the grid spacing (dx) is preserved
if self.current_filename.get()!='Your Filename Here':
self.dx=(self.max_gx-self.min_gx)/self.s1.get()
self.s2.set(int(np.around((self.max_gy-self.min_gy)/self.dx)))
self.s3.set(self.s1.get()*self.s2.get())
# self.dx+=self.dx/4
# floor=(np.floor(np.log10(self.dx)))
# if floor<0:
# self.dx=np.around(self.dx+self.dx/5,decimals=int(np.abs(floor)))
# print(self.dx)
###############################################################################
###############################################################################
def _update_bar2(self): #update value of bar2
self._job = None
dx=(self.max_gx-self.min_gx)/self.s1.get()
self.s2.set(int(np.around((self.max_gy-self.min_gy)/dx)))
self.s3.set(self.s1.get()*self.s2.get())
self.plot_grid()
###############################################################################
###############################################################################
def load_file(self):
        '''
        -----------------------------------------------------------------------
        This next section of code handles reading in a sample file. You can
        choose a csv, a tab-delimited text file, or an .xls/.xlsx Excel file.
        Some errors are raised if there is a problem with the file, but the
        checks are not exhaustive.
        -----------------------------------------------------------------------
        '''
if self.use_rand_val.get()==1:
self.sample=self.random_sample.copy()
else:
###############################################
            #activate this in the final version. easier to hardcode for now
# filename = tk.filedialog.askopenfilename(title = "Select X, Y, Strike data") # Open single file
self.current_filename.set(tk.filedialog.askopenfilename(title = "Select X, Y, Strike data") )
################################################
if ".txt" in self.current_filename.get() or ".csv" in self.current_filename.get():
# print(type(self.current_filename.get()))
with open(self.current_filename.get()) as self.f:
try:
self.content = self.f.read()
except IndexError as err:
messagebox.showerror("File Type Error", "Selected File Is Not of the Correct Type \n\n Select One of The Following Types [.csv, .txt, .xlsx, .xls] \n\n This is the actual error for debugging purposes: \n{}".format(err))
if '\t' in self.content:
self.sample=pd.read_csv(self.current_filename.get(), sep='\t').values.T
else:
self.sample=pd.read_csv(self.current_filename.get()).values.T
elif ".xls" in self.current_filename.get() or ".xlsx" in self.current_filename.get():
                try:
                    self.sample=pd.read_excel(self.current_filename.get()).values.T
                except Exception as err:
                    messagebox.showerror("File Error", "An Unknown Error Occurred Loading an Excel File \n\n This is the actual error for debugging purposes: \n{}".format(err))
else:
messagebox.showerror("File Error", "An Unknown Error Occurred Loading File \n\n Check That File Is Correct Type \n\n Select One of The Following Types [.csv, .txt, .xlsx, .xls]")
        '''
        7/10/20
        -----------------------------------
        Ideally I would eventually add something here to actually make sure
        you got usable data in; a minimal sketch of such a check follows the
        commented-out attempt below.
        '''
# try:
# self.col_len=len(self.sample[:,0])
# if len(self.sample)==0:
# raise Exception('Your filetype appears to ".csv" or ".txt" but no tab delimited or comma delimited data was found')
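        # A minimal sketch of the sanity check described above -- an assumed
        # illustration, not the original author's implementation. It assumes
        # self.sample is a 2-D array whose first three rows are x, y, and
        # strike after the transpose performed by the loaders above.
        try:
            if self.sample.ndim != 2 or self.sample.shape[0] < 3 or self.sample.shape[1] == 0:
                raise ValueError('no x, y, strike columns were found in the selected file')
        except (AttributeError, ValueError) as err:
            messagebox.showerror("File Content Error",
                                 "The selected file did not contain usable x, y, strike data.\n\n"
                                 "This is the actual error for debugging purposes:\n{}".format(err))
            return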
        '''
        7/10/20
        --------------
        Commenting code from about a year ago. The following section handles
        the creation of a grid around the sample points. The idea is to expand
        the grid to be 1% larger than the maximum extents of the sample data.
        I had to modify this on 7/10/20 because the old method didn't work if
        the scale of the data was a lot smaller than the overall scale being
        used (like UTM).
        '''
self.bulk_mean_val.set(np.round(np.median(self.sample[2,:]), decimals=1))
self.bulk_sigma_val.set(np.round(np.std(self.sample[2,:]), decimals=1))
self.recommend_theta()
self.bin_center.set(self.bulk_mean_val.get())
x=self.sample[0]
y=self.sample[1]
self.min_x=min(x)
self.min_y=min(y)
self.max_x=max(x)
self.max_y=max(y)
self.x_scale=self.max_x-self.min_x
self.y_scale=self.max_y-self.min_y
self.min_gx=min(x)-0.01*self.x_scale
self.min_gy=min(y)-0.01*self.y_scale
self.max_gx=max(x)+0.01*self.x_scale
self.max_gy=max(y)+0.01*self.y_scale
self.dx=(self.max_gx-self.min_gx)/self.s1.get()
# self.dy=(self.max_gy-self.min_gy)/self.s2.get()
self.plot_bin_spin()
self.plot_grid()
###############################################################################
###############################################################################
def recommend_theta(self):
theta=np.round(4*self.bulk_sigma_val.get()/self.neighbors_val.get(),decimals=1)
self.Label17.config(text=theta)
#hold
###############################################################################
###############################################################################
def quitit(self):
root.quit()
root.destroy()
sys.exit("Script No Longer Running")
###############################################################################
###############################################################################
def plotbins(self):
plot_bins=[0,30,60,90,120,150,180,210,240,270,300,330,360]
if self.custom_bin_check.get()==1:
plot_bins=np.deg2rad(np.array(plot_bins))